1 /*- 2 * Copyright (c) 2009 The FreeBSD Foundation 3 * Copyright (c) 2010 Pawel Jakub Dawidek <pjd@FreeBSD.org> 4 * All rights reserved. 5 * 6 * This software was developed by Pawel Jakub Dawidek under sponsorship from 7 * the FreeBSD Foundation. 8 * 9 * Redistribution and use in source and binary forms, with or without 10 * modification, are permitted provided that the following conditions 11 * are met: 12 * 1. Redistributions of source code must retain the above copyright 13 * notice, this list of conditions and the following disclaimer. 14 * 2. Redistributions in binary form must reproduce the above copyright 15 * notice, this list of conditions and the following disclaimer in the 16 * documentation and/or other materials provided with the distribution. 17 * 18 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND 19 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 21 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE 22 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 23 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 24 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 25 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 26 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 27 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 28 * SUCH DAMAGE. 29 */ 30 31 #include <sys/cdefs.h> 32 __FBSDID("$FreeBSD$"); 33 34 #include <sys/types.h> 35 #include <sys/time.h> 36 #include <sys/bio.h> 37 #include <sys/disk.h> 38 #include <sys/refcount.h> 39 #include <sys/stat.h> 40 41 #include <geom/gate/g_gate.h> 42 43 #include <assert.h> 44 #include <err.h> 45 #include <errno.h> 46 #include <fcntl.h> 47 #include <libgeom.h> 48 #include <pthread.h> 49 #include <signal.h> 50 #include <stdint.h> 51 #include <stdio.h> 52 #include <string.h> 53 #include <sysexits.h> 54 #include <unistd.h> 55 56 #include <activemap.h> 57 #include <nv.h> 58 #include <rangelock.h> 59 60 #include "control.h" 61 #include "event.h" 62 #include "hast.h" 63 #include "hast_proto.h" 64 #include "hastd.h" 65 #include "hooks.h" 66 #include "metadata.h" 67 #include "proto.h" 68 #include "pjdlog.h" 69 #include "subr.h" 70 #include "synch.h" 71 72 /* The is only one remote component for now. */ 73 #define ISREMOTE(no) ((no) == 1) 74 75 struct hio { 76 /* 77 * Number of components we are still waiting for. 78 * When this field goes to 0, we can send the request back to the 79 * kernel. Each component has to decrease this counter by one 80 * even on failure. 81 */ 82 unsigned int hio_countdown; 83 /* 84 * Each component has a place to store its own error. 85 * Once the request is handled by all components we can decide if the 86 * request overall is successful or not. 87 */ 88 int *hio_errors; 89 /* 90 * Structure used to comunicate with GEOM Gate class. 91 */ 92 struct g_gate_ctl_io hio_ggio; 93 TAILQ_ENTRY(hio) *hio_next; 94 }; 95 #define hio_free_next hio_next[0] 96 #define hio_done_next hio_next[0] 97 98 /* 99 * Free list holds unused structures. When free list is empty, we have to wait 100 * until some in-progress requests are freed. 101 */ 102 static TAILQ_HEAD(, hio) hio_free_list; 103 static pthread_mutex_t hio_free_list_lock; 104 static pthread_cond_t hio_free_list_cond; 105 /* 106 * There is one send list for every component. One requests is placed on all 107 * send lists - each component gets the same request, but each component is 108 * responsible for managing his own send list. 109 */ 110 static TAILQ_HEAD(, hio) *hio_send_list; 111 static pthread_mutex_t *hio_send_list_lock; 112 static pthread_cond_t *hio_send_list_cond; 113 /* 114 * There is one recv list for every component, although local components don't 115 * use recv lists as local requests are done synchronously. 116 */ 117 static TAILQ_HEAD(, hio) *hio_recv_list; 118 static pthread_mutex_t *hio_recv_list_lock; 119 static pthread_cond_t *hio_recv_list_cond; 120 /* 121 * Request is placed on done list by the slowest component (the one that 122 * decreased hio_countdown from 1 to 0). 123 */ 124 static TAILQ_HEAD(, hio) hio_done_list; 125 static pthread_mutex_t hio_done_list_lock; 126 static pthread_cond_t hio_done_list_cond; 127 /* 128 * Structure below are for interaction with sync thread. 129 */ 130 static bool sync_inprogress; 131 static pthread_mutex_t sync_lock; 132 static pthread_cond_t sync_cond; 133 /* 134 * The lock below allows to synchornize access to remote connections. 135 */ 136 static pthread_rwlock_t *hio_remote_lock; 137 138 /* 139 * Lock to synchronize metadata updates. Also synchronize access to 140 * hr_primary_localcnt and hr_primary_remotecnt fields. 141 */ 142 static pthread_mutex_t metadata_lock; 143 144 /* 145 * Maximum number of outstanding I/O requests. 146 */ 147 #define HAST_HIO_MAX 256 148 /* 149 * Number of components. At this point there are only two components: local 150 * and remote, but in the future it might be possible to use multiple local 151 * and remote components. 152 */ 153 #define HAST_NCOMPONENTS 2 154 /* 155 * Number of seconds to sleep between reconnect retries or keepalive packets. 156 */ 157 #define RETRY_SLEEP 10 158 159 #define ISCONNECTED(res, no) \ 160 ((res)->hr_remotein != NULL && (res)->hr_remoteout != NULL) 161 162 #define QUEUE_INSERT1(hio, name, ncomp) do { \ 163 bool _wakeup; \ 164 \ 165 mtx_lock(&hio_##name##_list_lock[(ncomp)]); \ 166 _wakeup = TAILQ_EMPTY(&hio_##name##_list[(ncomp)]); \ 167 TAILQ_INSERT_TAIL(&hio_##name##_list[(ncomp)], (hio), \ 168 hio_next[(ncomp)]); \ 169 mtx_unlock(&hio_##name##_list_lock[ncomp]); \ 170 if (_wakeup) \ 171 cv_signal(&hio_##name##_list_cond[(ncomp)]); \ 172 } while (0) 173 #define QUEUE_INSERT2(hio, name) do { \ 174 bool _wakeup; \ 175 \ 176 mtx_lock(&hio_##name##_list_lock); \ 177 _wakeup = TAILQ_EMPTY(&hio_##name##_list); \ 178 TAILQ_INSERT_TAIL(&hio_##name##_list, (hio), hio_##name##_next);\ 179 mtx_unlock(&hio_##name##_list_lock); \ 180 if (_wakeup) \ 181 cv_signal(&hio_##name##_list_cond); \ 182 } while (0) 183 #define QUEUE_TAKE1(hio, name, ncomp, timeout) do { \ 184 bool _last; \ 185 \ 186 mtx_lock(&hio_##name##_list_lock[(ncomp)]); \ 187 _last = false; \ 188 while (((hio) = TAILQ_FIRST(&hio_##name##_list[(ncomp)])) == NULL && !_last) { \ 189 cv_timedwait(&hio_##name##_list_cond[(ncomp)], \ 190 &hio_##name##_list_lock[(ncomp)], (timeout)); \ 191 if ((timeout) != 0) \ 192 _last = true; \ 193 } \ 194 if (hio != NULL) { \ 195 TAILQ_REMOVE(&hio_##name##_list[(ncomp)], (hio), \ 196 hio_next[(ncomp)]); \ 197 } \ 198 mtx_unlock(&hio_##name##_list_lock[(ncomp)]); \ 199 } while (0) 200 #define QUEUE_TAKE2(hio, name) do { \ 201 mtx_lock(&hio_##name##_list_lock); \ 202 while (((hio) = TAILQ_FIRST(&hio_##name##_list)) == NULL) { \ 203 cv_wait(&hio_##name##_list_cond, \ 204 &hio_##name##_list_lock); \ 205 } \ 206 TAILQ_REMOVE(&hio_##name##_list, (hio), hio_##name##_next); \ 207 mtx_unlock(&hio_##name##_list_lock); \ 208 } while (0) 209 210 #define SYNCREQ(hio) do { \ 211 (hio)->hio_ggio.gctl_unit = -1; \ 212 (hio)->hio_ggio.gctl_seq = 1; \ 213 } while (0) 214 #define ISSYNCREQ(hio) ((hio)->hio_ggio.gctl_unit == -1) 215 #define SYNCREQDONE(hio) do { (hio)->hio_ggio.gctl_unit = -2; } while (0) 216 #define ISSYNCREQDONE(hio) ((hio)->hio_ggio.gctl_unit == -2) 217 218 static struct hast_resource *gres; 219 220 static pthread_mutex_t range_lock; 221 static struct rangelocks *range_regular; 222 static bool range_regular_wait; 223 static pthread_cond_t range_regular_cond; 224 static struct rangelocks *range_sync; 225 static bool range_sync_wait; 226 static pthread_cond_t range_sync_cond; 227 228 static void *ggate_recv_thread(void *arg); 229 static void *local_send_thread(void *arg); 230 static void *remote_send_thread(void *arg); 231 static void *remote_recv_thread(void *arg); 232 static void *ggate_send_thread(void *arg); 233 static void *sync_thread(void *arg); 234 static void *guard_thread(void *arg); 235 236 static void 237 cleanup(struct hast_resource *res) 238 { 239 int rerrno; 240 241 /* Remember errno. */ 242 rerrno = errno; 243 244 /* Destroy ggate provider if we created one. */ 245 if (res->hr_ggateunit >= 0) { 246 struct g_gate_ctl_destroy ggiod; 247 248 bzero(&ggiod, sizeof(ggiod)); 249 ggiod.gctl_version = G_GATE_VERSION; 250 ggiod.gctl_unit = res->hr_ggateunit; 251 ggiod.gctl_force = 1; 252 if (ioctl(res->hr_ggatefd, G_GATE_CMD_DESTROY, &ggiod) < 0) { 253 pjdlog_errno(LOG_WARNING, 254 "Unable to destroy hast/%s device", 255 res->hr_provname); 256 } 257 res->hr_ggateunit = -1; 258 } 259 260 /* Restore errno. */ 261 errno = rerrno; 262 } 263 264 static __dead2 void 265 primary_exit(int exitcode, const char *fmt, ...) 266 { 267 va_list ap; 268 269 assert(exitcode != EX_OK); 270 va_start(ap, fmt); 271 pjdlogv_errno(LOG_ERR, fmt, ap); 272 va_end(ap); 273 cleanup(gres); 274 exit(exitcode); 275 } 276 277 static __dead2 void 278 primary_exitx(int exitcode, const char *fmt, ...) 279 { 280 va_list ap; 281 282 va_start(ap, fmt); 283 pjdlogv(exitcode == EX_OK ? LOG_INFO : LOG_ERR, fmt, ap); 284 va_end(ap); 285 cleanup(gres); 286 exit(exitcode); 287 } 288 289 static int 290 hast_activemap_flush(struct hast_resource *res) 291 { 292 const unsigned char *buf; 293 size_t size; 294 295 buf = activemap_bitmap(res->hr_amp, &size); 296 assert(buf != NULL); 297 assert((size % res->hr_local_sectorsize) == 0); 298 if (pwrite(res->hr_localfd, buf, size, METADATA_SIZE) != 299 (ssize_t)size) { 300 KEEP_ERRNO(pjdlog_errno(LOG_ERR, 301 "Unable to flush activemap to disk")); 302 return (-1); 303 } 304 return (0); 305 } 306 307 static bool 308 real_remote(const struct hast_resource *res) 309 { 310 311 return (strcmp(res->hr_remoteaddr, "none") != 0); 312 } 313 314 static void 315 init_environment(struct hast_resource *res __unused) 316 { 317 struct hio *hio; 318 unsigned int ii, ncomps; 319 320 /* 321 * In the future it might be per-resource value. 322 */ 323 ncomps = HAST_NCOMPONENTS; 324 325 /* 326 * Allocate memory needed by lists. 327 */ 328 hio_send_list = malloc(sizeof(hio_send_list[0]) * ncomps); 329 if (hio_send_list == NULL) { 330 primary_exitx(EX_TEMPFAIL, 331 "Unable to allocate %zu bytes of memory for send lists.", 332 sizeof(hio_send_list[0]) * ncomps); 333 } 334 hio_send_list_lock = malloc(sizeof(hio_send_list_lock[0]) * ncomps); 335 if (hio_send_list_lock == NULL) { 336 primary_exitx(EX_TEMPFAIL, 337 "Unable to allocate %zu bytes of memory for send list locks.", 338 sizeof(hio_send_list_lock[0]) * ncomps); 339 } 340 hio_send_list_cond = malloc(sizeof(hio_send_list_cond[0]) * ncomps); 341 if (hio_send_list_cond == NULL) { 342 primary_exitx(EX_TEMPFAIL, 343 "Unable to allocate %zu bytes of memory for send list condition variables.", 344 sizeof(hio_send_list_cond[0]) * ncomps); 345 } 346 hio_recv_list = malloc(sizeof(hio_recv_list[0]) * ncomps); 347 if (hio_recv_list == NULL) { 348 primary_exitx(EX_TEMPFAIL, 349 "Unable to allocate %zu bytes of memory for recv lists.", 350 sizeof(hio_recv_list[0]) * ncomps); 351 } 352 hio_recv_list_lock = malloc(sizeof(hio_recv_list_lock[0]) * ncomps); 353 if (hio_recv_list_lock == NULL) { 354 primary_exitx(EX_TEMPFAIL, 355 "Unable to allocate %zu bytes of memory for recv list locks.", 356 sizeof(hio_recv_list_lock[0]) * ncomps); 357 } 358 hio_recv_list_cond = malloc(sizeof(hio_recv_list_cond[0]) * ncomps); 359 if (hio_recv_list_cond == NULL) { 360 primary_exitx(EX_TEMPFAIL, 361 "Unable to allocate %zu bytes of memory for recv list condition variables.", 362 sizeof(hio_recv_list_cond[0]) * ncomps); 363 } 364 hio_remote_lock = malloc(sizeof(hio_remote_lock[0]) * ncomps); 365 if (hio_remote_lock == NULL) { 366 primary_exitx(EX_TEMPFAIL, 367 "Unable to allocate %zu bytes of memory for remote connections locks.", 368 sizeof(hio_remote_lock[0]) * ncomps); 369 } 370 371 /* 372 * Initialize lists, their locks and theirs condition variables. 373 */ 374 TAILQ_INIT(&hio_free_list); 375 mtx_init(&hio_free_list_lock); 376 cv_init(&hio_free_list_cond); 377 for (ii = 0; ii < HAST_NCOMPONENTS; ii++) { 378 TAILQ_INIT(&hio_send_list[ii]); 379 mtx_init(&hio_send_list_lock[ii]); 380 cv_init(&hio_send_list_cond[ii]); 381 TAILQ_INIT(&hio_recv_list[ii]); 382 mtx_init(&hio_recv_list_lock[ii]); 383 cv_init(&hio_recv_list_cond[ii]); 384 rw_init(&hio_remote_lock[ii]); 385 } 386 TAILQ_INIT(&hio_done_list); 387 mtx_init(&hio_done_list_lock); 388 cv_init(&hio_done_list_cond); 389 mtx_init(&metadata_lock); 390 391 /* 392 * Allocate requests pool and initialize requests. 393 */ 394 for (ii = 0; ii < HAST_HIO_MAX; ii++) { 395 hio = malloc(sizeof(*hio)); 396 if (hio == NULL) { 397 primary_exitx(EX_TEMPFAIL, 398 "Unable to allocate %zu bytes of memory for hio request.", 399 sizeof(*hio)); 400 } 401 hio->hio_countdown = 0; 402 hio->hio_errors = malloc(sizeof(hio->hio_errors[0]) * ncomps); 403 if (hio->hio_errors == NULL) { 404 primary_exitx(EX_TEMPFAIL, 405 "Unable allocate %zu bytes of memory for hio errors.", 406 sizeof(hio->hio_errors[0]) * ncomps); 407 } 408 hio->hio_next = malloc(sizeof(hio->hio_next[0]) * ncomps); 409 if (hio->hio_next == NULL) { 410 primary_exitx(EX_TEMPFAIL, 411 "Unable allocate %zu bytes of memory for hio_next field.", 412 sizeof(hio->hio_next[0]) * ncomps); 413 } 414 hio->hio_ggio.gctl_version = G_GATE_VERSION; 415 hio->hio_ggio.gctl_data = malloc(MAXPHYS); 416 if (hio->hio_ggio.gctl_data == NULL) { 417 primary_exitx(EX_TEMPFAIL, 418 "Unable to allocate %zu bytes of memory for gctl_data.", 419 MAXPHYS); 420 } 421 hio->hio_ggio.gctl_length = MAXPHYS; 422 hio->hio_ggio.gctl_error = 0; 423 TAILQ_INSERT_HEAD(&hio_free_list, hio, hio_free_next); 424 } 425 } 426 427 static bool 428 init_resuid(struct hast_resource *res) 429 { 430 431 mtx_lock(&metadata_lock); 432 if (res->hr_resuid != 0) { 433 mtx_unlock(&metadata_lock); 434 return (false); 435 } else { 436 /* Initialize unique resource identifier. */ 437 arc4random_buf(&res->hr_resuid, sizeof(res->hr_resuid)); 438 mtx_unlock(&metadata_lock); 439 if (metadata_write(res) < 0) 440 exit(EX_NOINPUT); 441 return (true); 442 } 443 } 444 445 static void 446 init_local(struct hast_resource *res) 447 { 448 unsigned char *buf; 449 size_t mapsize; 450 451 if (metadata_read(res, true) < 0) 452 exit(EX_NOINPUT); 453 mtx_init(&res->hr_amp_lock); 454 if (activemap_init(&res->hr_amp, res->hr_datasize, res->hr_extentsize, 455 res->hr_local_sectorsize, res->hr_keepdirty) < 0) { 456 primary_exit(EX_TEMPFAIL, "Unable to create activemap"); 457 } 458 mtx_init(&range_lock); 459 cv_init(&range_regular_cond); 460 if (rangelock_init(&range_regular) < 0) 461 primary_exit(EX_TEMPFAIL, "Unable to create regular range lock"); 462 cv_init(&range_sync_cond); 463 if (rangelock_init(&range_sync) < 0) 464 primary_exit(EX_TEMPFAIL, "Unable to create sync range lock"); 465 mapsize = activemap_ondisk_size(res->hr_amp); 466 buf = calloc(1, mapsize); 467 if (buf == NULL) { 468 primary_exitx(EX_TEMPFAIL, 469 "Unable to allocate buffer for activemap."); 470 } 471 if (pread(res->hr_localfd, buf, mapsize, METADATA_SIZE) != 472 (ssize_t)mapsize) { 473 primary_exit(EX_NOINPUT, "Unable to read activemap"); 474 } 475 activemap_copyin(res->hr_amp, buf, mapsize); 476 free(buf); 477 if (res->hr_resuid != 0) 478 return; 479 /* 480 * We're using provider for the first time. Initialize local and remote 481 * counters. We don't initialize resuid here, as we want to do it just 482 * in time. The reason for this is that we want to inform secondary 483 * that there were no writes yet, so there is no need to synchronize 484 * anything. 485 */ 486 res->hr_primary_localcnt = 1; 487 res->hr_primary_remotecnt = 0; 488 if (metadata_write(res) < 0) 489 exit(EX_NOINPUT); 490 } 491 492 static bool 493 init_remote(struct hast_resource *res, struct proto_conn **inp, 494 struct proto_conn **outp) 495 { 496 struct proto_conn *in, *out; 497 struct nv *nvout, *nvin; 498 const unsigned char *token; 499 unsigned char *map; 500 const char *errmsg; 501 int32_t extentsize; 502 int64_t datasize; 503 uint32_t mapsize; 504 size_t size; 505 506 assert((inp == NULL && outp == NULL) || (inp != NULL && outp != NULL)); 507 assert(real_remote(res)); 508 509 in = out = NULL; 510 errmsg = NULL; 511 512 /* Prepare outgoing connection with remote node. */ 513 if (proto_client(res->hr_remoteaddr, &out) < 0) { 514 primary_exit(EX_TEMPFAIL, "Unable to create connection to %s", 515 res->hr_remoteaddr); 516 } 517 /* Try to connect, but accept failure. */ 518 if (proto_connect(out) < 0) { 519 pjdlog_errno(LOG_WARNING, "Unable to connect to %s", 520 res->hr_remoteaddr); 521 goto close; 522 } 523 /* Error in setting timeout is not critical, but why should it fail? */ 524 if (proto_timeout(out, res->hr_timeout) < 0) 525 pjdlog_errno(LOG_WARNING, "Unable to set connection timeout"); 526 /* 527 * First handshake step. 528 * Setup outgoing connection with remote node. 529 */ 530 nvout = nv_alloc(); 531 nv_add_string(nvout, res->hr_name, "resource"); 532 if (nv_error(nvout) != 0) { 533 pjdlog_common(LOG_WARNING, 0, nv_error(nvout), 534 "Unable to allocate header for connection with %s", 535 res->hr_remoteaddr); 536 nv_free(nvout); 537 goto close; 538 } 539 if (hast_proto_send(res, out, nvout, NULL, 0) < 0) { 540 pjdlog_errno(LOG_WARNING, 541 "Unable to send handshake header to %s", 542 res->hr_remoteaddr); 543 nv_free(nvout); 544 goto close; 545 } 546 nv_free(nvout); 547 if (hast_proto_recv_hdr(out, &nvin) < 0) { 548 pjdlog_errno(LOG_WARNING, 549 "Unable to receive handshake header from %s", 550 res->hr_remoteaddr); 551 goto close; 552 } 553 errmsg = nv_get_string(nvin, "errmsg"); 554 if (errmsg != NULL) { 555 pjdlog_warning("%s", errmsg); 556 nv_free(nvin); 557 goto close; 558 } 559 token = nv_get_uint8_array(nvin, &size, "token"); 560 if (token == NULL) { 561 pjdlog_warning("Handshake header from %s has no 'token' field.", 562 res->hr_remoteaddr); 563 nv_free(nvin); 564 goto close; 565 } 566 if (size != sizeof(res->hr_token)) { 567 pjdlog_warning("Handshake header from %s contains 'token' of wrong size (got %zu, expected %zu).", 568 res->hr_remoteaddr, size, sizeof(res->hr_token)); 569 nv_free(nvin); 570 goto close; 571 } 572 bcopy(token, res->hr_token, sizeof(res->hr_token)); 573 nv_free(nvin); 574 575 /* 576 * Second handshake step. 577 * Setup incoming connection with remote node. 578 */ 579 if (proto_client(res->hr_remoteaddr, &in) < 0) { 580 pjdlog_errno(LOG_WARNING, "Unable to create connection to %s", 581 res->hr_remoteaddr); 582 } 583 /* Try to connect, but accept failure. */ 584 if (proto_connect(in) < 0) { 585 pjdlog_errno(LOG_WARNING, "Unable to connect to %s", 586 res->hr_remoteaddr); 587 goto close; 588 } 589 /* Error in setting timeout is not critical, but why should it fail? */ 590 if (proto_timeout(in, res->hr_timeout) < 0) 591 pjdlog_errno(LOG_WARNING, "Unable to set connection timeout"); 592 nvout = nv_alloc(); 593 nv_add_string(nvout, res->hr_name, "resource"); 594 nv_add_uint8_array(nvout, res->hr_token, sizeof(res->hr_token), 595 "token"); 596 if (res->hr_resuid == 0) { 597 /* 598 * The resuid field was not yet initialized. 599 * Because we do synchronization inside init_resuid(), it is 600 * possible that someone already initialized it, the function 601 * will return false then, but if we successfully initialized 602 * it, we will get true. True means that there were no writes 603 * to this resource yet and we want to inform secondary that 604 * synchronization is not needed by sending "virgin" argument. 605 */ 606 if (init_resuid(res)) 607 nv_add_int8(nvout, 1, "virgin"); 608 } 609 nv_add_uint64(nvout, res->hr_resuid, "resuid"); 610 nv_add_uint64(nvout, res->hr_primary_localcnt, "localcnt"); 611 nv_add_uint64(nvout, res->hr_primary_remotecnt, "remotecnt"); 612 if (nv_error(nvout) != 0) { 613 pjdlog_common(LOG_WARNING, 0, nv_error(nvout), 614 "Unable to allocate header for connection with %s", 615 res->hr_remoteaddr); 616 nv_free(nvout); 617 goto close; 618 } 619 if (hast_proto_send(res, in, nvout, NULL, 0) < 0) { 620 pjdlog_errno(LOG_WARNING, 621 "Unable to send handshake header to %s", 622 res->hr_remoteaddr); 623 nv_free(nvout); 624 goto close; 625 } 626 nv_free(nvout); 627 if (hast_proto_recv_hdr(out, &nvin) < 0) { 628 pjdlog_errno(LOG_WARNING, 629 "Unable to receive handshake header from %s", 630 res->hr_remoteaddr); 631 goto close; 632 } 633 errmsg = nv_get_string(nvin, "errmsg"); 634 if (errmsg != NULL) { 635 pjdlog_warning("%s", errmsg); 636 nv_free(nvin); 637 goto close; 638 } 639 datasize = nv_get_int64(nvin, "datasize"); 640 if (datasize != res->hr_datasize) { 641 pjdlog_warning("Data size differs between nodes (local=%jd, remote=%jd).", 642 (intmax_t)res->hr_datasize, (intmax_t)datasize); 643 nv_free(nvin); 644 goto close; 645 } 646 extentsize = nv_get_int32(nvin, "extentsize"); 647 if (extentsize != res->hr_extentsize) { 648 pjdlog_warning("Extent size differs between nodes (local=%zd, remote=%zd).", 649 (ssize_t)res->hr_extentsize, (ssize_t)extentsize); 650 nv_free(nvin); 651 goto close; 652 } 653 res->hr_secondary_localcnt = nv_get_uint64(nvin, "localcnt"); 654 res->hr_secondary_remotecnt = nv_get_uint64(nvin, "remotecnt"); 655 res->hr_syncsrc = nv_get_uint8(nvin, "syncsrc"); 656 map = NULL; 657 mapsize = nv_get_uint32(nvin, "mapsize"); 658 if (mapsize > 0) { 659 map = malloc(mapsize); 660 if (map == NULL) { 661 pjdlog_error("Unable to allocate memory for remote activemap (mapsize=%ju).", 662 (uintmax_t)mapsize); 663 nv_free(nvin); 664 goto close; 665 } 666 /* 667 * Remote node have some dirty extents on its own, lets 668 * download its activemap. 669 */ 670 if (hast_proto_recv_data(res, out, nvin, map, 671 mapsize) < 0) { 672 pjdlog_errno(LOG_ERR, 673 "Unable to receive remote activemap"); 674 nv_free(nvin); 675 free(map); 676 goto close; 677 } 678 /* 679 * Merge local and remote bitmaps. 680 */ 681 activemap_merge(res->hr_amp, map, mapsize); 682 free(map); 683 /* 684 * Now that we merged bitmaps from both nodes, flush it to the 685 * disk before we start to synchronize. 686 */ 687 (void)hast_activemap_flush(res); 688 } 689 nv_free(nvin); 690 pjdlog_info("Connected to %s.", res->hr_remoteaddr); 691 if (inp != NULL && outp != NULL) { 692 *inp = in; 693 *outp = out; 694 } else { 695 res->hr_remotein = in; 696 res->hr_remoteout = out; 697 } 698 event_send(res, EVENT_CONNECT); 699 return (true); 700 close: 701 if (errmsg != NULL && strcmp(errmsg, "Split-brain condition!") == 0) 702 event_send(res, EVENT_SPLITBRAIN); 703 proto_close(out); 704 if (in != NULL) 705 proto_close(in); 706 return (false); 707 } 708 709 static void 710 sync_start(void) 711 { 712 713 mtx_lock(&sync_lock); 714 sync_inprogress = true; 715 mtx_unlock(&sync_lock); 716 cv_signal(&sync_cond); 717 } 718 719 static void 720 sync_stop(void) 721 { 722 723 mtx_lock(&sync_lock); 724 if (sync_inprogress) 725 sync_inprogress = false; 726 mtx_unlock(&sync_lock); 727 } 728 729 static void 730 init_ggate(struct hast_resource *res) 731 { 732 struct g_gate_ctl_create ggiocreate; 733 struct g_gate_ctl_cancel ggiocancel; 734 735 /* 736 * We communicate with ggate via /dev/ggctl. Open it. 737 */ 738 res->hr_ggatefd = open("/dev/" G_GATE_CTL_NAME, O_RDWR); 739 if (res->hr_ggatefd < 0) 740 primary_exit(EX_OSFILE, "Unable to open /dev/" G_GATE_CTL_NAME); 741 /* 742 * Create provider before trying to connect, as connection failure 743 * is not critical, but may take some time. 744 */ 745 bzero(&ggiocreate, sizeof(ggiocreate)); 746 ggiocreate.gctl_version = G_GATE_VERSION; 747 ggiocreate.gctl_mediasize = res->hr_datasize; 748 ggiocreate.gctl_sectorsize = res->hr_local_sectorsize; 749 ggiocreate.gctl_flags = 0; 750 ggiocreate.gctl_maxcount = G_GATE_MAX_QUEUE_SIZE; 751 ggiocreate.gctl_timeout = 0; 752 ggiocreate.gctl_unit = G_GATE_NAME_GIVEN; 753 snprintf(ggiocreate.gctl_name, sizeof(ggiocreate.gctl_name), "hast/%s", 754 res->hr_provname); 755 if (ioctl(res->hr_ggatefd, G_GATE_CMD_CREATE, &ggiocreate) == 0) { 756 pjdlog_info("Device hast/%s created.", res->hr_provname); 757 res->hr_ggateunit = ggiocreate.gctl_unit; 758 return; 759 } 760 if (errno != EEXIST) { 761 primary_exit(EX_OSERR, "Unable to create hast/%s device", 762 res->hr_provname); 763 } 764 pjdlog_debug(1, 765 "Device hast/%s already exists, we will try to take it over.", 766 res->hr_provname); 767 /* 768 * If we received EEXIST, we assume that the process who created the 769 * provider died and didn't clean up. In that case we will start from 770 * where he left of. 771 */ 772 bzero(&ggiocancel, sizeof(ggiocancel)); 773 ggiocancel.gctl_version = G_GATE_VERSION; 774 ggiocancel.gctl_unit = G_GATE_NAME_GIVEN; 775 snprintf(ggiocancel.gctl_name, sizeof(ggiocancel.gctl_name), "hast/%s", 776 res->hr_provname); 777 if (ioctl(res->hr_ggatefd, G_GATE_CMD_CANCEL, &ggiocancel) == 0) { 778 pjdlog_info("Device hast/%s recovered.", res->hr_provname); 779 res->hr_ggateunit = ggiocancel.gctl_unit; 780 return; 781 } 782 primary_exit(EX_OSERR, "Unable to take over hast/%s device", 783 res->hr_provname); 784 } 785 786 void 787 hastd_primary(struct hast_resource *res) 788 { 789 pthread_t td; 790 pid_t pid; 791 int error; 792 793 /* 794 * Create communication channel between parent and child. 795 */ 796 if (proto_client("socketpair://", &res->hr_ctrl) < 0) { 797 KEEP_ERRNO((void)pidfile_remove(pfh)); 798 pjdlog_exit(EX_OSERR, 799 "Unable to create control sockets between parent and child"); 800 } 801 /* 802 * Create communication channel between child and parent. 803 */ 804 if (proto_client("socketpair://", &res->hr_event) < 0) { 805 KEEP_ERRNO((void)pidfile_remove(pfh)); 806 pjdlog_exit(EX_OSERR, 807 "Unable to create event sockets between child and parent"); 808 } 809 810 pid = fork(); 811 if (pid < 0) { 812 KEEP_ERRNO((void)pidfile_remove(pfh)); 813 pjdlog_exit(EX_TEMPFAIL, "Unable to fork"); 814 } 815 816 if (pid > 0) { 817 /* This is parent. */ 818 /* Declare that we are receiver. */ 819 proto_recv(res->hr_event, NULL, 0); 820 res->hr_workerpid = pid; 821 return; 822 } 823 824 gres = res; 825 826 (void)pidfile_close(pfh); 827 hook_fini(); 828 829 setproctitle("%s (primary)", res->hr_name); 830 831 /* Declare that we are sender. */ 832 proto_send(res->hr_event, NULL, 0); 833 834 init_local(res); 835 init_ggate(res); 836 init_environment(res); 837 /* 838 * Create the guard thread first, so we can handle signals from the 839 * very begining. 840 */ 841 error = pthread_create(&td, NULL, guard_thread, res); 842 assert(error == 0); 843 /* 844 * Create the control thread before sending any event to the parent, 845 * as we can deadlock when parent sends control request to worker, 846 * but worker has no control thread started yet, so parent waits. 847 * In the meantime worker sends an event to the parent, but parent 848 * is unable to handle the event, because it waits for control 849 * request response. 850 */ 851 error = pthread_create(&td, NULL, ctrl_thread, res); 852 assert(error == 0); 853 if (real_remote(res) && init_remote(res, NULL, NULL)) 854 sync_start(); 855 error = pthread_create(&td, NULL, ggate_recv_thread, res); 856 assert(error == 0); 857 error = pthread_create(&td, NULL, local_send_thread, res); 858 assert(error == 0); 859 error = pthread_create(&td, NULL, remote_send_thread, res); 860 assert(error == 0); 861 error = pthread_create(&td, NULL, remote_recv_thread, res); 862 assert(error == 0); 863 error = pthread_create(&td, NULL, ggate_send_thread, res); 864 assert(error == 0); 865 (void)sync_thread(res); 866 } 867 868 static void 869 reqlog(int loglevel, int debuglevel, struct g_gate_ctl_io *ggio, const char *fmt, ...) 870 { 871 char msg[1024]; 872 va_list ap; 873 int len; 874 875 va_start(ap, fmt); 876 len = vsnprintf(msg, sizeof(msg), fmt, ap); 877 va_end(ap); 878 if ((size_t)len < sizeof(msg)) { 879 switch (ggio->gctl_cmd) { 880 case BIO_READ: 881 (void)snprintf(msg + len, sizeof(msg) - len, 882 "READ(%ju, %ju).", (uintmax_t)ggio->gctl_offset, 883 (uintmax_t)ggio->gctl_length); 884 break; 885 case BIO_DELETE: 886 (void)snprintf(msg + len, sizeof(msg) - len, 887 "DELETE(%ju, %ju).", (uintmax_t)ggio->gctl_offset, 888 (uintmax_t)ggio->gctl_length); 889 break; 890 case BIO_FLUSH: 891 (void)snprintf(msg + len, sizeof(msg) - len, "FLUSH."); 892 break; 893 case BIO_WRITE: 894 (void)snprintf(msg + len, sizeof(msg) - len, 895 "WRITE(%ju, %ju).", (uintmax_t)ggio->gctl_offset, 896 (uintmax_t)ggio->gctl_length); 897 break; 898 default: 899 (void)snprintf(msg + len, sizeof(msg) - len, 900 "UNKNOWN(%u).", (unsigned int)ggio->gctl_cmd); 901 break; 902 } 903 } 904 pjdlog_common(loglevel, debuglevel, -1, "%s", msg); 905 } 906 907 static void 908 remote_close(struct hast_resource *res, int ncomp) 909 { 910 911 rw_wlock(&hio_remote_lock[ncomp]); 912 /* 913 * A race is possible between dropping rlock and acquiring wlock - 914 * another thread can close connection in-between. 915 */ 916 if (!ISCONNECTED(res, ncomp)) { 917 assert(res->hr_remotein == NULL); 918 assert(res->hr_remoteout == NULL); 919 rw_unlock(&hio_remote_lock[ncomp]); 920 return; 921 } 922 923 assert(res->hr_remotein != NULL); 924 assert(res->hr_remoteout != NULL); 925 926 pjdlog_debug(2, "Closing incoming connection to %s.", 927 res->hr_remoteaddr); 928 proto_close(res->hr_remotein); 929 res->hr_remotein = NULL; 930 pjdlog_debug(2, "Closing outgoing connection to %s.", 931 res->hr_remoteaddr); 932 proto_close(res->hr_remoteout); 933 res->hr_remoteout = NULL; 934 935 rw_unlock(&hio_remote_lock[ncomp]); 936 937 pjdlog_warning("Disconnected from %s.", res->hr_remoteaddr); 938 939 /* 940 * Stop synchronization if in-progress. 941 */ 942 sync_stop(); 943 944 event_send(res, EVENT_DISCONNECT); 945 } 946 947 /* 948 * Thread receives ggate I/O requests from the kernel and passes them to 949 * appropriate threads: 950 * WRITE - always goes to both local_send and remote_send threads 951 * READ (when the block is up-to-date on local component) - 952 * only local_send thread 953 * READ (when the block isn't up-to-date on local component) - 954 * only remote_send thread 955 * DELETE - always goes to both local_send and remote_send threads 956 * FLUSH - always goes to both local_send and remote_send threads 957 */ 958 static void * 959 ggate_recv_thread(void *arg) 960 { 961 struct hast_resource *res = arg; 962 struct g_gate_ctl_io *ggio; 963 struct hio *hio; 964 unsigned int ii, ncomp, ncomps; 965 int error; 966 967 ncomps = HAST_NCOMPONENTS; 968 969 for (;;) { 970 pjdlog_debug(2, "ggate_recv: Taking free request."); 971 QUEUE_TAKE2(hio, free); 972 pjdlog_debug(2, "ggate_recv: (%p) Got free request.", hio); 973 ggio = &hio->hio_ggio; 974 ggio->gctl_unit = res->hr_ggateunit; 975 ggio->gctl_length = MAXPHYS; 976 ggio->gctl_error = 0; 977 pjdlog_debug(2, 978 "ggate_recv: (%p) Waiting for request from the kernel.", 979 hio); 980 if (ioctl(res->hr_ggatefd, G_GATE_CMD_START, ggio) < 0) { 981 if (sigexit_received) 982 pthread_exit(NULL); 983 primary_exit(EX_OSERR, "G_GATE_CMD_START failed"); 984 } 985 error = ggio->gctl_error; 986 switch (error) { 987 case 0: 988 break; 989 case ECANCELED: 990 /* Exit gracefully. */ 991 if (!sigexit_received) { 992 pjdlog_debug(2, 993 "ggate_recv: (%p) Received cancel from the kernel.", 994 hio); 995 pjdlog_info("Received cancel from the kernel, exiting."); 996 } 997 pthread_exit(NULL); 998 case ENOMEM: 999 /* 1000 * Buffer too small? Impossible, we allocate MAXPHYS 1001 * bytes - request can't be bigger than that. 1002 */ 1003 /* FALLTHROUGH */ 1004 case ENXIO: 1005 default: 1006 primary_exitx(EX_OSERR, "G_GATE_CMD_START failed: %s.", 1007 strerror(error)); 1008 } 1009 for (ii = 0; ii < ncomps; ii++) 1010 hio->hio_errors[ii] = EINVAL; 1011 reqlog(LOG_DEBUG, 2, ggio, 1012 "ggate_recv: (%p) Request received from the kernel: ", 1013 hio); 1014 /* 1015 * Inform all components about new write request. 1016 * For read request prefer local component unless the given 1017 * range is out-of-date, then use remote component. 1018 */ 1019 switch (ggio->gctl_cmd) { 1020 case BIO_READ: 1021 pjdlog_debug(2, 1022 "ggate_recv: (%p) Moving request to the send queue.", 1023 hio); 1024 refcount_init(&hio->hio_countdown, 1); 1025 mtx_lock(&metadata_lock); 1026 if (res->hr_syncsrc == HAST_SYNCSRC_UNDEF || 1027 res->hr_syncsrc == HAST_SYNCSRC_PRIMARY) { 1028 /* 1029 * This range is up-to-date on local component, 1030 * so handle request locally. 1031 */ 1032 /* Local component is 0 for now. */ 1033 ncomp = 0; 1034 } else /* if (res->hr_syncsrc == 1035 HAST_SYNCSRC_SECONDARY) */ { 1036 assert(res->hr_syncsrc == 1037 HAST_SYNCSRC_SECONDARY); 1038 /* 1039 * This range is out-of-date on local component, 1040 * so send request to the remote node. 1041 */ 1042 /* Remote component is 1 for now. */ 1043 ncomp = 1; 1044 } 1045 mtx_unlock(&metadata_lock); 1046 QUEUE_INSERT1(hio, send, ncomp); 1047 break; 1048 case BIO_WRITE: 1049 if (res->hr_resuid == 0) { 1050 /* This is first write, initialize resuid. */ 1051 (void)init_resuid(res); 1052 } 1053 for (;;) { 1054 mtx_lock(&range_lock); 1055 if (rangelock_islocked(range_sync, 1056 ggio->gctl_offset, ggio->gctl_length)) { 1057 pjdlog_debug(2, 1058 "regular: Range offset=%jd length=%zu locked.", 1059 (intmax_t)ggio->gctl_offset, 1060 (size_t)ggio->gctl_length); 1061 range_regular_wait = true; 1062 cv_wait(&range_regular_cond, &range_lock); 1063 range_regular_wait = false; 1064 mtx_unlock(&range_lock); 1065 continue; 1066 } 1067 if (rangelock_add(range_regular, 1068 ggio->gctl_offset, ggio->gctl_length) < 0) { 1069 mtx_unlock(&range_lock); 1070 pjdlog_debug(2, 1071 "regular: Range offset=%jd length=%zu is already locked, waiting.", 1072 (intmax_t)ggio->gctl_offset, 1073 (size_t)ggio->gctl_length); 1074 sleep(1); 1075 continue; 1076 } 1077 mtx_unlock(&range_lock); 1078 break; 1079 } 1080 mtx_lock(&res->hr_amp_lock); 1081 if (activemap_write_start(res->hr_amp, 1082 ggio->gctl_offset, ggio->gctl_length)) { 1083 (void)hast_activemap_flush(res); 1084 } 1085 mtx_unlock(&res->hr_amp_lock); 1086 /* FALLTHROUGH */ 1087 case BIO_DELETE: 1088 case BIO_FLUSH: 1089 pjdlog_debug(2, 1090 "ggate_recv: (%p) Moving request to the send queues.", 1091 hio); 1092 refcount_init(&hio->hio_countdown, ncomps); 1093 for (ii = 0; ii < ncomps; ii++) 1094 QUEUE_INSERT1(hio, send, ii); 1095 break; 1096 } 1097 } 1098 /* NOTREACHED */ 1099 return (NULL); 1100 } 1101 1102 /* 1103 * Thread reads from or writes to local component. 1104 * If local read fails, it redirects it to remote_send thread. 1105 */ 1106 static void * 1107 local_send_thread(void *arg) 1108 { 1109 struct hast_resource *res = arg; 1110 struct g_gate_ctl_io *ggio; 1111 struct hio *hio; 1112 unsigned int ncomp, rncomp; 1113 ssize_t ret; 1114 1115 /* Local component is 0 for now. */ 1116 ncomp = 0; 1117 /* Remote component is 1 for now. */ 1118 rncomp = 1; 1119 1120 for (;;) { 1121 pjdlog_debug(2, "local_send: Taking request."); 1122 QUEUE_TAKE1(hio, send, ncomp, 0); 1123 pjdlog_debug(2, "local_send: (%p) Got request.", hio); 1124 ggio = &hio->hio_ggio; 1125 switch (ggio->gctl_cmd) { 1126 case BIO_READ: 1127 ret = pread(res->hr_localfd, ggio->gctl_data, 1128 ggio->gctl_length, 1129 ggio->gctl_offset + res->hr_localoff); 1130 if (ret == ggio->gctl_length) 1131 hio->hio_errors[ncomp] = 0; 1132 else { 1133 /* 1134 * If READ failed, try to read from remote node. 1135 */ 1136 QUEUE_INSERT1(hio, send, rncomp); 1137 continue; 1138 } 1139 break; 1140 case BIO_WRITE: 1141 ret = pwrite(res->hr_localfd, ggio->gctl_data, 1142 ggio->gctl_length, 1143 ggio->gctl_offset + res->hr_localoff); 1144 if (ret < 0) 1145 hio->hio_errors[ncomp] = errno; 1146 else if (ret != ggio->gctl_length) 1147 hio->hio_errors[ncomp] = EIO; 1148 else 1149 hio->hio_errors[ncomp] = 0; 1150 break; 1151 case BIO_DELETE: 1152 ret = g_delete(res->hr_localfd, 1153 ggio->gctl_offset + res->hr_localoff, 1154 ggio->gctl_length); 1155 if (ret < 0) 1156 hio->hio_errors[ncomp] = errno; 1157 else 1158 hio->hio_errors[ncomp] = 0; 1159 break; 1160 case BIO_FLUSH: 1161 ret = g_flush(res->hr_localfd); 1162 if (ret < 0) 1163 hio->hio_errors[ncomp] = errno; 1164 else 1165 hio->hio_errors[ncomp] = 0; 1166 break; 1167 } 1168 if (refcount_release(&hio->hio_countdown)) { 1169 if (ISSYNCREQ(hio)) { 1170 mtx_lock(&sync_lock); 1171 SYNCREQDONE(hio); 1172 mtx_unlock(&sync_lock); 1173 cv_signal(&sync_cond); 1174 } else { 1175 pjdlog_debug(2, 1176 "local_send: (%p) Moving request to the done queue.", 1177 hio); 1178 QUEUE_INSERT2(hio, done); 1179 } 1180 } 1181 } 1182 /* NOTREACHED */ 1183 return (NULL); 1184 } 1185 1186 static void 1187 keepalive_send(struct hast_resource *res, unsigned int ncomp) 1188 { 1189 struct nv *nv; 1190 1191 if (!ISCONNECTED(res, ncomp)) 1192 return; 1193 1194 assert(res->hr_remotein != NULL); 1195 assert(res->hr_remoteout != NULL); 1196 1197 nv = nv_alloc(); 1198 nv_add_uint8(nv, HIO_KEEPALIVE, "cmd"); 1199 if (nv_error(nv) != 0) { 1200 nv_free(nv); 1201 pjdlog_debug(1, 1202 "keepalive_send: Unable to prepare header to send."); 1203 return; 1204 } 1205 if (hast_proto_send(res, res->hr_remoteout, nv, NULL, 0) < 0) { 1206 pjdlog_common(LOG_DEBUG, 1, errno, 1207 "keepalive_send: Unable to send request"); 1208 nv_free(nv); 1209 rw_unlock(&hio_remote_lock[ncomp]); 1210 remote_close(res, ncomp); 1211 rw_rlock(&hio_remote_lock[ncomp]); 1212 return; 1213 } 1214 nv_free(nv); 1215 pjdlog_debug(2, "keepalive_send: Request sent."); 1216 } 1217 1218 /* 1219 * Thread sends request to secondary node. 1220 */ 1221 static void * 1222 remote_send_thread(void *arg) 1223 { 1224 struct hast_resource *res = arg; 1225 struct g_gate_ctl_io *ggio; 1226 time_t lastcheck, now; 1227 struct hio *hio; 1228 struct nv *nv; 1229 unsigned int ncomp; 1230 bool wakeup; 1231 uint64_t offset, length; 1232 uint8_t cmd; 1233 void *data; 1234 1235 /* Remote component is 1 for now. */ 1236 ncomp = 1; 1237 lastcheck = time(NULL); 1238 1239 for (;;) { 1240 pjdlog_debug(2, "remote_send: Taking request."); 1241 QUEUE_TAKE1(hio, send, ncomp, RETRY_SLEEP); 1242 if (hio == NULL) { 1243 now = time(NULL); 1244 if (lastcheck + RETRY_SLEEP <= now) { 1245 keepalive_send(res, ncomp); 1246 lastcheck = now; 1247 } 1248 continue; 1249 } 1250 pjdlog_debug(2, "remote_send: (%p) Got request.", hio); 1251 ggio = &hio->hio_ggio; 1252 switch (ggio->gctl_cmd) { 1253 case BIO_READ: 1254 cmd = HIO_READ; 1255 data = NULL; 1256 offset = ggio->gctl_offset; 1257 length = ggio->gctl_length; 1258 break; 1259 case BIO_WRITE: 1260 cmd = HIO_WRITE; 1261 data = ggio->gctl_data; 1262 offset = ggio->gctl_offset; 1263 length = ggio->gctl_length; 1264 break; 1265 case BIO_DELETE: 1266 cmd = HIO_DELETE; 1267 data = NULL; 1268 offset = ggio->gctl_offset; 1269 length = ggio->gctl_length; 1270 break; 1271 case BIO_FLUSH: 1272 cmd = HIO_FLUSH; 1273 data = NULL; 1274 offset = 0; 1275 length = 0; 1276 break; 1277 default: 1278 assert(!"invalid condition"); 1279 abort(); 1280 } 1281 nv = nv_alloc(); 1282 nv_add_uint8(nv, cmd, "cmd"); 1283 nv_add_uint64(nv, (uint64_t)ggio->gctl_seq, "seq"); 1284 nv_add_uint64(nv, offset, "offset"); 1285 nv_add_uint64(nv, length, "length"); 1286 if (nv_error(nv) != 0) { 1287 hio->hio_errors[ncomp] = nv_error(nv); 1288 pjdlog_debug(2, 1289 "remote_send: (%p) Unable to prepare header to send.", 1290 hio); 1291 reqlog(LOG_ERR, 0, ggio, 1292 "Unable to prepare header to send (%s): ", 1293 strerror(nv_error(nv))); 1294 /* Move failed request immediately to the done queue. */ 1295 goto done_queue; 1296 } 1297 pjdlog_debug(2, 1298 "remote_send: (%p) Moving request to the recv queue.", 1299 hio); 1300 /* 1301 * Protect connection from disappearing. 1302 */ 1303 rw_rlock(&hio_remote_lock[ncomp]); 1304 if (!ISCONNECTED(res, ncomp)) { 1305 rw_unlock(&hio_remote_lock[ncomp]); 1306 hio->hio_errors[ncomp] = ENOTCONN; 1307 goto done_queue; 1308 } 1309 /* 1310 * Move the request to recv queue before sending it, because 1311 * in different order we can get reply before we move request 1312 * to recv queue. 1313 */ 1314 mtx_lock(&hio_recv_list_lock[ncomp]); 1315 wakeup = TAILQ_EMPTY(&hio_recv_list[ncomp]); 1316 TAILQ_INSERT_TAIL(&hio_recv_list[ncomp], hio, hio_next[ncomp]); 1317 mtx_unlock(&hio_recv_list_lock[ncomp]); 1318 if (hast_proto_send(res, res->hr_remoteout, nv, data, 1319 data != NULL ? length : 0) < 0) { 1320 hio->hio_errors[ncomp] = errno; 1321 rw_unlock(&hio_remote_lock[ncomp]); 1322 pjdlog_debug(2, 1323 "remote_send: (%p) Unable to send request.", hio); 1324 reqlog(LOG_ERR, 0, ggio, 1325 "Unable to send request (%s): ", 1326 strerror(hio->hio_errors[ncomp])); 1327 remote_close(res, ncomp); 1328 /* 1329 * Take request back from the receive queue and move 1330 * it immediately to the done queue. 1331 */ 1332 mtx_lock(&hio_recv_list_lock[ncomp]); 1333 TAILQ_REMOVE(&hio_recv_list[ncomp], hio, hio_next[ncomp]); 1334 mtx_unlock(&hio_recv_list_lock[ncomp]); 1335 goto done_queue; 1336 } 1337 rw_unlock(&hio_remote_lock[ncomp]); 1338 nv_free(nv); 1339 if (wakeup) 1340 cv_signal(&hio_recv_list_cond[ncomp]); 1341 continue; 1342 done_queue: 1343 nv_free(nv); 1344 if (ISSYNCREQ(hio)) { 1345 if (!refcount_release(&hio->hio_countdown)) 1346 continue; 1347 mtx_lock(&sync_lock); 1348 SYNCREQDONE(hio); 1349 mtx_unlock(&sync_lock); 1350 cv_signal(&sync_cond); 1351 continue; 1352 } 1353 if (ggio->gctl_cmd == BIO_WRITE) { 1354 mtx_lock(&res->hr_amp_lock); 1355 if (activemap_need_sync(res->hr_amp, ggio->gctl_offset, 1356 ggio->gctl_length)) { 1357 (void)hast_activemap_flush(res); 1358 } 1359 mtx_unlock(&res->hr_amp_lock); 1360 } 1361 if (!refcount_release(&hio->hio_countdown)) 1362 continue; 1363 pjdlog_debug(2, 1364 "remote_send: (%p) Moving request to the done queue.", 1365 hio); 1366 QUEUE_INSERT2(hio, done); 1367 } 1368 /* NOTREACHED */ 1369 return (NULL); 1370 } 1371 1372 /* 1373 * Thread receives answer from secondary node and passes it to ggate_send 1374 * thread. 1375 */ 1376 static void * 1377 remote_recv_thread(void *arg) 1378 { 1379 struct hast_resource *res = arg; 1380 struct g_gate_ctl_io *ggio; 1381 struct hio *hio; 1382 struct nv *nv; 1383 unsigned int ncomp; 1384 uint64_t seq; 1385 int error; 1386 1387 /* Remote component is 1 for now. */ 1388 ncomp = 1; 1389 1390 for (;;) { 1391 /* Wait until there is anything to receive. */ 1392 mtx_lock(&hio_recv_list_lock[ncomp]); 1393 while (TAILQ_EMPTY(&hio_recv_list[ncomp])) { 1394 pjdlog_debug(2, "remote_recv: No requests, waiting."); 1395 cv_wait(&hio_recv_list_cond[ncomp], 1396 &hio_recv_list_lock[ncomp]); 1397 } 1398 mtx_unlock(&hio_recv_list_lock[ncomp]); 1399 rw_rlock(&hio_remote_lock[ncomp]); 1400 if (!ISCONNECTED(res, ncomp)) { 1401 rw_unlock(&hio_remote_lock[ncomp]); 1402 /* 1403 * Connection is dead, so move all pending requests to 1404 * the done queue (one-by-one). 1405 */ 1406 mtx_lock(&hio_recv_list_lock[ncomp]); 1407 hio = TAILQ_FIRST(&hio_recv_list[ncomp]); 1408 assert(hio != NULL); 1409 TAILQ_REMOVE(&hio_recv_list[ncomp], hio, 1410 hio_next[ncomp]); 1411 mtx_unlock(&hio_recv_list_lock[ncomp]); 1412 goto done_queue; 1413 } 1414 if (hast_proto_recv_hdr(res->hr_remotein, &nv) < 0) { 1415 pjdlog_errno(LOG_ERR, 1416 "Unable to receive reply header"); 1417 rw_unlock(&hio_remote_lock[ncomp]); 1418 remote_close(res, ncomp); 1419 continue; 1420 } 1421 rw_unlock(&hio_remote_lock[ncomp]); 1422 seq = nv_get_uint64(nv, "seq"); 1423 if (seq == 0) { 1424 pjdlog_error("Header contains no 'seq' field."); 1425 nv_free(nv); 1426 continue; 1427 } 1428 mtx_lock(&hio_recv_list_lock[ncomp]); 1429 TAILQ_FOREACH(hio, &hio_recv_list[ncomp], hio_next[ncomp]) { 1430 if (hio->hio_ggio.gctl_seq == seq) { 1431 TAILQ_REMOVE(&hio_recv_list[ncomp], hio, 1432 hio_next[ncomp]); 1433 break; 1434 } 1435 } 1436 mtx_unlock(&hio_recv_list_lock[ncomp]); 1437 if (hio == NULL) { 1438 pjdlog_error("Found no request matching received 'seq' field (%ju).", 1439 (uintmax_t)seq); 1440 nv_free(nv); 1441 continue; 1442 } 1443 error = nv_get_int16(nv, "error"); 1444 if (error != 0) { 1445 /* Request failed on remote side. */ 1446 hio->hio_errors[ncomp] = 0; 1447 nv_free(nv); 1448 goto done_queue; 1449 } 1450 ggio = &hio->hio_ggio; 1451 switch (ggio->gctl_cmd) { 1452 case BIO_READ: 1453 rw_rlock(&hio_remote_lock[ncomp]); 1454 if (!ISCONNECTED(res, ncomp)) { 1455 rw_unlock(&hio_remote_lock[ncomp]); 1456 nv_free(nv); 1457 goto done_queue; 1458 } 1459 if (hast_proto_recv_data(res, res->hr_remotein, nv, 1460 ggio->gctl_data, ggio->gctl_length) < 0) { 1461 hio->hio_errors[ncomp] = errno; 1462 pjdlog_errno(LOG_ERR, 1463 "Unable to receive reply data"); 1464 rw_unlock(&hio_remote_lock[ncomp]); 1465 nv_free(nv); 1466 remote_close(res, ncomp); 1467 goto done_queue; 1468 } 1469 rw_unlock(&hio_remote_lock[ncomp]); 1470 break; 1471 case BIO_WRITE: 1472 case BIO_DELETE: 1473 case BIO_FLUSH: 1474 break; 1475 default: 1476 assert(!"invalid condition"); 1477 abort(); 1478 } 1479 hio->hio_errors[ncomp] = 0; 1480 nv_free(nv); 1481 done_queue: 1482 if (refcount_release(&hio->hio_countdown)) { 1483 if (ISSYNCREQ(hio)) { 1484 mtx_lock(&sync_lock); 1485 SYNCREQDONE(hio); 1486 mtx_unlock(&sync_lock); 1487 cv_signal(&sync_cond); 1488 } else { 1489 pjdlog_debug(2, 1490 "remote_recv: (%p) Moving request to the done queue.", 1491 hio); 1492 QUEUE_INSERT2(hio, done); 1493 } 1494 } 1495 } 1496 /* NOTREACHED */ 1497 return (NULL); 1498 } 1499 1500 /* 1501 * Thread sends answer to the kernel. 1502 */ 1503 static void * 1504 ggate_send_thread(void *arg) 1505 { 1506 struct hast_resource *res = arg; 1507 struct g_gate_ctl_io *ggio; 1508 struct hio *hio; 1509 unsigned int ii, ncomp, ncomps; 1510 1511 ncomps = HAST_NCOMPONENTS; 1512 1513 for (;;) { 1514 pjdlog_debug(2, "ggate_send: Taking request."); 1515 QUEUE_TAKE2(hio, done); 1516 pjdlog_debug(2, "ggate_send: (%p) Got request.", hio); 1517 ggio = &hio->hio_ggio; 1518 for (ii = 0; ii < ncomps; ii++) { 1519 if (hio->hio_errors[ii] == 0) { 1520 /* 1521 * One successful request is enough to declare 1522 * success. 1523 */ 1524 ggio->gctl_error = 0; 1525 break; 1526 } 1527 } 1528 if (ii == ncomps) { 1529 /* 1530 * None of the requests were successful. 1531 * Use first error. 1532 */ 1533 ggio->gctl_error = hio->hio_errors[0]; 1534 } 1535 if (ggio->gctl_error == 0 && ggio->gctl_cmd == BIO_WRITE) { 1536 mtx_lock(&res->hr_amp_lock); 1537 activemap_write_complete(res->hr_amp, 1538 ggio->gctl_offset, ggio->gctl_length); 1539 mtx_unlock(&res->hr_amp_lock); 1540 } 1541 if (ggio->gctl_cmd == BIO_WRITE) { 1542 /* 1543 * Unlock range we locked. 1544 */ 1545 mtx_lock(&range_lock); 1546 rangelock_del(range_regular, ggio->gctl_offset, 1547 ggio->gctl_length); 1548 if (range_sync_wait) 1549 cv_signal(&range_sync_cond); 1550 mtx_unlock(&range_lock); 1551 /* 1552 * Bump local count if this is first write after 1553 * connection failure with remote node. 1554 */ 1555 ncomp = 1; 1556 rw_rlock(&hio_remote_lock[ncomp]); 1557 if (!ISCONNECTED(res, ncomp)) { 1558 mtx_lock(&metadata_lock); 1559 if (res->hr_primary_localcnt == 1560 res->hr_secondary_remotecnt) { 1561 res->hr_primary_localcnt++; 1562 pjdlog_debug(1, 1563 "Increasing localcnt to %ju.", 1564 (uintmax_t)res->hr_primary_localcnt); 1565 (void)metadata_write(res); 1566 } 1567 mtx_unlock(&metadata_lock); 1568 } 1569 rw_unlock(&hio_remote_lock[ncomp]); 1570 } 1571 if (ioctl(res->hr_ggatefd, G_GATE_CMD_DONE, ggio) < 0) 1572 primary_exit(EX_OSERR, "G_GATE_CMD_DONE failed"); 1573 pjdlog_debug(2, 1574 "ggate_send: (%p) Moving request to the free queue.", hio); 1575 QUEUE_INSERT2(hio, free); 1576 } 1577 /* NOTREACHED */ 1578 return (NULL); 1579 } 1580 1581 /* 1582 * Thread synchronize local and remote components. 1583 */ 1584 static void * 1585 sync_thread(void *arg __unused) 1586 { 1587 struct hast_resource *res = arg; 1588 struct hio *hio; 1589 struct g_gate_ctl_io *ggio; 1590 unsigned int ii, ncomp, ncomps; 1591 off_t offset, length, synced; 1592 bool dorewind; 1593 int syncext; 1594 1595 ncomps = HAST_NCOMPONENTS; 1596 dorewind = true; 1597 synced = 0; 1598 offset = -1; 1599 1600 for (;;) { 1601 mtx_lock(&sync_lock); 1602 if (offset >= 0 && !sync_inprogress) { 1603 pjdlog_info("Synchronization interrupted. " 1604 "%jd bytes synchronized so far.", 1605 (intmax_t)synced); 1606 event_send(res, EVENT_SYNCINTR); 1607 } 1608 while (!sync_inprogress) { 1609 dorewind = true; 1610 synced = 0; 1611 cv_wait(&sync_cond, &sync_lock); 1612 } 1613 mtx_unlock(&sync_lock); 1614 /* 1615 * Obtain offset at which we should synchronize. 1616 * Rewind synchronization if needed. 1617 */ 1618 mtx_lock(&res->hr_amp_lock); 1619 if (dorewind) 1620 activemap_sync_rewind(res->hr_amp); 1621 offset = activemap_sync_offset(res->hr_amp, &length, &syncext); 1622 if (syncext != -1) { 1623 /* 1624 * We synchronized entire syncext extent, we can mark 1625 * it as clean now. 1626 */ 1627 if (activemap_extent_complete(res->hr_amp, syncext)) 1628 (void)hast_activemap_flush(res); 1629 } 1630 mtx_unlock(&res->hr_amp_lock); 1631 if (dorewind) { 1632 dorewind = false; 1633 if (offset < 0) 1634 pjdlog_info("Nodes are in sync."); 1635 else { 1636 pjdlog_info("Synchronization started. %ju bytes to go.", 1637 (uintmax_t)(res->hr_extentsize * 1638 activemap_ndirty(res->hr_amp))); 1639 event_send(res, EVENT_SYNCSTART); 1640 } 1641 } 1642 if (offset < 0) { 1643 sync_stop(); 1644 pjdlog_debug(1, "Nothing to synchronize."); 1645 /* 1646 * Synchronization complete, make both localcnt and 1647 * remotecnt equal. 1648 */ 1649 ncomp = 1; 1650 rw_rlock(&hio_remote_lock[ncomp]); 1651 if (ISCONNECTED(res, ncomp)) { 1652 if (synced > 0) { 1653 pjdlog_info("Synchronization complete. " 1654 "%jd bytes synchronized.", 1655 (intmax_t)synced); 1656 event_send(res, EVENT_SYNCDONE); 1657 } 1658 mtx_lock(&metadata_lock); 1659 res->hr_syncsrc = HAST_SYNCSRC_UNDEF; 1660 res->hr_primary_localcnt = 1661 res->hr_secondary_localcnt; 1662 res->hr_primary_remotecnt = 1663 res->hr_secondary_remotecnt; 1664 pjdlog_debug(1, 1665 "Setting localcnt to %ju and remotecnt to %ju.", 1666 (uintmax_t)res->hr_primary_localcnt, 1667 (uintmax_t)res->hr_secondary_localcnt); 1668 (void)metadata_write(res); 1669 mtx_unlock(&metadata_lock); 1670 } 1671 rw_unlock(&hio_remote_lock[ncomp]); 1672 continue; 1673 } 1674 pjdlog_debug(2, "sync: Taking free request."); 1675 QUEUE_TAKE2(hio, free); 1676 pjdlog_debug(2, "sync: (%p) Got free request.", hio); 1677 /* 1678 * Lock the range we are going to synchronize. We don't want 1679 * race where someone writes between our read and write. 1680 */ 1681 for (;;) { 1682 mtx_lock(&range_lock); 1683 if (rangelock_islocked(range_regular, offset, length)) { 1684 pjdlog_debug(2, 1685 "sync: Range offset=%jd length=%jd locked.", 1686 (intmax_t)offset, (intmax_t)length); 1687 range_sync_wait = true; 1688 cv_wait(&range_sync_cond, &range_lock); 1689 range_sync_wait = false; 1690 mtx_unlock(&range_lock); 1691 continue; 1692 } 1693 if (rangelock_add(range_sync, offset, length) < 0) { 1694 mtx_unlock(&range_lock); 1695 pjdlog_debug(2, 1696 "sync: Range offset=%jd length=%jd is already locked, waiting.", 1697 (intmax_t)offset, (intmax_t)length); 1698 sleep(1); 1699 continue; 1700 } 1701 mtx_unlock(&range_lock); 1702 break; 1703 } 1704 /* 1705 * First read the data from synchronization source. 1706 */ 1707 SYNCREQ(hio); 1708 ggio = &hio->hio_ggio; 1709 ggio->gctl_cmd = BIO_READ; 1710 ggio->gctl_offset = offset; 1711 ggio->gctl_length = length; 1712 ggio->gctl_error = 0; 1713 for (ii = 0; ii < ncomps; ii++) 1714 hio->hio_errors[ii] = EINVAL; 1715 reqlog(LOG_DEBUG, 2, ggio, "sync: (%p) Sending sync request: ", 1716 hio); 1717 pjdlog_debug(2, "sync: (%p) Moving request to the send queue.", 1718 hio); 1719 mtx_lock(&metadata_lock); 1720 if (res->hr_syncsrc == HAST_SYNCSRC_PRIMARY) { 1721 /* 1722 * This range is up-to-date on local component, 1723 * so handle request locally. 1724 */ 1725 /* Local component is 0 for now. */ 1726 ncomp = 0; 1727 } else /* if (res->hr_syncsrc == HAST_SYNCSRC_SECONDARY) */ { 1728 assert(res->hr_syncsrc == HAST_SYNCSRC_SECONDARY); 1729 /* 1730 * This range is out-of-date on local component, 1731 * so send request to the remote node. 1732 */ 1733 /* Remote component is 1 for now. */ 1734 ncomp = 1; 1735 } 1736 mtx_unlock(&metadata_lock); 1737 refcount_init(&hio->hio_countdown, 1); 1738 QUEUE_INSERT1(hio, send, ncomp); 1739 1740 /* 1741 * Let's wait for READ to finish. 1742 */ 1743 mtx_lock(&sync_lock); 1744 while (!ISSYNCREQDONE(hio)) 1745 cv_wait(&sync_cond, &sync_lock); 1746 mtx_unlock(&sync_lock); 1747 1748 if (hio->hio_errors[ncomp] != 0) { 1749 pjdlog_error("Unable to read synchronization data: %s.", 1750 strerror(hio->hio_errors[ncomp])); 1751 goto free_queue; 1752 } 1753 1754 /* 1755 * We read the data from synchronization source, now write it 1756 * to synchronization target. 1757 */ 1758 SYNCREQ(hio); 1759 ggio->gctl_cmd = BIO_WRITE; 1760 for (ii = 0; ii < ncomps; ii++) 1761 hio->hio_errors[ii] = EINVAL; 1762 reqlog(LOG_DEBUG, 2, ggio, "sync: (%p) Sending sync request: ", 1763 hio); 1764 pjdlog_debug(2, "sync: (%p) Moving request to the send queue.", 1765 hio); 1766 mtx_lock(&metadata_lock); 1767 if (res->hr_syncsrc == HAST_SYNCSRC_PRIMARY) { 1768 /* 1769 * This range is up-to-date on local component, 1770 * so we update remote component. 1771 */ 1772 /* Remote component is 1 for now. */ 1773 ncomp = 1; 1774 } else /* if (res->hr_syncsrc == HAST_SYNCSRC_SECONDARY) */ { 1775 assert(res->hr_syncsrc == HAST_SYNCSRC_SECONDARY); 1776 /* 1777 * This range is out-of-date on local component, 1778 * so we update it. 1779 */ 1780 /* Local component is 0 for now. */ 1781 ncomp = 0; 1782 } 1783 mtx_unlock(&metadata_lock); 1784 1785 pjdlog_debug(2, "sync: (%p) Moving request to the send queues.", 1786 hio); 1787 refcount_init(&hio->hio_countdown, 1); 1788 QUEUE_INSERT1(hio, send, ncomp); 1789 1790 /* 1791 * Let's wait for WRITE to finish. 1792 */ 1793 mtx_lock(&sync_lock); 1794 while (!ISSYNCREQDONE(hio)) 1795 cv_wait(&sync_cond, &sync_lock); 1796 mtx_unlock(&sync_lock); 1797 1798 if (hio->hio_errors[ncomp] != 0) { 1799 pjdlog_error("Unable to write synchronization data: %s.", 1800 strerror(hio->hio_errors[ncomp])); 1801 goto free_queue; 1802 } 1803 1804 synced += length; 1805 free_queue: 1806 mtx_lock(&range_lock); 1807 rangelock_del(range_sync, offset, length); 1808 if (range_regular_wait) 1809 cv_signal(&range_regular_cond); 1810 mtx_unlock(&range_lock); 1811 pjdlog_debug(2, "sync: (%p) Moving request to the free queue.", 1812 hio); 1813 QUEUE_INSERT2(hio, free); 1814 } 1815 /* NOTREACHED */ 1816 return (NULL); 1817 } 1818 1819 static void 1820 config_reload(void) 1821 { 1822 struct hastd_config *newcfg; 1823 struct hast_resource *res; 1824 unsigned int ii, ncomps; 1825 int modified; 1826 1827 pjdlog_info("Reloading configuration..."); 1828 1829 ncomps = HAST_NCOMPONENTS; 1830 1831 newcfg = yy_config_parse(cfgpath, false); 1832 if (newcfg == NULL) 1833 goto failed; 1834 1835 TAILQ_FOREACH(res, &newcfg->hc_resources, hr_next) { 1836 if (strcmp(res->hr_name, gres->hr_name) == 0) 1837 break; 1838 } 1839 /* 1840 * If resource was removed from the configuration file, resource 1841 * name, provider name or path to local component was modified we 1842 * shouldn't be here. This means that someone modified configuration 1843 * file and send SIGHUP to us instead of main hastd process. 1844 * Log advice and ignore the signal. 1845 */ 1846 if (res == NULL || strcmp(gres->hr_name, res->hr_name) != 0 || 1847 strcmp(gres->hr_provname, res->hr_provname) != 0 || 1848 strcmp(gres->hr_localpath, res->hr_localpath) != 0) { 1849 pjdlog_warning("To reload configuration send SIGHUP to the main hastd process (pid %u).", 1850 (unsigned int)getppid()); 1851 goto failed; 1852 } 1853 1854 #define MODIFIED_REMOTEADDR 0x1 1855 #define MODIFIED_REPLICATION 0x2 1856 #define MODIFIED_TIMEOUT 0x4 1857 #define MODIFIED_EXEC 0x8 1858 modified = 0; 1859 if (strcmp(gres->hr_remoteaddr, res->hr_remoteaddr) != 0) { 1860 /* 1861 * Don't copy res->hr_remoteaddr to gres just yet. 1862 * We want remote_close() to log disconnect from the old 1863 * addresses, not from the new ones. 1864 */ 1865 modified |= MODIFIED_REMOTEADDR; 1866 } 1867 if (gres->hr_replication != res->hr_replication) { 1868 gres->hr_replication = res->hr_replication; 1869 modified |= MODIFIED_REPLICATION; 1870 } 1871 if (gres->hr_timeout != res->hr_timeout) { 1872 gres->hr_timeout = res->hr_timeout; 1873 modified |= MODIFIED_TIMEOUT; 1874 } 1875 if (strcmp(gres->hr_exec, res->hr_exec) != 0) { 1876 strlcpy(gres->hr_exec, res->hr_exec, sizeof(gres->hr_exec)); 1877 modified |= MODIFIED_EXEC; 1878 } 1879 /* 1880 * If only timeout was modified we only need to change it without 1881 * reconnecting. 1882 */ 1883 if (modified == MODIFIED_TIMEOUT) { 1884 for (ii = 0; ii < ncomps; ii++) { 1885 if (!ISREMOTE(ii)) 1886 continue; 1887 rw_rlock(&hio_remote_lock[ii]); 1888 if (!ISCONNECTED(gres, ii)) { 1889 rw_unlock(&hio_remote_lock[ii]); 1890 continue; 1891 } 1892 rw_unlock(&hio_remote_lock[ii]); 1893 if (proto_timeout(gres->hr_remotein, 1894 gres->hr_timeout) < 0) { 1895 pjdlog_errno(LOG_WARNING, 1896 "Unable to set connection timeout"); 1897 } 1898 if (proto_timeout(gres->hr_remoteout, 1899 gres->hr_timeout) < 0) { 1900 pjdlog_errno(LOG_WARNING, 1901 "Unable to set connection timeout"); 1902 } 1903 } 1904 } else if ((modified & 1905 (MODIFIED_REMOTEADDR | MODIFIED_REPLICATION)) != 0) { 1906 for (ii = 0; ii < ncomps; ii++) { 1907 if (!ISREMOTE(ii)) 1908 continue; 1909 remote_close(gres, ii); 1910 } 1911 if (modified & MODIFIED_REMOTEADDR) { 1912 strlcpy(gres->hr_remoteaddr, res->hr_remoteaddr, 1913 sizeof(gres->hr_remoteaddr)); 1914 } 1915 } 1916 #undef MODIFIED_REMOTEADDR 1917 #undef MODIFIED_REPLICATION 1918 #undef MODIFIED_TIMEOUT 1919 #undef MODIFIED_EXEC 1920 1921 pjdlog_info("Configuration reloaded successfully."); 1922 return; 1923 failed: 1924 if (newcfg != NULL) { 1925 if (newcfg->hc_controlconn != NULL) 1926 proto_close(newcfg->hc_controlconn); 1927 if (newcfg->hc_listenconn != NULL) 1928 proto_close(newcfg->hc_listenconn); 1929 yy_config_free(newcfg); 1930 } 1931 pjdlog_warning("Configuration not reloaded."); 1932 } 1933 1934 static void 1935 guard_one(struct hast_resource *res, unsigned int ncomp) 1936 { 1937 struct proto_conn *in, *out; 1938 1939 if (!ISREMOTE(ncomp)) 1940 return; 1941 1942 rw_rlock(&hio_remote_lock[ncomp]); 1943 1944 if (!real_remote(res)) { 1945 rw_unlock(&hio_remote_lock[ncomp]); 1946 return; 1947 } 1948 1949 if (ISCONNECTED(res, ncomp)) { 1950 assert(res->hr_remotein != NULL); 1951 assert(res->hr_remoteout != NULL); 1952 rw_unlock(&hio_remote_lock[ncomp]); 1953 pjdlog_debug(2, "remote_guard: Connection to %s is ok.", 1954 res->hr_remoteaddr); 1955 return; 1956 } 1957 1958 assert(res->hr_remotein == NULL); 1959 assert(res->hr_remoteout == NULL); 1960 /* 1961 * Upgrade the lock. It doesn't have to be atomic as no other thread 1962 * can change connection status from disconnected to connected. 1963 */ 1964 rw_unlock(&hio_remote_lock[ncomp]); 1965 pjdlog_debug(2, "remote_guard: Reconnecting to %s.", 1966 res->hr_remoteaddr); 1967 in = out = NULL; 1968 if (init_remote(res, &in, &out)) { 1969 rw_wlock(&hio_remote_lock[ncomp]); 1970 assert(res->hr_remotein == NULL); 1971 assert(res->hr_remoteout == NULL); 1972 assert(in != NULL && out != NULL); 1973 res->hr_remotein = in; 1974 res->hr_remoteout = out; 1975 rw_unlock(&hio_remote_lock[ncomp]); 1976 pjdlog_info("Successfully reconnected to %s.", 1977 res->hr_remoteaddr); 1978 sync_start(); 1979 } else { 1980 /* Both connections should be NULL. */ 1981 assert(res->hr_remotein == NULL); 1982 assert(res->hr_remoteout == NULL); 1983 assert(in == NULL && out == NULL); 1984 pjdlog_debug(2, "remote_guard: Reconnect to %s failed.", 1985 res->hr_remoteaddr); 1986 } 1987 } 1988 1989 /* 1990 * Thread guards remote connections and reconnects when needed, handles 1991 * signals, etc. 1992 */ 1993 static void * 1994 guard_thread(void *arg) 1995 { 1996 struct hast_resource *res = arg; 1997 unsigned int ii, ncomps; 1998 struct timespec timeout; 1999 time_t lastcheck, now; 2000 sigset_t mask; 2001 int signo; 2002 2003 ncomps = HAST_NCOMPONENTS; 2004 lastcheck = time(NULL); 2005 2006 PJDLOG_VERIFY(sigemptyset(&mask) == 0); 2007 PJDLOG_VERIFY(sigaddset(&mask, SIGHUP) == 0); 2008 PJDLOG_VERIFY(sigaddset(&mask, SIGINT) == 0); 2009 PJDLOG_VERIFY(sigaddset(&mask, SIGTERM) == 0); 2010 2011 timeout.tv_nsec = 0; 2012 signo = -1; 2013 2014 for (;;) { 2015 switch (signo) { 2016 case SIGHUP: 2017 config_reload(); 2018 break; 2019 case SIGINT: 2020 case SIGTERM: 2021 sigexit_received = true; 2022 primary_exitx(EX_OK, 2023 "Termination signal received, exiting."); 2024 break; 2025 default: 2026 break; 2027 } 2028 2029 pjdlog_debug(2, "remote_guard: Checking connections."); 2030 now = time(NULL); 2031 if (lastcheck + RETRY_SLEEP <= now) { 2032 for (ii = 0; ii < ncomps; ii++) 2033 guard_one(res, ii); 2034 lastcheck = now; 2035 } 2036 timeout.tv_sec = RETRY_SLEEP; 2037 signo = sigtimedwait(&mask, NULL, &timeout); 2038 } 2039 /* NOTREACHED */ 2040 return (NULL); 2041 } 2042