1 /*- 2 * Copyright (c) 2009 The FreeBSD Foundation 3 * Copyright (c) 2010 Pawel Jakub Dawidek <pjd@FreeBSD.org> 4 * All rights reserved. 5 * 6 * This software was developed by Pawel Jakub Dawidek under sponsorship from 7 * the FreeBSD Foundation. 8 * 9 * Redistribution and use in source and binary forms, with or without 10 * modification, are permitted provided that the following conditions 11 * are met: 12 * 1. Redistributions of source code must retain the above copyright 13 * notice, this list of conditions and the following disclaimer. 14 * 2. Redistributions in binary form must reproduce the above copyright 15 * notice, this list of conditions and the following disclaimer in the 16 * documentation and/or other materials provided with the distribution. 17 * 18 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND 19 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 21 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE 22 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 23 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 24 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 25 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 26 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 27 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 28 * SUCH DAMAGE. 29 */ 30 31 #include <sys/cdefs.h> 32 __FBSDID("$FreeBSD$"); 33 34 #include <sys/types.h> 35 #include <sys/time.h> 36 #include <sys/bio.h> 37 #include <sys/disk.h> 38 #include <sys/refcount.h> 39 #include <sys/stat.h> 40 41 #include <geom/gate/g_gate.h> 42 43 #include <assert.h> 44 #include <err.h> 45 #include <errno.h> 46 #include <fcntl.h> 47 #include <libgeom.h> 48 #include <pthread.h> 49 #include <signal.h> 50 #include <stdint.h> 51 #include <stdio.h> 52 #include <string.h> 53 #include <sysexits.h> 54 #include <unistd.h> 55 56 #include <activemap.h> 57 #include <nv.h> 58 #include <rangelock.h> 59 60 #include "control.h" 61 #include "event.h" 62 #include "hast.h" 63 #include "hast_proto.h" 64 #include "hastd.h" 65 #include "hooks.h" 66 #include "metadata.h" 67 #include "proto.h" 68 #include "pjdlog.h" 69 #include "subr.h" 70 #include "synch.h" 71 72 /* The is only one remote component for now. */ 73 #define ISREMOTE(no) ((no) == 1) 74 75 struct hio { 76 /* 77 * Number of components we are still waiting for. 78 * When this field goes to 0, we can send the request back to the 79 * kernel. Each component has to decrease this counter by one 80 * even on failure. 81 */ 82 unsigned int hio_countdown; 83 /* 84 * Each component has a place to store its own error. 85 * Once the request is handled by all components we can decide if the 86 * request overall is successful or not. 87 */ 88 int *hio_errors; 89 /* 90 * Structure used to comunicate with GEOM Gate class. 91 */ 92 struct g_gate_ctl_io hio_ggio; 93 TAILQ_ENTRY(hio) *hio_next; 94 }; 95 #define hio_free_next hio_next[0] 96 #define hio_done_next hio_next[0] 97 98 /* 99 * Free list holds unused structures. When free list is empty, we have to wait 100 * until some in-progress requests are freed. 101 */ 102 static TAILQ_HEAD(, hio) hio_free_list; 103 static pthread_mutex_t hio_free_list_lock; 104 static pthread_cond_t hio_free_list_cond; 105 /* 106 * There is one send list for every component. One requests is placed on all 107 * send lists - each component gets the same request, but each component is 108 * responsible for managing his own send list. 109 */ 110 static TAILQ_HEAD(, hio) *hio_send_list; 111 static pthread_mutex_t *hio_send_list_lock; 112 static pthread_cond_t *hio_send_list_cond; 113 /* 114 * There is one recv list for every component, although local components don't 115 * use recv lists as local requests are done synchronously. 116 */ 117 static TAILQ_HEAD(, hio) *hio_recv_list; 118 static pthread_mutex_t *hio_recv_list_lock; 119 static pthread_cond_t *hio_recv_list_cond; 120 /* 121 * Request is placed on done list by the slowest component (the one that 122 * decreased hio_countdown from 1 to 0). 123 */ 124 static TAILQ_HEAD(, hio) hio_done_list; 125 static pthread_mutex_t hio_done_list_lock; 126 static pthread_cond_t hio_done_list_cond; 127 /* 128 * Structure below are for interaction with sync thread. 129 */ 130 static bool sync_inprogress; 131 static pthread_mutex_t sync_lock; 132 static pthread_cond_t sync_cond; 133 /* 134 * The lock below allows to synchornize access to remote connections. 135 */ 136 static pthread_rwlock_t *hio_remote_lock; 137 138 /* 139 * Lock to synchronize metadata updates. Also synchronize access to 140 * hr_primary_localcnt and hr_primary_remotecnt fields. 141 */ 142 static pthread_mutex_t metadata_lock; 143 144 /* 145 * Maximum number of outstanding I/O requests. 146 */ 147 #define HAST_HIO_MAX 256 148 /* 149 * Number of components. At this point there are only two components: local 150 * and remote, but in the future it might be possible to use multiple local 151 * and remote components. 152 */ 153 #define HAST_NCOMPONENTS 2 154 /* 155 * Number of seconds to sleep between reconnect retries or keepalive packets. 156 */ 157 #define RETRY_SLEEP 10 158 159 #define ISCONNECTED(res, no) \ 160 ((res)->hr_remotein != NULL && (res)->hr_remoteout != NULL) 161 162 #define QUEUE_INSERT1(hio, name, ncomp) do { \ 163 bool _wakeup; \ 164 \ 165 mtx_lock(&hio_##name##_list_lock[(ncomp)]); \ 166 _wakeup = TAILQ_EMPTY(&hio_##name##_list[(ncomp)]); \ 167 TAILQ_INSERT_TAIL(&hio_##name##_list[(ncomp)], (hio), \ 168 hio_next[(ncomp)]); \ 169 mtx_unlock(&hio_##name##_list_lock[ncomp]); \ 170 if (_wakeup) \ 171 cv_signal(&hio_##name##_list_cond[(ncomp)]); \ 172 } while (0) 173 #define QUEUE_INSERT2(hio, name) do { \ 174 bool _wakeup; \ 175 \ 176 mtx_lock(&hio_##name##_list_lock); \ 177 _wakeup = TAILQ_EMPTY(&hio_##name##_list); \ 178 TAILQ_INSERT_TAIL(&hio_##name##_list, (hio), hio_##name##_next);\ 179 mtx_unlock(&hio_##name##_list_lock); \ 180 if (_wakeup) \ 181 cv_signal(&hio_##name##_list_cond); \ 182 } while (0) 183 #define QUEUE_TAKE1(hio, name, ncomp, timeout) do { \ 184 bool _last; \ 185 \ 186 mtx_lock(&hio_##name##_list_lock[(ncomp)]); \ 187 _last = false; \ 188 while (((hio) = TAILQ_FIRST(&hio_##name##_list[(ncomp)])) == NULL && !_last) { \ 189 cv_timedwait(&hio_##name##_list_cond[(ncomp)], \ 190 &hio_##name##_list_lock[(ncomp)], (timeout)); \ 191 if ((timeout) != 0) \ 192 _last = true; \ 193 } \ 194 if (hio != NULL) { \ 195 TAILQ_REMOVE(&hio_##name##_list[(ncomp)], (hio), \ 196 hio_next[(ncomp)]); \ 197 } \ 198 mtx_unlock(&hio_##name##_list_lock[(ncomp)]); \ 199 } while (0) 200 #define QUEUE_TAKE2(hio, name) do { \ 201 mtx_lock(&hio_##name##_list_lock); \ 202 while (((hio) = TAILQ_FIRST(&hio_##name##_list)) == NULL) { \ 203 cv_wait(&hio_##name##_list_cond, \ 204 &hio_##name##_list_lock); \ 205 } \ 206 TAILQ_REMOVE(&hio_##name##_list, (hio), hio_##name##_next); \ 207 mtx_unlock(&hio_##name##_list_lock); \ 208 } while (0) 209 210 #define SYNCREQ(hio) do { \ 211 (hio)->hio_ggio.gctl_unit = -1; \ 212 (hio)->hio_ggio.gctl_seq = 1; \ 213 } while (0) 214 #define ISSYNCREQ(hio) ((hio)->hio_ggio.gctl_unit == -1) 215 #define SYNCREQDONE(hio) do { (hio)->hio_ggio.gctl_unit = -2; } while (0) 216 #define ISSYNCREQDONE(hio) ((hio)->hio_ggio.gctl_unit == -2) 217 218 static struct hast_resource *gres; 219 220 static pthread_mutex_t range_lock; 221 static struct rangelocks *range_regular; 222 static bool range_regular_wait; 223 static pthread_cond_t range_regular_cond; 224 static struct rangelocks *range_sync; 225 static bool range_sync_wait; 226 static pthread_cond_t range_sync_cond; 227 228 static void *ggate_recv_thread(void *arg); 229 static void *local_send_thread(void *arg); 230 static void *remote_send_thread(void *arg); 231 static void *remote_recv_thread(void *arg); 232 static void *ggate_send_thread(void *arg); 233 static void *sync_thread(void *arg); 234 static void *guard_thread(void *arg); 235 236 static void 237 cleanup(struct hast_resource *res) 238 { 239 int rerrno; 240 241 /* Remember errno. */ 242 rerrno = errno; 243 244 /* Destroy ggate provider if we created one. */ 245 if (res->hr_ggateunit >= 0) { 246 struct g_gate_ctl_destroy ggiod; 247 248 bzero(&ggiod, sizeof(ggiod)); 249 ggiod.gctl_version = G_GATE_VERSION; 250 ggiod.gctl_unit = res->hr_ggateunit; 251 ggiod.gctl_force = 1; 252 if (ioctl(res->hr_ggatefd, G_GATE_CMD_DESTROY, &ggiod) < 0) { 253 pjdlog_errno(LOG_WARNING, 254 "Unable to destroy hast/%s device", 255 res->hr_provname); 256 } 257 res->hr_ggateunit = -1; 258 } 259 260 /* Restore errno. */ 261 errno = rerrno; 262 } 263 264 static __dead2 void 265 primary_exit(int exitcode, const char *fmt, ...) 266 { 267 va_list ap; 268 269 assert(exitcode != EX_OK); 270 va_start(ap, fmt); 271 pjdlogv_errno(LOG_ERR, fmt, ap); 272 va_end(ap); 273 cleanup(gres); 274 exit(exitcode); 275 } 276 277 static __dead2 void 278 primary_exitx(int exitcode, const char *fmt, ...) 279 { 280 va_list ap; 281 282 va_start(ap, fmt); 283 pjdlogv(exitcode == EX_OK ? LOG_INFO : LOG_ERR, fmt, ap); 284 va_end(ap); 285 cleanup(gres); 286 exit(exitcode); 287 } 288 289 static int 290 hast_activemap_flush(struct hast_resource *res) 291 { 292 const unsigned char *buf; 293 size_t size; 294 295 buf = activemap_bitmap(res->hr_amp, &size); 296 assert(buf != NULL); 297 assert((size % res->hr_local_sectorsize) == 0); 298 if (pwrite(res->hr_localfd, buf, size, METADATA_SIZE) != 299 (ssize_t)size) { 300 KEEP_ERRNO(pjdlog_errno(LOG_ERR, 301 "Unable to flush activemap to disk")); 302 return (-1); 303 } 304 return (0); 305 } 306 307 static bool 308 real_remote(const struct hast_resource *res) 309 { 310 311 return (strcmp(res->hr_remoteaddr, "none") != 0); 312 } 313 314 static void 315 init_environment(struct hast_resource *res __unused) 316 { 317 struct hio *hio; 318 unsigned int ii, ncomps; 319 320 /* 321 * In the future it might be per-resource value. 322 */ 323 ncomps = HAST_NCOMPONENTS; 324 325 /* 326 * Allocate memory needed by lists. 327 */ 328 hio_send_list = malloc(sizeof(hio_send_list[0]) * ncomps); 329 if (hio_send_list == NULL) { 330 primary_exitx(EX_TEMPFAIL, 331 "Unable to allocate %zu bytes of memory for send lists.", 332 sizeof(hio_send_list[0]) * ncomps); 333 } 334 hio_send_list_lock = malloc(sizeof(hio_send_list_lock[0]) * ncomps); 335 if (hio_send_list_lock == NULL) { 336 primary_exitx(EX_TEMPFAIL, 337 "Unable to allocate %zu bytes of memory for send list locks.", 338 sizeof(hio_send_list_lock[0]) * ncomps); 339 } 340 hio_send_list_cond = malloc(sizeof(hio_send_list_cond[0]) * ncomps); 341 if (hio_send_list_cond == NULL) { 342 primary_exitx(EX_TEMPFAIL, 343 "Unable to allocate %zu bytes of memory for send list condition variables.", 344 sizeof(hio_send_list_cond[0]) * ncomps); 345 } 346 hio_recv_list = malloc(sizeof(hio_recv_list[0]) * ncomps); 347 if (hio_recv_list == NULL) { 348 primary_exitx(EX_TEMPFAIL, 349 "Unable to allocate %zu bytes of memory for recv lists.", 350 sizeof(hio_recv_list[0]) * ncomps); 351 } 352 hio_recv_list_lock = malloc(sizeof(hio_recv_list_lock[0]) * ncomps); 353 if (hio_recv_list_lock == NULL) { 354 primary_exitx(EX_TEMPFAIL, 355 "Unable to allocate %zu bytes of memory for recv list locks.", 356 sizeof(hio_recv_list_lock[0]) * ncomps); 357 } 358 hio_recv_list_cond = malloc(sizeof(hio_recv_list_cond[0]) * ncomps); 359 if (hio_recv_list_cond == NULL) { 360 primary_exitx(EX_TEMPFAIL, 361 "Unable to allocate %zu bytes of memory for recv list condition variables.", 362 sizeof(hio_recv_list_cond[0]) * ncomps); 363 } 364 hio_remote_lock = malloc(sizeof(hio_remote_lock[0]) * ncomps); 365 if (hio_remote_lock == NULL) { 366 primary_exitx(EX_TEMPFAIL, 367 "Unable to allocate %zu bytes of memory for remote connections locks.", 368 sizeof(hio_remote_lock[0]) * ncomps); 369 } 370 371 /* 372 * Initialize lists, their locks and theirs condition variables. 373 */ 374 TAILQ_INIT(&hio_free_list); 375 mtx_init(&hio_free_list_lock); 376 cv_init(&hio_free_list_cond); 377 for (ii = 0; ii < HAST_NCOMPONENTS; ii++) { 378 TAILQ_INIT(&hio_send_list[ii]); 379 mtx_init(&hio_send_list_lock[ii]); 380 cv_init(&hio_send_list_cond[ii]); 381 TAILQ_INIT(&hio_recv_list[ii]); 382 mtx_init(&hio_recv_list_lock[ii]); 383 cv_init(&hio_recv_list_cond[ii]); 384 rw_init(&hio_remote_lock[ii]); 385 } 386 TAILQ_INIT(&hio_done_list); 387 mtx_init(&hio_done_list_lock); 388 cv_init(&hio_done_list_cond); 389 mtx_init(&metadata_lock); 390 391 /* 392 * Allocate requests pool and initialize requests. 393 */ 394 for (ii = 0; ii < HAST_HIO_MAX; ii++) { 395 hio = malloc(sizeof(*hio)); 396 if (hio == NULL) { 397 primary_exitx(EX_TEMPFAIL, 398 "Unable to allocate %zu bytes of memory for hio request.", 399 sizeof(*hio)); 400 } 401 hio->hio_countdown = 0; 402 hio->hio_errors = malloc(sizeof(hio->hio_errors[0]) * ncomps); 403 if (hio->hio_errors == NULL) { 404 primary_exitx(EX_TEMPFAIL, 405 "Unable allocate %zu bytes of memory for hio errors.", 406 sizeof(hio->hio_errors[0]) * ncomps); 407 } 408 hio->hio_next = malloc(sizeof(hio->hio_next[0]) * ncomps); 409 if (hio->hio_next == NULL) { 410 primary_exitx(EX_TEMPFAIL, 411 "Unable allocate %zu bytes of memory for hio_next field.", 412 sizeof(hio->hio_next[0]) * ncomps); 413 } 414 hio->hio_ggio.gctl_version = G_GATE_VERSION; 415 hio->hio_ggio.gctl_data = malloc(MAXPHYS); 416 if (hio->hio_ggio.gctl_data == NULL) { 417 primary_exitx(EX_TEMPFAIL, 418 "Unable to allocate %zu bytes of memory for gctl_data.", 419 MAXPHYS); 420 } 421 hio->hio_ggio.gctl_length = MAXPHYS; 422 hio->hio_ggio.gctl_error = 0; 423 TAILQ_INSERT_HEAD(&hio_free_list, hio, hio_free_next); 424 } 425 } 426 427 static bool 428 init_resuid(struct hast_resource *res) 429 { 430 431 mtx_lock(&metadata_lock); 432 if (res->hr_resuid != 0) { 433 mtx_unlock(&metadata_lock); 434 return (false); 435 } else { 436 /* Initialize unique resource identifier. */ 437 arc4random_buf(&res->hr_resuid, sizeof(res->hr_resuid)); 438 mtx_unlock(&metadata_lock); 439 if (metadata_write(res) < 0) 440 exit(EX_NOINPUT); 441 return (true); 442 } 443 } 444 445 static void 446 init_local(struct hast_resource *res) 447 { 448 unsigned char *buf; 449 size_t mapsize; 450 451 if (metadata_read(res, true) < 0) 452 exit(EX_NOINPUT); 453 mtx_init(&res->hr_amp_lock); 454 if (activemap_init(&res->hr_amp, res->hr_datasize, res->hr_extentsize, 455 res->hr_local_sectorsize, res->hr_keepdirty) < 0) { 456 primary_exit(EX_TEMPFAIL, "Unable to create activemap"); 457 } 458 mtx_init(&range_lock); 459 cv_init(&range_regular_cond); 460 if (rangelock_init(&range_regular) < 0) 461 primary_exit(EX_TEMPFAIL, "Unable to create regular range lock"); 462 cv_init(&range_sync_cond); 463 if (rangelock_init(&range_sync) < 0) 464 primary_exit(EX_TEMPFAIL, "Unable to create sync range lock"); 465 mapsize = activemap_ondisk_size(res->hr_amp); 466 buf = calloc(1, mapsize); 467 if (buf == NULL) { 468 primary_exitx(EX_TEMPFAIL, 469 "Unable to allocate buffer for activemap."); 470 } 471 if (pread(res->hr_localfd, buf, mapsize, METADATA_SIZE) != 472 (ssize_t)mapsize) { 473 primary_exit(EX_NOINPUT, "Unable to read activemap"); 474 } 475 activemap_copyin(res->hr_amp, buf, mapsize); 476 free(buf); 477 if (res->hr_resuid != 0) 478 return; 479 /* 480 * We're using provider for the first time. Initialize local and remote 481 * counters. We don't initialize resuid here, as we want to do it just 482 * in time. The reason for this is that we want to inform secondary 483 * that there were no writes yet, so there is no need to synchronize 484 * anything. 485 */ 486 res->hr_primary_localcnt = 1; 487 res->hr_primary_remotecnt = 0; 488 if (metadata_write(res) < 0) 489 exit(EX_NOINPUT); 490 } 491 492 static bool 493 init_remote(struct hast_resource *res, struct proto_conn **inp, 494 struct proto_conn **outp) 495 { 496 struct proto_conn *in, *out; 497 struct nv *nvout, *nvin; 498 const unsigned char *token; 499 unsigned char *map; 500 const char *errmsg; 501 int32_t extentsize; 502 int64_t datasize; 503 uint32_t mapsize; 504 size_t size; 505 506 assert((inp == NULL && outp == NULL) || (inp != NULL && outp != NULL)); 507 assert(real_remote(res)); 508 509 in = out = NULL; 510 errmsg = NULL; 511 512 /* Prepare outgoing connection with remote node. */ 513 if (proto_client(res->hr_remoteaddr, &out) < 0) { 514 primary_exit(EX_TEMPFAIL, 515 "Unable to create outgoing connection to %s", 516 res->hr_remoteaddr); 517 } 518 /* Try to connect, but accept failure. */ 519 if (proto_connect(out) < 0) { 520 pjdlog_errno(LOG_WARNING, "Unable to connect to %s", 521 res->hr_remoteaddr); 522 goto close; 523 } 524 /* Error in setting timeout is not critical, but why should it fail? */ 525 if (proto_timeout(out, res->hr_timeout) < 0) 526 pjdlog_errno(LOG_WARNING, "Unable to set connection timeout"); 527 /* 528 * First handshake step. 529 * Setup outgoing connection with remote node. 530 */ 531 nvout = nv_alloc(); 532 nv_add_string(nvout, res->hr_name, "resource"); 533 if (nv_error(nvout) != 0) { 534 pjdlog_common(LOG_WARNING, 0, nv_error(nvout), 535 "Unable to allocate header for connection with %s", 536 res->hr_remoteaddr); 537 nv_free(nvout); 538 goto close; 539 } 540 if (hast_proto_send(res, out, nvout, NULL, 0) < 0) { 541 pjdlog_errno(LOG_WARNING, 542 "Unable to send handshake header to %s", 543 res->hr_remoteaddr); 544 nv_free(nvout); 545 goto close; 546 } 547 nv_free(nvout); 548 if (hast_proto_recv_hdr(out, &nvin) < 0) { 549 pjdlog_errno(LOG_WARNING, 550 "Unable to receive handshake header from %s", 551 res->hr_remoteaddr); 552 goto close; 553 } 554 errmsg = nv_get_string(nvin, "errmsg"); 555 if (errmsg != NULL) { 556 pjdlog_warning("%s", errmsg); 557 nv_free(nvin); 558 goto close; 559 } 560 token = nv_get_uint8_array(nvin, &size, "token"); 561 if (token == NULL) { 562 pjdlog_warning("Handshake header from %s has no 'token' field.", 563 res->hr_remoteaddr); 564 nv_free(nvin); 565 goto close; 566 } 567 if (size != sizeof(res->hr_token)) { 568 pjdlog_warning("Handshake header from %s contains 'token' of wrong size (got %zu, expected %zu).", 569 res->hr_remoteaddr, size, sizeof(res->hr_token)); 570 nv_free(nvin); 571 goto close; 572 } 573 bcopy(token, res->hr_token, sizeof(res->hr_token)); 574 nv_free(nvin); 575 576 /* 577 * Second handshake step. 578 * Setup incoming connection with remote node. 579 */ 580 if (proto_client(res->hr_remoteaddr, &in) < 0) { 581 primary_exit(EX_TEMPFAIL, 582 "Unable to create incoming connection to %s", 583 res->hr_remoteaddr); 584 } 585 /* Try to connect, but accept failure. */ 586 if (proto_connect(in) < 0) { 587 pjdlog_errno(LOG_WARNING, "Unable to connect to %s", 588 res->hr_remoteaddr); 589 goto close; 590 } 591 /* Error in setting timeout is not critical, but why should it fail? */ 592 if (proto_timeout(in, res->hr_timeout) < 0) 593 pjdlog_errno(LOG_WARNING, "Unable to set connection timeout"); 594 nvout = nv_alloc(); 595 nv_add_string(nvout, res->hr_name, "resource"); 596 nv_add_uint8_array(nvout, res->hr_token, sizeof(res->hr_token), 597 "token"); 598 if (res->hr_resuid == 0) { 599 /* 600 * The resuid field was not yet initialized. 601 * Because we do synchronization inside init_resuid(), it is 602 * possible that someone already initialized it, the function 603 * will return false then, but if we successfully initialized 604 * it, we will get true. True means that there were no writes 605 * to this resource yet and we want to inform secondary that 606 * synchronization is not needed by sending "virgin" argument. 607 */ 608 if (init_resuid(res)) 609 nv_add_int8(nvout, 1, "virgin"); 610 } 611 nv_add_uint64(nvout, res->hr_resuid, "resuid"); 612 nv_add_uint64(nvout, res->hr_primary_localcnt, "localcnt"); 613 nv_add_uint64(nvout, res->hr_primary_remotecnt, "remotecnt"); 614 if (nv_error(nvout) != 0) { 615 pjdlog_common(LOG_WARNING, 0, nv_error(nvout), 616 "Unable to allocate header for connection with %s", 617 res->hr_remoteaddr); 618 nv_free(nvout); 619 goto close; 620 } 621 if (hast_proto_send(res, in, nvout, NULL, 0) < 0) { 622 pjdlog_errno(LOG_WARNING, 623 "Unable to send handshake header to %s", 624 res->hr_remoteaddr); 625 nv_free(nvout); 626 goto close; 627 } 628 nv_free(nvout); 629 if (hast_proto_recv_hdr(out, &nvin) < 0) { 630 pjdlog_errno(LOG_WARNING, 631 "Unable to receive handshake header from %s", 632 res->hr_remoteaddr); 633 goto close; 634 } 635 errmsg = nv_get_string(nvin, "errmsg"); 636 if (errmsg != NULL) { 637 pjdlog_warning("%s", errmsg); 638 nv_free(nvin); 639 goto close; 640 } 641 datasize = nv_get_int64(nvin, "datasize"); 642 if (datasize != res->hr_datasize) { 643 pjdlog_warning("Data size differs between nodes (local=%jd, remote=%jd).", 644 (intmax_t)res->hr_datasize, (intmax_t)datasize); 645 nv_free(nvin); 646 goto close; 647 } 648 extentsize = nv_get_int32(nvin, "extentsize"); 649 if (extentsize != res->hr_extentsize) { 650 pjdlog_warning("Extent size differs between nodes (local=%zd, remote=%zd).", 651 (ssize_t)res->hr_extentsize, (ssize_t)extentsize); 652 nv_free(nvin); 653 goto close; 654 } 655 res->hr_secondary_localcnt = nv_get_uint64(nvin, "localcnt"); 656 res->hr_secondary_remotecnt = nv_get_uint64(nvin, "remotecnt"); 657 res->hr_syncsrc = nv_get_uint8(nvin, "syncsrc"); 658 map = NULL; 659 mapsize = nv_get_uint32(nvin, "mapsize"); 660 if (mapsize > 0) { 661 map = malloc(mapsize); 662 if (map == NULL) { 663 pjdlog_error("Unable to allocate memory for remote activemap (mapsize=%ju).", 664 (uintmax_t)mapsize); 665 nv_free(nvin); 666 goto close; 667 } 668 /* 669 * Remote node have some dirty extents on its own, lets 670 * download its activemap. 671 */ 672 if (hast_proto_recv_data(res, out, nvin, map, 673 mapsize) < 0) { 674 pjdlog_errno(LOG_ERR, 675 "Unable to receive remote activemap"); 676 nv_free(nvin); 677 free(map); 678 goto close; 679 } 680 /* 681 * Merge local and remote bitmaps. 682 */ 683 activemap_merge(res->hr_amp, map, mapsize); 684 free(map); 685 /* 686 * Now that we merged bitmaps from both nodes, flush it to the 687 * disk before we start to synchronize. 688 */ 689 (void)hast_activemap_flush(res); 690 } 691 nv_free(nvin); 692 pjdlog_info("Connected to %s.", res->hr_remoteaddr); 693 if (inp != NULL && outp != NULL) { 694 *inp = in; 695 *outp = out; 696 } else { 697 res->hr_remotein = in; 698 res->hr_remoteout = out; 699 } 700 event_send(res, EVENT_CONNECT); 701 return (true); 702 close: 703 if (errmsg != NULL && strcmp(errmsg, "Split-brain condition!") == 0) 704 event_send(res, EVENT_SPLITBRAIN); 705 proto_close(out); 706 if (in != NULL) 707 proto_close(in); 708 return (false); 709 } 710 711 static void 712 sync_start(void) 713 { 714 715 mtx_lock(&sync_lock); 716 sync_inprogress = true; 717 mtx_unlock(&sync_lock); 718 cv_signal(&sync_cond); 719 } 720 721 static void 722 sync_stop(void) 723 { 724 725 mtx_lock(&sync_lock); 726 if (sync_inprogress) 727 sync_inprogress = false; 728 mtx_unlock(&sync_lock); 729 } 730 731 static void 732 init_ggate(struct hast_resource *res) 733 { 734 struct g_gate_ctl_create ggiocreate; 735 struct g_gate_ctl_cancel ggiocancel; 736 737 /* 738 * We communicate with ggate via /dev/ggctl. Open it. 739 */ 740 res->hr_ggatefd = open("/dev/" G_GATE_CTL_NAME, O_RDWR); 741 if (res->hr_ggatefd < 0) 742 primary_exit(EX_OSFILE, "Unable to open /dev/" G_GATE_CTL_NAME); 743 /* 744 * Create provider before trying to connect, as connection failure 745 * is not critical, but may take some time. 746 */ 747 bzero(&ggiocreate, sizeof(ggiocreate)); 748 ggiocreate.gctl_version = G_GATE_VERSION; 749 ggiocreate.gctl_mediasize = res->hr_datasize; 750 ggiocreate.gctl_sectorsize = res->hr_local_sectorsize; 751 ggiocreate.gctl_flags = 0; 752 ggiocreate.gctl_maxcount = G_GATE_MAX_QUEUE_SIZE; 753 ggiocreate.gctl_timeout = 0; 754 ggiocreate.gctl_unit = G_GATE_NAME_GIVEN; 755 snprintf(ggiocreate.gctl_name, sizeof(ggiocreate.gctl_name), "hast/%s", 756 res->hr_provname); 757 if (ioctl(res->hr_ggatefd, G_GATE_CMD_CREATE, &ggiocreate) == 0) { 758 pjdlog_info("Device hast/%s created.", res->hr_provname); 759 res->hr_ggateunit = ggiocreate.gctl_unit; 760 return; 761 } 762 if (errno != EEXIST) { 763 primary_exit(EX_OSERR, "Unable to create hast/%s device", 764 res->hr_provname); 765 } 766 pjdlog_debug(1, 767 "Device hast/%s already exists, we will try to take it over.", 768 res->hr_provname); 769 /* 770 * If we received EEXIST, we assume that the process who created the 771 * provider died and didn't clean up. In that case we will start from 772 * where he left of. 773 */ 774 bzero(&ggiocancel, sizeof(ggiocancel)); 775 ggiocancel.gctl_version = G_GATE_VERSION; 776 ggiocancel.gctl_unit = G_GATE_NAME_GIVEN; 777 snprintf(ggiocancel.gctl_name, sizeof(ggiocancel.gctl_name), "hast/%s", 778 res->hr_provname); 779 if (ioctl(res->hr_ggatefd, G_GATE_CMD_CANCEL, &ggiocancel) == 0) { 780 pjdlog_info("Device hast/%s recovered.", res->hr_provname); 781 res->hr_ggateunit = ggiocancel.gctl_unit; 782 return; 783 } 784 primary_exit(EX_OSERR, "Unable to take over hast/%s device", 785 res->hr_provname); 786 } 787 788 void 789 hastd_primary(struct hast_resource *res) 790 { 791 pthread_t td; 792 pid_t pid; 793 int error; 794 795 /* 796 * Create communication channel between parent and child. 797 */ 798 if (proto_client("socketpair://", &res->hr_ctrl) < 0) { 799 KEEP_ERRNO((void)pidfile_remove(pfh)); 800 pjdlog_exit(EX_OSERR, 801 "Unable to create control sockets between parent and child"); 802 } 803 /* 804 * Create communication channel between child and parent. 805 */ 806 if (proto_client("socketpair://", &res->hr_event) < 0) { 807 KEEP_ERRNO((void)pidfile_remove(pfh)); 808 pjdlog_exit(EX_OSERR, 809 "Unable to create event sockets between child and parent"); 810 } 811 812 pid = fork(); 813 if (pid < 0) { 814 KEEP_ERRNO((void)pidfile_remove(pfh)); 815 pjdlog_exit(EX_TEMPFAIL, "Unable to fork"); 816 } 817 818 if (pid > 0) { 819 /* This is parent. */ 820 /* Declare that we are receiver. */ 821 proto_recv(res->hr_event, NULL, 0); 822 res->hr_workerpid = pid; 823 return; 824 } 825 826 gres = res; 827 828 (void)pidfile_close(pfh); 829 hook_fini(); 830 831 setproctitle("%s (primary)", res->hr_name); 832 833 /* Declare that we are sender. */ 834 proto_send(res->hr_event, NULL, 0); 835 836 init_local(res); 837 init_ggate(res); 838 init_environment(res); 839 840 /* 841 * Create the guard thread first, so we can handle signals from the 842 * very begining. 843 */ 844 error = pthread_create(&td, NULL, guard_thread, res); 845 assert(error == 0); 846 /* 847 * Create the control thread before sending any event to the parent, 848 * as we can deadlock when parent sends control request to worker, 849 * but worker has no control thread started yet, so parent waits. 850 * In the meantime worker sends an event to the parent, but parent 851 * is unable to handle the event, because it waits for control 852 * request response. 853 */ 854 error = pthread_create(&td, NULL, ctrl_thread, res); 855 assert(error == 0); 856 if (real_remote(res) && init_remote(res, NULL, NULL)) 857 sync_start(); 858 error = pthread_create(&td, NULL, ggate_recv_thread, res); 859 assert(error == 0); 860 error = pthread_create(&td, NULL, local_send_thread, res); 861 assert(error == 0); 862 error = pthread_create(&td, NULL, remote_send_thread, res); 863 assert(error == 0); 864 error = pthread_create(&td, NULL, remote_recv_thread, res); 865 assert(error == 0); 866 error = pthread_create(&td, NULL, ggate_send_thread, res); 867 assert(error == 0); 868 (void)sync_thread(res); 869 } 870 871 static void 872 reqlog(int loglevel, int debuglevel, struct g_gate_ctl_io *ggio, const char *fmt, ...) 873 { 874 char msg[1024]; 875 va_list ap; 876 int len; 877 878 va_start(ap, fmt); 879 len = vsnprintf(msg, sizeof(msg), fmt, ap); 880 va_end(ap); 881 if ((size_t)len < sizeof(msg)) { 882 switch (ggio->gctl_cmd) { 883 case BIO_READ: 884 (void)snprintf(msg + len, sizeof(msg) - len, 885 "READ(%ju, %ju).", (uintmax_t)ggio->gctl_offset, 886 (uintmax_t)ggio->gctl_length); 887 break; 888 case BIO_DELETE: 889 (void)snprintf(msg + len, sizeof(msg) - len, 890 "DELETE(%ju, %ju).", (uintmax_t)ggio->gctl_offset, 891 (uintmax_t)ggio->gctl_length); 892 break; 893 case BIO_FLUSH: 894 (void)snprintf(msg + len, sizeof(msg) - len, "FLUSH."); 895 break; 896 case BIO_WRITE: 897 (void)snprintf(msg + len, sizeof(msg) - len, 898 "WRITE(%ju, %ju).", (uintmax_t)ggio->gctl_offset, 899 (uintmax_t)ggio->gctl_length); 900 break; 901 default: 902 (void)snprintf(msg + len, sizeof(msg) - len, 903 "UNKNOWN(%u).", (unsigned int)ggio->gctl_cmd); 904 break; 905 } 906 } 907 pjdlog_common(loglevel, debuglevel, -1, "%s", msg); 908 } 909 910 static void 911 remote_close(struct hast_resource *res, int ncomp) 912 { 913 914 rw_wlock(&hio_remote_lock[ncomp]); 915 /* 916 * A race is possible between dropping rlock and acquiring wlock - 917 * another thread can close connection in-between. 918 */ 919 if (!ISCONNECTED(res, ncomp)) { 920 assert(res->hr_remotein == NULL); 921 assert(res->hr_remoteout == NULL); 922 rw_unlock(&hio_remote_lock[ncomp]); 923 return; 924 } 925 926 assert(res->hr_remotein != NULL); 927 assert(res->hr_remoteout != NULL); 928 929 pjdlog_debug(2, "Closing incoming connection to %s.", 930 res->hr_remoteaddr); 931 proto_close(res->hr_remotein); 932 res->hr_remotein = NULL; 933 pjdlog_debug(2, "Closing outgoing connection to %s.", 934 res->hr_remoteaddr); 935 proto_close(res->hr_remoteout); 936 res->hr_remoteout = NULL; 937 938 rw_unlock(&hio_remote_lock[ncomp]); 939 940 pjdlog_warning("Disconnected from %s.", res->hr_remoteaddr); 941 942 /* 943 * Stop synchronization if in-progress. 944 */ 945 sync_stop(); 946 947 event_send(res, EVENT_DISCONNECT); 948 } 949 950 /* 951 * Thread receives ggate I/O requests from the kernel and passes them to 952 * appropriate threads: 953 * WRITE - always goes to both local_send and remote_send threads 954 * READ (when the block is up-to-date on local component) - 955 * only local_send thread 956 * READ (when the block isn't up-to-date on local component) - 957 * only remote_send thread 958 * DELETE - always goes to both local_send and remote_send threads 959 * FLUSH - always goes to both local_send and remote_send threads 960 */ 961 static void * 962 ggate_recv_thread(void *arg) 963 { 964 struct hast_resource *res = arg; 965 struct g_gate_ctl_io *ggio; 966 struct hio *hio; 967 unsigned int ii, ncomp, ncomps; 968 int error; 969 970 ncomps = HAST_NCOMPONENTS; 971 972 for (;;) { 973 pjdlog_debug(2, "ggate_recv: Taking free request."); 974 QUEUE_TAKE2(hio, free); 975 pjdlog_debug(2, "ggate_recv: (%p) Got free request.", hio); 976 ggio = &hio->hio_ggio; 977 ggio->gctl_unit = res->hr_ggateunit; 978 ggio->gctl_length = MAXPHYS; 979 ggio->gctl_error = 0; 980 pjdlog_debug(2, 981 "ggate_recv: (%p) Waiting for request from the kernel.", 982 hio); 983 if (ioctl(res->hr_ggatefd, G_GATE_CMD_START, ggio) < 0) { 984 if (sigexit_received) 985 pthread_exit(NULL); 986 primary_exit(EX_OSERR, "G_GATE_CMD_START failed"); 987 } 988 error = ggio->gctl_error; 989 switch (error) { 990 case 0: 991 break; 992 case ECANCELED: 993 /* Exit gracefully. */ 994 if (!sigexit_received) { 995 pjdlog_debug(2, 996 "ggate_recv: (%p) Received cancel from the kernel.", 997 hio); 998 pjdlog_info("Received cancel from the kernel, exiting."); 999 } 1000 pthread_exit(NULL); 1001 case ENOMEM: 1002 /* 1003 * Buffer too small? Impossible, we allocate MAXPHYS 1004 * bytes - request can't be bigger than that. 1005 */ 1006 /* FALLTHROUGH */ 1007 case ENXIO: 1008 default: 1009 primary_exitx(EX_OSERR, "G_GATE_CMD_START failed: %s.", 1010 strerror(error)); 1011 } 1012 for (ii = 0; ii < ncomps; ii++) 1013 hio->hio_errors[ii] = EINVAL; 1014 reqlog(LOG_DEBUG, 2, ggio, 1015 "ggate_recv: (%p) Request received from the kernel: ", 1016 hio); 1017 /* 1018 * Inform all components about new write request. 1019 * For read request prefer local component unless the given 1020 * range is out-of-date, then use remote component. 1021 */ 1022 switch (ggio->gctl_cmd) { 1023 case BIO_READ: 1024 pjdlog_debug(2, 1025 "ggate_recv: (%p) Moving request to the send queue.", 1026 hio); 1027 refcount_init(&hio->hio_countdown, 1); 1028 mtx_lock(&metadata_lock); 1029 if (res->hr_syncsrc == HAST_SYNCSRC_UNDEF || 1030 res->hr_syncsrc == HAST_SYNCSRC_PRIMARY) { 1031 /* 1032 * This range is up-to-date on local component, 1033 * so handle request locally. 1034 */ 1035 /* Local component is 0 for now. */ 1036 ncomp = 0; 1037 } else /* if (res->hr_syncsrc == 1038 HAST_SYNCSRC_SECONDARY) */ { 1039 assert(res->hr_syncsrc == 1040 HAST_SYNCSRC_SECONDARY); 1041 /* 1042 * This range is out-of-date on local component, 1043 * so send request to the remote node. 1044 */ 1045 /* Remote component is 1 for now. */ 1046 ncomp = 1; 1047 } 1048 mtx_unlock(&metadata_lock); 1049 QUEUE_INSERT1(hio, send, ncomp); 1050 break; 1051 case BIO_WRITE: 1052 if (res->hr_resuid == 0) { 1053 /* This is first write, initialize resuid. */ 1054 (void)init_resuid(res); 1055 } 1056 for (;;) { 1057 mtx_lock(&range_lock); 1058 if (rangelock_islocked(range_sync, 1059 ggio->gctl_offset, ggio->gctl_length)) { 1060 pjdlog_debug(2, 1061 "regular: Range offset=%jd length=%zu locked.", 1062 (intmax_t)ggio->gctl_offset, 1063 (size_t)ggio->gctl_length); 1064 range_regular_wait = true; 1065 cv_wait(&range_regular_cond, &range_lock); 1066 range_regular_wait = false; 1067 mtx_unlock(&range_lock); 1068 continue; 1069 } 1070 if (rangelock_add(range_regular, 1071 ggio->gctl_offset, ggio->gctl_length) < 0) { 1072 mtx_unlock(&range_lock); 1073 pjdlog_debug(2, 1074 "regular: Range offset=%jd length=%zu is already locked, waiting.", 1075 (intmax_t)ggio->gctl_offset, 1076 (size_t)ggio->gctl_length); 1077 sleep(1); 1078 continue; 1079 } 1080 mtx_unlock(&range_lock); 1081 break; 1082 } 1083 mtx_lock(&res->hr_amp_lock); 1084 if (activemap_write_start(res->hr_amp, 1085 ggio->gctl_offset, ggio->gctl_length)) { 1086 (void)hast_activemap_flush(res); 1087 } 1088 mtx_unlock(&res->hr_amp_lock); 1089 /* FALLTHROUGH */ 1090 case BIO_DELETE: 1091 case BIO_FLUSH: 1092 pjdlog_debug(2, 1093 "ggate_recv: (%p) Moving request to the send queues.", 1094 hio); 1095 refcount_init(&hio->hio_countdown, ncomps); 1096 for (ii = 0; ii < ncomps; ii++) 1097 QUEUE_INSERT1(hio, send, ii); 1098 break; 1099 } 1100 } 1101 /* NOTREACHED */ 1102 return (NULL); 1103 } 1104 1105 /* 1106 * Thread reads from or writes to local component. 1107 * If local read fails, it redirects it to remote_send thread. 1108 */ 1109 static void * 1110 local_send_thread(void *arg) 1111 { 1112 struct hast_resource *res = arg; 1113 struct g_gate_ctl_io *ggio; 1114 struct hio *hio; 1115 unsigned int ncomp, rncomp; 1116 ssize_t ret; 1117 1118 /* Local component is 0 for now. */ 1119 ncomp = 0; 1120 /* Remote component is 1 for now. */ 1121 rncomp = 1; 1122 1123 for (;;) { 1124 pjdlog_debug(2, "local_send: Taking request."); 1125 QUEUE_TAKE1(hio, send, ncomp, 0); 1126 pjdlog_debug(2, "local_send: (%p) Got request.", hio); 1127 ggio = &hio->hio_ggio; 1128 switch (ggio->gctl_cmd) { 1129 case BIO_READ: 1130 ret = pread(res->hr_localfd, ggio->gctl_data, 1131 ggio->gctl_length, 1132 ggio->gctl_offset + res->hr_localoff); 1133 if (ret == ggio->gctl_length) 1134 hio->hio_errors[ncomp] = 0; 1135 else { 1136 /* 1137 * If READ failed, try to read from remote node. 1138 */ 1139 if (ret < 0) { 1140 reqlog(LOG_WARNING, 0, ggio, 1141 "Local request failed (%s), trying remote node. ", 1142 strerror(errno)); 1143 } else if (ret != ggio->gctl_length) { 1144 reqlog(LOG_WARNING, 0, ggio, 1145 "Local request failed (%zd != %jd), trying remote node. ", 1146 ret, (intmax_t)ggio->gctl_length); 1147 } 1148 QUEUE_INSERT1(hio, send, rncomp); 1149 continue; 1150 } 1151 break; 1152 case BIO_WRITE: 1153 ret = pwrite(res->hr_localfd, ggio->gctl_data, 1154 ggio->gctl_length, 1155 ggio->gctl_offset + res->hr_localoff); 1156 if (ret < 0) { 1157 hio->hio_errors[ncomp] = errno; 1158 reqlog(LOG_WARNING, 0, ggio, 1159 "Local request failed (%s): ", 1160 strerror(errno)); 1161 } else if (ret != ggio->gctl_length) { 1162 hio->hio_errors[ncomp] = EIO; 1163 reqlog(LOG_WARNING, 0, ggio, 1164 "Local request failed (%zd != %jd): ", 1165 ret, (intmax_t)ggio->gctl_length); 1166 } else { 1167 hio->hio_errors[ncomp] = 0; 1168 } 1169 break; 1170 case BIO_DELETE: 1171 ret = g_delete(res->hr_localfd, 1172 ggio->gctl_offset + res->hr_localoff, 1173 ggio->gctl_length); 1174 if (ret < 0) { 1175 hio->hio_errors[ncomp] = errno; 1176 reqlog(LOG_WARNING, 0, ggio, 1177 "Local request failed (%s): ", 1178 strerror(errno)); 1179 } else { 1180 hio->hio_errors[ncomp] = 0; 1181 } 1182 break; 1183 case BIO_FLUSH: 1184 ret = g_flush(res->hr_localfd); 1185 if (ret < 0) { 1186 hio->hio_errors[ncomp] = errno; 1187 reqlog(LOG_WARNING, 0, ggio, 1188 "Local request failed (%s): ", 1189 strerror(errno)); 1190 } else { 1191 hio->hio_errors[ncomp] = 0; 1192 } 1193 break; 1194 } 1195 if (refcount_release(&hio->hio_countdown)) { 1196 if (ISSYNCREQ(hio)) { 1197 mtx_lock(&sync_lock); 1198 SYNCREQDONE(hio); 1199 mtx_unlock(&sync_lock); 1200 cv_signal(&sync_cond); 1201 } else { 1202 pjdlog_debug(2, 1203 "local_send: (%p) Moving request to the done queue.", 1204 hio); 1205 QUEUE_INSERT2(hio, done); 1206 } 1207 } 1208 } 1209 /* NOTREACHED */ 1210 return (NULL); 1211 } 1212 1213 static void 1214 keepalive_send(struct hast_resource *res, unsigned int ncomp) 1215 { 1216 struct nv *nv; 1217 1218 if (!ISCONNECTED(res, ncomp)) 1219 return; 1220 1221 assert(res->hr_remotein != NULL); 1222 assert(res->hr_remoteout != NULL); 1223 1224 nv = nv_alloc(); 1225 nv_add_uint8(nv, HIO_KEEPALIVE, "cmd"); 1226 if (nv_error(nv) != 0) { 1227 nv_free(nv); 1228 pjdlog_debug(1, 1229 "keepalive_send: Unable to prepare header to send."); 1230 return; 1231 } 1232 if (hast_proto_send(res, res->hr_remoteout, nv, NULL, 0) < 0) { 1233 pjdlog_common(LOG_DEBUG, 1, errno, 1234 "keepalive_send: Unable to send request"); 1235 nv_free(nv); 1236 rw_unlock(&hio_remote_lock[ncomp]); 1237 remote_close(res, ncomp); 1238 rw_rlock(&hio_remote_lock[ncomp]); 1239 return; 1240 } 1241 nv_free(nv); 1242 pjdlog_debug(2, "keepalive_send: Request sent."); 1243 } 1244 1245 /* 1246 * Thread sends request to secondary node. 1247 */ 1248 static void * 1249 remote_send_thread(void *arg) 1250 { 1251 struct hast_resource *res = arg; 1252 struct g_gate_ctl_io *ggio; 1253 time_t lastcheck, now; 1254 struct hio *hio; 1255 struct nv *nv; 1256 unsigned int ncomp; 1257 bool wakeup; 1258 uint64_t offset, length; 1259 uint8_t cmd; 1260 void *data; 1261 1262 /* Remote component is 1 for now. */ 1263 ncomp = 1; 1264 lastcheck = time(NULL); 1265 1266 for (;;) { 1267 pjdlog_debug(2, "remote_send: Taking request."); 1268 QUEUE_TAKE1(hio, send, ncomp, RETRY_SLEEP); 1269 if (hio == NULL) { 1270 now = time(NULL); 1271 if (lastcheck + RETRY_SLEEP <= now) { 1272 keepalive_send(res, ncomp); 1273 lastcheck = now; 1274 } 1275 continue; 1276 } 1277 pjdlog_debug(2, "remote_send: (%p) Got request.", hio); 1278 ggio = &hio->hio_ggio; 1279 switch (ggio->gctl_cmd) { 1280 case BIO_READ: 1281 cmd = HIO_READ; 1282 data = NULL; 1283 offset = ggio->gctl_offset; 1284 length = ggio->gctl_length; 1285 break; 1286 case BIO_WRITE: 1287 cmd = HIO_WRITE; 1288 data = ggio->gctl_data; 1289 offset = ggio->gctl_offset; 1290 length = ggio->gctl_length; 1291 break; 1292 case BIO_DELETE: 1293 cmd = HIO_DELETE; 1294 data = NULL; 1295 offset = ggio->gctl_offset; 1296 length = ggio->gctl_length; 1297 break; 1298 case BIO_FLUSH: 1299 cmd = HIO_FLUSH; 1300 data = NULL; 1301 offset = 0; 1302 length = 0; 1303 break; 1304 default: 1305 assert(!"invalid condition"); 1306 abort(); 1307 } 1308 nv = nv_alloc(); 1309 nv_add_uint8(nv, cmd, "cmd"); 1310 nv_add_uint64(nv, (uint64_t)ggio->gctl_seq, "seq"); 1311 nv_add_uint64(nv, offset, "offset"); 1312 nv_add_uint64(nv, length, "length"); 1313 if (nv_error(nv) != 0) { 1314 hio->hio_errors[ncomp] = nv_error(nv); 1315 pjdlog_debug(2, 1316 "remote_send: (%p) Unable to prepare header to send.", 1317 hio); 1318 reqlog(LOG_ERR, 0, ggio, 1319 "Unable to prepare header to send (%s): ", 1320 strerror(nv_error(nv))); 1321 /* Move failed request immediately to the done queue. */ 1322 goto done_queue; 1323 } 1324 pjdlog_debug(2, 1325 "remote_send: (%p) Moving request to the recv queue.", 1326 hio); 1327 /* 1328 * Protect connection from disappearing. 1329 */ 1330 rw_rlock(&hio_remote_lock[ncomp]); 1331 if (!ISCONNECTED(res, ncomp)) { 1332 rw_unlock(&hio_remote_lock[ncomp]); 1333 hio->hio_errors[ncomp] = ENOTCONN; 1334 goto done_queue; 1335 } 1336 /* 1337 * Move the request to recv queue before sending it, because 1338 * in different order we can get reply before we move request 1339 * to recv queue. 1340 */ 1341 mtx_lock(&hio_recv_list_lock[ncomp]); 1342 wakeup = TAILQ_EMPTY(&hio_recv_list[ncomp]); 1343 TAILQ_INSERT_TAIL(&hio_recv_list[ncomp], hio, hio_next[ncomp]); 1344 mtx_unlock(&hio_recv_list_lock[ncomp]); 1345 if (hast_proto_send(res, res->hr_remoteout, nv, data, 1346 data != NULL ? length : 0) < 0) { 1347 hio->hio_errors[ncomp] = errno; 1348 rw_unlock(&hio_remote_lock[ncomp]); 1349 pjdlog_debug(2, 1350 "remote_send: (%p) Unable to send request.", hio); 1351 reqlog(LOG_ERR, 0, ggio, 1352 "Unable to send request (%s): ", 1353 strerror(hio->hio_errors[ncomp])); 1354 remote_close(res, ncomp); 1355 /* 1356 * Take request back from the receive queue and move 1357 * it immediately to the done queue. 1358 */ 1359 mtx_lock(&hio_recv_list_lock[ncomp]); 1360 TAILQ_REMOVE(&hio_recv_list[ncomp], hio, hio_next[ncomp]); 1361 mtx_unlock(&hio_recv_list_lock[ncomp]); 1362 goto done_queue; 1363 } 1364 rw_unlock(&hio_remote_lock[ncomp]); 1365 nv_free(nv); 1366 if (wakeup) 1367 cv_signal(&hio_recv_list_cond[ncomp]); 1368 continue; 1369 done_queue: 1370 nv_free(nv); 1371 if (ISSYNCREQ(hio)) { 1372 if (!refcount_release(&hio->hio_countdown)) 1373 continue; 1374 mtx_lock(&sync_lock); 1375 SYNCREQDONE(hio); 1376 mtx_unlock(&sync_lock); 1377 cv_signal(&sync_cond); 1378 continue; 1379 } 1380 if (ggio->gctl_cmd == BIO_WRITE) { 1381 mtx_lock(&res->hr_amp_lock); 1382 if (activemap_need_sync(res->hr_amp, ggio->gctl_offset, 1383 ggio->gctl_length)) { 1384 (void)hast_activemap_flush(res); 1385 } 1386 mtx_unlock(&res->hr_amp_lock); 1387 } 1388 if (!refcount_release(&hio->hio_countdown)) 1389 continue; 1390 pjdlog_debug(2, 1391 "remote_send: (%p) Moving request to the done queue.", 1392 hio); 1393 QUEUE_INSERT2(hio, done); 1394 } 1395 /* NOTREACHED */ 1396 return (NULL); 1397 } 1398 1399 /* 1400 * Thread receives answer from secondary node and passes it to ggate_send 1401 * thread. 1402 */ 1403 static void * 1404 remote_recv_thread(void *arg) 1405 { 1406 struct hast_resource *res = arg; 1407 struct g_gate_ctl_io *ggio; 1408 struct hio *hio; 1409 struct nv *nv; 1410 unsigned int ncomp; 1411 uint64_t seq; 1412 int error; 1413 1414 /* Remote component is 1 for now. */ 1415 ncomp = 1; 1416 1417 for (;;) { 1418 /* Wait until there is anything to receive. */ 1419 mtx_lock(&hio_recv_list_lock[ncomp]); 1420 while (TAILQ_EMPTY(&hio_recv_list[ncomp])) { 1421 pjdlog_debug(2, "remote_recv: No requests, waiting."); 1422 cv_wait(&hio_recv_list_cond[ncomp], 1423 &hio_recv_list_lock[ncomp]); 1424 } 1425 mtx_unlock(&hio_recv_list_lock[ncomp]); 1426 rw_rlock(&hio_remote_lock[ncomp]); 1427 if (!ISCONNECTED(res, ncomp)) { 1428 rw_unlock(&hio_remote_lock[ncomp]); 1429 /* 1430 * Connection is dead, so move all pending requests to 1431 * the done queue (one-by-one). 1432 */ 1433 mtx_lock(&hio_recv_list_lock[ncomp]); 1434 hio = TAILQ_FIRST(&hio_recv_list[ncomp]); 1435 assert(hio != NULL); 1436 TAILQ_REMOVE(&hio_recv_list[ncomp], hio, 1437 hio_next[ncomp]); 1438 mtx_unlock(&hio_recv_list_lock[ncomp]); 1439 goto done_queue; 1440 } 1441 if (hast_proto_recv_hdr(res->hr_remotein, &nv) < 0) { 1442 pjdlog_errno(LOG_ERR, 1443 "Unable to receive reply header"); 1444 rw_unlock(&hio_remote_lock[ncomp]); 1445 remote_close(res, ncomp); 1446 continue; 1447 } 1448 rw_unlock(&hio_remote_lock[ncomp]); 1449 seq = nv_get_uint64(nv, "seq"); 1450 if (seq == 0) { 1451 pjdlog_error("Header contains no 'seq' field."); 1452 nv_free(nv); 1453 continue; 1454 } 1455 mtx_lock(&hio_recv_list_lock[ncomp]); 1456 TAILQ_FOREACH(hio, &hio_recv_list[ncomp], hio_next[ncomp]) { 1457 if (hio->hio_ggio.gctl_seq == seq) { 1458 TAILQ_REMOVE(&hio_recv_list[ncomp], hio, 1459 hio_next[ncomp]); 1460 break; 1461 } 1462 } 1463 mtx_unlock(&hio_recv_list_lock[ncomp]); 1464 if (hio == NULL) { 1465 pjdlog_error("Found no request matching received 'seq' field (%ju).", 1466 (uintmax_t)seq); 1467 nv_free(nv); 1468 continue; 1469 } 1470 error = nv_get_int16(nv, "error"); 1471 if (error != 0) { 1472 /* Request failed on remote side. */ 1473 hio->hio_errors[ncomp] = error; 1474 reqlog(LOG_WARNING, 0, &hio->hio_ggio, 1475 "Remote request failed (%s): ", strerror(error)); 1476 nv_free(nv); 1477 goto done_queue; 1478 } 1479 ggio = &hio->hio_ggio; 1480 switch (ggio->gctl_cmd) { 1481 case BIO_READ: 1482 rw_rlock(&hio_remote_lock[ncomp]); 1483 if (!ISCONNECTED(res, ncomp)) { 1484 rw_unlock(&hio_remote_lock[ncomp]); 1485 nv_free(nv); 1486 goto done_queue; 1487 } 1488 if (hast_proto_recv_data(res, res->hr_remotein, nv, 1489 ggio->gctl_data, ggio->gctl_length) < 0) { 1490 hio->hio_errors[ncomp] = errno; 1491 pjdlog_errno(LOG_ERR, 1492 "Unable to receive reply data"); 1493 rw_unlock(&hio_remote_lock[ncomp]); 1494 nv_free(nv); 1495 remote_close(res, ncomp); 1496 goto done_queue; 1497 } 1498 rw_unlock(&hio_remote_lock[ncomp]); 1499 break; 1500 case BIO_WRITE: 1501 case BIO_DELETE: 1502 case BIO_FLUSH: 1503 break; 1504 default: 1505 assert(!"invalid condition"); 1506 abort(); 1507 } 1508 hio->hio_errors[ncomp] = 0; 1509 nv_free(nv); 1510 done_queue: 1511 if (refcount_release(&hio->hio_countdown)) { 1512 if (ISSYNCREQ(hio)) { 1513 mtx_lock(&sync_lock); 1514 SYNCREQDONE(hio); 1515 mtx_unlock(&sync_lock); 1516 cv_signal(&sync_cond); 1517 } else { 1518 pjdlog_debug(2, 1519 "remote_recv: (%p) Moving request to the done queue.", 1520 hio); 1521 QUEUE_INSERT2(hio, done); 1522 } 1523 } 1524 } 1525 /* NOTREACHED */ 1526 return (NULL); 1527 } 1528 1529 /* 1530 * Thread sends answer to the kernel. 1531 */ 1532 static void * 1533 ggate_send_thread(void *arg) 1534 { 1535 struct hast_resource *res = arg; 1536 struct g_gate_ctl_io *ggio; 1537 struct hio *hio; 1538 unsigned int ii, ncomp, ncomps; 1539 1540 ncomps = HAST_NCOMPONENTS; 1541 1542 for (;;) { 1543 pjdlog_debug(2, "ggate_send: Taking request."); 1544 QUEUE_TAKE2(hio, done); 1545 pjdlog_debug(2, "ggate_send: (%p) Got request.", hio); 1546 ggio = &hio->hio_ggio; 1547 for (ii = 0; ii < ncomps; ii++) { 1548 if (hio->hio_errors[ii] == 0) { 1549 /* 1550 * One successful request is enough to declare 1551 * success. 1552 */ 1553 ggio->gctl_error = 0; 1554 break; 1555 } 1556 } 1557 if (ii == ncomps) { 1558 /* 1559 * None of the requests were successful. 1560 * Use first error. 1561 */ 1562 ggio->gctl_error = hio->hio_errors[0]; 1563 } 1564 if (ggio->gctl_error == 0 && ggio->gctl_cmd == BIO_WRITE) { 1565 mtx_lock(&res->hr_amp_lock); 1566 activemap_write_complete(res->hr_amp, 1567 ggio->gctl_offset, ggio->gctl_length); 1568 mtx_unlock(&res->hr_amp_lock); 1569 } 1570 if (ggio->gctl_cmd == BIO_WRITE) { 1571 /* 1572 * Unlock range we locked. 1573 */ 1574 mtx_lock(&range_lock); 1575 rangelock_del(range_regular, ggio->gctl_offset, 1576 ggio->gctl_length); 1577 if (range_sync_wait) 1578 cv_signal(&range_sync_cond); 1579 mtx_unlock(&range_lock); 1580 /* 1581 * Bump local count if this is first write after 1582 * connection failure with remote node. 1583 */ 1584 ncomp = 1; 1585 rw_rlock(&hio_remote_lock[ncomp]); 1586 if (!ISCONNECTED(res, ncomp)) { 1587 mtx_lock(&metadata_lock); 1588 if (res->hr_primary_localcnt == 1589 res->hr_secondary_remotecnt) { 1590 res->hr_primary_localcnt++; 1591 pjdlog_debug(1, 1592 "Increasing localcnt to %ju.", 1593 (uintmax_t)res->hr_primary_localcnt); 1594 (void)metadata_write(res); 1595 } 1596 mtx_unlock(&metadata_lock); 1597 } 1598 rw_unlock(&hio_remote_lock[ncomp]); 1599 } 1600 if (ioctl(res->hr_ggatefd, G_GATE_CMD_DONE, ggio) < 0) 1601 primary_exit(EX_OSERR, "G_GATE_CMD_DONE failed"); 1602 pjdlog_debug(2, 1603 "ggate_send: (%p) Moving request to the free queue.", hio); 1604 QUEUE_INSERT2(hio, free); 1605 } 1606 /* NOTREACHED */ 1607 return (NULL); 1608 } 1609 1610 /* 1611 * Thread synchronize local and remote components. 1612 */ 1613 static void * 1614 sync_thread(void *arg __unused) 1615 { 1616 struct hast_resource *res = arg; 1617 struct hio *hio; 1618 struct g_gate_ctl_io *ggio; 1619 unsigned int ii, ncomp, ncomps; 1620 off_t offset, length, synced; 1621 bool dorewind; 1622 int syncext; 1623 1624 ncomps = HAST_NCOMPONENTS; 1625 dorewind = true; 1626 synced = 0; 1627 offset = -1; 1628 1629 for (;;) { 1630 mtx_lock(&sync_lock); 1631 if (offset >= 0 && !sync_inprogress) { 1632 pjdlog_info("Synchronization interrupted. " 1633 "%jd bytes synchronized so far.", 1634 (intmax_t)synced); 1635 event_send(res, EVENT_SYNCINTR); 1636 } 1637 while (!sync_inprogress) { 1638 dorewind = true; 1639 synced = 0; 1640 cv_wait(&sync_cond, &sync_lock); 1641 } 1642 mtx_unlock(&sync_lock); 1643 /* 1644 * Obtain offset at which we should synchronize. 1645 * Rewind synchronization if needed. 1646 */ 1647 mtx_lock(&res->hr_amp_lock); 1648 if (dorewind) 1649 activemap_sync_rewind(res->hr_amp); 1650 offset = activemap_sync_offset(res->hr_amp, &length, &syncext); 1651 if (syncext != -1) { 1652 /* 1653 * We synchronized entire syncext extent, we can mark 1654 * it as clean now. 1655 */ 1656 if (activemap_extent_complete(res->hr_amp, syncext)) 1657 (void)hast_activemap_flush(res); 1658 } 1659 mtx_unlock(&res->hr_amp_lock); 1660 if (dorewind) { 1661 dorewind = false; 1662 if (offset < 0) 1663 pjdlog_info("Nodes are in sync."); 1664 else { 1665 pjdlog_info("Synchronization started. %ju bytes to go.", 1666 (uintmax_t)(res->hr_extentsize * 1667 activemap_ndirty(res->hr_amp))); 1668 event_send(res, EVENT_SYNCSTART); 1669 } 1670 } 1671 if (offset < 0) { 1672 sync_stop(); 1673 pjdlog_debug(1, "Nothing to synchronize."); 1674 /* 1675 * Synchronization complete, make both localcnt and 1676 * remotecnt equal. 1677 */ 1678 ncomp = 1; 1679 rw_rlock(&hio_remote_lock[ncomp]); 1680 if (ISCONNECTED(res, ncomp)) { 1681 if (synced > 0) { 1682 pjdlog_info("Synchronization complete. " 1683 "%jd bytes synchronized.", 1684 (intmax_t)synced); 1685 event_send(res, EVENT_SYNCDONE); 1686 } 1687 mtx_lock(&metadata_lock); 1688 res->hr_syncsrc = HAST_SYNCSRC_UNDEF; 1689 res->hr_primary_localcnt = 1690 res->hr_secondary_localcnt; 1691 res->hr_primary_remotecnt = 1692 res->hr_secondary_remotecnt; 1693 pjdlog_debug(1, 1694 "Setting localcnt to %ju and remotecnt to %ju.", 1695 (uintmax_t)res->hr_primary_localcnt, 1696 (uintmax_t)res->hr_secondary_localcnt); 1697 (void)metadata_write(res); 1698 mtx_unlock(&metadata_lock); 1699 } 1700 rw_unlock(&hio_remote_lock[ncomp]); 1701 continue; 1702 } 1703 pjdlog_debug(2, "sync: Taking free request."); 1704 QUEUE_TAKE2(hio, free); 1705 pjdlog_debug(2, "sync: (%p) Got free request.", hio); 1706 /* 1707 * Lock the range we are going to synchronize. We don't want 1708 * race where someone writes between our read and write. 1709 */ 1710 for (;;) { 1711 mtx_lock(&range_lock); 1712 if (rangelock_islocked(range_regular, offset, length)) { 1713 pjdlog_debug(2, 1714 "sync: Range offset=%jd length=%jd locked.", 1715 (intmax_t)offset, (intmax_t)length); 1716 range_sync_wait = true; 1717 cv_wait(&range_sync_cond, &range_lock); 1718 range_sync_wait = false; 1719 mtx_unlock(&range_lock); 1720 continue; 1721 } 1722 if (rangelock_add(range_sync, offset, length) < 0) { 1723 mtx_unlock(&range_lock); 1724 pjdlog_debug(2, 1725 "sync: Range offset=%jd length=%jd is already locked, waiting.", 1726 (intmax_t)offset, (intmax_t)length); 1727 sleep(1); 1728 continue; 1729 } 1730 mtx_unlock(&range_lock); 1731 break; 1732 } 1733 /* 1734 * First read the data from synchronization source. 1735 */ 1736 SYNCREQ(hio); 1737 ggio = &hio->hio_ggio; 1738 ggio->gctl_cmd = BIO_READ; 1739 ggio->gctl_offset = offset; 1740 ggio->gctl_length = length; 1741 ggio->gctl_error = 0; 1742 for (ii = 0; ii < ncomps; ii++) 1743 hio->hio_errors[ii] = EINVAL; 1744 reqlog(LOG_DEBUG, 2, ggio, "sync: (%p) Sending sync request: ", 1745 hio); 1746 pjdlog_debug(2, "sync: (%p) Moving request to the send queue.", 1747 hio); 1748 mtx_lock(&metadata_lock); 1749 if (res->hr_syncsrc == HAST_SYNCSRC_PRIMARY) { 1750 /* 1751 * This range is up-to-date on local component, 1752 * so handle request locally. 1753 */ 1754 /* Local component is 0 for now. */ 1755 ncomp = 0; 1756 } else /* if (res->hr_syncsrc == HAST_SYNCSRC_SECONDARY) */ { 1757 assert(res->hr_syncsrc == HAST_SYNCSRC_SECONDARY); 1758 /* 1759 * This range is out-of-date on local component, 1760 * so send request to the remote node. 1761 */ 1762 /* Remote component is 1 for now. */ 1763 ncomp = 1; 1764 } 1765 mtx_unlock(&metadata_lock); 1766 refcount_init(&hio->hio_countdown, 1); 1767 QUEUE_INSERT1(hio, send, ncomp); 1768 1769 /* 1770 * Let's wait for READ to finish. 1771 */ 1772 mtx_lock(&sync_lock); 1773 while (!ISSYNCREQDONE(hio)) 1774 cv_wait(&sync_cond, &sync_lock); 1775 mtx_unlock(&sync_lock); 1776 1777 if (hio->hio_errors[ncomp] != 0) { 1778 pjdlog_error("Unable to read synchronization data: %s.", 1779 strerror(hio->hio_errors[ncomp])); 1780 goto free_queue; 1781 } 1782 1783 /* 1784 * We read the data from synchronization source, now write it 1785 * to synchronization target. 1786 */ 1787 SYNCREQ(hio); 1788 ggio->gctl_cmd = BIO_WRITE; 1789 for (ii = 0; ii < ncomps; ii++) 1790 hio->hio_errors[ii] = EINVAL; 1791 reqlog(LOG_DEBUG, 2, ggio, "sync: (%p) Sending sync request: ", 1792 hio); 1793 pjdlog_debug(2, "sync: (%p) Moving request to the send queue.", 1794 hio); 1795 mtx_lock(&metadata_lock); 1796 if (res->hr_syncsrc == HAST_SYNCSRC_PRIMARY) { 1797 /* 1798 * This range is up-to-date on local component, 1799 * so we update remote component. 1800 */ 1801 /* Remote component is 1 for now. */ 1802 ncomp = 1; 1803 } else /* if (res->hr_syncsrc == HAST_SYNCSRC_SECONDARY) */ { 1804 assert(res->hr_syncsrc == HAST_SYNCSRC_SECONDARY); 1805 /* 1806 * This range is out-of-date on local component, 1807 * so we update it. 1808 */ 1809 /* Local component is 0 for now. */ 1810 ncomp = 0; 1811 } 1812 mtx_unlock(&metadata_lock); 1813 1814 pjdlog_debug(2, "sync: (%p) Moving request to the send queues.", 1815 hio); 1816 refcount_init(&hio->hio_countdown, 1); 1817 QUEUE_INSERT1(hio, send, ncomp); 1818 1819 /* 1820 * Let's wait for WRITE to finish. 1821 */ 1822 mtx_lock(&sync_lock); 1823 while (!ISSYNCREQDONE(hio)) 1824 cv_wait(&sync_cond, &sync_lock); 1825 mtx_unlock(&sync_lock); 1826 1827 if (hio->hio_errors[ncomp] != 0) { 1828 pjdlog_error("Unable to write synchronization data: %s.", 1829 strerror(hio->hio_errors[ncomp])); 1830 goto free_queue; 1831 } 1832 1833 synced += length; 1834 free_queue: 1835 mtx_lock(&range_lock); 1836 rangelock_del(range_sync, offset, length); 1837 if (range_regular_wait) 1838 cv_signal(&range_regular_cond); 1839 mtx_unlock(&range_lock); 1840 pjdlog_debug(2, "sync: (%p) Moving request to the free queue.", 1841 hio); 1842 QUEUE_INSERT2(hio, free); 1843 } 1844 /* NOTREACHED */ 1845 return (NULL); 1846 } 1847 1848 void 1849 primary_config_reload(struct hast_resource *res, struct nv *nv) 1850 { 1851 unsigned int ii, ncomps; 1852 int modified, vint; 1853 const char *vstr; 1854 1855 pjdlog_info("Reloading configuration..."); 1856 1857 assert(res->hr_role == HAST_ROLE_PRIMARY); 1858 assert(gres == res); 1859 nv_assert(nv, "remoteaddr"); 1860 nv_assert(nv, "replication"); 1861 nv_assert(nv, "timeout"); 1862 nv_assert(nv, "exec"); 1863 1864 ncomps = HAST_NCOMPONENTS; 1865 1866 #define MODIFIED_REMOTEADDR 0x1 1867 #define MODIFIED_REPLICATION 0x2 1868 #define MODIFIED_TIMEOUT 0x4 1869 #define MODIFIED_EXEC 0x8 1870 modified = 0; 1871 1872 vstr = nv_get_string(nv, "remoteaddr"); 1873 if (strcmp(gres->hr_remoteaddr, vstr) != 0) { 1874 /* 1875 * Don't copy res->hr_remoteaddr to gres just yet. 1876 * We want remote_close() to log disconnect from the old 1877 * addresses, not from the new ones. 1878 */ 1879 modified |= MODIFIED_REMOTEADDR; 1880 } 1881 vint = nv_get_int32(nv, "replication"); 1882 if (gres->hr_replication != vint) { 1883 gres->hr_replication = vint; 1884 modified |= MODIFIED_REPLICATION; 1885 } 1886 vint = nv_get_int32(nv, "timeout"); 1887 if (gres->hr_timeout != vint) { 1888 gres->hr_timeout = vint; 1889 modified |= MODIFIED_TIMEOUT; 1890 } 1891 vstr = nv_get_string(nv, "exec"); 1892 if (strcmp(gres->hr_exec, vstr) != 0) { 1893 strlcpy(gres->hr_exec, vstr, sizeof(gres->hr_exec)); 1894 modified |= MODIFIED_EXEC; 1895 } 1896 1897 /* 1898 * If only timeout was modified we only need to change it without 1899 * reconnecting. 1900 */ 1901 if (modified == MODIFIED_TIMEOUT) { 1902 for (ii = 0; ii < ncomps; ii++) { 1903 if (!ISREMOTE(ii)) 1904 continue; 1905 rw_rlock(&hio_remote_lock[ii]); 1906 if (!ISCONNECTED(gres, ii)) { 1907 rw_unlock(&hio_remote_lock[ii]); 1908 continue; 1909 } 1910 rw_unlock(&hio_remote_lock[ii]); 1911 if (proto_timeout(gres->hr_remotein, 1912 gres->hr_timeout) < 0) { 1913 pjdlog_errno(LOG_WARNING, 1914 "Unable to set connection timeout"); 1915 } 1916 if (proto_timeout(gres->hr_remoteout, 1917 gres->hr_timeout) < 0) { 1918 pjdlog_errno(LOG_WARNING, 1919 "Unable to set connection timeout"); 1920 } 1921 } 1922 } else if ((modified & 1923 (MODIFIED_REMOTEADDR | MODIFIED_REPLICATION)) != 0) { 1924 for (ii = 0; ii < ncomps; ii++) { 1925 if (!ISREMOTE(ii)) 1926 continue; 1927 remote_close(gres, ii); 1928 } 1929 if (modified & MODIFIED_REMOTEADDR) { 1930 vstr = nv_get_string(nv, "remoteaddr"); 1931 strlcpy(gres->hr_remoteaddr, vstr, 1932 sizeof(gres->hr_remoteaddr)); 1933 } 1934 } 1935 #undef MODIFIED_REMOTEADDR 1936 #undef MODIFIED_REPLICATION 1937 #undef MODIFIED_TIMEOUT 1938 #undef MODIFIED_EXEC 1939 1940 pjdlog_info("Configuration reloaded successfully."); 1941 } 1942 1943 static void 1944 guard_one(struct hast_resource *res, unsigned int ncomp) 1945 { 1946 struct proto_conn *in, *out; 1947 1948 if (!ISREMOTE(ncomp)) 1949 return; 1950 1951 rw_rlock(&hio_remote_lock[ncomp]); 1952 1953 if (!real_remote(res)) { 1954 rw_unlock(&hio_remote_lock[ncomp]); 1955 return; 1956 } 1957 1958 if (ISCONNECTED(res, ncomp)) { 1959 assert(res->hr_remotein != NULL); 1960 assert(res->hr_remoteout != NULL); 1961 rw_unlock(&hio_remote_lock[ncomp]); 1962 pjdlog_debug(2, "remote_guard: Connection to %s is ok.", 1963 res->hr_remoteaddr); 1964 return; 1965 } 1966 1967 assert(res->hr_remotein == NULL); 1968 assert(res->hr_remoteout == NULL); 1969 /* 1970 * Upgrade the lock. It doesn't have to be atomic as no other thread 1971 * can change connection status from disconnected to connected. 1972 */ 1973 rw_unlock(&hio_remote_lock[ncomp]); 1974 pjdlog_debug(2, "remote_guard: Reconnecting to %s.", 1975 res->hr_remoteaddr); 1976 in = out = NULL; 1977 if (init_remote(res, &in, &out)) { 1978 rw_wlock(&hio_remote_lock[ncomp]); 1979 assert(res->hr_remotein == NULL); 1980 assert(res->hr_remoteout == NULL); 1981 assert(in != NULL && out != NULL); 1982 res->hr_remotein = in; 1983 res->hr_remoteout = out; 1984 rw_unlock(&hio_remote_lock[ncomp]); 1985 pjdlog_info("Successfully reconnected to %s.", 1986 res->hr_remoteaddr); 1987 sync_start(); 1988 } else { 1989 /* Both connections should be NULL. */ 1990 assert(res->hr_remotein == NULL); 1991 assert(res->hr_remoteout == NULL); 1992 assert(in == NULL && out == NULL); 1993 pjdlog_debug(2, "remote_guard: Reconnect to %s failed.", 1994 res->hr_remoteaddr); 1995 } 1996 } 1997 1998 /* 1999 * Thread guards remote connections and reconnects when needed, handles 2000 * signals, etc. 2001 */ 2002 static void * 2003 guard_thread(void *arg) 2004 { 2005 struct hast_resource *res = arg; 2006 unsigned int ii, ncomps; 2007 struct timespec timeout; 2008 time_t lastcheck, now; 2009 sigset_t mask; 2010 int signo; 2011 2012 ncomps = HAST_NCOMPONENTS; 2013 lastcheck = time(NULL); 2014 2015 PJDLOG_VERIFY(sigemptyset(&mask) == 0); 2016 PJDLOG_VERIFY(sigaddset(&mask, SIGINT) == 0); 2017 PJDLOG_VERIFY(sigaddset(&mask, SIGTERM) == 0); 2018 2019 timeout.tv_sec = RETRY_SLEEP; 2020 timeout.tv_nsec = 0; 2021 signo = -1; 2022 2023 for (;;) { 2024 switch (signo) { 2025 case SIGINT: 2026 case SIGTERM: 2027 sigexit_received = true; 2028 primary_exitx(EX_OK, 2029 "Termination signal received, exiting."); 2030 break; 2031 default: 2032 break; 2033 } 2034 2035 pjdlog_debug(2, "remote_guard: Checking connections."); 2036 now = time(NULL); 2037 if (lastcheck + RETRY_SLEEP <= now) { 2038 for (ii = 0; ii < ncomps; ii++) 2039 guard_one(res, ii); 2040 lastcheck = now; 2041 } 2042 signo = sigtimedwait(&mask, NULL, &timeout); 2043 } 2044 /* NOTREACHED */ 2045 return (NULL); 2046 } 2047