1 /*- 2 * Copyright (c) 2009 The FreeBSD Foundation 3 * Copyright (c) 2010 Pawel Jakub Dawidek <pjd@FreeBSD.org> 4 * All rights reserved. 5 * 6 * This software was developed by Pawel Jakub Dawidek under sponsorship from 7 * the FreeBSD Foundation. 8 * 9 * Redistribution and use in source and binary forms, with or without 10 * modification, are permitted provided that the following conditions 11 * are met: 12 * 1. Redistributions of source code must retain the above copyright 13 * notice, this list of conditions and the following disclaimer. 14 * 2. Redistributions in binary form must reproduce the above copyright 15 * notice, this list of conditions and the following disclaimer in the 16 * documentation and/or other materials provided with the distribution. 17 * 18 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND 19 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 21 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE 22 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 23 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 24 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 25 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 26 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 27 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 28 * SUCH DAMAGE. 
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/types.h>
#include <sys/time.h>
#include <sys/bio.h>
#include <sys/disk.h>
#include <sys/refcount.h>
#include <sys/stat.h>

#include <geom/gate/g_gate.h>

#include <assert.h>
#include <err.h>
#include <errno.h>
#include <fcntl.h>
#include <libgeom.h>
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sysexits.h>
#include <unistd.h>

#include <activemap.h>
#include <nv.h>
#include <rangelock.h>

#include "control.h"
#include "hast.h"
#include "hast_proto.h"
#include "hastd.h"
#include "metadata.h"
#include "proto.h"
#include "pjdlog.h"
#include "subr.h"
#include "synch.h"

/* There is only one remote component for now. */
#define ISREMOTE(no)	((no) == 1)

/*
 * One in-flight I/O request as it travels between the GEOM Gate kernel
 * driver and the per-component worker threads.
 */
struct hio {
	/*
	 * Number of components we are still waiting for.
	 * When this field goes to 0, we can send the request back to the
	 * kernel. Each component has to decrease this counter by one
	 * even on failure.
	 */
	unsigned int hio_countdown;
	/*
	 * Each component has a place to store its own error.
	 * Once the request is handled by all components we can decide if the
	 * request overall is successful or not.
	 */
	int *hio_errors;
	/*
	 * Structure used to communicate with GEOM Gate class.
	 */
	struct g_gate_ctl_io hio_ggio;
	/* Per-component queue linkage; allocated with one entry per component. */
	TAILQ_ENTRY(hio) *hio_next;
};
/* The free and done lists reuse the first component's linkage entry. */
#define hio_free_next	hio_next[0]
#define hio_done_next	hio_next[0]

/*
 * Free list holds unused structures. When free list is empty, we have to wait
 * until some in-progress requests are freed.
 */
static TAILQ_HEAD(, hio) hio_free_list;
static pthread_mutex_t hio_free_list_lock;
static pthread_cond_t hio_free_list_cond;
/*
 * There is one send list for every component. One request is placed on all
 * send lists - each component gets the same request, but each component is
 * responsible for managing its own send list.
 */
static TAILQ_HEAD(, hio) *hio_send_list;
static pthread_mutex_t *hio_send_list_lock;
static pthread_cond_t *hio_send_list_cond;
/*
 * There is one recv list for every component, although local components don't
 * use recv lists as local requests are done synchronously.
 */
static TAILQ_HEAD(, hio) *hio_recv_list;
static pthread_mutex_t *hio_recv_list_lock;
static pthread_cond_t *hio_recv_list_cond;
/*
 * Request is placed on done list by the slowest component (the one that
 * decreased hio_countdown from 1 to 0).
 */
static TAILQ_HEAD(, hio) hio_done_list;
static pthread_mutex_t hio_done_list_lock;
static pthread_cond_t hio_done_list_cond;
/*
 * Structure below are for interaction with sync thread.
 */
static bool sync_inprogress;
static pthread_mutex_t sync_lock;
static pthread_cond_t sync_cond;
/*
 * The lock below allows to synchronize access to remote connections.
 */
static pthread_rwlock_t *hio_remote_lock;
static pthread_mutex_t hio_guard_lock;
static pthread_cond_t hio_guard_cond;

/*
 * Lock to synchronize metadata updates. Also synchronize access to
 * hr_primary_localcnt and hr_primary_remotecnt fields.
 */
static pthread_mutex_t metadata_lock;

/*
 * Maximum number of outstanding I/O requests.
 */
#define HAST_HIO_MAX	256
/*
 * Number of components. At this point there are only two components: local
 * and remote, but in the future it might be possible to use multiple local
 * and remote components.
 */
#define HAST_NCOMPONENTS	2
/*
 * Number of seconds to sleep before next reconnect try.
 */
#define RECONNECT_SLEEP		5

#define ISCONNECTED(res, no)	\
	((res)->hr_remotein != NULL && (res)->hr_remoteout != NULL)

/* Append request to a per-component queue; wake a waiter if it was empty. */
#define QUEUE_INSERT1(hio, name, ncomp)	do {			\
	bool _wakeup;						\
								\
	mtx_lock(&hio_##name##_list_lock[(ncomp)]);		\
	_wakeup = TAILQ_EMPTY(&hio_##name##_list[(ncomp)]);	\
	TAILQ_INSERT_TAIL(&hio_##name##_list[(ncomp)], (hio),	\
	    hio_next[(ncomp)]);					\
	mtx_unlock(&hio_##name##_list_lock[ncomp]);		\
	if (_wakeup)						\
		cv_signal(&hio_##name##_list_cond[(ncomp)]);	\
} while (0)
/* Append request to a singleton queue (free/done); wake a waiter if empty. */
#define QUEUE_INSERT2(hio, name)	do {			\
	bool _wakeup;						\
								\
	mtx_lock(&hio_##name##_list_lock);			\
	_wakeup = TAILQ_EMPTY(&hio_##name##_list);		\
	TAILQ_INSERT_TAIL(&hio_##name##_list, (hio), hio_##name##_next);\
	mtx_unlock(&hio_##name##_list_lock);			\
	if (_wakeup)						\
		cv_signal(&hio_##name##_list_cond);		\
} while (0)
/* Block until a request is available on the per-component queue, then pop it. */
#define QUEUE_TAKE1(hio, name, ncomp)	do {			\
	mtx_lock(&hio_##name##_list_lock[(ncomp)]);		\
	while (((hio) = TAILQ_FIRST(&hio_##name##_list[(ncomp)])) == NULL) { \
		cv_wait(&hio_##name##_list_cond[(ncomp)],	\
		    &hio_##name##_list_lock[(ncomp)]);		\
	}							\
	TAILQ_REMOVE(&hio_##name##_list[(ncomp)], (hio),	\
	    hio_next[(ncomp)]);					\
	mtx_unlock(&hio_##name##_list_lock[(ncomp)]);		\
} while (0)
/* Block until a request is available on a singleton queue, then pop it. */
#define QUEUE_TAKE2(hio, name)	do {				\
	mtx_lock(&hio_##name##_list_lock);			\
	while (((hio) = TAILQ_FIRST(&hio_##name##_list)) == NULL) { \
		cv_wait(&hio_##name##_list_cond,		\
		    &hio_##name##_list_lock);			\
	}							\
	TAILQ_REMOVE(&hio_##name##_list, (hio), hio_##name##_next); \
	mtx_unlock(&hio_##name##_list_lock);			\
} while (0)

/*
 * Sync requests are distinguished from kernel requests by an impossible
 * gctl_unit value: -1 marks an in-progress sync request, -2 a finished one.
 */
#define SYNCREQ(hio)		do {			\
	(hio)->hio_ggio.gctl_unit = -1;			\
	(hio)->hio_ggio.gctl_seq = 1;			\
} while (0)
#define ISSYNCREQ(hio)		((hio)->hio_ggio.gctl_unit == -1)
#define SYNCREQDONE(hio)	do { (hio)->hio_ggio.gctl_unit = -2; } while (0)
#define ISSYNCREQDONE(hio)	((hio)->hio_ggio.gctl_unit == -2)

static struct hast_resource *gres;

static pthread_mutex_t range_lock;
static struct rangelocks *range_regular;
static bool range_regular_wait;
static pthread_cond_t range_regular_cond;
static struct rangelocks *range_sync;
static bool range_sync_wait;
static pthread_cond_t range_sync_cond;

static void *ggate_recv_thread(void *arg);
static void *local_send_thread(void *arg);
static void *remote_send_thread(void *arg);
static void *remote_recv_thread(void *arg);
static void *ggate_send_thread(void *arg);
static void *sync_thread(void *arg);
static void *guard_thread(void *arg);

static void sighandler(int sig);

/*
 * Release process-wide resources (local descriptor, ggate provider) before
 * exiting, preserving the caller's errno.
 */
static void
cleanup(struct hast_resource *res)
{
	int rerrno;

	/* Remember errno. */
	rerrno = errno;

	/*
	 * Close descriptor to /dev/hast/<name>
	 * to work-around race in the kernel.
	 */
	close(res->hr_localfd);

	/* Destroy ggate provider if we created one. */
	if (res->hr_ggateunit >= 0) {
		struct g_gate_ctl_destroy ggiod;

		ggiod.gctl_version = G_GATE_VERSION;
		ggiod.gctl_unit = res->hr_ggateunit;
		ggiod.gctl_force = 1;
		if (ioctl(res->hr_ggatefd, G_GATE_CMD_DESTROY, &ggiod) < 0) {
			pjdlog_warning("Unable to destroy hast/%s device",
			    res->hr_provname);
		}
		res->hr_ggateunit = -1;
	}

	/* Restore errno. */
	errno = rerrno;
}

/* Log an errno-style error, clean up and exit with the given code. */
static void
primary_exit(int exitcode, const char *fmt, ...)
{
	va_list ap;

	assert(exitcode != EX_OK);
	va_start(ap, fmt);
	pjdlogv_errno(LOG_ERR, fmt, ap);
	va_end(ap);
	cleanup(gres);
	exit(exitcode);
}

/* Like primary_exit(), but without the errno decoration; EX_OK logs as info. */
static void
primary_exitx(int exitcode, const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	pjdlogv(exitcode == EX_OK ? LOG_INFO : LOG_ERR, fmt, ap);
	va_end(ap);
	cleanup(gres);
	exit(exitcode);
}

/*
 * Write the in-memory activemap bitmap to its on-disk location right after
 * the metadata. Returns 0 on success, -1 on write failure (errno preserved).
 */
static int
hast_activemap_flush(struct hast_resource *res)
{
	const unsigned char *buf;
	size_t size;

	buf = activemap_bitmap(res->hr_amp, &size);
	assert(buf != NULL);
	assert((size % res->hr_local_sectorsize) == 0);
	if (pwrite(res->hr_localfd, buf, size, METADATA_SIZE) !=
	    (ssize_t)size) {
		KEEP_ERRNO(pjdlog_errno(LOG_ERR,
		    "Unable to flush activemap to disk"));
		return (-1);
	}
	return (0);
}

/* True unless the remote address is the literal "none" (no real remote). */
static bool
real_remote(const struct hast_resource *res)
{

	return (strcmp(res->hr_remoteaddr, "none") != 0);
}

/*
 * Allocate and initialize all queues, locks, condition variables and the
 * pool of hio request structures, then install signal handlers.
 */
static void
init_environment(struct hast_resource *res __unused)
{
	struct hio *hio;
	unsigned int ii, ncomps;

	/*
	 * In the future it might be per-resource value.
	 */
	ncomps = HAST_NCOMPONENTS;

	/*
	 * Allocate memory needed by lists.
	 */
	hio_send_list = malloc(sizeof(hio_send_list[0]) * ncomps);
	if (hio_send_list == NULL) {
		primary_exitx(EX_TEMPFAIL,
		    "Unable to allocate %zu bytes of memory for send lists.",
		    sizeof(hio_send_list[0]) * ncomps);
	}
	hio_send_list_lock = malloc(sizeof(hio_send_list_lock[0]) * ncomps);
	if (hio_send_list_lock == NULL) {
		primary_exitx(EX_TEMPFAIL,
		    "Unable to allocate %zu bytes of memory for send list locks.",
		    sizeof(hio_send_list_lock[0]) * ncomps);
	}
	hio_send_list_cond = malloc(sizeof(hio_send_list_cond[0]) * ncomps);
	if (hio_send_list_cond == NULL) {
		primary_exitx(EX_TEMPFAIL,
		    "Unable to allocate %zu bytes of memory for send list condition variables.",
		    sizeof(hio_send_list_cond[0]) * ncomps);
	}
	hio_recv_list = malloc(sizeof(hio_recv_list[0]) * ncomps);
	if (hio_recv_list == NULL) {
		primary_exitx(EX_TEMPFAIL,
		    "Unable to allocate %zu bytes of memory for recv lists.",
		    sizeof(hio_recv_list[0]) * ncomps);
	}
	hio_recv_list_lock = malloc(sizeof(hio_recv_list_lock[0]) * ncomps);
	if (hio_recv_list_lock == NULL) {
		primary_exitx(EX_TEMPFAIL,
		    "Unable to allocate %zu bytes of memory for recv list locks.",
		    sizeof(hio_recv_list_lock[0]) * ncomps);
	}
	hio_recv_list_cond = malloc(sizeof(hio_recv_list_cond[0]) * ncomps);
	if (hio_recv_list_cond == NULL) {
		primary_exitx(EX_TEMPFAIL,
		    "Unable to allocate %zu bytes of memory for recv list condition variables.",
		    sizeof(hio_recv_list_cond[0]) * ncomps);
	}
	hio_remote_lock = malloc(sizeof(hio_remote_lock[0]) * ncomps);
	if (hio_remote_lock == NULL) {
		primary_exitx(EX_TEMPFAIL,
		    "Unable to allocate %zu bytes of memory for remote connections locks.",
		    sizeof(hio_remote_lock[0]) * ncomps);
	}

	/*
	 * Initialize lists, their locks and their condition variables.
	 */
	TAILQ_INIT(&hio_free_list);
	mtx_init(&hio_free_list_lock);
	cv_init(&hio_free_list_cond);
	for (ii = 0; ii < HAST_NCOMPONENTS; ii++) {
		TAILQ_INIT(&hio_send_list[ii]);
		mtx_init(&hio_send_list_lock[ii]);
		cv_init(&hio_send_list_cond[ii]);
		TAILQ_INIT(&hio_recv_list[ii]);
		mtx_init(&hio_recv_list_lock[ii]);
		cv_init(&hio_recv_list_cond[ii]);
		rw_init(&hio_remote_lock[ii]);
	}
	TAILQ_INIT(&hio_done_list);
	mtx_init(&hio_done_list_lock);
	cv_init(&hio_done_list_cond);
	mtx_init(&hio_guard_lock);
	cv_init(&hio_guard_cond);
	mtx_init(&metadata_lock);

	/*
	 * Allocate requests pool and initialize requests.
	 */
	for (ii = 0; ii < HAST_HIO_MAX; ii++) {
		hio = malloc(sizeof(*hio));
		if (hio == NULL) {
			primary_exitx(EX_TEMPFAIL,
			    "Unable to allocate %zu bytes of memory for hio request.",
			    sizeof(*hio));
		}
		hio->hio_countdown = 0;
		hio->hio_errors = malloc(sizeof(hio->hio_errors[0]) * ncomps);
		if (hio->hio_errors == NULL) {
			primary_exitx(EX_TEMPFAIL,
			    "Unable allocate %zu bytes of memory for hio errors.",
			    sizeof(hio->hio_errors[0]) * ncomps);
		}
		hio->hio_next = malloc(sizeof(hio->hio_next[0]) * ncomps);
		if (hio->hio_next == NULL) {
			primary_exitx(EX_TEMPFAIL,
			    "Unable allocate %zu bytes of memory for hio_next field.",
			    sizeof(hio->hio_next[0]) * ncomps);
		}
		hio->hio_ggio.gctl_version = G_GATE_VERSION;
		/* MAXPHYS is the largest request the kernel will hand us. */
		hio->hio_ggio.gctl_data = malloc(MAXPHYS);
		if (hio->hio_ggio.gctl_data == NULL) {
			primary_exitx(EX_TEMPFAIL,
			    "Unable to allocate %zu bytes of memory for gctl_data.",
			    MAXPHYS);
		}
		hio->hio_ggio.gctl_length = MAXPHYS;
		hio->hio_ggio.gctl_error = 0;
		TAILQ_INSERT_HEAD(&hio_free_list, hio, hio_free_next);
	}

	/*
	 * Turn on signals handling.
	 */
	signal(SIGINT, sighandler);
	signal(SIGTERM, sighandler);
	signal(SIGHUP, sighandler);
}

/*
 * Read metadata and on-disk activemap for the local component and set up
 * the range locks used to keep regular and sync I/O from overlapping.
 * On first use of the provider, generate the resource unique identifier.
 */
static void
init_local(struct hast_resource *res)
{
	unsigned char *buf;
	size_t mapsize;

	if (metadata_read(res, true) < 0)
		exit(EX_NOINPUT);
	mtx_init(&res->hr_amp_lock);
	if (activemap_init(&res->hr_amp, res->hr_datasize, res->hr_extentsize,
	    res->hr_local_sectorsize, res->hr_keepdirty) < 0) {
		primary_exit(EX_TEMPFAIL, "Unable to create activemap");
	}
	mtx_init(&range_lock);
	cv_init(&range_regular_cond);
	if (rangelock_init(&range_regular) < 0)
		primary_exit(EX_TEMPFAIL, "Unable to create regular range lock");
	cv_init(&range_sync_cond);
	if (rangelock_init(&range_sync) < 0)
		primary_exit(EX_TEMPFAIL, "Unable to create sync range lock");
	mapsize = activemap_ondisk_size(res->hr_amp);
	buf = calloc(1, mapsize);
	if (buf == NULL) {
		primary_exitx(EX_TEMPFAIL,
		    "Unable to allocate buffer for activemap.");
	}
	if (pread(res->hr_localfd, buf, mapsize, METADATA_SIZE) !=
	    (ssize_t)mapsize) {
		primary_exit(EX_NOINPUT, "Unable to read activemap");
	}
	activemap_copyin(res->hr_amp, buf, mapsize);
	free(buf);
	/* A non-zero resuid means the provider was initialized before. */
	if (res->hr_resuid != 0)
		return;
	/*
	 * We're using provider for the first time, so we have to generate
	 * resource unique identifier and initialize local and remote counts.
471 */ 472 arc4random_buf(&res->hr_resuid, sizeof(res->hr_resuid)); 473 res->hr_primary_localcnt = 1; 474 res->hr_primary_remotecnt = 0; 475 if (metadata_write(res) < 0) 476 exit(EX_NOINPUT); 477 } 478 479 static bool 480 init_remote(struct hast_resource *res, struct proto_conn **inp, 481 struct proto_conn **outp) 482 { 483 struct proto_conn *in, *out; 484 struct nv *nvout, *nvin; 485 const unsigned char *token; 486 unsigned char *map; 487 const char *errmsg; 488 int32_t extentsize; 489 int64_t datasize; 490 uint32_t mapsize; 491 size_t size; 492 493 assert((inp == NULL && outp == NULL) || (inp != NULL && outp != NULL)); 494 assert(real_remote(res)); 495 496 in = out = NULL; 497 498 /* Prepare outgoing connection with remote node. */ 499 if (proto_client(res->hr_remoteaddr, &out) < 0) { 500 primary_exit(EX_TEMPFAIL, "Unable to create connection to %s", 501 res->hr_remoteaddr); 502 } 503 /* Try to connect, but accept failure. */ 504 if (proto_connect(out) < 0) { 505 pjdlog_errno(LOG_WARNING, "Unable to connect to %s", 506 res->hr_remoteaddr); 507 goto close; 508 } 509 /* Error in setting timeout is not critical, but why should it fail? */ 510 if (proto_timeout(out, res->hr_timeout) < 0) 511 pjdlog_errno(LOG_WARNING, "Unable to set connection timeout"); 512 /* 513 * First handshake step. 514 * Setup outgoing connection with remote node. 
515 */ 516 nvout = nv_alloc(); 517 nv_add_string(nvout, res->hr_name, "resource"); 518 if (nv_error(nvout) != 0) { 519 pjdlog_common(LOG_WARNING, 0, nv_error(nvout), 520 "Unable to allocate header for connection with %s", 521 res->hr_remoteaddr); 522 nv_free(nvout); 523 goto close; 524 } 525 if (hast_proto_send(res, out, nvout, NULL, 0) < 0) { 526 pjdlog_errno(LOG_WARNING, 527 "Unable to send handshake header to %s", 528 res->hr_remoteaddr); 529 nv_free(nvout); 530 goto close; 531 } 532 nv_free(nvout); 533 if (hast_proto_recv_hdr(out, &nvin) < 0) { 534 pjdlog_errno(LOG_WARNING, 535 "Unable to receive handshake header from %s", 536 res->hr_remoteaddr); 537 goto close; 538 } 539 errmsg = nv_get_string(nvin, "errmsg"); 540 if (errmsg != NULL) { 541 pjdlog_warning("%s", errmsg); 542 nv_free(nvin); 543 goto close; 544 } 545 token = nv_get_uint8_array(nvin, &size, "token"); 546 if (token == NULL) { 547 pjdlog_warning("Handshake header from %s has no 'token' field.", 548 res->hr_remoteaddr); 549 nv_free(nvin); 550 goto close; 551 } 552 if (size != sizeof(res->hr_token)) { 553 pjdlog_warning("Handshake header from %s contains 'token' of wrong size (got %zu, expected %zu).", 554 res->hr_remoteaddr, size, sizeof(res->hr_token)); 555 nv_free(nvin); 556 goto close; 557 } 558 bcopy(token, res->hr_token, sizeof(res->hr_token)); 559 nv_free(nvin); 560 561 /* 562 * Second handshake step. 563 * Setup incoming connection with remote node. 564 */ 565 if (proto_client(res->hr_remoteaddr, &in) < 0) { 566 pjdlog_errno(LOG_WARNING, "Unable to create connection to %s", 567 res->hr_remoteaddr); 568 } 569 /* Try to connect, but accept failure. */ 570 if (proto_connect(in) < 0) { 571 pjdlog_errno(LOG_WARNING, "Unable to connect to %s", 572 res->hr_remoteaddr); 573 goto close; 574 } 575 /* Error in setting timeout is not critical, but why should it fail? 
*/ 576 if (proto_timeout(in, res->hr_timeout) < 0) 577 pjdlog_errno(LOG_WARNING, "Unable to set connection timeout"); 578 nvout = nv_alloc(); 579 nv_add_string(nvout, res->hr_name, "resource"); 580 nv_add_uint8_array(nvout, res->hr_token, sizeof(res->hr_token), 581 "token"); 582 nv_add_uint64(nvout, res->hr_resuid, "resuid"); 583 nv_add_uint64(nvout, res->hr_primary_localcnt, "localcnt"); 584 nv_add_uint64(nvout, res->hr_primary_remotecnt, "remotecnt"); 585 if (nv_error(nvout) != 0) { 586 pjdlog_common(LOG_WARNING, 0, nv_error(nvout), 587 "Unable to allocate header for connection with %s", 588 res->hr_remoteaddr); 589 nv_free(nvout); 590 goto close; 591 } 592 if (hast_proto_send(res, in, nvout, NULL, 0) < 0) { 593 pjdlog_errno(LOG_WARNING, 594 "Unable to send handshake header to %s", 595 res->hr_remoteaddr); 596 nv_free(nvout); 597 goto close; 598 } 599 nv_free(nvout); 600 if (hast_proto_recv_hdr(out, &nvin) < 0) { 601 pjdlog_errno(LOG_WARNING, 602 "Unable to receive handshake header from %s", 603 res->hr_remoteaddr); 604 goto close; 605 } 606 errmsg = nv_get_string(nvin, "errmsg"); 607 if (errmsg != NULL) { 608 pjdlog_warning("%s", errmsg); 609 nv_free(nvin); 610 goto close; 611 } 612 datasize = nv_get_int64(nvin, "datasize"); 613 if (datasize != res->hr_datasize) { 614 pjdlog_warning("Data size differs between nodes (local=%jd, remote=%jd).", 615 (intmax_t)res->hr_datasize, (intmax_t)datasize); 616 nv_free(nvin); 617 goto close; 618 } 619 extentsize = nv_get_int32(nvin, "extentsize"); 620 if (extentsize != res->hr_extentsize) { 621 pjdlog_warning("Extent size differs between nodes (local=%zd, remote=%zd).", 622 (ssize_t)res->hr_extentsize, (ssize_t)extentsize); 623 nv_free(nvin); 624 goto close; 625 } 626 res->hr_secondary_localcnt = nv_get_uint64(nvin, "localcnt"); 627 res->hr_secondary_remotecnt = nv_get_uint64(nvin, "remotecnt"); 628 res->hr_syncsrc = nv_get_uint8(nvin, "syncsrc"); 629 map = NULL; 630 mapsize = nv_get_uint32(nvin, "mapsize"); 631 if (mapsize > 
0) { 632 map = malloc(mapsize); 633 if (map == NULL) { 634 pjdlog_error("Unable to allocate memory for remote activemap (mapsize=%ju).", 635 (uintmax_t)mapsize); 636 nv_free(nvin); 637 goto close; 638 } 639 /* 640 * Remote node have some dirty extents on its own, lets 641 * download its activemap. 642 */ 643 if (hast_proto_recv_data(res, out, nvin, map, 644 mapsize) < 0) { 645 pjdlog_errno(LOG_ERR, 646 "Unable to receive remote activemap"); 647 nv_free(nvin); 648 free(map); 649 goto close; 650 } 651 /* 652 * Merge local and remote bitmaps. 653 */ 654 activemap_merge(res->hr_amp, map, mapsize); 655 free(map); 656 /* 657 * Now that we merged bitmaps from both nodes, flush it to the 658 * disk before we start to synchronize. 659 */ 660 (void)hast_activemap_flush(res); 661 } 662 pjdlog_info("Connected to %s.", res->hr_remoteaddr); 663 if (inp != NULL && outp != NULL) { 664 *inp = in; 665 *outp = out; 666 } else { 667 res->hr_remotein = in; 668 res->hr_remoteout = out; 669 } 670 return (true); 671 close: 672 proto_close(out); 673 if (in != NULL) 674 proto_close(in); 675 return (false); 676 } 677 678 static void 679 sync_start(void) 680 { 681 682 mtx_lock(&sync_lock); 683 sync_inprogress = true; 684 mtx_unlock(&sync_lock); 685 cv_signal(&sync_cond); 686 } 687 688 static void 689 init_ggate(struct hast_resource *res) 690 { 691 struct g_gate_ctl_create ggiocreate; 692 struct g_gate_ctl_cancel ggiocancel; 693 694 /* 695 * We communicate with ggate via /dev/ggctl. Open it. 696 */ 697 res->hr_ggatefd = open("/dev/" G_GATE_CTL_NAME, O_RDWR); 698 if (res->hr_ggatefd < 0) 699 primary_exit(EX_OSFILE, "Unable to open /dev/" G_GATE_CTL_NAME); 700 /* 701 * Create provider before trying to connect, as connection failure 702 * is not critical, but may take some time. 
	 */
	ggiocreate.gctl_version = G_GATE_VERSION;
	ggiocreate.gctl_mediasize = res->hr_datasize;
	ggiocreate.gctl_sectorsize = res->hr_local_sectorsize;
	ggiocreate.gctl_flags = 0;
	ggiocreate.gctl_maxcount = G_GATE_MAX_QUEUE_SIZE;
	ggiocreate.gctl_timeout = 0;
	ggiocreate.gctl_unit = G_GATE_NAME_GIVEN;
	snprintf(ggiocreate.gctl_name, sizeof(ggiocreate.gctl_name), "hast/%s",
	    res->hr_provname);
	bzero(ggiocreate.gctl_info, sizeof(ggiocreate.gctl_info));
	if (ioctl(res->hr_ggatefd, G_GATE_CMD_CREATE, &ggiocreate) == 0) {
		pjdlog_info("Device hast/%s created.", res->hr_provname);
		res->hr_ggateunit = ggiocreate.gctl_unit;
		return;
	}
	if (errno != EEXIST) {
		primary_exit(EX_OSERR, "Unable to create hast/%s device",
		    res->hr_provname);
	}
	pjdlog_debug(1,
	    "Device hast/%s already exists, we will try to take it over.",
	    res->hr_provname);
	/*
	 * If we received EEXIST, we assume that the process who created the
	 * provider died and didn't clean up. In that case we will start from
	 * where he left off.
	 */
	ggiocancel.gctl_version = G_GATE_VERSION;
	ggiocancel.gctl_unit = G_GATE_NAME_GIVEN;
	snprintf(ggiocancel.gctl_name, sizeof(ggiocancel.gctl_name), "hast/%s",
	    res->hr_provname);
	if (ioctl(res->hr_ggatefd, G_GATE_CMD_CANCEL, &ggiocancel) == 0) {
		pjdlog_info("Device hast/%s recovered.", res->hr_provname);
		res->hr_ggateunit = ggiocancel.gctl_unit;
		return;
	}
	primary_exit(EX_OSERR, "Unable to take over hast/%s device",
	    res->hr_provname);
}

/*
 * Entry point for primary mode: fork a worker child which initializes the
 * local component, the remote connections and the ggate provider, then
 * starts all worker threads. The parent only records the child's pid.
 */
void
hastd_primary(struct hast_resource *res)
{
	pthread_t td;
	pid_t pid;
	int error;

	gres = res;

	/*
	 * Create communication channel between parent and child.
	 */
	if (proto_client("socketpair://", &res->hr_ctrl) < 0) {
		KEEP_ERRNO((void)pidfile_remove(pfh));
		primary_exit(EX_OSERR,
		    "Unable to create control sockets between parent and child");
	}

	pid = fork();
	if (pid < 0) {
		KEEP_ERRNO((void)pidfile_remove(pfh));
		primary_exit(EX_TEMPFAIL, "Unable to fork");
	}

	if (pid > 0) {
		/* This is parent. */
		res->hr_workerpid = pid;
		return;
	}
	(void)pidfile_close(pfh);

	setproctitle("%s (primary)", res->hr_name);

	signal(SIGHUP, SIG_DFL);
	signal(SIGCHLD, SIG_DFL);

	init_local(res);
	/* Only start synchronization if a real remote exists and connects. */
	if (real_remote(res) && init_remote(res, NULL, NULL))
		sync_start();
	init_ggate(res);
	init_environment(res);
	error = pthread_create(&td, NULL, ggate_recv_thread, res);
	assert(error == 0);
	error = pthread_create(&td, NULL, local_send_thread, res);
	assert(error == 0);
	error = pthread_create(&td, NULL, remote_send_thread, res);
	assert(error == 0);
	error = pthread_create(&td, NULL, remote_recv_thread, res);
	assert(error == 0);
	error = pthread_create(&td, NULL, ggate_send_thread, res);
	assert(error == 0);
	error = pthread_create(&td, NULL, sync_thread, res);
	assert(error == 0);
	error = pthread_create(&td, NULL, ctrl_thread, res);
	assert(error == 0);
	/* The child's main thread becomes the guard thread. */
	(void)guard_thread(res);
}

/*
 * Log the given printf-style message followed by a human-readable
 * description of the ggate request (command, offset, length).
 */
static void
reqlog(int loglevel, int debuglevel, struct g_gate_ctl_io *ggio, const char *fmt, ...)
{
	char msg[1024];
	va_list ap;
	int len;

	va_start(ap, fmt);
	len = vsnprintf(msg, sizeof(msg), fmt, ap);
	va_end(ap);
	/* Only append request details if the prefix wasn't truncated. */
	if ((size_t)len < sizeof(msg)) {
		switch (ggio->gctl_cmd) {
		case BIO_READ:
			(void)snprintf(msg + len, sizeof(msg) - len,
			    "READ(%ju, %ju).", (uintmax_t)ggio->gctl_offset,
			    (uintmax_t)ggio->gctl_length);
			break;
		case BIO_DELETE:
			(void)snprintf(msg + len, sizeof(msg) - len,
			    "DELETE(%ju, %ju).", (uintmax_t)ggio->gctl_offset,
			    (uintmax_t)ggio->gctl_length);
			break;
		case BIO_FLUSH:
			(void)snprintf(msg + len, sizeof(msg) - len, "FLUSH.");
			break;
		case BIO_WRITE:
			(void)snprintf(msg + len, sizeof(msg) - len,
			    "WRITE(%ju, %ju).", (uintmax_t)ggio->gctl_offset,
			    (uintmax_t)ggio->gctl_length);
			break;
		default:
			(void)snprintf(msg + len, sizeof(msg) - len,
			    "UNKNOWN(%u).", (unsigned int)ggio->gctl_cmd);
			break;
		}
	}
	pjdlog_common(loglevel, debuglevel, -1, "%s", msg);
}

/*
 * Close both remote connections (if still open), stop synchronization and
 * wake the guard thread so it can start reconnecting immediately.
 */
static void
remote_close(struct hast_resource *res, int ncomp)
{

	rw_wlock(&hio_remote_lock[ncomp]);
	/*
	 * A race is possible between dropping rlock and acquiring wlock -
	 * another thread can close connection in-between.
	 */
	if (!ISCONNECTED(res, ncomp)) {
		assert(res->hr_remotein == NULL);
		assert(res->hr_remoteout == NULL);
		rw_unlock(&hio_remote_lock[ncomp]);
		return;
	}

	assert(res->hr_remotein != NULL);
	assert(res->hr_remoteout != NULL);

	pjdlog_debug(2, "Closing old incoming connection to %s.",
	    res->hr_remoteaddr);
	proto_close(res->hr_remotein);
	res->hr_remotein = NULL;
	pjdlog_debug(2, "Closing old outgoing connection to %s.",
	    res->hr_remoteaddr);
	proto_close(res->hr_remoteout);
	res->hr_remoteout = NULL;

	rw_unlock(&hio_remote_lock[ncomp]);

	/*
	 * Stop synchronization if in-progress.
	 */
	mtx_lock(&sync_lock);
	if (sync_inprogress)
		sync_inprogress = false;
	mtx_unlock(&sync_lock);

	/*
	 * Wake up guard thread, so it can immediately start reconnect.
	 */
	mtx_lock(&hio_guard_lock);
	cv_signal(&hio_guard_cond);
	mtx_unlock(&hio_guard_lock);
}

/*
 * Thread receives ggate I/O requests from the kernel and passes them to
 * appropriate threads:
 * WRITE - always goes to both local_send and remote_send threads
 * READ (when the block is up-to-date on local component) -
 *	only local_send thread
 * READ (when the block isn't up-to-date on local component) -
 *	only remote_send thread
 * DELETE - always goes to both local_send and remote_send threads
 * FLUSH - always goes to both local_send and remote_send threads
 */
static void *
ggate_recv_thread(void *arg)
{
	struct hast_resource *res = arg;
	struct g_gate_ctl_io *ggio;
	struct hio *hio;
	unsigned int ii, ncomp, ncomps;
	int error;

	ncomps = HAST_NCOMPONENTS;

	for (;;) {
		pjdlog_debug(2, "ggate_recv: Taking free request.");
		QUEUE_TAKE2(hio, free);
		pjdlog_debug(2, "ggate_recv: (%p) Got free request.", hio);
		ggio = &hio->hio_ggio;
		ggio->gctl_unit = res->hr_ggateunit;
		ggio->gctl_length = MAXPHYS;
		ggio->gctl_error = 0;
		pjdlog_debug(2,
		    "ggate_recv: (%p) Waiting for request from the kernel.",
		    hio);
		/* Blocks in the kernel until a request arrives. */
		if (ioctl(res->hr_ggatefd, G_GATE_CMD_START, ggio) < 0) {
			if (sigexit_received)
				pthread_exit(NULL);
			primary_exit(EX_OSERR, "G_GATE_CMD_START failed");
		}
		error = ggio->gctl_error;
		switch (error) {
		case 0:
			break;
		case ECANCELED:
			/* Exit gracefully. */
			if (!sigexit_received) {
				pjdlog_debug(2,
				    "ggate_recv: (%p) Received cancel from the kernel.",
				    hio);
				pjdlog_info("Received cancel from the kernel, exiting.");
			}
			pthread_exit(NULL);
		case ENOMEM:
			/*
			 * Buffer too small? Impossible, we allocate MAXPHYS
			 * bytes - request can't be bigger than that.
			 */
			/* FALLTHROUGH */
		case ENXIO:
		default:
			primary_exitx(EX_OSERR, "G_GATE_CMD_START failed: %s.",
			    strerror(error));
		}
		for (ii = 0; ii < ncomps; ii++)
			hio->hio_errors[ii] = EINVAL;
		reqlog(LOG_DEBUG, 2, ggio,
		    "ggate_recv: (%p) Request received from the kernel: ",
		    hio);
		/*
		 * Inform all components about new write request.
		 * For read request prefer local component unless the given
		 * range is out-of-date, then use remote component.
		 */
		switch (ggio->gctl_cmd) {
		case BIO_READ:
			pjdlog_debug(2,
			    "ggate_recv: (%p) Moving request to the send queue.",
			    hio);
			/* Reads are served by exactly one component. */
			refcount_init(&hio->hio_countdown, 1);
			mtx_lock(&metadata_lock);
			if (res->hr_syncsrc == HAST_SYNCSRC_UNDEF ||
			    res->hr_syncsrc == HAST_SYNCSRC_PRIMARY) {
				/*
				 * This range is up-to-date on local component,
				 * so handle request locally.
				 */
				/* Local component is 0 for now. */
				ncomp = 0;
			} else /* if (res->hr_syncsrc ==
			    HAST_SYNCSRC_SECONDARY) */ {
				assert(res->hr_syncsrc ==
				    HAST_SYNCSRC_SECONDARY);
				/*
				 * This range is out-of-date on local component,
				 * so send request to the remote node.
				 */
				/* Remote component is 1 for now. */
				ncomp = 1;
			}
			mtx_unlock(&metadata_lock);
			QUEUE_INSERT1(hio, send, ncomp);
			break;
		case BIO_WRITE:
			/*
			 * Take the regular range lock for this request,
			 * waiting out any overlapping sync activity first.
			 */
			for (;;) {
				mtx_lock(&range_lock);
				if (rangelock_islocked(range_sync,
				    ggio->gctl_offset, ggio->gctl_length)) {
					pjdlog_debug(2,
					    "regular: Range offset=%jd length=%zu locked.",
					    (intmax_t)ggio->gctl_offset,
					    (size_t)ggio->gctl_length);
					range_regular_wait = true;
					cv_wait(&range_regular_cond, &range_lock);
					range_regular_wait = false;
					mtx_unlock(&range_lock);
					continue;
				}
				if (rangelock_add(range_regular,
				    ggio->gctl_offset, ggio->gctl_length) < 0) {
					mtx_unlock(&range_lock);
					pjdlog_debug(2,
					    "regular: Range offset=%jd length=%zu is already locked, waiting.",
					    (intmax_t)ggio->gctl_offset,
					    (size_t)ggio->gctl_length);
					sleep(1);
					continue;
				}
				mtx_unlock(&range_lock);
				break;
			}
			/* Mark the extent dirty on disk before writing data. */
			mtx_lock(&res->hr_amp_lock);
			if (activemap_write_start(res->hr_amp,
			    ggio->gctl_offset, ggio->gctl_length)) {
				(void)hast_activemap_flush(res);
			}
			mtx_unlock(&res->hr_amp_lock);
			/* FALLTHROUGH */
		case BIO_DELETE:
		case BIO_FLUSH:
			pjdlog_debug(2,
			    "ggate_recv: (%p) Moving request to the send queues.",
			    hio);
			/* Writes/deletes/flushes go to every component. */
			refcount_init(&hio->hio_countdown, ncomps);
			for (ii = 0; ii < ncomps; ii++)
				QUEUE_INSERT1(hio, send, ii);
			break;
		}
	}
	/* NOTREACHED */
	return (NULL);
}

/*
 * Thread reads from or writes to local component.
 * If local read fails, it redirects it to remote_send thread.
 */
static void *
local_send_thread(void *arg)
{
	struct hast_resource *res = arg;
	struct g_gate_ctl_io *ggio;
	struct hio *hio;
	unsigned int ncomp, rncomp;
	ssize_t ret;

	/* Local component is 0 for now. */
	ncomp = 0;
	/* Remote component is 1 for now. */
	rncomp = 1;

	for (;;) {
		pjdlog_debug(2, "local_send: Taking request.");
		QUEUE_TAKE1(hio, send, ncomp);
		pjdlog_debug(2, "local_send: (%p) Got request.", hio);
		ggio = &hio->hio_ggio;
		switch (ggio->gctl_cmd) {
		case BIO_READ:
			ret = pread(res->hr_localfd, ggio->gctl_data,
			    ggio->gctl_length,
			    ggio->gctl_offset + res->hr_localoff);
			if (ret == ggio->gctl_length)
				hio->hio_errors[ncomp] = 0;
			else {
				/*
				 * If READ failed, try to read from remote node.
				 * Note: the request is re-queued without
				 * touching hio_countdown, so the remote
				 * component finishes it.
				 */
				QUEUE_INSERT1(hio, send, rncomp);
				continue;
			}
			break;
		case BIO_WRITE:
			ret = pwrite(res->hr_localfd, ggio->gctl_data,
			    ggio->gctl_length,
			    ggio->gctl_offset + res->hr_localoff);
			if (ret < 0)
				hio->hio_errors[ncomp] = errno;
			else if (ret != ggio->gctl_length)
				hio->hio_errors[ncomp] = EIO;
			else
				hio->hio_errors[ncomp] = 0;
			break;
		case BIO_DELETE:
			ret = g_delete(res->hr_localfd,
			    ggio->gctl_offset + res->hr_localoff,
			    ggio->gctl_length);
			if (ret < 0)
				hio->hio_errors[ncomp] = errno;
			else
				hio->hio_errors[ncomp] = 0;
			break;
		case BIO_FLUSH:
			ret = g_flush(res->hr_localfd);
			if (ret < 0)
				hio->hio_errors[ncomp] = errno;
			else
				hio->hio_errors[ncomp] = 0;
			break;
		}
		/* Last component to finish moves the request onward. */
		if (refcount_release(&hio->hio_countdown)) {
			if (ISSYNCREQ(hio)) {
				mtx_lock(&sync_lock);
				SYNCREQDONE(hio);
				mtx_unlock(&sync_lock);
				cv_signal(&sync_cond);
			} else {
				pjdlog_debug(2,
				    "local_send: (%p) Moving request to the done queue.",
				    hio);
				QUEUE_INSERT2(hio, done);
			}
		}
	}
	/* NOTREACHED */
	return (NULL);
}

/*
 * Thread sends request to secondary node.
1124 */ 1125 static void * 1126 remote_send_thread(void *arg) 1127 { 1128 struct hast_resource *res = arg; 1129 struct g_gate_ctl_io *ggio; 1130 struct hio *hio; 1131 struct nv *nv; 1132 unsigned int ncomp; 1133 bool wakeup; 1134 uint64_t offset, length; 1135 uint8_t cmd; 1136 void *data; 1137 1138 /* Remote component is 1 for now. */ 1139 ncomp = 1; 1140 1141 for (;;) { 1142 pjdlog_debug(2, "remote_send: Taking request."); 1143 QUEUE_TAKE1(hio, send, ncomp); 1144 pjdlog_debug(2, "remote_send: (%p) Got request.", hio); 1145 ggio = &hio->hio_ggio; 1146 switch (ggio->gctl_cmd) { 1147 case BIO_READ: 1148 cmd = HIO_READ; 1149 data = NULL; 1150 offset = ggio->gctl_offset; 1151 length = ggio->gctl_length; 1152 break; 1153 case BIO_WRITE: 1154 cmd = HIO_WRITE; 1155 data = ggio->gctl_data; 1156 offset = ggio->gctl_offset; 1157 length = ggio->gctl_length; 1158 break; 1159 case BIO_DELETE: 1160 cmd = HIO_DELETE; 1161 data = NULL; 1162 offset = ggio->gctl_offset; 1163 length = ggio->gctl_length; 1164 break; 1165 case BIO_FLUSH: 1166 cmd = HIO_FLUSH; 1167 data = NULL; 1168 offset = 0; 1169 length = 0; 1170 break; 1171 default: 1172 assert(!"invalid condition"); 1173 abort(); 1174 } 1175 nv = nv_alloc(); 1176 nv_add_uint8(nv, cmd, "cmd"); 1177 nv_add_uint64(nv, (uint64_t)ggio->gctl_seq, "seq"); 1178 nv_add_uint64(nv, offset, "offset"); 1179 nv_add_uint64(nv, length, "length"); 1180 if (nv_error(nv) != 0) { 1181 hio->hio_errors[ncomp] = nv_error(nv); 1182 pjdlog_debug(2, 1183 "remote_send: (%p) Unable to prepare header to send.", 1184 hio); 1185 reqlog(LOG_ERR, 0, ggio, 1186 "Unable to prepare header to send (%s): ", 1187 strerror(nv_error(nv))); 1188 /* Move failed request immediately to the done queue. */ 1189 goto done_queue; 1190 } 1191 pjdlog_debug(2, 1192 "remote_send: (%p) Moving request to the recv queue.", 1193 hio); 1194 /* 1195 * Protect connection from disappearing. 
1196 */ 1197 rw_rlock(&hio_remote_lock[ncomp]); 1198 if (!ISCONNECTED(res, ncomp)) { 1199 rw_unlock(&hio_remote_lock[ncomp]); 1200 hio->hio_errors[ncomp] = ENOTCONN; 1201 goto done_queue; 1202 } 1203 /* 1204 * Move the request to recv queue before sending it, because 1205 * in different order we can get reply before we move request 1206 * to recv queue. 1207 */ 1208 mtx_lock(&hio_recv_list_lock[ncomp]); 1209 wakeup = TAILQ_EMPTY(&hio_recv_list[ncomp]); 1210 TAILQ_INSERT_TAIL(&hio_recv_list[ncomp], hio, hio_next[ncomp]); 1211 mtx_unlock(&hio_recv_list_lock[ncomp]); 1212 if (hast_proto_send(res, res->hr_remoteout, nv, data, 1213 data != NULL ? length : 0) < 0) { 1214 hio->hio_errors[ncomp] = errno; 1215 rw_unlock(&hio_remote_lock[ncomp]); 1216 remote_close(res, ncomp); 1217 pjdlog_debug(2, 1218 "remote_send: (%p) Unable to send request.", hio); 1219 reqlog(LOG_ERR, 0, ggio, 1220 "Unable to send request (%s): ", 1221 strerror(hio->hio_errors[ncomp])); 1222 /* 1223 * Take request back from the receive queue and move 1224 * it immediately to the done queue. 
1225 */ 1226 mtx_lock(&hio_recv_list_lock[ncomp]); 1227 TAILQ_REMOVE(&hio_recv_list[ncomp], hio, hio_next[ncomp]); 1228 mtx_unlock(&hio_recv_list_lock[ncomp]); 1229 goto done_queue; 1230 } 1231 rw_unlock(&hio_remote_lock[ncomp]); 1232 nv_free(nv); 1233 if (wakeup) 1234 cv_signal(&hio_recv_list_cond[ncomp]); 1235 continue; 1236 done_queue: 1237 nv_free(nv); 1238 if (ISSYNCREQ(hio)) { 1239 if (!refcount_release(&hio->hio_countdown)) 1240 continue; 1241 mtx_lock(&sync_lock); 1242 SYNCREQDONE(hio); 1243 mtx_unlock(&sync_lock); 1244 cv_signal(&sync_cond); 1245 continue; 1246 } 1247 if (ggio->gctl_cmd == BIO_WRITE) { 1248 mtx_lock(&res->hr_amp_lock); 1249 if (activemap_need_sync(res->hr_amp, ggio->gctl_offset, 1250 ggio->gctl_length)) { 1251 (void)hast_activemap_flush(res); 1252 } 1253 mtx_unlock(&res->hr_amp_lock); 1254 } 1255 if (!refcount_release(&hio->hio_countdown)) 1256 continue; 1257 pjdlog_debug(2, 1258 "remote_send: (%p) Moving request to the done queue.", 1259 hio); 1260 QUEUE_INSERT2(hio, done); 1261 } 1262 /* NOTREACHED */ 1263 return (NULL); 1264 } 1265 1266 /* 1267 * Thread receives answer from secondary node and passes it to ggate_send 1268 * thread. 1269 */ 1270 static void * 1271 remote_recv_thread(void *arg) 1272 { 1273 struct hast_resource *res = arg; 1274 struct g_gate_ctl_io *ggio; 1275 struct hio *hio; 1276 struct nv *nv; 1277 unsigned int ncomp; 1278 uint64_t seq; 1279 int error; 1280 1281 /* Remote component is 1 for now. */ 1282 ncomp = 1; 1283 1284 for (;;) { 1285 /* Wait until there is anything to receive. 
*/ 1286 mtx_lock(&hio_recv_list_lock[ncomp]); 1287 while (TAILQ_EMPTY(&hio_recv_list[ncomp])) { 1288 pjdlog_debug(2, "remote_recv: No requests, waiting."); 1289 cv_wait(&hio_recv_list_cond[ncomp], 1290 &hio_recv_list_lock[ncomp]); 1291 } 1292 mtx_unlock(&hio_recv_list_lock[ncomp]); 1293 rw_rlock(&hio_remote_lock[ncomp]); 1294 if (!ISCONNECTED(res, ncomp)) { 1295 rw_unlock(&hio_remote_lock[ncomp]); 1296 /* 1297 * Connection is dead, so move all pending requests to 1298 * the done queue (one-by-one). 1299 */ 1300 mtx_lock(&hio_recv_list_lock[ncomp]); 1301 hio = TAILQ_FIRST(&hio_recv_list[ncomp]); 1302 assert(hio != NULL); 1303 TAILQ_REMOVE(&hio_recv_list[ncomp], hio, 1304 hio_next[ncomp]); 1305 mtx_unlock(&hio_recv_list_lock[ncomp]); 1306 goto done_queue; 1307 } 1308 if (hast_proto_recv_hdr(res->hr_remotein, &nv) < 0) { 1309 pjdlog_errno(LOG_ERR, 1310 "Unable to receive reply header"); 1311 rw_unlock(&hio_remote_lock[ncomp]); 1312 remote_close(res, ncomp); 1313 continue; 1314 } 1315 rw_unlock(&hio_remote_lock[ncomp]); 1316 seq = nv_get_uint64(nv, "seq"); 1317 if (seq == 0) { 1318 pjdlog_error("Header contains no 'seq' field."); 1319 nv_free(nv); 1320 continue; 1321 } 1322 mtx_lock(&hio_recv_list_lock[ncomp]); 1323 TAILQ_FOREACH(hio, &hio_recv_list[ncomp], hio_next[ncomp]) { 1324 if (hio->hio_ggio.gctl_seq == seq) { 1325 TAILQ_REMOVE(&hio_recv_list[ncomp], hio, 1326 hio_next[ncomp]); 1327 break; 1328 } 1329 } 1330 mtx_unlock(&hio_recv_list_lock[ncomp]); 1331 if (hio == NULL) { 1332 pjdlog_error("Found no request matching received 'seq' field (%ju).", 1333 (uintmax_t)seq); 1334 nv_free(nv); 1335 continue; 1336 } 1337 error = nv_get_int16(nv, "error"); 1338 if (error != 0) { 1339 /* Request failed on remote side. 
*/ 1340 hio->hio_errors[ncomp] = 0; 1341 nv_free(nv); 1342 goto done_queue; 1343 } 1344 ggio = &hio->hio_ggio; 1345 switch (ggio->gctl_cmd) { 1346 case BIO_READ: 1347 rw_rlock(&hio_remote_lock[ncomp]); 1348 if (!ISCONNECTED(res, ncomp)) { 1349 rw_unlock(&hio_remote_lock[ncomp]); 1350 nv_free(nv); 1351 goto done_queue; 1352 } 1353 if (hast_proto_recv_data(res, res->hr_remotein, nv, 1354 ggio->gctl_data, ggio->gctl_length) < 0) { 1355 hio->hio_errors[ncomp] = errno; 1356 pjdlog_errno(LOG_ERR, 1357 "Unable to receive reply data"); 1358 rw_unlock(&hio_remote_lock[ncomp]); 1359 nv_free(nv); 1360 remote_close(res, ncomp); 1361 goto done_queue; 1362 } 1363 rw_unlock(&hio_remote_lock[ncomp]); 1364 break; 1365 case BIO_WRITE: 1366 case BIO_DELETE: 1367 case BIO_FLUSH: 1368 break; 1369 default: 1370 assert(!"invalid condition"); 1371 abort(); 1372 } 1373 hio->hio_errors[ncomp] = 0; 1374 nv_free(nv); 1375 done_queue: 1376 if (refcount_release(&hio->hio_countdown)) { 1377 if (ISSYNCREQ(hio)) { 1378 mtx_lock(&sync_lock); 1379 SYNCREQDONE(hio); 1380 mtx_unlock(&sync_lock); 1381 cv_signal(&sync_cond); 1382 } else { 1383 pjdlog_debug(2, 1384 "remote_recv: (%p) Moving request to the done queue.", 1385 hio); 1386 QUEUE_INSERT2(hio, done); 1387 } 1388 } 1389 } 1390 /* NOTREACHED */ 1391 return (NULL); 1392 } 1393 1394 /* 1395 * Thread sends answer to the kernel. 1396 */ 1397 static void * 1398 ggate_send_thread(void *arg) 1399 { 1400 struct hast_resource *res = arg; 1401 struct g_gate_ctl_io *ggio; 1402 struct hio *hio; 1403 unsigned int ii, ncomp, ncomps; 1404 1405 ncomps = HAST_NCOMPONENTS; 1406 1407 for (;;) { 1408 pjdlog_debug(2, "ggate_send: Taking request."); 1409 QUEUE_TAKE2(hio, done); 1410 pjdlog_debug(2, "ggate_send: (%p) Got request.", hio); 1411 ggio = &hio->hio_ggio; 1412 for (ii = 0; ii < ncomps; ii++) { 1413 if (hio->hio_errors[ii] == 0) { 1414 /* 1415 * One successful request is enough to declare 1416 * success. 
1417 */ 1418 ggio->gctl_error = 0; 1419 break; 1420 } 1421 } 1422 if (ii == ncomps) { 1423 /* 1424 * None of the requests were successful. 1425 * Use first error. 1426 */ 1427 ggio->gctl_error = hio->hio_errors[0]; 1428 } 1429 if (ggio->gctl_error == 0 && ggio->gctl_cmd == BIO_WRITE) { 1430 mtx_lock(&res->hr_amp_lock); 1431 activemap_write_complete(res->hr_amp, 1432 ggio->gctl_offset, ggio->gctl_length); 1433 mtx_unlock(&res->hr_amp_lock); 1434 } 1435 if (ggio->gctl_cmd == BIO_WRITE) { 1436 /* 1437 * Unlock range we locked. 1438 */ 1439 mtx_lock(&range_lock); 1440 rangelock_del(range_regular, ggio->gctl_offset, 1441 ggio->gctl_length); 1442 if (range_sync_wait) 1443 cv_signal(&range_sync_cond); 1444 mtx_unlock(&range_lock); 1445 /* 1446 * Bump local count if this is first write after 1447 * connection failure with remote node. 1448 */ 1449 ncomp = 1; 1450 rw_rlock(&hio_remote_lock[ncomp]); 1451 if (!ISCONNECTED(res, ncomp)) { 1452 mtx_lock(&metadata_lock); 1453 if (res->hr_primary_localcnt == 1454 res->hr_secondary_remotecnt) { 1455 res->hr_primary_localcnt++; 1456 pjdlog_debug(1, 1457 "Increasing localcnt to %ju.", 1458 (uintmax_t)res->hr_primary_localcnt); 1459 (void)metadata_write(res); 1460 } 1461 mtx_unlock(&metadata_lock); 1462 } 1463 rw_unlock(&hio_remote_lock[ncomp]); 1464 } 1465 if (ioctl(res->hr_ggatefd, G_GATE_CMD_DONE, ggio) < 0) 1466 primary_exit(EX_OSERR, "G_GATE_CMD_DONE failed"); 1467 pjdlog_debug(2, 1468 "ggate_send: (%p) Moving request to the free queue.", hio); 1469 QUEUE_INSERT2(hio, free); 1470 } 1471 /* NOTREACHED */ 1472 return (NULL); 1473 } 1474 1475 /* 1476 * Thread synchronize local and remote components. 
1477 */ 1478 static void * 1479 sync_thread(void *arg __unused) 1480 { 1481 struct hast_resource *res = arg; 1482 struct hio *hio; 1483 struct g_gate_ctl_io *ggio; 1484 unsigned int ii, ncomp, ncomps; 1485 off_t offset, length, synced; 1486 bool dorewind; 1487 int syncext; 1488 1489 ncomps = HAST_NCOMPONENTS; 1490 dorewind = true; 1491 synced = 0; 1492 1493 for (;;) { 1494 mtx_lock(&sync_lock); 1495 while (!sync_inprogress) { 1496 dorewind = true; 1497 synced = 0; 1498 cv_wait(&sync_cond, &sync_lock); 1499 } 1500 mtx_unlock(&sync_lock); 1501 /* 1502 * Obtain offset at which we should synchronize. 1503 * Rewind synchronization if needed. 1504 */ 1505 mtx_lock(&res->hr_amp_lock); 1506 if (dorewind) 1507 activemap_sync_rewind(res->hr_amp); 1508 offset = activemap_sync_offset(res->hr_amp, &length, &syncext); 1509 if (syncext != -1) { 1510 /* 1511 * We synchronized entire syncext extent, we can mark 1512 * it as clean now. 1513 */ 1514 if (activemap_extent_complete(res->hr_amp, syncext)) 1515 (void)hast_activemap_flush(res); 1516 } 1517 mtx_unlock(&res->hr_amp_lock); 1518 if (dorewind) { 1519 dorewind = false; 1520 if (offset < 0) 1521 pjdlog_info("Nodes are in sync."); 1522 else { 1523 pjdlog_info("Synchronization started. %ju bytes to go.", 1524 (uintmax_t)(res->hr_extentsize * 1525 activemap_ndirty(res->hr_amp))); 1526 } 1527 } 1528 if (offset < 0) { 1529 mtx_lock(&sync_lock); 1530 sync_inprogress = false; 1531 mtx_unlock(&sync_lock); 1532 pjdlog_debug(1, "Nothing to synchronize."); 1533 /* 1534 * Synchronization complete, make both localcnt and 1535 * remotecnt equal. 1536 */ 1537 ncomp = 1; 1538 rw_rlock(&hio_remote_lock[ncomp]); 1539 if (ISCONNECTED(res, ncomp)) { 1540 if (synced > 0) { 1541 pjdlog_info("Synchronization complete. 
" 1542 "%jd bytes synchronized.", 1543 (intmax_t)synced); 1544 } 1545 mtx_lock(&metadata_lock); 1546 res->hr_syncsrc = HAST_SYNCSRC_UNDEF; 1547 res->hr_primary_localcnt = 1548 res->hr_secondary_localcnt; 1549 res->hr_primary_remotecnt = 1550 res->hr_secondary_remotecnt; 1551 pjdlog_debug(1, 1552 "Setting localcnt to %ju and remotecnt to %ju.", 1553 (uintmax_t)res->hr_primary_localcnt, 1554 (uintmax_t)res->hr_secondary_localcnt); 1555 (void)metadata_write(res); 1556 mtx_unlock(&metadata_lock); 1557 } else if (synced > 0) { 1558 pjdlog_info("Synchronization interrupted. " 1559 "%jd bytes synchronized so far.", 1560 (intmax_t)synced); 1561 } 1562 rw_unlock(&hio_remote_lock[ncomp]); 1563 continue; 1564 } 1565 pjdlog_debug(2, "sync: Taking free request."); 1566 QUEUE_TAKE2(hio, free); 1567 pjdlog_debug(2, "sync: (%p) Got free request.", hio); 1568 /* 1569 * Lock the range we are going to synchronize. We don't want 1570 * race where someone writes between our read and write. 1571 */ 1572 for (;;) { 1573 mtx_lock(&range_lock); 1574 if (rangelock_islocked(range_regular, offset, length)) { 1575 pjdlog_debug(2, 1576 "sync: Range offset=%jd length=%jd locked.", 1577 (intmax_t)offset, (intmax_t)length); 1578 range_sync_wait = true; 1579 cv_wait(&range_sync_cond, &range_lock); 1580 range_sync_wait = false; 1581 mtx_unlock(&range_lock); 1582 continue; 1583 } 1584 if (rangelock_add(range_sync, offset, length) < 0) { 1585 mtx_unlock(&range_lock); 1586 pjdlog_debug(2, 1587 "sync: Range offset=%jd length=%jd is already locked, waiting.", 1588 (intmax_t)offset, (intmax_t)length); 1589 sleep(1); 1590 continue; 1591 } 1592 mtx_unlock(&range_lock); 1593 break; 1594 } 1595 /* 1596 * First read the data from synchronization source. 
1597 */ 1598 SYNCREQ(hio); 1599 ggio = &hio->hio_ggio; 1600 ggio->gctl_cmd = BIO_READ; 1601 ggio->gctl_offset = offset; 1602 ggio->gctl_length = length; 1603 ggio->gctl_error = 0; 1604 for (ii = 0; ii < ncomps; ii++) 1605 hio->hio_errors[ii] = EINVAL; 1606 reqlog(LOG_DEBUG, 2, ggio, "sync: (%p) Sending sync request: ", 1607 hio); 1608 pjdlog_debug(2, "sync: (%p) Moving request to the send queue.", 1609 hio); 1610 mtx_lock(&metadata_lock); 1611 if (res->hr_syncsrc == HAST_SYNCSRC_PRIMARY) { 1612 /* 1613 * This range is up-to-date on local component, 1614 * so handle request locally. 1615 */ 1616 /* Local component is 0 for now. */ 1617 ncomp = 0; 1618 } else /* if (res->hr_syncsrc == HAST_SYNCSRC_SECONDARY) */ { 1619 assert(res->hr_syncsrc == HAST_SYNCSRC_SECONDARY); 1620 /* 1621 * This range is out-of-date on local component, 1622 * so send request to the remote node. 1623 */ 1624 /* Remote component is 1 for now. */ 1625 ncomp = 1; 1626 } 1627 mtx_unlock(&metadata_lock); 1628 refcount_init(&hio->hio_countdown, 1); 1629 QUEUE_INSERT1(hio, send, ncomp); 1630 1631 /* 1632 * Let's wait for READ to finish. 1633 */ 1634 mtx_lock(&sync_lock); 1635 while (!ISSYNCREQDONE(hio)) 1636 cv_wait(&sync_cond, &sync_lock); 1637 mtx_unlock(&sync_lock); 1638 1639 if (hio->hio_errors[ncomp] != 0) { 1640 pjdlog_error("Unable to read synchronization data: %s.", 1641 strerror(hio->hio_errors[ncomp])); 1642 goto free_queue; 1643 } 1644 1645 /* 1646 * We read the data from synchronization source, now write it 1647 * to synchronization target. 
1648 */ 1649 SYNCREQ(hio); 1650 ggio->gctl_cmd = BIO_WRITE; 1651 for (ii = 0; ii < ncomps; ii++) 1652 hio->hio_errors[ii] = EINVAL; 1653 reqlog(LOG_DEBUG, 2, ggio, "sync: (%p) Sending sync request: ", 1654 hio); 1655 pjdlog_debug(2, "sync: (%p) Moving request to the send queue.", 1656 hio); 1657 mtx_lock(&metadata_lock); 1658 if (res->hr_syncsrc == HAST_SYNCSRC_PRIMARY) { 1659 /* 1660 * This range is up-to-date on local component, 1661 * so we update remote component. 1662 */ 1663 /* Remote component is 1 for now. */ 1664 ncomp = 1; 1665 } else /* if (res->hr_syncsrc == HAST_SYNCSRC_SECONDARY) */ { 1666 assert(res->hr_syncsrc == HAST_SYNCSRC_SECONDARY); 1667 /* 1668 * This range is out-of-date on local component, 1669 * so we update it. 1670 */ 1671 /* Local component is 0 for now. */ 1672 ncomp = 0; 1673 } 1674 mtx_unlock(&metadata_lock); 1675 1676 pjdlog_debug(2, "sync: (%p) Moving request to the send queues.", 1677 hio); 1678 refcount_init(&hio->hio_countdown, 1); 1679 QUEUE_INSERT1(hio, send, ncomp); 1680 1681 /* 1682 * Let's wait for WRITE to finish. 
1683 */ 1684 mtx_lock(&sync_lock); 1685 while (!ISSYNCREQDONE(hio)) 1686 cv_wait(&sync_cond, &sync_lock); 1687 mtx_unlock(&sync_lock); 1688 1689 if (hio->hio_errors[ncomp] != 0) { 1690 pjdlog_error("Unable to write synchronization data: %s.", 1691 strerror(hio->hio_errors[ncomp])); 1692 goto free_queue; 1693 } 1694 free_queue: 1695 mtx_lock(&range_lock); 1696 rangelock_del(range_sync, offset, length); 1697 if (range_regular_wait) 1698 cv_signal(&range_regular_cond); 1699 mtx_unlock(&range_lock); 1700 1701 synced += length; 1702 1703 pjdlog_debug(2, "sync: (%p) Moving request to the free queue.", 1704 hio); 1705 QUEUE_INSERT2(hio, free); 1706 } 1707 /* NOTREACHED */ 1708 return (NULL); 1709 } 1710 1711 static void 1712 sighandler(int sig) 1713 { 1714 bool unlock; 1715 1716 switch (sig) { 1717 case SIGINT: 1718 case SIGTERM: 1719 sigexit_received = true; 1720 break; 1721 case SIGHUP: 1722 sighup_received = true; 1723 break; 1724 default: 1725 assert(!"invalid condition"); 1726 } 1727 /* 1728 * XXX: Racy, but if we cannot obtain hio_guard_lock here, we don't 1729 * want to risk deadlock. 1730 */ 1731 unlock = mtx_trylock(&hio_guard_lock); 1732 cv_signal(&hio_guard_cond); 1733 if (unlock) 1734 mtx_unlock(&hio_guard_lock); 1735 } 1736 1737 static void 1738 config_reload(void) 1739 { 1740 struct hastd_config *newcfg; 1741 struct hast_resource *res; 1742 unsigned int ii, ncomps; 1743 int modified; 1744 1745 pjdlog_info("Reloading configuration..."); 1746 1747 ncomps = HAST_NCOMPONENTS; 1748 1749 newcfg = yy_config_parse(cfgpath, false); 1750 if (newcfg == NULL) 1751 goto failed; 1752 1753 TAILQ_FOREACH(res, &newcfg->hc_resources, hr_next) { 1754 if (strcmp(res->hr_name, gres->hr_name) == 0) 1755 break; 1756 } 1757 /* 1758 * If resource was removed from the configuration file, resource 1759 * name, provider name or path to local component was modified we 1760 * shouldn't be here. 
This means that someone modified configuration
	 * file and send SIGHUP to us instead of main hastd process.
	 * Log advice and ignore the signal.
	 */
	if (res == NULL || strcmp(gres->hr_name, res->hr_name) != 0 ||
	    strcmp(gres->hr_provname, res->hr_provname) != 0 ||
	    strcmp(gres->hr_localpath, res->hr_localpath) != 0) {
		pjdlog_warning("To reload configuration send SIGHUP to the main hastd process (pid %u).",
		    (unsigned int)getppid());
		goto failed;
	}

	/* Bit flags describing which tunables changed. */
#define MODIFIED_REMOTEADDR	0x1
#define MODIFIED_REPLICATION	0x2
#define MODIFIED_TIMEOUT	0x4
	modified = 0;
	if (strcmp(gres->hr_remoteaddr, res->hr_remoteaddr) != 0) {
		/*
		 * Don't copy res->hr_remoteaddr to gres just yet.
		 * We want remote_close() to log disconnect from the old
		 * addresses, not from the new ones.
		 */
		modified |= MODIFIED_REMOTEADDR;
	}
	if (gres->hr_replication != res->hr_replication) {
		gres->hr_replication = res->hr_replication;
		modified |= MODIFIED_REPLICATION;
	}
	if (gres->hr_timeout != res->hr_timeout) {
		gres->hr_timeout = res->hr_timeout;
		modified |= MODIFIED_TIMEOUT;
	}
	/*
	 * If only timeout was modified we only need to change it without
	 * reconnecting.
	 */
	if (modified == MODIFIED_TIMEOUT) {
		for (ii = 0; ii < ncomps; ii++) {
			if (!ISREMOTE(ii))
				continue;
			rw_rlock(&hio_remote_lock[ii]);
			if (!ISCONNECTED(gres, ii)) {
				rw_unlock(&hio_remote_lock[ii]);
				continue;
			}
			rw_unlock(&hio_remote_lock[ii]);
			if (proto_timeout(gres->hr_remotein,
			    gres->hr_timeout) < 0) {
				pjdlog_errno(LOG_WARNING,
				    "Unable to set connection timeout");
			}
			if (proto_timeout(gres->hr_remoteout,
			    gres->hr_timeout) < 0) {
				pjdlog_errno(LOG_WARNING,
				    "Unable to set connection timeout");
			}
		}
	} else {
		/*
		 * Address or replication mode changed - drop the remote
		 * connections; the guard thread will reconnect using the
		 * (possibly updated) address.
		 */
		for (ii = 0; ii < ncomps; ii++) {
			if (!ISREMOTE(ii))
				continue;
			remote_close(gres, ii);
		}
		if (modified & MODIFIED_REMOTEADDR) {
			strlcpy(gres->hr_remoteaddr, res->hr_remoteaddr,
			    sizeof(gres->hr_remoteaddr));
		}
	}
#undef MODIFIED_REMOTEADDR
#undef MODIFIED_REPLICATION
#undef MODIFIED_TIMEOUT

	pjdlog_info("Configuration reloaded successfully.");
	return;
failed:
	if (newcfg != NULL) {
		if (newcfg->hc_controlconn != NULL)
			proto_close(newcfg->hc_controlconn);
		if (newcfg->hc_listenconn != NULL)
			proto_close(newcfg->hc_listenconn);
		yy_config_free(newcfg);
	}
	pjdlog_warning("Configuration not reloaded.");
}

/*
 * Thread guards remote connections and reconnects when needed, handles
 * signals, etc.
 */
static void *
guard_thread(void *arg)
{
	struct hast_resource *res = arg;
	struct proto_conn *in, *out;
	unsigned int ii, ncomps;
	int timeout;

	ncomps = HAST_NCOMPONENTS;

	for (;;) {
		/* Flags are set asynchronously by sighandler(). */
		if (sigexit_received) {
			primary_exitx(EX_OK,
			    "Termination signal received, exiting.");
		}
		if (sighup_received) {
			sighup_received = false;
			config_reload();
		}
		/*
		 * If all the connection will be fine, we will sleep until
		 * someone wakes us up.
1871 * If any of the connections will be broken and we won't be 1872 * able to connect, we will sleep only for RECONNECT_SLEEP 1873 * seconds so we can retry soon. 1874 */ 1875 timeout = 0; 1876 pjdlog_debug(2, "remote_guard: Checking connections."); 1877 mtx_lock(&hio_guard_lock); 1878 for (ii = 0; ii < ncomps; ii++) { 1879 if (!ISREMOTE(ii)) 1880 continue; 1881 rw_rlock(&hio_remote_lock[ii]); 1882 if (ISCONNECTED(res, ii)) { 1883 assert(res->hr_remotein != NULL); 1884 assert(res->hr_remoteout != NULL); 1885 rw_unlock(&hio_remote_lock[ii]); 1886 pjdlog_debug(2, 1887 "remote_guard: Connection to %s is ok.", 1888 res->hr_remoteaddr); 1889 } else if (real_remote(res)) { 1890 assert(res->hr_remotein == NULL); 1891 assert(res->hr_remoteout == NULL); 1892 /* 1893 * Upgrade the lock. It doesn't have to be 1894 * atomic as no other thread can change 1895 * connection status from disconnected to 1896 * connected. 1897 */ 1898 rw_unlock(&hio_remote_lock[ii]); 1899 pjdlog_debug(2, 1900 "remote_guard: Reconnecting to %s.", 1901 res->hr_remoteaddr); 1902 in = out = NULL; 1903 if (init_remote(res, &in, &out)) { 1904 rw_wlock(&hio_remote_lock[ii]); 1905 assert(res->hr_remotein == NULL); 1906 assert(res->hr_remoteout == NULL); 1907 assert(in != NULL && out != NULL); 1908 res->hr_remotein = in; 1909 res->hr_remoteout = out; 1910 rw_unlock(&hio_remote_lock[ii]); 1911 pjdlog_info("Successfully reconnected to %s.", 1912 res->hr_remoteaddr); 1913 sync_start(); 1914 } else { 1915 /* Both connections should be NULL. 
*/ 1916 assert(res->hr_remotein == NULL); 1917 assert(res->hr_remoteout == NULL); 1918 assert(in == NULL && out == NULL); 1919 pjdlog_debug(2, 1920 "remote_guard: Reconnect to %s failed.", 1921 res->hr_remoteaddr); 1922 timeout = RECONNECT_SLEEP; 1923 } 1924 } else { 1925 rw_unlock(&hio_remote_lock[ii]); 1926 } 1927 } 1928 (void)cv_timedwait(&hio_guard_cond, &hio_guard_lock, timeout); 1929 mtx_unlock(&hio_guard_lock); 1930 } 1931 /* NOTREACHED */ 1932 return (NULL); 1933 } 1934