/*-
 * Copyright (c) 2009 The FreeBSD Foundation
 * Copyright (c) 2010-2011 Pawel Jakub Dawidek <pawel@dawidek.net>
 * All rights reserved.
 *
 * This software was developed by Pawel Jakub Dawidek under sponsorship from
 * the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/types.h>
#include <sys/time.h>
#include <sys/bio.h>
#include <sys/disk.h>
#include <sys/stat.h>

#include <geom/gate/g_gate.h>

#include <err.h>
#include <errno.h>
#include <fcntl.h>
#include <libgeom.h>
#include <pthread.h>
#include <signal.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sysexits.h>
#include <unistd.h>

#include <activemap.h>
#include <nv.h>
#include <rangelock.h>

#include "control.h"
#include "event.h"
#include "hast.h"
#include "hast_proto.h"
#include "hastd.h"
#include "hooks.h"
#include "metadata.h"
#include "proto.h"
#include "pjdlog.h"
#include "refcnt.h"
#include "subr.h"
#include "synch.h"

/* There is only one remote component for now. */
#define	ISREMOTE(no)	((no) == 1)

struct hio {
	/*
	 * Number of components we are still waiting for.
	 * When this field goes to 0, we can send the request back to the
	 * kernel. Each component has to decrease this counter by one
	 * even on failure.
	 */
	refcnt_t		 hio_countdown;
	/*
	 * Each component has a place to store its own error.
	 * Once the request is handled by all components we can decide if the
	 * request overall is successful or not.
	 */
	int			*hio_errors;
	/*
	 * Structure used to communicate with GEOM Gate class.
	 */
	struct g_gate_ctl_io	 hio_ggio;
	/*
	 * Request was already confirmed to GEOM Gate.
	 */
	bool			 hio_done;
	/*
	 * Number of components we are still waiting for before sending write
	 * completion ack to GEOM Gate. Used for memsync.
	 */
	refcnt_t		 hio_writecount;
	/*
	 * Memsync request was acknowledged by remote.
	 */
	bool			 hio_memsyncacked;
	/*
	 * Remember replication from the time the request was initiated,
	 * so we won't get confused when replication changes on reload.
	 */
	int			 hio_replication;
	TAILQ_ENTRY(hio)	*hio_next;
};
#define	hio_free_next	hio_next[0]
#define	hio_done_next	hio_next[0]
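
/*
 * Life cycle of a request: a hio is taken from the free list by
 * ggate_recv_thread and placed on the send list of every component that
 * has to handle it; for the remote component it additionally sits on the
 * recv list while we wait for the secondary's reply. The component that
 * releases the last hio_countdown reference moves the request to the
 * done list, where ggate_send_thread completes it to the kernel and
 * returns it to the free list.
 */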

/*
 * Free list holds unused structures. When free list is empty, we have to wait
 * until some in-progress requests are freed.
 */
static TAILQ_HEAD(, hio) hio_free_list;
static size_t hio_free_list_size;
static pthread_mutex_t hio_free_list_lock;
static pthread_cond_t hio_free_list_cond;
/*
 * There is one send list for every component. One request is placed on all
 * send lists - each component gets the same request, but each component is
 * responsible for managing its own send list.
 */
static TAILQ_HEAD(, hio) *hio_send_list;
static size_t *hio_send_list_size;
static pthread_mutex_t *hio_send_list_lock;
static pthread_cond_t *hio_send_list_cond;
#define	hio_send_local_list_size	hio_send_list_size[0]
#define	hio_send_remote_list_size	hio_send_list_size[1]
/*
 * There is one recv list for every component, although local components don't
 * use recv lists as local requests are done synchronously.
 */
static TAILQ_HEAD(, hio) *hio_recv_list;
static size_t *hio_recv_list_size;
static pthread_mutex_t *hio_recv_list_lock;
static pthread_cond_t *hio_recv_list_cond;
#define	hio_recv_remote_list_size	hio_recv_list_size[1]
/*
 * Request is placed on done list by the slowest component (the one that
 * decreased hio_countdown from 1 to 0).
 */
static TAILQ_HEAD(, hio) hio_done_list;
static size_t hio_done_list_size;
static pthread_mutex_t hio_done_list_lock;
static pthread_cond_t hio_done_list_cond;
/*
 * Structures below are for interaction with sync thread.
 */
static bool sync_inprogress;
static pthread_mutex_t sync_lock;
static pthread_cond_t sync_cond;
/*
 * The lock below allows synchronizing access to remote connections.
 */
static pthread_rwlock_t *hio_remote_lock;

/*
 * Lock to synchronize metadata updates. Also synchronizes access to the
 * hr_primary_localcnt and hr_primary_remotecnt fields.
 */
static pthread_mutex_t metadata_lock;

/*
 * Maximum number of outstanding I/O requests.
 */
#define	HAST_HIO_MAX	256
/*
 * Number of components. At this point there are only two components: local
 * and remote, but in the future it might be possible to use multiple local
 * and remote components.
 */
#define	HAST_NCOMPONENTS	2

#define	ISCONNECTED(res, no)	\
	((res)->hr_remotein != NULL && (res)->hr_remoteout != NULL)

#define	QUEUE_INSERT1(hio, name, ncomp)	do {				\
	mtx_lock(&hio_##name##_list_lock[(ncomp)]);			\
	if (TAILQ_EMPTY(&hio_##name##_list[(ncomp)]))			\
		cv_broadcast(&hio_##name##_list_cond[(ncomp)]);		\
	TAILQ_INSERT_TAIL(&hio_##name##_list[(ncomp)], (hio),		\
	    hio_next[(ncomp)]);						\
	hio_##name##_list_size[(ncomp)]++;				\
	mtx_unlock(&hio_##name##_list_lock[(ncomp)]);			\
} while (0)
#define	QUEUE_INSERT2(hio, name)	do {				\
	mtx_lock(&hio_##name##_list_lock);				\
	if (TAILQ_EMPTY(&hio_##name##_list))				\
		cv_broadcast(&hio_##name##_list_cond);			\
	TAILQ_INSERT_TAIL(&hio_##name##_list, (hio), hio_##name##_next);\
	hio_##name##_list_size++;					\
	mtx_unlock(&hio_##name##_list_lock);				\
} while (0)
#define	QUEUE_TAKE1(hio, name, ncomp, timeout)	do {			\
	bool _last;							\
									\
	mtx_lock(&hio_##name##_list_lock[(ncomp)]);			\
	_last = false;							\
	while (((hio) = TAILQ_FIRST(&hio_##name##_list[(ncomp)])) == NULL && !_last) { \
		cv_timedwait(&hio_##name##_list_cond[(ncomp)],		\
		    &hio_##name##_list_lock[(ncomp)], (timeout));	\
		if ((timeout) != 0)					\
			_last = true;					\
	}								\
	if (hio != NULL) {						\
		PJDLOG_ASSERT(hio_##name##_list_size[(ncomp)] != 0);	\
		hio_##name##_list_size[(ncomp)]--;			\
		TAILQ_REMOVE(&hio_##name##_list[(ncomp)], (hio),	\
		    hio_next[(ncomp)]);					\
	}								\
	mtx_unlock(&hio_##name##_list_lock[(ncomp)]);			\
} while (0)
#define	QUEUE_TAKE2(hio, name)	do {					\
	mtx_lock(&hio_##name##_list_lock);				\
	while (((hio) = TAILQ_FIRST(&hio_##name##_list)) == NULL) {	\
		cv_wait(&hio_##name##_list_cond,			\
		    &hio_##name##_list_lock);				\
	}								\
	PJDLOG_ASSERT(hio_##name##_list_size != 0);			\
	hio_##name##_list_size--;					\
	TAILQ_REMOVE(&hio_##name##_list, (hio), hio_##name##_next);	\
	mtx_unlock(&hio_##name##_list_lock);				\
} while (0)
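
/*
 * Typical usage of the queue macros (illustrative; see the worker
 * threads below for the real call sites):
 *
 *	QUEUE_TAKE1(hio, send, ncomp, 0);
 *	(process the request)
 *	QUEUE_INSERT2(hio, done);
 *
 * With a non-zero timeout (e.g. HAST_KEEPALIVE in remote_send_thread)
 * QUEUE_TAKE1 gives up after one cv_timedwait() and sets hio to NULL,
 * which the caller must handle.
 */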

#define	ISFULLSYNC(hio)	((hio)->hio_replication == HAST_REPLICATION_FULLSYNC)
#define	ISMEMSYNC(hio)	((hio)->hio_replication == HAST_REPLICATION_MEMSYNC)
#define	ISASYNC(hio)	((hio)->hio_replication == HAST_REPLICATION_ASYNC)

#define	SYNCREQ(hio)		do {					\
	(hio)->hio_ggio.gctl_unit = -1;					\
	(hio)->hio_ggio.gctl_seq = 1;					\
} while (0)
#define	ISSYNCREQ(hio)		((hio)->hio_ggio.gctl_unit == -1)
#define	SYNCREQDONE(hio)	do { (hio)->hio_ggio.gctl_unit = -2; } while (0)
#define	ISSYNCREQDONE(hio)	((hio)->hio_ggio.gctl_unit == -2)

#define	ISMEMSYNCWRITE(hio)	(ISMEMSYNC(hio) &&			\
	    (hio)->hio_ggio.gctl_cmd == BIO_WRITE && !ISSYNCREQ(hio))

static struct hast_resource *gres;

static pthread_mutex_t range_lock;
static struct rangelocks *range_regular;
static bool range_regular_wait;
static pthread_cond_t range_regular_cond;
static struct rangelocks *range_sync;
static bool range_sync_wait;
static pthread_cond_t range_sync_cond;
static bool fullystarted;

static void *ggate_recv_thread(void *arg);
static void *local_send_thread(void *arg);
static void *remote_send_thread(void *arg);
static void *remote_recv_thread(void *arg);
static void *ggate_send_thread(void *arg);
static void *sync_thread(void *arg);
static void *guard_thread(void *arg);

static void
output_status_aux(struct nv *nvout)
{

	nv_add_uint64(nvout, (uint64_t)hio_free_list_size,
	    "idle_queue_size");
	nv_add_uint64(nvout, (uint64_t)hio_send_local_list_size,
	    "local_queue_size");
	nv_add_uint64(nvout, (uint64_t)hio_send_remote_list_size,
	    "send_queue_size");
	nv_add_uint64(nvout, (uint64_t)hio_recv_remote_list_size,
	    "recv_queue_size");
	nv_add_uint64(nvout, (uint64_t)hio_done_list_size,
	    "done_queue_size");
}

static void
cleanup(struct hast_resource *res)
{
	int rerrno;

	/* Remember errno. */
	rerrno = errno;

	/* Destroy ggate provider if we created one. */
	if (res->hr_ggateunit >= 0) {
		struct g_gate_ctl_destroy ggiod;

		bzero(&ggiod, sizeof(ggiod));
		ggiod.gctl_version = G_GATE_VERSION;
		ggiod.gctl_unit = res->hr_ggateunit;
		ggiod.gctl_force = 1;
		if (ioctl(res->hr_ggatefd, G_GATE_CMD_DESTROY, &ggiod) == -1) {
			pjdlog_errno(LOG_WARNING,
			    "Unable to destroy hast/%s device",
			    res->hr_provname);
		}
		res->hr_ggateunit = -1;
	}

	/* Restore errno. */
	errno = rerrno;
}

static __dead2 void
primary_exit(int exitcode, const char *fmt, ...)
{
	va_list ap;

	PJDLOG_ASSERT(exitcode != EX_OK);
	va_start(ap, fmt);
	pjdlogv_errno(LOG_ERR, fmt, ap);
	va_end(ap);
	cleanup(gres);
	exit(exitcode);
}

static __dead2 void
primary_exitx(int exitcode, const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	pjdlogv(exitcode == EX_OK ? LOG_INFO : LOG_ERR, fmt, ap);
	va_end(ap);
	cleanup(gres);
	exit(exitcode);
}

static int
hast_activemap_flush(struct hast_resource *res) __unlocks(res->hr_amp_lock)
{
	const unsigned char *buf;
	size_t size;
	int ret;

	mtx_lock(&res->hr_amp_diskmap_lock);
	buf = activemap_bitmap(res->hr_amp, &size);
	mtx_unlock(&res->hr_amp_lock);
	PJDLOG_ASSERT(buf != NULL);
	PJDLOG_ASSERT((size % res->hr_local_sectorsize) == 0);
	ret = 0;
	if (pwrite(res->hr_localfd, buf, size, METADATA_SIZE) !=
	    (ssize_t)size) {
		pjdlog_errno(LOG_ERR, "Unable to flush activemap to disk");
		res->hr_stat_activemap_write_error++;
		ret = -1;
	}
	if (ret == 0 && res->hr_metaflush == 1 &&
	    g_flush(res->hr_localfd) == -1) {
		if (errno == EOPNOTSUPP) {
			pjdlog_warning("The %s provider doesn't support flushing write cache. Disabling it.",
			    res->hr_localpath);
			res->hr_metaflush = 0;
		} else {
			pjdlog_errno(LOG_ERR,
			    "Unable to flush disk cache on activemap update");
			res->hr_stat_activemap_flush_error++;
			ret = -1;
		}
	}
	mtx_unlock(&res->hr_amp_diskmap_lock);
	return (ret);
}
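
/*
 * Note on locking: hast_activemap_flush() is entered with hr_amp_lock
 * held and drops it (see the __unlocks() annotation above) as soon as
 * the bitmap snapshot has been taken, so the on-disk write proceeds
 * under hr_amp_diskmap_lock only.
 */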

static bool
real_remote(const struct hast_resource *res)
{

	return (strcmp(res->hr_remoteaddr, "none") != 0);
}

static void
init_environment(struct hast_resource *res __unused)
{
	struct hio *hio;
	unsigned int ii, ncomps;

	/*
	 * In the future it might be a per-resource value.
	 */
	ncomps = HAST_NCOMPONENTS;

	/*
	 * Allocate memory needed by lists.
	 */
	hio_send_list = malloc(sizeof(hio_send_list[0]) * ncomps);
	if (hio_send_list == NULL) {
		primary_exitx(EX_TEMPFAIL,
		    "Unable to allocate %zu bytes of memory for send lists.",
		    sizeof(hio_send_list[0]) * ncomps);
	}
	hio_send_list_size = malloc(sizeof(hio_send_list_size[0]) * ncomps);
	if (hio_send_list_size == NULL) {
		primary_exitx(EX_TEMPFAIL,
		    "Unable to allocate %zu bytes of memory for send list counters.",
		    sizeof(hio_send_list_size[0]) * ncomps);
	}
	hio_send_list_lock = malloc(sizeof(hio_send_list_lock[0]) * ncomps);
	if (hio_send_list_lock == NULL) {
		primary_exitx(EX_TEMPFAIL,
		    "Unable to allocate %zu bytes of memory for send list locks.",
		    sizeof(hio_send_list_lock[0]) * ncomps);
	}
	hio_send_list_cond = malloc(sizeof(hio_send_list_cond[0]) * ncomps);
	if (hio_send_list_cond == NULL) {
		primary_exitx(EX_TEMPFAIL,
		    "Unable to allocate %zu bytes of memory for send list condition variables.",
		    sizeof(hio_send_list_cond[0]) * ncomps);
	}
	hio_recv_list = malloc(sizeof(hio_recv_list[0]) * ncomps);
	if (hio_recv_list == NULL) {
		primary_exitx(EX_TEMPFAIL,
		    "Unable to allocate %zu bytes of memory for recv lists.",
		    sizeof(hio_recv_list[0]) * ncomps);
	}
	hio_recv_list_size = malloc(sizeof(hio_recv_list_size[0]) * ncomps);
	if (hio_recv_list_size == NULL) {
		primary_exitx(EX_TEMPFAIL,
		    "Unable to allocate %zu bytes of memory for recv list counters.",
		    sizeof(hio_recv_list_size[0]) * ncomps);
	}
	hio_recv_list_lock = malloc(sizeof(hio_recv_list_lock[0]) * ncomps);
	if (hio_recv_list_lock == NULL) {
		primary_exitx(EX_TEMPFAIL,
		    "Unable to allocate %zu bytes of memory for recv list locks.",
		    sizeof(hio_recv_list_lock[0]) * ncomps);
	}
	hio_recv_list_cond = malloc(sizeof(hio_recv_list_cond[0]) * ncomps);
	if (hio_recv_list_cond == NULL) {
		primary_exitx(EX_TEMPFAIL,
		    "Unable to allocate %zu bytes of memory for recv list condition variables.",
		    sizeof(hio_recv_list_cond[0]) * ncomps);
	}
	hio_remote_lock = malloc(sizeof(hio_remote_lock[0]) * ncomps);
	if (hio_remote_lock == NULL) {
		primary_exitx(EX_TEMPFAIL,
		    "Unable to allocate %zu bytes of memory for remote connection locks.",
		    sizeof(hio_remote_lock[0]) * ncomps);
	}

	/*
	 * Initialize lists, their counters, locks and condition variables.
	 */
	TAILQ_INIT(&hio_free_list);
	mtx_init(&hio_free_list_lock);
	cv_init(&hio_free_list_cond);
	for (ii = 0; ii < HAST_NCOMPONENTS; ii++) {
		TAILQ_INIT(&hio_send_list[ii]);
		hio_send_list_size[ii] = 0;
		mtx_init(&hio_send_list_lock[ii]);
		cv_init(&hio_send_list_cond[ii]);
		TAILQ_INIT(&hio_recv_list[ii]);
		hio_recv_list_size[ii] = 0;
		mtx_init(&hio_recv_list_lock[ii]);
		cv_init(&hio_recv_list_cond[ii]);
		rw_init(&hio_remote_lock[ii]);
	}
	TAILQ_INIT(&hio_done_list);
	mtx_init(&hio_done_list_lock);
	cv_init(&hio_done_list_cond);
	mtx_init(&metadata_lock);

	/*
	 * Allocate requests pool and initialize requests.
	 */
	for (ii = 0; ii < HAST_HIO_MAX; ii++) {
		hio = malloc(sizeof(*hio));
		if (hio == NULL) {
			primary_exitx(EX_TEMPFAIL,
			    "Unable to allocate %zu bytes of memory for hio request.",
			    sizeof(*hio));
		}
		refcnt_init(&hio->hio_countdown, 0);
		hio->hio_errors = malloc(sizeof(hio->hio_errors[0]) * ncomps);
		if (hio->hio_errors == NULL) {
			primary_exitx(EX_TEMPFAIL,
			    "Unable to allocate %zu bytes of memory for hio errors.",
			    sizeof(hio->hio_errors[0]) * ncomps);
		}
		hio->hio_next = malloc(sizeof(hio->hio_next[0]) * ncomps);
		if (hio->hio_next == NULL) {
			primary_exitx(EX_TEMPFAIL,
			    "Unable to allocate %zu bytes of memory for hio_next field.",
			    sizeof(hio->hio_next[0]) * ncomps);
		}
		hio->hio_ggio.gctl_version = G_GATE_VERSION;
		hio->hio_ggio.gctl_data = malloc(MAXPHYS);
		if (hio->hio_ggio.gctl_data == NULL) {
			primary_exitx(EX_TEMPFAIL,
			    "Unable to allocate %zu bytes of memory for gctl_data.",
			    MAXPHYS);
		}
		hio->hio_ggio.gctl_length = MAXPHYS;
		hio->hio_ggio.gctl_error = 0;
		TAILQ_INSERT_HEAD(&hio_free_list, hio, hio_free_next);
		hio_free_list_size++;
	}
}
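
/*
 * The request pool above is fixed at HAST_HIO_MAX (256) entries, each
 * with a MAXPHYS-sized data buffer. When the free list is exhausted,
 * ggate_recv_thread simply blocks in QUEUE_TAKE2(hio, free) until an
 * in-progress request completes, which bounds memory usage and acts as
 * a natural throttle on outstanding I/O.
 */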

static bool
init_resuid(struct hast_resource *res)
{

	mtx_lock(&metadata_lock);
	if (res->hr_resuid != 0) {
		mtx_unlock(&metadata_lock);
		return (false);
	} else {
		/* Initialize unique resource identifier. */
		arc4random_buf(&res->hr_resuid, sizeof(res->hr_resuid));
		mtx_unlock(&metadata_lock);
		if (metadata_write(res) == -1)
			exit(EX_NOINPUT);
		return (true);
	}
}

static void
init_local(struct hast_resource *res)
{
	unsigned char *buf;
	size_t mapsize;

	if (metadata_read(res, true) == -1)
		exit(EX_NOINPUT);
	mtx_init(&res->hr_amp_lock);
	if (activemap_init(&res->hr_amp, res->hr_datasize, res->hr_extentsize,
	    res->hr_local_sectorsize, res->hr_keepdirty) == -1) {
		primary_exit(EX_TEMPFAIL, "Unable to create activemap");
	}
	mtx_init(&range_lock);
	cv_init(&range_regular_cond);
	if (rangelock_init(&range_regular) == -1)
		primary_exit(EX_TEMPFAIL, "Unable to create regular range lock");
	cv_init(&range_sync_cond);
	if (rangelock_init(&range_sync) == -1)
		primary_exit(EX_TEMPFAIL, "Unable to create sync range lock");
	mapsize = activemap_ondisk_size(res->hr_amp);
	buf = calloc(1, mapsize);
	if (buf == NULL) {
		primary_exitx(EX_TEMPFAIL,
		    "Unable to allocate buffer for activemap.");
	}
	if (pread(res->hr_localfd, buf, mapsize, METADATA_SIZE) !=
	    (ssize_t)mapsize) {
		primary_exit(EX_NOINPUT, "Unable to read activemap");
	}
	activemap_copyin(res->hr_amp, buf, mapsize);
	free(buf);
	if (res->hr_resuid != 0)
		return;
	/*
	 * We're using the provider for the first time. Initialize local and
	 * remote counters. We don't initialize resuid here, as we want to do
	 * it just in time. The reason for this is that we want to inform
	 * secondary that there were no writes yet, so there is no need to
	 * synchronize anything.
	 */
	res->hr_primary_localcnt = 0;
	res->hr_primary_remotecnt = 0;
	if (metadata_write(res) == -1)
		exit(EX_NOINPUT);
}

static int
primary_connect(struct hast_resource *res, struct proto_conn **connp)
{
	struct proto_conn *conn;
	int16_t val;

	val = 1;
	if (proto_send(res->hr_conn, &val, sizeof(val)) == -1) {
		primary_exit(EX_TEMPFAIL,
		    "Unable to send connection request to parent");
	}
	if (proto_recv(res->hr_conn, &val, sizeof(val)) == -1) {
		primary_exit(EX_TEMPFAIL,
		    "Unable to receive reply to connection request from parent");
	}
	if (val != 0) {
		errno = val;
		pjdlog_errno(LOG_WARNING, "Unable to connect to %s",
		    res->hr_remoteaddr);
		return (-1);
	}
	if (proto_connection_recv(res->hr_conn, true, &conn) == -1) {
		primary_exit(EX_TEMPFAIL,
		    "Unable to receive connection from parent");
	}
	if (proto_connect_wait(conn, res->hr_timeout) == -1) {
		pjdlog_errno(LOG_WARNING, "Unable to connect to %s",
		    res->hr_remoteaddr);
		proto_close(conn);
		return (-1);
	}
	/* Error in setting timeout is not critical, but why should it fail? */
	if (proto_timeout(conn, res->hr_timeout) == -1)
		pjdlog_errno(LOG_WARNING, "Unable to set connection timeout");

	*connp = conn;

	return (0);
}
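
/*
 * Note that primary_connect() does not create the connection itself: it
 * asks the parent process (via hr_conn) to connect, reads back an
 * errno-style status, and then receives the already-created connection
 * with proto_connection_recv(). This keeps network setup in the parent,
 * while the worker itself drops privileges shortly after startup (see
 * drop_privs() in hastd_primary() below).
 */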

/*
 * Function instructs GEOM_GATE to handle reads directly from within the kernel.
 */
static void
enable_direct_reads(struct hast_resource *res)
{
	struct g_gate_ctl_modify ggiomodify;

	bzero(&ggiomodify, sizeof(ggiomodify));
	ggiomodify.gctl_version = G_GATE_VERSION;
	ggiomodify.gctl_unit = res->hr_ggateunit;
	ggiomodify.gctl_modify = GG_MODIFY_READPROV | GG_MODIFY_READOFFSET;
	strlcpy(ggiomodify.gctl_readprov, res->hr_localpath,
	    sizeof(ggiomodify.gctl_readprov));
	ggiomodify.gctl_readoffset = res->hr_localoff;
	if (ioctl(res->hr_ggatefd, G_GATE_CMD_MODIFY, &ggiomodify) == 0)
		pjdlog_debug(1, "Direct reads enabled.");
	else
		pjdlog_errno(LOG_WARNING, "Failed to enable direct reads");
}

static int
init_remote(struct hast_resource *res, struct proto_conn **inp,
    struct proto_conn **outp)
{
	struct proto_conn *in, *out;
	struct nv *nvout, *nvin;
	const unsigned char *token;
	unsigned char *map;
	const char *errmsg;
	int32_t extentsize;
	int64_t datasize;
	uint32_t mapsize;
	uint8_t version;
	size_t size;
	int error;

	PJDLOG_ASSERT((inp == NULL && outp == NULL) ||
	    (inp != NULL && outp != NULL));
	PJDLOG_ASSERT(real_remote(res));

	in = out = NULL;
	errmsg = NULL;

	if (primary_connect(res, &out) == -1)
		return (ECONNREFUSED);

	error = ECONNABORTED;

	/*
	 * First handshake step.
	 * Setup outgoing connection with remote node.
	 */
	nvout = nv_alloc();
	nv_add_string(nvout, res->hr_name, "resource");
	nv_add_uint8(nvout, HAST_PROTO_VERSION, "version");
	if (nv_error(nvout) != 0) {
		pjdlog_common(LOG_WARNING, 0, nv_error(nvout),
		    "Unable to allocate header for connection with %s",
		    res->hr_remoteaddr);
		nv_free(nvout);
		goto close;
	}
	if (hast_proto_send(res, out, nvout, NULL, 0) == -1) {
		pjdlog_errno(LOG_WARNING,
		    "Unable to send handshake header to %s",
		    res->hr_remoteaddr);
		nv_free(nvout);
		goto close;
	}
	nv_free(nvout);
	if (hast_proto_recv_hdr(out, &nvin) == -1) {
		pjdlog_errno(LOG_WARNING,
		    "Unable to receive handshake header from %s",
		    res->hr_remoteaddr);
		goto close;
	}
	errmsg = nv_get_string(nvin, "errmsg");
	if (errmsg != NULL) {
		pjdlog_warning("%s", errmsg);
		if (nv_exists(nvin, "wait"))
			error = EBUSY;
		nv_free(nvin);
		goto close;
	}
	version = nv_get_uint8(nvin, "version");
	if (version == 0) {
		/*
		 * If no version is sent, it means this is protocol version 1.
		 */
		version = 1;
	}
	if (version > HAST_PROTO_VERSION) {
		pjdlog_warning("Invalid version received (%hhu).", version);
		nv_free(nvin);
		goto close;
	}
	res->hr_version = version;
	pjdlog_debug(1, "Negotiated protocol version %d.", res->hr_version);
	token = nv_get_uint8_array(nvin, &size, "token");
	if (token == NULL) {
		pjdlog_warning("Handshake header from %s has no 'token' field.",
		    res->hr_remoteaddr);
		nv_free(nvin);
		goto close;
	}
	if (size != sizeof(res->hr_token)) {
		pjdlog_warning("Handshake header from %s contains 'token' of wrong size (got %zu, expected %zu).",
		    res->hr_remoteaddr, size, sizeof(res->hr_token));
		nv_free(nvin);
		goto close;
	}
	bcopy(token, res->hr_token, sizeof(res->hr_token));
	nv_free(nvin);

	/*
	 * Second handshake step.
	 * Setup incoming connection with remote node.
	 */
	if (primary_connect(res, &in) == -1)
		goto close;

	nvout = nv_alloc();
	nv_add_string(nvout, res->hr_name, "resource");
	nv_add_uint8_array(nvout, res->hr_token, sizeof(res->hr_token),
	    "token");
	if (res->hr_resuid == 0) {
		/*
		 * The resuid field was not yet initialized.
		 * Because we do synchronization inside init_resuid(), it is
		 * possible that someone already initialized it, the function
		 * will return false then, but if we successfully initialized
		 * it, we will get true. True means that there were no writes
		 * to this resource yet and we want to inform secondary that
		 * synchronization is not needed by sending "virgin" argument.
		 */
		if (init_resuid(res))
			nv_add_int8(nvout, 1, "virgin");
	}
	nv_add_uint64(nvout, res->hr_resuid, "resuid");
	nv_add_uint64(nvout, res->hr_primary_localcnt, "localcnt");
	nv_add_uint64(nvout, res->hr_primary_remotecnt, "remotecnt");
	if (nv_error(nvout) != 0) {
		pjdlog_common(LOG_WARNING, 0, nv_error(nvout),
		    "Unable to allocate header for connection with %s",
		    res->hr_remoteaddr);
		nv_free(nvout);
		goto close;
	}
	if (hast_proto_send(res, in, nvout, NULL, 0) == -1) {
		pjdlog_errno(LOG_WARNING,
		    "Unable to send handshake header to %s",
		    res->hr_remoteaddr);
		nv_free(nvout);
		goto close;
	}
	nv_free(nvout);
	if (hast_proto_recv_hdr(out, &nvin) == -1) {
		pjdlog_errno(LOG_WARNING,
		    "Unable to receive handshake header from %s",
		    res->hr_remoteaddr);
		goto close;
	}
	errmsg = nv_get_string(nvin, "errmsg");
	if (errmsg != NULL) {
		pjdlog_warning("%s", errmsg);
		nv_free(nvin);
		goto close;
	}
	datasize = nv_get_int64(nvin, "datasize");
	if (datasize != res->hr_datasize) {
		pjdlog_warning("Data size differs between nodes (local=%jd, remote=%jd).",
		    (intmax_t)res->hr_datasize, (intmax_t)datasize);
		nv_free(nvin);
		goto close;
	}
	extentsize = nv_get_int32(nvin, "extentsize");
	if (extentsize != res->hr_extentsize) {
		pjdlog_warning("Extent size differs between nodes (local=%zd, remote=%zd).",
		    (ssize_t)res->hr_extentsize, (ssize_t)extentsize);
		nv_free(nvin);
		goto close;
	}
	res->hr_secondary_localcnt = nv_get_uint64(nvin, "localcnt");
	res->hr_secondary_remotecnt = nv_get_uint64(nvin, "remotecnt");
	res->hr_syncsrc = nv_get_uint8(nvin, "syncsrc");
	if (res->hr_syncsrc == HAST_SYNCSRC_PRIMARY)
		enable_direct_reads(res);
	if (nv_exists(nvin, "virgin")) {
		/*
		 * Secondary was reinitialized, bump localcnt if it is 0 as
		 * only we have the data.
		 */
		PJDLOG_ASSERT(res->hr_syncsrc == HAST_SYNCSRC_PRIMARY);
		PJDLOG_ASSERT(res->hr_secondary_localcnt == 0);

		if (res->hr_primary_localcnt == 0) {
			PJDLOG_ASSERT(res->hr_secondary_remotecnt == 0);

			mtx_lock(&metadata_lock);
			res->hr_primary_localcnt++;
			pjdlog_debug(1, "Increasing localcnt to %ju.",
			    (uintmax_t)res->hr_primary_localcnt);
			(void)metadata_write(res);
			mtx_unlock(&metadata_lock);
		}
	}
	map = NULL;
	mapsize = nv_get_uint32(nvin, "mapsize");
	if (mapsize > 0) {
		map = malloc(mapsize);
		if (map == NULL) {
			pjdlog_error("Unable to allocate memory for remote activemap (mapsize=%ju).",
			    (uintmax_t)mapsize);
			nv_free(nvin);
			goto close;
		}
		/*
		 * Remote node has some dirty extents of its own, let's
		 * download its activemap.
		 */
		if (hast_proto_recv_data(res, out, nvin, map,
		    mapsize) == -1) {
			pjdlog_errno(LOG_ERR,
			    "Unable to receive remote activemap");
			nv_free(nvin);
			free(map);
			goto close;
		}
		mtx_lock(&res->hr_amp_lock);
		/*
		 * Merge local and remote bitmaps.
		 */
		activemap_merge(res->hr_amp, map, mapsize);
		free(map);
		/*
		 * Now that we merged bitmaps from both nodes, flush the result
		 * to disk before we start to synchronize.
		 */
		(void)hast_activemap_flush(res);
	}
	nv_free(nvin);
#ifdef notyet
	/* Setup directions. */
	if (proto_send(out, NULL, 0) == -1)
		pjdlog_errno(LOG_WARNING, "Unable to set connection direction");
	if (proto_recv(in, NULL, 0) == -1)
		pjdlog_errno(LOG_WARNING, "Unable to set connection direction");
#endif
	pjdlog_info("Connected to %s.", res->hr_remoteaddr);
	if (res->hr_original_replication == HAST_REPLICATION_MEMSYNC &&
	    res->hr_version < 2) {
		pjdlog_warning("The 'memsync' replication mode is not supported by the remote node, falling back to 'fullsync' mode.");
		res->hr_replication = HAST_REPLICATION_FULLSYNC;
	} else if (res->hr_replication != res->hr_original_replication) {
		/*
		 * This is in case hastd disconnected and was upgraded.
		 */
		res->hr_replication = res->hr_original_replication;
	}
	if (inp != NULL && outp != NULL) {
		*inp = in;
		*outp = out;
	} else {
		res->hr_remotein = in;
		res->hr_remoteout = out;
	}
	event_send(res, EVENT_CONNECT);
	return (0);
close:
	if (errmsg != NULL && strcmp(errmsg, "Split-brain condition!") == 0)
		event_send(res, EVENT_SPLITBRAIN);
	proto_close(out);
	if (in != NULL)
		proto_close(in);
	return (error);
}
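
/*
 * To summarize the handshake above: the primary sets up two connections
 * (outgoing first, then incoming), authenticating the second one with
 * the token obtained on the first. The nodes then exchange resuid,
 * localcnt and remotecnt; the secondary derives "syncsrc" from them
 * (the node holding the up-to-date data) and, if it has dirty extents
 * of its own, sends its activemap so both bitmaps can be merged before
 * synchronization starts.
 */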

static void
sync_start(void)
{

	mtx_lock(&sync_lock);
	sync_inprogress = true;
	mtx_unlock(&sync_lock);
	cv_signal(&sync_cond);
}

static void
sync_stop(void)
{

	mtx_lock(&sync_lock);
	if (sync_inprogress)
		sync_inprogress = false;
	mtx_unlock(&sync_lock);
}

static void
init_ggate(struct hast_resource *res)
{
	struct g_gate_ctl_create ggiocreate;
	struct g_gate_ctl_cancel ggiocancel;

	/*
	 * We communicate with ggate via /dev/ggctl. Open it.
	 */
	res->hr_ggatefd = open("/dev/" G_GATE_CTL_NAME, O_RDWR);
	if (res->hr_ggatefd == -1)
		primary_exit(EX_OSFILE, "Unable to open /dev/" G_GATE_CTL_NAME);
	/*
	 * Create provider before trying to connect, as connection failure
	 * is not critical, but may take some time.
	 */
	bzero(&ggiocreate, sizeof(ggiocreate));
	ggiocreate.gctl_version = G_GATE_VERSION;
	ggiocreate.gctl_mediasize = res->hr_datasize;
	ggiocreate.gctl_sectorsize = res->hr_local_sectorsize;
	ggiocreate.gctl_flags = 0;
	ggiocreate.gctl_maxcount = 0;
	ggiocreate.gctl_timeout = 0;
	ggiocreate.gctl_unit = G_GATE_NAME_GIVEN;
	snprintf(ggiocreate.gctl_name, sizeof(ggiocreate.gctl_name), "hast/%s",
	    res->hr_provname);
	if (ioctl(res->hr_ggatefd, G_GATE_CMD_CREATE, &ggiocreate) == 0) {
		pjdlog_info("Device hast/%s created.", res->hr_provname);
		res->hr_ggateunit = ggiocreate.gctl_unit;
		return;
	}
	if (errno != EEXIST) {
		primary_exit(EX_OSERR, "Unable to create hast/%s device",
		    res->hr_provname);
	}
	pjdlog_debug(1,
	    "Device hast/%s already exists, we will try to take it over.",
	    res->hr_provname);
	/*
	 * If we received EEXIST, we assume that the process that created the
	 * provider died and didn't clean up. In that case we will start from
	 * where it left off.
	 */
	bzero(&ggiocancel, sizeof(ggiocancel));
	ggiocancel.gctl_version = G_GATE_VERSION;
	ggiocancel.gctl_unit = G_GATE_NAME_GIVEN;
	snprintf(ggiocancel.gctl_name, sizeof(ggiocancel.gctl_name), "hast/%s",
	    res->hr_provname);
	if (ioctl(res->hr_ggatefd, G_GATE_CMD_CANCEL, &ggiocancel) == 0) {
		pjdlog_info("Device hast/%s recovered.", res->hr_provname);
		res->hr_ggateunit = ggiocancel.gctl_unit;
		return;
	}
	primary_exit(EX_OSERR, "Unable to take over hast/%s device",
	    res->hr_provname);
}

void
hastd_primary(struct hast_resource *res)
{
	pthread_t td;
	pid_t pid;
	int error, mode, debuglevel;

	/*
	 * Create communication channel for sending control commands from
	 * parent to child.
	 */
	if (proto_client(NULL, "socketpair://", &res->hr_ctrl) == -1) {
		/* TODO: There's no need for this to be fatal error. */
		KEEP_ERRNO((void)pidfile_remove(pfh));
		pjdlog_exit(EX_OSERR,
		    "Unable to create control sockets between parent and child");
	}
	/*
	 * Create communication channel for sending events from child to
	 * parent.
	 */
	if (proto_client(NULL, "socketpair://", &res->hr_event) == -1) {
		/* TODO: There's no need for this to be fatal error. */
		KEEP_ERRNO((void)pidfile_remove(pfh));
		pjdlog_exit(EX_OSERR,
		    "Unable to create event sockets between child and parent");
	}
	/*
	 * Create communication channel for sending connection requests from
	 * child to parent.
	 */
	if (proto_client(NULL, "socketpair://", &res->hr_conn) == -1) {
		/* TODO: There's no need for this to be fatal error. */
		KEEP_ERRNO((void)pidfile_remove(pfh));
		pjdlog_exit(EX_OSERR,
		    "Unable to create connection sockets between child and parent");
	}

	pid = fork();
	if (pid == -1) {
		/* TODO: There's no need for this to be fatal error. */
		KEEP_ERRNO((void)pidfile_remove(pfh));
		pjdlog_exit(EX_TEMPFAIL, "Unable to fork");
	}

	if (pid > 0) {
		/* This is parent. */
		/* Declare that we are receiver. */
		proto_recv(res->hr_event, NULL, 0);
		proto_recv(res->hr_conn, NULL, 0);
		/* Declare that we are sender. */
		proto_send(res->hr_ctrl, NULL, 0);
		res->hr_workerpid = pid;
		return;
	}

	gres = res;
	res->output_status_aux = output_status_aux;
	mode = pjdlog_mode_get();
	debuglevel = pjdlog_debug_get();

	/* Declare that we are sender. */
	proto_send(res->hr_event, NULL, 0);
	proto_send(res->hr_conn, NULL, 0);
	/* Declare that we are receiver. */
	proto_recv(res->hr_ctrl, NULL, 0);
	descriptors_cleanup(res);

	descriptors_assert(res, mode);

	pjdlog_init(mode);
	pjdlog_debug_set(debuglevel);
	pjdlog_prefix_set("[%s] (%s) ", res->hr_name, role2str(res->hr_role));
	setproctitle("%s (%s)", res->hr_name, role2str(res->hr_role));

	init_local(res);
	init_ggate(res);
	init_environment(res);

	if (drop_privs(res) != 0) {
		cleanup(res);
		exit(EX_CONFIG);
	}
	pjdlog_info("Privileges successfully dropped.");

	/*
	 * Create the guard thread first, so we can handle signals from the
	 * very beginning.
	 */
	error = pthread_create(&td, NULL, guard_thread, res);
	PJDLOG_ASSERT(error == 0);
	/*
	 * Create the control thread before sending any event to the parent,
	 * as we can deadlock when parent sends control request to worker,
	 * but worker has no control thread started yet, so parent waits.
	 * In the meantime worker sends an event to the parent, but parent
	 * is unable to handle the event, because it waits for control
	 * request response.
	 */
	error = pthread_create(&td, NULL, ctrl_thread, res);
	PJDLOG_ASSERT(error == 0);
	if (real_remote(res)) {
		error = init_remote(res, NULL, NULL);
		if (error == 0) {
			sync_start();
		} else if (error == EBUSY) {
			time_t start = time(NULL);

			pjdlog_warning("Waiting for remote node to become %s for %ds.",
			    role2str(HAST_ROLE_SECONDARY),
			    res->hr_timeout);
			for (;;) {
				sleep(1);
				error = init_remote(res, NULL, NULL);
				if (error != EBUSY)
					break;
				if (time(NULL) > start + res->hr_timeout)
					break;
			}
			if (error == EBUSY) {
				pjdlog_warning("Remote node is still %s, starting anyway.",
				    role2str(HAST_ROLE_PRIMARY));
			}
		}
	}
	error = pthread_create(&td, NULL, ggate_recv_thread, res);
	PJDLOG_ASSERT(error == 0);
	error = pthread_create(&td, NULL, local_send_thread, res);
	PJDLOG_ASSERT(error == 0);
	error = pthread_create(&td, NULL, remote_send_thread, res);
	PJDLOG_ASSERT(error == 0);
	error = pthread_create(&td, NULL, remote_recv_thread, res);
	PJDLOG_ASSERT(error == 0);
	error = pthread_create(&td, NULL, ggate_send_thread, res);
	PJDLOG_ASSERT(error == 0);
	fullystarted = true;
	(void)sync_thread(res);
}
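
/*
 * At this point the worker consists of: the guard thread (created first
 * so signals are handled from the very beginning), the control thread
 * (ctrl_thread, serving requests from the parent), the I/O pipeline
 * threads ggate_recv, local_send, remote_send, remote_recv and
 * ggate_send, plus the main thread, which runs sync_thread() itself.
 */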

static void
reqlog(int loglevel, int debuglevel, struct g_gate_ctl_io *ggio,
    const char *fmt, ...)
{
	char msg[1024];
	va_list ap;

	va_start(ap, fmt);
	(void)vsnprintf(msg, sizeof(msg), fmt, ap);
	va_end(ap);
	switch (ggio->gctl_cmd) {
	case BIO_READ:
		(void)snprlcat(msg, sizeof(msg), "READ(%ju, %ju).",
		    (uintmax_t)ggio->gctl_offset, (uintmax_t)ggio->gctl_length);
		break;
	case BIO_DELETE:
		(void)snprlcat(msg, sizeof(msg), "DELETE(%ju, %ju).",
		    (uintmax_t)ggio->gctl_offset, (uintmax_t)ggio->gctl_length);
		break;
	case BIO_FLUSH:
		(void)snprlcat(msg, sizeof(msg), "FLUSH.");
		break;
	case BIO_WRITE:
		(void)snprlcat(msg, sizeof(msg), "WRITE(%ju, %ju).",
		    (uintmax_t)ggio->gctl_offset, (uintmax_t)ggio->gctl_length);
		break;
	default:
		(void)snprlcat(msg, sizeof(msg), "UNKNOWN(%u).",
		    (unsigned int)ggio->gctl_cmd);
		break;
	}
	pjdlog_common(loglevel, debuglevel, -1, "%s", msg);
}

static void
remote_close(struct hast_resource *res, int ncomp)
{

	rw_wlock(&hio_remote_lock[ncomp]);
	/*
	 * Check for a race between dropping rlock and acquiring wlock -
	 * another thread can close connection in-between.
	 */
	if (!ISCONNECTED(res, ncomp)) {
		PJDLOG_ASSERT(res->hr_remotein == NULL);
		PJDLOG_ASSERT(res->hr_remoteout == NULL);
		rw_unlock(&hio_remote_lock[ncomp]);
		return;
	}

	PJDLOG_ASSERT(res->hr_remotein != NULL);
	PJDLOG_ASSERT(res->hr_remoteout != NULL);

	pjdlog_debug(2, "Closing incoming connection to %s.",
	    res->hr_remoteaddr);
	proto_close(res->hr_remotein);
	res->hr_remotein = NULL;
	pjdlog_debug(2, "Closing outgoing connection to %s.",
	    res->hr_remoteaddr);
	proto_close(res->hr_remoteout);
	res->hr_remoteout = NULL;

	rw_unlock(&hio_remote_lock[ncomp]);

	pjdlog_warning("Disconnected from %s.", res->hr_remoteaddr);

	/*
	 * Stop synchronization if in-progress.
	 */
	sync_stop();

	event_send(res, EVENT_DISCONNECT);
}

/*
 * Acknowledge write completion to the kernel, but don't update activemap yet.
 */
static void
write_complete(struct hast_resource *res, struct hio *hio)
{
	struct g_gate_ctl_io *ggio;
	unsigned int ncomp;

	PJDLOG_ASSERT(!hio->hio_done);

	ggio = &hio->hio_ggio;
	PJDLOG_ASSERT(ggio->gctl_cmd == BIO_WRITE);

	/*
	 * Bump local count if this is first write after
	 * connection failure with remote node.
	 */
	ncomp = 1;
	rw_rlock(&hio_remote_lock[ncomp]);
	if (!ISCONNECTED(res, ncomp)) {
		mtx_lock(&metadata_lock);
		if (res->hr_primary_localcnt == res->hr_secondary_remotecnt) {
			res->hr_primary_localcnt++;
			pjdlog_debug(1, "Increasing localcnt to %ju.",
			    (uintmax_t)res->hr_primary_localcnt);
			(void)metadata_write(res);
		}
		mtx_unlock(&metadata_lock);
	}
	rw_unlock(&hio_remote_lock[ncomp]);
	if (ioctl(res->hr_ggatefd, G_GATE_CMD_DONE, ggio) == -1)
		primary_exit(EX_OSERR, "G_GATE_CMD_DONE failed");
	hio->hio_done = true;
}
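
/*
 * Memsync writes (see ISMEMSYNCWRITE) use hio_writecount in addition to
 * hio_countdown: the counter starts at the number of components, the
 * local write releases one reference and the secondary's "received"
 * acknowledgement releases the other, so write_complete() is called as
 * soon as the data sits in the remote node's memory. The request is
 * then kept on the remote recv list until the final reply arrives.
 */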

/*
 * Thread receives ggate I/O requests from the kernel and passes them to
 * appropriate threads:
 * WRITE - always goes to both local_send and remote_send threads
 * READ (when the block is up-to-date on local component) -
 *	only local_send thread
 * READ (when the block isn't up-to-date on local component) -
 *	only remote_send thread
 * DELETE - always goes to both local_send and remote_send threads
 * FLUSH - always goes to both local_send and remote_send threads
 */
static void *
ggate_recv_thread(void *arg)
{
	struct hast_resource *res = arg;
	struct g_gate_ctl_io *ggio;
	struct hio *hio;
	unsigned int ii, ncomp, ncomps;
	int error;

	for (;;) {
		pjdlog_debug(2, "ggate_recv: Taking free request.");
		QUEUE_TAKE2(hio, free);
		pjdlog_debug(2, "ggate_recv: (%p) Got free request.", hio);
		ggio = &hio->hio_ggio;
		ggio->gctl_unit = res->hr_ggateunit;
		ggio->gctl_length = MAXPHYS;
		ggio->gctl_error = 0;
		hio->hio_done = false;
		hio->hio_replication = res->hr_replication;
		pjdlog_debug(2,
		    "ggate_recv: (%p) Waiting for request from the kernel.",
		    hio);
		if (ioctl(res->hr_ggatefd, G_GATE_CMD_START, ggio) == -1) {
			if (sigexit_received)
				pthread_exit(NULL);
			primary_exit(EX_OSERR, "G_GATE_CMD_START failed");
		}
		error = ggio->gctl_error;
		switch (error) {
		case 0:
			break;
		case ECANCELED:
			/* Exit gracefully. */
			if (!sigexit_received) {
				pjdlog_debug(2,
				    "ggate_recv: (%p) Received cancel from the kernel.",
				    hio);
				pjdlog_info("Received cancel from the kernel, exiting.");
			}
			pthread_exit(NULL);
		case ENOMEM:
			/*
			 * Buffer too small? Impossible, we allocate MAXPHYS
			 * bytes - request can't be bigger than that.
			 */
			/* FALLTHROUGH */
		case ENXIO:
		default:
			primary_exitx(EX_OSERR, "G_GATE_CMD_START failed: %s.",
			    strerror(error));
		}

		ncomp = 0;
		ncomps = HAST_NCOMPONENTS;

		for (ii = 0; ii < ncomps; ii++)
			hio->hio_errors[ii] = EINVAL;
		reqlog(LOG_DEBUG, 2, ggio,
		    "ggate_recv: (%p) Request received from the kernel: ",
		    hio);

		/*
		 * Inform all components about new write request.
		 * For read request prefer local component unless the given
		 * range is out-of-date, then use remote component.
		 */
		switch (ggio->gctl_cmd) {
		case BIO_READ:
			res->hr_stat_read++;
			ncomps = 1;
			mtx_lock(&metadata_lock);
			if (res->hr_syncsrc == HAST_SYNCSRC_UNDEF ||
			    res->hr_syncsrc == HAST_SYNCSRC_PRIMARY) {
				/*
				 * This range is up-to-date on local component,
				 * so handle request locally.
				 */
				/* Local component is 0 for now. */
				ncomp = 0;
			} else /* if (res->hr_syncsrc ==
			    HAST_SYNCSRC_SECONDARY) */ {
				PJDLOG_ASSERT(res->hr_syncsrc ==
				    HAST_SYNCSRC_SECONDARY);
				/*
				 * This range is out-of-date on local component,
				 * so send request to the remote node.
				 */
				/* Remote component is 1 for now. */
				ncomp = 1;
			}
			mtx_unlock(&metadata_lock);
			break;
		case BIO_WRITE:
			res->hr_stat_write++;
			if (res->hr_resuid == 0 &&
			    res->hr_primary_localcnt == 0) {
				/* This is first write. */
				res->hr_primary_localcnt = 1;
			}
			for (;;) {
				mtx_lock(&range_lock);
				if (rangelock_islocked(range_sync,
				    ggio->gctl_offset, ggio->gctl_length)) {
					pjdlog_debug(2,
					    "regular: Range offset=%jd length=%zu locked.",
					    (intmax_t)ggio->gctl_offset,
					    (size_t)ggio->gctl_length);
					range_regular_wait = true;
					cv_wait(&range_regular_cond, &range_lock);
					range_regular_wait = false;
					mtx_unlock(&range_lock);
					continue;
				}
				if (rangelock_add(range_regular,
				    ggio->gctl_offset, ggio->gctl_length) == -1) {
					mtx_unlock(&range_lock);
					pjdlog_debug(2,
					    "regular: Range offset=%jd length=%zu is already locked, waiting.",
					    (intmax_t)ggio->gctl_offset,
					    (size_t)ggio->gctl_length);
					sleep(1);
					continue;
				}
				mtx_unlock(&range_lock);
				break;
			}
			mtx_lock(&res->hr_amp_lock);
			if (activemap_write_start(res->hr_amp,
			    ggio->gctl_offset, ggio->gctl_length)) {
				res->hr_stat_activemap_update++;
				(void)hast_activemap_flush(res);
			} else {
				mtx_unlock(&res->hr_amp_lock);
			}
			if (ISMEMSYNC(hio)) {
				hio->hio_memsyncacked = false;
				refcnt_init(&hio->hio_writecount, ncomps);
			}
			break;
		case BIO_DELETE:
			res->hr_stat_delete++;
			break;
		case BIO_FLUSH:
			res->hr_stat_flush++;
			break;
		}
		pjdlog_debug(2,
		    "ggate_recv: (%p) Moving request to the send queues.", hio);
		refcnt_init(&hio->hio_countdown, ncomps);
		for (ii = ncomp; ii < ncomps; ii++)
			QUEUE_INSERT1(hio, send, ii);
	}
	/* NOTREACHED */
	return (NULL);
}

/*
 * Thread reads from or writes to local component.
 * If a local read fails, the request is redirected to the remote_send thread.
 */
static void *
local_send_thread(void *arg)
{
	struct hast_resource *res = arg;
	struct g_gate_ctl_io *ggio;
	struct hio *hio;
	unsigned int ncomp, rncomp;
	ssize_t ret;

	/* Local component is 0 for now. */
	ncomp = 0;
	/* Remote component is 1 for now. */
	rncomp = 1;

	for (;;) {
		pjdlog_debug(2, "local_send: Taking request.");
		QUEUE_TAKE1(hio, send, ncomp, 0);
		pjdlog_debug(2, "local_send: (%p) Got request.", hio);
		ggio = &hio->hio_ggio;
		switch (ggio->gctl_cmd) {
		case BIO_READ:
			ret = pread(res->hr_localfd, ggio->gctl_data,
			    ggio->gctl_length,
			    ggio->gctl_offset + res->hr_localoff);
			if (ret == ggio->gctl_length)
				hio->hio_errors[ncomp] = 0;
			else if (!ISSYNCREQ(hio)) {
				/*
				 * If READ failed, try to read from remote node.
				 */
				if (ret == -1) {
					reqlog(LOG_WARNING, 0, ggio,
					    "Local request failed (%s), trying remote node. ",
					    strerror(errno));
				} else if (ret != ggio->gctl_length) {
					reqlog(LOG_WARNING, 0, ggio,
					    "Local request failed (%zd != %jd), trying remote node. ",
					    ret, (intmax_t)ggio->gctl_length);
				}
				QUEUE_INSERT1(hio, send, rncomp);
				continue;
			}
			break;
		case BIO_WRITE:
			ret = pwrite(res->hr_localfd, ggio->gctl_data,
			    ggio->gctl_length,
			    ggio->gctl_offset + res->hr_localoff);
			if (ret == -1) {
				hio->hio_errors[ncomp] = errno;
				reqlog(LOG_WARNING, 0, ggio,
				    "Local request failed (%s): ",
				    strerror(errno));
			} else if (ret != ggio->gctl_length) {
				hio->hio_errors[ncomp] = EIO;
				reqlog(LOG_WARNING, 0, ggio,
				    "Local request failed (%zd != %jd): ",
				    ret, (intmax_t)ggio->gctl_length);
			} else {
				hio->hio_errors[ncomp] = 0;
				if (ISASYNC(hio)) {
					ggio->gctl_error = 0;
					write_complete(res, hio);
				}
			}
			break;
		case BIO_DELETE:
			ret = g_delete(res->hr_localfd,
			    ggio->gctl_offset + res->hr_localoff,
			    ggio->gctl_length);
			if (ret == -1) {
				hio->hio_errors[ncomp] = errno;
				reqlog(LOG_WARNING, 0, ggio,
				    "Local request failed (%s): ",
				    strerror(errno));
			} else {
				hio->hio_errors[ncomp] = 0;
			}
			break;
		case BIO_FLUSH:
			if (!res->hr_localflush) {
				ret = -1;
				errno = EOPNOTSUPP;
				break;
			}
			ret = g_flush(res->hr_localfd);
			if (ret == -1) {
				if (errno == EOPNOTSUPP)
					res->hr_localflush = false;
				hio->hio_errors[ncomp] = errno;
				reqlog(LOG_WARNING, 0, ggio,
				    "Local request failed (%s): ",
				    strerror(errno));
			} else {
				hio->hio_errors[ncomp] = 0;
			}
			break;
		}
		if (ISMEMSYNCWRITE(hio)) {
			if (refcnt_release(&hio->hio_writecount) == 0) {
				write_complete(res, hio);
			}
		}
		if (refcnt_release(&hio->hio_countdown) > 0)
			continue;
		if (ISSYNCREQ(hio)) {
			mtx_lock(&sync_lock);
			SYNCREQDONE(hio);
			mtx_unlock(&sync_lock);
			cv_signal(&sync_cond);
		} else {
			pjdlog_debug(2,
			    "local_send: (%p) Moving request to the done queue.",
			    hio);
			QUEUE_INSERT2(hio, done);
		}
	}
	/* NOTREACHED */
	return (NULL);
}

static void
keepalive_send(struct hast_resource *res, unsigned int ncomp)
{
	struct nv *nv;

	rw_rlock(&hio_remote_lock[ncomp]);

	if (!ISCONNECTED(res, ncomp)) {
		rw_unlock(&hio_remote_lock[ncomp]);
		return;
	}

	PJDLOG_ASSERT(res->hr_remotein != NULL);
	PJDLOG_ASSERT(res->hr_remoteout != NULL);

	nv = nv_alloc();
	nv_add_uint8(nv, HIO_KEEPALIVE, "cmd");
	if (nv_error(nv) != 0) {
		rw_unlock(&hio_remote_lock[ncomp]);
		nv_free(nv);
		pjdlog_debug(1,
		    "keepalive_send: Unable to prepare header to send.");
		return;
	}
	if (hast_proto_send(res, res->hr_remoteout, nv, NULL, 0) == -1) {
		rw_unlock(&hio_remote_lock[ncomp]);
		pjdlog_common(LOG_DEBUG, 1, errno,
		    "keepalive_send: Unable to send request");
		nv_free(nv);
		remote_close(res, ncomp);
		return;
	}

	rw_unlock(&hio_remote_lock[ncomp]);
	nv_free(nv);
	pjdlog_debug(2, "keepalive_send: Request sent.");
}
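
/*
 * keepalive_send() is driven from remote_send_thread() below: taking
 * from the send queue with the HAST_KEEPALIVE timeout returns NULL when
 * the queue stays empty, and that idle path sends a HIO_KEEPALIVE
 * packet at most once per HAST_KEEPALIVE seconds to keep the outgoing
 * connection alive.
 */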

/*
 * Thread sends request to secondary node.
 */
static void *
remote_send_thread(void *arg)
{
	struct hast_resource *res = arg;
	struct g_gate_ctl_io *ggio;
	time_t lastcheck, now;
	struct hio *hio;
	struct nv *nv;
	unsigned int ncomp;
	bool wakeup;
	uint64_t offset, length;
	uint8_t cmd;
	void *data;

	/* Remote component is 1 for now. */
	ncomp = 1;
	lastcheck = time(NULL);

	for (;;) {
		pjdlog_debug(2, "remote_send: Taking request.");
		QUEUE_TAKE1(hio, send, ncomp, HAST_KEEPALIVE);
		if (hio == NULL) {
			now = time(NULL);
			if (lastcheck + HAST_KEEPALIVE <= now) {
				keepalive_send(res, ncomp);
				lastcheck = now;
			}
			continue;
		}
		pjdlog_debug(2, "remote_send: (%p) Got request.", hio);
		ggio = &hio->hio_ggio;
		switch (ggio->gctl_cmd) {
		case BIO_READ:
			cmd = HIO_READ;
			data = NULL;
			offset = ggio->gctl_offset;
			length = ggio->gctl_length;
			break;
		case BIO_WRITE:
			cmd = HIO_WRITE;
			data = ggio->gctl_data;
			offset = ggio->gctl_offset;
			length = ggio->gctl_length;
			break;
		case BIO_DELETE:
			cmd = HIO_DELETE;
			data = NULL;
			offset = ggio->gctl_offset;
			length = ggio->gctl_length;
			break;
		case BIO_FLUSH:
			cmd = HIO_FLUSH;
			data = NULL;
			offset = 0;
			length = 0;
			break;
		default:
			PJDLOG_ABORT("invalid condition");
		}
		nv = nv_alloc();
		nv_add_uint8(nv, cmd, "cmd");
		nv_add_uint64(nv, (uint64_t)ggio->gctl_seq, "seq");
		nv_add_uint64(nv, offset, "offset");
		nv_add_uint64(nv, length, "length");
		if (ISMEMSYNCWRITE(hio))
			nv_add_uint8(nv, 1, "memsync");
		if (nv_error(nv) != 0) {
			hio->hio_errors[ncomp] = nv_error(nv);
			pjdlog_debug(2,
			    "remote_send: (%p) Unable to prepare header to send.",
			    hio);
			reqlog(LOG_ERR, 0, ggio,
			    "Unable to prepare header to send (%s): ",
			    strerror(nv_error(nv)));
			/* Move failed request immediately to the done queue. */
			goto done_queue;
		}
		/*
		 * Protect connection from disappearing.
		 */
		rw_rlock(&hio_remote_lock[ncomp]);
		if (!ISCONNECTED(res, ncomp)) {
			rw_unlock(&hio_remote_lock[ncomp]);
			hio->hio_errors[ncomp] = ENOTCONN;
			goto done_queue;
		}
		/*
		 * Move the request to the recv queue before sending it,
		 * because otherwise we could get the reply before the request
		 * is on the recv queue.
		 */
		pjdlog_debug(2,
		    "remote_send: (%p) Moving request to the recv queue.",
		    hio);
		mtx_lock(&hio_recv_list_lock[ncomp]);
		wakeup = TAILQ_EMPTY(&hio_recv_list[ncomp]);
		TAILQ_INSERT_TAIL(&hio_recv_list[ncomp], hio, hio_next[ncomp]);
		hio_recv_list_size[ncomp]++;
		mtx_unlock(&hio_recv_list_lock[ncomp]);
		if (hast_proto_send(res, res->hr_remoteout, nv, data,
		    data != NULL ? length : 0) == -1) {
			hio->hio_errors[ncomp] = errno;
			rw_unlock(&hio_remote_lock[ncomp]);
			pjdlog_debug(2,
			    "remote_send: (%p) Unable to send request.", hio);
			reqlog(LOG_ERR, 0, ggio,
			    "Unable to send request (%s): ",
			    strerror(hio->hio_errors[ncomp]));
			remote_close(res, ncomp);
		} else {
			rw_unlock(&hio_remote_lock[ncomp]);
		}
		nv_free(nv);
		if (wakeup)
			cv_signal(&hio_recv_list_cond[ncomp]);
		continue;
done_queue:
		nv_free(nv);
		if (ISSYNCREQ(hio)) {
			if (refcnt_release(&hio->hio_countdown) > 0)
				continue;
			mtx_lock(&sync_lock);
			SYNCREQDONE(hio);
			mtx_unlock(&sync_lock);
			cv_signal(&sync_cond);
			continue;
		}
		if (ggio->gctl_cmd == BIO_WRITE) {
			mtx_lock(&res->hr_amp_lock);
			if (activemap_need_sync(res->hr_amp, ggio->gctl_offset,
			    ggio->gctl_length)) {
				(void)hast_activemap_flush(res);
			} else {
				mtx_unlock(&res->hr_amp_lock);
			}
			if (ISMEMSYNCWRITE(hio)) {
				if (refcnt_release(&hio->hio_writecount) == 0) {
					if (hio->hio_errors[0] == 0)
						write_complete(res, hio);
				}
			}
		}
		if (refcnt_release(&hio->hio_countdown) > 0)
			continue;
		pjdlog_debug(2,
		    "remote_send: (%p) Moving request to the done queue.",
		    hio);
		QUEUE_INSERT2(hio, done);
	}
	/* NOTREACHED */
	return (NULL);
}

/*
 * Thread receives answer from secondary node and passes it to ggate_send
 * thread.
 */
static void *
remote_recv_thread(void *arg)
{
	struct hast_resource *res = arg;
	struct g_gate_ctl_io *ggio;
	struct hio *hio;
	struct nv *nv;
	unsigned int ncomp;
	uint64_t seq;
	bool memsyncack;
	int error;

	/* Remote component is 1 for now. */
	ncomp = 1;

	for (;;) {
		/* Wait until there is anything to receive. */
		mtx_lock(&hio_recv_list_lock[ncomp]);
		while (TAILQ_EMPTY(&hio_recv_list[ncomp])) {
			pjdlog_debug(2, "remote_recv: No requests, waiting.");
			cv_wait(&hio_recv_list_cond[ncomp],
			    &hio_recv_list_lock[ncomp]);
		}
		mtx_unlock(&hio_recv_list_lock[ncomp]);

		memsyncack = false;

		rw_rlock(&hio_remote_lock[ncomp]);
		if (!ISCONNECTED(res, ncomp)) {
			rw_unlock(&hio_remote_lock[ncomp]);
			/*
			 * Connection is dead, so move all pending requests to
			 * the done queue (one-by-one).
			 */
			mtx_lock(&hio_recv_list_lock[ncomp]);
			hio = TAILQ_FIRST(&hio_recv_list[ncomp]);
			PJDLOG_ASSERT(hio != NULL);
			TAILQ_REMOVE(&hio_recv_list[ncomp], hio,
			    hio_next[ncomp]);
			hio_recv_list_size[ncomp]--;
			mtx_unlock(&hio_recv_list_lock[ncomp]);
			hio->hio_errors[ncomp] = ENOTCONN;
			goto done_queue;
		}
		if (hast_proto_recv_hdr(res->hr_remotein, &nv) == -1) {
			pjdlog_errno(LOG_ERR,
			    "Unable to receive reply header");
			rw_unlock(&hio_remote_lock[ncomp]);
			remote_close(res, ncomp);
			continue;
		}
		rw_unlock(&hio_remote_lock[ncomp]);
		seq = nv_get_uint64(nv, "seq");
		if (seq == 0) {
			pjdlog_error("Header contains no 'seq' field.");
			nv_free(nv);
			continue;
		}
		memsyncack = nv_exists(nv, "received");
		mtx_lock(&hio_recv_list_lock[ncomp]);
		TAILQ_FOREACH(hio, &hio_recv_list[ncomp], hio_next[ncomp]) {
			if (hio->hio_ggio.gctl_seq == seq) {
				TAILQ_REMOVE(&hio_recv_list[ncomp], hio,
				    hio_next[ncomp]);
				hio_recv_list_size[ncomp]--;
				break;
			}
		}
		mtx_unlock(&hio_recv_list_lock[ncomp]);
		if (hio == NULL) {
			pjdlog_error("Found no request matching received 'seq' field (%ju).",
			    (uintmax_t)seq);
			nv_free(nv);
			continue;
		}
		ggio = &hio->hio_ggio;
		error = nv_get_int16(nv, "error");
		if (error != 0) {
			/* Request failed on remote side. */
			hio->hio_errors[ncomp] = error;
			reqlog(LOG_WARNING, 0, ggio,
			    "Remote request failed (%s): ", strerror(error));
			nv_free(nv);
			goto done_queue;
		}
		switch (ggio->gctl_cmd) {
		case BIO_READ:
			rw_rlock(&hio_remote_lock[ncomp]);
			if (!ISCONNECTED(res, ncomp)) {
				rw_unlock(&hio_remote_lock[ncomp]);
				nv_free(nv);
				goto done_queue;
			}
			if (hast_proto_recv_data(res, res->hr_remotein, nv,
			    ggio->gctl_data, ggio->gctl_length) == -1) {
				hio->hio_errors[ncomp] = errno;
				pjdlog_errno(LOG_ERR,
				    "Unable to receive reply data");
				rw_unlock(&hio_remote_lock[ncomp]);
				nv_free(nv);
				remote_close(res, ncomp);
				goto done_queue;
			}
			rw_unlock(&hio_remote_lock[ncomp]);
			break;
		case BIO_WRITE:
		case BIO_DELETE:
		case BIO_FLUSH:
			break;
		default:
			PJDLOG_ABORT("invalid condition");
		}
		hio->hio_errors[ncomp] = 0;
		nv_free(nv);
done_queue:
		if (ISMEMSYNCWRITE(hio)) {
			if (!hio->hio_memsyncacked) {
				PJDLOG_ASSERT(memsyncack ||
				    hio->hio_errors[ncomp] != 0);
				/* Remote ack arrived. */
				if (refcnt_release(&hio->hio_writecount) == 0) {
					if (hio->hio_errors[0] == 0)
						write_complete(res, hio);
				}
				hio->hio_memsyncacked = true;
				if (hio->hio_errors[ncomp] == 0) {
					pjdlog_debug(2,
					    "remote_recv: (%p) Moving request "
					    "back to the recv queue.", hio);
					mtx_lock(&hio_recv_list_lock[ncomp]);
					TAILQ_INSERT_TAIL(&hio_recv_list[ncomp],
					    hio, hio_next[ncomp]);
					hio_recv_list_size[ncomp]++;
					mtx_unlock(&hio_recv_list_lock[ncomp]);
					continue;
				}
			} else {
				PJDLOG_ASSERT(!memsyncack);
				/* Remote final reply arrived. */
*/ 1839 } 1840 } 1841 if (refcnt_release(&hio->hio_countdown) > 0) 1842 continue; 1843 if (ISSYNCREQ(hio)) { 1844 mtx_lock(&sync_lock); 1845 SYNCREQDONE(hio); 1846 mtx_unlock(&sync_lock); 1847 cv_signal(&sync_cond); 1848 } else { 1849 pjdlog_debug(2, 1850 "remote_recv: (%p) Moving request to the done queue.", 1851 hio); 1852 QUEUE_INSERT2(hio, done); 1853 } 1854 } 1855 /* NOTREACHED */ 1856 return (NULL); 1857 } 1858 1859 /* 1860 * Thread sends answer to the kernel. 1861 */ 1862 static void * 1863 ggate_send_thread(void *arg) 1864 { 1865 struct hast_resource *res = arg; 1866 struct g_gate_ctl_io *ggio; 1867 struct hio *hio; 1868 unsigned int ii, ncomps; 1869 1870 ncomps = HAST_NCOMPONENTS; 1871 1872 for (;;) { 1873 pjdlog_debug(2, "ggate_send: Taking request."); 1874 QUEUE_TAKE2(hio, done); 1875 pjdlog_debug(2, "ggate_send: (%p) Got request.", hio); 1876 ggio = &hio->hio_ggio; 1877 for (ii = 0; ii < ncomps; ii++) { 1878 if (hio->hio_errors[ii] == 0) { 1879 /* 1880 * One successful request is enough to declare 1881 * success. 1882 */ 1883 ggio->gctl_error = 0; 1884 break; 1885 } 1886 } 1887 if (ii == ncomps) { 1888 /* 1889 * None of the requests were successful. 1890 * Use the error from local component except the 1891 * case when we did only remote request. 1892 */ 1893 if (ggio->gctl_cmd == BIO_READ && 1894 res->hr_syncsrc == HAST_SYNCSRC_SECONDARY) 1895 ggio->gctl_error = hio->hio_errors[1]; 1896 else 1897 ggio->gctl_error = hio->hio_errors[0]; 1898 } 1899 if (ggio->gctl_error == 0 && ggio->gctl_cmd == BIO_WRITE) { 1900 mtx_lock(&res->hr_amp_lock); 1901 if (activemap_write_complete(res->hr_amp, 1902 ggio->gctl_offset, ggio->gctl_length)) { 1903 res->hr_stat_activemap_update++; 1904 (void)hast_activemap_flush(res); 1905 } else { 1906 mtx_unlock(&res->hr_amp_lock); 1907 } 1908 } 1909 if (ggio->gctl_cmd == BIO_WRITE) { 1910 /* 1911 * Unlock range we locked. 1912 */ 1913 mtx_lock(&range_lock); 1914 rangelock_del(range_regular, ggio->gctl_offset, 1915 ggio->gctl_length); 1916 if (range_sync_wait) 1917 cv_signal(&range_sync_cond); 1918 mtx_unlock(&range_lock); 1919 if (!hio->hio_done) 1920 write_complete(res, hio); 1921 } else { 1922 if (ioctl(res->hr_ggatefd, G_GATE_CMD_DONE, ggio) == -1) { 1923 primary_exit(EX_OSERR, 1924 "G_GATE_CMD_DONE failed"); 1925 } 1926 } 1927 if (hio->hio_errors[0]) { 1928 switch (ggio->gctl_cmd) { 1929 case BIO_READ: 1930 res->hr_stat_read_error++; 1931 break; 1932 case BIO_WRITE: 1933 res->hr_stat_write_error++; 1934 break; 1935 case BIO_DELETE: 1936 res->hr_stat_delete_error++; 1937 break; 1938 case BIO_FLUSH: 1939 res->hr_stat_flush_error++; 1940 break; 1941 } 1942 } 1943 pjdlog_debug(2, 1944 "ggate_send: (%p) Moving request to the free queue.", hio); 1945 QUEUE_INSERT2(hio, free); 1946 } 1947 /* NOTREACHED */ 1948 return (NULL); 1949 } 1950 1951 /* 1952 * Thread synchronize local and remote components. 1953 */ 1954 static void * 1955 sync_thread(void *arg __unused) 1956 { 1957 struct hast_resource *res = arg; 1958 struct hio *hio; 1959 struct g_gate_ctl_io *ggio; 1960 struct timeval tstart, tend, tdiff; 1961 unsigned int ii, ncomp, ncomps; 1962 off_t offset, length, synced; 1963 bool dorewind, directreads; 1964 int syncext; 1965 1966 ncomps = HAST_NCOMPONENTS; 1967 dorewind = true; 1968 synced = 0; 1969 offset = -1; 1970 directreads = false; 1971 1972 for (;;) { 1973 mtx_lock(&sync_lock); 1974 if (offset >= 0 && !sync_inprogress) { 1975 gettimeofday(&tend, NULL); 1976 timersub(&tend, &tstart, &tdiff); 1977 pjdlog_info("Synchronization interrupted after %#.0T. 
" 1978 "%NB synchronized so far.", &tdiff, 1979 (intmax_t)synced); 1980 event_send(res, EVENT_SYNCINTR); 1981 } 1982 while (!sync_inprogress) { 1983 dorewind = true; 1984 synced = 0; 1985 cv_wait(&sync_cond, &sync_lock); 1986 } 1987 mtx_unlock(&sync_lock); 1988 /* 1989 * Obtain offset at which we should synchronize. 1990 * Rewind synchronization if needed. 1991 */ 1992 mtx_lock(&res->hr_amp_lock); 1993 if (dorewind) 1994 activemap_sync_rewind(res->hr_amp); 1995 offset = activemap_sync_offset(res->hr_amp, &length, &syncext); 1996 if (syncext != -1) { 1997 /* 1998 * We synchronized entire syncext extent, we can mark 1999 * it as clean now. 2000 */ 2001 if (activemap_extent_complete(res->hr_amp, syncext)) 2002 (void)hast_activemap_flush(res); 2003 else 2004 mtx_unlock(&res->hr_amp_lock); 2005 } else { 2006 mtx_unlock(&res->hr_amp_lock); 2007 } 2008 if (dorewind) { 2009 dorewind = false; 2010 if (offset == -1) 2011 pjdlog_info("Nodes are in sync."); 2012 else { 2013 pjdlog_info("Synchronization started. %NB to go.", 2014 (intmax_t)(res->hr_extentsize * 2015 activemap_ndirty(res->hr_amp))); 2016 event_send(res, EVENT_SYNCSTART); 2017 gettimeofday(&tstart, NULL); 2018 } 2019 } 2020 if (offset == -1) { 2021 sync_stop(); 2022 pjdlog_debug(1, "Nothing to synchronize."); 2023 /* 2024 * Synchronization complete, make both localcnt and 2025 * remotecnt equal. 2026 */ 2027 ncomp = 1; 2028 rw_rlock(&hio_remote_lock[ncomp]); 2029 if (ISCONNECTED(res, ncomp)) { 2030 if (synced > 0) { 2031 int64_t bps; 2032 2033 gettimeofday(&tend, NULL); 2034 timersub(&tend, &tstart, &tdiff); 2035 bps = (int64_t)((double)synced / 2036 ((double)tdiff.tv_sec + 2037 (double)tdiff.tv_usec / 1000000)); 2038 pjdlog_info("Synchronization complete. " 2039 "%NB synchronized in %#.0lT (%NB/sec).", 2040 (intmax_t)synced, &tdiff, 2041 (intmax_t)bps); 2042 event_send(res, EVENT_SYNCDONE); 2043 } 2044 mtx_lock(&metadata_lock); 2045 if (res->hr_syncsrc == HAST_SYNCSRC_SECONDARY) 2046 directreads = true; 2047 res->hr_syncsrc = HAST_SYNCSRC_UNDEF; 2048 res->hr_primary_localcnt = 2049 res->hr_secondary_remotecnt; 2050 res->hr_primary_remotecnt = 2051 res->hr_secondary_localcnt; 2052 pjdlog_debug(1, 2053 "Setting localcnt to %ju and remotecnt to %ju.", 2054 (uintmax_t)res->hr_primary_localcnt, 2055 (uintmax_t)res->hr_primary_remotecnt); 2056 (void)metadata_write(res); 2057 mtx_unlock(&metadata_lock); 2058 } 2059 rw_unlock(&hio_remote_lock[ncomp]); 2060 if (directreads) { 2061 directreads = false; 2062 enable_direct_reads(res); 2063 } 2064 continue; 2065 } 2066 pjdlog_debug(2, "sync: Taking free request."); 2067 QUEUE_TAKE2(hio, free); 2068 pjdlog_debug(2, "sync: (%p) Got free request.", hio); 2069 /* 2070 * Lock the range we are going to synchronize. We don't want 2071 * race where someone writes between our read and write. 
		/*
		 * Lock the range we are going to synchronize.  We don't want
		 * a race where someone else writes between our read and
		 * write.
		 */
		for (;;) {
			mtx_lock(&range_lock);
			if (rangelock_islocked(range_regular, offset, length)) {
				pjdlog_debug(2,
				    "sync: Range offset=%jd length=%jd locked.",
				    (intmax_t)offset, (intmax_t)length);
				range_sync_wait = true;
				cv_wait(&range_sync_cond, &range_lock);
				range_sync_wait = false;
				mtx_unlock(&range_lock);
				continue;
			}
			if (rangelock_add(range_sync, offset, length) == -1) {
				mtx_unlock(&range_lock);
				pjdlog_debug(2,
				    "sync: Range offset=%jd length=%jd is already locked, waiting.",
				    (intmax_t)offset, (intmax_t)length);
				sleep(1);
				continue;
			}
			mtx_unlock(&range_lock);
			break;
		}
		/*
		 * First read the data from the synchronization source.
		 */
		SYNCREQ(hio);
		ggio = &hio->hio_ggio;
		ggio->gctl_cmd = BIO_READ;
		ggio->gctl_offset = offset;
		ggio->gctl_length = length;
		ggio->gctl_error = 0;
		hio->hio_done = false;
		hio->hio_replication = res->hr_replication;
		for (ii = 0; ii < ncomps; ii++)
			hio->hio_errors[ii] = EINVAL;
		reqlog(LOG_DEBUG, 2, ggio, "sync: (%p) Sending sync request: ",
		    hio);
		pjdlog_debug(2, "sync: (%p) Moving request to the send queue.",
		    hio);
		mtx_lock(&metadata_lock);
		if (res->hr_syncsrc == HAST_SYNCSRC_PRIMARY) {
			/*
			 * This range is up-to-date on the local component,
			 * so handle the request locally.
			 */
			/* Local component is 0 for now. */
			ncomp = 0;
		} else /* if (res->hr_syncsrc == HAST_SYNCSRC_SECONDARY) */ {
			PJDLOG_ASSERT(res->hr_syncsrc == HAST_SYNCSRC_SECONDARY);
			/*
			 * This range is out-of-date on the local component,
			 * so send the request to the remote node.
			 */
			/* Remote component is 1 for now. */
			ncomp = 1;
		}
		mtx_unlock(&metadata_lock);
		refcnt_init(&hio->hio_countdown, 1);
		QUEUE_INSERT1(hio, send, ncomp);

		/*
		 * Let's wait for the READ to finish.
		 */
		mtx_lock(&sync_lock);
		while (!ISSYNCREQDONE(hio))
			cv_wait(&sync_cond, &sync_lock);
		mtx_unlock(&sync_lock);

		if (hio->hio_errors[ncomp] != 0) {
			pjdlog_error("Unable to read synchronization data: %s.",
			    strerror(hio->hio_errors[ncomp]));
			goto free_queue;
		}

		/*
		 * We have read the data from the synchronization source, now
		 * write it to the synchronization target.
		 */
		SYNCREQ(hio);
		ggio->gctl_cmd = BIO_WRITE;
		for (ii = 0; ii < ncomps; ii++)
			hio->hio_errors[ii] = EINVAL;
		reqlog(LOG_DEBUG, 2, ggio, "sync: (%p) Sending sync request: ",
		    hio);
		pjdlog_debug(2, "sync: (%p) Moving request to the send queue.",
		    hio);
		mtx_lock(&metadata_lock);
		if (res->hr_syncsrc == HAST_SYNCSRC_PRIMARY) {
			/*
			 * This range is up-to-date on the local component,
			 * so we update the remote component.
			 */
			/* Remote component is 1 for now. */
			ncomp = 1;
		} else /* if (res->hr_syncsrc == HAST_SYNCSRC_SECONDARY) */ {
			PJDLOG_ASSERT(res->hr_syncsrc == HAST_SYNCSRC_SECONDARY);
			/*
			 * This range is out-of-date on the local component,
			 * so we update it.
			 */
			/* Local component is 0 for now. */
			ncomp = 0;
		}
		mtx_unlock(&metadata_lock);
		refcnt_init(&hio->hio_countdown, 1);
		QUEUE_INSERT1(hio, send, ncomp);
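		/*
		 * Sync requests go to exactly one component, which is why
		 * hio_countdown is initialized to 1 above.  The thread that
		 * releases it to zero marks the request with SYNCREQDONE()
		 * and signals sync_cond, which is what the wait below
		 * sleeps on.
		 */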
		/*
		 * Let's wait for the WRITE to finish.
		 */
		mtx_lock(&sync_lock);
		while (!ISSYNCREQDONE(hio))
			cv_wait(&sync_cond, &sync_lock);
		mtx_unlock(&sync_lock);

		if (hio->hio_errors[ncomp] != 0) {
			pjdlog_error("Unable to write synchronization data: %s.",
			    strerror(hio->hio_errors[ncomp]));
			goto free_queue;
		}

		synced += length;
free_queue:
		mtx_lock(&range_lock);
		rangelock_del(range_sync, offset, length);
		if (range_regular_wait)
			cv_signal(&range_regular_cond);
		mtx_unlock(&range_lock);
		pjdlog_debug(2, "sync: (%p) Moving request to the free queue.",
		    hio);
		QUEUE_INSERT2(hio, free);
	}
	/* NOTREACHED */
	return (NULL);
}

void
primary_config_reload(struct hast_resource *res, struct nv *nv)
{
	unsigned int ii, ncomps;
	int modified, vint;
	const char *vstr;

	pjdlog_info("Reloading configuration...");

	PJDLOG_ASSERT(res->hr_role == HAST_ROLE_PRIMARY);
	PJDLOG_ASSERT(gres == res);
	nv_assert(nv, "remoteaddr");
	nv_assert(nv, "sourceaddr");
	nv_assert(nv, "replication");
	nv_assert(nv, "checksum");
	nv_assert(nv, "compression");
	nv_assert(nv, "timeout");
	nv_assert(nv, "exec");
	nv_assert(nv, "metaflush");

	ncomps = HAST_NCOMPONENTS;

#define MODIFIED_REMOTEADDR	0x01
#define MODIFIED_SOURCEADDR	0x02
#define MODIFIED_REPLICATION	0x04
#define MODIFIED_CHECKSUM	0x08
#define MODIFIED_COMPRESSION	0x10
#define MODIFIED_TIMEOUT	0x20
#define MODIFIED_EXEC		0x40
#define MODIFIED_METAFLUSH	0x80
	modified = 0;

	vstr = nv_get_string(nv, "remoteaddr");
	if (strcmp(gres->hr_remoteaddr, vstr) != 0) {
		/*
		 * Don't copy res->hr_remoteaddr to gres just yet.
		 * We want remote_close() to log the disconnect from the old
		 * addresses, not from the new ones.
		 */
		modified |= MODIFIED_REMOTEADDR;
	}
	vstr = nv_get_string(nv, "sourceaddr");
	if (strcmp(gres->hr_sourceaddr, vstr) != 0) {
		strlcpy(gres->hr_sourceaddr, vstr, sizeof(gres->hr_sourceaddr));
		modified |= MODIFIED_SOURCEADDR;
	}
	vint = nv_get_int32(nv, "replication");
	if (gres->hr_replication != vint) {
		gres->hr_replication = vint;
		modified |= MODIFIED_REPLICATION;
	}
	vint = nv_get_int32(nv, "checksum");
	if (gres->hr_checksum != vint) {
		gres->hr_checksum = vint;
		modified |= MODIFIED_CHECKSUM;
	}
	vint = nv_get_int32(nv, "compression");
	if (gres->hr_compression != vint) {
		gres->hr_compression = vint;
		modified |= MODIFIED_COMPRESSION;
	}
	vint = nv_get_int32(nv, "timeout");
	if (gres->hr_timeout != vint) {
		gres->hr_timeout = vint;
		modified |= MODIFIED_TIMEOUT;
	}
	vstr = nv_get_string(nv, "exec");
	if (strcmp(gres->hr_exec, vstr) != 0) {
		strlcpy(gres->hr_exec, vstr, sizeof(gres->hr_exec));
		modified |= MODIFIED_EXEC;
	}
	vint = nv_get_int32(nv, "metaflush");
	if (gres->hr_metaflush != vint) {
		gres->hr_metaflush = vint;
		modified |= MODIFIED_METAFLUSH;
	}
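	/*
	 * Apply the side effects.  Most new values were already stored in
	 * gres above; what remains is adjusting the timeout on the live
	 * connections and reconnecting if an address changed.  Note that
	 * hr_remoteaddr is deliberately copied only after remote_close()
	 * below, so the disconnect is logged with the old address.
	 */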
	/*
	 * Change the timeout for connected sockets.
	 * Don't bother if we need to reconnect anyway.
	 */
	if ((modified & MODIFIED_TIMEOUT) != 0 &&
	    (modified & (MODIFIED_REMOTEADDR | MODIFIED_SOURCEADDR)) == 0) {
		for (ii = 0; ii < ncomps; ii++) {
			if (!ISREMOTE(ii))
				continue;
			rw_rlock(&hio_remote_lock[ii]);
			if (!ISCONNECTED(gres, ii)) {
				rw_unlock(&hio_remote_lock[ii]);
				continue;
			}
			rw_unlock(&hio_remote_lock[ii]);
			if (proto_timeout(gres->hr_remotein,
			    gres->hr_timeout) == -1) {
				pjdlog_errno(LOG_WARNING,
				    "Unable to set connection timeout");
			}
			if (proto_timeout(gres->hr_remoteout,
			    gres->hr_timeout) == -1) {
				pjdlog_errno(LOG_WARNING,
				    "Unable to set connection timeout");
			}
		}
	}
	if ((modified & (MODIFIED_REMOTEADDR | MODIFIED_SOURCEADDR)) != 0) {
		for (ii = 0; ii < ncomps; ii++) {
			if (!ISREMOTE(ii))
				continue;
			remote_close(gres, ii);
		}
		if (modified & MODIFIED_REMOTEADDR) {
			vstr = nv_get_string(nv, "remoteaddr");
			strlcpy(gres->hr_remoteaddr, vstr,
			    sizeof(gres->hr_remoteaddr));
		}
	}
#undef MODIFIED_REMOTEADDR
#undef MODIFIED_SOURCEADDR
#undef MODIFIED_REPLICATION
#undef MODIFIED_CHECKSUM
#undef MODIFIED_COMPRESSION
#undef MODIFIED_TIMEOUT
#undef MODIFIED_EXEC
#undef MODIFIED_METAFLUSH

	pjdlog_info("Configuration reloaded successfully.");
}

static void
guard_one(struct hast_resource *res, unsigned int ncomp)
{
	struct proto_conn *in, *out;

	if (!ISREMOTE(ncomp))
		return;

	rw_rlock(&hio_remote_lock[ncomp]);

	if (!real_remote(res)) {
		rw_unlock(&hio_remote_lock[ncomp]);
		return;
	}

	if (ISCONNECTED(res, ncomp)) {
		PJDLOG_ASSERT(res->hr_remotein != NULL);
		PJDLOG_ASSERT(res->hr_remoteout != NULL);
		rw_unlock(&hio_remote_lock[ncomp]);
		pjdlog_debug(2, "remote_guard: Connection to %s is ok.",
		    res->hr_remoteaddr);
		return;
	}

	PJDLOG_ASSERT(res->hr_remotein == NULL);
	PJDLOG_ASSERT(res->hr_remoteout == NULL);
	/*
	 * Upgrade the lock.  It doesn't have to be atomic as no other thread
	 * can change the connection status from disconnected to connected.
	 */
	rw_unlock(&hio_remote_lock[ncomp]);
	pjdlog_debug(2, "remote_guard: Reconnecting to %s.",
	    res->hr_remoteaddr);
	in = out = NULL;
	if (init_remote(res, &in, &out) == 0) {
		rw_wlock(&hio_remote_lock[ncomp]);
		PJDLOG_ASSERT(res->hr_remotein == NULL);
		PJDLOG_ASSERT(res->hr_remoteout == NULL);
		PJDLOG_ASSERT(in != NULL && out != NULL);
		res->hr_remotein = in;
		res->hr_remoteout = out;
		rw_unlock(&hio_remote_lock[ncomp]);
		pjdlog_info("Successfully reconnected to %s.",
		    res->hr_remoteaddr);
		sync_start();
	} else {
		/* Both connections should be NULL. */
		PJDLOG_ASSERT(res->hr_remotein == NULL);
		PJDLOG_ASSERT(res->hr_remoteout == NULL);
		PJDLOG_ASSERT(in == NULL && out == NULL);
		pjdlog_debug(2, "remote_guard: Reconnect to %s failed.",
		    res->hr_remoteaddr);
	}
}
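/*
 * A failed reconnect leaves both connections NULL and guard_thread() below
 * simply retries on its next pass.  After a successful reconnect,
 * sync_start() wakes sync_thread() so that extents dirtied while the remote
 * node was unreachable get resynchronized.
 */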
/*
 * Thread guards remote connections and reconnects when needed; it also
 * handles signals.
 */
static void *
guard_thread(void *arg)
{
	struct hast_resource *res = arg;
	unsigned int ii, ncomps;
	struct timespec timeout;
	time_t lastcheck, now;
	sigset_t mask;
	int signo;

	ncomps = HAST_NCOMPONENTS;
	lastcheck = time(NULL);

	PJDLOG_VERIFY(sigemptyset(&mask) == 0);
	PJDLOG_VERIFY(sigaddset(&mask, SIGINT) == 0);
	PJDLOG_VERIFY(sigaddset(&mask, SIGTERM) == 0);

	timeout.tv_sec = HAST_KEEPALIVE;
	timeout.tv_nsec = 0;
	signo = -1;

	for (;;) {
		switch (signo) {
		case SIGINT:
		case SIGTERM:
			sigexit_received = true;
			primary_exitx(EX_OK,
			    "Termination signal received, exiting.");
			break;
		default:
			break;
		}

		/*
		 * Don't check connections until we have fully started,
		 * as we may still be looping, waiting for the remote node
		 * to switch from primary to secondary.
		 */
		if (fullystarted) {
			pjdlog_debug(2, "remote_guard: Checking connections.");
			now = time(NULL);
			if (lastcheck + HAST_KEEPALIVE <= now) {
				for (ii = 0; ii < ncomps; ii++)
					guard_one(res, ii);
				lastcheck = now;
			}
		}
		signo = sigtimedwait(&mask, NULL, &timeout);
	}
	/* NOTREACHED */
	return (NULL);
}
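/*
 * guard_thread() uses sigtimedwait() both as its keepalive timer and as the
 * only place where SIGINT/SIGTERM are consumed.  A minimal sketch of the
 * same pattern, outside of hastd (illustrative only; it assumes the signals
 * are blocked first, which POSIX requires for sigtimedwait() to receive
 * them reliably):
 *
 *	sigset_t mask;
 *	struct timespec timeout;
 *	int signo;
 *
 *	sigemptyset(&mask);
 *	sigaddset(&mask, SIGINT);
 *	sigaddset(&mask, SIGTERM);
 *	pthread_sigmask(SIG_BLOCK, &mask, NULL);
 *	timeout.tv_sec = HAST_KEEPALIVE;
 *	timeout.tv_nsec = 0;
 *	for (;;) {
 *		signo = sigtimedwait(&mask, NULL, &timeout);
 *		if (signo == SIGINT || signo == SIGTERM)
 *			break;
 *		(periodic work goes here; signo is -1 with errno set to
 *		EAGAIN when the timeout expired)
 *	}
 */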