/*-
 * Copyright (c) 2009 The FreeBSD Foundation
 * All rights reserved.
 *
 * This software was developed by Pawel Jakub Dawidek under sponsorship from
 * the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/types.h>
#include <sys/time.h>
#include <sys/bio.h>
#include <sys/disk.h>
#include <sys/refcount.h>
#include <sys/stat.h>

#include <geom/gate/g_gate.h>

#include <assert.h>
#include <err.h>
#include <errno.h>
#include <fcntl.h>
#include <libgeom.h>
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sysexits.h>
#include <unistd.h>

#include <activemap.h>
#include <nv.h>
#include <rangelock.h>

#include "control.h"
#include "hast.h"
#include "hast_proto.h"
#include "hastd.h"
#include "metadata.h"
#include "proto.h"
#include "pjdlog.h"
#include "subr.h"
#include "synch.h"

struct hio {
	/*
	 * Number of components we are still waiting for.
	 * When this field goes to 0, we can send the request back to the
	 * kernel. Each component has to decrease this counter by one
	 * even on failure.
	 */
	unsigned int		 hio_countdown;
	/*
	 * Each component has a place to store its own error.
	 * Once the request is handled by all components we can decide if the
	 * request overall is successful or not.
	 */
	int			*hio_errors;
	/*
	 * Structure used to communicate with the GEOM Gate class.
	 */
	struct g_gate_ctl_io	 hio_ggio;
	TAILQ_ENTRY(hio)	*hio_next;
};
#define	hio_free_next	hio_next[0]
#define	hio_done_next	hio_next[0]

/*
 * Free list holds unused structures. When the free list is empty, we have to
 * wait until some in-progress requests are freed.
 */
static TAILQ_HEAD(, hio) hio_free_list;
static pthread_mutex_t hio_free_list_lock;
static pthread_cond_t hio_free_list_cond;
/*
 * There is one send list for every component. One request is placed on all
 * send lists - each component gets the same request, but each component is
 * responsible for managing its own send list.
 */
static TAILQ_HEAD(, hio) *hio_send_list;
static pthread_mutex_t *hio_send_list_lock;
static pthread_cond_t *hio_send_list_cond;
/*
 * There is one recv list for every component, although local components don't
 * use recv lists as local requests are done synchronously.
 */
static TAILQ_HEAD(, hio) *hio_recv_list;
static pthread_mutex_t *hio_recv_list_lock;
static pthread_cond_t *hio_recv_list_cond;
/*
 * Request is placed on the done list by the slowest component (the one that
 * decreased hio_countdown from 1 to 0).
 */
static TAILQ_HEAD(, hio) hio_done_list;
static pthread_mutex_t hio_done_list_lock;
static pthread_cond_t hio_done_list_cond;
/*
 * Structures below are for interaction with the sync thread.
 */
static bool sync_inprogress;
static pthread_mutex_t sync_lock;
static pthread_cond_t sync_cond;
/*
 * The lock below is used to synchronize access to remote connections.
 */
static pthread_rwlock_t *hio_remote_lock;
static pthread_mutex_t hio_guard_lock;
static pthread_cond_t hio_guard_cond;

/*
 * Lock to synchronize metadata updates. Also synchronizes access to the
 * hr_primary_localcnt and hr_primary_remotecnt fields.
 */
static pthread_mutex_t metadata_lock;

/*
 * Maximum number of outstanding I/O requests.
 */
#define	HAST_HIO_MAX	256
/*
 * Number of components. At this point there are only two components: local
 * and remote, but in the future it might be possible to use multiple local
 * and remote components.
 */
#define	HAST_NCOMPONENTS	2
/*
 * Number of seconds to sleep before the next reconnect attempt.
 */
#define	RECONNECT_SLEEP		5

#define	ISCONNECTED(res, no)	\
	((res)->hr_remotein != NULL && (res)->hr_remoteout != NULL)

#define	QUEUE_INSERT1(hio, name, ncomp)	do {				\
	bool _wakeup;							\
									\
	mtx_lock(&hio_##name##_list_lock[(ncomp)]);			\
	_wakeup = TAILQ_EMPTY(&hio_##name##_list[(ncomp)]);		\
	TAILQ_INSERT_TAIL(&hio_##name##_list[(ncomp)], (hio),		\
	    hio_next[(ncomp)]);						\
	mtx_unlock(&hio_##name##_list_lock[ncomp]);			\
	if (_wakeup)							\
		cv_signal(&hio_##name##_list_cond[(ncomp)]);		\
} while (0)
#define	QUEUE_INSERT2(hio, name)	do {				\
	bool _wakeup;							\
									\
	mtx_lock(&hio_##name##_list_lock);				\
	_wakeup = TAILQ_EMPTY(&hio_##name##_list);			\
	TAILQ_INSERT_TAIL(&hio_##name##_list, (hio), hio_##name##_next);\
	mtx_unlock(&hio_##name##_list_lock);				\
	if (_wakeup)							\
		cv_signal(&hio_##name##_list_cond);			\
} while (0)
#define	QUEUE_TAKE1(hio, name, ncomp)	do {				\
	mtx_lock(&hio_##name##_list_lock[(ncomp)]);			\
	while (((hio) = TAILQ_FIRST(&hio_##name##_list[(ncomp)])) == NULL) { \
		cv_wait(&hio_##name##_list_cond[(ncomp)],		\
		    &hio_##name##_list_lock[(ncomp)]);			\
	}								\
	TAILQ_REMOVE(&hio_##name##_list[(ncomp)], (hio),		\
	    hio_next[(ncomp)]);						\
	mtx_unlock(&hio_##name##_list_lock[(ncomp)]);			\
} while (0)
#define	QUEUE_TAKE2(hio, name)	do {					\
	mtx_lock(&hio_##name##_list_lock);				\
	while (((hio) = TAILQ_FIRST(&hio_##name##_list)) == NULL) {	\
		cv_wait(&hio_##name##_list_cond,			\
		    &hio_##name##_list_lock);				\
	}								\
	TAILQ_REMOVE(&hio_##name##_list, (hio), hio_##name##_next);	\
	mtx_unlock(&hio_##name##_list_lock);				\
} while (0)

#define	SYNCREQ(hio)		do { (hio)->hio_ggio.gctl_unit = -1; } while (0)
#define	ISSYNCREQ(hio)		((hio)->hio_ggio.gctl_unit == -1)
#define	SYNCREQDONE(hio)	do { (hio)->hio_ggio.gctl_unit = -2; } while (0)
#define	ISSYNCREQDONE(hio)	((hio)->hio_ggio.gctl_unit == -2)
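
/*
 * Summary of the request flow implemented by the queues and macros above:
 * a request is taken from the free list, placed on the send list of each
 * component that has to handle it, parked on the component's recv list
 * while a remote reply is pending, and moved to the done list by the
 * slowest component (the one that drops hio_countdown to zero).
 * Synchronization requests are generated internally rather than by the
 * kernel, so gctl_unit is reused as a marker: SYNCREQ() tags a sync
 * request in flight and SYNCREQDONE() tags one that has completed.
 */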

static struct hast_resource *gres;

static pthread_mutex_t range_lock;
static struct rangelocks *range_regular;
static bool range_regular_wait;
static pthread_cond_t range_regular_cond;
static struct rangelocks *range_sync;
static bool range_sync_wait;
static pthread_cond_t range_sync_cond;

static void *ggate_recv_thread(void *arg);
static void *local_send_thread(void *arg);
static void *remote_send_thread(void *arg);
static void *remote_recv_thread(void *arg);
static void *ggate_send_thread(void *arg);
static void *sync_thread(void *arg);
static void *guard_thread(void *arg);

static void sighandler(int sig);

static void
cleanup(struct hast_resource *res)
{
	int rerrno;

	/* Remember errno. */
	rerrno = errno;

	/*
	 * Close descriptor to /dev/hast/<name>
	 * to work around a race in the kernel.
	 */
	close(res->hr_localfd);

	/* Destroy ggate provider if we created one. */
	if (res->hr_ggateunit >= 0) {
		struct g_gate_ctl_destroy ggiod;

		ggiod.gctl_version = G_GATE_VERSION;
		ggiod.gctl_unit = res->hr_ggateunit;
		ggiod.gctl_force = 1;
		if (ioctl(res->hr_ggatefd, G_GATE_CMD_DESTROY, &ggiod) < 0) {
			pjdlog_warning("Unable to destroy hast/%s device",
			    res->hr_provname);
		}
		res->hr_ggateunit = -1;
	}

	/* Restore errno. */
	errno = rerrno;
}

static void
primary_exit(int exitcode, const char *fmt, ...)
{
	va_list ap;

	assert(exitcode != EX_OK);
	va_start(ap, fmt);
	pjdlogv_errno(LOG_ERR, fmt, ap);
	va_end(ap);
	cleanup(gres);
	exit(exitcode);
}

static void
primary_exitx(int exitcode, const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	pjdlogv(exitcode == EX_OK ? LOG_INFO : LOG_ERR, fmt, ap);
	va_end(ap);
	cleanup(gres);
	exit(exitcode);
}

static int
hast_activemap_flush(struct hast_resource *res)
{
	const unsigned char *buf;
	size_t size;

	buf = activemap_bitmap(res->hr_amp, &size);
	assert(buf != NULL);
	assert((size % res->hr_local_sectorsize) == 0);
	if (pwrite(res->hr_localfd, buf, size, METADATA_SIZE) !=
	    (ssize_t)size) {
		KEEP_ERRNO(pjdlog_errno(LOG_ERR,
		    "Unable to flush activemap to disk"));
		return (-1);
	}
	return (0);
}

static void
init_environment(struct hast_resource *res __unused)
{
	struct hio *hio;
	unsigned int ii, ncomps;

	/*
	 * In the future this might be a per-resource value.
	 */
	ncomps = HAST_NCOMPONENTS;

	/*
	 * Allocate memory needed by lists.
	 */
	hio_send_list = malloc(sizeof(hio_send_list[0]) * ncomps);
	if (hio_send_list == NULL) {
		primary_exitx(EX_TEMPFAIL,
		    "Unable to allocate %zu bytes of memory for send lists.",
		    sizeof(hio_send_list[0]) * ncomps);
	}
	hio_send_list_lock = malloc(sizeof(hio_send_list_lock[0]) * ncomps);
	if (hio_send_list_lock == NULL) {
		primary_exitx(EX_TEMPFAIL,
		    "Unable to allocate %zu bytes of memory for send list locks.",
		    sizeof(hio_send_list_lock[0]) * ncomps);
	}
	hio_send_list_cond = malloc(sizeof(hio_send_list_cond[0]) * ncomps);
	if (hio_send_list_cond == NULL) {
		primary_exitx(EX_TEMPFAIL,
		    "Unable to allocate %zu bytes of memory for send list condition variables.",
		    sizeof(hio_send_list_cond[0]) * ncomps);
	}
	hio_recv_list = malloc(sizeof(hio_recv_list[0]) * ncomps);
	if (hio_recv_list == NULL) {
		primary_exitx(EX_TEMPFAIL,
		    "Unable to allocate %zu bytes of memory for recv lists.",
		    sizeof(hio_recv_list[0]) * ncomps);
	}
	hio_recv_list_lock = malloc(sizeof(hio_recv_list_lock[0]) * ncomps);
	if (hio_recv_list_lock == NULL) {
		primary_exitx(EX_TEMPFAIL,
		    "Unable to allocate %zu bytes of memory for recv list locks.",
		    sizeof(hio_recv_list_lock[0]) * ncomps);
	}
	hio_recv_list_cond = malloc(sizeof(hio_recv_list_cond[0]) * ncomps);
	if (hio_recv_list_cond == NULL) {
		primary_exitx(EX_TEMPFAIL,
		    "Unable to allocate %zu bytes of memory for recv list condition variables.",
		    sizeof(hio_recv_list_cond[0]) * ncomps);
	}
	hio_remote_lock = malloc(sizeof(hio_remote_lock[0]) * ncomps);
	if (hio_remote_lock == NULL) {
		primary_exitx(EX_TEMPFAIL,
		    "Unable to allocate %zu bytes of memory for remote connections locks.",
		    sizeof(hio_remote_lock[0]) * ncomps);
	}

	/*
	 * Initialize lists, their locks and their condition variables.
	 */
	TAILQ_INIT(&hio_free_list);
	mtx_init(&hio_free_list_lock);
	cv_init(&hio_free_list_cond);
	for (ii = 0; ii < HAST_NCOMPONENTS; ii++) {
		TAILQ_INIT(&hio_send_list[ii]);
		mtx_init(&hio_send_list_lock[ii]);
		cv_init(&hio_send_list_cond[ii]);
		TAILQ_INIT(&hio_recv_list[ii]);
		mtx_init(&hio_recv_list_lock[ii]);
		cv_init(&hio_recv_list_cond[ii]);
		rw_init(&hio_remote_lock[ii]);
	}
	TAILQ_INIT(&hio_done_list);
	mtx_init(&hio_done_list_lock);
	cv_init(&hio_done_list_cond);
	mtx_init(&hio_guard_lock);
	cv_init(&hio_guard_cond);
	mtx_init(&metadata_lock);

	/*
	 * Allocate request pool and initialize requests.
	 */
	for (ii = 0; ii < HAST_HIO_MAX; ii++) {
		hio = malloc(sizeof(*hio));
		if (hio == NULL) {
			primary_exitx(EX_TEMPFAIL,
			    "Unable to allocate %zu bytes of memory for hio request.",
			    sizeof(*hio));
		}
		hio->hio_countdown = 0;
		hio->hio_errors = malloc(sizeof(hio->hio_errors[0]) * ncomps);
		if (hio->hio_errors == NULL) {
			primary_exitx(EX_TEMPFAIL,
			    "Unable to allocate %zu bytes of memory for hio errors.",
			    sizeof(hio->hio_errors[0]) * ncomps);
		}
		hio->hio_next = malloc(sizeof(hio->hio_next[0]) * ncomps);
		if (hio->hio_next == NULL) {
			primary_exitx(EX_TEMPFAIL,
			    "Unable to allocate %zu bytes of memory for hio_next field.",
			    sizeof(hio->hio_next[0]) * ncomps);
		}
		hio->hio_ggio.gctl_version = G_GATE_VERSION;
		hio->hio_ggio.gctl_data = malloc(MAXPHYS);
		if (hio->hio_ggio.gctl_data == NULL) {
			primary_exitx(EX_TEMPFAIL,
			    "Unable to allocate %zu bytes of memory for gctl_data.",
			    MAXPHYS);
		}
		hio->hio_ggio.gctl_length = MAXPHYS;
		hio->hio_ggio.gctl_error = 0;
		TAILQ_INSERT_HEAD(&hio_free_list, hio, hio_free_next);
	}

	/*
	 * Turn on signal handling.
	 */
	signal(SIGINT, sighandler);
	signal(SIGTERM, sighandler);
}

static void
init_local(struct hast_resource *res)
{
	unsigned char *buf;
	size_t mapsize;

	if (metadata_read(res, true) < 0)
		exit(EX_NOINPUT);
	mtx_init(&res->hr_amp_lock);
	if (activemap_init(&res->hr_amp, res->hr_datasize, res->hr_extentsize,
	    res->hr_local_sectorsize, res->hr_keepdirty) < 0) {
		primary_exit(EX_TEMPFAIL, "Unable to create activemap");
	}
	mtx_init(&range_lock);
	cv_init(&range_regular_cond);
	if (rangelock_init(&range_regular) < 0)
		primary_exit(EX_TEMPFAIL, "Unable to create regular range lock");
	cv_init(&range_sync_cond);
	if (rangelock_init(&range_sync) < 0)
		primary_exit(EX_TEMPFAIL, "Unable to create sync range lock");
	mapsize = activemap_ondisk_size(res->hr_amp);
	buf = calloc(1, mapsize);
	if (buf == NULL) {
		primary_exitx(EX_TEMPFAIL,
		    "Unable to allocate buffer for activemap.");
	}
	if (pread(res->hr_localfd, buf, mapsize, METADATA_SIZE) !=
	    (ssize_t)mapsize) {
		primary_exit(EX_NOINPUT, "Unable to read activemap");
	}
	activemap_copyin(res->hr_amp, buf, mapsize);
	if (res->hr_resuid != 0)
		return;
	/*
	 * We're using the provider for the first time, so we have to generate
	 * a resource unique identifier and initialize local and remote counts.
	 */
	arc4random_buf(&res->hr_resuid, sizeof(res->hr_resuid));
	res->hr_primary_localcnt = 1;
	res->hr_primary_remotecnt = 0;
	if (metadata_write(res) < 0)
		exit(EX_NOINPUT);
}

static bool
init_remote(struct hast_resource *res, struct proto_conn **inp,
    struct proto_conn **outp)
{
	struct proto_conn *in, *out;
	struct nv *nvout, *nvin;
	const unsigned char *token;
	unsigned char *map;
	const char *errmsg;
	int32_t extentsize;
	int64_t datasize;
	uint32_t mapsize;
	size_t size;

	assert((inp == NULL && outp == NULL) || (inp != NULL && outp != NULL));

	in = out = NULL;

	/* Prepare outgoing connection with remote node. */
	if (proto_client(res->hr_remoteaddr, &out) < 0) {
		primary_exit(EX_OSERR, "Unable to create connection to %s",
		    res->hr_remoteaddr);
	}
	/* Try to connect, but accept failure. */
	if (proto_connect(out) < 0) {
		pjdlog_errno(LOG_WARNING, "Unable to connect to %s",
		    res->hr_remoteaddr);
		goto close;
	}
	/*
	 * First handshake step.
	 * Setup outgoing connection with remote node.
	 */
	nvout = nv_alloc();
	nv_add_string(nvout, res->hr_name, "resource");
	if (nv_error(nvout) != 0) {
		pjdlog_common(LOG_WARNING, 0, nv_error(nvout),
		    "Unable to allocate header for connection with %s",
		    res->hr_remoteaddr);
		nv_free(nvout);
		goto close;
	}
	if (hast_proto_send(res, out, nvout, NULL, 0) < 0) {
		pjdlog_errno(LOG_WARNING,
		    "Unable to send handshake header to %s",
		    res->hr_remoteaddr);
		nv_free(nvout);
		goto close;
	}
	nv_free(nvout);
	if (hast_proto_recv_hdr(out, &nvin) < 0) {
		pjdlog_errno(LOG_WARNING,
		    "Unable to receive handshake header from %s",
		    res->hr_remoteaddr);
		goto close;
	}
	errmsg = nv_get_string(nvin, "errmsg");
	if (errmsg != NULL) {
		pjdlog_warning("%s", errmsg);
		nv_free(nvin);
		goto close;
	}
	token = nv_get_uint8_array(nvin, &size, "token");
	if (token == NULL) {
		pjdlog_warning("Handshake header from %s has no 'token' field.",
		    res->hr_remoteaddr);
		nv_free(nvin);
		goto close;
	}
	if (size != sizeof(res->hr_token)) {
		pjdlog_warning("Handshake header from %s contains 'token' of wrong size (got %zu, expected %zu).",
		    res->hr_remoteaddr, size, sizeof(res->hr_token));
		nv_free(nvin);
		goto close;
	}
	bcopy(token, res->hr_token, sizeof(res->hr_token));
	nv_free(nvin);

	/*
	 * Second handshake step.
	 * Setup incoming connection with remote node.
	 */
	if (proto_client(res->hr_remoteaddr, &in) < 0) {
		pjdlog_errno(LOG_WARNING, "Unable to create connection to %s",
		    res->hr_remoteaddr);
	}
	/* Try to connect, but accept failure. */
	if (proto_connect(in) < 0) {
		pjdlog_errno(LOG_WARNING, "Unable to connect to %s",
		    res->hr_remoteaddr);
		goto close;
	}
	nvout = nv_alloc();
	nv_add_string(nvout, res->hr_name, "resource");
	nv_add_uint8_array(nvout, res->hr_token, sizeof(res->hr_token),
	    "token");
	nv_add_uint64(nvout, res->hr_resuid, "resuid");
	nv_add_uint64(nvout, res->hr_primary_localcnt, "localcnt");
	nv_add_uint64(nvout, res->hr_primary_remotecnt, "remotecnt");
	if (nv_error(nvout) != 0) {
		pjdlog_common(LOG_WARNING, 0, nv_error(nvout),
		    "Unable to allocate header for connection with %s",
		    res->hr_remoteaddr);
		nv_free(nvout);
		goto close;
	}
	if (hast_proto_send(res, in, nvout, NULL, 0) < 0) {
		pjdlog_errno(LOG_WARNING,
		    "Unable to send handshake header to %s",
		    res->hr_remoteaddr);
		nv_free(nvout);
		goto close;
	}
	nv_free(nvout);
	if (hast_proto_recv_hdr(out, &nvin) < 0) {
		pjdlog_errno(LOG_WARNING,
		    "Unable to receive handshake header from %s",
		    res->hr_remoteaddr);
		goto close;
	}
	errmsg = nv_get_string(nvin, "errmsg");
	if (errmsg != NULL) {
		pjdlog_warning("%s", errmsg);
		nv_free(nvin);
		goto close;
	}
	datasize = nv_get_int64(nvin, "datasize");
	if (datasize != res->hr_datasize) {
		pjdlog_warning("Data size differs between nodes (local=%jd, remote=%jd).",
		    (intmax_t)res->hr_datasize, (intmax_t)datasize);
		nv_free(nvin);
		goto close;
	}
	extentsize = nv_get_int32(nvin, "extentsize");
	if (extentsize != res->hr_extentsize) {
		pjdlog_warning("Extent size differs between nodes (local=%zd, remote=%zd).",
		    (ssize_t)res->hr_extentsize, (ssize_t)extentsize);
		nv_free(nvin);
		goto close;
	}
	res->hr_secondary_localcnt = nv_get_uint64(nvin, "localcnt");
	res->hr_secondary_remotecnt = nv_get_uint64(nvin, "remotecnt");
	res->hr_syncsrc = nv_get_uint8(nvin, "syncsrc");
	map = NULL;
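	/*
	 * A non-zero "mapsize" means the remote node reported dirty extents
	 * of its own; its activemap follows the handshake header and has to
	 * be merged with the local one before synchronization starts.
	 */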
	mapsize = nv_get_uint32(nvin, "mapsize");
	if (mapsize > 0) {
		map = malloc(mapsize);
		if (map == NULL) {
			pjdlog_error("Unable to allocate memory for remote activemap (mapsize=%ju).",
			    (uintmax_t)mapsize);
			nv_free(nvin);
			goto close;
		}
		/*
		 * Remote node has some dirty extents of its own, let's
		 * download its activemap.
		 */
		if (hast_proto_recv_data(res, out, nvin, map,
		    mapsize) < 0) {
			pjdlog_errno(LOG_ERR,
			    "Unable to receive remote activemap");
			nv_free(nvin);
			free(map);
			goto close;
		}
		/*
		 * Merge local and remote bitmaps.
		 */
		activemap_merge(res->hr_amp, map, mapsize);
		free(map);
		/*
		 * Now that we have merged the bitmaps from both nodes, flush
		 * the result to disk before we start to synchronize.
		 */
		(void)hast_activemap_flush(res);
	}
	pjdlog_info("Connected to %s.", res->hr_remoteaddr);
	if (inp != NULL && outp != NULL) {
		*inp = in;
		*outp = out;
	} else {
		res->hr_remotein = in;
		res->hr_remoteout = out;
	}
	return (true);
close:
	proto_close(out);
	if (in != NULL)
		proto_close(in);
	return (false);
}

static void
sync_start(void)
{

	mtx_lock(&sync_lock);
	sync_inprogress = true;
	mtx_unlock(&sync_lock);
	cv_signal(&sync_cond);
}

static void
init_ggate(struct hast_resource *res)
{
	struct g_gate_ctl_create ggiocreate;
	struct g_gate_ctl_cancel ggiocancel;

	/*
	 * We communicate with ggate via /dev/ggctl. Open it.
	 */
	res->hr_ggatefd = open("/dev/" G_GATE_CTL_NAME, O_RDWR);
	if (res->hr_ggatefd < 0)
		primary_exit(EX_OSFILE, "Unable to open /dev/" G_GATE_CTL_NAME);
	/*
	 * Create provider before trying to connect, as connection failure
	 * is not critical, but may take some time.
	 */
	ggiocreate.gctl_version = G_GATE_VERSION;
	ggiocreate.gctl_mediasize = res->hr_datasize;
	ggiocreate.gctl_sectorsize = res->hr_local_sectorsize;
	ggiocreate.gctl_flags = 0;
	ggiocreate.gctl_maxcount = 128;
	ggiocreate.gctl_timeout = 0;
	ggiocreate.gctl_unit = G_GATE_NAME_GIVEN;
	snprintf(ggiocreate.gctl_name, sizeof(ggiocreate.gctl_name), "hast/%s",
	    res->hr_provname);
	bzero(ggiocreate.gctl_info, sizeof(ggiocreate.gctl_info));
	if (ioctl(res->hr_ggatefd, G_GATE_CMD_CREATE, &ggiocreate) == 0) {
		pjdlog_info("Device hast/%s created.", res->hr_provname);
		res->hr_ggateunit = ggiocreate.gctl_unit;
		return;
	}
	if (errno != EEXIST) {
		primary_exit(EX_OSERR, "Unable to create hast/%s device",
		    res->hr_provname);
	}
	pjdlog_debug(1,
	    "Device hast/%s already exists, we will try to take it over.",
	    res->hr_provname);
	/*
	 * If we received EEXIST, we assume that the process that created the
	 * provider died and didn't clean up. In that case we will start from
	 * where it left off.
	 */
	ggiocancel.gctl_version = G_GATE_VERSION;
	ggiocancel.gctl_unit = G_GATE_NAME_GIVEN;
	snprintf(ggiocancel.gctl_name, sizeof(ggiocancel.gctl_name), "hast/%s",
	    res->hr_provname);
	if (ioctl(res->hr_ggatefd, G_GATE_CMD_CANCEL, &ggiocancel) == 0) {
		pjdlog_info("Device hast/%s recovered.", res->hr_provname);
		res->hr_ggateunit = ggiocancel.gctl_unit;
		return;
	}
	primary_exit(EX_OSERR, "Unable to take over hast/%s device",
	    res->hr_provname);
}

void
hastd_primary(struct hast_resource *res)
{
	pthread_t td;
	pid_t pid;
	int error;

	gres = res;

	/*
	 * Create communication channel between parent and child.
	 */
	if (proto_client("socketpair://", &res->hr_ctrl) < 0) {
		KEEP_ERRNO((void)pidfile_remove(pfh));
		primary_exit(EX_OSERR,
		    "Unable to create control sockets between parent and child");
	}

	pid = fork();
	if (pid < 0) {
		KEEP_ERRNO((void)pidfile_remove(pfh));
		primary_exit(EX_OSERR, "Unable to fork");
	}

	if (pid > 0) {
		/* This is parent. */
		res->hr_workerpid = pid;
		return;
	}
	(void)pidfile_close(pfh);

	setproctitle("%s (primary)", res->hr_name);

	init_local(res);
	if (init_remote(res, NULL, NULL))
		sync_start();
	init_ggate(res);
	init_environment(res);
	error = pthread_create(&td, NULL, ggate_recv_thread, res);
	assert(error == 0);
	error = pthread_create(&td, NULL, local_send_thread, res);
	assert(error == 0);
	error = pthread_create(&td, NULL, remote_send_thread, res);
	assert(error == 0);
	error = pthread_create(&td, NULL, remote_recv_thread, res);
	assert(error == 0);
	error = pthread_create(&td, NULL, ggate_send_thread, res);
	assert(error == 0);
	error = pthread_create(&td, NULL, sync_thread, res);
	assert(error == 0);
	error = pthread_create(&td, NULL, ctrl_thread, res);
	assert(error == 0);
	(void)guard_thread(res);
}

static void
reqlog(int loglevel, int debuglevel, struct g_gate_ctl_io *ggio, const char *fmt, ...)
{
	char msg[1024];
	va_list ap;
	int len;

	va_start(ap, fmt);
	len = vsnprintf(msg, sizeof(msg), fmt, ap);
	va_end(ap);
	if ((size_t)len < sizeof(msg)) {
		switch (ggio->gctl_cmd) {
		case BIO_READ:
			(void)snprintf(msg + len, sizeof(msg) - len,
			    "READ(%ju, %ju).", (uintmax_t)ggio->gctl_offset,
			    (uintmax_t)ggio->gctl_length);
			break;
		case BIO_DELETE:
			(void)snprintf(msg + len, sizeof(msg) - len,
			    "DELETE(%ju, %ju).", (uintmax_t)ggio->gctl_offset,
			    (uintmax_t)ggio->gctl_length);
			break;
		case BIO_FLUSH:
			(void)snprintf(msg + len, sizeof(msg) - len, "FLUSH.");
			break;
		case BIO_WRITE:
			(void)snprintf(msg + len, sizeof(msg) - len,
			    "WRITE(%ju, %ju).", (uintmax_t)ggio->gctl_offset,
			    (uintmax_t)ggio->gctl_length);
			break;
		default:
			(void)snprintf(msg + len, sizeof(msg) - len,
			    "UNKNOWN(%u).", (unsigned int)ggio->gctl_cmd);
			break;
		}
	}
	pjdlog_common(loglevel, debuglevel, -1, "%s", msg);
}

static void
remote_close(struct hast_resource *res, int ncomp)
{

	rw_wlock(&hio_remote_lock[ncomp]);
	/*
	 * A race is possible between dropping rlock and acquiring wlock -
	 * another thread can close the connection in between.
	 */
	if (!ISCONNECTED(res, ncomp)) {
		assert(res->hr_remotein == NULL);
		assert(res->hr_remoteout == NULL);
		rw_unlock(&hio_remote_lock[ncomp]);
		return;
	}

	assert(res->hr_remotein != NULL);
	assert(res->hr_remoteout != NULL);

	pjdlog_debug(2, "Closing old incoming connection to %s.",
	    res->hr_remoteaddr);
	proto_close(res->hr_remotein);
	res->hr_remotein = NULL;
	pjdlog_debug(2, "Closing old outgoing connection to %s.",
	    res->hr_remoteaddr);
	proto_close(res->hr_remoteout);
	res->hr_remoteout = NULL;

	rw_unlock(&hio_remote_lock[ncomp]);

	/*
	 * Stop synchronization if it is in progress.
	 */
	mtx_lock(&sync_lock);
	if (sync_inprogress)
		sync_inprogress = false;
	mtx_unlock(&sync_lock);

	/*
	 * Wake up the guard thread, so it can start reconnecting immediately.
	 */
	mtx_lock(&hio_guard_lock);
	cv_signal(&hio_guard_cond);
	mtx_unlock(&hio_guard_lock);
}

/*
 * Thread receives ggate I/O requests from the kernel and passes them to
 * appropriate threads:
 * WRITE - always goes to both local_send and remote_send threads
 * READ (when the block is up-to-date on local component) -
 *	only local_send thread
 * READ (when the block isn't up-to-date on local component) -
 *	only remote_send thread
 * DELETE - always goes to both local_send and remote_send threads
 * FLUSH - always goes to both local_send and remote_send threads
 */
static void *
ggate_recv_thread(void *arg)
{
	struct hast_resource *res = arg;
	struct g_gate_ctl_io *ggio;
	struct hio *hio;
	unsigned int ii, ncomp, ncomps;
	int error;

	ncomps = HAST_NCOMPONENTS;

	for (;;) {
		pjdlog_debug(2, "ggate_recv: Taking free request.");
		QUEUE_TAKE2(hio, free);
		pjdlog_debug(2, "ggate_recv: (%p) Got free request.", hio);
		ggio = &hio->hio_ggio;
		ggio->gctl_unit = res->hr_ggateunit;
		ggio->gctl_length = MAXPHYS;
		ggio->gctl_error = 0;
		pjdlog_debug(2,
		    "ggate_recv: (%p) Waiting for request from the kernel.",
		    hio);
		if (ioctl(res->hr_ggatefd, G_GATE_CMD_START, ggio) < 0) {
			if (sigexit_received)
				pthread_exit(NULL);
			primary_exit(EX_OSERR, "G_GATE_CMD_START failed");
		}
		error = ggio->gctl_error;
		switch (error) {
		case 0:
			break;
		case ECANCELED:
			/* Exit gracefully. */
			if (!sigexit_received) {
				pjdlog_debug(2,
				    "ggate_recv: (%p) Received cancel from the kernel.",
				    hio);
				pjdlog_info("Received cancel from the kernel, exiting.");
			}
			pthread_exit(NULL);
		case ENOMEM:
			/*
			 * Buffer too small? Impossible, we allocate MAXPHYS
			 * bytes - request can't be bigger than that.
			 */
			/* FALLTHROUGH */
		case ENXIO:
		default:
			primary_exitx(EX_OSERR, "G_GATE_CMD_START failed: %s.",
			    strerror(error));
		}
		for (ii = 0; ii < ncomps; ii++)
			hio->hio_errors[ii] = EINVAL;
		reqlog(LOG_DEBUG, 2, ggio,
		    "ggate_recv: (%p) Request received from the kernel: ",
		    hio);
		/*
		 * Inform all components about a new write request.
		 * For a read request, prefer the local component unless the
		 * given range is out-of-date; then use the remote component.
		 */
		switch (ggio->gctl_cmd) {
		case BIO_READ:
			pjdlog_debug(2,
			    "ggate_recv: (%p) Moving request to the send queue.",
			    hio);
			refcount_init(&hio->hio_countdown, 1);
			mtx_lock(&metadata_lock);
			if (res->hr_syncsrc == HAST_SYNCSRC_UNDEF ||
			    res->hr_syncsrc == HAST_SYNCSRC_PRIMARY) {
				/*
				 * This range is up-to-date on the local
				 * component, so handle the request locally.
				 */
				/* Local component is 0 for now. */
				ncomp = 0;
			} else /* if (res->hr_syncsrc ==
			    HAST_SYNCSRC_SECONDARY) */ {
				assert(res->hr_syncsrc ==
				    HAST_SYNCSRC_SECONDARY);
				/*
				 * This range is out-of-date on the local
				 * component, so send the request to the
				 * remote node.
				 */
				/* Remote component is 1 for now. */
				ncomp = 1;
			}
			mtx_unlock(&metadata_lock);
			QUEUE_INSERT1(hio, send, ncomp);
			break;
		case BIO_WRITE:
			for (;;) {
				mtx_lock(&range_lock);
				if (rangelock_islocked(range_sync,
				    ggio->gctl_offset, ggio->gctl_length)) {
					pjdlog_debug(2,
					    "regular: Range offset=%jd length=%zu locked.",
					    (intmax_t)ggio->gctl_offset,
					    (size_t)ggio->gctl_length);
					range_regular_wait = true;
					cv_wait(&range_regular_cond, &range_lock);
					range_regular_wait = false;
					mtx_unlock(&range_lock);
					continue;
				}
				if (rangelock_add(range_regular,
				    ggio->gctl_offset, ggio->gctl_length) < 0) {
					mtx_unlock(&range_lock);
					pjdlog_debug(2,
					    "regular: Range offset=%jd length=%zu is already locked, waiting.",
					    (intmax_t)ggio->gctl_offset,
					    (size_t)ggio->gctl_length);
					sleep(1);
					continue;
				}
				mtx_unlock(&range_lock);
				break;
			}
			mtx_lock(&res->hr_amp_lock);
			if (activemap_write_start(res->hr_amp,
			    ggio->gctl_offset, ggio->gctl_length)) {
				(void)hast_activemap_flush(res);
			}
			mtx_unlock(&res->hr_amp_lock);
			/* FALLTHROUGH */
		case BIO_DELETE:
		case BIO_FLUSH:
			pjdlog_debug(2,
			    "ggate_recv: (%p) Moving request to the send queues.",
			    hio);
			refcount_init(&hio->hio_countdown, ncomps);
			for (ii = 0; ii < ncomps; ii++)
				QUEUE_INSERT1(hio, send, ii);
			break;
		}
	}
	/* NOTREACHED */
	return (NULL);
}

/*
 * Thread reads from or writes to the local component.
 * If a local read fails, the request is redirected to the remote_send thread.
 */
static void *
local_send_thread(void *arg)
{
	struct hast_resource *res = arg;
	struct g_gate_ctl_io *ggio;
	struct hio *hio;
	unsigned int ncomp, rncomp;
	ssize_t ret;

	/* Local component is 0 for now. */
	ncomp = 0;
	/* Remote component is 1 for now. */
	rncomp = 1;

	for (;;) {
		pjdlog_debug(2, "local_send: Taking request.");
		QUEUE_TAKE1(hio, send, ncomp);
		pjdlog_debug(2, "local_send: (%p) Got request.", hio);
		ggio = &hio->hio_ggio;
		switch (ggio->gctl_cmd) {
		case BIO_READ:
			ret = pread(res->hr_localfd, ggio->gctl_data,
			    ggio->gctl_length,
			    ggio->gctl_offset + res->hr_localoff);
			if (ret == ggio->gctl_length)
				hio->hio_errors[ncomp] = 0;
			else {
				/*
				 * If READ failed, try to read from remote node.
				 */
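				/*
				 * Note: hio_countdown is not released here;
				 * the remote_send/remote_recv path that picks
				 * up the redirected request is responsible for
				 * finishing it and moving it to the done queue.
				 */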
				QUEUE_INSERT1(hio, send, rncomp);
				continue;
			}
			break;
		case BIO_WRITE:
			ret = pwrite(res->hr_localfd, ggio->gctl_data,
			    ggio->gctl_length,
			    ggio->gctl_offset + res->hr_localoff);
			if (ret < 0)
				hio->hio_errors[ncomp] = errno;
			else if (ret != ggio->gctl_length)
				hio->hio_errors[ncomp] = EIO;
			else
				hio->hio_errors[ncomp] = 0;
			break;
		case BIO_DELETE:
			ret = g_delete(res->hr_localfd,
			    ggio->gctl_offset + res->hr_localoff,
			    ggio->gctl_length);
			if (ret < 0)
				hio->hio_errors[ncomp] = errno;
			else
				hio->hio_errors[ncomp] = 0;
			break;
		case BIO_FLUSH:
			ret = g_flush(res->hr_localfd);
			if (ret < 0)
				hio->hio_errors[ncomp] = errno;
			else
				hio->hio_errors[ncomp] = 0;
			break;
		}
		if (refcount_release(&hio->hio_countdown)) {
			if (ISSYNCREQ(hio)) {
				mtx_lock(&sync_lock);
				SYNCREQDONE(hio);
				mtx_unlock(&sync_lock);
				cv_signal(&sync_cond);
			} else {
				pjdlog_debug(2,
				    "local_send: (%p) Moving request to the done queue.",
				    hio);
				QUEUE_INSERT2(hio, done);
			}
		}
	}
	/* NOTREACHED */
	return (NULL);
}

/*
 * Thread sends request to secondary node.
 */
static void *
remote_send_thread(void *arg)
{
	struct hast_resource *res = arg;
	struct g_gate_ctl_io *ggio;
	struct hio *hio;
	struct nv *nv;
	unsigned int ncomp;
	bool wakeup;
	uint64_t offset, length;
	uint8_t cmd;
	void *data;

	/* Remote component is 1 for now. */
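	/*
	 * Sending and receiving run in separate threads so that several
	 * requests can be outstanding on the wire at once; replies are
	 * matched back to their requests by the "seq" number in
	 * remote_recv_thread().
	 */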
	ncomp = 1;

	for (;;) {
		pjdlog_debug(2, "remote_send: Taking request.");
		QUEUE_TAKE1(hio, send, ncomp);
		pjdlog_debug(2, "remote_send: (%p) Got request.", hio);
		ggio = &hio->hio_ggio;
		switch (ggio->gctl_cmd) {
		case BIO_READ:
			cmd = HIO_READ;
			data = NULL;
			offset = ggio->gctl_offset;
			length = ggio->gctl_length;
			break;
		case BIO_WRITE:
			cmd = HIO_WRITE;
			data = ggio->gctl_data;
			offset = ggio->gctl_offset;
			length = ggio->gctl_length;
			break;
		case BIO_DELETE:
			cmd = HIO_DELETE;
			data = NULL;
			offset = ggio->gctl_offset;
			length = ggio->gctl_length;
			break;
		case BIO_FLUSH:
			cmd = HIO_FLUSH;
			data = NULL;
			offset = 0;
			length = 0;
			break;
		default:
			assert(!"invalid condition");
			abort();
		}
		nv = nv_alloc();
		nv_add_uint8(nv, cmd, "cmd");
		nv_add_uint64(nv, (uint64_t)ggio->gctl_seq, "seq");
		nv_add_uint64(nv, offset, "offset");
		nv_add_uint64(nv, length, "length");
		if (nv_error(nv) != 0) {
			hio->hio_errors[ncomp] = nv_error(nv);
			pjdlog_debug(2,
			    "remote_send: (%p) Unable to prepare header to send.",
			    hio);
			reqlog(LOG_ERR, 0, ggio,
			    "Unable to prepare header to send (%s): ",
			    strerror(nv_error(nv)));
			/* Move failed request immediately to the done queue. */
			goto done_queue;
		}
		pjdlog_debug(2,
		    "remote_send: (%p) Moving request to the recv queue.",
		    hio);
		/*
		 * Protect connection from disappearing.
		 */
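		/*
		 * The read lock keeps hr_remotein/hr_remoteout from being
		 * torn down by remote_close() while the request is being
		 * sent; on a send failure we drop the lock before closing
		 * the connection ourselves.
		 */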
		rw_rlock(&hio_remote_lock[ncomp]);
		if (!ISCONNECTED(res, ncomp)) {
			rw_unlock(&hio_remote_lock[ncomp]);
			hio->hio_errors[ncomp] = ENOTCONN;
			goto done_queue;
		}
		/*
		 * Move the request to the recv queue before sending it,
		 * because otherwise we could receive the reply before the
		 * request is on the recv queue.
		 */
		mtx_lock(&hio_recv_list_lock[ncomp]);
		wakeup = TAILQ_EMPTY(&hio_recv_list[ncomp]);
		TAILQ_INSERT_TAIL(&hio_recv_list[ncomp], hio, hio_next[ncomp]);
		mtx_unlock(&hio_recv_list_lock[ncomp]);
		if (hast_proto_send(res, res->hr_remoteout, nv, data,
		    data != NULL ? length : 0) < 0) {
			hio->hio_errors[ncomp] = errno;
			rw_unlock(&hio_remote_lock[ncomp]);
			remote_close(res, ncomp);
			pjdlog_debug(2,
			    "remote_send: (%p) Unable to send request.", hio);
			reqlog(LOG_ERR, 0, ggio,
			    "Unable to send request (%s): ",
			    strerror(hio->hio_errors[ncomp]));
			/*
			 * Take request back from the receive queue and move
			 * it immediately to the done queue.
			 */
			mtx_lock(&hio_recv_list_lock[ncomp]);
			TAILQ_REMOVE(&hio_recv_list[ncomp], hio, hio_next[ncomp]);
			mtx_unlock(&hio_recv_list_lock[ncomp]);
			goto done_queue;
		}
		rw_unlock(&hio_remote_lock[ncomp]);
		nv_free(nv);
		if (wakeup)
			cv_signal(&hio_recv_list_cond[ncomp]);
		continue;
done_queue:
		nv_free(nv);
		if (ISSYNCREQ(hio)) {
			if (!refcount_release(&hio->hio_countdown))
				continue;
			mtx_lock(&sync_lock);
			SYNCREQDONE(hio);
			mtx_unlock(&sync_lock);
			cv_signal(&sync_cond);
			continue;
		}
		if (ggio->gctl_cmd == BIO_WRITE) {
			mtx_lock(&res->hr_amp_lock);
			if (activemap_need_sync(res->hr_amp, ggio->gctl_offset,
			    ggio->gctl_length)) {
				(void)hast_activemap_flush(res);
			}
			mtx_unlock(&res->hr_amp_lock);
		}
		if (!refcount_release(&hio->hio_countdown))
			continue;
		pjdlog_debug(2,
		    "remote_send: (%p) Moving request to the done queue.",
		    hio);
		QUEUE_INSERT2(hio, done);
	}
	/* NOTREACHED */
	return (NULL);
}

/*
 * Thread receives answer from secondary node and passes it to ggate_send
 * thread.
 */
static void *
remote_recv_thread(void *arg)
{
	struct hast_resource *res = arg;
	struct g_gate_ctl_io *ggio;
	struct hio *hio;
	struct nv *nv;
	unsigned int ncomp;
	uint64_t seq;
	int error;

	/* Remote component is 1 for now. */
	ncomp = 1;

	for (;;) {
		/* Wait until there is anything to receive. */
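		/*
		 * Replies may complete out of order; each reply carries the
		 * "seq" number of the request it answers, which is used
		 * below to find the matching hio on the recv list.
		 */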
		mtx_lock(&hio_recv_list_lock[ncomp]);
		while (TAILQ_EMPTY(&hio_recv_list[ncomp])) {
			pjdlog_debug(2, "remote_recv: No requests, waiting.");
			cv_wait(&hio_recv_list_cond[ncomp],
			    &hio_recv_list_lock[ncomp]);
		}
		mtx_unlock(&hio_recv_list_lock[ncomp]);
		rw_rlock(&hio_remote_lock[ncomp]);
		if (!ISCONNECTED(res, ncomp)) {
			rw_unlock(&hio_remote_lock[ncomp]);
			/*
			 * Connection is dead, so move all pending requests to
			 * the done queue (one-by-one).
			 */
			mtx_lock(&hio_recv_list_lock[ncomp]);
			hio = TAILQ_FIRST(&hio_recv_list[ncomp]);
			assert(hio != NULL);
			TAILQ_REMOVE(&hio_recv_list[ncomp], hio,
			    hio_next[ncomp]);
			mtx_unlock(&hio_recv_list_lock[ncomp]);
			goto done_queue;
		}
		if (hast_proto_recv_hdr(res->hr_remotein, &nv) < 0) {
			pjdlog_errno(LOG_ERR,
			    "Unable to receive reply header");
			rw_unlock(&hio_remote_lock[ncomp]);
			remote_close(res, ncomp);
			continue;
		}
		rw_unlock(&hio_remote_lock[ncomp]);
		seq = nv_get_uint64(nv, "seq");
		if (seq == 0) {
			pjdlog_error("Header contains no 'seq' field.");
			nv_free(nv);
			continue;
		}
		mtx_lock(&hio_recv_list_lock[ncomp]);
		TAILQ_FOREACH(hio, &hio_recv_list[ncomp], hio_next[ncomp]) {
			if (hio->hio_ggio.gctl_seq == seq) {
				TAILQ_REMOVE(&hio_recv_list[ncomp], hio,
				    hio_next[ncomp]);
				break;
			}
		}
		mtx_unlock(&hio_recv_list_lock[ncomp]);
		if (hio == NULL) {
			pjdlog_error("Found no request matching received 'seq' field (%ju).",
			    (uintmax_t)seq);
			nv_free(nv);
			continue;
		}
		error = nv_get_int16(nv, "error");
		if (error != 0) {
			/* Request failed on remote side. */
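			/*
			 * Record the remote error; ggate_send_thread() can
			 * still report success to the kernel if the other
			 * component completed the request.
			 */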
			hio->hio_errors[ncomp] = error;
			nv_free(nv);
			goto done_queue;
		}
		ggio = &hio->hio_ggio;
		switch (ggio->gctl_cmd) {
		case BIO_READ:
			rw_rlock(&hio_remote_lock[ncomp]);
			if (!ISCONNECTED(res, ncomp)) {
				rw_unlock(&hio_remote_lock[ncomp]);
				nv_free(nv);
				goto done_queue;
			}
			if (hast_proto_recv_data(res, res->hr_remotein, nv,
			    ggio->gctl_data, ggio->gctl_length) < 0) {
				hio->hio_errors[ncomp] = errno;
				pjdlog_errno(LOG_ERR,
				    "Unable to receive reply data");
				rw_unlock(&hio_remote_lock[ncomp]);
				nv_free(nv);
				remote_close(res, ncomp);
				goto done_queue;
			}
			rw_unlock(&hio_remote_lock[ncomp]);
			break;
		case BIO_WRITE:
		case BIO_DELETE:
		case BIO_FLUSH:
			break;
		default:
			assert(!"invalid condition");
			abort();
		}
		hio->hio_errors[ncomp] = 0;
		nv_free(nv);
done_queue:
		if (refcount_release(&hio->hio_countdown)) {
			if (ISSYNCREQ(hio)) {
				mtx_lock(&sync_lock);
				SYNCREQDONE(hio);
				mtx_unlock(&sync_lock);
				cv_signal(&sync_cond);
			} else {
				pjdlog_debug(2,
				    "remote_recv: (%p) Moving request to the done queue.",
				    hio);
				QUEUE_INSERT2(hio, done);
			}
		}
	}
	/* NOTREACHED */
	return (NULL);
}

/*
 * Thread sends answer to the kernel.
 */
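/*
 * This is also where per-write bookkeeping is finished: the activemap entry
 * is marked complete, the range lock taken in ggate_recv_thread() is
 * dropped, and localcnt is bumped on the first write that happens while the
 * remote node is disconnected.
 */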
static void *
ggate_send_thread(void *arg)
{
	struct hast_resource *res = arg;
	struct g_gate_ctl_io *ggio;
	struct hio *hio;
	unsigned int ii, ncomp, ncomps;

	ncomps = HAST_NCOMPONENTS;

	for (;;) {
		pjdlog_debug(2, "ggate_send: Taking request.");
		QUEUE_TAKE2(hio, done);
		pjdlog_debug(2, "ggate_send: (%p) Got request.", hio);
		ggio = &hio->hio_ggio;
		for (ii = 0; ii < ncomps; ii++) {
			if (hio->hio_errors[ii] == 0) {
				/*
				 * One successful request is enough to declare
				 * success.
				 */
				ggio->gctl_error = 0;
				break;
			}
		}
		if (ii == ncomps) {
			/*
			 * None of the requests were successful.
			 * Use first error.
			 */
			ggio->gctl_error = hio->hio_errors[0];
		}
		if (ggio->gctl_error == 0 && ggio->gctl_cmd == BIO_WRITE) {
			mtx_lock(&res->hr_amp_lock);
			activemap_write_complete(res->hr_amp,
			    ggio->gctl_offset, ggio->gctl_length);
			mtx_unlock(&res->hr_amp_lock);
		}
		if (ggio->gctl_cmd == BIO_WRITE) {
			/*
			 * Unlock range we locked.
			 */
			mtx_lock(&range_lock);
			rangelock_del(range_regular, ggio->gctl_offset,
			    ggio->gctl_length);
			if (range_sync_wait)
				cv_signal(&range_sync_cond);
			mtx_unlock(&range_lock);
			/*
			 * Bump local count if this is first write after
			 * connection failure with remote node.
			 */
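			/*
			 * A localcnt higher than the secondary's remotecnt
			 * records that the primary holds data the secondary
			 * has not seen, so the metadata comparison after a
			 * reconnect picks the primary as the synchronization
			 * source.
			 */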
			ncomp = 1;
			rw_rlock(&hio_remote_lock[ncomp]);
			if (!ISCONNECTED(res, ncomp)) {
				mtx_lock(&metadata_lock);
				if (res->hr_primary_localcnt ==
				    res->hr_secondary_remotecnt) {
					res->hr_primary_localcnt++;
					pjdlog_debug(1,
					    "Increasing localcnt to %ju.",
					    (uintmax_t)res->hr_primary_localcnt);
					(void)metadata_write(res);
				}
				mtx_unlock(&metadata_lock);
			}
			rw_unlock(&hio_remote_lock[ncomp]);
		}
		if (ioctl(res->hr_ggatefd, G_GATE_CMD_DONE, ggio) < 0)
			primary_exit(EX_OSERR, "G_GATE_CMD_DONE failed");
		pjdlog_debug(2,
		    "ggate_send: (%p) Moving request to the free queue.", hio);
		QUEUE_INSERT2(hio, free);
	}
	/* NOTREACHED */
	return (NULL);
}

/*
 * Thread synchronizes local and remote components.
 */
static void *
sync_thread(void *arg)
{
	struct hast_resource *res = arg;
	struct hio *hio;
	struct g_gate_ctl_io *ggio;
	unsigned int ii, ncomp, ncomps;
	off_t offset, length, synced;
	bool dorewind;
	int syncext;

	ncomps = HAST_NCOMPONENTS;
	dorewind = true;
	synced = 0;

	for (;;) {
		mtx_lock(&sync_lock);
		while (!sync_inprogress) {
			dorewind = true;
			synced = 0;
			cv_wait(&sync_cond, &sync_lock);
		}
		mtx_unlock(&sync_lock);
		/*
		 * Obtain offset at which we should synchronize.
		 * Rewind synchronization if needed.
		 */
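		/*
		 * activemap_sync_offset() walks the dirty extents: it returns
		 * the offset and length of the next chunk to copy, or a
		 * negative offset once every dirty extent has been covered;
		 * syncext identifies an extent that was just finished so it
		 * can be marked clean on disk.
		 */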
		mtx_lock(&res->hr_amp_lock);
		if (dorewind)
			activemap_sync_rewind(res->hr_amp);
		offset = activemap_sync_offset(res->hr_amp, &length, &syncext);
		if (syncext != -1) {
			/*
			 * We have synchronized the entire syncext extent, so
			 * we can mark it as clean now.
			 */
			if (activemap_extent_complete(res->hr_amp, syncext))
				(void)hast_activemap_flush(res);
		}
		mtx_unlock(&res->hr_amp_lock);
		if (dorewind) {
			dorewind = false;
			if (offset < 0)
				pjdlog_info("Nodes are in sync.");
			else {
				pjdlog_info("Synchronization started. %ju bytes to go.",
				    (uintmax_t)(res->hr_extentsize *
				    activemap_ndirty(res->hr_amp)));
			}
		}
		if (offset < 0) {
			mtx_lock(&sync_lock);
			sync_inprogress = false;
			mtx_unlock(&sync_lock);
			pjdlog_debug(1, "Nothing to synchronize.");
			/*
			 * Synchronization complete, make both localcnt and
			 * remotecnt equal.
			 */
			ncomp = 1;
			rw_rlock(&hio_remote_lock[ncomp]);
			if (ISCONNECTED(res, ncomp)) {
				if (synced > 0) {
					pjdlog_info("Synchronization complete. "
					    "%jd bytes synchronized.",
					    (intmax_t)synced);
				}
				mtx_lock(&metadata_lock);
				res->hr_syncsrc = HAST_SYNCSRC_UNDEF;
				res->hr_primary_localcnt =
				    res->hr_secondary_localcnt;
				res->hr_primary_remotecnt =
				    res->hr_secondary_remotecnt;
				pjdlog_debug(1,
				    "Setting localcnt to %ju and remotecnt to %ju.",
				    (uintmax_t)res->hr_primary_localcnt,
				    (uintmax_t)res->hr_primary_remotecnt);
				(void)metadata_write(res);
				mtx_unlock(&metadata_lock);
			} else if (synced > 0) {
				pjdlog_info("Synchronization interrupted. "
" 153332115b10SPawel Jakub Dawidek "%jd bytes synchronized so far.", 153432115b10SPawel Jakub Dawidek (intmax_t)synced); 153532115b10SPawel Jakub Dawidek } 153632115b10SPawel Jakub Dawidek rw_unlock(&hio_remote_lock[ncomp]); 153732115b10SPawel Jakub Dawidek continue; 153832115b10SPawel Jakub Dawidek } 153932115b10SPawel Jakub Dawidek pjdlog_debug(2, "sync: Taking free request."); 154032115b10SPawel Jakub Dawidek QUEUE_TAKE2(hio, free); 154132115b10SPawel Jakub Dawidek pjdlog_debug(2, "sync: (%p) Got free request.", hio); 154232115b10SPawel Jakub Dawidek /* 154332115b10SPawel Jakub Dawidek * Lock the range we are going to synchronize. We don't want 154432115b10SPawel Jakub Dawidek * race where someone writes between our read and write. 154532115b10SPawel Jakub Dawidek */ 154632115b10SPawel Jakub Dawidek for (;;) { 154732115b10SPawel Jakub Dawidek mtx_lock(&range_lock); 154832115b10SPawel Jakub Dawidek if (rangelock_islocked(range_regular, offset, length)) { 154932115b10SPawel Jakub Dawidek pjdlog_debug(2, 155032115b10SPawel Jakub Dawidek "sync: Range offset=%jd length=%jd locked.", 155132115b10SPawel Jakub Dawidek (intmax_t)offset, (intmax_t)length); 155232115b10SPawel Jakub Dawidek range_sync_wait = true; 155332115b10SPawel Jakub Dawidek cv_wait(&range_sync_cond, &range_lock); 155432115b10SPawel Jakub Dawidek range_sync_wait = false; 155532115b10SPawel Jakub Dawidek mtx_unlock(&range_lock); 155632115b10SPawel Jakub Dawidek continue; 155732115b10SPawel Jakub Dawidek } 155832115b10SPawel Jakub Dawidek if (rangelock_add(range_sync, offset, length) < 0) { 155932115b10SPawel Jakub Dawidek mtx_unlock(&range_lock); 156032115b10SPawel Jakub Dawidek pjdlog_debug(2, 156132115b10SPawel Jakub Dawidek "sync: Range offset=%jd length=%jd is already locked, waiting.", 156232115b10SPawel Jakub Dawidek (intmax_t)offset, (intmax_t)length); 156332115b10SPawel Jakub Dawidek sleep(1); 156432115b10SPawel Jakub Dawidek continue; 156532115b10SPawel Jakub Dawidek } 156632115b10SPawel Jakub Dawidek mtx_unlock(&range_lock); 156732115b10SPawel Jakub Dawidek break; 156832115b10SPawel Jakub Dawidek } 156932115b10SPawel Jakub Dawidek /* 157032115b10SPawel Jakub Dawidek * First read the data from synchronization source. 157132115b10SPawel Jakub Dawidek */ 157232115b10SPawel Jakub Dawidek SYNCREQ(hio); 157332115b10SPawel Jakub Dawidek ggio = &hio->hio_ggio; 157432115b10SPawel Jakub Dawidek ggio->gctl_cmd = BIO_READ; 157532115b10SPawel Jakub Dawidek ggio->gctl_offset = offset; 157632115b10SPawel Jakub Dawidek ggio->gctl_length = length; 157732115b10SPawel Jakub Dawidek ggio->gctl_error = 0; 157832115b10SPawel Jakub Dawidek for (ii = 0; ii < ncomps; ii++) 157932115b10SPawel Jakub Dawidek hio->hio_errors[ii] = EINVAL; 158032115b10SPawel Jakub Dawidek reqlog(LOG_DEBUG, 2, ggio, "sync: (%p) Sending sync request: ", 158132115b10SPawel Jakub Dawidek hio); 158232115b10SPawel Jakub Dawidek pjdlog_debug(2, "sync: (%p) Moving request to the send queue.", 158332115b10SPawel Jakub Dawidek hio); 158432115b10SPawel Jakub Dawidek mtx_lock(&metadata_lock); 158532115b10SPawel Jakub Dawidek if (res->hr_syncsrc == HAST_SYNCSRC_PRIMARY) { 158632115b10SPawel Jakub Dawidek /* 158732115b10SPawel Jakub Dawidek * This range is up-to-date on local component, 158832115b10SPawel Jakub Dawidek * so handle request locally. 158932115b10SPawel Jakub Dawidek */ 159032115b10SPawel Jakub Dawidek /* Local component is 0 for now. 
			ncomp = 0;
		} else /* if (res->hr_syncsrc == HAST_SYNCSRC_SECONDARY) */ {
			assert(res->hr_syncsrc == HAST_SYNCSRC_SECONDARY);
			/*
			 * This range is out-of-date on local component,
			 * so send request to the remote node.
			 */
			/* Remote component is 1 for now. */
			ncomp = 1;
		}
		mtx_unlock(&metadata_lock);
		refcount_init(&hio->hio_countdown, 1);
		QUEUE_INSERT1(hio, send, ncomp);

		/*
		 * Let's wait for READ to finish.
		 */
		mtx_lock(&sync_lock);
		while (!ISSYNCREQDONE(hio))
			cv_wait(&sync_cond, &sync_lock);
		mtx_unlock(&sync_lock);

		if (hio->hio_errors[ncomp] != 0) {
			pjdlog_error("Unable to read synchronization data: %s.",
			    strerror(hio->hio_errors[ncomp]));
			goto free_queue;
		}

		/*
		 * We read the data from synchronization source, now write it
		 * to synchronization target.
		 */
		SYNCREQ(hio);
		ggio->gctl_cmd = BIO_WRITE;
		for (ii = 0; ii < ncomps; ii++)
			hio->hio_errors[ii] = EINVAL;
		reqlog(LOG_DEBUG, 2, ggio, "sync: (%p) Sending sync request: ",
		    hio);
		pjdlog_debug(2, "sync: (%p) Moving request to the send queue.",
		    hio);
		mtx_lock(&metadata_lock);
		if (res->hr_syncsrc == HAST_SYNCSRC_PRIMARY) {
			/*
			 * This range is up-to-date on local component,
			 * so we update remote component.
			 */
			/* Remote component is 1 for now. */
			ncomp = 1;
		} else /* if (res->hr_syncsrc == HAST_SYNCSRC_SECONDARY) */ {
			assert(res->hr_syncsrc == HAST_SYNCSRC_SECONDARY);
			/*
			 * This range is out-of-date on local component,
			 * so we update it.
			 */
			/* Local component is 0 for now. */
			ncomp = 0;
		}
		mtx_unlock(&metadata_lock);

		pjdlog_debug(2, "sync: (%p) Moving request to the send queues.",
		    hio);
		refcount_init(&hio->hio_countdown, 1);
		QUEUE_INSERT1(hio, send, ncomp);

		/*
		 * Let's wait for WRITE to finish.
		 */
		mtx_lock(&sync_lock);
		while (!ISSYNCREQDONE(hio))
			cv_wait(&sync_cond, &sync_lock);
		mtx_unlock(&sync_lock);

		if (hio->hio_errors[ncomp] != 0) {
			pjdlog_error("Unable to write synchronization data: %s.",
			    strerror(hio->hio_errors[ncomp]));
			goto free_queue;
		}
free_queue:
		mtx_lock(&range_lock);
		rangelock_del(range_sync, offset, length);
		if (range_regular_wait)
			cv_signal(&range_regular_cond);
		mtx_unlock(&range_lock);

		synced += length;

		pjdlog_debug(2, "sync: (%p) Moving request to the free queue.",
		    hio);
		QUEUE_INSERT2(hio, free);
	}
	/* NOTREACHED */
	return (NULL);
}

static void
sighandler(int sig)
{
	bool unlock;

	switch (sig) {
	case SIGINT:
	case SIGTERM:
		sigexit_received = true;
		break;
	default:
		assert(!"invalid condition");
	}
	/*
	 * XXX: Racy, but if we cannot obtain hio_guard_lock here, we don't
	 * want to risk deadlock.
	 */
	unlock = mtx_trylock(&hio_guard_lock);
	cv_signal(&hio_guard_cond);
	if (unlock)
		mtx_unlock(&hio_guard_lock);
}

/*
 * Thread guards remote connections and reconnects when needed, handles
 * signals, etc.
 */
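/*
 * sighandler() above only sets sigexit_received and pokes hio_guard_cond;
 * the actual shutdown and all reconnect work happen here, outside of
 * signal context.
 */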
static void *
guard_thread(void *arg)
{
	struct hast_resource *res = arg;
	struct proto_conn *in, *out;
	unsigned int ii, ncomps;
	int timeout;

	ncomps = HAST_NCOMPONENTS;
	/* There is only one remote component for now. */
#define	ISREMOTE(no)	((no) == 1)

	for (;;) {
		if (sigexit_received) {
			primary_exitx(EX_OK,
			    "Termination signal received, exiting.");
		}
		/*
		 * If all the connections are fine, we sleep until someone
		 * wakes us up.
		 * If any connection is broken and we cannot reconnect, we
		 * sleep only for RECONNECT_SLEEP seconds so we can retry
		 * soon.
		 */
		timeout = 0;
		pjdlog_debug(2, "remote_guard: Checking connections.");
		mtx_lock(&hio_guard_lock);
		for (ii = 0; ii < ncomps; ii++) {
			if (!ISREMOTE(ii))
				continue;
			rw_rlock(&hio_remote_lock[ii]);
			if (ISCONNECTED(res, ii)) {
				assert(res->hr_remotein != NULL);
				assert(res->hr_remoteout != NULL);
				rw_unlock(&hio_remote_lock[ii]);
				pjdlog_debug(2,
				    "remote_guard: Connection to %s is ok.",
				    res->hr_remoteaddr);
			} else {
				assert(res->hr_remotein == NULL);
				assert(res->hr_remoteout == NULL);
				/*
				 * Upgrade the lock. It doesn't have to be
				 * atomic as no other thread can change
				 * connection status from disconnected to
				 * connected.
				 */
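				/*
				 * init_remote() below performs the handshake
				 * with the secondary; on success it hands back
				 * fresh incoming and outgoing connections,
				 * which are published under the write lock,
				 * and a new synchronization pass is started.
				 */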
				rw_unlock(&hio_remote_lock[ii]);
				pjdlog_debug(2,
				    "remote_guard: Reconnecting to %s.",
				    res->hr_remoteaddr);
				in = out = NULL;
				if (init_remote(res, &in, &out)) {
					rw_wlock(&hio_remote_lock[ii]);
					assert(res->hr_remotein == NULL);
					assert(res->hr_remoteout == NULL);
					assert(in != NULL && out != NULL);
					res->hr_remotein = in;
					res->hr_remoteout = out;
					rw_unlock(&hio_remote_lock[ii]);
					pjdlog_info("Successfully reconnected to %s.",
					    res->hr_remoteaddr);
					sync_start();
				} else {
					/* Both connections should be NULL. */
					assert(res->hr_remotein == NULL);
					assert(res->hr_remoteout == NULL);
					assert(in == NULL && out == NULL);
					pjdlog_debug(2,
					    "remote_guard: Reconnect to %s failed.",
					    res->hr_remoteaddr);
					timeout = RECONNECT_SLEEP;
				}
			}
		}
		(void)cv_timedwait(&hio_guard_cond, &hio_guard_lock, timeout);
		mtx_unlock(&hio_guard_lock);
	}
#undef	ISREMOTE
	/* NOTREACHED */
	return (NULL);
}