/*-
 * Copyright (c) 2009 The FreeBSD Foundation
 * All rights reserved.
 *
 * This software was developed by Pawel Jakub Dawidek under sponsorship from
 * the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/types.h>
#include <sys/time.h>
#include <sys/bio.h>
#include <sys/disk.h>
#include <sys/refcount.h>
#include <sys/stat.h>

#include <geom/gate/g_gate.h>

#include <assert.h>
#include <err.h>
#include <errno.h>
#include <fcntl.h>
#include <libgeom.h>
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sysexits.h>
#include <unistd.h>

#include <activemap.h>
#include <nv.h>
#include <rangelock.h>

#include "control.h"
#include "hast.h"
#include "hast_proto.h"
#include "hastd.h"
#include "metadata.h"
#include "proto.h"
#include "pjdlog.h"
#include "subr.h"
#include "synch.h"

struct hio {
	/*
	 * Number of components we are still waiting for.
	 * When this field goes to 0, we can send the request back to the
	 * kernel. Each component has to decrease this counter by one
	 * even on failure.
	 */
	unsigned int		 hio_countdown;
	/*
	 * Each component has a place to store its own error.
	 * Once the request is handled by all components we can decide if the
	 * request overall is successful or not.
	 */
	int			*hio_errors;
	/*
	 * Structure used to communicate with the GEOM Gate class.
	 */
	struct g_gate_ctl_io	 hio_ggio;
	TAILQ_ENTRY(hio)	*hio_next;
};
#define	hio_free_next	hio_next[0]
#define	hio_done_next	hio_next[0]

/*
 * The free list holds unused structures. When the free list is empty, we
 * have to wait until some in-progress requests are freed.
 */
static TAILQ_HEAD(, hio) hio_free_list;
static pthread_mutex_t hio_free_list_lock;
static pthread_cond_t hio_free_list_cond;
/*
 * There is one send list for every component. One request is placed on all
 * send lists - each component gets the same request, but each component is
 * responsible for managing its own send list.
 */
static TAILQ_HEAD(, hio) *hio_send_list;
static pthread_mutex_t *hio_send_list_lock;
static pthread_cond_t *hio_send_list_cond;
/*
 * There is one recv list for every component, although local components don't
 * use recv lists as local requests are done synchronously.
 */
static TAILQ_HEAD(, hio) *hio_recv_list;
static pthread_mutex_t *hio_recv_list_lock;
static pthread_cond_t *hio_recv_list_cond;
/*
 * A request is placed on the done list by the slowest component (the one that
 * decreased hio_countdown from 1 to 0).
 */
static TAILQ_HEAD(, hio) hio_done_list;
static pthread_mutex_t hio_done_list_lock;
static pthread_cond_t hio_done_list_cond;
/*
 * Structures below are for interaction with the sync thread.
 */
static bool sync_inprogress;
static pthread_mutex_t sync_lock;
static pthread_cond_t sync_cond;
/*
 * The lock below allows us to synchronize access to remote connections.
 */
static pthread_rwlock_t *hio_remote_lock;
static pthread_mutex_t hio_guard_lock;
static pthread_cond_t hio_guard_cond;

/*
 * Lock to synchronize metadata updates. Also synchronizes access to the
 * hr_primary_localcnt and hr_primary_remotecnt fields.
 */
static pthread_mutex_t metadata_lock;

/*
 * Maximum number of outstanding I/O requests.
 */
#define	HAST_HIO_MAX	256
/*
 * Number of components. At this point there are only two components: local
 * and remote, but in the future it might be possible to use multiple local
 * and remote components.
 */
#define	HAST_NCOMPONENTS	2
/*
 * Number of seconds to sleep before the next reconnect try.
 */
#define	RECONNECT_SLEEP		5

#define	ISCONNECTED(res, no)	\
	((res)->hr_remotein != NULL && (res)->hr_remoteout != NULL)

#define	QUEUE_INSERT1(hio, name, ncomp)	do {				\
	bool _wakeup;							\
									\
	mtx_lock(&hio_##name##_list_lock[(ncomp)]);			\
	_wakeup = TAILQ_EMPTY(&hio_##name##_list[(ncomp)]);		\
	TAILQ_INSERT_TAIL(&hio_##name##_list[(ncomp)], (hio),		\
	    hio_next[(ncomp)]);						\
	mtx_unlock(&hio_##name##_list_lock[ncomp]);			\
	if (_wakeup)							\
		cv_signal(&hio_##name##_list_cond[(ncomp)]);		\
} while (0)
#define	QUEUE_INSERT2(hio, name)	do {				\
	bool _wakeup;							\
									\
	mtx_lock(&hio_##name##_list_lock);				\
	_wakeup = TAILQ_EMPTY(&hio_##name##_list);			\
	TAILQ_INSERT_TAIL(&hio_##name##_list, (hio), hio_##name##_next);\
	mtx_unlock(&hio_##name##_list_lock);				\
	if (_wakeup)							\
		cv_signal(&hio_##name##_list_cond);			\
} while (0)
#define	QUEUE_TAKE1(hio, name, ncomp)	do {				\
	mtx_lock(&hio_##name##_list_lock[(ncomp)]);			\
	while (((hio) = TAILQ_FIRST(&hio_##name##_list[(ncomp)])) == NULL) { \
		cv_wait(&hio_##name##_list_cond[(ncomp)],		\
		    &hio_##name##_list_lock[(ncomp)]);			\
	}								\
	TAILQ_REMOVE(&hio_##name##_list[(ncomp)], (hio),		\
	    hio_next[(ncomp)]);						\
	mtx_unlock(&hio_##name##_list_lock[(ncomp)]);			\
} while (0)
#define	QUEUE_TAKE2(hio, name)	do {					\
	mtx_lock(&hio_##name##_list_lock);				\
	while (((hio) = TAILQ_FIRST(&hio_##name##_list)) == NULL) {	\
		cv_wait(&hio_##name##_list_cond,			\
		    &hio_##name##_list_lock);				\
	}								\
	TAILQ_REMOVE(&hio_##name##_list, (hio), hio_##name##_next);	\
	mtx_unlock(&hio_##name##_list_lock);				\
} while (0)

#define	SYNCREQ(hio)		do { (hio)->hio_ggio.gctl_unit = -1; } while (0)
#define	ISSYNCREQ(hio)		((hio)->hio_ggio.gctl_unit == -1)
#define	SYNCREQDONE(hio)	do { (hio)->hio_ggio.gctl_unit = -2; } while (0)
#define	ISSYNCREQDONE(hio)	((hio)->hio_ggio.gctl_unit == -2)
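
/*
 * Typical flow of a request through the queues defined by the macros above
 * (see ggate_recv_thread() and local_send_thread() below): a worker takes an
 * unused request with QUEUE_TAKE2(hio, free), hands it to one or more
 * components with QUEUE_INSERT1(hio, send, ncomp), and the component that
 * finishes last moves it to the done queue with QUEUE_INSERT2(hio, done).
 */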

static struct hast_resource *gres;

static pthread_mutex_t range_lock;
static struct rangelocks *range_regular;
static bool range_regular_wait;
static pthread_cond_t range_regular_cond;
static struct rangelocks *range_sync;
static bool range_sync_wait;
static pthread_cond_t range_sync_cond;

static void *ggate_recv_thread(void *arg);
static void *local_send_thread(void *arg);
static void *remote_send_thread(void *arg);
static void *remote_recv_thread(void *arg);
static void *ggate_send_thread(void *arg);
static void *sync_thread(void *arg);
static void *guard_thread(void *arg);

static void sighandler(int sig);

static void
cleanup(struct hast_resource *res)
{
	int rerrno;

	/* Remember errno. */
	rerrno = errno;

	/*
	 * Close descriptor to /dev/hast/<name>
	 * to work around a race in the kernel.
	 */
	close(res->hr_localfd);

	/* Destroy ggate provider if we created one. */
	if (res->hr_ggateunit >= 0) {
		struct g_gate_ctl_destroy ggiod;

		ggiod.gctl_version = G_GATE_VERSION;
		ggiod.gctl_unit = res->hr_ggateunit;
		ggiod.gctl_force = 1;
		if (ioctl(res->hr_ggatefd, G_GATE_CMD_DESTROY, &ggiod) < 0) {
			pjdlog_warning("Unable to destroy hast/%s device",
			    res->hr_provname);
		}
		res->hr_ggateunit = -1;
	}

	/* Restore errno. */
	errno = rerrno;
}

static void
primary_exit(int exitcode, const char *fmt, ...)
{
	va_list ap;

	assert(exitcode != EX_OK);
	va_start(ap, fmt);
	pjdlogv_errno(LOG_ERR, fmt, ap);
	va_end(ap);
	cleanup(gres);
	exit(exitcode);
}

static void
primary_exitx(int exitcode, const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	pjdlogv(exitcode == EX_OK ? LOG_INFO : LOG_ERR, fmt, ap);
	va_end(ap);
	cleanup(gres);
	exit(exitcode);
}

static int
hast_activemap_flush(struct hast_resource *res)
{
	const unsigned char *buf;
	size_t size;

	buf = activemap_bitmap(res->hr_amp, &size);
	assert(buf != NULL);
	assert((size % res->hr_local_sectorsize) == 0);
	if (pwrite(res->hr_localfd, buf, size, METADATA_SIZE) !=
	    (ssize_t)size) {
		KEEP_ERRNO(pjdlog_errno(LOG_ERR,
		    "Unable to flush activemap to disk"));
		return (-1);
	}
	return (0);
}
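
/*
 * The on-disk activemap starts right after the metadata (at offset
 * METADATA_SIZE) and is always written as a whole number of sectors;
 * writers mark extents dirty under hr_amp_lock and flush the map before
 * handing the request to the send queues (see the BIO_WRITE case in
 * ggate_recv_thread() below).
 */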

static void
init_environment(struct hast_resource *res __unused)
{
	struct hio *hio;
	unsigned int ii, ncomps;

	/*
	 * In the future it might be a per-resource value.
	 */
	ncomps = HAST_NCOMPONENTS;

	/*
	 * Allocate memory needed by lists.
	 */
	hio_send_list = malloc(sizeof(hio_send_list[0]) * ncomps);
	if (hio_send_list == NULL) {
		primary_exitx(EX_TEMPFAIL,
		    "Unable to allocate %zu bytes of memory for send lists.",
		    sizeof(hio_send_list[0]) * ncomps);
	}
	hio_send_list_lock = malloc(sizeof(hio_send_list_lock[0]) * ncomps);
	if (hio_send_list_lock == NULL) {
		primary_exitx(EX_TEMPFAIL,
		    "Unable to allocate %zu bytes of memory for send list locks.",
		    sizeof(hio_send_list_lock[0]) * ncomps);
	}
	hio_send_list_cond = malloc(sizeof(hio_send_list_cond[0]) * ncomps);
	if (hio_send_list_cond == NULL) {
		primary_exitx(EX_TEMPFAIL,
		    "Unable to allocate %zu bytes of memory for send list condition variables.",
		    sizeof(hio_send_list_cond[0]) * ncomps);
	}
	hio_recv_list = malloc(sizeof(hio_recv_list[0]) * ncomps);
	if (hio_recv_list == NULL) {
		primary_exitx(EX_TEMPFAIL,
		    "Unable to allocate %zu bytes of memory for recv lists.",
		    sizeof(hio_recv_list[0]) * ncomps);
	}
	hio_recv_list_lock = malloc(sizeof(hio_recv_list_lock[0]) * ncomps);
	if (hio_recv_list_lock == NULL) {
		primary_exitx(EX_TEMPFAIL,
		    "Unable to allocate %zu bytes of memory for recv list locks.",
		    sizeof(hio_recv_list_lock[0]) * ncomps);
	}
	hio_recv_list_cond = malloc(sizeof(hio_recv_list_cond[0]) * ncomps);
	if (hio_recv_list_cond == NULL) {
		primary_exitx(EX_TEMPFAIL,
		    "Unable to allocate %zu bytes of memory for recv list condition variables.",
		    sizeof(hio_recv_list_cond[0]) * ncomps);
	}
	hio_remote_lock = malloc(sizeof(hio_remote_lock[0]) * ncomps);
	if (hio_remote_lock == NULL) {
		primary_exitx(EX_TEMPFAIL,
		    "Unable to allocate %zu bytes of memory for remote connection locks.",
		    sizeof(hio_remote_lock[0]) * ncomps);
	}

	/*
	 * Initialize lists, their locks and their condition variables.
	 */
	TAILQ_INIT(&hio_free_list);
	mtx_init(&hio_free_list_lock);
	cv_init(&hio_free_list_cond);
	for (ii = 0; ii < HAST_NCOMPONENTS; ii++) {
		TAILQ_INIT(&hio_send_list[ii]);
		mtx_init(&hio_send_list_lock[ii]);
		cv_init(&hio_send_list_cond[ii]);
		TAILQ_INIT(&hio_recv_list[ii]);
		mtx_init(&hio_recv_list_lock[ii]);
		cv_init(&hio_recv_list_cond[ii]);
		rw_init(&hio_remote_lock[ii]);
	}
	TAILQ_INIT(&hio_done_list);
	mtx_init(&hio_done_list_lock);
	cv_init(&hio_done_list_cond);
	mtx_init(&hio_guard_lock);
	cv_init(&hio_guard_cond);
	mtx_init(&metadata_lock);

	/*
	 * Allocate request pool and initialize requests.
	 */
	for (ii = 0; ii < HAST_HIO_MAX; ii++) {
		hio = malloc(sizeof(*hio));
		if (hio == NULL) {
			primary_exitx(EX_TEMPFAIL,
			    "Unable to allocate %zu bytes of memory for hio request.",
			    sizeof(*hio));
		}
		hio->hio_countdown = 0;
		hio->hio_errors = malloc(sizeof(hio->hio_errors[0]) * ncomps);
		if (hio->hio_errors == NULL) {
			primary_exitx(EX_TEMPFAIL,
			    "Unable to allocate %zu bytes of memory for hio errors.",
			    sizeof(hio->hio_errors[0]) * ncomps);
		}
		hio->hio_next = malloc(sizeof(hio->hio_next[0]) * ncomps);
		if (hio->hio_next == NULL) {
			primary_exitx(EX_TEMPFAIL,
			    "Unable to allocate %zu bytes of memory for hio_next field.",
			    sizeof(hio->hio_next[0]) * ncomps);
		}
		hio->hio_ggio.gctl_version = G_GATE_VERSION;
		hio->hio_ggio.gctl_data = malloc(MAXPHYS);
		if (hio->hio_ggio.gctl_data == NULL) {
			primary_exitx(EX_TEMPFAIL,
			    "Unable to allocate %zu bytes of memory for gctl_data.",
			    MAXPHYS);
		}
		hio->hio_ggio.gctl_length = MAXPHYS;
		hio->hio_ggio.gctl_error = 0;
		TAILQ_INSERT_HEAD(&hio_free_list, hio, hio_free_next);
	}
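
	/*
	 * Note that each of the HAST_HIO_MAX preallocated requests carries a
	 * MAXPHYS-sized data buffer, so the free list both bounds memory use
	 * and caps the number of I/O requests in flight at any time.
	 */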

	/*
	 * Turn on signal handling.
	 */
	signal(SIGINT, sighandler);
	signal(SIGTERM, sighandler);
}

static void
init_local(struct hast_resource *res)
{
	unsigned char *buf;
	size_t mapsize;

	if (metadata_read(res, true) < 0)
		exit(EX_NOINPUT);
	mtx_init(&res->hr_amp_lock);
	if (activemap_init(&res->hr_amp, res->hr_datasize, res->hr_extentsize,
	    res->hr_local_sectorsize, res->hr_keepdirty) < 0) {
		primary_exit(EX_TEMPFAIL, "Unable to create activemap");
	}
	mtx_init(&range_lock);
	cv_init(&range_regular_cond);
	if (rangelock_init(&range_regular) < 0)
		primary_exit(EX_TEMPFAIL, "Unable to create regular range lock");
	cv_init(&range_sync_cond);
	if (rangelock_init(&range_sync) < 0)
		primary_exit(EX_TEMPFAIL, "Unable to create sync range lock");
	mapsize = activemap_ondisk_size(res->hr_amp);
	buf = calloc(1, mapsize);
	if (buf == NULL) {
		primary_exitx(EX_TEMPFAIL,
		    "Unable to allocate buffer for activemap.");
	}
	if (pread(res->hr_localfd, buf, mapsize, METADATA_SIZE) !=
	    (ssize_t)mapsize) {
		primary_exit(EX_NOINPUT, "Unable to read activemap");
	}
	activemap_copyin(res->hr_amp, buf, mapsize);
	free(buf);
	if (res->hr_resuid != 0)
		return;
	/*
	 * We're using the provider for the first time, so we have to generate
	 * a resource unique identifier and initialize local and remote counts.
	 */
	arc4random_buf(&res->hr_resuid, sizeof(res->hr_resuid));
	res->hr_primary_localcnt = 1;
	res->hr_primary_remotecnt = 0;
	if (metadata_write(res) < 0)
		exit(EX_NOINPUT);
}

static bool
init_remote(struct hast_resource *res, struct proto_conn **inp,
    struct proto_conn **outp)
{
	struct proto_conn *in, *out;
	struct nv *nvout, *nvin;
	const unsigned char *token;
	unsigned char *map;
	const char *errmsg;
	int32_t extentsize;
	int64_t datasize;
	uint32_t mapsize;
	size_t size;

	assert((inp == NULL && outp == NULL) || (inp != NULL && outp != NULL));

	in = out = NULL;

	/* Prepare outgoing connection with remote node. */
	if (proto_client(res->hr_remoteaddr, &out) < 0) {
		primary_exit(EX_TEMPFAIL, "Unable to create connection to %s",
		    res->hr_remoteaddr);
	}
	/* Try to connect, but accept failure. */
	if (proto_connect(out) < 0) {
		pjdlog_errno(LOG_WARNING, "Unable to connect to %s",
		    res->hr_remoteaddr);
		goto close;
	}
	/* Error in setting timeout is not critical, but why should it fail? */
	if (proto_timeout(out, res->hr_timeout) < 0)
		pjdlog_errno(LOG_WARNING, "Unable to set connection timeout");
	/*
	 * First handshake step.
	 * Setup outgoing connection with remote node.
	 */
	nvout = nv_alloc();
	nv_add_string(nvout, res->hr_name, "resource");
	if (nv_error(nvout) != 0) {
		pjdlog_common(LOG_WARNING, 0, nv_error(nvout),
		    "Unable to allocate header for connection with %s",
		    res->hr_remoteaddr);
		nv_free(nvout);
		goto close;
	}
	if (hast_proto_send(res, out, nvout, NULL, 0) < 0) {
		pjdlog_errno(LOG_WARNING,
		    "Unable to send handshake header to %s",
		    res->hr_remoteaddr);
		nv_free(nvout);
		goto close;
	}
	nv_free(nvout);
	if (hast_proto_recv_hdr(out, &nvin) < 0) {
		pjdlog_errno(LOG_WARNING,
		    "Unable to receive handshake header from %s",
		    res->hr_remoteaddr);
		goto close;
	}
	errmsg = nv_get_string(nvin, "errmsg");
	if (errmsg != NULL) {
		pjdlog_warning("%s", errmsg);
		nv_free(nvin);
		goto close;
	}
	token = nv_get_uint8_array(nvin, &size, "token");
	if (token == NULL) {
		pjdlog_warning("Handshake header from %s has no 'token' field.",
		    res->hr_remoteaddr);
		nv_free(nvin);
		goto close;
	}
	if (size != sizeof(res->hr_token)) {
		pjdlog_warning("Handshake header from %s contains 'token' of wrong size (got %zu, expected %zu).",
		    res->hr_remoteaddr, size, sizeof(res->hr_token));
		nv_free(nvin);
		goto close;
	}
	bcopy(token, res->hr_token, sizeof(res->hr_token));
	nv_free(nvin);

	/*
	 * Second handshake step.
	 * Setup incoming connection with remote node.
	 */
	if (proto_client(res->hr_remoteaddr, &in) < 0) {
		pjdlog_errno(LOG_WARNING, "Unable to create connection to %s",
		    res->hr_remoteaddr);
	}
	/* Try to connect, but accept failure. */
	if (proto_connect(in) < 0) {
		pjdlog_errno(LOG_WARNING, "Unable to connect to %s",
		    res->hr_remoteaddr);
		goto close;
	}
	/* Error in setting timeout is not critical, but why should it fail? */
	if (proto_timeout(in, res->hr_timeout) < 0)
		pjdlog_errno(LOG_WARNING, "Unable to set connection timeout");
	nvout = nv_alloc();
	nv_add_string(nvout, res->hr_name, "resource");
	nv_add_uint8_array(nvout, res->hr_token, sizeof(res->hr_token),
	    "token");
	nv_add_uint64(nvout, res->hr_resuid, "resuid");
	nv_add_uint64(nvout, res->hr_primary_localcnt, "localcnt");
	nv_add_uint64(nvout, res->hr_primary_remotecnt, "remotecnt");
	if (nv_error(nvout) != 0) {
		pjdlog_common(LOG_WARNING, 0, nv_error(nvout),
		    "Unable to allocate header for connection with %s",
		    res->hr_remoteaddr);
		nv_free(nvout);
		goto close;
	}
	if (hast_proto_send(res, in, nvout, NULL, 0) < 0) {
		pjdlog_errno(LOG_WARNING,
		    "Unable to send handshake header to %s",
		    res->hr_remoteaddr);
		nv_free(nvout);
		goto close;
	}
	nv_free(nvout);
	if (hast_proto_recv_hdr(out, &nvin) < 0) {
		pjdlog_errno(LOG_WARNING,
		    "Unable to receive handshake header from %s",
		    res->hr_remoteaddr);
		goto close;
	}
	errmsg = nv_get_string(nvin, "errmsg");
	if (errmsg != NULL) {
		pjdlog_warning("%s", errmsg);
		nv_free(nvin);
		goto close;
	}
	datasize = nv_get_int64(nvin, "datasize");
	if (datasize != res->hr_datasize) {
		pjdlog_warning("Data size differs between nodes (local=%jd, remote=%jd).",
		    (intmax_t)res->hr_datasize, (intmax_t)datasize);
		nv_free(nvin);
		goto close;
	}
	extentsize = nv_get_int32(nvin, "extentsize");
	if (extentsize != res->hr_extentsize) {
		pjdlog_warning("Extent size differs between nodes (local=%zd, remote=%zd).",
		    (ssize_t)res->hr_extentsize, (ssize_t)extentsize);
		nv_free(nvin);
		goto close;
	}
	res->hr_secondary_localcnt = nv_get_uint64(nvin, "localcnt");
	res->hr_secondary_remotecnt = nv_get_uint64(nvin, "remotecnt");
	res->hr_syncsrc = nv_get_uint8(nvin, "syncsrc");
	map = NULL;
	mapsize = nv_get_uint32(nvin, "mapsize");
	if (mapsize > 0) {
		map = malloc(mapsize);
		if (map == NULL) {
			pjdlog_error("Unable to allocate memory for remote activemap (mapsize=%ju).",
			    (uintmax_t)mapsize);
			nv_free(nvin);
			goto close;
		}
		/*
		 * The remote node has some dirty extents of its own; let's
		 * download its activemap.
		 */
		if (hast_proto_recv_data(res, out, nvin, map,
		    mapsize) < 0) {
			pjdlog_errno(LOG_ERR,
			    "Unable to receive remote activemap");
			nv_free(nvin);
			free(map);
			goto close;
		}
		/*
		 * Merge local and remote bitmaps.
		 */
		activemap_merge(res->hr_amp, map, mapsize);
		free(map);
		/*
		 * Now that we have merged the bitmaps from both nodes, flush
		 * the result to disk before we start to synchronize.
		 */
		(void)hast_activemap_flush(res);
	}
	pjdlog_info("Connected to %s.", res->hr_remoteaddr);
	if (inp != NULL && outp != NULL) {
		*inp = in;
		*outp = out;
	} else {
		res->hr_remotein = in;
		res->hr_remoteout = out;
	}
	return (true);
close:
	proto_close(out);
	if (in != NULL)
		proto_close(in);
	return (false);
}

static void
sync_start(void)
{

	mtx_lock(&sync_lock);
	sync_inprogress = true;
	mtx_unlock(&sync_lock);
	cv_signal(&sync_cond);
}
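
/*
 * sync_start() above only marks synchronization as in progress and signals
 * sync_cond for the sync thread; remote_close() below clears sync_inprogress
 * again when the remote connection is lost.
 */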

static void
init_ggate(struct hast_resource *res)
{
	struct g_gate_ctl_create ggiocreate;
	struct g_gate_ctl_cancel ggiocancel;

	/*
	 * We communicate with ggate via /dev/ggctl. Open it.
	 */
	res->hr_ggatefd = open("/dev/" G_GATE_CTL_NAME, O_RDWR);
	if (res->hr_ggatefd < 0)
		primary_exit(EX_OSFILE, "Unable to open /dev/" G_GATE_CTL_NAME);
	/*
	 * Create provider before trying to connect, as connection failure
	 * is not critical, but may take some time.
	 */
	ggiocreate.gctl_version = G_GATE_VERSION;
	ggiocreate.gctl_mediasize = res->hr_datasize;
	ggiocreate.gctl_sectorsize = res->hr_local_sectorsize;
	ggiocreate.gctl_flags = 0;
	ggiocreate.gctl_maxcount = G_GATE_MAX_QUEUE_SIZE;
	ggiocreate.gctl_timeout = 0;
	ggiocreate.gctl_unit = G_GATE_NAME_GIVEN;
	snprintf(ggiocreate.gctl_name, sizeof(ggiocreate.gctl_name), "hast/%s",
	    res->hr_provname);
	bzero(ggiocreate.gctl_info, sizeof(ggiocreate.gctl_info));
	if (ioctl(res->hr_ggatefd, G_GATE_CMD_CREATE, &ggiocreate) == 0) {
		pjdlog_info("Device hast/%s created.", res->hr_provname);
		res->hr_ggateunit = ggiocreate.gctl_unit;
		return;
	}
	if (errno != EEXIST) {
		primary_exit(EX_OSERR, "Unable to create hast/%s device",
		    res->hr_provname);
	}
	pjdlog_debug(1,
	    "Device hast/%s already exists, we will try to take it over.",
	    res->hr_provname);
	/*
	 * If we received EEXIST, we assume that the process that created the
	 * provider died and didn't clean up. In that case we will start from
	 * where it left off.
	 */
	ggiocancel.gctl_version = G_GATE_VERSION;
	ggiocancel.gctl_unit = G_GATE_NAME_GIVEN;
	snprintf(ggiocancel.gctl_name, sizeof(ggiocancel.gctl_name), "hast/%s",
	    res->hr_provname);
	if (ioctl(res->hr_ggatefd, G_GATE_CMD_CANCEL, &ggiocancel) == 0) {
		pjdlog_info("Device hast/%s recovered.", res->hr_provname);
		res->hr_ggateunit = ggiocancel.gctl_unit;
		return;
	}
	primary_exit(EX_OSERR, "Unable to take over hast/%s device",
	    res->hr_provname);
}
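
/*
 * hastd_primary() below runs in the parent hastd process: it forks a worker
 * for the resource, the parent merely records the worker's pid, and the
 * child performs init_local()/init_remote()/init_ggate()/init_environment(),
 * spawns the worker threads and then runs guard_thread() itself.
 */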

void
hastd_primary(struct hast_resource *res)
{
	pthread_t td;
	pid_t pid;
	int error;

	gres = res;

	/*
	 * Create communication channel between parent and child.
	 */
	if (proto_client("socketpair://", &res->hr_ctrl) < 0) {
		KEEP_ERRNO((void)pidfile_remove(pfh));
		primary_exit(EX_OSERR,
		    "Unable to create control sockets between parent and child");
	}

	pid = fork();
	if (pid < 0) {
		KEEP_ERRNO((void)pidfile_remove(pfh));
		primary_exit(EX_TEMPFAIL, "Unable to fork");
	}

	if (pid > 0) {
		/* This is parent. */
		res->hr_workerpid = pid;
		return;
	}
	(void)pidfile_close(pfh);

	setproctitle("%s (primary)", res->hr_name);

	init_local(res);
	if (init_remote(res, NULL, NULL))
		sync_start();
	init_ggate(res);
	init_environment(res);
	error = pthread_create(&td, NULL, ggate_recv_thread, res);
	assert(error == 0);
	error = pthread_create(&td, NULL, local_send_thread, res);
	assert(error == 0);
	error = pthread_create(&td, NULL, remote_send_thread, res);
	assert(error == 0);
	error = pthread_create(&td, NULL, remote_recv_thread, res);
	assert(error == 0);
	error = pthread_create(&td, NULL, ggate_send_thread, res);
	assert(error == 0);
	error = pthread_create(&td, NULL, sync_thread, res);
	assert(error == 0);
	error = pthread_create(&td, NULL, ctrl_thread, res);
	assert(error == 0);
	(void)guard_thread(res);
}

static void
reqlog(int loglevel, int debuglevel, struct g_gate_ctl_io *ggio,
    const char *fmt, ...)
{
	char msg[1024];
	va_list ap;
	int len;

	va_start(ap, fmt);
	len = vsnprintf(msg, sizeof(msg), fmt, ap);
	va_end(ap);
	if ((size_t)len < sizeof(msg)) {
		switch (ggio->gctl_cmd) {
		case BIO_READ:
			(void)snprintf(msg + len, sizeof(msg) - len,
			    "READ(%ju, %ju).", (uintmax_t)ggio->gctl_offset,
			    (uintmax_t)ggio->gctl_length);
			break;
		case BIO_DELETE:
			(void)snprintf(msg + len, sizeof(msg) - len,
			    "DELETE(%ju, %ju).", (uintmax_t)ggio->gctl_offset,
			    (uintmax_t)ggio->gctl_length);
			break;
		case BIO_FLUSH:
			(void)snprintf(msg + len, sizeof(msg) - len, "FLUSH.");
			break;
		case BIO_WRITE:
			(void)snprintf(msg + len, sizeof(msg) - len,
			    "WRITE(%ju, %ju).", (uintmax_t)ggio->gctl_offset,
			    (uintmax_t)ggio->gctl_length);
			break;
		default:
			(void)snprintf(msg + len, sizeof(msg) - len,
			    "UNKNOWN(%u).", (unsigned int)ggio->gctl_cmd);
			break;
		}
	}
	pjdlog_common(loglevel, debuglevel, -1, "%s", msg);
}

static void
remote_close(struct hast_resource *res, int ncomp)
{

	rw_wlock(&hio_remote_lock[ncomp]);
	/*
	 * A race is possible between dropping the rlock and acquiring the
	 * wlock - another thread could have closed the connection in between.
	 */
	if (!ISCONNECTED(res, ncomp)) {
		assert(res->hr_remotein == NULL);
		assert(res->hr_remoteout == NULL);
		rw_unlock(&hio_remote_lock[ncomp]);
		return;
	}

	assert(res->hr_remotein != NULL);
	assert(res->hr_remoteout != NULL);

	pjdlog_debug(2, "Closing old incoming connection to %s.",
	    res->hr_remoteaddr);
	proto_close(res->hr_remotein);
	res->hr_remotein = NULL;
	pjdlog_debug(2, "Closing old outgoing connection to %s.",
	    res->hr_remoteaddr);
	proto_close(res->hr_remoteout);
	res->hr_remoteout = NULL;

	rw_unlock(&hio_remote_lock[ncomp]);

	/*
	 * Stop synchronization if in-progress.
	 */
	mtx_lock(&sync_lock);
	if (sync_inprogress)
		sync_inprogress = false;
	mtx_unlock(&sync_lock);

	/*
	 * Wake up guard thread, so it can immediately start reconnect.
	 */
	mtx_lock(&hio_guard_lock);
	cv_signal(&hio_guard_cond);
	mtx_unlock(&hio_guard_lock);
}

/*
 * Thread receives ggate I/O requests from the kernel and passes them to
 * appropriate threads:
 * WRITE - always goes to both local_send and remote_send threads
 * READ (when the block is up-to-date on local component) -
 *	only local_send thread
 * READ (when the block isn't up-to-date on local component) -
 *	only remote_send thread
 * DELETE - always goes to both local_send and remote_send threads
 * FLUSH - always goes to both local_send and remote_send threads
 */
static void *
ggate_recv_thread(void *arg)
{
	struct hast_resource *res = arg;
	struct g_gate_ctl_io *ggio;
	struct hio *hio;
	unsigned int ii, ncomp, ncomps;
	int error;

	ncomps = HAST_NCOMPONENTS;

	for (;;) {
		pjdlog_debug(2, "ggate_recv: Taking free request.");
		QUEUE_TAKE2(hio, free);
		pjdlog_debug(2, "ggate_recv: (%p) Got free request.", hio);
		ggio = &hio->hio_ggio;
		ggio->gctl_unit = res->hr_ggateunit;
		ggio->gctl_length = MAXPHYS;
		ggio->gctl_error = 0;
		pjdlog_debug(2,
		    "ggate_recv: (%p) Waiting for request from the kernel.",
		    hio);
		if (ioctl(res->hr_ggatefd, G_GATE_CMD_START, ggio) < 0) {
			if (sigexit_received)
				pthread_exit(NULL);
			primary_exit(EX_OSERR, "G_GATE_CMD_START failed");
		}
		error = ggio->gctl_error;
		switch (error) {
		case 0:
			break;
		case ECANCELED:
			/* Exit gracefully. */
			if (!sigexit_received) {
				pjdlog_debug(2,
				    "ggate_recv: (%p) Received cancel from the kernel.",
				    hio);
				pjdlog_info("Received cancel from the kernel, exiting.");
			}
			pthread_exit(NULL);
		case ENOMEM:
			/*
			 * Buffer too small? Impossible, we allocate MAXPHYS
			 * bytes - request can't be bigger than that.
			 */
			/* FALLTHROUGH */
		case ENXIO:
		default:
			primary_exitx(EX_OSERR, "G_GATE_CMD_START failed: %s.",
			    strerror(error));
		}
		for (ii = 0; ii < ncomps; ii++)
			hio->hio_errors[ii] = EINVAL;
		reqlog(LOG_DEBUG, 2, ggio,
		    "ggate_recv: (%p) Request received from the kernel: ",
		    hio);
		/*
		 * Inform all components about a new write request.
		 * For read requests prefer the local component unless the
		 * given range is out-of-date; in that case use the remote
		 * component.
		 */
		switch (ggio->gctl_cmd) {
		case BIO_READ:
			pjdlog_debug(2,
			    "ggate_recv: (%p) Moving request to the send queue.",
			    hio);
			refcount_init(&hio->hio_countdown, 1);
			mtx_lock(&metadata_lock);
			if (res->hr_syncsrc == HAST_SYNCSRC_UNDEF ||
			    res->hr_syncsrc == HAST_SYNCSRC_PRIMARY) {
				/*
				 * This range is up-to-date on the local
				 * component, so handle the request locally.
				 */
				/* Local component is 0 for now. */
				ncomp = 0;
			} else /* if (res->hr_syncsrc ==
			    HAST_SYNCSRC_SECONDARY) */ {
				assert(res->hr_syncsrc ==
				    HAST_SYNCSRC_SECONDARY);
				/*
				 * This range is out-of-date on the local
				 * component, so send the request to the
				 * remote node.
				 */
				/* Remote component is 1 for now. */
				ncomp = 1;
			}
			mtx_unlock(&metadata_lock);
			QUEUE_INSERT1(hio, send, ncomp);
			break;
		case BIO_WRITE:
			for (;;) {
				mtx_lock(&range_lock);
				if (rangelock_islocked(range_sync,
				    ggio->gctl_offset, ggio->gctl_length)) {
					pjdlog_debug(2,
					    "regular: Range offset=%jd length=%zu locked.",
					    (intmax_t)ggio->gctl_offset,
					    (size_t)ggio->gctl_length);
					range_regular_wait = true;
					cv_wait(&range_regular_cond, &range_lock);
					range_regular_wait = false;
					mtx_unlock(&range_lock);
					continue;
				}
				if (rangelock_add(range_regular,
				    ggio->gctl_offset, ggio->gctl_length) < 0) {
					mtx_unlock(&range_lock);
					pjdlog_debug(2,
					    "regular: Range offset=%jd length=%zu is already locked, waiting.",
					    (intmax_t)ggio->gctl_offset,
					    (size_t)ggio->gctl_length);
					sleep(1);
					continue;
				}
				mtx_unlock(&range_lock);
				break;
			}
			mtx_lock(&res->hr_amp_lock);
			if (activemap_write_start(res->hr_amp,
			    ggio->gctl_offset, ggio->gctl_length)) {
				(void)hast_activemap_flush(res);
			}
			mtx_unlock(&res->hr_amp_lock);
			/* FALLTHROUGH */
		case BIO_DELETE:
		case BIO_FLUSH:
			pjdlog_debug(2,
			    "ggate_recv: (%p) Moving request to the send queues.",
			    hio);
			refcount_init(&hio->hio_countdown, ncomps);
			for (ii = 0; ii < ncomps; ii++)
				QUEUE_INSERT1(hio, send, ii);
			break;
		}
	}
	/* NOTREACHED */
	return (NULL);
}
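
/*
 * Note that read requests above start with hio_countdown set to 1 (they go
 * to a single component), while WRITE, DELETE and FLUSH start at ncomps.
 * Whichever component drops the countdown to zero moves the request to the
 * done queue, or marks it with SYNCREQDONE and wakes the sync thread when it
 * was a synchronization request.
 */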
*/ 101632115b10SPawel Jakub Dawidek return (NULL); 101732115b10SPawel Jakub Dawidek } 101832115b10SPawel Jakub Dawidek 101932115b10SPawel Jakub Dawidek /* 102032115b10SPawel Jakub Dawidek * Thread reads from or writes to local component. 102132115b10SPawel Jakub Dawidek * If local read fails, it redirects it to remote_send thread. 102232115b10SPawel Jakub Dawidek */ 102332115b10SPawel Jakub Dawidek static void * 102432115b10SPawel Jakub Dawidek local_send_thread(void *arg) 102532115b10SPawel Jakub Dawidek { 102632115b10SPawel Jakub Dawidek struct hast_resource *res = arg; 102732115b10SPawel Jakub Dawidek struct g_gate_ctl_io *ggio; 102832115b10SPawel Jakub Dawidek struct hio *hio; 102932115b10SPawel Jakub Dawidek unsigned int ncomp, rncomp; 103032115b10SPawel Jakub Dawidek ssize_t ret; 103132115b10SPawel Jakub Dawidek 103232115b10SPawel Jakub Dawidek /* Local component is 0 for now. */ 103332115b10SPawel Jakub Dawidek ncomp = 0; 103432115b10SPawel Jakub Dawidek /* Remote component is 1 for now. */ 103532115b10SPawel Jakub Dawidek rncomp = 1; 103632115b10SPawel Jakub Dawidek 103732115b10SPawel Jakub Dawidek for (;;) { 103832115b10SPawel Jakub Dawidek pjdlog_debug(2, "local_send: Taking request."); 103932115b10SPawel Jakub Dawidek QUEUE_TAKE1(hio, send, ncomp); 104032115b10SPawel Jakub Dawidek pjdlog_debug(2, "local_send: (%p) Got request.", hio); 104132115b10SPawel Jakub Dawidek ggio = &hio->hio_ggio; 104232115b10SPawel Jakub Dawidek switch (ggio->gctl_cmd) { 104332115b10SPawel Jakub Dawidek case BIO_READ: 104432115b10SPawel Jakub Dawidek ret = pread(res->hr_localfd, ggio->gctl_data, 104532115b10SPawel Jakub Dawidek ggio->gctl_length, 104632115b10SPawel Jakub Dawidek ggio->gctl_offset + res->hr_localoff); 104732115b10SPawel Jakub Dawidek if (ret == ggio->gctl_length) 104832115b10SPawel Jakub Dawidek hio->hio_errors[ncomp] = 0; 104932115b10SPawel Jakub Dawidek else { 105032115b10SPawel Jakub Dawidek /* 105132115b10SPawel Jakub Dawidek * If READ failed, try to read from remote node. 
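 * The countdown is not released here; the request is simply
 * requeued for the remote component, which will complete it.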
105232115b10SPawel Jakub Dawidek */ 105332115b10SPawel Jakub Dawidek QUEUE_INSERT1(hio, send, rncomp); 105432115b10SPawel Jakub Dawidek continue; 105532115b10SPawel Jakub Dawidek } 105632115b10SPawel Jakub Dawidek break; 105732115b10SPawel Jakub Dawidek case BIO_WRITE: 105832115b10SPawel Jakub Dawidek ret = pwrite(res->hr_localfd, ggio->gctl_data, 105932115b10SPawel Jakub Dawidek ggio->gctl_length, 106032115b10SPawel Jakub Dawidek ggio->gctl_offset + res->hr_localoff); 106132115b10SPawel Jakub Dawidek if (ret < 0) 106232115b10SPawel Jakub Dawidek hio->hio_errors[ncomp] = errno; 106332115b10SPawel Jakub Dawidek else if (ret != ggio->gctl_length) 106432115b10SPawel Jakub Dawidek hio->hio_errors[ncomp] = EIO; 106532115b10SPawel Jakub Dawidek else 106632115b10SPawel Jakub Dawidek hio->hio_errors[ncomp] = 0; 106732115b10SPawel Jakub Dawidek break; 106832115b10SPawel Jakub Dawidek case BIO_DELETE: 106932115b10SPawel Jakub Dawidek ret = g_delete(res->hr_localfd, 107032115b10SPawel Jakub Dawidek ggio->gctl_offset + res->hr_localoff, 107132115b10SPawel Jakub Dawidek ggio->gctl_length); 107232115b10SPawel Jakub Dawidek if (ret < 0) 107332115b10SPawel Jakub Dawidek hio->hio_errors[ncomp] = errno; 107432115b10SPawel Jakub Dawidek else 107532115b10SPawel Jakub Dawidek hio->hio_errors[ncomp] = 0; 107632115b10SPawel Jakub Dawidek break; 107732115b10SPawel Jakub Dawidek case BIO_FLUSH: 107832115b10SPawel Jakub Dawidek ret = g_flush(res->hr_localfd); 107932115b10SPawel Jakub Dawidek if (ret < 0) 108032115b10SPawel Jakub Dawidek hio->hio_errors[ncomp] = errno; 108132115b10SPawel Jakub Dawidek else 108232115b10SPawel Jakub Dawidek hio->hio_errors[ncomp] = 0; 108332115b10SPawel Jakub Dawidek break; 108432115b10SPawel Jakub Dawidek } 108532115b10SPawel Jakub Dawidek if (refcount_release(&hio->hio_countdown)) { 108632115b10SPawel Jakub Dawidek if (ISSYNCREQ(hio)) { 108732115b10SPawel Jakub Dawidek mtx_lock(&sync_lock); 108832115b10SPawel Jakub Dawidek SYNCREQDONE(hio); 108932115b10SPawel Jakub Dawidek mtx_unlock(&sync_lock); 109032115b10SPawel Jakub Dawidek cv_signal(&sync_cond); 109132115b10SPawel Jakub Dawidek } else { 109232115b10SPawel Jakub Dawidek pjdlog_debug(2, 109332115b10SPawel Jakub Dawidek "local_send: (%p) Moving request to the done queue.", 109432115b10SPawel Jakub Dawidek hio); 109532115b10SPawel Jakub Dawidek QUEUE_INSERT2(hio, done); 109632115b10SPawel Jakub Dawidek } 109732115b10SPawel Jakub Dawidek } 109832115b10SPawel Jakub Dawidek } 109932115b10SPawel Jakub Dawidek /* NOTREACHED */ 110032115b10SPawel Jakub Dawidek return (NULL); 110132115b10SPawel Jakub Dawidek } 110232115b10SPawel Jakub Dawidek 110332115b10SPawel Jakub Dawidek /* 110432115b10SPawel Jakub Dawidek * Thread sends request to secondary node. 110532115b10SPawel Jakub Dawidek */ 110632115b10SPawel Jakub Dawidek static void * 110732115b10SPawel Jakub Dawidek remote_send_thread(void *arg) 110832115b10SPawel Jakub Dawidek { 110932115b10SPawel Jakub Dawidek struct hast_resource *res = arg; 111032115b10SPawel Jakub Dawidek struct g_gate_ctl_io *ggio; 111132115b10SPawel Jakub Dawidek struct hio *hio; 111232115b10SPawel Jakub Dawidek struct nv *nv; 111332115b10SPawel Jakub Dawidek unsigned int ncomp; 111432115b10SPawel Jakub Dawidek bool wakeup; 111532115b10SPawel Jakub Dawidek uint64_t offset, length; 111632115b10SPawel Jakub Dawidek uint8_t cmd; 111732115b10SPawel Jakub Dawidek void *data; 111832115b10SPawel Jakub Dawidek 111932115b10SPawel Jakub Dawidek /* Remote component is 1 for now. 
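 * Each request taken from the send queue below is encoded as a
 * small header (cmd, seq, offset, length), optionally followed
 * by data, and written to the secondary over hr_remoteout.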
*/ 112032115b10SPawel Jakub Dawidek ncomp = 1; 112132115b10SPawel Jakub Dawidek 112232115b10SPawel Jakub Dawidek for (;;) { 112332115b10SPawel Jakub Dawidek pjdlog_debug(2, "remote_send: Taking request."); 112432115b10SPawel Jakub Dawidek QUEUE_TAKE1(hio, send, ncomp); 112532115b10SPawel Jakub Dawidek pjdlog_debug(2, "remote_send: (%p) Got request.", hio); 112632115b10SPawel Jakub Dawidek ggio = &hio->hio_ggio; 112732115b10SPawel Jakub Dawidek switch (ggio->gctl_cmd) { 112832115b10SPawel Jakub Dawidek case BIO_READ: 112932115b10SPawel Jakub Dawidek cmd = HIO_READ; 113032115b10SPawel Jakub Dawidek data = NULL; 113132115b10SPawel Jakub Dawidek offset = ggio->gctl_offset; 113232115b10SPawel Jakub Dawidek length = ggio->gctl_length; 113332115b10SPawel Jakub Dawidek break; 113432115b10SPawel Jakub Dawidek case BIO_WRITE: 113532115b10SPawel Jakub Dawidek cmd = HIO_WRITE; 113632115b10SPawel Jakub Dawidek data = ggio->gctl_data; 113732115b10SPawel Jakub Dawidek offset = ggio->gctl_offset; 113832115b10SPawel Jakub Dawidek length = ggio->gctl_length; 113932115b10SPawel Jakub Dawidek break; 114032115b10SPawel Jakub Dawidek case BIO_DELETE: 114132115b10SPawel Jakub Dawidek cmd = HIO_DELETE; 114232115b10SPawel Jakub Dawidek data = NULL; 114332115b10SPawel Jakub Dawidek offset = ggio->gctl_offset; 114432115b10SPawel Jakub Dawidek length = ggio->gctl_length; 114532115b10SPawel Jakub Dawidek break; 114632115b10SPawel Jakub Dawidek case BIO_FLUSH: 114732115b10SPawel Jakub Dawidek cmd = HIO_FLUSH; 114832115b10SPawel Jakub Dawidek data = NULL; 114932115b10SPawel Jakub Dawidek offset = 0; 115032115b10SPawel Jakub Dawidek length = 0; 115132115b10SPawel Jakub Dawidek break; 115232115b10SPawel Jakub Dawidek default: 115332115b10SPawel Jakub Dawidek assert(!"invalid condition"); 115432115b10SPawel Jakub Dawidek abort(); 115532115b10SPawel Jakub Dawidek } 115632115b10SPawel Jakub Dawidek nv = nv_alloc(); 115732115b10SPawel Jakub Dawidek nv_add_uint8(nv, cmd, "cmd"); 115832115b10SPawel Jakub Dawidek nv_add_uint64(nv, (uint64_t)ggio->gctl_seq, "seq"); 115932115b10SPawel Jakub Dawidek nv_add_uint64(nv, offset, "offset"); 116032115b10SPawel Jakub Dawidek nv_add_uint64(nv, length, "length"); 116132115b10SPawel Jakub Dawidek if (nv_error(nv) != 0) { 116232115b10SPawel Jakub Dawidek hio->hio_errors[ncomp] = nv_error(nv); 116332115b10SPawel Jakub Dawidek pjdlog_debug(2, 116432115b10SPawel Jakub Dawidek "remote_send: (%p) Unable to prepare header to send.", 116532115b10SPawel Jakub Dawidek hio); 116632115b10SPawel Jakub Dawidek reqlog(LOG_ERR, 0, ggio, 116732115b10SPawel Jakub Dawidek "Unable to prepare header to send (%s): ", 116832115b10SPawel Jakub Dawidek strerror(nv_error(nv))); 116932115b10SPawel Jakub Dawidek /* Move failed request immediately to the done queue. */ 117032115b10SPawel Jakub Dawidek goto done_queue; 117132115b10SPawel Jakub Dawidek } 117232115b10SPawel Jakub Dawidek pjdlog_debug(2, 117332115b10SPawel Jakub Dawidek "remote_send: (%p) Moving request to the recv queue.", 117432115b10SPawel Jakub Dawidek hio); 117532115b10SPawel Jakub Dawidek /* 117632115b10SPawel Jakub Dawidek * Protect connection from disappearing. 
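 * The lock is held shared across the ISCONNECTED() check and the
 * send, and is dropped before remote_close() is called on error.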
117732115b10SPawel Jakub Dawidek */
117832115b10SPawel Jakub Dawidek rw_rlock(&hio_remote_lock[ncomp]);
117932115b10SPawel Jakub Dawidek if (!ISCONNECTED(res, ncomp)) {
118032115b10SPawel Jakub Dawidek rw_unlock(&hio_remote_lock[ncomp]);
118132115b10SPawel Jakub Dawidek hio->hio_errors[ncomp] = ENOTCONN;
118232115b10SPawel Jakub Dawidek goto done_queue;
118332115b10SPawel Jakub Dawidek }
118432115b10SPawel Jakub Dawidek /*
118532115b10SPawel Jakub Dawidek * Move the request to the recv queue before sending it;
118632115b10SPawel Jakub Dawidek * otherwise the reply could arrive before the request is
118732115b10SPawel Jakub Dawidek * on the recv queue.
118832115b10SPawel Jakub Dawidek */
118932115b10SPawel Jakub Dawidek mtx_lock(&hio_recv_list_lock[ncomp]);
119032115b10SPawel Jakub Dawidek wakeup = TAILQ_EMPTY(&hio_recv_list[ncomp]);
119132115b10SPawel Jakub Dawidek TAILQ_INSERT_TAIL(&hio_recv_list[ncomp], hio, hio_next[ncomp]);
119232115b10SPawel Jakub Dawidek mtx_unlock(&hio_recv_list_lock[ncomp]);
119332115b10SPawel Jakub Dawidek if (hast_proto_send(res, res->hr_remoteout, nv, data,
119432115b10SPawel Jakub Dawidek data != NULL ? length : 0) < 0) {
119532115b10SPawel Jakub Dawidek hio->hio_errors[ncomp] = errno;
119632115b10SPawel Jakub Dawidek rw_unlock(&hio_remote_lock[ncomp]);
119732115b10SPawel Jakub Dawidek remote_close(res, ncomp);
119832115b10SPawel Jakub Dawidek pjdlog_debug(2,
119932115b10SPawel Jakub Dawidek "remote_send: (%p) Unable to send request.", hio);
120032115b10SPawel Jakub Dawidek reqlog(LOG_ERR, 0, ggio,
120132115b10SPawel Jakub Dawidek "Unable to send request (%s): ",
120232115b10SPawel Jakub Dawidek strerror(hio->hio_errors[ncomp]));
120332115b10SPawel Jakub Dawidek /*
120432115b10SPawel Jakub Dawidek * Take the request back from the receive queue and move
120532115b10SPawel Jakub Dawidek * it immediately to the done queue.
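 * No reply will ever arrive for it: the send failed and the
 * connection has already been closed by remote_close() above.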
120632115b10SPawel Jakub Dawidek */ 120732115b10SPawel Jakub Dawidek mtx_lock(&hio_recv_list_lock[ncomp]); 120832115b10SPawel Jakub Dawidek TAILQ_REMOVE(&hio_recv_list[ncomp], hio, hio_next[ncomp]); 120932115b10SPawel Jakub Dawidek mtx_unlock(&hio_recv_list_lock[ncomp]); 121032115b10SPawel Jakub Dawidek goto done_queue; 121132115b10SPawel Jakub Dawidek } 121232115b10SPawel Jakub Dawidek rw_unlock(&hio_remote_lock[ncomp]); 121332115b10SPawel Jakub Dawidek nv_free(nv); 121432115b10SPawel Jakub Dawidek if (wakeup) 121532115b10SPawel Jakub Dawidek cv_signal(&hio_recv_list_cond[ncomp]); 121632115b10SPawel Jakub Dawidek continue; 121732115b10SPawel Jakub Dawidek done_queue: 121832115b10SPawel Jakub Dawidek nv_free(nv); 121932115b10SPawel Jakub Dawidek if (ISSYNCREQ(hio)) { 122032115b10SPawel Jakub Dawidek if (!refcount_release(&hio->hio_countdown)) 122132115b10SPawel Jakub Dawidek continue; 122232115b10SPawel Jakub Dawidek mtx_lock(&sync_lock); 122332115b10SPawel Jakub Dawidek SYNCREQDONE(hio); 122432115b10SPawel Jakub Dawidek mtx_unlock(&sync_lock); 122532115b10SPawel Jakub Dawidek cv_signal(&sync_cond); 122632115b10SPawel Jakub Dawidek continue; 122732115b10SPawel Jakub Dawidek } 122832115b10SPawel Jakub Dawidek if (ggio->gctl_cmd == BIO_WRITE) { 122932115b10SPawel Jakub Dawidek mtx_lock(&res->hr_amp_lock); 123032115b10SPawel Jakub Dawidek if (activemap_need_sync(res->hr_amp, ggio->gctl_offset, 123132115b10SPawel Jakub Dawidek ggio->gctl_length)) { 123232115b10SPawel Jakub Dawidek (void)hast_activemap_flush(res); 123332115b10SPawel Jakub Dawidek } 123432115b10SPawel Jakub Dawidek mtx_unlock(&res->hr_amp_lock); 123532115b10SPawel Jakub Dawidek } 123632115b10SPawel Jakub Dawidek if (!refcount_release(&hio->hio_countdown)) 123732115b10SPawel Jakub Dawidek continue; 123832115b10SPawel Jakub Dawidek pjdlog_debug(2, 123932115b10SPawel Jakub Dawidek "remote_send: (%p) Moving request to the done queue.", 124032115b10SPawel Jakub Dawidek hio); 124132115b10SPawel Jakub Dawidek QUEUE_INSERT2(hio, done); 124232115b10SPawel Jakub Dawidek } 124332115b10SPawel Jakub Dawidek /* NOTREACHED */ 124432115b10SPawel Jakub Dawidek return (NULL); 124532115b10SPawel Jakub Dawidek } 124632115b10SPawel Jakub Dawidek 124732115b10SPawel Jakub Dawidek /* 124832115b10SPawel Jakub Dawidek * Thread receives answer from secondary node and passes it to ggate_send 124932115b10SPawel Jakub Dawidek * thread. 125032115b10SPawel Jakub Dawidek */ 125132115b10SPawel Jakub Dawidek static void * 125232115b10SPawel Jakub Dawidek remote_recv_thread(void *arg) 125332115b10SPawel Jakub Dawidek { 125432115b10SPawel Jakub Dawidek struct hast_resource *res = arg; 125532115b10SPawel Jakub Dawidek struct g_gate_ctl_io *ggio; 125632115b10SPawel Jakub Dawidek struct hio *hio; 125732115b10SPawel Jakub Dawidek struct nv *nv; 125832115b10SPawel Jakub Dawidek unsigned int ncomp; 125932115b10SPawel Jakub Dawidek uint64_t seq; 126032115b10SPawel Jakub Dawidek int error; 126132115b10SPawel Jakub Dawidek 126232115b10SPawel Jakub Dawidek /* Remote component is 1 for now. */ 126332115b10SPawel Jakub Dawidek ncomp = 1; 126432115b10SPawel Jakub Dawidek 126532115b10SPawel Jakub Dawidek for (;;) { 126632115b10SPawel Jakub Dawidek /* Wait until there is anything to receive. 
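 * Requests appear on this list once remote_send_thread has queued
 * them and pushed them out to the secondary.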
*/ 126732115b10SPawel Jakub Dawidek mtx_lock(&hio_recv_list_lock[ncomp]); 126832115b10SPawel Jakub Dawidek while (TAILQ_EMPTY(&hio_recv_list[ncomp])) { 126932115b10SPawel Jakub Dawidek pjdlog_debug(2, "remote_recv: No requests, waiting."); 127032115b10SPawel Jakub Dawidek cv_wait(&hio_recv_list_cond[ncomp], 127132115b10SPawel Jakub Dawidek &hio_recv_list_lock[ncomp]); 127232115b10SPawel Jakub Dawidek } 127332115b10SPawel Jakub Dawidek mtx_unlock(&hio_recv_list_lock[ncomp]); 127432115b10SPawel Jakub Dawidek rw_rlock(&hio_remote_lock[ncomp]); 127532115b10SPawel Jakub Dawidek if (!ISCONNECTED(res, ncomp)) { 127632115b10SPawel Jakub Dawidek rw_unlock(&hio_remote_lock[ncomp]); 127732115b10SPawel Jakub Dawidek /* 127832115b10SPawel Jakub Dawidek * Connection is dead, so move all pending requests to 127932115b10SPawel Jakub Dawidek * the done queue (one-by-one). 128032115b10SPawel Jakub Dawidek */ 128132115b10SPawel Jakub Dawidek mtx_lock(&hio_recv_list_lock[ncomp]); 128232115b10SPawel Jakub Dawidek hio = TAILQ_FIRST(&hio_recv_list[ncomp]); 128332115b10SPawel Jakub Dawidek assert(hio != NULL); 128432115b10SPawel Jakub Dawidek TAILQ_REMOVE(&hio_recv_list[ncomp], hio, 128532115b10SPawel Jakub Dawidek hio_next[ncomp]); 128632115b10SPawel Jakub Dawidek mtx_unlock(&hio_recv_list_lock[ncomp]); 128732115b10SPawel Jakub Dawidek goto done_queue; 128832115b10SPawel Jakub Dawidek } 128932115b10SPawel Jakub Dawidek if (hast_proto_recv_hdr(res->hr_remotein, &nv) < 0) { 129032115b10SPawel Jakub Dawidek pjdlog_errno(LOG_ERR, 129132115b10SPawel Jakub Dawidek "Unable to receive reply header"); 129232115b10SPawel Jakub Dawidek rw_unlock(&hio_remote_lock[ncomp]); 129332115b10SPawel Jakub Dawidek remote_close(res, ncomp); 129432115b10SPawel Jakub Dawidek continue; 129532115b10SPawel Jakub Dawidek } 129632115b10SPawel Jakub Dawidek rw_unlock(&hio_remote_lock[ncomp]); 129732115b10SPawel Jakub Dawidek seq = nv_get_uint64(nv, "seq"); 129832115b10SPawel Jakub Dawidek if (seq == 0) { 129932115b10SPawel Jakub Dawidek pjdlog_error("Header contains no 'seq' field."); 130032115b10SPawel Jakub Dawidek nv_free(nv); 130132115b10SPawel Jakub Dawidek continue; 130232115b10SPawel Jakub Dawidek } 130332115b10SPawel Jakub Dawidek mtx_lock(&hio_recv_list_lock[ncomp]); 130432115b10SPawel Jakub Dawidek TAILQ_FOREACH(hio, &hio_recv_list[ncomp], hio_next[ncomp]) { 130532115b10SPawel Jakub Dawidek if (hio->hio_ggio.gctl_seq == seq) { 130632115b10SPawel Jakub Dawidek TAILQ_REMOVE(&hio_recv_list[ncomp], hio, 130732115b10SPawel Jakub Dawidek hio_next[ncomp]); 130832115b10SPawel Jakub Dawidek break; 130932115b10SPawel Jakub Dawidek } 131032115b10SPawel Jakub Dawidek } 131132115b10SPawel Jakub Dawidek mtx_unlock(&hio_recv_list_lock[ncomp]); 131232115b10SPawel Jakub Dawidek if (hio == NULL) { 131332115b10SPawel Jakub Dawidek pjdlog_error("Found no request matching received 'seq' field (%ju).", 131432115b10SPawel Jakub Dawidek (uintmax_t)seq); 131532115b10SPawel Jakub Dawidek nv_free(nv); 131632115b10SPawel Jakub Dawidek continue; 131732115b10SPawel Jakub Dawidek } 131832115b10SPawel Jakub Dawidek error = nv_get_int16(nv, "error"); 131932115b10SPawel Jakub Dawidek if (error != 0) { 132032115b10SPawel Jakub Dawidek /* Request failed on remote side. 
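 * Record the error reported by the secondary; ggate_send_thread
 * can still report success if the other component succeeded.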
*/
132132115b10SPawel Jakub Dawidek hio->hio_errors[ncomp] = error;
132232115b10SPawel Jakub Dawidek nv_free(nv);
132332115b10SPawel Jakub Dawidek goto done_queue;
132432115b10SPawel Jakub Dawidek }
132532115b10SPawel Jakub Dawidek ggio = &hio->hio_ggio;
132632115b10SPawel Jakub Dawidek switch (ggio->gctl_cmd) {
132732115b10SPawel Jakub Dawidek case BIO_READ:
132832115b10SPawel Jakub Dawidek rw_rlock(&hio_remote_lock[ncomp]);
132932115b10SPawel Jakub Dawidek if (!ISCONNECTED(res, ncomp)) {
133032115b10SPawel Jakub Dawidek rw_unlock(&hio_remote_lock[ncomp]);
133132115b10SPawel Jakub Dawidek nv_free(nv);
133232115b10SPawel Jakub Dawidek goto done_queue;
133332115b10SPawel Jakub Dawidek }
133432115b10SPawel Jakub Dawidek if (hast_proto_recv_data(res, res->hr_remotein, nv,
133532115b10SPawel Jakub Dawidek ggio->gctl_data, ggio->gctl_length) < 0) {
133632115b10SPawel Jakub Dawidek hio->hio_errors[ncomp] = errno;
133732115b10SPawel Jakub Dawidek pjdlog_errno(LOG_ERR,
133832115b10SPawel Jakub Dawidek "Unable to receive reply data");
133932115b10SPawel Jakub Dawidek rw_unlock(&hio_remote_lock[ncomp]);
134032115b10SPawel Jakub Dawidek nv_free(nv);
134132115b10SPawel Jakub Dawidek remote_close(res, ncomp);
134232115b10SPawel Jakub Dawidek goto done_queue;
134332115b10SPawel Jakub Dawidek }
134432115b10SPawel Jakub Dawidek rw_unlock(&hio_remote_lock[ncomp]);
134532115b10SPawel Jakub Dawidek break;
134632115b10SPawel Jakub Dawidek case BIO_WRITE:
134732115b10SPawel Jakub Dawidek case BIO_DELETE:
134832115b10SPawel Jakub Dawidek case BIO_FLUSH:
134932115b10SPawel Jakub Dawidek break;
135032115b10SPawel Jakub Dawidek default:
135132115b10SPawel Jakub Dawidek assert(!"invalid condition");
135232115b10SPawel Jakub Dawidek abort();
135332115b10SPawel Jakub Dawidek }
135432115b10SPawel Jakub Dawidek hio->hio_errors[ncomp] = 0;
135532115b10SPawel Jakub Dawidek nv_free(nv);
135632115b10SPawel Jakub Dawidek done_queue:
135732115b10SPawel Jakub Dawidek if (refcount_release(&hio->hio_countdown)) {
135832115b10SPawel Jakub Dawidek if (ISSYNCREQ(hio)) {
135932115b10SPawel Jakub Dawidek mtx_lock(&sync_lock);
136032115b10SPawel Jakub Dawidek SYNCREQDONE(hio);
136132115b10SPawel Jakub Dawidek mtx_unlock(&sync_lock);
136232115b10SPawel Jakub Dawidek cv_signal(&sync_cond);
136332115b10SPawel Jakub Dawidek } else {
136432115b10SPawel Jakub Dawidek pjdlog_debug(2,
136532115b10SPawel Jakub Dawidek "remote_recv: (%p) Moving request to the done queue.",
136632115b10SPawel Jakub Dawidek hio);
136732115b10SPawel Jakub Dawidek QUEUE_INSERT2(hio, done);
136832115b10SPawel Jakub Dawidek }
136932115b10SPawel Jakub Dawidek }
137032115b10SPawel Jakub Dawidek }
137132115b10SPawel Jakub Dawidek /* NOTREACHED */
137232115b10SPawel Jakub Dawidek return (NULL);
137332115b10SPawel Jakub Dawidek }
137432115b10SPawel Jakub Dawidek
137532115b10SPawel Jakub Dawidek /*
137632115b10SPawel Jakub Dawidek * Thread sends answer to the kernel.
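 * It takes completed requests from the done queue, treats a
 * request as successful if any component succeeded, finishes the
 * activemap and range-lock bookkeeping for writes, and hands the
 * answer back to the kernel with G_GATE_CMD_DONE before returning
 * the request to the free queue.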
137732115b10SPawel Jakub Dawidek */ 137832115b10SPawel Jakub Dawidek static void * 137932115b10SPawel Jakub Dawidek ggate_send_thread(void *arg) 138032115b10SPawel Jakub Dawidek { 138132115b10SPawel Jakub Dawidek struct hast_resource *res = arg; 138232115b10SPawel Jakub Dawidek struct g_gate_ctl_io *ggio; 138332115b10SPawel Jakub Dawidek struct hio *hio; 138432115b10SPawel Jakub Dawidek unsigned int ii, ncomp, ncomps; 138532115b10SPawel Jakub Dawidek 138632115b10SPawel Jakub Dawidek ncomps = HAST_NCOMPONENTS; 138732115b10SPawel Jakub Dawidek 138832115b10SPawel Jakub Dawidek for (;;) { 138932115b10SPawel Jakub Dawidek pjdlog_debug(2, "ggate_send: Taking request."); 139032115b10SPawel Jakub Dawidek QUEUE_TAKE2(hio, done); 139132115b10SPawel Jakub Dawidek pjdlog_debug(2, "ggate_send: (%p) Got request.", hio); 139232115b10SPawel Jakub Dawidek ggio = &hio->hio_ggio; 139332115b10SPawel Jakub Dawidek for (ii = 0; ii < ncomps; ii++) { 139432115b10SPawel Jakub Dawidek if (hio->hio_errors[ii] == 0) { 139532115b10SPawel Jakub Dawidek /* 139632115b10SPawel Jakub Dawidek * One successful request is enough to declare 139732115b10SPawel Jakub Dawidek * success. 139832115b10SPawel Jakub Dawidek */ 139932115b10SPawel Jakub Dawidek ggio->gctl_error = 0; 140032115b10SPawel Jakub Dawidek break; 140132115b10SPawel Jakub Dawidek } 140232115b10SPawel Jakub Dawidek } 140332115b10SPawel Jakub Dawidek if (ii == ncomps) { 140432115b10SPawel Jakub Dawidek /* 140532115b10SPawel Jakub Dawidek * None of the requests were successful. 140632115b10SPawel Jakub Dawidek * Use first error. 140732115b10SPawel Jakub Dawidek */ 140832115b10SPawel Jakub Dawidek ggio->gctl_error = hio->hio_errors[0]; 140932115b10SPawel Jakub Dawidek } 141032115b10SPawel Jakub Dawidek if (ggio->gctl_error == 0 && ggio->gctl_cmd == BIO_WRITE) { 141132115b10SPawel Jakub Dawidek mtx_lock(&res->hr_amp_lock); 141232115b10SPawel Jakub Dawidek activemap_write_complete(res->hr_amp, 141332115b10SPawel Jakub Dawidek ggio->gctl_offset, ggio->gctl_length); 141432115b10SPawel Jakub Dawidek mtx_unlock(&res->hr_amp_lock); 141532115b10SPawel Jakub Dawidek } 141632115b10SPawel Jakub Dawidek if (ggio->gctl_cmd == BIO_WRITE) { 141732115b10SPawel Jakub Dawidek /* 141832115b10SPawel Jakub Dawidek * Unlock range we locked. 141932115b10SPawel Jakub Dawidek */ 142032115b10SPawel Jakub Dawidek mtx_lock(&range_lock); 142132115b10SPawel Jakub Dawidek rangelock_del(range_regular, ggio->gctl_offset, 142232115b10SPawel Jakub Dawidek ggio->gctl_length); 142332115b10SPawel Jakub Dawidek if (range_sync_wait) 142432115b10SPawel Jakub Dawidek cv_signal(&range_sync_cond); 142532115b10SPawel Jakub Dawidek mtx_unlock(&range_lock); 142632115b10SPawel Jakub Dawidek /* 142732115b10SPawel Jakub Dawidek * Bump local count if this is first write after 142832115b10SPawel Jakub Dawidek * connection failure with remote node. 
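 * A higher localcnt marks that the primary now holds writes the
 * secondary has not seen, so the next connection can pick the
 * proper synchronization source.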
142932115b10SPawel Jakub Dawidek */
143032115b10SPawel Jakub Dawidek ncomp = 1;
143132115b10SPawel Jakub Dawidek rw_rlock(&hio_remote_lock[ncomp]);
143232115b10SPawel Jakub Dawidek if (!ISCONNECTED(res, ncomp)) {
143332115b10SPawel Jakub Dawidek mtx_lock(&metadata_lock);
143432115b10SPawel Jakub Dawidek if (res->hr_primary_localcnt ==
143532115b10SPawel Jakub Dawidek res->hr_secondary_remotecnt) {
143632115b10SPawel Jakub Dawidek res->hr_primary_localcnt++;
143732115b10SPawel Jakub Dawidek pjdlog_debug(1,
143832115b10SPawel Jakub Dawidek "Increasing localcnt to %ju.",
143932115b10SPawel Jakub Dawidek (uintmax_t)res->hr_primary_localcnt);
144032115b10SPawel Jakub Dawidek (void)metadata_write(res);
144132115b10SPawel Jakub Dawidek }
144232115b10SPawel Jakub Dawidek mtx_unlock(&metadata_lock);
144332115b10SPawel Jakub Dawidek }
144432115b10SPawel Jakub Dawidek rw_unlock(&hio_remote_lock[ncomp]);
144532115b10SPawel Jakub Dawidek }
144632115b10SPawel Jakub Dawidek if (ioctl(res->hr_ggatefd, G_GATE_CMD_DONE, ggio) < 0)
144732115b10SPawel Jakub Dawidek primary_exit(EX_OSERR, "G_GATE_CMD_DONE failed");
144832115b10SPawel Jakub Dawidek pjdlog_debug(2,
144932115b10SPawel Jakub Dawidek "ggate_send: (%p) Moving request to the free queue.", hio);
145032115b10SPawel Jakub Dawidek QUEUE_INSERT2(hio, free);
145132115b10SPawel Jakub Dawidek }
145232115b10SPawel Jakub Dawidek /* NOTREACHED */
145332115b10SPawel Jakub Dawidek return (NULL);
145432115b10SPawel Jakub Dawidek }
145532115b10SPawel Jakub Dawidek
145632115b10SPawel Jakub Dawidek /*
145732115b10SPawel Jakub Dawidek * Thread synchronizes local and remote components.
145832115b10SPawel Jakub Dawidek */
145932115b10SPawel Jakub Dawidek static void *
146032115b10SPawel Jakub Dawidek sync_thread(void *arg)
146132115b10SPawel Jakub Dawidek {
146232115b10SPawel Jakub Dawidek struct hast_resource *res = arg;
146332115b10SPawel Jakub Dawidek struct hio *hio;
146432115b10SPawel Jakub Dawidek struct g_gate_ctl_io *ggio;
146532115b10SPawel Jakub Dawidek unsigned int ii, ncomp, ncomps;
146632115b10SPawel Jakub Dawidek off_t offset, length, synced;
146732115b10SPawel Jakub Dawidek bool dorewind;
146832115b10SPawel Jakub Dawidek int syncext;
146932115b10SPawel Jakub Dawidek
147032115b10SPawel Jakub Dawidek ncomps = HAST_NCOMPONENTS;
147132115b10SPawel Jakub Dawidek dorewind = true;
147232115b10SPawel Jakub Dawidek synced = 0;
147332115b10SPawel Jakub Dawidek
147432115b10SPawel Jakub Dawidek for (;;) {
147532115b10SPawel Jakub Dawidek mtx_lock(&sync_lock);
147632115b10SPawel Jakub Dawidek while (!sync_inprogress) {
147732115b10SPawel Jakub Dawidek dorewind = true;
147832115b10SPawel Jakub Dawidek synced = 0;
147932115b10SPawel Jakub Dawidek cv_wait(&sync_cond, &sync_lock);
148032115b10SPawel Jakub Dawidek }
148132115b10SPawel Jakub Dawidek mtx_unlock(&sync_lock);
148232115b10SPawel Jakub Dawidek /*
148332115b10SPawel Jakub Dawidek * Obtain offset at which we should synchronize.
148432115b10SPawel Jakub Dawidek * Rewind synchronization if needed.
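 * A negative offset returned by activemap_sync_offset() means
 * there are no dirty extents left to synchronize.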
148532115b10SPawel Jakub Dawidek */
148632115b10SPawel Jakub Dawidek mtx_lock(&res->hr_amp_lock);
148732115b10SPawel Jakub Dawidek if (dorewind)
148832115b10SPawel Jakub Dawidek activemap_sync_rewind(res->hr_amp);
148932115b10SPawel Jakub Dawidek offset = activemap_sync_offset(res->hr_amp, &length, &syncext);
149032115b10SPawel Jakub Dawidek if (syncext != -1) {
149132115b10SPawel Jakub Dawidek /*
149232115b10SPawel Jakub Dawidek * We have synchronized the entire syncext extent, so we
149332115b10SPawel Jakub Dawidek * can mark it as clean now.
149432115b10SPawel Jakub Dawidek */
149532115b10SPawel Jakub Dawidek if (activemap_extent_complete(res->hr_amp, syncext))
149632115b10SPawel Jakub Dawidek (void)hast_activemap_flush(res);
149732115b10SPawel Jakub Dawidek }
149832115b10SPawel Jakub Dawidek mtx_unlock(&res->hr_amp_lock);
149932115b10SPawel Jakub Dawidek if (dorewind) {
150032115b10SPawel Jakub Dawidek dorewind = false;
150132115b10SPawel Jakub Dawidek if (offset < 0)
150232115b10SPawel Jakub Dawidek pjdlog_info("Nodes are in sync.");
150332115b10SPawel Jakub Dawidek else {
150432115b10SPawel Jakub Dawidek pjdlog_info("Synchronization started. %ju bytes to go.",
150532115b10SPawel Jakub Dawidek (uintmax_t)(res->hr_extentsize *
150632115b10SPawel Jakub Dawidek activemap_ndirty(res->hr_amp)));
150732115b10SPawel Jakub Dawidek }
150832115b10SPawel Jakub Dawidek }
150932115b10SPawel Jakub Dawidek if (offset < 0) {
151032115b10SPawel Jakub Dawidek mtx_lock(&sync_lock);
151132115b10SPawel Jakub Dawidek sync_inprogress = false;
151232115b10SPawel Jakub Dawidek mtx_unlock(&sync_lock);
151332115b10SPawel Jakub Dawidek pjdlog_debug(1, "Nothing to synchronize.");
151432115b10SPawel Jakub Dawidek /*
151532115b10SPawel Jakub Dawidek * Synchronization complete, make both localcnt and
151632115b10SPawel Jakub Dawidek * remotecnt equal.
151732115b10SPawel Jakub Dawidek */
151832115b10SPawel Jakub Dawidek ncomp = 1;
151932115b10SPawel Jakub Dawidek rw_rlock(&hio_remote_lock[ncomp]);
152032115b10SPawel Jakub Dawidek if (ISCONNECTED(res, ncomp)) {
152132115b10SPawel Jakub Dawidek if (synced > 0) {
152232115b10SPawel Jakub Dawidek pjdlog_info("Synchronization complete. "
152332115b10SPawel Jakub Dawidek "%jd bytes synchronized.",
152432115b10SPawel Jakub Dawidek (intmax_t)synced);
152532115b10SPawel Jakub Dawidek }
152632115b10SPawel Jakub Dawidek mtx_lock(&metadata_lock);
152732115b10SPawel Jakub Dawidek res->hr_syncsrc = HAST_SYNCSRC_UNDEF;
152832115b10SPawel Jakub Dawidek res->hr_primary_localcnt =
152932115b10SPawel Jakub Dawidek res->hr_secondary_localcnt;
153032115b10SPawel Jakub Dawidek res->hr_primary_remotecnt =
153132115b10SPawel Jakub Dawidek res->hr_secondary_remotecnt;
153232115b10SPawel Jakub Dawidek pjdlog_debug(1,
153332115b10SPawel Jakub Dawidek "Setting localcnt to %ju and remotecnt to %ju.",
153432115b10SPawel Jakub Dawidek (uintmax_t)res->hr_primary_localcnt,
153532115b10SPawel Jakub Dawidek (uintmax_t)res->hr_primary_remotecnt);
153632115b10SPawel Jakub Dawidek (void)metadata_write(res);
153732115b10SPawel Jakub Dawidek mtx_unlock(&metadata_lock);
153832115b10SPawel Jakub Dawidek } else if (synced > 0) {
153932115b10SPawel Jakub Dawidek pjdlog_info("Synchronization interrupted.
" 154032115b10SPawel Jakub Dawidek "%jd bytes synchronized so far.", 154132115b10SPawel Jakub Dawidek (intmax_t)synced); 154232115b10SPawel Jakub Dawidek } 154332115b10SPawel Jakub Dawidek rw_unlock(&hio_remote_lock[ncomp]); 154432115b10SPawel Jakub Dawidek continue; 154532115b10SPawel Jakub Dawidek } 154632115b10SPawel Jakub Dawidek pjdlog_debug(2, "sync: Taking free request."); 154732115b10SPawel Jakub Dawidek QUEUE_TAKE2(hio, free); 154832115b10SPawel Jakub Dawidek pjdlog_debug(2, "sync: (%p) Got free request.", hio); 154932115b10SPawel Jakub Dawidek /* 155032115b10SPawel Jakub Dawidek * Lock the range we are going to synchronize. We don't want 155132115b10SPawel Jakub Dawidek * race where someone writes between our read and write. 155232115b10SPawel Jakub Dawidek */ 155332115b10SPawel Jakub Dawidek for (;;) { 155432115b10SPawel Jakub Dawidek mtx_lock(&range_lock); 155532115b10SPawel Jakub Dawidek if (rangelock_islocked(range_regular, offset, length)) { 155632115b10SPawel Jakub Dawidek pjdlog_debug(2, 155732115b10SPawel Jakub Dawidek "sync: Range offset=%jd length=%jd locked.", 155832115b10SPawel Jakub Dawidek (intmax_t)offset, (intmax_t)length); 155932115b10SPawel Jakub Dawidek range_sync_wait = true; 156032115b10SPawel Jakub Dawidek cv_wait(&range_sync_cond, &range_lock); 156132115b10SPawel Jakub Dawidek range_sync_wait = false; 156232115b10SPawel Jakub Dawidek mtx_unlock(&range_lock); 156332115b10SPawel Jakub Dawidek continue; 156432115b10SPawel Jakub Dawidek } 156532115b10SPawel Jakub Dawidek if (rangelock_add(range_sync, offset, length) < 0) { 156632115b10SPawel Jakub Dawidek mtx_unlock(&range_lock); 156732115b10SPawel Jakub Dawidek pjdlog_debug(2, 156832115b10SPawel Jakub Dawidek "sync: Range offset=%jd length=%jd is already locked, waiting.", 156932115b10SPawel Jakub Dawidek (intmax_t)offset, (intmax_t)length); 157032115b10SPawel Jakub Dawidek sleep(1); 157132115b10SPawel Jakub Dawidek continue; 157232115b10SPawel Jakub Dawidek } 157332115b10SPawel Jakub Dawidek mtx_unlock(&range_lock); 157432115b10SPawel Jakub Dawidek break; 157532115b10SPawel Jakub Dawidek } 157632115b10SPawel Jakub Dawidek /* 157732115b10SPawel Jakub Dawidek * First read the data from synchronization source. 157832115b10SPawel Jakub Dawidek */ 157932115b10SPawel Jakub Dawidek SYNCREQ(hio); 158032115b10SPawel Jakub Dawidek ggio = &hio->hio_ggio; 158132115b10SPawel Jakub Dawidek ggio->gctl_cmd = BIO_READ; 158232115b10SPawel Jakub Dawidek ggio->gctl_offset = offset; 158332115b10SPawel Jakub Dawidek ggio->gctl_length = length; 158432115b10SPawel Jakub Dawidek ggio->gctl_error = 0; 158532115b10SPawel Jakub Dawidek for (ii = 0; ii < ncomps; ii++) 158632115b10SPawel Jakub Dawidek hio->hio_errors[ii] = EINVAL; 158732115b10SPawel Jakub Dawidek reqlog(LOG_DEBUG, 2, ggio, "sync: (%p) Sending sync request: ", 158832115b10SPawel Jakub Dawidek hio); 158932115b10SPawel Jakub Dawidek pjdlog_debug(2, "sync: (%p) Moving request to the send queue.", 159032115b10SPawel Jakub Dawidek hio); 159132115b10SPawel Jakub Dawidek mtx_lock(&metadata_lock); 159232115b10SPawel Jakub Dawidek if (res->hr_syncsrc == HAST_SYNCSRC_PRIMARY) { 159332115b10SPawel Jakub Dawidek /* 159432115b10SPawel Jakub Dawidek * This range is up-to-date on local component, 159532115b10SPawel Jakub Dawidek * so handle request locally. 159632115b10SPawel Jakub Dawidek */ 159732115b10SPawel Jakub Dawidek /* Local component is 0 for now. 
*/ 159832115b10SPawel Jakub Dawidek ncomp = 0; 159932115b10SPawel Jakub Dawidek } else /* if (res->hr_syncsrc == HAST_SYNCSRC_SECONDARY) */ { 160032115b10SPawel Jakub Dawidek assert(res->hr_syncsrc == HAST_SYNCSRC_SECONDARY); 160132115b10SPawel Jakub Dawidek /* 160232115b10SPawel Jakub Dawidek * This range is out-of-date on local component, 160332115b10SPawel Jakub Dawidek * so send request to the remote node. 160432115b10SPawel Jakub Dawidek */ 160532115b10SPawel Jakub Dawidek /* Remote component is 1 for now. */ 160632115b10SPawel Jakub Dawidek ncomp = 1; 160732115b10SPawel Jakub Dawidek } 160832115b10SPawel Jakub Dawidek mtx_unlock(&metadata_lock); 160932115b10SPawel Jakub Dawidek refcount_init(&hio->hio_countdown, 1); 161032115b10SPawel Jakub Dawidek QUEUE_INSERT1(hio, send, ncomp); 161132115b10SPawel Jakub Dawidek 161232115b10SPawel Jakub Dawidek /* 161332115b10SPawel Jakub Dawidek * Let's wait for READ to finish. 161432115b10SPawel Jakub Dawidek */ 161532115b10SPawel Jakub Dawidek mtx_lock(&sync_lock); 161632115b10SPawel Jakub Dawidek while (!ISSYNCREQDONE(hio)) 161732115b10SPawel Jakub Dawidek cv_wait(&sync_cond, &sync_lock); 161832115b10SPawel Jakub Dawidek mtx_unlock(&sync_lock); 161932115b10SPawel Jakub Dawidek 162032115b10SPawel Jakub Dawidek if (hio->hio_errors[ncomp] != 0) { 162132115b10SPawel Jakub Dawidek pjdlog_error("Unable to read synchronization data: %s.", 162232115b10SPawel Jakub Dawidek strerror(hio->hio_errors[ncomp])); 162332115b10SPawel Jakub Dawidek goto free_queue; 162432115b10SPawel Jakub Dawidek } 162532115b10SPawel Jakub Dawidek 162632115b10SPawel Jakub Dawidek /* 162732115b10SPawel Jakub Dawidek * We read the data from synchronization source, now write it 162832115b10SPawel Jakub Dawidek * to synchronization target. 162932115b10SPawel Jakub Dawidek */ 163032115b10SPawel Jakub Dawidek SYNCREQ(hio); 163132115b10SPawel Jakub Dawidek ggio->gctl_cmd = BIO_WRITE; 163232115b10SPawel Jakub Dawidek for (ii = 0; ii < ncomps; ii++) 163332115b10SPawel Jakub Dawidek hio->hio_errors[ii] = EINVAL; 163432115b10SPawel Jakub Dawidek reqlog(LOG_DEBUG, 2, ggio, "sync: (%p) Sending sync request: ", 163532115b10SPawel Jakub Dawidek hio); 163632115b10SPawel Jakub Dawidek pjdlog_debug(2, "sync: (%p) Moving request to the send queue.", 163732115b10SPawel Jakub Dawidek hio); 163832115b10SPawel Jakub Dawidek mtx_lock(&metadata_lock); 163932115b10SPawel Jakub Dawidek if (res->hr_syncsrc == HAST_SYNCSRC_PRIMARY) { 164032115b10SPawel Jakub Dawidek /* 164132115b10SPawel Jakub Dawidek * This range is up-to-date on local component, 164232115b10SPawel Jakub Dawidek * so we update remote component. 164332115b10SPawel Jakub Dawidek */ 164432115b10SPawel Jakub Dawidek /* Remote component is 1 for now. */ 164532115b10SPawel Jakub Dawidek ncomp = 1; 164632115b10SPawel Jakub Dawidek } else /* if (res->hr_syncsrc == HAST_SYNCSRC_SECONDARY) */ { 164732115b10SPawel Jakub Dawidek assert(res->hr_syncsrc == HAST_SYNCSRC_SECONDARY); 164832115b10SPawel Jakub Dawidek /* 164932115b10SPawel Jakub Dawidek * This range is out-of-date on local component, 165032115b10SPawel Jakub Dawidek * so we update it. 165132115b10SPawel Jakub Dawidek */ 165232115b10SPawel Jakub Dawidek /* Local component is 0 for now. 
*/ 165332115b10SPawel Jakub Dawidek ncomp = 0; 165432115b10SPawel Jakub Dawidek } 165532115b10SPawel Jakub Dawidek mtx_unlock(&metadata_lock); 165632115b10SPawel Jakub Dawidek 165732115b10SPawel Jakub Dawidek pjdlog_debug(2, "sync: (%p) Moving request to the send queues.", 165832115b10SPawel Jakub Dawidek hio); 165932115b10SPawel Jakub Dawidek refcount_init(&hio->hio_countdown, 1); 166032115b10SPawel Jakub Dawidek QUEUE_INSERT1(hio, send, ncomp); 166132115b10SPawel Jakub Dawidek 166232115b10SPawel Jakub Dawidek /* 166332115b10SPawel Jakub Dawidek * Let's wait for WRITE to finish. 166432115b10SPawel Jakub Dawidek */ 166532115b10SPawel Jakub Dawidek mtx_lock(&sync_lock); 166632115b10SPawel Jakub Dawidek while (!ISSYNCREQDONE(hio)) 166732115b10SPawel Jakub Dawidek cv_wait(&sync_cond, &sync_lock); 166832115b10SPawel Jakub Dawidek mtx_unlock(&sync_lock); 166932115b10SPawel Jakub Dawidek 167032115b10SPawel Jakub Dawidek if (hio->hio_errors[ncomp] != 0) { 167132115b10SPawel Jakub Dawidek pjdlog_error("Unable to write synchronization data: %s.", 167232115b10SPawel Jakub Dawidek strerror(hio->hio_errors[ncomp])); 167332115b10SPawel Jakub Dawidek goto free_queue; 167432115b10SPawel Jakub Dawidek } 167532115b10SPawel Jakub Dawidek free_queue: 167632115b10SPawel Jakub Dawidek mtx_lock(&range_lock); 167732115b10SPawel Jakub Dawidek rangelock_del(range_sync, offset, length); 167832115b10SPawel Jakub Dawidek if (range_regular_wait) 167932115b10SPawel Jakub Dawidek cv_signal(&range_regular_cond); 168032115b10SPawel Jakub Dawidek mtx_unlock(&range_lock); 168132115b10SPawel Jakub Dawidek 168232115b10SPawel Jakub Dawidek synced += length; 168332115b10SPawel Jakub Dawidek 168432115b10SPawel Jakub Dawidek pjdlog_debug(2, "sync: (%p) Moving request to the free queue.", 168532115b10SPawel Jakub Dawidek hio); 168632115b10SPawel Jakub Dawidek QUEUE_INSERT2(hio, free); 168732115b10SPawel Jakub Dawidek } 168832115b10SPawel Jakub Dawidek /* NOTREACHED */ 168932115b10SPawel Jakub Dawidek return (NULL); 169032115b10SPawel Jakub Dawidek } 169132115b10SPawel Jakub Dawidek 169232115b10SPawel Jakub Dawidek static void 169332115b10SPawel Jakub Dawidek sighandler(int sig) 169432115b10SPawel Jakub Dawidek { 169532115b10SPawel Jakub Dawidek bool unlock; 169632115b10SPawel Jakub Dawidek 169732115b10SPawel Jakub Dawidek switch (sig) { 169832115b10SPawel Jakub Dawidek case SIGINT: 169932115b10SPawel Jakub Dawidek case SIGTERM: 170032115b10SPawel Jakub Dawidek sigexit_received = true; 170132115b10SPawel Jakub Dawidek break; 170232115b10SPawel Jakub Dawidek default: 170332115b10SPawel Jakub Dawidek assert(!"invalid condition"); 170432115b10SPawel Jakub Dawidek } 170532115b10SPawel Jakub Dawidek /* 170632115b10SPawel Jakub Dawidek * XXX: Racy, but if we cannot obtain hio_guard_lock here, we don't 170732115b10SPawel Jakub Dawidek * want to risk deadlock. 170832115b10SPawel Jakub Dawidek */ 170932115b10SPawel Jakub Dawidek unlock = mtx_trylock(&hio_guard_lock); 171032115b10SPawel Jakub Dawidek cv_signal(&hio_guard_cond); 171132115b10SPawel Jakub Dawidek if (unlock) 171232115b10SPawel Jakub Dawidek mtx_unlock(&hio_guard_lock); 171332115b10SPawel Jakub Dawidek } 171432115b10SPawel Jakub Dawidek 171532115b10SPawel Jakub Dawidek /* 171632115b10SPawel Jakub Dawidek * Thread guards remote connections and reconnects when needed, handles 171732115b10SPawel Jakub Dawidek * signals, etc. 
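 * It sleeps on hio_guard_cond (signalled from sighandler()) and
 * retries failed reconnects every RECONNECT_SLEEP seconds.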
171832115b10SPawel Jakub Dawidek */
171932115b10SPawel Jakub Dawidek static void *
172032115b10SPawel Jakub Dawidek guard_thread(void *arg)
172132115b10SPawel Jakub Dawidek {
172232115b10SPawel Jakub Dawidek struct hast_resource *res = arg;
17230d9014f3SPawel Jakub Dawidek struct proto_conn *in, *out;
172432115b10SPawel Jakub Dawidek unsigned int ii, ncomps;
172532115b10SPawel Jakub Dawidek int timeout;
172632115b10SPawel Jakub Dawidek
172732115b10SPawel Jakub Dawidek ncomps = HAST_NCOMPONENTS;
172832115b10SPawel Jakub Dawidek /* There is only one remote component for now. */
172932115b10SPawel Jakub Dawidek #define ISREMOTE(no) ((no) == 1)
173032115b10SPawel Jakub Dawidek
173132115b10SPawel Jakub Dawidek for (;;) {
173232115b10SPawel Jakub Dawidek if (sigexit_received) {
173332115b10SPawel Jakub Dawidek primary_exitx(EX_OK,
173432115b10SPawel Jakub Dawidek "Termination signal received, exiting.");
173532115b10SPawel Jakub Dawidek }
173632115b10SPawel Jakub Dawidek /*
173732115b10SPawel Jakub Dawidek * If all the connections are fine, we sleep until someone
173832115b10SPawel Jakub Dawidek * wakes us up.
173932115b10SPawel Jakub Dawidek * If any connection is broken and we cannot reconnect, we
174032115b10SPawel Jakub Dawidek * sleep only for RECONNECT_SLEEP seconds so we can retry
174132115b10SPawel Jakub Dawidek * soon.
174232115b10SPawel Jakub Dawidek */
174332115b10SPawel Jakub Dawidek timeout = 0;
174432115b10SPawel Jakub Dawidek pjdlog_debug(2, "remote_guard: Checking connections.");
174532115b10SPawel Jakub Dawidek mtx_lock(&hio_guard_lock);
174632115b10SPawel Jakub Dawidek for (ii = 0; ii < ncomps; ii++) {
174732115b10SPawel Jakub Dawidek if (!ISREMOTE(ii))
174832115b10SPawel Jakub Dawidek continue;
174932115b10SPawel Jakub Dawidek rw_rlock(&hio_remote_lock[ii]);
175032115b10SPawel Jakub Dawidek if (ISCONNECTED(res, ii)) {
175132115b10SPawel Jakub Dawidek assert(res->hr_remotein != NULL);
175232115b10SPawel Jakub Dawidek assert(res->hr_remoteout != NULL);
175332115b10SPawel Jakub Dawidek rw_unlock(&hio_remote_lock[ii]);
175432115b10SPawel Jakub Dawidek pjdlog_debug(2,
175532115b10SPawel Jakub Dawidek "remote_guard: Connection to %s is ok.",
175632115b10SPawel Jakub Dawidek res->hr_remoteaddr);
175732115b10SPawel Jakub Dawidek } else {
175832115b10SPawel Jakub Dawidek assert(res->hr_remotein == NULL);
175932115b10SPawel Jakub Dawidek assert(res->hr_remoteout == NULL);
176032115b10SPawel Jakub Dawidek /*
176132115b10SPawel Jakub Dawidek * Upgrade the lock. It doesn't have to be
176232115b10SPawel Jakub Dawidek * atomic as no other thread can change
176332115b10SPawel Jakub Dawidek * connection status from disconnected to
176432115b10SPawel Jakub Dawidek * connected.
176532115b10SPawel Jakub Dawidek */ 176632115b10SPawel Jakub Dawidek rw_unlock(&hio_remote_lock[ii]); 176732115b10SPawel Jakub Dawidek pjdlog_debug(2, 176832115b10SPawel Jakub Dawidek "remote_guard: Reconnecting to %s.", 176932115b10SPawel Jakub Dawidek res->hr_remoteaddr); 17700d9014f3SPawel Jakub Dawidek in = out = NULL; 17710d9014f3SPawel Jakub Dawidek if (init_remote(res, &in, &out)) { 17720d9014f3SPawel Jakub Dawidek rw_wlock(&hio_remote_lock[ii]); 17730d9014f3SPawel Jakub Dawidek assert(res->hr_remotein == NULL); 17740d9014f3SPawel Jakub Dawidek assert(res->hr_remoteout == NULL); 17750d9014f3SPawel Jakub Dawidek assert(in != NULL && out != NULL); 17760d9014f3SPawel Jakub Dawidek res->hr_remotein = in; 17770d9014f3SPawel Jakub Dawidek res->hr_remoteout = out; 17780d9014f3SPawel Jakub Dawidek rw_unlock(&hio_remote_lock[ii]); 177932115b10SPawel Jakub Dawidek pjdlog_info("Successfully reconnected to %s.", 178032115b10SPawel Jakub Dawidek res->hr_remoteaddr); 17810d9014f3SPawel Jakub Dawidek sync_start(); 178232115b10SPawel Jakub Dawidek } else { 178332115b10SPawel Jakub Dawidek /* Both connections should be NULL. */ 178432115b10SPawel Jakub Dawidek assert(res->hr_remotein == NULL); 178532115b10SPawel Jakub Dawidek assert(res->hr_remoteout == NULL); 17860d9014f3SPawel Jakub Dawidek assert(in == NULL && out == NULL); 178732115b10SPawel Jakub Dawidek pjdlog_debug(2, 178832115b10SPawel Jakub Dawidek "remote_guard: Reconnect to %s failed.", 178932115b10SPawel Jakub Dawidek res->hr_remoteaddr); 179032115b10SPawel Jakub Dawidek timeout = RECONNECT_SLEEP; 179132115b10SPawel Jakub Dawidek } 179232115b10SPawel Jakub Dawidek } 179332115b10SPawel Jakub Dawidek } 179432115b10SPawel Jakub Dawidek (void)cv_timedwait(&hio_guard_cond, &hio_guard_lock, timeout); 179532115b10SPawel Jakub Dawidek mtx_unlock(&hio_guard_lock); 179632115b10SPawel Jakub Dawidek } 179732115b10SPawel Jakub Dawidek #undef ISREMOTE 179832115b10SPawel Jakub Dawidek /* NOTREACHED */ 179932115b10SPawel Jakub Dawidek return (NULL); 180032115b10SPawel Jakub Dawidek } 1801