/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or https://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2017 by Lawrence Livermore National Security, LLC.
 */

#include <sys/abd.h>
#include <sys/mmp.h>
#include <sys/spa.h>
#include <sys/spa_impl.h>
#include <sys/time.h>
#include <sys/vdev.h>
#include <sys/vdev_impl.h>
#include <sys/zfs_context.h>
#include <sys/callb.h>

/*
 * Multi-Modifier Protection (MMP) attempts to prevent a user from importing
 * or opening a pool on more than one host at a time. In particular, it
 * prevents "zpool import -f" on a host from succeeding while the pool is
 * already imported on another host. There are many other ways in which a
 * device could be used by two hosts for different purposes at the same time
 * resulting in pool damage. This implementation does not attempt to detect
 * those cases.
 *
 * MMP operates by ensuring there are frequent visible changes on disk (a
 * "heartbeat") at all times, and by altering the import process to check
 * for these changes and fail the import when they are detected. This
 * functionality is enabled by setting the 'multihost' pool property to on.
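 *
 * For example (illustrative; MMP also requires each host to have a unique,
 * nonzero hostid, e.g. as reported by hostid(1)):
 *
 *	zpool set multihost=on <pool>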
 *
 * Uberblocks written by the txg_sync thread always go into the first
 * (N-MMP_BLOCKS_PER_LABEL) slots; the remaining slots are reserved for MMP.
 * They are used to hold uberblocks which are exactly the same as the last
 * synced uberblock except that the ub_timestamp and mmp_config are frequently
 * updated. Like all other uberblocks, the slot is written with an embedded
 * checksum, and slots with invalid checksums are ignored. This provides the
 * "heartbeat", with no risk of overwriting good uberblocks that must be
 * preserved, e.g. previous txgs and associated block pointers.
 *
 * Three optional fields are added to the uberblock structure: ub_mmp_magic,
 * ub_mmp_config, and ub_mmp_delay. The ub_mmp_magic value allows zfs to tell
 * whether the other ub_mmp_* fields are valid. The ub_mmp_config field tells
 * the importing host the settings of zfs_multihost_interval and
 * zfs_multihost_fail_intervals on the host which last had (or currently has)
 * the pool imported. These determine how long a host must wait to detect
 * activity in the pool before concluding the pool is not in use. The
 * mmp_delay field is a decaying average of the amount of time between
 * completion of successive MMP writes, in nanoseconds. It indicates whether
 * MMP is enabled.
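 *
 * As a rough sketch of the packing (the authoritative layout is the set of
 * MMP_*() macros in include/sys/mmp.h), ub_mmp_config carries, in one
 * 64-bit word: a byte of validity flags, a 16-bit MMP sequence number, a
 * 24-bit write interval in milliseconds, and an 8-bit fail_intervals count.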
 *
 * During import an activity test may now be performed to determine if
 * the pool is in use. The activity test is typically required if the
 * ZPOOL_CONFIG_HOSTID does not match the system hostid, the pool state is
 * POOL_STATE_ACTIVE, and the pool is not a root pool.
 *
 * The activity test finds the "best" uberblock (highest txg, timestamp, and,
 * if ub_mmp_magic is valid, sequence number from ub_mmp_config). It then waits
 * some time, and finds the "best" uberblock again. If any of the mentioned
 * fields have different values in the newly read uberblock, the pool is in use
 * by another host and the import fails. In order to assure the accuracy of the
 * activity test, the default values result in an activity test duration of 20x
 * the mmp write interval.
 *
 * The duration of the "zpool import" activity test depends on the information
 * available in the "best" uberblock:
 *
 * 1) If uberblock was written by zfs-0.8 or newer and fail_intervals > 0:
 *    ub_mmp_config.fail_intervals * ub_mmp_config.multihost_interval * 2
 *
 * In this case, a weak guarantee is provided. Since the host which last had
 * the pool imported will suspend the pool if no mmp writes land within
 * fail_intervals * multihost_interval ms, the absence of writes during that
 * time means either the pool is not imported, or it is imported but the pool
 * is suspended and no further writes will occur.
 *
 * Note that resuming the suspended pool on the remote host would invalidate
 * this guarantee, and so it is not allowed.
 *
 * The factor of 2 provides a conservative safety factor and derives from
 * MMP_IMPORT_SAFETY_FACTOR.
 *
 * 2) If uberblock was written by zfs-0.8 or newer and fail_intervals == 0:
 *    (ub_mmp_config.multihost_interval + ub_mmp_delay) *
 *    zfs_multihost_import_intervals
 *
 * In this case no guarantee can be provided. However, as long as some devices
 * are healthy and connected, it is likely that at least one write will land
 * within (multihost_interval + mmp_delay) because multihost_interval is
 * enough time for a write to be attempted to each leaf vdev, and mmp_delay
 * is enough for one to land, based on past delays. Multiplying by
 * zfs_multihost_import_intervals provides a conservative safety factor.
 *
 * 3) If uberblock was written by zfs-0.7:
 *    (zfs_multihost_interval + ub_mmp_delay) * zfs_multihost_import_intervals
 *
 * The same logic as case #2 applies, but we do not know remote tunables.
 *
 * We use the local value for zfs_multihost_interval because the original MMP
 * did not record this value in the uberblock.
 *
 * ub_mmp_delay >= (zfs_multihost_interval / leaves), so if the other host
 * has a much larger zfs_multihost_interval set, ub_mmp_delay will reflect
 * that. We will have waited enough time for zfs_multihost_import_intervals
 * writes to be issued and all but one to land.
 *
 *    single device pool example delays:
 *
 *    import_delay = (1 + 1) * 20   =  40s	#defaults, no I/O delay
 *    import_delay = (1 + 10) * 20  = 220s	#defaults, 10s I/O delay
 *    import_delay = (10 + 10) * 20 = 400s	#10s multihost_interval,
 *						# no I/O delay
 *
 *    100 device pool example delays:
 *
 *    import_delay = (1 + .01) * 20 =  20s	#defaults, no I/O delay
 *    import_delay = (1 + 10) * 20  = 220s	#defaults, 10s I/O delay
 *    import_delay = (10 + .1) * 20 = 202s	#10s multihost_interval,
 *						# no I/O delay
 *
 * 4) Otherwise, this uberblock was written by a pre-MMP zfs:
 *    zfs_multihost_import_intervals * zfs_multihost_interval
 *
 * In this case local tunables are used. By default this product = 20s, long
 * enough for a pool with any activity at all to write at least one
 * uberblock. No guarantee can be provided.
 *
 * Additionally, the duration is then extended by a random 25% to attempt to
 * detect simultaneous imports, for example when both partner hosts are
 * rebooted at the same time and each automatically attempts to import the
 * pool.
 */
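
/*
 * A condensed sketch of the case analysis above, in pseudo-C. It assumes the
 * MMP_*() accessor macros from include/sys/mmp.h; the authoritative version
 * of this logic lives in the import path in spa.c, where the factor of 2 is
 * MMP_IMPORT_SAFETY_FACTOR, expressed as a percentage:
 *
 *	if (MMP_INTERVAL_VALID(ub) && MMP_FAIL_INT_VALID(ub) &&
 *	    MMP_FAIL_INT(ub) > 0)			(case #1)
 *		import_delay = MMP_FAIL_INT(ub) * MMP_INTERVAL(ub) * 2;
 *	else if (MMP_INTERVAL_VALID(ub))		(case #2)
 *		import_delay = (MMP_INTERVAL(ub) + ub->ub_mmp_delay) *
 *		    zfs_multihost_import_intervals;
 *	else if (ub->ub_mmp_magic == MMP_MAGIC)		(case #3)
 *		import_delay = (zfs_multihost_interval + ub->ub_mmp_delay) *
 *		    zfs_multihost_import_intervals;
 *	else						(case #4)
 *		import_delay = zfs_multihost_import_intervals *
 *		    zfs_multihost_interval;
 */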

/*
 * Used to control the frequency of mmp writes which are performed when the
 * 'multihost' pool property is on. This is one factor used to determine the
 * length of the activity check during import.
 *
 * On average an mmp write will be issued for each leaf vdev every
 * zfs_multihost_interval milliseconds. In practice, the observed period can
 * vary with the I/O load and this observed value is the ub_mmp_delay which
 * is stored in the uberblock. The minimum allowed value is 100 ms.
 */
uint64_t zfs_multihost_interval = MMP_DEFAULT_INTERVAL;

/*
 * Used to control the duration of the activity test on import. Smaller values
 * of zfs_multihost_import_intervals will reduce the import time but increase
 * the risk of failing to detect an active pool. The total activity check time
 * is never allowed to drop below one second. A value of 0 is ignored and
 * treated as if it was set to 1.
 */
uint_t zfs_multihost_import_intervals = MMP_DEFAULT_IMPORT_INTERVALS;

/*
 * Controls the behavior of the pool when mmp write failures or delays are
 * detected.
 *
 * When zfs_multihost_fail_intervals = 0, mmp write failures or delays are
 * ignored. The failures will still be reported to the ZED which, depending on
 * its configuration, may take action such as suspending the pool or taking a
 * device offline.
 *
 * When zfs_multihost_fail_intervals > 0, the pool will be suspended if
 * zfs_multihost_fail_intervals * zfs_multihost_interval milliseconds pass
 * without a successful mmp write. This guarantees the activity test will see
 * mmp writes if the pool is imported. A value of 1 is ignored and treated as
 * if it was set to 2, because a single-leaf-vdev pool issues a write once per
 * multihost_interval and thus any variation in latency would cause the pool
 * to be suspended.
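 *
 * For example, with the default zfs_multihost_interval of 1000 ms and the
 * default zfs_multihost_fail_intervals of 10, the pool is suspended once
 * 10 seconds pass without a successful mmp write.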
 */
uint_t zfs_multihost_fail_intervals = MMP_DEFAULT_FAIL_INTERVALS;

static const void *const mmp_tag = "mmp_write_uberblock";
static __attribute__((noreturn)) void mmp_thread(void *arg);

void
mmp_init(spa_t *spa)
{
	mmp_thread_t *mmp = &spa->spa_mmp;

	mutex_init(&mmp->mmp_thread_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&mmp->mmp_thread_cv, NULL, CV_DEFAULT, NULL);
	mutex_init(&mmp->mmp_io_lock, NULL, MUTEX_DEFAULT, NULL);
	mmp->mmp_kstat_id = 1;
}

void
mmp_fini(spa_t *spa)
{
	mmp_thread_t *mmp = &spa->spa_mmp;

	mutex_destroy(&mmp->mmp_thread_lock);
	cv_destroy(&mmp->mmp_thread_cv);
	mutex_destroy(&mmp->mmp_io_lock);
}

static void
mmp_thread_enter(mmp_thread_t *mmp, callb_cpr_t *cpr)
{
	CALLB_CPR_INIT(cpr, &mmp->mmp_thread_lock, callb_generic_cpr, FTAG);
	mutex_enter(&mmp->mmp_thread_lock);
}

static void
mmp_thread_exit(mmp_thread_t *mmp, kthread_t **mpp, callb_cpr_t *cpr)
{
	ASSERT(*mpp != NULL);
	*mpp = NULL;
	cv_broadcast(&mmp->mmp_thread_cv);
	CALLB_CPR_EXIT(cpr);		/* drops &mmp->mmp_thread_lock */
}

void
mmp_thread_start(spa_t *spa)
{
	mmp_thread_t *mmp = &spa->spa_mmp;

	if (spa_writeable(spa)) {
		mutex_enter(&mmp->mmp_thread_lock);
		if (!mmp->mmp_thread) {
			mmp->mmp_thread = thread_create(NULL, 0, mmp_thread,
			    spa, 0, &p0, TS_RUN, defclsyspri);
			zfs_dbgmsg("MMP thread started pool '%s' "
			    "gethrtime %llu", spa_name(spa), gethrtime());
		}
		mutex_exit(&mmp->mmp_thread_lock);
	}
}

void
mmp_thread_stop(spa_t *spa)
{
	mmp_thread_t *mmp = &spa->spa_mmp;

	mutex_enter(&mmp->mmp_thread_lock);
	mmp->mmp_thread_exiting = 1;
	cv_broadcast(&mmp->mmp_thread_cv);

	while (mmp->mmp_thread) {
		cv_wait(&mmp->mmp_thread_cv, &mmp->mmp_thread_lock);
	}
	mutex_exit(&mmp->mmp_thread_lock);
	zfs_dbgmsg("MMP thread stopped pool '%s' gethrtime %llu",
	    spa_name(spa), gethrtime());

	ASSERT(mmp->mmp_thread == NULL);
	mmp->mmp_thread_exiting = 0;
}

typedef enum mmp_vdev_state_flag {
	MMP_FAIL_NOT_WRITABLE	= (1 << 0),
	MMP_FAIL_WRITE_PENDING	= (1 << 1),
} mmp_vdev_state_flag_t;

/*
 * Find a leaf vdev to write an MMP block to. It must not have an outstanding
 * mmp write (if so a new write will also likely block). If there is no usable
 * leaf, a nonzero error value is returned. The error value returned is a bit
 * field.
 *
 * MMP_FAIL_WRITE_PENDING	One or more leaf vdevs are writeable, but have
 *				an outstanding MMP write.
 * MMP_FAIL_NOT_WRITABLE	One or more leaf vdevs are not writeable.
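 *
 * For example, if one leaf is offline and every other leaf has an MMP write
 * outstanding, the bits accumulate across the loop below and the return
 * value is (MMP_FAIL_NOT_WRITABLE | MMP_FAIL_WRITE_PENDING).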
 */

static int
mmp_next_leaf(spa_t *spa)
{
	vdev_t *leaf;
	vdev_t *starting_leaf;
	int fail_mask = 0;

	ASSERT(MUTEX_HELD(&spa->spa_mmp.mmp_io_lock));
	ASSERT(spa_config_held(spa, SCL_STATE, RW_READER));
	ASSERT(list_link_active(&spa->spa_leaf_list.list_head) == B_TRUE);
	ASSERT(!list_is_empty(&spa->spa_leaf_list));

	if (spa->spa_mmp.mmp_leaf_last_gen != spa->spa_leaf_list_gen) {
		spa->spa_mmp.mmp_last_leaf = list_head(&spa->spa_leaf_list);
		spa->spa_mmp.mmp_leaf_last_gen = spa->spa_leaf_list_gen;
	}

	leaf = spa->spa_mmp.mmp_last_leaf;
	if (leaf == NULL)
		leaf = list_head(&spa->spa_leaf_list);
	starting_leaf = leaf;

	do {
		leaf = list_next(&spa->spa_leaf_list, leaf);
		if (leaf == NULL) {
			leaf = list_head(&spa->spa_leaf_list);
			ASSERT3P(leaf, !=, NULL);
		}

		/*
		 * We skip unwritable, offline, detached, and dRAID spare
		 * devices as they are either not legal targets or the write
		 * may fail or not be seen by other hosts. Skipped dRAID
		 * spares can never be written so the fail mask is not set.
		 */
		if (!vdev_writeable(leaf) || leaf->vdev_offline ||
		    leaf->vdev_detached) {
			fail_mask |= MMP_FAIL_NOT_WRITABLE;
		} else if (leaf->vdev_ops == &vdev_draid_spare_ops) {
			continue;
		} else if (leaf->vdev_mmp_pending != 0) {
			fail_mask |= MMP_FAIL_WRITE_PENDING;
		} else {
			spa->spa_mmp.mmp_last_leaf = leaf;
			return (0);
		}
	} while (leaf != starting_leaf);

	ASSERT(fail_mask);

	return (fail_mask);
}

/*
 * MMP writes are issued on a fixed schedule, but may complete at variable,
 * much longer, intervals. The mmp_delay captures long periods between
 * successful writes for any reason, including disk latency, scheduling delays,
 * etc.
 *
 * The mmp_delay is usually calculated as a decaying average, but if the latest
 * delay is higher we do not average it, so that we do not hide sudden spikes
 * which the importing host must wait for.
 *
 * If writes are occurring frequently, such as due to a high rate of txg syncs,
 * the mmp_delay could become very small. Since those short delays depend on
 * activity we cannot count on, we never allow mmp_delay to get lower than the
 * rate expected if only mmp_thread writes occur.
 *
 * If an mmp write was skipped or fails, and we have already waited longer than
 * mmp_delay, we need to update it so the next write reflects the longer delay.
 *
 * Do not set mmp_delay if the multihost property is not on, so as not to
 * trigger an activity check on import.
 */
static void
mmp_delay_update(spa_t *spa, boolean_t write_completed)
{
	mmp_thread_t *mts = &spa->spa_mmp;
	hrtime_t delay = gethrtime() - mts->mmp_last_write;

	ASSERT(MUTEX_HELD(&mts->mmp_io_lock));

	if (spa_multihost(spa) == B_FALSE) {
		mts->mmp_delay = 0;
		return;
	}

	if (delay > mts->mmp_delay)
		mts->mmp_delay = delay;

	if (write_completed == B_FALSE)
		return;

	mts->mmp_last_write = gethrtime();

	/*
	 * strictly less than, in case delay was changed above.
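	 *
	 * The decaying average below weights history 127:1, so one fast
	 * write barely moves it; e.g. an existing mmp_delay of 128 ms and a
	 * new 64 ms sample yield (64 + 128 * 127) / 128 = 127.5 ms. The
	 * min_delay floor keeps the average from decaying below one expected
	 * mmp_thread write per leaf.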
	 */
	if (delay < mts->mmp_delay) {
		hrtime_t min_delay =
		    MSEC2NSEC(MMP_INTERVAL_OK(zfs_multihost_interval)) /
		    MAX(1, vdev_count_leaves(spa));
		mts->mmp_delay = MAX(((delay + mts->mmp_delay * 127) / 128),
		    min_delay);
	}
}

static void
mmp_write_done(zio_t *zio)
{
	spa_t *spa = zio->io_spa;
	vdev_t *vd = zio->io_vd;
	mmp_thread_t *mts = zio->io_private;

	mutex_enter(&mts->mmp_io_lock);
	uint64_t mmp_kstat_id = vd->vdev_mmp_kstat_id;
	hrtime_t mmp_write_duration = gethrtime() - vd->vdev_mmp_pending;

	mmp_delay_update(spa, (zio->io_error == 0));

	vd->vdev_mmp_pending = 0;
	vd->vdev_mmp_kstat_id = 0;

	mutex_exit(&mts->mmp_io_lock);
	spa_config_exit(spa, SCL_STATE, mmp_tag);

	spa_mmp_history_set(spa, mmp_kstat_id, zio->io_error,
	    mmp_write_duration);

	abd_free(zio->io_abd);
}

/*
 * When the uberblock on-disk is updated by a spa_sync,
 * creating a new "best" uberblock, update the one stored
 * in the mmp thread state, used for mmp writes.
 */
void
mmp_update_uberblock(spa_t *spa, uberblock_t *ub)
{
	mmp_thread_t *mmp = &spa->spa_mmp;

	mutex_enter(&mmp->mmp_io_lock);
	mmp->mmp_ub = *ub;
	mmp->mmp_seq = 1;
	mmp->mmp_ub.ub_timestamp = gethrestime_sec();
	mmp_delay_update(spa, B_TRUE);
	mutex_exit(&mmp->mmp_io_lock);
}

/*
 * Choose a random vdev, label, and MMP block, and write over it
 * with a copy of the last-synced uberblock, whose timestamp
 * has been updated to reflect that the pool is in use.
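 *
 * Only the last MMP_BLOCKS_PER_LABEL slots of the uberblock ring in the
 * chosen label are candidates; e.g. with a 128-slot ring and
 * MMP_BLOCKS_PER_LABEL of 1, only the final slot is ever overwritten here,
 * so uberblocks written by txg_sync are never at risk.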
 */
static void
mmp_write_uberblock(spa_t *spa)
{
	int flags = ZIO_FLAG_CONFIG_WRITER | ZIO_FLAG_CANFAIL;
	mmp_thread_t *mmp = &spa->spa_mmp;
	uberblock_t *ub;
	vdev_t *vd = NULL;
	int label, error;
	uint64_t offset;

	hrtime_t lock_acquire_time = gethrtime();
	spa_config_enter_mmp(spa, SCL_STATE, mmp_tag, RW_READER);
	lock_acquire_time = gethrtime() - lock_acquire_time;
	if (lock_acquire_time > (MSEC2NSEC(MMP_MIN_INTERVAL) / 10))
		zfs_dbgmsg("MMP SCL_STATE acquisition pool '%s' took %llu ns "
		    "gethrtime %llu", spa_name(spa), lock_acquire_time,
		    gethrtime());

	mutex_enter(&mmp->mmp_io_lock);

	error = mmp_next_leaf(spa);

	/*
	 * spa_mmp_history has two types of entries:
	 * Issued MMP write: records time issued, error status, etc.
	 * Skipped MMP write: an MMP write could not be issued because no
	 * suitable leaf vdev was available. See comment above struct
	 * spa_mmp_history for details.
	 */

	if (error) {
		mmp_delay_update(spa, B_FALSE);
		if (mmp->mmp_skip_error == error) {
			spa_mmp_history_set_skip(spa, mmp->mmp_kstat_id - 1);
		} else {
			mmp->mmp_skip_error = error;
			spa_mmp_history_add(spa, mmp->mmp_ub.ub_txg,
			    gethrestime_sec(), mmp->mmp_delay, NULL, 0,
			    mmp->mmp_kstat_id++, error);
			zfs_dbgmsg("MMP error choosing leaf pool '%s' "
			    "gethrtime %llu fail_mask %#x", spa_name(spa),
			    gethrtime(), error);
		}
		mutex_exit(&mmp->mmp_io_lock);
		spa_config_exit(spa, SCL_STATE, mmp_tag);
		return;
	}

	vd = spa->spa_mmp.mmp_last_leaf;
	if (mmp->mmp_skip_error != 0) {
		mmp->mmp_skip_error = 0;
		zfs_dbgmsg("MMP write after skipping due to unavailable "
		    "leaves, pool '%s' gethrtime %llu leaf %llu",
		    spa_name(spa), (u_longlong_t)gethrtime(),
		    (u_longlong_t)vd->vdev_guid);
	}

	if (mmp->mmp_zio_root == NULL)
		mmp->mmp_zio_root = zio_root(spa, NULL, NULL,
		    flags | ZIO_FLAG_GODFATHER);

	if (mmp->mmp_ub.ub_timestamp != gethrestime_sec()) {
		/*
		 * Want to reset mmp_seq when timestamp advances because after
		 * an mmp_seq wrap new values will not be chosen by
		 * uberblock_compare() as the "best".
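		 *
		 * (The comparison prefers a higher MMP sequence number only
		 * when txg and timestamp are equal, so resetting to 1 here
		 * is safe: once the timestamp advances, the new uberblock
		 * wins regardless of sequence.)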
		 */
		mmp->mmp_ub.ub_timestamp = gethrestime_sec();
		mmp->mmp_seq = 1;
	}

	ub = &mmp->mmp_ub;
	ub->ub_mmp_magic = MMP_MAGIC;
	ub->ub_mmp_delay = mmp->mmp_delay;
	ub->ub_mmp_config = MMP_SEQ_SET(mmp->mmp_seq) |
	    MMP_INTERVAL_SET(MMP_INTERVAL_OK(zfs_multihost_interval)) |
	    MMP_FAIL_INT_SET(MMP_FAIL_INTVS_OK(
	    zfs_multihost_fail_intervals));
	vd->vdev_mmp_pending = gethrtime();
	vd->vdev_mmp_kstat_id = mmp->mmp_kstat_id;

	zio_t *zio = zio_null(mmp->mmp_zio_root, spa, NULL, NULL, NULL, flags);
	abd_t *ub_abd = abd_alloc_for_io(VDEV_UBERBLOCK_SIZE(vd), B_TRUE);
	abd_copy_from_buf(ub_abd, ub, sizeof (uberblock_t));
	abd_zero_off(ub_abd, sizeof (uberblock_t),
	    VDEV_UBERBLOCK_SIZE(vd) - sizeof (uberblock_t));

	mmp->mmp_seq++;
	mmp->mmp_kstat_id++;
	mutex_exit(&mmp->mmp_io_lock);

	offset = VDEV_UBERBLOCK_OFFSET(vd, VDEV_UBERBLOCK_COUNT(vd) -
	    MMP_BLOCKS_PER_LABEL + random_in_range(MMP_BLOCKS_PER_LABEL));

	label = random_in_range(VDEV_LABELS);
	vdev_label_write(zio, vd, label, ub_abd, offset,
	    VDEV_UBERBLOCK_SIZE(vd), mmp_write_done, mmp,
	    flags | ZIO_FLAG_DONT_PROPAGATE);

	(void) spa_mmp_history_add(spa, ub->ub_txg, ub->ub_timestamp,
	    ub->ub_mmp_delay, vd, label, vd->vdev_mmp_kstat_id, 0);

	zio_nowait(zio);
}

static __attribute__((noreturn)) void
mmp_thread(void *arg)
{
	spa_t *spa = (spa_t *)arg;
	mmp_thread_t *mmp = &spa->spa_mmp;
	boolean_t suspended = spa_suspended(spa);
	boolean_t multihost = spa_multihost(spa);
	uint64_t mmp_interval = MSEC2NSEC(MMP_INTERVAL_OK(
	    zfs_multihost_interval));
	uint32_t mmp_fail_intervals = MMP_FAIL_INTVS_OK(
	    zfs_multihost_fail_intervals);
	hrtime_t mmp_fail_ns = mmp_fail_intervals * mmp_interval;
	boolean_t last_spa_suspended;
	boolean_t last_spa_multihost;
	uint64_t last_mmp_interval;
	uint32_t last_mmp_fail_intervals;
	hrtime_t last_mmp_fail_ns;
	callb_cpr_t cpr;
	int skip_wait = 0;

	mmp_thread_enter(mmp, &cpr);

	/*
	 * There have been no MMP writes yet.
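	 *
	 * With the default tunables that grace period is 10 * 1000 ms, i.e.
	 * 10 seconds to land the first write after import.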
	 *
	 * Setting mmp_last_write here gives us one mmp_fail_ns period, which
	 * is consistent with the activity check duration, to try to land an
	 * MMP write before MMP suspends the pool (if so configured).
	 */

	mutex_enter(&mmp->mmp_io_lock);
	mmp->mmp_last_write = gethrtime();
	mmp->mmp_delay = MSEC2NSEC(MMP_INTERVAL_OK(zfs_multihost_interval));
	mutex_exit(&mmp->mmp_io_lock);

	while (!mmp->mmp_thread_exiting) {
		hrtime_t next_time = gethrtime() +
		    MSEC2NSEC(MMP_DEFAULT_INTERVAL);
		int leaves = MAX(vdev_count_leaves(spa), 1);

		/* Detect changes in tunables or state */

		last_spa_suspended = suspended;
		last_spa_multihost = multihost;
		suspended = spa_suspended(spa);
		multihost = spa_multihost(spa);

		last_mmp_interval = mmp_interval;
		last_mmp_fail_intervals = mmp_fail_intervals;
		last_mmp_fail_ns = mmp_fail_ns;
		mmp_interval = MSEC2NSEC(MMP_INTERVAL_OK(
		    zfs_multihost_interval));
		mmp_fail_intervals = MMP_FAIL_INTVS_OK(
		    zfs_multihost_fail_intervals);

		/* Smooth so pool is not suspended when reducing tunables */
		if (mmp_fail_intervals * mmp_interval < mmp_fail_ns) {
			mmp_fail_ns = (mmp_fail_ns * 31 +
			    mmp_fail_intervals * mmp_interval) / 32;
		} else {
			mmp_fail_ns = mmp_fail_intervals *
			    mmp_interval;
		}
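
		/*
		 * The smoothing above closes 1/32 of the remaining gap per
		 * iteration; e.g. a 10 s mmp_fail_ns decaying toward a new
		 * 5 s target moves to (10 * 31 + 5) / 32 = 9.84 s on the
		 * first pass, halving the gap roughly every 22 iterations.
		 */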

		if (mmp_interval != last_mmp_interval ||
		    mmp_fail_intervals != last_mmp_fail_intervals) {
			/*
			 * We want other hosts to see new tunables as quickly
			 * as possible. Write out at higher frequency than
			 * usual.
			 */
			skip_wait += leaves;
		}

		if (multihost)
			next_time = gethrtime() + mmp_interval / leaves;

		if (mmp_fail_ns != last_mmp_fail_ns) {
			zfs_dbgmsg("MMP interval change pool '%s' "
			    "gethrtime %llu last_mmp_interval %llu "
			    "mmp_interval %llu last_mmp_fail_intervals %u "
			    "mmp_fail_intervals %u mmp_fail_ns %llu "
			    "skip_wait %d leaves %d next_time %llu",
			    spa_name(spa), (u_longlong_t)gethrtime(),
			    (u_longlong_t)last_mmp_interval,
			    (u_longlong_t)mmp_interval,
			    last_mmp_fail_intervals, mmp_fail_intervals,
			    (u_longlong_t)mmp_fail_ns, skip_wait, leaves,
			    (u_longlong_t)next_time);
		}

		/*
		 * MMP off => on, or suspended => !suspended:
		 * No writes occurred recently. Update mmp_last_write to give
		 * us some time to try.
		 */
		if ((!last_spa_multihost && multihost) ||
		    (last_spa_suspended && !suspended)) {
			zfs_dbgmsg("MMP state change pool '%s': "
			    "gethrtime %llu last_spa_multihost %u "
			    "multihost %u last_spa_suspended %u suspended %u",
			    spa_name(spa), (u_longlong_t)gethrtime(),
			    last_spa_multihost, multihost, last_spa_suspended,
			    suspended);
			mutex_enter(&mmp->mmp_io_lock);
			mmp->mmp_last_write = gethrtime();
			mmp->mmp_delay = mmp_interval;
			mutex_exit(&mmp->mmp_io_lock);
		}

		/*
		 * MMP on => off:
		 * mmp_delay == 0 tells importing node to skip activity check.
		 */
		if (last_spa_multihost && !multihost) {
			mutex_enter(&mmp->mmp_io_lock);
			mmp->mmp_delay = 0;
			mutex_exit(&mmp->mmp_io_lock);
		}

		/*
		 * Suspend the pool if no MMP write has succeeded in over
		 * mmp_interval * mmp_fail_intervals nanoseconds.
		 */
		if (multihost && !suspended && mmp_fail_intervals &&
		    (gethrtime() - mmp->mmp_last_write) > mmp_fail_ns) {
			zfs_dbgmsg("MMP suspending pool '%s': gethrtime %llu "
			    "mmp_last_write %llu mmp_interval %llu "
			    "mmp_fail_intervals %llu mmp_fail_ns %llu "
			    "txg %llu",
			    spa_name(spa), (u_longlong_t)gethrtime(),
			    (u_longlong_t)mmp->mmp_last_write,
			    (u_longlong_t)mmp_interval,
			    (u_longlong_t)mmp_fail_intervals,
			    (u_longlong_t)mmp_fail_ns,
			    (u_longlong_t)spa->spa_uberblock.ub_txg);
			cmn_err(CE_WARN, "MMP writes to pool '%s' have not "
			    "succeeded in over %llu ms; suspending pool. "
			    "Hrtime %llu",
			    spa_name(spa),
			    NSEC2MSEC(gethrtime() - mmp->mmp_last_write),
			    gethrtime());
			zio_suspend(spa, NULL, ZIO_SUSPEND_MMP);
		}

		if (multihost && !suspended)
			mmp_write_uberblock(spa);
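
		/*
		 * Each pending skip_wait shortens the next sleep from
		 * mmp_interval / leaves to MMP_MIN_INTERVAL (100 ms) /
		 * leaves, so changed tunables propagate to every label
		 * quickly.
		 */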
		if (skip_wait > 0) {
			next_time = gethrtime() + MSEC2NSEC(MMP_MIN_INTERVAL) /
			    leaves;
			skip_wait--;
		}

		CALLB_CPR_SAFE_BEGIN(&cpr);
		(void) cv_timedwait_idle_hires(&mmp->mmp_thread_cv,
		    &mmp->mmp_thread_lock, next_time, USEC2NSEC(100),
		    CALLOUT_FLAG_ABSOLUTE);
		CALLB_CPR_SAFE_END(&cpr, &mmp->mmp_thread_lock);
	}

	/* Outstanding writes are allowed to complete. */
	zio_wait(mmp->mmp_zio_root);

	mmp->mmp_zio_root = NULL;
	mmp_thread_exit(mmp, &mmp->mmp_thread, &cpr);

	thread_exit();
}

/*
 * Signal the MMP thread to wake it, when it is sleeping on
 * its cv. Used when some module parameter has changed and
 * we want the thread to know about it.
 * Only signal if the pool is active and the mmp thread is
 * running, otherwise there is no thread to wake.
 */
static void
mmp_signal_thread(spa_t *spa)
{
	mmp_thread_t *mmp = &spa->spa_mmp;

	mutex_enter(&mmp->mmp_thread_lock);
	if (mmp->mmp_thread)
		cv_broadcast(&mmp->mmp_thread_cv);
	mutex_exit(&mmp->mmp_thread_lock);
}

void
mmp_signal_all_threads(void)
{
	spa_t *spa = NULL;

	mutex_enter(&spa_namespace_lock);
	while ((spa = spa_next(spa))) {
		if (spa->spa_state == POOL_STATE_ACTIVE)
			mmp_signal_thread(spa);
	}
	mutex_exit(&spa_namespace_lock);
}

/* BEGIN CSTYLED */
ZFS_MODULE_PARAM_CALL(zfs_multihost, zfs_multihost_, interval,
	param_set_multihost_interval, spl_param_get_u64, ZMOD_RW,
	"Milliseconds between mmp writes to each leaf");
/* END CSTYLED */

ZFS_MODULE_PARAM(zfs_multihost, zfs_multihost_, fail_intervals, UINT, ZMOD_RW,
	"Max allowed period without a successful mmp write");

ZFS_MODULE_PARAM(zfs_multihost, zfs_multihost_, import_intervals, UINT, ZMOD_RW,
	"Number of zfs_multihost_interval periods to wait for activity");
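
/*
 * Note (illustrative): the tunables above surface as module parameters,
 * e.g. on Linux
 *
 *	echo 2000 > /sys/module/zfs/parameters/zfs_multihost_interval
 *
 * or as sysctls on FreeBSD (e.g. vfs.zfs.multihost.interval). The interval
 * setter, param_set_multihost_interval(), is expected to wake sleeping MMP
 * threads via mmp_signal_all_threads() so a new value takes effect promptly.
 */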