/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2017 by Lawrence Livermore National Security, LLC.
 */

#include <sys/abd.h>
#include <sys/mmp.h>
#include <sys/spa.h>
#include <sys/spa_impl.h>
#include <sys/time.h>
#include <sys/vdev.h>
#include <sys/vdev_impl.h>
#include <sys/zfs_context.h>
#include <sys/callb.h>

/*
 * Multi-Modifier Protection (MMP) attempts to prevent a user from importing
 * or opening a pool on more than one host at a time.  In particular, it
 * prevents "zpool import -f" on a host from succeeding while the pool is
 * already imported on another host.  There are many other ways in which a
 * device could be used by two hosts for different purposes at the same time
 * resulting in pool damage.  This implementation does not attempt to detect
 * those cases.
 *
 * MMP operates by ensuring there are frequent visible changes on disk (a
 * "heartbeat") at all times, and by altering the import process to check
 * for these changes and failing the import when they are detected.  This
 * functionality is enabled by setting the 'multihost' pool property to on.
 *
 * Uberblocks written by the txg_sync thread always go into the first
 * (N-MMP_BLOCKS_PER_LABEL) slots; the remaining slots are reserved for MMP.
 * They are used to hold uberblocks which are exactly the same as the last
 * synced uberblock except that the ub_timestamp and mmp_config are frequently
 * updated.  Like all other uberblocks, the slot is written with an embedded
 * checksum, and slots with invalid checksums are ignored.  This provides the
 * "heartbeat", with no risk of overwriting good uberblocks that must be
 * preserved, e.g. previous txgs and associated block pointers.
 *
 * Three optional fields are added to the uberblock structure: ub_mmp_magic,
 * ub_mmp_config, and ub_mmp_delay.  The ub_mmp_magic value allows zfs to tell
 * whether the other ub_mmp_* fields are valid.  The ub_mmp_config field tells
 * the importing host the settings of zfs_multihost_interval and
 * zfs_multihost_fail_intervals on the host which last had (or currently has)
 * the pool imported.  These determine how long a host must wait to detect
 * activity in the pool, before concluding the pool is not in use.  The
 * mmp_delay field is a decaying average of the amount of time between
 * completion of successive MMP writes, in nanoseconds.  It indicates whether
 * MMP is enabled.
 *
 * During import an activity test may now be performed to determine if
 * the pool is in use.  The activity test is typically required if the
 * ZPOOL_CONFIG_HOSTID does not match the system hostid, the pool state is
 * POOL_STATE_ACTIVE, and the pool is not a root pool.
 *
 * The activity test finds the "best" uberblock (highest txg, timestamp, and, if
 * ub_mmp_magic is valid, sequence number from ub_mmp_config).  It then waits
 * some time, and finds the "best" uberblock again.  If any of the mentioned
 * fields have different values in the newly read uberblock, the pool is in use
 * by another host and the import fails.  In order to assure the accuracy of the
 * activity test, the default values result in an activity test duration of 20x
 * the mmp write interval.
 *
 * The duration of the "zpool import" activity test depends on the information
 * available in the "best" uberblock:
 *
 * 1) If uberblock was written by zfs-0.8 or newer and fail_intervals > 0:
 *    ub_mmp_config.fail_intervals * ub_mmp_config.multihost_interval * 2
 *
 *    In this case, a weak guarantee is provided.  Since the host which last had
 *    the pool imported will suspend the pool if no mmp writes land within
 *    fail_intervals * multihost_interval ms, the absence of writes during that
 *    time means either the pool is not imported, or it is imported but the pool
 *    is suspended and no further writes will occur.
 *
 *    Note that resuming the suspended pool on the remote host would invalidate
 *    this guarantee, and so it is not allowed.
 *
 *    The factor of 2 provides a conservative safety factor and derives from
 *    MMP_IMPORT_SAFETY_FACTOR.
 *
 * 2) If uberblock was written by zfs-0.8 or newer and fail_intervals == 0:
 *    (ub_mmp_config.multihost_interval + ub_mmp_delay) *
 *        zfs_multihost_import_intervals
 *
 *    In this case no guarantee can be provided.  However, as long as some
 *    devices are healthy and connected, it is likely that at least one write
 *    will land within (multihost_interval + mmp_delay) because
 *    multihost_interval is enough time for a write to be attempted to each
 *    leaf vdev, and mmp_delay is enough for one to land, based on past delays.
 *    Multiplying by zfs_multihost_import_intervals provides a conservative
 *    safety factor.
 *
 * 3) If uberblock was written by zfs-0.7:
 *    (zfs_multihost_interval + ub_mmp_delay) * zfs_multihost_import_intervals
 *
 *    The same logic as case #2 applies, but we do not know remote tunables.
 *
 *    We use the local value for zfs_multihost_interval because the original MMP
 *    did not record this value in the uberblock.
 *
 *    ub_mmp_delay >= (zfs_multihost_interval / leaves), so if the other host
 *    has a much larger zfs_multihost_interval set, ub_mmp_delay will reflect
 *    that.  We will have waited enough time for zfs_multihost_import_intervals
 *    writes to be issued and all but one to land.
 *
 *    Single-device pool example delays:
 *
 *    import_delay = (1 + 1) * 20   =  40s #defaults, no I/O delay
 *    import_delay = (1 + 10) * 20  = 220s #defaults, 10s I/O delay
 *    import_delay = (10 + 10) * 20 = 400s #10s multihost_interval,
 *                                          no I/O delay
 *
 *    100-device pool example delays:
 *
 *    import_delay = (1 + .01) * 20 =  20s #defaults, no I/O delay
 *    import_delay = (1 + 10) * 20  = 220s #defaults, 10s I/O delay
 *    import_delay = (10 + .1) * 20 = 202s #10s multihost_interval,
 *                                          no I/O delay
 *
 * 4) Otherwise, this uberblock was written by a pre-MMP zfs:
 *    zfs_multihost_import_intervals * zfs_multihost_interval
 *
 *    In this case local tunables are used.  By default this product = 20s, long
 *    enough for a pool with any activity at all to write at least one
 *    uberblock.  No guarantee can be provided.
 *
 * Additionally, the duration is then extended by a random 25% to attempt to
 * detect simultaneous imports, for example if both partner hosts are rebooted
 * at the same time and automatically attempt to import the pool.
 */
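/*
 * For illustration only, a minimal sketch of the case 2/3 arithmetic above.
 * The helper below is hypothetical (not part of this file); it assumes
 * millisecond inputs and ignores the random 25% extension and the one-second
 * floor applied by the import code.
 */
#if 0
static uint64_t
mmp_import_delay_example(uint64_t multihost_interval_ms, uint64_t mmp_delay_ms,
    uint64_t import_intervals)
{
	/* (multihost_interval + mmp_delay) * zfs_multihost_import_intervals */
	return ((multihost_interval_ms + mmp_delay_ms) * import_intervals);
}
/* e.g. defaults, no I/O delay: (1000 + 1000) * 20 = 40000 ms = 40s */
#endif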

/*
 * Used to control the frequency of mmp writes which are performed when the
 * 'multihost' pool property is on.  This is one factor used to determine the
 * length of the activity check during import.
 *
 * On average an mmp write will be issued for each leaf vdev every
 * zfs_multihost_interval milliseconds.  In practice, the observed period can
 * vary with the I/O load and this observed value is the ub_mmp_delay which is
 * stored in the uberblock.  The minimum allowed value is 100 ms.
 */
ulong_t zfs_multihost_interval = MMP_DEFAULT_INTERVAL;
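
/*
 * Example (hypothetical numbers): with the default 1000 ms interval and a
 * pool with 4 leaf vdevs, the mmp thread issues one write roughly every
 * 1000 / 4 = 250 ms, rotating through the leaves so that each leaf is
 * written about once per second.
 */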

/*
 * Used to control the duration of the activity test on import.  Smaller values
 * of zfs_multihost_import_intervals will reduce the import time but increase
 * the risk of failing to detect an active pool.  The total activity check time
 * is never allowed to drop below one second.  A value of 0 is ignored and
 * treated as if it was set to 1.
 */
uint_t zfs_multihost_import_intervals = MMP_DEFAULT_IMPORT_INTERVALS;
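
/*
 * Example (assuming the default of 20 intervals and the default 1000 ms
 * zfs_multihost_interval): importing a pool last written by a zfs-0.7 host
 * waits roughly (1000 ms + ub_mmp_delay) * 20, i.e. at least 20 seconds,
 * before concluding the pool is not in use.
 */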

/*
 * Controls the behavior of the pool when mmp write failures or delays are
 * detected.
 *
 * When zfs_multihost_fail_intervals = 0, mmp write failures or delays are
 * ignored.  The failures will still be reported to the ZED, which, depending
 * on its configuration, may take action such as suspending the pool or taking
 * a device offline.
 *
 * When zfs_multihost_fail_intervals > 0, the pool will be suspended if
 * zfs_multihost_fail_intervals * zfs_multihost_interval milliseconds pass
 * without a successful mmp write.  This guarantees the activity test will see
 * mmp writes if the pool is imported.  A value of 1 is ignored and treated as
 * if it was set to 2, because a single leaf vdev pool will issue a write once
 * per multihost_interval and thus any variation in latency would cause the
 * pool to be suspended.
 */
uint_t zfs_multihost_fail_intervals = MMP_DEFAULT_FAIL_INTERVALS;
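
/*
 * Example (assuming the default of 10 fail intervals and the default
 * 1000 ms zfs_multihost_interval): the pool is suspended if no mmp write
 * succeeds for 10 * 1000 ms = 10 seconds.
 */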

char *mmp_tag = "mmp_write_uberblock";
static void mmp_thread(void *arg);

void
mmp_init(spa_t *spa)
{
	mmp_thread_t *mmp = &spa->spa_mmp;

	mutex_init(&mmp->mmp_thread_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&mmp->mmp_thread_cv, NULL, CV_DEFAULT, NULL);
	mutex_init(&mmp->mmp_io_lock, NULL, MUTEX_DEFAULT, NULL);
	mmp->mmp_kstat_id = 1;

	/*
	 * mmp_write_done() calculates mmp_delay based on prior mmp_delay and
	 * the elapsed time since the last write.  For the first mmp write,
	 * there is no "last write", so we start with fake non-zero values.
	 */
	mmp->mmp_last_write = gethrtime();
	mmp->mmp_delay = MSEC2NSEC(MMP_INTERVAL_OK(zfs_multihost_interval));
}

void
mmp_fini(spa_t *spa)
{
	mmp_thread_t *mmp = &spa->spa_mmp;

	mutex_destroy(&mmp->mmp_thread_lock);
	cv_destroy(&mmp->mmp_thread_cv);
	mutex_destroy(&mmp->mmp_io_lock);
}

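/*
 * Register the mmp thread with the CPR (suspend/resume) framework and take
 * the thread lock; paired with mmp_thread_exit() below.
 */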
static void
mmp_thread_enter(mmp_thread_t *mmp, callb_cpr_t *cpr)
{
	CALLB_CPR_INIT(cpr, &mmp->mmp_thread_lock, callb_generic_cpr, FTAG);
	mutex_enter(&mmp->mmp_thread_lock);
}

static void
mmp_thread_exit(mmp_thread_t *mmp, kthread_t **mpp, callb_cpr_t *cpr)
{
	ASSERT(*mpp != NULL);
	*mpp = NULL;
	cv_broadcast(&mmp->mmp_thread_cv);
	CALLB_CPR_EXIT(cpr);		/* drops &mmp->mmp_thread_lock */
	thread_exit();
}

void
mmp_thread_start(spa_t *spa)
{
	mmp_thread_t *mmp = &spa->spa_mmp;

	if (spa_writeable(spa)) {
		mutex_enter(&mmp->mmp_thread_lock);
		if (!mmp->mmp_thread) {
			mmp->mmp_thread = thread_create(NULL, 0, mmp_thread,
			    spa, 0, &p0, TS_RUN, defclsyspri);
			zfs_dbgmsg("MMP thread started pool '%s' "
			    "gethrtime %llu", spa_name(spa), gethrtime());
		}
		mutex_exit(&mmp->mmp_thread_lock);
	}
}

void
mmp_thread_stop(spa_t *spa)
{
	mmp_thread_t *mmp = &spa->spa_mmp;

	mutex_enter(&mmp->mmp_thread_lock);
	mmp->mmp_thread_exiting = 1;
	cv_broadcast(&mmp->mmp_thread_cv);

	while (mmp->mmp_thread) {
		cv_wait(&mmp->mmp_thread_cv, &mmp->mmp_thread_lock);
	}
	mutex_exit(&mmp->mmp_thread_lock);
	zfs_dbgmsg("MMP thread stopped pool '%s' gethrtime %llu",
	    spa_name(spa), gethrtime());

	ASSERT(mmp->mmp_thread == NULL);
	mmp->mmp_thread_exiting = 0;
}

typedef enum mmp_vdev_state_flag {
	MMP_FAIL_NOT_WRITABLE	= (1 << 0),
	MMP_FAIL_WRITE_PENDING	= (1 << 1),
} mmp_vdev_state_flag_t;

/*
 * Find a leaf vdev to write an MMP block to.  It must not have an outstanding
 * mmp write (if so, a new write will also likely block).  If there is no
 * usable leaf, a nonzero error value is returned.  The error value returned
 * is a bit field.
 *
 * MMP_FAIL_WRITE_PENDING   One or more leaf vdevs are writeable, but have an
 *                          outstanding MMP write.
 * MMP_FAIL_NOT_WRITABLE    One or more leaf vdevs are not writeable.
 */

static int
mmp_next_leaf(spa_t *spa)
{
	vdev_t *leaf;
	vdev_t *starting_leaf;
	int fail_mask = 0;

	ASSERT(MUTEX_HELD(&spa->spa_mmp.mmp_io_lock));
	ASSERT(spa_config_held(spa, SCL_STATE, RW_READER));
	ASSERT(list_link_active(&spa->spa_leaf_list.list_head) == B_TRUE);
	ASSERT(!list_is_empty(&spa->spa_leaf_list));

	if (spa->spa_mmp.mmp_leaf_last_gen != spa->spa_leaf_list_gen) {
		spa->spa_mmp.mmp_last_leaf = list_head(&spa->spa_leaf_list);
		spa->spa_mmp.mmp_leaf_last_gen = spa->spa_leaf_list_gen;
	}

	leaf = spa->spa_mmp.mmp_last_leaf;
	if (leaf == NULL)
		leaf = list_head(&spa->spa_leaf_list);
	starting_leaf = leaf;

	do {
		leaf = list_next(&spa->spa_leaf_list, leaf);
		if (leaf == NULL)
			leaf = list_head(&spa->spa_leaf_list);

		if (!vdev_writeable(leaf)) {
			fail_mask |= MMP_FAIL_NOT_WRITABLE;
		} else if (leaf->vdev_mmp_pending != 0) {
			fail_mask |= MMP_FAIL_WRITE_PENDING;
		} else {
			spa->spa_mmp.mmp_last_leaf = leaf;
			return (0);
		}
	} while (leaf != starting_leaf);

	ASSERT(fail_mask);

	return (fail_mask);
}

/*
 * MMP writes are issued on a fixed schedule, but may complete at variable,
 * much longer, intervals.  The mmp_delay captures long periods between
 * successful writes for any reason, including disk latency, scheduling delays,
 * etc.
 *
 * The mmp_delay is usually calculated as a decaying average, but if the latest
 * delay is higher we do not average it, so that we do not hide sudden spikes
 * which the importing host must wait for.
 *
 * If writes are occurring frequently, such as due to a high rate of txg syncs,
 * the mmp_delay could become very small.  Since those short delays depend on
 * activity we cannot count on, we never allow mmp_delay to get lower than the
 * rate expected if only mmp_thread writes occur.
 *
 * If an mmp write was skipped or fails, and we have already waited longer than
 * mmp_delay, we need to update it so the next write reflects the longer delay.
 *
 * Do not set mmp_delay if the multihost property is not on, so as not to
 * trigger an activity check on import.
 */
static void
mmp_delay_update(spa_t *spa, boolean_t write_completed)
{
	mmp_thread_t *mts = &spa->spa_mmp;
	hrtime_t delay = gethrtime() - mts->mmp_last_write;

	ASSERT(MUTEX_HELD(&mts->mmp_io_lock));

	if (spa_multihost(spa) == B_FALSE) {
		mts->mmp_delay = 0;
		return;
	}

	if (delay > mts->mmp_delay)
		mts->mmp_delay = delay;

	if (write_completed == B_FALSE)
		return;

	mts->mmp_last_write = gethrtime();

	/*
	 * Strictly less than, in case delay was changed above.
	 */
	if (delay < mts->mmp_delay) {
		hrtime_t min_delay =
		    MSEC2NSEC(MMP_INTERVAL_OK(zfs_multihost_interval)) /
		    MAX(1, vdev_count_leaves(spa));
		mts->mmp_delay = MAX(((delay + mts->mmp_delay * 127) / 128),
		    min_delay);
	}
}

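/*
 * Completion callback for an mmp write zio: record the outcome in the mmp
 * history kstat, update the decaying mmp_delay average, drop the SCL_STATE
 * hold taken in mmp_write_uberblock(), and free the write buffer.
 */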
static void
mmp_write_done(zio_t *zio)
{
	spa_t *spa = zio->io_spa;
	vdev_t *vd = zio->io_vd;
	mmp_thread_t *mts = zio->io_private;

	mutex_enter(&mts->mmp_io_lock);
	uint64_t mmp_kstat_id = vd->vdev_mmp_kstat_id;
	hrtime_t mmp_write_duration = gethrtime() - vd->vdev_mmp_pending;

	mmp_delay_update(spa, (zio->io_error == 0));

	vd->vdev_mmp_pending = 0;
	vd->vdev_mmp_kstat_id = 0;

	mutex_exit(&mts->mmp_io_lock);
	spa_config_exit(spa, SCL_STATE, mmp_tag);

	spa_mmp_history_set(spa, mmp_kstat_id, zio->io_error,
	    mmp_write_duration);

	abd_free(zio->io_abd);
}

/*
 * When the uberblock on-disk is updated by a spa_sync,
 * creating a new "best" uberblock, update the one stored
 * in the mmp thread state, used for mmp writes.
 */
void
mmp_update_uberblock(spa_t *spa, uberblock_t *ub)
{
	mmp_thread_t *mmp = &spa->spa_mmp;

	mutex_enter(&mmp->mmp_io_lock);
	mmp->mmp_ub = *ub;
	mmp->mmp_seq = 1;
	mmp->mmp_ub.ub_timestamp = gethrestime_sec();
	mmp_delay_update(spa, B_TRUE);
	mutex_exit(&mmp->mmp_io_lock);
}

/*
 * Choose the next leaf vdev, and a random label and MMP block within it,
 * and write over the block with a copy of the last-synced uberblock, whose
 * timestamp has been updated to reflect that the pool is in use.
 */
static void
mmp_write_uberblock(spa_t *spa)
{
	int flags = ZIO_FLAG_CONFIG_WRITER | ZIO_FLAG_CANFAIL;
	mmp_thread_t *mmp = &spa->spa_mmp;
	uberblock_t *ub;
	vdev_t *vd = NULL;
	int label, error;
	uint64_t offset;

	hrtime_t lock_acquire_time = gethrtime();
	spa_config_enter(spa, SCL_STATE, mmp_tag, RW_READER);
	lock_acquire_time = gethrtime() - lock_acquire_time;
	if (lock_acquire_time > (MSEC2NSEC(MMP_MIN_INTERVAL) / 10))
		zfs_dbgmsg("MMP SCL_STATE acquisition pool '%s' took %llu ns "
		    "gethrtime %llu", spa_name(spa), lock_acquire_time,
		    gethrtime());

	mutex_enter(&mmp->mmp_io_lock);

	error = mmp_next_leaf(spa);

	/*
	 * spa_mmp_history has two types of entries:
	 * Issued MMP write: records time issued, error status, etc.
	 * Skipped MMP write: an MMP write could not be issued because no
	 * suitable leaf vdev was available.  See comment above struct
	 * spa_mmp_history for details.
	 */

	if (error) {
		mmp_delay_update(spa, B_FALSE);
		if (mmp->mmp_skip_error == error) {
			spa_mmp_history_set_skip(spa, mmp->mmp_kstat_id - 1);
		} else {
			mmp->mmp_skip_error = error;
			spa_mmp_history_add(spa, mmp->mmp_ub.ub_txg,
			    gethrestime_sec(), mmp->mmp_delay, NULL, 0,
			    mmp->mmp_kstat_id++, error);
			zfs_dbgmsg("MMP error choosing leaf pool '%s' "
			    "gethrtime %llu fail_mask %#x", spa_name(spa),
			    gethrtime(), error);
		}
		mutex_exit(&mmp->mmp_io_lock);
		spa_config_exit(spa, SCL_STATE, mmp_tag);
		return;
	}

	vd = spa->spa_mmp.mmp_last_leaf;
	if (mmp->mmp_skip_error != 0) {
		mmp->mmp_skip_error = 0;
		zfs_dbgmsg("MMP write after skipping due to unavailable "
		    "leaves, pool '%s' gethrtime %llu leaf %#llu",
		    spa_name(spa), gethrtime(), vd->vdev_guid);
	}

	if (mmp->mmp_zio_root == NULL)
		mmp->mmp_zio_root = zio_root(spa, NULL, NULL,
		    flags | ZIO_FLAG_GODFATHER);

	if (mmp->mmp_ub.ub_timestamp != gethrestime_sec()) {
		/*
		 * We want to reset mmp_seq when the timestamp advances,
		 * because after an mmp_seq wrap new values will not be chosen
		 * by uberblock_compare() as the "best".
		 */
		mmp->mmp_ub.ub_timestamp = gethrestime_sec();
		mmp->mmp_seq = 1;
	}

	ub = &mmp->mmp_ub;
	ub->ub_mmp_magic = MMP_MAGIC;
	ub->ub_mmp_delay = mmp->mmp_delay;
	ub->ub_mmp_config = MMP_SEQ_SET(mmp->mmp_seq) |
	    MMP_INTERVAL_SET(MMP_INTERVAL_OK(zfs_multihost_interval)) |
	    MMP_FAIL_INT_SET(MMP_FAIL_INTVS_OK(
	    zfs_multihost_fail_intervals));
	vd->vdev_mmp_pending = gethrtime();
	vd->vdev_mmp_kstat_id = mmp->mmp_kstat_id;

	zio_t *zio = zio_null(mmp->mmp_zio_root, spa, NULL, NULL, NULL, flags);
	abd_t *ub_abd = abd_alloc_for_io(VDEV_UBERBLOCK_SIZE(vd), B_TRUE);
	abd_zero(ub_abd, VDEV_UBERBLOCK_SIZE(vd));
	abd_copy_from_buf(ub_abd, ub, sizeof (uberblock_t));

	mmp->mmp_seq++;
	mmp->mmp_kstat_id++;
	mutex_exit(&mmp->mmp_io_lock);

	offset = VDEV_UBERBLOCK_OFFSET(vd, VDEV_UBERBLOCK_COUNT(vd) -
	    MMP_BLOCKS_PER_LABEL + spa_get_random(MMP_BLOCKS_PER_LABEL));

	label = spa_get_random(VDEV_LABELS);
	vdev_label_write(zio, vd, label, ub_abd, offset,
	    VDEV_UBERBLOCK_SIZE(vd), mmp_write_done, mmp,
	    flags | ZIO_FLAG_DONT_PROPAGATE);

	(void) spa_mmp_history_add(spa, ub->ub_txg, ub->ub_timestamp,
	    ub->ub_mmp_delay, vd, label, vd->vdev_mmp_kstat_id, 0);

	zio_nowait(zio);
}

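/*
 * Body of the per-pool mmp thread.  On each pass it re-reads the multihost
 * tunables, issues an mmp write to the next suitable leaf vdev, and
 * suspends the pool if no write has succeeded within mmp_fail_ns.
 */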
static void
mmp_thread(void *arg)
{
	spa_t *spa = (spa_t *)arg;
	mmp_thread_t *mmp = &spa->spa_mmp;
	boolean_t suspended = spa_suspended(spa);
	boolean_t multihost = spa_multihost(spa);
	uint64_t mmp_interval = MSEC2NSEC(MMP_INTERVAL_OK(
	    zfs_multihost_interval));
	uint32_t mmp_fail_intervals = MMP_FAIL_INTVS_OK(
	    zfs_multihost_fail_intervals);
	hrtime_t mmp_fail_ns = mmp_fail_intervals * mmp_interval;
	boolean_t last_spa_suspended = suspended;
	boolean_t last_spa_multihost = multihost;
	uint64_t last_mmp_interval = mmp_interval;
	uint32_t last_mmp_fail_intervals = mmp_fail_intervals;
	hrtime_t last_mmp_fail_ns = mmp_fail_ns;
	callb_cpr_t cpr;
	int skip_wait = 0;

	mmp_thread_enter(mmp, &cpr);

	while (!mmp->mmp_thread_exiting) {
		hrtime_t next_time = gethrtime() +
		    MSEC2NSEC(MMP_DEFAULT_INTERVAL);
		int leaves = MAX(vdev_count_leaves(spa), 1);

		/* Detect changes in tunables or state */

		last_spa_suspended = suspended;
		last_spa_multihost = multihost;
		suspended = spa_suspended(spa);
		multihost = spa_multihost(spa);

		last_mmp_interval = mmp_interval;
		last_mmp_fail_intervals = mmp_fail_intervals;
		last_mmp_fail_ns = mmp_fail_ns;
		mmp_interval = MSEC2NSEC(MMP_INTERVAL_OK(
		    zfs_multihost_interval));
		mmp_fail_intervals = MMP_FAIL_INTVS_OK(
		    zfs_multihost_fail_intervals);

		/* Smooth so pool is not suspended when reducing tunables */
		if (mmp_fail_intervals * mmp_interval < mmp_fail_ns) {
			mmp_fail_ns = (mmp_fail_ns * 31 +
			    mmp_fail_intervals * mmp_interval) / 32;
		} else {
			mmp_fail_ns = mmp_fail_intervals *
			    mmp_interval;
		}
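		/*
		 * For example (hypothetical values): if the old window was
		 * 100s and the new product is 10s, one pass yields
		 * (100 * 31 + 10) / 32 ~= 97.2s, decaying gradually toward
		 * 10s rather than shrinking the window below the time already
		 * elapsed since the last successful write.
		 */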

		if (mmp_interval != last_mmp_interval ||
		    mmp_fail_intervals != last_mmp_fail_intervals) {
			/*
			 * We want other hosts to see new tunables as quickly as
			 * possible.  Write out at higher frequency than usual.
			 */
			skip_wait += leaves;
		}

		if (multihost)
			next_time = gethrtime() + mmp_interval / leaves;

		if (mmp_fail_ns != last_mmp_fail_ns) {
			zfs_dbgmsg("MMP interval change pool '%s' "
			    "gethrtime %llu last_mmp_interval %llu "
			    "mmp_interval %llu last_mmp_fail_intervals %u "
			    "mmp_fail_intervals %u mmp_fail_ns %llu "
			    "skip_wait %d leaves %d next_time %llu",
			    spa_name(spa), gethrtime(), last_mmp_interval,
			    mmp_interval, last_mmp_fail_intervals,
			    mmp_fail_intervals, mmp_fail_ns, skip_wait, leaves,
			    next_time);
		}

		/*
		 * MMP off => on, or suspended => !suspended:
		 * No writes occurred recently.  Update mmp_last_write to give
		 * us some time to try.
		 */
		if ((!last_spa_multihost && multihost) ||
		    (last_spa_suspended && !suspended)) {
			zfs_dbgmsg("MMP state change pool '%s': gethrtime %llu "
			    "last_spa_multihost %u multihost %u "
			    "last_spa_suspended %u suspended %u",
			    spa_name(spa), gethrtime(), last_spa_multihost,
			    multihost, last_spa_suspended, suspended);
			mutex_enter(&mmp->mmp_io_lock);
			mmp->mmp_last_write = gethrtime();
			mmp->mmp_delay = mmp_interval;
			mutex_exit(&mmp->mmp_io_lock);
		}

		/*
		 * MMP on => off:
		 * mmp_delay == 0 tells importing node to skip activity check.
		 */
		if (last_spa_multihost && !multihost) {
			mutex_enter(&mmp->mmp_io_lock);
			mmp->mmp_delay = 0;
			mutex_exit(&mmp->mmp_io_lock);
		}

		/*
		 * Suspend the pool if no MMP write has succeeded in over
		 * mmp_interval * mmp_fail_intervals nanoseconds.
		 */
		if (multihost && !suspended && mmp_fail_intervals &&
		    (gethrtime() - mmp->mmp_last_write) > mmp_fail_ns) {
			zfs_dbgmsg("MMP suspending pool '%s': gethrtime %llu "
			    "mmp_last_write %llu mmp_interval %llu "
			    "mmp_fail_intervals %llu mmp_fail_ns %llu",
			    spa_name(spa), (u_longlong_t)gethrtime(),
			    (u_longlong_t)mmp->mmp_last_write,
			    (u_longlong_t)mmp_interval,
			    (u_longlong_t)mmp_fail_intervals,
			    (u_longlong_t)mmp_fail_ns);
			cmn_err(CE_WARN, "MMP writes to pool '%s' have not "
			    "succeeded in over %llu ms; suspending pool. "
			    "Hrtime %llu",
			    spa_name(spa),
			    NSEC2MSEC(gethrtime() - mmp->mmp_last_write),
			    gethrtime());
			zio_suspend(spa, NULL, ZIO_SUSPEND_MMP);
		}

		if (multihost && !suspended)
			mmp_write_uberblock(spa);

		if (skip_wait > 0) {
			next_time = gethrtime() + MSEC2NSEC(MMP_MIN_INTERVAL) /
			    leaves;
			skip_wait--;
		}

		CALLB_CPR_SAFE_BEGIN(&cpr);
		(void) cv_timedwait_sig_hires(&mmp->mmp_thread_cv,
		    &mmp->mmp_thread_lock, next_time, USEC2NSEC(100),
		    CALLOUT_FLAG_ABSOLUTE);
		CALLB_CPR_SAFE_END(&cpr, &mmp->mmp_thread_lock);
	}

	/* Outstanding writes are allowed to complete. */
	zio_wait(mmp->mmp_zio_root);

	mmp->mmp_zio_root = NULL;
	mmp_thread_exit(mmp, &mmp->mmp_thread, &cpr);
}

/*
 * Signal the MMP thread to wake it when it is sleeping on
 * its cv.  Used when some module parameter has changed and
 * we want the thread to know about it.
 * Only signal if the pool is active and the mmp thread is
 * running, otherwise there is no thread to wake.
 */
static void
mmp_signal_thread(spa_t *spa)
{
	mmp_thread_t *mmp = &spa->spa_mmp;

	mutex_enter(&mmp->mmp_thread_lock);
	if (mmp->mmp_thread)
		cv_broadcast(&mmp->mmp_thread_cv);
	mutex_exit(&mmp->mmp_thread_lock);
}

void
mmp_signal_all_threads(void)
{
	spa_t *spa = NULL;

	mutex_enter(&spa_namespace_lock);
	while ((spa = spa_next(spa))) {
		if (spa->spa_state == POOL_STATE_ACTIVE)
			mmp_signal_thread(spa);
	}
	mutex_exit(&spa_namespace_lock);
}

/* BEGIN CSTYLED */
ZFS_MODULE_PARAM_CALL(zfs_multihost, zfs_multihost_, interval,
	param_set_multihost_interval, param_get_ulong, ZMOD_RW,
	"Milliseconds between mmp writes to each leaf");
/* END CSTYLED */

ZFS_MODULE_PARAM(zfs_multihost, zfs_multihost_, fail_intervals, UINT, ZMOD_RW,
	"Max allowed period without a successful mmp write");

ZFS_MODULE_PARAM(zfs_multihost, zfs_multihost_, import_intervals, UINT, ZMOD_RW,
	"Number of zfs_multihost_interval periods to wait for activity");