// SPDX-License-Identifier: CDDL-1.0
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or https://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2017 by Lawrence Livermore National Security, LLC.
 */

#include <sys/abd.h>
#include <sys/mmp.h>
#include <sys/spa.h>
#include <sys/spa_impl.h>
#include <sys/time.h>
#include <sys/vdev.h>
#include <sys/vdev_impl.h>
#include <sys/zfs_context.h>
#include <sys/callb.h>

/*
 * Multi-Modifier Protection (MMP) attempts to prevent a user from importing
 * or opening a pool on more than one host at a time.  In particular, it
 * prevents "zpool import -f" on a host from succeeding while the pool is
 * already imported on another host.  There are many other ways in which a
 * device could be used by two hosts for different purposes at the same time
 * resulting in pool damage.  This implementation does not attempt to detect
 * those cases.
 *
 * MMP operates by ensuring there are frequent visible changes on disk (a
 * "heartbeat") at all times, and by altering the import process to check
 * for these changes and fail the import when they are detected.  This
 * functionality is enabled by setting the 'multihost' pool property to on.
 *
 * Uberblocks written by the txg_sync thread always go into the first
 * (N-MMP_BLOCKS_PER_LABEL) slots; the remaining slots are reserved for MMP.
 * They are used to hold uberblocks which are exactly the same as the last
 * synced uberblock except that the ub_timestamp and mmp_config are frequently
 * updated.  Like all other uberblocks, the slot is written with an embedded
 * checksum, and slots with invalid checksums are ignored.  This provides the
 * "heartbeat", with no risk of overwriting good uberblocks that must be
 * preserved, e.g. previous txgs and associated block pointers.
 *
 * Three optional fields are added to the uberblock structure: ub_mmp_magic,
 * ub_mmp_config, and ub_mmp_delay.  The ub_mmp_magic value allows zfs to tell
 * whether the other ub_mmp_* fields are valid.  The ub_mmp_config field tells
 * the importing host the settings of zfs_multihost_interval and
 * zfs_multihost_fail_intervals on the host which last had (or currently has)
 * the pool imported.  These determine how long a host must wait to detect
 * activity in the pool, before concluding the pool is not in use.  The
 * ub_mmp_delay field is a decaying average of the amount of time between
 * completion of successive MMP writes, in nanoseconds.  It indicates whether
 * MMP is enabled.
 *
 * During import an activity test may now be performed to determine if
 * the pool is in use.  The activity test is typically required if the
 * ZPOOL_CONFIG_HOSTID does not match the system hostid, the pool state is
 * POOL_STATE_ACTIVE, and the pool is not a root pool.
 *
 * The activity test finds the "best" uberblock (highest txg, timestamp, and, if
 * ub_mmp_magic is valid, sequence number from ub_mmp_config).  It then waits
 * some time, and finds the "best" uberblock again.  If any of the mentioned
 * fields have different values in the newly read uberblock, the pool is in use
 * by another host and the import fails.  In order to assure the accuracy of the
 * activity test, the default values result in an activity test duration of 20x
 * the mmp write interval.
 *
 * The duration of the "zpool import" activity test depends on the information
 * available in the "best" uberblock (see the illustrative sketch after this
 * comment):
 *
 * 1) If the uberblock was written by zfs-0.8 or newer and fail_intervals > 0:
 *    ub_mmp_config.fail_intervals * ub_mmp_config.multihost_interval * 2
 *
 *    In this case, a weak guarantee is provided.  Since the host which last had
 *    the pool imported will suspend the pool if no mmp writes land within
 *    fail_intervals * multihost_interval ms, the absence of writes during that
 *    time means either the pool is not imported, or it is imported but the pool
 *    is suspended and no further writes will occur.
 *
 *    Note that resuming the suspended pool on the remote host would invalidate
 *    this guarantee, and so it is not allowed.
 *
 *    The factor of 2 provides a conservative safety factor and derives from
 *    MMP_IMPORT_SAFETY_FACTOR.
 *
 * 2) If the uberblock was written by zfs-0.8 or newer and fail_intervals == 0:
 *    (ub_mmp_config.multihost_interval + ub_mmp_delay) *
 *        zfs_multihost_import_intervals
 *
 *    In this case no guarantee can be provided.  However, as long as some
 *    devices are healthy and connected, it is likely that at least one write
 *    will land within (multihost_interval + mmp_delay) because
 *    multihost_interval is enough time for a write to be attempted to each
 *    leaf vdev, and mmp_delay is enough for one to land, based on past delays.
 *    Multiplying by zfs_multihost_import_intervals provides a conservative
 *    safety factor.
 *
 * 3) If the uberblock was written by zfs-0.7:
 *    (zfs_multihost_interval + ub_mmp_delay) * zfs_multihost_import_intervals
 *
 *    The same logic as case #2 applies, but we do not know the remote tunables.
 *
 *    We use the local value for zfs_multihost_interval because the original MMP
 *    did not record this value in the uberblock.
 *
 *    ub_mmp_delay >= (zfs_multihost_interval / leaves), so if the other host
 *    has a much larger zfs_multihost_interval set, ub_mmp_delay will reflect
 *    that.  We will have waited enough time for zfs_multihost_import_intervals
 *    writes to be issued and all but one to land.
 *
 *    Single-device pool example delays:
 *
 *    import_delay = (1 + 1) * 20   =  40s #defaults, no I/O delay
 *    import_delay = (1 + 10) * 20  = 220s #defaults, 10s I/O delay
 *    import_delay = (10 + 10) * 20 = 400s #10s multihost_interval,
 *                                          no I/O delay
 *
 *    100-device pool example delays:
 *
 *    import_delay = (1 + .01) * 20 =  20s #defaults, no I/O delay
 *    import_delay = (1 + 10) * 20  = 220s #defaults, 10s I/O delay
 *    import_delay = (10 + .1) * 20 = 202s #10s multihost_interval,
 *                                          no I/O delay
 *
 * 4) Otherwise, this uberblock was written by a pre-MMP zfs:
 *    zfs_multihost_import_intervals * zfs_multihost_interval
 *
 *    In this case local tunables are used.  By default this product = 20s,
 *    long enough for a pool with any activity at all to write at least one
 *    uberblock.  No guarantee can be provided.
 *
 * Additionally, the duration is then extended by a random 25% to attempt to
 * detect simultaneous imports, for example if both partner hosts are rebooted
 * at the same time and automatically attempt to import the pool.
 */

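/*
 * Illustrative sketch only, excluded from the build: a simplified model of
 * how the import-side activity check might choose its duration from the
 * "best" uberblock, following cases 1-4 above.  The real logic lives in the
 * import path (spa.c), not in this file, and the helper name here is
 * hypothetical; the MMP_VALID(), MMP_INTERVAL_VALID(), MMP_FAIL_INT_VALID(),
 * MMP_INTERVAL() and MMP_FAIL_INT() uberblock accessors are assumed from
 * sys/uberblock_impl.h.
 */
#if 0
static hrtime_t
mmp_import_delay_sketch(const uberblock_t *ub)
{
	uint64_t import_intervals = MAX(zfs_multihost_import_intervals, 1);
	hrtime_t delay;

	if (MMP_VALID(ub) && MMP_FAIL_INT_VALID(ub) && MMP_FAIL_INT(ub) > 0) {
		/* Case 1: zfs-0.8+, fail_intervals > 0; 2x safety factor */
		delay = MSEC2NSEC(MMP_FAIL_INT(ub) * MMP_INTERVAL(ub)) * 2;
	} else if (MMP_VALID(ub) && MMP_INTERVAL_VALID(ub)) {
		/* Case 2: zfs-0.8+, fail_intervals == 0 */
		delay = (MSEC2NSEC(MMP_INTERVAL(ub)) + ub->ub_mmp_delay) *
		    import_intervals;
	} else if (MMP_VALID(ub)) {
		/* Case 3: written by zfs-0.7; remote interval unknown */
		delay = (MSEC2NSEC(zfs_multihost_interval) +
		    ub->ub_mmp_delay) * import_intervals;
	} else {
		/* Case 4: pre-MMP uberblock; local tunables only */
		delay = MSEC2NSEC(zfs_multihost_interval) * import_intervals;
	}

	/* Total test time never drops below 1s; a random 25% is added. */
	return (MAX(delay, NANOSEC));
}
#endif
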
/*
 * Used to control the frequency of mmp writes which are performed when the
 * 'multihost' pool property is on.  This is one factor used to determine the
 * length of the activity check during import.
 *
 * On average an mmp write will be issued for each leaf vdev every
 * zfs_multihost_interval milliseconds.  In practice, the observed period can
 * vary with the I/O load; the observed value is stored in the uberblock as
 * ub_mmp_delay.  The minimum allowed value is 100 ms.
 */
uint64_t zfs_multihost_interval = MMP_DEFAULT_INTERVAL;

/*
 * Used to control the duration of the activity test on import.  Smaller values
 * of zfs_multihost_import_intervals will reduce the import time but increase
 * the risk of failing to detect an active pool.  The total activity check time
 * is never allowed to drop below one second.  A value of 0 is ignored and
 * treated as if it were set to 1.
 */
uint_t zfs_multihost_import_intervals = MMP_DEFAULT_IMPORT_INTERVALS;

/*
 * Controls the behavior of the pool when mmp write failures or delays are
 * detected.
 *
 * When zfs_multihost_fail_intervals = 0, mmp write failures or delays are
 * ignored.  The failures will still be reported to the ZED which, depending on
 * its configuration, may take action such as suspending the pool or taking a
 * device offline.
 *
 * When zfs_multihost_fail_intervals > 0, the pool will be suspended if
 * zfs_multihost_fail_intervals * zfs_multihost_interval milliseconds pass
 * without a successful mmp write.  This guarantees the activity test will see
 * mmp writes if the pool is imported.  A value of 1 is ignored and treated as
 * if it were set to 2, because a single-leaf-vdev pool will issue a write once
 * per multihost_interval and thus any variation in latency would cause the
 * pool to be suspended.
 */
uint_t zfs_multihost_fail_intervals = MMP_DEFAULT_FAIL_INTERVALS;

static const void *const mmp_tag = "mmp_write_uberblock";
static __attribute__((noreturn)) void mmp_thread(void *arg);

void
mmp_init(spa_t *spa)
{
	mmp_thread_t *mmp = &spa->spa_mmp;

	mutex_init(&mmp->mmp_thread_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&mmp->mmp_thread_cv, NULL, CV_DEFAULT, NULL);
	mutex_init(&mmp->mmp_io_lock, NULL, MUTEX_DEFAULT, NULL);
	mmp->mmp_kstat_id = 1;
}

void
mmp_fini(spa_t *spa)
{
	mmp_thread_t *mmp = &spa->spa_mmp;

	mutex_destroy(&mmp->mmp_thread_lock);
	cv_destroy(&mmp->mmp_thread_cv);
	mutex_destroy(&mmp->mmp_io_lock);
}

static void
mmp_thread_enter(mmp_thread_t *mmp, callb_cpr_t *cpr)
{
	CALLB_CPR_INIT(cpr, &mmp->mmp_thread_lock, callb_generic_cpr, FTAG);
	mutex_enter(&mmp->mmp_thread_lock);
}

static void
mmp_thread_exit(mmp_thread_t *mmp, kthread_t **mpp, callb_cpr_t *cpr)
{
	ASSERT(*mpp != NULL);
	*mpp = NULL;
	cv_broadcast(&mmp->mmp_thread_cv);
	CALLB_CPR_EXIT(cpr);		/* drops &mmp->mmp_thread_lock */
}

void
mmp_thread_start(spa_t *spa)
{
	mmp_thread_t *mmp = &spa->spa_mmp;

	if (spa_writeable(spa)) {
		mutex_enter(&mmp->mmp_thread_lock);
		if (!mmp->mmp_thread) {
			mmp->mmp_thread = thread_create(NULL, 0, mmp_thread,
			    spa, 0, &p0, TS_RUN, defclsyspri);
			zfs_dbgmsg("MMP thread started pool '%s' "
			    "gethrtime %llu", spa_name(spa), gethrtime());
		}
		mutex_exit(&mmp->mmp_thread_lock);
	}
}

void
mmp_thread_stop(spa_t *spa)
{
	mmp_thread_t *mmp = &spa->spa_mmp;

	mutex_enter(&mmp->mmp_thread_lock);
	mmp->mmp_thread_exiting = 1;
	cv_broadcast(&mmp->mmp_thread_cv);

	while (mmp->mmp_thread) {
		cv_wait(&mmp->mmp_thread_cv, &mmp->mmp_thread_lock);
	}
	mutex_exit(&mmp->mmp_thread_lock);
	zfs_dbgmsg("MMP thread stopped pool '%s' gethrtime %llu",
	    spa_name(spa), gethrtime());

	ASSERT(mmp->mmp_thread == NULL);
	mmp->mmp_thread_exiting = 0;
}

typedef enum mmp_vdev_state_flag {
	MMP_FAIL_NOT_WRITABLE	= (1 << 0),
	MMP_FAIL_WRITE_PENDING	= (1 << 1),
} mmp_vdev_state_flag_t;

/*
 * Find a leaf vdev to write an MMP block to.  It must not have an outstanding
 * mmp write (if so a new write will also likely block).  If there is no usable
 * leaf, a nonzero error value is returned; the value is a bit field.
 *
 * MMP_FAIL_WRITE_PENDING   One or more leaf vdevs are writeable, but have an
 *                          outstanding MMP write.
 * MMP_FAIL_NOT_WRITABLE    One or more leaf vdevs are not writeable.
 */

static int
mmp_next_leaf(spa_t *spa)
{
	vdev_t *leaf;
	vdev_t *starting_leaf;
	int fail_mask = 0;

	ASSERT(MUTEX_HELD(&spa->spa_mmp.mmp_io_lock));
	ASSERT(spa_config_held(spa, SCL_STATE, RW_READER));
	ASSERT(list_link_active(&spa->spa_leaf_list.list_head) == B_TRUE);
	ASSERT(!list_is_empty(&spa->spa_leaf_list));

	if (spa->spa_mmp.mmp_leaf_last_gen != spa->spa_leaf_list_gen) {
		spa->spa_mmp.mmp_last_leaf = list_head(&spa->spa_leaf_list);
		spa->spa_mmp.mmp_leaf_last_gen = spa->spa_leaf_list_gen;
	}

	leaf = spa->spa_mmp.mmp_last_leaf;
	if (leaf == NULL)
		leaf = list_head(&spa->spa_leaf_list);
	starting_leaf = leaf;

	do {
		leaf = list_next(&spa->spa_leaf_list, leaf);
		if (leaf == NULL) {
			leaf = list_head(&spa->spa_leaf_list);
			ASSERT3P(leaf, !=, NULL);
		}

		/*
		 * We skip unwritable, offline, detached, and dRAID spare
		 * devices as they are either not legal targets or the write
		 * may fail or not be seen by other hosts.  Skipped dRAID
		 * spares can never be written, so the fail mask is not set.
		 */
		if (!vdev_writeable(leaf) || leaf->vdev_offline ||
		    leaf->vdev_detached) {
			fail_mask |= MMP_FAIL_NOT_WRITABLE;
		} else if (leaf->vdev_ops == &vdev_draid_spare_ops) {
			continue;
		} else if (leaf->vdev_mmp_pending != 0) {
			fail_mask |= MMP_FAIL_WRITE_PENDING;
		} else {
			spa->spa_mmp.mmp_last_leaf = leaf;
			return (0);
		}
	} while (leaf != starting_leaf);

	ASSERT(fail_mask);

	return (fail_mask);
}

/*
 * MMP writes are issued on a fixed schedule, but may complete at variable,
 * much longer, intervals.  The mmp_delay captures long periods between
 * successful writes for any reason, including disk latency, scheduling delays,
 * etc.
 *
 * The mmp_delay is usually calculated as a decaying average, but if the latest
 * delay is higher we do not average it, so that we do not hide sudden spikes
 * which the importing host must wait for.
 *
 * If writes are occurring frequently, such as due to a high rate of txg syncs,
 * the mmp_delay could become very small.  Since those short delays depend on
 * activity we cannot count on, we never allow mmp_delay to drop below the
 * value expected if only mmp_thread writes occur.
 *
 * If an mmp write was skipped or fails, and we have already waited longer than
 * mmp_delay, we need to update it so the next write reflects the longer delay.
 *
 * Do not set mmp_delay if the multihost property is not on, so as not to
 * trigger an activity check on import.
 */
static void
mmp_delay_update(spa_t *spa, boolean_t write_completed)
{
	mmp_thread_t *mts = &spa->spa_mmp;
	hrtime_t delay = gethrtime() - mts->mmp_last_write;

	ASSERT(MUTEX_HELD(&mts->mmp_io_lock));

	if (spa_multihost(spa) == B_FALSE) {
		mts->mmp_delay = 0;
		return;
	}

	if (delay > mts->mmp_delay)
		mts->mmp_delay = delay;

	if (write_completed == B_FALSE)
		return;

	mts->mmp_last_write = gethrtime();

	/*
	 * Strictly less than, in case delay was changed above.
	 */
	if (delay < mts->mmp_delay) {
		hrtime_t min_delay =
		    MSEC2NSEC(MMP_INTERVAL_OK(zfs_multihost_interval)) /
		    MAX(1, vdev_count_leaves(spa));
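		/*
		 * Decaying average: the new sample carries 1/128 of the
		 * weight, floored at min_delay, the expected per-leaf write
		 * period, so mmp_delay never implies a faster write rate
		 * than the mmp thread alone can sustain.
		 */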
		mts->mmp_delay = MAX(((delay + mts->mmp_delay * 127) / 128),
		    min_delay);
	}
}

static void
mmp_write_done(zio_t *zio)
{
	spa_t *spa = zio->io_spa;
	vdev_t *vd = zio->io_vd;
	mmp_thread_t *mts = zio->io_private;

	mutex_enter(&mts->mmp_io_lock);
	uint64_t mmp_kstat_id = vd->vdev_mmp_kstat_id;
	hrtime_t mmp_write_duration = gethrtime() - vd->vdev_mmp_pending;

	mmp_delay_update(spa, (zio->io_error == 0));

	vd->vdev_mmp_pending = 0;
	vd->vdev_mmp_kstat_id = 0;

	mutex_exit(&mts->mmp_io_lock);
	spa_config_exit(spa, SCL_STATE, mmp_tag);

	spa_mmp_history_set(spa, mmp_kstat_id, zio->io_error,
	    mmp_write_duration);

	abd_free(zio->io_abd);
}

/*
 * When the uberblock on-disk is updated by a spa_sync,
 * creating a new "best" uberblock, update the one stored
 * in the mmp thread state, used for mmp writes.
 */
void
mmp_update_uberblock(spa_t *spa, uberblock_t *ub)
{
	mmp_thread_t *mmp = &spa->spa_mmp;

	mutex_enter(&mmp->mmp_io_lock);
	mmp->mmp_ub = *ub;
	mmp->mmp_seq = 1;
	mmp->mmp_ub.ub_timestamp = gethrestime_sec();
	mmp_delay_update(spa, B_TRUE);
	mutex_exit(&mmp->mmp_io_lock);
}

/*
 * Choose a random vdev, label, and MMP block, and write over it
 * with a copy of the last-synced uberblock, whose timestamp
 * has been updated to reflect that the pool is in use.
 */
static void
mmp_write_uberblock(spa_t *spa)
{
	int flags = ZIO_FLAG_CONFIG_WRITER | ZIO_FLAG_CANFAIL;
	mmp_thread_t *mmp = &spa->spa_mmp;
	uberblock_t *ub;
	vdev_t *vd = NULL;
	int label, error;
	uint64_t offset;

	hrtime_t lock_acquire_time = gethrtime();
	spa_config_enter_mmp(spa, SCL_STATE, mmp_tag, RW_READER);
	lock_acquire_time = gethrtime() - lock_acquire_time;
	if (lock_acquire_time > (MSEC2NSEC(MMP_MIN_INTERVAL) / 10))
		zfs_dbgmsg("MMP SCL_STATE acquisition pool '%s' took %llu ns "
		    "gethrtime %llu", spa_name(spa), lock_acquire_time,
		    gethrtime());

	mutex_enter(&mmp->mmp_io_lock);

	error = mmp_next_leaf(spa);

	/*
	 * spa_mmp_history has two types of entries:
	 * Issued MMP write: records time issued, error status, etc.
	 * Skipped MMP write: an MMP write could not be issued because no
	 * suitable leaf vdev was available.  See comment above struct
	 * spa_mmp_history for details.
	 */

	if (error) {
		mmp_delay_update(spa, B_FALSE);
		if (mmp->mmp_skip_error == error) {
			spa_mmp_history_set_skip(spa, mmp->mmp_kstat_id - 1);
		} else {
			mmp->mmp_skip_error = error;
			spa_mmp_history_add(spa, mmp->mmp_ub.ub_txg,
			    gethrestime_sec(), mmp->mmp_delay, NULL, 0,
			    mmp->mmp_kstat_id++, error);
			zfs_dbgmsg("MMP error choosing leaf pool '%s' "
			    "gethrtime %llu fail_mask %#x", spa_name(spa),
			    gethrtime(), error);
		}
		mutex_exit(&mmp->mmp_io_lock);
		spa_config_exit(spa, SCL_STATE, mmp_tag);
		return;
	}

	vd = spa->spa_mmp.mmp_last_leaf;
	if (mmp->mmp_skip_error != 0) {
		mmp->mmp_skip_error = 0;
		zfs_dbgmsg("MMP write after skipping due to unavailable "
		    "leaves, pool '%s' gethrtime %llu leaf %llu",
		    spa_name(spa), (u_longlong_t)gethrtime(),
		    (u_longlong_t)vd->vdev_guid);
	}

	if (mmp->mmp_zio_root == NULL)
		mmp->mmp_zio_root = zio_root(spa, NULL, NULL,
		    flags | ZIO_FLAG_GODFATHER);

	if (mmp->mmp_ub.ub_timestamp != gethrestime_sec()) {
		/*
		 * Want to reset mmp_seq when timestamp advances because after
		 * an mmp_seq wrap new values will not be chosen by
		 * uberblock_compare() as the "best".
		 */
		mmp->mmp_ub.ub_timestamp = gethrestime_sec();
		mmp->mmp_seq = 1;
	}

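	/*
	 * Publish the current sequence number, write interval, and fail
	 * intervals in ub_mmp_config so an importing host can size its
	 * activity check (see the comment at the top of this file).
	 */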
	ub = &mmp->mmp_ub;
	ub->ub_mmp_magic = MMP_MAGIC;
	ub->ub_mmp_delay = mmp->mmp_delay;
	ub->ub_mmp_config = MMP_SEQ_SET(mmp->mmp_seq) |
	    MMP_INTERVAL_SET(MMP_INTERVAL_OK(zfs_multihost_interval)) |
	    MMP_FAIL_INT_SET(MMP_FAIL_INTVS_OK(
	    zfs_multihost_fail_intervals));
	vd->vdev_mmp_pending = gethrtime();
	vd->vdev_mmp_kstat_id = mmp->mmp_kstat_id;

	zio_t *zio = zio_null(mmp->mmp_zio_root, spa, NULL, NULL, NULL, flags);
	abd_t *ub_abd = abd_alloc_for_io(VDEV_UBERBLOCK_SIZE(vd), B_TRUE);
	abd_copy_from_buf(ub_abd, ub, sizeof (uberblock_t));
	abd_zero_off(ub_abd, sizeof (uberblock_t),
	    VDEV_UBERBLOCK_SIZE(vd) - sizeof (uberblock_t));

	mmp->mmp_seq++;
	mmp->mmp_kstat_id++;
	mutex_exit(&mmp->mmp_io_lock);

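	/*
	 * The write targets one of the last MMP_BLOCKS_PER_LABEL uberblock
	 * slots of a randomly chosen label; txg_sync never writes those
	 * slots, so no uberblock that must be preserved is overwritten.
	 */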
	offset = VDEV_UBERBLOCK_OFFSET(vd, VDEV_UBERBLOCK_COUNT(vd) -
	    MMP_BLOCKS_PER_LABEL + random_in_range(MMP_BLOCKS_PER_LABEL));

	label = random_in_range(VDEV_LABELS);
	vdev_label_write(zio, vd, label, ub_abd, offset,
	    VDEV_UBERBLOCK_SIZE(vd), mmp_write_done, mmp,
	    flags | ZIO_FLAG_DONT_PROPAGATE);

	(void) spa_mmp_history_add(spa, ub->ub_txg, ub->ub_timestamp,
	    ub->ub_mmp_delay, vd, label, vd->vdev_mmp_kstat_id, 0);

	zio_nowait(zio);
}

static __attribute__((noreturn)) void
mmp_thread(void *arg)
{
	spa_t *spa = (spa_t *)arg;
	mmp_thread_t *mmp = &spa->spa_mmp;
	boolean_t suspended = spa_suspended(spa);
	boolean_t multihost = spa_multihost(spa);
	uint64_t mmp_interval = MSEC2NSEC(MMP_INTERVAL_OK(
	    zfs_multihost_interval));
	uint32_t mmp_fail_intervals = MMP_FAIL_INTVS_OK(
	    zfs_multihost_fail_intervals);
	hrtime_t mmp_fail_ns = mmp_fail_intervals * mmp_interval;
	boolean_t last_spa_suspended;
	boolean_t last_spa_multihost;
	uint64_t last_mmp_interval;
	uint32_t last_mmp_fail_intervals;
	hrtime_t last_mmp_fail_ns;
	callb_cpr_t cpr;
	int skip_wait = 0;

	mmp_thread_enter(mmp, &cpr);

	/*
	 * There have been no MMP writes yet.  Setting mmp_last_write here gives
	 * us one mmp_fail_ns period, which is consistent with the activity
	 * check duration, to try to land an MMP write before MMP suspends the
	 * pool (if so configured).
	 */

	mutex_enter(&mmp->mmp_io_lock);
	mmp->mmp_last_write = gethrtime();
	mmp->mmp_delay = MSEC2NSEC(MMP_INTERVAL_OK(zfs_multihost_interval));
	mutex_exit(&mmp->mmp_io_lock);

	while (!mmp->mmp_thread_exiting) {
		hrtime_t next_time = gethrtime() +
		    MSEC2NSEC(MMP_DEFAULT_INTERVAL);
		int leaves = MAX(vdev_count_leaves(spa), 1);

		/* Detect changes in tunables or state */

		last_spa_suspended = suspended;
		last_spa_multihost = multihost;
		suspended = spa_suspended(spa);
		multihost = spa_multihost(spa);

		last_mmp_interval = mmp_interval;
		last_mmp_fail_intervals = mmp_fail_intervals;
		last_mmp_fail_ns = mmp_fail_ns;
		mmp_interval = MSEC2NSEC(MMP_INTERVAL_OK(
		    zfs_multihost_interval));
		mmp_fail_intervals = MMP_FAIL_INTVS_OK(
		    zfs_multihost_fail_intervals);

		/*
		 * Smooth mmp_fail_ns with a 31/32 decaying average so the
		 * pool is not suspended when the tunables are reduced.
		 */
		if (mmp_fail_intervals * mmp_interval < mmp_fail_ns) {
			mmp_fail_ns = (mmp_fail_ns * 31 +
			    mmp_fail_intervals * mmp_interval) / 32;
		} else {
			mmp_fail_ns = mmp_fail_intervals *
			    mmp_interval;
		}

		if (mmp_interval != last_mmp_interval ||
		    mmp_fail_intervals != last_mmp_fail_intervals) {
			/*
			 * We want other hosts to see new tunables as quickly as
			 * possible.  Write out at higher frequency than usual.
			 */
			skip_wait += leaves;
		}

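		/*
		 * One leaf is written per loop iteration, so spacing
		 * iterations mmp_interval / leaves apart gives each leaf,
		 * on average, one mmp write per mmp_interval.
		 */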
		if (multihost)
			next_time = gethrtime() + mmp_interval / leaves;

		if (mmp_fail_ns != last_mmp_fail_ns) {
			zfs_dbgmsg("MMP interval change pool '%s' "
			    "gethrtime %llu last_mmp_interval %llu "
			    "mmp_interval %llu last_mmp_fail_intervals %u "
			    "mmp_fail_intervals %u mmp_fail_ns %llu "
			    "skip_wait %d leaves %d next_time %llu",
			    spa_name(spa), (u_longlong_t)gethrtime(),
			    (u_longlong_t)last_mmp_interval,
			    (u_longlong_t)mmp_interval, last_mmp_fail_intervals,
			    mmp_fail_intervals, (u_longlong_t)mmp_fail_ns,
			    skip_wait, leaves, (u_longlong_t)next_time);
		}

		/*
		 * MMP off => on, or suspended => !suspended:
		 * No writes occurred recently.  Update mmp_last_write to give
		 * us some time to try.
		 */
		if ((!last_spa_multihost && multihost) ||
		    (last_spa_suspended && !suspended)) {
			zfs_dbgmsg("MMP state change pool '%s': gethrtime %llu "
			    "last_spa_multihost %u multihost %u "
			    "last_spa_suspended %u suspended %u",
			    spa_name(spa), (u_longlong_t)gethrtime(),
			    last_spa_multihost, multihost, last_spa_suspended,
			    suspended);
			mutex_enter(&mmp->mmp_io_lock);
			mmp->mmp_last_write = gethrtime();
			mmp->mmp_delay = mmp_interval;
			mutex_exit(&mmp->mmp_io_lock);
		}

		/*
		 * MMP on => off:
		 * mmp_delay == 0 tells importing node to skip activity check.
		 */
		if (last_spa_multihost && !multihost) {
			mutex_enter(&mmp->mmp_io_lock);
			mmp->mmp_delay = 0;
			mutex_exit(&mmp->mmp_io_lock);
		}

		/*
		 * Suspend the pool if no MMP write has succeeded in over
		 * mmp_interval * mmp_fail_intervals nanoseconds.
		 */
		if (multihost && !suspended && mmp_fail_intervals &&
		    (gethrtime() - mmp->mmp_last_write) > mmp_fail_ns) {
			zfs_dbgmsg("MMP suspending pool '%s': gethrtime %llu "
			    "mmp_last_write %llu mmp_interval %llu "
			    "mmp_fail_intervals %llu mmp_fail_ns %llu txg %llu",
			    spa_name(spa), (u_longlong_t)gethrtime(),
			    (u_longlong_t)mmp->mmp_last_write,
			    (u_longlong_t)mmp_interval,
			    (u_longlong_t)mmp_fail_intervals,
			    (u_longlong_t)mmp_fail_ns,
			    (u_longlong_t)spa->spa_uberblock.ub_txg);
			cmn_err(CE_WARN, "MMP writes to pool '%s' have not "
			    "succeeded in over %llu ms; suspending pool. "
			    "Hrtime %llu",
			    spa_name(spa),
			    NSEC2MSEC(gethrtime() - mmp->mmp_last_write),
			    gethrtime());
			zio_suspend(spa, NULL, ZIO_SUSPEND_MMP);
		}

		if (multihost && !suspended)
			mmp_write_uberblock(spa);

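		/*
		 * Drain any skip_wait credits at the minimum allowed
		 * interval so other hosts observe the changed tunables
		 * quickly.
		 */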
		if (skip_wait > 0) {
			next_time = gethrtime() + MSEC2NSEC(MMP_MIN_INTERVAL) /
			    leaves;
			skip_wait--;
		}

		CALLB_CPR_SAFE_BEGIN(&cpr);
		(void) cv_timedwait_idle_hires(&mmp->mmp_thread_cv,
		    &mmp->mmp_thread_lock, next_time, USEC2NSEC(100),
		    CALLOUT_FLAG_ABSOLUTE);
		CALLB_CPR_SAFE_END(&cpr, &mmp->mmp_thread_lock);
	}

	/* Outstanding writes are allowed to complete. */
	zio_wait(mmp->mmp_zio_root);

	mmp->mmp_zio_root = NULL;
	mmp_thread_exit(mmp, &mmp->mmp_thread, &cpr);

	thread_exit();
}

/*
 * Signal the MMP thread to wake it when it is sleeping on
 * its cv.  Used when some module parameter has changed and
 * we want the thread to know about it.
 * Only signal if the pool is active and the mmp thread is
 * running; otherwise there is no thread to wake.
 */
static void
mmp_signal_thread(spa_t *spa)
{
	mmp_thread_t *mmp = &spa->spa_mmp;

	mutex_enter(&mmp->mmp_thread_lock);
	if (mmp->mmp_thread)
		cv_broadcast(&mmp->mmp_thread_cv);
	mutex_exit(&mmp->mmp_thread_lock);
}

void
mmp_signal_all_threads(void)
{
	spa_t *spa = NULL;

	mutex_enter(&spa_namespace_lock);
	while ((spa = spa_next(spa))) {
		if (spa->spa_state == POOL_STATE_ACTIVE)
			mmp_signal_thread(spa);
	}
	mutex_exit(&spa_namespace_lock);
}

ZFS_MODULE_PARAM_CALL(zfs_multihost, zfs_multihost_, interval,
	param_set_multihost_interval, spl_param_get_u64, ZMOD_RW,
	"Milliseconds between mmp writes to each leaf");

ZFS_MODULE_PARAM(zfs_multihost, zfs_multihost_, fail_intervals, UINT, ZMOD_RW,
	"Max allowed period without a successful mmp write");

ZFS_MODULE_PARAM(zfs_multihost, zfs_multihost_, import_intervals, UINT, ZMOD_RW,
	"Number of zfs_multihost_interval periods to wait for activity");
749