xref: /freebsd/sys/contrib/openzfs/module/zfs/spa_misc.c (revision 924226fba12cc9a228c73b956e1b7fa24c60b055)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
23  * Copyright (c) 2011, 2019 by Delphix. All rights reserved.
24  * Copyright 2015 Nexenta Systems, Inc.  All rights reserved.
25  * Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
26  * Copyright 2013 Saso Kiselkov. All rights reserved.
27  * Copyright (c) 2017 Datto Inc.
28  * Copyright (c) 2017, Intel Corporation.
29  * Copyright (c) 2019, loli10K <ezomori.nozomu@gmail.com>. All rights reserved.
30  */
31 
32 #include <sys/zfs_context.h>
33 #include <sys/zfs_chksum.h>
34 #include <sys/spa_impl.h>
35 #include <sys/zio.h>
36 #include <sys/zio_checksum.h>
37 #include <sys/zio_compress.h>
38 #include <sys/dmu.h>
39 #include <sys/dmu_tx.h>
40 #include <sys/zap.h>
41 #include <sys/zil.h>
42 #include <sys/vdev_impl.h>
43 #include <sys/vdev_initialize.h>
44 #include <sys/vdev_trim.h>
45 #include <sys/vdev_file.h>
46 #include <sys/vdev_raidz.h>
47 #include <sys/metaslab.h>
48 #include <sys/uberblock_impl.h>
49 #include <sys/txg.h>
50 #include <sys/avl.h>
51 #include <sys/unique.h>
52 #include <sys/dsl_pool.h>
53 #include <sys/dsl_dir.h>
54 #include <sys/dsl_prop.h>
55 #include <sys/fm/util.h>
56 #include <sys/dsl_scan.h>
57 #include <sys/fs/zfs.h>
58 #include <sys/metaslab_impl.h>
59 #include <sys/arc.h>
60 #include <sys/ddt.h>
61 #include <sys/kstat.h>
62 #include "zfs_prop.h"
63 #include <sys/btree.h>
64 #include <sys/zfeature.h>
65 #include <sys/qat.h>
66 #include <sys/zstd/zstd.h>
67 
68 /*
69  * SPA locking
70  *
71  * There are three basic locks for managing spa_t structures:
72  *
73  * spa_namespace_lock (global mutex)
74  *
75  *	This lock must be acquired to do any of the following:
76  *
77  *		- Lookup a spa_t by name
78  *		- Add or remove a spa_t from the namespace
79  *		- Increase spa_refcount from zero
80  *		- Check if spa_refcount is zero
81  *		- Rename a spa_t
82  *		- Add/remove/attach/detach devices
83  *		- Held for the duration of create/destroy/import/export
84  *
85  *	It does not need to handle recursion.  A create or destroy may
86  *	reference objects (files or zvols) in other pools, but by
87  *	definition they must have an existing reference, and will never need
88  *	to lookup a spa_t by name.
89  *
90  * spa_refcount (per-spa zfs_refcount_t protected by mutex)
91  *
92  *	This reference count keeps track of any active users of the spa_t.  The
93  *	spa_t cannot be destroyed or freed while this is non-zero.  Internally,
94  *	the refcount is never really 'zero' - opening a pool implicitly keeps
95  *	some references in the DMU.  Internally we check against spa_minref, but
96  *	present the image of a zero/non-zero value to consumers.
97  *
98  * spa_config_lock[] (per-spa array of rwlocks)
99  *
100  *	This protects the spa_t from config changes, and must be held in
101  *	the following circumstances:
102  *
103  *		- RW_READER to perform I/O to the spa
104  *		- RW_WRITER to change the vdev config
105  *
106  * The locking order is fairly straightforward:
107  *
108  *		spa_namespace_lock	->	spa_refcount
109  *
110  *	The namespace lock must be acquired to increase the refcount from 0
111  *	or to check if it is zero.
112  *
113  *		spa_refcount		->	spa_config_lock[]
114  *
115  *	There must be at least one valid reference on the spa_t to acquire
116  *	the config lock.
117  *
118  *		spa_namespace_lock	->	spa_config_lock[]
119  *
120  *	The namespace lock must always be taken before the config lock.
121  *
122  *
123  * The spa_namespace_lock can be acquired directly and is globally visible.
124  *
125  * The namespace is manipulated using the following functions, all of which
126  * require the spa_namespace_lock to be held.
127  *
128  *	spa_lookup()		Lookup a spa_t by name.
129  *
130  *	spa_add()		Create a new spa_t in the namespace.
131  *
132  *	spa_remove()		Remove a spa_t from the namespace.  This also
133  *				frees up any memory associated with the spa_t.
134  *
135  *	spa_next()		Returns the next spa_t in the system, or the
136  *				first if NULL is passed.
137  *
138  *	spa_evict_all()		Shutdown and remove all spa_t structures in
139  *				the system.
140  *
141  *	spa_guid_exists()	Determine whether a pool/device guid exists.
142  *
143  * The spa_refcount is manipulated using the following functions:
144  *
145  *	spa_open_ref()		Adds a reference to the given spa_t.  Must be
146  *				called with spa_namespace_lock held if the
147  *				refcount is currently zero.
148  *
149  *	spa_close()		Remove a reference from the spa_t.  This will
150  *				not free the spa_t or remove it from the
151  *				namespace.  No locking is required.
152  *
153  *	spa_refcount_zero()	Returns true if the refcount is currently
154  *				zero.  Must be called with spa_namespace_lock
155  *				held.
156  *
157  * The spa_config_lock[] is an array of rwlocks, ordered as follows:
158  * SCL_CONFIG > SCL_STATE > SCL_ALLOC > SCL_ZIO > SCL_FREE > SCL_VDEV.
159  * spa_config_lock[] is manipulated with spa_config_{enter,exit,held}().
160  *
161  * To read the configuration, it suffices to hold one of these locks as reader.
162  * To modify the configuration, you must hold all locks as writer.  To modify
163  * vdev state without altering the vdev tree's topology (e.g. online/offline),
164  * you must hold SCL_STATE and SCL_ZIO as writer.
165  *
166  * We use these distinct config locks to avoid recursive lock entry.
167  * For example, spa_sync() (which holds SCL_CONFIG as reader) induces
168  * block allocations (SCL_ALLOC), which may require reading space maps
169  * from disk (dmu_read() -> zio_read() -> SCL_ZIO).
170  *
171  * The spa config locks cannot be normal rwlocks because we need the
172  * ability to hand off ownership.  For example, SCL_ZIO is acquired
173  * by the issuing thread and later released by an interrupt thread.
174  * They do, however, obey the usual write-wanted semantics to prevent
175  * writer (i.e. system administrator) starvation.
176  *
177  * The lock acquisition rules are as follows:
178  *
179  * SCL_CONFIG
180  *	Protects changes to the vdev tree topology, such as vdev
181  *	add/remove/attach/detach.  Protects the dirty config list
182  *	(spa_config_dirty_list) and the set of spares and l2arc devices.
183  *
184  * SCL_STATE
185  *	Protects changes to pool state and vdev state, such as vdev
186  *	online/offline/fault/degrade/clear.  Protects the dirty state list
187  *	(spa_state_dirty_list) and global pool state (spa_state).
188  *
189  * SCL_ALLOC
190  *	Protects changes to metaslab groups and classes.
191  *	Held as reader by metaslab_alloc() and metaslab_claim().
192  *
193  * SCL_ZIO
194  *	Held by bp-level zios (those which have no io_vd upon entry)
195  *	to prevent changes to the vdev tree.  The bp-level zio implicitly
196  *	protects all of its vdev child zios, which do not hold SCL_ZIO.
197  *
198  * SCL_FREE
199  *	Protects changes to metaslab groups and classes.
200  *	Held as reader by metaslab_free().  SCL_FREE is distinct from
201  *	SCL_ALLOC, and lower than SCL_ZIO, so that we can safely free
202  *	blocks in zio_done() while another i/o that holds either
203  *	SCL_ALLOC or SCL_ZIO is waiting for this i/o to complete.
204  *
205  * SCL_VDEV
206  *	Held as reader to prevent changes to the vdev tree during trivial
207  *	inquiries such as bp_get_dsize().  SCL_VDEV is distinct from the
208  *	other locks, and lower than all of them, to ensure that it's safe
209  *	to acquire regardless of caller context.
210  *
211  * In addition, the following rules apply:
212  *
213  * (a)	spa_props_lock protects pool properties, spa_config and spa_config_list.
214  *	The lock ordering is SCL_CONFIG > spa_props_lock.
215  *
216  * (b)	I/O operations on leaf vdevs.  For any zio operation that takes
217  *	an explicit vdev_t argument -- such as zio_ioctl(), zio_read_phys(),
218  *	or zio_write_phys() -- the caller must ensure that the config cannot
219  *	change in the interim, and that the vdev cannot be reopened.
220  *	SCL_STATE as reader suffices for both.
221  *
222  * The vdev configuration is protected by spa_vdev_enter() / spa_vdev_exit().
223  *
224  *	spa_vdev_enter()	Acquire the namespace lock and the config lock
225  *				for writing.
226  *
227  *	spa_vdev_exit()		Release the config lock, wait for all I/O
228  *				to complete, sync the updated configs to the
229  *				cache, and release the namespace lock.
230  *
231  * vdev state is protected by spa_vdev_state_enter() / spa_vdev_state_exit().
232  * Like spa_vdev_enter/exit, these are convenience wrappers -- the actual
233  * locking is always based on spa_namespace_lock and spa_config_lock[].
234  */
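/*
 * A brief illustrative sketch of the reader-side pattern described above
 * (hypothetical usage, not code taken from this file; 'guid' is a device
 * guid assumed to be supplied by the caller):
 *
 *	spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
 *	vdev_t *vd = vdev_lookup_by_guid(spa->spa_root_vdev, guid);
 *	spa_config_exit(spa, SCL_VDEV, FTAG);
 *
 * Configuration changes instead go through spa_vdev_enter(), which takes
 * spa_namespace_lock and then all of the config locks (SCL_ALL) as writer,
 * consistent with the ordering rules above.
 */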
235 
236 static avl_tree_t spa_namespace_avl;
237 kmutex_t spa_namespace_lock;
238 static kcondvar_t spa_namespace_cv;
239 static const int spa_max_replication_override = SPA_DVAS_PER_BP;
240 
241 static kmutex_t spa_spare_lock;
242 static avl_tree_t spa_spare_avl;
243 static kmutex_t spa_l2cache_lock;
244 static avl_tree_t spa_l2cache_avl;
245 
246 spa_mode_t spa_mode_global = SPA_MODE_UNINIT;
247 
248 #ifdef ZFS_DEBUG
249 /*
250  * Everything except dprintf, set_error, and indirect_remap is on
251  * by default in debug builds.
252  */
253 int zfs_flags = ~(ZFS_DEBUG_DPRINTF | ZFS_DEBUG_SET_ERROR |
254     ZFS_DEBUG_INDIRECT_REMAP);
255 #else
256 int zfs_flags = 0;
257 #endif
258 
259 /*
260  * zfs_recover can be set to nonzero to attempt to recover from
261  * otherwise-fatal errors, typically caused by on-disk corruption.  When
262  * set, calls to zfs_panic_recover() will turn into warning messages.
263  * This should only be used as a last resort, as it typically results
264  * in leaked space, or worse.
265  */
266 int zfs_recover = B_FALSE;
267 
268 /*
269  * If destroy encounters an EIO while reading metadata (e.g. indirect
270  * blocks), space referenced by the missing metadata can not be freed.
271  * Normally this causes the background destroy to become "stalled", as
272  * it is unable to make forward progress.  While in this stalled state,
273  * all remaining space to free from the error-encountering filesystem is
274  * "temporarily leaked".  Set this flag to cause it to ignore the EIO,
275  * permanently leak the space from indirect blocks that can not be read,
276  * and continue to free everything else that it can.
277  *
278  * The default, "stalling" behavior is useful if the storage partially
279  * fails (i.e. some but not all i/os fail), and then later recovers.  In
280  * this case, we will be able to continue pool operations while it is
281  * partially failed, and when it recovers, we can continue to free the
282  * space, with no leaks.  However, note that this case is actually
283  * fairly rare.
284  *
285  * Typically pools either (a) fail completely (but perhaps temporarily,
286  * e.g. a top-level vdev going offline), or (b) have localized,
287  * permanent errors (e.g. disk returns the wrong data due to bit flip or
288  * firmware bug).  In case (a), this setting does not matter because the
289  * pool will be suspended and the sync thread will not be able to make
290  * forward progress regardless.  In case (b), because the error is
291  * permanent, the best we can do is leak the minimum amount of space,
292  * which is what setting this flag will do.  Therefore, it is reasonable
293  * for this flag to normally be set, but we chose the more conservative
294  * approach of not setting it, so that there is no possibility of
295  * leaking space in the "partial temporary" failure case.
296  */
297 int zfs_free_leak_on_eio = B_FALSE;
298 
299 /*
300  * Expiration time in milliseconds. This value has two meanings. First, it is
301  * used to determine when the spa_deadman() logic should fire. By default the
302  * spa_deadman() will fire if spa_sync() has not completed in 600 seconds.
303  * Second, the value determines whether an I/O is considered "hung". Any I/O
304  * that has not completed in zfs_deadman_synctime_ms is considered "hung",
305  * resulting in one of three behaviors controlled by zfs_deadman_failmode.
306  */
307 unsigned long zfs_deadman_synctime_ms = 600000UL;  /* 10 min. */
308 
309 /*
310  * This value controls the maximum amount of time zio_wait() will block for an
311  * outstanding I/O.  By default this is 300 seconds, at which point the "hung"
312  * behavior will be applied as described for zfs_deadman_synctime_ms.
313  */
314 unsigned long zfs_deadman_ziotime_ms = 300000UL;  /* 5 min. */
315 
316 /*
317  * Check time in milliseconds. This defines the frequency at which we check
318  * for hung I/O.
319  */
320 unsigned long zfs_deadman_checktime_ms = 60000UL;  /* 1 min. */
321 
322 /*
323  * By default the deadman is enabled.
324  */
325 int zfs_deadman_enabled = B_TRUE;
326 
327 /*
328  * Controls the behavior of the deadman when it detects a "hung" I/O.
329  * Valid values are zfs_deadman_failmode=<wait|continue|panic>.
330  *
331  * wait     - Wait for the "hung" I/O (default)
332  * continue - Attempt to recover from a "hung" I/O
333  * panic    - Panic the system
334  */
335 const char *zfs_deadman_failmode = "wait";
336 
337 /*
338  * The worst case is single-sector max-parity RAID-Z blocks, in which
339  * case the space requirement is exactly (VDEV_RAIDZ_MAXPARITY + 1)
340  * times the size; so just assume that.  Add to this the fact that
341  * we can have up to 3 DVAs per bp, and one more factor of 2 because
342  * the block may be dittoed with up to 3 DVAs by ddt_sync().  All together,
343  * the worst case is:
344  *     (VDEV_RAIDZ_MAXPARITY + 1) * SPA_DVAS_PER_BP * 2 == 24
345  */
346 int spa_asize_inflation = 24;
347 
348 /*
349  * Normally, we don't allow the last 3.2% (1/(2^spa_slop_shift)) of space in
350  * the pool to be consumed (bounded by spa_max_slop).  This ensures that we
351  * don't run the pool completely out of space, due to unaccounted changes (e.g.
352  * to the MOS).  It also limits the worst-case time to allocate space.  If we
353  * have less than this amount of free space, most ZPL operations (e.g.  write,
354  * create) will return ENOSPC.  The ZIL metaslabs (spa_embedded_log_class) are
355  * also part of this 3.2% of space which can't be consumed by normal writes;
356  * the slop space "proper" (spa_get_slop_space()) is decreased by the embedded
357  * log space.
358  *
359  * Certain operations (e.g. file removal, most administrative actions) can
360  * use half the slop space.  They will only return ENOSPC if less than half
361  * the slop space is free.  Typically, once the pool has less than the slop
362  * space free, the user will use these operations to free up space in the pool.
363  * These are the operations that call dsl_pool_adjustedsize() with the netfree
364  * argument set to TRUE.
365  *
366  * Operations that are almost guaranteed to free up space in the absence of
367  * a pool checkpoint can use up to three quarters of the slop space
368  * (e.g. zfs destroy).
369  *
370  * A very restricted set of operations are always permitted, regardless of
371  * the amount of free space.  These are the operations that call
372  * dsl_sync_task(ZFS_SPACE_CHECK_NONE). If these operations result in a net
373  * increase in the amount of space used, it is possible to run the pool
374  * completely out of space, causing it to be permanently read-only.
375  *
376  * Note that on very small pools, the slop space will be larger than
377  * 3.2%, in an effort to have it be at least spa_min_slop (128MB),
378  * but we never allow it to be more than half the pool size.
379  *
380  * Further, on very large pools, the slop space will be smaller than
381  * 3.2%, to avoid reserving much more space than we actually need; bounded
382  * by spa_max_slop (128GB).
383  *
384  * See also the comments in zfs_space_check_t.
385  */
386 int spa_slop_shift = 5;
387 static const uint64_t spa_min_slop = 128ULL * 1024 * 1024;
388 static const uint64_t spa_max_slop = 128ULL * 1024 * 1024 * 1024;
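/*
 * Illustrative arithmetic for the defaults above (an example, not
 * additional policy): with spa_slop_shift = 5, a 1 TiB pool reserves
 * 1 TiB / 32 = 32 GiB of slop, which lies between spa_min_slop (128 MiB)
 * and spa_max_slop (128 GiB); the embedded log space is then subtracted
 * as computed in spa_get_slop_space() below.
 */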
389 static const int spa_allocators = 4;
390 
391 
392 void
393 spa_load_failed(spa_t *spa, const char *fmt, ...)
394 {
395 	va_list adx;
396 	char buf[256];
397 
398 	va_start(adx, fmt);
399 	(void) vsnprintf(buf, sizeof (buf), fmt, adx);
400 	va_end(adx);
401 
402 	zfs_dbgmsg("spa_load(%s, config %s): FAILED: %s", spa->spa_name,
403 	    spa->spa_trust_config ? "trusted" : "untrusted", buf);
404 }
405 
406 void
407 spa_load_note(spa_t *spa, const char *fmt, ...)
408 {
409 	va_list adx;
410 	char buf[256];
411 
412 	va_start(adx, fmt);
413 	(void) vsnprintf(buf, sizeof (buf), fmt, adx);
414 	va_end(adx);
415 
416 	zfs_dbgmsg("spa_load(%s, config %s): %s", spa->spa_name,
417 	    spa->spa_trust_config ? "trusted" : "untrusted", buf);
418 }
419 
420 /*
421  * By default dedup and user data indirects land in the special class
422  */
423  * By default, dedup and user data indirect blocks land in the special class.
424 static int zfs_user_indirect_is_special = B_TRUE;
425 
426 /*
427  * The percentage of special class space reserved for metadata only.
428  * Once we have allocated (100 - zfs_special_class_metadata_reserve_pct)
429  * percent of the special class space, we only let metadata into the class.
430  */
431 static int zfs_special_class_metadata_reserve_pct = 25;
432 
433 /*
434  * ==========================================================================
435  * SPA config locking
436  * ==========================================================================
437  */
438 static void
439 spa_config_lock_init(spa_t *spa)
440 {
441 	for (int i = 0; i < SCL_LOCKS; i++) {
442 		spa_config_lock_t *scl = &spa->spa_config_lock[i];
443 		mutex_init(&scl->scl_lock, NULL, MUTEX_DEFAULT, NULL);
444 		cv_init(&scl->scl_cv, NULL, CV_DEFAULT, NULL);
445 		scl->scl_writer = NULL;
446 		scl->scl_write_wanted = 0;
447 		scl->scl_count = 0;
448 	}
449 }
450 
451 static void
452 spa_config_lock_destroy(spa_t *spa)
453 {
454 	for (int i = 0; i < SCL_LOCKS; i++) {
455 		spa_config_lock_t *scl = &spa->spa_config_lock[i];
456 		mutex_destroy(&scl->scl_lock);
457 		cv_destroy(&scl->scl_cv);
458 		ASSERT(scl->scl_writer == NULL);
459 		ASSERT(scl->scl_write_wanted == 0);
460 		ASSERT(scl->scl_count == 0);
461 	}
462 }
463 
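/*
 * Try to acquire the requested set of config locks without blocking.
 * Returns 1 if every requested lock was acquired; otherwise releases any
 * locks already taken and returns 0.
 */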
464 int
465 spa_config_tryenter(spa_t *spa, int locks, void *tag, krw_t rw)
466 {
467 	for (int i = 0; i < SCL_LOCKS; i++) {
468 		spa_config_lock_t *scl = &spa->spa_config_lock[i];
469 		if (!(locks & (1 << i)))
470 			continue;
471 		mutex_enter(&scl->scl_lock);
472 		if (rw == RW_READER) {
473 			if (scl->scl_writer || scl->scl_write_wanted) {
474 				mutex_exit(&scl->scl_lock);
475 				spa_config_exit(spa, locks & ((1 << i) - 1),
476 				    tag);
477 				return (0);
478 			}
479 		} else {
480 			ASSERT(scl->scl_writer != curthread);
481 			if (scl->scl_count != 0) {
482 				mutex_exit(&scl->scl_lock);
483 				spa_config_exit(spa, locks & ((1 << i) - 1),
484 				    tag);
485 				return (0);
486 			}
487 			scl->scl_writer = curthread;
488 		}
489 		scl->scl_count++;
490 		mutex_exit(&scl->scl_lock);
491 	}
492 	return (1);
493 }
494 
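/*
 * Acquire the requested config locks, blocking until they are available.
 * Readers wait while a writer holds or wants a lock; a writer waits until
 * the hold count drops to zero.  Locks are always taken in SCL_* order.
 */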
495 void
496 spa_config_enter(spa_t *spa, int locks, const void *tag, krw_t rw)
497 {
498 	(void) tag;
499 	int wlocks_held = 0;
500 
501 	ASSERT3U(SCL_LOCKS, <, sizeof (wlocks_held) * NBBY);
502 
503 	for (int i = 0; i < SCL_LOCKS; i++) {
504 		spa_config_lock_t *scl = &spa->spa_config_lock[i];
505 		if (scl->scl_writer == curthread)
506 			wlocks_held |= (1 << i);
507 		if (!(locks & (1 << i)))
508 			continue;
509 		mutex_enter(&scl->scl_lock);
510 		if (rw == RW_READER) {
511 			while (scl->scl_writer || scl->scl_write_wanted) {
512 				cv_wait(&scl->scl_cv, &scl->scl_lock);
513 			}
514 		} else {
515 			ASSERT(scl->scl_writer != curthread);
516 			while (scl->scl_count != 0) {
517 				scl->scl_write_wanted++;
518 				cv_wait(&scl->scl_cv, &scl->scl_lock);
519 				scl->scl_write_wanted--;
520 			}
521 			scl->scl_writer = curthread;
522 		}
523 		scl->scl_count++;
524 		mutex_exit(&scl->scl_lock);
525 	}
526 	ASSERT3U(wlocks_held, <=, locks);
527 }
528 
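/*
 * Release the specified config locks, in reverse order, waking any waiters
 * once a lock's hold count drops to zero.
 */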
529 void
530 spa_config_exit(spa_t *spa, int locks, const void *tag)
531 {
532 	(void) tag;
533 	for (int i = SCL_LOCKS - 1; i >= 0; i--) {
534 		spa_config_lock_t *scl = &spa->spa_config_lock[i];
535 		if (!(locks & (1 << i)))
536 			continue;
537 		mutex_enter(&scl->scl_lock);
538 		ASSERT(scl->scl_count > 0);
539 		if (--scl->scl_count == 0) {
540 			ASSERT(scl->scl_writer == NULL ||
541 			    scl->scl_writer == curthread);
542 			scl->scl_writer = NULL;	/* OK in either case */
543 			cv_broadcast(&scl->scl_cv);
544 		}
545 		mutex_exit(&scl->scl_lock);
546 	}
547 }
548 
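/*
 * Return the subset of 'locks' that is held for 'rw': for RW_READER a lock
 * counts as held if its hold count is nonzero, for RW_WRITER only if the
 * current thread is the writer.  Typically used in ASSERTs.
 */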
549 int
550 spa_config_held(spa_t *spa, int locks, krw_t rw)
551 {
552 	int locks_held = 0;
553 
554 	for (int i = 0; i < SCL_LOCKS; i++) {
555 		spa_config_lock_t *scl = &spa->spa_config_lock[i];
556 		if (!(locks & (1 << i)))
557 			continue;
558 		if ((rw == RW_READER && scl->scl_count != 0) ||
559 		    (rw == RW_WRITER && scl->scl_writer == curthread))
560 			locks_held |= 1 << i;
561 	}
562 
563 	return (locks_held);
564 }
565 
566 /*
567  * ==========================================================================
568  * SPA namespace functions
569  * ==========================================================================
570  */
571 
572 /*
573  * Lookup the named spa_t in the AVL tree.  The spa_namespace_lock must be held.
574  * Returns NULL if no matching spa_t is found.
575  */
576 spa_t *
577 spa_lookup(const char *name)
578 {
579 	static spa_t search;	/* spa_t is large; don't allocate on stack */
580 	spa_t *spa;
581 	avl_index_t where;
582 	char *cp;
583 
584 	ASSERT(MUTEX_HELD(&spa_namespace_lock));
585 
586 	(void) strlcpy(search.spa_name, name, sizeof (search.spa_name));
587 
588 	/*
589 	 * If it's a full dataset name, figure out the pool name and
590 	 * just use that.
591 	 */
592 	cp = strpbrk(search.spa_name, "/@#");
593 	if (cp != NULL)
594 		*cp = '\0';
595 
596 	spa = avl_find(&spa_namespace_avl, &search, &where);
597 
598 	return (spa);
599 }
600 
601 /*
602  * Fires when spa_sync has not completed within zfs_deadman_synctime_ms.
603  * If the zfs_deadman_enabled flag is set then it inspects all vdev queues
604  * looking for potentially hung I/Os.
605  */
606 void
607 spa_deadman(void *arg)
608 {
609 	spa_t *spa = arg;
610 
611 	/* Disable the deadman if the pool is suspended. */
612 	if (spa_suspended(spa))
613 		return;
614 
615 	zfs_dbgmsg("slow spa_sync: started %llu seconds ago, calls %llu",
616 	    (gethrtime() - spa->spa_sync_starttime) / NANOSEC,
617 	    (u_longlong_t)++spa->spa_deadman_calls);
618 	if (zfs_deadman_enabled)
619 		vdev_deadman(spa->spa_root_vdev, FTAG);
620 
621 	spa->spa_deadman_tqid = taskq_dispatch_delay(system_delay_taskq,
622 	    spa_deadman, spa, TQ_SLEEP, ddi_get_lbolt() +
623 	    MSEC_TO_TICK(zfs_deadman_checktime_ms));
624 }
625 
626 static int
627 spa_log_sm_sort_by_txg(const void *va, const void *vb)
628 {
629 	const spa_log_sm_t *a = va;
630 	const spa_log_sm_t *b = vb;
631 
632 	return (TREE_CMP(a->sls_txg, b->sls_txg));
633 }
634 
635 /*
636  * Create an uninitialized spa_t with the given name.  Requires
637  * spa_namespace_lock.  The caller must ensure that the spa_t doesn't already
638  * exist by calling spa_lookup() first.
639  */
640 spa_t *
641 spa_add(const char *name, nvlist_t *config, const char *altroot)
642 {
643 	spa_t *spa;
644 	spa_config_dirent_t *dp;
645 
646 	ASSERT(MUTEX_HELD(&spa_namespace_lock));
647 
648 	spa = kmem_zalloc(sizeof (spa_t), KM_SLEEP);
649 
650 	mutex_init(&spa->spa_async_lock, NULL, MUTEX_DEFAULT, NULL);
651 	mutex_init(&spa->spa_errlist_lock, NULL, MUTEX_DEFAULT, NULL);
652 	mutex_init(&spa->spa_errlog_lock, NULL, MUTEX_DEFAULT, NULL);
653 	mutex_init(&spa->spa_evicting_os_lock, NULL, MUTEX_DEFAULT, NULL);
654 	mutex_init(&spa->spa_history_lock, NULL, MUTEX_DEFAULT, NULL);
655 	mutex_init(&spa->spa_proc_lock, NULL, MUTEX_DEFAULT, NULL);
656 	mutex_init(&spa->spa_props_lock, NULL, MUTEX_DEFAULT, NULL);
657 	mutex_init(&spa->spa_cksum_tmpls_lock, NULL, MUTEX_DEFAULT, NULL);
658 	mutex_init(&spa->spa_scrub_lock, NULL, MUTEX_DEFAULT, NULL);
659 	mutex_init(&spa->spa_suspend_lock, NULL, MUTEX_DEFAULT, NULL);
660 	mutex_init(&spa->spa_vdev_top_lock, NULL, MUTEX_DEFAULT, NULL);
661 	mutex_init(&spa->spa_feat_stats_lock, NULL, MUTEX_DEFAULT, NULL);
662 	mutex_init(&spa->spa_flushed_ms_lock, NULL, MUTEX_DEFAULT, NULL);
663 	mutex_init(&spa->spa_activities_lock, NULL, MUTEX_DEFAULT, NULL);
664 
665 	cv_init(&spa->spa_async_cv, NULL, CV_DEFAULT, NULL);
666 	cv_init(&spa->spa_evicting_os_cv, NULL, CV_DEFAULT, NULL);
667 	cv_init(&spa->spa_proc_cv, NULL, CV_DEFAULT, NULL);
668 	cv_init(&spa->spa_scrub_io_cv, NULL, CV_DEFAULT, NULL);
669 	cv_init(&spa->spa_suspend_cv, NULL, CV_DEFAULT, NULL);
670 	cv_init(&spa->spa_activities_cv, NULL, CV_DEFAULT, NULL);
671 	cv_init(&spa->spa_waiters_cv, NULL, CV_DEFAULT, NULL);
672 
673 	for (int t = 0; t < TXG_SIZE; t++)
674 		bplist_create(&spa->spa_free_bplist[t]);
675 
676 	(void) strlcpy(spa->spa_name, name, sizeof (spa->spa_name));
677 	spa->spa_state = POOL_STATE_UNINITIALIZED;
678 	spa->spa_freeze_txg = UINT64_MAX;
679 	spa->spa_final_txg = UINT64_MAX;
680 	spa->spa_load_max_txg = UINT64_MAX;
681 	spa->spa_proc = &p0;
682 	spa->spa_proc_state = SPA_PROC_NONE;
683 	spa->spa_trust_config = B_TRUE;
684 	spa->spa_hostid = zone_get_hostid(NULL);
685 
686 	spa->spa_deadman_synctime = MSEC2NSEC(zfs_deadman_synctime_ms);
687 	spa->spa_deadman_ziotime = MSEC2NSEC(zfs_deadman_ziotime_ms);
688 	spa_set_deadman_failmode(spa, zfs_deadman_failmode);
689 
690 	zfs_refcount_create(&spa->spa_refcount);
691 	spa_config_lock_init(spa);
692 	spa_stats_init(spa);
693 
694 	avl_add(&spa_namespace_avl, spa);
695 
696 	/*
697 	 * Set the alternate root, if there is one.
698 	 */
699 	if (altroot)
700 		spa->spa_root = spa_strdup(altroot);
701 
702 	spa->spa_alloc_count = spa_allocators;
703 	spa->spa_allocs = kmem_zalloc(spa->spa_alloc_count *
704 	    sizeof (spa_alloc_t), KM_SLEEP);
705 	for (int i = 0; i < spa->spa_alloc_count; i++) {
706 		mutex_init(&spa->spa_allocs[i].spaa_lock, NULL, MUTEX_DEFAULT,
707 		    NULL);
708 		avl_create(&spa->spa_allocs[i].spaa_tree, zio_bookmark_compare,
709 		    sizeof (zio_t), offsetof(zio_t, io_alloc_node));
710 	}
711 	avl_create(&spa->spa_metaslabs_by_flushed, metaslab_sort_by_flushed,
712 	    sizeof (metaslab_t), offsetof(metaslab_t, ms_spa_txg_node));
713 	avl_create(&spa->spa_sm_logs_by_txg, spa_log_sm_sort_by_txg,
714 	    sizeof (spa_log_sm_t), offsetof(spa_log_sm_t, sls_node));
715 	list_create(&spa->spa_log_summary, sizeof (log_summary_entry_t),
716 	    offsetof(log_summary_entry_t, lse_node));
717 
718 	/*
719 	 * Every pool starts with the default cachefile
720 	 */
721 	list_create(&spa->spa_config_list, sizeof (spa_config_dirent_t),
722 	    offsetof(spa_config_dirent_t, scd_link));
723 
724 	dp = kmem_zalloc(sizeof (spa_config_dirent_t), KM_SLEEP);
725 	dp->scd_path = altroot ? NULL : spa_strdup(spa_config_path);
726 	list_insert_head(&spa->spa_config_list, dp);
727 
728 	VERIFY(nvlist_alloc(&spa->spa_load_info, NV_UNIQUE_NAME,
729 	    KM_SLEEP) == 0);
730 
731 	if (config != NULL) {
732 		nvlist_t *features;
733 
734 		if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_FEATURES_FOR_READ,
735 		    &features) == 0) {
736 			VERIFY(nvlist_dup(features, &spa->spa_label_features,
737 			    0) == 0);
738 		}
739 
740 		VERIFY(nvlist_dup(config, &spa->spa_config, 0) == 0);
741 	}
742 
743 	if (spa->spa_label_features == NULL) {
744 		VERIFY(nvlist_alloc(&spa->spa_label_features, NV_UNIQUE_NAME,
745 		    KM_SLEEP) == 0);
746 	}
747 
748 	spa->spa_min_ashift = INT_MAX;
749 	spa->spa_max_ashift = 0;
750 	spa->spa_min_alloc = INT_MAX;
751 
752 	/* Reset cached value */
753 	spa->spa_dedup_dspace = ~0ULL;
754 
755 	/*
756 	 * As a pool is being created, treat all features as disabled by
757 	 * setting SPA_FEATURE_DISABLED for all entries in the feature
758 	 * refcount cache.
759 	 */
760 	for (int i = 0; i < SPA_FEATURES; i++) {
761 		spa->spa_feat_refcount_cache[i] = SPA_FEATURE_DISABLED;
762 	}
763 
764 	list_create(&spa->spa_leaf_list, sizeof (vdev_t),
765 	    offsetof(vdev_t, vdev_leaf_node));
766 
767 	return (spa);
768 }
769 
770 /*
771  * Removes a spa_t from the namespace, freeing up any memory used.  Requires
772  * spa_namespace_lock.  This is called only after the spa_t has been closed and
773  * deactivated.
774  */
775 void
776 spa_remove(spa_t *spa)
777 {
778 	spa_config_dirent_t *dp;
779 
780 	ASSERT(MUTEX_HELD(&spa_namespace_lock));
781 	ASSERT(spa_state(spa) == POOL_STATE_UNINITIALIZED);
782 	ASSERT3U(zfs_refcount_count(&spa->spa_refcount), ==, 0);
783 	ASSERT0(spa->spa_waiters);
784 
785 	nvlist_free(spa->spa_config_splitting);
786 
787 	avl_remove(&spa_namespace_avl, spa);
788 	cv_broadcast(&spa_namespace_cv);
789 
790 	if (spa->spa_root)
791 		spa_strfree(spa->spa_root);
792 
793 	while ((dp = list_head(&spa->spa_config_list)) != NULL) {
794 		list_remove(&spa->spa_config_list, dp);
795 		if (dp->scd_path != NULL)
796 			spa_strfree(dp->scd_path);
797 		kmem_free(dp, sizeof (spa_config_dirent_t));
798 	}
799 
800 	for (int i = 0; i < spa->spa_alloc_count; i++) {
801 		avl_destroy(&spa->spa_allocs[i].spaa_tree);
802 		mutex_destroy(&spa->spa_allocs[i].spaa_lock);
803 	}
804 	kmem_free(spa->spa_allocs, spa->spa_alloc_count *
805 	    sizeof (spa_alloc_t));
806 
807 	avl_destroy(&spa->spa_metaslabs_by_flushed);
808 	avl_destroy(&spa->spa_sm_logs_by_txg);
809 	list_destroy(&spa->spa_log_summary);
810 	list_destroy(&spa->spa_config_list);
811 	list_destroy(&spa->spa_leaf_list);
812 
813 	nvlist_free(spa->spa_label_features);
814 	nvlist_free(spa->spa_load_info);
815 	nvlist_free(spa->spa_feat_stats);
816 	spa_config_set(spa, NULL);
817 
818 	zfs_refcount_destroy(&spa->spa_refcount);
819 
820 	spa_stats_destroy(spa);
821 	spa_config_lock_destroy(spa);
822 
823 	for (int t = 0; t < TXG_SIZE; t++)
824 		bplist_destroy(&spa->spa_free_bplist[t]);
825 
826 	zio_checksum_templates_free(spa);
827 
828 	cv_destroy(&spa->spa_async_cv);
829 	cv_destroy(&spa->spa_evicting_os_cv);
830 	cv_destroy(&spa->spa_proc_cv);
831 	cv_destroy(&spa->spa_scrub_io_cv);
832 	cv_destroy(&spa->spa_suspend_cv);
833 	cv_destroy(&spa->spa_activities_cv);
834 	cv_destroy(&spa->spa_waiters_cv);
835 
836 	mutex_destroy(&spa->spa_flushed_ms_lock);
837 	mutex_destroy(&spa->spa_async_lock);
838 	mutex_destroy(&spa->spa_errlist_lock);
839 	mutex_destroy(&spa->spa_errlog_lock);
840 	mutex_destroy(&spa->spa_evicting_os_lock);
841 	mutex_destroy(&spa->spa_history_lock);
842 	mutex_destroy(&spa->spa_proc_lock);
843 	mutex_destroy(&spa->spa_props_lock);
844 	mutex_destroy(&spa->spa_cksum_tmpls_lock);
845 	mutex_destroy(&spa->spa_scrub_lock);
846 	mutex_destroy(&spa->spa_suspend_lock);
847 	mutex_destroy(&spa->spa_vdev_top_lock);
848 	mutex_destroy(&spa->spa_feat_stats_lock);
849 	mutex_destroy(&spa->spa_activities_lock);
850 
851 	kmem_free(spa, sizeof (spa_t));
852 }
853 
854 /*
855  * Given a pool, return the next pool in the namespace, or NULL if there is
856  * none.  If 'prev' is NULL, return the first pool.
857  */
858 spa_t *
859 spa_next(spa_t *prev)
860 {
861 	ASSERT(MUTEX_HELD(&spa_namespace_lock));
862 
863 	if (prev)
864 		return (AVL_NEXT(&spa_namespace_avl, prev));
865 	else
866 		return (avl_first(&spa_namespace_avl));
867 }
868 
869 /*
870  * ==========================================================================
871  * SPA refcount functions
872  * ==========================================================================
873  */
874 
875 /*
876  * Add a reference to the given spa_t.  Must have at least one reference, or
877  * have the namespace lock held.
878  */
879 void
880 spa_open_ref(spa_t *spa, void *tag)
881 {
882 	ASSERT(zfs_refcount_count(&spa->spa_refcount) >= spa->spa_minref ||
883 	    MUTEX_HELD(&spa_namespace_lock));
884 	(void) zfs_refcount_add(&spa->spa_refcount, tag);
885 }
886 
887 /*
888  * Remove a reference to the given spa_t.  Must have at least one reference, or
889  * have the namespace lock held.
890  */
891 void
892 spa_close(spa_t *spa, void *tag)
893 {
894 	ASSERT(zfs_refcount_count(&spa->spa_refcount) > spa->spa_minref ||
895 	    MUTEX_HELD(&spa_namespace_lock));
896 	(void) zfs_refcount_remove(&spa->spa_refcount, tag);
897 }
898 
899 /*
900  * Remove a reference to the given spa_t held by a dsl dir that is
901  * being asynchronously released.  Async releases occur from a taskq
902  * performing eviction of dsl datasets and dirs.  The namespace lock
903  * isn't held and the hold by the object being evicted may contribute to
904  * spa_minref (e.g. dataset or directory released during pool export),
905  * so the asserts in spa_close() do not apply.
906  */
907 void
908 spa_async_close(spa_t *spa, void *tag)
909 {
910 	(void) zfs_refcount_remove(&spa->spa_refcount, tag);
911 }
912 
913 /*
914  * Check to see if the spa refcount is zero.  Must be called with
915  * spa_namespace_lock held.  We really compare against spa_minref, which is the
916  * number of references acquired when opening a pool.
917  */
918 boolean_t
919 spa_refcount_zero(spa_t *spa)
920 {
921 	ASSERT(MUTEX_HELD(&spa_namespace_lock));
922 
923 	return (zfs_refcount_count(&spa->spa_refcount) == spa->spa_minref);
924 }
925 
926 /*
927  * ==========================================================================
928  * SPA spare and l2cache tracking
929  * ==========================================================================
930  */
931 
932 /*
933  * Hot spares and cache devices are tracked using the same code below,
934  * for 'auxiliary' devices.
935  */
936 
937 typedef struct spa_aux {
938 	uint64_t	aux_guid;
939 	uint64_t	aux_pool;
940 	avl_node_t	aux_avl;
941 	int		aux_count;
942 } spa_aux_t;
943 
944 static inline int
945 spa_aux_compare(const void *a, const void *b)
946 {
947 	const spa_aux_t *sa = (const spa_aux_t *)a;
948 	const spa_aux_t *sb = (const spa_aux_t *)b;
949 
950 	return (TREE_CMP(sa->aux_guid, sb->aux_guid));
951 }
952 
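/*
 * Add a reference to the auxiliary device with vd's guid in the given AVL
 * tree, creating a new entry if one does not already exist.
 */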
953 static void
954 spa_aux_add(vdev_t *vd, avl_tree_t *avl)
955 {
956 	avl_index_t where;
957 	spa_aux_t search;
958 	spa_aux_t *aux;
959 
960 	search.aux_guid = vd->vdev_guid;
961 	if ((aux = avl_find(avl, &search, &where)) != NULL) {
962 		aux->aux_count++;
963 	} else {
964 		aux = kmem_zalloc(sizeof (spa_aux_t), KM_SLEEP);
965 		aux->aux_guid = vd->vdev_guid;
966 		aux->aux_count = 1;
967 		avl_insert(avl, aux, where);
968 	}
969 }
970 
971 static void
972 spa_aux_remove(vdev_t *vd, avl_tree_t *avl)
973 {
974 	spa_aux_t search;
975 	spa_aux_t *aux;
976 	avl_index_t where;
977 
978 	search.aux_guid = vd->vdev_guid;
979 	aux = avl_find(avl, &search, &where);
980 
981 	ASSERT(aux != NULL);
982 
983 	if (--aux->aux_count == 0) {
984 		avl_remove(avl, aux);
985 		kmem_free(aux, sizeof (spa_aux_t));
986 	} else if (aux->aux_pool == spa_guid(vd->vdev_spa)) {
987 		aux->aux_pool = 0ULL;
988 	}
989 }
990 
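/*
 * Look up an auxiliary device by guid, optionally returning the pool it is
 * active in (0 if not found or inactive) and its reference count (0 if not
 * found).
 */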
991 static boolean_t
992 spa_aux_exists(uint64_t guid, uint64_t *pool, int *refcnt, avl_tree_t *avl)
993 {
994 	spa_aux_t search, *found;
995 
996 	search.aux_guid = guid;
997 	found = avl_find(avl, &search, NULL);
998 
999 	if (pool) {
1000 		if (found)
1001 			*pool = found->aux_pool;
1002 		else
1003 			*pool = 0ULL;
1004 	}
1005 
1006 	if (refcnt) {
1007 		if (found)
1008 			*refcnt = found->aux_count;
1009 		else
1010 			*refcnt = 0;
1011 	}
1012 
1013 	return (found != NULL);
1014 }
1015 
1016 static void
1017 spa_aux_activate(vdev_t *vd, avl_tree_t *avl)
1018 {
1019 	spa_aux_t search, *found;
1020 	avl_index_t where;
1021 
1022 	search.aux_guid = vd->vdev_guid;
1023 	found = avl_find(avl, &search, &where);
1024 	ASSERT(found != NULL);
1025 	ASSERT(found->aux_pool == 0ULL);
1026 
1027 	found->aux_pool = spa_guid(vd->vdev_spa);
1028 }
1029 
1030 /*
1031  * Spares are tracked globally due to the following constraints:
1032  *
1033  *	- A spare may be part of multiple pools.
1034  *	- A spare may be added to a pool even if it's actively in use within
1035  *	  another pool.
1036  *	- A spare in use in any pool can only be the source of a replacement if
1037  *	  the target is a spare in the same pool.
1038  *
1039  * We keep track of all spares on the system through the use of a reference
1040  * counted AVL tree.  When a vdev is added as a spare, or used as a replacement
1041  * spare, we bump the reference count in the AVL tree.  In addition, we set
1042  * the 'vdev_isspare' member to indicate that the device is a spare (active or
1043  * inactive).  When a spare is made active (used to replace a device in the
1044  * pool), we also keep track of which pool it's been made a part of.
1045  *
1046  * The 'spa_spare_lock' protects the AVL tree.  These functions are normally
1047  * called under the spa_namespace lock as part of vdev reconfiguration.  The
1048  * separate spare lock exists for the status query path, which does not need to
1049  * be completely consistent with respect to other vdev configuration changes.
1050  */
1051 
1052 static int
1053 spa_spare_compare(const void *a, const void *b)
1054 {
1055 	return (spa_aux_compare(a, b));
1056 }
1057 
1058 void
1059 spa_spare_add(vdev_t *vd)
1060 {
1061 	mutex_enter(&spa_spare_lock);
1062 	ASSERT(!vd->vdev_isspare);
1063 	spa_aux_add(vd, &spa_spare_avl);
1064 	vd->vdev_isspare = B_TRUE;
1065 	mutex_exit(&spa_spare_lock);
1066 }
1067 
1068 void
1069 spa_spare_remove(vdev_t *vd)
1070 {
1071 	mutex_enter(&spa_spare_lock);
1072 	ASSERT(vd->vdev_isspare);
1073 	spa_aux_remove(vd, &spa_spare_avl);
1074 	vd->vdev_isspare = B_FALSE;
1075 	mutex_exit(&spa_spare_lock);
1076 }
1077 
1078 boolean_t
1079 spa_spare_exists(uint64_t guid, uint64_t *pool, int *refcnt)
1080 {
1081 	boolean_t found;
1082 
1083 	mutex_enter(&spa_spare_lock);
1084 	found = spa_aux_exists(guid, pool, refcnt, &spa_spare_avl);
1085 	mutex_exit(&spa_spare_lock);
1086 
1087 	return (found);
1088 }
1089 
1090 void
1091 spa_spare_activate(vdev_t *vd)
1092 {
1093 	mutex_enter(&spa_spare_lock);
1094 	ASSERT(vd->vdev_isspare);
1095 	spa_aux_activate(vd, &spa_spare_avl);
1096 	mutex_exit(&spa_spare_lock);
1097 }
1098 
1099 /*
1100  * Level 2 ARC devices are tracked globally for the same reasons as spares.
1101  * Cache devices currently only support one pool per cache device, and so
1102  * for these devices the aux reference count is currently unused beyond 1.
1103  */
1104 
1105 static int
1106 spa_l2cache_compare(const void *a, const void *b)
1107 {
1108 	return (spa_aux_compare(a, b));
1109 }
1110 
1111 void
1112 spa_l2cache_add(vdev_t *vd)
1113 {
1114 	mutex_enter(&spa_l2cache_lock);
1115 	ASSERT(!vd->vdev_isl2cache);
1116 	spa_aux_add(vd, &spa_l2cache_avl);
1117 	vd->vdev_isl2cache = B_TRUE;
1118 	mutex_exit(&spa_l2cache_lock);
1119 }
1120 
1121 void
1122 spa_l2cache_remove(vdev_t *vd)
1123 {
1124 	mutex_enter(&spa_l2cache_lock);
1125 	ASSERT(vd->vdev_isl2cache);
1126 	spa_aux_remove(vd, &spa_l2cache_avl);
1127 	vd->vdev_isl2cache = B_FALSE;
1128 	mutex_exit(&spa_l2cache_lock);
1129 }
1130 
1131 boolean_t
1132 spa_l2cache_exists(uint64_t guid, uint64_t *pool)
1133 {
1134 	boolean_t found;
1135 
1136 	mutex_enter(&spa_l2cache_lock);
1137 	found = spa_aux_exists(guid, pool, NULL, &spa_l2cache_avl);
1138 	mutex_exit(&spa_l2cache_lock);
1139 
1140 	return (found);
1141 }
1142 
1143 void
1144 spa_l2cache_activate(vdev_t *vd)
1145 {
1146 	mutex_enter(&spa_l2cache_lock);
1147 	ASSERT(vd->vdev_isl2cache);
1148 	spa_aux_activate(vd, &spa_l2cache_avl);
1149 	mutex_exit(&spa_l2cache_lock);
1150 }
1151 
1152 /*
1153  * ==========================================================================
1154  * SPA vdev locking
1155  * ==========================================================================
1156  */
1157 
1158 /*
1159  * Lock the given spa_t for the purpose of adding or removing a vdev.
1160  * Grabs the global spa_namespace_lock plus the spa config lock for writing.
1161  * It returns the next transaction group for the spa_t.
1162  */
1163 uint64_t
1164 spa_vdev_enter(spa_t *spa)
1165 {
1166 	mutex_enter(&spa->spa_vdev_top_lock);
1167 	mutex_enter(&spa_namespace_lock);
1168 
1169 	vdev_autotrim_stop_all(spa);
1170 
1171 	return (spa_vdev_config_enter(spa));
1172 }
1173 
1174 /*
1175  * The same as spa_vdev_enter() above but additionally takes the guid of
1176  * the vdev being detached.  When there is a rebuild in process it will be
1177  * suspended while the vdev tree is modified then resumed by spa_vdev_exit().
1178  * The rebuild is canceled if only a single child remains after the detach.
1179  */
1180 uint64_t
1181 spa_vdev_detach_enter(spa_t *spa, uint64_t guid)
1182 {
1183 	mutex_enter(&spa->spa_vdev_top_lock);
1184 	mutex_enter(&spa_namespace_lock);
1185 
1186 	vdev_autotrim_stop_all(spa);
1187 
1188 	if (guid != 0) {
1189 		vdev_t *vd = spa_lookup_by_guid(spa, guid, B_FALSE);
1190 		if (vd) {
1191 			vdev_rebuild_stop_wait(vd->vdev_top);
1192 		}
1193 	}
1194 
1195 	return (spa_vdev_config_enter(spa));
1196 }
1197 
1198 /*
1199  * Internal implementation for spa_vdev_enter().  Used when a vdev
1200  * operation requires multiple syncs (i.e. removing a device) while
1201  * keeping the spa_namespace_lock held.
1202  */
1203 uint64_t
1204 spa_vdev_config_enter(spa_t *spa)
1205 {
1206 	ASSERT(MUTEX_HELD(&spa_namespace_lock));
1207 
1208 	spa_config_enter(spa, SCL_ALL, spa, RW_WRITER);
1209 
1210 	return (spa_last_synced_txg(spa) + 1);
1211 }
1212 
1213 /*
1214  * Used in combination with spa_vdev_config_enter() to allow the syncing
1215  * of multiple transactions without releasing the spa_namespace_lock.
1216  */
1217 void
1218 spa_vdev_config_exit(spa_t *spa, vdev_t *vd, uint64_t txg, int error, char *tag)
1219 {
1220 	ASSERT(MUTEX_HELD(&spa_namespace_lock));
1221 
1222 	int config_changed = B_FALSE;
1223 
1224 	ASSERT(txg > spa_last_synced_txg(spa));
1225 
1226 	spa->spa_pending_vdev = NULL;
1227 
1228 	/*
1229 	 * Reassess the DTLs.
1230 	 */
1231 	vdev_dtl_reassess(spa->spa_root_vdev, 0, 0, B_FALSE, B_FALSE);
1232 
1233 	if (error == 0 && !list_is_empty(&spa->spa_config_dirty_list)) {
1234 		config_changed = B_TRUE;
1235 		spa->spa_config_generation++;
1236 	}
1237 
1238 	/*
1239 	 * Verify the metaslab classes.
1240 	 */
1241 	ASSERT(metaslab_class_validate(spa_normal_class(spa)) == 0);
1242 	ASSERT(metaslab_class_validate(spa_log_class(spa)) == 0);
1243 	ASSERT(metaslab_class_validate(spa_embedded_log_class(spa)) == 0);
1244 	ASSERT(metaslab_class_validate(spa_special_class(spa)) == 0);
1245 	ASSERT(metaslab_class_validate(spa_dedup_class(spa)) == 0);
1246 
1247 	spa_config_exit(spa, SCL_ALL, spa);
1248 
1249 	/*
1250 	 * Panic the system if the specified tag requires it.  This
1251 	 * is useful for ensuring that configurations are updated
1252 	 * transactionally.
1253 	 */
1254 	if (zio_injection_enabled)
1255 		zio_handle_panic_injection(spa, tag, 0);
1256 
1257 	/*
1258 	 * Note: this txg_wait_synced() is important because it ensures
1259 	 * that there won't be more than one config change per txg.
1260 	 * This allows us to use the txg as the generation number.
1261 	 */
1262 	if (error == 0)
1263 		txg_wait_synced(spa->spa_dsl_pool, txg);
1264 
1265 	if (vd != NULL) {
1266 		ASSERT(!vd->vdev_detached || vd->vdev_dtl_sm == NULL);
1267 		if (vd->vdev_ops->vdev_op_leaf) {
1268 			mutex_enter(&vd->vdev_initialize_lock);
1269 			vdev_initialize_stop(vd, VDEV_INITIALIZE_CANCELED,
1270 			    NULL);
1271 			mutex_exit(&vd->vdev_initialize_lock);
1272 
1273 			mutex_enter(&vd->vdev_trim_lock);
1274 			vdev_trim_stop(vd, VDEV_TRIM_CANCELED, NULL);
1275 			mutex_exit(&vd->vdev_trim_lock);
1276 		}
1277 
1278 		/*
1279 		 * The vdev may be both a leaf and top-level device.
1280 		 */
1281 		vdev_autotrim_stop_wait(vd);
1282 
1283 		spa_config_enter(spa, SCL_STATE_ALL, spa, RW_WRITER);
1284 		vdev_free(vd);
1285 		spa_config_exit(spa, SCL_STATE_ALL, spa);
1286 	}
1287 
1288 	/*
1289 	 * If the config changed, update the config cache.
1290 	 */
1291 	if (config_changed)
1292 		spa_write_cachefile(spa, B_FALSE, B_TRUE);
1293 }
1294 
1295 /*
1296  * Unlock the spa_t after adding or removing a vdev.  Besides undoing the
1297  * locking of spa_vdev_enter(), we also want to make sure the transactions have
1298  * synced to disk, and then update the global configuration cache with the new
1299  * information.
1300  */
1301 int
1302 spa_vdev_exit(spa_t *spa, vdev_t *vd, uint64_t txg, int error)
1303 {
1304 	vdev_autotrim_restart(spa);
1305 	vdev_rebuild_restart(spa);
1306 
1307 	spa_vdev_config_exit(spa, vd, txg, error, FTAG);
1308 	mutex_exit(&spa_namespace_lock);
1309 	mutex_exit(&spa->spa_vdev_top_lock);
1310 
1311 	return (error);
1312 }
1313 
1314 /*
1315  * Lock the given spa_t for the purpose of changing vdev state.
1316  */
1317 void
1318 spa_vdev_state_enter(spa_t *spa, int oplocks)
1319 {
1320 	int locks = SCL_STATE_ALL | oplocks;
1321 
1322 	/*
1323  * Root pools may need to read from the underlying devfs filesystem
1324 	 * when opening up a vdev.  Unfortunately if we're holding the
1325 	 * SCL_ZIO lock it will result in a deadlock when we try to issue
1326 	 * the read from the root filesystem.  Instead we "prefetch"
1327 	 * the associated vnodes that we need prior to opening the
1328 	 * underlying devices and cache them so that we can prevent
1329 	 * any I/O when we are doing the actual open.
1330 	 */
1331 	if (spa_is_root(spa)) {
1332 		int low = locks & ~(SCL_ZIO - 1);
1333 		int high = locks & ~low;
1334 
1335 		spa_config_enter(spa, high, spa, RW_WRITER);
1336 		vdev_hold(spa->spa_root_vdev);
1337 		spa_config_enter(spa, low, spa, RW_WRITER);
1338 	} else {
1339 		spa_config_enter(spa, locks, spa, RW_WRITER);
1340 	}
1341 	spa->spa_vdev_locks = locks;
1342 }
1343 
1344 int
1345 spa_vdev_state_exit(spa_t *spa, vdev_t *vd, int error)
1346 {
1347 	boolean_t config_changed = B_FALSE;
1348 	vdev_t *vdev_top;
1349 
1350 	if (vd == NULL || vd == spa->spa_root_vdev) {
1351 		vdev_top = spa->spa_root_vdev;
1352 	} else {
1353 		vdev_top = vd->vdev_top;
1354 	}
1355 
1356 	if (vd != NULL || error == 0)
1357 		vdev_dtl_reassess(vdev_top, 0, 0, B_FALSE, B_FALSE);
1358 
1359 	if (vd != NULL) {
1360 		if (vd != spa->spa_root_vdev)
1361 			vdev_state_dirty(vdev_top);
1362 
1363 		config_changed = B_TRUE;
1364 		spa->spa_config_generation++;
1365 	}
1366 
1367 	if (spa_is_root(spa))
1368 		vdev_rele(spa->spa_root_vdev);
1369 
1370 	ASSERT3U(spa->spa_vdev_locks, >=, SCL_STATE_ALL);
1371 	spa_config_exit(spa, spa->spa_vdev_locks, spa);
1372 
1373 	/*
1374 	 * If anything changed, wait for it to sync.  This ensures that,
1375 	 * from the system administrator's perspective, zpool(8) commands
1376 	 * are synchronous.  This is important for things like zpool offline:
1377 	 * when the command completes, you expect no further I/O from ZFS.
1378 	 */
1379 	if (vd != NULL)
1380 		txg_wait_synced(spa->spa_dsl_pool, 0);
1381 
1382 	/*
1383 	 * If the config changed, update the config cache.
1384 	 */
1385 	if (config_changed) {
1386 		mutex_enter(&spa_namespace_lock);
1387 		spa_write_cachefile(spa, B_FALSE, B_TRUE);
1388 		mutex_exit(&spa_namespace_lock);
1389 	}
1390 
1391 	return (error);
1392 }
1393 
1394 /*
1395  * ==========================================================================
1396  * Miscellaneous functions
1397  * ==========================================================================
1398  */
1399 
1400 void
1401 spa_activate_mos_feature(spa_t *spa, const char *feature, dmu_tx_t *tx)
1402 {
1403 	if (!nvlist_exists(spa->spa_label_features, feature)) {
1404 		fnvlist_add_boolean(spa->spa_label_features, feature);
1405 		/*
1406 		 * When we are creating the pool (tx_txg==TXG_INITIAL), we can't
1407 		 * dirty the vdev config because the SCL_CONFIG lock is not held.
1408 		 * Thankfully, in this case we don't need to dirty the config
1409 		 * because it will be written out anyway when we finish
1410 		 * creating the pool.
1411 		 */
1412 		if (tx->tx_txg != TXG_INITIAL)
1413 			vdev_config_dirty(spa->spa_root_vdev);
1414 	}
1415 }
1416 
1417 void
1418 spa_deactivate_mos_feature(spa_t *spa, const char *feature)
1419 {
1420 	if (nvlist_remove_all(spa->spa_label_features, feature) == 0)
1421 		vdev_config_dirty(spa->spa_root_vdev);
1422 }
1423 
1424 /*
1425  * Return the spa_t associated with given pool_guid, if it exists.  If
1426  * device_guid is non-zero, determine whether the pool exists *and* contains
1427  * a device with the specified device_guid.
1428  */
1429 spa_t *
1430 spa_by_guid(uint64_t pool_guid, uint64_t device_guid)
1431 {
1432 	spa_t *spa;
1433 	avl_tree_t *t = &spa_namespace_avl;
1434 
1435 	ASSERT(MUTEX_HELD(&spa_namespace_lock));
1436 
1437 	for (spa = avl_first(t); spa != NULL; spa = AVL_NEXT(t, spa)) {
1438 		if (spa->spa_state == POOL_STATE_UNINITIALIZED)
1439 			continue;
1440 		if (spa->spa_root_vdev == NULL)
1441 			continue;
1442 		if (spa_guid(spa) == pool_guid) {
1443 			if (device_guid == 0)
1444 				break;
1445 
1446 			if (vdev_lookup_by_guid(spa->spa_root_vdev,
1447 			    device_guid) != NULL)
1448 				break;
1449 
1450 			/*
1451 			 * Check any devices we may be in the process of adding.
1452 			 */
1453 			if (spa->spa_pending_vdev) {
1454 				if (vdev_lookup_by_guid(spa->spa_pending_vdev,
1455 				    device_guid) != NULL)
1456 					break;
1457 			}
1458 		}
1459 	}
1460 
1461 	return (spa);
1462 }
1463 
1464 /*
1465  * Determine whether a pool with the given pool_guid exists.
1466  */
1467 boolean_t
1468 spa_guid_exists(uint64_t pool_guid, uint64_t device_guid)
1469 {
1470 	return (spa_by_guid(pool_guid, device_guid) != NULL);
1471 }
1472 
1473 char *
1474 spa_strdup(const char *s)
1475 {
1476 	size_t len;
1477 	char *new;
1478 
1479 	len = strlen(s);
1480 	new = kmem_alloc(len + 1, KM_SLEEP);
1481 	memcpy(new, s, len + 1);
1482 
1483 	return (new);
1484 }
1485 
1486 void
1487 spa_strfree(char *s)
1488 {
1489 	kmem_free(s, strlen(s) + 1);
1490 }
1491 
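/*
 * Generate a nonzero pseudo-random guid.  When 'spa' is non-NULL the guid
 * is made unique among that pool's device guids; otherwise it is made
 * unique among pool guids.
 */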
1492 uint64_t
1493 spa_generate_guid(spa_t *spa)
1494 {
1495 	uint64_t guid;
1496 
1497 	if (spa != NULL) {
1498 		do {
1499 			(void) random_get_pseudo_bytes((void *)&guid,
1500 			    sizeof (guid));
1501 		} while (guid == 0 || spa_guid_exists(spa_guid(spa), guid));
1502 	} else {
1503 		do {
1504 			(void) random_get_pseudo_bytes((void *)&guid,
1505 			    sizeof (guid));
1506 		} while (guid == 0 || spa_guid_exists(guid, 0));
1507 	}
1508 
1509 	return (guid);
1510 }
1511 
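/*
 * Format a human-readable summary of the block pointer 'bp' (type,
 * checksum, compression, etc.) into 'buf'.  A NULL bp is handled.
 */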
1512 void
1513 snprintf_blkptr(char *buf, size_t buflen, const blkptr_t *bp)
1514 {
1515 	char type[256];
1516 	char *checksum = NULL;
1517 	char *compress = NULL;
1518 
1519 	if (bp != NULL) {
1520 		if (BP_GET_TYPE(bp) & DMU_OT_NEWTYPE) {
1521 			dmu_object_byteswap_t bswap =
1522 			    DMU_OT_BYTESWAP(BP_GET_TYPE(bp));
1523 			(void) snprintf(type, sizeof (type), "bswap %s %s",
1524 			    DMU_OT_IS_METADATA(BP_GET_TYPE(bp)) ?
1525 			    "metadata" : "data",
1526 			    dmu_ot_byteswap[bswap].ob_name);
1527 		} else {
1528 			(void) strlcpy(type, dmu_ot[BP_GET_TYPE(bp)].ot_name,
1529 			    sizeof (type));
1530 		}
1531 		if (!BP_IS_EMBEDDED(bp)) {
1532 			checksum =
1533 			    zio_checksum_table[BP_GET_CHECKSUM(bp)].ci_name;
1534 		}
1535 		compress = zio_compress_table[BP_GET_COMPRESS(bp)].ci_name;
1536 	}
1537 
1538 	SNPRINTF_BLKPTR(snprintf, ' ', buf, buflen, bp, type, checksum,
1539 	    compress);
1540 }
1541 
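/*
 * Freeze the pool: arrange for transaction syncing to stop TXG_SIZE txgs
 * after the last synced txg, then wait for that final txg to sync.
 * Typically used for testing.
 */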
1542 void
1543 spa_freeze(spa_t *spa)
1544 {
1545 	uint64_t freeze_txg = 0;
1546 
1547 	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
1548 	if (spa->spa_freeze_txg == UINT64_MAX) {
1549 		freeze_txg = spa_last_synced_txg(spa) + TXG_SIZE;
1550 		spa->spa_freeze_txg = freeze_txg;
1551 	}
1552 	spa_config_exit(spa, SCL_ALL, FTAG);
1553 	if (freeze_txg != 0)
1554 		txg_wait_synced(spa_get_dsl(spa), freeze_txg);
1555 }
1556 
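/*
 * Panic on otherwise-fatal conditions, unless zfs_recover is set, in which
 * case only a warning is logged (see the zfs_recover comment above).
 */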
1557 void
1558 zfs_panic_recover(const char *fmt, ...)
1559 {
1560 	va_list adx;
1561 
1562 	va_start(adx, fmt);
1563 	vcmn_err(zfs_recover ? CE_WARN : CE_PANIC, fmt, adx);
1564 	va_end(adx);
1565 }
1566 
1567 /*
1568  * This is a stripped-down version of strtoull, suitable only for converting
1569  * lowercase hexadecimal numbers that don't overflow.
1570  */
1571 uint64_t
1572 zfs_strtonum(const char *str, char **nptr)
1573 {
1574 	uint64_t val = 0;
1575 	char c;
1576 	int digit;
1577 
1578 	while ((c = *str) != '\0') {
1579 		if (c >= '0' && c <= '9')
1580 			digit = c - '0';
1581 		else if (c >= 'a' && c <= 'f')
1582 			digit = 10 + c - 'a';
1583 		else
1584 			break;
1585 
1586 		val *= 16;
1587 		val += digit;
1588 
1589 		str++;
1590 	}
1591 
1592 	if (nptr)
1593 		*nptr = (char *)str;
1594 
1595 	return (val);
1596 }
1597 
1598 void
1599 spa_activate_allocation_classes(spa_t *spa, dmu_tx_t *tx)
1600 {
1601 	/*
1602 	 * We bump the feature refcount for each special vdev added to the pool
1603 	 */
1604 	ASSERT(spa_feature_is_enabled(spa, SPA_FEATURE_ALLOCATION_CLASSES));
1605 	spa_feature_incr(spa, SPA_FEATURE_ALLOCATION_CLASSES, tx);
1606 }
1607 
1608 /*
1609  * ==========================================================================
1610  * Accessor functions
1611  * ==========================================================================
1612  */
1613 
1614 boolean_t
1615 spa_shutting_down(spa_t *spa)
1616 {
1617 	return (spa->spa_async_suspended);
1618 }
1619 
1620 dsl_pool_t *
1621 spa_get_dsl(spa_t *spa)
1622 {
1623 	return (spa->spa_dsl_pool);
1624 }
1625 
1626 boolean_t
1627 spa_is_initializing(spa_t *spa)
1628 {
1629 	return (spa->spa_is_initializing);
1630 }
1631 
1632 boolean_t
1633 spa_indirect_vdevs_loaded(spa_t *spa)
1634 {
1635 	return (spa->spa_indirect_vdevs_loaded);
1636 }
1637 
1638 blkptr_t *
1639 spa_get_rootblkptr(spa_t *spa)
1640 {
1641 	return (&spa->spa_ubsync.ub_rootbp);
1642 }
1643 
1644 void
1645 spa_set_rootblkptr(spa_t *spa, const blkptr_t *bp)
1646 {
1647 	spa->spa_uberblock.ub_rootbp = *bp;
1648 }
1649 
1650 void
1651 spa_altroot(spa_t *spa, char *buf, size_t buflen)
1652 {
1653 	if (spa->spa_root == NULL)
1654 		buf[0] = '\0';
1655 	else
1656 		(void) strncpy(buf, spa->spa_root, buflen);
1657 }
1658 
1659 int
1660 spa_sync_pass(spa_t *spa)
1661 {
1662 	return (spa->spa_sync_pass);
1663 }
1664 
1665 char *
1666 spa_name(spa_t *spa)
1667 {
1668 	return (spa->spa_name);
1669 }
1670 
1671 uint64_t
1672 spa_guid(spa_t *spa)
1673 {
1674 	dsl_pool_t *dp = spa_get_dsl(spa);
1675 	uint64_t guid;
1676 
1677 	/*
1678 	 * If we fail to parse the config during spa_load(), we can go through
1679 	 * the error path (which posts an ereport) and end up here with no root
1680 	 * vdev.  We stash the original pool guid in 'spa_config_guid' to handle
1681 	 * this case.
1682 	 */
1683 	if (spa->spa_root_vdev == NULL)
1684 		return (spa->spa_config_guid);
1685 
1686 	guid = spa->spa_last_synced_guid != 0 ?
1687 	    spa->spa_last_synced_guid : spa->spa_root_vdev->vdev_guid;
1688 
1689 	/*
1690 	 * Return the most recently synced out guid unless we're
1691 	 * in syncing context.
1692 	 */
1693 	if (dp && dsl_pool_sync_context(dp))
1694 		return (spa->spa_root_vdev->vdev_guid);
1695 	else
1696 		return (guid);
1697 }
1698 
1699 uint64_t
1700 spa_load_guid(spa_t *spa)
1701 {
1702 	/*
1703 	 * This is a GUID that exists solely as a reference for the
1704 	 * purposes of the ARC.  It is generated at load time, and
1705 	 * is never written to persistent storage.
1706 	 */
1707 	return (spa->spa_load_guid);
1708 }
1709 
1710 uint64_t
1711 spa_last_synced_txg(spa_t *spa)
1712 {
1713 	return (spa->spa_ubsync.ub_txg);
1714 }
1715 
1716 uint64_t
1717 spa_first_txg(spa_t *spa)
1718 {
1719 	return (spa->spa_first_txg);
1720 }
1721 
1722 uint64_t
1723 spa_syncing_txg(spa_t *spa)
1724 {
1725 	return (spa->spa_syncing_txg);
1726 }
1727 
1728 /*
1729  * Return the last txg where data can be dirtied. The final txgs
1730  * will be used to just clear out any deferred frees that remain.
1731  */
1732 uint64_t
1733 spa_final_dirty_txg(spa_t *spa)
1734 {
1735 	return (spa->spa_final_txg - TXG_DEFER_SIZE);
1736 }
1737 
1738 pool_state_t
1739 spa_state(spa_t *spa)
1740 {
1741 	return (spa->spa_state);
1742 }
1743 
1744 spa_load_state_t
1745 spa_load_state(spa_t *spa)
1746 {
1747 	return (spa->spa_load_state);
1748 }
1749 
1750 uint64_t
1751 spa_freeze_txg(spa_t *spa)
1752 {
1753 	return (spa->spa_freeze_txg);
1754 }
1755 
1756 /*
1757  * Return the inflated asize for a logical write in bytes. This is used by the
1758  * DMU to calculate the space a logical write will require on disk.
1759  * If lsize is smaller than the largest physical block size allocatable on this
1760  * pool, we use its value instead, since the write will end up using the whole
1761  * block anyway.
1762  */
1763 uint64_t
1764 spa_get_worst_case_asize(spa_t *spa, uint64_t lsize)
1765 {
1766 	if (lsize == 0)
1767 		return (0);	/* No inflation needed */
1768 	return (MAX(lsize, 1 << spa->spa_max_ashift) * spa_asize_inflation);
1769 }
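/*
 * Editorial example (illustrative only, not part of the implementation):
 * assuming the default spa_asize_inflation of 24 and a pool whose largest
 * ashift is 12 (4 KiB physical blocks), a 512-byte logical write is charged
 *
 *	MAX(512, 1 << 12) * 24 = 98304 bytes (96 KiB)
 *
 * of worst-case asize, since the write is rounded up to a full physical
 * block and then inflated to cover RAID-Z parity, ditto copies, and
 * indirect-block overhead.
 */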
1770 
1771 /*
1772  * Return the amount of slop space in bytes.  It is typically 1/32 of the pool
1773  * (3.2%), minus the embedded log space.  On very small pools, it may be
1774  * slightly larger than this.  On very large pools, it will be capped to
1775  * the value of spa_max_slop.  The embedded log space is not included in
1776  * spa_dspace.  By subtracting it, the usable space (per "zfs list") is a
1777  * constant 97% of the total space, regardless of metaslab size (assuming the
1778  * default spa_slop_shift=5 and a non-tiny pool).
1779  *
1780  * See the comment above spa_slop_shift for more details.
1781  */
1782 uint64_t
1783 spa_get_slop_space(spa_t *spa)
1784 {
1785 	uint64_t space = 0;
1786 	uint64_t slop = 0;
1787 
1788 	/*
1789 	 * Make sure spa_dedup_dspace has been set.
1790 	 */
1791 	if (spa->spa_dedup_dspace == ~0ULL)
1792 		spa_update_dspace(spa);
1793 
1794 	/*
1795 	 * spa_get_dspace() includes the space only logically "used" by
1796 	 * deduplicated data.  Since it is not useful to reserve more space
1797 	 * for more deduplicated data, we subtract that out here.
1798 	 */
1799 	space = spa_get_dspace(spa) - spa->spa_dedup_dspace;
1800 	slop = MIN(space >> spa_slop_shift, spa_max_slop);
1801 
1802 	/*
1803 	 * Subtract the embedded log space, but no more than half the (3.2%)
1804 	 * unusable space.  Note, the "no more than half" is only relevant if
1805 	 * zfs_embedded_slog_min_ms >> spa_slop_shift < 2, which is not true by
1806 	 * default.
1807 	 */
1808 	uint64_t embedded_log =
1809 	    metaslab_class_get_dspace(spa_embedded_log_class(spa));
1810 	slop -= MIN(embedded_log, slop >> 1);
1811 
1812 	/*
1813 	 * Slop space should be at least spa_min_slop, but no more than half
1814 	 * the entire pool.
1815 	 */
1816 	slop = MAX(slop, MIN(space >> 1, spa_min_slop));
1817 	return (slop);
1818 }
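/*
 * Editorial example of the calculation above (illustrative only, assuming
 * the defaults spa_slop_shift = 5, spa_min_slop = 128 MiB, and
 * spa_max_slop = 128 GiB at the time of writing): on a 10 TiB pool with no
 * dedup and a negligible embedded log class,
 *
 *	space = 10 TiB
 *	slop  = MIN(10 TiB >> 5, 128 GiB) = MIN(320 GiB, 128 GiB) = 128 GiB
 *
 * so only about 1.25% of this pool is held back, rather than the ~3.2%
 * seen on pools small enough not to hit the spa_max_slop cap.
 */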
1819 
1820 uint64_t
1821 spa_get_dspace(spa_t *spa)
1822 {
1823 	return (spa->spa_dspace);
1824 }
1825 
1826 uint64_t
1827 spa_get_checkpoint_space(spa_t *spa)
1828 {
1829 	return (spa->spa_checkpoint_info.sci_dspace);
1830 }
1831 
1832 void
1833 spa_update_dspace(spa_t *spa)
1834 {
1835 	spa->spa_dspace = metaslab_class_get_dspace(spa_normal_class(spa)) +
1836 	    ddt_get_dedup_dspace(spa);
1837 	if (spa->spa_nonallocating_dspace > 0) {
1838 		/*
1839 		 * Subtract the space provided by all non-allocating vdevs that
1840 		 * contribute to dspace.  If a file is overwritten, its old
1841 		 * blocks are freed and new blocks are allocated.  If there are
1842 		 * no snapshots of the file, the available space should remain
1843 		 * the same.  The old blocks could be freed from the
1844 		 * non-allocating vdev, but the new blocks must be allocated on
1845 		 * other (allocating) vdevs.  By reserving the entire size of
1846 		 * the non-allocating vdevs (including allocated space), we
1847 		 * ensure that there will be enough space on the allocating
1848 		 * vdevs for this file overwrite to succeed.
1849 		 *
1850 		 * Note that the DMU/DSL doesn't actually know or care
1851 		 * how much space is allocated (it does its own tracking
1852 		 * of how much space has been logically used).  So it
1853 		 * doesn't matter that the data we are moving may be
1854 		 * allocated twice (on the old device and the new device).
1855 		 */
1856 		ASSERT3U(spa->spa_dspace, >=, spa->spa_nonallocating_dspace);
1857 		spa->spa_dspace -= spa->spa_nonallocating_dspace;
1858 	}
1859 }
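/*
 * Editorial example of the adjustment above (illustrative numbers): while
 * evacuating a 1 TiB non-allocating vdev from a pool whose metaslab classes
 * and DDT report 20 TiB of dspace, spa_dspace is presented as 19 TiB.
 * Overwrites of data still resident on the outgoing vdev must allocate new
 * blocks on the remaining vdevs, so the entire size of the outgoing vdev is
 * withheld from the reported space.
 */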
1860 
1861 /*
1862  * Return the failure mode that has been set for this pool.  The default
1863  * behavior is to block all I/Os when a complete failure occurs.
1864  */
1865 uint64_t
1866 spa_get_failmode(spa_t *spa)
1867 {
1868 	return (spa->spa_failmode);
1869 }
1870 
1871 boolean_t
1872 spa_suspended(spa_t *spa)
1873 {
1874 	return (spa->spa_suspended != ZIO_SUSPEND_NONE);
1875 }
1876 
1877 uint64_t
1878 spa_version(spa_t *spa)
1879 {
1880 	return (spa->spa_ubsync.ub_version);
1881 }
1882 
1883 boolean_t
1884 spa_deflate(spa_t *spa)
1885 {
1886 	return (spa->spa_deflate);
1887 }
1888 
1889 metaslab_class_t *
1890 spa_normal_class(spa_t *spa)
1891 {
1892 	return (spa->spa_normal_class);
1893 }
1894 
1895 metaslab_class_t *
1896 spa_log_class(spa_t *spa)
1897 {
1898 	return (spa->spa_log_class);
1899 }
1900 
1901 metaslab_class_t *
1902 spa_embedded_log_class(spa_t *spa)
1903 {
1904 	return (spa->spa_embedded_log_class);
1905 }
1906 
1907 metaslab_class_t *
1908 spa_special_class(spa_t *spa)
1909 {
1910 	return (spa->spa_special_class);
1911 }
1912 
1913 metaslab_class_t *
1914 spa_dedup_class(spa_t *spa)
1915 {
1916 	return (spa->spa_dedup_class);
1917 }
1918 
1919 /*
1920  * Locate an appropriate allocation class
1921  */
1922 metaslab_class_t *
1923 spa_preferred_class(spa_t *spa, uint64_t size, dmu_object_type_t objtype,
1924     uint_t level, uint_t special_smallblk)
1925 {
1926 	/*
1927 	 * ZIL allocations determine their class in zio_alloc_zil().
1928 	 */
1929 	ASSERT(objtype != DMU_OT_INTENT_LOG);
1930 
1931 	boolean_t has_special_class = spa->spa_special_class->mc_groups != 0;
1932 
1933 	if (DMU_OT_IS_DDT(objtype)) {
1934 		if (spa->spa_dedup_class->mc_groups != 0)
1935 			return (spa_dedup_class(spa));
1936 		else if (has_special_class && zfs_ddt_data_is_special)
1937 			return (spa_special_class(spa));
1938 		else
1939 			return (spa_normal_class(spa));
1940 	}
1941 
1942 	/* Indirect blocks for user data can land in special if allowed */
1943 	if (level > 0 && (DMU_OT_IS_FILE(objtype) || objtype == DMU_OT_ZVOL)) {
1944 		if (has_special_class && zfs_user_indirect_is_special)
1945 			return (spa_special_class(spa));
1946 		else
1947 			return (spa_normal_class(spa));
1948 	}
1949 
1950 	if (DMU_OT_IS_METADATA(objtype) || level > 0) {
1951 		if (has_special_class)
1952 			return (spa_special_class(spa));
1953 		else
1954 			return (spa_normal_class(spa));
1955 	}
1956 
1957 	/*
1958 	 * Allow small file blocks in the special class in some cases (e.g.
1959 	 * for the dRAID vdev feature), but always leave a reserve of
1960 	 * zfs_special_class_metadata_reserve_pct exclusively for metadata.
1961 	 */
1962 	if (DMU_OT_IS_FILE(objtype) &&
1963 	    has_special_class && size <= special_smallblk) {
1964 		metaslab_class_t *special = spa_special_class(spa);
1965 		uint64_t alloc = metaslab_class_get_alloc(special);
1966 		uint64_t space = metaslab_class_get_space(special);
1967 		uint64_t limit =
1968 		    (space * (100 - zfs_special_class_metadata_reserve_pct))
1969 		    / 100;
1970 
1971 		if (alloc < limit)
1972 			return (special);
1973 	}
1974 
1975 	return (spa_normal_class(spa));
1976 }
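/*
 * Editorial example of the selection logic above (illustrative, assuming
 * the default zfs_special_class_metadata_reserve_pct of 25): on a pool with
 * a special vdev and special_smallblk == 32768 (typically derived from a
 * dataset's special_small_blocks property), a level-0 16K file data block
 * is placed in the special class as long as less than 75% of that class is
 * allocated, while a 128K file data block always falls through to the
 * normal class.
 */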
1977 
1978 void
1979 spa_evicting_os_register(spa_t *spa, objset_t *os)
1980 {
1981 	mutex_enter(&spa->spa_evicting_os_lock);
1982 	list_insert_head(&spa->spa_evicting_os_list, os);
1983 	mutex_exit(&spa->spa_evicting_os_lock);
1984 }
1985 
1986 void
1987 spa_evicting_os_deregister(spa_t *spa, objset_t *os)
1988 {
1989 	mutex_enter(&spa->spa_evicting_os_lock);
1990 	list_remove(&spa->spa_evicting_os_list, os);
1991 	cv_broadcast(&spa->spa_evicting_os_cv);
1992 	mutex_exit(&spa->spa_evicting_os_lock);
1993 }
1994 
1995 void
1996 spa_evicting_os_wait(spa_t *spa)
1997 {
1998 	mutex_enter(&spa->spa_evicting_os_lock);
1999 	while (!list_is_empty(&spa->spa_evicting_os_list))
2000 		cv_wait(&spa->spa_evicting_os_cv, &spa->spa_evicting_os_lock);
2001 	mutex_exit(&spa->spa_evicting_os_lock);
2002 
2003 	dmu_buf_user_evict_wait();
2004 }
2005 
2006 int
2007 spa_max_replication(spa_t *spa)
2008 {
2009 	/*
2010 	 * As of SPA_VERSION == SPA_VERSION_DITTO_BLOCKS, we are able to
2011 	 * handle BPs with more than one DVA allocated.  Set our max
2012 	 * replication level accordingly.
2013 	 */
2014 	if (spa_version(spa) < SPA_VERSION_DITTO_BLOCKS)
2015 		return (1);
2016 	return (MIN(SPA_DVAS_PER_BP, spa_max_replication_override));
2017 }
2018 
2019 int
2020 spa_prev_software_version(spa_t *spa)
2021 {
2022 	return (spa->spa_prev_software_version);
2023 }
2024 
2025 uint64_t
2026 spa_deadman_synctime(spa_t *spa)
2027 {
2028 	return (spa->spa_deadman_synctime);
2029 }
2030 
2031 spa_autotrim_t
2032 spa_get_autotrim(spa_t *spa)
2033 {
2034 	return (spa->spa_autotrim);
2035 }
2036 
2037 uint64_t
2038 spa_deadman_ziotime(spa_t *spa)
2039 {
2040 	return (spa->spa_deadman_ziotime);
2041 }
2042 
2043 uint64_t
2044 spa_get_deadman_failmode(spa_t *spa)
2045 {
2046 	return (spa->spa_deadman_failmode);
2047 }
2048 
2049 void
2050 spa_set_deadman_failmode(spa_t *spa, const char *failmode)
2051 {
2052 	if (strcmp(failmode, "wait") == 0)
2053 		spa->spa_deadman_failmode = ZIO_FAILURE_MODE_WAIT;
2054 	else if (strcmp(failmode, "continue") == 0)
2055 		spa->spa_deadman_failmode = ZIO_FAILURE_MODE_CONTINUE;
2056 	else if (strcmp(failmode, "panic") == 0)
2057 		spa->spa_deadman_failmode = ZIO_FAILURE_MODE_PANIC;
2058 	else
2059 		spa->spa_deadman_failmode = ZIO_FAILURE_MODE_WAIT;
2060 }
2061 
2062 void
2063 spa_set_deadman_ziotime(hrtime_t ns)
2064 {
2065 	spa_t *spa = NULL;
2066 
2067 	if (spa_mode_global != SPA_MODE_UNINIT) {
2068 		mutex_enter(&spa_namespace_lock);
2069 		while ((spa = spa_next(spa)) != NULL)
2070 			spa->spa_deadman_ziotime = ns;
2071 		mutex_exit(&spa_namespace_lock);
2072 	}
2073 }
2074 
2075 void
2076 spa_set_deadman_synctime(hrtime_t ns)
2077 {
2078 	spa_t *spa = NULL;
2079 
2080 	if (spa_mode_global != SPA_MODE_UNINIT) {
2081 		mutex_enter(&spa_namespace_lock);
2082 		while ((spa = spa_next(spa)) != NULL)
2083 			spa->spa_deadman_synctime = ns;
2084 		mutex_exit(&spa_namespace_lock);
2085 	}
2086 }
2087 
2088 uint64_t
2089 dva_get_dsize_sync(spa_t *spa, const dva_t *dva)
2090 {
2091 	uint64_t asize = DVA_GET_ASIZE(dva);
2092 	uint64_t dsize = asize;
2093 
2094 	ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0);
2095 
2096 	if (asize != 0 && spa->spa_deflate) {
2097 		vdev_t *vd = vdev_lookup_top(spa, DVA_GET_VDEV(dva));
2098 		if (vd != NULL)
2099 			dsize = (asize >> SPA_MINBLOCKSHIFT) *
2100 			    vd->vdev_deflate_ratio;
2101 	}
2102 
2103 	return (dsize);
2104 }
2105 
2106 uint64_t
2107 bp_get_dsize_sync(spa_t *spa, const blkptr_t *bp)
2108 {
2109 	uint64_t dsize = 0;
2110 
2111 	for (int d = 0; d < BP_GET_NDVAS(bp); d++)
2112 		dsize += dva_get_dsize_sync(spa, &bp->blk_dva[d]);
2113 
2114 	return (dsize);
2115 }
2116 
2117 uint64_t
2118 bp_get_dsize(spa_t *spa, const blkptr_t *bp)
2119 {
2120 	uint64_t dsize = 0;
2121 
2122 	spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
2123 
2124 	for (int d = 0; d < BP_GET_NDVAS(bp); d++)
2125 		dsize += dva_get_dsize_sync(spa, &bp->blk_dva[d]);
2126 
2127 	spa_config_exit(spa, SCL_VDEV, FTAG);
2128 
2129 	return (dsize);
2130 }
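/*
 * Editorial note: both bp_get_dsize_sync() and bp_get_dsize() sum the
 * deflated size over every DVA in the block pointer, so a block written
 * with copies=2 reports roughly twice the dsize of a single-copy block.
 * The only difference between them is locking: bp_get_dsize() acquires
 * SCL_VDEV itself, while the _sync variant asserts that the caller already
 * holds a config lock.
 */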
2131 
2132 uint64_t
2133 spa_dirty_data(spa_t *spa)
2134 {
2135 	return (spa->spa_dsl_pool->dp_dirty_total);
2136 }
2137 
2138 /*
2139  * ==========================================================================
2140  * SPA Import Progress Routines
2141  * ==========================================================================
2142  */
2143 
2144 typedef struct spa_import_progress {
2145 	uint64_t		pool_guid;	/* unique id for updates */
2146 	char			*pool_name;
2147 	spa_load_state_t	spa_load_state;
2148 	uint64_t		mmp_sec_remaining;	/* MMP activity check */
2149 	uint64_t		spa_load_max_txg;	/* rewind txg */
2150 	procfs_list_node_t	smh_node;
2151 } spa_import_progress_t;
2152 
2153 spa_history_list_t *spa_import_progress_list = NULL;
2154 
2155 static int
2156 spa_import_progress_show_header(struct seq_file *f)
2157 {
2158 	seq_printf(f, "%-20s %-14s %-14s %-12s %s\n", "pool_guid",
2159 	    "load_state", "multihost_secs", "max_txg",
2160 	    "pool_name");
2161 	return (0);
2162 }
2163 
2164 static int
2165 spa_import_progress_show(struct seq_file *f, void *data)
2166 {
2167 	spa_import_progress_t *sip = (spa_import_progress_t *)data;
2168 
2169 	seq_printf(f, "%-20llu %-14llu %-14llu %-12llu %s\n",
2170 	    (u_longlong_t)sip->pool_guid, (u_longlong_t)sip->spa_load_state,
2171 	    (u_longlong_t)sip->mmp_sec_remaining,
2172 	    (u_longlong_t)sip->spa_load_max_txg,
2173 	    (sip->pool_name ? sip->pool_name : "-"));
2174 
2175 	return (0);
2176 }
2177 
2178 /* Remove oldest elements from list until there are no more than 'size' left */
2179 static void
2180 spa_import_progress_truncate(spa_history_list_t *shl, unsigned int size)
2181 {
2182 	spa_import_progress_t *sip;
2183 	while (shl->size > size) {
2184 		sip = list_remove_head(&shl->procfs_list.pl_list);
2185 		if (sip->pool_name)
2186 			spa_strfree(sip->pool_name);
2187 		kmem_free(sip, sizeof (spa_import_progress_t));
2188 		shl->size--;
2189 	}
2190 
2191 	IMPLY(size == 0, list_is_empty(&shl->procfs_list.pl_list));
2192 }
2193 
2194 static void
2195 spa_import_progress_init(void)
2196 {
2197 	spa_import_progress_list = kmem_zalloc(sizeof (spa_history_list_t),
2198 	    KM_SLEEP);
2199 
2200 	spa_import_progress_list->size = 0;
2201 
2202 	spa_import_progress_list->procfs_list.pl_private =
2203 	    spa_import_progress_list;
2204 
2205 	procfs_list_install("zfs",
2206 	    NULL,
2207 	    "import_progress",
2208 	    0644,
2209 	    &spa_import_progress_list->procfs_list,
2210 	    spa_import_progress_show,
2211 	    spa_import_progress_show_header,
2212 	    NULL,
2213 	    offsetof(spa_import_progress_t, smh_node));
2214 }
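/*
 * Editorial example (values are made up): on Linux the list installed above
 * is typically exposed as /proc/spl/kstat/zfs/import_progress, and while an
 * import is running it reads roughly as follows, per the header and show
 * callbacks above:
 *
 *	pool_guid            load_state     multihost_secs max_txg      pool_name
 *	16783241424228061947 2              0              0            tank
 *
 * where load_state is the numeric spa_load_state_t value.
 */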
2215 
2216 static void
2217 spa_import_progress_destroy(void)
2218 {
2219 	spa_history_list_t *shl = spa_import_progress_list;
2220 	procfs_list_uninstall(&shl->procfs_list);
2221 	spa_import_progress_truncate(shl, 0);
2222 	procfs_list_destroy(&shl->procfs_list);
2223 	kmem_free(shl, sizeof (spa_history_list_t));
2224 }
2225 
2226 int
2227 spa_import_progress_set_state(uint64_t pool_guid,
2228     spa_load_state_t load_state)
2229 {
2230 	spa_history_list_t *shl = spa_import_progress_list;
2231 	spa_import_progress_t *sip;
2232 	int error = ENOENT;
2233 
2234 	if (shl->size == 0)
2235 		return (0);
2236 
2237 	mutex_enter(&shl->procfs_list.pl_lock);
2238 	for (sip = list_tail(&shl->procfs_list.pl_list); sip != NULL;
2239 	    sip = list_prev(&shl->procfs_list.pl_list, sip)) {
2240 		if (sip->pool_guid == pool_guid) {
2241 			sip->spa_load_state = load_state;
2242 			error = 0;
2243 			break;
2244 		}
2245 	}
2246 	mutex_exit(&shl->procfs_list.pl_lock);
2247 
2248 	return (error);
2249 }
2250 
2251 int
2252 spa_import_progress_set_max_txg(uint64_t pool_guid, uint64_t load_max_txg)
2253 {
2254 	spa_history_list_t *shl = spa_import_progress_list;
2255 	spa_import_progress_t *sip;
2256 	int error = ENOENT;
2257 
2258 	if (shl->size == 0)
2259 		return (0);
2260 
2261 	mutex_enter(&shl->procfs_list.pl_lock);
2262 	for (sip = list_tail(&shl->procfs_list.pl_list); sip != NULL;
2263 	    sip = list_prev(&shl->procfs_list.pl_list, sip)) {
2264 		if (sip->pool_guid == pool_guid) {
2265 			sip->spa_load_max_txg = load_max_txg;
2266 			error = 0;
2267 			break;
2268 		}
2269 	}
2270 	mutex_exit(&shl->procfs_list.pl_lock);
2271 
2272 	return (error);
2273 }
2274 
2275 int
2276 spa_import_progress_set_mmp_check(uint64_t pool_guid,
2277     uint64_t mmp_sec_remaining)
2278 {
2279 	spa_history_list_t *shl = spa_import_progress_list;
2280 	spa_import_progress_t *sip;
2281 	int error = ENOENT;
2282 
2283 	if (shl->size == 0)
2284 		return (0);
2285 
2286 	mutex_enter(&shl->procfs_list.pl_lock);
2287 	for (sip = list_tail(&shl->procfs_list.pl_list); sip != NULL;
2288 	    sip = list_prev(&shl->procfs_list.pl_list, sip)) {
2289 		if (sip->pool_guid == pool_guid) {
2290 			sip->mmp_sec_remaining = mmp_sec_remaining;
2291 			error = 0;
2292 			break;
2293 		}
2294 	}
2295 	mutex_exit(&shl->procfs_list.pl_lock);
2296 
2297 	return (error);
2298 }
2299 
2300 /*
2301  * A new import is in progress; add an entry.
2302  */
2303 void
2304 spa_import_progress_add(spa_t *spa)
2305 {
2306 	spa_history_list_t *shl = spa_import_progress_list;
2307 	spa_import_progress_t *sip;
2308 	char *poolname = NULL;
2309 
2310 	sip = kmem_zalloc(sizeof (spa_import_progress_t), KM_SLEEP);
2311 	sip->pool_guid = spa_guid(spa);
2312 
2313 	(void) nvlist_lookup_string(spa->spa_config, ZPOOL_CONFIG_POOL_NAME,
2314 	    &poolname);
2315 	if (poolname == NULL)
2316 		poolname = spa_name(spa);
2317 	sip->pool_name = spa_strdup(poolname);
2318 	sip->spa_load_state = spa_load_state(spa);
2319 
2320 	mutex_enter(&shl->procfs_list.pl_lock);
2321 	procfs_list_add(&shl->procfs_list, sip);
2322 	shl->size++;
2323 	mutex_exit(&shl->procfs_list.pl_lock);
2324 }
2325 
2326 void
2327 spa_import_progress_remove(uint64_t pool_guid)
2328 {
2329 	spa_history_list_t *shl = spa_import_progress_list;
2330 	spa_import_progress_t *sip;
2331 
2332 	mutex_enter(&shl->procfs_list.pl_lock);
2333 	for (sip = list_tail(&shl->procfs_list.pl_list); sip != NULL;
2334 	    sip = list_prev(&shl->procfs_list.pl_list, sip)) {
2335 		if (sip->pool_guid == pool_guid) {
2336 			if (sip->pool_name)
2337 				spa_strfree(sip->pool_name);
2338 			list_remove(&shl->procfs_list.pl_list, sip);
2339 			shl->size--;
2340 			kmem_free(sip, sizeof (spa_import_progress_t));
2341 			break;
2342 		}
2343 	}
2344 	mutex_exit(&shl->procfs_list.pl_lock);
2345 }
2346 
2347 /*
2348  * ==========================================================================
2349  * Initialization and Termination
2350  * ==========================================================================
2351  */
2352 
2353 static int
2354 spa_name_compare(const void *a1, const void *a2)
2355 {
2356 	const spa_t *s1 = a1;
2357 	const spa_t *s2 = a2;
2358 	int s;
2359 
2360 	s = strcmp(s1->spa_name, s2->spa_name);
2361 
2362 	return (TREE_ISIGN(s));
2363 }
2364 
2365 void
2366 spa_boot_init(void)
2367 {
2368 	spa_config_load();
2369 }
2370 
2371 void
2372 spa_init(spa_mode_t mode)
2373 {
2374 	mutex_init(&spa_namespace_lock, NULL, MUTEX_DEFAULT, NULL);
2375 	mutex_init(&spa_spare_lock, NULL, MUTEX_DEFAULT, NULL);
2376 	mutex_init(&spa_l2cache_lock, NULL, MUTEX_DEFAULT, NULL);
2377 	cv_init(&spa_namespace_cv, NULL, CV_DEFAULT, NULL);
2378 
2379 	avl_create(&spa_namespace_avl, spa_name_compare, sizeof (spa_t),
2380 	    offsetof(spa_t, spa_avl));
2381 
2382 	avl_create(&spa_spare_avl, spa_spare_compare, sizeof (spa_aux_t),
2383 	    offsetof(spa_aux_t, aux_avl));
2384 
2385 	avl_create(&spa_l2cache_avl, spa_l2cache_compare, sizeof (spa_aux_t),
2386 	    offsetof(spa_aux_t, aux_avl));
2387 
2388 	spa_mode_global = mode;
2389 
2390 #ifndef _KERNEL
2391 	if (spa_mode_global != SPA_MODE_READ && dprintf_find_string("watch")) {
2392 		struct sigaction sa;
2393 
2394 		sa.sa_flags = SA_SIGINFO;
2395 		sigemptyset(&sa.sa_mask);
2396 		sa.sa_sigaction = arc_buf_sigsegv;
2397 
2398 		if (sigaction(SIGSEGV, &sa, NULL) == -1) {
2399 			perror("could not enable watchpoints: "
2400 			    "sigaction(SIGSEGV, ...) = ");
2401 		} else {
2402 			arc_watch = B_TRUE;
2403 		}
2404 	}
2405 #endif
2406 
2407 	fm_init();
2408 	zfs_refcount_init();
2409 	unique_init();
2410 	zfs_btree_init();
2411 	metaslab_stat_init();
2412 	ddt_init();
2413 	zio_init();
2414 	dmu_init();
2415 	zil_init();
2416 	vdev_cache_stat_init();
2417 	vdev_mirror_stat_init();
2418 	vdev_raidz_math_init();
2419 	vdev_file_init();
2420 	zfs_prop_init();
2421 	chksum_init();
2422 	zpool_prop_init();
2423 	zpool_feature_init();
2424 	spa_config_load();
2425 	vdev_prop_init();
2426 	l2arc_start();
2427 	scan_init();
2428 	qat_init();
2429 	spa_import_progress_init();
2430 }
2431 
2432 void
2433 spa_fini(void)
2434 {
2435 	l2arc_stop();
2436 
2437 	spa_evict_all();
2438 
2439 	vdev_file_fini();
2440 	vdev_cache_stat_fini();
2441 	vdev_mirror_stat_fini();
2442 	vdev_raidz_math_fini();
2443 	chksum_fini();
2444 	zil_fini();
2445 	dmu_fini();
2446 	zio_fini();
2447 	ddt_fini();
2448 	metaslab_stat_fini();
2449 	zfs_btree_fini();
2450 	unique_fini();
2451 	zfs_refcount_fini();
2452 	fm_fini();
2453 	scan_fini();
2454 	qat_fini();
2455 	spa_import_progress_destroy();
2456 
2457 	avl_destroy(&spa_namespace_avl);
2458 	avl_destroy(&spa_spare_avl);
2459 	avl_destroy(&spa_l2cache_avl);
2460 
2461 	cv_destroy(&spa_namespace_cv);
2462 	mutex_destroy(&spa_namespace_lock);
2463 	mutex_destroy(&spa_spare_lock);
2464 	mutex_destroy(&spa_l2cache_lock);
2465 }
2466 
2467 /*
2468  * Return whether this pool has a dedicated slog device. No locking needed.
2469  * It's not a problem if the wrong answer is returned, as it's only used
2470  * for performance and not for correctness.
2471  */
2472 boolean_t
2473 spa_has_slogs(spa_t *spa)
2474 {
2475 	return (spa->spa_log_class->mc_groups != 0);
2476 }
2477 
2478 spa_log_state_t
2479 spa_get_log_state(spa_t *spa)
2480 {
2481 	return (spa->spa_log_state);
2482 }
2483 
2484 void
2485 spa_set_log_state(spa_t *spa, spa_log_state_t state)
2486 {
2487 	spa->spa_log_state = state;
2488 }
2489 
2490 boolean_t
2491 spa_is_root(spa_t *spa)
2492 {
2493 	return (spa->spa_is_root);
2494 }
2495 
2496 boolean_t
2497 spa_writeable(spa_t *spa)
2498 {
2499 	return (!!(spa->spa_mode & SPA_MODE_WRITE) && spa->spa_trust_config);
2500 }
2501 
2502 /*
2503  * Returns true if there is a pending sync task in any of the current
2504  * syncing txg, the current quiescing txg, or the current open txg.
2505  */
2506 boolean_t
2507 spa_has_pending_synctask(spa_t *spa)
2508 {
2509 	return (!txg_all_lists_empty(&spa->spa_dsl_pool->dp_sync_tasks) ||
2510 	    !txg_all_lists_empty(&spa->spa_dsl_pool->dp_early_sync_tasks));
2511 }
2512 
2513 spa_mode_t
2514 spa_mode(spa_t *spa)
2515 {
2516 	return (spa->spa_mode);
2517 }
2518 
2519 uint64_t
2520 spa_bootfs(spa_t *spa)
2521 {
2522 	return (spa->spa_bootfs);
2523 }
2524 
2525 uint64_t
2526 spa_delegation(spa_t *spa)
2527 {
2528 	return (spa->spa_delegation);
2529 }
2530 
2531 objset_t *
2532 spa_meta_objset(spa_t *spa)
2533 {
2534 	return (spa->spa_meta_objset);
2535 }
2536 
2537 enum zio_checksum
2538 spa_dedup_checksum(spa_t *spa)
2539 {
2540 	return (spa->spa_dedup_checksum);
2541 }
2542 
2543 /*
2544  * Reset pool scan stats for each scan pass (or reboot).
2545  */
2546 void
2547 spa_scan_stat_init(spa_t *spa)
2548 {
2549 	/* data not stored on disk */
2550 	spa->spa_scan_pass_start = gethrestime_sec();
2551 	if (dsl_scan_is_paused_scrub(spa->spa_dsl_pool->dp_scan))
2552 		spa->spa_scan_pass_scrub_pause = spa->spa_scan_pass_start;
2553 	else
2554 		spa->spa_scan_pass_scrub_pause = 0;
2555 	spa->spa_scan_pass_scrub_spent_paused = 0;
2556 	spa->spa_scan_pass_exam = 0;
2557 	spa->spa_scan_pass_issued = 0;
2558 	vdev_scan_stat_init(spa->spa_root_vdev);
2559 }
2560 
2561 /*
2562  * Get scan stats for zpool status reports
2563  */
2564 int
2565 spa_scan_get_stats(spa_t *spa, pool_scan_stat_t *ps)
2566 {
2567 	dsl_scan_t *scn = spa->spa_dsl_pool ? spa->spa_dsl_pool->dp_scan : NULL;
2568 
2569 	if (scn == NULL || scn->scn_phys.scn_func == POOL_SCAN_NONE)
2570 		return (SET_ERROR(ENOENT));
2571 	memset(ps, 0, sizeof (pool_scan_stat_t));
2572 
2573 	/* data stored on disk */
2574 	ps->pss_func = scn->scn_phys.scn_func;
2575 	ps->pss_state = scn->scn_phys.scn_state;
2576 	ps->pss_start_time = scn->scn_phys.scn_start_time;
2577 	ps->pss_end_time = scn->scn_phys.scn_end_time;
2578 	ps->pss_to_examine = scn->scn_phys.scn_to_examine;
2579 	ps->pss_examined = scn->scn_phys.scn_examined;
2580 	ps->pss_to_process = scn->scn_phys.scn_to_process;
2581 	ps->pss_processed = scn->scn_phys.scn_processed;
2582 	ps->pss_errors = scn->scn_phys.scn_errors;
2583 
2584 	/* data not stored on disk */
2585 	ps->pss_pass_exam = spa->spa_scan_pass_exam;
2586 	ps->pss_pass_start = spa->spa_scan_pass_start;
2587 	ps->pss_pass_scrub_pause = spa->spa_scan_pass_scrub_pause;
2588 	ps->pss_pass_scrub_spent_paused = spa->spa_scan_pass_scrub_spent_paused;
2589 	ps->pss_pass_issued = spa->spa_scan_pass_issued;
2590 	ps->pss_issued =
2591 	    scn->scn_issued_before_pass + spa->spa_scan_pass_issued;
2592 
2593 	return (0);
2594 }
2595 
2596 int
2597 spa_maxblocksize(spa_t *spa)
2598 {
2599 	if (spa_feature_is_enabled(spa, SPA_FEATURE_LARGE_BLOCKS))
2600 		return (SPA_MAXBLOCKSIZE);
2601 	else
2602 		return (SPA_OLD_MAXBLOCKSIZE);
2603 }
2604 
2605 
2606 /*
2607  * Returns the txg in which the last device removal completed.  No indirect
2608  * mappings have been added since this txg.
2609  */
2610 uint64_t
2611 spa_get_last_removal_txg(spa_t *spa)
2612 {
2613 	uint64_t vdevid;
2614 	uint64_t ret = -1ULL;
2615 
2616 	spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
2617 	/*
2618 	 * sr_prev_indirect_vdev is only modified while holding all the
2619 	 * config locks, so it is sufficient to hold SCL_VDEV as reader when
2620 	 * examining it.
2621 	 */
2622 	vdevid = spa->spa_removing_phys.sr_prev_indirect_vdev;
2623 
2624 	while (vdevid != -1ULL) {
2625 		vdev_t *vd = vdev_lookup_top(spa, vdevid);
2626 		vdev_indirect_births_t *vib = vd->vdev_indirect_births;
2627 
2628 		ASSERT3P(vd->vdev_ops, ==, &vdev_indirect_ops);
2629 
2630 		/*
2631 		 * If the removal did not remap any data, we don't care.
2632 		 */
2633 		if (vdev_indirect_births_count(vib) != 0) {
2634 			ret = vdev_indirect_births_last_entry_txg(vib);
2635 			break;
2636 		}
2637 
2638 		vdevid = vd->vdev_indirect_config.vic_prev_indirect_vdev;
2639 	}
2640 	spa_config_exit(spa, SCL_VDEV, FTAG);
2641 
2642 	IMPLY(ret != -1ULL,
2643 	    spa_feature_is_active(spa, SPA_FEATURE_DEVICE_REMOVAL));
2644 
2645 	return (ret);
2646 }
2647 
2648 int
2649 spa_maxdnodesize(spa_t *spa)
2650 {
2651 	if (spa_feature_is_enabled(spa, SPA_FEATURE_LARGE_DNODE))
2652 		return (DNODE_MAX_SIZE);
2653 	else
2654 		return (DNODE_MIN_SIZE);
2655 }
2656 
2657 boolean_t
2658 spa_multihost(spa_t *spa)
2659 {
2660 	return (spa->spa_multihost ? B_TRUE : B_FALSE);
2661 }
2662 
2663 uint32_t
2664 spa_get_hostid(spa_t *spa)
2665 {
2666 	return (spa->spa_hostid);
2667 }
2668 
2669 boolean_t
2670 spa_trust_config(spa_t *spa)
2671 {
2672 	return (spa->spa_trust_config);
2673 }
2674 
2675 uint64_t
2676 spa_missing_tvds_allowed(spa_t *spa)
2677 {
2678 	return (spa->spa_missing_tvds_allowed);
2679 }
2680 
2681 space_map_t *
2682 spa_syncing_log_sm(spa_t *spa)
2683 {
2684 	return (spa->spa_syncing_log_sm);
2685 }
2686 
2687 void
2688 spa_set_missing_tvds(spa_t *spa, uint64_t missing)
2689 {
2690 	spa->spa_missing_tvds = missing;
2691 }
2692 
2693 /*
2694  * Return the pool state string ("ONLINE", "DEGRADED", "SUSPENDED", etc.).
2695  */
2696 const char *
2697 spa_state_to_name(spa_t *spa)
2698 {
2699 	ASSERT3P(spa, !=, NULL);
2700 
2701 	/*
2702 	 * It is possible for the spa to exist without a root vdev while the
2703 	 * spa transitions during import/export.
2704 	 */
2705 	vdev_t *rvd = spa->spa_root_vdev;
2706 	if (rvd == NULL) {
2707 		return ("TRANSITIONING");
2708 	}
2709 	vdev_state_t state = rvd->vdev_state;
2710 	vdev_aux_t aux = rvd->vdev_stat.vs_aux;
2711 
2712 	if (spa_suspended(spa) &&
2713 	    (spa_get_failmode(spa) != ZIO_FAILURE_MODE_CONTINUE))
2714 		return ("SUSPENDED");
2715 
2716 	switch (state) {
2717 	case VDEV_STATE_CLOSED:
2718 	case VDEV_STATE_OFFLINE:
2719 		return ("OFFLINE");
2720 	case VDEV_STATE_REMOVED:
2721 		return ("REMOVED");
2722 	case VDEV_STATE_CANT_OPEN:
2723 		if (aux == VDEV_AUX_CORRUPT_DATA || aux == VDEV_AUX_BAD_LOG)
2724 			return ("FAULTED");
2725 		else if (aux == VDEV_AUX_SPLIT_POOL)
2726 			return ("SPLIT");
2727 		else
2728 			return ("UNAVAIL");
2729 	case VDEV_STATE_FAULTED:
2730 		return ("FAULTED");
2731 	case VDEV_STATE_DEGRADED:
2732 		return ("DEGRADED");
2733 	case VDEV_STATE_HEALTHY:
2734 		return ("ONLINE");
2735 	default:
2736 		break;
2737 	}
2738 
2739 	return ("UNKNOWN");
2740 }
2741 
2742 boolean_t
2743 spa_top_vdevs_spacemap_addressable(spa_t *spa)
2744 {
2745 	vdev_t *rvd = spa->spa_root_vdev;
2746 	for (uint64_t c = 0; c < rvd->vdev_children; c++) {
2747 		if (!vdev_is_spacemap_addressable(rvd->vdev_child[c]))
2748 			return (B_FALSE);
2749 	}
2750 	return (B_TRUE);
2751 }
2752 
2753 boolean_t
2754 spa_has_checkpoint(spa_t *spa)
2755 {
2756 	return (spa->spa_checkpoint_txg != 0);
2757 }
2758 
2759 boolean_t
2760 spa_importing_readonly_checkpoint(spa_t *spa)
2761 {
2762 	return ((spa->spa_import_flags & ZFS_IMPORT_CHECKPOINT) &&
2763 	    spa->spa_mode == SPA_MODE_READ);
2764 }
2765 
2766 uint64_t
2767 spa_min_claim_txg(spa_t *spa)
2768 {
2769 	uint64_t checkpoint_txg = spa->spa_uberblock.ub_checkpoint_txg;
2770 
2771 	if (checkpoint_txg != 0)
2772 		return (checkpoint_txg + 1);
2773 
2774 	return (spa->spa_first_txg);
2775 }
2776 
2777 /*
2778  * If there is a checkpoint, async destroys may consume more space from
2779  * the pool instead of freeing it. In an attempt to save the pool from
2780  * getting suspended when it is about to run out of space, we stop
2781  * processing async destroys.
2782  */
2783 boolean_t
2784 spa_suspend_async_destroy(spa_t *spa)
2785 {
2786 	dsl_pool_t *dp = spa_get_dsl(spa);
2787 
2788 	uint64_t unreserved = dsl_pool_unreserved_space(dp,
2789 	    ZFS_SPACE_CHECK_EXTRA_RESERVED);
2790 	uint64_t used = dsl_dir_phys(dp->dp_root_dir)->dd_used_bytes;
2791 	uint64_t avail = (unreserved > used) ? (unreserved - used) : 0;
2792 
2793 	if (spa_has_checkpoint(spa) && avail == 0)
2794 		return (B_TRUE);
2795 
2796 	return (B_FALSE);
2797 }
2798 
2799 #if defined(_KERNEL)
2800 
2801 int
2802 param_set_deadman_failmode_common(const char *val)
2803 {
2804 	spa_t *spa = NULL;
2805 	char *p;
2806 
2807 	if (val == NULL)
2808 		return (SET_ERROR(EINVAL));
2809 
2810 	if ((p = strchr(val, '\n')) != NULL)
2811 		*p = '\0';
2812 
2813 	if (strcmp(val, "wait") != 0 && strcmp(val, "continue") != 0 &&
2814 	    strcmp(val, "panic"))
2815 		return (SET_ERROR(EINVAL));
2816 
2817 	if (spa_mode_global != SPA_MODE_UNINIT) {
2818 		mutex_enter(&spa_namespace_lock);
2819 		while ((spa = spa_next(spa)) != NULL)
2820 			spa_set_deadman_failmode(spa, val);
2821 		mutex_exit(&spa_namespace_lock);
2822 	}
2823 
2824 	return (0);
2825 }
2826 #endif
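/*
 * Editorial usage note for the handler above: on Linux the accepted values
 * can be applied at runtime through the module parameter, e.g.
 *
 *	echo continue > /sys/module/zfs/parameters/zfs_deadman_failmode
 *
 * which walks every imported pool under spa_namespace_lock and updates its
 * spa_deadman_failmode; any value other than "wait", "continue", or "panic"
 * is rejected with EINVAL.
 */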
2827 
2828 /* Namespace manipulation */
2829 EXPORT_SYMBOL(spa_lookup);
2830 EXPORT_SYMBOL(spa_add);
2831 EXPORT_SYMBOL(spa_remove);
2832 EXPORT_SYMBOL(spa_next);
2833 
2834 /* Refcount functions */
2835 EXPORT_SYMBOL(spa_open_ref);
2836 EXPORT_SYMBOL(spa_close);
2837 EXPORT_SYMBOL(spa_refcount_zero);
2838 
2839 /* Pool configuration lock */
2840 EXPORT_SYMBOL(spa_config_tryenter);
2841 EXPORT_SYMBOL(spa_config_enter);
2842 EXPORT_SYMBOL(spa_config_exit);
2843 EXPORT_SYMBOL(spa_config_held);
2844 
2845 /* Pool vdev add/remove lock */
2846 EXPORT_SYMBOL(spa_vdev_enter);
2847 EXPORT_SYMBOL(spa_vdev_exit);
2848 
2849 /* Pool vdev state change lock */
2850 EXPORT_SYMBOL(spa_vdev_state_enter);
2851 EXPORT_SYMBOL(spa_vdev_state_exit);
2852 
2853 /* Accessor functions */
2854 EXPORT_SYMBOL(spa_shutting_down);
2855 EXPORT_SYMBOL(spa_get_dsl);
2856 EXPORT_SYMBOL(spa_get_rootblkptr);
2857 EXPORT_SYMBOL(spa_set_rootblkptr);
2858 EXPORT_SYMBOL(spa_altroot);
2859 EXPORT_SYMBOL(spa_sync_pass);
2860 EXPORT_SYMBOL(spa_name);
2861 EXPORT_SYMBOL(spa_guid);
2862 EXPORT_SYMBOL(spa_last_synced_txg);
2863 EXPORT_SYMBOL(spa_first_txg);
2864 EXPORT_SYMBOL(spa_syncing_txg);
2865 EXPORT_SYMBOL(spa_version);
2866 EXPORT_SYMBOL(spa_state);
2867 EXPORT_SYMBOL(spa_load_state);
2868 EXPORT_SYMBOL(spa_freeze_txg);
2869 EXPORT_SYMBOL(spa_get_dspace);
2870 EXPORT_SYMBOL(spa_update_dspace);
2871 EXPORT_SYMBOL(spa_deflate);
2872 EXPORT_SYMBOL(spa_normal_class);
2873 EXPORT_SYMBOL(spa_log_class);
2874 EXPORT_SYMBOL(spa_special_class);
2875 EXPORT_SYMBOL(spa_preferred_class);
2876 EXPORT_SYMBOL(spa_max_replication);
2877 EXPORT_SYMBOL(spa_prev_software_version);
2878 EXPORT_SYMBOL(spa_get_failmode);
2879 EXPORT_SYMBOL(spa_suspended);
2880 EXPORT_SYMBOL(spa_bootfs);
2881 EXPORT_SYMBOL(spa_delegation);
2882 EXPORT_SYMBOL(spa_meta_objset);
2883 EXPORT_SYMBOL(spa_maxblocksize);
2884 EXPORT_SYMBOL(spa_maxdnodesize);
2885 
2886 /* Miscellaneous support routines */
2887 EXPORT_SYMBOL(spa_guid_exists);
2888 EXPORT_SYMBOL(spa_strdup);
2889 EXPORT_SYMBOL(spa_strfree);
2890 EXPORT_SYMBOL(spa_generate_guid);
2891 EXPORT_SYMBOL(snprintf_blkptr);
2892 EXPORT_SYMBOL(spa_freeze);
2893 EXPORT_SYMBOL(spa_upgrade);
2894 EXPORT_SYMBOL(spa_evict_all);
2895 EXPORT_SYMBOL(spa_lookup_by_guid);
2896 EXPORT_SYMBOL(spa_has_spare);
2897 EXPORT_SYMBOL(dva_get_dsize_sync);
2898 EXPORT_SYMBOL(bp_get_dsize_sync);
2899 EXPORT_SYMBOL(bp_get_dsize);
2900 EXPORT_SYMBOL(spa_has_slogs);
2901 EXPORT_SYMBOL(spa_is_root);
2902 EXPORT_SYMBOL(spa_writeable);
2903 EXPORT_SYMBOL(spa_mode);
2904 EXPORT_SYMBOL(spa_namespace_lock);
2905 EXPORT_SYMBOL(spa_trust_config);
2906 EXPORT_SYMBOL(spa_missing_tvds_allowed);
2907 EXPORT_SYMBOL(spa_set_missing_tvds);
2908 EXPORT_SYMBOL(spa_state_to_name);
2909 EXPORT_SYMBOL(spa_importing_readonly_checkpoint);
2910 EXPORT_SYMBOL(spa_min_claim_txg);
2911 EXPORT_SYMBOL(spa_suspend_async_destroy);
2912 EXPORT_SYMBOL(spa_has_checkpoint);
2913 EXPORT_SYMBOL(spa_top_vdevs_spacemap_addressable);
2914 
2915 ZFS_MODULE_PARAM(zfs, zfs_, flags, UINT, ZMOD_RW,
2916 	"Set additional debugging flags");
2917 
2918 ZFS_MODULE_PARAM(zfs, zfs_, recover, INT, ZMOD_RW,
2919 	"Set to attempt to recover from fatal errors");
2920 
2921 ZFS_MODULE_PARAM(zfs, zfs_, free_leak_on_eio, INT, ZMOD_RW,
2922 	"Set to ignore IO errors during free and permanently leak the space");
2923 
2924 ZFS_MODULE_PARAM(zfs_deadman, zfs_deadman_, checktime_ms, ULONG, ZMOD_RW,
2925 	"Dead I/O check interval in milliseconds");
2926 
2927 ZFS_MODULE_PARAM(zfs_deadman, zfs_deadman_, enabled, INT, ZMOD_RW,
2928 	"Enable deadman timer");
2929 
2930 ZFS_MODULE_PARAM(zfs_spa, spa_, asize_inflation, INT, ZMOD_RW,
2931 	"SPA size estimate multiplication factor");
2932 
2933 ZFS_MODULE_PARAM(zfs, zfs_, ddt_data_is_special, INT, ZMOD_RW,
2934 	"Place DDT data into the special class");
2935 
2936 ZFS_MODULE_PARAM(zfs, zfs_, user_indirect_is_special, INT, ZMOD_RW,
2937 	"Place user data indirect blocks into the special class");
2938 
2939 /* BEGIN CSTYLED */
2940 ZFS_MODULE_PARAM_CALL(zfs_deadman, zfs_deadman_, failmode,
2941 	param_set_deadman_failmode, param_get_charp, ZMOD_RW,
2942 	"Failmode for deadman timer");
2943 
2944 ZFS_MODULE_PARAM_CALL(zfs_deadman, zfs_deadman_, synctime_ms,
2945 	param_set_deadman_synctime, param_get_ulong, ZMOD_RW,
2946 	"Pool sync expiration time in milliseconds");
2947 
2948 ZFS_MODULE_PARAM_CALL(zfs_deadman, zfs_deadman_, ziotime_ms,
2949 	param_set_deadman_ziotime, param_get_ulong, ZMOD_RW,
2950 	"IO expiration time in milliseconds");
2951 
2952 ZFS_MODULE_PARAM(zfs, zfs_, special_class_metadata_reserve_pct, INT, ZMOD_RW,
2953 	"Small file blocks in special vdevs depends on this much "
2954 	"free space available");
2955 /* END CSTYLED */
2956 
2957 ZFS_MODULE_PARAM_CALL(zfs_spa, spa_, slop_shift, param_set_slop_shift,
2958 	param_get_int, ZMOD_RW, "Reserved free space in pool");
2959