/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2019 by Delphix. All rights reserved.
 * Copyright 2015 Nexenta Systems, Inc.  All rights reserved.
 * Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
 * Copyright 2013 Saso Kiselkov. All rights reserved.
 * Copyright (c) 2017 Datto Inc.
 * Copyright (c) 2017, Intel Corporation.
 * Copyright (c) 2019, loli10K <ezomori.nozomu@gmail.com>. All rights reserved.
 */

#include <sys/zfs_context.h>
#include <sys/spa_impl.h>
#include <sys/zio.h>
#include <sys/zio_checksum.h>
#include <sys/zio_compress.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/zap.h>
#include <sys/zil.h>
#include <sys/vdev_impl.h>
#include <sys/vdev_initialize.h>
#include <sys/vdev_trim.h>
#include <sys/vdev_file.h>
#include <sys/vdev_raidz.h>
#include <sys/metaslab.h>
#include <sys/uberblock_impl.h>
#include <sys/txg.h>
#include <sys/avl.h>
#include <sys/unique.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_prop.h>
#include <sys/fm/util.h>
#include <sys/dsl_scan.h>
#include <sys/fs/zfs.h>
#include <sys/metaslab_impl.h>
#include <sys/arc.h>
#include <sys/ddt.h>
#include <sys/kstat.h>
#include "zfs_prop.h"
#include <sys/btree.h>
#include <sys/zfeature.h>
#include <sys/qat.h>
#include <sys/zstd/zstd.h>

/*
 * SPA locking
 *
 * There are three basic locks for managing spa_t structures:
 *
 * spa_namespace_lock (global mutex)
 *
 *	This lock must be acquired to do any of the following:
 *
 *		- Lookup a spa_t by name
 *		- Add or remove a spa_t from the namespace
 *		- Increase spa_refcount from non-zero
 *		- Check if spa_refcount is zero
 *		- Rename a spa_t
 *		- add/remove/attach/detach devices
 *		- Held for the duration of create/destroy/import/export
 *
 *	It does not need to handle recursion.  A create or destroy may
 *	reference objects (files or zvols) in other pools, but by
 *	definition they must have an existing reference, and will never need
 *	to lookup a spa_t by name.
 *
 * spa_refcount (per-spa zfs_refcount_t protected by mutex)
 *
 *	This reference count keeps track of any active users of the spa_t.  The
 *	spa_t cannot be destroyed or freed while this is non-zero.  Internally,
 *	the refcount is never really 'zero' - opening a pool implicitly keeps
 *	some references in the DMU.  Internally we check against spa_minref, but
 *	present the image of a zero/non-zero value to consumers.
 *
 * spa_config_lock[] (per-spa array of rwlocks)
 *
 *	This protects the spa_t from config changes, and must be held in
 *	the following circumstances:
 *
 *		- RW_READER to perform I/O to the spa
 *		- RW_WRITER to change the vdev config
 *
 * The locking order is fairly straightforward:
 *
 *		spa_namespace_lock	->	spa_refcount
 *
 *	The namespace lock must be acquired to increase the refcount from 0
 *	or to check if it is zero.
 *
 *		spa_refcount		->	spa_config_lock[]
 *
 *	There must be at least one valid reference on the spa_t to acquire
 *	the config lock.
 *
 *		spa_namespace_lock	->	spa_config_lock[]
 *
 *	The namespace lock must always be taken before the config lock.
 *
 *
 * The spa_namespace_lock can be acquired directly and is globally visible.
 *
 * The namespace is manipulated using the following functions, all of which
 * require the spa_namespace_lock to be held.
 *
 *	spa_lookup()		Lookup a spa_t by name.
 *
 *	spa_add()		Create a new spa_t in the namespace.
 *
 *	spa_remove()		Remove a spa_t from the namespace.  This also
 *				frees up any memory associated with the spa_t.
 *
 *	spa_next()		Returns the next spa_t in the system, or the
 *				first if NULL is passed.
 *
 *	spa_evict_all()		Shutdown and remove all spa_t structures in
 *				the system.
 *
 *	spa_guid_exists()	Determine whether a pool/device guid exists.
 *
 * The spa_refcount is manipulated using the following functions:
 *
 *	spa_open_ref()		Adds a reference to the given spa_t.  Must be
 *				called with spa_namespace_lock held if the
 *				refcount is currently zero.
 *
 *	spa_close()		Remove a reference from the spa_t.  This will
 *				not free the spa_t or remove it from the
 *				namespace.  No locking is required.
 *
 *	spa_refcount_zero()	Returns true if the refcount is currently
 *				zero.  Must be called with spa_namespace_lock
 *				held.
 *
 * The spa_config_lock[] is an array of rwlocks, ordered as follows:
 * SCL_CONFIG > SCL_STATE > SCL_ALLOC > SCL_ZIO > SCL_FREE > SCL_VDEV.
 * spa_config_lock[] is manipulated with spa_config_{enter,exit,held}().
 *
 * To read the configuration, it suffices to hold one of these locks as reader.
 * To modify the configuration, you must hold all locks as writer.  To modify
 * vdev state without altering the vdev tree's topology (e.g. online/offline),
 * you must hold SCL_STATE and SCL_ZIO as writer.
 *
 * We use these distinct config locks to avoid recursive lock entry.
 * For example, spa_sync() (which holds SCL_CONFIG as reader) induces
 * block allocations (SCL_ALLOC), which may require reading space maps
 * from disk (dmu_read() -> zio_read() -> SCL_ZIO).
 *
 * The spa config locks cannot be normal rwlocks because we need the
 * ability to hand off ownership.  For example, SCL_ZIO is acquired
 * by the issuing thread and later released by an interrupt thread.
 * They do, however, obey the usual write-wanted semantics to prevent
 * writer (i.e. system administrator) starvation.
 *
 * The lock acquisition rules are as follows:
 *
 * SCL_CONFIG
 *	Protects changes to the vdev tree topology, such as vdev
 *	add/remove/attach/detach.  Protects the dirty config list
 *	(spa_config_dirty_list) and the set of spares and l2arc devices.
 *
 * SCL_STATE
 *	Protects changes to pool state and vdev state, such as vdev
 *	online/offline/fault/degrade/clear.  Protects the dirty state list
 *	(spa_state_dirty_list) and global pool state (spa_state).
 *
 * SCL_ALLOC
 *	Protects changes to metaslab groups and classes.
 *	Held as reader by metaslab_alloc() and metaslab_claim().
 *
 * SCL_ZIO
 *	Held by bp-level zios (those which have no io_vd upon entry)
 *	to prevent changes to the vdev tree.  The bp-level zio implicitly
 *	protects all of its vdev child zios, which do not hold SCL_ZIO.
 *
 * SCL_FREE
 *	Protects changes to metaslab groups and classes.
 *	Held as reader by metaslab_free().  SCL_FREE is distinct from
 *	SCL_ALLOC, and lower than SCL_ZIO, so that we can safely free
 *	blocks in zio_done() while another i/o that holds either
 *	SCL_ALLOC or SCL_ZIO is waiting for this i/o to complete.
 *
 * SCL_VDEV
 *	Held as reader to prevent changes to the vdev tree during trivial
 *	inquiries such as bp_get_dsize().  SCL_VDEV is distinct from the
 *	other locks, and lower than all of them, to ensure that it's safe
 *	to acquire regardless of caller context.
 *
 * In addition, the following rules apply:
 *
 * (a)	spa_props_lock protects pool properties, spa_config and spa_config_list.
 *	The lock ordering is SCL_CONFIG > spa_props_lock.
 *
 * (b)	I/O operations on leaf vdevs.  For any zio operation that takes
 *	an explicit vdev_t argument -- such as zio_ioctl(), zio_read_phys(),
 *	or zio_write_phys() -- the caller must ensure that the config cannot
 *	change in the interim, and that the vdev cannot be reopened.
 *	SCL_STATE as reader suffices for both.
 *
 * The vdev configuration is protected by spa_vdev_enter() / spa_vdev_exit().
 *
 *	spa_vdev_enter()	Acquire the namespace lock and the config lock
 *				for writing.
 *
 *	spa_vdev_exit()		Release the config lock, wait for all I/O
 *				to complete, sync the updated configs to the
 *				cache, and release the namespace lock.
 *
 * vdev state is protected by spa_vdev_state_enter() / spa_vdev_state_exit().
 * Like spa_vdev_enter/exit, these are convenience wrappers -- the actual
 * locking is, always, based on spa_namespace_lock and spa_config_lock[].
 */
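
/*
 * Editorial example (a sketch, not part of the original source): a
 * reader-side caller following the rules above.  The function name is
 * hypothetical; the locking calls are the API documented in this
 * comment block.  SCL_VDEV as reader keeps the vdev tree stable while
 * it is inspected:
 *
 *	static uint64_t
 *	example_top_vdev_count(spa_t *spa)
 *	{
 *		uint64_t n;
 *
 *		spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
 *		n = spa->spa_root_vdev->vdev_children;
 *		spa_config_exit(spa, SCL_VDEV, FTAG);
 *		return (n);
 *	}
 */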

static avl_tree_t spa_namespace_avl;
kmutex_t spa_namespace_lock;
static kcondvar_t spa_namespace_cv;
int spa_max_replication_override = SPA_DVAS_PER_BP;

static kmutex_t spa_spare_lock;
static avl_tree_t spa_spare_avl;
static kmutex_t spa_l2cache_lock;
static avl_tree_t spa_l2cache_avl;

kmem_cache_t *spa_buffer_pool;
spa_mode_t spa_mode_global = SPA_MODE_UNINIT;

#ifdef ZFS_DEBUG
/*
 * Everything except dprintf, set_error, and indirect_remap is on
 * by default in debug builds.
 */
int zfs_flags = ~(ZFS_DEBUG_DPRINTF | ZFS_DEBUG_SET_ERROR |
    ZFS_DEBUG_INDIRECT_REMAP);
#else
int zfs_flags = 0;
#endif

/*
 * zfs_recover can be set to nonzero to attempt to recover from
 * otherwise-fatal errors, typically caused by on-disk corruption.  When
 * set, calls to zfs_panic_recover() will turn into warning messages.
 * This should only be used as a last resort, as it typically results
 * in leaked space, or worse.
 */
int zfs_recover = B_FALSE;

/*
 * If destroy encounters an EIO while reading metadata (e.g. indirect
 * blocks), space referenced by the missing metadata can not be freed.
 * Normally this causes the background destroy to become "stalled", as
 * it is unable to make forward progress.  While in this stalled state,
 * all remaining space to free from the error-encountering filesystem is
 * "temporarily leaked".  Set this flag to cause it to ignore the EIO,
 * permanently leak the space from indirect blocks that can not be read,
 * and continue to free everything else that it can.
 *
 * The default, "stalling" behavior is useful if the storage partially
 * fails (i.e. some but not all i/os fail), and then later recovers.  In
 * this case, we will be able to continue pool operations while it is
 * partially failed, and when it recovers, we can continue to free the
 * space, with no leaks.  However, note that this case is actually
 * fairly rare.
 *
 * Typically pools either (a) fail completely (but perhaps temporarily,
 * e.g. a top-level vdev going offline), or (b) have localized,
 * permanent errors (e.g. disk returns the wrong data due to bit flip or
 * firmware bug).  In case (a), this setting does not matter because the
 * pool will be suspended and the sync thread will not be able to make
 * forward progress regardless.  In case (b), because the error is
 * permanent, the best we can do is leak the minimum amount of space,
 * which is what setting this flag will do.  Therefore, it is reasonable
 * for this flag to normally be set, but we chose the more conservative
 * approach of not setting it, so that there is no possibility of
 * leaking space in the "partial temporary" failure case.
 */
int zfs_free_leak_on_eio = B_FALSE;

/*
 * Expiration time in milliseconds.  This value has two meanings.  First, it is
 * used to determine when the spa_deadman() logic should fire.  By default the
 * spa_deadman() will fire if spa_sync() has not completed in 600 seconds.
 * Second, the value determines if an I/O is considered "hung".  Any I/O that
 * has not completed in zfs_deadman_synctime_ms is considered "hung", resulting
 * in one of three behaviors controlled by zfs_deadman_failmode.
 */
unsigned long zfs_deadman_synctime_ms = 600000UL;

/*
 * This value controls the maximum amount of time zio_wait() will block for an
 * outstanding IO.  By default this is 300 seconds at which point the "hung"
 * behavior will be applied as described for zfs_deadman_synctime_ms.
 */
unsigned long zfs_deadman_ziotime_ms = 300000UL;

/*
 * Check time in milliseconds. This defines the frequency at which we check
 * for hung I/O.
 */
unsigned long zfs_deadman_checktime_ms = 60000UL;

/*
 * By default the deadman is enabled.
 */
int zfs_deadman_enabled = 1;

/*
 * Controls the behavior of the deadman when it detects a "hung" I/O.
 * Valid values are zfs_deadman_failmode=<wait|continue|panic>.
 *
 * wait     - Wait for the "hung" I/O (default)
 * continue - Attempt to recover from a "hung" I/O
 * panic    - Panic the system
 */
char *zfs_deadman_failmode = "wait";
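
/*
 * Editorial note: on Linux builds the deadman tunables above are
 * exported as module parameters, so they can normally be inspected or
 * changed at runtime through the standard sysfs layout, e.g.
 * /sys/module/zfs/parameters/zfs_deadman_failmode (assuming the usual
 * module-parameter mechanism; see the zfs(4) man page for details).
 */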

/*
 * The worst case is single-sector max-parity RAID-Z blocks, in which
 * case the space requirement is exactly (VDEV_RAIDZ_MAXPARITY + 1)
 * times the size; so just assume that.  Add to this the fact that
 * we can have up to 3 DVAs per bp, and one more factor of 2 because
 * the block may be dittoed with up to 3 DVAs by ddt_sync().  All together,
 * the worst case is:
 *     (VDEV_RAIDZ_MAXPARITY + 1) * SPA_DVAS_PER_BP * 2 == 24
 */
int spa_asize_inflation = 24;

/*
 * Normally, we don't allow the last 3.2% (1/(2^spa_slop_shift)) of space in
 * the pool to be consumed (bounded by spa_max_slop).  This ensures that we
 * don't run the pool completely out of space, due to unaccounted changes (e.g.
 * to the MOS).  It also limits the worst-case time to allocate space.  If we
 * have less than this amount of free space, most ZPL operations (e.g.  write,
 * create) will return ENOSPC.  The ZIL metaslabs (spa_embedded_log_class) are
 * also part of this 3.2% of space which can't be consumed by normal writes;
 * the slop space "proper" (spa_get_slop_space()) is decreased by the embedded
 * log space.
 *
 * Certain operations (e.g. file removal, most administrative actions) can
 * use half the slop space.  They will only return ENOSPC if less than half
 * the slop space is free.  Typically, once the pool has less than the slop
 * space free, the user will use these operations to free up space in the pool.
 * These are the operations that call dsl_pool_adjustedsize() with the netfree
 * argument set to TRUE.
 *
 * Operations that are almost guaranteed to free up space in the absence of
 * a pool checkpoint can use up to three quarters of the slop space
 * (e.g. zfs destroy).
 *
 * A very restricted set of operations are always permitted, regardless of
 * the amount of free space.  These are the operations that call
 * dsl_sync_task(ZFS_SPACE_CHECK_NONE). If these operations result in a net
 * increase in the amount of space used, it is possible to run the pool
 * completely out of space, causing it to be permanently read-only.
 *
 * Note that on very small pools, the slop space will be larger than
 * 3.2%, in an effort to have it be at least spa_min_slop (128MB),
 * but we never allow it to be more than half the pool size.
 *
 * Further, on very large pools, the slop space will be smaller than
 * 3.2%, to avoid reserving much more space than we actually need; bounded
 * by spa_max_slop (128GB).
 *
 * See also the comments in zfs_space_check_t.
 */
int spa_slop_shift = 5;
uint64_t spa_min_slop = 128ULL * 1024 * 1024;
uint64_t spa_max_slop = 128ULL * 1024 * 1024 * 1024;
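
/*
 * Worked example of the defaults above (editorial): with
 * spa_slop_shift = 5, raw slop is space/32 (~3.2%).  A 1 TiB pool
 * reserves 32 GiB.  A 1 GiB pool computes 32 MiB, which is raised
 * toward spa_min_slop (128 MiB, limited to half the pool).  A 16 PiB
 * pool computes 512 TiB, which is clamped to spa_max_slop (128 GiB).
 * See spa_get_slop_space() below for the exact calculation.
 */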
int spa_allocators = 4;


/*PRINTFLIKE2*/
void
spa_load_failed(spa_t *spa, const char *fmt, ...)
{
	va_list adx;
	char buf[256];

	va_start(adx, fmt);
	(void) vsnprintf(buf, sizeof (buf), fmt, adx);
	va_end(adx);

	zfs_dbgmsg("spa_load(%s, config %s): FAILED: %s", spa->spa_name,
	    spa->spa_trust_config ? "trusted" : "untrusted", buf);
}

/*PRINTFLIKE2*/
void
spa_load_note(spa_t *spa, const char *fmt, ...)
{
	va_list adx;
	char buf[256];

	va_start(adx, fmt);
	(void) vsnprintf(buf, sizeof (buf), fmt, adx);
	va_end(adx);

	zfs_dbgmsg("spa_load(%s, config %s): %s", spa->spa_name,
	    spa->spa_trust_config ? "trusted" : "untrusted", buf);
}

/*
 * By default dedup and user data indirects land in the special class
 */
int zfs_ddt_data_is_special = B_TRUE;
int zfs_user_indirect_is_special = B_TRUE;

/*
 * The percentage of special class final space reserved for metadata only.
 * Once we have allocated (100 - zfs_special_class_metadata_reserve_pct)%
 * of the special class's space, we only let metadata into the class.
 */
int zfs_special_class_metadata_reserve_pct = 25;

/*
 * ==========================================================================
 * SPA config locking
 * ==========================================================================
 */
static void
spa_config_lock_init(spa_t *spa)
{
	for (int i = 0; i < SCL_LOCKS; i++) {
		spa_config_lock_t *scl = &spa->spa_config_lock[i];
		mutex_init(&scl->scl_lock, NULL, MUTEX_DEFAULT, NULL);
		cv_init(&scl->scl_cv, NULL, CV_DEFAULT, NULL);
		zfs_refcount_create_untracked(&scl->scl_count);
		scl->scl_writer = NULL;
		scl->scl_write_wanted = 0;
	}
}

static void
spa_config_lock_destroy(spa_t *spa)
{
	for (int i = 0; i < SCL_LOCKS; i++) {
		spa_config_lock_t *scl = &spa->spa_config_lock[i];
		mutex_destroy(&scl->scl_lock);
		cv_destroy(&scl->scl_cv);
		zfs_refcount_destroy(&scl->scl_count);
		ASSERT(scl->scl_writer == NULL);
		ASSERT(scl->scl_write_wanted == 0);
	}
}

int
spa_config_tryenter(spa_t *spa, int locks, void *tag, krw_t rw)
{
	for (int i = 0; i < SCL_LOCKS; i++) {
		spa_config_lock_t *scl = &spa->spa_config_lock[i];
		if (!(locks & (1 << i)))
			continue;
		mutex_enter(&scl->scl_lock);
		if (rw == RW_READER) {
			if (scl->scl_writer || scl->scl_write_wanted) {
				mutex_exit(&scl->scl_lock);
				spa_config_exit(spa, locks & ((1 << i) - 1),
				    tag);
				return (0);
			}
		} else {
			ASSERT(scl->scl_writer != curthread);
			if (!zfs_refcount_is_zero(&scl->scl_count)) {
				mutex_exit(&scl->scl_lock);
				spa_config_exit(spa, locks & ((1 << i) - 1),
				    tag);
				return (0);
			}
			scl->scl_writer = curthread;
		}
		(void) zfs_refcount_add(&scl->scl_count, tag);
		mutex_exit(&scl->scl_lock);
	}
	return (1);
}

void
spa_config_enter(spa_t *spa, int locks, const void *tag, krw_t rw)
{
	int wlocks_held = 0;

	ASSERT3U(SCL_LOCKS, <, sizeof (wlocks_held) * NBBY);

	for (int i = 0; i < SCL_LOCKS; i++) {
		spa_config_lock_t *scl = &spa->spa_config_lock[i];
		if (scl->scl_writer == curthread)
			wlocks_held |= (1 << i);
		if (!(locks & (1 << i)))
			continue;
		mutex_enter(&scl->scl_lock);
		if (rw == RW_READER) {
			while (scl->scl_writer || scl->scl_write_wanted) {
				cv_wait(&scl->scl_cv, &scl->scl_lock);
			}
		} else {
			ASSERT(scl->scl_writer != curthread);
			while (!zfs_refcount_is_zero(&scl->scl_count)) {
				scl->scl_write_wanted++;
				cv_wait(&scl->scl_cv, &scl->scl_lock);
				scl->scl_write_wanted--;
			}
			scl->scl_writer = curthread;
		}
		(void) zfs_refcount_add(&scl->scl_count, tag);
		mutex_exit(&scl->scl_lock);
	}
	ASSERT3U(wlocks_held, <=, locks);
}

void
spa_config_exit(spa_t *spa, int locks, const void *tag)
{
	for (int i = SCL_LOCKS - 1; i >= 0; i--) {
		spa_config_lock_t *scl = &spa->spa_config_lock[i];
		if (!(locks & (1 << i)))
			continue;
		mutex_enter(&scl->scl_lock);
		ASSERT(!zfs_refcount_is_zero(&scl->scl_count));
		if (zfs_refcount_remove(&scl->scl_count, tag) == 0) {
			ASSERT(scl->scl_writer == NULL ||
			    scl->scl_writer == curthread);
			scl->scl_writer = NULL;	/* OK in either case */
			cv_broadcast(&scl->scl_cv);
		}
		mutex_exit(&scl->scl_lock);
	}
}

int
spa_config_held(spa_t *spa, int locks, krw_t rw)
{
	int locks_held = 0;

	for (int i = 0; i < SCL_LOCKS; i++) {
		spa_config_lock_t *scl = &spa->spa_config_lock[i];
		if (!(locks & (1 << i)))
			continue;
		if ((rw == RW_READER &&
		    !zfs_refcount_is_zero(&scl->scl_count)) ||
		    (rw == RW_WRITER && scl->scl_writer == curthread))
			locks_held |= 1 << i;
	}

	return (locks_held);
}
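
/*
 * Editorial example (sketch): spa_config_held() is typically used in
 * assertions by code that requires a config lock to already be held,
 * rather than acquiring it again, e.g.:
 *
 *	ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0);
 */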

/*
 * ==========================================================================
 * SPA namespace functions
 * ==========================================================================
 */

/*
 * Lookup the named spa_t in the AVL tree.  The spa_namespace_lock must be held.
 * Returns NULL if no matching spa_t is found.
 */
spa_t *
spa_lookup(const char *name)
{
	static spa_t search;	/* spa_t is large; don't allocate on stack */
	spa_t *spa;
	avl_index_t where;
	char *cp;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	(void) strlcpy(search.spa_name, name, sizeof (search.spa_name));

	/*
	 * If it's a full dataset name, figure out the pool name and
	 * just use that.
	 */
	cp = strpbrk(search.spa_name, "/@#");
	if (cp != NULL)
		*cp = '\0';

	spa = avl_find(&spa_namespace_avl, &search, &where);

	return (spa);
}
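
/*
 * Editorial example (hypothetical caller): because spa_lookup()
 * truncates at the first '/', '@', or '#', a full dataset name resolves
 * to its pool:
 *
 *	mutex_enter(&spa_namespace_lock);
 *	spa_t *spa = spa_lookup("tank/home@yesterday");  -- spa for "tank"
 *	mutex_exit(&spa_namespace_lock);
 */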

/*
 * Fires when spa_sync has not completed within zfs_deadman_synctime_ms.
 * If the zfs_deadman_enabled flag is set then it inspects all vdev queues
 * looking for potentially hung I/Os.
 */
void
spa_deadman(void *arg)
{
	spa_t *spa = arg;

	/* Disable the deadman if the pool is suspended. */
	if (spa_suspended(spa))
		return;

	zfs_dbgmsg("slow spa_sync: started %llu seconds ago, calls %llu",
	    (gethrtime() - spa->spa_sync_starttime) / NANOSEC,
	    (u_longlong_t)++spa->spa_deadman_calls);
	if (zfs_deadman_enabled)
		vdev_deadman(spa->spa_root_vdev, FTAG);

	spa->spa_deadman_tqid = taskq_dispatch_delay(system_delay_taskq,
	    spa_deadman, spa, TQ_SLEEP, ddi_get_lbolt() +
	    MSEC_TO_TICK(zfs_deadman_checktime_ms));
}

static int
spa_log_sm_sort_by_txg(const void *va, const void *vb)
{
	const spa_log_sm_t *a = va;
	const spa_log_sm_t *b = vb;

	return (TREE_CMP(a->sls_txg, b->sls_txg));
}

/*
 * Create an uninitialized spa_t with the given name.  Requires
 * spa_namespace_lock.  The caller must ensure that the spa_t doesn't already
 * exist by calling spa_lookup() first.
 */
spa_t *
spa_add(const char *name, nvlist_t *config, const char *altroot)
{
	spa_t *spa;
	spa_config_dirent_t *dp;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	spa = kmem_zalloc(sizeof (spa_t), KM_SLEEP);

	mutex_init(&spa->spa_async_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_errlist_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_errlog_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_evicting_os_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_history_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_proc_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_props_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_cksum_tmpls_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_scrub_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_suspend_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_vdev_top_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_feat_stats_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_flushed_ms_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_activities_lock, NULL, MUTEX_DEFAULT, NULL);

	cv_init(&spa->spa_async_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&spa->spa_evicting_os_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&spa->spa_proc_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&spa->spa_scrub_io_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&spa->spa_suspend_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&spa->spa_activities_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&spa->spa_waiters_cv, NULL, CV_DEFAULT, NULL);

	for (int t = 0; t < TXG_SIZE; t++)
		bplist_create(&spa->spa_free_bplist[t]);

	(void) strlcpy(spa->spa_name, name, sizeof (spa->spa_name));
	spa->spa_state = POOL_STATE_UNINITIALIZED;
	spa->spa_freeze_txg = UINT64_MAX;
	spa->spa_final_txg = UINT64_MAX;
	spa->spa_load_max_txg = UINT64_MAX;
	spa->spa_proc = &p0;
	spa->spa_proc_state = SPA_PROC_NONE;
	spa->spa_trust_config = B_TRUE;
	spa->spa_hostid = zone_get_hostid(NULL);

	spa->spa_deadman_synctime = MSEC2NSEC(zfs_deadman_synctime_ms);
	spa->spa_deadman_ziotime = MSEC2NSEC(zfs_deadman_ziotime_ms);
	spa_set_deadman_failmode(spa, zfs_deadman_failmode);

	zfs_refcount_create(&spa->spa_refcount);
	spa_config_lock_init(spa);
	spa_stats_init(spa);

	avl_add(&spa_namespace_avl, spa);

	/*
	 * Set the alternate root, if there is one.
	 */
	if (altroot)
		spa->spa_root = spa_strdup(altroot);

	spa->spa_alloc_count = spa_allocators;
	spa->spa_alloc_locks = kmem_zalloc(spa->spa_alloc_count *
	    sizeof (kmutex_t), KM_SLEEP);
	spa->spa_alloc_trees = kmem_zalloc(spa->spa_alloc_count *
	    sizeof (avl_tree_t), KM_SLEEP);
	for (int i = 0; i < spa->spa_alloc_count; i++) {
		mutex_init(&spa->spa_alloc_locks[i], NULL, MUTEX_DEFAULT, NULL);
		avl_create(&spa->spa_alloc_trees[i], zio_bookmark_compare,
		    sizeof (zio_t), offsetof(zio_t, io_alloc_node));
	}
	avl_create(&spa->spa_metaslabs_by_flushed, metaslab_sort_by_flushed,
	    sizeof (metaslab_t), offsetof(metaslab_t, ms_spa_txg_node));
	avl_create(&spa->spa_sm_logs_by_txg, spa_log_sm_sort_by_txg,
	    sizeof (spa_log_sm_t), offsetof(spa_log_sm_t, sls_node));
	list_create(&spa->spa_log_summary, sizeof (log_summary_entry_t),
	    offsetof(log_summary_entry_t, lse_node));

	/*
	 * Every pool starts with the default cachefile
	 */
	list_create(&spa->spa_config_list, sizeof (spa_config_dirent_t),
	    offsetof(spa_config_dirent_t, scd_link));

	dp = kmem_zalloc(sizeof (spa_config_dirent_t), KM_SLEEP);
	dp->scd_path = altroot ? NULL : spa_strdup(spa_config_path);
	list_insert_head(&spa->spa_config_list, dp);

	VERIFY(nvlist_alloc(&spa->spa_load_info, NV_UNIQUE_NAME,
	    KM_SLEEP) == 0);

	if (config != NULL) {
		nvlist_t *features;

		if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_FEATURES_FOR_READ,
		    &features) == 0) {
			VERIFY(nvlist_dup(features, &spa->spa_label_features,
			    0) == 0);
		}

		VERIFY(nvlist_dup(config, &spa->spa_config, 0) == 0);
	}

	if (spa->spa_label_features == NULL) {
		VERIFY(nvlist_alloc(&spa->spa_label_features, NV_UNIQUE_NAME,
		    KM_SLEEP) == 0);
	}

	spa->spa_min_ashift = INT_MAX;
	spa->spa_max_ashift = 0;
	spa->spa_min_alloc = INT_MAX;

	/* Reset cached value */
	spa->spa_dedup_dspace = ~0ULL;

	/*
	 * As a pool is being created, treat all features as disabled by
	 * setting SPA_FEATURE_DISABLED for all entries in the feature
	 * refcount cache.
	 */
	for (int i = 0; i < SPA_FEATURES; i++) {
		spa->spa_feat_refcount_cache[i] = SPA_FEATURE_DISABLED;
	}

	list_create(&spa->spa_leaf_list, sizeof (vdev_t),
	    offsetof(vdev_t, vdev_leaf_node));

	return (spa);
}
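
/*
 * Editorial sketch of the expected calling pattern (spa_create() and
 * spa_import() in spa.c follow this shape; error handling elided):
 *
 *	mutex_enter(&spa_namespace_lock);
 *	if (spa_lookup(name) != NULL) {
 *		mutex_exit(&spa_namespace_lock);
 *		return (SET_ERROR(EEXIST));
 *	}
 *	spa = spa_add(name, config, altroot);
 *	...
 *	mutex_exit(&spa_namespace_lock);
 */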

/*
 * Removes a spa_t from the namespace, freeing up any memory used.  Requires
 * spa_namespace_lock.  This is called only after the spa_t has been closed and
 * deactivated.
 */
void
spa_remove(spa_t *spa)
{
	spa_config_dirent_t *dp;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));
	ASSERT(spa_state(spa) == POOL_STATE_UNINITIALIZED);
	ASSERT3U(zfs_refcount_count(&spa->spa_refcount), ==, 0);
	ASSERT0(spa->spa_waiters);

	nvlist_free(spa->spa_config_splitting);

	avl_remove(&spa_namespace_avl, spa);
	cv_broadcast(&spa_namespace_cv);

	if (spa->spa_root)
		spa_strfree(spa->spa_root);

	while ((dp = list_head(&spa->spa_config_list)) != NULL) {
		list_remove(&spa->spa_config_list, dp);
		if (dp->scd_path != NULL)
			spa_strfree(dp->scd_path);
		kmem_free(dp, sizeof (spa_config_dirent_t));
	}

	for (int i = 0; i < spa->spa_alloc_count; i++) {
		avl_destroy(&spa->spa_alloc_trees[i]);
		mutex_destroy(&spa->spa_alloc_locks[i]);
	}
	kmem_free(spa->spa_alloc_locks, spa->spa_alloc_count *
	    sizeof (kmutex_t));
	kmem_free(spa->spa_alloc_trees, spa->spa_alloc_count *
	    sizeof (avl_tree_t));

	avl_destroy(&spa->spa_metaslabs_by_flushed);
	avl_destroy(&spa->spa_sm_logs_by_txg);
	list_destroy(&spa->spa_log_summary);
	list_destroy(&spa->spa_config_list);
	list_destroy(&spa->spa_leaf_list);

	nvlist_free(spa->spa_label_features);
	nvlist_free(spa->spa_load_info);
	nvlist_free(spa->spa_feat_stats);
	spa_config_set(spa, NULL);

	zfs_refcount_destroy(&spa->spa_refcount);

	spa_stats_destroy(spa);
	spa_config_lock_destroy(spa);

	for (int t = 0; t < TXG_SIZE; t++)
		bplist_destroy(&spa->spa_free_bplist[t]);

	zio_checksum_templates_free(spa);

	cv_destroy(&spa->spa_async_cv);
	cv_destroy(&spa->spa_evicting_os_cv);
	cv_destroy(&spa->spa_proc_cv);
	cv_destroy(&spa->spa_scrub_io_cv);
	cv_destroy(&spa->spa_suspend_cv);
	cv_destroy(&spa->spa_activities_cv);
	cv_destroy(&spa->spa_waiters_cv);

	mutex_destroy(&spa->spa_flushed_ms_lock);
	mutex_destroy(&spa->spa_async_lock);
	mutex_destroy(&spa->spa_errlist_lock);
	mutex_destroy(&spa->spa_errlog_lock);
	mutex_destroy(&spa->spa_evicting_os_lock);
	mutex_destroy(&spa->spa_history_lock);
	mutex_destroy(&spa->spa_proc_lock);
	mutex_destroy(&spa->spa_props_lock);
	mutex_destroy(&spa->spa_cksum_tmpls_lock);
	mutex_destroy(&spa->spa_scrub_lock);
	mutex_destroy(&spa->spa_suspend_lock);
	mutex_destroy(&spa->spa_vdev_top_lock);
	mutex_destroy(&spa->spa_feat_stats_lock);
	mutex_destroy(&spa->spa_activities_lock);

	kmem_free(spa, sizeof (spa_t));
}

/*
 * Given a pool, return the next pool in the namespace, or NULL if there is
 * none.  If 'prev' is NULL, return the first pool.
 */
spa_t *
spa_next(spa_t *prev)
{
	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	if (prev)
		return (AVL_NEXT(&spa_namespace_avl, prev));
	else
		return (avl_first(&spa_namespace_avl));
}

/*
 * ==========================================================================
 * SPA refcount functions
 * ==========================================================================
 */

/*
 * Add a reference to the given spa_t.  Must have at least one reference, or
 * have the namespace lock held.
 */
void
spa_open_ref(spa_t *spa, void *tag)
{
	ASSERT(zfs_refcount_count(&spa->spa_refcount) >= spa->spa_minref ||
	    MUTEX_HELD(&spa_namespace_lock));
	(void) zfs_refcount_add(&spa->spa_refcount, tag);
}

/*
 * Remove a reference to the given spa_t.  Must have at least one reference, or
 * have the namespace lock held.
 */
void
spa_close(spa_t *spa, void *tag)
{
	ASSERT(zfs_refcount_count(&spa->spa_refcount) > spa->spa_minref ||
	    MUTEX_HELD(&spa_namespace_lock));
	(void) zfs_refcount_remove(&spa->spa_refcount, tag);
}

/*
 * Remove a reference to the given spa_t held by a dsl dir that is
 * being asynchronously released.  Async releases occur from a taskq
 * performing eviction of dsl datasets and dirs.  The namespace lock
 * isn't held and the hold by the object being evicted may contribute to
 * spa_minref (e.g. dataset or directory released during pool export),
 * so the asserts in spa_close() do not apply.
 */
void
spa_async_close(spa_t *spa, void *tag)
{
	(void) zfs_refcount_remove(&spa->spa_refcount, tag);
}

/*
 * Check to see if the spa refcount is zero.  Must be called with
 * spa_namespace_lock held.  We really compare against spa_minref, which is the
 * number of references acquired when opening a pool.
 */
boolean_t
spa_refcount_zero(spa_t *spa)
{
	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	return (zfs_refcount_count(&spa->spa_refcount) == spa->spa_minref);
}
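
/*
 * Editorial sketch of the hold/release pattern implemented by the
 * functions above (hypothetical caller):
 *
 *	mutex_enter(&spa_namespace_lock);
 *	spa_t *spa = spa_lookup(name);
 *	if (spa != NULL)
 *		spa_open_ref(spa, FTAG);   -- lock held: count may be "zero"
 *	mutex_exit(&spa_namespace_lock);
 *	...		-- the spa_t can't be freed while the hold is active
 *	spa_close(spa, FTAG);	-- no lock required to drop a reference
 */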

/*
 * ==========================================================================
 * SPA spare and l2cache tracking
 * ==========================================================================
 */

/*
 * Hot spares and cache devices are tracked using the same code below,
 * for 'auxiliary' devices.
 */

typedef struct spa_aux {
	uint64_t	aux_guid;
	uint64_t	aux_pool;
	avl_node_t	aux_avl;
	int		aux_count;
} spa_aux_t;

static inline int
spa_aux_compare(const void *a, const void *b)
{
	const spa_aux_t *sa = (const spa_aux_t *)a;
	const spa_aux_t *sb = (const spa_aux_t *)b;

	return (TREE_CMP(sa->aux_guid, sb->aux_guid));
}

static void
spa_aux_add(vdev_t *vd, avl_tree_t *avl)
{
	avl_index_t where;
	spa_aux_t search;
	spa_aux_t *aux;

	search.aux_guid = vd->vdev_guid;
	if ((aux = avl_find(avl, &search, &where)) != NULL) {
		aux->aux_count++;
	} else {
		aux = kmem_zalloc(sizeof (spa_aux_t), KM_SLEEP);
		aux->aux_guid = vd->vdev_guid;
		aux->aux_count = 1;
		avl_insert(avl, aux, where);
	}
}

static void
spa_aux_remove(vdev_t *vd, avl_tree_t *avl)
{
	spa_aux_t search;
	spa_aux_t *aux;
	avl_index_t where;

	search.aux_guid = vd->vdev_guid;
	aux = avl_find(avl, &search, &where);

	ASSERT(aux != NULL);

	if (--aux->aux_count == 0) {
		avl_remove(avl, aux);
		kmem_free(aux, sizeof (spa_aux_t));
	} else if (aux->aux_pool == spa_guid(vd->vdev_spa)) {
		aux->aux_pool = 0ULL;
	}
}

static boolean_t
spa_aux_exists(uint64_t guid, uint64_t *pool, int *refcnt, avl_tree_t *avl)
{
	spa_aux_t search, *found;

	search.aux_guid = guid;
	found = avl_find(avl, &search, NULL);

	if (pool) {
		if (found)
			*pool = found->aux_pool;
		else
			*pool = 0ULL;
	}

	if (refcnt) {
		if (found)
			*refcnt = found->aux_count;
		else
			*refcnt = 0;
	}

	return (found != NULL);
}

static void
spa_aux_activate(vdev_t *vd, avl_tree_t *avl)
{
	spa_aux_t search, *found;
	avl_index_t where;

	search.aux_guid = vd->vdev_guid;
	found = avl_find(avl, &search, &where);
	ASSERT(found != NULL);
	ASSERT(found->aux_pool == 0ULL);

	found->aux_pool = spa_guid(vd->vdev_spa);
}

/*
 * Spares are tracked globally due to the following constraints:
 *
 *	- A spare may be part of multiple pools.
 *	- A spare may be added to a pool even if it's actively in use within
 *	  another pool.
 *	- A spare in use in any pool can only be the source of a replacement if
 *	  the target is a spare in the same pool.
 *
 * We keep track of all spares on the system through the use of a reference
 * counted AVL tree.  When a vdev is added as a spare, or used as a replacement
 * spare, then we bump the reference count in the AVL tree.  In addition, we set
 * the 'vdev_isspare' member to indicate that the device is a spare (active or
 * inactive).  When a spare is made active (used to replace a device in the
 * pool), we also keep track of which pool it's been made a part of.
 *
 * The 'spa_spare_lock' protects the AVL tree.  These functions are normally
 * called under the spa_namespace lock as part of vdev reconfiguration.  The
 * separate spare lock exists for the status query path, which does not need to
 * be completely consistent with respect to other vdev configuration changes.
 */

static int
spa_spare_compare(const void *a, const void *b)
{
	return (spa_aux_compare(a, b));
}

void
spa_spare_add(vdev_t *vd)
{
	mutex_enter(&spa_spare_lock);
	ASSERT(!vd->vdev_isspare);
	spa_aux_add(vd, &spa_spare_avl);
	vd->vdev_isspare = B_TRUE;
	mutex_exit(&spa_spare_lock);
}

void
spa_spare_remove(vdev_t *vd)
{
	mutex_enter(&spa_spare_lock);
	ASSERT(vd->vdev_isspare);
	spa_aux_remove(vd, &spa_spare_avl);
	vd->vdev_isspare = B_FALSE;
	mutex_exit(&spa_spare_lock);
}

boolean_t
spa_spare_exists(uint64_t guid, uint64_t *pool, int *refcnt)
{
	boolean_t found;

	mutex_enter(&spa_spare_lock);
	found = spa_aux_exists(guid, pool, refcnt, &spa_spare_avl);
	mutex_exit(&spa_spare_lock);

	return (found);
}

void
spa_spare_activate(vdev_t *vd)
{
	mutex_enter(&spa_spare_lock);
	ASSERT(vd->vdev_isspare);
	spa_aux_activate(vd, &spa_spare_avl);
	mutex_exit(&spa_spare_lock);
}

/*
 * Level 2 ARC devices are tracked globally for the same reasons as spares.
 * Cache devices currently only support one pool per cache device, and so
 * for these devices the aux reference count is currently unused beyond 1.
 */

static int
spa_l2cache_compare(const void *a, const void *b)
{
	return (spa_aux_compare(a, b));
}

void
spa_l2cache_add(vdev_t *vd)
{
	mutex_enter(&spa_l2cache_lock);
	ASSERT(!vd->vdev_isl2cache);
	spa_aux_add(vd, &spa_l2cache_avl);
	vd->vdev_isl2cache = B_TRUE;
	mutex_exit(&spa_l2cache_lock);
}

void
spa_l2cache_remove(vdev_t *vd)
{
	mutex_enter(&spa_l2cache_lock);
	ASSERT(vd->vdev_isl2cache);
	spa_aux_remove(vd, &spa_l2cache_avl);
	vd->vdev_isl2cache = B_FALSE;
	mutex_exit(&spa_l2cache_lock);
}

boolean_t
spa_l2cache_exists(uint64_t guid, uint64_t *pool)
{
	boolean_t found;

	mutex_enter(&spa_l2cache_lock);
	found = spa_aux_exists(guid, pool, NULL, &spa_l2cache_avl);
	mutex_exit(&spa_l2cache_lock);

	return (found);
}

void
spa_l2cache_activate(vdev_t *vd)
{
	mutex_enter(&spa_l2cache_lock);
	ASSERT(vd->vdev_isl2cache);
	spa_aux_activate(vd, &spa_l2cache_avl);
	mutex_exit(&spa_l2cache_lock);
}

/*
 * ==========================================================================
 * SPA vdev locking
 * ==========================================================================
 */

/*
 * Lock the given spa_t for the purpose of adding or removing a vdev.
 * Grabs the global spa_namespace_lock plus the spa config lock for writing.
 * It returns the next transaction group for the spa_t.
 */
uint64_t
spa_vdev_enter(spa_t *spa)
{
	mutex_enter(&spa->spa_vdev_top_lock);
	mutex_enter(&spa_namespace_lock);

	vdev_autotrim_stop_all(spa);

	return (spa_vdev_config_enter(spa));
}

/*
 * The same as spa_vdev_enter() above but additionally takes the guid of
 * the vdev being detached.  When there is a rebuild in process it will be
 * suspended while the vdev tree is modified then resumed by spa_vdev_exit().
 * The rebuild is canceled if only a single child remains after the detach.
 */
uint64_t
spa_vdev_detach_enter(spa_t *spa, uint64_t guid)
{
	mutex_enter(&spa->spa_vdev_top_lock);
	mutex_enter(&spa_namespace_lock);

	vdev_autotrim_stop_all(spa);

	if (guid != 0) {
		vdev_t *vd = spa_lookup_by_guid(spa, guid, B_FALSE);
		if (vd) {
			vdev_rebuild_stop_wait(vd->vdev_top);
		}
	}

	return (spa_vdev_config_enter(spa));
}

/*
 * Internal implementation for spa_vdev_enter().  Used when a vdev
 * operation requires multiple syncs (i.e. removing a device) while
 * keeping the spa_namespace_lock held.
 */
uint64_t
spa_vdev_config_enter(spa_t *spa)
{
	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	spa_config_enter(spa, SCL_ALL, spa, RW_WRITER);

	return (spa_last_synced_txg(spa) + 1);
}

/*
 * Used in combination with spa_vdev_config_enter() to allow the syncing
 * of multiple transactions without releasing the spa_namespace_lock.
 */
void
spa_vdev_config_exit(spa_t *spa, vdev_t *vd, uint64_t txg, int error, char *tag)
{
	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	int config_changed = B_FALSE;

	ASSERT(txg > spa_last_synced_txg(spa));

	spa->spa_pending_vdev = NULL;

	/*
	 * Reassess the DTLs.
	 */
	vdev_dtl_reassess(spa->spa_root_vdev, 0, 0, B_FALSE, B_FALSE);

	if (error == 0 && !list_is_empty(&spa->spa_config_dirty_list)) {
		config_changed = B_TRUE;
		spa->spa_config_generation++;
	}

	/*
	 * Verify the metaslab classes.
	 */
	ASSERT(metaslab_class_validate(spa_normal_class(spa)) == 0);
	ASSERT(metaslab_class_validate(spa_log_class(spa)) == 0);
	ASSERT(metaslab_class_validate(spa_embedded_log_class(spa)) == 0);
	ASSERT(metaslab_class_validate(spa_special_class(spa)) == 0);
	ASSERT(metaslab_class_validate(spa_dedup_class(spa)) == 0);

	spa_config_exit(spa, SCL_ALL, spa);

	/*
	 * Panic the system if the specified tag requires it.  This
	 * is useful for ensuring that configurations are updated
	 * transactionally.
	 */
	if (zio_injection_enabled)
		zio_handle_panic_injection(spa, tag, 0);

	/*
	 * Note: this txg_wait_synced() is important because it ensures
	 * that there won't be more than one config change per txg.
	 * This allows us to use the txg as the generation number.
	 */
	if (error == 0)
		txg_wait_synced(spa->spa_dsl_pool, txg);

	if (vd != NULL) {
		ASSERT(!vd->vdev_detached || vd->vdev_dtl_sm == NULL);
		if (vd->vdev_ops->vdev_op_leaf) {
			mutex_enter(&vd->vdev_initialize_lock);
			vdev_initialize_stop(vd, VDEV_INITIALIZE_CANCELED,
			    NULL);
			mutex_exit(&vd->vdev_initialize_lock);

			mutex_enter(&vd->vdev_trim_lock);
			vdev_trim_stop(vd, VDEV_TRIM_CANCELED, NULL);
			mutex_exit(&vd->vdev_trim_lock);
		}

		/*
		 * The vdev may be both a leaf and top-level device.
		 */
		vdev_autotrim_stop_wait(vd);

		spa_config_enter(spa, SCL_STATE_ALL, spa, RW_WRITER);
		vdev_free(vd);
		spa_config_exit(spa, SCL_STATE_ALL, spa);
	}

	/*
	 * If the config changed, update the config cache.
	 */
	if (config_changed)
		spa_write_cachefile(spa, B_FALSE, B_TRUE);
}

/*
 * Unlock the spa_t after adding or removing a vdev.  Besides undoing the
 * locking of spa_vdev_enter(), we also want to make sure the transactions have
 * synced to disk, and then update the global configuration cache with the new
 * information.
 */
int
spa_vdev_exit(spa_t *spa, vdev_t *vd, uint64_t txg, int error)
{
	vdev_autotrim_restart(spa);
	vdev_rebuild_restart(spa);

	spa_vdev_config_exit(spa, vd, txg, error, FTAG);
	mutex_exit(&spa_namespace_lock);
	mutex_exit(&spa->spa_vdev_top_lock);

	return (error);
}
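
/*
 * Editorial sketch of the canonical usage of this locking pair (the
 * vdev add/attach/detach paths in spa.c follow this shape):
 *
 *	txg = spa_vdev_enter(spa);
 *	error = ...modify the vdev tree...;
 *	return (spa_vdev_exit(spa, vd_to_free_or_NULL, txg, error));
 */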

/*
 * Lock the given spa_t for the purpose of changing vdev state.
 */
void
spa_vdev_state_enter(spa_t *spa, int oplocks)
{
	int locks = SCL_STATE_ALL | oplocks;

	/*
	 * Root pools may need to read from the underlying devfs filesystem
	 * when opening up a vdev.  Unfortunately if we're holding the
	 * SCL_ZIO lock it will result in a deadlock when we try to issue
	 * the read from the root filesystem.  Instead we "prefetch"
	 * the associated vnodes that we need prior to opening the
	 * underlying devices and cache them so that we can prevent
	 * any I/O when we are doing the actual open.
	 */
	if (spa_is_root(spa)) {
		int low = locks & ~(SCL_ZIO - 1);
		int high = locks & ~low;

		spa_config_enter(spa, high, spa, RW_WRITER);
		vdev_hold(spa->spa_root_vdev);
		spa_config_enter(spa, low, spa, RW_WRITER);
	} else {
		spa_config_enter(spa, locks, spa, RW_WRITER);
	}
	spa->spa_vdev_locks = locks;
}

int
spa_vdev_state_exit(spa_t *spa, vdev_t *vd, int error)
{
	boolean_t config_changed = B_FALSE;
	vdev_t *vdev_top;

	if (vd == NULL || vd == spa->spa_root_vdev) {
		vdev_top = spa->spa_root_vdev;
	} else {
		vdev_top = vd->vdev_top;
	}

	if (vd != NULL || error == 0)
		vdev_dtl_reassess(vdev_top, 0, 0, B_FALSE, B_FALSE);

	if (vd != NULL) {
		if (vd != spa->spa_root_vdev)
			vdev_state_dirty(vdev_top);

		config_changed = B_TRUE;
		spa->spa_config_generation++;
	}

	if (spa_is_root(spa))
		vdev_rele(spa->spa_root_vdev);

	ASSERT3U(spa->spa_vdev_locks, >=, SCL_STATE_ALL);
	spa_config_exit(spa, spa->spa_vdev_locks, spa);

	/*
	 * If anything changed, wait for it to sync.  This ensures that,
	 * from the system administrator's perspective, zpool(8) commands
	 * are synchronous.  This is important for things like zpool offline:
	 * when the command completes, you expect no further I/O from ZFS.
	 */
	if (vd != NULL)
		txg_wait_synced(spa->spa_dsl_pool, 0);

	/*
	 * If the config changed, update the config cache.
	 */
	if (config_changed) {
		mutex_enter(&spa_namespace_lock);
		spa_write_cachefile(spa, B_FALSE, B_TRUE);
		mutex_exit(&spa_namespace_lock);
	}

	return (error);
}
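
/*
 * Editorial sketch of the canonical usage of the state locking pair
 * (callers such as the vdev online/offline paths follow this shape):
 *
 *	spa_vdev_state_enter(spa, SCL_NONE);
 *	vd = spa_lookup_by_guid(spa, guid, B_TRUE);
 *	if (vd == NULL)
 *		return (spa_vdev_state_exit(spa, NULL, SET_ERROR(ENODEV)));
 *	...change the vdev's state...
 *	return (spa_vdev_state_exit(spa, vd, 0));
 */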

/*
 * ==========================================================================
 * Miscellaneous functions
 * ==========================================================================
 */

void
spa_activate_mos_feature(spa_t *spa, const char *feature, dmu_tx_t *tx)
{
	if (!nvlist_exists(spa->spa_label_features, feature)) {
		fnvlist_add_boolean(spa->spa_label_features, feature);
		/*
		 * When we are creating the pool (tx_txg==TXG_INITIAL), we can't
		 * dirty the vdev config because the SCL_CONFIG lock is not held.
		 * Thankfully, in this case we don't need to dirty the config
		 * because it will be written out anyway when we finish
		 * creating the pool.
		 */
		if (tx->tx_txg != TXG_INITIAL)
			vdev_config_dirty(spa->spa_root_vdev);
	}
}

void
spa_deactivate_mos_feature(spa_t *spa, const char *feature)
{
	if (nvlist_remove_all(spa->spa_label_features, feature) == 0)
		vdev_config_dirty(spa->spa_root_vdev);
}

/*
 * Return the spa_t associated with given pool_guid, if it exists.  If
 * device_guid is non-zero, determine whether the pool exists *and* contains
 * a device with the specified device_guid.
 */
spa_t *
spa_by_guid(uint64_t pool_guid, uint64_t device_guid)
{
	spa_t *spa;
	avl_tree_t *t = &spa_namespace_avl;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	for (spa = avl_first(t); spa != NULL; spa = AVL_NEXT(t, spa)) {
		if (spa->spa_state == POOL_STATE_UNINITIALIZED)
			continue;
		if (spa->spa_root_vdev == NULL)
			continue;
		if (spa_guid(spa) == pool_guid) {
			if (device_guid == 0)
				break;

			if (vdev_lookup_by_guid(spa->spa_root_vdev,
			    device_guid) != NULL)
				break;

			/*
			 * Check any devices we may be in the process of adding.
			 */
			if (spa->spa_pending_vdev) {
				if (vdev_lookup_by_guid(spa->spa_pending_vdev,
				    device_guid) != NULL)
					break;
			}
		}
	}

	return (spa);
}

/*
 * Determine whether a pool with the given pool_guid exists.
 */
boolean_t
spa_guid_exists(uint64_t pool_guid, uint64_t device_guid)
{
	return (spa_by_guid(pool_guid, device_guid) != NULL);
}

char *
spa_strdup(const char *s)
{
	size_t len;
	char *new;

	len = strlen(s);
	new = kmem_alloc(len + 1, KM_SLEEP);
	bcopy(s, new, len);
	new[len] = '\0';

	return (new);
}

void
spa_strfree(char *s)
{
	kmem_free(s, strlen(s) + 1);
}

uint64_t
spa_generate_guid(spa_t *spa)
{
	uint64_t guid;

	if (spa != NULL) {
		do {
			(void) random_get_pseudo_bytes((void *)&guid,
			    sizeof (guid));
		} while (guid == 0 || spa_guid_exists(spa_guid(spa), guid));
	} else {
		do {
			(void) random_get_pseudo_bytes((void *)&guid,
			    sizeof (guid));
		} while (guid == 0 || spa_guid_exists(guid, 0));
	}

	return (guid);
}

void
snprintf_blkptr(char *buf, size_t buflen, const blkptr_t *bp)
{
	char type[256];
	char *checksum = NULL;
	char *compress = NULL;

	if (bp != NULL) {
		if (BP_GET_TYPE(bp) & DMU_OT_NEWTYPE) {
			dmu_object_byteswap_t bswap =
			    DMU_OT_BYTESWAP(BP_GET_TYPE(bp));
			(void) snprintf(type, sizeof (type), "bswap %s %s",
			    DMU_OT_IS_METADATA(BP_GET_TYPE(bp)) ?
			    "metadata" : "data",
			    dmu_ot_byteswap[bswap].ob_name);
		} else {
			(void) strlcpy(type, dmu_ot[BP_GET_TYPE(bp)].ot_name,
			    sizeof (type));
		}
		if (!BP_IS_EMBEDDED(bp)) {
			checksum =
			    zio_checksum_table[BP_GET_CHECKSUM(bp)].ci_name;
		}
		compress = zio_compress_table[BP_GET_COMPRESS(bp)].ci_name;
	}

	SNPRINTF_BLKPTR(snprintf, ' ', buf, buflen, bp, type, checksum,
	    compress);
}

void
spa_freeze(spa_t *spa)
{
	uint64_t freeze_txg = 0;

	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
	if (spa->spa_freeze_txg == UINT64_MAX) {
		freeze_txg = spa_last_synced_txg(spa) + TXG_SIZE;
		spa->spa_freeze_txg = freeze_txg;
	}
	spa_config_exit(spa, SCL_ALL, FTAG);
	if (freeze_txg != 0)
		txg_wait_synced(spa_get_dsl(spa), freeze_txg);
}

void
zfs_panic_recover(const char *fmt, ...)
{
	va_list adx;

	va_start(adx, fmt);
	vcmn_err(zfs_recover ? CE_WARN : CE_PANIC, fmt, adx);
	va_end(adx);
}

/*
 * This is a stripped-down version of strtoull, suitable only for converting
 * lowercase hexadecimal numbers that don't overflow.
 */
uint64_t
zfs_strtonum(const char *str, char **nptr)
{
	uint64_t val = 0;
	char c;
	int digit;

	while ((c = *str) != '\0') {
		if (c >= '0' && c <= '9')
			digit = c - '0';
		else if (c >= 'a' && c <= 'f')
			digit = 10 + c - 'a';
		else
			break;

		val *= 16;
		val += digit;

		str++;
	}

	if (nptr)
		*nptr = (char *)str;

	return (val);
}
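
/*
 * For example (editorial): zfs_strtonum("1a2b", &end) returns 0x1a2b
 * with end pointing at the terminating NUL, while zfs_strtonum("10x",
 * &end) returns 0x10 with end pointing at 'x'.
 */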

void
spa_activate_allocation_classes(spa_t *spa, dmu_tx_t *tx)
{
	/*
	 * We bump the feature refcount for each special vdev added to the pool
	 */
	ASSERT(spa_feature_is_enabled(spa, SPA_FEATURE_ALLOCATION_CLASSES));
	spa_feature_incr(spa, SPA_FEATURE_ALLOCATION_CLASSES, tx);
}

/*
 * ==========================================================================
 * Accessor functions
 * ==========================================================================
 */

boolean_t
spa_shutting_down(spa_t *spa)
{
	return (spa->spa_async_suspended);
}

dsl_pool_t *
spa_get_dsl(spa_t *spa)
{
	return (spa->spa_dsl_pool);
}

boolean_t
spa_is_initializing(spa_t *spa)
{
	return (spa->spa_is_initializing);
}

boolean_t
spa_indirect_vdevs_loaded(spa_t *spa)
{
	return (spa->spa_indirect_vdevs_loaded);
}

blkptr_t *
spa_get_rootblkptr(spa_t *spa)
{
	return (&spa->spa_ubsync.ub_rootbp);
}

void
spa_set_rootblkptr(spa_t *spa, const blkptr_t *bp)
{
	spa->spa_uberblock.ub_rootbp = *bp;
}

void
spa_altroot(spa_t *spa, char *buf, size_t buflen)
{
	if (spa->spa_root == NULL)
		buf[0] = '\0';
	else
		(void) strlcpy(buf, spa->spa_root, buflen);
1662 }
1663 
1664 int
1665 spa_sync_pass(spa_t *spa)
1666 {
1667 	return (spa->spa_sync_pass);
1668 }
1669 
1670 char *
1671 spa_name(spa_t *spa)
1672 {
1673 	return (spa->spa_name);
1674 }
1675 
1676 uint64_t
1677 spa_guid(spa_t *spa)
1678 {
1679 	dsl_pool_t *dp = spa_get_dsl(spa);
1680 	uint64_t guid;
1681 
1682 	/*
1683 	 * If we fail to parse the config during spa_load(), we can go through
1684 	 * the error path (which posts an ereport) and end up here with no root
1685 	 * vdev.  We stash the original pool guid in 'spa_config_guid' to handle
1686 	 * this case.
1687 	 */
1688 	if (spa->spa_root_vdev == NULL)
1689 		return (spa->spa_config_guid);
1690 
1691 	guid = spa->spa_last_synced_guid != 0 ?
1692 	    spa->spa_last_synced_guid : spa->spa_root_vdev->vdev_guid;
1693 
1694 	/*
1695 	 * Return the most recently synced out guid unless we're
1696 	 * in syncing context.
1697 	 */
1698 	if (dp && dsl_pool_sync_context(dp))
1699 		return (spa->spa_root_vdev->vdev_guid);
1700 	else
1701 		return (guid);
1702 }
1703 
1704 uint64_t
1705 spa_load_guid(spa_t *spa)
1706 {
1707 	/*
1708 	 * This is a GUID that exists solely as a reference for the
1709 	 * purposes of the arc.  It is generated at load time, and
1710 	 * is never written to persistent storage.
1711 	 */
1712 	return (spa->spa_load_guid);
1713 }
1714 
1715 uint64_t
1716 spa_last_synced_txg(spa_t *spa)
1717 {
1718 	return (spa->spa_ubsync.ub_txg);
1719 }
1720 
1721 uint64_t
1722 spa_first_txg(spa_t *spa)
1723 {
1724 	return (spa->spa_first_txg);
1725 }
1726 
1727 uint64_t
1728 spa_syncing_txg(spa_t *spa)
1729 {
1730 	return (spa->spa_syncing_txg);
1731 }
1732 
1733 /*
1734  * Return the last txg where data can be dirtied. The final txgs
1735  * are used just to clear out any deferred frees that remain.
1736  */
1737 uint64_t
1738 spa_final_dirty_txg(spa_t *spa)
1739 {
1740 	return (spa->spa_final_txg - TXG_DEFER_SIZE);
1741 }
1742 
1743 pool_state_t
1744 spa_state(spa_t *spa)
1745 {
1746 	return (spa->spa_state);
1747 }
1748 
1749 spa_load_state_t
1750 spa_load_state(spa_t *spa)
1751 {
1752 	return (spa->spa_load_state);
1753 }
1754 
1755 uint64_t
1756 spa_freeze_txg(spa_t *spa)
1757 {
1758 	return (spa->spa_freeze_txg);
1759 }
1760 
1761 /*
1762  * Return the inflated asize for a logical write in bytes. This is used by the
1763  * DMU to calculate the space a logical write will require on disk.
1764  * If lsize is smaller than the largest minimum allocation size on this
1765  * pool (1 << spa_max_ashift), we use that value instead, since the write
1766  * will end up consuming at least one whole block anyway.
1767  */
1768 uint64_t
1769 spa_get_worst_case_asize(spa_t *spa, uint64_t lsize)
1770 {
1771 	if (lsize == 0)
1772 		return (0);	/* No inflation needed */
1773 	return (MAX(lsize, 1 << spa->spa_max_ashift) * spa_asize_inflation);
1774 }
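
/*
 * Worked example (editor's sketch): with spa_max_ashift == 12 (4 KiB
 * allocations on the widest-ashift vdev) and the default
 * spa_asize_inflation of 24, a 512-byte logical write is charged
 * MAX(512, 1 << 12) * 24 = 98304 bytes of worst-case asize, since it
 * cannot occupy less than one 4 KiB block on that vdev.
 */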
1775 
1776 /*
1777  * Return the amount of slop space in bytes.  It is typically 1/32 of the pool
1778  * (3.2%), minus the embedded log space.  On very small pools, it may be
1779  * slightly larger than this.  On very large pools, it will be capped to
1780  * the value of spa_max_slop.  The embedded log space is not included in
1781  * spa_dspace.  By subtracting it, the usable space (per "zfs list") is a
1782  * constant 97% of the total space, regardless of metaslab size (assuming the
1783  * default spa_slop_shift=5 and a non-tiny pool).
1784  *
1785  * See the comment above spa_slop_shift for more details.
1786  */
1787 uint64_t
1788 spa_get_slop_space(spa_t *spa)
1789 {
1790 	uint64_t space = spa_get_dspace(spa);
1791 	uint64_t slop = MIN(space >> spa_slop_shift, spa_max_slop);
1792 
1793 	/*
1794 	 * Subtract the embedded log space, but no more than half the (3.2%)
1795 	 * unusable space.  Note, the "no more than half" is only relevant if
1796 	 * zfs_embedded_slog_min_ms >> spa_slop_shift < 2, which is not true by
1797 	 * default.
1798 	 */
1799 	uint64_t embedded_log =
1800 	    metaslab_class_get_dspace(spa_embedded_log_class(spa));
1801 	slop -= MIN(embedded_log, slop >> 1);
1802 
1803 	/*
1804 	 * Slop space should be at least spa_min_slop, but no more than half
1805 	 * the entire pool.
1806 	 */
1807 	slop = MAX(slop, MIN(space >> 1, spa_min_slop));
1808 	return (slop);
1809 }
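
/*
 * Worked example (editor's sketch): on a 1 TiB pool with the default
 * spa_slop_shift of 5, slop starts at 1 TiB >> 5 = 32 GiB.  If the
 * embedded log class holds 10 GiB of dspace, slop drops to 22 GiB;
 * the subtraction is capped at half the slop (16 GiB here), and the
 * final value is clamped to at least spa_min_slop.
 */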
1810 
1811 uint64_t
1812 spa_get_dspace(spa_t *spa)
1813 {
1814 	return (spa->spa_dspace);
1815 }
1816 
1817 uint64_t
1818 spa_get_checkpoint_space(spa_t *spa)
1819 {
1820 	return (spa->spa_checkpoint_info.sci_dspace);
1821 }
1822 
1823 void
1824 spa_update_dspace(spa_t *spa)
1825 {
1826 	spa->spa_dspace = metaslab_class_get_dspace(spa_normal_class(spa)) +
1827 	    ddt_get_dedup_dspace(spa);
1828 	if (spa->spa_vdev_removal != NULL) {
1829 		/*
1830 		 * We can't allocate from the removing device, so subtract
1831 		 * its size if it was included in dspace (i.e. if this is a
1832 		 * normal-class vdev, not special/dedup).  This prevents the
1833 		 * DMU/DSL from filling up the (now smaller) pool while we
1834 		 * are in the middle of removing the device.
1835 		 *
1836 		 * Note that the DMU/DSL doesn't actually know or care
1837 		 * how much space is allocated (it does its own tracking
1838 		 * of how much space has been logically used).  So it
1839 		 * doesn't matter that the data we are moving may be
1840 		 * allocated twice (on the old device and the new
1841 		 * device).
1842 		 */
1843 		spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
1844 		vdev_t *vd =
1845 		    vdev_lookup_top(spa, spa->spa_vdev_removal->svr_vdev_id);
1846 		if (vd->vdev_mg->mg_class == spa_normal_class(spa)) {
1847 			spa->spa_dspace -= spa_deflate(spa) ?
1848 			    vd->vdev_stat.vs_dspace : vd->vdev_stat.vs_space;
1849 		}
1850 		spa_config_exit(spa, SCL_VDEV, FTAG);
1851 	}
1852 }
1853 
1854 /*
1855  * Return the failure mode that has been set for this pool. The default
1856  * behavior is to block all I/Os when a complete failure occurs.
1857  */
1858 uint64_t
1859 spa_get_failmode(spa_t *spa)
1860 {
1861 	return (spa->spa_failmode);
1862 }
1863 
1864 boolean_t
1865 spa_suspended(spa_t *spa)
1866 {
1867 	return (spa->spa_suspended != ZIO_SUSPEND_NONE);
1868 }
1869 
1870 uint64_t
1871 spa_version(spa_t *spa)
1872 {
1873 	return (spa->spa_ubsync.ub_version);
1874 }
1875 
1876 boolean_t
1877 spa_deflate(spa_t *spa)
1878 {
1879 	return (spa->spa_deflate);
1880 }
1881 
1882 metaslab_class_t *
1883 spa_normal_class(spa_t *spa)
1884 {
1885 	return (spa->spa_normal_class);
1886 }
1887 
1888 metaslab_class_t *
1889 spa_log_class(spa_t *spa)
1890 {
1891 	return (spa->spa_log_class);
1892 }
1893 
1894 metaslab_class_t *
1895 spa_embedded_log_class(spa_t *spa)
1896 {
1897 	return (spa->spa_embedded_log_class);
1898 }
1899 
1900 metaslab_class_t *
1901 spa_special_class(spa_t *spa)
1902 {
1903 	return (spa->spa_special_class);
1904 }
1905 
1906 metaslab_class_t *
1907 spa_dedup_class(spa_t *spa)
1908 {
1909 	return (spa->spa_dedup_class);
1910 }
1911 
1912 /*
1913  * Locate an appropriate allocation class
1914  */
1915 metaslab_class_t *
1916 spa_preferred_class(spa_t *spa, uint64_t size, dmu_object_type_t objtype,
1917     uint_t level, uint_t special_smallblk)
1918 {
1919 	/*
1920 	 * ZIL allocations determine their class in zio_alloc_zil().
1921 	 */
1922 	ASSERT(objtype != DMU_OT_INTENT_LOG);
1923 
1924 	boolean_t has_special_class = spa->spa_special_class->mc_groups != 0;
1925 
1926 	if (DMU_OT_IS_DDT(objtype)) {
1927 		if (spa->spa_dedup_class->mc_groups != 0)
1928 			return (spa_dedup_class(spa));
1929 		else if (has_special_class && zfs_ddt_data_is_special)
1930 			return (spa_special_class(spa));
1931 		else
1932 			return (spa_normal_class(spa));
1933 	}
1934 
1935 	/* Indirect blocks for user data can land in special if allowed */
1936 	if (level > 0 && (DMU_OT_IS_FILE(objtype) || objtype == DMU_OT_ZVOL)) {
1937 		if (has_special_class && zfs_user_indirect_is_special)
1938 			return (spa_special_class(spa));
1939 		else
1940 			return (spa_normal_class(spa));
1941 	}
1942 
1943 	if (DMU_OT_IS_METADATA(objtype) || level > 0) {
1944 		if (has_special_class)
1945 			return (spa_special_class(spa));
1946 		else
1947 			return (spa_normal_class(spa));
1948 	}
1949 
1950 	/*
1951 	 * Allow small file blocks in special class in some cases (like
1952 	 * for the dRAID vdev feature). But always leave a reserve of
1953 	 * zfs_special_class_metadata_reserve_pct exclusively for metadata.
1954 	 */
1955 	if (DMU_OT_IS_FILE(objtype) &&
1956 	    has_special_class && size <= special_smallblk) {
1957 		metaslab_class_t *special = spa_special_class(spa);
1958 		uint64_t alloc = metaslab_class_get_alloc(special);
1959 		uint64_t space = metaslab_class_get_space(special);
1960 		uint64_t limit =
1961 		    (space * (100 - zfs_special_class_metadata_reserve_pct))
1962 		    / 100;
1963 
1964 		if (alloc < limit)
1965 			return (special);
1966 	}
1967 
1968 	return (spa_normal_class(spa));
1969 }
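
/*
 * Example decision (editor's sketch): a level-0 16 KiB file block on a
 * dataset with special_small_blocks=32K (special_smallblk == 32 KiB) is
 * placed in the special class, provided one exists and its allocations
 * are still below the zfs_special_class_metadata_reserve_pct limit
 * computed above; otherwise it falls through to the normal class.
 */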
1970 
1971 void
1972 spa_evicting_os_register(spa_t *spa, objset_t *os)
1973 {
1974 	mutex_enter(&spa->spa_evicting_os_lock);
1975 	list_insert_head(&spa->spa_evicting_os_list, os);
1976 	mutex_exit(&spa->spa_evicting_os_lock);
1977 }
1978 
1979 void
1980 spa_evicting_os_deregister(spa_t *spa, objset_t *os)
1981 {
1982 	mutex_enter(&spa->spa_evicting_os_lock);
1983 	list_remove(&spa->spa_evicting_os_list, os);
1984 	cv_broadcast(&spa->spa_evicting_os_cv);
1985 	mutex_exit(&spa->spa_evicting_os_lock);
1986 }
1987 
1988 void
1989 spa_evicting_os_wait(spa_t *spa)
1990 {
1991 	mutex_enter(&spa->spa_evicting_os_lock);
1992 	while (!list_is_empty(&spa->spa_evicting_os_list))
1993 		cv_wait(&spa->spa_evicting_os_cv, &spa->spa_evicting_os_lock);
1994 	mutex_exit(&spa->spa_evicting_os_lock);
1995 
1996 	dmu_buf_user_evict_wait();
1997 }
1998 
1999 int
2000 spa_max_replication(spa_t *spa)
2001 {
2002 	/*
2003 	 * As of SPA_VERSION == SPA_VERSION_DITTO_BLOCKS, we are able to
2004 	 * handle BPs with more than one DVA allocated.  Set our max
2005 	 * replication level accordingly.
2006 	 */
2007 	if (spa_version(spa) < SPA_VERSION_DITTO_BLOCKS)
2008 		return (1);
2009 	return (MIN(SPA_DVAS_PER_BP, spa_max_replication_override));
2010 }
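
/*
 * Worked example (editor's sketch): with SPA_DVAS_PER_BP == 3 and the
 * default spa_max_replication_override of 3, a pool at or above
 * SPA_VERSION_DITTO_BLOCKS allows up to three DVAs per block pointer
 * (e.g. copies=3); older pools are limited to a single DVA.
 */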
2011 
2012 int
2013 spa_prev_software_version(spa_t *spa)
2014 {
2015 	return (spa->spa_prev_software_version);
2016 }
2017 
2018 uint64_t
2019 spa_deadman_synctime(spa_t *spa)
2020 {
2021 	return (spa->spa_deadman_synctime);
2022 }
2023 
2024 spa_autotrim_t
2025 spa_get_autotrim(spa_t *spa)
2026 {
2027 	return (spa->spa_autotrim);
2028 }
2029 
2030 uint64_t
2031 spa_deadman_ziotime(spa_t *spa)
2032 {
2033 	return (spa->spa_deadman_ziotime);
2034 }
2035 
2036 uint64_t
2037 spa_get_deadman_failmode(spa_t *spa)
2038 {
2039 	return (spa->spa_deadman_failmode);
2040 }
2041 
2042 void
2043 spa_set_deadman_failmode(spa_t *spa, const char *failmode)
2044 {
2045 	if (strcmp(failmode, "wait") == 0)
2046 		spa->spa_deadman_failmode = ZIO_FAILURE_MODE_WAIT;
2047 	else if (strcmp(failmode, "continue") == 0)
2048 		spa->spa_deadman_failmode = ZIO_FAILURE_MODE_CONTINUE;
2049 	else if (strcmp(failmode, "panic") == 0)
2050 		spa->spa_deadman_failmode = ZIO_FAILURE_MODE_PANIC;
2051 	else
2052 		spa->spa_deadman_failmode = ZIO_FAILURE_MODE_WAIT;
2053 }
2054 
2055 void
2056 spa_set_deadman_ziotime(hrtime_t ns)
2057 {
2058 	spa_t *spa = NULL;
2059 
2060 	if (spa_mode_global != SPA_MODE_UNINIT) {
2061 		mutex_enter(&spa_namespace_lock);
2062 		while ((spa = spa_next(spa)) != NULL)
2063 			spa->spa_deadman_ziotime = ns;
2064 		mutex_exit(&spa_namespace_lock);
2065 	}
2066 }
2067 
2068 void
2069 spa_set_deadman_synctime(hrtime_t ns)
2070 {
2071 	spa_t *spa = NULL;
2072 
2073 	if (spa_mode_global != SPA_MODE_UNINIT) {
2074 		mutex_enter(&spa_namespace_lock);
2075 		while ((spa = spa_next(spa)) != NULL)
2076 			spa->spa_deadman_synctime = ns;
2077 		mutex_exit(&spa_namespace_lock);
2078 	}
2079 }
2080 
2081 uint64_t
2082 dva_get_dsize_sync(spa_t *spa, const dva_t *dva)
2083 {
2084 	uint64_t asize = DVA_GET_ASIZE(dva);
2085 	uint64_t dsize = asize;
2086 
2087 	ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0);
2088 
2089 	if (asize != 0 && spa->spa_deflate) {
2090 		vdev_t *vd = vdev_lookup_top(spa, DVA_GET_VDEV(dva));
2091 		if (vd != NULL)
2092 			dsize = (asize >> SPA_MINBLOCKSHIFT) *
2093 			    vd->vdev_deflate_ratio;
2094 	}
2095 
2096 	return (dsize);
2097 }
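
/*
 * Editor's note (illustrative): vdev_deflate_ratio is precomputed per
 * top-level vdev so that redundancy overhead (e.g. raidz parity) is
 * discounted consistently; the deflated size is
 *
 *	dsize = (asize >> SPA_MINBLOCKSHIFT) * vdev_deflate_ratio
 *
 * which rescales the raw allocated size into the units used for
 * pool-wide space accounting.
 */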
2098 
2099 uint64_t
2100 bp_get_dsize_sync(spa_t *spa, const blkptr_t *bp)
2101 {
2102 	uint64_t dsize = 0;
2103 
2104 	for (int d = 0; d < BP_GET_NDVAS(bp); d++)
2105 		dsize += dva_get_dsize_sync(spa, &bp->blk_dva[d]);
2106 
2107 	return (dsize);
2108 }
2109 
2110 uint64_t
2111 bp_get_dsize(spa_t *spa, const blkptr_t *bp)
2112 {
2113 	uint64_t dsize = 0;
2114 
2115 	spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
2116 
2117 	for (int d = 0; d < BP_GET_NDVAS(bp); d++)
2118 		dsize += dva_get_dsize_sync(spa, &bp->blk_dva[d]);
2119 
2120 	spa_config_exit(spa, SCL_VDEV, FTAG);
2121 
2122 	return (dsize);
2123 }
2124 
2125 uint64_t
2126 spa_dirty_data(spa_t *spa)
2127 {
2128 	return (spa->spa_dsl_pool->dp_dirty_total);
2129 }
2130 
2131 /*
2132  * ==========================================================================
2133  * SPA Import Progress Routines
2134  * ==========================================================================
2135  */
2136 
2137 typedef struct spa_import_progress {
2138 	uint64_t		pool_guid;	/* unique id for updates */
2139 	char			*pool_name;
2140 	spa_load_state_t	spa_load_state;
2141 	uint64_t		mmp_sec_remaining;	/* MMP activity check */
2142 	uint64_t		spa_load_max_txg;	/* rewind txg */
2143 	procfs_list_node_t	smh_node;
2144 } spa_import_progress_t;
2145 
2146 spa_history_list_t *spa_import_progress_list = NULL;
2147 
2148 static int
2149 spa_import_progress_show_header(struct seq_file *f)
2150 {
2151 	seq_printf(f, "%-20s %-14s %-14s %-12s %s\n", "pool_guid",
2152 	    "load_state", "multihost_secs", "max_txg",
2153 	    "pool_name");
2154 	return (0);
2155 }
2156 
2157 static int
2158 spa_import_progress_show(struct seq_file *f, void *data)
2159 {
2160 	spa_import_progress_t *sip = (spa_import_progress_t *)data;
2161 
2162 	seq_printf(f, "%-20llu %-14llu %-14llu %-12llu %s\n",
2163 	    (u_longlong_t)sip->pool_guid, (u_longlong_t)sip->spa_load_state,
2164 	    (u_longlong_t)sip->mmp_sec_remaining,
2165 	    (u_longlong_t)sip->spa_load_max_txg,
2166 	    (sip->pool_name ? sip->pool_name : "-"));
2167 
2168 	return (0);
2169 }
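
/*
 * Example output line (editor's sketch, assuming the spa_load_state_t
 * ordering in which SPA_LOAD_IMPORT == 2):
 *
 *	pool_guid            load_state     multihost_secs max_txg      pool_name
 *	9413787189745885526  2              0              0            tank
 */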
2170 
2171 /* Remove oldest elements from list until there are no more than 'size' left */
2172 static void
2173 spa_import_progress_truncate(spa_history_list_t *shl, unsigned int size)
2174 {
2175 	spa_import_progress_t *sip;
2176 	while (shl->size > size) {
2177 		sip = list_remove_head(&shl->procfs_list.pl_list);
2178 		if (sip->pool_name)
2179 			spa_strfree(sip->pool_name);
2180 		kmem_free(sip, sizeof (spa_import_progress_t));
2181 		shl->size--;
2182 	}
2183 
2184 	IMPLY(size == 0, list_is_empty(&shl->procfs_list.pl_list));
2185 }
2186 
2187 static void
2188 spa_import_progress_init(void)
2189 {
2190 	spa_import_progress_list = kmem_zalloc(sizeof (spa_history_list_t),
2191 	    KM_SLEEP);
2192 
2193 	spa_import_progress_list->size = 0;
2194 
2195 	spa_import_progress_list->procfs_list.pl_private =
2196 	    spa_import_progress_list;
2197 
2198 	procfs_list_install("zfs",
2199 	    NULL,
2200 	    "import_progress",
2201 	    0644,
2202 	    &spa_import_progress_list->procfs_list,
2203 	    spa_import_progress_show,
2204 	    spa_import_progress_show_header,
2205 	    NULL,
2206 	    offsetof(spa_import_progress_t, smh_node));
2207 }
2208 
2209 static void
2210 spa_import_progress_destroy(void)
2211 {
2212 	spa_history_list_t *shl = spa_import_progress_list;
2213 	procfs_list_uninstall(&shl->procfs_list);
2214 	spa_import_progress_truncate(shl, 0);
2215 	procfs_list_destroy(&shl->procfs_list);
2216 	kmem_free(shl, sizeof (spa_history_list_t));
2217 }
2218 
2219 int
2220 spa_import_progress_set_state(uint64_t pool_guid,
2221     spa_load_state_t load_state)
2222 {
2223 	spa_history_list_t *shl = spa_import_progress_list;
2224 	spa_import_progress_t *sip;
2225 	int error = ENOENT;
2226 
2227 	if (shl->size == 0)
2228 		return (0);
2229 
2230 	mutex_enter(&shl->procfs_list.pl_lock);
2231 	for (sip = list_tail(&shl->procfs_list.pl_list); sip != NULL;
2232 	    sip = list_prev(&shl->procfs_list.pl_list, sip)) {
2233 		if (sip->pool_guid == pool_guid) {
2234 			sip->spa_load_state = load_state;
2235 			error = 0;
2236 			break;
2237 		}
2238 	}
2239 	mutex_exit(&shl->procfs_list.pl_lock);
2240 
2241 	return (error);
2242 }
2243 
2244 int
2245 spa_import_progress_set_max_txg(uint64_t pool_guid, uint64_t load_max_txg)
2246 {
2247 	spa_history_list_t *shl = spa_import_progress_list;
2248 	spa_import_progress_t *sip;
2249 	int error = ENOENT;
2250 
2251 	if (shl->size == 0)
2252 		return (0);
2253 
2254 	mutex_enter(&shl->procfs_list.pl_lock);
2255 	for (sip = list_tail(&shl->procfs_list.pl_list); sip != NULL;
2256 	    sip = list_prev(&shl->procfs_list.pl_list, sip)) {
2257 		if (sip->pool_guid == pool_guid) {
2258 			sip->spa_load_max_txg = load_max_txg;
2259 			error = 0;
2260 			break;
2261 		}
2262 	}
2263 	mutex_exit(&shl->procfs_list.pl_lock);
2264 
2265 	return (error);
2266 }
2267 
2268 int
2269 spa_import_progress_set_mmp_check(uint64_t pool_guid,
2270     uint64_t mmp_sec_remaining)
2271 {
2272 	spa_history_list_t *shl = spa_import_progress_list;
2273 	spa_import_progress_t *sip;
2274 	int error = ENOENT;
2275 
2276 	if (shl->size == 0)
2277 		return (0);
2278 
2279 	mutex_enter(&shl->procfs_list.pl_lock);
2280 	for (sip = list_tail(&shl->procfs_list.pl_list); sip != NULL;
2281 	    sip = list_prev(&shl->procfs_list.pl_list, sip)) {
2282 		if (sip->pool_guid == pool_guid) {
2283 			sip->mmp_sec_remaining = mmp_sec_remaining;
2284 			error = 0;
2285 			break;
2286 		}
2287 	}
2288 	mutex_exit(&shl->procfs_list.pl_lock);
2289 
2290 	return (error);
2291 }
2292 
2293 /*
2294  * A new import is in progress; add an entry.
2295  */
2296 void
2297 spa_import_progress_add(spa_t *spa)
2298 {
2299 	spa_history_list_t *shl = spa_import_progress_list;
2300 	spa_import_progress_t *sip;
2301 	char *poolname = NULL;
2302 
2303 	sip = kmem_zalloc(sizeof (spa_import_progress_t), KM_SLEEP);
2304 	sip->pool_guid = spa_guid(spa);
2305 
2306 	(void) nvlist_lookup_string(spa->spa_config, ZPOOL_CONFIG_POOL_NAME,
2307 	    &poolname);
2308 	if (poolname == NULL)
2309 		poolname = spa_name(spa);
2310 	sip->pool_name = spa_strdup(poolname);
2311 	sip->spa_load_state = spa_load_state(spa);
2312 
2313 	mutex_enter(&shl->procfs_list.pl_lock);
2314 	procfs_list_add(&shl->procfs_list, sip);
2315 	shl->size++;
2316 	mutex_exit(&shl->procfs_list.pl_lock);
2317 }
2318 
2319 void
2320 spa_import_progress_remove(uint64_t pool_guid)
2321 {
2322 	spa_history_list_t *shl = spa_import_progress_list;
2323 	spa_import_progress_t *sip;
2324 
2325 	mutex_enter(&shl->procfs_list.pl_lock);
2326 	for (sip = list_tail(&shl->procfs_list.pl_list); sip != NULL;
2327 	    sip = list_prev(&shl->procfs_list.pl_list, sip)) {
2328 		if (sip->pool_guid == pool_guid) {
2329 			if (sip->pool_name)
2330 				spa_strfree(sip->pool_name);
2331 			list_remove(&shl->procfs_list.pl_list, sip);
2332 			shl->size--;
2333 			kmem_free(sip, sizeof (spa_import_progress_t));
2334 			break;
2335 		}
2336 	}
2337 	mutex_exit(&shl->procfs_list.pl_lock);
2338 }
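
/*
 * Typical lifecycle (editor's sketch): an importing thread adds one
 * entry, updates it as the import advances, and removes it when done:
 *
 *	spa_import_progress_add(spa);
 *	(void) spa_import_progress_set_state(spa_guid(spa),
 *	    SPA_LOAD_IMPORT);
 *	...
 *	spa_import_progress_remove(spa_guid(spa));
 */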
2339 
2340 /*
2341  * ==========================================================================
2342  * Initialization and Termination
2343  * ==========================================================================
2344  */
2345 
2346 static int
2347 spa_name_compare(const void *a1, const void *a2)
2348 {
2349 	const spa_t *s1 = a1;
2350 	const spa_t *s2 = a2;
2351 	int s;
2352 
2353 	s = strcmp(s1->spa_name, s2->spa_name);
2354 
2355 	return (TREE_ISIGN(s));
2356 }
2357 
2358 void
2359 spa_boot_init(void)
2360 {
2361 	spa_config_load();
2362 }
2363 
2364 void
2365 spa_init(spa_mode_t mode)
2366 {
2367 	mutex_init(&spa_namespace_lock, NULL, MUTEX_DEFAULT, NULL);
2368 	mutex_init(&spa_spare_lock, NULL, MUTEX_DEFAULT, NULL);
2369 	mutex_init(&spa_l2cache_lock, NULL, MUTEX_DEFAULT, NULL);
2370 	cv_init(&spa_namespace_cv, NULL, CV_DEFAULT, NULL);
2371 
2372 	avl_create(&spa_namespace_avl, spa_name_compare, sizeof (spa_t),
2373 	    offsetof(spa_t, spa_avl));
2374 
2375 	avl_create(&spa_spare_avl, spa_spare_compare, sizeof (spa_aux_t),
2376 	    offsetof(spa_aux_t, aux_avl));
2377 
2378 	avl_create(&spa_l2cache_avl, spa_l2cache_compare, sizeof (spa_aux_t),
2379 	    offsetof(spa_aux_t, aux_avl));
2380 
2381 	spa_mode_global = mode;
2382 
2383 #ifndef _KERNEL
2384 	if (spa_mode_global != SPA_MODE_READ && dprintf_find_string("watch")) {
2385 		struct sigaction sa;
2386 
2387 		sa.sa_flags = SA_SIGINFO;
2388 		sigemptyset(&sa.sa_mask);
2389 		sa.sa_sigaction = arc_buf_sigsegv;
2390 
2391 		if (sigaction(SIGSEGV, &sa, NULL) == -1) {
2392 			perror("could not enable watchpoints: "
2393 			    "sigaction(SIGSEGV, ...) = ");
2394 		} else {
2395 			arc_watch = B_TRUE;
2396 		}
2397 	}
2398 #endif
2399 
2400 	fm_init();
2401 	zfs_refcount_init();
2402 	unique_init();
2403 	zfs_btree_init();
2404 	metaslab_stat_init();
2405 	ddt_init();
2406 	zio_init();
2407 	dmu_init();
2408 	zil_init();
2409 	vdev_cache_stat_init();
2410 	vdev_mirror_stat_init();
2411 	vdev_raidz_math_init();
2412 	vdev_file_init();
2413 	zfs_prop_init();
2414 	zpool_prop_init();
2415 	zpool_feature_init();
2416 	spa_config_load();
2417 	l2arc_start();
2418 	scan_init();
2419 	qat_init();
2420 	spa_import_progress_init();
2421 }
2422 
2423 void
2424 spa_fini(void)
2425 {
2426 	l2arc_stop();
2427 
2428 	spa_evict_all();
2429 
2430 	vdev_file_fini();
2431 	vdev_cache_stat_fini();
2432 	vdev_mirror_stat_fini();
2433 	vdev_raidz_math_fini();
2434 	zil_fini();
2435 	dmu_fini();
2436 	zio_fini();
2437 	ddt_fini();
2438 	metaslab_stat_fini();
2439 	zfs_btree_fini();
2440 	unique_fini();
2441 	zfs_refcount_fini();
2442 	fm_fini();
2443 	scan_fini();
2444 	qat_fini();
2445 	spa_import_progress_destroy();
2446 
2447 	avl_destroy(&spa_namespace_avl);
2448 	avl_destroy(&spa_spare_avl);
2449 	avl_destroy(&spa_l2cache_avl);
2450 
2451 	cv_destroy(&spa_namespace_cv);
2452 	mutex_destroy(&spa_namespace_lock);
2453 	mutex_destroy(&spa_spare_lock);
2454 	mutex_destroy(&spa_l2cache_lock);
2455 }
2456 
2457 /*
2458  * Return whether this pool has a dedicated slog device. No locking needed.
2459  * It's not a problem if the wrong answer is returned, as the result
2460  * affects only performance, not correctness.
2461  */
2462 boolean_t
2463 spa_has_slogs(spa_t *spa)
2464 {
2465 	return (spa->spa_log_class->mc_groups != 0);
2466 }
2467 
2468 spa_log_state_t
2469 spa_get_log_state(spa_t *spa)
2470 {
2471 	return (spa->spa_log_state);
2472 }
2473 
2474 void
2475 spa_set_log_state(spa_t *spa, spa_log_state_t state)
2476 {
2477 	spa->spa_log_state = state;
2478 }
2479 
2480 boolean_t
2481 spa_is_root(spa_t *spa)
2482 {
2483 	return (spa->spa_is_root);
2484 }
2485 
2486 boolean_t
2487 spa_writeable(spa_t *spa)
2488 {
2489 	return (!!(spa->spa_mode & SPA_MODE_WRITE) && spa->spa_trust_config);
2490 }
2491 
2492 /*
2493  * Returns true if there is a pending sync task in any of the current
2494  * syncing txg, the current quiescing txg, or the current open txg.
2495  */
2496 boolean_t
2497 spa_has_pending_synctask(spa_t *spa)
2498 {
2499 	return (!txg_all_lists_empty(&spa->spa_dsl_pool->dp_sync_tasks) ||
2500 	    !txg_all_lists_empty(&spa->spa_dsl_pool->dp_early_sync_tasks));
2501 }
2502 
2503 spa_mode_t
2504 spa_mode(spa_t *spa)
2505 {
2506 	return (spa->spa_mode);
2507 }
2508 
2509 uint64_t
2510 spa_bootfs(spa_t *spa)
2511 {
2512 	return (spa->spa_bootfs);
2513 }
2514 
2515 uint64_t
2516 spa_delegation(spa_t *spa)
2517 {
2518 	return (spa->spa_delegation);
2519 }
2520 
2521 objset_t *
2522 spa_meta_objset(spa_t *spa)
2523 {
2524 	return (spa->spa_meta_objset);
2525 }
2526 
2527 enum zio_checksum
2528 spa_dedup_checksum(spa_t *spa)
2529 {
2530 	return (spa->spa_dedup_checksum);
2531 }
2532 
2533 /*
2534  * Reset pool scan stats at the start of each scan pass (or after reboot).
2535  */
2536 void
2537 spa_scan_stat_init(spa_t *spa)
2538 {
2539 	/* data not stored on disk */
2540 	spa->spa_scan_pass_start = gethrestime_sec();
2541 	if (dsl_scan_is_paused_scrub(spa->spa_dsl_pool->dp_scan))
2542 		spa->spa_scan_pass_scrub_pause = spa->spa_scan_pass_start;
2543 	else
2544 		spa->spa_scan_pass_scrub_pause = 0;
2545 	spa->spa_scan_pass_scrub_spent_paused = 0;
2546 	spa->spa_scan_pass_exam = 0;
2547 	spa->spa_scan_pass_issued = 0;
2548 	vdev_scan_stat_init(spa->spa_root_vdev);
2549 }
2550 
2551 /*
2552  * Get scan stats for zpool status reports
2553  */
2554 int
2555 spa_scan_get_stats(spa_t *spa, pool_scan_stat_t *ps)
2556 {
2557 	dsl_scan_t *scn = spa->spa_dsl_pool ? spa->spa_dsl_pool->dp_scan : NULL;
2558 
2559 	if (scn == NULL || scn->scn_phys.scn_func == POOL_SCAN_NONE)
2560 		return (SET_ERROR(ENOENT));
2561 	bzero(ps, sizeof (pool_scan_stat_t));
2562 
2563 	/* data stored on disk */
2564 	ps->pss_func = scn->scn_phys.scn_func;
2565 	ps->pss_state = scn->scn_phys.scn_state;
2566 	ps->pss_start_time = scn->scn_phys.scn_start_time;
2567 	ps->pss_end_time = scn->scn_phys.scn_end_time;
2568 	ps->pss_to_examine = scn->scn_phys.scn_to_examine;
2569 	ps->pss_examined = scn->scn_phys.scn_examined;
2570 	ps->pss_to_process = scn->scn_phys.scn_to_process;
2571 	ps->pss_processed = scn->scn_phys.scn_processed;
2572 	ps->pss_errors = scn->scn_phys.scn_errors;
2573 
2574 	/* data not stored on disk */
2575 	ps->pss_pass_exam = spa->spa_scan_pass_exam;
2576 	ps->pss_pass_start = spa->spa_scan_pass_start;
2577 	ps->pss_pass_scrub_pause = spa->spa_scan_pass_scrub_pause;
2578 	ps->pss_pass_scrub_spent_paused = spa->spa_scan_pass_scrub_spent_paused;
2579 	ps->pss_pass_issued = spa->spa_scan_pass_issued;
2580 	ps->pss_issued =
2581 	    scn->scn_issued_before_pass + spa->spa_scan_pass_issued;
2582 
2583 	return (0);
2584 }
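
/*
 * Illustrative caller (editor's sketch): ENOENT simply means no scan
 * has ever run on this pool.
 *
 *	pool_scan_stat_t ps;
 *	if (spa_scan_get_stats(spa, &ps) == 0)
 *		(void) printf("examined %llu of %llu bytes\n",
 *		    (u_longlong_t)ps.pss_examined,
 *		    (u_longlong_t)ps.pss_to_examine);
 */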
2585 
2586 int
2587 spa_maxblocksize(spa_t *spa)
2588 {
2589 	if (spa_feature_is_enabled(spa, SPA_FEATURE_LARGE_BLOCKS))
2590 		return (SPA_MAXBLOCKSIZE);
2591 	else
2592 		return (SPA_OLD_MAXBLOCKSIZE);
2593 }
2594 
2595 
2596 /*
2597  * Returns the txg in which the last device removal completed. No indirect
2598  * mappings have been added since this txg.
2599  */
2600 uint64_t
2601 spa_get_last_removal_txg(spa_t *spa)
2602 {
2603 	uint64_t vdevid;
2604 	uint64_t ret = -1ULL;
2605 
2606 	spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
2607 	/*
2608 	 * sr_prev_indirect_vdev is only modified while holding all the
2609 	 * config locks, so it is sufficient to hold SCL_VDEV as reader when
2610 	 * examining it.
2611 	 */
2612 	vdevid = spa->spa_removing_phys.sr_prev_indirect_vdev;
2613 
2614 	while (vdevid != -1ULL) {
2615 		vdev_t *vd = vdev_lookup_top(spa, vdevid);
2616 		vdev_indirect_births_t *vib = vd->vdev_indirect_births;
2617 
2618 		ASSERT3P(vd->vdev_ops, ==, &vdev_indirect_ops);
2619 
2620 		/*
2621 		 * If the removal did not remap any data, we don't care.
2622 		 */
2623 		if (vdev_indirect_births_count(vib) != 0) {
2624 			ret = vdev_indirect_births_last_entry_txg(vib);
2625 			break;
2626 		}
2627 
2628 		vdevid = vd->vdev_indirect_config.vic_prev_indirect_vdev;
2629 	}
2630 	spa_config_exit(spa, SCL_VDEV, FTAG);
2631 
2632 	IMPLY(ret != -1ULL,
2633 	    spa_feature_is_active(spa, SPA_FEATURE_DEVICE_REMOVAL));
2634 
2635 	return (ret);
2636 }
2637 
2638 int
2639 spa_maxdnodesize(spa_t *spa)
2640 {
2641 	if (spa_feature_is_enabled(spa, SPA_FEATURE_LARGE_DNODE))
2642 		return (DNODE_MAX_SIZE);
2643 	else
2644 		return (DNODE_MIN_SIZE);
2645 }
2646 
2647 boolean_t
2648 spa_multihost(spa_t *spa)
2649 {
2650 	return (spa->spa_multihost ? B_TRUE : B_FALSE);
2651 }
2652 
2653 uint32_t
2654 spa_get_hostid(spa_t *spa)
2655 {
2656 	return (spa->spa_hostid);
2657 }
2658 
2659 boolean_t
2660 spa_trust_config(spa_t *spa)
2661 {
2662 	return (spa->spa_trust_config);
2663 }
2664 
2665 uint64_t
2666 spa_missing_tvds_allowed(spa_t *spa)
2667 {
2668 	return (spa->spa_missing_tvds_allowed);
2669 }
2670 
2671 space_map_t *
2672 spa_syncing_log_sm(spa_t *spa)
2673 {
2674 	return (spa->spa_syncing_log_sm);
2675 }
2676 
2677 void
2678 spa_set_missing_tvds(spa_t *spa, uint64_t missing)
2679 {
2680 	spa->spa_missing_tvds = missing;
2681 }
2682 
2683 /*
2684  * Return the pool state string ("ONLINE", "DEGRADED", "SUSPENDED", etc.).
2685  */
2686 const char *
2687 spa_state_to_name(spa_t *spa)
2688 {
2689 	ASSERT3P(spa, !=, NULL);
2690 
2691 	/*
2692 	 * It is possible for the spa to exist without a root vdev
2693 	 * while the spa transitions during import/export.
2694 	 */
2695 	vdev_t *rvd = spa->spa_root_vdev;
2696 	if (rvd == NULL) {
2697 		return ("TRANSITIONING");
2698 	}
2699 	vdev_state_t state = rvd->vdev_state;
2700 	vdev_aux_t aux = rvd->vdev_stat.vs_aux;
2701 
2702 	if (spa_suspended(spa) &&
2703 	    (spa_get_failmode(spa) != ZIO_FAILURE_MODE_CONTINUE))
2704 		return ("SUSPENDED");
2705 
2706 	switch (state) {
2707 	case VDEV_STATE_CLOSED:
2708 	case VDEV_STATE_OFFLINE:
2709 		return ("OFFLINE");
2710 	case VDEV_STATE_REMOVED:
2711 		return ("REMOVED");
2712 	case VDEV_STATE_CANT_OPEN:
2713 		if (aux == VDEV_AUX_CORRUPT_DATA || aux == VDEV_AUX_BAD_LOG)
2714 			return ("FAULTED");
2715 		else if (aux == VDEV_AUX_SPLIT_POOL)
2716 			return ("SPLIT");
2717 		else
2718 			return ("UNAVAIL");
2719 	case VDEV_STATE_FAULTED:
2720 		return ("FAULTED");
2721 	case VDEV_STATE_DEGRADED:
2722 		return ("DEGRADED");
2723 	case VDEV_STATE_HEALTHY:
2724 		return ("ONLINE");
2725 	default:
2726 		break;
2727 	}
2728 
2729 	return ("UNKNOWN");
2730 }
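
/*
 * Editor's note (illustrative): this is the string surfaced in the
 * "state:" field of zpool status; e.g. a suspended pool with
 * failmode=wait reports "SUSPENDED" regardless of the root vdev state,
 * while failmode=continue falls through to the vdev-derived name.
 */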
2731 
2732 boolean_t
2733 spa_top_vdevs_spacemap_addressable(spa_t *spa)
2734 {
2735 	vdev_t *rvd = spa->spa_root_vdev;
2736 	for (uint64_t c = 0; c < rvd->vdev_children; c++) {
2737 		if (!vdev_is_spacemap_addressable(rvd->vdev_child[c]))
2738 			return (B_FALSE);
2739 	}
2740 	return (B_TRUE);
2741 }
2742 
2743 boolean_t
2744 spa_has_checkpoint(spa_t *spa)
2745 {
2746 	return (spa->spa_checkpoint_txg != 0);
2747 }
2748 
2749 boolean_t
2750 spa_importing_readonly_checkpoint(spa_t *spa)
2751 {
2752 	return ((spa->spa_import_flags & ZFS_IMPORT_CHECKPOINT) &&
2753 	    spa->spa_mode == SPA_MODE_READ);
2754 }
2755 
2756 uint64_t
2757 spa_min_claim_txg(spa_t *spa)
2758 {
2759 	uint64_t checkpoint_txg = spa->spa_uberblock.ub_checkpoint_txg;
2760 
2761 	if (checkpoint_txg != 0)
2762 		return (checkpoint_txg + 1);
2763 
2764 	return (spa->spa_first_txg);
2765 }
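
/*
 * Worked example (editor's sketch): with a checkpoint taken at txg
 * 1000, spa_min_claim_txg() returns 1001, so ZIL claiming never
 * touches blocks that belong to the checkpointed state.
 */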
2766 
2767 /*
2768  * If there is a checkpoint, async destroys may consume more space from
2769  * the pool instead of freeing it. In an attempt to save the pool from
2770  * getting suspended when it is about to run out of space, we stop
2771  * processing async destroys.
2772  */
2773 boolean_t
2774 spa_suspend_async_destroy(spa_t *spa)
2775 {
2776 	dsl_pool_t *dp = spa_get_dsl(spa);
2777 
2778 	uint64_t unreserved = dsl_pool_unreserved_space(dp,
2779 	    ZFS_SPACE_CHECK_EXTRA_RESERVED);
2780 	uint64_t used = dsl_dir_phys(dp->dp_root_dir)->dd_used_bytes;
2781 	uint64_t avail = (unreserved > used) ? (unreserved - used) : 0;
2782 
2783 	if (spa_has_checkpoint(spa) && avail == 0)
2784 		return (B_TRUE);
2785 
2786 	return (B_FALSE);
2787 }
2788 
2789 #if defined(_KERNEL)
2790 
2791 int
2792 param_set_deadman_failmode_common(const char *val)
2793 {
2794 	spa_t *spa = NULL;
2795 	char *p;
2796 
2797 	if (val == NULL)
2798 		return (SET_ERROR(EINVAL));
2799 
2800 	if ((p = strchr(val, '\n')) != NULL)
2801 		*p = '\0';
2802 
2803 	if (strcmp(val, "wait") != 0 && strcmp(val, "continue") != 0 &&
2804 	    strcmp(val, "panic") != 0)
2805 		return (SET_ERROR(EINVAL));
2806 
2807 	if (spa_mode_global != SPA_MODE_UNINIT) {
2808 		mutex_enter(&spa_namespace_lock);
2809 		while ((spa = spa_next(spa)) != NULL)
2810 			spa_set_deadman_failmode(spa, val);
2811 		mutex_exit(&spa_namespace_lock);
2812 	}
2813 
2814 	return (0);
2815 }
2816 #endif
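
/*
 * Example (editor's sketch, Linux): the validated string arrives via
 * the zfs_deadman_failmode module parameter and is applied to every
 * imported pool by the loop above, e.g.:
 *
 *	echo continue > /sys/module/zfs/parameters/zfs_deadman_failmode
 */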
2817 
2818 /* Namespace manipulation */
2819 EXPORT_SYMBOL(spa_lookup);
2820 EXPORT_SYMBOL(spa_add);
2821 EXPORT_SYMBOL(spa_remove);
2822 EXPORT_SYMBOL(spa_next);
2823 
2824 /* Refcount functions */
2825 EXPORT_SYMBOL(spa_open_ref);
2826 EXPORT_SYMBOL(spa_close);
2827 EXPORT_SYMBOL(spa_refcount_zero);
2828 
2829 /* Pool configuration lock */
2830 EXPORT_SYMBOL(spa_config_tryenter);
2831 EXPORT_SYMBOL(spa_config_enter);
2832 EXPORT_SYMBOL(spa_config_exit);
2833 EXPORT_SYMBOL(spa_config_held);
2834 
2835 /* Pool vdev add/remove lock */
2836 EXPORT_SYMBOL(spa_vdev_enter);
2837 EXPORT_SYMBOL(spa_vdev_exit);
2838 
2839 /* Pool vdev state change lock */
2840 EXPORT_SYMBOL(spa_vdev_state_enter);
2841 EXPORT_SYMBOL(spa_vdev_state_exit);
2842 
2843 /* Accessor functions */
2844 EXPORT_SYMBOL(spa_shutting_down);
2845 EXPORT_SYMBOL(spa_get_dsl);
2846 EXPORT_SYMBOL(spa_get_rootblkptr);
2847 EXPORT_SYMBOL(spa_set_rootblkptr);
2848 EXPORT_SYMBOL(spa_altroot);
2849 EXPORT_SYMBOL(spa_sync_pass);
2850 EXPORT_SYMBOL(spa_name);
2851 EXPORT_SYMBOL(spa_guid);
2852 EXPORT_SYMBOL(spa_last_synced_txg);
2853 EXPORT_SYMBOL(spa_first_txg);
2854 EXPORT_SYMBOL(spa_syncing_txg);
2855 EXPORT_SYMBOL(spa_version);
2856 EXPORT_SYMBOL(spa_state);
2857 EXPORT_SYMBOL(spa_load_state);
2858 EXPORT_SYMBOL(spa_freeze_txg);
2859 EXPORT_SYMBOL(spa_get_dspace);
2860 EXPORT_SYMBOL(spa_update_dspace);
2861 EXPORT_SYMBOL(spa_deflate);
2862 EXPORT_SYMBOL(spa_normal_class);
2863 EXPORT_SYMBOL(spa_log_class);
2864 EXPORT_SYMBOL(spa_special_class);
2865 EXPORT_SYMBOL(spa_preferred_class);
2866 EXPORT_SYMBOL(spa_max_replication);
2867 EXPORT_SYMBOL(spa_prev_software_version);
2868 EXPORT_SYMBOL(spa_get_failmode);
2869 EXPORT_SYMBOL(spa_suspended);
2870 EXPORT_SYMBOL(spa_bootfs);
2871 EXPORT_SYMBOL(spa_delegation);
2872 EXPORT_SYMBOL(spa_meta_objset);
2873 EXPORT_SYMBOL(spa_maxblocksize);
2874 EXPORT_SYMBOL(spa_maxdnodesize);
2875 
2876 /* Miscellaneous support routines */
2877 EXPORT_SYMBOL(spa_guid_exists);
2878 EXPORT_SYMBOL(spa_strdup);
2879 EXPORT_SYMBOL(spa_strfree);
2880 EXPORT_SYMBOL(spa_generate_guid);
2881 EXPORT_SYMBOL(snprintf_blkptr);
2882 EXPORT_SYMBOL(spa_freeze);
2883 EXPORT_SYMBOL(spa_upgrade);
2884 EXPORT_SYMBOL(spa_evict_all);
2885 EXPORT_SYMBOL(spa_lookup_by_guid);
2886 EXPORT_SYMBOL(spa_has_spare);
2887 EXPORT_SYMBOL(dva_get_dsize_sync);
2888 EXPORT_SYMBOL(bp_get_dsize_sync);
2889 EXPORT_SYMBOL(bp_get_dsize);
2890 EXPORT_SYMBOL(spa_has_slogs);
2891 EXPORT_SYMBOL(spa_is_root);
2892 EXPORT_SYMBOL(spa_writeable);
2893 EXPORT_SYMBOL(spa_mode);
2894 EXPORT_SYMBOL(spa_namespace_lock);
2895 EXPORT_SYMBOL(spa_trust_config);
2896 EXPORT_SYMBOL(spa_missing_tvds_allowed);
2897 EXPORT_SYMBOL(spa_set_missing_tvds);
2898 EXPORT_SYMBOL(spa_state_to_name);
2899 EXPORT_SYMBOL(spa_importing_readonly_checkpoint);
2900 EXPORT_SYMBOL(spa_min_claim_txg);
2901 EXPORT_SYMBOL(spa_suspend_async_destroy);
2902 EXPORT_SYMBOL(spa_has_checkpoint);
2903 EXPORT_SYMBOL(spa_top_vdevs_spacemap_addressable);
2904 
2905 ZFS_MODULE_PARAM(zfs, zfs_, flags, UINT, ZMOD_RW,
2906 	"Set additional debugging flags");
2907 
2908 ZFS_MODULE_PARAM(zfs, zfs_, recover, INT, ZMOD_RW,
2909 	"Set to attempt to recover from fatal errors");
2910 
2911 ZFS_MODULE_PARAM(zfs, zfs_, free_leak_on_eio, INT, ZMOD_RW,
2912 	"Set to ignore IO errors during free and permanently leak the space");
2913 
2914 ZFS_MODULE_PARAM(zfs_deadman, zfs_deadman_, checktime_ms, ULONG, ZMOD_RW,
2915 	"Dead I/O check interval in milliseconds");
2916 
2917 ZFS_MODULE_PARAM(zfs_deadman, zfs_deadman_, enabled, INT, ZMOD_RW,
2918 	"Enable deadman timer");
2919 
2920 ZFS_MODULE_PARAM(zfs_spa, spa_, asize_inflation, INT, ZMOD_RW,
2921 	"SPA size estimate multiplication factor");
2922 
2923 ZFS_MODULE_PARAM(zfs, zfs_, ddt_data_is_special, INT, ZMOD_RW,
2924 	"Place DDT data into the special class");
2925 
2926 ZFS_MODULE_PARAM(zfs, zfs_, user_indirect_is_special, INT, ZMOD_RW,
2927 	"Place user data indirect blocks into the special class");
2928 
2929 /* BEGIN CSTYLED */
2930 ZFS_MODULE_PARAM_CALL(zfs_deadman, zfs_deadman_, failmode,
2931 	param_set_deadman_failmode, param_get_charp, ZMOD_RW,
2932 	"Failmode for deadman timer");
2933 
2934 ZFS_MODULE_PARAM_CALL(zfs_deadman, zfs_deadman_, synctime_ms,
2935 	param_set_deadman_synctime, param_get_ulong, ZMOD_RW,
2936 	"Pool sync expiration time in milliseconds");
2937 
2938 ZFS_MODULE_PARAM_CALL(zfs_deadman, zfs_deadman_, ziotime_ms,
2939 	param_set_deadman_ziotime, param_get_ulong, ZMOD_RW,
2940 	"IO expiration time in milliseconds");
2941 
2942 ZFS_MODULE_PARAM(zfs, zfs_, special_class_metadata_reserve_pct, INT, ZMOD_RW,
2943 	"Percentage of special class space reserved for metadata; small "
2944 	"file blocks use the special class only below this threshold");
2945 /* END CSTYLED */
2946 
2947 ZFS_MODULE_PARAM_CALL(zfs_spa, spa_, slop_shift, param_set_slop_shift,
2948 	param_get_int, ZMOD_RW, "Reserved free space in pool");
2949