xref: /freebsd/sys/contrib/openzfs/module/zfs/spa_misc.c (revision dd4f32ae62426a10a84b4322756d82c06c202c4e)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or https://opensource.org/licenses/CDDL-1.0.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
23  * Copyright (c) 2011, 2019 by Delphix. All rights reserved.
24  * Copyright 2015 Nexenta Systems, Inc.  All rights reserved.
25  * Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
26  * Copyright 2013 Saso Kiselkov. All rights reserved.
27  * Copyright (c) 2017 Datto Inc.
28  * Copyright (c) 2017, Intel Corporation.
29  * Copyright (c) 2019, loli10K <ezomori.nozomu@gmail.com>. All rights reserved.
30  */
31 
32 #include <sys/zfs_context.h>
33 #include <sys/zfs_chksum.h>
34 #include <sys/spa_impl.h>
35 #include <sys/zio.h>
36 #include <sys/zio_checksum.h>
37 #include <sys/zio_compress.h>
38 #include <sys/dmu.h>
39 #include <sys/dmu_tx.h>
40 #include <sys/zap.h>
41 #include <sys/zil.h>
42 #include <sys/vdev_impl.h>
43 #include <sys/vdev_initialize.h>
44 #include <sys/vdev_trim.h>
45 #include <sys/vdev_file.h>
46 #include <sys/vdev_raidz.h>
47 #include <sys/metaslab.h>
48 #include <sys/uberblock_impl.h>
49 #include <sys/txg.h>
50 #include <sys/avl.h>
51 #include <sys/unique.h>
52 #include <sys/dsl_pool.h>
53 #include <sys/dsl_dir.h>
54 #include <sys/dsl_prop.h>
55 #include <sys/fm/util.h>
56 #include <sys/dsl_scan.h>
57 #include <sys/fs/zfs.h>
58 #include <sys/metaslab_impl.h>
59 #include <sys/arc.h>
60 #include <sys/brt.h>
61 #include <sys/ddt.h>
62 #include <sys/kstat.h>
63 #include "zfs_prop.h"
64 #include <sys/btree.h>
65 #include <sys/zfeature.h>
66 #include <sys/qat.h>
67 #include <sys/zstd/zstd.h>
68 
69 /*
70  * SPA locking
71  *
72  * There are three basic locks for managing spa_t structures:
73  *
74  * spa_namespace_lock (global mutex)
75  *
76  *	This lock must be acquired to do any of the following:
77  *
78  *		- Lookup a spa_t by name
79  *		- Add or remove a spa_t from the namespace
80  *		- Increase spa_refcount from zero
81  *		- Check if spa_refcount is zero
82  *		- Rename a spa_t
83  *		- add/remove/attach/detach devices
84  *		- Held for the duration of create/destroy/import/export
85  *
86  *	It does not need to handle recursion.  A create or destroy may
87  *	reference objects (files or zvols) in other pools, but by
88  *	definition they must have an existing reference, and will never need
89  *	to lookup a spa_t by name.
90  *
91  * spa_refcount (per-spa zfs_refcount_t protected by mutex)
92  *
93  *	This reference count keeps track of any active users of the spa_t.  The
94  *	spa_t cannot be destroyed or freed while this is non-zero.  Internally,
95  *	the refcount is never really 'zero' - opening a pool implicitly keeps
96  *	some references in the DMU.  Internally we check against spa_minref, but
97  *	present the image of a zero/non-zero value to consumers.
98  *
99  * spa_config_lock[] (per-spa array of rwlocks)
100  *
101  *	This protects the spa_t from config changes, and must be held in
102  *	the following circumstances:
103  *
104  *		- RW_READER to perform I/O to the spa
105  *		- RW_WRITER to change the vdev config
106  *
107  * The locking order is fairly straightforward:
108  *
109  *		spa_namespace_lock	->	spa_refcount
110  *
111  *	The namespace lock must be acquired to increase the refcount from 0
112  *	or to check if it is zero.
113  *
114  *		spa_refcount		->	spa_config_lock[]
115  *
116  *	There must be at least one valid reference on the spa_t to acquire
117  *	the config lock.
118  *
119  *		spa_namespace_lock	->	spa_config_lock[]
120  *
121  *	The namespace lock must always be taken before the config lock.
122  *
123  *
124  * The spa_namespace_lock can be acquired directly and is globally visible.
125  *
126  * The namespace is manipulated using the following functions, all of which
127  * require the spa_namespace_lock to be held.
128  *
129  *	spa_lookup()		Lookup a spa_t by name.
130  *
131  *	spa_add()		Create a new spa_t in the namespace.
132  *
133  *	spa_remove()		Remove a spa_t from the namespace.  This also
134  *				frees up any memory associated with the spa_t.
135  *
136  *	spa_next()		Returns the next spa_t in the system, or the
137  *				first if NULL is passed.
138  *
139  *	spa_evict_all()		Shutdown and remove all spa_t structures in
140  *				the system.
141  *
142  *	spa_guid_exists()	Determine whether a pool/device guid exists.
143  *
144  * The spa_refcount is manipulated using the following functions:
145  *
146  *	spa_open_ref()		Adds a reference to the given spa_t.  Must be
147  *				called with spa_namespace_lock held if the
148  *				refcount is currently zero.
149  *
150  *	spa_close()		Remove a reference from the spa_t.  This will
151  *				not free the spa_t or remove it from the
152  *				namespace.  No locking is required.
153  *
154  *	spa_refcount_zero()	Returns true if the refcount is currently
155  *				zero.  Must be called with spa_namespace_lock
156  *				held.
157  *
158  * The spa_config_lock[] is an array of rwlocks, ordered as follows:
159  * SCL_CONFIG > SCL_STATE > SCL_ALLOC > SCL_ZIO > SCL_FREE > SCL_VDEV.
160  * spa_config_lock[] is manipulated with spa_config_{enter,exit,held}().
161  *
162  * To read the configuration, it suffices to hold one of these locks as reader.
163  * To modify the configuration, you must hold all locks as writer.  To modify
164  * vdev state without altering the vdev tree's topology (e.g. online/offline),
165  * you must hold SCL_STATE and SCL_ZIO as writer.
166  *
167  * We use these distinct config locks to avoid recursive lock entry.
168  * For example, spa_sync() (which holds SCL_CONFIG as reader) induces
169  * block allocations (SCL_ALLOC), which may require reading space maps
170  * from disk (dmu_read() -> zio_read() -> SCL_ZIO).
171  *
172  * The spa config locks cannot be normal rwlocks because we need the
173  * ability to hand off ownership.  For example, SCL_ZIO is acquired
174  * by the issuing thread and later released by an interrupt thread.
175  * They do, however, obey the usual write-wanted semantics to prevent
176  * writer (i.e. system administrator) starvation.
177  *
178  * The lock acquisition rules are as follows:
179  *
180  * SCL_CONFIG
181  *	Protects changes to the vdev tree topology, such as vdev
182  *	add/remove/attach/detach.  Protects the dirty config list
183  *	(spa_config_dirty_list) and the set of spares and l2arc devices.
184  *
185  * SCL_STATE
186  *	Protects changes to pool state and vdev state, such as vdev
187  *	online/offline/fault/degrade/clear.  Protects the dirty state list
188  *	(spa_state_dirty_list) and global pool state (spa_state).
189  *
190  * SCL_ALLOC
191  *	Protects changes to metaslab groups and classes.
192  *	Held as reader by metaslab_alloc() and metaslab_claim().
193  *
194  * SCL_ZIO
195  *	Held by bp-level zios (those which have no io_vd upon entry)
196  *	to prevent changes to the vdev tree.  The bp-level zio implicitly
197  *	protects all of its vdev child zios, which do not hold SCL_ZIO.
198  *
199  * SCL_FREE
200  *	Protects changes to metaslab groups and classes.
201  *	Held as reader by metaslab_free().  SCL_FREE is distinct from
202  *	SCL_ALLOC, and lower than SCL_ZIO, so that we can safely free
203  *	blocks in zio_done() while another i/o that holds either
204  *	SCL_ALLOC or SCL_ZIO is waiting for this i/o to complete.
205  *
206  * SCL_VDEV
207  *	Held as reader to prevent changes to the vdev tree during trivial
208  *	inquiries such as bp_get_dsize().  SCL_VDEV is distinct from the
209  *	other locks, and lower than all of them, to ensure that it's safe
210  *	to acquire regardless of caller context.
211  *
212  * In addition, the following rules apply:
213  *
214  * (a)	spa_props_lock protects pool properties, spa_config and spa_config_list.
215  *	The lock ordering is SCL_CONFIG > spa_props_lock.
216  *
217  * (b)	I/O operations on leaf vdevs.  For any zio operation that takes
218  *	an explicit vdev_t argument -- such as zio_ioctl(), zio_read_phys(),
219  *	or zio_write_phys() -- the caller must ensure that the config cannot
220  *	cannot change in the interim, and that the vdev cannot be reopened.
221  *	change in the interim, and that the vdev cannot be reopened.
222  *
223  * The vdev configuration is protected by spa_vdev_enter() / spa_vdev_exit().
224  *
225  *	spa_vdev_enter()	Acquire the namespace lock and the config lock
226  *				for writing.
227  *
228  *	spa_vdev_exit()		Release the config lock, wait for all I/O
229  *				to complete, sync the updated configs to the
230  *				cache, and release the namespace lock.
231  *
232  * vdev state is protected by spa_vdev_state_enter() / spa_vdev_state_exit().
233  * Like spa_vdev_enter/exit, these are convenience wrappers -- the actual
234  * locking is, always, based on spa_namespace_lock and spa_config_lock[].
235  */
236 
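/*
 * For illustration only -- a minimal sketch of how a consumer might combine
 * the locks described above (the pool name and the use of FTAG here are
 * hypothetical, not taken from a real call site):
 *
 *	mutex_enter(&spa_namespace_lock);
 *	spa_t *spa = spa_lookup("tank");
 *	if (spa != NULL)
 *		spa_open_ref(spa, FTAG);
 *	mutex_exit(&spa_namespace_lock);
 *
 *	if (spa != NULL) {
 *		spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
 *		... perform a trivial vdev tree inquiry ...
 *		spa_config_exit(spa, SCL_VDEV, FTAG);
 *		spa_close(spa, FTAG);
 *	}
 *
 * The namespace lock is needed to look the pool up and to take a reference
 * while the count may still be zero; once a reference is held, the config
 * lock may be taken without the namespace lock, per the ordering rules above.
 */
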
237 static avl_tree_t spa_namespace_avl;
238 kmutex_t spa_namespace_lock;
239 static kcondvar_t spa_namespace_cv;
240 static const int spa_max_replication_override = SPA_DVAS_PER_BP;
241 
242 static kmutex_t spa_spare_lock;
243 static avl_tree_t spa_spare_avl;
244 static kmutex_t spa_l2cache_lock;
245 static avl_tree_t spa_l2cache_avl;
246 
247 spa_mode_t spa_mode_global = SPA_MODE_UNINIT;
248 
249 #ifdef ZFS_DEBUG
250 /*
251  * Everything except dprintf, set_error, and indirect_remap is on
252  * by default in debug builds.
253  */
254 int zfs_flags = ~(ZFS_DEBUG_DPRINTF | ZFS_DEBUG_SET_ERROR |
255     ZFS_DEBUG_INDIRECT_REMAP);
256 #else
257 int zfs_flags = 0;
258 #endif
259 
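/*
 * zfs_flags is a bitmask of the ZFS_DEBUG_* values from sys/zfs_debug.h.
 * As a purely hypothetical snippet, a non-debug build could turn on one
 * additional class of checks at runtime with something like:
 *
 *	zfs_flags |= ZFS_DEBUG_MODIFY;
 */
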
260 /*
261  * zfs_recover can be set to nonzero to attempt to recover from
262  * otherwise-fatal errors, typically caused by on-disk corruption.  When
263  * set, calls to zfs_panic_recover() will turn into warning messages.
264  * This should only be used as a last resort, as it typically results
265  * in leaked space, or worse.
266  */
267 int zfs_recover = B_FALSE;
268 
269 /*
270  * If destroy encounters an EIO while reading metadata (e.g. indirect
271  * blocks), space referenced by the missing metadata can not be freed.
272  * Normally this causes the background destroy to become "stalled", as
273  * it is unable to make forward progress.  While in this stalled state,
274  * all remaining space to free from the error-encountering filesystem is
275  * "temporarily leaked".  Set this flag to cause it to ignore the EIO,
276  * permanently leak the space from indirect blocks that can not be read,
277  * and continue to free everything else that it can.
278  *
279  * The default, "stalling" behavior is useful if the storage partially
280  * fails (i.e. some but not all i/os fail), and then later recovers.  In
281  * this case, we will be able to continue pool operations while it is
282  * partially failed, and when it recovers, we can continue to free the
283  * space, with no leaks.  However, note that this case is actually
284  * fairly rare.
285  *
286  * Typically pools either (a) fail completely (but perhaps temporarily,
287  * e.g. a top-level vdev going offline), or (b) have localized,
288  * permanent errors (e.g. disk returns the wrong data due to bit flip or
289  * firmware bug).  In case (a), this setting does not matter because the
290  * pool will be suspended and the sync thread will not be able to make
291  * forward progress regardless.  In case (b), because the error is
292  * permanent, the best we can do is leak the minimum amount of space,
293  * which is what setting this flag will do.  Therefore, it is reasonable
294  * for this flag to normally be set, but we chose the more conservative
295  * approach of not setting it, so that there is no possibility of
296  * leaking space in the "partial temporary" failure case.
297  */
298 int zfs_free_leak_on_eio = B_FALSE;
299 
300 /*
301  * Expiration time in milliseconds. This value has two meanings. First it is
302  * used to determine when the spa_deadman() logic should fire. By default the
303  * spa_deadman() will fire if spa_sync() has not completed in 600 seconds.
304  * Secondly, the value determines if an I/O is considered "hung". Any I/O that
305  * has not completed in zfs_deadman_synctime_ms is considered "hung" resulting
306  * in one of three behaviors controlled by zfs_deadman_failmode.
307  */
308 uint64_t zfs_deadman_synctime_ms = 600000UL;  /* 10 min. */
309 
310 /*
311  * This value controls the maximum amount of time zio_wait() will block for an
312  * outstanding IO.  By default this is 300 seconds at which point the "hung"
313  * behavior will be applied as described for zfs_deadman_synctime_ms.
314  */
315 uint64_t zfs_deadman_ziotime_ms = 300000UL;  /* 5 min. */
316 
317 /*
318  * Check time in milliseconds. This defines the frequency at which we check
319  * for hung I/O.
320  */
321 uint64_t zfs_deadman_checktime_ms = 60000UL;  /* 1 min. */
322 
323 /*
324  * By default the deadman is enabled.
325  */
326 int zfs_deadman_enabled = B_TRUE;
327 
328 /*
329  * Controls the behavior of the deadman when it detects a "hung" I/O.
330  * Valid values are zfs_deadman_failmode=<wait|continue|panic>.
331  *
332  * wait     - Wait for the "hung" I/O (default)
333  * continue - Attempt to recover from a "hung" I/O
334  * panic    - Panic the system
335  */
336 const char *zfs_deadman_failmode = "wait";
337 
338 /*
339  * The worst case is single-sector max-parity RAID-Z blocks, in which
340  * case the space requirement is exactly (VDEV_RAIDZ_MAXPARITY + 1)
341  * times the size; so just assume that.  Add to this the fact that
342  * we can have up to 3 DVAs per bp, and one more factor of 2 because
343  * the block may be dittoed with up to 3 DVAs by ddt_sync().  All together,
344  * the worst case is:
345  *     (VDEV_RAIDZ_MAXPARITY + 1) * SPA_DVAS_PER_BP * 2 == 24
346  */
347 uint_t spa_asize_inflation = 24;
348 
349 /*
350  * Normally, we don't allow the last 3.2% (1/(2^spa_slop_shift)) of space in
351  * the pool to be consumed (bounded by spa_max_slop).  This ensures that we
352  * don't run the pool completely out of space, due to unaccounted changes (e.g.
353  * to the MOS).  It also limits the worst-case time to allocate space.  If we
354  * have less than this amount of free space, most ZPL operations (e.g.  write,
355  * create) will return ENOSPC.  The ZIL metaslabs (spa_embedded_log_class) are
356  * also part of this 3.2% of space which can't be consumed by normal writes;
357  * the slop space "proper" (spa_get_slop_space()) is decreased by the embedded
358  * log space.
359  *
360  * Certain operations (e.g. file removal, most administrative actions) can
361  * use half the slop space.  They will only return ENOSPC if less than half
362  * the slop space is free.  Typically, once the pool has less than the slop
363  * space free, the user will use these operations to free up space in the pool.
364  * These are the operations that call dsl_pool_adjustedsize() with the netfree
365  * argument set to TRUE.
366  *
367  * Operations that are almost guaranteed to free up space in the absence of
368  * a pool checkpoint can use up to three quarters of the slop space
369  * (e.g zfs destroy).
370  *
371  * A very restricted set of operations are always permitted, regardless of
372  * the amount of free space.  These are the operations that call
373  * dsl_sync_task(ZFS_SPACE_CHECK_NONE). If these operations result in a net
374  * increase in the amount of space used, it is possible to run the pool
375  * completely out of space, causing it to be permanently read-only.
376  *
377  * Note that on very small pools, the slop space will be larger than
378  * 3.2%, in an effort to have it be at least spa_min_slop (128MB),
379  * but we never allow it to be more than half the pool size.
380  *
381  * Further, on very large pools, the slop space will be smaller than
382  * 3.2%, to avoid reserving much more space than we actually need; bounded
383  * by spa_max_slop (128GB).
384  *
385  * See also the comments in zfs_space_check_t.
386  */
387 uint_t spa_slop_shift = 5;
388 static const uint64_t spa_min_slop = 128ULL * 1024 * 1024;
389 static const uint64_t spa_max_slop = 128ULL * 1024 * 1024 * 1024;
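/*
 * Worked example (for illustration, using the defaults above and ignoring
 * the dedup and embedded-log adjustments made in spa_get_slop_space()):
 * with spa_slop_shift = 5, a 1 TB pool reserves MIN(1 TB / 32, spa_max_slop)
 * = 32 GB of slop, a 10 TB pool reserves MIN(320 GB, 128 GB) = 128 GB, and a
 * 1 GB pool is raised to spa_min_slop = 128 MB (still less than half the
 * pool, so the half-pool cap does not apply).
 */
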
390 static const int spa_allocators = 4;
391 
392 
393 void
394 spa_load_failed(spa_t *spa, const char *fmt, ...)
395 {
396 	va_list adx;
397 	char buf[256];
398 
399 	va_start(adx, fmt);
400 	(void) vsnprintf(buf, sizeof (buf), fmt, adx);
401 	va_end(adx);
402 
403 	zfs_dbgmsg("spa_load(%s, config %s): FAILED: %s", spa->spa_name,
404 	    spa->spa_trust_config ? "trusted" : "untrusted", buf);
405 }
406 
407 void
408 spa_load_note(spa_t *spa, const char *fmt, ...)
409 {
410 	va_list adx;
411 	char buf[256];
412 
413 	va_start(adx, fmt);
414 	(void) vsnprintf(buf, sizeof (buf), fmt, adx);
415 	va_end(adx);
416 
417 	zfs_dbgmsg("spa_load(%s, config %s): %s", spa->spa_name,
418 	    spa->spa_trust_config ? "trusted" : "untrusted", buf);
419 }
420 
421 /*
422  * By default dedup and user data indirects land in the special class
423  */
424 static int zfs_ddt_data_is_special = B_TRUE;
425 static int zfs_user_indirect_is_special = B_TRUE;
426 
427 /*
428  * The percentage of special class final space reserved for metadata only.
429  * Once we allocate 100 - zfs_special_class_metadata_reserve_pct we only
430  * let metadata into the class.
431  */
432 static uint_t zfs_special_class_metadata_reserve_pct = 25;
433 
434 /*
435  * ==========================================================================
436  * SPA config locking
437  * ==========================================================================
438  */
439 static void
440 spa_config_lock_init(spa_t *spa)
441 {
442 	for (int i = 0; i < SCL_LOCKS; i++) {
443 		spa_config_lock_t *scl = &spa->spa_config_lock[i];
444 		mutex_init(&scl->scl_lock, NULL, MUTEX_DEFAULT, NULL);
445 		cv_init(&scl->scl_cv, NULL, CV_DEFAULT, NULL);
446 		scl->scl_writer = NULL;
447 		scl->scl_write_wanted = 0;
448 		scl->scl_count = 0;
449 	}
450 }
451 
452 static void
453 spa_config_lock_destroy(spa_t *spa)
454 {
455 	for (int i = 0; i < SCL_LOCKS; i++) {
456 		spa_config_lock_t *scl = &spa->spa_config_lock[i];
457 		mutex_destroy(&scl->scl_lock);
458 		cv_destroy(&scl->scl_cv);
459 		ASSERT(scl->scl_writer == NULL);
460 		ASSERT(scl->scl_write_wanted == 0);
461 		ASSERT(scl->scl_count == 0);
462 	}
463 }
464 
465 int
466 spa_config_tryenter(spa_t *spa, int locks, const void *tag, krw_t rw)
467 {
468 	for (int i = 0; i < SCL_LOCKS; i++) {
469 		spa_config_lock_t *scl = &spa->spa_config_lock[i];
470 		if (!(locks & (1 << i)))
471 			continue;
472 		mutex_enter(&scl->scl_lock);
473 		if (rw == RW_READER) {
474 			if (scl->scl_writer || scl->scl_write_wanted) {
475 				mutex_exit(&scl->scl_lock);
476 				spa_config_exit(spa, locks & ((1 << i) - 1),
477 				    tag);
478 				return (0);
479 			}
480 		} else {
481 			ASSERT(scl->scl_writer != curthread);
482 			if (scl->scl_count != 0) {
483 				mutex_exit(&scl->scl_lock);
484 				spa_config_exit(spa, locks & ((1 << i) - 1),
485 				    tag);
486 				return (0);
487 			}
488 			scl->scl_writer = curthread;
489 		}
490 		scl->scl_count++;
491 		mutex_exit(&scl->scl_lock);
492 	}
493 	return (1);
494 }
495 
496 static void
497 spa_config_enter_impl(spa_t *spa, int locks, const void *tag, krw_t rw,
498     int mmp_flag)
499 {
500 	(void) tag;
501 	int wlocks_held = 0;
502 
503 	ASSERT3U(SCL_LOCKS, <, sizeof (wlocks_held) * NBBY);
504 
505 	for (int i = 0; i < SCL_LOCKS; i++) {
506 		spa_config_lock_t *scl = &spa->spa_config_lock[i];
507 		if (scl->scl_writer == curthread)
508 			wlocks_held |= (1 << i);
509 		if (!(locks & (1 << i)))
510 			continue;
511 		mutex_enter(&scl->scl_lock);
512 		if (rw == RW_READER) {
513 			while (scl->scl_writer ||
514 			    (!mmp_flag && scl->scl_write_wanted)) {
515 				cv_wait(&scl->scl_cv, &scl->scl_lock);
516 			}
517 		} else {
518 			ASSERT(scl->scl_writer != curthread);
519 			while (scl->scl_count != 0) {
520 				scl->scl_write_wanted++;
521 				cv_wait(&scl->scl_cv, &scl->scl_lock);
522 				scl->scl_write_wanted--;
523 			}
524 			scl->scl_writer = curthread;
525 		}
526 		scl->scl_count++;
527 		mutex_exit(&scl->scl_lock);
528 	}
529 	ASSERT3U(wlocks_held, <=, locks);
530 }
531 
532 void
533 spa_config_enter(spa_t *spa, int locks, const void *tag, krw_t rw)
534 {
535 	spa_config_enter_impl(spa, locks, tag, rw, 0);
536 }
537 
538 /*
539  * spa_config_enter_mmp() allows the mmp thread to cut in front of
540  * outstanding write lock requests. This is needed since the mmp updates are
541  * time sensitive and failure to service them promptly will result in a
542  * suspended pool. This pool suspension has been seen in practice when there is
543  * a single disk in a pool that is responding slowly and presumably about to
544  * fail.
545  */
546 
547 void
548 spa_config_enter_mmp(spa_t *spa, int locks, const void *tag, krw_t rw)
549 {
550 	spa_config_enter_impl(spa, locks, tag, rw, 1);
551 }
552 
553 void
554 spa_config_exit(spa_t *spa, int locks, const void *tag)
555 {
556 	(void) tag;
557 	for (int i = SCL_LOCKS - 1; i >= 0; i--) {
558 		spa_config_lock_t *scl = &spa->spa_config_lock[i];
559 		if (!(locks & (1 << i)))
560 			continue;
561 		mutex_enter(&scl->scl_lock);
562 		ASSERT(scl->scl_count > 0);
563 		if (--scl->scl_count == 0) {
564 			ASSERT(scl->scl_writer == NULL ||
565 			    scl->scl_writer == curthread);
566 			scl->scl_writer = NULL;	/* OK in either case */
567 			cv_broadcast(&scl->scl_cv);
568 		}
569 		mutex_exit(&scl->scl_lock);
570 	}
571 }
572 
573 int
574 spa_config_held(spa_t *spa, int locks, krw_t rw)
575 {
576 	int locks_held = 0;
577 
578 	for (int i = 0; i < SCL_LOCKS; i++) {
579 		spa_config_lock_t *scl = &spa->spa_config_lock[i];
580 		if (!(locks & (1 << i)))
581 			continue;
582 		if ((rw == RW_READER && scl->scl_count != 0) ||
583 		    (rw == RW_WRITER && scl->scl_writer == curthread))
584 			locks_held |= 1 << i;
585 	}
586 
587 	return (locks_held);
588 }
589 
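/*
 * Illustrative use (a sketch, not a specific call site): because
 * spa_config_held() reports which of the requested locks are held in the
 * given mode, it is typically consumed in assertions, e.g.:
 *
 *	ASSERT(spa_config_held(spa, SCL_VDEV, RW_READER));
 *	ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
 */
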
590 /*
591  * ==========================================================================
592  * SPA namespace functions
593  * ==========================================================================
594  */
595 
596 /*
597  * Lookup the named spa_t in the AVL tree.  The spa_namespace_lock must be held.
598  * Returns NULL if no matching spa_t is found.
599  */
600 spa_t *
601 spa_lookup(const char *name)
602 {
603 	static spa_t search;	/* spa_t is large; don't allocate on stack */
604 	spa_t *spa;
605 	avl_index_t where;
606 	char *cp;
607 
608 	ASSERT(MUTEX_HELD(&spa_namespace_lock));
609 
610 	(void) strlcpy(search.spa_name, name, sizeof (search.spa_name));
611 
612 	/*
613 	 * If it's a full dataset name, figure out the pool name and
614 	 * just use that.
615 	 */
616 	cp = strpbrk(search.spa_name, "/@#");
617 	if (cp != NULL)
618 		*cp = '\0';
619 
620 	spa = avl_find(&spa_namespace_avl, &search, &where);
621 
622 	return (spa);
623 }
624 
625 /*
626  * Fires when spa_sync has not completed within zfs_deadman_synctime_ms.
627  * If the zfs_deadman_enabled flag is set then it inspects all vdev queues
628  * looking for potentially hung I/Os.
629  */
630 void
631 spa_deadman(void *arg)
632 {
633 	spa_t *spa = arg;
634 
635 	/* Disable the deadman if the pool is suspended. */
636 	if (spa_suspended(spa))
637 		return;
638 
639 	zfs_dbgmsg("slow spa_sync: started %llu seconds ago, calls %llu",
640 	    (gethrtime() - spa->spa_sync_starttime) / NANOSEC,
641 	    (u_longlong_t)++spa->spa_deadman_calls);
642 	if (zfs_deadman_enabled)
643 		vdev_deadman(spa->spa_root_vdev, FTAG);
644 
645 	spa->spa_deadman_tqid = taskq_dispatch_delay(system_delay_taskq,
646 	    spa_deadman, spa, TQ_SLEEP, ddi_get_lbolt() +
647 	    MSEC_TO_TICK(zfs_deadman_checktime_ms));
648 }
649 
650 static int
651 spa_log_sm_sort_by_txg(const void *va, const void *vb)
652 {
653 	const spa_log_sm_t *a = va;
654 	const spa_log_sm_t *b = vb;
655 
656 	return (TREE_CMP(a->sls_txg, b->sls_txg));
657 }
658 
659 /*
660  * Create an uninitialized spa_t with the given name.  Requires
661  * spa_namespace_lock.  The caller must ensure that the spa_t doesn't already
662  * exist by calling spa_lookup() first.
663  */
664 spa_t *
665 spa_add(const char *name, nvlist_t *config, const char *altroot)
666 {
667 	spa_t *spa;
668 	spa_config_dirent_t *dp;
669 
670 	ASSERT(MUTEX_HELD(&spa_namespace_lock));
671 
672 	spa = kmem_zalloc(sizeof (spa_t), KM_SLEEP);
673 
674 	mutex_init(&spa->spa_async_lock, NULL, MUTEX_DEFAULT, NULL);
675 	mutex_init(&spa->spa_errlist_lock, NULL, MUTEX_DEFAULT, NULL);
676 	mutex_init(&spa->spa_errlog_lock, NULL, MUTEX_DEFAULT, NULL);
677 	mutex_init(&spa->spa_evicting_os_lock, NULL, MUTEX_DEFAULT, NULL);
678 	mutex_init(&spa->spa_history_lock, NULL, MUTEX_DEFAULT, NULL);
679 	mutex_init(&spa->spa_proc_lock, NULL, MUTEX_DEFAULT, NULL);
680 	mutex_init(&spa->spa_props_lock, NULL, MUTEX_DEFAULT, NULL);
681 	mutex_init(&spa->spa_cksum_tmpls_lock, NULL, MUTEX_DEFAULT, NULL);
682 	mutex_init(&spa->spa_scrub_lock, NULL, MUTEX_DEFAULT, NULL);
683 	mutex_init(&spa->spa_suspend_lock, NULL, MUTEX_DEFAULT, NULL);
684 	mutex_init(&spa->spa_vdev_top_lock, NULL, MUTEX_DEFAULT, NULL);
685 	mutex_init(&spa->spa_feat_stats_lock, NULL, MUTEX_DEFAULT, NULL);
686 	mutex_init(&spa->spa_flushed_ms_lock, NULL, MUTEX_DEFAULT, NULL);
687 	mutex_init(&spa->spa_activities_lock, NULL, MUTEX_DEFAULT, NULL);
688 
689 	cv_init(&spa->spa_async_cv, NULL, CV_DEFAULT, NULL);
690 	cv_init(&spa->spa_evicting_os_cv, NULL, CV_DEFAULT, NULL);
691 	cv_init(&spa->spa_proc_cv, NULL, CV_DEFAULT, NULL);
692 	cv_init(&spa->spa_scrub_io_cv, NULL, CV_DEFAULT, NULL);
693 	cv_init(&spa->spa_suspend_cv, NULL, CV_DEFAULT, NULL);
694 	cv_init(&spa->spa_activities_cv, NULL, CV_DEFAULT, NULL);
695 	cv_init(&spa->spa_waiters_cv, NULL, CV_DEFAULT, NULL);
696 
697 	for (int t = 0; t < TXG_SIZE; t++)
698 		bplist_create(&spa->spa_free_bplist[t]);
699 
700 	(void) strlcpy(spa->spa_name, name, sizeof (spa->spa_name));
701 	spa->spa_state = POOL_STATE_UNINITIALIZED;
702 	spa->spa_freeze_txg = UINT64_MAX;
703 	spa->spa_final_txg = UINT64_MAX;
704 	spa->spa_load_max_txg = UINT64_MAX;
705 	spa->spa_proc = &p0;
706 	spa->spa_proc_state = SPA_PROC_NONE;
707 	spa->spa_trust_config = B_TRUE;
708 	spa->spa_hostid = zone_get_hostid(NULL);
709 
710 	spa->spa_deadman_synctime = MSEC2NSEC(zfs_deadman_synctime_ms);
711 	spa->spa_deadman_ziotime = MSEC2NSEC(zfs_deadman_ziotime_ms);
712 	spa_set_deadman_failmode(spa, zfs_deadman_failmode);
713 
714 	zfs_refcount_create(&spa->spa_refcount);
715 	spa_config_lock_init(spa);
716 	spa_stats_init(spa);
717 
718 	avl_add(&spa_namespace_avl, spa);
719 
720 	/*
721 	 * Set the alternate root, if there is one.
722 	 */
723 	if (altroot)
724 		spa->spa_root = spa_strdup(altroot);
725 
726 	spa->spa_alloc_count = spa_allocators;
727 	spa->spa_allocs = kmem_zalloc(spa->spa_alloc_count *
728 	    sizeof (spa_alloc_t), KM_SLEEP);
729 	for (int i = 0; i < spa->spa_alloc_count; i++) {
730 		mutex_init(&spa->spa_allocs[i].spaa_lock, NULL, MUTEX_DEFAULT,
731 		    NULL);
732 		avl_create(&spa->spa_allocs[i].spaa_tree, zio_bookmark_compare,
733 		    sizeof (zio_t), offsetof(zio_t, io_alloc_node));
734 	}
735 	avl_create(&spa->spa_metaslabs_by_flushed, metaslab_sort_by_flushed,
736 	    sizeof (metaslab_t), offsetof(metaslab_t, ms_spa_txg_node));
737 	avl_create(&spa->spa_sm_logs_by_txg, spa_log_sm_sort_by_txg,
738 	    sizeof (spa_log_sm_t), offsetof(spa_log_sm_t, sls_node));
739 	list_create(&spa->spa_log_summary, sizeof (log_summary_entry_t),
740 	    offsetof(log_summary_entry_t, lse_node));
741 
742 	/*
743 	 * Every pool starts with the default cachefile
744 	 */
745 	list_create(&spa->spa_config_list, sizeof (spa_config_dirent_t),
746 	    offsetof(spa_config_dirent_t, scd_link));
747 
748 	dp = kmem_zalloc(sizeof (spa_config_dirent_t), KM_SLEEP);
749 	dp->scd_path = altroot ? NULL : spa_strdup(spa_config_path);
750 	list_insert_head(&spa->spa_config_list, dp);
751 
752 	VERIFY(nvlist_alloc(&spa->spa_load_info, NV_UNIQUE_NAME,
753 	    KM_SLEEP) == 0);
754 
755 	if (config != NULL) {
756 		nvlist_t *features;
757 
758 		if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_FEATURES_FOR_READ,
759 		    &features) == 0) {
760 			VERIFY(nvlist_dup(features, &spa->spa_label_features,
761 			    0) == 0);
762 		}
763 
764 		VERIFY(nvlist_dup(config, &spa->spa_config, 0) == 0);
765 	}
766 
767 	if (spa->spa_label_features == NULL) {
768 		VERIFY(nvlist_alloc(&spa->spa_label_features, NV_UNIQUE_NAME,
769 		    KM_SLEEP) == 0);
770 	}
771 
772 	spa->spa_min_ashift = INT_MAX;
773 	spa->spa_max_ashift = 0;
774 	spa->spa_min_alloc = INT_MAX;
775 
776 	/* Reset cached value */
777 	spa->spa_dedup_dspace = ~0ULL;
778 
779 	/*
780 	 * As a pool is being created, treat all features as disabled by
781 	 * setting SPA_FEATURE_DISABLED for all entries in the feature
782 	 * refcount cache.
783 	 */
784 	for (int i = 0; i < SPA_FEATURES; i++) {
785 		spa->spa_feat_refcount_cache[i] = SPA_FEATURE_DISABLED;
786 	}
787 
788 	list_create(&spa->spa_leaf_list, sizeof (vdev_t),
789 	    offsetof(vdev_t, vdev_leaf_node));
790 
791 	return (spa);
792 }
793 
794 /*
795  * Removes a spa_t from the namespace, freeing up any memory used.  Requires
796  * spa_namespace_lock.  This is called only after the spa_t has been closed and
797  * deactivated.
798  */
799 void
800 spa_remove(spa_t *spa)
801 {
802 	spa_config_dirent_t *dp;
803 
804 	ASSERT(MUTEX_HELD(&spa_namespace_lock));
805 	ASSERT(spa_state(spa) == POOL_STATE_UNINITIALIZED);
806 	ASSERT3U(zfs_refcount_count(&spa->spa_refcount), ==, 0);
807 	ASSERT0(spa->spa_waiters);
808 
809 	nvlist_free(spa->spa_config_splitting);
810 
811 	avl_remove(&spa_namespace_avl, spa);
812 	cv_broadcast(&spa_namespace_cv);
813 
814 	if (spa->spa_root)
815 		spa_strfree(spa->spa_root);
816 
817 	while ((dp = list_head(&spa->spa_config_list)) != NULL) {
818 		list_remove(&spa->spa_config_list, dp);
819 		if (dp->scd_path != NULL)
820 			spa_strfree(dp->scd_path);
821 		kmem_free(dp, sizeof (spa_config_dirent_t));
822 	}
823 
824 	for (int i = 0; i < spa->spa_alloc_count; i++) {
825 		avl_destroy(&spa->spa_allocs[i].spaa_tree);
826 		mutex_destroy(&spa->spa_allocs[i].spaa_lock);
827 	}
828 	kmem_free(spa->spa_allocs, spa->spa_alloc_count *
829 	    sizeof (spa_alloc_t));
830 
831 	avl_destroy(&spa->spa_metaslabs_by_flushed);
832 	avl_destroy(&spa->spa_sm_logs_by_txg);
833 	list_destroy(&spa->spa_log_summary);
834 	list_destroy(&spa->spa_config_list);
835 	list_destroy(&spa->spa_leaf_list);
836 
837 	nvlist_free(spa->spa_label_features);
838 	nvlist_free(spa->spa_load_info);
839 	nvlist_free(spa->spa_feat_stats);
840 	spa_config_set(spa, NULL);
841 
842 	zfs_refcount_destroy(&spa->spa_refcount);
843 
844 	spa_stats_destroy(spa);
845 	spa_config_lock_destroy(spa);
846 
847 	for (int t = 0; t < TXG_SIZE; t++)
848 		bplist_destroy(&spa->spa_free_bplist[t]);
849 
850 	zio_checksum_templates_free(spa);
851 
852 	cv_destroy(&spa->spa_async_cv);
853 	cv_destroy(&spa->spa_evicting_os_cv);
854 	cv_destroy(&spa->spa_proc_cv);
855 	cv_destroy(&spa->spa_scrub_io_cv);
856 	cv_destroy(&spa->spa_suspend_cv);
857 	cv_destroy(&spa->spa_activities_cv);
858 	cv_destroy(&spa->spa_waiters_cv);
859 
860 	mutex_destroy(&spa->spa_flushed_ms_lock);
861 	mutex_destroy(&spa->spa_async_lock);
862 	mutex_destroy(&spa->spa_errlist_lock);
863 	mutex_destroy(&spa->spa_errlog_lock);
864 	mutex_destroy(&spa->spa_evicting_os_lock);
865 	mutex_destroy(&spa->spa_history_lock);
866 	mutex_destroy(&spa->spa_proc_lock);
867 	mutex_destroy(&spa->spa_props_lock);
868 	mutex_destroy(&spa->spa_cksum_tmpls_lock);
869 	mutex_destroy(&spa->spa_scrub_lock);
870 	mutex_destroy(&spa->spa_suspend_lock);
871 	mutex_destroy(&spa->spa_vdev_top_lock);
872 	mutex_destroy(&spa->spa_feat_stats_lock);
873 	mutex_destroy(&spa->spa_activities_lock);
874 
875 	kmem_free(spa, sizeof (spa_t));
876 }
877 
878 /*
879  * Given a pool, return the next pool in the namespace, or NULL if there is
880  * none.  If 'prev' is NULL, return the first pool.
881  */
882 spa_t *
883 spa_next(spa_t *prev)
884 {
885 	ASSERT(MUTEX_HELD(&spa_namespace_lock));
886 
887 	if (prev)
888 		return (AVL_NEXT(&spa_namespace_avl, prev));
889 	else
890 		return (avl_first(&spa_namespace_avl));
891 }
892 
893 /*
894  * ==========================================================================
895  * SPA refcount functions
896  * ==========================================================================
897  */
898 
899 /*
900  * Add a reference to the given spa_t.  Must have at least one reference, or
901  * have the namespace lock held.
902  */
903 void
904 spa_open_ref(spa_t *spa, const void *tag)
905 {
906 	ASSERT(zfs_refcount_count(&spa->spa_refcount) >= spa->spa_minref ||
907 	    MUTEX_HELD(&spa_namespace_lock));
908 	(void) zfs_refcount_add(&spa->spa_refcount, tag);
909 }
910 
911 /*
912  * Remove a reference to the given spa_t.  Must have at least one reference, or
913  * have the namespace lock held.
914  */
915 void
916 spa_close(spa_t *spa, const void *tag)
917 {
918 	ASSERT(zfs_refcount_count(&spa->spa_refcount) > spa->spa_minref ||
919 	    MUTEX_HELD(&spa_namespace_lock));
920 	(void) zfs_refcount_remove(&spa->spa_refcount, tag);
921 }
922 
923 /*
924  * Remove a reference to the given spa_t held by a dsl dir that is
925  * being asynchronously released.  Async releases occur from a taskq
926  * performing eviction of dsl datasets and dirs.  The namespace lock
927  * isn't held and the hold by the object being evicted may contribute to
928  * spa_minref (e.g. dataset or directory released during pool export),
929  * so the asserts in spa_close() do not apply.
930  */
931 void
932 spa_async_close(spa_t *spa, const void *tag)
933 {
934 	(void) zfs_refcount_remove(&spa->spa_refcount, tag);
935 }
936 
937 /*
938  * Check to see if the spa refcount is zero.  Must be called with
939  * spa_namespace_lock held.  We really compare against spa_minref, which is the
940  * number of references acquired when opening a pool
941  */
942 boolean_t
943 spa_refcount_zero(spa_t *spa)
944 {
945 	ASSERT(MUTEX_HELD(&spa_namespace_lock));
946 
947 	return (zfs_refcount_count(&spa->spa_refcount) == spa->spa_minref);
948 }
949 
950 /*
951  * ==========================================================================
952  * SPA spare and l2cache tracking
953  * ==========================================================================
954  */
955 
956 /*
957  * Hot spares and cache devices are tracked using the same code below,
958  * for 'auxiliary' devices.
959  */
960 
961 typedef struct spa_aux {
962 	uint64_t	aux_guid;
963 	uint64_t	aux_pool;
964 	avl_node_t	aux_avl;
965 	int		aux_count;
966 } spa_aux_t;
967 
968 static inline int
969 spa_aux_compare(const void *a, const void *b)
970 {
971 	const spa_aux_t *sa = (const spa_aux_t *)a;
972 	const spa_aux_t *sb = (const spa_aux_t *)b;
973 
974 	return (TREE_CMP(sa->aux_guid, sb->aux_guid));
975 }
976 
977 static void
978 spa_aux_add(vdev_t *vd, avl_tree_t *avl)
979 {
980 	avl_index_t where;
981 	spa_aux_t search;
982 	spa_aux_t *aux;
983 
984 	search.aux_guid = vd->vdev_guid;
985 	if ((aux = avl_find(avl, &search, &where)) != NULL) {
986 		aux->aux_count++;
987 	} else {
988 		aux = kmem_zalloc(sizeof (spa_aux_t), KM_SLEEP);
989 		aux->aux_guid = vd->vdev_guid;
990 		aux->aux_count = 1;
991 		avl_insert(avl, aux, where);
992 	}
993 }
994 
995 static void
996 spa_aux_remove(vdev_t *vd, avl_tree_t *avl)
997 {
998 	spa_aux_t search;
999 	spa_aux_t *aux;
1000 	avl_index_t where;
1001 
1002 	search.aux_guid = vd->vdev_guid;
1003 	aux = avl_find(avl, &search, &where);
1004 
1005 	ASSERT(aux != NULL);
1006 
1007 	if (--aux->aux_count == 0) {
1008 		avl_remove(avl, aux);
1009 		kmem_free(aux, sizeof (spa_aux_t));
1010 	} else if (aux->aux_pool == spa_guid(vd->vdev_spa)) {
1011 		aux->aux_pool = 0ULL;
1012 	}
1013 }
1014 
1015 static boolean_t
1016 spa_aux_exists(uint64_t guid, uint64_t *pool, int *refcnt, avl_tree_t *avl)
1017 {
1018 	spa_aux_t search, *found;
1019 
1020 	search.aux_guid = guid;
1021 	found = avl_find(avl, &search, NULL);
1022 
1023 	if (pool) {
1024 		if (found)
1025 			*pool = found->aux_pool;
1026 		else
1027 			*pool = 0ULL;
1028 	}
1029 
1030 	if (refcnt) {
1031 		if (found)
1032 			*refcnt = found->aux_count;
1033 		else
1034 			*refcnt = 0;
1035 	}
1036 
1037 	return (found != NULL);
1038 }
1039 
1040 static void
1041 spa_aux_activate(vdev_t *vd, avl_tree_t *avl)
1042 {
1043 	spa_aux_t search, *found;
1044 	avl_index_t where;
1045 
1046 	search.aux_guid = vd->vdev_guid;
1047 	found = avl_find(avl, &search, &where);
1048 	ASSERT(found != NULL);
1049 	ASSERT(found->aux_pool == 0ULL);
1050 
1051 	found->aux_pool = spa_guid(vd->vdev_spa);
1052 }
1053 
1054 /*
1055  * Spares are tracked globally due to the following constraints:
1056  *
1057  *	- A spare may be part of multiple pools.
1058  *	- A spare may be added to a pool even if it's actively in use within
1059  *	  another pool.
1060  *	- A spare in use in any pool can only be the source of a replacement if
1061  *	  the target is a spare in the same pool.
1062  *
1063  * We keep track of all spares on the system through the use of a reference
1064  * counted AVL tree.  When a vdev is added as a spare, or used as a replacement
1065  * spare, then we bump the reference count in the AVL tree.  In addition, we set
1066  * spare, we bump the reference count in the AVL tree.  In addition, we set
1067  * inactive).  When a spare is made active (used to replace a device in the
1068  * pool), we also keep track of which pool its been made a part of.
1069  * pool), we also keep track of which pool it has been made a part of.
1070  * The 'spa_spare_lock' protects the AVL tree.  These functions are normally
1071  * called under the spa_namespace lock as part of vdev reconfiguration.  The
1072  * separate spare lock exists for the status query path, which does not need to
1073  * be completely consistent with respect to other vdev configuration changes.
1074  */
1075 
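/*
 * For illustration, the expected lifecycle of a spare vdev under the scheme
 * above (a simplified sketch; callers and locking context are omitted):
 *
 *	spa_spare_add(vd);		mark vd as a spare, bump refcount
 *	spa_spare_activate(vd);		spare in use; aux_pool = pool guid
 *	spa_spare_exists(guid, &pool, &refcnt);
 *					status query, spa_spare_lock only
 *	spa_spare_remove(vd);		drop refcount, freed when it hits zero
 */
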
1076 static int
1077 spa_spare_compare(const void *a, const void *b)
1078 {
1079 	return (spa_aux_compare(a, b));
1080 }
1081 
1082 void
1083 spa_spare_add(vdev_t *vd)
1084 {
1085 	mutex_enter(&spa_spare_lock);
1086 	ASSERT(!vd->vdev_isspare);
1087 	spa_aux_add(vd, &spa_spare_avl);
1088 	vd->vdev_isspare = B_TRUE;
1089 	mutex_exit(&spa_spare_lock);
1090 }
1091 
1092 void
1093 spa_spare_remove(vdev_t *vd)
1094 {
1095 	mutex_enter(&spa_spare_lock);
1096 	ASSERT(vd->vdev_isspare);
1097 	spa_aux_remove(vd, &spa_spare_avl);
1098 	vd->vdev_isspare = B_FALSE;
1099 	mutex_exit(&spa_spare_lock);
1100 }
1101 
1102 boolean_t
1103 spa_spare_exists(uint64_t guid, uint64_t *pool, int *refcnt)
1104 {
1105 	boolean_t found;
1106 
1107 	mutex_enter(&spa_spare_lock);
1108 	found = spa_aux_exists(guid, pool, refcnt, &spa_spare_avl);
1109 	mutex_exit(&spa_spare_lock);
1110 
1111 	return (found);
1112 }
1113 
1114 void
1115 spa_spare_activate(vdev_t *vd)
1116 {
1117 	mutex_enter(&spa_spare_lock);
1118 	ASSERT(vd->vdev_isspare);
1119 	spa_aux_activate(vd, &spa_spare_avl);
1120 	mutex_exit(&spa_spare_lock);
1121 }
1122 
1123 /*
1124  * Level 2 ARC devices are tracked globally for the same reasons as spares.
1125  * Cache devices currently only support one pool per cache device, and so
1126  * for these devices the aux reference count is currently unused beyond 1.
1127  */
1128 
1129 static int
1130 spa_l2cache_compare(const void *a, const void *b)
1131 {
1132 	return (spa_aux_compare(a, b));
1133 }
1134 
1135 void
1136 spa_l2cache_add(vdev_t *vd)
1137 {
1138 	mutex_enter(&spa_l2cache_lock);
1139 	ASSERT(!vd->vdev_isl2cache);
1140 	spa_aux_add(vd, &spa_l2cache_avl);
1141 	vd->vdev_isl2cache = B_TRUE;
1142 	mutex_exit(&spa_l2cache_lock);
1143 }
1144 
1145 void
1146 spa_l2cache_remove(vdev_t *vd)
1147 {
1148 	mutex_enter(&spa_l2cache_lock);
1149 	ASSERT(vd->vdev_isl2cache);
1150 	spa_aux_remove(vd, &spa_l2cache_avl);
1151 	vd->vdev_isl2cache = B_FALSE;
1152 	mutex_exit(&spa_l2cache_lock);
1153 }
1154 
1155 boolean_t
1156 spa_l2cache_exists(uint64_t guid, uint64_t *pool)
1157 {
1158 	boolean_t found;
1159 
1160 	mutex_enter(&spa_l2cache_lock);
1161 	found = spa_aux_exists(guid, pool, NULL, &spa_l2cache_avl);
1162 	mutex_exit(&spa_l2cache_lock);
1163 
1164 	return (found);
1165 }
1166 
1167 void
1168 spa_l2cache_activate(vdev_t *vd)
1169 {
1170 	mutex_enter(&spa_l2cache_lock);
1171 	ASSERT(vd->vdev_isl2cache);
1172 	spa_aux_activate(vd, &spa_l2cache_avl);
1173 	mutex_exit(&spa_l2cache_lock);
1174 }
1175 
1176 /*
1177  * ==========================================================================
1178  * SPA vdev locking
1179  * ==========================================================================
1180  */
1181 
1182 /*
1183  * Lock the given spa_t for the purpose of adding or removing a vdev.
1184  * Grabs the global spa_namespace_lock plus the spa config lock for writing.
1185  * It returns the next transaction group for the spa_t.
1186  */
1187 uint64_t
1188 spa_vdev_enter(spa_t *spa)
1189 {
1190 	mutex_enter(&spa->spa_vdev_top_lock);
1191 	mutex_enter(&spa_namespace_lock);
1192 
1193 	vdev_autotrim_stop_all(spa);
1194 
1195 	return (spa_vdev_config_enter(spa));
1196 }
1197 
1198 /*
1199  * The same as spa_vdev_enter() above but additionally takes the guid of
1200  * the vdev being detached.  When there is a rebuild in process it will be
1201  * suspended while the vdev tree is modified then resumed by spa_vdev_exit().
1202  * The rebuild is canceled if only a single child remains after the detach.
1203  */
1204 uint64_t
1205 spa_vdev_detach_enter(spa_t *spa, uint64_t guid)
1206 {
1207 	mutex_enter(&spa->spa_vdev_top_lock);
1208 	mutex_enter(&spa_namespace_lock);
1209 
1210 	vdev_autotrim_stop_all(spa);
1211 
1212 	if (guid != 0) {
1213 		vdev_t *vd = spa_lookup_by_guid(spa, guid, B_FALSE);
1214 		if (vd) {
1215 			vdev_rebuild_stop_wait(vd->vdev_top);
1216 		}
1217 	}
1218 
1219 	return (spa_vdev_config_enter(spa));
1220 }
1221 
1222 /*
1223  * Internal implementation for spa_vdev_enter().  Used when a vdev
1224  * operation requires multiple syncs (i.e. removing a device) while
1225  * operation requires multiple syncs (e.g. removing a device) while
1226  */
1227 uint64_t
1228 spa_vdev_config_enter(spa_t *spa)
1229 {
1230 	ASSERT(MUTEX_HELD(&spa_namespace_lock));
1231 
1232 	spa_config_enter(spa, SCL_ALL, spa, RW_WRITER);
1233 
1234 	return (spa_last_synced_txg(spa) + 1);
1235 }
1236 
1237 /*
1238  * Used in combination with spa_vdev_config_enter() to allow the syncing
1239  * of multiple transactions without releasing the spa_namespace_lock.
1240  */
1241 void
1242 spa_vdev_config_exit(spa_t *spa, vdev_t *vd, uint64_t txg, int error,
1243     const char *tag)
1244 {
1245 	ASSERT(MUTEX_HELD(&spa_namespace_lock));
1246 
1247 	int config_changed = B_FALSE;
1248 
1249 	ASSERT(txg > spa_last_synced_txg(spa));
1250 
1251 	spa->spa_pending_vdev = NULL;
1252 
1253 	/*
1254 	 * Reassess the DTLs.
1255 	 */
1256 	vdev_dtl_reassess(spa->spa_root_vdev, 0, 0, B_FALSE, B_FALSE);
1257 
1258 	if (error == 0 && !list_is_empty(&spa->spa_config_dirty_list)) {
1259 		config_changed = B_TRUE;
1260 		spa->spa_config_generation++;
1261 	}
1262 
1263 	/*
1264 	 * Verify the metaslab classes.
1265 	 */
1266 	ASSERT(metaslab_class_validate(spa_normal_class(spa)) == 0);
1267 	ASSERT(metaslab_class_validate(spa_log_class(spa)) == 0);
1268 	ASSERT(metaslab_class_validate(spa_embedded_log_class(spa)) == 0);
1269 	ASSERT(metaslab_class_validate(spa_special_class(spa)) == 0);
1270 	ASSERT(metaslab_class_validate(spa_dedup_class(spa)) == 0);
1271 
1272 	spa_config_exit(spa, SCL_ALL, spa);
1273 
1274 	/*
1275 	 * Panic the system if the specified tag requires it.  This
1276 	 * is useful for ensuring that configurations are updated
1277 	 * transactionally.
1278 	 */
1279 	if (zio_injection_enabled)
1280 		zio_handle_panic_injection(spa, tag, 0);
1281 
1282 	/*
1283 	 * Note: this txg_wait_synced() is important because it ensures
1284 	 * that there won't be more than one config change per txg.
1285 	 * This allows us to use the txg as the generation number.
1286 	 */
1287 	if (error == 0)
1288 		txg_wait_synced(spa->spa_dsl_pool, txg);
1289 
1290 	if (vd != NULL) {
1291 		ASSERT(!vd->vdev_detached || vd->vdev_dtl_sm == NULL);
1292 		if (vd->vdev_ops->vdev_op_leaf) {
1293 			mutex_enter(&vd->vdev_initialize_lock);
1294 			vdev_initialize_stop(vd, VDEV_INITIALIZE_CANCELED,
1295 			    NULL);
1296 			mutex_exit(&vd->vdev_initialize_lock);
1297 
1298 			mutex_enter(&vd->vdev_trim_lock);
1299 			vdev_trim_stop(vd, VDEV_TRIM_CANCELED, NULL);
1300 			mutex_exit(&vd->vdev_trim_lock);
1301 		}
1302 
1303 		/*
1304 		 * The vdev may be both a leaf and top-level device.
1305 		 */
1306 		vdev_autotrim_stop_wait(vd);
1307 
1308 		spa_config_enter(spa, SCL_STATE_ALL, spa, RW_WRITER);
1309 		vdev_free(vd);
1310 		spa_config_exit(spa, SCL_STATE_ALL, spa);
1311 	}
1312 
1313 	/*
1314 	 * If the config changed, update the config cache.
1315 	 */
1316 	if (config_changed)
1317 		spa_write_cachefile(spa, B_FALSE, B_TRUE, B_TRUE);
1318 }
1319 
1320 /*
1321  * Unlock the spa_t after adding or removing a vdev.  Besides undoing the
1322  * locking of spa_vdev_enter(), we also want to make sure the transactions have
1323  * synced to disk, and then update the global configuration cache with the new
1324  * information.
1325  */
1326 int
1327 spa_vdev_exit(spa_t *spa, vdev_t *vd, uint64_t txg, int error)
1328 {
1329 	vdev_autotrim_restart(spa);
1330 	vdev_rebuild_restart(spa);
1331 
1332 	spa_vdev_config_exit(spa, vd, txg, error, FTAG);
1333 	mutex_exit(&spa_namespace_lock);
1334 	mutex_exit(&spa->spa_vdev_top_lock);
1335 
1336 	return (error);
1337 }
1338 
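/*
 * Illustrative calling pattern for spa_vdev_enter() and spa_vdev_exit()
 * (a sketch, not a specific call site):
 *
 *	uint64_t txg = spa_vdev_enter(spa);
 *	... modify the vdev tree, setting error and, if a vdev is being
 *	    removed, vd ...
 *	return (spa_vdev_exit(spa, vd, txg, error));
 *
 * spa_vdev_exit() waits for txg to sync (on success) and updates the config
 * cache, so the change is durable by the time the caller returns.
 */
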
1339 /*
1340  * Lock the given spa_t for the purpose of changing vdev state.
1341  */
1342 void
1343 spa_vdev_state_enter(spa_t *spa, int oplocks)
1344 {
1345 	int locks = SCL_STATE_ALL | oplocks;
1346 
1347 	/*
1348 	 * Root pools may need to read from the underlying devfs filesystem
1349 	 * when opening up a vdev.  Unfortunately if we're holding the
1350 	 * SCL_ZIO lock it will result in a deadlock when we try to issue
1351 	 * the read from the root filesystem.  Instead we "prefetch"
1352 	 * the associated vnodes that we need prior to opening the
1353 	 * underlying devices and cache them so that we can prevent
1354 	 * any I/O when we are doing the actual open.
1355 	 */
1356 	if (spa_is_root(spa)) {
1357 		int low = locks & ~(SCL_ZIO - 1);
1358 		int high = locks & ~low;
1359 
1360 		spa_config_enter(spa, high, spa, RW_WRITER);
1361 		vdev_hold(spa->spa_root_vdev);
1362 		spa_config_enter(spa, low, spa, RW_WRITER);
1363 	} else {
1364 		spa_config_enter(spa, locks, spa, RW_WRITER);
1365 	}
1366 	spa->spa_vdev_locks = locks;
1367 }
1368 
1369 int
1370 spa_vdev_state_exit(spa_t *spa, vdev_t *vd, int error)
1371 {
1372 	boolean_t config_changed = B_FALSE;
1373 	vdev_t *vdev_top;
1374 
1375 	if (vd == NULL || vd == spa->spa_root_vdev) {
1376 		vdev_top = spa->spa_root_vdev;
1377 	} else {
1378 		vdev_top = vd->vdev_top;
1379 	}
1380 
1381 	if (vd != NULL || error == 0)
1382 		vdev_dtl_reassess(vdev_top, 0, 0, B_FALSE, B_FALSE);
1383 
1384 	if (vd != NULL) {
1385 		if (vd != spa->spa_root_vdev)
1386 			vdev_state_dirty(vdev_top);
1387 
1388 		config_changed = B_TRUE;
1389 		spa->spa_config_generation++;
1390 	}
1391 
1392 	if (spa_is_root(spa))
1393 		vdev_rele(spa->spa_root_vdev);
1394 
1395 	ASSERT3U(spa->spa_vdev_locks, >=, SCL_STATE_ALL);
1396 	spa_config_exit(spa, spa->spa_vdev_locks, spa);
1397 
1398 	/*
1399 	 * If anything changed, wait for it to sync.  This ensures that,
1400 	 * from the system administrator's perspective, zpool(8) commands
1401 	 * are synchronous.  This is important for things like zpool offline:
1402 	 * when the command completes, you expect no further I/O from ZFS.
1403 	 */
1404 	if (vd != NULL)
1405 		txg_wait_synced(spa->spa_dsl_pool, 0);
1406 
1407 	/*
1408 	 * If the config changed, update the config cache.
1409 	 */
1410 	if (config_changed) {
1411 		mutex_enter(&spa_namespace_lock);
1412 		spa_write_cachefile(spa, B_FALSE, B_TRUE, B_FALSE);
1413 		mutex_exit(&spa_namespace_lock);
1414 	}
1415 
1416 	return (error);
1417 }
1418 
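/*
 * Illustrative calling pattern for spa_vdev_state_enter() and
 * spa_vdev_state_exit() (a sketch; SCL_NONE requests no locks beyond
 * SCL_STATE_ALL):
 *
 *	spa_vdev_state_enter(spa, SCL_NONE);
 *	... change vdev state, e.g. online/offline/clear ...
 *	return (spa_vdev_state_exit(spa, vd, error));
 */
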
1419 /*
1420  * ==========================================================================
1421  * Miscellaneous functions
1422  * ==========================================================================
1423  */
1424 
1425 void
1426 spa_activate_mos_feature(spa_t *spa, const char *feature, dmu_tx_t *tx)
1427 {
1428 	if (!nvlist_exists(spa->spa_label_features, feature)) {
1429 		fnvlist_add_boolean(spa->spa_label_features, feature);
1430 		/*
1431 		 * When we are creating the pool (tx_txg==TXG_INITIAL), we can't
1432 		 * dirty the vdev config because the SCL_CONFIG lock is not held.
1433 		 * Thankfully, in this case we don't need to dirty the config
1434 		 * because it will be written out anyway when we finish
1435 		 * creating the pool.
1436 		 */
1437 		if (tx->tx_txg != TXG_INITIAL)
1438 			vdev_config_dirty(spa->spa_root_vdev);
1439 	}
1440 }
1441 
1442 void
1443 spa_deactivate_mos_feature(spa_t *spa, const char *feature)
1444 {
1445 	if (nvlist_remove_all(spa->spa_label_features, feature) == 0)
1446 		vdev_config_dirty(spa->spa_root_vdev);
1447 }
1448 
1449 /*
1450  * Return the spa_t associated with given pool_guid, if it exists.  If
1451  * device_guid is non-zero, determine whether the pool exists *and* contains
1452  * a device with the specified device_guid.
1453  */
1454 spa_t *
1455 spa_by_guid(uint64_t pool_guid, uint64_t device_guid)
1456 {
1457 	spa_t *spa;
1458 	avl_tree_t *t = &spa_namespace_avl;
1459 
1460 	ASSERT(MUTEX_HELD(&spa_namespace_lock));
1461 
1462 	for (spa = avl_first(t); spa != NULL; spa = AVL_NEXT(t, spa)) {
1463 		if (spa->spa_state == POOL_STATE_UNINITIALIZED)
1464 			continue;
1465 		if (spa->spa_root_vdev == NULL)
1466 			continue;
1467 		if (spa_guid(spa) == pool_guid) {
1468 			if (device_guid == 0)
1469 				break;
1470 
1471 			if (vdev_lookup_by_guid(spa->spa_root_vdev,
1472 			    device_guid) != NULL)
1473 				break;
1474 
1475 			/*
1476 			 * Check any devices we may be in the process of adding.
1477 			 */
1478 			if (spa->spa_pending_vdev) {
1479 				if (vdev_lookup_by_guid(spa->spa_pending_vdev,
1480 				    device_guid) != NULL)
1481 					break;
1482 			}
1483 		}
1484 	}
1485 
1486 	return (spa);
1487 }
1488 
1489 /*
1490  * Determine whether a pool with the given pool_guid exists.
1491  */
1492 boolean_t
1493 spa_guid_exists(uint64_t pool_guid, uint64_t device_guid)
1494 {
1495 	return (spa_by_guid(pool_guid, device_guid) != NULL);
1496 }
1497 
1498 char *
1499 spa_strdup(const char *s)
1500 {
1501 	size_t len;
1502 	char *new;
1503 
1504 	len = strlen(s);
1505 	new = kmem_alloc(len + 1, KM_SLEEP);
1506 	memcpy(new, s, len + 1);
1507 
1508 	return (new);
1509 }
1510 
1511 void
1512 spa_strfree(char *s)
1513 {
1514 	kmem_free(s, strlen(s) + 1);
1515 }
1516 
1517 uint64_t
1518 spa_generate_guid(spa_t *spa)
1519 {
1520 	uint64_t guid;
1521 
1522 	if (spa != NULL) {
1523 		do {
1524 			(void) random_get_pseudo_bytes((void *)&guid,
1525 			    sizeof (guid));
1526 		} while (guid == 0 || spa_guid_exists(spa_guid(spa), guid));
1527 	} else {
1528 		do {
1529 			(void) random_get_pseudo_bytes((void *)&guid,
1530 			    sizeof (guid));
1531 		} while (guid == 0 || spa_guid_exists(guid, 0));
1532 	}
1533 
1534 	return (guid);
1535 }
1536 
1537 void
1538 snprintf_blkptr(char *buf, size_t buflen, const blkptr_t *bp)
1539 {
1540 	char type[256];
1541 	const char *checksum = NULL;
1542 	const char *compress = NULL;
1543 
1544 	if (bp != NULL) {
1545 		if (BP_GET_TYPE(bp) & DMU_OT_NEWTYPE) {
1546 			dmu_object_byteswap_t bswap =
1547 			    DMU_OT_BYTESWAP(BP_GET_TYPE(bp));
1548 			(void) snprintf(type, sizeof (type), "bswap %s %s",
1549 			    DMU_OT_IS_METADATA(BP_GET_TYPE(bp)) ?
1550 			    "metadata" : "data",
1551 			    dmu_ot_byteswap[bswap].ob_name);
1552 		} else {
1553 			(void) strlcpy(type, dmu_ot[BP_GET_TYPE(bp)].ot_name,
1554 			    sizeof (type));
1555 		}
1556 		if (!BP_IS_EMBEDDED(bp)) {
1557 			checksum =
1558 			    zio_checksum_table[BP_GET_CHECKSUM(bp)].ci_name;
1559 		}
1560 		compress = zio_compress_table[BP_GET_COMPRESS(bp)].ci_name;
1561 	}
1562 
1563 	SNPRINTF_BLKPTR(kmem_scnprintf, ' ', buf, buflen, bp, type, checksum,
1564 	    compress);
1565 }
1566 
1567 void
1568 spa_freeze(spa_t *spa)
1569 {
1570 	uint64_t freeze_txg = 0;
1571 
1572 	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
1573 	if (spa->spa_freeze_txg == UINT64_MAX) {
1574 		freeze_txg = spa_last_synced_txg(spa) + TXG_SIZE;
1575 		spa->spa_freeze_txg = freeze_txg;
1576 	}
1577 	spa_config_exit(spa, SCL_ALL, FTAG);
1578 	if (freeze_txg != 0)
1579 		txg_wait_synced(spa_get_dsl(spa), freeze_txg);
1580 }
1581 
1582 void
1583 zfs_panic_recover(const char *fmt, ...)
1584 {
1585 	va_list adx;
1586 
1587 	va_start(adx, fmt);
1588 	vcmn_err(zfs_recover ? CE_WARN : CE_PANIC, fmt, adx);
1589 	va_end(adx);
1590 }
1591 
1592 /*
1593  * This is a stripped-down version of strtoull, suitable only for converting
1594  * lowercase hexadecimal numbers that don't overflow.
1595  */
1596 uint64_t
1597 zfs_strtonum(const char *str, char **nptr)
1598 {
1599 	uint64_t val = 0;
1600 	char c;
1601 	int digit;
1602 
1603 	while ((c = *str) != '\0') {
1604 		if (c >= '0' && c <= '9')
1605 			digit = c - '0';
1606 		else if (c >= 'a' && c <= 'f')
1607 			digit = 10 + c - 'a';
1608 		else
1609 			break;
1610 
1611 		val *= 16;
1612 		val += digit;
1613 
1614 		str++;
1615 	}
1616 
1617 	if (nptr)
1618 		*nptr = (char *)str;
1619 
1620 	return (val);
1621 }
1622 
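/*
 * For example, zfs_strtonum("1a2b", NULL) returns 0x1a2b (6699); parsing
 * stops at the first character outside [0-9a-f], and if nptr is non-NULL
 * it is set to point at that character.
 */
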
1623 void
1624 spa_activate_allocation_classes(spa_t *spa, dmu_tx_t *tx)
1625 {
1626 	/*
1627 	 * We bump the feature refcount for each special vdev added to the pool
1628 	 */
1629 	ASSERT(spa_feature_is_enabled(spa, SPA_FEATURE_ALLOCATION_CLASSES));
1630 	spa_feature_incr(spa, SPA_FEATURE_ALLOCATION_CLASSES, tx);
1631 }
1632 
1633 /*
1634  * ==========================================================================
1635  * Accessor functions
1636  * ==========================================================================
1637  */
1638 
1639 boolean_t
1640 spa_shutting_down(spa_t *spa)
1641 {
1642 	return (spa->spa_async_suspended);
1643 }
1644 
1645 dsl_pool_t *
1646 spa_get_dsl(spa_t *spa)
1647 {
1648 	return (spa->spa_dsl_pool);
1649 }
1650 
1651 boolean_t
1652 spa_is_initializing(spa_t *spa)
1653 {
1654 	return (spa->spa_is_initializing);
1655 }
1656 
1657 boolean_t
1658 spa_indirect_vdevs_loaded(spa_t *spa)
1659 {
1660 	return (spa->spa_indirect_vdevs_loaded);
1661 }
1662 
1663 blkptr_t *
1664 spa_get_rootblkptr(spa_t *spa)
1665 {
1666 	return (&spa->spa_ubsync.ub_rootbp);
1667 }
1668 
1669 void
1670 spa_set_rootblkptr(spa_t *spa, const blkptr_t *bp)
1671 {
1672 	spa->spa_uberblock.ub_rootbp = *bp;
1673 }
1674 
1675 void
1676 spa_altroot(spa_t *spa, char *buf, size_t buflen)
1677 {
1678 	if (spa->spa_root == NULL)
1679 		buf[0] = '\0';
1680 	else
1681 		(void) strlcpy(buf, spa->spa_root, buflen);
1682 }
1683 
1684 uint32_t
1685 spa_sync_pass(spa_t *spa)
1686 {
1687 	return (spa->spa_sync_pass);
1688 }
1689 
1690 char *
1691 spa_name(spa_t *spa)
1692 {
1693 	return (spa->spa_name);
1694 }
1695 
1696 uint64_t
1697 spa_guid(spa_t *spa)
1698 {
1699 	dsl_pool_t *dp = spa_get_dsl(spa);
1700 	uint64_t guid;
1701 
1702 	/*
1703 	 * If we fail to parse the config during spa_load(), we can go through
1704 	 * the error path (which posts an ereport) and end up here with no root
1705 	 * vdev.  We stash the original pool guid in 'spa_config_guid' to handle
1706 	 * this case.
1707 	 */
1708 	if (spa->spa_root_vdev == NULL)
1709 		return (spa->spa_config_guid);
1710 
1711 	guid = spa->spa_last_synced_guid != 0 ?
1712 	    spa->spa_last_synced_guid : spa->spa_root_vdev->vdev_guid;
1713 
1714 	/*
1715 	 * Return the most recently synced out guid unless we're
1716 	 * in syncing context.
1717 	 */
1718 	if (dp && dsl_pool_sync_context(dp))
1719 		return (spa->spa_root_vdev->vdev_guid);
1720 	else
1721 		return (guid);
1722 }
1723 
1724 uint64_t
1725 spa_load_guid(spa_t *spa)
1726 {
1727 	/*
1728 	 * This is a GUID that exists solely as a reference for the
1729 	 * purposes of the ARC.  It is generated at load time, and
1730 	 * is never written to persistent storage.
1731 	 */
1732 	return (spa->spa_load_guid);
1733 }
1734 
1735 uint64_t
1736 spa_last_synced_txg(spa_t *spa)
1737 {
1738 	return (spa->spa_ubsync.ub_txg);
1739 }
1740 
1741 uint64_t
1742 spa_first_txg(spa_t *spa)
1743 {
1744 	return (spa->spa_first_txg);
1745 }
1746 
1747 uint64_t
1748 spa_syncing_txg(spa_t *spa)
1749 {
1750 	return (spa->spa_syncing_txg);
1751 }
1752 
1753 /*
1754  * Return the last txg in which data can be dirtied.  The final txgs
1755  * are used only to clear out any deferred frees that remain.
1756  */
1757 uint64_t
1758 spa_final_dirty_txg(spa_t *spa)
1759 {
1760 	return (spa->spa_final_txg - TXG_DEFER_SIZE);
1761 }
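
/*
 * Worked example (assuming TXG_DEFER_SIZE == 2): if spa_final_txg is 100,
 * the last txg that may dirty new data is 98; txgs 99 and 100 only
 * process the remaining deferred frees.
 */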
1762 
1763 pool_state_t
1764 spa_state(spa_t *spa)
1765 {
1766 	return (spa->spa_state);
1767 }
1768 
1769 spa_load_state_t
1770 spa_load_state(spa_t *spa)
1771 {
1772 	return (spa->spa_load_state);
1773 }
1774 
1775 uint64_t
1776 spa_freeze_txg(spa_t *spa)
1777 {
1778 	return (spa->spa_freeze_txg);
1779 }
1780 
1781 /*
1782  * Return the inflated asize for a logical write in bytes. This is used by the
1783  * DMU to calculate the space a logical write will require on disk.
1784  * If lsize is smaller than the largest minimum allocation size on this
1785  * pool (1 << spa_max_ashift), we use that value instead, since the write
1786  * will consume at least a whole block anyway.
1787  */
1788 uint64_t
1789 spa_get_worst_case_asize(spa_t *spa, uint64_t lsize)
1790 {
1791 	if (lsize == 0)
1792 		return (0);	/* No inflation needed */
1793 	return (MAX(lsize, 1 << spa->spa_max_ashift) * spa_asize_inflation);
1794 }
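
/*
 * Worked example (hypothetical values): with spa_max_ashift == 12 (4 KiB
 * allocations) and the default spa_asize_inflation of 24, a 512-byte
 * logical write is charged MAX(512, 4096) * 24 = 98304 bytes, and a
 * 128 KiB write is charged 131072 * 24 = 3145728 bytes.
 */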
1795 
1796 /*
1797  * Return the amount of slop space in bytes.  It is typically 1/32 of the pool
1798  * (~3.1%), minus the embedded log space.  On very small pools, it may be
1799  * slightly larger than this.  On very large pools, it will be capped to
1800  * the value of spa_max_slop.  The embedded log space is not included in
1801  * spa_dspace.  By subtracting it, the usable space (per "zfs list") is a
1802  * roughly constant 97% of the total space, regardless of metaslab size
1803  * (assuming the default spa_slop_shift=5 and a non-tiny pool).
1804  *
1805  * See the comment above spa_slop_shift for more details.
1806  */
1807 uint64_t
1808 spa_get_slop_space(spa_t *spa)
1809 {
1810 	uint64_t space = 0;
1811 	uint64_t slop = 0;
1812 
1813 	/*
1814 	 * Make sure spa_dedup_dspace has been set.
1815 	 */
1816 	if (spa->spa_dedup_dspace == ~0ULL)
1817 		spa_update_dspace(spa);
1818 
1819 	/*
1820 	 * spa_get_dspace() includes the space only logically "used" by
1821 	 * deduplicated data.  Since it is not useful to reserve more space
1822 	 * as more data is deduplicated, we subtract that out here.
1823 	 */
1824 	space = spa_get_dspace(spa) - spa->spa_dedup_dspace;
1825 	slop = MIN(space >> spa_slop_shift, spa_max_slop);
1826 
1827 	/*
1828 	 * Subtract the embedded log space, but no more than half the (~3.1%)
1829 	 * unusable space.  Note, the "no more than half" is only relevant if
1830 	 * zfs_embedded_slog_min_ms >> spa_slop_shift < 2, which is not true by
1831 	 * default.
1832 	 */
1833 	uint64_t embedded_log =
1834 	    metaslab_class_get_dspace(spa_embedded_log_class(spa));
1835 	slop -= MIN(embedded_log, slop >> 1);
1836 
1837 	/*
1838 	 * Slop space should be at least spa_min_slop, but no more than half
1839 	 * the entire pool.
1840 	 */
1841 	slop = MAX(slop, MIN(space >> 1, spa_min_slop));
1842 	return (slop);
1843 }
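
/*
 * Worked example (hypothetical values, default spa_slop_shift == 5): for
 * 1 TiB of dspace with no dedup and a 2 GiB embedded log class,
 * slop = MIN(1 TiB >> 5, spa_max_slop) = 32 GiB, then
 * slop -= MIN(2 GiB, 32 GiB / 2) = 2 GiB, leaving 30 GiB of slop space.
 */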
1844 
1845 uint64_t
1846 spa_get_dspace(spa_t *spa)
1847 {
1848 	return (spa->spa_dspace);
1849 }
1850 
1851 uint64_t
1852 spa_get_checkpoint_space(spa_t *spa)
1853 {
1854 	return (spa->spa_checkpoint_info.sci_dspace);
1855 }
1856 
1857 void
1858 spa_update_dspace(spa_t *spa)
1859 {
1860 	spa->spa_dspace = metaslab_class_get_dspace(spa_normal_class(spa)) +
1861 	    ddt_get_dedup_dspace(spa) + brt_get_dspace(spa);
1862 	if (spa->spa_nonallocating_dspace > 0) {
1863 		/*
1864 		 * Subtract the space provided by all non-allocating vdevs that
1865 		 * contribute to dspace.  If a file is overwritten, its old
1866 		 * blocks are freed and new blocks are allocated.  If there are
1867 		 * no snapshots of the file, the available space should remain
1868 		 * the same.  The old blocks could be freed from the
1869 		 * non-allocating vdev, but the new blocks must be allocated on
1870 		 * other (allocating) vdevs.  By reserving the entire size of
1871 		 * the non-allocating vdevs (including allocated space), we
1872 		 * ensure that there will be enough space on the allocating
1873 		 * vdevs for this file overwrite to succeed.
1874 		 *
1875 		 * Note that the DMU/DSL doesn't actually know or care
1876 		 * how much space is allocated (it does its own tracking
1877 		 * of how much space has been logically used).  So it
1878 		 * doesn't matter that the data we are moving may be
1879 		 * allocated twice (on the old device and the new device).
1880 		 */
1881 		ASSERT3U(spa->spa_dspace, >=, spa->spa_nonallocating_dspace);
1882 		spa->spa_dspace -= spa->spa_nonallocating_dspace;
1883 	}
1884 }
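
/*
 * Example (hypothetical numbers): if a 1 TiB top-level vdev is marked
 * non-allocating for removal and 300 GiB of it is still allocated, the
 * full 1 TiB is subtracted from spa_dspace so that overwrites of that
 * data are guaranteed to find room on the remaining allocating vdevs.
 */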
1885 
1886 /*
1887  * Return the failure mode that has been set for this pool.  The default
1888  * behavior is to block all I/Os when a complete failure occurs.
1889  */
1890 uint64_t
1891 spa_get_failmode(spa_t *spa)
1892 {
1893 	return (spa->spa_failmode);
1894 }
1895 
1896 boolean_t
1897 spa_suspended(spa_t *spa)
1898 {
1899 	return (spa->spa_suspended != ZIO_SUSPEND_NONE);
1900 }
1901 
1902 uint64_t
1903 spa_version(spa_t *spa)
1904 {
1905 	return (spa->spa_ubsync.ub_version);
1906 }
1907 
1908 boolean_t
1909 spa_deflate(spa_t *spa)
1910 {
1911 	return (spa->spa_deflate);
1912 }
1913 
1914 metaslab_class_t *
1915 spa_normal_class(spa_t *spa)
1916 {
1917 	return (spa->spa_normal_class);
1918 }
1919 
1920 metaslab_class_t *
1921 spa_log_class(spa_t *spa)
1922 {
1923 	return (spa->spa_log_class);
1924 }
1925 
1926 metaslab_class_t *
1927 spa_embedded_log_class(spa_t *spa)
1928 {
1929 	return (spa->spa_embedded_log_class);
1930 }
1931 
1932 metaslab_class_t *
1933 spa_special_class(spa_t *spa)
1934 {
1935 	return (spa->spa_special_class);
1936 }
1937 
1938 metaslab_class_t *
1939 spa_dedup_class(spa_t *spa)
1940 {
1941 	return (spa->spa_dedup_class);
1942 }
1943 
1944 /*
1945  * Locate an appropriate allocation class
1946  */
1947 metaslab_class_t *
1948 spa_preferred_class(spa_t *spa, uint64_t size, dmu_object_type_t objtype,
1949     uint_t level, uint_t special_smallblk)
1950 {
1951 	/*
1952 	 * ZIL allocations determine their class in zio_alloc_zil().
1953 	 */
1954 	ASSERT(objtype != DMU_OT_INTENT_LOG);
1955 
1956 	boolean_t has_special_class = spa->spa_special_class->mc_groups != 0;
1957 
1958 	if (DMU_OT_IS_DDT(objtype)) {
1959 		if (spa->spa_dedup_class->mc_groups != 0)
1960 			return (spa_dedup_class(spa));
1961 		else if (has_special_class && zfs_ddt_data_is_special)
1962 			return (spa_special_class(spa));
1963 		else
1964 			return (spa_normal_class(spa));
1965 	}
1966 
1967 	/* Indirect blocks for user data can land in special if allowed */
1968 	if (level > 0 && (DMU_OT_IS_FILE(objtype) || objtype == DMU_OT_ZVOL)) {
1969 		if (has_special_class && zfs_user_indirect_is_special)
1970 			return (spa_special_class(spa));
1971 		else
1972 			return (spa_normal_class(spa));
1973 	}
1974 
1975 	if (DMU_OT_IS_METADATA(objtype) || level > 0) {
1976 		if (has_special_class)
1977 			return (spa_special_class(spa));
1978 		else
1979 			return (spa_normal_class(spa));
1980 	}
1981 
1982 	/*
1983 	 * Allow small file blocks in the special class in some cases (e.g.
1984 	 * for the dRAID vdev feature), but always leave a reserve of
1985 	 * zfs_special_class_metadata_reserve_pct exclusively for metadata.
1986 	 */
1987 	if (DMU_OT_IS_FILE(objtype) &&
1988 	    has_special_class && size <= special_smallblk) {
1989 		metaslab_class_t *special = spa_special_class(spa);
1990 		uint64_t alloc = metaslab_class_get_alloc(special);
1991 		uint64_t space = metaslab_class_get_space(special);
1992 		uint64_t limit =
1993 		    (space * (100 - zfs_special_class_metadata_reserve_pct))
1994 		    / 100;
1995 
1996 		if (alloc < limit)
1997 			return (special);
1998 	}
1999 
2000 	return (spa_normal_class(spa));
2001 }
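
/*
 * Worked example for the small-block reserve above (hypothetical values):
 * with zfs_special_class_metadata_reserve_pct == 25 and 100 GiB of special
 * class space, limit = 100 GiB * (100 - 25) / 100 = 75 GiB.  Small file
 * blocks (size <= special_smallblk) go to the special class only while its
 * allocations stay below 75 GiB; beyond that they fall back to the normal
 * class, keeping the last 25 GiB reserved for metadata.
 */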
2002 
2003 void
2004 spa_evicting_os_register(spa_t *spa, objset_t *os)
2005 {
2006 	mutex_enter(&spa->spa_evicting_os_lock);
2007 	list_insert_head(&spa->spa_evicting_os_list, os);
2008 	mutex_exit(&spa->spa_evicting_os_lock);
2009 }
2010 
2011 void
2012 spa_evicting_os_deregister(spa_t *spa, objset_t *os)
2013 {
2014 	mutex_enter(&spa->spa_evicting_os_lock);
2015 	list_remove(&spa->spa_evicting_os_list, os);
2016 	cv_broadcast(&spa->spa_evicting_os_cv);
2017 	mutex_exit(&spa->spa_evicting_os_lock);
2018 }
2019 
2020 void
2021 spa_evicting_os_wait(spa_t *spa)
2022 {
2023 	mutex_enter(&spa->spa_evicting_os_lock);
2024 	while (!list_is_empty(&spa->spa_evicting_os_list))
2025 		cv_wait(&spa->spa_evicting_os_cv, &spa->spa_evicting_os_lock);
2026 	mutex_exit(&spa->spa_evicting_os_lock);
2027 
2028 	dmu_buf_user_evict_wait();
2029 }
2030 
2031 int
2032 spa_max_replication(spa_t *spa)
2033 {
2034 	/*
2035 	 * As of SPA_VERSION == SPA_VERSION_DITTO_BLOCKS, we are able to
2036 	 * handle BPs with more than one DVA allocated.  Set our max
2037 	 * replication level accordingly.
2038 	 */
2039 	if (spa_version(spa) < SPA_VERSION_DITTO_BLOCKS)
2040 		return (1);
2041 	return (MIN(SPA_DVAS_PER_BP, spa_max_replication_override));
2042 }
2043 
2044 int
2045 spa_prev_software_version(spa_t *spa)
2046 {
2047 	return (spa->spa_prev_software_version);
2048 }
2049 
2050 uint64_t
2051 spa_deadman_synctime(spa_t *spa)
2052 {
2053 	return (spa->spa_deadman_synctime);
2054 }
2055 
2056 spa_autotrim_t
2057 spa_get_autotrim(spa_t *spa)
2058 {
2059 	return (spa->spa_autotrim);
2060 }
2061 
2062 uint64_t
2063 spa_deadman_ziotime(spa_t *spa)
2064 {
2065 	return (spa->spa_deadman_ziotime);
2066 }
2067 
2068 uint64_t
2069 spa_get_deadman_failmode(spa_t *spa)
2070 {
2071 	return (spa->spa_deadman_failmode);
2072 }
2073 
2074 void
2075 spa_set_deadman_failmode(spa_t *spa, const char *failmode)
2076 {
2077 	if (strcmp(failmode, "wait") == 0)
2078 		spa->spa_deadman_failmode = ZIO_FAILURE_MODE_WAIT;
2079 	else if (strcmp(failmode, "continue") == 0)
2080 		spa->spa_deadman_failmode = ZIO_FAILURE_MODE_CONTINUE;
2081 	else if (strcmp(failmode, "panic") == 0)
2082 		spa->spa_deadman_failmode = ZIO_FAILURE_MODE_PANIC;
2083 	else
2084 		spa->spa_deadman_failmode = ZIO_FAILURE_MODE_WAIT;
2085 }
2086 
2087 void
2088 spa_set_deadman_ziotime(hrtime_t ns)
2089 {
2090 	spa_t *spa = NULL;
2091 
2092 	if (spa_mode_global != SPA_MODE_UNINIT) {
2093 		mutex_enter(&spa_namespace_lock);
2094 		while ((spa = spa_next(spa)) != NULL)
2095 			spa->spa_deadman_ziotime = ns;
2096 		mutex_exit(&spa_namespace_lock);
2097 	}
2098 }
2099 
2100 void
2101 spa_set_deadman_synctime(hrtime_t ns)
2102 {
2103 	spa_t *spa = NULL;
2104 
2105 	if (spa_mode_global != SPA_MODE_UNINIT) {
2106 		mutex_enter(&spa_namespace_lock);
2107 		while ((spa = spa_next(spa)) != NULL)
2108 			spa->spa_deadman_synctime = ns;
2109 		mutex_exit(&spa_namespace_lock);
2110 	}
2111 }
2112 
2113 uint64_t
2114 dva_get_dsize_sync(spa_t *spa, const dva_t *dva)
2115 {
2116 	uint64_t asize = DVA_GET_ASIZE(dva);
2117 	uint64_t dsize = asize;
2118 
2119 	ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0);
2120 
2121 	if (asize != 0 && spa->spa_deflate) {
2122 		vdev_t *vd = vdev_lookup_top(spa, DVA_GET_VDEV(dva));
2123 		if (vd != NULL)
2124 			dsize = (asize >> SPA_MINBLOCKSHIFT) *
2125 			    vd->vdev_deflate_ratio;
2126 	}
2127 
2128 	return (dsize);
2129 }
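
/*
 * Note on the deflate ratio (informal): vdev_deflate_ratio is scaled so
 * that a vdev with no allocation overhead yields a ratio of 512, making
 * dsize == asize above; for raidz vdevs the ratio is proportionally
 * smaller, deflating the parity overhead out of the reported dsize.
 */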
2130 
2131 uint64_t
2132 bp_get_dsize_sync(spa_t *spa, const blkptr_t *bp)
2133 {
2134 	uint64_t dsize = 0;
2135 
2136 	for (int d = 0; d < BP_GET_NDVAS(bp); d++)
2137 		dsize += dva_get_dsize_sync(spa, &bp->blk_dva[d]);
2138 
2139 	return (dsize);
2140 }
2141 
2142 uint64_t
2143 bp_get_dsize(spa_t *spa, const blkptr_t *bp)
2144 {
2145 	uint64_t dsize = 0;
2146 
2147 	spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
2148 
2149 	for (int d = 0; d < BP_GET_NDVAS(bp); d++)
2150 		dsize += dva_get_dsize_sync(spa, &bp->blk_dva[d]);
2151 
2152 	spa_config_exit(spa, SCL_VDEV, FTAG);
2153 
2154 	return (dsize);
2155 }
2156 
2157 uint64_t
2158 spa_dirty_data(spa_t *spa)
2159 {
2160 	return (spa->spa_dsl_pool->dp_dirty_total);
2161 }
2162 
2163 /*
2164  * ==========================================================================
2165  * SPA Import Progress Routines
2166  * ==========================================================================
2167  */
2168 
2169 typedef struct spa_import_progress {
2170 	uint64_t		pool_guid;	/* unique id for updates */
2171 	char			*pool_name;
2172 	spa_load_state_t	spa_load_state;
2173 	uint64_t		mmp_sec_remaining;	/* MMP activity check */
2174 	uint64_t		spa_load_max_txg;	/* rewind txg */
2175 	procfs_list_node_t	smh_node;
2176 } spa_import_progress_t;
2177 
2178 spa_history_list_t *spa_import_progress_list = NULL;
2179 
2180 static int
2181 spa_import_progress_show_header(struct seq_file *f)
2182 {
2183 	seq_printf(f, "%-20s %-14s %-14s %-12s %s\n", "pool_guid",
2184 	    "load_state", "multihost_secs", "max_txg",
2185 	    "pool_name");
2186 	return (0);
2187 }
2188 
2189 static int
2190 spa_import_progress_show(struct seq_file *f, void *data)
2191 {
2192 	spa_import_progress_t *sip = (spa_import_progress_t *)data;
2193 
2194 	seq_printf(f, "%-20llu %-14llu %-14llu %-12llu %s\n",
2195 	    (u_longlong_t)sip->pool_guid, (u_longlong_t)sip->spa_load_state,
2196 	    (u_longlong_t)sip->mmp_sec_remaining,
2197 	    (u_longlong_t)sip->spa_load_max_txg,
2198 	    (sip->pool_name ? sip->pool_name : "-"));
2199 
2200 	return (0);
2201 }
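
/*
 * Example output as rendered through procfs (on Linux typically
 * /proc/spl/kstat/zfs/import_progress; the exact path is an assumption
 * here, and the values are illustrative only):
 *
 *	pool_guid            load_state     multihost_secs max_txg      pool_name
 *	16167762882856787425 3              0              0            tank
 */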
2202 
2203 /* Remove oldest elements from list until there are no more than 'size' left */
2204 static void
2205 spa_import_progress_truncate(spa_history_list_t *shl, unsigned int size)
2206 {
2207 	spa_import_progress_t *sip;
2208 	while (shl->size > size) {
2209 		sip = list_remove_head(&shl->procfs_list.pl_list);
2210 		if (sip->pool_name)
2211 			spa_strfree(sip->pool_name);
2212 		kmem_free(sip, sizeof (spa_import_progress_t));
2213 		shl->size--;
2214 	}
2215 
2216 	IMPLY(size == 0, list_is_empty(&shl->procfs_list.pl_list));
2217 }
2218 
2219 static void
2220 spa_import_progress_init(void)
2221 {
2222 	spa_import_progress_list = kmem_zalloc(sizeof (spa_history_list_t),
2223 	    KM_SLEEP);
2224 
2225 	spa_import_progress_list->size = 0;
2226 
2227 	spa_import_progress_list->procfs_list.pl_private =
2228 	    spa_import_progress_list;
2229 
2230 	procfs_list_install("zfs",
2231 	    NULL,
2232 	    "import_progress",
2233 	    0644,
2234 	    &spa_import_progress_list->procfs_list,
2235 	    spa_import_progress_show,
2236 	    spa_import_progress_show_header,
2237 	    NULL,
2238 	    offsetof(spa_import_progress_t, smh_node));
2239 }
2240 
2241 static void
2242 spa_import_progress_destroy(void)
2243 {
2244 	spa_history_list_t *shl = spa_import_progress_list;
2245 	procfs_list_uninstall(&shl->procfs_list);
2246 	spa_import_progress_truncate(shl, 0);
2247 	procfs_list_destroy(&shl->procfs_list);
2248 	kmem_free(shl, sizeof (spa_history_list_t));
2249 }
2250 
2251 int
2252 spa_import_progress_set_state(uint64_t pool_guid,
2253     spa_load_state_t load_state)
2254 {
2255 	spa_history_list_t *shl = spa_import_progress_list;
2256 	spa_import_progress_t *sip;
2257 	int error = ENOENT;
2258 
2259 	if (shl->size == 0)
2260 		return (0);
2261 
2262 	mutex_enter(&shl->procfs_list.pl_lock);
2263 	for (sip = list_tail(&shl->procfs_list.pl_list); sip != NULL;
2264 	    sip = list_prev(&shl->procfs_list.pl_list, sip)) {
2265 		if (sip->pool_guid == pool_guid) {
2266 			sip->spa_load_state = load_state;
2267 			error = 0;
2268 			break;
2269 		}
2270 	}
2271 	mutex_exit(&shl->procfs_list.pl_lock);
2272 
2273 	return (error);
2274 }
2275 
2276 int
2277 spa_import_progress_set_max_txg(uint64_t pool_guid, uint64_t load_max_txg)
2278 {
2279 	spa_history_list_t *shl = spa_import_progress_list;
2280 	spa_import_progress_t *sip;
2281 	int error = ENOENT;
2282 
2283 	if (shl->size == 0)
2284 		return (0);
2285 
2286 	mutex_enter(&shl->procfs_list.pl_lock);
2287 	for (sip = list_tail(&shl->procfs_list.pl_list); sip != NULL;
2288 	    sip = list_prev(&shl->procfs_list.pl_list, sip)) {
2289 		if (sip->pool_guid == pool_guid) {
2290 			sip->spa_load_max_txg = load_max_txg;
2291 			error = 0;
2292 			break;
2293 		}
2294 	}
2295 	mutex_exit(&shl->procfs_list.pl_lock);
2296 
2297 	return (error);
2298 }
2299 
2300 int
2301 spa_import_progress_set_mmp_check(uint64_t pool_guid,
2302     uint64_t mmp_sec_remaining)
2303 {
2304 	spa_history_list_t *shl = spa_import_progress_list;
2305 	spa_import_progress_t *sip;
2306 	int error = ENOENT;
2307 
2308 	if (shl->size == 0)
2309 		return (0);
2310 
2311 	mutex_enter(&shl->procfs_list.pl_lock);
2312 	for (sip = list_tail(&shl->procfs_list.pl_list); sip != NULL;
2313 	    sip = list_prev(&shl->procfs_list.pl_list, sip)) {
2314 		if (sip->pool_guid == pool_guid) {
2315 			sip->mmp_sec_remaining = mmp_sec_remaining;
2316 			error = 0;
2317 			break;
2318 		}
2319 	}
2320 	mutex_exit(&shl->procfs_list.pl_lock);
2321 
2322 	return (error);
2323 }
2324 
2325 /*
2326  * A new import is in progress; add an entry for it.
2327  */
2328 void
2329 spa_import_progress_add(spa_t *spa)
2330 {
2331 	spa_history_list_t *shl = spa_import_progress_list;
2332 	spa_import_progress_t *sip;
2333 	const char *poolname = NULL;
2334 
2335 	sip = kmem_zalloc(sizeof (spa_import_progress_t), KM_SLEEP);
2336 	sip->pool_guid = spa_guid(spa);
2337 
2338 	(void) nvlist_lookup_string(spa->spa_config, ZPOOL_CONFIG_POOL_NAME,
2339 	    &poolname);
2340 	if (poolname == NULL)
2341 		poolname = spa_name(spa);
2342 	sip->pool_name = spa_strdup(poolname);
2343 	sip->spa_load_state = spa_load_state(spa);
2344 
2345 	mutex_enter(&shl->procfs_list.pl_lock);
2346 	procfs_list_add(&shl->procfs_list, sip);
2347 	shl->size++;
2348 	mutex_exit(&shl->procfs_list.pl_lock);
2349 }
2350 
2351 void
2352 spa_import_progress_remove(uint64_t pool_guid)
2353 {
2354 	spa_history_list_t *shl = spa_import_progress_list;
2355 	spa_import_progress_t *sip;
2356 
2357 	mutex_enter(&shl->procfs_list.pl_lock);
2358 	for (sip = list_tail(&shl->procfs_list.pl_list); sip != NULL;
2359 	    sip = list_prev(&shl->procfs_list.pl_list, sip)) {
2360 		if (sip->pool_guid == pool_guid) {
2361 			if (sip->pool_name)
2362 				spa_strfree(sip->pool_name);
2363 			list_remove(&shl->procfs_list.pl_list, sip);
2364 			shl->size--;
2365 			kmem_free(sip, sizeof (spa_import_progress_t));
2366 			break;
2367 		}
2368 	}
2369 	mutex_exit(&shl->procfs_list.pl_lock);
2370 }
2371 
2372 /*
2373  * ==========================================================================
2374  * Initialization and Termination
2375  * ==========================================================================
2376  */
2377 
2378 static int
2379 spa_name_compare(const void *a1, const void *a2)
2380 {
2381 	const spa_t *s1 = a1;
2382 	const spa_t *s2 = a2;
2383 	int s;
2384 
2385 	s = strcmp(s1->spa_name, s2->spa_name);
2386 
2387 	return (TREE_ISIGN(s));
2388 }
2389 
2390 void
2391 spa_boot_init(void)
2392 {
2393 	spa_config_load();
2394 }
2395 
2396 void
2397 spa_init(spa_mode_t mode)
2398 {
2399 	mutex_init(&spa_namespace_lock, NULL, MUTEX_DEFAULT, NULL);
2400 	mutex_init(&spa_spare_lock, NULL, MUTEX_DEFAULT, NULL);
2401 	mutex_init(&spa_l2cache_lock, NULL, MUTEX_DEFAULT, NULL);
2402 	cv_init(&spa_namespace_cv, NULL, CV_DEFAULT, NULL);
2403 
2404 	avl_create(&spa_namespace_avl, spa_name_compare, sizeof (spa_t),
2405 	    offsetof(spa_t, spa_avl));
2406 
2407 	avl_create(&spa_spare_avl, spa_spare_compare, sizeof (spa_aux_t),
2408 	    offsetof(spa_aux_t, aux_avl));
2409 
2410 	avl_create(&spa_l2cache_avl, spa_l2cache_compare, sizeof (spa_aux_t),
2411 	    offsetof(spa_aux_t, aux_avl));
2412 
2413 	spa_mode_global = mode;
2414 
2415 #ifndef _KERNEL
2416 	if (spa_mode_global != SPA_MODE_READ && dprintf_find_string("watch")) {
2417 		struct sigaction sa;
2418 
2419 		sa.sa_flags = SA_SIGINFO;
2420 		sigemptyset(&sa.sa_mask);
2421 		sa.sa_sigaction = arc_buf_sigsegv;
2422 
2423 		if (sigaction(SIGSEGV, &sa, NULL) == -1) {
2424 			perror("could not enable watchpoints: "
2425 			    "sigaction(SIGSEGV, ...) = ");
2426 		} else {
2427 			arc_watch = B_TRUE;
2428 		}
2429 	}
2430 #endif
2431 
2432 	fm_init();
2433 	zfs_refcount_init();
2434 	unique_init();
2435 	zfs_btree_init();
2436 	metaslab_stat_init();
2437 	brt_init();
2438 	ddt_init();
2439 	zio_init();
2440 	dmu_init();
2441 	zil_init();
2442 	vdev_cache_stat_init();
2443 	vdev_mirror_stat_init();
2444 	vdev_raidz_math_init();
2445 	vdev_file_init();
2446 	zfs_prop_init();
2447 	chksum_init();
2448 	zpool_prop_init();
2449 	zpool_feature_init();
2450 	spa_config_load();
2451 	vdev_prop_init();
2452 	l2arc_start();
2453 	scan_init();
2454 	qat_init();
2455 	spa_import_progress_init();
2456 }
2457 
2458 void
2459 spa_fini(void)
2460 {
2461 	l2arc_stop();
2462 
2463 	spa_evict_all();
2464 
2465 	vdev_file_fini();
2466 	vdev_cache_stat_fini();
2467 	vdev_mirror_stat_fini();
2468 	vdev_raidz_math_fini();
2469 	chksum_fini();
2470 	zil_fini();
2471 	dmu_fini();
2472 	zio_fini();
2473 	ddt_fini();
2474 	brt_fini();
2475 	metaslab_stat_fini();
2476 	zfs_btree_fini();
2477 	unique_fini();
2478 	zfs_refcount_fini();
2479 	fm_fini();
2480 	scan_fini();
2481 	qat_fini();
2482 	spa_import_progress_destroy();
2483 
2484 	avl_destroy(&spa_namespace_avl);
2485 	avl_destroy(&spa_spare_avl);
2486 	avl_destroy(&spa_l2cache_avl);
2487 
2488 	cv_destroy(&spa_namespace_cv);
2489 	mutex_destroy(&spa_namespace_lock);
2490 	mutex_destroy(&spa_spare_lock);
2491 	mutex_destroy(&spa_l2cache_lock);
2492 }
2493 
2494 /*
2495  * Return whether this pool has a dedicated slog device. No locking needed.
2496  * It's not a problem if the wrong answer is returned, since it is only
2497  * used for performance and not for correctness.
2498  */
2499 boolean_t
2500 spa_has_slogs(spa_t *spa)
2501 {
2502 	return (spa->spa_log_class->mc_groups != 0);
2503 }
2504 
2505 spa_log_state_t
2506 spa_get_log_state(spa_t *spa)
2507 {
2508 	return (spa->spa_log_state);
2509 }
2510 
2511 void
2512 spa_set_log_state(spa_t *spa, spa_log_state_t state)
2513 {
2514 	spa->spa_log_state = state;
2515 }
2516 
2517 boolean_t
2518 spa_is_root(spa_t *spa)
2519 {
2520 	return (spa->spa_is_root);
2521 }
2522 
2523 boolean_t
2524 spa_writeable(spa_t *spa)
2525 {
2526 	return (!!(spa->spa_mode & SPA_MODE_WRITE) && spa->spa_trust_config);
2527 }
2528 
2529 /*
2530  * Returns true if there is a pending sync task in any of the current
2531  * syncing txg, the current quiescing txg, or the current open txg.
2532  */
2533 boolean_t
2534 spa_has_pending_synctask(spa_t *spa)
2535 {
2536 	return (!txg_all_lists_empty(&spa->spa_dsl_pool->dp_sync_tasks) ||
2537 	    !txg_all_lists_empty(&spa->spa_dsl_pool->dp_early_sync_tasks));
2538 }
2539 
2540 spa_mode_t
2541 spa_mode(spa_t *spa)
2542 {
2543 	return (spa->spa_mode);
2544 }
2545 
2546 uint64_t
2547 spa_bootfs(spa_t *spa)
2548 {
2549 	return (spa->spa_bootfs);
2550 }
2551 
2552 uint64_t
2553 spa_delegation(spa_t *spa)
2554 {
2555 	return (spa->spa_delegation);
2556 }
2557 
2558 objset_t *
2559 spa_meta_objset(spa_t *spa)
2560 {
2561 	return (spa->spa_meta_objset);
2562 }
2563 
2564 enum zio_checksum
2565 spa_dedup_checksum(spa_t *spa)
2566 {
2567 	return (spa->spa_dedup_checksum);
2568 }
2569 
2570 /*
2571  * Reset the pool's per-pass scan statistics (on each scan pass or reboot).
2572  */
2573 void
2574 spa_scan_stat_init(spa_t *spa)
2575 {
2576 	/* data not stored on disk */
2577 	spa->spa_scan_pass_start = gethrestime_sec();
2578 	if (dsl_scan_is_paused_scrub(spa->spa_dsl_pool->dp_scan))
2579 		spa->spa_scan_pass_scrub_pause = spa->spa_scan_pass_start;
2580 	else
2581 		spa->spa_scan_pass_scrub_pause = 0;
2582 	spa->spa_scan_pass_scrub_spent_paused = 0;
2583 	spa->spa_scan_pass_exam = 0;
2584 	spa->spa_scan_pass_issued = 0;
2585 }
2586 
2587 /*
2588  * Get scan stats for "zpool status" reports.
2589  */
2590 int
2591 spa_scan_get_stats(spa_t *spa, pool_scan_stat_t *ps)
2592 {
2593 	dsl_scan_t *scn = spa->spa_dsl_pool ? spa->spa_dsl_pool->dp_scan : NULL;
2594 
2595 	if (scn == NULL || scn->scn_phys.scn_func == POOL_SCAN_NONE)
2596 		return (SET_ERROR(ENOENT));
2597 	memset(ps, 0, sizeof (pool_scan_stat_t));
2598 
2599 	/* data stored on disk */
2600 	ps->pss_func = scn->scn_phys.scn_func;
2601 	ps->pss_state = scn->scn_phys.scn_state;
2602 	ps->pss_start_time = scn->scn_phys.scn_start_time;
2603 	ps->pss_end_time = scn->scn_phys.scn_end_time;
2604 	ps->pss_to_examine = scn->scn_phys.scn_to_examine;
2605 	ps->pss_examined = scn->scn_phys.scn_examined;
2606 	ps->pss_to_process = scn->scn_phys.scn_to_process;
2607 	ps->pss_processed = scn->scn_phys.scn_processed;
2608 	ps->pss_errors = scn->scn_phys.scn_errors;
2609 
2610 	/* data not stored on disk */
2611 	ps->pss_pass_exam = spa->spa_scan_pass_exam;
2612 	ps->pss_pass_start = spa->spa_scan_pass_start;
2613 	ps->pss_pass_scrub_pause = spa->spa_scan_pass_scrub_pause;
2614 	ps->pss_pass_scrub_spent_paused = spa->spa_scan_pass_scrub_spent_paused;
2615 	ps->pss_pass_issued = spa->spa_scan_pass_issued;
2616 	ps->pss_issued =
2617 	    scn->scn_issued_before_pass + spa->spa_scan_pass_issued;
2618 
2619 	return (0);
2620 }
2621 
2622 int
2623 spa_maxblocksize(spa_t *spa)
2624 {
2625 	if (spa_feature_is_enabled(spa, SPA_FEATURE_LARGE_BLOCKS))
2626 		return (SPA_MAXBLOCKSIZE);
2627 	else
2628 		return (SPA_OLD_MAXBLOCKSIZE);
2629 }
2630 
2631 
2632 /*
2633  * Returns the txg in which the last device removal completed. No indirect
2634  * mappings have been added since this txg.
2635  */
2636 uint64_t
2637 spa_get_last_removal_txg(spa_t *spa)
2638 {
2639 	uint64_t vdevid;
2640 	uint64_t ret = -1ULL;
2641 
2642 	spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
2643 	/*
2644 	 * sr_prev_indirect_vdev is only modified while holding all the
2645 	 * config locks, so it is sufficient to hold SCL_VDEV as reader when
2646 	 * examining it.
2647 	 */
2648 	vdevid = spa->spa_removing_phys.sr_prev_indirect_vdev;
2649 
2650 	while (vdevid != -1ULL) {
2651 		vdev_t *vd = vdev_lookup_top(spa, vdevid);
2652 		vdev_indirect_births_t *vib = vd->vdev_indirect_births;
2653 
2654 		ASSERT3P(vd->vdev_ops, ==, &vdev_indirect_ops);
2655 
2656 		/*
2657 		 * If the removal did not remap any data, we don't care.
2658 		 */
2659 		if (vdev_indirect_births_count(vib) != 0) {
2660 			ret = vdev_indirect_births_last_entry_txg(vib);
2661 			break;
2662 		}
2663 
2664 		vdevid = vd->vdev_indirect_config.vic_prev_indirect_vdev;
2665 	}
2666 	spa_config_exit(spa, SCL_VDEV, FTAG);
2667 
2668 	IMPLY(ret != -1ULL,
2669 	    spa_feature_is_active(spa, SPA_FEATURE_DEVICE_REMOVAL));
2670 
2671 	return (ret);
2672 }
2673 
2674 int
2675 spa_maxdnodesize(spa_t *spa)
2676 {
2677 	if (spa_feature_is_enabled(spa, SPA_FEATURE_LARGE_DNODE))
2678 		return (DNODE_MAX_SIZE);
2679 	else
2680 		return (DNODE_MIN_SIZE);
2681 }
2682 
2683 boolean_t
2684 spa_multihost(spa_t *spa)
2685 {
2686 	return (spa->spa_multihost ? B_TRUE : B_FALSE);
2687 }
2688 
2689 uint32_t
2690 spa_get_hostid(spa_t *spa)
2691 {
2692 	return (spa->spa_hostid);
2693 }
2694 
2695 boolean_t
2696 spa_trust_config(spa_t *spa)
2697 {
2698 	return (spa->spa_trust_config);
2699 }
2700 
2701 uint64_t
2702 spa_missing_tvds_allowed(spa_t *spa)
2703 {
2704 	return (spa->spa_missing_tvds_allowed);
2705 }
2706 
2707 space_map_t *
2708 spa_syncing_log_sm(spa_t *spa)
2709 {
2710 	return (spa->spa_syncing_log_sm);
2711 }
2712 
2713 void
2714 spa_set_missing_tvds(spa_t *spa, uint64_t missing)
2715 {
2716 	spa->spa_missing_tvds = missing;
2717 }
2718 
2719 /*
2720  * Return the pool state string ("ONLINE", "DEGRADED", "SUSPENDED", etc).
2721  */
2722 const char *
2723 spa_state_to_name(spa_t *spa)
2724 {
2725 	ASSERT3P(spa, !=, NULL);
2726 
2727 	/*
2728 	 * It is possible for the spa to exist without a root vdev while it
2729 	 * transitions during import/export.
2730 	 */
2731 	vdev_t *rvd = spa->spa_root_vdev;
2732 	if (rvd == NULL) {
2733 		return ("TRANSITIONING");
2734 	}
2735 	vdev_state_t state = rvd->vdev_state;
2736 	vdev_aux_t aux = rvd->vdev_stat.vs_aux;
2737 
2738 	if (spa_suspended(spa) &&
2739 	    (spa_get_failmode(spa) != ZIO_FAILURE_MODE_CONTINUE))
2740 		return ("SUSPENDED");
2741 
2742 	switch (state) {
2743 	case VDEV_STATE_CLOSED:
2744 	case VDEV_STATE_OFFLINE:
2745 		return ("OFFLINE");
2746 	case VDEV_STATE_REMOVED:
2747 		return ("REMOVED");
2748 	case VDEV_STATE_CANT_OPEN:
2749 		if (aux == VDEV_AUX_CORRUPT_DATA || aux == VDEV_AUX_BAD_LOG)
2750 			return ("FAULTED");
2751 		else if (aux == VDEV_AUX_SPLIT_POOL)
2752 			return ("SPLIT");
2753 		else
2754 			return ("UNAVAIL");
2755 	case VDEV_STATE_FAULTED:
2756 		return ("FAULTED");
2757 	case VDEV_STATE_DEGRADED:
2758 		return ("DEGRADED");
2759 	case VDEV_STATE_HEALTHY:
2760 		return ("ONLINE");
2761 	default:
2762 		break;
2763 	}
2764 
2765 	return ("UNKNOWN");
2766 }
2767 
2768 boolean_t
2769 spa_top_vdevs_spacemap_addressable(spa_t *spa)
2770 {
2771 	vdev_t *rvd = spa->spa_root_vdev;
2772 	for (uint64_t c = 0; c < rvd->vdev_children; c++) {
2773 		if (!vdev_is_spacemap_addressable(rvd->vdev_child[c]))
2774 			return (B_FALSE);
2775 	}
2776 	return (B_TRUE);
2777 }
2778 
2779 boolean_t
2780 spa_has_checkpoint(spa_t *spa)
2781 {
2782 	return (spa->spa_checkpoint_txg != 0);
2783 }
2784 
2785 boolean_t
2786 spa_importing_readonly_checkpoint(spa_t *spa)
2787 {
2788 	return ((spa->spa_import_flags & ZFS_IMPORT_CHECKPOINT) &&
2789 	    spa->spa_mode == SPA_MODE_READ);
2790 }
2791 
2792 uint64_t
2793 spa_min_claim_txg(spa_t *spa)
2794 {
2795 	uint64_t checkpoint_txg = spa->spa_uberblock.ub_checkpoint_txg;
2796 
2797 	if (checkpoint_txg != 0)
2798 		return (checkpoint_txg + 1);
2799 
2800 	return (spa->spa_first_txg);
2801 }
2802 
2803 /*
2804  * If there is a checkpoint, async destroys may consume more space from
2805  * the pool instead of freeing it. In an attempt to save the pool from
2806  * getting suspended when it is about to run out of space, we stop
2807  * processing async destroys.
2808  */
2809 boolean_t
2810 spa_suspend_async_destroy(spa_t *spa)
2811 {
2812 	dsl_pool_t *dp = spa_get_dsl(spa);
2813 
2814 	uint64_t unreserved = dsl_pool_unreserved_space(dp,
2815 	    ZFS_SPACE_CHECK_EXTRA_RESERVED);
2816 	uint64_t used = dsl_dir_phys(dp->dp_root_dir)->dd_used_bytes;
2817 	uint64_t avail = (unreserved > used) ? (unreserved - used) : 0;
2818 
2819 	if (spa_has_checkpoint(spa) && avail == 0)
2820 		return (B_TRUE);
2821 
2822 	return (B_FALSE);
2823 }
2824 
2825 #if defined(_KERNEL)
2826 
2827 int
2828 param_set_deadman_failmode_common(const char *val)
2829 {
2830 	spa_t *spa = NULL;
2831 	char *p;
2832 
2833 	if (val == NULL)
2834 		return (SET_ERROR(EINVAL));
2835 
2836 	if ((p = strchr(val, '\n')) != NULL)
2837 		*p = '\0';
2838 
2839 	if (strcmp(val, "wait") != 0 && strcmp(val, "continue") != 0 &&
2840 	    strcmp(val, "panic") != 0)
2841 		return (SET_ERROR(EINVAL));
2842 
2843 	if (spa_mode_global != SPA_MODE_UNINIT) {
2844 		mutex_enter(&spa_namespace_lock);
2845 		while ((spa = spa_next(spa)) != NULL)
2846 			spa_set_deadman_failmode(spa, val);
2847 		mutex_exit(&spa_namespace_lock);
2848 	}
2849 
2850 	return (0);
2851 }
2852 #endif
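
/*
 * On Linux builds this helper backs the zfs_deadman_failmode module
 * parameter registered near the end of this file, so the deadman failmode
 * can be changed at runtime, e.g. (illustrative only):
 *
 *	echo continue > /sys/module/zfs/parameters/zfs_deadman_failmode
 */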
2853 
2854 /* Namespace manipulation */
2855 EXPORT_SYMBOL(spa_lookup);
2856 EXPORT_SYMBOL(spa_add);
2857 EXPORT_SYMBOL(spa_remove);
2858 EXPORT_SYMBOL(spa_next);
2859 
2860 /* Refcount functions */
2861 EXPORT_SYMBOL(spa_open_ref);
2862 EXPORT_SYMBOL(spa_close);
2863 EXPORT_SYMBOL(spa_refcount_zero);
2864 
2865 /* Pool configuration lock */
2866 EXPORT_SYMBOL(spa_config_tryenter);
2867 EXPORT_SYMBOL(spa_config_enter);
2868 EXPORT_SYMBOL(spa_config_exit);
2869 EXPORT_SYMBOL(spa_config_held);
2870 
2871 /* Pool vdev add/remove lock */
2872 EXPORT_SYMBOL(spa_vdev_enter);
2873 EXPORT_SYMBOL(spa_vdev_exit);
2874 
2875 /* Pool vdev state change lock */
2876 EXPORT_SYMBOL(spa_vdev_state_enter);
2877 EXPORT_SYMBOL(spa_vdev_state_exit);
2878 
2879 /* Accessor functions */
2880 EXPORT_SYMBOL(spa_shutting_down);
2881 EXPORT_SYMBOL(spa_get_dsl);
2882 EXPORT_SYMBOL(spa_get_rootblkptr);
2883 EXPORT_SYMBOL(spa_set_rootblkptr);
2884 EXPORT_SYMBOL(spa_altroot);
2885 EXPORT_SYMBOL(spa_sync_pass);
2886 EXPORT_SYMBOL(spa_name);
2887 EXPORT_SYMBOL(spa_guid);
2888 EXPORT_SYMBOL(spa_last_synced_txg);
2889 EXPORT_SYMBOL(spa_first_txg);
2890 EXPORT_SYMBOL(spa_syncing_txg);
2891 EXPORT_SYMBOL(spa_version);
2892 EXPORT_SYMBOL(spa_state);
2893 EXPORT_SYMBOL(spa_load_state);
2894 EXPORT_SYMBOL(spa_freeze_txg);
2895 EXPORT_SYMBOL(spa_get_dspace);
2896 EXPORT_SYMBOL(spa_update_dspace);
2897 EXPORT_SYMBOL(spa_deflate);
2898 EXPORT_SYMBOL(spa_normal_class);
2899 EXPORT_SYMBOL(spa_log_class);
2900 EXPORT_SYMBOL(spa_special_class);
2901 EXPORT_SYMBOL(spa_preferred_class);
2902 EXPORT_SYMBOL(spa_max_replication);
2903 EXPORT_SYMBOL(spa_prev_software_version);
2904 EXPORT_SYMBOL(spa_get_failmode);
2905 EXPORT_SYMBOL(spa_suspended);
2906 EXPORT_SYMBOL(spa_bootfs);
2907 EXPORT_SYMBOL(spa_delegation);
2908 EXPORT_SYMBOL(spa_meta_objset);
2909 EXPORT_SYMBOL(spa_maxblocksize);
2910 EXPORT_SYMBOL(spa_maxdnodesize);
2911 
2912 /* Miscellaneous support routines */
2913 EXPORT_SYMBOL(spa_guid_exists);
2914 EXPORT_SYMBOL(spa_strdup);
2915 EXPORT_SYMBOL(spa_strfree);
2916 EXPORT_SYMBOL(spa_generate_guid);
2917 EXPORT_SYMBOL(snprintf_blkptr);
2918 EXPORT_SYMBOL(spa_freeze);
2919 EXPORT_SYMBOL(spa_upgrade);
2920 EXPORT_SYMBOL(spa_evict_all);
2921 EXPORT_SYMBOL(spa_lookup_by_guid);
2922 EXPORT_SYMBOL(spa_has_spare);
2923 EXPORT_SYMBOL(dva_get_dsize_sync);
2924 EXPORT_SYMBOL(bp_get_dsize_sync);
2925 EXPORT_SYMBOL(bp_get_dsize);
2926 EXPORT_SYMBOL(spa_has_slogs);
2927 EXPORT_SYMBOL(spa_is_root);
2928 EXPORT_SYMBOL(spa_writeable);
2929 EXPORT_SYMBOL(spa_mode);
2930 EXPORT_SYMBOL(spa_namespace_lock);
2931 EXPORT_SYMBOL(spa_trust_config);
2932 EXPORT_SYMBOL(spa_missing_tvds_allowed);
2933 EXPORT_SYMBOL(spa_set_missing_tvds);
2934 EXPORT_SYMBOL(spa_state_to_name);
2935 EXPORT_SYMBOL(spa_importing_readonly_checkpoint);
2936 EXPORT_SYMBOL(spa_min_claim_txg);
2937 EXPORT_SYMBOL(spa_suspend_async_destroy);
2938 EXPORT_SYMBOL(spa_has_checkpoint);
2939 EXPORT_SYMBOL(spa_top_vdevs_spacemap_addressable);
2940 
2941 ZFS_MODULE_PARAM(zfs, zfs_, flags, UINT, ZMOD_RW,
2942 	"Set additional debugging flags");
2943 
2944 ZFS_MODULE_PARAM(zfs, zfs_, recover, INT, ZMOD_RW,
2945 	"Set to attempt to recover from fatal errors");
2946 
2947 ZFS_MODULE_PARAM(zfs, zfs_, free_leak_on_eio, INT, ZMOD_RW,
2948 	"Set to ignore IO errors during free and permanently leak the space");
2949 
2950 ZFS_MODULE_PARAM(zfs_deadman, zfs_deadman_, checktime_ms, U64, ZMOD_RW,
2951 	"Dead I/O check interval in milliseconds");
2952 
2953 ZFS_MODULE_PARAM(zfs_deadman, zfs_deadman_, enabled, INT, ZMOD_RW,
2954 	"Enable deadman timer");
2955 
2956 ZFS_MODULE_PARAM(zfs_spa, spa_, asize_inflation, UINT, ZMOD_RW,
2957 	"SPA size estimate multiplication factor");
2958 
2959 ZFS_MODULE_PARAM(zfs, zfs_, ddt_data_is_special, INT, ZMOD_RW,
2960 	"Place DDT data into the special class");
2961 
2962 ZFS_MODULE_PARAM(zfs, zfs_, user_indirect_is_special, INT, ZMOD_RW,
2963 	"Place user data indirect blocks into the special class");
2964 
2965 /* BEGIN CSTYLED */
2966 ZFS_MODULE_PARAM_CALL(zfs_deadman, zfs_deadman_, failmode,
2967 	param_set_deadman_failmode, param_get_charp, ZMOD_RW,
2968 	"Failmode for deadman timer");
2969 
2970 ZFS_MODULE_PARAM_CALL(zfs_deadman, zfs_deadman_, synctime_ms,
2971 	param_set_deadman_synctime, spl_param_get_u64, ZMOD_RW,
2972 	"Pool sync expiration time in milliseconds");
2973 
2974 ZFS_MODULE_PARAM_CALL(zfs_deadman, zfs_deadman_, ziotime_ms,
2975 	param_set_deadman_ziotime, spl_param_get_u64, ZMOD_RW,
2976 	"IO expiration time in milliseconds");
2977 
2978 ZFS_MODULE_PARAM(zfs, zfs_, special_class_metadata_reserve_pct, UINT, ZMOD_RW,
2979 	"Small file blocks in special vdevs depend on this much "
2980 	"free space being available");
2981 /* END CSTYLED */
2982 
2983 ZFS_MODULE_PARAM_CALL(zfs_spa, spa_, slop_shift, param_set_slop_shift,
2984 	param_get_uint, ZMOD_RW, "Reserved free space in pool");
2985