// SPDX-License-Identifier: CDDL-1.0
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or https://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2024 by Delphix. All rights reserved.
 * Copyright 2015 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
 * Copyright 2013 Saso Kiselkov. All rights reserved.
 * Copyright (c) 2017 Datto Inc.
 * Copyright (c) 2017, Intel Corporation.
 * Copyright (c) 2019, loli10K <ezomori.nozomu@gmail.com>. All rights reserved.
 * Copyright (c) 2023, 2024, 2025, Klara, Inc.
 */

#include <sys/zfs_context.h>
#include <sys/zfs_chksum.h>
#include <sys/spa_impl.h>
#include <sys/zio.h>
#include <sys/zio_checksum.h>
#include <sys/zio_compress.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/zap.h>
#include <sys/zil.h>
#include <sys/vdev_impl.h>
#include <sys/vdev_initialize.h>
#include <sys/vdev_trim.h>
#include <sys/vdev_file.h>
#include <sys/vdev_raidz.h>
#include <sys/metaslab.h>
#include <sys/uberblock_impl.h>
#include <sys/txg.h>
#include <sys/avl.h>
#include <sys/unique.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_prop.h>
#include <sys/fm/util.h>
#include <sys/dsl_scan.h>
#include <sys/fs/zfs.h>
#include <sys/metaslab_impl.h>
#include <sys/arc.h>
#include <sys/brt.h>
#include <sys/ddt.h>
#include <sys/kstat.h>
#include "zfs_prop.h"
#include <sys/btree.h>
#include <sys/zfeature.h>
#include <sys/qat.h>
#include <sys/zstd/zstd.h>

/*
 * SPA locking
 *
 * There are three basic locks for managing spa_t structures:
 *
 * spa_namespace_lock (global mutex)
 *
 *	This lock must be acquired to do any of the following:
 *
 *	- Lookup a spa_t by name
 *	- Add or remove a spa_t from the namespace
 *	- Increase spa_refcount from non-zero
 *	- Check if spa_refcount is zero
 *	- Rename a spa_t
 *	- add/remove/attach/detach devices
 *	- Held for the duration of create/destroy
 *	- Held at the start and end of import and export
 *
 *	It does not need to handle recursion. A create or destroy may
 *	reference objects (files or zvols) in other pools, but by
 *	definition they must have an existing reference, and will never need
 *	to lookup a spa_t by name.
 *
 * spa_refcount (per-spa zfs_refcount_t protected by mutex)
 *
 *	This reference count keeps track of any active users of the spa_t. The
 *	spa_t cannot be destroyed or freed while this is non-zero. Internally,
 *	the refcount is never really 'zero' - opening a pool implicitly keeps
 *	some references in the DMU. Internally we check against spa_minref, but
 *	present the image of a zero/non-zero value to consumers.
 *
 * spa_config_lock[] (per-spa array of rwlocks)
 *
 *	This protects the spa_t from config changes, and must be held in
 *	the following circumstances:
 *
 *	- RW_READER to perform I/O to the spa
 *	- RW_WRITER to change the vdev config
 *
 * The locking order is fairly straightforward:
 *
 *	spa_namespace_lock	->	spa_refcount
 *
 *	The namespace lock must be acquired to increase the refcount from 0
 *	or to check if it is zero.
 *
 *	spa_refcount		->	spa_config_lock[]
 *
 *	There must be at least one valid reference on the spa_t to acquire
 *	the config lock.
 *
 *	spa_namespace_lock	->	spa_config_lock[]
 *
 *	The namespace lock must always be taken before the config lock.
 *
 *
 * The spa_namespace_lock can be acquired directly and is globally visible.
 *
 * The namespace is manipulated using the following functions, all of which
 * require the spa_namespace_lock to be held.
 *
 *	spa_lookup()		Lookup a spa_t by name.
 *
 *	spa_add()		Create a new spa_t in the namespace.
 *
 *	spa_remove()		Remove a spa_t from the namespace. This also
 *				frees up any memory associated with the spa_t.
 *
 *	spa_next()		Returns the next spa_t in the system, or the
 *				first if NULL is passed.
 *
 *	spa_evict_all()		Shutdown and remove all spa_t structures in
 *				the system.
 *
 *	spa_guid_exists()	Determine whether a pool/device guid exists.
 *
 * The spa_refcount is manipulated using the following functions:
 *
 *	spa_open_ref()		Adds a reference to the given spa_t. Must be
 *				called with spa_namespace_lock held if the
 *				refcount is currently zero.
 *
 *	spa_close()		Remove a reference from the spa_t. This will
 *				not free the spa_t or remove it from the
 *				namespace. No locking is required.
 *
 *	spa_refcount_zero()	Returns true if the refcount is currently
 *				zero. Must be called with spa_namespace_lock
 *				held.
 *
 * The spa_config_lock[] is an array of rwlocks, ordered as follows:
 * SCL_CONFIG > SCL_STATE > SCL_ALLOC > SCL_ZIO > SCL_FREE > SCL_VDEV.
 * spa_config_lock[] is manipulated with spa_config_{enter,exit,held}().
 *
 * To read the configuration, it suffices to hold one of these locks as reader.
 * To modify the configuration, you must hold all locks as writer. To modify
 * vdev state without altering the vdev tree's topology (e.g. online/offline),
 * you must hold SCL_STATE and SCL_ZIO as writer.
 *
 * We use these distinct config locks to avoid recursive lock entry.
 * For example, spa_sync() (which holds SCL_CONFIG as reader) induces
 * block allocations (SCL_ALLOC), which may require reading space maps
 * from disk (dmu_read() -> zio_read() -> SCL_ZIO).
 *
 * The spa config locks cannot be normal rwlocks because we need the
 * ability to hand off ownership. For example, SCL_ZIO is acquired
 * by the issuing thread and later released by an interrupt thread.
 * They do, however, obey the usual write-wanted semantics to prevent
 * writer (i.e. system administrator) starvation.
 *
 * The lock acquisition rules are as follows:
 *
 * SCL_CONFIG
 *	Protects changes to the vdev tree topology, such as vdev
 *	add/remove/attach/detach. Protects the dirty config list
 *	(spa_config_dirty_list) and the set of spares and l2arc devices.
 *
 * SCL_STATE
 *	Protects changes to pool state and vdev state, such as vdev
 *	online/offline/fault/degrade/clear. Protects the dirty state list
 *	(spa_state_dirty_list) and global pool state (spa_state).
 *
 * SCL_ALLOC
 *	Protects changes to metaslab groups and classes.
 *	Held as reader by metaslab_alloc() and metaslab_claim().
 *
 * SCL_ZIO
 *	Held by bp-level zios (those which have no io_vd upon entry)
 *	to prevent changes to the vdev tree. The bp-level zio implicitly
 *	protects all of its vdev child zios, which do not hold SCL_ZIO.
 *
 * SCL_FREE
 *	Protects changes to metaslab groups and classes.
 *	Held as reader by metaslab_free(). SCL_FREE is distinct from
 *	SCL_ALLOC, and lower than SCL_ZIO, so that we can safely free
 *	blocks in zio_done() while another i/o that holds either
 *	SCL_ALLOC or SCL_ZIO is waiting for this i/o to complete.
 *
 * SCL_VDEV
 *	Held as reader to prevent changes to the vdev tree during trivial
 *	inquiries such as bp_get_dsize(). SCL_VDEV is distinct from the
 *	other locks, and lower than all of them, to ensure that it's safe
 *	to acquire regardless of caller context.
 *
 * In addition, the following rules apply:
 *
 * (a)	spa_props_lock protects pool properties, spa_config and
 *	spa_config_list. The lock ordering is SCL_CONFIG > spa_props_lock.
 *
 * (b)	I/O operations on leaf vdevs. For any zio operation that takes
 *	an explicit vdev_t argument -- such as zio_ioctl(), zio_read_phys(),
 *	or zio_write_phys() -- the caller must ensure that the config cannot
 *	change in the interim, and that the vdev cannot be reopened.
 *	SCL_STATE as reader suffices for both.
 *
 * The vdev configuration is protected by spa_vdev_enter() / spa_vdev_exit().
 *
 *	spa_vdev_enter()	Acquire the namespace lock and the config lock
 *				for writing.
 *
 *	spa_vdev_exit()		Release the config lock, wait for all I/O
 *				to complete, sync the updated configs to the
 *				cache, and release the namespace lock.
 *
 * vdev state is protected by spa_vdev_state_enter() / spa_vdev_state_exit().
 * Like spa_vdev_enter/exit, these are convenience wrappers -- the actual
 * locking is, always, based on spa_namespace_lock and spa_config_lock[].
 */

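/*
 * An illustrative sketch of the ordering rules above (not code from this
 * file): a caller that needs to reconfigure vdevs takes the namespace
 * lock first, then all config locks as writer, and releases in reverse
 * order:
 *
 *	spa_namespace_enter(FTAG);
 *	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
 *	...				/- mutate the vdev tree -/
 *	spa_config_exit(spa, SCL_ALL, FTAG);
 *	spa_namespace_exit(FTAG);
 */
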
static avl_tree_t spa_namespace_avl;
static kmutex_t spa_namespace_lock;
static kcondvar_t spa_namespace_cv;

static const int spa_max_replication_override = SPA_DVAS_PER_BP;

static kmutex_t spa_spare_lock;
static avl_tree_t spa_spare_avl;
static kmutex_t spa_l2cache_lock;
static avl_tree_t spa_l2cache_avl;

spa_mode_t spa_mode_global = SPA_MODE_UNINIT;

#ifdef ZFS_DEBUG
/*
 * Everything except dprintf, set_error, indirect_remap, and raidz_reconstruct
 * is on by default in debug builds.
 */
int zfs_flags = ~(ZFS_DEBUG_DPRINTF | ZFS_DEBUG_SET_ERROR |
    ZFS_DEBUG_INDIRECT_REMAP | ZFS_DEBUG_RAIDZ_RECONSTRUCT);
#else
int zfs_flags = 0;
#endif

/*
 * zfs_recover can be set to nonzero to attempt to recover from
 * otherwise-fatal errors, typically caused by on-disk corruption. When
 * set, calls to zfs_panic_recover() will turn into warning messages.
 * This should only be used as a last resort, as it typically results
 * in leaked space, or worse.
 */
int zfs_recover = B_FALSE;

/*
 * If destroy encounters an EIO while reading metadata (e.g. indirect
 * blocks), space referenced by the missing metadata can not be freed.
 * Normally this causes the background destroy to become "stalled", as
 * it is unable to make forward progress. While in this stalled state,
 * all remaining space to free from the error-encountering filesystem is
 * "temporarily leaked". Set this flag to cause it to ignore the EIO,
 * permanently leak the space from indirect blocks that can not be read,
 * and continue to free everything else that it can.
 *
 * The default, "stalling" behavior is useful if the storage partially
 * fails (i.e. some but not all i/os fail), and then later recovers. In
 * this case, we will be able to continue pool operations while it is
 * partially failed, and when it recovers, we can continue to free the
 * space, with no leaks. However, note that this case is actually
 * fairly rare.
 *
 * Typically pools either (a) fail completely (but perhaps temporarily,
 * e.g. a top-level vdev going offline), or (b) have localized,
 * permanent errors (e.g. disk returns the wrong data due to bit flip or
 * firmware bug). In case (a), this setting does not matter because the
 * pool will be suspended and the sync thread will not be able to make
 * forward progress regardless. In case (b), because the error is
 * permanent, the best we can do is leak the minimum amount of space,
 * which is what setting this flag will do. Therefore, it is reasonable
 * for this flag to normally be set, but we chose the more conservative
 * approach of not setting it, so that there is no possibility of
 * leaking space in the "partial temporary" failure case.
 */
int zfs_free_leak_on_eio = B_FALSE;

/*
 * Expiration time in milliseconds. This value has two meanings. First, it is
 * used to determine when the spa_deadman() logic should fire. By default the
 * spa_deadman() will fire if spa_sync() has not completed in 600 seconds.
 * Secondly, the value determines if an I/O is considered "hung". Any I/O that
 * has not completed in zfs_deadman_synctime_ms is considered "hung" resulting
 * in one of three behaviors controlled by zfs_deadman_failmode.
 */
uint64_t zfs_deadman_synctime_ms = 600000UL;	/* 10 min. */

/*
 * This value controls the maximum amount of time zio_wait() will block for an
 * outstanding IO. By default this is 300 seconds, at which point the "hung"
 * behavior will be applied as described for zfs_deadman_synctime_ms.
 */
uint64_t zfs_deadman_ziotime_ms = 300000UL;	/* 5 min. */

/*
 * Check time in milliseconds. This defines the frequency at which we check
 * for hung I/O.
 */
uint64_t zfs_deadman_checktime_ms = 60000UL;	/* 1 min. */

/*
 * By default the deadman is enabled.
 */
int zfs_deadman_enabled = B_TRUE;

/*
 * Controls the behavior of the deadman when it detects a "hung" I/O.
 * Valid values are zfs_deadman_failmode=<wait|continue|panic>.
 *
 * wait     - Wait for the "hung" I/O (default)
 * continue - Attempt to recover from a "hung" I/O
 * panic    - Panic the system
 */
const char *zfs_deadman_failmode = "wait";

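/*
 * For example (an illustrative sketch, assuming the Linux module
 * parameter interface is available), an administrator could select the
 * "continue" behavior at runtime with:
 *
 *	echo continue > /sys/module/zfs/parameters/zfs_deadman_failmode
 */
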
/*
 * The worst case is single-sector max-parity RAID-Z blocks, in which
 * case the space requirement is exactly (VDEV_RAIDZ_MAXPARITY + 1)
 * times the size; so just assume that. Add to this the fact that
 * we can have up to 3 DVAs per bp, and one more factor of 2 because
 * the block may be dittoed with up to 3 DVAs by ddt_sync(). All together,
 * the worst case is:
 *     (VDEV_RAIDZ_MAXPARITY + 1) * SPA_DVAS_PER_BP * 2 == 24
 */
uint_t spa_asize_inflation = 24;

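/*
 * A worked instance of the bound above (a sketch assuming the current
 * constants VDEV_RAIDZ_MAXPARITY == 3 and SPA_DVAS_PER_BP == 3):
 *
 *	(3 + 1) * 3 * 2 == 24
 *
 * i.e. a single-sector write may, in the worst case, consume 24x the
 * nominal space.
 */
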
/*
 * Normally, we don't allow the last 3.2% (1/(2^spa_slop_shift)) of space in
 * the pool to be consumed (bounded by spa_max_slop). This ensures that we
 * don't run the pool completely out of space, due to unaccounted changes (e.g.
 * to the MOS). It also limits the worst-case time to allocate space. If we
 * have less than this amount of free space, most ZPL operations (e.g. write,
 * create) will return ENOSPC. The ZIL metaslabs (spa_embedded_log_class) are
 * also part of this 3.2% of space which can't be consumed by normal writes;
 * the slop space "proper" (spa_get_slop_space()) is decreased by the embedded
 * log space.
 *
 * Certain operations (e.g. file removal, most administrative actions) can
 * use half the slop space. They will only return ENOSPC if less than half
 * the slop space is free. Typically, once the pool has less than the slop
 * space free, the user will use these operations to free up space in the pool.
 * These are the operations that call dsl_pool_adjustedsize() with the netfree
 * argument set to TRUE.
 *
 * Operations that are almost guaranteed to free up space in the absence of
 * a pool checkpoint can use up to three quarters of the slop space
 * (e.g. zfs destroy).
 *
 * A very restricted set of operations are always permitted, regardless of
 * the amount of free space. These are the operations that call
 * dsl_sync_task(ZFS_SPACE_CHECK_NONE). If these operations result in a net
 * increase in the amount of space used, it is possible to run the pool
 * completely out of space, causing it to be permanently read-only.
 *
 * Note that on very small pools, the slop space will be larger than
 * 3.2%, in an effort to have it be at least spa_min_slop (128MB),
 * but we never allow it to be more than half the pool size.
 *
 * Further, on very large pools, the slop space will be smaller than
 * 3.2%, to avoid reserving much more space than we actually need; bounded
 * by spa_max_slop (128GB).
 *
 * See also the comments in zfs_space_check_t.
 */
uint_t spa_slop_shift = 5;
static const uint64_t spa_min_slop = 128ULL * 1024 * 1024;
static const uint64_t spa_max_slop = 128ULL * 1024 * 1024 * 1024;

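/*
 * Illustrative slop arithmetic (not authoritative): with the default
 * spa_slop_shift of 5, a 10 TiB pool computes 10 TiB / 2^5 = 320 GiB of
 * slop, which is then clamped down to spa_max_slop (128 GiB). A 1 GiB
 * pool computes 32 MiB, which is raised toward spa_min_slop (128 MiB)
 * but never past half the pool size.
 */
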
/*
 * Number of allocators to use, per spa instance
 */
static int spa_num_allocators = 4;
static int spa_cpus_per_allocator = 4;

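/*
 * Illustrative clamping (see the spa_alloc_count computation in
 * spa_add() below): with the defaults above, a 16-CPU machine gets
 * MIN(4, 16 / 4) = 4 allocators, while a 4-CPU machine gets
 * MIN(4, 4 / 4) = 1.
 */
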
/*
 * Spa active allocator.
 * Valid values are zfs_active_allocator=<dynamic|cursor|new-dynamic>.
 */
const char *zfs_active_allocator = "dynamic";

void
spa_load_failed(spa_t *spa, const char *fmt, ...)
{
	va_list adx;
	char buf[256];

	va_start(adx, fmt);
	(void) vsnprintf(buf, sizeof (buf), fmt, adx);
	va_end(adx);

	zfs_dbgmsg("spa_load(%s, config %s): FAILED: %s", spa->spa_name,
	    spa->spa_trust_config ? "trusted" : "untrusted", buf);
}

void
spa_load_note(spa_t *spa, const char *fmt, ...)
{
	va_list adx;
	char buf[256];

	va_start(adx, fmt);
	(void) vsnprintf(buf, sizeof (buf), fmt, adx);
	va_end(adx);

	zfs_dbgmsg("spa_load(%s, config %s): %s", spa->spa_name,
	    spa->spa_trust_config ? "trusted" : "untrusted", buf);

	spa_import_progress_set_notes_nolog(spa, "%s", buf);
}

/*
 * By default dedup and user data indirects land in the special class.
 */
static int zfs_ddt_data_is_special = B_TRUE;
static int zfs_user_indirect_is_special = B_TRUE;

/*
 * The percentage of special class final space reserved for metadata only.
 * Once we allocate 100 - zfs_special_class_metadata_reserve_pct percent of
 * the special class's space, we only let metadata into the class.
 */
static uint_t zfs_special_class_metadata_reserve_pct = 25;

/*
 * ==========================================================================
 * SPA config locking
 * ==========================================================================
 */
static void
spa_config_lock_init(spa_t *spa)
{
	for (int i = 0; i < SCL_LOCKS; i++) {
		spa_config_lock_t *scl = &spa->spa_config_lock[i];
		mutex_init(&scl->scl_lock, NULL, MUTEX_DEFAULT, NULL);
		cv_init(&scl->scl_cv, NULL, CV_DEFAULT, NULL);
		scl->scl_writer = NULL;
		scl->scl_write_wanted = 0;
		scl->scl_count = 0;
	}
}

static void
spa_config_lock_destroy(spa_t *spa)
{
	for (int i = 0; i < SCL_LOCKS; i++) {
		spa_config_lock_t *scl = &spa->spa_config_lock[i];
		mutex_destroy(&scl->scl_lock);
		cv_destroy(&scl->scl_cv);
		ASSERT0P(scl->scl_writer);
		ASSERT0(scl->scl_write_wanted);
		ASSERT0(scl->scl_count);
	}
}

int
spa_config_tryenter(spa_t *spa, int locks, const void *tag, krw_t rw)
{
	for (int i = 0; i < SCL_LOCKS; i++) {
		spa_config_lock_t *scl = &spa->spa_config_lock[i];
		if (!(locks & (1 << i)))
			continue;
		mutex_enter(&scl->scl_lock);
		if (rw == RW_READER) {
			if (scl->scl_writer || scl->scl_write_wanted) {
				mutex_exit(&scl->scl_lock);
				spa_config_exit(spa, locks & ((1 << i) - 1),
				    tag);
				return (0);
			}
		} else {
			ASSERT(scl->scl_writer != curthread);
			if (scl->scl_count != 0) {
				mutex_exit(&scl->scl_lock);
				spa_config_exit(spa, locks & ((1 << i) - 1),
				    tag);
				return (0);
			}
			scl->scl_writer = curthread;
		}
		scl->scl_count++;
		mutex_exit(&scl->scl_lock);
	}
	return (1);
}

static void
spa_config_enter_impl(spa_t *spa, int locks, const void *tag, krw_t rw,
    int priority_flag)
{
	(void) tag;
	int wlocks_held = 0;

	ASSERT3U(SCL_LOCKS, <, sizeof (wlocks_held) * NBBY);

	for (int i = 0; i < SCL_LOCKS; i++) {
		spa_config_lock_t *scl = &spa->spa_config_lock[i];
		if (scl->scl_writer == curthread)
			wlocks_held |= (1 << i);
		if (!(locks & (1 << i)))
			continue;
		mutex_enter(&scl->scl_lock);
		if (rw == RW_READER) {
			while (scl->scl_writer ||
			    (!priority_flag && scl->scl_write_wanted)) {
				cv_wait(&scl->scl_cv, &scl->scl_lock);
			}
		} else {
			ASSERT(scl->scl_writer != curthread);
			while (scl->scl_count != 0) {
				scl->scl_write_wanted++;
				cv_wait(&scl->scl_cv, &scl->scl_lock);
				scl->scl_write_wanted--;
			}
			scl->scl_writer = curthread;
		}
		scl->scl_count++;
		mutex_exit(&scl->scl_lock);
	}
	ASSERT3U(wlocks_held, <=, locks);
}

void
spa_config_enter(spa_t *spa, int locks, const void *tag, krw_t rw)
{
	spa_config_enter_impl(spa, locks, tag, rw, 0);
}

/*
 * The spa_config_enter_priority() allows the mmp thread to cut in front of
 * outstanding write lock requests. This is needed since the mmp updates are
 * time sensitive and failure to service them promptly will result in a
 * suspended pool. This pool suspension has been seen in practice when there is
 * a single disk in a pool that is responding slowly and presumably about to
 * fail.
 */
void
spa_config_enter_priority(spa_t *spa, int locks, const void *tag, krw_t rw)
{
	spa_config_enter_impl(spa, locks, tag, rw, 1);
}

void
spa_config_exit(spa_t *spa, int locks, const void *tag)
{
	(void) tag;
	for (int i = SCL_LOCKS - 1; i >= 0; i--) {
		spa_config_lock_t *scl = &spa->spa_config_lock[i];
		if (!(locks & (1 << i)))
			continue;
		mutex_enter(&scl->scl_lock);
		ASSERT(scl->scl_count > 0);
		if (--scl->scl_count == 0) {
			ASSERT(scl->scl_writer == NULL ||
			    scl->scl_writer == curthread);
			scl->scl_writer = NULL;	/* OK in either case */
			cv_broadcast(&scl->scl_cv);
		}
		mutex_exit(&scl->scl_lock);
	}
}

int
spa_config_held(spa_t *spa, int locks, krw_t rw)
{
	int locks_held = 0;

	for (int i = 0; i < SCL_LOCKS; i++) {
		spa_config_lock_t *scl = &spa->spa_config_lock[i];
		if (!(locks & (1 << i)))
			continue;
		if ((rw == RW_READER && scl->scl_count != 0) ||
		    (rw == RW_WRITER && scl->scl_writer == curthread))
			locks_held |= 1 << i;
	}

	return (locks_held);
}

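/*
 * Illustrative reader-side usage of the functions above (a sketch; bp
 * is assumed to be a caller-provided block pointer): trivial inquiries
 * take only the lowest-level lock, SCL_VDEV, as reader:
 *
 *	spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
 *	(void) bp_get_dsize(spa, bp);
 *	spa_config_exit(spa, SCL_VDEV, FTAG);
 */
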
/*
 * ==========================================================================
 * SPA namespace functions
 * ==========================================================================
 */

void
spa_namespace_enter(const void *tag)
{
	(void) tag;
	ASSERT(!MUTEX_HELD(&spa_namespace_lock));
	mutex_enter(&spa_namespace_lock);
}

boolean_t
spa_namespace_tryenter(const void *tag)
{
	(void) tag;
	ASSERT(!MUTEX_HELD(&spa_namespace_lock));
	return (mutex_tryenter(&spa_namespace_lock));
}

int
spa_namespace_enter_interruptible(const void *tag)
{
	(void) tag;
	ASSERT(!MUTEX_HELD(&spa_namespace_lock));
	return (mutex_enter_interruptible(&spa_namespace_lock));
}

void
spa_namespace_exit(const void *tag)
{
	(void) tag;
	ASSERT(MUTEX_HELD(&spa_namespace_lock));
	mutex_exit(&spa_namespace_lock);
}

boolean_t
spa_namespace_held(void)
{
	return (MUTEX_HELD(&spa_namespace_lock));
}

void
spa_namespace_wait(void)
{
	ASSERT(MUTEX_HELD(&spa_namespace_lock));
	cv_wait(&spa_namespace_cv, &spa_namespace_lock);
}

void
spa_namespace_broadcast(void)
{
	ASSERT(MUTEX_HELD(&spa_namespace_lock));
	cv_broadcast(&spa_namespace_cv);
}

/*
 * Lookup the named spa_t in the AVL tree. The spa_namespace_lock must be held.
 * Returns NULL if no matching spa_t is found.
 */
spa_t *
spa_lookup(const char *name)
{
	static spa_t search;	/* spa_t is large; don't allocate on stack */
	spa_t *spa;
	avl_index_t where;
	char *cp;

	ASSERT(spa_namespace_held());

retry:
	(void) strlcpy(search.spa_name, name, sizeof (search.spa_name));

	/*
	 * If it's a full dataset name, figure out the pool name and
	 * just use that.
	 */
	cp = strpbrk(search.spa_name, "/@#");
	if (cp != NULL)
		*cp = '\0';

	spa = avl_find(&spa_namespace_avl, &search, &where);
	if (spa == NULL)
		return (NULL);

	/*
	 * Avoid racing with import/export, which don't hold the namespace
	 * lock for their entire duration.
	 */
	if ((spa->spa_load_thread != NULL &&
	    spa->spa_load_thread != curthread) ||
	    (spa->spa_export_thread != NULL &&
	    spa->spa_export_thread != curthread)) {
		spa_namespace_wait();
		goto retry;
	}

	return (spa);
}

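/*
 * For example (illustrative): spa_lookup("tank/home@snap") truncates
 * its search key at the first '/', '@', or '#' and therefore returns
 * the spa_t for "tank", if one exists.
 */
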
/*
 * Fires when spa_sync has not completed within zfs_deadman_synctime_ms.
 * If the zfs_deadman_enabled flag is set then it inspects all vdev queues
 * looking for potentially hung I/Os.
 */
void
spa_deadman(void *arg)
{
	spa_t *spa = arg;

	/* Disable the deadman if the pool is suspended. */
	if (spa_suspended(spa))
		return;

	zfs_dbgmsg("slow spa_sync: started %llu seconds ago, calls %llu",
	    (gethrtime() - spa->spa_sync_starttime) / NANOSEC,
	    (u_longlong_t)++spa->spa_deadman_calls);
	if (zfs_deadman_enabled)
		vdev_deadman(spa->spa_root_vdev, FTAG);

	spa->spa_deadman_tqid = taskq_dispatch_delay(system_delay_taskq,
	    spa_deadman, spa, TQ_SLEEP, ddi_get_lbolt() +
	    MSEC_TO_TICK(zfs_deadman_checktime_ms));
}

static int
spa_log_sm_sort_by_txg(const void *va, const void *vb)
{
	const spa_log_sm_t *a = va;
	const spa_log_sm_t *b = vb;

	return (TREE_CMP(a->sls_txg, b->sls_txg));
}

/*
 * Create an uninitialized spa_t with the given name. Requires
 * spa_namespace_lock. The caller must ensure that the spa_t doesn't already
 * exist by calling spa_lookup() first.
 */
spa_t *
spa_add(const char *name, nvlist_t *config, const char *altroot)
{
	spa_t *spa;
	spa_config_dirent_t *dp;

	ASSERT(spa_namespace_held());

	spa = kmem_zalloc(sizeof (spa_t), KM_SLEEP);

	mutex_init(&spa->spa_async_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_errlist_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_errlog_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_evicting_os_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_history_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_proc_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_props_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_cksum_tmpls_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_scrub_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_suspend_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_vdev_top_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_feat_stats_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_flushed_ms_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_activities_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_txg_log_time_lock, NULL, MUTEX_DEFAULT, NULL);

	cv_init(&spa->spa_async_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&spa->spa_evicting_os_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&spa->spa_proc_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&spa->spa_scrub_io_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&spa->spa_suspend_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&spa->spa_activities_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&spa->spa_waiters_cv, NULL, CV_DEFAULT, NULL);

	for (int t = 0; t < TXG_SIZE; t++)
		bplist_create(&spa->spa_free_bplist[t]);

	(void) strlcpy(spa->spa_name, name, sizeof (spa->spa_name));
	spa->spa_state = POOL_STATE_UNINITIALIZED;
	spa->spa_freeze_txg = UINT64_MAX;
	spa->spa_final_txg = UINT64_MAX;
	spa->spa_load_max_txg = UINT64_MAX;
	spa->spa_proc = &p0;
	spa->spa_proc_state = SPA_PROC_NONE;
	spa->spa_trust_config = B_TRUE;
	spa->spa_hostid = zone_get_hostid(NULL);

	spa->spa_deadman_synctime = MSEC2NSEC(zfs_deadman_synctime_ms);
	spa->spa_deadman_ziotime = MSEC2NSEC(zfs_deadman_ziotime_ms);
	spa_set_deadman_failmode(spa, zfs_deadman_failmode);
	spa_set_allocator(spa, zfs_active_allocator);

	zfs_refcount_create(&spa->spa_refcount);
	spa_config_lock_init(spa);
	spa_stats_init(spa);

	ASSERT(spa_namespace_held());
	avl_add(&spa_namespace_avl, spa);

	/*
	 * Set the alternate root, if there is one.
	 */
	if (altroot)
		spa->spa_root = spa_strdup(altroot);

	/* Do not allow more allocators than fraction of CPUs. */
	spa->spa_alloc_count = MAX(MIN(spa_num_allocators,
	    boot_ncpus / MAX(spa_cpus_per_allocator, 1)), 1);

	if (spa->spa_alloc_count > 1) {
		spa->spa_allocs_use = kmem_zalloc(offsetof(spa_allocs_use_t,
		    sau_inuse[spa->spa_alloc_count]), KM_SLEEP);
		mutex_init(&spa->spa_allocs_use->sau_lock, NULL, MUTEX_DEFAULT,
		    NULL);
	}

	avl_create(&spa->spa_metaslabs_by_flushed, metaslab_sort_by_flushed,
	    sizeof (metaslab_t), offsetof(metaslab_t, ms_spa_txg_node));
	avl_create(&spa->spa_sm_logs_by_txg, spa_log_sm_sort_by_txg,
	    sizeof (spa_log_sm_t), offsetof(spa_log_sm_t, sls_node));
	list_create(&spa->spa_log_summary, sizeof (log_summary_entry_t),
	    offsetof(log_summary_entry_t, lse_node));

	/*
	 * Every pool starts with the default cachefile
	 */
	list_create(&spa->spa_config_list, sizeof (spa_config_dirent_t),
	    offsetof(spa_config_dirent_t, scd_link));

	dp = kmem_zalloc(sizeof (spa_config_dirent_t), KM_SLEEP);
	dp->scd_path = altroot ? NULL : spa_strdup(spa_config_path);
	list_insert_head(&spa->spa_config_list, dp);

	VERIFY0(nvlist_alloc(&spa->spa_load_info, NV_UNIQUE_NAME, KM_SLEEP));

	if (config != NULL) {
		nvlist_t *features;

		if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_FEATURES_FOR_READ,
		    &features) == 0) {
			VERIFY0(nvlist_dup(features,
			    &spa->spa_label_features, 0));
		}

		VERIFY0(nvlist_dup(config, &spa->spa_config, 0));
	}

	if (spa->spa_label_features == NULL) {
		VERIFY0(nvlist_alloc(&spa->spa_label_features, NV_UNIQUE_NAME,
		    KM_SLEEP));
	}

	spa->spa_min_ashift = INT_MAX;
	spa->spa_max_ashift = 0;
	spa->spa_min_alloc = INT_MAX;
	spa->spa_max_alloc = 0;
	spa->spa_gcd_alloc = INT_MAX;

	/* Reset cached value */
	spa->spa_dedup_dspace = ~0ULL;

	/*
	 * As a pool is being created, treat all features as disabled by
	 * setting SPA_FEATURE_DISABLED for all entries in the feature
	 * refcount cache.
	 */
	for (int i = 0; i < SPA_FEATURES; i++) {
		spa->spa_feat_refcount_cache[i] = SPA_FEATURE_DISABLED;
	}

	list_create(&spa->spa_leaf_list, sizeof (vdev_t),
	    offsetof(vdev_t, vdev_leaf_node));

	return (spa);
}

/*
 * Removes a spa_t from the namespace, freeing up any memory used. Requires
 * spa_namespace_lock. This is called only after the spa_t has been closed and
 * deactivated.
 */
void
spa_remove(spa_t *spa)
{
	spa_config_dirent_t *dp;

	ASSERT(spa_namespace_held());
	ASSERT(spa_state(spa) == POOL_STATE_UNINITIALIZED);
	ASSERT3U(zfs_refcount_count(&spa->spa_refcount), ==, 0);
	ASSERT0(spa->spa_waiters);

	nvlist_free(spa->spa_config_splitting);

	avl_remove(&spa_namespace_avl, spa);

	if (spa->spa_root)
		spa_strfree(spa->spa_root);

	while ((dp = list_remove_head(&spa->spa_config_list)) != NULL) {
		if (dp->scd_path != NULL)
			spa_strfree(dp->scd_path);
		kmem_free(dp, sizeof (spa_config_dirent_t));
	}

	if (spa->spa_alloc_count > 1) {
		mutex_destroy(&spa->spa_allocs_use->sau_lock);
		kmem_free(spa->spa_allocs_use, offsetof(spa_allocs_use_t,
		    sau_inuse[spa->spa_alloc_count]));
	}

	avl_destroy(&spa->spa_metaslabs_by_flushed);
	avl_destroy(&spa->spa_sm_logs_by_txg);
	list_destroy(&spa->spa_log_summary);
	list_destroy(&spa->spa_config_list);
	list_destroy(&spa->spa_leaf_list);

	nvlist_free(spa->spa_label_features);
	nvlist_free(spa->spa_load_info);
	nvlist_free(spa->spa_feat_stats);
	spa_config_set(spa, NULL);

	zfs_refcount_destroy(&spa->spa_refcount);

	spa_stats_destroy(spa);
	spa_config_lock_destroy(spa);

	for (int t = 0; t < TXG_SIZE; t++)
		bplist_destroy(&spa->spa_free_bplist[t]);

	zio_checksum_templates_free(spa);

	cv_destroy(&spa->spa_async_cv);
	cv_destroy(&spa->spa_evicting_os_cv);
	cv_destroy(&spa->spa_proc_cv);
	cv_destroy(&spa->spa_scrub_io_cv);
	cv_destroy(&spa->spa_suspend_cv);
	cv_destroy(&spa->spa_activities_cv);
	cv_destroy(&spa->spa_waiters_cv);

	mutex_destroy(&spa->spa_flushed_ms_lock);
	mutex_destroy(&spa->spa_async_lock);
	mutex_destroy(&spa->spa_errlist_lock);
	mutex_destroy(&spa->spa_errlog_lock);
	mutex_destroy(&spa->spa_evicting_os_lock);
	mutex_destroy(&spa->spa_history_lock);
	mutex_destroy(&spa->spa_proc_lock);
	mutex_destroy(&spa->spa_props_lock);
	mutex_destroy(&spa->spa_cksum_tmpls_lock);
	mutex_destroy(&spa->spa_scrub_lock);
	mutex_destroy(&spa->spa_suspend_lock);
	mutex_destroy(&spa->spa_vdev_top_lock);
	mutex_destroy(&spa->spa_feat_stats_lock);
	mutex_destroy(&spa->spa_activities_lock);
	mutex_destroy(&spa->spa_txg_log_time_lock);

	kmem_free(spa, sizeof (spa_t));
}

/*
 * Given a pool, return the next pool in the namespace, or NULL if there is
 * none. If 'prev' is NULL, return the first pool.
 */
spa_t *
spa_next(spa_t *prev)
{
	ASSERT(spa_namespace_held());

	if (prev)
		return (AVL_NEXT(&spa_namespace_avl, prev));
	else
		return (avl_first(&spa_namespace_avl));
}

/*
 * ==========================================================================
 * SPA refcount functions
 * ==========================================================================
 */

/*
 * Add a reference to the given spa_t. Must have at least one reference, or
 * have the namespace lock held.
 */
void
spa_open_ref(spa_t *spa, const void *tag)
{
	ASSERT(zfs_refcount_count(&spa->spa_refcount) >= spa->spa_minref ||
	    spa_namespace_held() ||
	    spa->spa_load_thread == curthread);
	(void) zfs_refcount_add(&spa->spa_refcount, tag);
}

/*
 * Remove a reference to the given spa_t. Must have at least one reference, or
 * have the namespace lock held or be part of a pool import/export.
 */
void
spa_close(spa_t *spa, const void *tag)
{
	ASSERT(zfs_refcount_count(&spa->spa_refcount) > spa->spa_minref ||
	    spa_namespace_held() ||
	    spa->spa_load_thread == curthread ||
	    spa->spa_export_thread == curthread);
	(void) zfs_refcount_remove(&spa->spa_refcount, tag);
}

/*
 * Remove a reference to the given spa_t held by a dsl dir that is
 * being asynchronously released. Async releases occur from a taskq
 * performing eviction of dsl datasets and dirs. The namespace lock
 * isn't held and the hold by the object being evicted may contribute to
 * spa_minref (e.g. dataset or directory released during pool export),
 * so the asserts in spa_close() do not apply.
 */
void
spa_async_close(spa_t *spa, const void *tag)
{
	(void) zfs_refcount_remove(&spa->spa_refcount, tag);
}

/*
 * Check to see if the spa refcount is zero. Must be called with
 * spa_namespace_lock held or be the spa export thread. We really
 * compare against spa_minref, which is the number of references
 * acquired when opening a pool.
 */
boolean_t
spa_refcount_zero(spa_t *spa)
{
	ASSERT(spa_namespace_held() ||
	    spa->spa_export_thread == curthread);

	return (zfs_refcount_count(&spa->spa_refcount) == spa->spa_minref);
}

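/*
 * Illustrative hold/release pairing (a sketch; most external consumers
 * go through spa_open()/spa_close() instead): a caller that has looked
 * the spa_t up under the namespace lock keeps it alive like this:
 *
 *	spa_open_ref(spa, FTAG);
 *	...				/- use the spa_t -/
 *	spa_close(spa, FTAG);
 */
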
/*
 * ==========================================================================
 * SPA spare and l2cache tracking
 * ==========================================================================
 */

/*
 * Hot spares and cache devices are tracked using the same code below,
 * for 'auxiliary' devices.
 */

typedef struct spa_aux {
	uint64_t	aux_guid;
	uint64_t	aux_pool;
	avl_node_t	aux_avl;
	int		aux_count;
} spa_aux_t;

static inline int
spa_aux_compare(const void *a, const void *b)
{
	const spa_aux_t *sa = (const spa_aux_t *)a;
	const spa_aux_t *sb = (const spa_aux_t *)b;

	return (TREE_CMP(sa->aux_guid, sb->aux_guid));
}

static void
spa_aux_add(vdev_t *vd, avl_tree_t *avl)
{
	avl_index_t where;
	spa_aux_t search;
	spa_aux_t *aux;

	search.aux_guid = vd->vdev_guid;
	if ((aux = avl_find(avl, &search, &where)) != NULL) {
		aux->aux_count++;
	} else {
		aux = kmem_zalloc(sizeof (spa_aux_t), KM_SLEEP);
		aux->aux_guid = vd->vdev_guid;
		aux->aux_count = 1;
		avl_insert(avl, aux, where);
	}
}

static void
spa_aux_remove(vdev_t *vd, avl_tree_t *avl)
{
	spa_aux_t search;
	spa_aux_t *aux;
	avl_index_t where;

	search.aux_guid = vd->vdev_guid;
	aux = avl_find(avl, &search, &where);

	ASSERT(aux != NULL);

	if (--aux->aux_count == 0) {
		avl_remove(avl, aux);
		kmem_free(aux, sizeof (spa_aux_t));
	} else if (aux->aux_pool == spa_guid(vd->vdev_spa)) {
		aux->aux_pool = 0ULL;
	}
}

static boolean_t
spa_aux_exists(uint64_t guid, uint64_t *pool, int *refcnt, avl_tree_t *avl)
{
	spa_aux_t search, *found;

	search.aux_guid = guid;
	found = avl_find(avl, &search, NULL);

	if (pool) {
		if (found)
			*pool = found->aux_pool;
		else
			*pool = 0ULL;
	}

	if (refcnt) {
		if (found)
			*refcnt = found->aux_count;
		else
			*refcnt = 0;
	}

	return (found != NULL);
}

static void
spa_aux_activate(vdev_t *vd, avl_tree_t *avl)
{
	spa_aux_t search, *found;
	avl_index_t where;

	search.aux_guid = vd->vdev_guid;
	found = avl_find(avl, &search, &where);
	ASSERT(found != NULL);
	ASSERT(found->aux_pool == 0ULL);

	found->aux_pool = spa_guid(vd->vdev_spa);
}

/*
 * Spares are tracked globally due to the following constraints:
 *
 *	- A spare may be part of multiple pools.
 *	- A spare may be added to a pool even if it's actively in use within
 *	  another pool.
 *	- A spare in use in any pool can only be the source of a replacement if
 *	  the target is a spare in the same pool.
 *
 * We keep track of all spares on the system through the use of a reference
 * counted AVL tree. When a vdev is added as a spare, or used as a replacement
 * spare, we bump the reference count in the AVL tree. In addition, we set
 * the 'vdev_isspare' member to indicate that the device is a spare (active or
 * inactive). When a spare is made active (used to replace a device in the
 * pool), we also keep track of which pool it's been made a part of.
 *
 * The 'spa_spare_lock' protects the AVL tree. These functions are normally
 * called under the spa_namespace lock as part of vdev reconfiguration. The
 * separate spare lock exists for the status query path, which does not need to
 * be completely consistent with respect to other vdev configuration changes.
 */

static int
spa_spare_compare(const void *a, const void *b)
{
	return (spa_aux_compare(a, b));
}

void
spa_spare_add(vdev_t *vd)
{
	mutex_enter(&spa_spare_lock);
	ASSERT(!vd->vdev_isspare);
	spa_aux_add(vd, &spa_spare_avl);
	vd->vdev_isspare = B_TRUE;
	mutex_exit(&spa_spare_lock);
}

void
spa_spare_remove(vdev_t *vd)
{
	mutex_enter(&spa_spare_lock);
	ASSERT(vd->vdev_isspare);
	spa_aux_remove(vd, &spa_spare_avl);
	vd->vdev_isspare = B_FALSE;
	mutex_exit(&spa_spare_lock);
}

boolean_t
spa_spare_exists(uint64_t guid, uint64_t *pool, int *refcnt)
{
	boolean_t found;

	mutex_enter(&spa_spare_lock);
	found = spa_aux_exists(guid, pool, refcnt, &spa_spare_avl);
	mutex_exit(&spa_spare_lock);

	return (found);
}

void
spa_spare_activate(vdev_t *vd)
{
	mutex_enter(&spa_spare_lock);
	ASSERT(vd->vdev_isspare);
	spa_aux_activate(vd, &spa_spare_avl);
	mutex_exit(&spa_spare_lock);
}

/*
 * Level 2 ARC devices are tracked globally for the same reasons as spares.
 * Cache devices currently only support one pool per cache device, and so
 * for these devices the aux reference count is currently unused beyond 1.
 */

static int
spa_l2cache_compare(const void *a, const void *b)
{
	return (spa_aux_compare(a, b));
}

void
spa_l2cache_add(vdev_t *vd)
{
	mutex_enter(&spa_l2cache_lock);
	ASSERT(!vd->vdev_isl2cache);
	spa_aux_add(vd, &spa_l2cache_avl);
	vd->vdev_isl2cache = B_TRUE;
	mutex_exit(&spa_l2cache_lock);
}

void
spa_l2cache_remove(vdev_t *vd)
{
	mutex_enter(&spa_l2cache_lock);
	ASSERT(vd->vdev_isl2cache);
	spa_aux_remove(vd, &spa_l2cache_avl);
	vd->vdev_isl2cache = B_FALSE;
	mutex_exit(&spa_l2cache_lock);
}

boolean_t
spa_l2cache_exists(uint64_t guid, uint64_t *pool)
{
	boolean_t found;

	mutex_enter(&spa_l2cache_lock);
	found = spa_aux_exists(guid, pool, NULL, &spa_l2cache_avl);
	mutex_exit(&spa_l2cache_lock);

	return (found);
}

void
spa_l2cache_activate(vdev_t *vd)
{
	mutex_enter(&spa_l2cache_lock);
	ASSERT(vd->vdev_isl2cache);
	spa_aux_activate(vd, &spa_l2cache_avl);
	mutex_exit(&spa_l2cache_lock);
}

/*
 * ==========================================================================
 * SPA vdev locking
 * ==========================================================================
 */

/*
 * Lock the given spa_t for the purpose of adding or removing a vdev.
 * Grabs the global spa_namespace_lock plus the spa config lock for writing.
 * It returns the next transaction group for the spa_t.
 */
uint64_t
spa_vdev_enter(spa_t *spa)
{
	mutex_enter(&spa->spa_vdev_top_lock);
	spa_namespace_enter(FTAG);

	ASSERT0(spa->spa_export_thread);

	vdev_autotrim_stop_all(spa);

	return (spa_vdev_config_enter(spa));
}

/*
 * The same as spa_vdev_enter() above but additionally takes the guid of
 * the vdev being detached. When there is a rebuild in process it will be
 * suspended while the vdev tree is modified then resumed by spa_vdev_exit().
 * The rebuild is canceled if only a single child remains after the detach.
 */
uint64_t
spa_vdev_detach_enter(spa_t *spa, uint64_t guid)
{
	mutex_enter(&spa->spa_vdev_top_lock);
	spa_namespace_enter(FTAG);

	ASSERT0(spa->spa_export_thread);

	vdev_autotrim_stop_all(spa);

	if (guid != 0) {
		vdev_t *vd = spa_lookup_by_guid(spa, guid, B_FALSE);
		if (vd) {
			vdev_rebuild_stop_wait(vd->vdev_top);
		}
	}

	return (spa_vdev_config_enter(spa));
}

/*
 * Internal implementation for spa_vdev_enter(). Used when a vdev
 * operation requires multiple syncs (i.e. removing a device) while
 * keeping the spa_namespace_lock held.
 */
uint64_t
spa_vdev_config_enter(spa_t *spa)
{
	ASSERT(spa_namespace_held());

	spa_config_enter(spa, SCL_ALL, spa, RW_WRITER);

	return (spa_last_synced_txg(spa) + 1);
}

/*
 * Used in combination with spa_vdev_config_enter() to allow the syncing
 * of multiple transactions without releasing the spa_namespace_lock.
 */
void
spa_vdev_config_exit(spa_t *spa, vdev_t *vd, uint64_t txg, int error,
    const char *tag)
{
	ASSERT(spa_namespace_held());

	int config_changed = B_FALSE;

	ASSERT(txg > spa_last_synced_txg(spa));

	spa->spa_pending_vdev = NULL;

	/*
	 * Reassess the DTLs.
	 */
	vdev_dtl_reassess(spa->spa_root_vdev, 0, 0, B_FALSE, B_FALSE);

	if (error == 0 && !list_is_empty(&spa->spa_config_dirty_list)) {
		config_changed = B_TRUE;
		spa->spa_config_generation++;
	}

	/*
	 * Verify the metaslab classes.
	 */
	metaslab_class_validate(spa_normal_class(spa));
	metaslab_class_validate(spa_log_class(spa));
	metaslab_class_validate(spa_embedded_log_class(spa));
	metaslab_class_validate(spa_special_class(spa));
	metaslab_class_validate(spa_special_embedded_log_class(spa));
	metaslab_class_validate(spa_dedup_class(spa));

	spa_config_exit(spa, SCL_ALL, spa);

	/*
	 * Panic the system if the specified tag requires it. This
	 * is useful for ensuring that configurations are updated
	 * transactionally.
	 */
	if (zio_injection_enabled)
		zio_handle_panic_injection(spa, tag, 0);

	/*
	 * Note: this txg_wait_synced() is important because it ensures
	 * that there won't be more than one config change per txg.
	 * This allows us to use the txg as the generation number.
	 */
	if (error == 0)
		txg_wait_synced(spa->spa_dsl_pool, txg);

	if (vd != NULL) {
		ASSERT(!vd->vdev_detached || vd->vdev_dtl_sm == NULL);
		if (vd->vdev_ops->vdev_op_leaf) {
			mutex_enter(&vd->vdev_initialize_lock);
			vdev_initialize_stop(vd, VDEV_INITIALIZE_CANCELED,
			    NULL);
			mutex_exit(&vd->vdev_initialize_lock);

			mutex_enter(&vd->vdev_trim_lock);
			vdev_trim_stop(vd, VDEV_TRIM_CANCELED, NULL);
			mutex_exit(&vd->vdev_trim_lock);
		}

		/*
		 * The vdev may be both a leaf and top-level device.
		 */
		vdev_autotrim_stop_wait(vd);

		spa_config_enter(spa, SCL_STATE_ALL, spa, RW_WRITER);
		vdev_free(vd);
		spa_config_exit(spa, SCL_STATE_ALL, spa);
	}

	/*
	 * If the config changed, update the config cache.
	 */
	if (config_changed)
		spa_write_cachefile(spa, B_FALSE, B_TRUE, B_TRUE);
}

/*
 * Unlock the spa_t after adding or removing a vdev. Besides undoing the
 * locking of spa_vdev_enter(), we also want to make sure the transactions
 * have synced to disk, and then update the global configuration cache with
 * the new information.
 */
int
spa_vdev_exit(spa_t *spa, vdev_t *vd, uint64_t txg, int error)
{
	vdev_autotrim_restart(spa);
	vdev_rebuild_restart(spa);

	spa_vdev_config_exit(spa, vd, txg, error, FTAG);
	spa_namespace_exit(FTAG);
	mutex_exit(&spa->spa_vdev_top_lock);

	return (error);
}

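/*
 * The canonical calling pattern for the pair above (an illustrative
 * sketch of how vdev operations elsewhere in spa.c are structured):
 *
 *	uint64_t txg = spa_vdev_enter(spa);
 *	error = ...;			/- modify the vdev tree -/
 *	return (spa_vdev_exit(spa, vd, txg, error));
 */
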
/*
 * Lock the given spa_t for the purpose of changing vdev state.
 */
void
spa_vdev_state_enter(spa_t *spa, int oplocks)
{
	int locks = SCL_STATE_ALL | oplocks;

	/*
	 * Root pools may need to read from the underlying devfs filesystem
	 * when opening up a vdev. Unfortunately if we're holding the
	 * SCL_ZIO lock it will result in a deadlock when we try to issue
	 * the read from the root filesystem. Instead we "prefetch"
	 * the associated vnodes that we need prior to opening the
	 * underlying devices and cache them so that we can prevent
	 * any I/O when we are doing the actual open.
	 */
	if (spa_is_root(spa)) {
		int low = locks & ~(SCL_ZIO - 1);
		int high = locks & ~low;

		spa_config_enter(spa, high, spa, RW_WRITER);
		vdev_hold(spa->spa_root_vdev);
		spa_config_enter(spa, low, spa, RW_WRITER);
	} else {
		spa_config_enter(spa, locks, spa, RW_WRITER);
	}
	spa->spa_vdev_locks = locks;
}

int
spa_vdev_state_exit(spa_t *spa, vdev_t *vd, int error)
{
	boolean_t config_changed = B_FALSE;
	vdev_t *vdev_top;

	if (vd == NULL || vd == spa->spa_root_vdev) {
		vdev_top = spa->spa_root_vdev;
	} else {
		vdev_top = vd->vdev_top;
	}

	if (vd != NULL || error == 0)
		vdev_dtl_reassess(vdev_top, 0, 0, B_FALSE, B_FALSE);

	if (vd != NULL) {
		if (vd != spa->spa_root_vdev)
			vdev_state_dirty(vdev_top);

		config_changed = B_TRUE;
		spa->spa_config_generation++;
	}

	if (spa_is_root(spa))
		vdev_rele(spa->spa_root_vdev);

	ASSERT3U(spa->spa_vdev_locks, >=, SCL_STATE_ALL);
	spa_config_exit(spa, spa->spa_vdev_locks, spa);

	/*
	 * If anything changed, wait for it to sync. This ensures that,
	 * from the system administrator's perspective, zpool(8) commands
	 * are synchronous. This is important for things like zpool offline:
	 * when the command completes, you expect no further I/O from ZFS.
	 */
	if (vd != NULL)
		txg_wait_synced(spa->spa_dsl_pool, 0);

	/*
	 * If the config changed, update the config cache.
	 */
	if (config_changed) {
		spa_namespace_enter(FTAG);
		spa_write_cachefile(spa, B_FALSE, B_TRUE, B_FALSE);
		spa_namespace_exit(FTAG);
	}

	return (error);
}

/*
 * ==========================================================================
 * Miscellaneous functions
 * ==========================================================================
 */

void
spa_activate_mos_feature(spa_t *spa, const char *feature, dmu_tx_t *tx)
{
	if (!nvlist_exists(spa->spa_label_features, feature)) {
		fnvlist_add_boolean(spa->spa_label_features, feature);
		/*
		 * When we are creating the pool (tx_txg==TXG_INITIAL), we can't
		 * dirty the vdev config because lock SCL_CONFIG is not held.
		 * Thankfully, in this case we don't need to dirty the config
		 * because it will be written out anyway when we finish
		 * creating the pool.
		 */
		if (tx->tx_txg != TXG_INITIAL)
			vdev_config_dirty(spa->spa_root_vdev);
	}
}

void
spa_deactivate_mos_feature(spa_t *spa, const char *feature)
{
	if (nvlist_remove_all(spa->spa_label_features, feature) == 0)
		vdev_config_dirty(spa->spa_root_vdev);
}

/*
 * Return the spa_t associated with given pool_guid, if it exists. If
 * device_guid is non-zero, determine whether the pool exists *and* contains
 * a device with the specified device_guid.
 */
spa_t *
spa_by_guid(uint64_t pool_guid, uint64_t device_guid)
{
	spa_t *spa;
	avl_tree_t *t = &spa_namespace_avl;

	ASSERT(spa_namespace_held());

	for (spa = avl_first(t); spa != NULL; spa = AVL_NEXT(t, spa)) {
		if (spa->spa_state == POOL_STATE_UNINITIALIZED)
			continue;
		if (spa->spa_root_vdev == NULL)
			continue;
		if (spa_guid(spa) == pool_guid) {
			if (device_guid == 0)
				break;

			if (vdev_lookup_by_guid(spa->spa_root_vdev,
			    device_guid) != NULL)
				break;

			/*
			 * Check any devices we may be in the process of
			 * adding.
			 */
			if (spa->spa_pending_vdev) {
				if (vdev_lookup_by_guid(spa->spa_pending_vdev,
				    device_guid) != NULL)
					break;
			}
		}
	}

	return (spa);
}

/*
 * Determine whether a pool with the given pool_guid exists.
 */
boolean_t
spa_guid_exists(uint64_t pool_guid, uint64_t device_guid)
{
	return (spa_by_guid(pool_guid, device_guid) != NULL);
}

char *
spa_strdup(const char *s)
{
	size_t len;
	char *new;

	len = strlen(s);
	new = kmem_alloc(len + 1, KM_SLEEP);
	memcpy(new, s, len + 1);

	return (new);
}

void
spa_strfree(char *s)
{
	kmem_free(s, strlen(s) + 1);
}

uint64_t
spa_generate_guid(spa_t *spa)
{
	uint64_t guid;

	if (spa != NULL) {
		do {
			(void) random_get_pseudo_bytes((void *)&guid,
			    sizeof (guid));
		} while (guid == 0 || spa_guid_exists(spa_guid(spa), guid));
	} else {
		do {
			(void) random_get_pseudo_bytes((void *)&guid,
			    sizeof (guid));
		} while (guid == 0 || spa_guid_exists(guid, 0));
	}

	return (guid);
}

static boolean_t
spa_load_guid_exists(uint64_t guid)
{
	avl_tree_t *t = &spa_namespace_avl;

	ASSERT(spa_namespace_held());

	for (spa_t *spa = avl_first(t); spa != NULL; spa = AVL_NEXT(t, spa)) {
		if (spa_load_guid(spa) == guid)
			return (B_TRUE);
	}

	return (arc_async_flush_guid_inuse(guid));
}

uint64_t
spa_generate_load_guid(void)
{
	uint64_t guid;

	do {
		(void) random_get_pseudo_bytes((void *)&guid,
		    sizeof (guid));
	} while (guid == 0 || spa_load_guid_exists(guid));

	return (guid);
}

1662 void
snprintf_blkptr(char * buf,size_t buflen,const blkptr_t * bp)1663 snprintf_blkptr(char *buf, size_t buflen, const blkptr_t *bp)
1664 {
1665 char type[256];
1666 const char *checksum = NULL;
1667 const char *compress = NULL;
1668
1669 if (bp != NULL) {
1670 if (BP_GET_TYPE(bp) & DMU_OT_NEWTYPE) {
1671 dmu_object_byteswap_t bswap =
1672 DMU_OT_BYTESWAP(BP_GET_TYPE(bp));
1673 (void) snprintf(type, sizeof (type), "bswap %s %s",
1674 DMU_OT_IS_METADATA(BP_GET_TYPE(bp)) ?
1675 "metadata" : "data",
1676 dmu_ot_byteswap[bswap].ob_name);
1677 } else {
1678 (void) strlcpy(type, dmu_ot[BP_GET_TYPE(bp)].ot_name,
1679 sizeof (type));
1680 }
1681 if (!BP_IS_EMBEDDED(bp)) {
1682 checksum =
1683 zio_checksum_table[BP_GET_CHECKSUM(bp)].ci_name;
1684 }
1685 compress = zio_compress_table[BP_GET_COMPRESS(bp)].ci_name;
1686 }
1687
1688 SNPRINTF_BLKPTR(kmem_scnprintf, ' ', buf, buflen, bp, type, checksum,
1689 compress);
1690 }
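
/*
 * Illustrative usage (hypothetical caller, not from the original file):
 *
 *	char blkbuf[BP_SPRINTF_LEN];
 *	snprintf_blkptr(blkbuf, sizeof (blkbuf), bp);
 *	zfs_dbgmsg("examining bp %s", blkbuf);
 *
 * BP_SPRINTF_LEN is the buffer size used elsewhere in ZFS for rendering
 * block pointers; a NULL bp is handled by the SNPRINTF_BLKPTR macro.
 */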

void
spa_freeze(spa_t *spa)
{
	uint64_t freeze_txg = 0;

	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
	if (spa->spa_freeze_txg == UINT64_MAX) {
		freeze_txg = spa_last_synced_txg(spa) + TXG_SIZE;
		spa->spa_freeze_txg = freeze_txg;
	}
	spa_config_exit(spa, SCL_ALL, FTAG);
	if (freeze_txg != 0)
		txg_wait_synced(spa_get_dsl(spa), freeze_txg);
}

void
zfs_panic_recover(const char *fmt, ...)
{
	va_list adx;

	va_start(adx, fmt);
	vcmn_err(zfs_recover ? CE_WARN : CE_PANIC, fmt, adx);
	va_end(adx);
}

/*
 * This is a stripped-down version of strtoull, suitable only for converting
 * lowercase hexadecimal numbers that don't overflow.
 */
uint64_t
zfs_strtonum(const char *str, char **nptr)
{
	uint64_t val = 0;
	char c;
	int digit;

	while ((c = *str) != '\0') {
		if (c >= '0' && c <= '9')
			digit = c - '0';
		else if (c >= 'a' && c <= 'f')
			digit = 10 + c - 'a';
		else
			break;

		val *= 16;
		val += digit;

		str++;
	}

	if (nptr)
		*nptr = (char *)str;

	return (val);
}
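
/*
 * Worked example (illustrative): zfs_strtonum("1a2f", &end) accumulates
 * 0x1 -> 0x1a -> 0x1a2 -> 0x1a2f = 6703 and leaves *end at the terminating
 * NUL. Parsing stops at the first non-hex character, so
 * zfs_strtonum("ffx", &end) returns 0xff with *end pointing at 'x'. There
 * is no "0x" prefix handling and no overflow detection.
 */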

void
spa_activate_allocation_classes(spa_t *spa, dmu_tx_t *tx)
{
	/*
	 * We bump the feature refcount for each special vdev added to the
	 * pool.
	 */
	ASSERT(spa_feature_is_enabled(spa, SPA_FEATURE_ALLOCATION_CLASSES));
	spa_feature_incr(spa, SPA_FEATURE_ALLOCATION_CLASSES, tx);
}

/*
 * ==========================================================================
 * Accessor functions
 * ==========================================================================
 */

boolean_t
spa_shutting_down(spa_t *spa)
{
	return (spa->spa_async_suspended);
}

dsl_pool_t *
spa_get_dsl(spa_t *spa)
{
	return (spa->spa_dsl_pool);
}

boolean_t
spa_is_initializing(spa_t *spa)
{
	return (spa->spa_is_initializing);
}

boolean_t
spa_indirect_vdevs_loaded(spa_t *spa)
{
	return (spa->spa_indirect_vdevs_loaded);
}

blkptr_t *
spa_get_rootblkptr(spa_t *spa)
{
	return (&spa->spa_ubsync.ub_rootbp);
}

void
spa_set_rootblkptr(spa_t *spa, const blkptr_t *bp)
{
	spa->spa_uberblock.ub_rootbp = *bp;
}

void
spa_altroot(spa_t *spa, char *buf, size_t buflen)
{
	if (spa->spa_root == NULL)
		buf[0] = '\0';
	else
		(void) strlcpy(buf, spa->spa_root, buflen);
}

uint32_t
spa_sync_pass(spa_t *spa)
{
	return (spa->spa_sync_pass);
}

char *
spa_name(spa_t *spa)
{
	return (spa->spa_name);
}

uint64_t
spa_guid(spa_t *spa)
{
	dsl_pool_t *dp = spa_get_dsl(spa);
	uint64_t guid;

	/*
	 * If we fail to parse the config during spa_load(), we can go through
	 * the error path (which posts an ereport) and end up here with no root
	 * vdev. We stash the original pool guid in 'spa_config_guid' to handle
	 * this case.
	 */
	if (spa->spa_root_vdev == NULL)
		return (spa->spa_config_guid);

	guid = spa->spa_last_synced_guid != 0 ?
	    spa->spa_last_synced_guid : spa->spa_root_vdev->vdev_guid;

	/*
	 * Return the most recently synced out guid unless we're
	 * in syncing context.
	 */
	if (dp && dsl_pool_sync_context(dp))
		return (spa->spa_root_vdev->vdev_guid);
	else
		return (guid);
}

uint64_t
spa_load_guid(spa_t *spa)
{
	/*
	 * This is a GUID that exists solely as a reference for the
	 * purposes of the arc. It is generated at load time, and
	 * is never written to persistent storage.
	 */
	return (spa->spa_load_guid);
}

uint64_t
spa_last_synced_txg(spa_t *spa)
{
	return (spa->spa_ubsync.ub_txg);
}

uint64_t
spa_first_txg(spa_t *spa)
{
	return (spa->spa_first_txg);
}

uint64_t
spa_syncing_txg(spa_t *spa)
{
	return (spa->spa_syncing_txg);
}

/*
 * Return the last txg where data can be dirtied. The final txgs are used
 * just to clear out any deferred frees that remain.
 */
uint64_t
spa_final_dirty_txg(spa_t *spa)
{
	return (spa->spa_final_txg - TXG_DEFER_SIZE);
}

pool_state_t
spa_state(spa_t *spa)
{
	return (spa->spa_state);
}

spa_load_state_t
spa_load_state(spa_t *spa)
{
	return (spa->spa_load_state);
}

uint64_t
spa_freeze_txg(spa_t *spa)
{
	return (spa->spa_freeze_txg);
}

/*
 * Return the inflated asize for a logical write in bytes. This is used by the
 * DMU to calculate the space a logical write will require on disk.
 * If lsize is smaller than the largest physical block size allocatable on
 * this pool, we use that value instead, since the write will end up using
 * the whole block anyway.
 */
uint64_t
spa_get_worst_case_asize(spa_t *spa, uint64_t lsize)
{
	if (lsize == 0)
		return (0);	/* No inflation needed */
	return (MAX(lsize, 1 << spa->spa_max_ashift) * spa_asize_inflation);
}
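
/*
 * Worked example (illustrative, not normative): with spa_max_ashift = 12
 * (4 KiB minimum physical block) and the default spa_asize_inflation = 24,
 * a 512-byte logical write is charged MAX(512, 4096) * 24 = 96 KiB, while
 * a 128 KiB write is charged 128 KiB * 24 = 3 MiB. The inflation factor
 * covers worst-case copies (multiple DVAs), RAID-Z/mirror overhead, and
 * associated metadata.
 */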

/*
 * Return the range of minimum allocation sizes for the normal allocation
 * class. This can be used by external consumers of the DMU to estimate
 * potential wasted capacity when setting the recordsize for an object.
 * This is mainly for dRAID pools which always pad to a full stripe width.
 */
void
spa_get_min_alloc_range(spa_t *spa, uint64_t *min_alloc, uint64_t *max_alloc)
{
	*min_alloc = spa->spa_min_alloc;
	*max_alloc = spa->spa_max_alloc;
}

/*
 * Return the amount of slop space in bytes. It is typically 1/32 of the pool
 * (3.2%), minus the embedded log space. On very small pools, it may be
 * slightly larger than this. On very large pools, it will be capped to
 * the value of spa_max_slop. The embedded log space is not included in
 * spa_dspace. By subtracting it, the usable space (per "zfs list") is a
 * constant 97% of the total space, regardless of metaslab size (assuming the
 * default spa_slop_shift=5 and a non-tiny pool).
 *
 * See the comment above spa_slop_shift for more details.
 */
uint64_t
spa_get_slop_space(spa_t *spa)
{
	uint64_t space = 0;
	uint64_t slop = 0;

	/*
	 * Make sure spa_dedup_dspace has been set.
	 */
	if (spa->spa_dedup_dspace == ~0ULL)
		spa_update_dspace(spa);

	space = spa->spa_rdspace;
	slop = MIN(space >> spa_slop_shift, spa_max_slop);

	/*
	 * Subtract the embedded log space, but no more than half the (3.2%)
	 * unusable space. Note, the "no more than half" is only relevant if
	 * zfs_embedded_slog_min_ms >> spa_slop_shift < 2, which is not true by
	 * default.
	 */
	uint64_t embedded_log =
	    metaslab_class_get_dspace(spa_embedded_log_class(spa));
	embedded_log += metaslab_class_get_dspace(
	    spa_special_embedded_log_class(spa));
	slop -= MIN(embedded_log, slop >> 1);

	/*
	 * Slop space should be at least spa_min_slop, but no more than half
	 * the entire pool.
	 */
	slop = MAX(slop, MIN(space >> 1, spa_min_slop));
	return (slop);
}
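
/*
 * Worked example (illustrative): on a 1 TiB pool with the default
 * spa_slop_shift = 5, the raw slop is 1 TiB >> 5 = 32 GiB, well under the
 * default spa_max_slop cap of 128 GiB. With, say, 1 GiB of embedded log
 * space, the returned slop is 32 GiB - 1 GiB = 31 GiB. On a tiny pool the
 * spa_min_slop floor (128 MiB by default) dominates instead, limited to
 * half the pool.
 */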

uint64_t
spa_get_dspace(spa_t *spa)
{
	return (spa->spa_dspace);
}

uint64_t
spa_get_checkpoint_space(spa_t *spa)
{
	return (spa->spa_checkpoint_info.sci_dspace);
}

void
spa_update_dspace(spa_t *spa)
{
	spa->spa_rdspace = metaslab_class_get_dspace(spa_normal_class(spa));
	if (spa->spa_nonallocating_dspace > 0) {
		/*
		 * Subtract the space provided by all non-allocating vdevs that
		 * contribute to dspace. If a file is overwritten, its old
		 * blocks are freed and new blocks are allocated. If there are
		 * no snapshots of the file, the available space should remain
		 * the same. The old blocks could be freed from the
		 * non-allocating vdev, but the new blocks must be allocated on
		 * other (allocating) vdevs. By reserving the entire size of
		 * the non-allocating vdevs (including allocated space), we
		 * ensure that there will be enough space on the allocating
		 * vdevs for this file overwrite to succeed.
		 *
		 * Note that the DMU/DSL doesn't actually know or care
		 * how much space is allocated (it does its own tracking
		 * of how much space has been logically used). So it
		 * doesn't matter that the data we are moving may be
		 * allocated twice (on the old device and the new device).
		 */
		ASSERT3U(spa->spa_rdspace, >=, spa->spa_nonallocating_dspace);
		spa->spa_rdspace -= spa->spa_nonallocating_dspace;
	}
	spa->spa_dspace = spa->spa_rdspace + ddt_get_dedup_dspace(spa) +
	    brt_get_dspace(spa);
}

/*
 * Return the failure mode that has been set for this pool. The default
 * behavior will be to block all I/Os when a complete failure occurs.
 */
uint64_t
spa_get_failmode(spa_t *spa)
{
	return (spa->spa_failmode);
}

boolean_t
spa_suspended(spa_t *spa)
{
	return (spa->spa_suspended != ZIO_SUSPEND_NONE);
}

uint64_t
spa_version(spa_t *spa)
{
	return (spa->spa_ubsync.ub_version);
}

boolean_t
spa_deflate(spa_t *spa)
{
	return (spa->spa_deflate);
}

metaslab_class_t *
spa_normal_class(spa_t *spa)
{
	return (spa->spa_normal_class);
}

metaslab_class_t *
spa_log_class(spa_t *spa)
{
	return (spa->spa_log_class);
}

metaslab_class_t *
spa_embedded_log_class(spa_t *spa)
{
	return (spa->spa_embedded_log_class);
}

metaslab_class_t *
spa_special_class(spa_t *spa)
{
	return (spa->spa_special_class);
}

metaslab_class_t *
spa_special_embedded_log_class(spa_t *spa)
{
	return (spa->spa_special_embedded_log_class);
}

metaslab_class_t *
spa_dedup_class(spa_t *spa)
{
	return (spa->spa_dedup_class);
}

boolean_t
spa_special_has_ddt(spa_t *spa)
{
	return (zfs_ddt_data_is_special && spa_has_special(spa));
}

/*
 * Locate an appropriate allocation class
 */
metaslab_class_t *
spa_preferred_class(spa_t *spa, const zio_t *zio)
{
	metaslab_class_t *mc = zio->io_metaslab_class;
	boolean_t tried_dedup = (mc == spa_dedup_class(spa));
	boolean_t tried_special = (mc == spa_special_class(spa));
	const zio_prop_t *zp = &zio->io_prop;

	/*
	 * Override the object type for the purposes of selecting a storage
	 * class. Primarily for DMU_OTN_ types where we can't explicitly
	 * control their storage class; instead, choose a static type that
	 * most closely matches what we want.
	 */
	dmu_object_type_t objtype =
	    zp->zp_storage_type == DMU_OT_NONE ?
	    zp->zp_type : zp->zp_storage_type;

	/*
	 * ZIL allocations determine their class in zio_alloc_zil().
	 */
	ASSERT(objtype != DMU_OT_INTENT_LOG);

	if (DMU_OT_IS_DDT(objtype)) {
		if (spa_has_dedup(spa) && !tried_dedup && !tried_special)
			return (spa_dedup_class(spa));
		else if (spa_special_has_ddt(spa) && !tried_special)
			return (spa_special_class(spa));
		else
			return (spa_normal_class(spa));
	}

	/* Indirect blocks for user data can land in special if allowed */
	if (zp->zp_level > 0 &&
	    (DMU_OT_IS_FILE(objtype) || objtype == DMU_OT_ZVOL)) {
		if (zfs_user_indirect_is_special && spa_has_special(spa) &&
		    !tried_special)
			return (spa_special_class(spa));
		else
			return (spa_normal_class(spa));
	}

	if (DMU_OT_IS_METADATA(objtype) || zp->zp_level > 0) {
		if (spa_has_special(spa) && !tried_special)
			return (spa_special_class(spa));
		else
			return (spa_normal_class(spa));
	}

	/*
	 * Allow small file or zvol blocks in the special class if opted in by
	 * the special_smallblk property. However, always leave a reserve of
	 * zfs_special_class_metadata_reserve_pct exclusively for metadata.
	 */
	if ((DMU_OT_IS_FILE(objtype) || objtype == DMU_OT_ZVOL) &&
	    spa_has_special(spa) && !tried_special &&
	    zio->io_size <= zp->zp_zpl_smallblk) {
		metaslab_class_t *special = spa_special_class(spa);
		uint64_t alloc = metaslab_class_get_alloc(special);
		uint64_t space = metaslab_class_get_space(special);
		uint64_t limit =
		    (space * (100 - zfs_special_class_metadata_reserve_pct))
		    / 100;

		if (alloc < limit)
			return (special);
	}

	return (spa_normal_class(spa));
}
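
/*
 * Summary of the selection order above (descriptive note, not from the
 * original file): DDT blocks prefer dedup -> special -> normal; user-data
 * indirect blocks prefer special only when zfs_user_indirect_is_special is
 * set; other metadata prefers special; small file/zvol data blocks may use
 * special when io_size <= special_smallblk and the class is below its
 * metadata-reserve limit; everything else falls through to normal. Each
 * class is tried at most once per allocation (the tried_* flags above).
 */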

void
spa_evicting_os_register(spa_t *spa, objset_t *os)
{
	mutex_enter(&spa->spa_evicting_os_lock);
	list_insert_head(&spa->spa_evicting_os_list, os);
	mutex_exit(&spa->spa_evicting_os_lock);
}

void
spa_evicting_os_deregister(spa_t *spa, objset_t *os)
{
	mutex_enter(&spa->spa_evicting_os_lock);
	list_remove(&spa->spa_evicting_os_list, os);
	cv_broadcast(&spa->spa_evicting_os_cv);
	mutex_exit(&spa->spa_evicting_os_lock);
}

void
spa_evicting_os_wait(spa_t *spa)
{
	mutex_enter(&spa->spa_evicting_os_lock);
	while (!list_is_empty(&spa->spa_evicting_os_list))
		cv_wait(&spa->spa_evicting_os_cv, &spa->spa_evicting_os_lock);
	mutex_exit(&spa->spa_evicting_os_lock);

	dmu_buf_user_evict_wait();
}

int
spa_max_replication(spa_t *spa)
{
	/*
	 * As of SPA_VERSION == SPA_VERSION_DITTO_BLOCKS, we are able to
	 * handle BPs with more than one DVA allocated. Set our max
	 * replication level accordingly.
	 */
	if (spa_version(spa) < SPA_VERSION_DITTO_BLOCKS)
		return (1);
	return (MIN(SPA_DVAS_PER_BP, spa_max_replication_override));
}

int
spa_prev_software_version(spa_t *spa)
{
	return (spa->spa_prev_software_version);
}

uint64_t
spa_deadman_synctime(spa_t *spa)
{
	return (spa->spa_deadman_synctime);
}

spa_autotrim_t
spa_get_autotrim(spa_t *spa)
{
	return (spa->spa_autotrim);
}

uint64_t
spa_deadman_ziotime(spa_t *spa)
{
	return (spa->spa_deadman_ziotime);
}

uint64_t
spa_get_deadman_failmode(spa_t *spa)
{
	return (spa->spa_deadman_failmode);
}

void
spa_set_deadman_failmode(spa_t *spa, const char *failmode)
{
	if (strcmp(failmode, "wait") == 0)
		spa->spa_deadman_failmode = ZIO_FAILURE_MODE_WAIT;
	else if (strcmp(failmode, "continue") == 0)
		spa->spa_deadman_failmode = ZIO_FAILURE_MODE_CONTINUE;
	else if (strcmp(failmode, "panic") == 0)
		spa->spa_deadman_failmode = ZIO_FAILURE_MODE_PANIC;
	else
		spa->spa_deadman_failmode = ZIO_FAILURE_MODE_WAIT;
}
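
/*
 * Illustrative note: unrecognized strings silently fall back to "wait", so
 * a hypothetical call such as spa_set_deadman_failmode(spa, "pnaic") leaves
 * the pool in ZIO_FAILURE_MODE_WAIT. Input validation is expected to happen
 * in the module-parameter handler (see param_set_deadman_failmode_common()
 * below), which rejects bad values before they reach this setter.
 */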

void
spa_set_deadman_ziotime(hrtime_t ns)
{
	spa_t *spa = NULL;

	if (spa_mode_global != SPA_MODE_UNINIT) {
		spa_namespace_enter(FTAG);
		while ((spa = spa_next(spa)) != NULL)
			spa->spa_deadman_ziotime = ns;
		spa_namespace_exit(FTAG);
	}
}

void
spa_set_deadman_synctime(hrtime_t ns)
{
	spa_t *spa = NULL;

	if (spa_mode_global != SPA_MODE_UNINIT) {
		spa_namespace_enter(FTAG);
		while ((spa = spa_next(spa)) != NULL)
			spa->spa_deadman_synctime = ns;
		spa_namespace_exit(FTAG);
	}
}

uint64_t
dva_get_dsize_sync(spa_t *spa, const dva_t *dva)
{
	uint64_t asize = DVA_GET_ASIZE(dva);
	uint64_t dsize = asize;

	ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0);

	if (asize != 0 && spa->spa_deflate) {
		vdev_t *vd = vdev_lookup_top(spa, DVA_GET_VDEV(dva));
		if (vd != NULL)
			dsize = (asize >> SPA_MINBLOCKSHIFT) *
			    vd->vdev_deflate_ratio;
	}

	return (dsize);
}
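
/*
 * Worked example (illustrative): with spa_deflate set, a DVA whose asize is
 * 128 KiB on a vdev with vdev_deflate_ratio = 512 (the 1:1 case) yields
 * (131072 >> SPA_MINBLOCKSHIFT) * 512 = 256 * 512 = 128 KiB; a vdev with a
 * smaller deflate ratio (e.g. RAID-Z parity overhead) reports
 * proportionally less deflated size for the same allocation.
 */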

uint64_t
bp_get_dsize_sync(spa_t *spa, const blkptr_t *bp)
{
	uint64_t dsize = 0;

	for (int d = 0; d < BP_GET_NDVAS(bp); d++)
		dsize += dva_get_dsize_sync(spa, &bp->blk_dva[d]);

	return (dsize);
}

uint64_t
bp_get_dsize(spa_t *spa, const blkptr_t *bp)
{
	uint64_t dsize = 0;

	spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);

	for (int d = 0; d < BP_GET_NDVAS(bp); d++)
		dsize += dva_get_dsize_sync(spa, &bp->blk_dva[d]);

	spa_config_exit(spa, SCL_VDEV, FTAG);

	return (dsize);
}

uint64_t
spa_dirty_data(spa_t *spa)
{
	return (spa->spa_dsl_pool->dp_dirty_total);
}

/*
 * ==========================================================================
 * SPA Import Progress Routines
 * ==========================================================================
 */

typedef struct spa_import_progress {
	uint64_t		pool_guid;	/* unique id for updates */
	char			*pool_name;
	spa_load_state_t	spa_load_state;
	char			*spa_load_notes;
	uint64_t		mmp_sec_remaining;	/* MMP activity check */
	uint64_t		spa_load_max_txg;	/* rewind txg */
	procfs_list_node_t	smh_node;
} spa_import_progress_t;

spa_history_list_t *spa_import_progress_list = NULL;

static int
spa_import_progress_show_header(struct seq_file *f)
{
	seq_printf(f, "%-20s %-14s %-14s %-12s %-16s %s\n", "pool_guid",
	    "load_state", "multihost_secs", "max_txg",
	    "pool_name", "notes");
	return (0);
}

static int
spa_import_progress_show(struct seq_file *f, void *data)
{
	spa_import_progress_t *sip = (spa_import_progress_t *)data;

	seq_printf(f, "%-20llu %-14llu %-14llu %-12llu %-16s %s\n",
	    (u_longlong_t)sip->pool_guid, (u_longlong_t)sip->spa_load_state,
	    (u_longlong_t)sip->mmp_sec_remaining,
	    (u_longlong_t)sip->spa_load_max_txg,
	    (sip->pool_name ? sip->pool_name : "-"),
	    (sip->spa_load_notes ? sip->spa_load_notes : "-"));

	return (0);
}

/* Remove oldest elements from list until there are no more than 'size' left */
static void
spa_import_progress_truncate(spa_history_list_t *shl, unsigned int size)
{
	spa_import_progress_t *sip;
	while (shl->size > size) {
		sip = list_remove_head(&shl->procfs_list.pl_list);
		if (sip->pool_name)
			spa_strfree(sip->pool_name);
		if (sip->spa_load_notes)
			kmem_strfree(sip->spa_load_notes);
		kmem_free(sip, sizeof (spa_import_progress_t));
		shl->size--;
	}

	IMPLY(size == 0, list_is_empty(&shl->procfs_list.pl_list));
}

static void
spa_import_progress_init(void)
{
	spa_import_progress_list = kmem_zalloc(sizeof (spa_history_list_t),
	    KM_SLEEP);

	spa_import_progress_list->size = 0;

	spa_import_progress_list->procfs_list.pl_private =
	    spa_import_progress_list;

	procfs_list_install("zfs",
	    NULL,
	    "import_progress",
	    0644,
	    &spa_import_progress_list->procfs_list,
	    spa_import_progress_show,
	    spa_import_progress_show_header,
	    NULL,
	    offsetof(spa_import_progress_t, smh_node));
}

static void
spa_import_progress_destroy(void)
{
	spa_history_list_t *shl = spa_import_progress_list;
	procfs_list_uninstall(&shl->procfs_list);
	spa_import_progress_truncate(shl, 0);
	procfs_list_destroy(&shl->procfs_list);
	kmem_free(shl, sizeof (spa_history_list_t));
}

int
spa_import_progress_set_state(uint64_t pool_guid,
    spa_load_state_t load_state)
{
	spa_history_list_t *shl = spa_import_progress_list;
	spa_import_progress_t *sip;
	int error = ENOENT;

	if (shl->size == 0)
		return (0);

	mutex_enter(&shl->procfs_list.pl_lock);
	for (sip = list_tail(&shl->procfs_list.pl_list); sip != NULL;
	    sip = list_prev(&shl->procfs_list.pl_list, sip)) {
		if (sip->pool_guid == pool_guid) {
			sip->spa_load_state = load_state;
			if (sip->spa_load_notes != NULL) {
				kmem_strfree(sip->spa_load_notes);
				sip->spa_load_notes = NULL;
			}
			error = 0;
			break;
		}
	}
	mutex_exit(&shl->procfs_list.pl_lock);

	return (error);
}

static void
spa_import_progress_set_notes_impl(spa_t *spa, boolean_t log_dbgmsg,
    const char *fmt, va_list adx)
{
	spa_history_list_t *shl = spa_import_progress_list;
	spa_import_progress_t *sip;
	uint64_t pool_guid = spa_guid(spa);

	if (shl->size == 0)
		return;

	char *notes = kmem_vasprintf(fmt, adx);

	mutex_enter(&shl->procfs_list.pl_lock);
	for (sip = list_tail(&shl->procfs_list.pl_list); sip != NULL;
	    sip = list_prev(&shl->procfs_list.pl_list, sip)) {
		if (sip->pool_guid == pool_guid) {
			if (sip->spa_load_notes != NULL) {
				kmem_strfree(sip->spa_load_notes);
				sip->spa_load_notes = NULL;
			}
			sip->spa_load_notes = notes;
			if (log_dbgmsg)
				zfs_dbgmsg("'%s' %s", sip->pool_name, notes);
			notes = NULL;
			break;
		}
	}
	mutex_exit(&shl->procfs_list.pl_lock);
	if (notes != NULL)
		kmem_strfree(notes);
}

void
spa_import_progress_set_notes(spa_t *spa, const char *fmt, ...)
{
	va_list adx;

	va_start(adx, fmt);
	spa_import_progress_set_notes_impl(spa, B_TRUE, fmt, adx);
	va_end(adx);
}

void
spa_import_progress_set_notes_nolog(spa_t *spa, const char *fmt, ...)
{
	va_list adx;

	va_start(adx, fmt);
	spa_import_progress_set_notes_impl(spa, B_FALSE, fmt, adx);
	va_end(adx);
}

int
spa_import_progress_set_max_txg(uint64_t pool_guid, uint64_t load_max_txg)
{
	spa_history_list_t *shl = spa_import_progress_list;
	spa_import_progress_t *sip;
	int error = ENOENT;

	if (shl->size == 0)
		return (0);

	mutex_enter(&shl->procfs_list.pl_lock);
	for (sip = list_tail(&shl->procfs_list.pl_list); sip != NULL;
	    sip = list_prev(&shl->procfs_list.pl_list, sip)) {
		if (sip->pool_guid == pool_guid) {
			sip->spa_load_max_txg = load_max_txg;
			error = 0;
			break;
		}
	}
	mutex_exit(&shl->procfs_list.pl_lock);

	return (error);
}

int
spa_import_progress_set_mmp_check(uint64_t pool_guid,
    uint64_t mmp_sec_remaining)
{
	spa_history_list_t *shl = spa_import_progress_list;
	spa_import_progress_t *sip;
	int error = ENOENT;

	if (shl->size == 0)
		return (0);

	mutex_enter(&shl->procfs_list.pl_lock);
	for (sip = list_tail(&shl->procfs_list.pl_list); sip != NULL;
	    sip = list_prev(&shl->procfs_list.pl_list, sip)) {
		if (sip->pool_guid == pool_guid) {
			sip->mmp_sec_remaining = mmp_sec_remaining;
			error = 0;
			break;
		}
	}
	mutex_exit(&shl->procfs_list.pl_lock);

	return (error);
}

/*
 * A new import is in progress, add an entry.
 */
void
spa_import_progress_add(spa_t *spa)
{
	spa_history_list_t *shl = spa_import_progress_list;
	spa_import_progress_t *sip;
	const char *poolname = NULL;

	sip = kmem_zalloc(sizeof (spa_import_progress_t), KM_SLEEP);
	sip->pool_guid = spa_guid(spa);

	(void) nvlist_lookup_string(spa->spa_config, ZPOOL_CONFIG_POOL_NAME,
	    &poolname);
	if (poolname == NULL)
		poolname = spa_name(spa);
	sip->pool_name = spa_strdup(poolname);
	sip->spa_load_state = spa_load_state(spa);
	sip->spa_load_notes = NULL;

	mutex_enter(&shl->procfs_list.pl_lock);
	procfs_list_add(&shl->procfs_list, sip);
	shl->size++;
	mutex_exit(&shl->procfs_list.pl_lock);
}

void
spa_import_progress_remove(uint64_t pool_guid)
{
	spa_history_list_t *shl = spa_import_progress_list;
	spa_import_progress_t *sip;

	mutex_enter(&shl->procfs_list.pl_lock);
	for (sip = list_tail(&shl->procfs_list.pl_list); sip != NULL;
	    sip = list_prev(&shl->procfs_list.pl_list, sip)) {
		if (sip->pool_guid == pool_guid) {
			if (sip->pool_name)
				spa_strfree(sip->pool_name);
			/* notes come from kmem_vasprintf(); free to match */
			if (sip->spa_load_notes)
				kmem_strfree(sip->spa_load_notes);
			list_remove(&shl->procfs_list.pl_list, sip);
			shl->size--;
			kmem_free(sip, sizeof (spa_import_progress_t));
			break;
		}
	}
	mutex_exit(&shl->procfs_list.pl_lock);
}
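
/*
 * Lifecycle note (descriptive addition): spa_import_progress_add() is
 * called as an import begins, the set_state()/set_notes()/set_max_txg()
 * updaters fire as load phases advance, and spa_import_progress_remove()
 * drops the entry once the import completes or fails. While an import is
 * in flight, the entry is readable through the "import_progress" procfs
 * list installed above (on Linux, under /proc/spl/kstat/zfs/).
 */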

/*
 * ==========================================================================
 * Initialization and Termination
 * ==========================================================================
 */

static int
spa_name_compare(const void *a1, const void *a2)
{
	const spa_t *s1 = a1;
	const spa_t *s2 = a2;
	int s;

	s = strcmp(s1->spa_name, s2->spa_name);

	return (TREE_ISIGN(s));
}

void
spa_init(spa_mode_t mode)
{
	mutex_init(&spa_namespace_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa_spare_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa_l2cache_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&spa_namespace_cv, NULL, CV_DEFAULT, NULL);

	avl_create(&spa_namespace_avl, spa_name_compare, sizeof (spa_t),
	    offsetof(spa_t, spa_avl));

	avl_create(&spa_spare_avl, spa_spare_compare, sizeof (spa_aux_t),
	    offsetof(spa_aux_t, aux_avl));

	avl_create(&spa_l2cache_avl, spa_l2cache_compare, sizeof (spa_aux_t),
	    offsetof(spa_aux_t, aux_avl));

	spa_mode_global = mode;

#ifndef _KERNEL
	if (spa_mode_global != SPA_MODE_READ && dprintf_find_string("watch")) {
		struct sigaction sa;

		sa.sa_flags = SA_SIGINFO;
		sigemptyset(&sa.sa_mask);
		sa.sa_sigaction = arc_buf_sigsegv;

		if (sigaction(SIGSEGV, &sa, NULL) == -1) {
			perror("could not enable watchpoints: "
			    "sigaction(SIGSEGV, ...) = ");
		} else {
			arc_watch = B_TRUE;
		}
	}
#endif

	fm_init();
	zfs_refcount_init();
	unique_init();
	zfs_btree_init();
	metaslab_stat_init();
	brt_init();
	ddt_init();
	zio_init();
	dmu_init();
	zil_init();
	vdev_mirror_stat_init();
	vdev_raidz_math_init();
	vdev_file_init();
	zfs_prop_init();
	chksum_init();
	zpool_prop_init();
	zpool_feature_init();
	vdev_prop_init();
	l2arc_start();
	scan_init();
	qat_init();
	spa_import_progress_init();
	zap_init();
}

void
spa_fini(void)
{
	l2arc_stop();

	spa_evict_all();

	vdev_file_fini();
	vdev_mirror_stat_fini();
	vdev_raidz_math_fini();
	chksum_fini();
	zil_fini();
	dmu_fini();
	zio_fini();
	ddt_fini();
	brt_fini();
	metaslab_stat_fini();
	zfs_btree_fini();
	unique_fini();
	zfs_refcount_fini();
	fm_fini();
	scan_fini();
	qat_fini();
	spa_import_progress_destroy();
	zap_fini();

	avl_destroy(&spa_namespace_avl);
	avl_destroy(&spa_spare_avl);
	avl_destroy(&spa_l2cache_avl);

	cv_destroy(&spa_namespace_cv);
	mutex_destroy(&spa_namespace_lock);
	mutex_destroy(&spa_spare_lock);
	mutex_destroy(&spa_l2cache_lock);
}

boolean_t
spa_has_dedup(spa_t *spa)
{
	return (spa->spa_dedup_class->mc_groups != 0);
}

/*
 * Return whether this pool has a dedicated slog device. No locking needed.
 * It's not a problem if the wrong answer is returned as it's only for
 * performance and not correctness.
 */
boolean_t
spa_has_slogs(spa_t *spa)
{
	return (spa->spa_log_class->mc_groups != 0);
}

boolean_t
spa_has_special(spa_t *spa)
{
	return (spa->spa_special_class->mc_groups != 0);
}

spa_log_state_t
spa_get_log_state(spa_t *spa)
{
	return (spa->spa_log_state);
}

void
spa_set_log_state(spa_t *spa, spa_log_state_t state)
{
	spa->spa_log_state = state;
}

boolean_t
spa_is_root(spa_t *spa)
{
	return (spa->spa_is_root);
}

boolean_t
spa_writeable(spa_t *spa)
{
	return (!!(spa->spa_mode & SPA_MODE_WRITE) && spa->spa_trust_config);
}

/*
 * Returns true if there is a pending sync task in any of the current
 * syncing txg, the current quiescing txg, or the current open txg.
 */
boolean_t
spa_has_pending_synctask(spa_t *spa)
{
	return (!txg_all_lists_empty(&spa->spa_dsl_pool->dp_sync_tasks) ||
	    !txg_all_lists_empty(&spa->spa_dsl_pool->dp_early_sync_tasks));
}

spa_mode_t
spa_mode(spa_t *spa)
{
	return (spa->spa_mode);
}

uint64_t
spa_get_last_scrubbed_txg(spa_t *spa)
{
	return (spa->spa_scrubbed_last_txg);
}

uint64_t
spa_bootfs(spa_t *spa)
{
	return (spa->spa_bootfs);
}

uint64_t
spa_delegation(spa_t *spa)
{
	return (spa->spa_delegation);
}

objset_t *
spa_meta_objset(spa_t *spa)
{
	return (spa->spa_meta_objset);
}

enum zio_checksum
spa_dedup_checksum(spa_t *spa)
{
	return (spa->spa_dedup_checksum);
}

/*
 * Reset pool scan stats per scan pass (or reboot).
 */
void
spa_scan_stat_init(spa_t *spa)
{
	/* data not stored on disk */
	spa->spa_scan_pass_start = gethrestime_sec();
	if (dsl_scan_is_paused_scrub(spa->spa_dsl_pool->dp_scan))
		spa->spa_scan_pass_scrub_pause = spa->spa_scan_pass_start;
	else
		spa->spa_scan_pass_scrub_pause = 0;

	if (dsl_errorscrub_is_paused(spa->spa_dsl_pool->dp_scan))
		spa->spa_scan_pass_errorscrub_pause = spa->spa_scan_pass_start;
	else
		spa->spa_scan_pass_errorscrub_pause = 0;

	spa->spa_scan_pass_scrub_spent_paused = 0;
	spa->spa_scan_pass_exam = 0;
	spa->spa_scan_pass_issued = 0;

	/* error scrub stats */
	spa->spa_scan_pass_errorscrub_spent_paused = 0;
}

/*
 * Get scan stats for zpool status reports
 */
int
spa_scan_get_stats(spa_t *spa, pool_scan_stat_t *ps)
{
	dsl_scan_t *scn = spa->spa_dsl_pool ? spa->spa_dsl_pool->dp_scan : NULL;

	if (scn == NULL || (scn->scn_phys.scn_func == POOL_SCAN_NONE &&
	    scn->errorscrub_phys.dep_func == POOL_SCAN_NONE))
		return (SET_ERROR(ENOENT));

	memset(ps, 0, sizeof (pool_scan_stat_t));

	/* data stored on disk */
	ps->pss_func = scn->scn_phys.scn_func;
	ps->pss_state = scn->scn_phys.scn_state;
	ps->pss_start_time = scn->scn_phys.scn_start_time;
	ps->pss_end_time = scn->scn_phys.scn_end_time;
	ps->pss_to_examine = scn->scn_phys.scn_to_examine;
	ps->pss_examined = scn->scn_phys.scn_examined;
	ps->pss_skipped = scn->scn_phys.scn_skipped;
	ps->pss_processed = scn->scn_phys.scn_processed;
	ps->pss_errors = scn->scn_phys.scn_errors;

	/* data not stored on disk */
	ps->pss_pass_exam = spa->spa_scan_pass_exam;
	ps->pss_pass_start = spa->spa_scan_pass_start;
	ps->pss_pass_scrub_pause = spa->spa_scan_pass_scrub_pause;
	ps->pss_pass_scrub_spent_paused = spa->spa_scan_pass_scrub_spent_paused;
	ps->pss_pass_issued = spa->spa_scan_pass_issued;
	ps->pss_issued =
	    scn->scn_issued_before_pass + spa->spa_scan_pass_issued;

	/* error scrub data stored on disk */
	ps->pss_error_scrub_func = scn->errorscrub_phys.dep_func;
	ps->pss_error_scrub_state = scn->errorscrub_phys.dep_state;
	ps->pss_error_scrub_start = scn->errorscrub_phys.dep_start_time;
	ps->pss_error_scrub_end = scn->errorscrub_phys.dep_end_time;
	ps->pss_error_scrub_examined = scn->errorscrub_phys.dep_examined;
	ps->pss_error_scrub_to_be_examined =
	    scn->errorscrub_phys.dep_to_examine;

	/* error scrub data not stored on disk */
	ps->pss_pass_error_scrub_pause = spa->spa_scan_pass_errorscrub_pause;

	return (0);
}

int
spa_maxblocksize(spa_t *spa)
{
	if (spa_feature_is_enabled(spa, SPA_FEATURE_LARGE_BLOCKS))
		return (SPA_MAXBLOCKSIZE);
	else
		return (SPA_OLD_MAXBLOCKSIZE);
}

/*
 * Returns the txg that the last device removal completed. No indirect mappings
 * have been added since this txg.
 */
uint64_t
spa_get_last_removal_txg(spa_t *spa)
{
	uint64_t vdevid;
	uint64_t ret = -1ULL;

	spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
	/*
	 * sr_prev_indirect_vdev is only modified while holding all the
	 * config locks, so it is sufficient to hold SCL_VDEV as reader when
	 * examining it.
	 */
	vdevid = spa->spa_removing_phys.sr_prev_indirect_vdev;

	while (vdevid != -1ULL) {
		vdev_t *vd = vdev_lookup_top(spa, vdevid);
		vdev_indirect_births_t *vib = vd->vdev_indirect_births;

		ASSERT3P(vd->vdev_ops, ==, &vdev_indirect_ops);

		/*
		 * If the removal did not remap any data, we don't care.
		 */
		if (vdev_indirect_births_count(vib) != 0) {
			ret = vdev_indirect_births_last_entry_txg(vib);
			break;
		}

		vdevid = vd->vdev_indirect_config.vic_prev_indirect_vdev;
	}
	spa_config_exit(spa, SCL_VDEV, FTAG);

	IMPLY(ret != -1ULL,
	    spa_feature_is_active(spa, SPA_FEATURE_DEVICE_REMOVAL));

	return (ret);
}

int
spa_maxdnodesize(spa_t *spa)
{
	if (spa_feature_is_enabled(spa, SPA_FEATURE_LARGE_DNODE))
		return (DNODE_MAX_SIZE);
	else
		return (DNODE_MIN_SIZE);
}

boolean_t
spa_multihost(spa_t *spa)
{
	return (spa->spa_multihost ? B_TRUE : B_FALSE);
}

uint32_t
spa_get_hostid(spa_t *spa)
{
	return (spa->spa_hostid);
}

boolean_t
spa_trust_config(spa_t *spa)
{
	return (spa->spa_trust_config);
}

uint64_t
spa_missing_tvds_allowed(spa_t *spa)
{
	return (spa->spa_missing_tvds_allowed);
}

space_map_t *
spa_syncing_log_sm(spa_t *spa)
{
	return (spa->spa_syncing_log_sm);
}

void
spa_set_missing_tvds(spa_t *spa, uint64_t missing)
{
	spa->spa_missing_tvds = missing;
}

/*
 * Return the pool state string ("ONLINE", "DEGRADED", "SUSPENDED", etc).
 */
const char *
spa_state_to_name(spa_t *spa)
{
	ASSERT3P(spa, !=, NULL);

	/*
	 * It is possible for the spa to exist without a root vdev while it
	 * transitions during import/export.
	 */
	vdev_t *rvd = spa->spa_root_vdev;
	if (rvd == NULL) {
		return ("TRANSITIONING");
	}
	vdev_state_t state = rvd->vdev_state;
	vdev_aux_t aux = rvd->vdev_stat.vs_aux;

	if (spa_suspended(spa))
		return ("SUSPENDED");

	switch (state) {
	case VDEV_STATE_CLOSED:
	case VDEV_STATE_OFFLINE:
		return ("OFFLINE");
	case VDEV_STATE_REMOVED:
		return ("REMOVED");
	case VDEV_STATE_CANT_OPEN:
		if (aux == VDEV_AUX_CORRUPT_DATA || aux == VDEV_AUX_BAD_LOG)
			return ("FAULTED");
		else if (aux == VDEV_AUX_SPLIT_POOL)
			return ("SPLIT");
		else
			return ("UNAVAIL");
	case VDEV_STATE_FAULTED:
		return ("FAULTED");
	case VDEV_STATE_DEGRADED:
		return ("DEGRADED");
	case VDEV_STATE_HEALTHY:
		return ("ONLINE");
	default:
		break;
	}

	return ("UNKNOWN");
}

boolean_t
spa_top_vdevs_spacemap_addressable(spa_t *spa)
{
	vdev_t *rvd = spa->spa_root_vdev;
	for (uint64_t c = 0; c < rvd->vdev_children; c++) {
		if (!vdev_is_spacemap_addressable(rvd->vdev_child[c]))
			return (B_FALSE);
	}
	return (B_TRUE);
}

boolean_t
spa_has_checkpoint(spa_t *spa)
{
	return (spa->spa_checkpoint_txg != 0);
}

boolean_t
spa_importing_readonly_checkpoint(spa_t *spa)
{
	return ((spa->spa_import_flags & ZFS_IMPORT_CHECKPOINT) &&
	    spa->spa_mode == SPA_MODE_READ);
}

uint64_t
spa_min_claim_txg(spa_t *spa)
{
	uint64_t checkpoint_txg = spa->spa_uberblock.ub_checkpoint_txg;

	if (checkpoint_txg != 0)
		return (checkpoint_txg + 1);

	return (spa->spa_first_txg);
}

/*
 * If there is a checkpoint, async destroys may consume more space from
 * the pool instead of freeing it. In an attempt to save the pool from
 * getting suspended when it is about to run out of space, we stop
 * processing async destroys.
 */
boolean_t
spa_suspend_async_destroy(spa_t *spa)
{
	dsl_pool_t *dp = spa_get_dsl(spa);

	uint64_t unreserved = dsl_pool_unreserved_space(dp,
	    ZFS_SPACE_CHECK_EXTRA_RESERVED);
	uint64_t used = dsl_dir_phys(dp->dp_root_dir)->dd_used_bytes;
	uint64_t avail = (unreserved > used) ? (unreserved - used) : 0;

	if (spa_has_checkpoint(spa) && avail == 0)
		return (B_TRUE);

	return (B_FALSE);
}

#if defined(_KERNEL)

int
param_set_deadman_failmode_common(const char *val)
{
	spa_t *spa = NULL;
	char *p;

	if (val == NULL)
		return (SET_ERROR(EINVAL));

	if ((p = strchr(val, '\n')) != NULL)
		*p = '\0';

	if (strcmp(val, "wait") != 0 && strcmp(val, "continue") != 0 &&
	    strcmp(val, "panic") != 0)
		return (SET_ERROR(EINVAL));

	if (spa_mode_global != SPA_MODE_UNINIT) {
		spa_namespace_enter(FTAG);
		while ((spa = spa_next(spa)) != NULL)
			spa_set_deadman_failmode(spa, val);
		spa_namespace_exit(FTAG);
	}

	return (0);
}
#endif
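
/*
 * Usage sketch (illustrative): this handler backs the zfs_deadman_failmode
 * module parameter declared below, so on Linux an administrator might set
 * it at runtime with, e.g.:
 *
 *	echo continue > /sys/module/zfs/parameters/zfs_deadman_failmode
 *
 * The trailing newline from echo is stripped above before validation.
 */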

/* Namespace manipulation */
EXPORT_SYMBOL(spa_lookup);
EXPORT_SYMBOL(spa_add);
EXPORT_SYMBOL(spa_remove);
EXPORT_SYMBOL(spa_next);

/* Refcount functions */
EXPORT_SYMBOL(spa_open_ref);
EXPORT_SYMBOL(spa_close);
EXPORT_SYMBOL(spa_refcount_zero);

/* Pool configuration lock */
EXPORT_SYMBOL(spa_config_tryenter);
EXPORT_SYMBOL(spa_config_enter);
EXPORT_SYMBOL(spa_config_exit);
EXPORT_SYMBOL(spa_config_held);

/* Pool vdev add/remove lock */
EXPORT_SYMBOL(spa_vdev_enter);
EXPORT_SYMBOL(spa_vdev_exit);

/* Pool vdev state change lock */
EXPORT_SYMBOL(spa_vdev_state_enter);
EXPORT_SYMBOL(spa_vdev_state_exit);

/* Accessor functions */
EXPORT_SYMBOL(spa_shutting_down);
EXPORT_SYMBOL(spa_get_dsl);
EXPORT_SYMBOL(spa_get_rootblkptr);
EXPORT_SYMBOL(spa_set_rootblkptr);
EXPORT_SYMBOL(spa_altroot);
EXPORT_SYMBOL(spa_sync_pass);
EXPORT_SYMBOL(spa_name);
EXPORT_SYMBOL(spa_guid);
EXPORT_SYMBOL(spa_last_synced_txg);
EXPORT_SYMBOL(spa_first_txg);
EXPORT_SYMBOL(spa_syncing_txg);
EXPORT_SYMBOL(spa_version);
EXPORT_SYMBOL(spa_state);
EXPORT_SYMBOL(spa_load_state);
EXPORT_SYMBOL(spa_freeze_txg);
EXPORT_SYMBOL(spa_get_min_alloc_range);	/* for Lustre */
EXPORT_SYMBOL(spa_get_dspace);
EXPORT_SYMBOL(spa_update_dspace);
EXPORT_SYMBOL(spa_deflate);
EXPORT_SYMBOL(spa_normal_class);
EXPORT_SYMBOL(spa_log_class);
EXPORT_SYMBOL(spa_special_class);
EXPORT_SYMBOL(spa_preferred_class);
EXPORT_SYMBOL(spa_max_replication);
EXPORT_SYMBOL(spa_prev_software_version);
EXPORT_SYMBOL(spa_get_failmode);
EXPORT_SYMBOL(spa_suspended);
EXPORT_SYMBOL(spa_bootfs);
EXPORT_SYMBOL(spa_delegation);
EXPORT_SYMBOL(spa_meta_objset);
EXPORT_SYMBOL(spa_maxblocksize);
EXPORT_SYMBOL(spa_maxdnodesize);

/* Miscellaneous support routines */
EXPORT_SYMBOL(spa_guid_exists);
EXPORT_SYMBOL(spa_strdup);
EXPORT_SYMBOL(spa_strfree);
EXPORT_SYMBOL(spa_generate_guid);
EXPORT_SYMBOL(snprintf_blkptr);
EXPORT_SYMBOL(spa_freeze);
EXPORT_SYMBOL(spa_upgrade);
EXPORT_SYMBOL(spa_evict_all);
EXPORT_SYMBOL(spa_lookup_by_guid);
EXPORT_SYMBOL(spa_has_spare);
EXPORT_SYMBOL(dva_get_dsize_sync);
EXPORT_SYMBOL(bp_get_dsize_sync);
EXPORT_SYMBOL(bp_get_dsize);
EXPORT_SYMBOL(spa_has_slogs);
EXPORT_SYMBOL(spa_is_root);
EXPORT_SYMBOL(spa_writeable);
EXPORT_SYMBOL(spa_mode);
EXPORT_SYMBOL(spa_trust_config);
EXPORT_SYMBOL(spa_missing_tvds_allowed);
EXPORT_SYMBOL(spa_set_missing_tvds);
EXPORT_SYMBOL(spa_state_to_name);
EXPORT_SYMBOL(spa_importing_readonly_checkpoint);
EXPORT_SYMBOL(spa_min_claim_txg);
EXPORT_SYMBOL(spa_suspend_async_destroy);
EXPORT_SYMBOL(spa_has_checkpoint);
EXPORT_SYMBOL(spa_top_vdevs_spacemap_addressable);

ZFS_MODULE_PARAM(zfs, zfs_, flags, UINT, ZMOD_RW,
	"Set additional debugging flags");

ZFS_MODULE_PARAM(zfs, zfs_, recover, INT, ZMOD_RW,
	"Set to attempt to recover from fatal errors");

ZFS_MODULE_PARAM(zfs, zfs_, free_leak_on_eio, INT, ZMOD_RW,
	"Set to ignore IO errors during free and permanently leak the space");

ZFS_MODULE_PARAM(zfs_deadman, zfs_deadman_, checktime_ms, U64, ZMOD_RW,
	"Dead I/O check interval in milliseconds");

ZFS_MODULE_PARAM(zfs_deadman, zfs_deadman_, enabled, INT, ZMOD_RW,
	"Enable deadman timer");

ZFS_MODULE_PARAM(zfs_spa, spa_, asize_inflation, UINT, ZMOD_RW,
	"SPA size estimate multiplication factor");

ZFS_MODULE_PARAM(zfs, zfs_, ddt_data_is_special, INT, ZMOD_RW,
	"Place DDT data into the special class");

ZFS_MODULE_PARAM(zfs, zfs_, user_indirect_is_special, INT, ZMOD_RW,
	"Place user data indirect blocks into the special class");

ZFS_MODULE_PARAM_CALL(zfs_deadman, zfs_deadman_, failmode,
	param_set_deadman_failmode, param_get_charp, ZMOD_RW,
	"Failmode for deadman timer");

ZFS_MODULE_PARAM_CALL(zfs_deadman, zfs_deadman_, synctime_ms,
	param_set_deadman_synctime, spl_param_get_u64, ZMOD_RW,
	"Pool sync expiration time in milliseconds");

ZFS_MODULE_PARAM_CALL(zfs_deadman, zfs_deadman_, ziotime_ms,
	param_set_deadman_ziotime, spl_param_get_u64, ZMOD_RW,
	"IO expiration time in milliseconds");
ZFS_MODULE_PARAM(zfs, zfs_, special_class_metadata_reserve_pct, UINT, ZMOD_RW,
	"Small file blocks in special vdevs depend on this much "
	"free space available");

ZFS_MODULE_PARAM_CALL(zfs_spa, spa_, slop_shift, param_set_slop_shift,
	param_get_uint, ZMOD_RW, "Reserved free space in pool");

ZFS_MODULE_PARAM(zfs, spa_, num_allocators, INT, ZMOD_RW,
	"Number of allocators per spa");

ZFS_MODULE_PARAM(zfs, spa_, cpus_per_allocator, INT, ZMOD_RW,
	"Minimum number of CPUs per allocator");
3249