1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21 /*
22 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
23 * Copyright (c) 2011, 2019 by Delphix. All rights reserved.
24 * Copyright 2015 Nexenta Systems, Inc. All rights reserved.
25 * Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
26 * Copyright 2013 Saso Kiselkov. All rights reserved.
27 * Copyright (c) 2014 Integros [integros.com]
28 * Copyright (c) 2017 Datto Inc.
29 * Copyright 2019 Joyent, Inc.
30 * Copyright (c) 2017, Intel Corporation.
31 * Copyright 2020 Joyent, Inc.
32 * Copyright 2022 Oxide Computer Company
33 */
34
35 #include <sys/zfs_context.h>
36 #include <sys/spa_impl.h>
37 #include <sys/spa_boot.h>
38 #include <sys/zio.h>
39 #include <sys/zio_checksum.h>
40 #include <sys/zio_compress.h>
41 #include <sys/dmu.h>
42 #include <sys/dmu_tx.h>
43 #include <sys/zap.h>
44 #include <sys/zil.h>
45 #include <sys/vdev_impl.h>
46 #include <sys/vdev_initialize.h>
47 #include <sys/vdev_trim.h>
48 #include <sys/vdev_raidz.h>
49 #include <sys/metaslab.h>
50 #include <sys/uberblock_impl.h>
51 #include <sys/txg.h>
52 #include <sys/avl.h>
53 #include <sys/unique.h>
54 #include <sys/dsl_pool.h>
55 #include <sys/dsl_dir.h>
56 #include <sys/dsl_prop.h>
57 #include <sys/dsl_scan.h>
58 #include <sys/fs/zfs.h>
59 #include <sys/metaslab_impl.h>
60 #include <sys/arc.h>
61 #include <sys/ddt.h>
62 #include "zfs_prop.h"
63 #include "zfs_fletcher.h"
64 #include <sys/btree.h>
65 #include <sys/zfeature.h>
66
67 /*
68 * SPA locking
69 *
70 * There are three basic locks for managing spa_t structures:
71 *
72 * spa_namespace_lock (global mutex)
73 *
74 * This lock must be acquired to do any of the following:
75 *
76 * - Lookup a spa_t by name
77 * - Add or remove a spa_t from the namespace
78 * - Increase spa_refcount from non-zero
79 * - Check if spa_refcount is zero
80 * - Rename a spa_t
81 * - add/remove/attach/detach devices
82 * - Held for the duration of create/destroy/import/export
83 *
84 * It does not need to handle recursion. A create or destroy may
85 * reference objects (files or zvols) in other pools, but by
86 * definition they must have an existing reference, and will never need
87 * to lookup a spa_t by name.
88 *
89 * spa_refcount (per-spa zfs_refcount_t protected by mutex)
90 *
91  * This reference count keeps track of any active users of the spa_t.  The
92 * spa_t cannot be destroyed or freed while this is non-zero. Internally,
93 * the refcount is never really 'zero' - opening a pool implicitly keeps
94 * some references in the DMU. Internally we check against spa_minref, but
95 * present the image of a zero/non-zero value to consumers.
96 *
97 * spa_config_lock[] (per-spa array of rwlocks)
98 *
99 * This protects the spa_t from config changes, and must be held in
100 * the following circumstances:
101 *
102 * - RW_READER to perform I/O to the spa
103 * - RW_WRITER to change the vdev config
104 *
105 * The locking order is fairly straightforward:
106 *
107 * spa_namespace_lock -> spa_refcount
108 *
109 * The namespace lock must be acquired to increase the refcount from 0
110 * or to check if it is zero.
111 *
112 * spa_refcount -> spa_config_lock[]
113 *
114 * There must be at least one valid reference on the spa_t to acquire
115 * the config lock.
116 *
117 * spa_namespace_lock -> spa_config_lock[]
118 *
119 * The namespace lock must always be taken before the config lock.
120 *
121 *
122 * The spa_namespace_lock can be acquired directly and is globally visible.
123 *
124 * The namespace is manipulated using the following functions, all of which
125 * require the spa_namespace_lock to be held.
126 *
127 * spa_lookup() Lookup a spa_t by name.
128 *
129 * spa_add() Create a new spa_t in the namespace.
130 *
131 * spa_remove() Remove a spa_t from the namespace. This also
132 * frees up any memory associated with the spa_t.
133 *
134 * spa_next() Returns the next spa_t in the system, or the
135 * first if NULL is passed.
136 *
137 * spa_evict_all() Shutdown and remove all spa_t structures in
138 * the system.
139 *
140 * spa_guid_exists() Determine whether a pool/device guid exists.
141 *
142 * The spa_refcount is manipulated using the following functions:
143 *
144 * spa_open_ref() Adds a reference to the given spa_t. Must be
145 * called with spa_namespace_lock held if the
146 * refcount is currently zero.
147 *
148 * spa_close() Remove a reference from the spa_t. This will
149 * not free the spa_t or remove it from the
150 * namespace. No locking is required.
151 *
152 * spa_refcount_zero() Returns true if the refcount is currently
153 * zero. Must be called with spa_namespace_lock
154 * held.
155 *
156 * The spa_config_lock[] is an array of rwlocks, ordered as follows:
157 * SCL_CONFIG > SCL_STATE > SCL_ALLOC > SCL_ZIO > SCL_FREE > SCL_VDEV.
158 * spa_config_lock[] is manipulated with spa_config_{enter,exit,held}().
159 *
160 * To read the configuration, it suffices to hold one of these locks as reader.
161 * To modify the configuration, you must hold all locks as writer. To modify
162 * vdev state without altering the vdev tree's topology (e.g. online/offline),
163 * you must hold SCL_STATE and SCL_ZIO as writer.
164 *
165 * We use these distinct config locks to avoid recursive lock entry.
166 * For example, spa_sync() (which holds SCL_CONFIG as reader) induces
167 * block allocations (SCL_ALLOC), which may require reading space maps
168 * from disk (dmu_read() -> zio_read() -> SCL_ZIO).
169 *
170 * The spa config locks cannot be normal rwlocks because we need the
171 * ability to hand off ownership. For example, SCL_ZIO is acquired
172 * by the issuing thread and later released by an interrupt thread.
173 * They do, however, obey the usual write-wanted semantics to prevent
174 * writer (i.e. system administrator) starvation.
175 *
176 * The lock acquisition rules are as follows:
177 *
178 * SCL_CONFIG
179 * Protects changes to the vdev tree topology, such as vdev
180 * add/remove/attach/detach. Protects the dirty config list
181 * (spa_config_dirty_list) and the set of spares and l2arc devices.
182 *
183 * SCL_STATE
184 * Protects changes to pool state and vdev state, such as vdev
185 * online/offline/fault/degrade/clear. Protects the dirty state list
186 * (spa_state_dirty_list) and global pool state (spa_state).
187 *
188 * SCL_ALLOC
189 * Protects changes to metaslab groups and classes.
190 * Held as reader by metaslab_alloc() and metaslab_claim().
191 *
192 * SCL_ZIO
193 * Held by bp-level zios (those which have no io_vd upon entry)
194 * to prevent changes to the vdev tree. The bp-level zio implicitly
195 * protects all of its vdev child zios, which do not hold SCL_ZIO.
196 *
197 * SCL_FREE
198 * Protects changes to metaslab groups and classes.
199 * Held as reader by metaslab_free(). SCL_FREE is distinct from
200 * SCL_ALLOC, and lower than SCL_ZIO, so that we can safely free
201 * blocks in zio_done() while another i/o that holds either
202 * SCL_ALLOC or SCL_ZIO is waiting for this i/o to complete.
203 *
204 * SCL_VDEV
205 * Held as reader to prevent changes to the vdev tree during trivial
206 * inquiries such as bp_get_dsize(). SCL_VDEV is distinct from the
207 * other locks, and lower than all of them, to ensure that it's safe
208 * to acquire regardless of caller context.
209 *
210 * In addition, the following rules apply:
211 *
212 * (a) spa_props_lock protects pool properties, spa_config and spa_config_list.
213 * The lock ordering is SCL_CONFIG > spa_props_lock.
214 *
215 * (b) I/O operations on leaf vdevs. For any zio operation that takes
216 * an explicit vdev_t argument -- such as zio_ioctl(), zio_read_phys(),
217 * or zio_write_phys() -- the caller must ensure that the config cannot
218  * change in the interim, and that the vdev cannot be reopened.
219 * SCL_STATE as reader suffices for both.
220 *
221 * The vdev configuration is protected by spa_vdev_enter() / spa_vdev_exit().
222 *
223 * spa_vdev_enter() Acquire the namespace lock and the config lock
224 * for writing.
225 *
226 * spa_vdev_exit() Release the config lock, wait for all I/O
227 * to complete, sync the updated configs to the
228 * cache, and release the namespace lock.
229 *
230 * vdev state is protected by spa_vdev_state_enter() / spa_vdev_state_exit().
231 * Like spa_vdev_enter/exit, these are convenience wrappers -- the actual
232 * locking is, always, based on spa_namespace_lock and spa_config_lock[].
233 */
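/*
 * Illustrative usage sketch of the conventions described above.  This is not
 * part of this file's interfaces; error handling is omitted and the vdev
 * manipulation in the middle is purely hypothetical.
 *
 *	// Read side: hold the relevant config lock(s) as reader around any
 *	// inspection of the vdev tree.
 *	spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
 *	// ... walk spa->spa_root_vdev, compute sizes, etc. ...
 *	spa_config_exit(spa, SCL_VDEV, FTAG);
 *
 *	// Write side: spa_vdev_enter() takes spa_namespace_lock and all
 *	// config locks as writer and returns the next txg; spa_vdev_exit()
 *	// waits for that txg to sync and updates the config cache.
 *	uint64_t txg = spa_vdev_enter(spa);
 *	// ... add/remove/attach/detach vdevs ...
 *	return (spa_vdev_exit(spa, newvd, txg, error));
 */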
234
235 static avl_tree_t spa_namespace_avl;
236 kmutex_t spa_namespace_lock;
237 static kcondvar_t spa_namespace_cv;
238 static int spa_active_count;
239 int spa_max_replication_override = SPA_DVAS_PER_BP;
240
241 static kmutex_t spa_spare_lock;
242 static avl_tree_t spa_spare_avl;
243 static kmutex_t spa_l2cache_lock;
244 static avl_tree_t spa_l2cache_avl;
245
246 kmem_cache_t *spa_buffer_pool;
247 int spa_mode_global;
248
249 #ifdef ZFS_DEBUG
250 /*
251  * Everything except dprintf and indirect_remap is on by default
252 * in debug builds.
253 */
254 int zfs_flags = ~(ZFS_DEBUG_DPRINTF | ZFS_DEBUG_INDIRECT_REMAP);
255 #else
256 int zfs_flags = 0;
257 #endif
258
259 /*
260 * zfs_recover can be set to nonzero to attempt to recover from
261 * otherwise-fatal errors, typically caused by on-disk corruption. When
262 * set, calls to zfs_panic_recover() will turn into warning messages.
263 * This should only be used as a last resort, as it typically results
264 * in leaked space, or worse.
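 * On illumos this can be enabled at boot via /etc/system, e.g.
 * "set zfs:zfs_recover = 1" (shown for illustration only, not a recommendation).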
265 */
266 boolean_t zfs_recover = B_FALSE;
267
268 /*
269 * If destroy encounters an EIO while reading metadata (e.g. indirect
270 * blocks), space referenced by the missing metadata can not be freed.
271 * Normally this causes the background destroy to become "stalled", as
272 * it is unable to make forward progress. While in this stalled state,
273 * all remaining space to free from the error-encountering filesystem is
274 * "temporarily leaked". Set this flag to cause it to ignore the EIO,
275 * permanently leak the space from indirect blocks that can not be read,
276 * and continue to free everything else that it can.
277 *
278 * The default, "stalling" behavior is useful if the storage partially
279 * fails (i.e. some but not all i/os fail), and then later recovers. In
280 * this case, we will be able to continue pool operations while it is
281 * partially failed, and when it recovers, we can continue to free the
282 * space, with no leaks. However, note that this case is actually
283 * fairly rare.
284 *
285 * Typically pools either (a) fail completely (but perhaps temporarily,
286 * e.g. a top-level vdev going offline), or (b) have localized,
287 * permanent errors (e.g. disk returns the wrong data due to bit flip or
288 * firmware bug). In case (a), this setting does not matter because the
289 * pool will be suspended and the sync thread will not be able to make
290 * forward progress regardless. In case (b), because the error is
291 * permanent, the best we can do is leak the minimum amount of space,
292 * which is what setting this flag will do. Therefore, it is reasonable
293 * for this flag to normally be set, but we chose the more conservative
294 * approach of not setting it, so that there is no possibility of
295 * leaking space in the "partial temporary" failure case.
296 */
297 boolean_t zfs_free_leak_on_eio = B_FALSE;
298
299 /*
300  * Expiration time in milliseconds. This value has two meanings. First, it is
301  * used to determine when the spa_deadman() logic should fire. By default,
302  * spa_deadman() will fire if spa_sync() has not completed in 1000 seconds.
303  * Second, it determines whether an I/O is considered "hung": any I/O that
304  * has not completed within zfs_deadman_synctime_ms is considered hung,
305  * resulting in a system panic.
306 */
307 uint64_t zfs_deadman_synctime_ms = 1000000ULL;
308
309 /*
310 * Check time in milliseconds. This defines the frequency at which we check
311 * for hung I/O.
312 */
313 uint64_t zfs_deadman_checktime_ms = 5000ULL;
314
315 /*
316 * Override the zfs deadman behavior via /etc/system. By default the
317 * deadman is enabled except on VMware and sparc deployments.
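 * For example, "set zfs:zfs_deadman_enabled = 0" in /etc/system disables it
 * (an illustrative tunable setting).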
318 */
319 int zfs_deadman_enabled = -1;
320
321 #if defined(__amd64__) || defined(__i386__)
322 /*
323 * Should we allow the use of mechanisms that depend on saving and restoring
324 * the FPU state? This was disabled initially due to stability issues in
325 * the kernel FPU routines; see bug 13717. As of the fixes for 13902 and
326 * 13915, it has once again been enabled.
327 */
328 int zfs_fpu_enabled = 1;
329 #endif
330
331 /*
332 * The worst case is single-sector max-parity RAID-Z blocks, in which
333 * case the space requirement is exactly (VDEV_RAIDZ_MAXPARITY + 1)
334 * times the size; so just assume that. Add to this the fact that
335 * we can have up to 3 DVAs per bp, and one more factor of 2 because
336 * the block may be dittoed with up to 3 DVAs by ddt_sync(). All together,
337 * the worst case is:
338 * (VDEV_RAIDZ_MAXPARITY + 1) * SPA_DVAS_PER_BP * 2 == 24
339 */
340 int spa_asize_inflation = 24;
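/*
 * Illustrative arithmetic: with the default inflation factor of 24, a 128K
 * logical write is charged up to 24 * 128K = 3M of worst-case allocated
 * space, which is what spa_get_worst_case_asize() below returns for
 * lsize = 128K.
 */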
341
342 /*
343 * Normally, we don't allow the last 3.2% (1/(2^spa_slop_shift)) of space in
344 * the pool to be consumed. This ensures that we don't run the pool
345 * completely out of space, due to unaccounted changes (e.g. to the MOS).
346 * It also limits the worst-case time to allocate space. If we have
347 * less than this amount of free space, most ZPL operations (e.g. write,
348 * create) will return ENOSPC.
349 *
350 * Certain operations (e.g. file removal, most administrative actions) can
351 * use half the slop space. They will only return ENOSPC if less than half
352 * the slop space is free. Typically, once the pool has less than the slop
353 * space free, the user will use these operations to free up space in the pool.
354 * These are the operations that call dsl_pool_adjustedsize() with the netfree
355 * argument set to TRUE.
356 *
357 * Operations that are almost guaranteed to free up space in the absence of
358 * a pool checkpoint can use up to three quarters of the slop space
359  * (e.g. zfs destroy).
360 *
361 * A very restricted set of operations are always permitted, regardless of
362 * the amount of free space. These are the operations that call
363 * dsl_sync_task(ZFS_SPACE_CHECK_NONE). If these operations result in a net
364 * increase in the amount of space used, it is possible to run the pool
365 * completely out of space, causing it to be permanently read-only.
366 *
367 * Note that on very small pools, the slop space will be larger than
368 * 3.2%, in an effort to have it be at least spa_min_slop (128MB),
369 * but we never allow it to be more than half the pool size.
370 *
371 * See also the comments in zfs_space_check_t.
372 */
373 int spa_slop_shift = 5;
374 uint64_t spa_min_slop = 128 * 1024 * 1024;
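/*
 * Worked example (illustrative): with spa_slop_shift = 5, a 10T pool reserves
 * 10T / 32 = 320G of slop, while a 1G pool reserves MIN(1G / 2, 128M) = 128M,
 * i.e. the spa_min_slop floor applies.  See spa_get_slop_space() below.
 */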
375
376 int spa_allocators = 4;
377
378 /*PRINTFLIKE2*/
379 void
380 spa_load_failed(spa_t *spa, const char *fmt, ...)
381 {
382 va_list adx;
383 char buf[256];
384
385 va_start(adx, fmt);
386 (void) vsnprintf(buf, sizeof (buf), fmt, adx);
387 va_end(adx);
388
389 zfs_dbgmsg("spa_load(%s, config %s): FAILED: %s", spa->spa_name,
390 spa->spa_trust_config ? "trusted" : "untrusted", buf);
391 }
392
393 /*PRINTFLIKE2*/
394 void
395 spa_load_note(spa_t *spa, const char *fmt, ...)
396 {
397 va_list adx;
398 char buf[256];
399
400 va_start(adx, fmt);
401 (void) vsnprintf(buf, sizeof (buf), fmt, adx);
402 va_end(adx);
403
404 zfs_dbgmsg("spa_load(%s, config %s): %s", spa->spa_name,
405 spa->spa_trust_config ? "trusted" : "untrusted", buf);
406 }
407
408 /*
409  * By default, dedup and user data indirects land in the special class.
410 */
411 int zfs_ddt_data_is_special = B_TRUE;
412 int zfs_user_indirect_is_special = B_TRUE;
413
414 /*
415  * The percentage of special class space reserved for metadata only.
416  * Once (100 - zfs_special_class_metadata_reserve_pct) percent of the class
417  * has been allocated (75% by default), only metadata is let into the class.
418 */
419 int zfs_special_class_metadata_reserve_pct = 25;
420
421 /*
422 * ==========================================================================
423 * SPA config locking
424 * ==========================================================================
425 */
426 static void
427 spa_config_lock_init(spa_t *spa)
428 {
429 for (int i = 0; i < SCL_LOCKS; i++) {
430 spa_config_lock_t *scl = &spa->spa_config_lock[i];
431 mutex_init(&scl->scl_lock, NULL, MUTEX_DEFAULT, NULL);
432 cv_init(&scl->scl_cv, NULL, CV_DEFAULT, NULL);
433 zfs_refcount_create_untracked(&scl->scl_count);
434 scl->scl_writer = NULL;
435 scl->scl_write_wanted = 0;
436 }
437 }
438
439 static void
440 spa_config_lock_destroy(spa_t *spa)
441 {
442 for (int i = 0; i < SCL_LOCKS; i++) {
443 spa_config_lock_t *scl = &spa->spa_config_lock[i];
444 mutex_destroy(&scl->scl_lock);
445 cv_destroy(&scl->scl_cv);
446 zfs_refcount_destroy(&scl->scl_count);
447 ASSERT(scl->scl_writer == NULL);
448 ASSERT(scl->scl_write_wanted == 0);
449 }
450 }
451
452 int
453 spa_config_tryenter(spa_t *spa, int locks, void *tag, krw_t rw)
454 {
455 for (int i = 0; i < SCL_LOCKS; i++) {
456 spa_config_lock_t *scl = &spa->spa_config_lock[i];
457 if (!(locks & (1 << i)))
458 continue;
459 mutex_enter(&scl->scl_lock);
460 if (rw == RW_READER) {
461 if (scl->scl_writer || scl->scl_write_wanted) {
462 mutex_exit(&scl->scl_lock);
463 spa_config_exit(spa, locks & ((1 << i) - 1),
464 tag);
465 return (0);
466 }
467 } else {
468 ASSERT(scl->scl_writer != curthread);
469 if (!zfs_refcount_is_zero(&scl->scl_count)) {
470 mutex_exit(&scl->scl_lock);
471 spa_config_exit(spa, locks & ((1 << i) - 1),
472 tag);
473 return (0);
474 }
475 scl->scl_writer = curthread;
476 }
477 (void) zfs_refcount_add(&scl->scl_count, tag);
478 mutex_exit(&scl->scl_lock);
479 }
480 return (1);
481 }
482
483 void
484 spa_config_enter(spa_t *spa, int locks, void *tag, krw_t rw)
485 {
486 int wlocks_held = 0;
487
488 ASSERT3U(SCL_LOCKS, <, sizeof (wlocks_held) * NBBY);
489
490 for (int i = 0; i < SCL_LOCKS; i++) {
491 spa_config_lock_t *scl = &spa->spa_config_lock[i];
492 if (scl->scl_writer == curthread)
493 wlocks_held |= (1 << i);
494 if (!(locks & (1 << i)))
495 continue;
496 mutex_enter(&scl->scl_lock);
497 if (rw == RW_READER) {
498 while (scl->scl_writer || scl->scl_write_wanted) {
499 cv_wait(&scl->scl_cv, &scl->scl_lock);
500 }
501 } else {
502 ASSERT(scl->scl_writer != curthread);
503 while (!zfs_refcount_is_zero(&scl->scl_count)) {
504 scl->scl_write_wanted++;
505 cv_wait(&scl->scl_cv, &scl->scl_lock);
506 scl->scl_write_wanted--;
507 }
508 scl->scl_writer = curthread;
509 }
510 (void) zfs_refcount_add(&scl->scl_count, tag);
511 mutex_exit(&scl->scl_lock);
512 }
513 ASSERT3U(wlocks_held, <=, locks);
514 }
515
516 void
517 spa_config_exit(spa_t *spa, int locks, void *tag)
518 {
519 for (int i = SCL_LOCKS - 1; i >= 0; i--) {
520 spa_config_lock_t *scl = &spa->spa_config_lock[i];
521 if (!(locks & (1 << i)))
522 continue;
523 mutex_enter(&scl->scl_lock);
524 ASSERT(!zfs_refcount_is_zero(&scl->scl_count));
525 if (zfs_refcount_remove(&scl->scl_count, tag) == 0) {
526 ASSERT(scl->scl_writer == NULL ||
527 scl->scl_writer == curthread);
528 scl->scl_writer = NULL; /* OK in either case */
529 cv_broadcast(&scl->scl_cv);
530 }
531 mutex_exit(&scl->scl_lock);
532 }
533 }
534
535 int
536 spa_config_held(spa_t *spa, int locks, krw_t rw)
537 {
538 int locks_held = 0;
539
540 for (int i = 0; i < SCL_LOCKS; i++) {
541 spa_config_lock_t *scl = &spa->spa_config_lock[i];
542 if (!(locks & (1 << i)))
543 continue;
544 if ((rw == RW_READER &&
545 !zfs_refcount_is_zero(&scl->scl_count)) ||
546 (rw == RW_WRITER && scl->scl_writer == curthread))
547 locks_held |= 1 << i;
548 }
549
550 return (locks_held);
551 }
552
553 /*
554 * ==========================================================================
555 * SPA namespace functions
556 * ==========================================================================
557 */
558
559 /*
560 * Lookup the named spa_t in the AVL tree. The spa_namespace_lock must be held.
561 * Returns NULL if no matching spa_t is found.
562 */
563 spa_t *
564 spa_lookup(const char *name)
565 {
566 static spa_t search; /* spa_t is large; don't allocate on stack */
567 spa_t *spa;
568 avl_index_t where;
569 char *cp;
570
571 ASSERT(MUTEX_HELD(&spa_namespace_lock));
572
573 (void) strlcpy(search.spa_name, name, sizeof (search.spa_name));
574
575 /*
576 * If it's a full dataset name, figure out the pool name and
577 * just use that.
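 * For example, looking up "tank/home@yesterday" searches for pool "tank".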
578 */
579 cp = strpbrk(search.spa_name, "/@#");
580 if (cp != NULL)
581 *cp = '\0';
582
583 spa = avl_find(&spa_namespace_avl, &search, &where);
584
585 return (spa);
586 }
587
588 /*
589 * Fires when spa_sync has not completed within zfs_deadman_synctime_ms.
590 * If the zfs_deadman_enabled flag is set then it inspects all vdev queues
591 * looking for potentially hung I/Os.
592 */
593 void
594 spa_deadman(void *arg)
595 {
596 spa_t *spa = arg;
597
598 /*
599 * Disable the deadman timer if the pool is suspended.
600 */
601 if (spa_suspended(spa)) {
602 VERIFY(cyclic_reprogram(spa->spa_deadman_cycid, CY_INFINITY));
603 return;
604 }
605
606 zfs_dbgmsg("slow spa_sync: started %llu seconds ago, calls %llu",
607 (gethrtime() - spa->spa_sync_starttime) / NANOSEC,
608 ++spa->spa_deadman_calls);
609 if (zfs_deadman_enabled)
610 vdev_deadman(spa->spa_root_vdev);
611 }
612
613 int
614 spa_log_sm_sort_by_txg(const void *va, const void *vb)
615 {
616 const spa_log_sm_t *a = va;
617 const spa_log_sm_t *b = vb;
618
619 return (TREE_CMP(a->sls_txg, b->sls_txg));
620 }
621
622 /*
623 * Create an uninitialized spa_t with the given name. Requires
624 * spa_namespace_lock. The caller must ensure that the spa_t doesn't already
625 * exist by calling spa_lookup() first.
626 */
627 spa_t *
628 spa_add(const char *name, nvlist_t *config, const char *altroot)
629 {
630 spa_t *spa;
631 spa_config_dirent_t *dp;
632 cyc_handler_t hdlr;
633 cyc_time_t when;
634
635 ASSERT(MUTEX_HELD(&spa_namespace_lock));
636
637 spa = kmem_zalloc(sizeof (spa_t), KM_SLEEP);
638
639 mutex_init(&spa->spa_async_lock, NULL, MUTEX_DEFAULT, NULL);
640 mutex_init(&spa->spa_errlist_lock, NULL, MUTEX_DEFAULT, NULL);
641 mutex_init(&spa->spa_errlog_lock, NULL, MUTEX_DEFAULT, NULL);
642 mutex_init(&spa->spa_evicting_os_lock, NULL, MUTEX_DEFAULT, NULL);
643 mutex_init(&spa->spa_history_lock, NULL, MUTEX_DEFAULT, NULL);
644 mutex_init(&spa->spa_proc_lock, NULL, MUTEX_DEFAULT, NULL);
645 mutex_init(&spa->spa_props_lock, NULL, MUTEX_DEFAULT, NULL);
646 mutex_init(&spa->spa_cksum_tmpls_lock, NULL, MUTEX_DEFAULT, NULL);
647 mutex_init(&spa->spa_scrub_lock, NULL, MUTEX_DEFAULT, NULL);
648 mutex_init(&spa->spa_suspend_lock, NULL, MUTEX_DEFAULT, NULL);
649 mutex_init(&spa->spa_vdev_top_lock, NULL, MUTEX_DEFAULT, NULL);
650 mutex_init(&spa->spa_iokstat_lock, NULL, MUTEX_DEFAULT, NULL);
651 mutex_init(&spa->spa_flushed_ms_lock, NULL, MUTEX_DEFAULT, NULL);
652 mutex_init(&spa->spa_imp_kstat_lock, NULL, MUTEX_DEFAULT, NULL);
653
654 cv_init(&spa->spa_async_cv, NULL, CV_DEFAULT, NULL);
655 cv_init(&spa->spa_evicting_os_cv, NULL, CV_DEFAULT, NULL);
656 cv_init(&spa->spa_proc_cv, NULL, CV_DEFAULT, NULL);
657 cv_init(&spa->spa_scrub_io_cv, NULL, CV_DEFAULT, NULL);
658 cv_init(&spa->spa_suspend_cv, NULL, CV_DEFAULT, NULL);
659
660 for (int t = 0; t < TXG_SIZE; t++)
661 bplist_create(&spa->spa_free_bplist[t]);
662
663 (void) strlcpy(spa->spa_name, name, sizeof (spa->spa_name));
664 spa->spa_state = POOL_STATE_UNINITIALIZED;
665 spa->spa_freeze_txg = UINT64_MAX;
666 spa->spa_final_txg = UINT64_MAX;
667 spa->spa_load_max_txg = UINT64_MAX;
668 spa->spa_proc = &p0;
669 spa->spa_proc_state = SPA_PROC_NONE;
670 spa->spa_trust_config = B_TRUE;
671
672 hdlr.cyh_func = spa_deadman;
673 hdlr.cyh_arg = spa;
674 hdlr.cyh_level = CY_LOW_LEVEL;
675
676 spa->spa_deadman_synctime = MSEC2NSEC(zfs_deadman_synctime_ms);
677
678 /*
679 * This determines how often we need to check for hung I/Os after
680 * the cyclic has already fired. Since checking for hung I/Os is
681 * an expensive operation we don't want to check too frequently.
682 * Instead wait for 5 seconds before checking again.
683 */
684 when.cyt_interval = MSEC2NSEC(zfs_deadman_checktime_ms);
685 when.cyt_when = CY_INFINITY;
686 mutex_enter(&cpu_lock);
687 spa->spa_deadman_cycid = cyclic_add(&hdlr, &when);
688 mutex_exit(&cpu_lock);
689
690 zfs_refcount_create(&spa->spa_refcount);
691 spa_config_lock_init(spa);
692
693 avl_add(&spa_namespace_avl, spa);
694
695 /*
696 * Set the alternate root, if there is one.
697 */
698 if (altroot) {
699 spa->spa_root = spa_strdup(altroot);
700 spa_active_count++;
701 }
702
703 spa->spa_alloc_count = spa_allocators;
704 spa->spa_alloc_locks = kmem_zalloc(spa->spa_alloc_count *
705 sizeof (kmutex_t), KM_SLEEP);
706 spa->spa_alloc_trees = kmem_zalloc(spa->spa_alloc_count *
707 sizeof (avl_tree_t), KM_SLEEP);
708 for (int i = 0; i < spa->spa_alloc_count; i++) {
709 mutex_init(&spa->spa_alloc_locks[i], NULL, MUTEX_DEFAULT, NULL);
710 avl_create(&spa->spa_alloc_trees[i], zio_bookmark_compare,
711 sizeof (zio_t), offsetof(zio_t, io_alloc_node));
712 }
713 avl_create(&spa->spa_metaslabs_by_flushed, metaslab_sort_by_flushed,
714 sizeof (metaslab_t), offsetof(metaslab_t, ms_spa_txg_node));
715 avl_create(&spa->spa_sm_logs_by_txg, spa_log_sm_sort_by_txg,
716 sizeof (spa_log_sm_t), offsetof(spa_log_sm_t, sls_node));
717 list_create(&spa->spa_log_summary, sizeof (log_summary_entry_t),
718 offsetof(log_summary_entry_t, lse_node));
719
720 /*
721  * Every pool starts with the default cachefile.
722 */
723 list_create(&spa->spa_config_list, sizeof (spa_config_dirent_t),
724 offsetof(spa_config_dirent_t, scd_link));
725
726 dp = kmem_zalloc(sizeof (spa_config_dirent_t), KM_SLEEP);
727 dp->scd_path = altroot ? NULL : spa_strdup(spa_config_path);
728 list_insert_head(&spa->spa_config_list, dp);
729
730 VERIFY(nvlist_alloc(&spa->spa_load_info, NV_UNIQUE_NAME,
731 KM_SLEEP) == 0);
732
733 if (config != NULL) {
734 nvlist_t *features;
735
736 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_FEATURES_FOR_READ,
737 &features) == 0) {
738 VERIFY(nvlist_dup(features, &spa->spa_label_features,
739 0) == 0);
740 }
741
742 VERIFY(nvlist_dup(config, &spa->spa_config, 0) == 0);
743 }
744
745 if (spa->spa_label_features == NULL) {
746 VERIFY(nvlist_alloc(&spa->spa_label_features, NV_UNIQUE_NAME,
747 KM_SLEEP) == 0);
748 }
749
750 spa->spa_iokstat = kstat_create("zfs", 0, name,
751 "disk", KSTAT_TYPE_IO, 1, 0);
752 if (spa->spa_iokstat) {
753 spa->spa_iokstat->ks_lock = &spa->spa_iokstat_lock;
754 kstat_install(spa->spa_iokstat);
755 }
756
757 spa->spa_min_ashift = INT_MAX;
758 spa->spa_max_ashift = 0;
759
760 /*
761 * As a pool is being created, treat all features as disabled by
762 * setting SPA_FEATURE_DISABLED for all entries in the feature
763 * refcount cache.
764 */
765 for (int i = 0; i < SPA_FEATURES; i++) {
766 spa->spa_feat_refcount_cache[i] = SPA_FEATURE_DISABLED;
767 }
768
769 list_create(&spa->spa_leaf_list, sizeof (vdev_t),
770 offsetof(vdev_t, vdev_leaf_node));
771
772 return (spa);
773 }
774
775 /*
776 * Removes a spa_t from the namespace, freeing up any memory used. Requires
777 * spa_namespace_lock. This is called only after the spa_t has been closed and
778 * deactivated.
779 */
780 void
781 spa_remove(spa_t *spa)
782 {
783 spa_config_dirent_t *dp;
784
785 ASSERT(MUTEX_HELD(&spa_namespace_lock));
786 ASSERT(spa_state(spa) == POOL_STATE_UNINITIALIZED);
787 ASSERT3U(zfs_refcount_count(&spa->spa_refcount), ==, 0);
788
789 nvlist_free(spa->spa_config_splitting);
790
791 avl_remove(&spa_namespace_avl, spa);
792 cv_broadcast(&spa_namespace_cv);
793
794 if (spa->spa_root) {
795 spa_strfree(spa->spa_root);
796 spa_active_count--;
797 }
798
799 while ((dp = list_head(&spa->spa_config_list)) != NULL) {
800 list_remove(&spa->spa_config_list, dp);
801 if (dp->scd_path != NULL)
802 spa_strfree(dp->scd_path);
803 kmem_free(dp, sizeof (spa_config_dirent_t));
804 }
805
806 for (int i = 0; i < spa->spa_alloc_count; i++) {
807 avl_destroy(&spa->spa_alloc_trees[i]);
808 mutex_destroy(&spa->spa_alloc_locks[i]);
809 }
810 kmem_free(spa->spa_alloc_locks, spa->spa_alloc_count *
811 sizeof (kmutex_t));
812 kmem_free(spa->spa_alloc_trees, spa->spa_alloc_count *
813 sizeof (avl_tree_t));
814
815 avl_destroy(&spa->spa_metaslabs_by_flushed);
816 avl_destroy(&spa->spa_sm_logs_by_txg);
817 list_destroy(&spa->spa_log_summary);
818 list_destroy(&spa->spa_config_list);
819 list_destroy(&spa->spa_leaf_list);
820
821 nvlist_free(spa->spa_label_features);
822 nvlist_free(spa->spa_load_info);
823 spa_config_set(spa, NULL);
824
825 mutex_enter(&cpu_lock);
826 if (spa->spa_deadman_cycid != CYCLIC_NONE)
827 cyclic_remove(spa->spa_deadman_cycid);
828 mutex_exit(&cpu_lock);
829 spa->spa_deadman_cycid = CYCLIC_NONE;
830
831 zfs_refcount_destroy(&spa->spa_refcount);
832
833 spa_config_lock_destroy(spa);
834
835 kstat_delete(spa->spa_iokstat);
836 spa->spa_iokstat = NULL;
837
838 for (int t = 0; t < TXG_SIZE; t++)
839 bplist_destroy(&spa->spa_free_bplist[t]);
840
841 zio_checksum_templates_free(spa);
842
843 cv_destroy(&spa->spa_async_cv);
844 cv_destroy(&spa->spa_evicting_os_cv);
845 cv_destroy(&spa->spa_proc_cv);
846 cv_destroy(&spa->spa_scrub_io_cv);
847 cv_destroy(&spa->spa_suspend_cv);
848
849 mutex_destroy(&spa->spa_flushed_ms_lock);
850 mutex_destroy(&spa->spa_async_lock);
851 mutex_destroy(&spa->spa_errlist_lock);
852 mutex_destroy(&spa->spa_errlog_lock);
853 mutex_destroy(&spa->spa_evicting_os_lock);
854 mutex_destroy(&spa->spa_history_lock);
855 mutex_destroy(&spa->spa_proc_lock);
856 mutex_destroy(&spa->spa_props_lock);
857 mutex_destroy(&spa->spa_cksum_tmpls_lock);
858 mutex_destroy(&spa->spa_scrub_lock);
859 mutex_destroy(&spa->spa_suspend_lock);
860 mutex_destroy(&spa->spa_vdev_top_lock);
861 mutex_destroy(&spa->spa_iokstat_lock);
862 mutex_destroy(&spa->spa_imp_kstat_lock);
863
864 kmem_free(spa, sizeof (spa_t));
865 }
866
867 /*
868 * Given a pool, return the next pool in the namespace, or NULL if there is
869 * none. If 'prev' is NULL, return the first pool.
870 */
871 spa_t *
872 spa_next(spa_t *prev)
873 {
874 ASSERT(MUTEX_HELD(&spa_namespace_lock));
875
876 if (prev)
877 return (AVL_NEXT(&spa_namespace_avl, prev));
878 else
879 return (avl_first(&spa_namespace_avl));
880 }
881
882 /*
883 * ==========================================================================
884 * SPA refcount functions
885 * ==========================================================================
886 */
887
888 /*
889 * Add a reference to the given spa_t. Must have at least one reference, or
890 * have the namespace lock held.
891 */
892 void
893 spa_open_ref(spa_t *spa, void *tag)
894 {
895 ASSERT(zfs_refcount_count(&spa->spa_refcount) >= spa->spa_minref ||
896 MUTEX_HELD(&spa_namespace_lock));
897 (void) zfs_refcount_add(&spa->spa_refcount, tag);
898 }
899
900 /*
901 * Remove a reference to the given spa_t. Must have at least one reference, or
902 * have the namespace lock held.
903 */
904 void
905 spa_close(spa_t *spa, void *tag)
906 {
907 ASSERT(zfs_refcount_count(&spa->spa_refcount) > spa->spa_minref ||
908 MUTEX_HELD(&spa_namespace_lock));
909 (void) zfs_refcount_remove(&spa->spa_refcount, tag);
910 }
911
912 /*
913 * Remove a reference to the given spa_t held by a dsl dir that is
914 * being asynchronously released. Async releases occur from a taskq
915 * performing eviction of dsl datasets and dirs. The namespace lock
916 * isn't held and the hold by the object being evicted may contribute to
917 * spa_minref (e.g. dataset or directory released during pool export),
918 * so the asserts in spa_close() do not apply.
919 */
920 void
921 spa_async_close(spa_t *spa, void *tag)
922 {
923 (void) zfs_refcount_remove(&spa->spa_refcount, tag);
924 }
925
926 /*
927 * Check to see if the spa refcount is zero. Must be called with
928 * spa_namespace_lock held. We really compare against spa_minref, which is the
929  * number of references acquired when opening a pool.
930 */
931 boolean_t
932 spa_refcount_zero(spa_t *spa)
933 {
934 ASSERT(MUTEX_HELD(&spa_namespace_lock));
935
936 return (zfs_refcount_count(&spa->spa_refcount) == spa->spa_minref);
937 }
938
939 /*
940 * ==========================================================================
941 * SPA spare and l2cache tracking
942 * ==========================================================================
943 */
944
945 /*
946 * Hot spares and cache devices are tracked using the same code below,
947 * for 'auxiliary' devices.
948 */
949
950 typedef struct spa_aux {
951 uint64_t aux_guid;
952 uint64_t aux_pool;
953 avl_node_t aux_avl;
954 int aux_count;
955 } spa_aux_t;
956
957 static inline int
958 spa_aux_compare(const void *a, const void *b)
959 {
960 const spa_aux_t *sa = (const spa_aux_t *)a;
961 const spa_aux_t *sb = (const spa_aux_t *)b;
962
963 return (TREE_CMP(sa->aux_guid, sb->aux_guid));
964 }
965
966 void
967 spa_aux_add(vdev_t *vd, avl_tree_t *avl)
968 {
969 avl_index_t where;
970 spa_aux_t search;
971 spa_aux_t *aux;
972
973 search.aux_guid = vd->vdev_guid;
974 if ((aux = avl_find(avl, &search, &where)) != NULL) {
975 aux->aux_count++;
976 } else {
977 aux = kmem_zalloc(sizeof (spa_aux_t), KM_SLEEP);
978 aux->aux_guid = vd->vdev_guid;
979 aux->aux_count = 1;
980 avl_insert(avl, aux, where);
981 }
982 }
983
984 void
985 spa_aux_remove(vdev_t *vd, avl_tree_t *avl)
986 {
987 spa_aux_t search;
988 spa_aux_t *aux;
989 avl_index_t where;
990
991 search.aux_guid = vd->vdev_guid;
992 aux = avl_find(avl, &search, &where);
993
994 ASSERT(aux != NULL);
995
996 if (--aux->aux_count == 0) {
997 avl_remove(avl, aux);
998 kmem_free(aux, sizeof (spa_aux_t));
999 } else if (aux->aux_pool == spa_guid(vd->vdev_spa)) {
1000 aux->aux_pool = 0ULL;
1001 }
1002 }
1003
1004 boolean_t
1005 spa_aux_exists(uint64_t guid, uint64_t *pool, int *refcnt, avl_tree_t *avl)
1006 {
1007 spa_aux_t search, *found;
1008
1009 search.aux_guid = guid;
1010 found = avl_find(avl, &search, NULL);
1011
1012 if (pool) {
1013 if (found)
1014 *pool = found->aux_pool;
1015 else
1016 *pool = 0ULL;
1017 }
1018
1019 if (refcnt) {
1020 if (found)
1021 *refcnt = found->aux_count;
1022 else
1023 *refcnt = 0;
1024 }
1025
1026 return (found != NULL);
1027 }
1028
1029 void
1030 spa_aux_activate(vdev_t *vd, avl_tree_t *avl)
1031 {
1032 spa_aux_t search, *found;
1033 avl_index_t where;
1034
1035 search.aux_guid = vd->vdev_guid;
1036 found = avl_find(avl, &search, &where);
1037 ASSERT(found != NULL);
1038 ASSERT(found->aux_pool == 0ULL);
1039
1040 found->aux_pool = spa_guid(vd->vdev_spa);
1041 }
1042
1043 /*
1044 * Spares are tracked globally due to the following constraints:
1045 *
1046 * - A spare may be part of multiple pools.
1047 * - A spare may be added to a pool even if it's actively in use within
1048 * another pool.
1049 * - A spare in use in any pool can only be the source of a replacement if
1050 * the target is a spare in the same pool.
1051 *
1052 * We keep track of all spares on the system through the use of a reference
1053 * counted AVL tree. When a vdev is added as a spare, or used as a replacement
1054 * spare, then we bump the reference count in the AVL tree. In addition, we set
1055 * the 'vdev_isspare' member to indicate that the device is a spare (active or
1056 * inactive). When a spare is made active (used to replace a device in the
1057  * pool), we also keep track of which pool it's been made a part of.
1058 *
1059 * The 'spa_spare_lock' protects the AVL tree. These functions are normally
1060 * called under the spa_namespace lock as part of vdev reconfiguration. The
1061 * separate spare lock exists for the status query path, which does not need to
1062 * be completely consistent with respect to other vdev configuration changes.
1063 */
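/*
 * Illustrative sketch (hypothetical caller, not an interface of this file):
 * before using a device as a spare, a caller could ask whether its guid is
 * already a known spare and, if so, whether some pool is actively using it:
 *
 *	uint64_t pool;
 *	if (spa_spare_exists(vd->vdev_guid, &pool, NULL) && pool != 0ULL)
 *		// ... the spare is already in active use by pool 'pool' ...
 */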
1064
1065 /*
1066 * Poll the spare vdevs to make sure they are not faulty.
1067 *
1068 * The probe operation will raise an ENXIO error and create an FM ereport if the
1069 * probe fails.
1070 */
1071 void
1072 spa_spare_poll(spa_t *spa)
1073 {
1074 boolean_t async_request = B_FALSE;
1075 spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
1076 for (int i = 0; i < spa->spa_spares.sav_count; i++) {
1077 spa_aux_t search, *found;
1078 vdev_t *vd = spa->spa_spares.sav_vdevs[i];
1079
1080 search.aux_guid = vd->vdev_guid;
1081
1082 mutex_enter(&spa_spare_lock);
1083 found = avl_find(&spa_spare_avl, &search, NULL);
1084 /* This spare is in use by a pool. */
1085 if (found != NULL && found->aux_pool != 0) {
1086 mutex_exit(&spa_spare_lock);
1087 continue;
1088 }
1089 mutex_exit(&spa_spare_lock);
1090
1091 vd->vdev_probe_wanted = B_TRUE;
1092 async_request = B_TRUE;
1093 }
1094 if (async_request)
1095 spa_async_request(spa, SPA_ASYNC_PROBE);
1096
1097 spa_config_exit(spa, SCL_STATE, FTAG);
1098 }
1099
1100 static int
1101 spa_spare_compare(const void *a, const void *b)
1102 {
1103 return (spa_aux_compare(a, b));
1104 }
1105
1106 void
1107 spa_spare_add(vdev_t *vd)
1108 {
1109 mutex_enter(&spa_spare_lock);
1110 ASSERT(!vd->vdev_isspare);
1111 spa_aux_add(vd, &spa_spare_avl);
1112 vd->vdev_isspare = B_TRUE;
1113 mutex_exit(&spa_spare_lock);
1114 }
1115
1116 void
1117 spa_spare_remove(vdev_t *vd)
1118 {
1119 mutex_enter(&spa_spare_lock);
1120 ASSERT(vd->vdev_isspare);
1121 spa_aux_remove(vd, &spa_spare_avl);
1122 vd->vdev_isspare = B_FALSE;
1123 mutex_exit(&spa_spare_lock);
1124 }
1125
1126 boolean_t
1127 spa_spare_exists(uint64_t guid, uint64_t *pool, int *refcnt)
1128 {
1129 boolean_t found;
1130
1131 mutex_enter(&spa_spare_lock);
1132 found = spa_aux_exists(guid, pool, refcnt, &spa_spare_avl);
1133 mutex_exit(&spa_spare_lock);
1134
1135 return (found);
1136 }
1137
1138 void
1139 spa_spare_activate(vdev_t *vd)
1140 {
1141 mutex_enter(&spa_spare_lock);
1142 ASSERT(vd->vdev_isspare);
1143 spa_aux_activate(vd, &spa_spare_avl);
1144 mutex_exit(&spa_spare_lock);
1145 }
1146
1147 /*
1148 * Level 2 ARC devices are tracked globally for the same reasons as spares.
1149 * Cache devices currently only support one pool per cache device, and so
1150 * for these devices the aux reference count is currently unused beyond 1.
1151 */
1152
1153 static int
1154 spa_l2cache_compare(const void *a, const void *b)
1155 {
1156 return (spa_aux_compare(a, b));
1157 }
1158
1159 void
1160 spa_l2cache_add(vdev_t *vd)
1161 {
1162 mutex_enter(&spa_l2cache_lock);
1163 ASSERT(!vd->vdev_isl2cache);
1164 spa_aux_add(vd, &spa_l2cache_avl);
1165 vd->vdev_isl2cache = B_TRUE;
1166 mutex_exit(&spa_l2cache_lock);
1167 }
1168
1169 void
1170 spa_l2cache_remove(vdev_t *vd)
1171 {
1172 mutex_enter(&spa_l2cache_lock);
1173 ASSERT(vd->vdev_isl2cache);
1174 spa_aux_remove(vd, &spa_l2cache_avl);
1175 vd->vdev_isl2cache = B_FALSE;
1176 mutex_exit(&spa_l2cache_lock);
1177 }
1178
1179 boolean_t
1180 spa_l2cache_exists(uint64_t guid, uint64_t *pool)
1181 {
1182 boolean_t found;
1183
1184 mutex_enter(&spa_l2cache_lock);
1185 found = spa_aux_exists(guid, pool, NULL, &spa_l2cache_avl);
1186 mutex_exit(&spa_l2cache_lock);
1187
1188 return (found);
1189 }
1190
1191 void
1192 spa_l2cache_activate(vdev_t *vd)
1193 {
1194 mutex_enter(&spa_l2cache_lock);
1195 ASSERT(vd->vdev_isl2cache);
1196 spa_aux_activate(vd, &spa_l2cache_avl);
1197 mutex_exit(&spa_l2cache_lock);
1198 }
1199
1200 /*
1201 * ==========================================================================
1202 * SPA vdev locking
1203 * ==========================================================================
1204 */
1205
1206 /*
1207 * Lock the given spa_t for the purpose of adding or removing a vdev.
1208 * Grabs the global spa_namespace_lock plus the spa config lock for writing.
1209 * It returns the next transaction group for the spa_t.
1210 */
1211 uint64_t
1212 spa_vdev_enter(spa_t *spa)
1213 {
1214 mutex_enter(&spa->spa_vdev_top_lock);
1215 mutex_enter(&spa_namespace_lock);
1216
1217 vdev_autotrim_stop_all(spa);
1218
1219 return (spa_vdev_config_enter(spa));
1220 }
1221
1222 /*
1223 * Internal implementation for spa_vdev_enter(). Used when a vdev
1224 * operation requires multiple syncs (i.e. removing a device) while
1225 * keeping the spa_namespace_lock held.
1226 */
1227 uint64_t
1228 spa_vdev_config_enter(spa_t *spa)
1229 {
1230 ASSERT(MUTEX_HELD(&spa_namespace_lock));
1231
1232 spa_config_enter(spa, SCL_ALL, spa, RW_WRITER);
1233
1234 return (spa_last_synced_txg(spa) + 1);
1235 }
1236
1237 /*
1238 * Used in combination with spa_vdev_config_enter() to allow the syncing
1239 * of multiple transactions without releasing the spa_namespace_lock.
1240 */
1241 void
1242 spa_vdev_config_exit(spa_t *spa, vdev_t *vd, uint64_t txg, int error, char *tag)
1243 {
1244 ASSERT(MUTEX_HELD(&spa_namespace_lock));
1245
1246 int config_changed = B_FALSE;
1247
1248 ASSERT(txg > spa_last_synced_txg(spa));
1249
1250 spa->spa_pending_vdev = NULL;
1251
1252 /*
1253 * Reassess the DTLs.
1254 */
1255 vdev_dtl_reassess(spa->spa_root_vdev, 0, 0, B_FALSE);
1256
1257 if (error == 0 && !list_is_empty(&spa->spa_config_dirty_list)) {
1258 config_changed = B_TRUE;
1259 spa->spa_config_generation++;
1260 }
1261
1262 /*
1263 * Verify the metaslab classes.
1264 */
1265 ASSERT(metaslab_class_validate(spa_normal_class(spa)) == 0);
1266 ASSERT(metaslab_class_validate(spa_log_class(spa)) == 0);
1267 ASSERT(metaslab_class_validate(spa_special_class(spa)) == 0);
1268 ASSERT(metaslab_class_validate(spa_dedup_class(spa)) == 0);
1269
1270 spa_config_exit(spa, SCL_ALL, spa);
1271
1272 /*
1273 * Panic the system if the specified tag requires it. This
1274 * is useful for ensuring that configurations are updated
1275 * transactionally.
1276 */
1277 if (zio_injection_enabled)
1278 zio_handle_panic_injection(spa, tag, 0);
1279
1280 /*
1281 * Note: this txg_wait_synced() is important because it ensures
1282 * that there won't be more than one config change per txg.
1283 * This allows us to use the txg as the generation number.
1284 */
1285 if (error == 0)
1286 txg_wait_synced(spa->spa_dsl_pool, txg);
1287
1288 if (vd != NULL) {
1289 ASSERT(!vd->vdev_detached || vd->vdev_dtl_sm == NULL);
1290 if (vd->vdev_ops->vdev_op_leaf) {
1291 mutex_enter(&vd->vdev_initialize_lock);
1292 vdev_initialize_stop(vd, VDEV_INITIALIZE_CANCELED,
1293 NULL);
1294 mutex_exit(&vd->vdev_initialize_lock);
1295
1296 mutex_enter(&vd->vdev_trim_lock);
1297 vdev_trim_stop(vd, VDEV_TRIM_CANCELED, NULL);
1298 mutex_exit(&vd->vdev_trim_lock);
1299 }
1300
1301 /*
1302 * The vdev may be both a leaf and top-level device.
1303 */
1304 vdev_autotrim_stop_wait(vd);
1305
1306 spa_config_enter(spa, SCL_ALL, spa, RW_WRITER);
1307 vdev_free(vd);
1308 spa_config_exit(spa, SCL_ALL, spa);
1309 }
1310
1311 /*
1312 * If the config changed, update the config cache.
1313 */
1314 if (config_changed)
1315 spa_write_cachefile(spa, B_FALSE, B_TRUE);
1316 }
1317
1318 /*
1319 * Unlock the spa_t after adding or removing a vdev. Besides undoing the
1320  * locking of spa_vdev_enter(), we also want to make sure the transactions have
1321 * synced to disk, and then update the global configuration cache with the new
1322 * information.
1323 */
1324 int
1325 spa_vdev_exit(spa_t *spa, vdev_t *vd, uint64_t txg, int error)
1326 {
1327 vdev_autotrim_restart(spa);
1328
1329 spa_vdev_config_exit(spa, vd, txg, error, FTAG);
1330 mutex_exit(&spa_namespace_lock);
1331 mutex_exit(&spa->spa_vdev_top_lock);
1332
1333 return (error);
1334 }
1335
1336 /*
1337 * Lock the given spa_t for the purpose of changing vdev state.
1338 */
1339 void
1340 spa_vdev_state_enter(spa_t *spa, int oplocks)
1341 {
1342 int locks = SCL_STATE_ALL | oplocks;
1343
1344 /*
1345  * Root pools may need to read from the underlying devfs filesystem
1346 * when opening up a vdev. Unfortunately if we're holding the
1347 * SCL_ZIO lock it will result in a deadlock when we try to issue
1348 * the read from the root filesystem. Instead we "prefetch"
1349 * the associated vnodes that we need prior to opening the
1350 * underlying devices and cache them so that we can prevent
1351 * any I/O when we are doing the actual open.
1352 */
1353 if (spa_is_root(spa)) {
1354 int low = locks & ~(SCL_ZIO - 1);
1355 int high = locks & ~low;
1356
1357 spa_config_enter(spa, high, spa, RW_WRITER);
1358 vdev_hold(spa->spa_root_vdev);
1359 spa_config_enter(spa, low, spa, RW_WRITER);
1360 } else {
1361 spa_config_enter(spa, locks, spa, RW_WRITER);
1362 }
1363 spa->spa_vdev_locks = locks;
1364 }
1365
1366 int
1367 spa_vdev_state_exit(spa_t *spa, vdev_t *vd, int error)
1368 {
1369 boolean_t config_changed = B_FALSE;
1370
1371 if (vd != NULL || error == 0)
1372 vdev_dtl_reassess(vd ? vd->vdev_top : spa->spa_root_vdev,
1373 0, 0, B_FALSE);
1374
1375 if (vd != NULL) {
1376 vdev_state_dirty(vd->vdev_top);
1377 config_changed = B_TRUE;
1378 spa->spa_config_generation++;
1379 }
1380
1381 if (spa_is_root(spa))
1382 vdev_rele(spa->spa_root_vdev);
1383
1384 ASSERT3U(spa->spa_vdev_locks, >=, SCL_STATE_ALL);
1385 spa_config_exit(spa, spa->spa_vdev_locks, spa);
1386
1387 /*
1388 * If anything changed, wait for it to sync. This ensures that,
1389 * from the system administrator's perspective, zpool(8) commands
1390 * are synchronous. This is important for things like zpool offline:
1391 * when the command completes, you expect no further I/O from ZFS.
1392 */
1393 if (vd != NULL)
1394 txg_wait_synced(spa->spa_dsl_pool, 0);
1395
1396 /*
1397 * If the config changed, update the config cache.
1398 */
1399 if (config_changed) {
1400 mutex_enter(&spa_namespace_lock);
1401 spa_write_cachefile(spa, B_FALSE, B_TRUE);
1402 mutex_exit(&spa_namespace_lock);
1403 }
1404
1405 return (error);
1406 }
1407
1408 /*
1409 * ==========================================================================
1410 * Miscellaneous functions
1411 * ==========================================================================
1412 */
1413
1414 void
1415 spa_activate_mos_feature(spa_t *spa, const char *feature, dmu_tx_t *tx)
1416 {
1417 if (!nvlist_exists(spa->spa_label_features, feature)) {
1418 fnvlist_add_boolean(spa->spa_label_features, feature);
1419 /*
1420 * When we are creating the pool (tx_txg==TXG_INITIAL), we can't
1421  * dirty the vdev config because the SCL_CONFIG lock is not held.
1422 * Thankfully, in this case we don't need to dirty the config
1423 * because it will be written out anyway when we finish
1424 * creating the pool.
1425 */
1426 if (tx->tx_txg != TXG_INITIAL)
1427 vdev_config_dirty(spa->spa_root_vdev);
1428 }
1429 }
1430
1431 void
1432 spa_deactivate_mos_feature(spa_t *spa, const char *feature)
1433 {
1434 if (nvlist_remove_all(spa->spa_label_features, feature) == 0)
1435 vdev_config_dirty(spa->spa_root_vdev);
1436 }
1437
1438 /*
1439 * Return the spa_t associated with given pool_guid, if it exists. If
1440 * device_guid is non-zero, determine whether the pool exists *and* contains
1441 * a device with the specified device_guid.
1442 */
1443 spa_t *
1444 spa_by_guid(uint64_t pool_guid, uint64_t device_guid)
1445 {
1446 spa_t *spa;
1447 avl_tree_t *t = &spa_namespace_avl;
1448
1449 ASSERT(MUTEX_HELD(&spa_namespace_lock));
1450
1451 for (spa = avl_first(t); spa != NULL; spa = AVL_NEXT(t, spa)) {
1452 if (spa->spa_state == POOL_STATE_UNINITIALIZED)
1453 continue;
1454 if (spa->spa_root_vdev == NULL)
1455 continue;
1456 if (spa_guid(spa) == pool_guid) {
1457 if (device_guid == 0)
1458 break;
1459
1460 if (vdev_lookup_by_guid(spa->spa_root_vdev,
1461 device_guid) != NULL)
1462 break;
1463
1464 /*
1465 * Check any devices we may be in the process of adding.
1466 */
1467 if (spa->spa_pending_vdev) {
1468 if (vdev_lookup_by_guid(spa->spa_pending_vdev,
1469 device_guid) != NULL)
1470 break;
1471 }
1472 }
1473 }
1474
1475 return (spa);
1476 }
1477
1478 /*
1479 * Determine whether a pool with the given pool_guid exists.
1480 */
1481 boolean_t
1482 spa_guid_exists(uint64_t pool_guid, uint64_t device_guid)
1483 {
1484 return (spa_by_guid(pool_guid, device_guid) != NULL);
1485 }
1486
1487 char *
1488 spa_strdup(const char *s)
1489 {
1490 size_t len;
1491 char *new;
1492
1493 len = strlen(s);
1494 new = kmem_alloc(len + 1, KM_SLEEP);
1495 bcopy(s, new, len);
1496 new[len] = '\0';
1497
1498 return (new);
1499 }
1500
1501 void
1502 spa_strfree(char *s)
1503 {
1504 kmem_free(s, strlen(s) + 1);
1505 }
1506
1507 uint64_t
1508 spa_get_random(uint64_t range)
1509 {
1510 uint64_t r;
1511
1512 ASSERT(range != 0);
1513
1514 if (range == 1)
1515 return (0);
1516
1517 (void) random_get_pseudo_bytes((void *)&r, sizeof (uint64_t));
1518
1519 return (r % range);
1520 }
1521
1522 uint64_t
1523 spa_generate_guid(spa_t *spa)
1524 {
1525 uint64_t guid = spa_get_random(-1ULL);
1526
1527 if (spa != NULL) {
1528 while (guid == 0 || spa_guid_exists(spa_guid(spa), guid))
1529 guid = spa_get_random(-1ULL);
1530 } else {
1531 while (guid == 0 || spa_guid_exists(guid, 0))
1532 guid = spa_get_random(-1ULL);
1533 }
1534
1535 return (guid);
1536 }
1537
1538 void
1539 snprintf_blkptr(char *buf, size_t buflen, const blkptr_t *bp)
1540 {
1541 char type[256];
1542 char *checksum = NULL;
1543 char *compress = NULL;
1544
1545 if (bp != NULL) {
1546 if (BP_GET_TYPE(bp) & DMU_OT_NEWTYPE) {
1547 dmu_object_byteswap_t bswap =
1548 DMU_OT_BYTESWAP(BP_GET_TYPE(bp));
1549 (void) snprintf(type, sizeof (type), "bswap %s %s",
1550 DMU_OT_IS_METADATA(BP_GET_TYPE(bp)) ?
1551 "metadata" : "data",
1552 dmu_ot_byteswap[bswap].ob_name);
1553 } else {
1554 (void) strlcpy(type, dmu_ot[BP_GET_TYPE(bp)].ot_name,
1555 sizeof (type));
1556 }
1557 if (!BP_IS_EMBEDDED(bp)) {
1558 checksum =
1559 zio_checksum_table[BP_GET_CHECKSUM(bp)].ci_name;
1560 }
1561 compress = zio_compress_table[BP_GET_COMPRESS(bp)].ci_name;
1562 }
1563
1564 SNPRINTF_BLKPTR(snprintf, ' ', buf, buflen, bp, type, checksum,
1565 compress);
1566 }
1567
1568 void
1569 spa_freeze(spa_t *spa)
1570 {
1571 uint64_t freeze_txg = 0;
1572
1573 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
1574 if (spa->spa_freeze_txg == UINT64_MAX) {
1575 freeze_txg = spa_last_synced_txg(spa) + TXG_SIZE;
1576 spa->spa_freeze_txg = freeze_txg;
1577 }
1578 spa_config_exit(spa, SCL_ALL, FTAG);
1579 if (freeze_txg != 0)
1580 txg_wait_synced(spa_get_dsl(spa), freeze_txg);
1581 }
1582
1583 void
1584 zfs_panic_recover(const char *fmt, ...)
1585 {
1586 va_list adx;
1587
1588 va_start(adx, fmt);
1589 vcmn_err(zfs_recover ? CE_WARN : CE_PANIC, fmt, adx);
1590 va_end(adx);
1591 }
1592
1593 /*
1594 * This is a stripped-down version of strtoull, suitable only for converting
1595 * lowercase hexadecimal numbers that don't overflow.
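 * For example, zfs_strtonum("1a2b", NULL) returns 0x1a2b (6699 decimal).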
1596 */
1597 uint64_t
1598 zfs_strtonum(const char *str, char **nptr)
1599 {
1600 uint64_t val = 0;
1601 char c;
1602 int digit;
1603
1604 while ((c = *str) != '\0') {
1605 if (c >= '0' && c <= '9')
1606 digit = c - '0';
1607 else if (c >= 'a' && c <= 'f')
1608 digit = 10 + c - 'a';
1609 else
1610 break;
1611
1612 val *= 16;
1613 val += digit;
1614
1615 str++;
1616 }
1617
1618 if (nptr)
1619 *nptr = (char *)str;
1620
1621 return (val);
1622 }
1623
1624 void
1625 spa_activate_allocation_classes(spa_t *spa, dmu_tx_t *tx)
1626 {
1627 /*
1628  * We bump the feature refcount for each special vdev added to the pool.
1629 */
1630 ASSERT(spa_feature_is_enabled(spa, SPA_FEATURE_ALLOCATION_CLASSES));
1631 spa_feature_incr(spa, SPA_FEATURE_ALLOCATION_CLASSES, tx);
1632 }
1633
1634 /*
1635 * ==========================================================================
1636 * Accessor functions
1637 * ==========================================================================
1638 */
1639
1640 boolean_t
1641 spa_shutting_down(spa_t *spa)
1642 {
1643 return (spa->spa_async_suspended);
1644 }
1645
1646 dsl_pool_t *
1647 spa_get_dsl(spa_t *spa)
1648 {
1649 return (spa->spa_dsl_pool);
1650 }
1651
1652 boolean_t
1653 spa_is_initializing(spa_t *spa)
1654 {
1655 return (spa->spa_is_initializing);
1656 }
1657
1658 boolean_t
1659 spa_indirect_vdevs_loaded(spa_t *spa)
1660 {
1661 return (spa->spa_indirect_vdevs_loaded);
1662 }
1663
1664 blkptr_t *
1665 spa_get_rootblkptr(spa_t *spa)
1666 {
1667 return (&spa->spa_ubsync.ub_rootbp);
1668 }
1669
1670 void
1671 spa_set_rootblkptr(spa_t *spa, const blkptr_t *bp)
1672 {
1673 spa->spa_uberblock.ub_rootbp = *bp;
1674 }
1675
1676 void
1677 spa_altroot(spa_t *spa, char *buf, size_t buflen)
1678 {
1679 if (spa->spa_root == NULL)
1680 buf[0] = '\0';
1681 else
1682 (void) strncpy(buf, spa->spa_root, buflen);
1683 }
1684
1685 int
1686 spa_sync_pass(spa_t *spa)
1687 {
1688 return (spa->spa_sync_pass);
1689 }
1690
1691 char *
1692 spa_name(spa_t *spa)
1693 {
1694 return (spa->spa_name);
1695 }
1696
1697 uint64_t
1698 spa_guid(spa_t *spa)
1699 {
1700 dsl_pool_t *dp = spa_get_dsl(spa);
1701 uint64_t guid;
1702
1703 /*
1704 * If we fail to parse the config during spa_load(), we can go through
1705 * the error path (which posts an ereport) and end up here with no root
1706 * vdev. We stash the original pool guid in 'spa_config_guid' to handle
1707 * this case.
1708 */
1709 if (spa->spa_root_vdev == NULL)
1710 return (spa->spa_config_guid);
1711
1712 guid = spa->spa_last_synced_guid != 0 ?
1713 spa->spa_last_synced_guid : spa->spa_root_vdev->vdev_guid;
1714
1715 /*
1716 * Return the most recently synced out guid unless we're
1717 * in syncing context.
1718 */
1719 if (dp && dsl_pool_sync_context(dp))
1720 return (spa->spa_root_vdev->vdev_guid);
1721 else
1722 return (guid);
1723 }
1724
1725 uint64_t
1726 spa_load_guid(spa_t *spa)
1727 {
1728 /*
1729 * This is a GUID that exists solely as a reference for the
1730 * purposes of the arc. It is generated at load time, and
1731 * is never written to persistent storage.
1732 */
1733 return (spa->spa_load_guid);
1734 }
1735
1736 uint64_t
1737 spa_last_synced_txg(spa_t *spa)
1738 {
1739 return (spa->spa_ubsync.ub_txg);
1740 }
1741
1742 uint64_t
1743 spa_first_txg(spa_t *spa)
1744 {
1745 return (spa->spa_first_txg);
1746 }
1747
1748 uint64_t
1749 spa_syncing_txg(spa_t *spa)
1750 {
1751 return (spa->spa_syncing_txg);
1752 }
1753
1754 /*
1755 * Return the last txg in which data can be dirtied. The final txgs
1756 * will be used just to clear out any deferred frees that remain.
1757 */
1758 uint64_t
1759 spa_final_dirty_txg(spa_t *spa)
1760 {
1761 return (spa->spa_final_txg - TXG_DEFER_SIZE);
1762 }
1763
1764 pool_state_t
1765 spa_state(spa_t *spa)
1766 {
1767 return (spa->spa_state);
1768 }
1769
1770 spa_load_state_t
1771 spa_load_state(spa_t *spa)
1772 {
1773 return (spa->spa_load_state);
1774 }
1775
1776 uint64_t
1777 spa_freeze_txg(spa_t *spa)
1778 {
1779 return (spa->spa_freeze_txg);
1780 }
1781
1782 /* ARGSUSED */
1783 uint64_t
1784 spa_get_worst_case_asize(spa_t *spa, uint64_t lsize)
1785 {
1786 return (lsize * spa_asize_inflation);
1787 }
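/*
 * Illustrative example (assumption: spa_asize_inflation keeps its default
 * value of 24): a 128K logical write would be budgeted at up to
 * 128K * 24 = 3M of worst-case allocated space, a deliberately pessimistic
 * factor intended to cover RAID-Z parity/expansion, ditto copies, and
 * related overheads.
 */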
1788
1789 /*
1790 * Return the amount of slop space in bytes. It is 1/32 of the pool (about 3.1%),
1791 * or at least 128MB, unless that would cause it to be more than half the
1792 * pool size.
1793 *
1794 * See the comment above spa_slop_shift for details.
1795 */
1796 uint64_t
1797 spa_get_slop_space(spa_t *spa)
1798 {
1799 uint64_t space = spa_get_dspace(spa);
1800 return (MAX(space >> spa_slop_shift, MIN(space >> 1, spa_min_slop)));
1801 }
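/*
 * Worked examples for the formula above (illustrative only), using the
 * defaults described in the comment (1/32 of the pool, 128MB floor,
 * half-the-pool ceiling):
 *
 *	10TB pool:  MAX(10TB / 32, MIN(5TB, 128MB))   = MAX(320GB, 128MB) = 320GB
 *	2GB pool:   MAX(2GB / 32, MIN(1GB, 128MB))    = MAX(64MB, 128MB)  = 128MB
 *	100MB pool: MAX(100MB / 32, MIN(50MB, 128MB)) = MAX(~3MB, 50MB)   = 50MB
 */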
1802
1803 uint64_t
1804 spa_get_dspace(spa_t *spa)
1805 {
1806 return (spa->spa_dspace);
1807 }
1808
1809 uint64_t
1810 spa_get_checkpoint_space(spa_t *spa)
1811 {
1812 return (spa->spa_checkpoint_info.sci_dspace);
1813 }
1814
1815 void
1816 spa_update_dspace(spa_t *spa)
1817 {
1818 spa->spa_dspace = metaslab_class_get_dspace(spa_normal_class(spa)) +
1819 ddt_get_dedup_dspace(spa);
1820 if (spa->spa_vdev_removal != NULL) {
1821 /*
1822 * We can't allocate from the removing device, so
1823 * subtract its size. This prevents the DMU/DSL from
1824 * filling up the (now smaller) pool while we are in the
1825 * middle of removing the device.
1826 *
1827 * Note that the DMU/DSL doesn't actually know or care
1828 * how much space is allocated (it does its own tracking
1829 * of how much space has been logically used). So it
1830 * doesn't matter that the data we are moving may be
1831 * allocated twice (on the old device and the new
1832 * device).
1833 */
1834 spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
1835 vdev_t *vd =
1836 vdev_lookup_top(spa, spa->spa_vdev_removal->svr_vdev_id);
1837 spa->spa_dspace -= spa_deflate(spa) ?
1838 vd->vdev_stat.vs_dspace : vd->vdev_stat.vs_space;
1839 spa_config_exit(spa, SCL_VDEV, FTAG);
1840 }
1841 }
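/*
 * Illustrative arithmetic for the adjustment above (hypothetical sizes):
 * if the normal class reports 10TB of dspace, dedup contributes nothing,
 * and a 1TB top-level vdev is in the middle of removal, spa_dspace is
 * reported as 9TB, so the DMU/DSL sizes its decisions against the pool
 * that will remain once the removal completes.
 */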
1842
1843 /*
1844 * Return the failure mode that has been set for this pool. The default
1845 * behavior is to block all I/Os when a complete failure occurs.
1846 */
1847 uint8_t
1848 spa_get_failmode(spa_t *spa)
1849 {
1850 return (spa->spa_failmode);
1851 }
1852
1853 boolean_t
1854 spa_suspended(spa_t *spa)
1855 {
1856 return (spa->spa_suspended != ZIO_SUSPEND_NONE);
1857 }
1858
1859 uint64_t
1860 spa_version(spa_t *spa)
1861 {
1862 return (spa->spa_ubsync.ub_version);
1863 }
1864
1865 boolean_t
1866 spa_deflate(spa_t *spa)
1867 {
1868 return (spa->spa_deflate);
1869 }
1870
1871 metaslab_class_t *
1872 spa_normal_class(spa_t *spa)
1873 {
1874 return (spa->spa_normal_class);
1875 }
1876
1877 metaslab_class_t *
1878 spa_log_class(spa_t *spa)
1879 {
1880 return (spa->spa_log_class);
1881 }
1882
1883 metaslab_class_t *
1884 spa_special_class(spa_t *spa)
1885 {
1886 return (spa->spa_special_class);
1887 }
1888
1889 metaslab_class_t *
1890 spa_dedup_class(spa_t *spa)
1891 {
1892 return (spa->spa_dedup_class);
1893 }
1894
1895 /*
1896 * Locate an appropriate allocation class
1897 */
1898 metaslab_class_t *
1899 spa_preferred_class(spa_t *spa, uint64_t size, dmu_object_type_t objtype,
1900 uint_t level, uint_t special_smallblk)
1901 {
1902 if (DMU_OT_IS_ZIL(objtype)) {
1903 if (spa->spa_log_class->mc_groups != 0)
1904 return (spa_log_class(spa));
1905 else
1906 return (spa_normal_class(spa));
1907 }
1908
1909 boolean_t has_special_class = spa->spa_special_class->mc_groups != 0;
1910
1911 if (DMU_OT_IS_DDT(objtype)) {
1912 if (spa->spa_dedup_class->mc_groups != 0)
1913 return (spa_dedup_class(spa));
1914 else if (has_special_class && zfs_ddt_data_is_special)
1915 return (spa_special_class(spa));
1916 else
1917 return (spa_normal_class(spa));
1918 }
1919
1920 /* Indirect blocks for user data can land in special if allowed */
1921 if (level > 0 && (DMU_OT_IS_FILE(objtype) || objtype == DMU_OT_ZVOL)) {
1922 if (has_special_class && zfs_user_indirect_is_special)
1923 return (spa_special_class(spa));
1924 else
1925 return (spa_normal_class(spa));
1926 }
1927
1928 if (DMU_OT_IS_METADATA(objtype) || level > 0) {
1929 if (has_special_class)
1930 return (spa_special_class(spa));
1931 else
1932 return (spa_normal_class(spa));
1933 }
1934
1935 /*
1936 * Allow small file blocks in special class in some cases (like
1937 * for the dRAID vdev feature). But always leave a reserve of
1938 * zfs_special_class_metadata_reserve_pct exclusively for metadata.
1939 */
1940 if (DMU_OT_IS_FILE(objtype) &&
1941 has_special_class && size <= special_smallblk) {
1942 metaslab_class_t *special = spa_special_class(spa);
1943 uint64_t alloc = metaslab_class_get_alloc(special);
1944 uint64_t space = metaslab_class_get_space(special);
1945 uint64_t limit =
1946 (space * (100 - zfs_special_class_metadata_reserve_pct))
1947 / 100;
1948
1949 if (alloc < limit)
1950 return (special);
1951 }
1952
1953 return (spa_normal_class(spa));
1954 }
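/*
 * Illustrative walk through the selection above (hypothetical values): on
 * a pool with a special vdev configured, a level-0 file block of 4K with
 * special_smallblk set to 32K satisfies "size <= special_smallblk" and is
 * steered to the special class, provided the class is still below the
 * zfs_special_class_metadata_reserve_pct limit; the same block with
 * special_smallblk of 0 falls through to the normal class.
 */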
1955
1956 void
1957 spa_evicting_os_register(spa_t *spa, objset_t *os)
1958 {
1959 mutex_enter(&spa->spa_evicting_os_lock);
1960 list_insert_head(&spa->spa_evicting_os_list, os);
1961 mutex_exit(&spa->spa_evicting_os_lock);
1962 }
1963
1964 void
1965 spa_evicting_os_deregister(spa_t *spa, objset_t *os)
1966 {
1967 mutex_enter(&spa->spa_evicting_os_lock);
1968 list_remove(&spa->spa_evicting_os_list, os);
1969 cv_broadcast(&spa->spa_evicting_os_cv);
1970 mutex_exit(&spa->spa_evicting_os_lock);
1971 }
1972
1973 void
1974 spa_evicting_os_wait(spa_t *spa)
1975 {
1976 mutex_enter(&spa->spa_evicting_os_lock);
1977 while (!list_is_empty(&spa->spa_evicting_os_list))
1978 cv_wait(&spa->spa_evicting_os_cv, &spa->spa_evicting_os_lock);
1979 mutex_exit(&spa->spa_evicting_os_lock);
1980
1981 dmu_buf_user_evict_wait();
1982 }
1983
1984 int
1985 spa_max_replication(spa_t *spa)
1986 {
1987 /*
1988 * As of SPA_VERSION == SPA_VERSION_DITTO_BLOCKS, we are able to
1989 * handle BPs with more than one DVA allocated. Set our max
1990 * replication level accordingly.
1991 */
1992 if (spa_version(spa) < SPA_VERSION_DITTO_BLOCKS)
1993 return (1);
1994 return (MIN(SPA_DVAS_PER_BP, spa_max_replication_override));
1995 }
1996
1997 int
1998 spa_prev_software_version(spa_t *spa)
1999 {
2000 return (spa->spa_prev_software_version);
2001 }
2002
2003 uint64_t
2004 spa_deadman_synctime(spa_t *spa)
2005 {
2006 return (spa->spa_deadman_synctime);
2007 }
2008
2009 spa_autotrim_t
2010 spa_get_autotrim(spa_t *spa)
2011 {
2012 return (spa->spa_autotrim);
2013 }
2014
2015 uint64_t
2016 dva_get_dsize_sync(spa_t *spa, const dva_t *dva)
2017 {
2018 uint64_t asize = DVA_GET_ASIZE(dva);
2019 uint64_t dsize = asize;
2020
2021 ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0);
2022
2023 if (asize != 0 && spa->spa_deflate) {
2024 vdev_t *vd = vdev_lookup_top(spa, DVA_GET_VDEV(dva));
2025 dsize = (asize >> SPA_MINBLOCKSHIFT) * vd->vdev_deflate_ratio;
2026 }
2027
2028 return (dsize);
2029 }
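/*
 * Illustrative example (approximate): SPA_MINBLOCKSHIFT is 9, so the
 * conversion above is dsize = (asize / 512) * vdev_deflate_ratio. For a
 * plain mirror or single-disk top-level vdev the deflate ratio works out
 * to 512 and dsize equals asize; for a RAID-Z vdev the ratio is smaller,
 * so dsize discounts the parity overhead baked into asize.
 */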
2030
2031 uint64_t
2032 bp_get_dsize_sync(spa_t *spa, const blkptr_t *bp)
2033 {
2034 uint64_t dsize = 0;
2035
2036 for (int d = 0; d < BP_GET_NDVAS(bp); d++)
2037 dsize += dva_get_dsize_sync(spa, &bp->blk_dva[d]);
2038
2039 return (dsize);
2040 }
2041
2042 uint64_t
2043 bp_get_dsize(spa_t *spa, const blkptr_t *bp)
2044 {
2045 uint64_t dsize = 0;
2046
2047 spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
2048
2049 for (int d = 0; d < BP_GET_NDVAS(bp); d++)
2050 dsize += dva_get_dsize_sync(spa, &bp->blk_dva[d]);
2051
2052 spa_config_exit(spa, SCL_VDEV, FTAG);
2053
2054 return (dsize);
2055 }
2056
2057 uint64_t
2058 spa_dirty_data(spa_t *spa)
2059 {
2060 return (spa->spa_dsl_pool->dp_dirty_total);
2061 }
2062
2063 /*
2064 * ==========================================================================
2065 * SPA Import Progress Routines
2066 * The illumos implementation of these is different from OpenZFS. OpenZFS
2067 * uses the Linux /proc fs, whereas we use a kstat on the spa.
2068 * ==========================================================================
2069 */
2070
2071 typedef struct spa_import_progress {
2072 kstat_named_t sip_load_state;
2073 kstat_named_t sip_mmp_sec_remaining; /* MMP activity check */
2074 kstat_named_t sip_load_max_txg; /* rewind txg */
2075 } spa_import_progress_t;
2076
2077 static void
2078 spa_import_progress_init(void)
2079 {
2080 }
2081
2082 static void
2083 spa_import_progress_destroy(void)
2084 {
2085 }
2086
2087 void spa_import_progress_add(spa_t *);
2088
2089 int
2090 spa_import_progress_set_state(spa_t *spa, spa_load_state_t load_state)
2091 {
2092 if (spa->spa_imp_kstat == NULL)
2093 spa_import_progress_add(spa);
2094
2095 mutex_enter(&spa->spa_imp_kstat_lock);
2096 if (spa->spa_imp_kstat != NULL) {
2097 spa_import_progress_t *sip = spa->spa_imp_kstat->ks_data;
2098 if (sip != NULL)
2099 sip->sip_load_state.value.ui64 = (uint64_t)load_state;
2100 }
2101 mutex_exit(&spa->spa_imp_kstat_lock);
2102
2103 return (0);
2104 }
2105
2106 int
2107 spa_import_progress_set_max_txg(spa_t *spa, uint64_t load_max_txg)
2108 {
2109 if (spa->spa_imp_kstat == NULL)
2110 spa_import_progress_add(spa);
2111
2112 mutex_enter(&spa->spa_imp_kstat_lock);
2113 if (spa->spa_imp_kstat != NULL) {
2114 spa_import_progress_t *sip = spa->spa_imp_kstat->ks_data;
2115 if (sip != NULL)
2116 sip->sip_load_max_txg.value.ui64 = load_max_txg;
2117 }
2118 mutex_exit(&spa->spa_imp_kstat_lock);
2119
2120 return (0);
2121 }
2122
2123 int
2124 spa_import_progress_set_mmp_check(spa_t *spa, uint64_t mmp_sec_remaining)
2125 {
2126 if (spa->spa_imp_kstat == NULL)
2127 spa_import_progress_add(spa);
2128
2129 mutex_enter(&spa->spa_imp_kstat_lock);
2130 if (spa->spa_imp_kstat != NULL) {
2131 spa_import_progress_t *sip = spa->spa_imp_kstat->ks_data;
2132 if (sip != NULL)
2133 sip->sip_mmp_sec_remaining.value.ui64 =
2134 mmp_sec_remaining;
2135 }
2136 mutex_exit(&spa->spa_imp_kstat_lock);
2137
2138 return (0);
2139 }
2140
2141 /*
2142 * A new import is in progress. Add an entry.
2143 */
2144 void
2145 spa_import_progress_add(spa_t *spa)
2146 {
2147 char *poolname = NULL;
2148 spa_import_progress_t *sip;
2149
2150 mutex_enter(&spa->spa_imp_kstat_lock);
2151 if (spa->spa_imp_kstat != NULL) {
2152 sip = spa->spa_imp_kstat->ks_data;
2153 sip->sip_load_state.value.ui64 = (uint64_t)spa_load_state(spa);
2154 mutex_exit(&spa->spa_imp_kstat_lock);
2155 return;
2156 }
2157
2158 (void) nvlist_lookup_string(spa->spa_config, ZPOOL_CONFIG_POOL_NAME,
2159 &poolname);
2160 if (poolname == NULL)
2161 poolname = spa_name(spa);
2162
2163 spa->spa_imp_kstat = kstat_create("zfs_import", 0, poolname,
2164 "zfs_misc", KSTAT_TYPE_NAMED,
2165 sizeof (spa_import_progress_t) / sizeof (kstat_named_t),
2166 KSTAT_FLAG_VIRTUAL);
2167 if (spa->spa_imp_kstat != NULL) {
2168 sip = kmem_alloc(sizeof (spa_import_progress_t), KM_SLEEP);
2169 spa->spa_imp_kstat->ks_data = sip;
2170
2171 sip->sip_load_state.value.ui64 = (uint64_t)spa_load_state(spa);
2172
2173 kstat_named_init(&sip->sip_load_state,
2174 "spa_load_state", KSTAT_DATA_UINT64);
2175 kstat_named_init(&sip->sip_mmp_sec_remaining,
2176 "mmp_sec_remaining", KSTAT_DATA_UINT64);
2177 kstat_named_init(&sip->sip_load_max_txg,
2178 "spa_load_max_txg", KSTAT_DATA_UINT64);
2179 spa->spa_imp_kstat->ks_lock = &spa->spa_imp_kstat_lock;
2180 kstat_install(spa->spa_imp_kstat);
2181 }
2182 mutex_exit(&spa->spa_imp_kstat_lock);
2183 }
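/*
 * Illustrative observation from userland (hypothetical pool name "tank"):
 * because the kstat is created with module "zfs_import", class "zfs_misc",
 * and the pool name as the kstat name, import progress should be visible
 * with something like:
 *
 *	# kstat -m zfs_import -n tank
 *
 * which reports the spa_load_state, mmp_sec_remaining, and spa_load_max_txg
 * counters initialized above.
 */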
2184
2185 void
2186 spa_import_progress_remove(spa_t *spa)
2187 {
2188 if (spa->spa_imp_kstat != NULL) {
2189 void *data = spa->spa_imp_kstat->ks_data;
2190
2191 kstat_delete(spa->spa_imp_kstat);
2192 spa->spa_imp_kstat = NULL;
2193 kmem_free(data, sizeof (spa_import_progress_t));
2194 }
2195 }
2196
2197 /*
2198 * ==========================================================================
2199 * Initialization and Termination
2200 * ==========================================================================
2201 */
2202
2203 static int
2204 spa_name_compare(const void *a1, const void *a2)
2205 {
2206 const spa_t *s1 = a1;
2207 const spa_t *s2 = a2;
2208 int s;
2209
2210 s = strcmp(s1->spa_name, s2->spa_name);
2211
2212 return (TREE_ISIGN(s));
2213 }
2214
2215 int
2216 spa_busy(void)
2217 {
2218 return (spa_active_count);
2219 }
2220
2221 void
2222 spa_boot_init()
2223 {
2224 spa_config_load();
2225 }
2226
2227 void
2228 spa_init(int mode)
2229 {
2230 mutex_init(&spa_namespace_lock, NULL, MUTEX_DEFAULT, NULL);
2231 mutex_init(&spa_spare_lock, NULL, MUTEX_DEFAULT, NULL);
2232 mutex_init(&spa_l2cache_lock, NULL, MUTEX_DEFAULT, NULL);
2233 cv_init(&spa_namespace_cv, NULL, CV_DEFAULT, NULL);
2234
2235 avl_create(&spa_namespace_avl, spa_name_compare, sizeof (spa_t),
2236 offsetof(spa_t, spa_avl));
2237
2238 avl_create(&spa_spare_avl, spa_spare_compare, sizeof (spa_aux_t),
2239 offsetof(spa_aux_t, aux_avl));
2240
2241 avl_create(&spa_l2cache_avl, spa_l2cache_compare, sizeof (spa_aux_t),
2242 offsetof(spa_aux_t, aux_avl));
2243
2244 spa_mode_global = mode;
2245
2246 #ifdef _KERNEL
2247 spa_arch_init();
2248 #else
2249 if (spa_mode_global != FREAD && dprintf_find_string("watch")) {
2250 arc_procfd = open("/proc/self/ctl", O_WRONLY);
2251 if (arc_procfd == -1) {
2252 perror("could not enable watchpoints: "
2253 "opening /proc/self/ctl failed: ");
2254 } else {
2255 arc_watch = B_TRUE;
2256 }
2257 }
2258 #endif
2259
2260 zfs_refcount_init();
2261 unique_init();
2262 zfs_btree_init();
2263 metaslab_stat_init();
2264 zio_init();
2265 dmu_init();
2266 zil_init();
2267 vdev_cache_stat_init();
2268 vdev_mirror_stat_init();
2269 vdev_raidz_math_init();
2270 fletcher_4_init();
2271 zfs_prop_init();
2272 zpool_prop_init();
2273 zpool_feature_init();
2274 spa_config_load();
2275 l2arc_start();
2276 scan_init();
2277 spa_import_progress_init();
2278 }
2279
2280 void
2281 spa_fini(void)
2282 {
2283 l2arc_stop();
2284
2285 spa_evict_all();
2286
2287 vdev_cache_stat_fini();
2288 vdev_mirror_stat_fini();
2289 vdev_raidz_math_fini();
2290 fletcher_4_fini();
2291 zil_fini();
2292 dmu_fini();
2293 zio_fini();
2294 metaslab_stat_fini();
2295 zfs_btree_fini();
2296 unique_fini();
2297 zfs_refcount_fini();
2298 scan_fini();
2299 spa_import_progress_destroy();
2300
2301 avl_destroy(&spa_namespace_avl);
2302 avl_destroy(&spa_spare_avl);
2303 avl_destroy(&spa_l2cache_avl);
2304
2305 cv_destroy(&spa_namespace_cv);
2306 mutex_destroy(&spa_namespace_lock);
2307 mutex_destroy(&spa_spare_lock);
2308 mutex_destroy(&spa_l2cache_lock);
2309 }
2310
2311 /*
2312 * Return whether this pool has slogs. No locking needed.
2313 * It's not a problem if the wrong answer is returned, as it's only used
2314 * for performance and not for correctness.
2315 */
2316 boolean_t
2317 spa_has_slogs(spa_t *spa)
2318 {
2319 return (spa->spa_log_class->mc_rotor != NULL);
2320 }
2321
2322 spa_log_state_t
2323 spa_get_log_state(spa_t *spa)
2324 {
2325 return (spa->spa_log_state);
2326 }
2327
2328 void
2329 spa_set_log_state(spa_t *spa, spa_log_state_t state)
2330 {
2331 spa->spa_log_state = state;
2332 }
2333
2334 boolean_t
2335 spa_is_root(spa_t *spa)
2336 {
2337 return (spa->spa_is_root);
2338 }
2339
2340 boolean_t
2341 spa_writeable(spa_t *spa)
2342 {
2343 return (!!(spa->spa_mode & FWRITE) && spa->spa_trust_config);
2344 }
2345
2346 /*
2347 * Returns true if there is a pending sync task in any of the current
2348 * syncing txg, the current quiescing txg, or the current open txg.
2349 */
2350 boolean_t
2351 spa_has_pending_synctask(spa_t *spa)
2352 {
2353 return (!txg_all_lists_empty(&spa->spa_dsl_pool->dp_sync_tasks) ||
2354 !txg_all_lists_empty(&spa->spa_dsl_pool->dp_early_sync_tasks));
2355 }
2356
2357 int
2358 spa_mode(spa_t *spa)
2359 {
2360 return (spa->spa_mode);
2361 }
2362
2363 uint64_t
2364 spa_bootfs(spa_t *spa)
2365 {
2366 return (spa->spa_bootfs);
2367 }
2368
2369 uint64_t
2370 spa_delegation(spa_t *spa)
2371 {
2372 return (spa->spa_delegation);
2373 }
2374
2375 objset_t *
2376 spa_meta_objset(spa_t *spa)
2377 {
2378 return (spa->spa_meta_objset);
2379 }
2380
2381 enum zio_checksum
2382 spa_dedup_checksum(spa_t *spa)
2383 {
2384 return (spa->spa_dedup_checksum);
2385 }
2386
2387 /*
2388 * Reset pool scan stats per scan pass (or reboot).
2389 */
2390 void
2391 spa_scan_stat_init(spa_t *spa)
2392 {
2393 /* data not stored on disk */
2394 spa->spa_scan_pass_start = gethrestime_sec();
2395 if (dsl_scan_is_paused_scrub(spa->spa_dsl_pool->dp_scan))
2396 spa->spa_scan_pass_scrub_pause = spa->spa_scan_pass_start;
2397 else
2398 spa->spa_scan_pass_scrub_pause = 0;
2399 spa->spa_scan_pass_scrub_spent_paused = 0;
2400 spa->spa_scan_pass_exam = 0;
2401 spa->spa_scan_pass_issued = 0;
2402 vdev_scan_stat_init(spa->spa_root_vdev);
2403 }
2404
2405 /*
2406 * Get scan stats for zpool status reports.
2407 */
2408 int
2409 spa_scan_get_stats(spa_t *spa, pool_scan_stat_t *ps)
2410 {
2411 dsl_scan_t *scn = spa->spa_dsl_pool ? spa->spa_dsl_pool->dp_scan : NULL;
2412
2413 if (scn == NULL || scn->scn_phys.scn_func == POOL_SCAN_NONE)
2414 return (SET_ERROR(ENOENT));
2415 bzero(ps, sizeof (pool_scan_stat_t));
2416
2417 /* data stored on disk */
2418 ps->pss_func = scn->scn_phys.scn_func;
2419 ps->pss_state = scn->scn_phys.scn_state;
2420 ps->pss_start_time = scn->scn_phys.scn_start_time;
2421 ps->pss_end_time = scn->scn_phys.scn_end_time;
2422 ps->pss_to_examine = scn->scn_phys.scn_to_examine;
2423 ps->pss_to_process = scn->scn_phys.scn_to_process;
2424 ps->pss_processed = scn->scn_phys.scn_processed;
2425 ps->pss_errors = scn->scn_phys.scn_errors;
2426 ps->pss_examined = scn->scn_phys.scn_examined;
2427 ps->pss_issued =
2428 scn->scn_issued_before_pass + spa->spa_scan_pass_issued;
2430
2431 /* data not stored on disk */
2432 ps->pss_pass_start = spa->spa_scan_pass_start;
2433 ps->pss_pass_exam = spa->spa_scan_pass_exam;
2434 ps->pss_pass_issued = spa->spa_scan_pass_issued;
2435 ps->pss_pass_scrub_pause = spa->spa_scan_pass_scrub_pause;
2436 ps->pss_pass_scrub_spent_paused = spa->spa_scan_pass_scrub_spent_paused;
2437
2438 return (0);
2439 }
2440
2441 int
2442 spa_maxblocksize(spa_t *spa)
2443 {
2444 if (spa_feature_is_enabled(spa, SPA_FEATURE_LARGE_BLOCKS))
2445 return (SPA_MAXBLOCKSIZE);
2446 else
2447 return (SPA_OLD_MAXBLOCKSIZE);
2448 }
2449
2450 int
2451 spa_maxdnodesize(spa_t *spa)
2452 {
2453 if (spa_feature_is_enabled(spa, SPA_FEATURE_LARGE_DNODE))
2454 return (DNODE_MAX_SIZE);
2455 else
2456 return (DNODE_MIN_SIZE);
2457 }
2458
2459 boolean_t
2460 spa_multihost(spa_t *spa)
2461 {
2462 return (spa->spa_multihost ? B_TRUE : B_FALSE);
2463 }
2464
2465 unsigned long
2466 spa_get_hostid(void)
2467 {
2468 unsigned long myhostid;
2469
2470 #ifdef _KERNEL
2471 myhostid = zone_get_hostid(NULL);
2472 #else /* _KERNEL */
2473 /*
2474 * We're emulating the system's hostid in userland, so
2475 * we can't use zone_get_hostid().
2476 */
2477 (void) ddi_strtoul(hw_serial, NULL, 10, &myhostid);
2478 #endif /* _KERNEL */
2479
2480 return (myhostid);
2481 }
2482
2483 /*
2484 * Returns the txg in which the last device removal completed. No indirect
2485 * mappings have been added since this txg.
2486 */
2487 uint64_t
2488 spa_get_last_removal_txg(spa_t *spa)
2489 {
2490 uint64_t vdevid;
2491 uint64_t ret = -1ULL;
2492
2493 spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
2494 /*
2495 * sr_prev_indirect_vdev is only modified while holding all the
2496 * config locks, so it is sufficient to hold SCL_VDEV as reader when
2497 * examining it.
2498 */
2499 vdevid = spa->spa_removing_phys.sr_prev_indirect_vdev;
2500
2501 while (vdevid != -1ULL) {
2502 vdev_t *vd = vdev_lookup_top(spa, vdevid);
2503 vdev_indirect_births_t *vib = vd->vdev_indirect_births;
2504
2505 ASSERT3P(vd->vdev_ops, ==, &vdev_indirect_ops);
2506
2507 /*
2508 * If the removal did not remap any data, we don't care.
2509 */
2510 if (vdev_indirect_births_count(vib) != 0) {
2511 ret = vdev_indirect_births_last_entry_txg(vib);
2512 break;
2513 }
2514
2515 vdevid = vd->vdev_indirect_config.vic_prev_indirect_vdev;
2516 }
2517 spa_config_exit(spa, SCL_VDEV, FTAG);
2518
2519 IMPLY(ret != -1ULL,
2520 spa_feature_is_active(spa, SPA_FEATURE_DEVICE_REMOVAL));
2521
2522 return (ret);
2523 }
2524
2525 boolean_t
2526 spa_trust_config(spa_t *spa)
2527 {
2528 return (spa->spa_trust_config);
2529 }
2530
2531 uint64_t
2532 spa_missing_tvds_allowed(spa_t *spa)
2533 {
2534 return (spa->spa_missing_tvds_allowed);
2535 }
2536
2537 space_map_t *
2538 spa_syncing_log_sm(spa_t *spa)
2539 {
2540 return (spa->spa_syncing_log_sm);
2541 }
2542
2543 void
2544 spa_set_missing_tvds(spa_t *spa, uint64_t missing)
2545 {
2546 spa->spa_missing_tvds = missing;
2547 }
2548
2549 boolean_t
2550 spa_top_vdevs_spacemap_addressable(spa_t *spa)
2551 {
2552 vdev_t *rvd = spa->spa_root_vdev;
2553 for (uint64_t c = 0; c < rvd->vdev_children; c++) {
2554 if (!vdev_is_spacemap_addressable(rvd->vdev_child[c]))
2555 return (B_FALSE);
2556 }
2557 return (B_TRUE);
2558 }
2559
2560 boolean_t
2561 spa_has_checkpoint(spa_t *spa)
2562 {
2563 return (spa->spa_checkpoint_txg != 0);
2564 }
2565
2566 boolean_t
2567 spa_importing_readonly_checkpoint(spa_t *spa)
2568 {
2569 return ((spa->spa_import_flags & ZFS_IMPORT_CHECKPOINT) &&
2570 spa->spa_mode == FREAD);
2571 }
2572
2573 uint64_t
2574 spa_min_claim_txg(spa_t *spa)
2575 {
2576 uint64_t checkpoint_txg = spa->spa_uberblock.ub_checkpoint_txg;
2577
2578 if (checkpoint_txg != 0)
2579 return (checkpoint_txg + 1);
2580
2581 return (spa->spa_first_txg);
2582 }
2583
2584 /*
2585 * If there is a checkpoint, async destroys may consume more space from
2586 * the pool instead of freeing it. In an attempt to save the pool from
2587 * getting suspended when it is about to run out of space, we stop
2588 * processing async destroys.
2589 */
2590 boolean_t
2591 spa_suspend_async_destroy(spa_t *spa)
2592 {
2593 dsl_pool_t *dp = spa_get_dsl(spa);
2594
2595 uint64_t unreserved = dsl_pool_unreserved_space(dp,
2596 ZFS_SPACE_CHECK_EXTRA_RESERVED);
2597 uint64_t used = dsl_dir_phys(dp->dp_root_dir)->dd_used_bytes;
2598 uint64_t avail = (unreserved > used) ? (unreserved - used) : 0;
2599
2600 if (spa_has_checkpoint(spa) && avail == 0)
2601 return (B_TRUE);
2602
2603 return (B_FALSE);
2604 }
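/*
 * Illustrative numbers for the check above (hypothetical): if the root
 * dsl_dir reports 900G used and the unreserved space under
 * ZFS_SPACE_CHECK_EXTRA_RESERVED is also 900G, then avail is 0 and, with a
 * checkpoint present, async destroys are paused until space is released.
 */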
2605
2606 /*
2607 * Generate a LUN expansion event. This routine does not use
2608 * ddi_log_sysevent() because that would require a dev_info_t, and we may not
2609 * have one available.
2610 */
2611 void
2612 zfs_post_dle_sysevent(const char *physpath)
2613 {
2614 #ifdef _KERNEL
2615 sysevent_t *ev = sysevent_alloc(EC_DEV_STATUS, ESC_DEV_DLE,
2616 SUNW_KERN_PUB "zfs", SE_SLEEP);
2617 sysevent_attr_list_t *attr = NULL;
2618 sysevent_id_t eid;
2619
2620 VERIFY(ev != NULL);
2621
2622 /*
2623 * The only attribute is the /devices path of the expanding device:
2624 */
2625 sysevent_value_t value = {
2626 .value_type = SE_DATA_TYPE_STRING,
2627 .value = {
2628 .sv_string = (char *)physpath,
2629 },
2630 };
2631 if (sysevent_add_attr(&attr, DEV_PHYS_PATH, &value, SE_SLEEP) != 0) {
2632 sysevent_free(ev);
2633 return;
2634 }
2635
2636 if (sysevent_attach_attributes(ev, attr) != 0) {
2637 sysevent_free_attr(attr);
2638 sysevent_free(ev);
2639 return;
2640 }
2641
2642 (void) log_sysevent(ev, SE_SLEEP, &eid);
2643 sysevent_free(ev);
2644 #endif
2645 }
2646