xref: /freebsd/sys/contrib/openzfs/module/zfs/spa.c (revision e2df9bb44109577475aeb186e7186ac040f9bde1)
1eda14cbcSMatt Macy /*
2eda14cbcSMatt Macy  * CDDL HEADER START
3eda14cbcSMatt Macy  *
4eda14cbcSMatt Macy  * The contents of this file are subject to the terms of the
5eda14cbcSMatt Macy  * Common Development and Distribution License (the "License").
6eda14cbcSMatt Macy  * You may not use this file except in compliance with the License.
7eda14cbcSMatt Macy  *
8eda14cbcSMatt Macy  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9271171e0SMartin Matuska  * or https://opensource.org/licenses/CDDL-1.0.
10eda14cbcSMatt Macy  * See the License for the specific language governing permissions
11eda14cbcSMatt Macy  * and limitations under the License.
12eda14cbcSMatt Macy  *
13eda14cbcSMatt Macy  * When distributing Covered Code, include this CDDL HEADER in each
14eda14cbcSMatt Macy  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15eda14cbcSMatt Macy  * If applicable, add the following below this CDDL HEADER, with the
16eda14cbcSMatt Macy  * fields enclosed by brackets "[]" replaced with your own identifying
17eda14cbcSMatt Macy  * information: Portions Copyright [yyyy] [name of copyright owner]
18eda14cbcSMatt Macy  *
19eda14cbcSMatt Macy  * CDDL HEADER END
20eda14cbcSMatt Macy  */
21eda14cbcSMatt Macy 
22eda14cbcSMatt Macy /*
23eda14cbcSMatt Macy  * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
24783d3ff6SMartin Matuska  * Copyright (c) 2011, 2024 by Delphix. All rights reserved.
25eda14cbcSMatt Macy  * Copyright (c) 2018, Nexenta Systems, Inc.  All rights reserved.
26eda14cbcSMatt Macy  * Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
27eda14cbcSMatt Macy  * Copyright 2013 Saso Kiselkov. All rights reserved.
28eda14cbcSMatt Macy  * Copyright (c) 2014 Integros [integros.com]
29eda14cbcSMatt Macy  * Copyright 2016 Toomas Soome <tsoome@me.com>
30eda14cbcSMatt Macy  * Copyright (c) 2016 Actifio, Inc. All rights reserved.
31eda14cbcSMatt Macy  * Copyright 2018 Joyent, Inc.
32eda14cbcSMatt Macy  * Copyright (c) 2017, 2019, Datto Inc. All rights reserved.
33eda14cbcSMatt Macy  * Copyright 2017 Joyent, Inc.
34eda14cbcSMatt Macy  * Copyright (c) 2017, Intel Corporation.
35ee36e25aSMartin Matuska  * Copyright (c) 2021, Colm Buckley <colm@tuatha.org>
364e8d558cSMartin Matuska  * Copyright (c) 2023 Hewlett Packard Enterprise Development LP.
37ce4dcb97SMartin Matuska  * Copyright (c) 2023, 2024, Klara Inc.
38eda14cbcSMatt Macy  */
39eda14cbcSMatt Macy 
40eda14cbcSMatt Macy /*
41eda14cbcSMatt Macy  * SPA: Storage Pool Allocator
42eda14cbcSMatt Macy  *
43eda14cbcSMatt Macy  * This file contains all the routines used when modifying on-disk SPA state.
44eda14cbcSMatt Macy  * This includes opening, importing, destroying, exporting a pool, and syncing a
45eda14cbcSMatt Macy  * pool.
46eda14cbcSMatt Macy  */
47eda14cbcSMatt Macy 
48eda14cbcSMatt Macy #include <sys/zfs_context.h>
49eda14cbcSMatt Macy #include <sys/fm/fs/zfs.h>
50eda14cbcSMatt Macy #include <sys/spa_impl.h>
51eda14cbcSMatt Macy #include <sys/zio.h>
52eda14cbcSMatt Macy #include <sys/zio_checksum.h>
53eda14cbcSMatt Macy #include <sys/dmu.h>
54eda14cbcSMatt Macy #include <sys/dmu_tx.h>
55eda14cbcSMatt Macy #include <sys/zap.h>
56eda14cbcSMatt Macy #include <sys/zil.h>
572a58b312SMartin Matuska #include <sys/brt.h>
58eda14cbcSMatt Macy #include <sys/ddt.h>
59eda14cbcSMatt Macy #include <sys/vdev_impl.h>
60eda14cbcSMatt Macy #include <sys/vdev_removal.h>
61eda14cbcSMatt Macy #include <sys/vdev_indirect_mapping.h>
62eda14cbcSMatt Macy #include <sys/vdev_indirect_births.h>
63eda14cbcSMatt Macy #include <sys/vdev_initialize.h>
64eda14cbcSMatt Macy #include <sys/vdev_rebuild.h>
65eda14cbcSMatt Macy #include <sys/vdev_trim.h>
66eda14cbcSMatt Macy #include <sys/vdev_disk.h>
67e716630dSMartin Matuska #include <sys/vdev_raidz.h>
687877fdebSMatt Macy #include <sys/vdev_draid.h>
69eda14cbcSMatt Macy #include <sys/metaslab.h>
70eda14cbcSMatt Macy #include <sys/metaslab_impl.h>
71eda14cbcSMatt Macy #include <sys/mmp.h>
72eda14cbcSMatt Macy #include <sys/uberblock_impl.h>
73eda14cbcSMatt Macy #include <sys/txg.h>
74eda14cbcSMatt Macy #include <sys/avl.h>
75eda14cbcSMatt Macy #include <sys/bpobj.h>
76eda14cbcSMatt Macy #include <sys/dmu_traverse.h>
77eda14cbcSMatt Macy #include <sys/dmu_objset.h>
78eda14cbcSMatt Macy #include <sys/unique.h>
79eda14cbcSMatt Macy #include <sys/dsl_pool.h>
80eda14cbcSMatt Macy #include <sys/dsl_dataset.h>
81eda14cbcSMatt Macy #include <sys/dsl_dir.h>
82eda14cbcSMatt Macy #include <sys/dsl_prop.h>
83eda14cbcSMatt Macy #include <sys/dsl_synctask.h>
84eda14cbcSMatt Macy #include <sys/fs/zfs.h>
85eda14cbcSMatt Macy #include <sys/arc.h>
86eda14cbcSMatt Macy #include <sys/callb.h>
87eda14cbcSMatt Macy #include <sys/systeminfo.h>
88eda14cbcSMatt Macy #include <sys/zfs_ioctl.h>
89eda14cbcSMatt Macy #include <sys/dsl_scan.h>
90eda14cbcSMatt Macy #include <sys/zfeature.h>
91eda14cbcSMatt Macy #include <sys/dsl_destroy.h>
92eda14cbcSMatt Macy #include <sys/zvol.h>
93eda14cbcSMatt Macy 
94eda14cbcSMatt Macy #ifdef	_KERNEL
95eda14cbcSMatt Macy #include <sys/fm/protocol.h>
96eda14cbcSMatt Macy #include <sys/fm/util.h>
97eda14cbcSMatt Macy #include <sys/callb.h>
98eda14cbcSMatt Macy #include <sys/zone.h>
99eda14cbcSMatt Macy #include <sys/vmsystm.h>
100eda14cbcSMatt Macy #endif	/* _KERNEL */
101eda14cbcSMatt Macy 
102eda14cbcSMatt Macy #include "zfs_prop.h"
103eda14cbcSMatt Macy #include "zfs_comutil.h"
10414c2e0a0SMartin Matuska #include <cityhash.h>
105eda14cbcSMatt Macy 
106eda14cbcSMatt Macy /*
1076c1e79dfSMartin Matuska  * spa_thread() existed on Illumos as a parent thread for the various worker
1086c1e79dfSMartin Matuska  * threads that actually run the pool, as a way to both reference the entire
1096c1e79dfSMartin Matuska  * pool work as a single object, and to share properties like scheduling
1106c1e79dfSMartin Matuska  * options. It has not yet been adapted to Linux or FreeBSD. This define is
1116c1e79dfSMartin Matuska  * used to mark related parts of the code to make things easier for the reader,
1126c1e79dfSMartin Matuska  * and to compile this code out. It can be removed when someone implements it,
1136c1e79dfSMartin Matuska  * moves it to some Illumos-specific place, or removes it entirely.
1146c1e79dfSMartin Matuska  */
1156c1e79dfSMartin Matuska #undef HAVE_SPA_THREAD
1166c1e79dfSMartin Matuska 
1176c1e79dfSMartin Matuska /*
1186c1e79dfSMartin Matuska  * The "System Duty Cycle" scheduling class is an Illumos feature to help
1196c1e79dfSMartin Matuska  * prevent CPU-intensive kernel threads from affecting latency on interactive
1206c1e79dfSMartin Matuska  * threads. It doesn't exist on Linux or FreeBSD, so the supporting code is
1216c1e79dfSMartin Matuska  * gated behind a define. On Illumos SDC depends on spa_thread(), but
1226c1e79dfSMartin Matuska  * spa_thread() also has other uses, so this is a separate define.
1236c1e79dfSMartin Matuska  */
1246c1e79dfSMartin Matuska #undef HAVE_SYSDC
1256c1e79dfSMartin Matuska 
1266c1e79dfSMartin Matuska /*
127eda14cbcSMatt Macy  * The interval, in seconds, at which failed configuration cache file writes
128eda14cbcSMatt Macy  * should be retried.
129eda14cbcSMatt Macy  */
130eda14cbcSMatt Macy int zfs_ccw_retry_interval = 300;
131eda14cbcSMatt Macy 
132eda14cbcSMatt Macy typedef enum zti_modes {
133eda14cbcSMatt Macy 	ZTI_MODE_FIXED,			/* value is # of threads (min 1) */
13416038816SMartin Matuska 	ZTI_MODE_SCALE,			/* Taskqs scale with CPUs. */
13514c2e0a0SMartin Matuska 	ZTI_MODE_SYNC,			/* sync thread assigned */
136eda14cbcSMatt Macy 	ZTI_MODE_NULL,			/* don't create a taskq */
137eda14cbcSMatt Macy 	ZTI_NMODES
138eda14cbcSMatt Macy } zti_modes_t;
139eda14cbcSMatt Macy 
140eda14cbcSMatt Macy #define	ZTI_P(n, q)	{ ZTI_MODE_FIXED, (n), (q) }
141eda14cbcSMatt Macy #define	ZTI_PCT(n)	{ ZTI_MODE_ONLINE_PERCENT, (n), 1 }
14216038816SMartin Matuska #define	ZTI_SCALE	{ ZTI_MODE_SCALE, 0, 1 }
14314c2e0a0SMartin Matuska #define	ZTI_SYNC	{ ZTI_MODE_SYNC, 0, 1 }
144eda14cbcSMatt Macy #define	ZTI_NULL	{ ZTI_MODE_NULL, 0, 0 }
145eda14cbcSMatt Macy 
146eda14cbcSMatt Macy #define	ZTI_N(n)	ZTI_P(n, 1)
147eda14cbcSMatt Macy #define	ZTI_ONE		ZTI_N(1)
148eda14cbcSMatt Macy 
149eda14cbcSMatt Macy typedef struct zio_taskq_info {
150eda14cbcSMatt Macy 	zti_modes_t zti_mode;
151eda14cbcSMatt Macy 	uint_t zti_value;
152eda14cbcSMatt Macy 	uint_t zti_count;
153eda14cbcSMatt Macy } zio_taskq_info_t;
154eda14cbcSMatt Macy 
155eda14cbcSMatt Macy static const char *const zio_taskq_types[ZIO_TASKQ_TYPES] = {
156eda14cbcSMatt Macy 	"iss", "iss_h", "int", "int_h"
157eda14cbcSMatt Macy };
158eda14cbcSMatt Macy 
159eda14cbcSMatt Macy /*
160eda14cbcSMatt Macy  * This table defines the taskq settings for each ZFS I/O type. When
161eda14cbcSMatt Macy  * initializing a pool, we use this table to create an appropriately sized
162eda14cbcSMatt Macy  * taskq. Some operations are low volume and therefore have a small, static
163eda14cbcSMatt Macy  * number of threads assigned to their taskqs using the ZTI_N(#) or ZTI_ONE
16414c2e0a0SMartin Matuska  * macros. Other operations process a large amount of data; the ZTI_SCALE
165eda14cbcSMatt Macy  * macro causes us to create a taskq oriented for throughput. Some operations
166eda14cbcSMatt Macy  * are so high frequency and short-lived that the taskq itself can become a
167eda14cbcSMatt Macy  * point of lock contention. The ZTI_P(#, #) macro indicates that we need an
168eda14cbcSMatt Macy  * additional degree of parallelism specified by the number of threads per-
169eda14cbcSMatt Macy  * taskq and the number of taskqs; when dispatching an event in this case, the
17014c2e0a0SMartin Matuska  * particular taskq is chosen at random. ZTI_SCALE uses a number of taskqs
17114c2e0a0SMartin Matuska  * that scales with the number of CPUs.
172eda14cbcSMatt Macy  *
173eda14cbcSMatt Macy  * The different taskq priorities are to handle the different contexts (issue
174aca928a5SMartin Matuska  * and interrupt) and then to reserve threads for high priority I/Os that
175aca928a5SMartin Matuska  * need to be handled with minimum delay.  The Illumos taskq has an unfair
176aca928a5SMartin Matuska  * TQ_FRONT implementation, so separate high-priority threads are used there.
177eda14cbcSMatt Macy  */
178b356da80SMartin Matuska static zio_taskq_info_t zio_taskqs[ZIO_TYPES][ZIO_TASKQ_TYPES] = {
179eda14cbcSMatt Macy 	/* ISSUE	ISSUE_HIGH	INTR		INTR_HIGH */
180eda14cbcSMatt Macy 	{ ZTI_ONE,	ZTI_NULL,	ZTI_ONE,	ZTI_NULL }, /* NULL */
18116038816SMartin Matuska 	{ ZTI_N(8),	ZTI_NULL,	ZTI_SCALE,	ZTI_NULL }, /* READ */
182aca928a5SMartin Matuska #ifdef illumos
18314c2e0a0SMartin Matuska 	{ ZTI_SYNC,	ZTI_N(5),	ZTI_SCALE,	ZTI_N(5) }, /* WRITE */
184aca928a5SMartin Matuska #else
185aca928a5SMartin Matuska 	{ ZTI_SYNC,	ZTI_NULL,	ZTI_SCALE,	ZTI_NULL }, /* WRITE */
186aca928a5SMartin Matuska #endif
18716038816SMartin Matuska 	{ ZTI_SCALE,	ZTI_NULL,	ZTI_ONE,	ZTI_NULL }, /* FREE */
188eda14cbcSMatt Macy 	{ ZTI_ONE,	ZTI_NULL,	ZTI_ONE,	ZTI_NULL }, /* CLAIM */
1891719886fSMartin Matuska 	{ ZTI_ONE,	ZTI_NULL,	ZTI_ONE,	ZTI_NULL }, /* FLUSH */
190eda14cbcSMatt Macy 	{ ZTI_N(4),	ZTI_NULL,	ZTI_ONE,	ZTI_NULL }, /* TRIM */
191eda14cbcSMatt Macy };
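
/*
 * Illustrative reading of the table above (editor's sketch, not part of the
 * original source): the READ row, { ZTI_N(8), ZTI_NULL, ZTI_SCALE, ZTI_NULL },
 * expands to one ISSUE taskq with 8 fixed threads, no ISSUE_HIGH taskq, a set
 * of INTR taskqs whose count scales with the number of CPUs, and no INTR_HIGH
 * taskq.
 */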
192eda14cbcSMatt Macy 
193eda14cbcSMatt Macy static void spa_sync_version(void *arg, dmu_tx_t *tx);
194eda14cbcSMatt Macy static void spa_sync_props(void *arg, dmu_tx_t *tx);
195eda14cbcSMatt Macy static boolean_t spa_has_active_shared_spare(spa_t *spa);
196a0b956f5SMartin Matuska static int spa_load_impl(spa_t *spa, spa_import_type_t type,
197a0b956f5SMartin Matuska     const char **ereport);
198eda14cbcSMatt Macy static void spa_vdev_resilver_done(spa_t *spa);
199eda14cbcSMatt Macy 
200b2526e8bSMartin Matuska /*
201b2526e8bSMartin Matuska  * Percentage of all CPUs that can be used by the metaslab preload taskq.
202b2526e8bSMartin Matuska  */
203b2526e8bSMartin Matuska static uint_t metaslab_preload_pct = 50;
204b2526e8bSMartin Matuska 
205e92ffd9bSMartin Matuska static uint_t	zio_taskq_batch_pct = 80;	  /* 1 thread per cpu in pset */
206e92ffd9bSMartin Matuska static uint_t	zio_taskq_batch_tpq;		  /* threads per taskq */
2076c1e79dfSMartin Matuska 
2086c1e79dfSMartin Matuska #ifdef HAVE_SYSDC
209e92ffd9bSMartin Matuska static const boolean_t	zio_taskq_sysdc = B_TRUE; /* use SDC scheduling class */
210e92ffd9bSMartin Matuska static const uint_t	zio_taskq_basedc = 80;	  /* base duty cycle */
2116c1e79dfSMartin Matuska #endif
212eda14cbcSMatt Macy 
2136c1e79dfSMartin Matuska #ifdef HAVE_SPA_THREAD
214e92ffd9bSMartin Matuska static const boolean_t spa_create_process = B_TRUE; /* no process => no sysdc */
2156c1e79dfSMartin Matuska #endif
216eda14cbcSMatt Macy 
217b985c9caSMartin Matuska static uint_t	zio_taskq_write_tpq = 16;
21814c2e0a0SMartin Matuska 
219eda14cbcSMatt Macy /*
220eda14cbcSMatt Macy  * Report any spa_load_verify errors found, but do not fail spa_load.
221eda14cbcSMatt Macy  * This is used by zdb to analyze non-idle pools.
222eda14cbcSMatt Macy  */
223eda14cbcSMatt Macy boolean_t	spa_load_verify_dryrun = B_FALSE;
224eda14cbcSMatt Macy 
225eda14cbcSMatt Macy /*
22681b22a98SMartin Matuska  * Allow reading spacemaps in case of readonly import (spa_mode == SPA_MODE_READ).
22781b22a98SMartin Matuska  * This is used by zdb for spacemap verification.
22881b22a98SMartin Matuska  */
22981b22a98SMartin Matuska boolean_t	spa_mode_readable_spacemaps = B_FALSE;
23081b22a98SMartin Matuska 
23181b22a98SMartin Matuska /*
232eda14cbcSMatt Macy  * This (illegal) pool name is used when temporarily importing a spa_t in order
233eda14cbcSMatt Macy  * to get the vdev stats associated with the imported devices.
234eda14cbcSMatt Macy  */
235eda14cbcSMatt Macy #define	TRYIMPORT_NAME	"$import"
236eda14cbcSMatt Macy 
237eda14cbcSMatt Macy /*
238eda14cbcSMatt Macy  * For debugging purposes: print out vdev tree during pool import.
239eda14cbcSMatt Macy  */
240e92ffd9bSMartin Matuska static int		spa_load_print_vdev_tree = B_FALSE;
241eda14cbcSMatt Macy 
242eda14cbcSMatt Macy /*
243eda14cbcSMatt Macy  * A non-zero value for zfs_max_missing_tvds means that we allow importing
244eda14cbcSMatt Macy  * pools with missing top-level vdevs. This is strictly intended for advanced
245eda14cbcSMatt Macy  * pool recovery cases since missing data is almost inevitable. Pools with
246eda14cbcSMatt Macy  * missing devices can only be imported read-only for safety reasons, and their
247eda14cbcSMatt Macy  * fail-mode will be automatically set to "continue".
248eda14cbcSMatt Macy  *
249eda14cbcSMatt Macy  * With 1 missing vdev we should be able to import the pool and mount all
250eda14cbcSMatt Macy  * datasets. User data that was not modified after the missing device has been
251eda14cbcSMatt Macy  * added should be recoverable. This means that snapshots created prior to the
252eda14cbcSMatt Macy  * addition of that device should be completely intact.
253eda14cbcSMatt Macy  *
254eda14cbcSMatt Macy  * With 2 missing vdevs, some datasets may fail to mount since there are
255eda14cbcSMatt Macy  * dataset statistics that are stored as regular metadata. Some data might be
256eda14cbcSMatt Macy  * recoverable if those vdevs were added recently.
257eda14cbcSMatt Macy  *
258eda14cbcSMatt Macy  * With 3 or more missing vdevs, the pool is severely damaged and MOS entries
259eda14cbcSMatt Macy  * may be missing entirely. Chances of data recovery are very low. Note that
260eda14cbcSMatt Macy  * there are also risks of performing an inadvertent rewind as we might be
261eda14cbcSMatt Macy  * missing all the vdevs with the latest uberblocks.
262eda14cbcSMatt Macy  */
263dbd5678dSMartin Matuska uint64_t	zfs_max_missing_tvds = 0;
264eda14cbcSMatt Macy 
265eda14cbcSMatt Macy /*
266eda14cbcSMatt Macy  * The parameters below are similar to zfs_max_missing_tvds but are only
267eda14cbcSMatt Macy  * intended for a preliminary open of the pool with an untrusted config which
268eda14cbcSMatt Macy  * might be incomplete or out-dated.
269eda14cbcSMatt Macy  *
270eda14cbcSMatt Macy  * We are more tolerant for pools opened from a cachefile since we could have
271eda14cbcSMatt Macy  * an out-dated cachefile where a device removal was not registered.
272eda14cbcSMatt Macy  * We could have set the limit arbitrarily high but in the case where devices
273eda14cbcSMatt Macy  * are really missing we would want to return the proper error codes; we chose
274eda14cbcSMatt Macy  * SPA_DVAS_PER_BP - 1 so that some copies of the MOS would still be available
275eda14cbcSMatt Macy  * and we get a chance to retrieve the trusted config.
276eda14cbcSMatt Macy  */
277eda14cbcSMatt Macy uint64_t	zfs_max_missing_tvds_cachefile = SPA_DVAS_PER_BP - 1;
278eda14cbcSMatt Macy 
279eda14cbcSMatt Macy /*
280eda14cbcSMatt Macy  * In the case where the config was assembled by scanning device paths (/dev/dsks
281eda14cbcSMatt Macy  * by default) we are less tolerant since all the existing devices should have
282eda14cbcSMatt Macy  * been detected and we want spa_load to return the right error codes.
283eda14cbcSMatt Macy  */
284eda14cbcSMatt Macy uint64_t	zfs_max_missing_tvds_scan = 0;
285eda14cbcSMatt Macy 
286eda14cbcSMatt Macy /*
287eda14cbcSMatt Macy  * Debugging aid that pauses spa_sync() towards the end.
288eda14cbcSMatt Macy  */
289e92ffd9bSMartin Matuska static const boolean_t	zfs_pause_spa_sync = B_FALSE;
290eda14cbcSMatt Macy 
291eda14cbcSMatt Macy /*
292eda14cbcSMatt Macy  * Variables to indicate that the livelist condense zthr func should wait at
293eda14cbcSMatt Macy  * certain points for the livelist to be removed; used to test condense/destroy races.
294eda14cbcSMatt Macy  */
295e92ffd9bSMartin Matuska static int zfs_livelist_condense_zthr_pause = 0;
296e92ffd9bSMartin Matuska static int zfs_livelist_condense_sync_pause = 0;
297eda14cbcSMatt Macy 
298eda14cbcSMatt Macy /*
299eda14cbcSMatt Macy  * Variables to track whether or not condense cancellation has been
300eda14cbcSMatt Macy  * triggered in testing.
301eda14cbcSMatt Macy  */
302e92ffd9bSMartin Matuska static int zfs_livelist_condense_sync_cancel = 0;
303e92ffd9bSMartin Matuska static int zfs_livelist_condense_zthr_cancel = 0;
304eda14cbcSMatt Macy 
305eda14cbcSMatt Macy /*
306eda14cbcSMatt Macy  * Variable to track whether or not extra ALLOC blkptrs were added to a
307eda14cbcSMatt Macy  * livelist entry while it was being condensed (caused by the way we track
308eda14cbcSMatt Macy  * remapped blkptrs in dbuf_remap_impl)
309eda14cbcSMatt Macy  */
310e92ffd9bSMartin Matuska static int zfs_livelist_condense_new_alloc = 0;
311eda14cbcSMatt Macy 
312eda14cbcSMatt Macy /*
313eda14cbcSMatt Macy  * ==========================================================================
314eda14cbcSMatt Macy  * SPA properties routines
315eda14cbcSMatt Macy  * ==========================================================================
316eda14cbcSMatt Macy  */
317eda14cbcSMatt Macy 
318eda14cbcSMatt Macy /*
319eda14cbcSMatt Macy  * Add a (source=src, propname=propval) list to an nvlist.
320eda14cbcSMatt Macy  */
321eda14cbcSMatt Macy static void
322a0b956f5SMartin Matuska spa_prop_add_list(nvlist_t *nvl, zpool_prop_t prop, const char *strval,
323eda14cbcSMatt Macy     uint64_t intval, zprop_source_t src)
324eda14cbcSMatt Macy {
325eda14cbcSMatt Macy 	const char *propname = zpool_prop_to_name(prop);
326eda14cbcSMatt Macy 	nvlist_t *propval;
327eda14cbcSMatt Macy 
32881b22a98SMartin Matuska 	propval = fnvlist_alloc();
32981b22a98SMartin Matuska 	fnvlist_add_uint64(propval, ZPROP_SOURCE, src);
330eda14cbcSMatt Macy 
331eda14cbcSMatt Macy 	if (strval != NULL)
33281b22a98SMartin Matuska 		fnvlist_add_string(propval, ZPROP_VALUE, strval);
333eda14cbcSMatt Macy 	else
33481b22a98SMartin Matuska 		fnvlist_add_uint64(propval, ZPROP_VALUE, intval);
335eda14cbcSMatt Macy 
33681b22a98SMartin Matuska 	fnvlist_add_nvlist(nvl, propname, propval);
337eda14cbcSMatt Macy 	nvlist_free(propval);
338eda14cbcSMatt Macy }
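
/*
 * Illustrative use of spa_prop_add_list() (editor's sketch; it mirrors the
 * calls made from spa_prop_get_config() later in this file), attaching the
 * pool size with no explicit source:
 *
 *	spa_prop_add_list(nv, ZPOOL_PROP_SIZE, NULL, size, ZPROP_SRC_NONE);
 */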
339eda14cbcSMatt Macy 
340ce4dcb97SMartin Matuska static int
341ce4dcb97SMartin Matuska spa_prop_add(spa_t *spa, const char *propname, nvlist_t *outnvl)
342ce4dcb97SMartin Matuska {
343ce4dcb97SMartin Matuska 	zpool_prop_t prop = zpool_name_to_prop(propname);
344ce4dcb97SMartin Matuska 	zprop_source_t src = ZPROP_SRC_NONE;
345ce4dcb97SMartin Matuska 	uint64_t intval;
346ce4dcb97SMartin Matuska 	int err;
347ce4dcb97SMartin Matuska 
348ce4dcb97SMartin Matuska 	/*
349ce4dcb97SMartin Matuska 	 * NB: Not all property lookups via this API require the spa
350ce4dcb97SMartin Matuska 	 * props lock, so the lookups that do need it must grab it here.
351ce4dcb97SMartin Matuska 	 */
352ce4dcb97SMartin Matuska 	switch (prop) {
353ce4dcb97SMartin Matuska 	case ZPOOL_PROP_DEDUPCACHED:
354ce4dcb97SMartin Matuska 		err = ddt_get_pool_dedup_cached(spa, &intval);
355ce4dcb97SMartin Matuska 		if (err != 0)
356ce4dcb97SMartin Matuska 			return (SET_ERROR(err));
357ce4dcb97SMartin Matuska 		break;
358ce4dcb97SMartin Matuska 	default:
359ce4dcb97SMartin Matuska 		return (SET_ERROR(EINVAL));
360ce4dcb97SMartin Matuska 	}
361ce4dcb97SMartin Matuska 
362ce4dcb97SMartin Matuska 	spa_prop_add_list(outnvl, prop, NULL, intval, src);
363ce4dcb97SMartin Matuska 
364ce4dcb97SMartin Matuska 	return (0);
365ce4dcb97SMartin Matuska }
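
/*
 * Illustrative use of spa_prop_add() (editor's sketch, not from the original
 * source): look up the ZPOOL_PROP_DEDUPCACHED value for a pool and append it
 * to an output nvlist:
 *
 *	error = spa_prop_add(spa,
 *	    zpool_prop_to_name(ZPOOL_PROP_DEDUPCACHED), outnvl);
 */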
366ce4dcb97SMartin Matuska 
367ce4dcb97SMartin Matuska int
368ce4dcb97SMartin Matuska spa_prop_get_nvlist(spa_t *spa, char **props, unsigned int n_props,
369d59a7618SRob Norris     nvlist_t *outnvl)
370ce4dcb97SMartin Matuska {
371ce4dcb97SMartin Matuska 	int err = 0;
372ce4dcb97SMartin Matuska 
373ce4dcb97SMartin Matuska 	if (props == NULL)
374ce4dcb97SMartin Matuska 		return (0);
375ce4dcb97SMartin Matuska 
376ce4dcb97SMartin Matuska 	for (unsigned int i = 0; i < n_props && err == 0; i++) {
377d59a7618SRob Norris 		err = spa_prop_add(spa, props[i], outnvl);
378ce4dcb97SMartin Matuska 	}
379ce4dcb97SMartin Matuska 
380ce4dcb97SMartin Matuska 	return (err);
381ce4dcb97SMartin Matuska }
382ce4dcb97SMartin Matuska 
383eda14cbcSMatt Macy /*
384c98ecfceSAllan Jude  * Add a user property (source=src, propname=propval) to an nvlist.
385c98ecfceSAllan Jude  */
386c98ecfceSAllan Jude static void
387c98ecfceSAllan Jude spa_prop_add_user(nvlist_t *nvl, const char *propname, char *strval,
388c98ecfceSAllan Jude     zprop_source_t src)
389c98ecfceSAllan Jude {
390c98ecfceSAllan Jude 	nvlist_t *propval;
391c98ecfceSAllan Jude 
392c98ecfceSAllan Jude 	VERIFY(nvlist_alloc(&propval, NV_UNIQUE_NAME, KM_SLEEP) == 0);
393c98ecfceSAllan Jude 	VERIFY(nvlist_add_uint64(propval, ZPROP_SOURCE, src) == 0);
394c98ecfceSAllan Jude 	VERIFY(nvlist_add_string(propval, ZPROP_VALUE, strval) == 0);
395c98ecfceSAllan Jude 	VERIFY(nvlist_add_nvlist(nvl, propname, propval) == 0);
396c98ecfceSAllan Jude 	nvlist_free(propval);
397c98ecfceSAllan Jude }
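
/*
 * Illustrative use of spa_prop_add_user() (editor's sketch; the property name
 * below is hypothetical — user pool property names take a colon-separated
 * module:property form):
 *
 *	spa_prop_add_user(nv, "org.example:note", strval, ZPROP_SRC_LOCAL);
 */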
398c98ecfceSAllan Jude 
399c98ecfceSAllan Jude /*
400eda14cbcSMatt Macy  * Get property values from the spa configuration.
401eda14cbcSMatt Macy  */
402eda14cbcSMatt Macy static void
403d59a7618SRob Norris spa_prop_get_config(spa_t *spa, nvlist_t *nv)
404eda14cbcSMatt Macy {
405eda14cbcSMatt Macy 	vdev_t *rvd = spa->spa_root_vdev;
406eda14cbcSMatt Macy 	dsl_pool_t *pool = spa->spa_dsl_pool;
407eda14cbcSMatt Macy 	uint64_t size, alloc, cap, version;
408eda14cbcSMatt Macy 	const zprop_source_t src = ZPROP_SRC_NONE;
409eda14cbcSMatt Macy 	spa_config_dirent_t *dp;
410eda14cbcSMatt Macy 	metaslab_class_t *mc = spa_normal_class(spa);
411eda14cbcSMatt Macy 
412eda14cbcSMatt Macy 	ASSERT(MUTEX_HELD(&spa->spa_props_lock));
413eda14cbcSMatt Macy 
414eda14cbcSMatt Macy 	if (rvd != NULL) {
415eda14cbcSMatt Macy 		alloc = metaslab_class_get_alloc(mc);
416eda14cbcSMatt Macy 		alloc += metaslab_class_get_alloc(spa_special_class(spa));
417eda14cbcSMatt Macy 		alloc += metaslab_class_get_alloc(spa_dedup_class(spa));
418184c1b94SMartin Matuska 		alloc += metaslab_class_get_alloc(spa_embedded_log_class(spa));
419eda14cbcSMatt Macy 
420eda14cbcSMatt Macy 		size = metaslab_class_get_space(mc);
421eda14cbcSMatt Macy 		size += metaslab_class_get_space(spa_special_class(spa));
422eda14cbcSMatt Macy 		size += metaslab_class_get_space(spa_dedup_class(spa));
423184c1b94SMartin Matuska 		size += metaslab_class_get_space(spa_embedded_log_class(spa));
424eda14cbcSMatt Macy 
425d59a7618SRob Norris 		spa_prop_add_list(nv, ZPOOL_PROP_NAME, spa_name(spa), 0, src);
426d59a7618SRob Norris 		spa_prop_add_list(nv, ZPOOL_PROP_SIZE, NULL, size, src);
427d59a7618SRob Norris 		spa_prop_add_list(nv, ZPOOL_PROP_ALLOCATED, NULL, alloc, src);
428d59a7618SRob Norris 		spa_prop_add_list(nv, ZPOOL_PROP_FREE, NULL,
429eda14cbcSMatt Macy 		    size - alloc, src);
430d59a7618SRob Norris 		spa_prop_add_list(nv, ZPOOL_PROP_CHECKPOINT, NULL,
431eda14cbcSMatt Macy 		    spa->spa_checkpoint_info.sci_dspace, src);
432eda14cbcSMatt Macy 
433d59a7618SRob Norris 		spa_prop_add_list(nv, ZPOOL_PROP_FRAGMENTATION, NULL,
434eda14cbcSMatt Macy 		    metaslab_class_fragmentation(mc), src);
435d59a7618SRob Norris 		spa_prop_add_list(nv, ZPOOL_PROP_EXPANDSZ, NULL,
436eda14cbcSMatt Macy 		    metaslab_class_expandable_space(mc), src);
437d59a7618SRob Norris 		spa_prop_add_list(nv, ZPOOL_PROP_READONLY, NULL,
438eda14cbcSMatt Macy 		    (spa_mode(spa) == SPA_MODE_READ), src);
439eda14cbcSMatt Macy 
440eda14cbcSMatt Macy 		cap = (size == 0) ? 0 : (alloc * 100 / size);
441d59a7618SRob Norris 		spa_prop_add_list(nv, ZPOOL_PROP_CAPACITY, NULL, cap, src);
442eda14cbcSMatt Macy 
443d59a7618SRob Norris 		spa_prop_add_list(nv, ZPOOL_PROP_DEDUPRATIO, NULL,
444eda14cbcSMatt Macy 		    ddt_get_pool_dedup_ratio(spa), src);
445d59a7618SRob Norris 		spa_prop_add_list(nv, ZPOOL_PROP_BCLONEUSED, NULL,
4462a58b312SMartin Matuska 		    brt_get_used(spa), src);
447d59a7618SRob Norris 		spa_prop_add_list(nv, ZPOOL_PROP_BCLONESAVED, NULL,
4482a58b312SMartin Matuska 		    brt_get_saved(spa), src);
449d59a7618SRob Norris 		spa_prop_add_list(nv, ZPOOL_PROP_BCLONERATIO, NULL,
4502a58b312SMartin Matuska 		    brt_get_ratio(spa), src);
451eda14cbcSMatt Macy 
452d59a7618SRob Norris 		spa_prop_add_list(nv, ZPOOL_PROP_DEDUP_TABLE_SIZE, NULL,
453ce4dcb97SMartin Matuska 		    ddt_get_ddt_dsize(spa), src);
454ce4dcb97SMartin Matuska 
455d59a7618SRob Norris 		spa_prop_add_list(nv, ZPOOL_PROP_HEALTH, NULL,
456eda14cbcSMatt Macy 		    rvd->vdev_state, src);
457eda14cbcSMatt Macy 
458eda14cbcSMatt Macy 		version = spa_version(spa);
459eda14cbcSMatt Macy 		if (version == zpool_prop_default_numeric(ZPOOL_PROP_VERSION)) {
460d59a7618SRob Norris 			spa_prop_add_list(nv, ZPOOL_PROP_VERSION, NULL,
461eda14cbcSMatt Macy 			    version, ZPROP_SRC_DEFAULT);
462eda14cbcSMatt Macy 		} else {
463d59a7618SRob Norris 			spa_prop_add_list(nv, ZPOOL_PROP_VERSION, NULL,
464eda14cbcSMatt Macy 			    version, ZPROP_SRC_LOCAL);
465eda14cbcSMatt Macy 		}
466d59a7618SRob Norris 		spa_prop_add_list(nv, ZPOOL_PROP_LOAD_GUID,
467eda14cbcSMatt Macy 		    NULL, spa_load_guid(spa), src);
468eda14cbcSMatt Macy 	}
469eda14cbcSMatt Macy 
470eda14cbcSMatt Macy 	if (pool != NULL) {
471eda14cbcSMatt Macy 		/*
472eda14cbcSMatt Macy 		 * The $FREE directory was introduced in SPA_VERSION_DEADLISTS;
473eda14cbcSMatt Macy 		 * when opening pools from before this version, freedir will be NULL.
474eda14cbcSMatt Macy 		 */
475eda14cbcSMatt Macy 		if (pool->dp_free_dir != NULL) {
476d59a7618SRob Norris 			spa_prop_add_list(nv, ZPOOL_PROP_FREEING, NULL,
477eda14cbcSMatt Macy 			    dsl_dir_phys(pool->dp_free_dir)->dd_used_bytes,
478eda14cbcSMatt Macy 			    src);
479eda14cbcSMatt Macy 		} else {
480d59a7618SRob Norris 			spa_prop_add_list(nv, ZPOOL_PROP_FREEING,
481eda14cbcSMatt Macy 			    NULL, 0, src);
482eda14cbcSMatt Macy 		}
483eda14cbcSMatt Macy 
484eda14cbcSMatt Macy 		if (pool->dp_leak_dir != NULL) {
485d59a7618SRob Norris 			spa_prop_add_list(nv, ZPOOL_PROP_LEAKED, NULL,
486eda14cbcSMatt Macy 			    dsl_dir_phys(pool->dp_leak_dir)->dd_used_bytes,
487eda14cbcSMatt Macy 			    src);
488eda14cbcSMatt Macy 		} else {
489d59a7618SRob Norris 			spa_prop_add_list(nv, ZPOOL_PROP_LEAKED,
490eda14cbcSMatt Macy 			    NULL, 0, src);
491eda14cbcSMatt Macy 		}
492eda14cbcSMatt Macy 	}
493eda14cbcSMatt Macy 
494d59a7618SRob Norris 	spa_prop_add_list(nv, ZPOOL_PROP_GUID, NULL, spa_guid(spa), src);
495eda14cbcSMatt Macy 
496eda14cbcSMatt Macy 	if (spa->spa_comment != NULL) {
497d59a7618SRob Norris 		spa_prop_add_list(nv, ZPOOL_PROP_COMMENT, spa->spa_comment,
498eda14cbcSMatt Macy 		    0, ZPROP_SRC_LOCAL);
499eda14cbcSMatt Macy 	}
500eda14cbcSMatt Macy 
501ee36e25aSMartin Matuska 	if (spa->spa_compatibility != NULL) {
502d59a7618SRob Norris 		spa_prop_add_list(nv, ZPOOL_PROP_COMPATIBILITY,
503ee36e25aSMartin Matuska 		    spa->spa_compatibility, 0, ZPROP_SRC_LOCAL);
504ee36e25aSMartin Matuska 	}
505ee36e25aSMartin Matuska 
506eda14cbcSMatt Macy 	if (spa->spa_root != NULL)
507d59a7618SRob Norris 		spa_prop_add_list(nv, ZPOOL_PROP_ALTROOT, spa->spa_root,
508eda14cbcSMatt Macy 		    0, ZPROP_SRC_LOCAL);
509eda14cbcSMatt Macy 
510eda14cbcSMatt Macy 	if (spa_feature_is_enabled(spa, SPA_FEATURE_LARGE_BLOCKS)) {
511d59a7618SRob Norris 		spa_prop_add_list(nv, ZPOOL_PROP_MAXBLOCKSIZE, NULL,
512eda14cbcSMatt Macy 		    MIN(zfs_max_recordsize, SPA_MAXBLOCKSIZE), ZPROP_SRC_NONE);
513eda14cbcSMatt Macy 	} else {
514d59a7618SRob Norris 		spa_prop_add_list(nv, ZPOOL_PROP_MAXBLOCKSIZE, NULL,
515eda14cbcSMatt Macy 		    SPA_OLD_MAXBLOCKSIZE, ZPROP_SRC_NONE);
516eda14cbcSMatt Macy 	}
517eda14cbcSMatt Macy 
518eda14cbcSMatt Macy 	if (spa_feature_is_enabled(spa, SPA_FEATURE_LARGE_DNODE)) {
519d59a7618SRob Norris 		spa_prop_add_list(nv, ZPOOL_PROP_MAXDNODESIZE, NULL,
520eda14cbcSMatt Macy 		    DNODE_MAX_SIZE, ZPROP_SRC_NONE);
521eda14cbcSMatt Macy 	} else {
522d59a7618SRob Norris 		spa_prop_add_list(nv, ZPOOL_PROP_MAXDNODESIZE, NULL,
523eda14cbcSMatt Macy 		    DNODE_MIN_SIZE, ZPROP_SRC_NONE);
524eda14cbcSMatt Macy 	}
525eda14cbcSMatt Macy 
526eda14cbcSMatt Macy 	if ((dp = list_head(&spa->spa_config_list)) != NULL) {
527eda14cbcSMatt Macy 		if (dp->scd_path == NULL) {
528d59a7618SRob Norris 			spa_prop_add_list(nv, ZPOOL_PROP_CACHEFILE,
529eda14cbcSMatt Macy 			    "none", 0, ZPROP_SRC_LOCAL);
530eda14cbcSMatt Macy 		} else if (strcmp(dp->scd_path, spa_config_path) != 0) {
531d59a7618SRob Norris 			spa_prop_add_list(nv, ZPOOL_PROP_CACHEFILE,
532eda14cbcSMatt Macy 			    dp->scd_path, 0, ZPROP_SRC_LOCAL);
533eda14cbcSMatt Macy 		}
534eda14cbcSMatt Macy 	}
535eda14cbcSMatt Macy }
536eda14cbcSMatt Macy 
537eda14cbcSMatt Macy /*
538eda14cbcSMatt Macy  * Get zpool property values.
539eda14cbcSMatt Macy  */
540eda14cbcSMatt Macy int
541d59a7618SRob Norris spa_prop_get(spa_t *spa, nvlist_t *nv)
542eda14cbcSMatt Macy {
543eda14cbcSMatt Macy 	objset_t *mos = spa->spa_meta_objset;
544eda14cbcSMatt Macy 	zap_cursor_t zc;
545eda14cbcSMatt Macy 	zap_attribute_t za;
546eda14cbcSMatt Macy 	dsl_pool_t *dp;
547d59a7618SRob Norris 	int err = 0;
548eda14cbcSMatt Macy 
549eda14cbcSMatt Macy 	dp = spa_get_dsl(spa);
550eda14cbcSMatt Macy 	dsl_pool_config_enter(dp, FTAG);
551eda14cbcSMatt Macy 	mutex_enter(&spa->spa_props_lock);
552eda14cbcSMatt Macy 
553eda14cbcSMatt Macy 	/*
554eda14cbcSMatt Macy 	 * Get properties from the spa config.
555eda14cbcSMatt Macy 	 */
556d59a7618SRob Norris 	spa_prop_get_config(spa, nv);
557eda14cbcSMatt Macy 
558eda14cbcSMatt Macy 	/* If no pool property object, no more prop to get. */
559eda14cbcSMatt Macy 	if (mos == NULL || spa->spa_pool_props_object == 0)
560eda14cbcSMatt Macy 		goto out;
561eda14cbcSMatt Macy 
562eda14cbcSMatt Macy 	/*
563eda14cbcSMatt Macy 	 * Get properties from the MOS pool property object.
564eda14cbcSMatt Macy 	 */
565eda14cbcSMatt Macy 	for (zap_cursor_init(&zc, mos, spa->spa_pool_props_object);
566eda14cbcSMatt Macy 	    (err = zap_cursor_retrieve(&zc, &za)) == 0;
567eda14cbcSMatt Macy 	    zap_cursor_advance(&zc)) {
568eda14cbcSMatt Macy 		uint64_t intval = 0;
569eda14cbcSMatt Macy 		char *strval = NULL;
570eda14cbcSMatt Macy 		zprop_source_t src = ZPROP_SRC_DEFAULT;
571eda14cbcSMatt Macy 		zpool_prop_t prop;
572eda14cbcSMatt Macy 
573c98ecfceSAllan Jude 		if ((prop = zpool_name_to_prop(za.za_name)) ==
574c98ecfceSAllan Jude 		    ZPOOL_PROP_INVAL && !zfs_prop_user(za.za_name))
575eda14cbcSMatt Macy 			continue;
576eda14cbcSMatt Macy 
577eda14cbcSMatt Macy 		switch (za.za_integer_length) {
578eda14cbcSMatt Macy 		case 8:
579eda14cbcSMatt Macy 			/* integer property */
580eda14cbcSMatt Macy 			if (za.za_first_integer !=
581eda14cbcSMatt Macy 			    zpool_prop_default_numeric(prop))
582eda14cbcSMatt Macy 				src = ZPROP_SRC_LOCAL;
583eda14cbcSMatt Macy 
584eda14cbcSMatt Macy 			if (prop == ZPOOL_PROP_BOOTFS) {
585eda14cbcSMatt Macy 				dsl_dataset_t *ds = NULL;
586eda14cbcSMatt Macy 
587eda14cbcSMatt Macy 				err = dsl_dataset_hold_obj(dp,
588eda14cbcSMatt Macy 				    za.za_first_integer, FTAG, &ds);
589eda14cbcSMatt Macy 				if (err != 0)
590eda14cbcSMatt Macy 					break;
591eda14cbcSMatt Macy 
592eda14cbcSMatt Macy 				strval = kmem_alloc(ZFS_MAX_DATASET_NAME_LEN,
593eda14cbcSMatt Macy 				    KM_SLEEP);
594eda14cbcSMatt Macy 				dsl_dataset_name(ds, strval);
595eda14cbcSMatt Macy 				dsl_dataset_rele(ds, FTAG);
596eda14cbcSMatt Macy 			} else {
597eda14cbcSMatt Macy 				strval = NULL;
598eda14cbcSMatt Macy 				intval = za.za_first_integer;
599eda14cbcSMatt Macy 			}
600eda14cbcSMatt Macy 
601d59a7618SRob Norris 			spa_prop_add_list(nv, prop, strval, intval, src);
602eda14cbcSMatt Macy 
603eda14cbcSMatt Macy 			if (strval != NULL)
604eda14cbcSMatt Macy 				kmem_free(strval, ZFS_MAX_DATASET_NAME_LEN);
605eda14cbcSMatt Macy 
606eda14cbcSMatt Macy 			break;
607eda14cbcSMatt Macy 
608eda14cbcSMatt Macy 		case 1:
609eda14cbcSMatt Macy 			/* string property */
610eda14cbcSMatt Macy 			strval = kmem_alloc(za.za_num_integers, KM_SLEEP);
611eda14cbcSMatt Macy 			err = zap_lookup(mos, spa->spa_pool_props_object,
612eda14cbcSMatt Macy 			    za.za_name, 1, za.za_num_integers, strval);
613eda14cbcSMatt Macy 			if (err) {
614eda14cbcSMatt Macy 				kmem_free(strval, za.za_num_integers);
615eda14cbcSMatt Macy 				break;
616eda14cbcSMatt Macy 			}
617c98ecfceSAllan Jude 			if (prop != ZPOOL_PROP_INVAL) {
618d59a7618SRob Norris 				spa_prop_add_list(nv, prop, strval, 0, src);
619c98ecfceSAllan Jude 			} else {
620c98ecfceSAllan Jude 				src = ZPROP_SRC_LOCAL;
621d59a7618SRob Norris 				spa_prop_add_user(nv, za.za_name, strval,
622c98ecfceSAllan Jude 				    src);
623c98ecfceSAllan Jude 			}
624eda14cbcSMatt Macy 			kmem_free(strval, za.za_num_integers);
625eda14cbcSMatt Macy 			break;
626eda14cbcSMatt Macy 
627eda14cbcSMatt Macy 		default:
628eda14cbcSMatt Macy 			break;
629eda14cbcSMatt Macy 		}
630eda14cbcSMatt Macy 	}
631eda14cbcSMatt Macy 	zap_cursor_fini(&zc);
632eda14cbcSMatt Macy out:
633eda14cbcSMatt Macy 	mutex_exit(&spa->spa_props_lock);
634eda14cbcSMatt Macy 	dsl_pool_config_exit(dp, FTAG);
635d59a7618SRob Norris 
636d59a7618SRob Norris 	if (err && err != ENOENT)
637eda14cbcSMatt Macy 		return (err);
638eda14cbcSMatt Macy 
639eda14cbcSMatt Macy 	return (0);
640eda14cbcSMatt Macy }
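
/*
 * Minimal calling sketch for spa_prop_get() (editor's illustration; it assumes
 * the caller supplies the output nvlist, as the signature above requires):
 *
 *	nvlist_t *nv = fnvlist_alloc();
 *	int err = spa_prop_get(spa, nv);
 *	...
 *	nvlist_free(nv);
 */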
641eda14cbcSMatt Macy 
642eda14cbcSMatt Macy /*
643eda14cbcSMatt Macy  * Validate the given pool properties nvlist and modify the list
644eda14cbcSMatt Macy  * for the property values to be set.
645eda14cbcSMatt Macy  */
646eda14cbcSMatt Macy static int
647eda14cbcSMatt Macy spa_prop_validate(spa_t *spa, nvlist_t *props)
648eda14cbcSMatt Macy {
649eda14cbcSMatt Macy 	nvpair_t *elem;
650eda14cbcSMatt Macy 	int error = 0, reset_bootfs = 0;
651eda14cbcSMatt Macy 	uint64_t objnum = 0;
652eda14cbcSMatt Macy 	boolean_t has_feature = B_FALSE;
653eda14cbcSMatt Macy 
654eda14cbcSMatt Macy 	elem = NULL;
655eda14cbcSMatt Macy 	while ((elem = nvlist_next_nvpair(props, elem)) != NULL) {
656eda14cbcSMatt Macy 		uint64_t intval;
6572a58b312SMartin Matuska 		const char *strval, *slash, *check, *fname;
658eda14cbcSMatt Macy 		const char *propname = nvpair_name(elem);
659eda14cbcSMatt Macy 		zpool_prop_t prop = zpool_name_to_prop(propname);
660eda14cbcSMatt Macy 
661eda14cbcSMatt Macy 		switch (prop) {
662eda14cbcSMatt Macy 		case ZPOOL_PROP_INVAL:
663eda14cbcSMatt Macy 			/*
664eda14cbcSMatt Macy 			 * Sanitize the input.
665eda14cbcSMatt Macy 			 */
666c98ecfceSAllan Jude 			if (zfs_prop_user(propname)) {
667c98ecfceSAllan Jude 				if (strlen(propname) >= ZAP_MAXNAMELEN) {
668c98ecfceSAllan Jude 					error = SET_ERROR(ENAMETOOLONG);
669c98ecfceSAllan Jude 					break;
670c98ecfceSAllan Jude 				}
671c98ecfceSAllan Jude 
672c98ecfceSAllan Jude 				if (strlen(fnvpair_value_string(elem)) >=
673c98ecfceSAllan Jude 				    ZAP_MAXVALUELEN) {
674c98ecfceSAllan Jude 					error = SET_ERROR(E2BIG);
675c98ecfceSAllan Jude 					break;
676c98ecfceSAllan Jude 				}
677c98ecfceSAllan Jude 			} else if (zpool_prop_feature(propname)) {
678eda14cbcSMatt Macy 				if (nvpair_type(elem) != DATA_TYPE_UINT64) {
679eda14cbcSMatt Macy 					error = SET_ERROR(EINVAL);
680eda14cbcSMatt Macy 					break;
681eda14cbcSMatt Macy 				}
682eda14cbcSMatt Macy 
683eda14cbcSMatt Macy 				if (nvpair_value_uint64(elem, &intval) != 0) {
684eda14cbcSMatt Macy 					error = SET_ERROR(EINVAL);
685eda14cbcSMatt Macy 					break;
686eda14cbcSMatt Macy 				}
687eda14cbcSMatt Macy 
688eda14cbcSMatt Macy 				if (intval != 0) {
689eda14cbcSMatt Macy 					error = SET_ERROR(EINVAL);
690eda14cbcSMatt Macy 					break;
691eda14cbcSMatt Macy 				}
692eda14cbcSMatt Macy 
693eda14cbcSMatt Macy 				fname = strchr(propname, '@') + 1;
694eda14cbcSMatt Macy 				if (zfeature_lookup_name(fname, NULL) != 0) {
695eda14cbcSMatt Macy 					error = SET_ERROR(EINVAL);
696eda14cbcSMatt Macy 					break;
697eda14cbcSMatt Macy 				}
698eda14cbcSMatt Macy 
699eda14cbcSMatt Macy 				has_feature = B_TRUE;
700c98ecfceSAllan Jude 			} else {
701c98ecfceSAllan Jude 				error = SET_ERROR(EINVAL);
702c98ecfceSAllan Jude 				break;
703c98ecfceSAllan Jude 			}
704eda14cbcSMatt Macy 			break;
705eda14cbcSMatt Macy 
706eda14cbcSMatt Macy 		case ZPOOL_PROP_VERSION:
707eda14cbcSMatt Macy 			error = nvpair_value_uint64(elem, &intval);
708eda14cbcSMatt Macy 			if (!error &&
709eda14cbcSMatt Macy 			    (intval < spa_version(spa) ||
710eda14cbcSMatt Macy 			    intval > SPA_VERSION_BEFORE_FEATURES ||
711eda14cbcSMatt Macy 			    has_feature))
712eda14cbcSMatt Macy 				error = SET_ERROR(EINVAL);
713eda14cbcSMatt Macy 			break;
714eda14cbcSMatt Macy 
715ce4dcb97SMartin Matuska 		case ZPOOL_PROP_DEDUP_TABLE_QUOTA:
716ce4dcb97SMartin Matuska 			error = nvpair_value_uint64(elem, &intval);
717ce4dcb97SMartin Matuska 			break;
718ce4dcb97SMartin Matuska 
719eda14cbcSMatt Macy 		case ZPOOL_PROP_DELEGATION:
720eda14cbcSMatt Macy 		case ZPOOL_PROP_AUTOREPLACE:
721eda14cbcSMatt Macy 		case ZPOOL_PROP_LISTSNAPS:
722eda14cbcSMatt Macy 		case ZPOOL_PROP_AUTOEXPAND:
723eda14cbcSMatt Macy 		case ZPOOL_PROP_AUTOTRIM:
724eda14cbcSMatt Macy 			error = nvpair_value_uint64(elem, &intval);
725eda14cbcSMatt Macy 			if (!error && intval > 1)
726eda14cbcSMatt Macy 				error = SET_ERROR(EINVAL);
727eda14cbcSMatt Macy 			break;
728eda14cbcSMatt Macy 
729eda14cbcSMatt Macy 		case ZPOOL_PROP_MULTIHOST:
730eda14cbcSMatt Macy 			error = nvpair_value_uint64(elem, &intval);
731eda14cbcSMatt Macy 			if (!error && intval > 1)
732eda14cbcSMatt Macy 				error = SET_ERROR(EINVAL);
733eda14cbcSMatt Macy 
734eda14cbcSMatt Macy 			if (!error) {
735eda14cbcSMatt Macy 				uint32_t hostid = zone_get_hostid(NULL);
736eda14cbcSMatt Macy 				if (hostid)
737eda14cbcSMatt Macy 					spa->spa_hostid = hostid;
738eda14cbcSMatt Macy 				else
739eda14cbcSMatt Macy 					error = SET_ERROR(ENOTSUP);
740eda14cbcSMatt Macy 			}
741eda14cbcSMatt Macy 
742eda14cbcSMatt Macy 			break;
743eda14cbcSMatt Macy 
744eda14cbcSMatt Macy 		case ZPOOL_PROP_BOOTFS:
745eda14cbcSMatt Macy 			/*
746eda14cbcSMatt Macy 			 * If the pool version is less than SPA_VERSION_BOOTFS,
747eda14cbcSMatt Macy 			 * or the pool is still being created (version == 0),
748eda14cbcSMatt Macy 			 * the bootfs property cannot be set.
749eda14cbcSMatt Macy 			 */
750eda14cbcSMatt Macy 			if (spa_version(spa) < SPA_VERSION_BOOTFS) {
751eda14cbcSMatt Macy 				error = SET_ERROR(ENOTSUP);
752eda14cbcSMatt Macy 				break;
753eda14cbcSMatt Macy 			}
754eda14cbcSMatt Macy 
755eda14cbcSMatt Macy 			/*
756eda14cbcSMatt Macy 			 * Make sure the vdev config is bootable
757eda14cbcSMatt Macy 			 */
758eda14cbcSMatt Macy 			if (!vdev_is_bootable(spa->spa_root_vdev)) {
759eda14cbcSMatt Macy 				error = SET_ERROR(ENOTSUP);
760eda14cbcSMatt Macy 				break;
761eda14cbcSMatt Macy 			}
762eda14cbcSMatt Macy 
763eda14cbcSMatt Macy 			reset_bootfs = 1;
764eda14cbcSMatt Macy 
765eda14cbcSMatt Macy 			error = nvpair_value_string(elem, &strval);
766eda14cbcSMatt Macy 
767eda14cbcSMatt Macy 			if (!error) {
768eda14cbcSMatt Macy 				objset_t *os;
769eda14cbcSMatt Macy 
770eda14cbcSMatt Macy 				if (strval == NULL || strval[0] == '\0') {
771eda14cbcSMatt Macy 					objnum = zpool_prop_default_numeric(
772eda14cbcSMatt Macy 					    ZPOOL_PROP_BOOTFS);
773eda14cbcSMatt Macy 					break;
774eda14cbcSMatt Macy 				}
775eda14cbcSMatt Macy 
776eda14cbcSMatt Macy 				error = dmu_objset_hold(strval, FTAG, &os);
777eda14cbcSMatt Macy 				if (error != 0)
778eda14cbcSMatt Macy 					break;
779eda14cbcSMatt Macy 
780eda14cbcSMatt Macy 				/* Must be ZPL. */
781eda14cbcSMatt Macy 				if (dmu_objset_type(os) != DMU_OST_ZFS) {
782eda14cbcSMatt Macy 					error = SET_ERROR(ENOTSUP);
783eda14cbcSMatt Macy 				} else {
784eda14cbcSMatt Macy 					objnum = dmu_objset_id(os);
785eda14cbcSMatt Macy 				}
786eda14cbcSMatt Macy 				dmu_objset_rele(os, FTAG);
787eda14cbcSMatt Macy 			}
788eda14cbcSMatt Macy 			break;
789eda14cbcSMatt Macy 
790eda14cbcSMatt Macy 		case ZPOOL_PROP_FAILUREMODE:
791eda14cbcSMatt Macy 			error = nvpair_value_uint64(elem, &intval);
792eda14cbcSMatt Macy 			if (!error && intval > ZIO_FAILURE_MODE_PANIC)
793eda14cbcSMatt Macy 				error = SET_ERROR(EINVAL);
794eda14cbcSMatt Macy 
795eda14cbcSMatt Macy 			/*
796eda14cbcSMatt Macy 			 * This is a special case which only occurs when
797eda14cbcSMatt Macy 			 * the pool has completely failed. This allows
798eda14cbcSMatt Macy 			 * the user to change the in-core failmode property
799eda14cbcSMatt Macy 			 * without syncing it out to disk (I/Os might
800eda14cbcSMatt Macy 			 * currently be blocked). We do this by returning
801eda14cbcSMatt Macy 			 * EIO to the caller (spa_prop_set) to trick it
802eda14cbcSMatt Macy 			 * into thinking we encountered a property validation
803eda14cbcSMatt Macy 			 * error.
804eda14cbcSMatt Macy 			 */
805eda14cbcSMatt Macy 			if (!error && spa_suspended(spa)) {
806eda14cbcSMatt Macy 				spa->spa_failmode = intval;
807eda14cbcSMatt Macy 				error = SET_ERROR(EIO);
808eda14cbcSMatt Macy 			}
809eda14cbcSMatt Macy 			break;
810eda14cbcSMatt Macy 
811eda14cbcSMatt Macy 		case ZPOOL_PROP_CACHEFILE:
812eda14cbcSMatt Macy 			if ((error = nvpair_value_string(elem, &strval)) != 0)
813eda14cbcSMatt Macy 				break;
814eda14cbcSMatt Macy 
815eda14cbcSMatt Macy 			if (strval[0] == '\0')
816eda14cbcSMatt Macy 				break;
817eda14cbcSMatt Macy 
818eda14cbcSMatt Macy 			if (strcmp(strval, "none") == 0)
819eda14cbcSMatt Macy 				break;
820eda14cbcSMatt Macy 
821eda14cbcSMatt Macy 			if (strval[0] != '/') {
822eda14cbcSMatt Macy 				error = SET_ERROR(EINVAL);
823eda14cbcSMatt Macy 				break;
824eda14cbcSMatt Macy 			}
825eda14cbcSMatt Macy 
826eda14cbcSMatt Macy 			slash = strrchr(strval, '/');
827eda14cbcSMatt Macy 			ASSERT(slash != NULL);
828eda14cbcSMatt Macy 
829eda14cbcSMatt Macy 			if (slash[1] == '\0' || strcmp(slash, "/.") == 0 ||
830eda14cbcSMatt Macy 			    strcmp(slash, "/..") == 0)
831eda14cbcSMatt Macy 				error = SET_ERROR(EINVAL);
832eda14cbcSMatt Macy 			break;
833eda14cbcSMatt Macy 
834eda14cbcSMatt Macy 		case ZPOOL_PROP_COMMENT:
835eda14cbcSMatt Macy 			if ((error = nvpair_value_string(elem, &strval)) != 0)
836eda14cbcSMatt Macy 				break;
837eda14cbcSMatt Macy 			for (check = strval; *check != '\0'; check++) {
838eda14cbcSMatt Macy 				if (!isprint(*check)) {
839eda14cbcSMatt Macy 					error = SET_ERROR(EINVAL);
840eda14cbcSMatt Macy 					break;
841eda14cbcSMatt Macy 				}
842eda14cbcSMatt Macy 			}
843eda14cbcSMatt Macy 			if (strlen(strval) > ZPROP_MAX_COMMENT)
844eda14cbcSMatt Macy 				error = SET_ERROR(E2BIG);
845eda14cbcSMatt Macy 			break;
846eda14cbcSMatt Macy 
847eda14cbcSMatt Macy 		default:
848eda14cbcSMatt Macy 			break;
849eda14cbcSMatt Macy 		}
850eda14cbcSMatt Macy 
851eda14cbcSMatt Macy 		if (error)
852eda14cbcSMatt Macy 			break;
853eda14cbcSMatt Macy 	}
854eda14cbcSMatt Macy 
855eda14cbcSMatt Macy 	(void) nvlist_remove_all(props,
856eda14cbcSMatt Macy 	    zpool_prop_to_name(ZPOOL_PROP_DEDUPDITTO));
857eda14cbcSMatt Macy 
858eda14cbcSMatt Macy 	if (!error && reset_bootfs) {
859eda14cbcSMatt Macy 		error = nvlist_remove(props,
860eda14cbcSMatt Macy 		    zpool_prop_to_name(ZPOOL_PROP_BOOTFS), DATA_TYPE_STRING);
861eda14cbcSMatt Macy 
862eda14cbcSMatt Macy 		if (!error) {
863eda14cbcSMatt Macy 			error = nvlist_add_uint64(props,
864eda14cbcSMatt Macy 			    zpool_prop_to_name(ZPOOL_PROP_BOOTFS), objnum);
865eda14cbcSMatt Macy 		}
866eda14cbcSMatt Macy 	}
867eda14cbcSMatt Macy 
868eda14cbcSMatt Macy 	return (error);
869eda14cbcSMatt Macy }
870eda14cbcSMatt Macy 
871eda14cbcSMatt Macy void
872eda14cbcSMatt Macy spa_configfile_set(spa_t *spa, nvlist_t *nvp, boolean_t need_sync)
873eda14cbcSMatt Macy {
8742a58b312SMartin Matuska 	const char *cachefile;
875eda14cbcSMatt Macy 	spa_config_dirent_t *dp;
876eda14cbcSMatt Macy 
877eda14cbcSMatt Macy 	if (nvlist_lookup_string(nvp, zpool_prop_to_name(ZPOOL_PROP_CACHEFILE),
878eda14cbcSMatt Macy 	    &cachefile) != 0)
879eda14cbcSMatt Macy 		return;
880eda14cbcSMatt Macy 
881eda14cbcSMatt Macy 	dp = kmem_alloc(sizeof (spa_config_dirent_t),
882eda14cbcSMatt Macy 	    KM_SLEEP);
883eda14cbcSMatt Macy 
884eda14cbcSMatt Macy 	if (cachefile[0] == '\0')
885eda14cbcSMatt Macy 		dp->scd_path = spa_strdup(spa_config_path);
886eda14cbcSMatt Macy 	else if (strcmp(cachefile, "none") == 0)
887eda14cbcSMatt Macy 		dp->scd_path = NULL;
888eda14cbcSMatt Macy 	else
889eda14cbcSMatt Macy 		dp->scd_path = spa_strdup(cachefile);
890eda14cbcSMatt Macy 
891eda14cbcSMatt Macy 	list_insert_head(&spa->spa_config_list, dp);
892eda14cbcSMatt Macy 	if (need_sync)
893eda14cbcSMatt Macy 		spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE);
894eda14cbcSMatt Macy }
895eda14cbcSMatt Macy 
896eda14cbcSMatt Macy int
897eda14cbcSMatt Macy spa_prop_set(spa_t *spa, nvlist_t *nvp)
898eda14cbcSMatt Macy {
899eda14cbcSMatt Macy 	int error;
900eda14cbcSMatt Macy 	nvpair_t *elem = NULL;
901eda14cbcSMatt Macy 	boolean_t need_sync = B_FALSE;
902eda14cbcSMatt Macy 
903eda14cbcSMatt Macy 	if ((error = spa_prop_validate(spa, nvp)) != 0)
904eda14cbcSMatt Macy 		return (error);
905eda14cbcSMatt Macy 
906eda14cbcSMatt Macy 	while ((elem = nvlist_next_nvpair(nvp, elem)) != NULL) {
907eda14cbcSMatt Macy 		zpool_prop_t prop = zpool_name_to_prop(nvpair_name(elem));
908eda14cbcSMatt Macy 
909eda14cbcSMatt Macy 		if (prop == ZPOOL_PROP_CACHEFILE ||
910eda14cbcSMatt Macy 		    prop == ZPOOL_PROP_ALTROOT ||
911eda14cbcSMatt Macy 		    prop == ZPOOL_PROP_READONLY)
912eda14cbcSMatt Macy 			continue;
913eda14cbcSMatt Macy 
914c98ecfceSAllan Jude 		if (prop == ZPOOL_PROP_INVAL &&
915c98ecfceSAllan Jude 		    zfs_prop_user(nvpair_name(elem))) {
916c98ecfceSAllan Jude 			need_sync = B_TRUE;
917c98ecfceSAllan Jude 			break;
918c98ecfceSAllan Jude 		}
919c98ecfceSAllan Jude 
920eda14cbcSMatt Macy 		if (prop == ZPOOL_PROP_VERSION || prop == ZPOOL_PROP_INVAL) {
921681ce946SMartin Matuska 			uint64_t ver = 0;
922eda14cbcSMatt Macy 
923eda14cbcSMatt Macy 			if (prop == ZPOOL_PROP_VERSION) {
924eda14cbcSMatt Macy 				VERIFY(nvpair_value_uint64(elem, &ver) == 0);
925eda14cbcSMatt Macy 			} else {
926eda14cbcSMatt Macy 				ASSERT(zpool_prop_feature(nvpair_name(elem)));
927eda14cbcSMatt Macy 				ver = SPA_VERSION_FEATURES;
928eda14cbcSMatt Macy 				need_sync = B_TRUE;
929eda14cbcSMatt Macy 			}
930eda14cbcSMatt Macy 
931eda14cbcSMatt Macy 			/* Save time if the version is already set. */
932eda14cbcSMatt Macy 			if (ver == spa_version(spa))
933eda14cbcSMatt Macy 				continue;
934eda14cbcSMatt Macy 
935eda14cbcSMatt Macy 			/*
936eda14cbcSMatt Macy 			 * In addition to the pool directory object, we might
937eda14cbcSMatt Macy 			 * create the pool properties object, the features for
938eda14cbcSMatt Macy 			 * read object, the features for write object, or the
939eda14cbcSMatt Macy 			 * feature descriptions object.
940eda14cbcSMatt Macy 			 */
941eda14cbcSMatt Macy 			error = dsl_sync_task(spa->spa_name, NULL,
942eda14cbcSMatt Macy 			    spa_sync_version, &ver,
943eda14cbcSMatt Macy 			    6, ZFS_SPACE_CHECK_RESERVED);
944eda14cbcSMatt Macy 			if (error)
945eda14cbcSMatt Macy 				return (error);
946eda14cbcSMatt Macy 			continue;
947eda14cbcSMatt Macy 		}
948eda14cbcSMatt Macy 
949eda14cbcSMatt Macy 		need_sync = B_TRUE;
950eda14cbcSMatt Macy 		break;
951eda14cbcSMatt Macy 	}
952eda14cbcSMatt Macy 
953eda14cbcSMatt Macy 	if (need_sync) {
954eda14cbcSMatt Macy 		return (dsl_sync_task(spa->spa_name, NULL, spa_sync_props,
955eda14cbcSMatt Macy 		    nvp, 6, ZFS_SPACE_CHECK_RESERVED));
956eda14cbcSMatt Macy 	}
957eda14cbcSMatt Macy 
958eda14cbcSMatt Macy 	return (0);
959eda14cbcSMatt Macy }
960eda14cbcSMatt Macy 
961eda14cbcSMatt Macy /*
962eda14cbcSMatt Macy  * If the bootfs property value is dsobj, clear it.
963eda14cbcSMatt Macy  */
964eda14cbcSMatt Macy void
965eda14cbcSMatt Macy spa_prop_clear_bootfs(spa_t *spa, uint64_t dsobj, dmu_tx_t *tx)
966eda14cbcSMatt Macy {
967eda14cbcSMatt Macy 	if (spa->spa_bootfs == dsobj && spa->spa_pool_props_object != 0) {
968eda14cbcSMatt Macy 		VERIFY(zap_remove(spa->spa_meta_objset,
969eda14cbcSMatt Macy 		    spa->spa_pool_props_object,
970eda14cbcSMatt Macy 		    zpool_prop_to_name(ZPOOL_PROP_BOOTFS), tx) == 0);
971eda14cbcSMatt Macy 		spa->spa_bootfs = 0;
972eda14cbcSMatt Macy 	}
973eda14cbcSMatt Macy }
974eda14cbcSMatt Macy 
975eda14cbcSMatt Macy static int
976eda14cbcSMatt Macy spa_change_guid_check(void *arg, dmu_tx_t *tx)
977eda14cbcSMatt Macy {
978eda14cbcSMatt Macy 	uint64_t *newguid __maybe_unused = arg;
979eda14cbcSMatt Macy 	spa_t *spa = dmu_tx_pool(tx)->dp_spa;
980eda14cbcSMatt Macy 	vdev_t *rvd = spa->spa_root_vdev;
981eda14cbcSMatt Macy 	uint64_t vdev_state;
982eda14cbcSMatt Macy 
983eda14cbcSMatt Macy 	if (spa_feature_is_active(spa, SPA_FEATURE_POOL_CHECKPOINT)) {
984eda14cbcSMatt Macy 		int error = (spa_has_checkpoint(spa)) ?
985eda14cbcSMatt Macy 		    ZFS_ERR_CHECKPOINT_EXISTS : ZFS_ERR_DISCARDING_CHECKPOINT;
986eda14cbcSMatt Macy 		return (SET_ERROR(error));
987eda14cbcSMatt Macy 	}
988eda14cbcSMatt Macy 
989eda14cbcSMatt Macy 	spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
990eda14cbcSMatt Macy 	vdev_state = rvd->vdev_state;
991eda14cbcSMatt Macy 	spa_config_exit(spa, SCL_STATE, FTAG);
992eda14cbcSMatt Macy 
993eda14cbcSMatt Macy 	if (vdev_state != VDEV_STATE_HEALTHY)
994eda14cbcSMatt Macy 		return (SET_ERROR(ENXIO));
995eda14cbcSMatt Macy 
996eda14cbcSMatt Macy 	ASSERT3U(spa_guid(spa), !=, *newguid);
997eda14cbcSMatt Macy 
998eda14cbcSMatt Macy 	return (0);
999eda14cbcSMatt Macy }
1000eda14cbcSMatt Macy 
1001eda14cbcSMatt Macy static void
1002eda14cbcSMatt Macy spa_change_guid_sync(void *arg, dmu_tx_t *tx)
1003eda14cbcSMatt Macy {
1004eda14cbcSMatt Macy 	uint64_t *newguid = arg;
1005eda14cbcSMatt Macy 	spa_t *spa = dmu_tx_pool(tx)->dp_spa;
1006eda14cbcSMatt Macy 	uint64_t oldguid;
1007eda14cbcSMatt Macy 	vdev_t *rvd = spa->spa_root_vdev;
1008eda14cbcSMatt Macy 
1009eda14cbcSMatt Macy 	oldguid = spa_guid(spa);
1010eda14cbcSMatt Macy 
1011eda14cbcSMatt Macy 	spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
1012eda14cbcSMatt Macy 	rvd->vdev_guid = *newguid;
1013eda14cbcSMatt Macy 	rvd->vdev_guid_sum += (*newguid - oldguid);
1014eda14cbcSMatt Macy 	vdev_config_dirty(rvd);
1015eda14cbcSMatt Macy 	spa_config_exit(spa, SCL_STATE, FTAG);
1016eda14cbcSMatt Macy 
1017eda14cbcSMatt Macy 	spa_history_log_internal(spa, "guid change", tx, "old=%llu new=%llu",
1018eda14cbcSMatt Macy 	    (u_longlong_t)oldguid, (u_longlong_t)*newguid);
1019eda14cbcSMatt Macy }
1020eda14cbcSMatt Macy 
1021eda14cbcSMatt Macy /*
1022eda14cbcSMatt Macy  * Change the GUID for the pool.  This is done so that we can later
1023eda14cbcSMatt Macy  * re-import a pool built from a clone of our own vdevs.  We will modify
1024eda14cbcSMatt Macy  * the root vdev's guid, our own pool guid, and then mark all of our
1025eda14cbcSMatt Macy  * vdevs dirty.  Note that we must make sure that all our vdevs are
1026eda14cbcSMatt Macy  * online when we do this, or else any vdevs that weren't present
1027eda14cbcSMatt Macy  * would be orphaned from our pool.  We are also going to issue a
1028eda14cbcSMatt Macy  * sysevent to update any watchers.
1029*e2df9bb4SMartin Matuska  *
1030*e2df9bb4SMartin Matuska  * The GUID of the pool will be changed to the value pointed to by guidp.
1031*e2df9bb4SMartin Matuska  * The GUID may not be set to the reserved value of 0.
1032*e2df9bb4SMartin Matuska  * A new GUID will be generated if guidp is NULL.
1033eda14cbcSMatt Macy  */
1034eda14cbcSMatt Macy int
1035*e2df9bb4SMartin Matuska spa_change_guid(spa_t *spa, const uint64_t *guidp)
1036eda14cbcSMatt Macy {
1037eda14cbcSMatt Macy 	uint64_t guid;
1038*e2df9bb4SMartin Matuska 	int error;
1039eda14cbcSMatt Macy 
1040eda14cbcSMatt Macy 	mutex_enter(&spa->spa_vdev_top_lock);
1041eda14cbcSMatt Macy 	mutex_enter(&spa_namespace_lock);
1042*e2df9bb4SMartin Matuska 
1043*e2df9bb4SMartin Matuska 	if (guidp != NULL) {
1044*e2df9bb4SMartin Matuska 		guid = *guidp;
1045*e2df9bb4SMartin Matuska 		if (guid == 0) {
1046*e2df9bb4SMartin Matuska 			error = SET_ERROR(EINVAL);
1047*e2df9bb4SMartin Matuska 			goto out;
1048*e2df9bb4SMartin Matuska 		}
1049*e2df9bb4SMartin Matuska 
1050*e2df9bb4SMartin Matuska 		if (spa_guid_exists(guid, 0)) {
1051*e2df9bb4SMartin Matuska 			error = SET_ERROR(EEXIST);
1052*e2df9bb4SMartin Matuska 			goto out;
1053*e2df9bb4SMartin Matuska 		}
1054*e2df9bb4SMartin Matuska 	} else {
1055eda14cbcSMatt Macy 		guid = spa_generate_guid(NULL);
1056*e2df9bb4SMartin Matuska 	}
1057eda14cbcSMatt Macy 
1058eda14cbcSMatt Macy 	error = dsl_sync_task(spa->spa_name, spa_change_guid_check,
1059eda14cbcSMatt Macy 	    spa_change_guid_sync, &guid, 5, ZFS_SPACE_CHECK_RESERVED);
1060eda14cbcSMatt Macy 
1061eda14cbcSMatt Macy 	if (error == 0) {
1062be181ee2SMartin Matuska 		/*
1063be181ee2SMartin Matuska 		 * Clear the kobj flag from all the vdevs to allow
1064be181ee2SMartin Matuska 		 * vdev_cache_process_kobj_evt() to post events to all the
1065be181ee2SMartin Matuska 		 * vdevs since GUID is updated.
1066be181ee2SMartin Matuska 		 */
1067be181ee2SMartin Matuska 		vdev_clear_kobj_evt(spa->spa_root_vdev);
1068be181ee2SMartin Matuska 		for (int i = 0; i < spa->spa_l2cache.sav_count; i++)
1069be181ee2SMartin Matuska 			vdev_clear_kobj_evt(spa->spa_l2cache.sav_vdevs[i]);
1070be181ee2SMartin Matuska 
1071be181ee2SMartin Matuska 		spa_write_cachefile(spa, B_FALSE, B_TRUE, B_TRUE);
1072eda14cbcSMatt Macy 		spa_event_notify(spa, NULL, NULL, ESC_ZFS_POOL_REGUID);
1073eda14cbcSMatt Macy 	}
1074eda14cbcSMatt Macy 
1075*e2df9bb4SMartin Matuska out:
1076eda14cbcSMatt Macy 	mutex_exit(&spa_namespace_lock);
1077eda14cbcSMatt Macy 	mutex_exit(&spa->spa_vdev_top_lock);
1078eda14cbcSMatt Macy 
1079eda14cbcSMatt Macy 	return (error);
1080eda14cbcSMatt Macy }
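
/*
 * Usage sketch for spa_change_guid() (editor's illustration; the explicit
 * value below is hypothetical): pass NULL to have a new GUID generated, or
 * point guidp at a specific non-zero value to set it directly:
 *
 *	error = spa_change_guid(spa, NULL);
 *
 *	uint64_t guid = 0xdeadbeefcafeULL;
 *	error = spa_change_guid(spa, &guid);
 */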
1081eda14cbcSMatt Macy 
1082eda14cbcSMatt Macy /*
1083eda14cbcSMatt Macy  * ==========================================================================
1084eda14cbcSMatt Macy  * SPA state manipulation (open/create/destroy/import/export)
1085eda14cbcSMatt Macy  * ==========================================================================
1086eda14cbcSMatt Macy  */
1087eda14cbcSMatt Macy 
1088eda14cbcSMatt Macy static int
1089eda14cbcSMatt Macy spa_error_entry_compare(const void *a, const void *b)
1090eda14cbcSMatt Macy {
1091eda14cbcSMatt Macy 	const spa_error_entry_t *sa = (const spa_error_entry_t *)a;
1092eda14cbcSMatt Macy 	const spa_error_entry_t *sb = (const spa_error_entry_t *)b;
1093eda14cbcSMatt Macy 	int ret;
1094eda14cbcSMatt Macy 
1095eda14cbcSMatt Macy 	ret = memcmp(&sa->se_bookmark, &sb->se_bookmark,
1096eda14cbcSMatt Macy 	    sizeof (zbookmark_phys_t));
1097eda14cbcSMatt Macy 
1098eda14cbcSMatt Macy 	return (TREE_ISIGN(ret));
1099eda14cbcSMatt Macy }
1100eda14cbcSMatt Macy 
1101eda14cbcSMatt Macy /*
1102eda14cbcSMatt Macy  * Utility function which retrieves copies of the current logs and
1103eda14cbcSMatt Macy  * re-initializes them in the process.
1104eda14cbcSMatt Macy  */
1105eda14cbcSMatt Macy void
1106eda14cbcSMatt Macy spa_get_errlists(spa_t *spa, avl_tree_t *last, avl_tree_t *scrub)
1107eda14cbcSMatt Macy {
1108eda14cbcSMatt Macy 	ASSERT(MUTEX_HELD(&spa->spa_errlist_lock));
1109eda14cbcSMatt Macy 
1110da5137abSMartin Matuska 	memcpy(last, &spa->spa_errlist_last, sizeof (avl_tree_t));
1111da5137abSMartin Matuska 	memcpy(scrub, &spa->spa_errlist_scrub, sizeof (avl_tree_t));
1112eda14cbcSMatt Macy 
1113eda14cbcSMatt Macy 	avl_create(&spa->spa_errlist_scrub,
1114eda14cbcSMatt Macy 	    spa_error_entry_compare, sizeof (spa_error_entry_t),
1115eda14cbcSMatt Macy 	    offsetof(spa_error_entry_t, se_avl));
1116eda14cbcSMatt Macy 	avl_create(&spa->spa_errlist_last,
1117eda14cbcSMatt Macy 	    spa_error_entry_compare, sizeof (spa_error_entry_t),
1118eda14cbcSMatt Macy 	    offsetof(spa_error_entry_t, se_avl));
1119eda14cbcSMatt Macy }
1120eda14cbcSMatt Macy 
1121eda14cbcSMatt Macy static void
1122eda14cbcSMatt Macy spa_taskqs_init(spa_t *spa, zio_type_t t, zio_taskq_type_t q)
1123eda14cbcSMatt Macy {
1124eda14cbcSMatt Macy 	const zio_taskq_info_t *ztip = &zio_taskqs[t][q];
1125eda14cbcSMatt Macy 	enum zti_modes mode = ztip->zti_mode;
1126eda14cbcSMatt Macy 	uint_t value = ztip->zti_value;
1127eda14cbcSMatt Macy 	uint_t count = ztip->zti_count;
1128eda14cbcSMatt Macy 	spa_taskqs_t *tqs = &spa->spa_zio_taskq[t][q];
112916038816SMartin Matuska 	uint_t cpus, flags = TASKQ_DYNAMIC;
1130eda14cbcSMatt Macy 
1131eda14cbcSMatt Macy 	switch (mode) {
1132eda14cbcSMatt Macy 	case ZTI_MODE_FIXED:
113316038816SMartin Matuska 		ASSERT3U(value, >, 0);
1134eda14cbcSMatt Macy 		break;
1135eda14cbcSMatt Macy 
113614c2e0a0SMartin Matuska 	case ZTI_MODE_SYNC:
113714c2e0a0SMartin Matuska 
113814c2e0a0SMartin Matuska 		/*
1139b985c9caSMartin Matuska 		 * Create one wr_iss taskq for every 'zio_taskq_write_tpq' CPUs,
1140b985c9caSMartin Matuska 		 * not to exceed the number of spa allocators, and align the count to it.
114114c2e0a0SMartin Matuska 		 */
1142b985c9caSMartin Matuska 		cpus = MAX(1, boot_ncpus * zio_taskq_batch_pct / 100);
1143b985c9caSMartin Matuska 		count = MAX(1, cpus / MAX(1, zio_taskq_write_tpq));
114414c2e0a0SMartin Matuska 		count = MAX(count, (zio_taskq_batch_pct + 99) / 100);
114514c2e0a0SMartin Matuska 		count = MIN(count, spa->spa_alloc_count);
1146b985c9caSMartin Matuska 		while (spa->spa_alloc_count % count != 0 &&
1147b985c9caSMartin Matuska 		    spa->spa_alloc_count < count * 2)
1148b985c9caSMartin Matuska 			count--;
114914c2e0a0SMartin Matuska 
115014c2e0a0SMartin Matuska 		/*
115114c2e0a0SMartin Matuska 		 * zio_taskq_batch_pct is unbounded and may exceed 100%, but no
115214c2e0a0SMartin Matuska 		 * single taskq may have more threads than 100% of online cpus.
115314c2e0a0SMartin Matuska 		 */
115414c2e0a0SMartin Matuska 		value = (zio_taskq_batch_pct + count / 2) / count;
115514c2e0a0SMartin Matuska 		value = MIN(value, 100);
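		/*
		 * Illustrative example (hypothetical values): with 128 online
		 * CPUs, zio_taskq_batch_pct=80, zio_taskq_write_tpq=16 and 4
		 * allocators, cpus is 102 and the initial count of 6 taskqs is
		 * clamped to the 4 allocators; each taskq is then sized at
		 * value = (80 + 4/2) / 4 = 20 percent of the CPUs.
		 */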
1156eda14cbcSMatt Macy 		flags |= TASKQ_THREADS_CPU_PCT;
1157eda14cbcSMatt Macy 		break;
1158eda14cbcSMatt Macy 
115916038816SMartin Matuska 	case ZTI_MODE_SCALE:
116016038816SMartin Matuska 		flags |= TASKQ_THREADS_CPU_PCT;
116116038816SMartin Matuska 		/*
116216038816SMartin Matuska 		 * We want more taskqs to reduce lock contention, but we want
116316038816SMartin Matuska 		 * fewer for better request ordering and CPU utilization.
116416038816SMartin Matuska 		 */
116516038816SMartin Matuska 		cpus = MAX(1, boot_ncpus * zio_taskq_batch_pct / 100);
116616038816SMartin Matuska 		if (zio_taskq_batch_tpq > 0) {
116716038816SMartin Matuska 			count = MAX(1, (cpus + zio_taskq_batch_tpq / 2) /
116816038816SMartin Matuska 			    zio_taskq_batch_tpq);
116916038816SMartin Matuska 		} else {
117016038816SMartin Matuska 			/*
117116038816SMartin Matuska 			 * Prefer 6 threads per taskq, but no more taskqs
117216038816SMartin Matuska 			 * than threads in them on large systems. For 80%:
117316038816SMartin Matuska 			 *
117416038816SMartin Matuska 			 *                 taskq   taskq   total
117516038816SMartin Matuska 			 * cpus    taskqs  percent threads threads
117616038816SMartin Matuska 			 * ------- ------- ------- ------- -------
117716038816SMartin Matuska 			 * 1       1       80%     1       1
117816038816SMartin Matuska 			 * 2       1       80%     1       1
117916038816SMartin Matuska 			 * 4       1       80%     3       3
118016038816SMartin Matuska 			 * 8       2       40%     3       6
118116038816SMartin Matuska 			 * 16      3       27%     4       12
118216038816SMartin Matuska 			 * 32      5       16%     5       25
118316038816SMartin Matuska 			 * 64      7       11%     7       49
118416038816SMartin Matuska 			 * 128     10      8%      10      100
118516038816SMartin Matuska 			 * 256     14      6%      15      210
118616038816SMartin Matuska 			 */
118716038816SMartin Matuska 			count = 1 + cpus / 6;
118816038816SMartin Matuska 			while (count * count > cpus)
118916038816SMartin Matuska 				count--;
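			/*
			 * The loop above effectively computes
			 * count = MIN(1 + cpus / 6, floor(sqrt(cpus)))
			 * without needing a sqrt() call.
			 */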
119016038816SMartin Matuska 		}
119116038816SMartin Matuska 		/* Limit each taskq to no more than 100% to avoid triggering an assertion. */
119216038816SMartin Matuska 		count = MAX(count, (zio_taskq_batch_pct + 99) / 100);
119316038816SMartin Matuska 		value = (zio_taskq_batch_pct + count / 2) / count;
119416038816SMartin Matuska 		break;
119516038816SMartin Matuska 
119616038816SMartin Matuska 	case ZTI_MODE_NULL:
119716038816SMartin Matuska 		tqs->stqs_count = 0;
119816038816SMartin Matuska 		tqs->stqs_taskq = NULL;
119916038816SMartin Matuska 		return;
120016038816SMartin Matuska 
1201eda14cbcSMatt Macy 	default:
1202eda14cbcSMatt Macy 		panic("unrecognized mode for %s_%s taskq (%u:%u) in "
120314c2e0a0SMartin Matuska 		    "spa_taskqs_init()",
1204eda14cbcSMatt Macy 		    zio_type_name[t], zio_taskq_types[q], mode, value);
1205eda14cbcSMatt Macy 		break;
1206eda14cbcSMatt Macy 	}
1207eda14cbcSMatt Macy 
120816038816SMartin Matuska 	ASSERT3U(count, >, 0);
120916038816SMartin Matuska 	tqs->stqs_count = count;
121016038816SMartin Matuska 	tqs->stqs_taskq = kmem_alloc(count * sizeof (taskq_t *), KM_SLEEP);
121116038816SMartin Matuska 
1212eda14cbcSMatt Macy 	for (uint_t i = 0; i < count; i++) {
1213eda14cbcSMatt Macy 		taskq_t *tq;
1214eda14cbcSMatt Macy 		char name[32];
1215eda14cbcSMatt Macy 
121616038816SMartin Matuska 		if (count > 1)
121716038816SMartin Matuska 			(void) snprintf(name, sizeof (name), "%s_%s_%u",
121816038816SMartin Matuska 			    zio_type_name[t], zio_taskq_types[q], i);
121916038816SMartin Matuska 		else
1220eda14cbcSMatt Macy 			(void) snprintf(name, sizeof (name), "%s_%s",
1221eda14cbcSMatt Macy 			    zio_type_name[t], zio_taskq_types[q]);
1222eda14cbcSMatt Macy 
12236c1e79dfSMartin Matuska #ifdef HAVE_SYSDC
1224eda14cbcSMatt Macy 		if (zio_taskq_sysdc && spa->spa_proc != &p0) {
1225e92ffd9bSMartin Matuska 			(void) zio_taskq_basedc;
1226eda14cbcSMatt Macy 			tq = taskq_create_sysdc(name, value, 50, INT_MAX,
1227eda14cbcSMatt Macy 			    spa->spa_proc, zio_taskq_basedc, flags);
1228eda14cbcSMatt Macy 		} else {
12296c1e79dfSMartin Matuska #endif
1230eda14cbcSMatt Macy 			pri_t pri = maxclsyspri;
1231eda14cbcSMatt Macy 			/*
1232eda14cbcSMatt Macy 			 * The write issue taskq can be extremely CPU
1233eda14cbcSMatt Macy 			 * intensive.  Run it at slightly less important
12342c48331dSMatt Macy 			 * priority than the other taskqs.
12352c48331dSMatt Macy 			 *
12362c48331dSMatt Macy 			 * Under Linux and FreeBSD this means incrementing
12372c48331dSMatt Macy 			 * the priority value as opposed to platforms like
12382c48331dSMatt Macy 			 * illumos where it should be decremented.
12392c48331dSMatt Macy 			 *
12402c48331dSMatt Macy 			 * On FreeBSD, if priorities divided by four (RQ_PPQ)
12412c48331dSMatt Macy 			 * are equal then a difference between them is
12422c48331dSMatt Macy 			 * insignificant.
1243eda14cbcSMatt Macy 			 */
12442c48331dSMatt Macy 			if (t == ZIO_TYPE_WRITE && q == ZIO_TASKQ_ISSUE) {
12452c48331dSMatt Macy #if defined(__linux__)
1246eda14cbcSMatt Macy 				pri++;
12472c48331dSMatt Macy #elif defined(__FreeBSD__)
12482c48331dSMatt Macy 				pri += 4;
12492c48331dSMatt Macy #else
12502c48331dSMatt Macy #error "unknown OS"
12512c48331dSMatt Macy #endif
12522c48331dSMatt Macy 			}
1253eda14cbcSMatt Macy 			tq = taskq_create_proc(name, value, pri, 50,
1254eda14cbcSMatt Macy 			    INT_MAX, spa->spa_proc, flags);
12556c1e79dfSMartin Matuska #ifdef HAVE_SYSDC
1256eda14cbcSMatt Macy 		}
12576c1e79dfSMartin Matuska #endif
1258eda14cbcSMatt Macy 
1259eda14cbcSMatt Macy 		tqs->stqs_taskq[i] = tq;
1260eda14cbcSMatt Macy 	}
1261eda14cbcSMatt Macy }
1262eda14cbcSMatt Macy 
1263eda14cbcSMatt Macy static void
1264eda14cbcSMatt Macy spa_taskqs_fini(spa_t *spa, zio_type_t t, zio_taskq_type_t q)
1265eda14cbcSMatt Macy {
1266eda14cbcSMatt Macy 	spa_taskqs_t *tqs = &spa->spa_zio_taskq[t][q];
1267eda14cbcSMatt Macy 
1268eda14cbcSMatt Macy 	if (tqs->stqs_taskq == NULL) {
1269eda14cbcSMatt Macy 		ASSERT3U(tqs->stqs_count, ==, 0);
1270eda14cbcSMatt Macy 		return;
1271eda14cbcSMatt Macy 	}
1272eda14cbcSMatt Macy 
1273eda14cbcSMatt Macy 	for (uint_t i = 0; i < tqs->stqs_count; i++) {
1274eda14cbcSMatt Macy 		ASSERT3P(tqs->stqs_taskq[i], !=, NULL);
1275eda14cbcSMatt Macy 		taskq_destroy(tqs->stqs_taskq[i]);
1276eda14cbcSMatt Macy 	}
1277eda14cbcSMatt Macy 
1278eda14cbcSMatt Macy 	kmem_free(tqs->stqs_taskq, tqs->stqs_count * sizeof (taskq_t *));
1279eda14cbcSMatt Macy 	tqs->stqs_taskq = NULL;
1280eda14cbcSMatt Macy }
1281eda14cbcSMatt Macy 
1282b356da80SMartin Matuska #ifdef _KERNEL
1283b356da80SMartin Matuska /*
1284b356da80SMartin Matuska  * The READ and WRITE rows of zio_taskqs are configurable at module load time
1285b356da80SMartin Matuska  * by setting zio_taskq_read or zio_taskq_write.
1286b356da80SMartin Matuska  *
1287b356da80SMartin Matuska  * Example (the defaults for READ and WRITE)
1288b356da80SMartin Matuska  *   zio_taskq_read='fixed,1,8 null scale null'
1289aca928a5SMartin Matuska  *   zio_taskq_write='sync null scale null'
1290b356da80SMartin Matuska  *
1291b356da80SMartin Matuska  * Each sets the entire row at a time.
1292b356da80SMartin Matuska  * Each parameter sets its entire row at once.
1293b356da80SMartin Matuska  * 'fixed' is parameterised: fixed,Q,T where Q is number of taskqs, T is number
1294b356da80SMartin Matuska  * of threads per taskq.
1295b356da80SMartin Matuska  *
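 *
 * For instance (hypothetical), setting
 *   zio_taskq_write='fixed,4,16 null scale null'
 * would request four write-issue taskqs of 16 threads each while leaving
 * the interrupt queue in its default 'scale' mode.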
1296b356da80SMartin Matuska  * 'null' can only be set on the high-priority queues (queue selection for
1297b356da80SMartin Matuska  * high-priority queues will fall back to the regular queue if the high-pri
1298b356da80SMartin Matuska  * is NULL).
1299b356da80SMartin Matuska  */
1300b356da80SMartin Matuska static const char *const modes[ZTI_NMODES] = {
1301b356da80SMartin Matuska 	"fixed", "scale", "sync", "null"
1302b356da80SMartin Matuska };
1303b356da80SMartin Matuska 
1304b356da80SMartin Matuska /* Parse the incoming config string. Modifies cfg */
1305b356da80SMartin Matuska static int
1306b356da80SMartin Matuska spa_taskq_param_set(zio_type_t t, char *cfg)
1307b356da80SMartin Matuska {
1308b356da80SMartin Matuska 	int err = 0;
1309b356da80SMartin Matuska 
1310b356da80SMartin Matuska 	zio_taskq_info_t row[ZIO_TASKQ_TYPES] = {{0}};
1311b356da80SMartin Matuska 
1312b356da80SMartin Matuska 	char *next = cfg, *tok, *c;
1313b356da80SMartin Matuska 
1314b356da80SMartin Matuska 	/*
1315b356da80SMartin Matuska 	 * Parse out each element from the string and fill `row`. The entire
1316b356da80SMartin Matuska 	 * row has to be set at once, so any errors are flagged by just
1317b356da80SMartin Matuska 	 * breaking out of this loop early.
1318b356da80SMartin Matuska 	 */
1319b356da80SMartin Matuska 	uint_t q;
1320b356da80SMartin Matuska 	for (q = 0; q < ZIO_TASKQ_TYPES; q++) {
1321b356da80SMartin Matuska 		/* `next` is the start of the config */
1322b356da80SMartin Matuska 		if (next == NULL)
1323b356da80SMartin Matuska 			break;
1324b356da80SMartin Matuska 
1325b356da80SMartin Matuska 		/* Eat up leading space */
1326b356da80SMartin Matuska 		while (isspace(*next))
1327b356da80SMartin Matuska 			next++;
1328b356da80SMartin Matuska 		if (*next == '\0')
1329b356da80SMartin Matuska 			break;
1330b356da80SMartin Matuska 
1331b356da80SMartin Matuska 		/* Mode ends at space or end of string */
1332b356da80SMartin Matuska 		tok = next;
1333b356da80SMartin Matuska 		next = strchr(tok, ' ');
1334b356da80SMartin Matuska 		if (next != NULL) *next++ = '\0';
1335b356da80SMartin Matuska 
1336b356da80SMartin Matuska 		/* Parameters start after a comma */
1337b356da80SMartin Matuska 		c = strchr(tok, ',');
1338b356da80SMartin Matuska 		if (c != NULL) *c++ = '\0';
1339b356da80SMartin Matuska 
1340b356da80SMartin Matuska 		/* Match mode string */
1341b356da80SMartin Matuska 		uint_t mode;
1342b356da80SMartin Matuska 		for (mode = 0; mode < ZTI_NMODES; mode++)
1343b356da80SMartin Matuska 			if (strcmp(tok, modes[mode]) == 0)
1344b356da80SMartin Matuska 				break;
1345b356da80SMartin Matuska 		if (mode == ZTI_NMODES)
1346b356da80SMartin Matuska 			break;
1347b356da80SMartin Matuska 
1348b356da80SMartin Matuska 		/* Invalid canary */
1349b356da80SMartin Matuska 		row[q].zti_mode = ZTI_NMODES;
1350b356da80SMartin Matuska 
1351b356da80SMartin Matuska 		/* Per-mode setup */
1352b356da80SMartin Matuska 		switch (mode) {
1353b356da80SMartin Matuska 
1354b356da80SMartin Matuska 		/*
1355b356da80SMartin Matuska 		 * FIXED is parameterised: number of queues, and number of
1356b356da80SMartin Matuska 		 * threads per queue.
1357b356da80SMartin Matuska 		 */
1358b356da80SMartin Matuska 		case ZTI_MODE_FIXED: {
1359b356da80SMartin Matuska 			/* No parameters? */
1360b356da80SMartin Matuska 			if (c == NULL || *c == '\0')
1361b356da80SMartin Matuska 				break;
1362b356da80SMartin Matuska 
1363b356da80SMartin Matuska 			/* Find next parameter */
1364b356da80SMartin Matuska 			tok = c;
1365b356da80SMartin Matuska 			c = strchr(tok, ',');
1366b356da80SMartin Matuska 			if (c == NULL)
1367b356da80SMartin Matuska 				break;
1368b356da80SMartin Matuska 
1369b356da80SMartin Matuska 			/* Take digits and convert */
1370b356da80SMartin Matuska 			unsigned long long nq;
1371b356da80SMartin Matuska 			if (!(isdigit(*tok)))
1372b356da80SMartin Matuska 				break;
1373b356da80SMartin Matuska 			err = ddi_strtoull(tok, &tok, 10, &nq);
1374b356da80SMartin Matuska 			/* Must succeed and also end at the next param sep */
1375b356da80SMartin Matuska 			if (err != 0 || tok != c)
1376b356da80SMartin Matuska 				break;
1377b356da80SMartin Matuska 
1378b356da80SMartin Matuska 			/* Move past the comma */
1379b356da80SMartin Matuska 			tok++;
1380b356da80SMartin Matuska 			/* Need another number */
1381b356da80SMartin Matuska 			if (!(isdigit(*tok)))
1382b356da80SMartin Matuska 				break;
1383b356da80SMartin Matuska 			/* Remember start to make sure we moved */
1384b356da80SMartin Matuska 			c = tok;
1385b356da80SMartin Matuska 
1386b356da80SMartin Matuska 			/* Take digits */
1387b356da80SMartin Matuska 			unsigned long long ntpq;
1388b356da80SMartin Matuska 			err = ddi_strtoull(tok, &tok, 10, &ntpq);
1389b356da80SMartin Matuska 			/* Must succeed, and moved forward */
1390b356da80SMartin Matuska 			if (err != 0 || tok == c || *tok != '\0')
1391b356da80SMartin Matuska 				break;
1392b356da80SMartin Matuska 
1393b356da80SMartin Matuska 			/*
1394b356da80SMartin Matuska 			 * sanity; zero queues/threads make no sense, and
1395b356da80SMartin Matuska 			 * 16K is almost certainly more than anyone will ever
1396b356da80SMartin Matuska 			 * need and avoids silly numbers like UINT32_MAX
1397b356da80SMartin Matuska 			 */
1398b356da80SMartin Matuska 			if (nq == 0 || nq >= 16384 ||
1399b356da80SMartin Matuska 			    ntpq == 0 || ntpq >= 16384)
1400b356da80SMartin Matuska 				break;
1401b356da80SMartin Matuska 
1402b356da80SMartin Matuska 			const zio_taskq_info_t zti = ZTI_P(ntpq, nq);
1403b356da80SMartin Matuska 			row[q] = zti;
1404b356da80SMartin Matuska 			break;
1405b356da80SMartin Matuska 		}
1406b356da80SMartin Matuska 
1407b356da80SMartin Matuska 		case ZTI_MODE_SCALE: {
1408b356da80SMartin Matuska 			const zio_taskq_info_t zti = ZTI_SCALE;
1409b356da80SMartin Matuska 			row[q] = zti;
1410b356da80SMartin Matuska 			break;
1411b356da80SMartin Matuska 		}
1412b356da80SMartin Matuska 
1413b356da80SMartin Matuska 		case ZTI_MODE_SYNC: {
1414b356da80SMartin Matuska 			const zio_taskq_info_t zti = ZTI_SYNC;
1415b356da80SMartin Matuska 			row[q] = zti;
1416b356da80SMartin Matuska 			break;
1417b356da80SMartin Matuska 		}
1418b356da80SMartin Matuska 
1419b356da80SMartin Matuska 		case ZTI_MODE_NULL: {
1420b356da80SMartin Matuska 			/*
1421b356da80SMartin Matuska 			 * Can only null the high-priority queues; the general-
1422b356da80SMartin Matuska 			 * purpose ones have to exist.
1423b356da80SMartin Matuska 			 */
1424b356da80SMartin Matuska 			if (q != ZIO_TASKQ_ISSUE_HIGH &&
1425b356da80SMartin Matuska 			    q != ZIO_TASKQ_INTERRUPT_HIGH)
1426b356da80SMartin Matuska 				break;
1427b356da80SMartin Matuska 
1428b356da80SMartin Matuska 			const zio_taskq_info_t zti = ZTI_NULL;
1429b356da80SMartin Matuska 			row[q] = zti;
1430b356da80SMartin Matuska 			break;
1431b356da80SMartin Matuska 		}
1432b356da80SMartin Matuska 
1433b356da80SMartin Matuska 		default:
1434b356da80SMartin Matuska 			break;
1435b356da80SMartin Matuska 		}
1436b356da80SMartin Matuska 
1437b356da80SMartin Matuska 		/* Ensure we set a mode */
1438b356da80SMartin Matuska 		if (row[q].zti_mode == ZTI_NMODES)
1439b356da80SMartin Matuska 			break;
1440b356da80SMartin Matuska 	}
1441b356da80SMartin Matuska 
1442b356da80SMartin Matuska 	/* Didn't get a full row, fail */
1443b356da80SMartin Matuska 	if (q < ZIO_TASKQ_TYPES)
1444b356da80SMartin Matuska 		return (SET_ERROR(EINVAL));
1445b356da80SMartin Matuska 
1446b356da80SMartin Matuska 	/* Eat trailing space */
1447b356da80SMartin Matuska 	if (next != NULL)
1448b356da80SMartin Matuska 		while (isspace(*next))
1449b356da80SMartin Matuska 			next++;
1450b356da80SMartin Matuska 
1451b356da80SMartin Matuska 	/* If there's anything left over then fail */
1452b356da80SMartin Matuska 	if (next != NULL && *next != '\0')
1453b356da80SMartin Matuska 		return (SET_ERROR(EINVAL));
1454b356da80SMartin Matuska 
1455b356da80SMartin Matuska 	/* Success! Copy it into the real config */
1456b356da80SMartin Matuska 	for (q = 0; q < ZIO_TASKQ_TYPES; q++)
1457b356da80SMartin Matuska 		zio_taskqs[t][q] = row[q];
1458b356da80SMartin Matuska 
1459b356da80SMartin Matuska 	return (0);
1460b356da80SMartin Matuska }
1461b356da80SMartin Matuska 
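/*
 * Render the live zio_taskqs row for the given I/O type back into the same
 * string form accepted by spa_taskq_param_set() (e.g. "fixed,1,8 null scale
 * null"), optionally appending a newline, and return the number of
 * characters written to buf.
 */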
1462b356da80SMartin Matuska static int
146378ae60b4SMartin Matuska spa_taskq_param_get(zio_type_t t, char *buf, boolean_t add_newline)
1464b356da80SMartin Matuska {
1465b356da80SMartin Matuska 	int pos = 0;
1466b356da80SMartin Matuska 
1467b356da80SMartin Matuska 	/* Build parameter string from live config */
1468b356da80SMartin Matuska 	const char *sep = "";
1469b356da80SMartin Matuska 	for (uint_t q = 0; q < ZIO_TASKQ_TYPES; q++) {
1470b356da80SMartin Matuska 		const zio_taskq_info_t *zti = &zio_taskqs[t][q];
1471b356da80SMartin Matuska 		if (zti->zti_mode == ZTI_MODE_FIXED)
1472b356da80SMartin Matuska 			pos += sprintf(&buf[pos], "%s%s,%u,%u", sep,
1473b356da80SMartin Matuska 			    modes[zti->zti_mode], zti->zti_count,
1474b356da80SMartin Matuska 			    zti->zti_value);
1475b356da80SMartin Matuska 		else
1476b356da80SMartin Matuska 			pos += sprintf(&buf[pos], "%s%s", sep,
1477b356da80SMartin Matuska 			    modes[zti->zti_mode]);
1478b356da80SMartin Matuska 		sep = " ";
1479b356da80SMartin Matuska 	}
1480b356da80SMartin Matuska 
148178ae60b4SMartin Matuska 	if (add_newline)
1482b356da80SMartin Matuska 		buf[pos++] = '\n';
1483b356da80SMartin Matuska 	buf[pos] = '\0';
1484b356da80SMartin Matuska 
1485b356da80SMartin Matuska 	return (pos);
1486b356da80SMartin Matuska }
1487b356da80SMartin Matuska 
1488b356da80SMartin Matuska #ifdef __linux__
1489b356da80SMartin Matuska static int
1490b356da80SMartin Matuska spa_taskq_read_param_set(const char *val, zfs_kernel_param_t *kp)
1491b356da80SMartin Matuska {
1492b356da80SMartin Matuska 	char *cfg = kmem_strdup(val);
1493b356da80SMartin Matuska 	int err = spa_taskq_param_set(ZIO_TYPE_READ, cfg);
1494b356da80SMartin Matuska 	kmem_free(cfg, strlen(val)+1);
1495b356da80SMartin Matuska 	return (-err);
1496b356da80SMartin Matuska }
1497b356da80SMartin Matuska static int
1498b356da80SMartin Matuska spa_taskq_read_param_get(char *buf, zfs_kernel_param_t *kp)
1499b356da80SMartin Matuska {
150078ae60b4SMartin Matuska 	return (spa_taskq_param_get(ZIO_TYPE_READ, buf, TRUE));
1501b356da80SMartin Matuska }
1502b356da80SMartin Matuska 
1503b356da80SMartin Matuska static int
1504b356da80SMartin Matuska spa_taskq_write_param_set(const char *val, zfs_kernel_param_t *kp)
1505b356da80SMartin Matuska {
1506b356da80SMartin Matuska 	char *cfg = kmem_strdup(val);
1507b356da80SMartin Matuska 	int err = spa_taskq_param_set(ZIO_TYPE_WRITE, cfg);
1508b356da80SMartin Matuska 	kmem_free(cfg, strlen(val)+1);
1509b356da80SMartin Matuska 	return (-err);
1510b356da80SMartin Matuska }
1511b356da80SMartin Matuska static int
1512b356da80SMartin Matuska spa_taskq_write_param_get(char *buf, zfs_kernel_param_t *kp)
1513b356da80SMartin Matuska {
151478ae60b4SMartin Matuska 	return (spa_taskq_param_get(ZIO_TYPE_WRITE, buf, TRUE));
1515b356da80SMartin Matuska }
1516b356da80SMartin Matuska #else
1517b356da80SMartin Matuska /*
1518b356da80SMartin Matuska  * On FreeBSD load-time parameters can be set up before malloc() is available,
1519b356da80SMartin Matuska  * so we have to do all the parsing work on the stack.
1520b356da80SMartin Matuska  */
1521b356da80SMartin Matuska #define	SPA_TASKQ_PARAM_MAX	(128)
1522b356da80SMartin Matuska 
1523b356da80SMartin Matuska static int
1524b356da80SMartin Matuska spa_taskq_read_param(ZFS_MODULE_PARAM_ARGS)
1525b356da80SMartin Matuska {
1526b356da80SMartin Matuska 	char buf[SPA_TASKQ_PARAM_MAX];
152709af4bf2SMark Johnston 	int err;
1528b356da80SMartin Matuska 
152978ae60b4SMartin Matuska 	(void) spa_taskq_param_get(ZIO_TYPE_READ, buf, FALSE);
1530b356da80SMartin Matuska 	err = sysctl_handle_string(oidp, buf, sizeof (buf), req);
153109af4bf2SMark Johnston 	if (err || req->newptr == NULL)
1532b356da80SMartin Matuska 		return (err);
1533b356da80SMartin Matuska 	return (spa_taskq_param_set(ZIO_TYPE_READ, buf));
1534b356da80SMartin Matuska }
1535b356da80SMartin Matuska 
1536b356da80SMartin Matuska static int
1537b356da80SMartin Matuska spa_taskq_write_param(ZFS_MODULE_PARAM_ARGS)
1538b356da80SMartin Matuska {
1539b356da80SMartin Matuska 	char buf[SPA_TASKQ_PARAM_MAX];
154009af4bf2SMark Johnston 	int err;
1541b356da80SMartin Matuska 
154278ae60b4SMartin Matuska 	(void) spa_taskq_param_get(ZIO_TYPE_WRITE, buf, FALSE);
1543b356da80SMartin Matuska 	err = sysctl_handle_string(oidp, buf, sizeof (buf), req);
154409af4bf2SMark Johnston 	if (err || req->newptr == NULL)
1545b356da80SMartin Matuska 		return (err);
1546b356da80SMartin Matuska 	return (spa_taskq_param_set(ZIO_TYPE_WRITE, buf));
1547b356da80SMartin Matuska }
1548b356da80SMartin Matuska #endif
1549b356da80SMartin Matuska #endif /* _KERNEL */
1550b356da80SMartin Matuska 
1551eda14cbcSMatt Macy /*
1552eda14cbcSMatt Macy  * Dispatch a task to the appropriate taskq for the ZFS I/O type and priority.
1553eda14cbcSMatt Macy  * Note that a type may have multiple discrete taskqs to avoid lock contention
155414c2e0a0SMartin Matuska  * on the taskq itself.
1555eda14cbcSMatt Macy  */
1556aca928a5SMartin Matuska void
1557aca928a5SMartin Matuska spa_taskq_dispatch(spa_t *spa, zio_type_t t, zio_taskq_type_t q,
1558aca928a5SMartin Matuska     task_func_t *func, zio_t *zio, boolean_t cutinline)
1559eda14cbcSMatt Macy {
1560eda14cbcSMatt Macy 	spa_taskqs_t *tqs = &spa->spa_zio_taskq[t][q];
1561eda14cbcSMatt Macy 	taskq_t *tq;
1562eda14cbcSMatt Macy 
1563eda14cbcSMatt Macy 	ASSERT3P(tqs->stqs_taskq, !=, NULL);
1564eda14cbcSMatt Macy 	ASSERT3U(tqs->stqs_count, !=, 0);
1565eda14cbcSMatt Macy 
1566aca928a5SMartin Matuska 	/*
1567aca928a5SMartin Matuska 	 * NB: We are assuming that the zio can only be dispatched
1568aca928a5SMartin Matuska 	 * to a single taskq at a time.  It would be a grievous error
1569aca928a5SMartin Matuska 	 * to dispatch the zio to another taskq at the same time.
1570aca928a5SMartin Matuska 	 */
1571aca928a5SMartin Matuska 	ASSERT(zio);
1572aca928a5SMartin Matuska 	ASSERT(taskq_empty_ent(&zio->io_tqent));
1573aca928a5SMartin Matuska 
1574eda14cbcSMatt Macy 	if (tqs->stqs_count == 1) {
1575eda14cbcSMatt Macy 		tq = tqs->stqs_taskq[0];
1576b985c9caSMartin Matuska 	} else if ((t == ZIO_TYPE_WRITE) && (q == ZIO_TASKQ_ISSUE) &&
1577aca928a5SMartin Matuska 	    ZIO_HAS_ALLOCATOR(zio)) {
1578b985c9caSMartin Matuska 		tq = tqs->stqs_taskq[zio->io_allocator % tqs->stqs_count];
1579eda14cbcSMatt Macy 	} else {
1580eda14cbcSMatt Macy 		tq = tqs->stqs_taskq[((uint64_t)gethrtime()) % tqs->stqs_count];
1581eda14cbcSMatt Macy 	}
1582eda14cbcSMatt Macy 
1583aca928a5SMartin Matuska 	taskq_dispatch_ent(tq, func, zio, cutinline ? TQ_FRONT : 0,
1584aca928a5SMartin Matuska 	    &zio->io_tqent);
1585eda14cbcSMatt Macy }
1586eda14cbcSMatt Macy 
1587eda14cbcSMatt Macy static void
1588eda14cbcSMatt Macy spa_create_zio_taskqs(spa_t *spa)
1589eda14cbcSMatt Macy {
1590eda14cbcSMatt Macy 	for (int t = 0; t < ZIO_TYPES; t++) {
1591eda14cbcSMatt Macy 		for (int q = 0; q < ZIO_TASKQ_TYPES; q++) {
1592eda14cbcSMatt Macy 			spa_taskqs_init(spa, t, q);
1593eda14cbcSMatt Macy 		}
1594eda14cbcSMatt Macy 	}
1595eda14cbcSMatt Macy }
1596eda14cbcSMatt Macy 
1597eda14cbcSMatt Macy #if defined(_KERNEL) && defined(HAVE_SPA_THREAD)
1598eda14cbcSMatt Macy static void
1599eda14cbcSMatt Macy spa_thread(void *arg)
1600eda14cbcSMatt Macy {
1601eda14cbcSMatt Macy 	psetid_t zio_taskq_psrset_bind = PS_NONE;
1602eda14cbcSMatt Macy 	callb_cpr_t cprinfo;
1603eda14cbcSMatt Macy 
1604eda14cbcSMatt Macy 	spa_t *spa = arg;
1605eda14cbcSMatt Macy 	user_t *pu = PTOU(curproc);
1606eda14cbcSMatt Macy 
1607eda14cbcSMatt Macy 	CALLB_CPR_INIT(&cprinfo, &spa->spa_proc_lock, callb_generic_cpr,
1608eda14cbcSMatt Macy 	    spa->spa_name);
1609eda14cbcSMatt Macy 
1610eda14cbcSMatt Macy 	ASSERT(curproc != &p0);
1611eda14cbcSMatt Macy 	(void) snprintf(pu->u_psargs, sizeof (pu->u_psargs),
1612eda14cbcSMatt Macy 	    "zpool-%s", spa->spa_name);
1613eda14cbcSMatt Macy 	(void) strlcpy(pu->u_comm, pu->u_psargs, sizeof (pu->u_comm));
1614eda14cbcSMatt Macy 
1615eda14cbcSMatt Macy 	/* bind this thread to the requested psrset */
1616eda14cbcSMatt Macy 	if (zio_taskq_psrset_bind != PS_NONE) {
1617eda14cbcSMatt Macy 		pool_lock();
1618eda14cbcSMatt Macy 		mutex_enter(&cpu_lock);
1619eda14cbcSMatt Macy 		mutex_enter(&pidlock);
1620eda14cbcSMatt Macy 		mutex_enter(&curproc->p_lock);
1621eda14cbcSMatt Macy 
1622eda14cbcSMatt Macy 		if (cpupart_bind_thread(curthread, zio_taskq_psrset_bind,
1623eda14cbcSMatt Macy 		    0, NULL, NULL) == 0)  {
1624eda14cbcSMatt Macy 			curthread->t_bind_pset = zio_taskq_psrset_bind;
1625eda14cbcSMatt Macy 		} else {
1626eda14cbcSMatt Macy 			cmn_err(CE_WARN,
1627eda14cbcSMatt Macy 			    "Couldn't bind process for zfs pool \"%s\" to "
1628eda14cbcSMatt Macy 			    "pset %d\n", spa->spa_name, zio_taskq_psrset_bind);
1629eda14cbcSMatt Macy 		}
1630eda14cbcSMatt Macy 
1631eda14cbcSMatt Macy 		mutex_exit(&curproc->p_lock);
1632eda14cbcSMatt Macy 		mutex_exit(&pidlock);
1633eda14cbcSMatt Macy 		mutex_exit(&cpu_lock);
1634eda14cbcSMatt Macy 		pool_unlock();
1635eda14cbcSMatt Macy 	}
1636eda14cbcSMatt Macy 
16376c1e79dfSMartin Matuska #ifdef HAVE_SYSDC
1638eda14cbcSMatt Macy 	if (zio_taskq_sysdc) {
1639eda14cbcSMatt Macy 		sysdc_thread_enter(curthread, 100, 0);
1640eda14cbcSMatt Macy 	}
16416c1e79dfSMartin Matuska #endif
1642eda14cbcSMatt Macy 
1643eda14cbcSMatt Macy 	spa->spa_proc = curproc;
1644eda14cbcSMatt Macy 	spa->spa_did = curthread->t_did;
1645eda14cbcSMatt Macy 
1646eda14cbcSMatt Macy 	spa_create_zio_taskqs(spa);
1647eda14cbcSMatt Macy 
1648eda14cbcSMatt Macy 	mutex_enter(&spa->spa_proc_lock);
1649eda14cbcSMatt Macy 	ASSERT(spa->spa_proc_state == SPA_PROC_CREATED);
1650eda14cbcSMatt Macy 
1651eda14cbcSMatt Macy 	spa->spa_proc_state = SPA_PROC_ACTIVE;
1652eda14cbcSMatt Macy 	cv_broadcast(&spa->spa_proc_cv);
1653eda14cbcSMatt Macy 
1654eda14cbcSMatt Macy 	CALLB_CPR_SAFE_BEGIN(&cprinfo);
1655eda14cbcSMatt Macy 	while (spa->spa_proc_state == SPA_PROC_ACTIVE)
1656eda14cbcSMatt Macy 		cv_wait(&spa->spa_proc_cv, &spa->spa_proc_lock);
1657eda14cbcSMatt Macy 	CALLB_CPR_SAFE_END(&cprinfo, &spa->spa_proc_lock);
1658eda14cbcSMatt Macy 
1659eda14cbcSMatt Macy 	ASSERT(spa->spa_proc_state == SPA_PROC_DEACTIVATE);
1660eda14cbcSMatt Macy 	spa->spa_proc_state = SPA_PROC_GONE;
1661eda14cbcSMatt Macy 	spa->spa_proc = &p0;
1662eda14cbcSMatt Macy 	cv_broadcast(&spa->spa_proc_cv);
1663eda14cbcSMatt Macy 	CALLB_CPR_EXIT(&cprinfo);	/* drops spa_proc_lock */
1664eda14cbcSMatt Macy 
1665eda14cbcSMatt Macy 	mutex_enter(&curproc->p_lock);
1666eda14cbcSMatt Macy 	lwp_exit();
1667eda14cbcSMatt Macy }
1668eda14cbcSMatt Macy #endif
1669eda14cbcSMatt Macy 
16702ad756a6SMartin Matuska extern metaslab_ops_t *metaslab_allocator(spa_t *spa);
16712ad756a6SMartin Matuska 
1672eda14cbcSMatt Macy /*
1673eda14cbcSMatt Macy  * Activate an uninitialized pool.
1674eda14cbcSMatt Macy  */
1675eda14cbcSMatt Macy static void
1676eda14cbcSMatt Macy spa_activate(spa_t *spa, spa_mode_t mode)
1677eda14cbcSMatt Macy {
16782ad756a6SMartin Matuska 	metaslab_ops_t *msp = metaslab_allocator(spa);
1679eda14cbcSMatt Macy 	ASSERT(spa->spa_state == POOL_STATE_UNINITIALIZED);
1680eda14cbcSMatt Macy 
1681eda14cbcSMatt Macy 	spa->spa_state = POOL_STATE_ACTIVE;
1682eda14cbcSMatt Macy 	spa->spa_mode = mode;
168381b22a98SMartin Matuska 	spa->spa_read_spacemaps = spa_mode_readable_spacemaps;
1684eda14cbcSMatt Macy 
16852ad756a6SMartin Matuska 	spa->spa_normal_class = metaslab_class_create(spa, msp);
16862ad756a6SMartin Matuska 	spa->spa_log_class = metaslab_class_create(spa, msp);
16872ad756a6SMartin Matuska 	spa->spa_embedded_log_class = metaslab_class_create(spa, msp);
16882ad756a6SMartin Matuska 	spa->spa_special_class = metaslab_class_create(spa, msp);
16892ad756a6SMartin Matuska 	spa->spa_dedup_class = metaslab_class_create(spa, msp);
1690eda14cbcSMatt Macy 
1691eda14cbcSMatt Macy 	/* Try to create a covering process */
1692eda14cbcSMatt Macy 	mutex_enter(&spa->spa_proc_lock);
1693eda14cbcSMatt Macy 	ASSERT(spa->spa_proc_state == SPA_PROC_NONE);
1694eda14cbcSMatt Macy 	ASSERT(spa->spa_proc == &p0);
1695eda14cbcSMatt Macy 	spa->spa_did = 0;
1696eda14cbcSMatt Macy 
1697eda14cbcSMatt Macy #ifdef HAVE_SPA_THREAD
1698eda14cbcSMatt Macy 	/* Only create a process if we're going to be around a while. */
1699eda14cbcSMatt Macy 	if (spa_create_process && strcmp(spa->spa_name, TRYIMPORT_NAME) != 0) {
1700eda14cbcSMatt Macy 		if (newproc(spa_thread, (caddr_t)spa, syscid, maxclsyspri,
1701eda14cbcSMatt Macy 		    NULL, 0) == 0) {
1702eda14cbcSMatt Macy 			spa->spa_proc_state = SPA_PROC_CREATED;
1703eda14cbcSMatt Macy 			while (spa->spa_proc_state == SPA_PROC_CREATED) {
1704eda14cbcSMatt Macy 				cv_wait(&spa->spa_proc_cv,
1705eda14cbcSMatt Macy 				    &spa->spa_proc_lock);
1706eda14cbcSMatt Macy 			}
1707eda14cbcSMatt Macy 			ASSERT(spa->spa_proc_state == SPA_PROC_ACTIVE);
1708eda14cbcSMatt Macy 			ASSERT(spa->spa_proc != &p0);
1709eda14cbcSMatt Macy 			ASSERT(spa->spa_did != 0);
1710eda14cbcSMatt Macy 		} else {
1711eda14cbcSMatt Macy #ifdef _KERNEL
1712eda14cbcSMatt Macy 			cmn_err(CE_WARN,
1713eda14cbcSMatt Macy 			    "Couldn't create process for zfs pool \"%s\"\n",
1714eda14cbcSMatt Macy 			    spa->spa_name);
1715eda14cbcSMatt Macy #endif
1716eda14cbcSMatt Macy 		}
1717eda14cbcSMatt Macy 	}
1718eda14cbcSMatt Macy #endif /* HAVE_SPA_THREAD */
1719eda14cbcSMatt Macy 	mutex_exit(&spa->spa_proc_lock);
1720eda14cbcSMatt Macy 
1721eda14cbcSMatt Macy 	/* If we didn't create a process, we need to create our taskqs. */
1722eda14cbcSMatt Macy 	if (spa->spa_proc == &p0) {
1723eda14cbcSMatt Macy 		spa_create_zio_taskqs(spa);
1724eda14cbcSMatt Macy 	}
1725eda14cbcSMatt Macy 
1726eda14cbcSMatt Macy 	for (size_t i = 0; i < TXG_SIZE; i++) {
1727eda14cbcSMatt Macy 		spa->spa_txg_zio[i] = zio_root(spa, NULL, NULL,
1728eda14cbcSMatt Macy 		    ZIO_FLAG_CANFAIL);
1729eda14cbcSMatt Macy 	}
1730eda14cbcSMatt Macy 
1731eda14cbcSMatt Macy 	list_create(&spa->spa_config_dirty_list, sizeof (vdev_t),
1732eda14cbcSMatt Macy 	    offsetof(vdev_t, vdev_config_dirty_node));
1733eda14cbcSMatt Macy 	list_create(&spa->spa_evicting_os_list, sizeof (objset_t),
1734eda14cbcSMatt Macy 	    offsetof(objset_t, os_evicting_node));
1735eda14cbcSMatt Macy 	list_create(&spa->spa_state_dirty_list, sizeof (vdev_t),
1736eda14cbcSMatt Macy 	    offsetof(vdev_t, vdev_state_dirty_node));
1737eda14cbcSMatt Macy 
1738eda14cbcSMatt Macy 	txg_list_create(&spa->spa_vdev_txg_list, spa,
1739eda14cbcSMatt Macy 	    offsetof(struct vdev, vdev_txg_node));
1740eda14cbcSMatt Macy 
1741eda14cbcSMatt Macy 	avl_create(&spa->spa_errlist_scrub,
1742eda14cbcSMatt Macy 	    spa_error_entry_compare, sizeof (spa_error_entry_t),
1743eda14cbcSMatt Macy 	    offsetof(spa_error_entry_t, se_avl));
1744eda14cbcSMatt Macy 	avl_create(&spa->spa_errlist_last,
1745eda14cbcSMatt Macy 	    spa_error_entry_compare, sizeof (spa_error_entry_t),
1746eda14cbcSMatt Macy 	    offsetof(spa_error_entry_t, se_avl));
1747271171e0SMartin Matuska 	avl_create(&spa->spa_errlist_healed,
1748271171e0SMartin Matuska 	    spa_error_entry_compare, sizeof (spa_error_entry_t),
1749271171e0SMartin Matuska 	    offsetof(spa_error_entry_t, se_avl));
1750eda14cbcSMatt Macy 
1751c03c5b1cSMartin Matuska 	spa_activate_os(spa);
1752c03c5b1cSMartin Matuska 
1753eda14cbcSMatt Macy 	spa_keystore_init(&spa->spa_keystore);
1754eda14cbcSMatt Macy 
1755eda14cbcSMatt Macy 	/*
1756eda14cbcSMatt Macy 	 * This taskq is used to perform zvol-minor-related tasks
1757eda14cbcSMatt Macy 	 * asynchronously. This has several advantages, including easy
1758180f8225SMatt Macy 	 * resolution of various deadlocks.
1759eda14cbcSMatt Macy 	 *
1760eda14cbcSMatt Macy 	 * The taskq must be single threaded to ensure tasks are always
1761eda14cbcSMatt Macy 	 * processed in the order in which they were dispatched.
1762eda14cbcSMatt Macy 	 *
1763eda14cbcSMatt Macy 	 * A taskq per pool allows one to keep the pools independent.
1764eda14cbcSMatt Macy 	 * This way if one pool is suspended, it will not impact another.
1765eda14cbcSMatt Macy 	 *
1766eda14cbcSMatt Macy 	 * The preferred location to dispatch a zvol minor task is a sync
1767eda14cbcSMatt Macy 	 * task. In this context, there is easy access to the spa_t and minimal
1768eda14cbcSMatt Macy 	 * error handling is required because the sync task must succeed.
1769eda14cbcSMatt Macy 	 */
1770eda14cbcSMatt Macy 	spa->spa_zvol_taskq = taskq_create("z_zvol", 1, defclsyspri,
1771eda14cbcSMatt Macy 	    1, INT_MAX, 0);
1772eda14cbcSMatt Macy 
1773eda14cbcSMatt Macy 	/*
1774b2526e8bSMartin Matuska 	 * The taskq to preload metaslabs.
1775b2526e8bSMartin Matuska 	 */
1776b2526e8bSMartin Matuska 	spa->spa_metaslab_taskq = taskq_create("z_metaslab",
1777b2526e8bSMartin Matuska 	    metaslab_preload_pct, maxclsyspri, 1, INT_MAX,
1778b2526e8bSMartin Matuska 	    TASKQ_DYNAMIC | TASKQ_THREADS_CPU_PCT);
1779b2526e8bSMartin Matuska 
1780b2526e8bSMartin Matuska 	/*
1781eda14cbcSMatt Macy 	 * Taskq dedicated to prefetcher threads: this is used to prevent the
1782eda14cbcSMatt Macy 	 * pool traverse code from monopolizing the global (and limited)
1783eda14cbcSMatt Macy 	 * system_taskq by inappropriately scheduling long running tasks on it.
1784eda14cbcSMatt Macy 	 */
17857877fdebSMatt Macy 	spa->spa_prefetch_taskq = taskq_create("z_prefetch", 100,
17867877fdebSMatt Macy 	    defclsyspri, 1, INT_MAX, TASKQ_DYNAMIC | TASKQ_THREADS_CPU_PCT);
1787eda14cbcSMatt Macy 
1788eda14cbcSMatt Macy 	/*
1789eda14cbcSMatt Macy 	 * The taskq to upgrade datasets in this pool. Currently used by
1790eda14cbcSMatt Macy 	 * feature SPA_FEATURE_USEROBJ_ACCOUNTING/SPA_FEATURE_PROJECT_QUOTA.
1791eda14cbcSMatt Macy 	 */
17927877fdebSMatt Macy 	spa->spa_upgrade_taskq = taskq_create("z_upgrade", 100,
17937877fdebSMatt Macy 	    defclsyspri, 1, INT_MAX, TASKQ_DYNAMIC | TASKQ_THREADS_CPU_PCT);
1794eda14cbcSMatt Macy }
1795eda14cbcSMatt Macy 
1796eda14cbcSMatt Macy /*
1797eda14cbcSMatt Macy  * Opposite of spa_activate().
1798eda14cbcSMatt Macy  */
1799eda14cbcSMatt Macy static void
1800eda14cbcSMatt Macy spa_deactivate(spa_t *spa)
1801eda14cbcSMatt Macy {
1802eda14cbcSMatt Macy 	ASSERT(spa->spa_sync_on == B_FALSE);
1803eda14cbcSMatt Macy 	ASSERT(spa->spa_dsl_pool == NULL);
1804eda14cbcSMatt Macy 	ASSERT(spa->spa_root_vdev == NULL);
1805eda14cbcSMatt Macy 	ASSERT(spa->spa_async_zio_root == NULL);
1806eda14cbcSMatt Macy 	ASSERT(spa->spa_state != POOL_STATE_UNINITIALIZED);
1807eda14cbcSMatt Macy 
1808eda14cbcSMatt Macy 	spa_evicting_os_wait(spa);
1809eda14cbcSMatt Macy 
1810eda14cbcSMatt Macy 	if (spa->spa_zvol_taskq) {
1811eda14cbcSMatt Macy 		taskq_destroy(spa->spa_zvol_taskq);
1812eda14cbcSMatt Macy 		spa->spa_zvol_taskq = NULL;
1813eda14cbcSMatt Macy 	}
1814eda14cbcSMatt Macy 
1815b2526e8bSMartin Matuska 	if (spa->spa_metaslab_taskq) {
1816b2526e8bSMartin Matuska 		taskq_destroy(spa->spa_metaslab_taskq);
1817b2526e8bSMartin Matuska 		spa->spa_metaslab_taskq = NULL;
1818b2526e8bSMartin Matuska 	}
1819b2526e8bSMartin Matuska 
1820eda14cbcSMatt Macy 	if (spa->spa_prefetch_taskq) {
1821eda14cbcSMatt Macy 		taskq_destroy(spa->spa_prefetch_taskq);
1822eda14cbcSMatt Macy 		spa->spa_prefetch_taskq = NULL;
1823eda14cbcSMatt Macy 	}
1824eda14cbcSMatt Macy 
1825eda14cbcSMatt Macy 	if (spa->spa_upgrade_taskq) {
1826eda14cbcSMatt Macy 		taskq_destroy(spa->spa_upgrade_taskq);
1827eda14cbcSMatt Macy 		spa->spa_upgrade_taskq = NULL;
1828eda14cbcSMatt Macy 	}
1829eda14cbcSMatt Macy 
1830eda14cbcSMatt Macy 	txg_list_destroy(&spa->spa_vdev_txg_list);
1831eda14cbcSMatt Macy 
1832eda14cbcSMatt Macy 	list_destroy(&spa->spa_config_dirty_list);
1833eda14cbcSMatt Macy 	list_destroy(&spa->spa_evicting_os_list);
1834eda14cbcSMatt Macy 	list_destroy(&spa->spa_state_dirty_list);
1835eda14cbcSMatt Macy 
1836eda14cbcSMatt Macy 	taskq_cancel_id(system_delay_taskq, spa->spa_deadman_tqid);
1837eda14cbcSMatt Macy 
1838eda14cbcSMatt Macy 	for (int t = 0; t < ZIO_TYPES; t++) {
1839eda14cbcSMatt Macy 		for (int q = 0; q < ZIO_TASKQ_TYPES; q++) {
1840eda14cbcSMatt Macy 			spa_taskqs_fini(spa, t, q);
1841eda14cbcSMatt Macy 		}
1842eda14cbcSMatt Macy 	}
1843eda14cbcSMatt Macy 
1844eda14cbcSMatt Macy 	for (size_t i = 0; i < TXG_SIZE; i++) {
1845eda14cbcSMatt Macy 		ASSERT3P(spa->spa_txg_zio[i], !=, NULL);
1846eda14cbcSMatt Macy 		VERIFY0(zio_wait(spa->spa_txg_zio[i]));
1847eda14cbcSMatt Macy 		spa->spa_txg_zio[i] = NULL;
1848eda14cbcSMatt Macy 	}
1849eda14cbcSMatt Macy 
1850eda14cbcSMatt Macy 	metaslab_class_destroy(spa->spa_normal_class);
1851eda14cbcSMatt Macy 	spa->spa_normal_class = NULL;
1852eda14cbcSMatt Macy 
1853eda14cbcSMatt Macy 	metaslab_class_destroy(spa->spa_log_class);
1854eda14cbcSMatt Macy 	spa->spa_log_class = NULL;
1855eda14cbcSMatt Macy 
1856184c1b94SMartin Matuska 	metaslab_class_destroy(spa->spa_embedded_log_class);
1857184c1b94SMartin Matuska 	spa->spa_embedded_log_class = NULL;
1858184c1b94SMartin Matuska 
1859eda14cbcSMatt Macy 	metaslab_class_destroy(spa->spa_special_class);
1860eda14cbcSMatt Macy 	spa->spa_special_class = NULL;
1861eda14cbcSMatt Macy 
1862eda14cbcSMatt Macy 	metaslab_class_destroy(spa->spa_dedup_class);
1863eda14cbcSMatt Macy 	spa->spa_dedup_class = NULL;
1864eda14cbcSMatt Macy 
1865eda14cbcSMatt Macy 	/*
1866eda14cbcSMatt Macy 	 * If this was part of an import or the open otherwise failed, we may
1867eda14cbcSMatt Macy 	 * still have errors left in the queues.  Empty them just in case.
1868eda14cbcSMatt Macy 	 */
1869eda14cbcSMatt Macy 	spa_errlog_drain(spa);
1870eda14cbcSMatt Macy 	avl_destroy(&spa->spa_errlist_scrub);
1871eda14cbcSMatt Macy 	avl_destroy(&spa->spa_errlist_last);
1872271171e0SMartin Matuska 	avl_destroy(&spa->spa_errlist_healed);
1873eda14cbcSMatt Macy 
1874eda14cbcSMatt Macy 	spa_keystore_fini(&spa->spa_keystore);
1875eda14cbcSMatt Macy 
1876eda14cbcSMatt Macy 	spa->spa_state = POOL_STATE_UNINITIALIZED;
1877eda14cbcSMatt Macy 
1878eda14cbcSMatt Macy 	mutex_enter(&spa->spa_proc_lock);
1879eda14cbcSMatt Macy 	if (spa->spa_proc_state != SPA_PROC_NONE) {
1880eda14cbcSMatt Macy 		ASSERT(spa->spa_proc_state == SPA_PROC_ACTIVE);
1881eda14cbcSMatt Macy 		spa->spa_proc_state = SPA_PROC_DEACTIVATE;
1882eda14cbcSMatt Macy 		cv_broadcast(&spa->spa_proc_cv);
1883eda14cbcSMatt Macy 		while (spa->spa_proc_state == SPA_PROC_DEACTIVATE) {
1884eda14cbcSMatt Macy 			ASSERT(spa->spa_proc != &p0);
1885eda14cbcSMatt Macy 			cv_wait(&spa->spa_proc_cv, &spa->spa_proc_lock);
1886eda14cbcSMatt Macy 		}
1887eda14cbcSMatt Macy 		ASSERT(spa->spa_proc_state == SPA_PROC_GONE);
1888eda14cbcSMatt Macy 		spa->spa_proc_state = SPA_PROC_NONE;
1889eda14cbcSMatt Macy 	}
1890eda14cbcSMatt Macy 	ASSERT(spa->spa_proc == &p0);
1891eda14cbcSMatt Macy 	mutex_exit(&spa->spa_proc_lock);
1892eda14cbcSMatt Macy 
1893eda14cbcSMatt Macy 	/*
1894eda14cbcSMatt Macy 	 * We want to make sure spa_thread() has actually exited the ZFS
1895eda14cbcSMatt Macy 	 * module, so that the module can't be unloaded out from underneath
1896eda14cbcSMatt Macy 	 * it.
1897eda14cbcSMatt Macy 	 */
1898eda14cbcSMatt Macy 	if (spa->spa_did != 0) {
1899eda14cbcSMatt Macy 		thread_join(spa->spa_did);
1900eda14cbcSMatt Macy 		spa->spa_did = 0;
1901eda14cbcSMatt Macy 	}
1902c03c5b1cSMartin Matuska 
1903c03c5b1cSMartin Matuska 	spa_deactivate_os(spa);
1904c03c5b1cSMartin Matuska 
1905eda14cbcSMatt Macy }
1906eda14cbcSMatt Macy 
1907eda14cbcSMatt Macy /*
1908eda14cbcSMatt Macy  * Verify a pool configuration, and construct the vdev tree appropriately.  This
1909eda14cbcSMatt Macy  * will create all the necessary vdevs in the appropriate layout, with each vdev
1910eda14cbcSMatt Macy  * in the CLOSED state.  This will prep the pool before open/creation/import.
1911eda14cbcSMatt Macy  * All vdev validation is done by the vdev_alloc() routine.
1912eda14cbcSMatt Macy  */
1913eda14cbcSMatt Macy int
1914eda14cbcSMatt Macy spa_config_parse(spa_t *spa, vdev_t **vdp, nvlist_t *nv, vdev_t *parent,
1915eda14cbcSMatt Macy     uint_t id, int atype)
1916eda14cbcSMatt Macy {
1917eda14cbcSMatt Macy 	nvlist_t **child;
1918eda14cbcSMatt Macy 	uint_t children;
1919eda14cbcSMatt Macy 	int error;
1920eda14cbcSMatt Macy 
1921eda14cbcSMatt Macy 	if ((error = vdev_alloc(spa, vdp, nv, parent, id, atype)) != 0)
1922eda14cbcSMatt Macy 		return (error);
1923eda14cbcSMatt Macy 
1924eda14cbcSMatt Macy 	if ((*vdp)->vdev_ops->vdev_op_leaf)
1925eda14cbcSMatt Macy 		return (0);
1926eda14cbcSMatt Macy 
1927eda14cbcSMatt Macy 	error = nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
1928eda14cbcSMatt Macy 	    &child, &children);
1929eda14cbcSMatt Macy 
1930eda14cbcSMatt Macy 	if (error == ENOENT)
1931eda14cbcSMatt Macy 		return (0);
1932eda14cbcSMatt Macy 
1933eda14cbcSMatt Macy 	if (error) {
1934eda14cbcSMatt Macy 		vdev_free(*vdp);
1935eda14cbcSMatt Macy 		*vdp = NULL;
1936eda14cbcSMatt Macy 		return (SET_ERROR(EINVAL));
1937eda14cbcSMatt Macy 	}
1938eda14cbcSMatt Macy 
1939eda14cbcSMatt Macy 	for (int c = 0; c < children; c++) {
1940eda14cbcSMatt Macy 		vdev_t *vd;
1941eda14cbcSMatt Macy 		if ((error = spa_config_parse(spa, &vd, child[c], *vdp, c,
1942eda14cbcSMatt Macy 		    atype)) != 0) {
1943eda14cbcSMatt Macy 			vdev_free(*vdp);
1944eda14cbcSMatt Macy 			*vdp = NULL;
1945eda14cbcSMatt Macy 			return (error);
1946eda14cbcSMatt Macy 		}
1947eda14cbcSMatt Macy 	}
1948eda14cbcSMatt Macy 
1949eda14cbcSMatt Macy 	ASSERT(*vdp != NULL);
1950eda14cbcSMatt Macy 
1951eda14cbcSMatt Macy 	return (0);
1952eda14cbcSMatt Macy }
1953eda14cbcSMatt Macy 
1954eda14cbcSMatt Macy static boolean_t
1955eda14cbcSMatt Macy spa_should_flush_logs_on_unload(spa_t *spa)
1956eda14cbcSMatt Macy {
1957eda14cbcSMatt Macy 	if (!spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP))
1958eda14cbcSMatt Macy 		return (B_FALSE);
1959eda14cbcSMatt Macy 
1960eda14cbcSMatt Macy 	if (!spa_writeable(spa))
1961eda14cbcSMatt Macy 		return (B_FALSE);
1962eda14cbcSMatt Macy 
1963eda14cbcSMatt Macy 	if (!spa->spa_sync_on)
1964eda14cbcSMatt Macy 		return (B_FALSE);
1965eda14cbcSMatt Macy 
1966eda14cbcSMatt Macy 	if (spa_state(spa) != POOL_STATE_EXPORTED)
1967eda14cbcSMatt Macy 		return (B_FALSE);
1968eda14cbcSMatt Macy 
1969eda14cbcSMatt Macy 	if (zfs_keep_log_spacemaps_at_export)
1970eda14cbcSMatt Macy 		return (B_FALSE);
1971eda14cbcSMatt Macy 
1972eda14cbcSMatt Macy 	return (B_TRUE);
1973eda14cbcSMatt Macy }
1974eda14cbcSMatt Macy 
1975eda14cbcSMatt Macy /*
1976eda14cbcSMatt Macy  * Opens a transaction that will set the flag that will instruct
1977eda14cbcSMatt Macy  * spa_sync to attempt to flush all the metaslabs for that txg.
1978eda14cbcSMatt Macy  */
1979eda14cbcSMatt Macy static void
1980eda14cbcSMatt Macy spa_unload_log_sm_flush_all(spa_t *spa)
1981eda14cbcSMatt Macy {
1982eda14cbcSMatt Macy 	dmu_tx_t *tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
1983eda14cbcSMatt Macy 	VERIFY0(dmu_tx_assign(tx, TXG_WAIT));
1984eda14cbcSMatt Macy 
1985eda14cbcSMatt Macy 	ASSERT3U(spa->spa_log_flushall_txg, ==, 0);
1986eda14cbcSMatt Macy 	spa->spa_log_flushall_txg = dmu_tx_get_txg(tx);
1987eda14cbcSMatt Macy 
1988eda14cbcSMatt Macy 	dmu_tx_commit(tx);
1989eda14cbcSMatt Macy 	txg_wait_synced(spa_get_dsl(spa), spa->spa_log_flushall_txg);
1990eda14cbcSMatt Macy }
1991eda14cbcSMatt Macy 
1992eda14cbcSMatt Macy static void
1993eda14cbcSMatt Macy spa_unload_log_sm_metadata(spa_t *spa)
1994eda14cbcSMatt Macy {
1995eda14cbcSMatt Macy 	void *cookie = NULL;
1996eda14cbcSMatt Macy 	spa_log_sm_t *sls;
19974e8d558cSMartin Matuska 	log_summary_entry_t *e;
19984e8d558cSMartin Matuska 
1999eda14cbcSMatt Macy 	while ((sls = avl_destroy_nodes(&spa->spa_sm_logs_by_txg,
2000eda14cbcSMatt Macy 	    &cookie)) != NULL) {
2001eda14cbcSMatt Macy 		VERIFY0(sls->sls_mscount);
2002eda14cbcSMatt Macy 		kmem_free(sls, sizeof (spa_log_sm_t));
2003eda14cbcSMatt Macy 	}
2004eda14cbcSMatt Macy 
20054e8d558cSMartin Matuska 	while ((e = list_remove_head(&spa->spa_log_summary)) != NULL) {
2006eda14cbcSMatt Macy 		VERIFY0(e->lse_mscount);
2007eda14cbcSMatt Macy 		kmem_free(e, sizeof (log_summary_entry_t));
2008eda14cbcSMatt Macy 	}
2009eda14cbcSMatt Macy 
2010eda14cbcSMatt Macy 	spa->spa_unflushed_stats.sus_nblocks = 0;
2011eda14cbcSMatt Macy 	spa->spa_unflushed_stats.sus_memused = 0;
2012eda14cbcSMatt Macy 	spa->spa_unflushed_stats.sus_blocklimit = 0;
2013eda14cbcSMatt Macy }
2014eda14cbcSMatt Macy 
2015eda14cbcSMatt Macy static void
2016eda14cbcSMatt Macy spa_destroy_aux_threads(spa_t *spa)
2017eda14cbcSMatt Macy {
2018eda14cbcSMatt Macy 	if (spa->spa_condense_zthr != NULL) {
2019eda14cbcSMatt Macy 		zthr_destroy(spa->spa_condense_zthr);
2020eda14cbcSMatt Macy 		spa->spa_condense_zthr = NULL;
2021eda14cbcSMatt Macy 	}
2022eda14cbcSMatt Macy 	if (spa->spa_checkpoint_discard_zthr != NULL) {
2023eda14cbcSMatt Macy 		zthr_destroy(spa->spa_checkpoint_discard_zthr);
2024eda14cbcSMatt Macy 		spa->spa_checkpoint_discard_zthr = NULL;
2025eda14cbcSMatt Macy 	}
2026eda14cbcSMatt Macy 	if (spa->spa_livelist_delete_zthr != NULL) {
2027eda14cbcSMatt Macy 		zthr_destroy(spa->spa_livelist_delete_zthr);
2028eda14cbcSMatt Macy 		spa->spa_livelist_delete_zthr = NULL;
2029eda14cbcSMatt Macy 	}
2030eda14cbcSMatt Macy 	if (spa->spa_livelist_condense_zthr != NULL) {
2031eda14cbcSMatt Macy 		zthr_destroy(spa->spa_livelist_condense_zthr);
2032eda14cbcSMatt Macy 		spa->spa_livelist_condense_zthr = NULL;
2033eda14cbcSMatt Macy 	}
2034e716630dSMartin Matuska 	if (spa->spa_raidz_expand_zthr != NULL) {
2035e716630dSMartin Matuska 		zthr_destroy(spa->spa_raidz_expand_zthr);
2036e716630dSMartin Matuska 		spa->spa_raidz_expand_zthr = NULL;
2037e716630dSMartin Matuska 	}
2038eda14cbcSMatt Macy }
2039eda14cbcSMatt Macy 
2040eda14cbcSMatt Macy /*
2041eda14cbcSMatt Macy  * Opposite of spa_load().
2042eda14cbcSMatt Macy  */
2043eda14cbcSMatt Macy static void
2044eda14cbcSMatt Macy spa_unload(spa_t *spa)
2045eda14cbcSMatt Macy {
2046aca928a5SMartin Matuska 	ASSERT(MUTEX_HELD(&spa_namespace_lock) ||
2047aca928a5SMartin Matuska 	    spa->spa_export_thread == curthread);
2048eda14cbcSMatt Macy 	ASSERT(spa_state(spa) != POOL_STATE_UNINITIALIZED);
2049eda14cbcSMatt Macy 
2050eda14cbcSMatt Macy 	spa_import_progress_remove(spa_guid(spa));
2051eda14cbcSMatt Macy 	spa_load_note(spa, "UNLOADING");
2052eda14cbcSMatt Macy 
2053eda14cbcSMatt Macy 	spa_wake_waiters(spa);
2054eda14cbcSMatt Macy 
2055eda14cbcSMatt Macy 	/*
2056c03c5b1cSMartin Matuska 	 * If we have set the spa_final_txg, we have already performed the
2057c03c5b1cSMartin Matuska 	 * tasks below in spa_export_common(). We should not redo them here,
2058c03c5b1cSMartin Matuska 	 * since we delay the final TXGs beyond what spa_final_txg is set to.
2059c03c5b1cSMartin Matuska 	 */
2060c03c5b1cSMartin Matuska 	if (spa->spa_final_txg == UINT64_MAX) {
2061c03c5b1cSMartin Matuska 		/*
2062c03c5b1cSMartin Matuska 		 * If the log space map feature is enabled and the pool is
2063c03c5b1cSMartin Matuska 		 * getting exported (but not destroyed), we want to spend some
2064c03c5b1cSMartin Matuska 		 * time flushing as many metaslabs as we can in an attempt to
2065c03c5b1cSMartin Matuska 		 * destroy log space maps and save import time.
2066eda14cbcSMatt Macy 		 */
2067eda14cbcSMatt Macy 		if (spa_should_flush_logs_on_unload(spa))
2068eda14cbcSMatt Macy 			spa_unload_log_sm_flush_all(spa);
2069eda14cbcSMatt Macy 
2070eda14cbcSMatt Macy 		/*
2071eda14cbcSMatt Macy 		 * Stop async tasks.
2072eda14cbcSMatt Macy 		 */
2073eda14cbcSMatt Macy 		spa_async_suspend(spa);
2074eda14cbcSMatt Macy 
2075eda14cbcSMatt Macy 		if (spa->spa_root_vdev) {
2076eda14cbcSMatt Macy 			vdev_t *root_vdev = spa->spa_root_vdev;
2077c03c5b1cSMartin Matuska 			vdev_initialize_stop_all(root_vdev,
2078c03c5b1cSMartin Matuska 			    VDEV_INITIALIZE_ACTIVE);
2079eda14cbcSMatt Macy 			vdev_trim_stop_all(root_vdev, VDEV_TRIM_ACTIVE);
2080eda14cbcSMatt Macy 			vdev_autotrim_stop_all(spa);
2081eda14cbcSMatt Macy 			vdev_rebuild_stop_all(spa);
2082eda14cbcSMatt Macy 		}
2083c03c5b1cSMartin Matuska 	}
2084eda14cbcSMatt Macy 
2085eda14cbcSMatt Macy 	/*
2086eda14cbcSMatt Macy 	 * Stop syncing.
2087eda14cbcSMatt Macy 	 */
2088eda14cbcSMatt Macy 	if (spa->spa_sync_on) {
2089eda14cbcSMatt Macy 		txg_sync_stop(spa->spa_dsl_pool);
2090eda14cbcSMatt Macy 		spa->spa_sync_on = B_FALSE;
2091eda14cbcSMatt Macy 	}
2092eda14cbcSMatt Macy 
2093eda14cbcSMatt Macy 	/*
2094eda14cbcSMatt Macy 	 * This ensures that there is no async metaslab prefetching
2095eda14cbcSMatt Macy 	 * while we attempt to unload the spa.
2096eda14cbcSMatt Macy 	 */
2097b2526e8bSMartin Matuska 	taskq_wait(spa->spa_metaslab_taskq);
2098eda14cbcSMatt Macy 
2099eda14cbcSMatt Macy 	if (spa->spa_mmp.mmp_thread)
2100eda14cbcSMatt Macy 		mmp_thread_stop(spa);
2101eda14cbcSMatt Macy 
2102eda14cbcSMatt Macy 	/*
2103eda14cbcSMatt Macy 	 * Wait for any outstanding async I/O to complete.
2104eda14cbcSMatt Macy 	 */
2105eda14cbcSMatt Macy 	if (spa->spa_async_zio_root != NULL) {
2106eda14cbcSMatt Macy 		for (int i = 0; i < max_ncpus; i++)
2107eda14cbcSMatt Macy 			(void) zio_wait(spa->spa_async_zio_root[i]);
2108eda14cbcSMatt Macy 		kmem_free(spa->spa_async_zio_root, max_ncpus * sizeof (void *));
2109eda14cbcSMatt Macy 		spa->spa_async_zio_root = NULL;
2110eda14cbcSMatt Macy 	}
2111eda14cbcSMatt Macy 
2112eda14cbcSMatt Macy 	if (spa->spa_vdev_removal != NULL) {
2113eda14cbcSMatt Macy 		spa_vdev_removal_destroy(spa->spa_vdev_removal);
2114eda14cbcSMatt Macy 		spa->spa_vdev_removal = NULL;
2115eda14cbcSMatt Macy 	}
2116eda14cbcSMatt Macy 
2117eda14cbcSMatt Macy 	spa_destroy_aux_threads(spa);
2118eda14cbcSMatt Macy 
2119eda14cbcSMatt Macy 	spa_condense_fini(spa);
2120eda14cbcSMatt Macy 
2121eda14cbcSMatt Macy 	bpobj_close(&spa->spa_deferred_bpobj);
2122eda14cbcSMatt Macy 
2123eda14cbcSMatt Macy 	spa_config_enter(spa, SCL_ALL, spa, RW_WRITER);
2124eda14cbcSMatt Macy 
2125eda14cbcSMatt Macy 	/*
2126eda14cbcSMatt Macy 	 * Close all vdevs.
2127eda14cbcSMatt Macy 	 */
2128eda14cbcSMatt Macy 	if (spa->spa_root_vdev)
2129eda14cbcSMatt Macy 		vdev_free(spa->spa_root_vdev);
2130eda14cbcSMatt Macy 	ASSERT(spa->spa_root_vdev == NULL);
2131eda14cbcSMatt Macy 
2132eda14cbcSMatt Macy 	/*
2133eda14cbcSMatt Macy 	 * Close the dsl pool.
2134eda14cbcSMatt Macy 	 */
2135eda14cbcSMatt Macy 	if (spa->spa_dsl_pool) {
2136eda14cbcSMatt Macy 		dsl_pool_close(spa->spa_dsl_pool);
2137eda14cbcSMatt Macy 		spa->spa_dsl_pool = NULL;
2138eda14cbcSMatt Macy 		spa->spa_meta_objset = NULL;
2139eda14cbcSMatt Macy 	}
2140eda14cbcSMatt Macy 
2141eda14cbcSMatt Macy 	ddt_unload(spa);
21422a58b312SMartin Matuska 	brt_unload(spa);
2143eda14cbcSMatt Macy 	spa_unload_log_sm_metadata(spa);
2144eda14cbcSMatt Macy 
2145eda14cbcSMatt Macy 	/*
2146eda14cbcSMatt Macy 	 * Drop and purge level 2 cache
2147eda14cbcSMatt Macy 	 */
2148eda14cbcSMatt Macy 	spa_l2cache_drop(spa);
2149eda14cbcSMatt Macy 
2150c9539b89SMartin Matuska 	if (spa->spa_spares.sav_vdevs) {
2151eda14cbcSMatt Macy 		for (int i = 0; i < spa->spa_spares.sav_count; i++)
2152eda14cbcSMatt Macy 			vdev_free(spa->spa_spares.sav_vdevs[i]);
2153eda14cbcSMatt Macy 		kmem_free(spa->spa_spares.sav_vdevs,
2154eda14cbcSMatt Macy 		    spa->spa_spares.sav_count * sizeof (void *));
2155eda14cbcSMatt Macy 		spa->spa_spares.sav_vdevs = NULL;
2156eda14cbcSMatt Macy 	}
2157eda14cbcSMatt Macy 	if (spa->spa_spares.sav_config) {
2158eda14cbcSMatt Macy 		nvlist_free(spa->spa_spares.sav_config);
2159eda14cbcSMatt Macy 		spa->spa_spares.sav_config = NULL;
2160eda14cbcSMatt Macy 	}
2161eda14cbcSMatt Macy 	spa->spa_spares.sav_count = 0;
2162eda14cbcSMatt Macy 
2163c9539b89SMartin Matuska 	if (spa->spa_l2cache.sav_vdevs) {
2164eda14cbcSMatt Macy 		for (int i = 0; i < spa->spa_l2cache.sav_count; i++) {
2165eda14cbcSMatt Macy 			vdev_clear_stats(spa->spa_l2cache.sav_vdevs[i]);
2166eda14cbcSMatt Macy 			vdev_free(spa->spa_l2cache.sav_vdevs[i]);
2167eda14cbcSMatt Macy 		}
2168eda14cbcSMatt Macy 		kmem_free(spa->spa_l2cache.sav_vdevs,
2169eda14cbcSMatt Macy 		    spa->spa_l2cache.sav_count * sizeof (void *));
2170eda14cbcSMatt Macy 		spa->spa_l2cache.sav_vdevs = NULL;
2171eda14cbcSMatt Macy 	}
2172eda14cbcSMatt Macy 	if (spa->spa_l2cache.sav_config) {
2173eda14cbcSMatt Macy 		nvlist_free(spa->spa_l2cache.sav_config);
2174eda14cbcSMatt Macy 		spa->spa_l2cache.sav_config = NULL;
2175eda14cbcSMatt Macy 	}
2176eda14cbcSMatt Macy 	spa->spa_l2cache.sav_count = 0;
2177eda14cbcSMatt Macy 
2178eda14cbcSMatt Macy 	spa->spa_async_suspended = 0;
2179eda14cbcSMatt Macy 
2180eda14cbcSMatt Macy 	spa->spa_indirect_vdevs_loaded = B_FALSE;
2181eda14cbcSMatt Macy 
2182eda14cbcSMatt Macy 	if (spa->spa_comment != NULL) {
2183eda14cbcSMatt Macy 		spa_strfree(spa->spa_comment);
2184eda14cbcSMatt Macy 		spa->spa_comment = NULL;
2185eda14cbcSMatt Macy 	}
2186ee36e25aSMartin Matuska 	if (spa->spa_compatibility != NULL) {
2187ee36e25aSMartin Matuska 		spa_strfree(spa->spa_compatibility);
2188ee36e25aSMartin Matuska 		spa->spa_compatibility = NULL;
2189ee36e25aSMartin Matuska 	}
2190eda14cbcSMatt Macy 
2191e716630dSMartin Matuska 	spa->spa_raidz_expand = NULL;
2192e716630dSMartin Matuska 
2193eda14cbcSMatt Macy 	spa_config_exit(spa, SCL_ALL, spa);
2194eda14cbcSMatt Macy }
2195eda14cbcSMatt Macy 
2196eda14cbcSMatt Macy /*
2197eda14cbcSMatt Macy  * Load (or re-load) the current list of vdevs describing the active spares for
2198eda14cbcSMatt Macy  * this pool.  When this is called, we have some form of basic information in
2199eda14cbcSMatt Macy  * 'spa_spares.sav_config'.  We parse this into vdevs, try to open them, and
2200eda14cbcSMatt Macy  * then re-generate a more complete list including status information.
2201eda14cbcSMatt Macy  */
2202eda14cbcSMatt Macy void
2203eda14cbcSMatt Macy spa_load_spares(spa_t *spa)
2204eda14cbcSMatt Macy {
2205eda14cbcSMatt Macy 	nvlist_t **spares;
2206eda14cbcSMatt Macy 	uint_t nspares;
2207eda14cbcSMatt Macy 	int i;
2208eda14cbcSMatt Macy 	vdev_t *vd, *tvd;
2209eda14cbcSMatt Macy 
2210eda14cbcSMatt Macy #ifndef _KERNEL
2211eda14cbcSMatt Macy 	/*
2212eda14cbcSMatt Macy 	 * zdb opens both the current state of the pool and the
2213eda14cbcSMatt Macy 	 * checkpointed state (if present), with a different spa_t.
2214eda14cbcSMatt Macy 	 *
2215eda14cbcSMatt Macy 	 * As spare vdevs are shared among open pools, we skip loading
2216eda14cbcSMatt Macy 	 * them when we load the checkpointed state of the pool.
2217eda14cbcSMatt Macy 	 */
2218eda14cbcSMatt Macy 	if (!spa_writeable(spa))
2219eda14cbcSMatt Macy 		return;
2220eda14cbcSMatt Macy #endif
2221eda14cbcSMatt Macy 
2222eda14cbcSMatt Macy 	ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
2223eda14cbcSMatt Macy 
2224eda14cbcSMatt Macy 	/*
2225eda14cbcSMatt Macy 	 * First, close and free any existing spare vdevs.
2226eda14cbcSMatt Macy 	 */
2227c9539b89SMartin Matuska 	if (spa->spa_spares.sav_vdevs) {
2228eda14cbcSMatt Macy 		for (i = 0; i < spa->spa_spares.sav_count; i++) {
2229eda14cbcSMatt Macy 			vd = spa->spa_spares.sav_vdevs[i];
2230eda14cbcSMatt Macy 
2231eda14cbcSMatt Macy 			/* Undo the call to spa_activate() below */
2232eda14cbcSMatt Macy 			if ((tvd = spa_lookup_by_guid(spa, vd->vdev_guid,
2233eda14cbcSMatt Macy 			    B_FALSE)) != NULL && tvd->vdev_isspare)
2234eda14cbcSMatt Macy 				spa_spare_remove(tvd);
2235eda14cbcSMatt Macy 			vdev_close(vd);
2236eda14cbcSMatt Macy 			vdev_free(vd);
2237eda14cbcSMatt Macy 		}
2238eda14cbcSMatt Macy 
2239eda14cbcSMatt Macy 		kmem_free(spa->spa_spares.sav_vdevs,
2240eda14cbcSMatt Macy 		    spa->spa_spares.sav_count * sizeof (void *));
2241c9539b89SMartin Matuska 	}
2242eda14cbcSMatt Macy 
2243eda14cbcSMatt Macy 	if (spa->spa_spares.sav_config == NULL)
2244eda14cbcSMatt Macy 		nspares = 0;
2245eda14cbcSMatt Macy 	else
224681b22a98SMartin Matuska 		VERIFY0(nvlist_lookup_nvlist_array(spa->spa_spares.sav_config,
224781b22a98SMartin Matuska 		    ZPOOL_CONFIG_SPARES, &spares, &nspares));
2248eda14cbcSMatt Macy 
2249eda14cbcSMatt Macy 	spa->spa_spares.sav_count = (int)nspares;
2250eda14cbcSMatt Macy 	spa->spa_spares.sav_vdevs = NULL;
2251eda14cbcSMatt Macy 
2252eda14cbcSMatt Macy 	if (nspares == 0)
2253eda14cbcSMatt Macy 		return;
2254eda14cbcSMatt Macy 
2255eda14cbcSMatt Macy 	/*
2256eda14cbcSMatt Macy 	 * Construct the array of vdevs, opening them to get status in the
2257eda14cbcSMatt Macy 	 * process.  For each spare, there are potentially two different vdev_t
2258eda14cbcSMatt Macy 	 * structures associated with it: one in the list of spares (used only
2259eda14cbcSMatt Macy 	 * for basic validation purposes) and one in the active vdev
2260eda14cbcSMatt Macy 	 * configuration (if it's spared in).  During this phase we open and
2261eda14cbcSMatt Macy 	 * validate each vdev on the spare list.  If the vdev also exists in the
2262eda14cbcSMatt Macy 	 * active configuration, then we also mark this vdev as an active spare.
2263eda14cbcSMatt Macy 	 */
2264eda14cbcSMatt Macy 	spa->spa_spares.sav_vdevs = kmem_zalloc(nspares * sizeof (void *),
2265eda14cbcSMatt Macy 	    KM_SLEEP);
2266eda14cbcSMatt Macy 	for (i = 0; i < spa->spa_spares.sav_count; i++) {
2267eda14cbcSMatt Macy 		VERIFY(spa_config_parse(spa, &vd, spares[i], NULL, 0,
2268eda14cbcSMatt Macy 		    VDEV_ALLOC_SPARE) == 0);
2269eda14cbcSMatt Macy 		ASSERT(vd != NULL);
2270eda14cbcSMatt Macy 
2271eda14cbcSMatt Macy 		spa->spa_spares.sav_vdevs[i] = vd;
2272eda14cbcSMatt Macy 
2273eda14cbcSMatt Macy 		if ((tvd = spa_lookup_by_guid(spa, vd->vdev_guid,
2274eda14cbcSMatt Macy 		    B_FALSE)) != NULL) {
2275eda14cbcSMatt Macy 			if (!tvd->vdev_isspare)
2276eda14cbcSMatt Macy 				spa_spare_add(tvd);
2277eda14cbcSMatt Macy 
2278eda14cbcSMatt Macy 			/*
2279eda14cbcSMatt Macy 			 * We only mark the spare active if we were successfully
2280eda14cbcSMatt Macy 			 * able to load the vdev.  Otherwise, importing a pool
2281eda14cbcSMatt Macy 			 * with a bad active spare would result in strange
2282eda14cbcSMatt Macy 			 * behavior, because multiple pools would think the spare
2283eda14cbcSMatt Macy 			 * is actively in use.
2284eda14cbcSMatt Macy 			 *
2285eda14cbcSMatt Macy 			 * There is a vulnerability here to an equally bizarre
2286eda14cbcSMatt Macy 			 * circumstance, where a dead active spare is later
2287eda14cbcSMatt Macy 			 * brought back to life (onlined or otherwise).  Given
2288eda14cbcSMatt Macy 			 * the rarity of this scenario, and the extra complexity
2289eda14cbcSMatt Macy 			 * it adds, we ignore the possibility.
2290eda14cbcSMatt Macy 			 */
2291eda14cbcSMatt Macy 			if (!vdev_is_dead(tvd))
2292eda14cbcSMatt Macy 				spa_spare_activate(tvd);
2293eda14cbcSMatt Macy 		}
2294eda14cbcSMatt Macy 
2295eda14cbcSMatt Macy 		vd->vdev_top = vd;
2296eda14cbcSMatt Macy 		vd->vdev_aux = &spa->spa_spares;
2297eda14cbcSMatt Macy 
2298eda14cbcSMatt Macy 		if (vdev_open(vd) != 0)
2299eda14cbcSMatt Macy 			continue;
2300eda14cbcSMatt Macy 
2301eda14cbcSMatt Macy 		if (vdev_validate_aux(vd) == 0)
2302eda14cbcSMatt Macy 			spa_spare_add(vd);
2303eda14cbcSMatt Macy 	}
2304eda14cbcSMatt Macy 
2305eda14cbcSMatt Macy 	/*
2306eda14cbcSMatt Macy 	 * Recompute the stashed list of spares, with status information
2307eda14cbcSMatt Macy 	 * this time.
2308eda14cbcSMatt Macy 	 */
230981b22a98SMartin Matuska 	fnvlist_remove(spa->spa_spares.sav_config, ZPOOL_CONFIG_SPARES);
2310eda14cbcSMatt Macy 
2311eda14cbcSMatt Macy 	spares = kmem_alloc(spa->spa_spares.sav_count * sizeof (void *),
2312eda14cbcSMatt Macy 	    KM_SLEEP);
2313eda14cbcSMatt Macy 	for (i = 0; i < spa->spa_spares.sav_count; i++)
2314eda14cbcSMatt Macy 		spares[i] = vdev_config_generate(spa,
2315eda14cbcSMatt Macy 		    spa->spa_spares.sav_vdevs[i], B_TRUE, VDEV_CONFIG_SPARE);
231681b22a98SMartin Matuska 	fnvlist_add_nvlist_array(spa->spa_spares.sav_config,
2317681ce946SMartin Matuska 	    ZPOOL_CONFIG_SPARES, (const nvlist_t * const *)spares,
2318681ce946SMartin Matuska 	    spa->spa_spares.sav_count);
2319eda14cbcSMatt Macy 	for (i = 0; i < spa->spa_spares.sav_count; i++)
2320eda14cbcSMatt Macy 		nvlist_free(spares[i]);
2321eda14cbcSMatt Macy 	kmem_free(spares, spa->spa_spares.sav_count * sizeof (void *));
2322eda14cbcSMatt Macy }
2323eda14cbcSMatt Macy 
2324eda14cbcSMatt Macy /*
2325eda14cbcSMatt Macy  * Load (or re-load) the current list of vdevs describing the active l2cache for
2326eda14cbcSMatt Macy  * this pool.  When this is called, we have some form of basic information in
2327eda14cbcSMatt Macy  * 'spa_l2cache.sav_config'.  We parse this into vdevs, try to open them, and
2328eda14cbcSMatt Macy  * then re-generate a more complete list including status information.
2329eda14cbcSMatt Macy  * Devices which are already active have their details maintained, and are
2330eda14cbcSMatt Macy  * not re-opened.
2331eda14cbcSMatt Macy  */
2332eda14cbcSMatt Macy void
2333eda14cbcSMatt Macy spa_load_l2cache(spa_t *spa)
2334eda14cbcSMatt Macy {
2335eda14cbcSMatt Macy 	nvlist_t **l2cache = NULL;
2336eda14cbcSMatt Macy 	uint_t nl2cache;
2337eda14cbcSMatt Macy 	int i, j, oldnvdevs;
2338eda14cbcSMatt Macy 	uint64_t guid;
2339eda14cbcSMatt Macy 	vdev_t *vd, **oldvdevs, **newvdevs;
2340eda14cbcSMatt Macy 	spa_aux_vdev_t *sav = &spa->spa_l2cache;
2341eda14cbcSMatt Macy 
2342eda14cbcSMatt Macy #ifndef _KERNEL
2343eda14cbcSMatt Macy 	/*
2344eda14cbcSMatt Macy 	 * zdb opens both the current state of the pool and the
2345eda14cbcSMatt Macy 	 * checkpointed state (if present), with a different spa_t.
2346eda14cbcSMatt Macy 	 *
2347eda14cbcSMatt Macy 	 * As L2 caches are part of the ARC which is shared among open
2348eda14cbcSMatt Macy 	 * pools, we skip loading them when we load the checkpointed
2349eda14cbcSMatt Macy 	 * state of the pool.
2350eda14cbcSMatt Macy 	 */
2351eda14cbcSMatt Macy 	if (!spa_writeable(spa))
2352eda14cbcSMatt Macy 		return;
2353eda14cbcSMatt Macy #endif
2354eda14cbcSMatt Macy 
2355eda14cbcSMatt Macy 	ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
2356eda14cbcSMatt Macy 
2357eda14cbcSMatt Macy 	oldvdevs = sav->sav_vdevs;
2358eda14cbcSMatt Macy 	oldnvdevs = sav->sav_count;
2359eda14cbcSMatt Macy 	sav->sav_vdevs = NULL;
2360eda14cbcSMatt Macy 	sav->sav_count = 0;
2361eda14cbcSMatt Macy 
2362eda14cbcSMatt Macy 	if (sav->sav_config == NULL) {
2363eda14cbcSMatt Macy 		nl2cache = 0;
2364eda14cbcSMatt Macy 		newvdevs = NULL;
2365eda14cbcSMatt Macy 		goto out;
2366eda14cbcSMatt Macy 	}
2367eda14cbcSMatt Macy 
236881b22a98SMartin Matuska 	VERIFY0(nvlist_lookup_nvlist_array(sav->sav_config,
236981b22a98SMartin Matuska 	    ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache));
2370eda14cbcSMatt Macy 	newvdevs = kmem_alloc(nl2cache * sizeof (void *), KM_SLEEP);
2371eda14cbcSMatt Macy 
2372eda14cbcSMatt Macy 	/*
2373eda14cbcSMatt Macy 	 * Process new nvlist of vdevs.
2374eda14cbcSMatt Macy 	 */
2375eda14cbcSMatt Macy 	for (i = 0; i < nl2cache; i++) {
237681b22a98SMartin Matuska 		guid = fnvlist_lookup_uint64(l2cache[i], ZPOOL_CONFIG_GUID);
2377eda14cbcSMatt Macy 
2378eda14cbcSMatt Macy 		newvdevs[i] = NULL;
2379eda14cbcSMatt Macy 		for (j = 0; j < oldnvdevs; j++) {
2380eda14cbcSMatt Macy 			vd = oldvdevs[j];
2381eda14cbcSMatt Macy 			if (vd != NULL && guid == vd->vdev_guid) {
2382eda14cbcSMatt Macy 				/*
2383eda14cbcSMatt Macy 				 * Retain previous vdev for add/remove ops.
2384eda14cbcSMatt Macy 				 */
2385eda14cbcSMatt Macy 				newvdevs[i] = vd;
2386eda14cbcSMatt Macy 				oldvdevs[j] = NULL;
2387eda14cbcSMatt Macy 				break;
2388eda14cbcSMatt Macy 			}
2389eda14cbcSMatt Macy 		}
2390eda14cbcSMatt Macy 
2391eda14cbcSMatt Macy 		if (newvdevs[i] == NULL) {
2392eda14cbcSMatt Macy 			/*
2393eda14cbcSMatt Macy 			 * Create new vdev
2394eda14cbcSMatt Macy 			 */
2395eda14cbcSMatt Macy 			VERIFY(spa_config_parse(spa, &vd, l2cache[i], NULL, 0,
2396eda14cbcSMatt Macy 			    VDEV_ALLOC_L2CACHE) == 0);
2397eda14cbcSMatt Macy 			ASSERT(vd != NULL);
2398eda14cbcSMatt Macy 			newvdevs[i] = vd;
2399eda14cbcSMatt Macy 
2400eda14cbcSMatt Macy 			/*
2401eda14cbcSMatt Macy 			 * Commit this vdev as an l2cache device,
2402eda14cbcSMatt Macy 			 * even if it fails to open.
2403eda14cbcSMatt Macy 			 */
2404eda14cbcSMatt Macy 			spa_l2cache_add(vd);
2405eda14cbcSMatt Macy 
2406eda14cbcSMatt Macy 			vd->vdev_top = vd;
2407eda14cbcSMatt Macy 			vd->vdev_aux = sav;
2408eda14cbcSMatt Macy 
2409eda14cbcSMatt Macy 			spa_l2cache_activate(vd);
2410eda14cbcSMatt Macy 
2411eda14cbcSMatt Macy 			if (vdev_open(vd) != 0)
2412eda14cbcSMatt Macy 				continue;
2413eda14cbcSMatt Macy 
2414eda14cbcSMatt Macy 			(void) vdev_validate_aux(vd);
2415eda14cbcSMatt Macy 
2416eda14cbcSMatt Macy 			if (!vdev_is_dead(vd))
2417eda14cbcSMatt Macy 				l2arc_add_vdev(spa, vd);
2418eda14cbcSMatt Macy 
2419eda14cbcSMatt Macy 			/*
2420eda14cbcSMatt Macy 			 * When a cache device is added to a pool, when a pool
2421eda14cbcSMatt Macy 			 * is created with a cache device, or when the device's
2422eda14cbcSMatt Macy 			 * header is invalid, we issue an async TRIM command
2423eda14cbcSMatt Macy 			 * for the whole device, which will execute only if
2424eda14cbcSMatt Macy 			 * l2arc_trim_ahead > 0.
2425eda14cbcSMatt Macy 			 */
2426eda14cbcSMatt Macy 			spa_async_request(spa, SPA_ASYNC_L2CACHE_TRIM);
2427eda14cbcSMatt Macy 		}
2428eda14cbcSMatt Macy 	}
2429eda14cbcSMatt Macy 
2430eda14cbcSMatt Macy 	sav->sav_vdevs = newvdevs;
2431eda14cbcSMatt Macy 	sav->sav_count = (int)nl2cache;
2432eda14cbcSMatt Macy 
2433eda14cbcSMatt Macy 	/*
2434eda14cbcSMatt Macy 	 * Recompute the stashed list of l2cache devices, with status
2435eda14cbcSMatt Macy 	 * information this time.
2436eda14cbcSMatt Macy 	 */
243781b22a98SMartin Matuska 	fnvlist_remove(sav->sav_config, ZPOOL_CONFIG_L2CACHE);
2438eda14cbcSMatt Macy 
2439eda14cbcSMatt Macy 	if (sav->sav_count > 0)
2440eda14cbcSMatt Macy 		l2cache = kmem_alloc(sav->sav_count * sizeof (void *),
2441eda14cbcSMatt Macy 		    KM_SLEEP);
2442eda14cbcSMatt Macy 	for (i = 0; i < sav->sav_count; i++)
2443eda14cbcSMatt Macy 		l2cache[i] = vdev_config_generate(spa,
2444eda14cbcSMatt Macy 		    sav->sav_vdevs[i], B_TRUE, VDEV_CONFIG_L2CACHE);
2445681ce946SMartin Matuska 	fnvlist_add_nvlist_array(sav->sav_config, ZPOOL_CONFIG_L2CACHE,
2446681ce946SMartin Matuska 	    (const nvlist_t * const *)l2cache, sav->sav_count);
2447eda14cbcSMatt Macy 
2448eda14cbcSMatt Macy out:
2449eda14cbcSMatt Macy 	/*
2450eda14cbcSMatt Macy 	 * Purge vdevs that were dropped
2451eda14cbcSMatt Macy 	 */
2452c9539b89SMartin Matuska 	if (oldvdevs) {
2453eda14cbcSMatt Macy 		for (i = 0; i < oldnvdevs; i++) {
2454eda14cbcSMatt Macy 			uint64_t pool;
2455eda14cbcSMatt Macy 
2456eda14cbcSMatt Macy 			vd = oldvdevs[i];
2457eda14cbcSMatt Macy 			if (vd != NULL) {
2458eda14cbcSMatt Macy 				ASSERT(vd->vdev_isl2cache);
2459eda14cbcSMatt Macy 
2460eda14cbcSMatt Macy 				if (spa_l2cache_exists(vd->vdev_guid, &pool) &&
2461eda14cbcSMatt Macy 				    pool != 0ULL && l2arc_vdev_present(vd))
2462eda14cbcSMatt Macy 					l2arc_remove_vdev(vd);
2463eda14cbcSMatt Macy 				vdev_clear_stats(vd);
2464eda14cbcSMatt Macy 				vdev_free(vd);
2465eda14cbcSMatt Macy 			}
2466eda14cbcSMatt Macy 		}
2467eda14cbcSMatt Macy 
2468eda14cbcSMatt Macy 		kmem_free(oldvdevs, oldnvdevs * sizeof (void *));
2469c9539b89SMartin Matuska 	}
2470eda14cbcSMatt Macy 
2471eda14cbcSMatt Macy 	for (i = 0; i < sav->sav_count; i++)
2472eda14cbcSMatt Macy 		nvlist_free(l2cache[i]);
2473eda14cbcSMatt Macy 	if (sav->sav_count)
2474eda14cbcSMatt Macy 		kmem_free(l2cache, sav->sav_count * sizeof (void *));
2475eda14cbcSMatt Macy }
2476eda14cbcSMatt Macy 
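/*
 * Read a packed nvlist stored in the MOS object 'obj'.  The object's bonus
 * buffer holds the packed size and the object data holds the packed nvlist,
 * which is unpacked into 'value'.
 */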
2477eda14cbcSMatt Macy static int
2478eda14cbcSMatt Macy load_nvlist(spa_t *spa, uint64_t obj, nvlist_t **value)
2479eda14cbcSMatt Macy {
2480eda14cbcSMatt Macy 	dmu_buf_t *db;
2481eda14cbcSMatt Macy 	char *packed = NULL;
2482eda14cbcSMatt Macy 	size_t nvsize = 0;
2483eda14cbcSMatt Macy 	int error;
2484eda14cbcSMatt Macy 	*value = NULL;
2485eda14cbcSMatt Macy 
2486eda14cbcSMatt Macy 	error = dmu_bonus_hold(spa->spa_meta_objset, obj, FTAG, &db);
2487eda14cbcSMatt Macy 	if (error)
2488eda14cbcSMatt Macy 		return (error);
2489eda14cbcSMatt Macy 
2490eda14cbcSMatt Macy 	nvsize = *(uint64_t *)db->db_data;
2491eda14cbcSMatt Macy 	dmu_buf_rele(db, FTAG);
2492eda14cbcSMatt Macy 
2493eda14cbcSMatt Macy 	packed = vmem_alloc(nvsize, KM_SLEEP);
2494eda14cbcSMatt Macy 	error = dmu_read(spa->spa_meta_objset, obj, 0, nvsize, packed,
2495eda14cbcSMatt Macy 	    DMU_READ_PREFETCH);
2496eda14cbcSMatt Macy 	if (error == 0)
2497eda14cbcSMatt Macy 		error = nvlist_unpack(packed, nvsize, value, 0);
2498eda14cbcSMatt Macy 	vmem_free(packed, nvsize);
2499eda14cbcSMatt Macy 
2500eda14cbcSMatt Macy 	return (error);
2501eda14cbcSMatt Macy }
2502eda14cbcSMatt Macy 
2503eda14cbcSMatt Macy /*
2504eda14cbcSMatt Macy  * Concrete top-level vdevs that are not missing and are not logs. At every
2505eda14cbcSMatt Macy  * spa_sync we write new uberblocks to at least SPA_SYNC_MIN_VDEVS core tvds.
2506eda14cbcSMatt Macy  */
2507eda14cbcSMatt Macy static uint64_t
2508eda14cbcSMatt Macy spa_healthy_core_tvds(spa_t *spa)
2509eda14cbcSMatt Macy {
2510eda14cbcSMatt Macy 	vdev_t *rvd = spa->spa_root_vdev;
2511eda14cbcSMatt Macy 	uint64_t tvds = 0;
2512eda14cbcSMatt Macy 
2513eda14cbcSMatt Macy 	for (uint64_t i = 0; i < rvd->vdev_children; i++) {
2514eda14cbcSMatt Macy 		vdev_t *vd = rvd->vdev_child[i];
2515eda14cbcSMatt Macy 		if (vd->vdev_islog)
2516eda14cbcSMatt Macy 			continue;
2517eda14cbcSMatt Macy 		if (vdev_is_concrete(vd) && !vdev_is_dead(vd))
2518eda14cbcSMatt Macy 			tvds++;
2519eda14cbcSMatt Macy 	}
2520eda14cbcSMatt Macy 
2521eda14cbcSMatt Macy 	return (tvds);
2522eda14cbcSMatt Macy }
2523eda14cbcSMatt Macy 
2524eda14cbcSMatt Macy /*
2525eda14cbcSMatt Macy  * Checks to see if the given vdev could not be opened, in which case we post a
2526eda14cbcSMatt Macy  * sysevent to notify the autoreplace code that the device has been removed.
2527eda14cbcSMatt Macy  */
2528eda14cbcSMatt Macy static void
2529eda14cbcSMatt Macy spa_check_removed(vdev_t *vd)
2530eda14cbcSMatt Macy {
2531eda14cbcSMatt Macy 	for (uint64_t c = 0; c < vd->vdev_children; c++)
2532eda14cbcSMatt Macy 		spa_check_removed(vd->vdev_child[c]);
2533eda14cbcSMatt Macy 
2534eda14cbcSMatt Macy 	if (vd->vdev_ops->vdev_op_leaf && vdev_is_dead(vd) &&
2535eda14cbcSMatt Macy 	    vdev_is_concrete(vd)) {
2536eda14cbcSMatt Macy 		zfs_post_autoreplace(vd->vdev_spa, vd);
2537eda14cbcSMatt Macy 		spa_event_notify(vd->vdev_spa, vd, NULL, ESC_ZFS_VDEV_CHECK);
2538eda14cbcSMatt Macy 	}
2539eda14cbcSMatt Macy }
2540eda14cbcSMatt Macy 
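/*
 * Look for log top-level vdevs that failed to open.  On a normal import we
 * record them under ZPOOL_CONFIG_MISSING_DEVICES in spa_load_info and fail
 * with ENXIO; if ZFS_IMPORT_MISSING_LOG was requested we instead clear the
 * log state, dropping the ZIL contents.
 */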
2541eda14cbcSMatt Macy static int
2542eda14cbcSMatt Macy spa_check_for_missing_logs(spa_t *spa)
2543eda14cbcSMatt Macy {
2544eda14cbcSMatt Macy 	vdev_t *rvd = spa->spa_root_vdev;
2545eda14cbcSMatt Macy 
2546eda14cbcSMatt Macy 	/*
2547eda14cbcSMatt Macy 	 * If we're doing a normal import, then build up any additional
2548eda14cbcSMatt Macy 	 * diagnostic information about missing log devices.
2549eda14cbcSMatt Macy 	 * We'll pass this up to the user for further processing.
2550eda14cbcSMatt Macy 	 */
2551eda14cbcSMatt Macy 	if (!(spa->spa_import_flags & ZFS_IMPORT_MISSING_LOG)) {
2552eda14cbcSMatt Macy 		nvlist_t **child, *nv;
2553eda14cbcSMatt Macy 		uint64_t idx = 0;
2554eda14cbcSMatt Macy 
2555eda14cbcSMatt Macy 		child = kmem_alloc(rvd->vdev_children * sizeof (nvlist_t *),
2556eda14cbcSMatt Macy 		    KM_SLEEP);
255781b22a98SMartin Matuska 		nv = fnvlist_alloc();
2558eda14cbcSMatt Macy 
2559eda14cbcSMatt Macy 		for (uint64_t c = 0; c < rvd->vdev_children; c++) {
2560eda14cbcSMatt Macy 			vdev_t *tvd = rvd->vdev_child[c];
2561eda14cbcSMatt Macy 
2562eda14cbcSMatt Macy 			/*
2563eda14cbcSMatt Macy 			 * We consider a device missing only if it failed to
2564eda14cbcSMatt Macy 			 * open (i.e. an offline or faulted device is not
2565eda14cbcSMatt Macy 			 * considered missing).
2566eda14cbcSMatt Macy 			 */
2567eda14cbcSMatt Macy 			if (tvd->vdev_islog &&
2568eda14cbcSMatt Macy 			    tvd->vdev_state == VDEV_STATE_CANT_OPEN) {
2569eda14cbcSMatt Macy 				child[idx++] = vdev_config_generate(spa, tvd,
2570eda14cbcSMatt Macy 				    B_FALSE, VDEV_CONFIG_MISSING);
2571eda14cbcSMatt Macy 			}
2572eda14cbcSMatt Macy 		}
2573eda14cbcSMatt Macy 
2574eda14cbcSMatt Macy 		if (idx > 0) {
2575681ce946SMartin Matuska 			fnvlist_add_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
2576681ce946SMartin Matuska 			    (const nvlist_t * const *)child, idx);
2577eda14cbcSMatt Macy 			fnvlist_add_nvlist(spa->spa_load_info,
2578eda14cbcSMatt Macy 			    ZPOOL_CONFIG_MISSING_DEVICES, nv);
2579eda14cbcSMatt Macy 
2580eda14cbcSMatt Macy 			for (uint64_t i = 0; i < idx; i++)
2581eda14cbcSMatt Macy 				nvlist_free(child[i]);
2582eda14cbcSMatt Macy 		}
2583eda14cbcSMatt Macy 		nvlist_free(nv);
2584eda14cbcSMatt Macy 		kmem_free(child, rvd->vdev_children * sizeof (char **));
2585eda14cbcSMatt Macy 
2586eda14cbcSMatt Macy 		if (idx > 0) {
2587eda14cbcSMatt Macy 			spa_load_failed(spa, "some log devices are missing");
2588eda14cbcSMatt Macy 			vdev_dbgmsg_print_tree(rvd, 2);
2589eda14cbcSMatt Macy 			return (SET_ERROR(ENXIO));
2590eda14cbcSMatt Macy 		}
2591eda14cbcSMatt Macy 	} else {
2592eda14cbcSMatt Macy 		for (uint64_t c = 0; c < rvd->vdev_children; c++) {
2593eda14cbcSMatt Macy 			vdev_t *tvd = rvd->vdev_child[c];
2594eda14cbcSMatt Macy 
2595eda14cbcSMatt Macy 			if (tvd->vdev_islog &&
2596eda14cbcSMatt Macy 			    tvd->vdev_state == VDEV_STATE_CANT_OPEN) {
2597eda14cbcSMatt Macy 				spa_set_log_state(spa, SPA_LOG_CLEAR);
2598eda14cbcSMatt Macy 				spa_load_note(spa, "some log devices are "
2599eda14cbcSMatt Macy 				    "missing, ZIL is dropped.");
2600eda14cbcSMatt Macy 				vdev_dbgmsg_print_tree(rvd, 2);
2601eda14cbcSMatt Macy 				break;
2602eda14cbcSMatt Macy 			}
2603eda14cbcSMatt Macy 		}
2604eda14cbcSMatt Macy 	}
2605eda14cbcSMatt Macy 
2606eda14cbcSMatt Macy 	return (0);
2607eda14cbcSMatt Macy }
2608eda14cbcSMatt Macy 
2609eda14cbcSMatt Macy /*
2610eda14cbcSMatt Macy  * Check for missing log devices
2611eda14cbcSMatt Macy  */
2612eda14cbcSMatt Macy static boolean_t
2613eda14cbcSMatt Macy spa_check_logs(spa_t *spa)
2614eda14cbcSMatt Macy {
2615eda14cbcSMatt Macy 	boolean_t rv = B_FALSE;
2616eda14cbcSMatt Macy 	dsl_pool_t *dp = spa_get_dsl(spa);
2617eda14cbcSMatt Macy 
2618eda14cbcSMatt Macy 	switch (spa->spa_log_state) {
2619eda14cbcSMatt Macy 	default:
2620eda14cbcSMatt Macy 		break;
2621eda14cbcSMatt Macy 	case SPA_LOG_MISSING:
2622eda14cbcSMatt Macy 		/* need to recheck in case slog has been restored */
2623eda14cbcSMatt Macy 	case SPA_LOG_UNKNOWN:
2624eda14cbcSMatt Macy 		rv = (dmu_objset_find_dp(dp, dp->dp_root_dir_obj,
2625eda14cbcSMatt Macy 		    zil_check_log_chain, NULL, DS_FIND_CHILDREN) != 0);
2626eda14cbcSMatt Macy 		if (rv)
2627eda14cbcSMatt Macy 			spa_set_log_state(spa, SPA_LOG_MISSING);
2628eda14cbcSMatt Macy 		break;
2629eda14cbcSMatt Macy 	}
2630eda14cbcSMatt Macy 	return (rv);
2631eda14cbcSMatt Macy }
2632eda14cbcSMatt Macy 
2633184c1b94SMartin Matuska /*
2634184c1b94SMartin Matuska  * Passivate any log vdevs (note, does not apply to embedded log metaslabs).
2635184c1b94SMartin Matuska  */
2636eda14cbcSMatt Macy static boolean_t
2637eda14cbcSMatt Macy spa_passivate_log(spa_t *spa)
2638eda14cbcSMatt Macy {
2639eda14cbcSMatt Macy 	vdev_t *rvd = spa->spa_root_vdev;
2640eda14cbcSMatt Macy 	boolean_t slog_found = B_FALSE;
2641eda14cbcSMatt Macy 
2642eda14cbcSMatt Macy 	ASSERT(spa_config_held(spa, SCL_ALLOC, RW_WRITER));
2643eda14cbcSMatt Macy 
2644eda14cbcSMatt Macy 	for (int c = 0; c < rvd->vdev_children; c++) {
2645eda14cbcSMatt Macy 		vdev_t *tvd = rvd->vdev_child[c];
2646eda14cbcSMatt Macy 
2647eda14cbcSMatt Macy 		if (tvd->vdev_islog) {
2648184c1b94SMartin Matuska 			ASSERT3P(tvd->vdev_log_mg, ==, NULL);
2649184c1b94SMartin Matuska 			metaslab_group_passivate(tvd->vdev_mg);
2650eda14cbcSMatt Macy 			slog_found = B_TRUE;
2651eda14cbcSMatt Macy 		}
2652eda14cbcSMatt Macy 	}
2653eda14cbcSMatt Macy 
2654eda14cbcSMatt Macy 	return (slog_found);
2655eda14cbcSMatt Macy }
2656eda14cbcSMatt Macy 
2657184c1b94SMartin Matuska /*
2658184c1b94SMartin Matuska  * Activate any log vdevs (note, does not apply to embedded log metaslabs).
2659184c1b94SMartin Matuska  */
2660eda14cbcSMatt Macy static void
2661eda14cbcSMatt Macy spa_activate_log(spa_t *spa)
2662eda14cbcSMatt Macy {
2663eda14cbcSMatt Macy 	vdev_t *rvd = spa->spa_root_vdev;
2664eda14cbcSMatt Macy 
2665eda14cbcSMatt Macy 	ASSERT(spa_config_held(spa, SCL_ALLOC, RW_WRITER));
2666eda14cbcSMatt Macy 
2667eda14cbcSMatt Macy 	for (int c = 0; c < rvd->vdev_children; c++) {
2668eda14cbcSMatt Macy 		vdev_t *tvd = rvd->vdev_child[c];
2669eda14cbcSMatt Macy 
2670184c1b94SMartin Matuska 		if (tvd->vdev_islog) {
2671184c1b94SMartin Matuska 			ASSERT3P(tvd->vdev_log_mg, ==, NULL);
2672184c1b94SMartin Matuska 			metaslab_group_activate(tvd->vdev_mg);
2673184c1b94SMartin Matuska 		}
2674eda14cbcSMatt Macy 	}
2675eda14cbcSMatt Macy }
2676eda14cbcSMatt Macy 
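/*
 * Reset the ZIL of every dataset in the pool, then wait for the txg to sync
 * so that zil_sync() can clean up the freed log blocks.
 */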
2677eda14cbcSMatt Macy int
2678eda14cbcSMatt Macy spa_reset_logs(spa_t *spa)
2679eda14cbcSMatt Macy {
2680eda14cbcSMatt Macy 	int error;
2681eda14cbcSMatt Macy 
2682eda14cbcSMatt Macy 	error = dmu_objset_find(spa_name(spa), zil_reset,
2683eda14cbcSMatt Macy 	    NULL, DS_FIND_CHILDREN);
2684eda14cbcSMatt Macy 	if (error == 0) {
2685eda14cbcSMatt Macy 		/*
2686eda14cbcSMatt Macy 		 * We successfully offlined the log device, sync out the
2687eda14cbcSMatt Macy 		 * current txg so that the "stubby" block can be removed
2688eda14cbcSMatt Macy 		 * by zil_sync().
2689eda14cbcSMatt Macy 		 */
2690eda14cbcSMatt Macy 		txg_wait_synced(spa->spa_dsl_pool, 0);
2691eda14cbcSMatt Macy 	}
2692eda14cbcSMatt Macy 	return (error);
2693eda14cbcSMatt Macy }
2694eda14cbcSMatt Macy 
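/*
 * Run spa_check_removed() on every auxiliary (spare or l2cache) vdev.
 */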
2695eda14cbcSMatt Macy static void
2696eda14cbcSMatt Macy spa_aux_check_removed(spa_aux_vdev_t *sav)
2697eda14cbcSMatt Macy {
2698eda14cbcSMatt Macy 	for (int i = 0; i < sav->sav_count; i++)
2699eda14cbcSMatt Macy 		spa_check_removed(sav->sav_vdevs[i]);
2700eda14cbcSMatt Macy }
2701eda14cbcSMatt Macy 
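/*
 * Notification callback for log block claiming during pool load: record the
 * highest logical birth txg seen among successfully claimed blocks.
 */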
2702eda14cbcSMatt Macy void
2703eda14cbcSMatt Macy spa_claim_notify(zio_t *zio)
2704eda14cbcSMatt Macy {
2705eda14cbcSMatt Macy 	spa_t *spa = zio->io_spa;
2706eda14cbcSMatt Macy 
2707eda14cbcSMatt Macy 	if (zio->io_error)
2708eda14cbcSMatt Macy 		return;
2709eda14cbcSMatt Macy 
2710eda14cbcSMatt Macy 	mutex_enter(&spa->spa_props_lock);	/* any mutex will do */
2711783d3ff6SMartin Matuska 	if (spa->spa_claim_max_txg < BP_GET_LOGICAL_BIRTH(zio->io_bp))
2712783d3ff6SMartin Matuska 		spa->spa_claim_max_txg = BP_GET_LOGICAL_BIRTH(zio->io_bp);
2713eda14cbcSMatt Macy 	mutex_exit(&spa->spa_props_lock);
2714eda14cbcSMatt Macy }
2715eda14cbcSMatt Macy 
2716eda14cbcSMatt Macy typedef struct spa_load_error {
2717c03c5b1cSMartin Matuska 	boolean_t	sle_verify_data;
2718eda14cbcSMatt Macy 	uint64_t	sle_meta_count;
2719eda14cbcSMatt Macy 	uint64_t	sle_data_count;
2720eda14cbcSMatt Macy } spa_load_error_t;
2721eda14cbcSMatt Macy 
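/*
 * Completion callback for the verification reads issued by
 * spa_load_verify_cb(): count the error as metadata or data damage and
 * release the in-flight bytes so the traversal can issue more I/O.
 */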
2722eda14cbcSMatt Macy static void
2723eda14cbcSMatt Macy spa_load_verify_done(zio_t *zio)
2724eda14cbcSMatt Macy {
2725eda14cbcSMatt Macy 	blkptr_t *bp = zio->io_bp;
2726eda14cbcSMatt Macy 	spa_load_error_t *sle = zio->io_private;
2727eda14cbcSMatt Macy 	dmu_object_type_t type = BP_GET_TYPE(bp);
2728eda14cbcSMatt Macy 	int error = zio->io_error;
2729eda14cbcSMatt Macy 	spa_t *spa = zio->io_spa;
2730eda14cbcSMatt Macy 
2731eda14cbcSMatt Macy 	abd_free(zio->io_abd);
2732eda14cbcSMatt Macy 	if (error) {
2733eda14cbcSMatt Macy 		if ((BP_GET_LEVEL(bp) != 0 || DMU_OT_IS_METADATA(type)) &&
2734eda14cbcSMatt Macy 		    type != DMU_OT_INTENT_LOG)
2735eda14cbcSMatt Macy 			atomic_inc_64(&sle->sle_meta_count);
2736eda14cbcSMatt Macy 		else
2737eda14cbcSMatt Macy 			atomic_inc_64(&sle->sle_data_count);
2738eda14cbcSMatt Macy 	}
2739eda14cbcSMatt Macy 
2740eda14cbcSMatt Macy 	mutex_enter(&spa->spa_scrub_lock);
2741eda14cbcSMatt Macy 	spa->spa_load_verify_bytes -= BP_GET_PSIZE(bp);
2742eda14cbcSMatt Macy 	cv_broadcast(&spa->spa_scrub_io_cv);
2743eda14cbcSMatt Macy 	mutex_exit(&spa->spa_scrub_lock);
2744eda14cbcSMatt Macy }
2745eda14cbcSMatt Macy 
2746eda14cbcSMatt Macy /*
2747eda14cbcSMatt Macy  * The maximum number of inflight bytes is limited to the ARC size shifted
2748eda14cbcSMatt Macy  * right by spa_load_verify_shift; by default this is 1/16th of the ARC.
2749eda14cbcSMatt Macy  */
2750be181ee2SMartin Matuska static uint_t spa_load_verify_shift = 4;
2751e92ffd9bSMartin Matuska static int spa_load_verify_metadata = B_TRUE;
2752e92ffd9bSMartin Matuska static int spa_load_verify_data = B_TRUE;
2753eda14cbcSMatt Macy 
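/*
 * Traversal callback for spa_load_verify(): sanity check each block pointer,
 * throttle against the in-flight byte limit, and issue a speculative scrub
 * read whose result is tallied by spa_load_verify_done().
 */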
2754eda14cbcSMatt Macy static int
2755eda14cbcSMatt Macy spa_load_verify_cb(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
2756eda14cbcSMatt Macy     const zbookmark_phys_t *zb, const dnode_phys_t *dnp, void *arg)
2757eda14cbcSMatt Macy {
2758c03c5b1cSMartin Matuska 	zio_t *rio = arg;
2759c03c5b1cSMartin Matuska 	spa_load_error_t *sle = rio->io_private;
2760c03c5b1cSMartin Matuska 
2761e92ffd9bSMartin Matuska 	(void) zilog, (void) dnp;
2762e92ffd9bSMartin Matuska 
2763eda14cbcSMatt Macy 	/*
2764eda14cbcSMatt Macy 	 * Note: normally this routine will not be called if
2765eda14cbcSMatt Macy 	 * spa_load_verify_metadata is not set.  However, it may be useful
2766eda14cbcSMatt Macy 	 * to manually set the flag after the traversal has begun.
2767eda14cbcSMatt Macy 	 */
2768eda14cbcSMatt Macy 	if (!spa_load_verify_metadata)
2769eda14cbcSMatt Macy 		return (0);
2770e3aa18adSMartin Matuska 
2771e3aa18adSMartin Matuska 	/*
2772e3aa18adSMartin Matuska 	 * Sanity check the block pointer in order to detect obvious damage
2773e3aa18adSMartin Matuska 	 * before using the contents in subsequent checks or in zio_read().
2774e3aa18adSMartin Matuska 	 * When damaged consider it to be a metadata error since we cannot
2775e3aa18adSMartin Matuska 	 * trust the BP_GET_TYPE and BP_GET_LEVEL values.
2776e3aa18adSMartin Matuska 	 */
2777e639e0d2SMartin Matuska 	if (!zfs_blkptr_verify(spa, bp, BLK_CONFIG_NEEDED, BLK_VERIFY_LOG)) {
2778e3aa18adSMartin Matuska 		atomic_inc_64(&sle->sle_meta_count);
2779e3aa18adSMartin Matuska 		return (0);
2780e3aa18adSMartin Matuska 	}
2781e3aa18adSMartin Matuska 
2782e3aa18adSMartin Matuska 	if (zb->zb_level == ZB_DNODE_LEVEL || BP_IS_HOLE(bp) ||
2783e3aa18adSMartin Matuska 	    BP_IS_EMBEDDED(bp) || BP_IS_REDACTED(bp))
2784e3aa18adSMartin Matuska 		return (0);
2785e3aa18adSMartin Matuska 
2786c03c5b1cSMartin Matuska 	if (!BP_IS_METADATA(bp) &&
2787c03c5b1cSMartin Matuska 	    (!spa_load_verify_data || !sle->sle_verify_data))
2788eda14cbcSMatt Macy 		return (0);
2789eda14cbcSMatt Macy 
2790eda14cbcSMatt Macy 	uint64_t maxinflight_bytes =
2791eda14cbcSMatt Macy 	    arc_target_bytes() >> spa_load_verify_shift;
2792eda14cbcSMatt Macy 	size_t size = BP_GET_PSIZE(bp);
2793eda14cbcSMatt Macy 
2794eda14cbcSMatt Macy 	mutex_enter(&spa->spa_scrub_lock);
2795eda14cbcSMatt Macy 	while (spa->spa_load_verify_bytes >= maxinflight_bytes)
2796eda14cbcSMatt Macy 		cv_wait(&spa->spa_scrub_io_cv, &spa->spa_scrub_lock);
2797eda14cbcSMatt Macy 	spa->spa_load_verify_bytes += size;
2798eda14cbcSMatt Macy 	mutex_exit(&spa->spa_scrub_lock);
2799eda14cbcSMatt Macy 
2800eda14cbcSMatt Macy 	zio_nowait(zio_read(rio, spa, bp, abd_alloc_for_io(size, B_FALSE), size,
2801eda14cbcSMatt Macy 	    spa_load_verify_done, rio->io_private, ZIO_PRIORITY_SCRUB,
2802eda14cbcSMatt Macy 	    ZIO_FLAG_SPECULATIVE | ZIO_FLAG_CANFAIL |
2803eda14cbcSMatt Macy 	    ZIO_FLAG_SCRUB | ZIO_FLAG_RAW, zb));
2804eda14cbcSMatt Macy 	return (0);
2805eda14cbcSMatt Macy }
2806eda14cbcSMatt Macy 
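/*
 * dmu_objset_find_dp() callback that fails with ENAMETOOLONG if any dataset
 * name in the pool is too long to represent.
 */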
2807eda14cbcSMatt Macy static int
2808eda14cbcSMatt Macy verify_dataset_name_len(dsl_pool_t *dp, dsl_dataset_t *ds, void *arg)
2809eda14cbcSMatt Macy {
2810e92ffd9bSMartin Matuska 	(void) dp, (void) arg;
2811e92ffd9bSMartin Matuska 
2812eda14cbcSMatt Macy 	if (dsl_dataset_namelen(ds) >= ZFS_MAX_DATASET_NAME_LEN)
2813eda14cbcSMatt Macy 		return (SET_ERROR(ENAMETOOLONG));
2814eda14cbcSMatt Macy 
2815eda14cbcSMatt Macy 	return (0);
2816eda14cbcSMatt Macy }
2817eda14cbcSMatt Macy 
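/*
 * Verify the pool according to the load policy: check dataset name lengths,
 * optionally traverse the pool counting metadata and data errors, and stash
 * the results in spa_load_info.  Returns 0 if the pool is acceptable for the
 * requested (possibly rewound) import and EIO otherwise.
 */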
2818eda14cbcSMatt Macy static int
2819eda14cbcSMatt Macy spa_load_verify(spa_t *spa)
2820eda14cbcSMatt Macy {
2821eda14cbcSMatt Macy 	zio_t *rio;
2822eda14cbcSMatt Macy 	spa_load_error_t sle = { 0 };
2823eda14cbcSMatt Macy 	zpool_load_policy_t policy;
2824eda14cbcSMatt Macy 	boolean_t verify_ok = B_FALSE;
2825eda14cbcSMatt Macy 	int error = 0;
2826eda14cbcSMatt Macy 
2827eda14cbcSMatt Macy 	zpool_get_load_policy(spa->spa_config, &policy);
2828eda14cbcSMatt Macy 
2829c03c5b1cSMartin Matuska 	if (policy.zlp_rewind & ZPOOL_NEVER_REWIND ||
2830c03c5b1cSMartin Matuska 	    policy.zlp_maxmeta == UINT64_MAX)
2831eda14cbcSMatt Macy 		return (0);
2832eda14cbcSMatt Macy 
2833eda14cbcSMatt Macy 	dsl_pool_config_enter(spa->spa_dsl_pool, FTAG);
2834eda14cbcSMatt Macy 	error = dmu_objset_find_dp(spa->spa_dsl_pool,
2835eda14cbcSMatt Macy 	    spa->spa_dsl_pool->dp_root_dir_obj, verify_dataset_name_len, NULL,
2836eda14cbcSMatt Macy 	    DS_FIND_CHILDREN);
2837eda14cbcSMatt Macy 	dsl_pool_config_exit(spa->spa_dsl_pool, FTAG);
2838eda14cbcSMatt Macy 	if (error != 0)
2839eda14cbcSMatt Macy 		return (error);
2840eda14cbcSMatt Macy 
2841c03c5b1cSMartin Matuska 	/*
2842c03c5b1cSMartin Matuska 	 * Verify data only if we are rewinding or if an error limit was set.
2843c03c5b1cSMartin Matuska 	 * Otherwise nothing but dbgmsg cares about it, so don't waste the time.
2844c03c5b1cSMartin Matuska 	 */
2845c03c5b1cSMartin Matuska 	sle.sle_verify_data = (policy.zlp_rewind & ZPOOL_REWIND_MASK) ||
2846c03c5b1cSMartin Matuska 	    (policy.zlp_maxdata < UINT64_MAX);
2847c03c5b1cSMartin Matuska 
2848eda14cbcSMatt Macy 	rio = zio_root(spa, NULL, &sle,
2849eda14cbcSMatt Macy 	    ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE);
2850eda14cbcSMatt Macy 
2851eda14cbcSMatt Macy 	if (spa_load_verify_metadata) {
2852eda14cbcSMatt Macy 		if (spa->spa_extreme_rewind) {
2853eda14cbcSMatt Macy 			spa_load_note(spa, "performing a complete scan of the "
2854eda14cbcSMatt Macy 			    "pool since extreme rewind is on. This may take "
2855eda14cbcSMatt Macy 			    "a very long time.\n  (spa_load_verify_data=%u, "
2856eda14cbcSMatt Macy 			    "spa_load_verify_metadata=%u)",
2857eda14cbcSMatt Macy 			    spa_load_verify_data, spa_load_verify_metadata);
2858eda14cbcSMatt Macy 		}
2859eda14cbcSMatt Macy 
2860eda14cbcSMatt Macy 		error = traverse_pool(spa, spa->spa_verify_min_txg,
2861eda14cbcSMatt Macy 		    TRAVERSE_PRE | TRAVERSE_PREFETCH_METADATA |
2862eda14cbcSMatt Macy 		    TRAVERSE_NO_DECRYPT, spa_load_verify_cb, rio);
2863eda14cbcSMatt Macy 	}
2864eda14cbcSMatt Macy 
2865eda14cbcSMatt Macy 	(void) zio_wait(rio);
2866eda14cbcSMatt Macy 	ASSERT0(spa->spa_load_verify_bytes);
2867eda14cbcSMatt Macy 
2868eda14cbcSMatt Macy 	spa->spa_load_meta_errors = sle.sle_meta_count;
2869eda14cbcSMatt Macy 	spa->spa_load_data_errors = sle.sle_data_count;
2870eda14cbcSMatt Macy 
2871eda14cbcSMatt Macy 	if (sle.sle_meta_count != 0 || sle.sle_data_count != 0) {
2872eda14cbcSMatt Macy 		spa_load_note(spa, "spa_load_verify found %llu metadata errors "
2873eda14cbcSMatt Macy 		    "and %llu data errors", (u_longlong_t)sle.sle_meta_count,
2874eda14cbcSMatt Macy 		    (u_longlong_t)sle.sle_data_count);
2875eda14cbcSMatt Macy 	}
2876eda14cbcSMatt Macy 
2877eda14cbcSMatt Macy 	if (spa_load_verify_dryrun ||
2878eda14cbcSMatt Macy 	    (!error && sle.sle_meta_count <= policy.zlp_maxmeta &&
2879eda14cbcSMatt Macy 	    sle.sle_data_count <= policy.zlp_maxdata)) {
2880eda14cbcSMatt Macy 		int64_t loss = 0;
2881eda14cbcSMatt Macy 
2882eda14cbcSMatt Macy 		verify_ok = B_TRUE;
2883eda14cbcSMatt Macy 		spa->spa_load_txg = spa->spa_uberblock.ub_txg;
2884eda14cbcSMatt Macy 		spa->spa_load_txg_ts = spa->spa_uberblock.ub_timestamp;
2885eda14cbcSMatt Macy 
2886eda14cbcSMatt Macy 		loss = spa->spa_last_ubsync_txg_ts - spa->spa_load_txg_ts;
288781b22a98SMartin Matuska 		fnvlist_add_uint64(spa->spa_load_info, ZPOOL_CONFIG_LOAD_TIME,
288881b22a98SMartin Matuska 		    spa->spa_load_txg_ts);
288981b22a98SMartin Matuska 		fnvlist_add_int64(spa->spa_load_info, ZPOOL_CONFIG_REWIND_TIME,
289081b22a98SMartin Matuska 		    loss);
289181b22a98SMartin Matuska 		fnvlist_add_uint64(spa->spa_load_info,
2892c03c5b1cSMartin Matuska 		    ZPOOL_CONFIG_LOAD_META_ERRORS, sle.sle_meta_count);
2893c03c5b1cSMartin Matuska 		fnvlist_add_uint64(spa->spa_load_info,
289481b22a98SMartin Matuska 		    ZPOOL_CONFIG_LOAD_DATA_ERRORS, sle.sle_data_count);
2895eda14cbcSMatt Macy 	} else {
2896eda14cbcSMatt Macy 		spa->spa_load_max_txg = spa->spa_uberblock.ub_txg;
2897eda14cbcSMatt Macy 	}
2898eda14cbcSMatt Macy 
2899eda14cbcSMatt Macy 	if (spa_load_verify_dryrun)
2900eda14cbcSMatt Macy 		return (0);
2901eda14cbcSMatt Macy 
2902eda14cbcSMatt Macy 	if (error) {
2903eda14cbcSMatt Macy 		if (error != ENXIO && error != EIO)
2904eda14cbcSMatt Macy 			error = SET_ERROR(EIO);
2905eda14cbcSMatt Macy 		return (error);
2906eda14cbcSMatt Macy 	}
2907eda14cbcSMatt Macy 
2908eda14cbcSMatt Macy 	return (verify_ok ? 0 : EIO);
2909eda14cbcSMatt Macy }
2910eda14cbcSMatt Macy 
2911eda14cbcSMatt Macy /*
2912eda14cbcSMatt Macy  * Find a value in the pool props object.
2913eda14cbcSMatt Macy  */
2914eda14cbcSMatt Macy static void
2915eda14cbcSMatt Macy spa_prop_find(spa_t *spa, zpool_prop_t prop, uint64_t *val)
2916eda14cbcSMatt Macy {
2917eda14cbcSMatt Macy 	(void) zap_lookup(spa->spa_meta_objset, spa->spa_pool_props_object,
2918eda14cbcSMatt Macy 	    zpool_prop_to_name(prop), sizeof (uint64_t), 1, val);
2919eda14cbcSMatt Macy }
2920eda14cbcSMatt Macy 
2921eda14cbcSMatt Macy /*
2922eda14cbcSMatt Macy  * Find a value in the pool directory object.
2923eda14cbcSMatt Macy  */
2924eda14cbcSMatt Macy static int
2925eda14cbcSMatt Macy spa_dir_prop(spa_t *spa, const char *name, uint64_t *val, boolean_t log_enoent)
2926eda14cbcSMatt Macy {
2927eda14cbcSMatt Macy 	int error = zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
2928eda14cbcSMatt Macy 	    name, sizeof (uint64_t), 1, val);
2929eda14cbcSMatt Macy 
2930eda14cbcSMatt Macy 	if (error != 0 && (error != ENOENT || log_enoent)) {
2931eda14cbcSMatt Macy 		spa_load_failed(spa, "couldn't get '%s' value in MOS directory "
2932eda14cbcSMatt Macy 		    "[error=%d]", name, error);
2933eda14cbcSMatt Macy 	}
2934eda14cbcSMatt Macy 
2935eda14cbcSMatt Macy 	return (error);
2936eda14cbcSMatt Macy }
2937eda14cbcSMatt Macy 
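/*
 * Mark the given vdev as unopenable for reason 'aux' and return 'err'.
 */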
2938eda14cbcSMatt Macy static int
2939eda14cbcSMatt Macy spa_vdev_err(vdev_t *vdev, vdev_aux_t aux, int err)
2940eda14cbcSMatt Macy {
2941eda14cbcSMatt Macy 	vdev_set_state(vdev, B_TRUE, VDEV_STATE_CANT_OPEN, aux);
2942eda14cbcSMatt Macy 	return (SET_ERROR(err));
2943eda14cbcSMatt Macy }
2944eda14cbcSMatt Macy 
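/*
 * Livelists of deleted clones are queued in a MOS ZAP for asynchronous
 * deletion; report whether any such livelists remain.
 */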
2945eda14cbcSMatt Macy boolean_t
2946eda14cbcSMatt Macy spa_livelist_delete_check(spa_t *spa)
2947eda14cbcSMatt Macy {
2948eda14cbcSMatt Macy 	return (spa->spa_livelists_to_delete != 0);
2949eda14cbcSMatt Macy }
2950eda14cbcSMatt Macy 
2951eda14cbcSMatt Macy static boolean_t
2952eda14cbcSMatt Macy spa_livelist_delete_cb_check(void *arg, zthr_t *z)
2953eda14cbcSMatt Macy {
2954e92ffd9bSMartin Matuska 	(void) z;
2955eda14cbcSMatt Macy 	spa_t *spa = arg;
2956eda14cbcSMatt Macy 	return (spa_livelist_delete_check(spa));
2957eda14cbcSMatt Macy }
2958eda14cbcSMatt Macy 
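/*
 * bplist_iterate() callback: free one block pointer and update the $FREE
 * dsl_dir space accounting accordingly.
 */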
2959eda14cbcSMatt Macy static int
2960eda14cbcSMatt Macy delete_blkptr_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
2961eda14cbcSMatt Macy {
2962eda14cbcSMatt Macy 	spa_t *spa = arg;
2963eda14cbcSMatt Macy 	zio_free(spa, tx->tx_txg, bp);
2964eda14cbcSMatt Macy 	dsl_dir_diduse_space(tx->tx_pool->dp_free_dir, DD_USED_HEAD,
2965eda14cbcSMatt Macy 	    -bp_get_dsize_sync(spa, bp),
2966eda14cbcSMatt Macy 	    -BP_GET_PSIZE(bp), -BP_GET_UCSIZE(bp), tx);
2967eda14cbcSMatt Macy 	return (0);
2968eda14cbcSMatt Macy }
2969eda14cbcSMatt Macy 
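/*
 * Retrieve the object number of the first livelist in the ZAP of livelists
 * awaiting deletion.
 */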
2970eda14cbcSMatt Macy static int
2971eda14cbcSMatt Macy dsl_get_next_livelist_obj(objset_t *os, uint64_t zap_obj, uint64_t *llp)
2972eda14cbcSMatt Macy {
2973eda14cbcSMatt Macy 	int err;
2974eda14cbcSMatt Macy 	zap_cursor_t zc;
2975eda14cbcSMatt Macy 	zap_attribute_t za;
2976eda14cbcSMatt Macy 	zap_cursor_init(&zc, os, zap_obj);
2977eda14cbcSMatt Macy 	err = zap_cursor_retrieve(&zc, &za);
2978eda14cbcSMatt Macy 	zap_cursor_fini(&zc);
2979eda14cbcSMatt Macy 	if (err == 0)
2980eda14cbcSMatt Macy 		*llp = za.za_first_integer;
2981eda14cbcSMatt Macy 	return (err);
2982eda14cbcSMatt Macy }
2983eda14cbcSMatt Macy 
2984eda14cbcSMatt Macy /*
2985eda14cbcSMatt Macy  * Components of livelist deletion that must be performed in syncing
2986eda14cbcSMatt Macy  * context: freeing block pointers and updating the pool-wide data
2987eda14cbcSMatt Macy  * structures to indicate how much work is left to do
2988eda14cbcSMatt Macy  */
2989eda14cbcSMatt Macy typedef struct sublist_delete_arg {
2990eda14cbcSMatt Macy 	spa_t *spa;
2991eda14cbcSMatt Macy 	dsl_deadlist_t *ll;
2992eda14cbcSMatt Macy 	uint64_t key;
2993eda14cbcSMatt Macy 	bplist_t *to_free;
2994eda14cbcSMatt Macy } sublist_delete_arg_t;
2995eda14cbcSMatt Macy 
2996eda14cbcSMatt Macy static void
2997eda14cbcSMatt Macy sublist_delete_sync(void *arg, dmu_tx_t *tx)
2998eda14cbcSMatt Macy {
2999eda14cbcSMatt Macy 	sublist_delete_arg_t *sda = arg;
3000eda14cbcSMatt Macy 	spa_t *spa = sda->spa;
3001eda14cbcSMatt Macy 	dsl_deadlist_t *ll = sda->ll;
3002eda14cbcSMatt Macy 	uint64_t key = sda->key;
3003eda14cbcSMatt Macy 	bplist_t *to_free = sda->to_free;
3004eda14cbcSMatt Macy 
3005eda14cbcSMatt Macy 	bplist_iterate(to_free, delete_blkptr_cb, spa, tx);
3006eda14cbcSMatt Macy 	dsl_deadlist_remove_entry(ll, key, tx);
3007eda14cbcSMatt Macy }
3008eda14cbcSMatt Macy 
3009eda14cbcSMatt Macy typedef struct livelist_delete_arg {
3010eda14cbcSMatt Macy 	spa_t *spa;
3011eda14cbcSMatt Macy 	uint64_t ll_obj;
3012eda14cbcSMatt Macy 	uint64_t zap_obj;
3013eda14cbcSMatt Macy } livelist_delete_arg_t;
3014eda14cbcSMatt Macy 
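/*
 * Syncing-context portion of removing an entire livelist: drop it from the
 * deleted-clones ZAP, free the deadlist, and decrement the feature count.
 * Once the last livelist is gone the ZAP itself is destroyed.
 */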
3015eda14cbcSMatt Macy static void
3016eda14cbcSMatt Macy livelist_delete_sync(void *arg, dmu_tx_t *tx)
3017eda14cbcSMatt Macy {
3018eda14cbcSMatt Macy 	livelist_delete_arg_t *lda = arg;
3019eda14cbcSMatt Macy 	spa_t *spa = lda->spa;
3020eda14cbcSMatt Macy 	uint64_t ll_obj = lda->ll_obj;
3021eda14cbcSMatt Macy 	uint64_t zap_obj = lda->zap_obj;
3022eda14cbcSMatt Macy 	objset_t *mos = spa->spa_meta_objset;
3023eda14cbcSMatt Macy 	uint64_t count;
3024eda14cbcSMatt Macy 
3025eda14cbcSMatt Macy 	/* free the livelist and decrement the feature count */
3026eda14cbcSMatt Macy 	VERIFY0(zap_remove_int(mos, zap_obj, ll_obj, tx));
3027eda14cbcSMatt Macy 	dsl_deadlist_free(mos, ll_obj, tx);
3028eda14cbcSMatt Macy 	spa_feature_decr(spa, SPA_FEATURE_LIVELIST, tx);
3029eda14cbcSMatt Macy 	VERIFY0(zap_count(mos, zap_obj, &count));
3030eda14cbcSMatt Macy 	if (count == 0) {
3031eda14cbcSMatt Macy 		/* no more livelists to delete */
3032eda14cbcSMatt Macy 		VERIFY0(zap_remove(mos, DMU_POOL_DIRECTORY_OBJECT,
3033eda14cbcSMatt Macy 		    DMU_POOL_DELETED_CLONES, tx));
3034eda14cbcSMatt Macy 		VERIFY0(zap_destroy(mos, zap_obj, tx));
3035eda14cbcSMatt Macy 		spa->spa_livelists_to_delete = 0;
3036eda14cbcSMatt Macy 		spa_notify_waiters(spa);
3037eda14cbcSMatt Macy 	}
3038eda14cbcSMatt Macy }
3039eda14cbcSMatt Macy 
3040eda14cbcSMatt Macy /*
3041eda14cbcSMatt Macy  * Load in the value for the livelist to be removed and open it. Then,
3042eda14cbcSMatt Macy  * load its first sublist and determine which block pointers should actually
3043eda14cbcSMatt Macy  * be freed. Then, call a synctask which performs the actual frees and updates
3044eda14cbcSMatt Macy  * the pool-wide livelist data.
3045eda14cbcSMatt Macy  */
3046eda14cbcSMatt Macy static void
3047eda14cbcSMatt Macy spa_livelist_delete_cb(void *arg, zthr_t *z)
3048eda14cbcSMatt Macy {
3049eda14cbcSMatt Macy 	spa_t *spa = arg;
3050eda14cbcSMatt Macy 	uint64_t ll_obj = 0, count;
3051eda14cbcSMatt Macy 	objset_t *mos = spa->spa_meta_objset;
3052eda14cbcSMatt Macy 	uint64_t zap_obj = spa->spa_livelists_to_delete;
3053eda14cbcSMatt Macy 	/*
3054eda14cbcSMatt Macy 	 * Determine the next livelist to delete. This function should only
3055eda14cbcSMatt Macy 	 * be called if there is at least one deleted clone.
3056eda14cbcSMatt Macy 	 */
3057eda14cbcSMatt Macy 	VERIFY0(dsl_get_next_livelist_obj(mos, zap_obj, &ll_obj));
3058eda14cbcSMatt Macy 	VERIFY0(zap_count(mos, ll_obj, &count));
3059eda14cbcSMatt Macy 	if (count > 0) {
30602c48331dSMatt Macy 		dsl_deadlist_t *ll;
3061eda14cbcSMatt Macy 		dsl_deadlist_entry_t *dle;
3062eda14cbcSMatt Macy 		bplist_t to_free;
30632c48331dSMatt Macy 		ll = kmem_zalloc(sizeof (dsl_deadlist_t), KM_SLEEP);
30642c48331dSMatt Macy 		dsl_deadlist_open(ll, mos, ll_obj);
30652c48331dSMatt Macy 		dle = dsl_deadlist_first(ll);
3066eda14cbcSMatt Macy 		ASSERT3P(dle, !=, NULL);
3067eda14cbcSMatt Macy 		bplist_create(&to_free);
3068eda14cbcSMatt Macy 		int err = dsl_process_sub_livelist(&dle->dle_bpobj, &to_free,
3069eda14cbcSMatt Macy 		    z, NULL);
3070eda14cbcSMatt Macy 		if (err == 0) {
3071eda14cbcSMatt Macy 			sublist_delete_arg_t sync_arg = {
3072eda14cbcSMatt Macy 			    .spa = spa,
30732c48331dSMatt Macy 			    .ll = ll,
3074eda14cbcSMatt Macy 			    .key = dle->dle_mintxg,
3075eda14cbcSMatt Macy 			    .to_free = &to_free
3076eda14cbcSMatt Macy 			};
3077eda14cbcSMatt Macy 			zfs_dbgmsg("deleting sublist (id %llu) from"
307833b8c039SMartin Matuska 			    " livelist %llu, %lld remaining",
307933b8c039SMartin Matuska 			    (u_longlong_t)dle->dle_bpobj.bpo_object,
308033b8c039SMartin Matuska 			    (u_longlong_t)ll_obj, (longlong_t)count - 1);
3081eda14cbcSMatt Macy 			VERIFY0(dsl_sync_task(spa_name(spa), NULL,
3082eda14cbcSMatt Macy 			    sublist_delete_sync, &sync_arg, 0,
3083eda14cbcSMatt Macy 			    ZFS_SPACE_CHECK_DESTROY));
3084eda14cbcSMatt Macy 		} else {
3085eda14cbcSMatt Macy 			VERIFY3U(err, ==, EINTR);
3086eda14cbcSMatt Macy 		}
3087eda14cbcSMatt Macy 		bplist_clear(&to_free);
3088eda14cbcSMatt Macy 		bplist_destroy(&to_free);
30892c48331dSMatt Macy 		dsl_deadlist_close(ll);
30902c48331dSMatt Macy 		kmem_free(ll, sizeof (dsl_deadlist_t));
3091eda14cbcSMatt Macy 	} else {
3092eda14cbcSMatt Macy 		livelist_delete_arg_t sync_arg = {
3093eda14cbcSMatt Macy 		    .spa = spa,
3094eda14cbcSMatt Macy 		    .ll_obj = ll_obj,
3095eda14cbcSMatt Macy 		    .zap_obj = zap_obj
3096eda14cbcSMatt Macy 		};
309733b8c039SMartin Matuska 		zfs_dbgmsg("deletion of livelist %llu completed",
309833b8c039SMartin Matuska 		    (u_longlong_t)ll_obj);
3099eda14cbcSMatt Macy 		VERIFY0(dsl_sync_task(spa_name(spa), NULL, livelist_delete_sync,
3100eda14cbcSMatt Macy 		    &sync_arg, 0, ZFS_SPACE_CHECK_DESTROY));
3101eda14cbcSMatt Macy 	}
3102eda14cbcSMatt Macy }
3103eda14cbcSMatt Macy 
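/*
 * Create the zthr that asynchronously deletes the livelists of deleted
 * clones.
 */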
3104eda14cbcSMatt Macy static void
3105eda14cbcSMatt Macy spa_start_livelist_destroy_thread(spa_t *spa)
3106eda14cbcSMatt Macy {
3107eda14cbcSMatt Macy 	ASSERT3P(spa->spa_livelist_delete_zthr, ==, NULL);
3108eda14cbcSMatt Macy 	spa->spa_livelist_delete_zthr =
3109eda14cbcSMatt Macy 	    zthr_create("z_livelist_destroy",
31102faf504dSMartin Matuska 	    spa_livelist_delete_cb_check, spa_livelist_delete_cb, spa,
31112faf504dSMartin Matuska 	    minclsyspri);
3112eda14cbcSMatt Macy }
3113eda14cbcSMatt Macy 
3114eda14cbcSMatt Macy typedef struct livelist_new_arg {
3115eda14cbcSMatt Macy 	bplist_t *allocs;
3116eda14cbcSMatt Macy 	bplist_t *frees;
3117eda14cbcSMatt Macy } livelist_new_arg_t;
3118eda14cbcSMatt Macy 
3119eda14cbcSMatt Macy static int
3120eda14cbcSMatt Macy livelist_track_new_cb(void *arg, const blkptr_t *bp, boolean_t bp_freed,
3121eda14cbcSMatt Macy     dmu_tx_t *tx)
3122eda14cbcSMatt Macy {
3123eda14cbcSMatt Macy 	ASSERT(tx == NULL);
3124eda14cbcSMatt Macy 	livelist_new_arg_t *lna = arg;
3125eda14cbcSMatt Macy 	if (bp_freed) {
3126eda14cbcSMatt Macy 		bplist_append(lna->frees, bp);
3127eda14cbcSMatt Macy 	} else {
3128eda14cbcSMatt Macy 		bplist_append(lna->allocs, bp);
3129eda14cbcSMatt Macy 		zfs_livelist_condense_new_alloc++;
3130eda14cbcSMatt Macy 	}
3131eda14cbcSMatt Macy 	return (0);
3132eda14cbcSMatt Macy }
3133eda14cbcSMatt Macy 
3134eda14cbcSMatt Macy typedef struct livelist_condense_arg {
3135eda14cbcSMatt Macy 	spa_t *spa;
3136eda14cbcSMatt Macy 	bplist_t to_keep;
3137eda14cbcSMatt Macy 	uint64_t first_size;
3138eda14cbcSMatt Macy 	uint64_t next_size;
3139eda14cbcSMatt Macy } livelist_condense_arg_t;
3140eda14cbcSMatt Macy 
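/*
 * Syncing-context portion of livelist condensing: clear the first of the two
 * entries being condensed, remove the second, and re-insert the surviving
 * ALLOCs and FREEs, including any block pointers appended while the
 * open-context work was running.
 */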
3141eda14cbcSMatt Macy static void
3142eda14cbcSMatt Macy spa_livelist_condense_sync(void *arg, dmu_tx_t *tx)
3143eda14cbcSMatt Macy {
3144eda14cbcSMatt Macy 	livelist_condense_arg_t *lca = arg;
3145eda14cbcSMatt Macy 	spa_t *spa = lca->spa;
3146eda14cbcSMatt Macy 	bplist_t new_frees;
3147eda14cbcSMatt Macy 	dsl_dataset_t *ds = spa->spa_to_condense.ds;
3148eda14cbcSMatt Macy 
3149eda14cbcSMatt Macy 	/* Have we been cancelled? */
3150eda14cbcSMatt Macy 	if (spa->spa_to_condense.cancelled) {
3151eda14cbcSMatt Macy 		zfs_livelist_condense_sync_cancel++;
3152eda14cbcSMatt Macy 		goto out;
3153eda14cbcSMatt Macy 	}
3154eda14cbcSMatt Macy 
3155eda14cbcSMatt Macy 	dsl_deadlist_entry_t *first = spa->spa_to_condense.first;
3156eda14cbcSMatt Macy 	dsl_deadlist_entry_t *next = spa->spa_to_condense.next;
3157eda14cbcSMatt Macy 	dsl_deadlist_t *ll = &ds->ds_dir->dd_livelist;
3158eda14cbcSMatt Macy 
3159eda14cbcSMatt Macy 	/*
3160eda14cbcSMatt Macy 	 * It's possible that the livelist was changed while the zthr was
3161eda14cbcSMatt Macy 	 * running. Therefore, we need to check for new blkptrs in the two
3162eda14cbcSMatt Macy 	 * entries being condensed and continue to track them in the livelist.
3163eda14cbcSMatt Macy 	 * Because of the way we handle remapped blkptrs (see dbuf_remap_impl),
3164eda14cbcSMatt Macy 	 * it's possible that the newly added blkptrs are FREEs or ALLOCs so
3165eda14cbcSMatt Macy 	 * we need to sort them into two different bplists.
3166eda14cbcSMatt Macy 	 */
3167eda14cbcSMatt Macy 	uint64_t first_obj = first->dle_bpobj.bpo_object;
3168eda14cbcSMatt Macy 	uint64_t next_obj = next->dle_bpobj.bpo_object;
3169eda14cbcSMatt Macy 	uint64_t cur_first_size = first->dle_bpobj.bpo_phys->bpo_num_blkptrs;
3170eda14cbcSMatt Macy 	uint64_t cur_next_size = next->dle_bpobj.bpo_phys->bpo_num_blkptrs;
3171eda14cbcSMatt Macy 
3172eda14cbcSMatt Macy 	bplist_create(&new_frees);
3173eda14cbcSMatt Macy 	livelist_new_arg_t new_bps = {
3174eda14cbcSMatt Macy 	    .allocs = &lca->to_keep,
3175eda14cbcSMatt Macy 	    .frees = &new_frees,
3176eda14cbcSMatt Macy 	};
3177eda14cbcSMatt Macy 
3178eda14cbcSMatt Macy 	if (cur_first_size > lca->first_size) {
3179eda14cbcSMatt Macy 		VERIFY0(livelist_bpobj_iterate_from_nofree(&first->dle_bpobj,
3180eda14cbcSMatt Macy 		    livelist_track_new_cb, &new_bps, lca->first_size));
3181eda14cbcSMatt Macy 	}
3182eda14cbcSMatt Macy 	if (cur_next_size > lca->next_size) {
3183eda14cbcSMatt Macy 		VERIFY0(livelist_bpobj_iterate_from_nofree(&next->dle_bpobj,
3184eda14cbcSMatt Macy 		    livelist_track_new_cb, &new_bps, lca->next_size));
3185eda14cbcSMatt Macy 	}
3186eda14cbcSMatt Macy 
3187eda14cbcSMatt Macy 	dsl_deadlist_clear_entry(first, ll, tx);
3188eda14cbcSMatt Macy 	ASSERT(bpobj_is_empty(&first->dle_bpobj));
3189eda14cbcSMatt Macy 	dsl_deadlist_remove_entry(ll, next->dle_mintxg, tx);
3190eda14cbcSMatt Macy 
3191eda14cbcSMatt Macy 	bplist_iterate(&lca->to_keep, dsl_deadlist_insert_alloc_cb, ll, tx);
3192eda14cbcSMatt Macy 	bplist_iterate(&new_frees, dsl_deadlist_insert_free_cb, ll, tx);
3193eda14cbcSMatt Macy 	bplist_destroy(&new_frees);
3194eda14cbcSMatt Macy 
3195eda14cbcSMatt Macy 	char dsname[ZFS_MAX_DATASET_NAME_LEN];
3196eda14cbcSMatt Macy 	dsl_dataset_name(ds, dsname);
3197eda14cbcSMatt Macy 	zfs_dbgmsg("txg %llu condensing livelist of %s (id %llu), bpobj %llu "
3198eda14cbcSMatt Macy 	    "(%llu blkptrs) and bpobj %llu (%llu blkptrs) -> bpobj %llu "
319933b8c039SMartin Matuska 	    "(%llu blkptrs)", (u_longlong_t)tx->tx_txg, dsname,
320033b8c039SMartin Matuska 	    (u_longlong_t)ds->ds_object, (u_longlong_t)first_obj,
320133b8c039SMartin Matuska 	    (u_longlong_t)cur_first_size, (u_longlong_t)next_obj,
320233b8c039SMartin Matuska 	    (u_longlong_t)cur_next_size,
320333b8c039SMartin Matuska 	    (u_longlong_t)first->dle_bpobj.bpo_object,
320433b8c039SMartin Matuska 	    (u_longlong_t)first->dle_bpobj.bpo_phys->bpo_num_blkptrs);
3205eda14cbcSMatt Macy out:
3206eda14cbcSMatt Macy 	dmu_buf_rele(ds->ds_dbuf, spa);
3207eda14cbcSMatt Macy 	spa->spa_to_condense.ds = NULL;
3208eda14cbcSMatt Macy 	bplist_clear(&lca->to_keep);
3209eda14cbcSMatt Macy 	bplist_destroy(&lca->to_keep);
3210eda14cbcSMatt Macy 	kmem_free(lca, sizeof (livelist_condense_arg_t));
3211eda14cbcSMatt Macy 	spa->spa_to_condense.syncing = B_FALSE;
3212eda14cbcSMatt Macy }
3213eda14cbcSMatt Macy 
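/*
 * Open-context zthr body for livelist condensing: process the two entries
 * being condensed (matching FREEs against ALLOCs), then dispatch
 * spa_livelist_condense_sync() to commit the result.
 */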
3214eda14cbcSMatt Macy static void
3215eda14cbcSMatt Macy spa_livelist_condense_cb(void *arg, zthr_t *t)
3216eda14cbcSMatt Macy {
3217eda14cbcSMatt Macy 	while (zfs_livelist_condense_zthr_pause &&
3218eda14cbcSMatt Macy 	    !(zthr_has_waiters(t) || zthr_iscancelled(t)))
3219eda14cbcSMatt Macy 		delay(1);
3220eda14cbcSMatt Macy 
3221eda14cbcSMatt Macy 	spa_t *spa = arg;
3222eda14cbcSMatt Macy 	dsl_deadlist_entry_t *first = spa->spa_to_condense.first;
3223eda14cbcSMatt Macy 	dsl_deadlist_entry_t *next = spa->spa_to_condense.next;
3224eda14cbcSMatt Macy 	uint64_t first_size, next_size;
3225eda14cbcSMatt Macy 
3226eda14cbcSMatt Macy 	livelist_condense_arg_t *lca =
3227eda14cbcSMatt Macy 	    kmem_alloc(sizeof (livelist_condense_arg_t), KM_SLEEP);
3228eda14cbcSMatt Macy 	bplist_create(&lca->to_keep);
3229eda14cbcSMatt Macy 
3230eda14cbcSMatt Macy 	/*
3231eda14cbcSMatt Macy 	 * Process the livelists (matching FREEs and ALLOCs) in open context
3232eda14cbcSMatt Macy 	 * so we have minimal work in syncing context to condense.
3233eda14cbcSMatt Macy 	 *
3234eda14cbcSMatt Macy 	 * We save bpobj sizes (first_size and next_size) to use later in
3235eda14cbcSMatt Macy 	 * syncing context to determine if entries were added to these sublists
3236eda14cbcSMatt Macy 	 * while in open context. This is possible because the clone is still
3237eda14cbcSMatt Macy 	 * active and open for normal writes and we want to make sure the new,
3238eda14cbcSMatt Macy 	 * unprocessed blockpointers are inserted into the livelist normally.
3239eda14cbcSMatt Macy 	 *
3240eda14cbcSMatt Macy 	 * Note that dsl_process_sub_livelist() both records the number of
3241eda14cbcSMatt Macy 	 * blockpointers and iterates over them while the bpobj's lock is held,
3242eda14cbcSMatt Macy 	 * so the sizes returned to us are consistent with what was actually
3243eda14cbcSMatt Macy 	 * processed.
3244eda14cbcSMatt Macy 	 */
3245eda14cbcSMatt Macy 	int err = dsl_process_sub_livelist(&first->dle_bpobj, &lca->to_keep, t,
3246eda14cbcSMatt Macy 	    &first_size);
3247eda14cbcSMatt Macy 	if (err == 0)
3248eda14cbcSMatt Macy 		err = dsl_process_sub_livelist(&next->dle_bpobj, &lca->to_keep,
3249eda14cbcSMatt Macy 		    t, &next_size);
3250eda14cbcSMatt Macy 
3251eda14cbcSMatt Macy 	if (err == 0) {
3252eda14cbcSMatt Macy 		while (zfs_livelist_condense_sync_pause &&
3253eda14cbcSMatt Macy 		    !(zthr_has_waiters(t) || zthr_iscancelled(t)))
3254eda14cbcSMatt Macy 			delay(1);
3255eda14cbcSMatt Macy 
3256eda14cbcSMatt Macy 		dmu_tx_t *tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
3257eda14cbcSMatt Macy 		dmu_tx_mark_netfree(tx);
3258eda14cbcSMatt Macy 		dmu_tx_hold_space(tx, 1);
3259eda14cbcSMatt Macy 		err = dmu_tx_assign(tx, TXG_NOWAIT | TXG_NOTHROTTLE);
3260eda14cbcSMatt Macy 		if (err == 0) {
3261eda14cbcSMatt Macy 			/*
3262eda14cbcSMatt Macy 			 * Prevent the condense zthr restarting before
3263eda14cbcSMatt Macy 			 * the synctask completes.
3264eda14cbcSMatt Macy 			 */
3265eda14cbcSMatt Macy 			spa->spa_to_condense.syncing = B_TRUE;
3266eda14cbcSMatt Macy 			lca->spa = spa;
3267eda14cbcSMatt Macy 			lca->first_size = first_size;
3268eda14cbcSMatt Macy 			lca->next_size = next_size;
3269eda14cbcSMatt Macy 			dsl_sync_task_nowait(spa_get_dsl(spa),
32702c48331dSMatt Macy 			    spa_livelist_condense_sync, lca, tx);
3271eda14cbcSMatt Macy 			dmu_tx_commit(tx);
3272eda14cbcSMatt Macy 			return;
3273eda14cbcSMatt Macy 		}
3274eda14cbcSMatt Macy 	}
3275eda14cbcSMatt Macy 	/*
3276eda14cbcSMatt Macy 	 * Condensing cannot continue: either it was externally stopped or
3277eda14cbcSMatt Macy 	 * we were unable to assign the tx because the pool has run out of
3278eda14cbcSMatt Macy 	 * space. In the second case, we'll just end up trying to condense
3279eda14cbcSMatt Macy 	 * again in a later txg.
3280eda14cbcSMatt Macy 	 */
3281eda14cbcSMatt Macy 	ASSERT(err != 0);
3282eda14cbcSMatt Macy 	bplist_clear(&lca->to_keep);
3283eda14cbcSMatt Macy 	bplist_destroy(&lca->to_keep);
3284eda14cbcSMatt Macy 	kmem_free(lca, sizeof (livelist_condense_arg_t));
3285eda14cbcSMatt Macy 	dmu_buf_rele(spa->spa_to_condense.ds->ds_dbuf, spa);
3286eda14cbcSMatt Macy 	spa->spa_to_condense.ds = NULL;
3287eda14cbcSMatt Macy 	if (err == EINTR)
3288eda14cbcSMatt Macy 		zfs_livelist_condense_zthr_cancel++;
3289eda14cbcSMatt Macy }
3290eda14cbcSMatt Macy 
3291eda14cbcSMatt Macy /*
3292eda14cbcSMatt Macy  * Check that there is something to condense but that a condense is not
3293eda14cbcSMatt Macy  * already in progress and that condensing has not been cancelled.
3294eda14cbcSMatt Macy  */
3295eda14cbcSMatt Macy static boolean_t
3296eda14cbcSMatt Macy spa_livelist_condense_cb_check(void *arg, zthr_t *z)
3297eda14cbcSMatt Macy {
3298e92ffd9bSMartin Matuska 	(void) z;
3299eda14cbcSMatt Macy 	spa_t *spa = arg;
3300eda14cbcSMatt Macy 	if ((spa->spa_to_condense.ds != NULL) &&
3301eda14cbcSMatt Macy 	    (spa->spa_to_condense.syncing == B_FALSE) &&
3302eda14cbcSMatt Macy 	    (spa->spa_to_condense.cancelled == B_FALSE)) {
3303eda14cbcSMatt Macy 		return (B_TRUE);
3304eda14cbcSMatt Macy 	}
3305eda14cbcSMatt Macy 	return (B_FALSE);
3306eda14cbcSMatt Macy }
3307eda14cbcSMatt Macy 
3308eda14cbcSMatt Macy static void
3309eda14cbcSMatt Macy spa_start_livelist_condensing_thread(spa_t *spa)
3310eda14cbcSMatt Macy {
3311eda14cbcSMatt Macy 	spa->spa_to_condense.ds = NULL;
3312eda14cbcSMatt Macy 	spa->spa_to_condense.first = NULL;
3313eda14cbcSMatt Macy 	spa->spa_to_condense.next = NULL;
3314eda14cbcSMatt Macy 	spa->spa_to_condense.syncing = B_FALSE;
3315eda14cbcSMatt Macy 	spa->spa_to_condense.cancelled = B_FALSE;
3316eda14cbcSMatt Macy 
3317eda14cbcSMatt Macy 	ASSERT3P(spa->spa_livelist_condense_zthr, ==, NULL);
3318eda14cbcSMatt Macy 	spa->spa_livelist_condense_zthr =
3319eda14cbcSMatt Macy 	    zthr_create("z_livelist_condense",
3320eda14cbcSMatt Macy 	    spa_livelist_condense_cb_check,
33212faf504dSMartin Matuska 	    spa_livelist_condense_cb, spa, minclsyspri);
3322eda14cbcSMatt Macy }
3323eda14cbcSMatt Macy 
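/*
 * Start the auxiliary zthrs for a writeable pool: RAID-Z expansion, indirect
 * vdev condensing, livelist deletion and condensing, and checkpoint discard.
 */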
3324eda14cbcSMatt Macy static void
3325eda14cbcSMatt Macy spa_spawn_aux_threads(spa_t *spa)
3326eda14cbcSMatt Macy {
3327eda14cbcSMatt Macy 	ASSERT(spa_writeable(spa));
3328eda14cbcSMatt Macy 
3329e716630dSMartin Matuska 	spa_start_raidz_expansion_thread(spa);
3330eda14cbcSMatt Macy 	spa_start_indirect_condensing_thread(spa);
3331eda14cbcSMatt Macy 	spa_start_livelist_destroy_thread(spa);
3332eda14cbcSMatt Macy 	spa_start_livelist_condensing_thread(spa);
3333eda14cbcSMatt Macy 
3334eda14cbcSMatt Macy 	ASSERT3P(spa->spa_checkpoint_discard_zthr, ==, NULL);
3335eda14cbcSMatt Macy 	spa->spa_checkpoint_discard_zthr =
3336eda14cbcSMatt Macy 	    zthr_create("z_checkpoint_discard",
3337eda14cbcSMatt Macy 	    spa_checkpoint_discard_thread_check,
33382faf504dSMartin Matuska 	    spa_checkpoint_discard_thread, spa, minclsyspri);
3339eda14cbcSMatt Macy }
3340eda14cbcSMatt Macy 
3341eda14cbcSMatt Macy /*
3342eda14cbcSMatt Macy  * Fix up config after a partly-completed split.  This is done with the
3343eda14cbcSMatt Macy  * ZPOOL_CONFIG_SPLIT nvlist.  Both the splitting pool and the split-off
3344eda14cbcSMatt Macy  * pool have that entry in their config, but only the splitting one contains
3345eda14cbcSMatt Macy  * a list of all the guids of the vdevs that are being split off.
3346eda14cbcSMatt Macy  *
3347eda14cbcSMatt Macy  * This function determines what to do with that list: either rejoin
3348eda14cbcSMatt Macy  * all the disks to the pool, or complete the splitting process.  To attempt
3349eda14cbcSMatt Macy  * the rejoin, each disk that is offlined is marked online again, and
3350eda14cbcSMatt Macy  * we do a reopen() call.  If the vdev label for every disk that was
3351eda14cbcSMatt Macy  * marked online indicates it was successfully split off (VDEV_AUX_SPLIT_POOL)
3352eda14cbcSMatt Macy  * then we call vdev_split() on each disk, and complete the split.
3353eda14cbcSMatt Macy  *
3354eda14cbcSMatt Macy  * Otherwise we leave the config alone, with all the vdevs in place in
3355eda14cbcSMatt Macy  * the original pool.
3356eda14cbcSMatt Macy  */
3357eda14cbcSMatt Macy static void
3358eda14cbcSMatt Macy spa_try_repair(spa_t *spa, nvlist_t *config)
3359eda14cbcSMatt Macy {
3360eda14cbcSMatt Macy 	uint_t extracted;
3361eda14cbcSMatt Macy 	uint64_t *glist;
3362eda14cbcSMatt Macy 	uint_t i, gcount;
3363eda14cbcSMatt Macy 	nvlist_t *nvl;
3364eda14cbcSMatt Macy 	vdev_t **vd;
3365eda14cbcSMatt Macy 	boolean_t attempt_reopen;
3366eda14cbcSMatt Macy 
3367eda14cbcSMatt Macy 	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_SPLIT, &nvl) != 0)
3368eda14cbcSMatt Macy 		return;
3369eda14cbcSMatt Macy 
3370eda14cbcSMatt Macy 	/* check that the config is complete */
3371eda14cbcSMatt Macy 	if (nvlist_lookup_uint64_array(nvl, ZPOOL_CONFIG_SPLIT_LIST,
3372eda14cbcSMatt Macy 	    &glist, &gcount) != 0)
3373eda14cbcSMatt Macy 		return;
3374eda14cbcSMatt Macy 
3375eda14cbcSMatt Macy 	vd = kmem_zalloc(gcount * sizeof (vdev_t *), KM_SLEEP);
3376eda14cbcSMatt Macy 
3377eda14cbcSMatt Macy 	/* attempt to online all the vdevs & validate */
3378eda14cbcSMatt Macy 	attempt_reopen = B_TRUE;
3379eda14cbcSMatt Macy 	for (i = 0; i < gcount; i++) {
3380eda14cbcSMatt Macy 		if (glist[i] == 0)	/* vdev is hole */
3381eda14cbcSMatt Macy 			continue;
3382eda14cbcSMatt Macy 
3383eda14cbcSMatt Macy 		vd[i] = spa_lookup_by_guid(spa, glist[i], B_FALSE);
3384eda14cbcSMatt Macy 		if (vd[i] == NULL) {
3385eda14cbcSMatt Macy 			/*
3386eda14cbcSMatt Macy 			 * Don't bother attempting to reopen the disks;
3387eda14cbcSMatt Macy 			 * just do the split.
3388eda14cbcSMatt Macy 			 */
3389eda14cbcSMatt Macy 			attempt_reopen = B_FALSE;
3390eda14cbcSMatt Macy 		} else {
3391eda14cbcSMatt Macy 			/* attempt to re-online it */
3392eda14cbcSMatt Macy 			vd[i]->vdev_offline = B_FALSE;
3393eda14cbcSMatt Macy 		}
3394eda14cbcSMatt Macy 	}
3395eda14cbcSMatt Macy 
3396eda14cbcSMatt Macy 	if (attempt_reopen) {
3397eda14cbcSMatt Macy 		vdev_reopen(spa->spa_root_vdev);
3398eda14cbcSMatt Macy 
3399eda14cbcSMatt Macy 		/* check each device to see what state it's in */
3400eda14cbcSMatt Macy 		for (extracted = 0, i = 0; i < gcount; i++) {
3401eda14cbcSMatt Macy 			if (vd[i] != NULL &&
3402eda14cbcSMatt Macy 			    vd[i]->vdev_stat.vs_aux != VDEV_AUX_SPLIT_POOL)
3403eda14cbcSMatt Macy 				break;
3404eda14cbcSMatt Macy 			++extracted;
3405eda14cbcSMatt Macy 		}
3406eda14cbcSMatt Macy 	}
3407eda14cbcSMatt Macy 
3408eda14cbcSMatt Macy 	/*
3409eda14cbcSMatt Macy 	 * If every disk has been moved to the new pool, or if we never
3410eda14cbcSMatt Macy 	 * even attempted to look at them, then we split them off for
3411eda14cbcSMatt Macy 	 * good.
3412eda14cbcSMatt Macy 	 */
3413eda14cbcSMatt Macy 	if (!attempt_reopen || gcount == extracted) {
3414eda14cbcSMatt Macy 		for (i = 0; i < gcount; i++)
3415eda14cbcSMatt Macy 			if (vd[i] != NULL)
3416eda14cbcSMatt Macy 				vdev_split(vd[i]);
3417eda14cbcSMatt Macy 		vdev_reopen(spa->spa_root_vdev);
3418eda14cbcSMatt Macy 	}
3419eda14cbcSMatt Macy 
3420eda14cbcSMatt Macy 	kmem_free(vd, gcount * sizeof (vdev_t *));
3421eda14cbcSMatt Macy }
3422eda14cbcSMatt Macy 
3423eda14cbcSMatt Macy static int
3424eda14cbcSMatt Macy spa_load(spa_t *spa, spa_load_state_t state, spa_import_type_t type)
3425eda14cbcSMatt Macy {
3426a0b956f5SMartin Matuska 	const char *ereport = FM_EREPORT_ZFS_POOL;
3427eda14cbcSMatt Macy 	int error;
3428eda14cbcSMatt Macy 
3429eda14cbcSMatt Macy 	spa->spa_load_state = state;
3430eda14cbcSMatt Macy 	(void) spa_import_progress_set_state(spa_guid(spa),
3431eda14cbcSMatt Macy 	    spa_load_state(spa));
34323494f7c0SMartin Matuska 	spa_import_progress_set_notes(spa, "spa_load()");
3433eda14cbcSMatt Macy 
3434eda14cbcSMatt Macy 	gethrestime(&spa->spa_loaded_ts);
3435eda14cbcSMatt Macy 	error = spa_load_impl(spa, type, &ereport);
3436eda14cbcSMatt Macy 
3437eda14cbcSMatt Macy 	/*
3438eda14cbcSMatt Macy 	 * Don't count references from objsets that are already closed
3439eda14cbcSMatt Macy 	 * and are making their way through the eviction process.
3440eda14cbcSMatt Macy 	 */
3441eda14cbcSMatt Macy 	spa_evicting_os_wait(spa);
3442eda14cbcSMatt Macy 	spa->spa_minref = zfs_refcount_count(&spa->spa_refcount);
3443eda14cbcSMatt Macy 	if (error) {
3444eda14cbcSMatt Macy 		if (error != EEXIST) {
3445eda14cbcSMatt Macy 			spa->spa_loaded_ts.tv_sec = 0;
3446eda14cbcSMatt Macy 			spa->spa_loaded_ts.tv_nsec = 0;
3447eda14cbcSMatt Macy 		}
3448eda14cbcSMatt Macy 		if (error != EBADF) {
3449eac7052fSMatt Macy 			(void) zfs_ereport_post(ereport, spa,
34502c48331dSMatt Macy 			    NULL, NULL, NULL, 0);
3451eda14cbcSMatt Macy 		}
3452eda14cbcSMatt Macy 	}
3453eda14cbcSMatt Macy 	spa->spa_load_state = error ? SPA_LOAD_ERROR : SPA_LOAD_NONE;
3454eda14cbcSMatt Macy 	spa->spa_ena = 0;
3455eda14cbcSMatt Macy 
3456eda14cbcSMatt Macy 	(void) spa_import_progress_set_state(spa_guid(spa),
3457eda14cbcSMatt Macy 	    spa_load_state(spa));
3458eda14cbcSMatt Macy 
3459eda14cbcSMatt Macy 	return (error);
3460eda14cbcSMatt Macy }
3461eda14cbcSMatt Macy 
3462eda14cbcSMatt Macy #ifdef ZFS_DEBUG
3463eda14cbcSMatt Macy /*
3464eda14cbcSMatt Macy  * Count the number of per-vdev ZAPs associated with all of the vdevs in the
3465eda14cbcSMatt Macy  * vdev tree rooted in the given vd, and ensure that each ZAP is present in the
3466eda14cbcSMatt Macy  * spa's per-vdev ZAP list.
3467eda14cbcSMatt Macy  */
3468eda14cbcSMatt Macy static uint64_t
3469eda14cbcSMatt Macy vdev_count_verify_zaps(vdev_t *vd)
3470eda14cbcSMatt Macy {
3471eda14cbcSMatt Macy 	spa_t *spa = vd->vdev_spa;
3472eda14cbcSMatt Macy 	uint64_t total = 0;
3473eda14cbcSMatt Macy 
3474d411c1d6SMartin Matuska 	if (spa_feature_is_active(vd->vdev_spa, SPA_FEATURE_AVZ_V2) &&
3475d411c1d6SMartin Matuska 	    vd->vdev_root_zap != 0) {
3476d411c1d6SMartin Matuska 		total++;
3477d411c1d6SMartin Matuska 		ASSERT0(zap_lookup_int(spa->spa_meta_objset,
3478d411c1d6SMartin Matuska 		    spa->spa_all_vdev_zaps, vd->vdev_root_zap));
3479d411c1d6SMartin Matuska 	}
3480eda14cbcSMatt Macy 	if (vd->vdev_top_zap != 0) {
3481eda14cbcSMatt Macy 		total++;
3482eda14cbcSMatt Macy 		ASSERT0(zap_lookup_int(spa->spa_meta_objset,
3483eda14cbcSMatt Macy 		    spa->spa_all_vdev_zaps, vd->vdev_top_zap));
3484eda14cbcSMatt Macy 	}
3485eda14cbcSMatt Macy 	if (vd->vdev_leaf_zap != 0) {
3486eda14cbcSMatt Macy 		total++;
3487eda14cbcSMatt Macy 		ASSERT0(zap_lookup_int(spa->spa_meta_objset,
3488eda14cbcSMatt Macy 		    spa->spa_all_vdev_zaps, vd->vdev_leaf_zap));
3489eda14cbcSMatt Macy 	}
3490eda14cbcSMatt Macy 
3491eda14cbcSMatt Macy 	for (uint64_t i = 0; i < vd->vdev_children; i++) {
3492eda14cbcSMatt Macy 		total += vdev_count_verify_zaps(vd->vdev_child[i]);
3493eda14cbcSMatt Macy 	}
3494eda14cbcSMatt Macy 
3495eda14cbcSMatt Macy 	return (total);
3496eda14cbcSMatt Macy }
3497e92ffd9bSMartin Matuska #else
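/*
 * Non-debug stub: evaluates to 0 while still referencing 'vd' through
 * sizeof() so the argument does not trigger unused-variable warnings.
 */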
3498e92ffd9bSMartin Matuska #define	vdev_count_verify_zaps(vd) ((void) sizeof (vd), 0)
3499eda14cbcSMatt Macy #endif
3500eda14cbcSMatt Macy 
3501eda14cbcSMatt Macy /*
3502eda14cbcSMatt Macy  * Determine whether the activity check is required.
3503eda14cbcSMatt Macy  */
3504eda14cbcSMatt Macy static boolean_t
3505eda14cbcSMatt Macy spa_activity_check_required(spa_t *spa, uberblock_t *ub, nvlist_t *label,
3506eda14cbcSMatt Macy     nvlist_t *config)
3507eda14cbcSMatt Macy {
3508eda14cbcSMatt Macy 	uint64_t state = 0;
3509eda14cbcSMatt Macy 	uint64_t hostid = 0;
3510eda14cbcSMatt Macy 	uint64_t tryconfig_txg = 0;
3511eda14cbcSMatt Macy 	uint64_t tryconfig_timestamp = 0;
3512eda14cbcSMatt Macy 	uint16_t tryconfig_mmp_seq = 0;
3513eda14cbcSMatt Macy 	nvlist_t *nvinfo;
3514eda14cbcSMatt Macy 
3515eda14cbcSMatt Macy 	if (nvlist_exists(config, ZPOOL_CONFIG_LOAD_INFO)) {
3516eda14cbcSMatt Macy 		nvinfo = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO);
3517eda14cbcSMatt Macy 		(void) nvlist_lookup_uint64(nvinfo, ZPOOL_CONFIG_MMP_TXG,
3518eda14cbcSMatt Macy 		    &tryconfig_txg);
3519eda14cbcSMatt Macy 		(void) nvlist_lookup_uint64(config, ZPOOL_CONFIG_TIMESTAMP,
3520eda14cbcSMatt Macy 		    &tryconfig_timestamp);
3521eda14cbcSMatt Macy 		(void) nvlist_lookup_uint16(nvinfo, ZPOOL_CONFIG_MMP_SEQ,
3522eda14cbcSMatt Macy 		    &tryconfig_mmp_seq);
3523eda14cbcSMatt Macy 	}
3524eda14cbcSMatt Macy 
3525eda14cbcSMatt Macy 	(void) nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE, &state);
3526eda14cbcSMatt Macy 
3527eda14cbcSMatt Macy 	/*
3528eda14cbcSMatt Macy 	 * Disable the MMP activity check.  This is used by zdb, which is
3529eda14cbcSMatt Macy 	 * intended to be used on potentially active pools.
3530eda14cbcSMatt Macy 	 */
3531eda14cbcSMatt Macy 	if (spa->spa_import_flags & ZFS_IMPORT_SKIP_MMP)
3532eda14cbcSMatt Macy 		return (B_FALSE);
3533eda14cbcSMatt Macy 
3534eda14cbcSMatt Macy 	/*
3535eda14cbcSMatt Macy 	 * Skip the activity check when the MMP feature is disabled.
3536eda14cbcSMatt Macy 	 */
3537eda14cbcSMatt Macy 	if (ub->ub_mmp_magic == MMP_MAGIC && ub->ub_mmp_delay == 0)
3538eda14cbcSMatt Macy 		return (B_FALSE);
3539eda14cbcSMatt Macy 
3540eda14cbcSMatt Macy 	/*
3541eda14cbcSMatt Macy 	 * If the tryconfig_ values are nonzero, they are the results of an
3542eda14cbcSMatt Macy 	 * earlier tryimport.  If they all match the uberblock we just found,
3543eda14cbcSMatt Macy 	 * then the pool has not changed and we return false so we do not test
3544eda14cbcSMatt Macy 	 * a second time.
3545eda14cbcSMatt Macy 	 */
3546eda14cbcSMatt Macy 	if (tryconfig_txg && tryconfig_txg == ub->ub_txg &&
3547eda14cbcSMatt Macy 	    tryconfig_timestamp && tryconfig_timestamp == ub->ub_timestamp &&
3548eda14cbcSMatt Macy 	    tryconfig_mmp_seq && tryconfig_mmp_seq ==
3549eda14cbcSMatt Macy 	    (MMP_SEQ_VALID(ub) ? MMP_SEQ(ub) : 0))
3550eda14cbcSMatt Macy 		return (B_FALSE);
3551eda14cbcSMatt Macy 
3552eda14cbcSMatt Macy 	/*
3553eda14cbcSMatt Macy 	 * Allow the activity check to be skipped when importing the pool
3554eda14cbcSMatt Macy 	 * on the same host which last imported it.  Since the hostid from
3555eda14cbcSMatt Macy 	 * configuration may be stale, use the one read from the label.
3556eda14cbcSMatt Macy 	 */
3557eda14cbcSMatt Macy 	if (nvlist_exists(label, ZPOOL_CONFIG_HOSTID))
3558eda14cbcSMatt Macy 		hostid = fnvlist_lookup_uint64(label, ZPOOL_CONFIG_HOSTID);
3559eda14cbcSMatt Macy 
3560eda14cbcSMatt Macy 	if (hostid == spa_get_hostid(spa))
3561eda14cbcSMatt Macy 		return (B_FALSE);
3562eda14cbcSMatt Macy 
3563eda14cbcSMatt Macy 	/*
3564eda14cbcSMatt Macy 	 * Skip the activity test when the pool was cleanly exported.
3565eda14cbcSMatt Macy 	 */
3566eda14cbcSMatt Macy 	if (state != POOL_STATE_ACTIVE)
3567eda14cbcSMatt Macy 		return (B_FALSE);
3568eda14cbcSMatt Macy 
3569eda14cbcSMatt Macy 	return (B_TRUE);
3570eda14cbcSMatt Macy }
3571eda14cbcSMatt Macy 
3572eda14cbcSMatt Macy /*
3573eda14cbcSMatt Macy  * Nanoseconds the activity check must watch for changes on-disk.
3574eda14cbcSMatt Macy  */
3575eda14cbcSMatt Macy static uint64_t
3576eda14cbcSMatt Macy spa_activity_check_duration(spa_t *spa, uberblock_t *ub)
3577eda14cbcSMatt Macy {
3578eda14cbcSMatt Macy 	uint64_t import_intervals = MAX(zfs_multihost_import_intervals, 1);
3579eda14cbcSMatt Macy 	uint64_t multihost_interval = MSEC2NSEC(
3580eda14cbcSMatt Macy 	    MMP_INTERVAL_OK(zfs_multihost_interval));
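	/*
	 * Default: wait zfs_multihost_import_intervals multihost intervals,
	 * but never less than one second.
	 */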
3581eda14cbcSMatt Macy 	uint64_t import_delay = MAX(NANOSEC, import_intervals *
3582eda14cbcSMatt Macy 	    multihost_interval);
3583eda14cbcSMatt Macy 
3584eda14cbcSMatt Macy 	/*
3585eda14cbcSMatt Macy 	 * Local tunables determine a minimum duration except for the case
3586eda14cbcSMatt Macy 	 * where we know when the remote host will suspend the pool if MMP
3587eda14cbcSMatt Macy 	 * writes do not land.
3588eda14cbcSMatt Macy 	 *
3589eda14cbcSMatt Macy 	 * See Big Theory comment at the top of mmp.c for the reasoning behind
3590eda14cbcSMatt Macy 	 * these cases and times.
3591eda14cbcSMatt Macy 	 */
3592eda14cbcSMatt Macy 
3593eda14cbcSMatt Macy 	ASSERT(MMP_IMPORT_SAFETY_FACTOR >= 100);
3594eda14cbcSMatt Macy 
3595eda14cbcSMatt Macy 	if (MMP_INTERVAL_VALID(ub) && MMP_FAIL_INT_VALID(ub) &&
3596eda14cbcSMatt Macy 	    MMP_FAIL_INT(ub) > 0) {
3597eda14cbcSMatt Macy 
3598eda14cbcSMatt Macy 		/* MMP on remote host will suspend pool after failed writes */
3599eda14cbcSMatt Macy 		import_delay = MMP_FAIL_INT(ub) * MSEC2NSEC(MMP_INTERVAL(ub)) *
3600eda14cbcSMatt Macy 		    MMP_IMPORT_SAFETY_FACTOR / 100;
3601eda14cbcSMatt Macy 
3602eda14cbcSMatt Macy 		zfs_dbgmsg("fail_intvals>0 import_delay=%llu ub_mmp "
3603eda14cbcSMatt Macy 		    "mmp_fails=%llu ub_mmp mmp_interval=%llu "
360433b8c039SMartin Matuska 		    "import_intervals=%llu", (u_longlong_t)import_delay,
360533b8c039SMartin Matuska 		    (u_longlong_t)MMP_FAIL_INT(ub),
360633b8c039SMartin Matuska 		    (u_longlong_t)MMP_INTERVAL(ub),
360733b8c039SMartin Matuska 		    (u_longlong_t)import_intervals);
3608eda14cbcSMatt Macy 
3609eda14cbcSMatt Macy 	} else if (MMP_INTERVAL_VALID(ub) && MMP_FAIL_INT_VALID(ub) &&
3610eda14cbcSMatt Macy 	    MMP_FAIL_INT(ub) == 0) {
3611eda14cbcSMatt Macy 
3612eda14cbcSMatt Macy 		/* MMP on remote host will never suspend pool */
3613eda14cbcSMatt Macy 		import_delay = MAX(import_delay, (MSEC2NSEC(MMP_INTERVAL(ub)) +
3614eda14cbcSMatt Macy 		    ub->ub_mmp_delay) * import_intervals);
3615eda14cbcSMatt Macy 
3616eda14cbcSMatt Macy 		zfs_dbgmsg("fail_intvals=0 import_delay=%llu ub_mmp "
3617eda14cbcSMatt Macy 		    "mmp_interval=%llu ub_mmp_delay=%llu "
361833b8c039SMartin Matuska 		    "import_intervals=%llu", (u_longlong_t)import_delay,
361933b8c039SMartin Matuska 		    (u_longlong_t)MMP_INTERVAL(ub),
362033b8c039SMartin Matuska 		    (u_longlong_t)ub->ub_mmp_delay,
362133b8c039SMartin Matuska 		    (u_longlong_t)import_intervals);
3622eda14cbcSMatt Macy 
3623eda14cbcSMatt Macy 	} else if (MMP_VALID(ub)) {
3624eda14cbcSMatt Macy 		/*
3625eda14cbcSMatt Macy 		 * zfs-0.7 compatibility case
3626eda14cbcSMatt Macy 		 */
3627eda14cbcSMatt Macy 
3628eda14cbcSMatt Macy 		import_delay = MAX(import_delay, (multihost_interval +
3629eda14cbcSMatt Macy 		    ub->ub_mmp_delay) * import_intervals);
3630eda14cbcSMatt Macy 
3631eda14cbcSMatt Macy 		zfs_dbgmsg("import_delay=%llu ub_mmp_delay=%llu "
363233b8c039SMartin Matuska 		    "import_intervals=%llu leaves=%u",
363333b8c039SMartin Matuska 		    (u_longlong_t)import_delay,
363433b8c039SMartin Matuska 		    (u_longlong_t)ub->ub_mmp_delay,
363533b8c039SMartin Matuska 		    (u_longlong_t)import_intervals,
3636eda14cbcSMatt Macy 		    vdev_count_leaves(spa));
3637eda14cbcSMatt Macy 	} else {
3638eda14cbcSMatt Macy 		/* Using local tunings is the only reasonable option */
3639eda14cbcSMatt Macy 		zfs_dbgmsg("pool last imported on non-MMP aware "
3640eda14cbcSMatt Macy 		    "host using import_delay=%llu multihost_interval=%llu "
364133b8c039SMartin Matuska 		    "import_intervals=%llu", (u_longlong_t)import_delay,
364233b8c039SMartin Matuska 		    (u_longlong_t)multihost_interval,
364333b8c039SMartin Matuska 		    (u_longlong_t)import_intervals);
3644eda14cbcSMatt Macy 	}
3645eda14cbcSMatt Macy 
3646eda14cbcSMatt Macy 	return (import_delay);
3647eda14cbcSMatt Macy }
3648eda14cbcSMatt Macy 
3649eda14cbcSMatt Macy /*
3650b985c9caSMartin Matuska  * Remote host activity check.
3651b985c9caSMartin Matuska  *
3652b985c9caSMartin Matuska  * error results:
3653b985c9caSMartin Matuska  *          0 - no activity detected
3654b985c9caSMartin Matuska  *  EREMOTEIO - remote activity detected
3655b985c9caSMartin Matuska  *      EINTR - user canceled the operation
3656eda14cbcSMatt Macy  */
3657eda14cbcSMatt Macy static int
3658b985c9caSMartin Matuska spa_activity_check(spa_t *spa, uberblock_t *ub, nvlist_t *config,
3659b985c9caSMartin Matuska     boolean_t importing)
3660eda14cbcSMatt Macy {
3661eda14cbcSMatt Macy 	uint64_t txg = ub->ub_txg;
3662eda14cbcSMatt Macy 	uint64_t timestamp = ub->ub_timestamp;
3663eda14cbcSMatt Macy 	uint64_t mmp_config = ub->ub_mmp_config;
3664eda14cbcSMatt Macy 	uint16_t mmp_seq = MMP_SEQ_VALID(ub) ? MMP_SEQ(ub) : 0;
3665eda14cbcSMatt Macy 	uint64_t import_delay;
36663494f7c0SMartin Matuska 	hrtime_t import_expire, now;
3667eda14cbcSMatt Macy 	nvlist_t *mmp_label = NULL;
3668eda14cbcSMatt Macy 	vdev_t *rvd = spa->spa_root_vdev;
3669eda14cbcSMatt Macy 	kcondvar_t cv;
3670eda14cbcSMatt Macy 	kmutex_t mtx;
3671eda14cbcSMatt Macy 	int error = 0;
3672eda14cbcSMatt Macy 
3673eda14cbcSMatt Macy 	cv_init(&cv, NULL, CV_DEFAULT, NULL);
3674eda14cbcSMatt Macy 	mutex_init(&mtx, NULL, MUTEX_DEFAULT, NULL);
3675eda14cbcSMatt Macy 	mutex_enter(&mtx);
3676eda14cbcSMatt Macy 
3677eda14cbcSMatt Macy 	/*
3678eda14cbcSMatt Macy 	 * If ZPOOL_CONFIG_MMP_TXG is present, an activity check was performed
3679eda14cbcSMatt Macy 	 * during the earlier tryimport.  If the txg recorded there is 0, then
3680eda14cbcSMatt Macy 	 * the pool is known to be active on another host.
3681eda14cbcSMatt Macy 	 *
3682eda14cbcSMatt Macy 	 * Otherwise, the pool might be in use on another host.  Check for
3683eda14cbcSMatt Macy 	 * changes in the uberblocks on disk if necessary.
3684eda14cbcSMatt Macy 	 */
3685eda14cbcSMatt Macy 	if (nvlist_exists(config, ZPOOL_CONFIG_LOAD_INFO)) {
3686eda14cbcSMatt Macy 		nvlist_t *nvinfo = fnvlist_lookup_nvlist(config,
3687eda14cbcSMatt Macy 		    ZPOOL_CONFIG_LOAD_INFO);
3688eda14cbcSMatt Macy 
3689eda14cbcSMatt Macy 		if (nvlist_exists(nvinfo, ZPOOL_CONFIG_MMP_TXG) &&
3690eda14cbcSMatt Macy 		    fnvlist_lookup_uint64(nvinfo, ZPOOL_CONFIG_MMP_TXG) == 0) {
3691eda14cbcSMatt Macy 			vdev_uberblock_load(rvd, ub, &mmp_label);
3692eda14cbcSMatt Macy 			error = SET_ERROR(EREMOTEIO);
3693eda14cbcSMatt Macy 			goto out;
3694eda14cbcSMatt Macy 		}
3695eda14cbcSMatt Macy 	}
3696eda14cbcSMatt Macy 
3697eda14cbcSMatt Macy 	import_delay = spa_activity_check_duration(spa, ub);
3698eda14cbcSMatt Macy 
3699eda14cbcSMatt Macy 	/* Add a small random factor in case of simultaneous imports (0-25%) */
370033b8c039SMartin Matuska 	import_delay += import_delay * random_in_range(250) / 1000;
3701eda14cbcSMatt Macy 
3702eda14cbcSMatt Macy 	import_expire = gethrtime() + import_delay;
3703eda14cbcSMatt Macy 
3704b985c9caSMartin Matuska 	if (importing) {
3705b985c9caSMartin Matuska 		spa_import_progress_set_notes(spa, "Checking MMP activity, "
3706b985c9caSMartin Matuska 		    "waiting %llu ms", (u_longlong_t)NSEC2MSEC(import_delay));
3707b985c9caSMartin Matuska 	}
37083494f7c0SMartin Matuska 
3709b985c9caSMartin Matuska 	int iterations = 0;
37103494f7c0SMartin Matuska 	while ((now = gethrtime()) < import_expire) {
3711b985c9caSMartin Matuska 		if (importing && iterations++ % 30 == 0) {
37123494f7c0SMartin Matuska 			spa_import_progress_set_notes(spa, "Checking MMP "
37133494f7c0SMartin Matuska 			    "activity, %llu ms remaining",
37143494f7c0SMartin Matuska 			    (u_longlong_t)NSEC2MSEC(import_expire - now));
37153494f7c0SMartin Matuska 		}
37163494f7c0SMartin Matuska 
3717b985c9caSMartin Matuska 		if (importing) {
3718eda14cbcSMatt Macy 			(void) spa_import_progress_set_mmp_check(spa_guid(spa),
3719eda14cbcSMatt Macy 			    NSEC2SEC(import_expire - gethrtime()));
3720b985c9caSMartin Matuska 		}
3721eda14cbcSMatt Macy 
3722eda14cbcSMatt Macy 		vdev_uberblock_load(rvd, ub, &mmp_label);
3723eda14cbcSMatt Macy 
3724eda14cbcSMatt Macy 		if (txg != ub->ub_txg || timestamp != ub->ub_timestamp ||
3725eda14cbcSMatt Macy 		    mmp_seq != (MMP_SEQ_VALID(ub) ? MMP_SEQ(ub) : 0)) {
3726eda14cbcSMatt Macy 			zfs_dbgmsg("multihost activity detected "
3727eda14cbcSMatt Macy 			    "txg %llu ub_txg  %llu "
3728eda14cbcSMatt Macy 			    "timestamp %llu ub_timestamp  %llu "
3729eda14cbcSMatt Macy 			    "mmp_config %#llx ub_mmp_config %#llx",
373033b8c039SMartin Matuska 			    (u_longlong_t)txg, (u_longlong_t)ub->ub_txg,
373133b8c039SMartin Matuska 			    (u_longlong_t)timestamp,
373233b8c039SMartin Matuska 			    (u_longlong_t)ub->ub_timestamp,
373333b8c039SMartin Matuska 			    (u_longlong_t)mmp_config,
373433b8c039SMartin Matuska 			    (u_longlong_t)ub->ub_mmp_config);
3735eda14cbcSMatt Macy 
3736eda14cbcSMatt Macy 			error = SET_ERROR(EREMOTEIO);
3737eda14cbcSMatt Macy 			break;
3738eda14cbcSMatt Macy 		}
3739eda14cbcSMatt Macy 
3740eda14cbcSMatt Macy 		if (mmp_label) {
3741eda14cbcSMatt Macy 			nvlist_free(mmp_label);
3742eda14cbcSMatt Macy 			mmp_label = NULL;
3743eda14cbcSMatt Macy 		}
3744eda14cbcSMatt Macy 
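		/*
		 * Sleep for about one second.  cv_timedwait_sig() returns -1
		 * when the timeout expires; any other return value means we
		 * were interrupted by a signal, which we treat as the user
		 * cancelling the check.
		 */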
3745eda14cbcSMatt Macy 		error = cv_timedwait_sig(&cv, &mtx, ddi_get_lbolt() + hz);
3746eda14cbcSMatt Macy 		if (error != -1) {
3747eda14cbcSMatt Macy 			error = SET_ERROR(EINTR);
3748eda14cbcSMatt Macy 			break;
3749eda14cbcSMatt Macy 		}
3750eda14cbcSMatt Macy 		error = 0;
3751eda14cbcSMatt Macy 	}
3752eda14cbcSMatt Macy 
3753eda14cbcSMatt Macy out:
3754eda14cbcSMatt Macy 	mutex_exit(&mtx);
3755eda14cbcSMatt Macy 	mutex_destroy(&mtx);
3756eda14cbcSMatt Macy 	cv_destroy(&cv);
3757eda14cbcSMatt Macy 
3758eda14cbcSMatt Macy 	/*
3759eda14cbcSMatt Macy 	 * If the pool is determined to be active, store the status in the
3760eda14cbcSMatt Macy 	 * spa->spa_load_info nvlist.  If the remote hostname or hostid are
3761eda14cbcSMatt Macy 	 * available from the configuration read from disk, store them as well.
3762eda14cbcSMatt Macy 	 * This allows 'zpool import' to generate a more useful message.
3763eda14cbcSMatt Macy 	 *
3764eda14cbcSMatt Macy 	 * ZPOOL_CONFIG_MMP_STATE    - observed pool status (mandatory)
3765eda14cbcSMatt Macy 	 * ZPOOL_CONFIG_MMP_HOSTNAME - hostname from the active pool
3766eda14cbcSMatt Macy 	 * ZPOOL_CONFIG_MMP_HOSTID   - hostid from the active pool
3767eda14cbcSMatt Macy 	 */
3768eda14cbcSMatt Macy 	if (error == EREMOTEIO) {
3769a0b956f5SMartin Matuska 		const char *hostname = "<unknown>";
3770eda14cbcSMatt Macy 		uint64_t hostid = 0;
3771eda14cbcSMatt Macy 
3772eda14cbcSMatt Macy 		if (mmp_label) {
3773eda14cbcSMatt Macy 			if (nvlist_exists(mmp_label, ZPOOL_CONFIG_HOSTNAME)) {
3774eda14cbcSMatt Macy 				hostname = fnvlist_lookup_string(mmp_label,
3775eda14cbcSMatt Macy 				    ZPOOL_CONFIG_HOSTNAME);
3776eda14cbcSMatt Macy 				fnvlist_add_string(spa->spa_load_info,
3777eda14cbcSMatt Macy 				    ZPOOL_CONFIG_MMP_HOSTNAME, hostname);
3778eda14cbcSMatt Macy 			}
3779eda14cbcSMatt Macy 
3780eda14cbcSMatt Macy 			if (nvlist_exists(mmp_label, ZPOOL_CONFIG_HOSTID)) {
3781eda14cbcSMatt Macy 				hostid = fnvlist_lookup_uint64(mmp_label,
3782eda14cbcSMatt Macy 				    ZPOOL_CONFIG_HOSTID);
3783eda14cbcSMatt Macy 				fnvlist_add_uint64(spa->spa_load_info,
3784eda14cbcSMatt Macy 				    ZPOOL_CONFIG_MMP_HOSTID, hostid);
3785eda14cbcSMatt Macy 			}
3786eda14cbcSMatt Macy 		}
3787eda14cbcSMatt Macy 
3788eda14cbcSMatt Macy 		fnvlist_add_uint64(spa->spa_load_info,
3789eda14cbcSMatt Macy 		    ZPOOL_CONFIG_MMP_STATE, MMP_STATE_ACTIVE);
3790eda14cbcSMatt Macy 		fnvlist_add_uint64(spa->spa_load_info,
3791eda14cbcSMatt Macy 		    ZPOOL_CONFIG_MMP_TXG, 0);
3792eda14cbcSMatt Macy 
3793eda14cbcSMatt Macy 		error = spa_vdev_err(rvd, VDEV_AUX_ACTIVE, EREMOTEIO);
3794eda14cbcSMatt Macy 	}
3795eda14cbcSMatt Macy 
3796eda14cbcSMatt Macy 	if (mmp_label)
3797eda14cbcSMatt Macy 		nvlist_free(mmp_label);
3798eda14cbcSMatt Macy 
3799eda14cbcSMatt Macy 	return (error);
3800eda14cbcSMatt Macy }
3801eda14cbcSMatt Macy 
3802b985c9caSMartin Matuska /*
3803b985c9caSMartin Matuska  * Called from zfs_ioc_clear for a pool that was suspended
3804b985c9caSMartin Matuska  * after failing MMP write checks.
3805b985c9caSMartin Matuska  */
3806b985c9caSMartin Matuska boolean_t
3807b985c9caSMartin Matuska spa_mmp_remote_host_activity(spa_t *spa)
3808b985c9caSMartin Matuska {
3809b985c9caSMartin Matuska 	ASSERT(spa_multihost(spa) && spa_suspended(spa));
3810b985c9caSMartin Matuska 
3811b985c9caSMartin Matuska 	nvlist_t *best_label;
3812b985c9caSMartin Matuska 	uberblock_t best_ub;
3813b985c9caSMartin Matuska 
3814b985c9caSMartin Matuska 	/*
3815b985c9caSMartin Matuska 	 * Locate the best uberblock on disk
3816b985c9caSMartin Matuska 	 */
3817b985c9caSMartin Matuska 	vdev_uberblock_load(spa->spa_root_vdev, &best_ub, &best_label);
3818b985c9caSMartin Matuska 	if (best_label) {
3819b985c9caSMartin Matuska 		/*
3820b985c9caSMartin Matuska 		 * confirm that the best hostid matches our hostid
3821b985c9caSMartin Matuska 		 */
3822b985c9caSMartin Matuska 		if (nvlist_exists(best_label, ZPOOL_CONFIG_HOSTID) &&
3823b985c9caSMartin Matuska 		    spa_get_hostid(spa) !=
3824b985c9caSMartin Matuska 		    fnvlist_lookup_uint64(best_label, ZPOOL_CONFIG_HOSTID)) {
3825b985c9caSMartin Matuska 			nvlist_free(best_label);
3826b985c9caSMartin Matuska 			return (B_TRUE);
3827b985c9caSMartin Matuska 		}
3828b985c9caSMartin Matuska 		nvlist_free(best_label);
3829b985c9caSMartin Matuska 	} else {
3830b985c9caSMartin Matuska 		return (B_TRUE);
3831b985c9caSMartin Matuska 	}
3832b985c9caSMartin Matuska 
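	/*
	 * Without a valid MMP fail interval in the best uberblock (or with a
	 * fail interval of zero, meaning a remote importer would never
	 * suspend), we cannot rule out a remote writer, so conservatively
	 * report activity.
	 */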
3833b985c9caSMartin Matuska 	if (!MMP_VALID(&best_ub) ||
3834b985c9caSMartin Matuska 	    !MMP_FAIL_INT_VALID(&best_ub) ||
3835b985c9caSMartin Matuska 	    MMP_FAIL_INT(&best_ub) == 0) {
3836b985c9caSMartin Matuska 		return (B_TRUE);
3837b985c9caSMartin Matuska 	}
3838b985c9caSMartin Matuska 
3839b985c9caSMartin Matuska 	if (best_ub.ub_txg != spa->spa_uberblock.ub_txg ||
3840b985c9caSMartin Matuska 	    best_ub.ub_timestamp != spa->spa_uberblock.ub_timestamp) {
3841b985c9caSMartin Matuska 		zfs_dbgmsg("txg mismatch detected during pool clear "
3842b985c9caSMartin Matuska 		    "txg %llu ub_txg %llu timestamp %llu ub_timestamp %llu",
3843b985c9caSMartin Matuska 		    (u_longlong_t)spa->spa_uberblock.ub_txg,
3844b985c9caSMartin Matuska 		    (u_longlong_t)best_ub.ub_txg,
3845b985c9caSMartin Matuska 		    (u_longlong_t)spa->spa_uberblock.ub_timestamp,
3846b985c9caSMartin Matuska 		    (u_longlong_t)best_ub.ub_timestamp);
3847b985c9caSMartin Matuska 		return (B_TRUE);
3848b985c9caSMartin Matuska 	}
3849b985c9caSMartin Matuska 
3850b985c9caSMartin Matuska 	/*
3851b985c9caSMartin Matuska 	 * Perform an activity check looking for any remote writer
3852b985c9caSMartin Matuska 	 */
3853b985c9caSMartin Matuska 	return (spa_activity_check(spa, &spa->spa_uberblock, spa->spa_config,
3854b985c9caSMartin Matuska 	    B_FALSE) != 0);
3855b985c9caSMartin Matuska }
3856b985c9caSMartin Matuska 
3857eda14cbcSMatt Macy static int
3858eda14cbcSMatt Macy spa_verify_host(spa_t *spa, nvlist_t *mos_config)
3859eda14cbcSMatt Macy {
3860eda14cbcSMatt Macy 	uint64_t hostid;
38612a58b312SMartin Matuska 	const char *hostname;
3862eda14cbcSMatt Macy 	uint64_t myhostid = 0;
3863eda14cbcSMatt Macy 
3864eda14cbcSMatt Macy 	if (!spa_is_root(spa) && nvlist_lookup_uint64(mos_config,
3865eda14cbcSMatt Macy 	    ZPOOL_CONFIG_HOSTID, &hostid) == 0) {
3866eda14cbcSMatt Macy 		hostname = fnvlist_lookup_string(mos_config,
3867eda14cbcSMatt Macy 		    ZPOOL_CONFIG_HOSTNAME);
3868eda14cbcSMatt Macy 
3869eda14cbcSMatt Macy 		myhostid = zone_get_hostid(NULL);
3870eda14cbcSMatt Macy 
3871eda14cbcSMatt Macy 		if (hostid != 0 && myhostid != 0 && hostid != myhostid) {
3872eda14cbcSMatt Macy 			cmn_err(CE_WARN, "pool '%s' could not be "
3873eda14cbcSMatt Macy 			    "loaded as it was last accessed by "
3874eda14cbcSMatt Macy 			    "another system (host: %s hostid: 0x%llx). "
3875ac0bf12eSMatt Macy 			    "See: https://openzfs.github.io/openzfs-docs/msg/"
3876ac0bf12eSMatt Macy 			    "ZFS-8000-EY",
3877eda14cbcSMatt Macy 			    spa_name(spa), hostname, (u_longlong_t)hostid);
3878eda14cbcSMatt Macy 			spa_load_failed(spa, "hostid verification failed: pool "
3879eda14cbcSMatt Macy 			    "last accessed by host: %s (hostid: 0x%llx)",
3880eda14cbcSMatt Macy 			    hostname, (u_longlong_t)hostid);
3881eda14cbcSMatt Macy 			return (SET_ERROR(EBADF));
3882eda14cbcSMatt Macy 		}
3883eda14cbcSMatt Macy 	}
3884eda14cbcSMatt Macy 
3885eda14cbcSMatt Macy 	return (0);
3886eda14cbcSMatt Macy }
3887eda14cbcSMatt Macy 
3888eda14cbcSMatt Macy static int
3889eda14cbcSMatt Macy spa_ld_parse_config(spa_t *spa, spa_import_type_t type)
3890eda14cbcSMatt Macy {
3891eda14cbcSMatt Macy 	int error = 0;
3892eda14cbcSMatt Macy 	nvlist_t *nvtree, *nvl, *config = spa->spa_config;
3893eda14cbcSMatt Macy 	int parse;
3894eda14cbcSMatt Macy 	vdev_t *rvd;
3895eda14cbcSMatt Macy 	uint64_t pool_guid;
38962a58b312SMartin Matuska 	const char *comment;
38972a58b312SMartin Matuska 	const char *compatibility;
3898eda14cbcSMatt Macy 
3899eda14cbcSMatt Macy 	/*
3900eda14cbcSMatt Macy 	 * Versioning wasn't explicitly added to the label until later, so if
3901eda14cbcSMatt Macy 	 * it's not present treat it as the initial version.
3902eda14cbcSMatt Macy 	 */
3903eda14cbcSMatt Macy 	if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
3904eda14cbcSMatt Macy 	    &spa->spa_ubsync.ub_version) != 0)
3905eda14cbcSMatt Macy 		spa->spa_ubsync.ub_version = SPA_VERSION_INITIAL;
3906eda14cbcSMatt Macy 
3907eda14cbcSMatt Macy 	if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID, &pool_guid)) {
3908eda14cbcSMatt Macy 		spa_load_failed(spa, "invalid config provided: '%s' missing",
3909eda14cbcSMatt Macy 		    ZPOOL_CONFIG_POOL_GUID);
3910eda14cbcSMatt Macy 		return (SET_ERROR(EINVAL));
3911eda14cbcSMatt Macy 	}
3912eda14cbcSMatt Macy 
3913eda14cbcSMatt Macy 	/*
3914eda14cbcSMatt Macy 	 * If we are doing an import, ensure that the pool is not already
3915eda14cbcSMatt Macy 	 * imported by checking if its pool guid already exists in the
3916eda14cbcSMatt Macy 	 * spa namespace.
3917eda14cbcSMatt Macy 	 *
3918eda14cbcSMatt Macy 	 * The only case in which we allow an already imported pool to be
3919eda14cbcSMatt Macy 	 * imported again is when the pool is checkpointed and we want to
3920eda14cbcSMatt Macy 	 * look at its checkpointed state from userland tools like zdb.
3921eda14cbcSMatt Macy 	 */
3922eda14cbcSMatt Macy #ifdef _KERNEL
3923eda14cbcSMatt Macy 	if ((spa->spa_load_state == SPA_LOAD_IMPORT ||
3924eda14cbcSMatt Macy 	    spa->spa_load_state == SPA_LOAD_TRYIMPORT) &&
3925eda14cbcSMatt Macy 	    spa_guid_exists(pool_guid, 0)) {
3926eda14cbcSMatt Macy #else
3927eda14cbcSMatt Macy 	if ((spa->spa_load_state == SPA_LOAD_IMPORT ||
3928eda14cbcSMatt Macy 	    spa->spa_load_state == SPA_LOAD_TRYIMPORT) &&
3929eda14cbcSMatt Macy 	    spa_guid_exists(pool_guid, 0) &&
3930eda14cbcSMatt Macy 	    !spa_importing_readonly_checkpoint(spa)) {
3931eda14cbcSMatt Macy #endif
3932eda14cbcSMatt Macy 		spa_load_failed(spa, "a pool with guid %llu is already open",
3933eda14cbcSMatt Macy 		    (u_longlong_t)pool_guid);
3934eda14cbcSMatt Macy 		return (SET_ERROR(EEXIST));
3935eda14cbcSMatt Macy 	}
3936eda14cbcSMatt Macy 
3937eda14cbcSMatt Macy 	spa->spa_config_guid = pool_guid;
3938eda14cbcSMatt Macy 
3939eda14cbcSMatt Macy 	nvlist_free(spa->spa_load_info);
3940eda14cbcSMatt Macy 	spa->spa_load_info = fnvlist_alloc();
3941eda14cbcSMatt Macy 
3942eda14cbcSMatt Macy 	ASSERT(spa->spa_comment == NULL);
3943eda14cbcSMatt Macy 	if (nvlist_lookup_string(config, ZPOOL_CONFIG_COMMENT, &comment) == 0)
3944eda14cbcSMatt Macy 		spa->spa_comment = spa_strdup(comment);
3945eda14cbcSMatt Macy 
3946ee36e25aSMartin Matuska 	ASSERT(spa->spa_compatibility == NULL);
3947ee36e25aSMartin Matuska 	if (nvlist_lookup_string(config, ZPOOL_CONFIG_COMPATIBILITY,
3948ee36e25aSMartin Matuska 	    &compatibility) == 0)
3949ee36e25aSMartin Matuska 		spa->spa_compatibility = spa_strdup(compatibility);
3950ee36e25aSMartin Matuska 
3951eda14cbcSMatt Macy 	(void) nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_TXG,
3952eda14cbcSMatt Macy 	    &spa->spa_config_txg);
3953eda14cbcSMatt Macy 
3954eda14cbcSMatt Macy 	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_SPLIT, &nvl) == 0)
3955eda14cbcSMatt Macy 		spa->spa_config_splitting = fnvlist_dup(nvl);
3956eda14cbcSMatt Macy 
3957eda14cbcSMatt Macy 	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &nvtree)) {
3958eda14cbcSMatt Macy 		spa_load_failed(spa, "invalid config provided: '%s' missing",
3959eda14cbcSMatt Macy 		    ZPOOL_CONFIG_VDEV_TREE);
3960eda14cbcSMatt Macy 		return (SET_ERROR(EINVAL));
3961eda14cbcSMatt Macy 	}
3962eda14cbcSMatt Macy 
3963eda14cbcSMatt Macy 	/*
3964eda14cbcSMatt Macy 	 * Create "The Godfather" zio to hold all async IOs
3965eda14cbcSMatt Macy 	 */
3966eda14cbcSMatt Macy 	spa->spa_async_zio_root = kmem_alloc(max_ncpus * sizeof (void *),
3967eda14cbcSMatt Macy 	    KM_SLEEP);
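	/*
	 * One root zio is allocated per CPU so that async I/Os issued from
	 * different CPUs do not all contend on a single parent zio.
	 */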
3968eda14cbcSMatt Macy 	for (int i = 0; i < max_ncpus; i++) {
3969eda14cbcSMatt Macy 		spa->spa_async_zio_root[i] = zio_root(spa, NULL, NULL,
3970eda14cbcSMatt Macy 		    ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE |
3971eda14cbcSMatt Macy 		    ZIO_FLAG_GODFATHER);
3972eda14cbcSMatt Macy 	}
3973eda14cbcSMatt Macy 
3974eda14cbcSMatt Macy 	/*
3975eda14cbcSMatt Macy 	 * Parse the configuration into a vdev tree.  We explicitly set the
3976eda14cbcSMatt Macy 	 * value that will be returned by spa_version() since parsing the
3977eda14cbcSMatt Macy 	 * configuration requires knowing the version number.
3978eda14cbcSMatt Macy 	 */
3979eda14cbcSMatt Macy 	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
3980eda14cbcSMatt Macy 	parse = (type == SPA_IMPORT_EXISTING ?
3981eda14cbcSMatt Macy 	    VDEV_ALLOC_LOAD : VDEV_ALLOC_SPLIT);
3982eda14cbcSMatt Macy 	error = spa_config_parse(spa, &rvd, nvtree, NULL, 0, parse);
3983eda14cbcSMatt Macy 	spa_config_exit(spa, SCL_ALL, FTAG);
3984eda14cbcSMatt Macy 
3985eda14cbcSMatt Macy 	if (error != 0) {
3986eda14cbcSMatt Macy 		spa_load_failed(spa, "unable to parse config [error=%d]",
3987eda14cbcSMatt Macy 		    error);
3988eda14cbcSMatt Macy 		return (error);
3989eda14cbcSMatt Macy 	}
3990eda14cbcSMatt Macy 
3991eda14cbcSMatt Macy 	ASSERT(spa->spa_root_vdev == rvd);
3992eda14cbcSMatt Macy 	ASSERT3U(spa->spa_min_ashift, >=, SPA_MINBLOCKSHIFT);
3993eda14cbcSMatt Macy 	ASSERT3U(spa->spa_max_ashift, <=, SPA_MAXBLOCKSHIFT);
3994eda14cbcSMatt Macy 
3995eda14cbcSMatt Macy 	if (type != SPA_IMPORT_ASSEMBLE) {
3996eda14cbcSMatt Macy 		ASSERT(spa_guid(spa) == pool_guid);
3997eda14cbcSMatt Macy 	}
3998eda14cbcSMatt Macy 
3999eda14cbcSMatt Macy 	return (0);
4000eda14cbcSMatt Macy }
4001eda14cbcSMatt Macy 
4002eda14cbcSMatt Macy /*
4003eda14cbcSMatt Macy  * Recursively open all vdevs in the vdev tree. This function is called twice:
4004eda14cbcSMatt Macy  * first with the untrusted config, then with the trusted config.
4005eda14cbcSMatt Macy  */
4006eda14cbcSMatt Macy static int
4007eda14cbcSMatt Macy spa_ld_open_vdevs(spa_t *spa)
4008eda14cbcSMatt Macy {
4009eda14cbcSMatt Macy 	int error = 0;
4010eda14cbcSMatt Macy 
4011eda14cbcSMatt Macy 	/*
4012eda14cbcSMatt Macy 	 * spa_missing_tvds_allowed defines how many top-level vdevs can be
4013eda14cbcSMatt Macy 	 * missing/unopenable for the root vdev to still be considered openable.
4014eda14cbcSMatt Macy 	 */
4015eda14cbcSMatt Macy 	if (spa->spa_trust_config) {
4016eda14cbcSMatt Macy 		spa->spa_missing_tvds_allowed = zfs_max_missing_tvds;
4017eda14cbcSMatt Macy 	} else if (spa->spa_config_source == SPA_CONFIG_SRC_CACHEFILE) {
4018eda14cbcSMatt Macy 		spa->spa_missing_tvds_allowed = zfs_max_missing_tvds_cachefile;
4019eda14cbcSMatt Macy 	} else if (spa->spa_config_source == SPA_CONFIG_SRC_SCAN) {
4020eda14cbcSMatt Macy 		spa->spa_missing_tvds_allowed = zfs_max_missing_tvds_scan;
4021eda14cbcSMatt Macy 	} else {
4022eda14cbcSMatt Macy 		spa->spa_missing_tvds_allowed = 0;
4023eda14cbcSMatt Macy 	}
4024eda14cbcSMatt Macy 
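	/*
	 * Regardless of the config source, zfs_max_missing_tvds acts as a
	 * floor on how many missing top-level vdevs we tolerate.
	 */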
4025eda14cbcSMatt Macy 	spa->spa_missing_tvds_allowed =
4026eda14cbcSMatt Macy 	    MAX(zfs_max_missing_tvds, spa->spa_missing_tvds_allowed);
4027eda14cbcSMatt Macy 
4028eda14cbcSMatt Macy 	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
4029eda14cbcSMatt Macy 	error = vdev_open(spa->spa_root_vdev);
4030eda14cbcSMatt Macy 	spa_config_exit(spa, SCL_ALL, FTAG);
4031eda14cbcSMatt Macy 
4032eda14cbcSMatt Macy 	if (spa->spa_missing_tvds != 0) {
4033eda14cbcSMatt Macy 		spa_load_note(spa, "vdev tree has %lld missing top-level "
4034eda14cbcSMatt Macy 		    "vdevs.", (u_longlong_t)spa->spa_missing_tvds);
4035eda14cbcSMatt Macy 		if (spa->spa_trust_config && (spa->spa_mode & SPA_MODE_WRITE)) {
4036eda14cbcSMatt Macy 			/*
4037eda14cbcSMatt Macy 			 * Although theoretically we could allow users to open
4038eda14cbcSMatt Macy 			 * incomplete pools in RW mode, we'd need to add a lot
4039eda14cbcSMatt Macy 			 * of extra logic (e.g. adjust pool space to account
4040eda14cbcSMatt Macy 			 * for missing vdevs).
4041eda14cbcSMatt Macy 			 * This limitation also prevents users from accidentally
4042eda14cbcSMatt Macy 			 * opening the pool in RW mode during data recovery and
4043eda14cbcSMatt Macy 			 * damaging it further.
4044eda14cbcSMatt Macy 			 */
4045eda14cbcSMatt Macy 			spa_load_note(spa, "pools with missing top-level "
4046eda14cbcSMatt Macy 			    "vdevs can only be opened in read-only mode.");
4047eda14cbcSMatt Macy 			error = SET_ERROR(ENXIO);
4048eda14cbcSMatt Macy 		} else {
4049eda14cbcSMatt Macy 			spa_load_note(spa, "current settings allow for maximum "
4050eda14cbcSMatt Macy 			    "%lld missing top-level vdevs at this stage.",
4051eda14cbcSMatt Macy 			    (u_longlong_t)spa->spa_missing_tvds_allowed);
4052eda14cbcSMatt Macy 		}
4053eda14cbcSMatt Macy 	}
4054eda14cbcSMatt Macy 	if (error != 0) {
4055eda14cbcSMatt Macy 		spa_load_failed(spa, "unable to open vdev tree [error=%d]",
4056eda14cbcSMatt Macy 		    error);
4057eda14cbcSMatt Macy 	}
4058eda14cbcSMatt Macy 	if (spa->spa_missing_tvds != 0 || error != 0)
4059eda14cbcSMatt Macy 		vdev_dbgmsg_print_tree(spa->spa_root_vdev, 2);
4060eda14cbcSMatt Macy 
4061eda14cbcSMatt Macy 	return (error);
4062eda14cbcSMatt Macy }
4063eda14cbcSMatt Macy 
4064eda14cbcSMatt Macy /*
4065eda14cbcSMatt Macy  * We need to validate the vdev labels against the configuration that
4066eda14cbcSMatt Macy  * we have in hand. This function is called twice: first with an untrusted
4067eda14cbcSMatt Macy  * config, then with a trusted config. The validation is more strict when the
4068eda14cbcSMatt Macy  * config is trusted.
4069eda14cbcSMatt Macy  */
4070eda14cbcSMatt Macy static int
4071eda14cbcSMatt Macy spa_ld_validate_vdevs(spa_t *spa)
4072eda14cbcSMatt Macy {
4073eda14cbcSMatt Macy 	int error = 0;
4074eda14cbcSMatt Macy 	vdev_t *rvd = spa->spa_root_vdev;
4075eda14cbcSMatt Macy 
4076eda14cbcSMatt Macy 	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
4077eda14cbcSMatt Macy 	error = vdev_validate(rvd);
4078eda14cbcSMatt Macy 	spa_config_exit(spa, SCL_ALL, FTAG);
4079eda14cbcSMatt Macy 
4080eda14cbcSMatt Macy 	if (error != 0) {
4081eda14cbcSMatt Macy 		spa_load_failed(spa, "vdev_validate failed [error=%d]", error);
4082eda14cbcSMatt Macy 		return (error);
4083eda14cbcSMatt Macy 	}
4084eda14cbcSMatt Macy 
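	/*
	 * vdev_validate() marks vdevs whose labels do not match the supplied
	 * config as unopenable; if that has propagated up to the root vdev
	 * there is nothing left to load from.
	 */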
4085eda14cbcSMatt Macy 	if (rvd->vdev_state <= VDEV_STATE_CANT_OPEN) {
4086eda14cbcSMatt Macy 		spa_load_failed(spa, "cannot open vdev tree after invalidating "
4087eda14cbcSMatt Macy 		    "some vdevs");
4088eda14cbcSMatt Macy 		vdev_dbgmsg_print_tree(rvd, 2);
4089eda14cbcSMatt Macy 		return (SET_ERROR(ENXIO));
4090eda14cbcSMatt Macy 	}
4091eda14cbcSMatt Macy 
4092eda14cbcSMatt Macy 	return (0);
4093eda14cbcSMatt Macy }
4094eda14cbcSMatt Macy 
4095eda14cbcSMatt Macy static void
4096eda14cbcSMatt Macy spa_ld_select_uberblock_done(spa_t *spa, uberblock_t *ub)
4097eda14cbcSMatt Macy {
4098eda14cbcSMatt Macy 	spa->spa_state = POOL_STATE_ACTIVE;
4099eda14cbcSMatt Macy 	spa->spa_ubsync = spa->spa_uberblock;
4100eda14cbcSMatt Macy 	spa->spa_verify_min_txg = spa->spa_extreme_rewind ?
4101eda14cbcSMatt Macy 	    TXG_INITIAL - 1 : spa_last_synced_txg(spa) - TXG_DEFER_SIZE - 1;
4102eda14cbcSMatt Macy 	spa->spa_first_txg = spa->spa_last_ubsync_txg ?
4103eda14cbcSMatt Macy 	    spa->spa_last_ubsync_txg : spa_last_synced_txg(spa) + 1;
4104eda14cbcSMatt Macy 	spa->spa_claim_max_txg = spa->spa_first_txg;
4105eda14cbcSMatt Macy 	spa->spa_prev_software_version = ub->ub_software_version;
4106eda14cbcSMatt Macy }
4107eda14cbcSMatt Macy 
4108eda14cbcSMatt Macy static int
4109eda14cbcSMatt Macy spa_ld_select_uberblock(spa_t *spa, spa_import_type_t type)
4110eda14cbcSMatt Macy {
4111eda14cbcSMatt Macy 	vdev_t *rvd = spa->spa_root_vdev;
4112eda14cbcSMatt Macy 	nvlist_t *label;
4113eda14cbcSMatt Macy 	uberblock_t *ub = &spa->spa_uberblock;
4114eda14cbcSMatt Macy 	boolean_t activity_check = B_FALSE;
4115eda14cbcSMatt Macy 
4116eda14cbcSMatt Macy 	/*
4117eda14cbcSMatt Macy 	 * If we are opening the checkpointed state of the pool by
4118eda14cbcSMatt Macy 	 * rewinding to it, at this point we will have written the
4119eda14cbcSMatt Macy 	 * checkpointed uberblock to the vdev labels, so searching
4120eda14cbcSMatt Macy 	 * the labels will find the right uberblock.  However, if
4121eda14cbcSMatt Macy 	 * we are opening the checkpointed state read-only, we have
4122eda14cbcSMatt Macy 	 * not modified the labels. Therefore, we must ignore the
4123eda14cbcSMatt Macy 	 * labels and continue using the spa_uberblock that was set
4124eda14cbcSMatt Macy 	 * by spa_ld_checkpoint_rewind.
4125eda14cbcSMatt Macy 	 *
4126eda14cbcSMatt Macy 	 * Note that it would be fine to ignore the labels when
4127eda14cbcSMatt Macy 	 * rewinding (opening writeable) as well. However, if we
4128eda14cbcSMatt Macy 	 * crash just after writing the labels, we will end up
4129eda14cbcSMatt Macy 	 * searching the labels. Doing so in the common case means
4130eda14cbcSMatt Macy 	 * that this code path gets exercised normally, rather than
4131eda14cbcSMatt Macy 	 * just in the edge case.
4132eda14cbcSMatt Macy 	 */
4133eda14cbcSMatt Macy 	if (ub->ub_checkpoint_txg != 0 &&
4134eda14cbcSMatt Macy 	    spa_importing_readonly_checkpoint(spa)) {
4135eda14cbcSMatt Macy 		spa_ld_select_uberblock_done(spa, ub);
4136eda14cbcSMatt Macy 		return (0);
4137eda14cbcSMatt Macy 	}
4138eda14cbcSMatt Macy 
4139eda14cbcSMatt Macy 	/*
4140eda14cbcSMatt Macy 	 * Find the best uberblock.
4141eda14cbcSMatt Macy 	 */
4142eda14cbcSMatt Macy 	vdev_uberblock_load(rvd, ub, &label);
4143eda14cbcSMatt Macy 
4144eda14cbcSMatt Macy 	/*
4145eda14cbcSMatt Macy 	 * If we weren't able to find a single valid uberblock, return failure.
4146eda14cbcSMatt Macy 	 */
4147eda14cbcSMatt Macy 	if (ub->ub_txg == 0) {
4148eda14cbcSMatt Macy 		nvlist_free(label);
4149eda14cbcSMatt Macy 		spa_load_failed(spa, "no valid uberblock found");
4150eda14cbcSMatt Macy 		return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, ENXIO));
4151eda14cbcSMatt Macy 	}
4152eda14cbcSMatt Macy 
4153eda14cbcSMatt Macy 	if (spa->spa_load_max_txg != UINT64_MAX) {
4154eda14cbcSMatt Macy 		(void) spa_import_progress_set_max_txg(spa_guid(spa),
4155eda14cbcSMatt Macy 		    (u_longlong_t)spa->spa_load_max_txg);
4156eda14cbcSMatt Macy 	}
4157eda14cbcSMatt Macy 	spa_load_note(spa, "using uberblock with txg=%llu",
4158eda14cbcSMatt Macy 	    (u_longlong_t)ub->ub_txg);
4159e716630dSMartin Matuska 	if (ub->ub_raidz_reflow_info != 0) {
4160e716630dSMartin Matuska 		spa_load_note(spa, "uberblock raidz_reflow_info: "
4161e716630dSMartin Matuska 		    "state=%u offset=%llu",
4162e716630dSMartin Matuska 		    (int)RRSS_GET_STATE(ub),
4163e716630dSMartin Matuska 		    (u_longlong_t)RRSS_GET_OFFSET(ub));
4164e716630dSMartin Matuska 	}
4165eda14cbcSMatt Macy 
4167eda14cbcSMatt Macy 	/*
4168eda14cbcSMatt Macy 	 * For pools which have the multihost property on, determine if the
4169eda14cbcSMatt Macy 	 * pool is truly inactive and can be safely imported.  Prevent
4170eda14cbcSMatt Macy 	 * hosts which don't have a hostid set from importing the pool.
4171eda14cbcSMatt Macy 	 */
4172eda14cbcSMatt Macy 	activity_check = spa_activity_check_required(spa, ub, label,
4173eda14cbcSMatt Macy 	    spa->spa_config);
4174eda14cbcSMatt Macy 	if (activity_check) {
4175eda14cbcSMatt Macy 		if (ub->ub_mmp_magic == MMP_MAGIC && ub->ub_mmp_delay &&
4176eda14cbcSMatt Macy 		    spa_get_hostid(spa) == 0) {
4177eda14cbcSMatt Macy 			nvlist_free(label);
4178eda14cbcSMatt Macy 			fnvlist_add_uint64(spa->spa_load_info,
4179eda14cbcSMatt Macy 			    ZPOOL_CONFIG_MMP_STATE, MMP_STATE_NO_HOSTID);
4180eda14cbcSMatt Macy 			return (spa_vdev_err(rvd, VDEV_AUX_ACTIVE, EREMOTEIO));
4181eda14cbcSMatt Macy 		}
4182eda14cbcSMatt Macy 
4183b985c9caSMartin Matuska 		int error =
4184b985c9caSMartin Matuska 		    spa_activity_check(spa, ub, spa->spa_config, B_TRUE);
4185eda14cbcSMatt Macy 		if (error) {
4186eda14cbcSMatt Macy 			nvlist_free(label);
4187eda14cbcSMatt Macy 			return (error);
4188eda14cbcSMatt Macy 		}
4189eda14cbcSMatt Macy 
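		/*
		 * Record what we observed so that a follow-up import attempt
		 * can tell an activity check was already performed on this
		 * uberblock (see the tryconfig_ handling in
		 * spa_activity_check_required()).
		 */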
4190eda14cbcSMatt Macy 		fnvlist_add_uint64(spa->spa_load_info,
4191eda14cbcSMatt Macy 		    ZPOOL_CONFIG_MMP_STATE, MMP_STATE_INACTIVE);
4192eda14cbcSMatt Macy 		fnvlist_add_uint64(spa->spa_load_info,
4193eda14cbcSMatt Macy 		    ZPOOL_CONFIG_MMP_TXG, ub->ub_txg);
4194eda14cbcSMatt Macy 		fnvlist_add_uint16(spa->spa_load_info,
4195eda14cbcSMatt Macy 		    ZPOOL_CONFIG_MMP_SEQ,
4196eda14cbcSMatt Macy 		    (MMP_SEQ_VALID(ub) ? MMP_SEQ(ub) : 0));
4197eda14cbcSMatt Macy 	}
4198eda14cbcSMatt Macy 
4199eda14cbcSMatt Macy 	/*
4200eda14cbcSMatt Macy 	 * If the pool has an unsupported version we can't open it.
4201eda14cbcSMatt Macy 	 */
4202eda14cbcSMatt Macy 	if (!SPA_VERSION_IS_SUPPORTED(ub->ub_version)) {
4203eda14cbcSMatt Macy 		nvlist_free(label);
4204eda14cbcSMatt Macy 		spa_load_failed(spa, "version %llu is not supported",
4205eda14cbcSMatt Macy 		    (u_longlong_t)ub->ub_version);
4206eda14cbcSMatt Macy 		return (spa_vdev_err(rvd, VDEV_AUX_VERSION_NEWER, ENOTSUP));
4207eda14cbcSMatt Macy 	}
4208eda14cbcSMatt Macy 
4209eda14cbcSMatt Macy 	if (ub->ub_version >= SPA_VERSION_FEATURES) {
4210eda14cbcSMatt Macy 		nvlist_t *features;
4211eda14cbcSMatt Macy 
4212eda14cbcSMatt Macy 		/*
4213eda14cbcSMatt Macy 		 * If we weren't able to find what's necessary for reading the
4214eda14cbcSMatt Macy 		 * MOS in the label, return failure.
4215eda14cbcSMatt Macy 		 */
4216eda14cbcSMatt Macy 		if (label == NULL) {
4217eda14cbcSMatt Macy 			spa_load_failed(spa, "label config unavailable");
4218eda14cbcSMatt Macy 			return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA,
4219eda14cbcSMatt Macy 			    ENXIO));
4220eda14cbcSMatt Macy 		}
4221eda14cbcSMatt Macy 
4222eda14cbcSMatt Macy 		if (nvlist_lookup_nvlist(label, ZPOOL_CONFIG_FEATURES_FOR_READ,
4223eda14cbcSMatt Macy 		    &features) != 0) {
4224eda14cbcSMatt Macy 			nvlist_free(label);
4225eda14cbcSMatt Macy 			spa_load_failed(spa, "invalid label: '%s' missing",
4226eda14cbcSMatt Macy 			    ZPOOL_CONFIG_FEATURES_FOR_READ);
4227eda14cbcSMatt Macy 			return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA,
4228eda14cbcSMatt Macy 			    ENXIO));
4229eda14cbcSMatt Macy 		}
4230eda14cbcSMatt Macy 
4231eda14cbcSMatt Macy 		/*
4232eda14cbcSMatt Macy 		 * Update our in-core representation with the definitive values
4233eda14cbcSMatt Macy 		 * from the label.
4234eda14cbcSMatt Macy 		 */
4235eda14cbcSMatt Macy 		nvlist_free(spa->spa_label_features);
423681b22a98SMartin Matuska 		spa->spa_label_features = fnvlist_dup(features);
4237eda14cbcSMatt Macy 	}
4238eda14cbcSMatt Macy 
4239eda14cbcSMatt Macy 	nvlist_free(label);
4240eda14cbcSMatt Macy 
4241eda14cbcSMatt Macy 	/*
4242eda14cbcSMatt Macy 	 * Look through entries in the label nvlist's features_for_read. If
4243eda14cbcSMatt Macy 	 * there is a feature listed there which we don't understand then we
4244eda14cbcSMatt Macy 	 * cannot open a pool.
4245eda14cbcSMatt Macy 	 */
4246eda14cbcSMatt Macy 	if (ub->ub_version >= SPA_VERSION_FEATURES) {
4247eda14cbcSMatt Macy 		nvlist_t *unsup_feat;
4248eda14cbcSMatt Macy 
424981b22a98SMartin Matuska 		unsup_feat = fnvlist_alloc();
4250eda14cbcSMatt Macy 
4251eda14cbcSMatt Macy 		for (nvpair_t *nvp = nvlist_next_nvpair(spa->spa_label_features,
4252eda14cbcSMatt Macy 		    NULL); nvp != NULL;
4253eda14cbcSMatt Macy 		    nvp = nvlist_next_nvpair(spa->spa_label_features, nvp)) {
4254eda14cbcSMatt Macy 			if (!zfeature_is_supported(nvpair_name(nvp))) {
425581b22a98SMartin Matuska 				fnvlist_add_string(unsup_feat,
425681b22a98SMartin Matuska 				    nvpair_name(nvp), "");
4257eda14cbcSMatt Macy 			}
4258eda14cbcSMatt Macy 		}
4259eda14cbcSMatt Macy 
4260eda14cbcSMatt Macy 		if (!nvlist_empty(unsup_feat)) {
426181b22a98SMartin Matuska 			fnvlist_add_nvlist(spa->spa_load_info,
426281b22a98SMartin Matuska 			    ZPOOL_CONFIG_UNSUP_FEAT, unsup_feat);
4263eda14cbcSMatt Macy 			nvlist_free(unsup_feat);
4264eda14cbcSMatt Macy 			spa_load_failed(spa, "some features are unsupported");
4265eda14cbcSMatt Macy 			return (spa_vdev_err(rvd, VDEV_AUX_UNSUP_FEAT,
4266eda14cbcSMatt Macy 			    ENOTSUP));
4267eda14cbcSMatt Macy 		}
4268eda14cbcSMatt Macy 
4269eda14cbcSMatt Macy 		nvlist_free(unsup_feat);
4270eda14cbcSMatt Macy 	}
4271eda14cbcSMatt Macy 
4272eda14cbcSMatt Macy 	if (type != SPA_IMPORT_ASSEMBLE && spa->spa_config_splitting) {
4273eda14cbcSMatt Macy 		spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
4274eda14cbcSMatt Macy 		spa_try_repair(spa, spa->spa_config);
4275eda14cbcSMatt Macy 		spa_config_exit(spa, SCL_ALL, FTAG);
4276eda14cbcSMatt Macy 		nvlist_free(spa->spa_config_splitting);
4277eda14cbcSMatt Macy 		spa->spa_config_splitting = NULL;
4278eda14cbcSMatt Macy 	}
4279eda14cbcSMatt Macy 
4280eda14cbcSMatt Macy 	/*
4281eda14cbcSMatt Macy 	 * Initialize internal SPA structures.
4282eda14cbcSMatt Macy 	 */
4283eda14cbcSMatt Macy 	spa_ld_select_uberblock_done(spa, ub);
4284eda14cbcSMatt Macy 
4285eda14cbcSMatt Macy 	return (0);
4286eda14cbcSMatt Macy }
4287eda14cbcSMatt Macy 
4288eda14cbcSMatt Macy static int
4289eda14cbcSMatt Macy spa_ld_open_rootbp(spa_t *spa)
4290eda14cbcSMatt Macy {
4291eda14cbcSMatt Macy 	int error = 0;
4292eda14cbcSMatt Macy 	vdev_t *rvd = spa->spa_root_vdev;
4293eda14cbcSMatt Macy 
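	/*
	 * dsl_pool_init() opens the DSL pool and the meta-objset (MOS),
	 * which is rooted at the block pointer in the uberblock we selected.
	 */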
4294eda14cbcSMatt Macy 	error = dsl_pool_init(spa, spa->spa_first_txg, &spa->spa_dsl_pool);
4295eda14cbcSMatt Macy 	if (error != 0) {
4296eda14cbcSMatt Macy 		spa_load_failed(spa, "unable to open rootbp in dsl_pool_init "
4297eda14cbcSMatt Macy 		    "[error=%d]", error);
4298eda14cbcSMatt Macy 		return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
4299eda14cbcSMatt Macy 	}
4300eda14cbcSMatt Macy 	spa->spa_meta_objset = spa->spa_dsl_pool->dp_meta_objset;
4301eda14cbcSMatt Macy 
4302eda14cbcSMatt Macy 	return (0);
4303eda14cbcSMatt Macy }
4304eda14cbcSMatt Macy 
4305eda14cbcSMatt Macy static int
4306eda14cbcSMatt Macy spa_ld_trusted_config(spa_t *spa, spa_import_type_t type,
4307eda14cbcSMatt Macy     boolean_t reloading)
4308eda14cbcSMatt Macy {
4309eda14cbcSMatt Macy 	vdev_t *mrvd, *rvd = spa->spa_root_vdev;
4310eda14cbcSMatt Macy 	nvlist_t *nv, *mos_config, *policy;
4311eda14cbcSMatt Macy 	int error = 0, copy_error;
4312eda14cbcSMatt Macy 	uint64_t healthy_tvds, healthy_tvds_mos;
4313eda14cbcSMatt Macy 	uint64_t mos_config_txg;
4314eda14cbcSMatt Macy 
4315eda14cbcSMatt Macy 	if (spa_dir_prop(spa, DMU_POOL_CONFIG, &spa->spa_config_object, B_TRUE)
4316eda14cbcSMatt Macy 	    != 0)
4317eda14cbcSMatt Macy 		return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
4318eda14cbcSMatt Macy 
4319eda14cbcSMatt Macy 	/*
4320eda14cbcSMatt Macy 	 * If we're assembling a pool from a split, the config provided is
4321eda14cbcSMatt Macy 	 * already trusted so there is nothing to do.
4322eda14cbcSMatt Macy 	 */
4323eda14cbcSMatt Macy 	if (type == SPA_IMPORT_ASSEMBLE)
4324eda14cbcSMatt Macy 		return (0);
4325eda14cbcSMatt Macy 
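	/*
	 * Remember how many healthy top-level vdevs the untrusted config
	 * gives us, so we can sanity-check it against the MOS config below.
	 */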
4326eda14cbcSMatt Macy 	healthy_tvds = spa_healthy_core_tvds(spa);
4327eda14cbcSMatt Macy 
4328eda14cbcSMatt Macy 	if (load_nvlist(spa, spa->spa_config_object, &mos_config)
4329eda14cbcSMatt Macy 	    != 0) {
4330eda14cbcSMatt Macy 		spa_load_failed(spa, "unable to retrieve MOS config");
4331eda14cbcSMatt Macy 		return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
4332eda14cbcSMatt Macy 	}
4333eda14cbcSMatt Macy 
4334eda14cbcSMatt Macy 	/*
4335eda14cbcSMatt Macy 	 * If we are doing an open, the pool owner wasn't verified yet, so do
4336eda14cbcSMatt Macy 	 * the verification here.
4337eda14cbcSMatt Macy 	 */
4338eda14cbcSMatt Macy 	if (spa->spa_load_state == SPA_LOAD_OPEN) {
4339eda14cbcSMatt Macy 		error = spa_verify_host(spa, mos_config);
4340eda14cbcSMatt Macy 		if (error != 0) {
4341eda14cbcSMatt Macy 			nvlist_free(mos_config);
4342eda14cbcSMatt Macy 			return (error);
4343eda14cbcSMatt Macy 		}
4344eda14cbcSMatt Macy 	}
4345eda14cbcSMatt Macy 
4346eda14cbcSMatt Macy 	nv = fnvlist_lookup_nvlist(mos_config, ZPOOL_CONFIG_VDEV_TREE);
4347eda14cbcSMatt Macy 
4348eda14cbcSMatt Macy 	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
4349eda14cbcSMatt Macy 
4350eda14cbcSMatt Macy 	/*
4351eda14cbcSMatt Macy 	 * Build a new vdev tree from the trusted config
4352eda14cbcSMatt Macy 	 */
43537877fdebSMatt Macy 	error = spa_config_parse(spa, &mrvd, nv, NULL, 0, VDEV_ALLOC_LOAD);
43547877fdebSMatt Macy 	if (error != 0) {
43557877fdebSMatt Macy 		nvlist_free(mos_config);
43567877fdebSMatt Macy 		spa_config_exit(spa, SCL_ALL, FTAG);
43577877fdebSMatt Macy 		spa_load_failed(spa, "spa_config_parse failed [error=%d]",
43587877fdebSMatt Macy 		    error);
43597877fdebSMatt Macy 		return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, error));
43607877fdebSMatt Macy 	}
4361eda14cbcSMatt Macy 
4362eda14cbcSMatt Macy 	/*
4363eda14cbcSMatt Macy 	 * Vdev paths in the MOS may be obsolete. If the untrusted config was
4364eda14cbcSMatt Macy 	 * obtained by scanning /dev/dsk, then it will have the right vdev
4365eda14cbcSMatt Macy 	 * paths. We update the trusted MOS config with this information.
4366eda14cbcSMatt Macy 	 * We first try to copy the paths with vdev_copy_path_strict, which
4367eda14cbcSMatt Macy 	 * succeeds only when both configs have exactly the same vdev tree.
4368eda14cbcSMatt Macy 	 * If that fails, we fall back to a more flexible method that has a
4369eda14cbcSMatt Macy 	 * best effort policy.
4370eda14cbcSMatt Macy 	 */
4371eda14cbcSMatt Macy 	copy_error = vdev_copy_path_strict(rvd, mrvd);
4372eda14cbcSMatt Macy 	if (copy_error != 0 || spa_load_print_vdev_tree) {
4373eda14cbcSMatt Macy 		spa_load_note(spa, "provided vdev tree:");
4374eda14cbcSMatt Macy 		vdev_dbgmsg_print_tree(rvd, 2);
4375eda14cbcSMatt Macy 		spa_load_note(spa, "MOS vdev tree:");
4376eda14cbcSMatt Macy 		vdev_dbgmsg_print_tree(mrvd, 2);
4377eda14cbcSMatt Macy 	}
4378eda14cbcSMatt Macy 	if (copy_error != 0) {
4379eda14cbcSMatt Macy 		spa_load_note(spa, "vdev_copy_path_strict failed, falling "
4380eda14cbcSMatt Macy 		    "back to vdev_copy_path_relaxed");
4381eda14cbcSMatt Macy 		vdev_copy_path_relaxed(rvd, mrvd);
4382eda14cbcSMatt Macy 	}
4383eda14cbcSMatt Macy 
4384eda14cbcSMatt Macy 	vdev_close(rvd);
4385eda14cbcSMatt Macy 	vdev_free(rvd);
4386eda14cbcSMatt Macy 	spa->spa_root_vdev = mrvd;
4387eda14cbcSMatt Macy 	rvd = mrvd;
4388eda14cbcSMatt Macy 	spa_config_exit(spa, SCL_ALL, FTAG);
4389eda14cbcSMatt Macy 
4390eda14cbcSMatt Macy 	/*
4391b2526e8bSMartin Matuska 	 * If 'zpool import' used a cached config, then the on-disk hostid and
4392b2526e8bSMartin Matuska 	 * hostname may differ from the cached config in ways that should
4393b2526e8bSMartin Matuska 	 * prevent import.  Userspace can't discover this without a scan, but
4394b2526e8bSMartin Matuska 	 * we know, so we add these values to LOAD_INFO so the caller can know
4395b2526e8bSMartin Matuska 	 * the difference.
4396b2526e8bSMartin Matuska 	 *
4397b2526e8bSMartin Matuska 	 * Note that we have to do this before the config is regenerated,
4398b2526e8bSMartin Matuska 	 * because the new config will have the hostid and hostname for this
4399b2526e8bSMartin Matuska 	 * host, in readiness for import.
4400b2526e8bSMartin Matuska 	 */
4401b2526e8bSMartin Matuska 	if (nvlist_exists(mos_config, ZPOOL_CONFIG_HOSTID))
4402b2526e8bSMartin Matuska 		fnvlist_add_uint64(spa->spa_load_info, ZPOOL_CONFIG_HOSTID,
4403b2526e8bSMartin Matuska 		    fnvlist_lookup_uint64(mos_config, ZPOOL_CONFIG_HOSTID));
4404b2526e8bSMartin Matuska 	if (nvlist_exists(mos_config, ZPOOL_CONFIG_HOSTNAME))
4405b2526e8bSMartin Matuska 		fnvlist_add_string(spa->spa_load_info, ZPOOL_CONFIG_HOSTNAME,
4406b2526e8bSMartin Matuska 		    fnvlist_lookup_string(mos_config, ZPOOL_CONFIG_HOSTNAME));
4407b2526e8bSMartin Matuska 
4408b2526e8bSMartin Matuska 	/*
4409eda14cbcSMatt Macy 	 * We will use spa_config if we decide to reload the spa or if spa_load
4410eda14cbcSMatt Macy 	 * fails and we rewind. We must thus regenerate the config using the
4411eda14cbcSMatt Macy 	 * MOS information with the updated paths. ZPOOL_LOAD_POLICY is used to
4412eda14cbcSMatt Macy 	 * pass settings on how to load the pool and is not stored in the MOS.
4413eda14cbcSMatt Macy 	 * We copy it over to our new, trusted config.
4414eda14cbcSMatt Macy 	 */
4415eda14cbcSMatt Macy 	mos_config_txg = fnvlist_lookup_uint64(mos_config,
4416eda14cbcSMatt Macy 	    ZPOOL_CONFIG_POOL_TXG);
4417eda14cbcSMatt Macy 	nvlist_free(mos_config);
4418eda14cbcSMatt Macy 	mos_config = spa_config_generate(spa, NULL, mos_config_txg, B_FALSE);
4419eda14cbcSMatt Macy 	if (nvlist_lookup_nvlist(spa->spa_config, ZPOOL_LOAD_POLICY,
4420eda14cbcSMatt Macy 	    &policy) == 0)
4421eda14cbcSMatt Macy 		fnvlist_add_nvlist(mos_config, ZPOOL_LOAD_POLICY, policy);
4422eda14cbcSMatt Macy 	spa_config_set(spa, mos_config);
4423eda14cbcSMatt Macy 	spa->spa_config_source = SPA_CONFIG_SRC_MOS;
4424eda14cbcSMatt Macy 
4425eda14cbcSMatt Macy 	/*
4426eda14cbcSMatt Macy 	 * Now that we have the config from the MOS, we should be more strict
4427eda14cbcSMatt Macy 	 * in checking blkptrs and can make assumptions about the consistency
4428eda14cbcSMatt Macy 	 * of the vdev tree. spa_trust_config must be set to true before opening
4429eda14cbcSMatt Macy 	 * vdevs in order for them to be writeable.
4430eda14cbcSMatt Macy 	 */
4431eda14cbcSMatt Macy 	spa->spa_trust_config = B_TRUE;
4432eda14cbcSMatt Macy 
4433eda14cbcSMatt Macy 	/*
4434eda14cbcSMatt Macy 	 * Open and validate the new vdev tree
4435eda14cbcSMatt Macy 	 */
4436eda14cbcSMatt Macy 	error = spa_ld_open_vdevs(spa);
4437eda14cbcSMatt Macy 	if (error != 0)
4438eda14cbcSMatt Macy 		return (error);
4439eda14cbcSMatt Macy 
4440eda14cbcSMatt Macy 	error = spa_ld_validate_vdevs(spa);
4441eda14cbcSMatt Macy 	if (error != 0)
4442eda14cbcSMatt Macy 		return (error);
4443eda14cbcSMatt Macy 
4444eda14cbcSMatt Macy 	if (copy_error != 0 || spa_load_print_vdev_tree) {
4445eda14cbcSMatt Macy 		spa_load_note(spa, "final vdev tree:");
4446eda14cbcSMatt Macy 		vdev_dbgmsg_print_tree(rvd, 2);
4447eda14cbcSMatt Macy 	}
4448eda14cbcSMatt Macy 
4449eda14cbcSMatt Macy 	if (spa->spa_load_state != SPA_LOAD_TRYIMPORT &&
4450eda14cbcSMatt Macy 	    !spa->spa_extreme_rewind && zfs_max_missing_tvds == 0) {
4451eda14cbcSMatt Macy 		/*
4452eda14cbcSMatt Macy 		 * Sanity check to make sure that we are indeed loading the
4453eda14cbcSMatt Macy 		 * latest uberblock. If we missed SPA_SYNC_MIN_VDEVS tvds
4454eda14cbcSMatt Macy 		 * in the config provided and they happened to be the only ones
4455eda14cbcSMatt Macy 		 * to have the latest uberblock, we could involuntarily perform
4456eda14cbcSMatt Macy 		 * an extreme rewind.
4457eda14cbcSMatt Macy 		 */
4458eda14cbcSMatt Macy 		healthy_tvds_mos = spa_healthy_core_tvds(spa);
4459eda14cbcSMatt Macy 		if (healthy_tvds_mos - healthy_tvds >=
4460eda14cbcSMatt Macy 		    SPA_SYNC_MIN_VDEVS) {
4461eda14cbcSMatt Macy 			spa_load_note(spa, "config provided misses too many "
4462eda14cbcSMatt Macy 			    "top-level vdevs compared to MOS (%lld vs %lld). ",
4463eda14cbcSMatt Macy 			    (u_longlong_t)healthy_tvds,
4464eda14cbcSMatt Macy 			    (u_longlong_t)healthy_tvds_mos);
4465eda14cbcSMatt Macy 			spa_load_note(spa, "vdev tree:");
4466eda14cbcSMatt Macy 			vdev_dbgmsg_print_tree(rvd, 2);
4467eda14cbcSMatt Macy 			if (reloading) {
4468eda14cbcSMatt Macy 				spa_load_failed(spa, "config was already "
4469eda14cbcSMatt Macy 				    "provided from MOS. Aborting.");
4470eda14cbcSMatt Macy 				return (spa_vdev_err(rvd,
4471eda14cbcSMatt Macy 				    VDEV_AUX_CORRUPT_DATA, EIO));
4472eda14cbcSMatt Macy 			}
4473eda14cbcSMatt Macy 			spa_load_note(spa, "spa must be reloaded using MOS "
4474eda14cbcSMatt Macy 			    "config");
4475eda14cbcSMatt Macy 			return (SET_ERROR(EAGAIN));
4476eda14cbcSMatt Macy 		}
4477eda14cbcSMatt Macy 	}
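	/*
	 * Worked example for the check above (illustrative only; for the
	 * numbers assume SPA_SYNC_MIN_VDEVS were 3): if the provided config
	 * could only open healthy_tvds = 1 top-level vdevs while the MOS
	 * tree opens healthy_tvds_mos = 5, the difference (4) reaches the
	 * threshold.  On the first pass this returns EAGAIN so the load is
	 * redone from the MOS config; if we are already on the reload pass
	 * (reloading == B_TRUE), the load fails instead of looping.
	 */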
4478eda14cbcSMatt Macy 
4479eda14cbcSMatt Macy 	error = spa_check_for_missing_logs(spa);
4480eda14cbcSMatt Macy 	if (error != 0)
4481eda14cbcSMatt Macy 		return (spa_vdev_err(rvd, VDEV_AUX_BAD_GUID_SUM, ENXIO));
4482eda14cbcSMatt Macy 
4483eda14cbcSMatt Macy 	if (rvd->vdev_guid_sum != spa->spa_uberblock.ub_guid_sum) {
4484eda14cbcSMatt Macy 		spa_load_failed(spa, "uberblock guid sum doesn't match MOS "
4485eda14cbcSMatt Macy 		    "guid sum (%llu != %llu)",
4486eda14cbcSMatt Macy 		    (u_longlong_t)spa->spa_uberblock.ub_guid_sum,
4487eda14cbcSMatt Macy 		    (u_longlong_t)rvd->vdev_guid_sum);
4488eda14cbcSMatt Macy 		return (spa_vdev_err(rvd, VDEV_AUX_BAD_GUID_SUM,
4489eda14cbcSMatt Macy 		    ENXIO));
4490eda14cbcSMatt Macy 	}
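	/*
	 * Illustrative note (an assumption about the check above, not taken
	 * from this file): ub_guid_sum is understood to be the 64-bit
	 * wrapping sum of the guids of every vdev in the tree, so for a root
	 * vdev with guid G0 and two children G1 and G2 the expected value is
	 * (G0 + G1 + G2) mod 2^64.  A config that silently lost a device
	 * therefore produces a different vdev_guid_sum and is caught here.
	 */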
4491eda14cbcSMatt Macy 
4492eda14cbcSMatt Macy 	return (0);
4493eda14cbcSMatt Macy }
4494eda14cbcSMatt Macy 
4495eda14cbcSMatt Macy static int
4496eda14cbcSMatt Macy spa_ld_open_indirect_vdev_metadata(spa_t *spa)
4497eda14cbcSMatt Macy {
4498eda14cbcSMatt Macy 	int error = 0;
4499eda14cbcSMatt Macy 	vdev_t *rvd = spa->spa_root_vdev;
4500eda14cbcSMatt Macy 
4501eda14cbcSMatt Macy 	/*
4502eda14cbcSMatt Macy 	 * Everything that we read before spa_remove_init() must be stored
4503eda14cbcSMatt Macy 	 * on concrete vdevs.  Therefore we do this as early as possible.
4504eda14cbcSMatt Macy 	 */
4505eda14cbcSMatt Macy 	error = spa_remove_init(spa);
4506eda14cbcSMatt Macy 	if (error != 0) {
4507eda14cbcSMatt Macy 		spa_load_failed(spa, "spa_remove_init failed [error=%d]",
4508eda14cbcSMatt Macy 		    error);
4509eda14cbcSMatt Macy 		return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
4510eda14cbcSMatt Macy 	}
4511eda14cbcSMatt Macy 
4512eda14cbcSMatt Macy 	/*
4513eda14cbcSMatt Macy 	 * Retrieve information needed to condense indirect vdev mappings.
4514eda14cbcSMatt Macy 	 */
4515eda14cbcSMatt Macy 	error = spa_condense_init(spa);
4516eda14cbcSMatt Macy 	if (error != 0) {
4517eda14cbcSMatt Macy 		spa_load_failed(spa, "spa_condense_init failed [error=%d]",
4518eda14cbcSMatt Macy 		    error);
4519eda14cbcSMatt Macy 		return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, error));
4520eda14cbcSMatt Macy 	}
4521eda14cbcSMatt Macy 
4522eda14cbcSMatt Macy 	return (0);
4523eda14cbcSMatt Macy }
4524eda14cbcSMatt Macy 
4525eda14cbcSMatt Macy static int
4526eda14cbcSMatt Macy spa_ld_check_features(spa_t *spa, boolean_t *missing_feat_writep)
4527eda14cbcSMatt Macy {
4528eda14cbcSMatt Macy 	int error = 0;
4529eda14cbcSMatt Macy 	vdev_t *rvd = spa->spa_root_vdev;
4530eda14cbcSMatt Macy 
4531eda14cbcSMatt Macy 	if (spa_version(spa) >= SPA_VERSION_FEATURES) {
4532eda14cbcSMatt Macy 		boolean_t missing_feat_read = B_FALSE;
4533eda14cbcSMatt Macy 		nvlist_t *unsup_feat, *enabled_feat;
4534eda14cbcSMatt Macy 
4535eda14cbcSMatt Macy 		if (spa_dir_prop(spa, DMU_POOL_FEATURES_FOR_READ,
4536eda14cbcSMatt Macy 		    &spa->spa_feat_for_read_obj, B_TRUE) != 0) {
4537eda14cbcSMatt Macy 			return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
4538eda14cbcSMatt Macy 		}
4539eda14cbcSMatt Macy 
4540eda14cbcSMatt Macy 		if (spa_dir_prop(spa, DMU_POOL_FEATURES_FOR_WRITE,
4541eda14cbcSMatt Macy 		    &spa->spa_feat_for_write_obj, B_TRUE) != 0) {
4542eda14cbcSMatt Macy 			return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
4543eda14cbcSMatt Macy 		}
4544eda14cbcSMatt Macy 
4545eda14cbcSMatt Macy 		if (spa_dir_prop(spa, DMU_POOL_FEATURE_DESCRIPTIONS,
4546eda14cbcSMatt Macy 		    &spa->spa_feat_desc_obj, B_TRUE) != 0) {
4547eda14cbcSMatt Macy 			return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
4548eda14cbcSMatt Macy 		}
4549eda14cbcSMatt Macy 
4550eda14cbcSMatt Macy 		enabled_feat = fnvlist_alloc();
4551eda14cbcSMatt Macy 		unsup_feat = fnvlist_alloc();
4552eda14cbcSMatt Macy 
4553eda14cbcSMatt Macy 		if (!spa_features_check(spa, B_FALSE,
4554eda14cbcSMatt Macy 		    unsup_feat, enabled_feat))
4555eda14cbcSMatt Macy 			missing_feat_read = B_TRUE;
4556eda14cbcSMatt Macy 
4557eda14cbcSMatt Macy 		if (spa_writeable(spa) ||
4558eda14cbcSMatt Macy 		    spa->spa_load_state == SPA_LOAD_TRYIMPORT) {
4559eda14cbcSMatt Macy 			if (!spa_features_check(spa, B_TRUE,
4560eda14cbcSMatt Macy 			    unsup_feat, enabled_feat)) {
4561eda14cbcSMatt Macy 				*missing_feat_writep = B_TRUE;
4562eda14cbcSMatt Macy 			}
4563eda14cbcSMatt Macy 		}
4564eda14cbcSMatt Macy 
4565eda14cbcSMatt Macy 		fnvlist_add_nvlist(spa->spa_load_info,
4566eda14cbcSMatt Macy 		    ZPOOL_CONFIG_ENABLED_FEAT, enabled_feat);
4567eda14cbcSMatt Macy 
4568eda14cbcSMatt Macy 		if (!nvlist_empty(unsup_feat)) {
4569eda14cbcSMatt Macy 			fnvlist_add_nvlist(spa->spa_load_info,
4570eda14cbcSMatt Macy 			    ZPOOL_CONFIG_UNSUP_FEAT, unsup_feat);
4571eda14cbcSMatt Macy 		}
4572eda14cbcSMatt Macy 
4573eda14cbcSMatt Macy 		fnvlist_free(enabled_feat);
4574eda14cbcSMatt Macy 		fnvlist_free(unsup_feat);
4575eda14cbcSMatt Macy 
4576eda14cbcSMatt Macy 		if (!missing_feat_read) {
4577eda14cbcSMatt Macy 			fnvlist_add_boolean(spa->spa_load_info,
4578eda14cbcSMatt Macy 			    ZPOOL_CONFIG_CAN_RDONLY);
4579eda14cbcSMatt Macy 		}
4580eda14cbcSMatt Macy 
4581eda14cbcSMatt Macy 		/*
4582eda14cbcSMatt Macy 		 * If the state is SPA_LOAD_TRYIMPORT, our objective is
4583eda14cbcSMatt Macy 		 * twofold: to determine whether the pool is available for
4584eda14cbcSMatt Macy 		 * import in read-write mode and (if it is not) whether the
4585eda14cbcSMatt Macy 		 * pool is available for import in read-only mode. If the pool
4586eda14cbcSMatt Macy 		 * is available for import in read-write mode, it is displayed
4587eda14cbcSMatt Macy 		 * as available in userland; if it is not available for import
4588eda14cbcSMatt Macy 		 * in read-only mode, it is displayed as unavailable in
4589eda14cbcSMatt Macy 		 * userland. If the pool is available for import in read-only
4590eda14cbcSMatt Macy 		 * mode but not read-write mode, it is displayed as unavailable
4591eda14cbcSMatt Macy 		 * in userland with a special note that the pool is actually
4592eda14cbcSMatt Macy 		 * available for open in read-only mode.
4593eda14cbcSMatt Macy 		 *
4594eda14cbcSMatt Macy 		 * As a result, if the state is SPA_LOAD_TRYIMPORT and we are
4595eda14cbcSMatt Macy 		 * missing a feature for write, we must first determine whether
4596eda14cbcSMatt Macy 		 * the pool can be opened read-only before returning to
4597eda14cbcSMatt Macy 		 * userland in order to know whether to display the
4598eda14cbcSMatt Macy 		 * abovementioned note.
4599eda14cbcSMatt Macy 		 */
4600eda14cbcSMatt Macy 		if (missing_feat_read || (*missing_feat_writep &&
4601eda14cbcSMatt Macy 		    spa_writeable(spa))) {
4602eda14cbcSMatt Macy 			spa_load_failed(spa, "pool uses unsupported features");
4603eda14cbcSMatt Macy 			return (spa_vdev_err(rvd, VDEV_AUX_UNSUP_FEAT,
4604eda14cbcSMatt Macy 			    ENOTSUP));
4605eda14cbcSMatt Macy 		}
4606eda14cbcSMatt Macy 
4607eda14cbcSMatt Macy 		/*
4608eda14cbcSMatt Macy 		 * Load refcounts for ZFS features from disk into an in-memory
4609eda14cbcSMatt Macy 		 * cache during SPA initialization.
4610eda14cbcSMatt Macy 		 */
4611eda14cbcSMatt Macy 		for (spa_feature_t i = 0; i < SPA_FEATURES; i++) {
4612eda14cbcSMatt Macy 			uint64_t refcount;
4613eda14cbcSMatt Macy 
4614eda14cbcSMatt Macy 			error = feature_get_refcount_from_disk(spa,
4615eda14cbcSMatt Macy 			    &spa_feature_table[i], &refcount);
4616eda14cbcSMatt Macy 			if (error == 0) {
4617eda14cbcSMatt Macy 				spa->spa_feat_refcount_cache[i] = refcount;
4618eda14cbcSMatt Macy 			} else if (error == ENOTSUP) {
4619eda14cbcSMatt Macy 				spa->spa_feat_refcount_cache[i] =
4620eda14cbcSMatt Macy 				    SPA_FEATURE_DISABLED;
4621eda14cbcSMatt Macy 			} else {
4622eda14cbcSMatt Macy 				spa_load_failed(spa, "error getting refcount "
4623eda14cbcSMatt Macy 				    "for feature %s [error=%d]",
4624eda14cbcSMatt Macy 				    spa_feature_table[i].fi_guid, error);
4625eda14cbcSMatt Macy 				return (spa_vdev_err(rvd,
4626eda14cbcSMatt Macy 				    VDEV_AUX_CORRUPT_DATA, EIO));
4627eda14cbcSMatt Macy 			}
4628eda14cbcSMatt Macy 		}
4629eda14cbcSMatt Macy 	}
4630eda14cbcSMatt Macy 
4631eda14cbcSMatt Macy 	if (spa_feature_is_active(spa, SPA_FEATURE_ENABLED_TXG)) {
4632eda14cbcSMatt Macy 		if (spa_dir_prop(spa, DMU_POOL_FEATURE_ENABLED_TXG,
4633eda14cbcSMatt Macy 		    &spa->spa_feat_enabled_txg_obj, B_TRUE) != 0)
4634eda14cbcSMatt Macy 			return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
4635eda14cbcSMatt Macy 	}
4636eda14cbcSMatt Macy 
4637eda14cbcSMatt Macy 	/*
4638eda14cbcSMatt Macy 	 * Encryption was added before bookmark_v2, even though bookmark_v2
4639eda14cbcSMatt Macy 	 * is now a dependency. If this pool has encryption enabled without
4640eda14cbcSMatt Macy 	 * bookmark_v2, trigger an errata message.
4641eda14cbcSMatt Macy 	 */
4642eda14cbcSMatt Macy 	if (spa_feature_is_enabled(spa, SPA_FEATURE_ENCRYPTION) &&
4643eda14cbcSMatt Macy 	    !spa_feature_is_enabled(spa, SPA_FEATURE_BOOKMARK_V2)) {
4644eda14cbcSMatt Macy 		spa->spa_errata = ZPOOL_ERRATA_ZOL_8308_ENCRYPTION;
4645eda14cbcSMatt Macy 	}
4646eda14cbcSMatt Macy 
4647eda14cbcSMatt Macy 	return (0);
4648eda14cbcSMatt Macy }
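/*
 * Illustrative sketch (not part of the original source): a tryimport caller
 * is expected to read the nvlists populated by spa_ld_check_features() from
 * the returned load info, along the lines of:
 *
 *	nvlist_t *unsup;
 *	if (nvlist_lookup_nvlist(load_info, ZPOOL_CONFIG_UNSUP_FEAT,
 *	    &unsup) == 0) {
 *		if (nvlist_exists(load_info, ZPOOL_CONFIG_CAN_RDONLY))
 *			note_pool_importable_readonly_only();
 *		else
 *			note_pool_not_importable(unsup);
 *	}
 *
 * "load_info" and the two note_*() helpers are hypothetical; they stand in
 * for whatever reporting the caller performs.
 */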
4649eda14cbcSMatt Macy 
4650eda14cbcSMatt Macy static int
4651eda14cbcSMatt Macy spa_ld_load_special_directories(spa_t *spa)
4652eda14cbcSMatt Macy {
4653eda14cbcSMatt Macy 	int error = 0;
4654eda14cbcSMatt Macy 	vdev_t *rvd = spa->spa_root_vdev;
4655eda14cbcSMatt Macy 
4656eda14cbcSMatt Macy 	spa->spa_is_initializing = B_TRUE;
4657eda14cbcSMatt Macy 	error = dsl_pool_open(spa->spa_dsl_pool);
4658eda14cbcSMatt Macy 	spa->spa_is_initializing = B_FALSE;
4659eda14cbcSMatt Macy 	if (error != 0) {
4660eda14cbcSMatt Macy 		spa_load_failed(spa, "dsl_pool_open failed [error=%d]", error);
4661eda14cbcSMatt Macy 		return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
4662eda14cbcSMatt Macy 	}
4663eda14cbcSMatt Macy 
4664eda14cbcSMatt Macy 	return (0);
4665eda14cbcSMatt Macy }
4666eda14cbcSMatt Macy 
4667eda14cbcSMatt Macy static int
4668eda14cbcSMatt Macy spa_ld_get_props(spa_t *spa)
4669eda14cbcSMatt Macy {
4670eda14cbcSMatt Macy 	int error = 0;
4671eda14cbcSMatt Macy 	uint64_t obj;
4672eda14cbcSMatt Macy 	vdev_t *rvd = spa->spa_root_vdev;
4673eda14cbcSMatt Macy 
4674eda14cbcSMatt Macy 	/* Grab the checksum salt from the MOS. */
4675eda14cbcSMatt Macy 	error = zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
4676eda14cbcSMatt Macy 	    DMU_POOL_CHECKSUM_SALT, 1,
4677eda14cbcSMatt Macy 	    sizeof (spa->spa_cksum_salt.zcs_bytes),
4678eda14cbcSMatt Macy 	    spa->spa_cksum_salt.zcs_bytes);
4679eda14cbcSMatt Macy 	if (error == ENOENT) {
4680eda14cbcSMatt Macy 		/* Generate a new salt for subsequent use */
4681eda14cbcSMatt Macy 		(void) random_get_pseudo_bytes(spa->spa_cksum_salt.zcs_bytes,
4682eda14cbcSMatt Macy 		    sizeof (spa->spa_cksum_salt.zcs_bytes));
4683eda14cbcSMatt Macy 	} else if (error != 0) {
4684eda14cbcSMatt Macy 		spa_load_failed(spa, "unable to retrieve checksum salt from "
4685eda14cbcSMatt Macy 		    "MOS [error=%d]", error);
4686eda14cbcSMatt Macy 		return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
4687eda14cbcSMatt Macy 	}
4688eda14cbcSMatt Macy 
4689eda14cbcSMatt Macy 	if (spa_dir_prop(spa, DMU_POOL_SYNC_BPOBJ, &obj, B_TRUE) != 0)
4690eda14cbcSMatt Macy 		return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
4691eda14cbcSMatt Macy 	error = bpobj_open(&spa->spa_deferred_bpobj, spa->spa_meta_objset, obj);
4692eda14cbcSMatt Macy 	if (error != 0) {
4693eda14cbcSMatt Macy 		spa_load_failed(spa, "error opening deferred-frees bpobj "
4694eda14cbcSMatt Macy 		    "[error=%d]", error);
4695eda14cbcSMatt Macy 		return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
4696eda14cbcSMatt Macy 	}
4697eda14cbcSMatt Macy 
4698eda14cbcSMatt Macy 	/*
4699eda14cbcSMatt Macy 	 * Load the bit that tells us to use the new accounting function
4700eda14cbcSMatt Macy 	 * (raid-z deflation).  If we have an older pool, this will not
4701eda14cbcSMatt Macy 	 * be present.
4702eda14cbcSMatt Macy 	 */
4703eda14cbcSMatt Macy 	error = spa_dir_prop(spa, DMU_POOL_DEFLATE, &spa->spa_deflate, B_FALSE);
4704eda14cbcSMatt Macy 	if (error != 0 && error != ENOENT)
4705eda14cbcSMatt Macy 		return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
4706eda14cbcSMatt Macy 
4707eda14cbcSMatt Macy 	error = spa_dir_prop(spa, DMU_POOL_CREATION_VERSION,
4708eda14cbcSMatt Macy 	    &spa->spa_creation_version, B_FALSE);
4709eda14cbcSMatt Macy 	if (error != 0 && error != ENOENT)
4710eda14cbcSMatt Macy 		return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
4711eda14cbcSMatt Macy 
4712eda14cbcSMatt Macy 	/*
4713eda14cbcSMatt Macy 	 * Load the persistent error log.  If we have an older pool, this will
4714eda14cbcSMatt Macy 	 * not be present.
4715eda14cbcSMatt Macy 	 */
4716eda14cbcSMatt Macy 	error = spa_dir_prop(spa, DMU_POOL_ERRLOG_LAST, &spa->spa_errlog_last,
4717eda14cbcSMatt Macy 	    B_FALSE);
4718eda14cbcSMatt Macy 	if (error != 0 && error != ENOENT)
4719eda14cbcSMatt Macy 		return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
4720eda14cbcSMatt Macy 
4721eda14cbcSMatt Macy 	error = spa_dir_prop(spa, DMU_POOL_ERRLOG_SCRUB,
4722eda14cbcSMatt Macy 	    &spa->spa_errlog_scrub, B_FALSE);
4723eda14cbcSMatt Macy 	if (error != 0 && error != ENOENT)
4724eda14cbcSMatt Macy 		return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
4725eda14cbcSMatt Macy 
4726eda14cbcSMatt Macy 	/*
4727eda14cbcSMatt Macy 	 * Load the livelist deletion field. If a livelist is queued for
4728eda14cbcSMatt Macy 	 * deletion, indicate that in the spa
4729eda14cbcSMatt Macy 	 * deletion, indicate that in the spa.
4730eda14cbcSMatt Macy 	error = spa_dir_prop(spa, DMU_POOL_DELETED_CLONES,
4731eda14cbcSMatt Macy 	    &spa->spa_livelists_to_delete, B_FALSE);
4732eda14cbcSMatt Macy 	if (error != 0 && error != ENOENT)
4733eda14cbcSMatt Macy 		return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
4734eda14cbcSMatt Macy 
4735eda14cbcSMatt Macy 	/*
4736eda14cbcSMatt Macy 	 * Load the history object.  If we have an older pool, this
4737eda14cbcSMatt Macy 	 * will not be present.
4738eda14cbcSMatt Macy 	 */
4739eda14cbcSMatt Macy 	error = spa_dir_prop(spa, DMU_POOL_HISTORY, &spa->spa_history, B_FALSE);
4740eda14cbcSMatt Macy 	if (error != 0 && error != ENOENT)
4741eda14cbcSMatt Macy 		return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
4742eda14cbcSMatt Macy 
4743eda14cbcSMatt Macy 	/*
4744eda14cbcSMatt Macy 	 * Load the per-vdev ZAP map. If we have an older pool, this will not
4745eda14cbcSMatt Macy 	 * be present; in this case, defer its creation to a later time to
4746eda14cbcSMatt Macy 	 * avoid dirtying the MOS this early, outside of syncing context. See
4747eda14cbcSMatt Macy 	 * spa_sync_config_object.
4748eda14cbcSMatt Macy 	 */
4749eda14cbcSMatt Macy 
4750eda14cbcSMatt Macy 	/* The sentinel is only available in the MOS config. */
4751eda14cbcSMatt Macy 	nvlist_t *mos_config;
4752eda14cbcSMatt Macy 	if (load_nvlist(spa, spa->spa_config_object, &mos_config) != 0) {
4753eda14cbcSMatt Macy 		spa_load_failed(spa, "unable to retrieve MOS config");
4754eda14cbcSMatt Macy 		return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
4755eda14cbcSMatt Macy 	}
4756eda14cbcSMatt Macy 
4757eda14cbcSMatt Macy 	error = spa_dir_prop(spa, DMU_POOL_VDEV_ZAP_MAP,
4758eda14cbcSMatt Macy 	    &spa->spa_all_vdev_zaps, B_FALSE);
4759eda14cbcSMatt Macy 
4760eda14cbcSMatt Macy 	if (error == ENOENT) {
4761eda14cbcSMatt Macy 		VERIFY(!nvlist_exists(mos_config,
4762eda14cbcSMatt Macy 		    ZPOOL_CONFIG_HAS_PER_VDEV_ZAPS));
4763eda14cbcSMatt Macy 		spa->spa_avz_action = AVZ_ACTION_INITIALIZE;
4764eda14cbcSMatt Macy 		ASSERT0(vdev_count_verify_zaps(spa->spa_root_vdev));
4765eda14cbcSMatt Macy 	} else if (error != 0) {
4766a0b956f5SMartin Matuska 		nvlist_free(mos_config);
4767eda14cbcSMatt Macy 		return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
4768eda14cbcSMatt Macy 	} else if (!nvlist_exists(mos_config, ZPOOL_CONFIG_HAS_PER_VDEV_ZAPS)) {
4769eda14cbcSMatt Macy 		/*
4770eda14cbcSMatt Macy 		 * An older version of ZFS overwrote the sentinel value, so
4771eda14cbcSMatt Macy 		 * we have orphaned per-vdev ZAPs in the MOS. Defer their
4772eda14cbcSMatt Macy 		 * destruction to later; see spa_sync_config_object.
4773eda14cbcSMatt Macy 		 */
4774eda14cbcSMatt Macy 		spa->spa_avz_action = AVZ_ACTION_DESTROY;
4775eda14cbcSMatt Macy 		/*
4776eda14cbcSMatt Macy 		 * We're assuming that no vdevs have had their ZAPs created
4777eda14cbcSMatt Macy 		 * before this. Better be sure of it.
4778eda14cbcSMatt Macy 		 */
4779eda14cbcSMatt Macy 		ASSERT0(vdev_count_verify_zaps(spa->spa_root_vdev));
4780eda14cbcSMatt Macy 	}
4781eda14cbcSMatt Macy 	nvlist_free(mos_config);
4782eda14cbcSMatt Macy 
4783eda14cbcSMatt Macy 	spa->spa_delegation = zpool_prop_default_numeric(ZPOOL_PROP_DELEGATION);
4784eda14cbcSMatt Macy 
4785eda14cbcSMatt Macy 	error = spa_dir_prop(spa, DMU_POOL_PROPS, &spa->spa_pool_props_object,
4786eda14cbcSMatt Macy 	    B_FALSE);
4787eda14cbcSMatt Macy 	if (error && error != ENOENT)
4788eda14cbcSMatt Macy 		return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
4789eda14cbcSMatt Macy 
4790eda14cbcSMatt Macy 	if (error == 0) {
47911f88aa09SMartin Matuska 		uint64_t autoreplace = 0;
4792eda14cbcSMatt Macy 
4793eda14cbcSMatt Macy 		spa_prop_find(spa, ZPOOL_PROP_BOOTFS, &spa->spa_bootfs);
4794eda14cbcSMatt Macy 		spa_prop_find(spa, ZPOOL_PROP_AUTOREPLACE, &autoreplace);
4795eda14cbcSMatt Macy 		spa_prop_find(spa, ZPOOL_PROP_DELEGATION, &spa->spa_delegation);
4796eda14cbcSMatt Macy 		spa_prop_find(spa, ZPOOL_PROP_FAILUREMODE, &spa->spa_failmode);
4797eda14cbcSMatt Macy 		spa_prop_find(spa, ZPOOL_PROP_AUTOEXPAND, &spa->spa_autoexpand);
4798ce4dcb97SMartin Matuska 		spa_prop_find(spa, ZPOOL_PROP_DEDUP_TABLE_QUOTA,
4799ce4dcb97SMartin Matuska 		    &spa->spa_dedup_table_quota);
4800eda14cbcSMatt Macy 		spa_prop_find(spa, ZPOOL_PROP_MULTIHOST, &spa->spa_multihost);
4801eda14cbcSMatt Macy 		spa_prop_find(spa, ZPOOL_PROP_AUTOTRIM, &spa->spa_autotrim);
4802eda14cbcSMatt Macy 		spa->spa_autoreplace = (autoreplace != 0);
4803eda14cbcSMatt Macy 	}
4804eda14cbcSMatt Macy 
4805eda14cbcSMatt Macy 	/*
4806eda14cbcSMatt Macy 	 * If we are importing a pool with missing top-level vdevs,
4807eda14cbcSMatt Macy 	 * we enforce that the pool doesn't panic or get suspended on
4808eda14cbcSMatt Macy 	 * error since the likelihood of missing data is extremely high.
4809eda14cbcSMatt Macy 	 */
4810eda14cbcSMatt Macy 	if (spa->spa_missing_tvds > 0 &&
4811eda14cbcSMatt Macy 	    spa->spa_failmode != ZIO_FAILURE_MODE_CONTINUE &&
4812eda14cbcSMatt Macy 	    spa->spa_load_state != SPA_LOAD_TRYIMPORT) {
4813eda14cbcSMatt Macy 		spa_load_note(spa, "forcing failmode to 'continue' "
4814eda14cbcSMatt Macy 		    "as some top level vdevs are missing");
4815eda14cbcSMatt Macy 		spa->spa_failmode = ZIO_FAILURE_MODE_CONTINUE;
4816eda14cbcSMatt Macy 	}
4817eda14cbcSMatt Macy 
4818eda14cbcSMatt Macy 	return (0);
4819eda14cbcSMatt Macy }
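/*
 * Illustrative note (not part of the original source): most lookups in
 * spa_ld_get_props() follow the same "optional MOS property" idiom, where
 * ENOENT simply means the pool predates the property and is not an error:
 *
 *	error = spa_dir_prop(spa, DMU_POOL_HISTORY, &spa->spa_history,
 *	    B_FALSE);
 *	if (error != 0 && error != ENOENT)
 *		return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
 *
 * Only entries the pool must have (e.g. DMU_POOL_SYNC_BPOBJ above) treat a
 * missing object as corruption.
 */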
4820eda14cbcSMatt Macy 
4821eda14cbcSMatt Macy static int
4822eda14cbcSMatt Macy spa_ld_open_aux_vdevs(spa_t *spa, spa_import_type_t type)
4823eda14cbcSMatt Macy {
4824eda14cbcSMatt Macy 	int error = 0;
4825eda14cbcSMatt Macy 	vdev_t *rvd = spa->spa_root_vdev;
4826eda14cbcSMatt Macy 
4827eda14cbcSMatt Macy 	/*
4828eda14cbcSMatt Macy 	 * If we're assembling the pool from the split-off vdevs of
4829eda14cbcSMatt Macy 	 * an existing pool, we don't want to attach the spares & cache
4830eda14cbcSMatt Macy 	 * devices.
4831eda14cbcSMatt Macy 	 */
4832eda14cbcSMatt Macy 
4833eda14cbcSMatt Macy 	/*
4834eda14cbcSMatt Macy 	 * Load any hot spares for this pool.
4835eda14cbcSMatt Macy 	 */
4836eda14cbcSMatt Macy 	error = spa_dir_prop(spa, DMU_POOL_SPARES, &spa->spa_spares.sav_object,
4837eda14cbcSMatt Macy 	    B_FALSE);
4838eda14cbcSMatt Macy 	if (error != 0 && error != ENOENT)
4839eda14cbcSMatt Macy 		return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
4840eda14cbcSMatt Macy 	if (error == 0 && type != SPA_IMPORT_ASSEMBLE) {
4841eda14cbcSMatt Macy 		ASSERT(spa_version(spa) >= SPA_VERSION_SPARES);
4842eda14cbcSMatt Macy 		if (load_nvlist(spa, spa->spa_spares.sav_object,
4843eda14cbcSMatt Macy 		    &spa->spa_spares.sav_config) != 0) {
4844eda14cbcSMatt Macy 			spa_load_failed(spa, "error loading spares nvlist");
4845eda14cbcSMatt Macy 			return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
4846eda14cbcSMatt Macy 		}
4847eda14cbcSMatt Macy 
4848eda14cbcSMatt Macy 		spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
4849eda14cbcSMatt Macy 		spa_load_spares(spa);
4850eda14cbcSMatt Macy 		spa_config_exit(spa, SCL_ALL, FTAG);
4851eda14cbcSMatt Macy 	} else if (error == 0) {
4852eda14cbcSMatt Macy 		spa->spa_spares.sav_sync = B_TRUE;
4853eda14cbcSMatt Macy 	}
4854eda14cbcSMatt Macy 
4855eda14cbcSMatt Macy 	/*
4856eda14cbcSMatt Macy 	 * Load any level 2 ARC devices for this pool.
4857eda14cbcSMatt Macy 	 */
4858eda14cbcSMatt Macy 	error = spa_dir_prop(spa, DMU_POOL_L2CACHE,
4859eda14cbcSMatt Macy 	    &spa->spa_l2cache.sav_object, B_FALSE);
4860eda14cbcSMatt Macy 	if (error != 0 && error != ENOENT)
4861eda14cbcSMatt Macy 		return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
4862eda14cbcSMatt Macy 	if (error == 0 && type != SPA_IMPORT_ASSEMBLE) {
4863eda14cbcSMatt Macy 		ASSERT(spa_version(spa) >= SPA_VERSION_L2CACHE);
4864eda14cbcSMatt Macy 		if (load_nvlist(spa, spa->spa_l2cache.sav_object,
4865eda14cbcSMatt Macy 		    &spa->spa_l2cache.sav_config) != 0) {
4866eda14cbcSMatt Macy 			spa_load_failed(spa, "error loading l2cache nvlist");
4867eda14cbcSMatt Macy 			return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
4868eda14cbcSMatt Macy 		}
4869eda14cbcSMatt Macy 
4870eda14cbcSMatt Macy 		spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
4871eda14cbcSMatt Macy 		spa_load_l2cache(spa);
4872eda14cbcSMatt Macy 		spa_config_exit(spa, SCL_ALL, FTAG);
4873eda14cbcSMatt Macy 	} else if (error == 0) {
4874eda14cbcSMatt Macy 		spa->spa_l2cache.sav_sync = B_TRUE;
4875eda14cbcSMatt Macy 	}
4876eda14cbcSMatt Macy 
4877eda14cbcSMatt Macy 	return (0);
4878eda14cbcSMatt Macy }
4879eda14cbcSMatt Macy 
4880eda14cbcSMatt Macy static int
4881eda14cbcSMatt Macy spa_ld_load_vdev_metadata(spa_t *spa)
4882eda14cbcSMatt Macy {
4883eda14cbcSMatt Macy 	int error = 0;
4884eda14cbcSMatt Macy 	vdev_t *rvd = spa->spa_root_vdev;
4885eda14cbcSMatt Macy 
4886eda14cbcSMatt Macy 	/*
4887eda14cbcSMatt Macy 	 * If the 'multihost' property is set, then never allow a pool to
4888eda14cbcSMatt Macy 	 * be imported when the system hostid is zero.  The exception to
4889eda14cbcSMatt Macy 	 * this rule is zdb which is always allowed to access pools.
4890eda14cbcSMatt Macy 	 */
4891eda14cbcSMatt Macy 	if (spa_multihost(spa) && spa_get_hostid(spa) == 0 &&
4892eda14cbcSMatt Macy 	    (spa->spa_import_flags & ZFS_IMPORT_SKIP_MMP) == 0) {
4893eda14cbcSMatt Macy 		fnvlist_add_uint64(spa->spa_load_info,
4894eda14cbcSMatt Macy 		    ZPOOL_CONFIG_MMP_STATE, MMP_STATE_NO_HOSTID);
4895eda14cbcSMatt Macy 		return (spa_vdev_err(rvd, VDEV_AUX_ACTIVE, EREMOTEIO));
4896eda14cbcSMatt Macy 	}
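	/*
	 * Illustrative sketch (not part of the original source): the
	 * MMP_STATE_NO_HOSTID value added above is what lets userspace turn
	 * the bare EREMOTEIO into a useful message, e.g.:
	 *
	 *	uint64_t mmp_state;
	 *	if (nvlist_lookup_uint64(load_info, ZPOOL_CONFIG_MMP_STATE,
	 *	    &mmp_state) == 0 && mmp_state == MMP_STATE_NO_HOSTID)
	 *		(void) printf("set a system hostid to import this "
	 *		    "multihost pool\n");
	 *
	 * "load_info" is a stand-in for the nvlist the caller receives.
	 */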
4897eda14cbcSMatt Macy 
4898eda14cbcSMatt Macy 	/*
4899eda14cbcSMatt Macy 	 * If the 'autoreplace' property is set, then post a resource notifying
4900eda14cbcSMatt Macy 	 * the ZFS DE that it should not issue any faults for unopenable
4901eda14cbcSMatt Macy 	 * devices.  We also iterate over the vdevs, and post a sysevent for any
4902eda14cbcSMatt Macy 	 * unopenable vdevs so that the normal autoreplace handler can take
4903eda14cbcSMatt Macy 	 * over.
4904eda14cbcSMatt Macy 	 */
4905eda14cbcSMatt Macy 	if (spa->spa_autoreplace && spa->spa_load_state != SPA_LOAD_TRYIMPORT) {
4906eda14cbcSMatt Macy 		spa_check_removed(spa->spa_root_vdev);
4907eda14cbcSMatt Macy 		/*
4908eda14cbcSMatt Macy 		 * For the import case, this is done in spa_import(), because
4909eda14cbcSMatt Macy 		 * at this point we're using the spare definitions from
4910eda14cbcSMatt Macy 		 * the MOS config, not necessarily from the userland config.
4911eda14cbcSMatt Macy 		 */
4912eda14cbcSMatt Macy 		if (spa->spa_load_state != SPA_LOAD_IMPORT) {
4913eda14cbcSMatt Macy 			spa_aux_check_removed(&spa->spa_spares);
4914eda14cbcSMatt Macy 			spa_aux_check_removed(&spa->spa_l2cache);
4915eda14cbcSMatt Macy 		}
4916eda14cbcSMatt Macy 	}
4917eda14cbcSMatt Macy 
4918eda14cbcSMatt Macy 	/*
4919eda14cbcSMatt Macy 	 * Load the vdev metadata such as metaslabs, DTLs, spacemap object, etc.
4920eda14cbcSMatt Macy 	 */
4921eda14cbcSMatt Macy 	error = vdev_load(rvd);
4922eda14cbcSMatt Macy 	if (error != 0) {
4923eda14cbcSMatt Macy 		spa_load_failed(spa, "vdev_load failed [error=%d]", error);
4924eda14cbcSMatt Macy 		return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, error));
4925eda14cbcSMatt Macy 	}
4926eda14cbcSMatt Macy 
4927eda14cbcSMatt Macy 	error = spa_ld_log_spacemaps(spa);
4928eda14cbcSMatt Macy 	if (error != 0) {
4929716fd348SMartin Matuska 		spa_load_failed(spa, "spa_ld_log_spacemaps failed [error=%d]",
4930eda14cbcSMatt Macy 		    error);
4931eda14cbcSMatt Macy 		return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, error));
4932eda14cbcSMatt Macy 	}
4933eda14cbcSMatt Macy 
4934eda14cbcSMatt Macy 	/*
4935eda14cbcSMatt Macy 	 * Propagate the leaf DTLs we just loaded all the way up the vdev tree.
4936eda14cbcSMatt Macy 	 */
4937eda14cbcSMatt Macy 	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
4938eda14cbcSMatt Macy 	vdev_dtl_reassess(rvd, 0, 0, B_FALSE, B_FALSE);
4939eda14cbcSMatt Macy 	spa_config_exit(spa, SCL_ALL, FTAG);
4940eda14cbcSMatt Macy 
4941eda14cbcSMatt Macy 	return (0);
4942eda14cbcSMatt Macy }
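/*
 * Illustrative note (an assumption about vdev_dtl_reassess() above, not
 * taken from this file): DTLs record txg ranges a device may be missing
 * data for, and reassessment rolls leaf DTLs up the tree.  For example, if
 * one side of a mirror was absent for txgs [100, 200] while the other side
 * stayed healthy, the mirror itself is assumed to be missing nothing, since
 * every block in that range is still readable from the healthy child; only
 * ranges missing from all children would propagate to the parent.
 */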
4943eda14cbcSMatt Macy 
4944eda14cbcSMatt Macy static int
4945eda14cbcSMatt Macy spa_ld_load_dedup_tables(spa_t *spa)
4946eda14cbcSMatt Macy {
4947eda14cbcSMatt Macy 	int error = 0;
4948eda14cbcSMatt Macy 	vdev_t *rvd = spa->spa_root_vdev;
4949eda14cbcSMatt Macy 
4950eda14cbcSMatt Macy 	error = ddt_load(spa);
4951eda14cbcSMatt Macy 	if (error != 0) {
4952eda14cbcSMatt Macy 		spa_load_failed(spa, "ddt_load failed [error=%d]", error);
4953eda14cbcSMatt Macy 		return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
4954eda14cbcSMatt Macy 	}
4955eda14cbcSMatt Macy 
4956eda14cbcSMatt Macy 	return (0);
4957eda14cbcSMatt Macy }
4958eda14cbcSMatt Macy 
4959eda14cbcSMatt Macy static int
49602a58b312SMartin Matuska spa_ld_load_brt(spa_t *spa)
49612a58b312SMartin Matuska {
49622a58b312SMartin Matuska 	int error = 0;
49632a58b312SMartin Matuska 	vdev_t *rvd = spa->spa_root_vdev;
49642a58b312SMartin Matuska 
49652a58b312SMartin Matuska 	error = brt_load(spa);
49662a58b312SMartin Matuska 	if (error != 0) {
49672a58b312SMartin Matuska 		spa_load_failed(spa, "brt_load failed [error=%d]", error);
49682a58b312SMartin Matuska 		return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
49692a58b312SMartin Matuska 	}
49702a58b312SMartin Matuska 
49712a58b312SMartin Matuska 	return (0);
49722a58b312SMartin Matuska }
49732a58b312SMartin Matuska 
49742a58b312SMartin Matuska static int
4975a0b956f5SMartin Matuska spa_ld_verify_logs(spa_t *spa, spa_import_type_t type, const char **ereport)
4976eda14cbcSMatt Macy {
4977eda14cbcSMatt Macy 	vdev_t *rvd = spa->spa_root_vdev;
4978eda14cbcSMatt Macy 
4979eda14cbcSMatt Macy 	if (type != SPA_IMPORT_ASSEMBLE && spa_writeable(spa)) {
4980eda14cbcSMatt Macy 		boolean_t missing = spa_check_logs(spa);
4981eda14cbcSMatt Macy 		if (missing) {
4982eda14cbcSMatt Macy 			if (spa->spa_missing_tvds != 0) {
4983eda14cbcSMatt Macy 				spa_load_note(spa, "spa_check_logs failed "
4984eda14cbcSMatt Macy 				    "so dropping the logs");
4985eda14cbcSMatt Macy 			} else {
4986eda14cbcSMatt Macy 				*ereport = FM_EREPORT_ZFS_LOG_REPLAY;
4987eda14cbcSMatt Macy 				spa_load_failed(spa, "spa_check_logs failed");
4988eda14cbcSMatt Macy 				return (spa_vdev_err(rvd, VDEV_AUX_BAD_LOG,
4989eda14cbcSMatt Macy 				    ENXIO));
4990eda14cbcSMatt Macy 			}
4991eda14cbcSMatt Macy 		}
4992eda14cbcSMatt Macy 	}
4993eda14cbcSMatt Macy 
4994eda14cbcSMatt Macy 	return (0);
4995eda14cbcSMatt Macy }
4996eda14cbcSMatt Macy 
4997eda14cbcSMatt Macy static int
4998eda14cbcSMatt Macy spa_ld_verify_pool_data(spa_t *spa)
4999eda14cbcSMatt Macy {
5000eda14cbcSMatt Macy 	int error = 0;
5001eda14cbcSMatt Macy 	vdev_t *rvd = spa->spa_root_vdev;
5002eda14cbcSMatt Macy 
5003eda14cbcSMatt Macy 	/*
5004eda14cbcSMatt Macy 	 * We've successfully opened the pool, verify that we're ready
5005eda14cbcSMatt Macy 	 * to start pushing transactions.
5006eda14cbcSMatt Macy 	 */
5007eda14cbcSMatt Macy 	if (spa->spa_load_state != SPA_LOAD_TRYIMPORT) {
5008eda14cbcSMatt Macy 		error = spa_load_verify(spa);
5009eda14cbcSMatt Macy 		if (error != 0) {
5010eda14cbcSMatt Macy 			spa_load_failed(spa, "spa_load_verify failed "
5011eda14cbcSMatt Macy 			    "[error=%d]", error);
5012eda14cbcSMatt Macy 			return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA,
5013eda14cbcSMatt Macy 			    error));
5014eda14cbcSMatt Macy 		}
5015eda14cbcSMatt Macy 	}
5016eda14cbcSMatt Macy 
5017eda14cbcSMatt Macy 	return (0);
5018eda14cbcSMatt Macy }
5019eda14cbcSMatt Macy 
5020eda14cbcSMatt Macy static void
5021eda14cbcSMatt Macy spa_ld_claim_log_blocks(spa_t *spa)
5022eda14cbcSMatt Macy {
5023eda14cbcSMatt Macy 	dmu_tx_t *tx;
5024eda14cbcSMatt Macy 	dsl_pool_t *dp = spa_get_dsl(spa);
5025eda14cbcSMatt Macy 
5026eda14cbcSMatt Macy 	/*
5027eda14cbcSMatt Macy 	 * Claim log blocks that haven't been committed yet.
5028eda14cbcSMatt Macy 	 * This must all happen in a single txg.
5029eda14cbcSMatt Macy 	 * Note: spa_claim_max_txg is updated by spa_claim_notify(),
5030eda14cbcSMatt Macy 	 * invoked from zil_claim_log_block()'s i/o done callback.
5031eda14cbcSMatt Macy 	 * Price of rollback is that we abandon the log.
5032eda14cbcSMatt Macy 	 */
5033eda14cbcSMatt Macy 	spa->spa_claiming = B_TRUE;
5034eda14cbcSMatt Macy 
5035eda14cbcSMatt Macy 	tx = dmu_tx_create_assigned(dp, spa_first_txg(spa));
5036eda14cbcSMatt Macy 	(void) dmu_objset_find_dp(dp, dp->dp_root_dir_obj,
5037eda14cbcSMatt Macy 	    zil_claim, tx, DS_FIND_CHILDREN);
5038eda14cbcSMatt Macy 	dmu_tx_commit(tx);
5039eda14cbcSMatt Macy 
5040eda14cbcSMatt Macy 	spa->spa_claiming = B_FALSE;
5041eda14cbcSMatt Macy 
5042eda14cbcSMatt Macy 	spa_set_log_state(spa, SPA_LOG_GOOD);
5043eda14cbcSMatt Macy }
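/*
 * Illustrative note (an assumption about the routine above, not taken from
 * this file): the claim lands in the txg returned by spa_first_txg()
 * because that is the first txg the pool will sync after load, and every
 * intent-log block that still needs replay has to be accounted for before
 * anything else is written.  dmu_tx_create_assigned() is the mechanism used
 * when dirtying data in a specific, already-chosen txg rather than letting
 * the normal dmu_tx_assign() path pick one.
 */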
5044eda14cbcSMatt Macy 
5045eda14cbcSMatt Macy static void
5046eda14cbcSMatt Macy spa_ld_check_for_config_update(spa_t *spa, uint64_t config_cache_txg,
5047eda14cbcSMatt Macy     boolean_t update_config_cache)
5048eda14cbcSMatt Macy {
5049eda14cbcSMatt Macy 	vdev_t *rvd = spa->spa_root_vdev;
5050eda14cbcSMatt Macy 	int need_update = B_FALSE;
5051eda14cbcSMatt Macy 
5052eda14cbcSMatt Macy 	/*
5053eda14cbcSMatt Macy 	 * If the config cache is stale, or we have uninitialized
5054eda14cbcSMatt Macy 	 * metaslabs (see spa_vdev_add()), then update the config.
5055eda14cbcSMatt Macy 	 *
5056eda14cbcSMatt Macy 	 * If this is a verbatim import, trust the current
5057eda14cbcSMatt Macy 	 * in-core spa_config and update the disk labels.
5058eda14cbcSMatt Macy 	 */
5059eda14cbcSMatt Macy 	if (update_config_cache || config_cache_txg != spa->spa_config_txg ||
5060eda14cbcSMatt Macy 	    spa->spa_load_state == SPA_LOAD_IMPORT ||
5061eda14cbcSMatt Macy 	    spa->spa_load_state == SPA_LOAD_RECOVER ||
5062eda14cbcSMatt Macy 	    (spa->spa_import_flags & ZFS_IMPORT_VERBATIM))
5063eda14cbcSMatt Macy 		need_update = B_TRUE;
5064eda14cbcSMatt Macy 
5065eda14cbcSMatt Macy 	for (int c = 0; c < rvd->vdev_children; c++)
5066eda14cbcSMatt Macy 		if (rvd->vdev_child[c]->vdev_ms_array == 0)
5067eda14cbcSMatt Macy 			need_update = B_TRUE;
5068eda14cbcSMatt Macy 
5069eda14cbcSMatt Macy 	/*
5070eda14cbcSMatt Macy 	 * Update the config cache asynchronously in case we're the
5071eda14cbcSMatt Macy 	 * root pool, in which case the config cache isn't writable yet.
5072eda14cbcSMatt Macy 	 */
5073eda14cbcSMatt Macy 	if (need_update)
5074eda14cbcSMatt Macy 		spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE);
5075eda14cbcSMatt Macy }
5076eda14cbcSMatt Macy 
5077eda14cbcSMatt Macy static void
5078eda14cbcSMatt Macy spa_ld_prepare_for_reload(spa_t *spa)
5079eda14cbcSMatt Macy {
5080eda14cbcSMatt Macy 	spa_mode_t mode = spa->spa_mode;
5081eda14cbcSMatt Macy 	int async_suspended = spa->spa_async_suspended;
5082eda14cbcSMatt Macy 
5083eda14cbcSMatt Macy 	spa_unload(spa);
5084eda14cbcSMatt Macy 	spa_deactivate(spa);
5085eda14cbcSMatt Macy 	spa_activate(spa, mode);
5086eda14cbcSMatt Macy 
5087eda14cbcSMatt Macy 	/*
5088eda14cbcSMatt Macy 	 * We save the value of spa_async_suspended as it gets reset to 0 by
5089eda14cbcSMatt Macy 	 * spa_unload(). We want to restore it to its original value before
5090eda14cbcSMatt Macy 	 * returning, as we might call spa_async_resume() later.
5091eda14cbcSMatt Macy 	 */
5092eda14cbcSMatt Macy 	spa->spa_async_suspended = async_suspended;
5093eda14cbcSMatt Macy }
5094eda14cbcSMatt Macy 
5095eda14cbcSMatt Macy static int
5096eda14cbcSMatt Macy spa_ld_read_checkpoint_txg(spa_t *spa)
5097eda14cbcSMatt Macy {
5098eda14cbcSMatt Macy 	uberblock_t checkpoint;
5099eda14cbcSMatt Macy 	int error = 0;
5100eda14cbcSMatt Macy 
5101eda14cbcSMatt Macy 	ASSERT0(spa->spa_checkpoint_txg);
51020d4ad640SMartin Matuska 	ASSERT(MUTEX_HELD(&spa_namespace_lock) ||
51030d4ad640SMartin Matuska 	    spa->spa_load_thread == curthread);
5104eda14cbcSMatt Macy 
5105eda14cbcSMatt Macy 	error = zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
5106eda14cbcSMatt Macy 	    DMU_POOL_ZPOOL_CHECKPOINT, sizeof (uint64_t),
5107eda14cbcSMatt Macy 	    sizeof (uberblock_t) / sizeof (uint64_t), &checkpoint);
5108eda14cbcSMatt Macy 
5109eda14cbcSMatt Macy 	if (error == ENOENT)
5110eda14cbcSMatt Macy 		return (0);
5111eda14cbcSMatt Macy 
5112eda14cbcSMatt Macy 	if (error != 0)
5113eda14cbcSMatt Macy 		return (error);
5114eda14cbcSMatt Macy 
5115eda14cbcSMatt Macy 	ASSERT3U(checkpoint.ub_txg, !=, 0);
5116eda14cbcSMatt Macy 	ASSERT3U(checkpoint.ub_checkpoint_txg, !=, 0);
5117eda14cbcSMatt Macy 	ASSERT3U(checkpoint.ub_timestamp, !=, 0);
5118eda14cbcSMatt Macy 	spa->spa_checkpoint_txg = checkpoint.ub_txg;
5119eda14cbcSMatt Macy 	spa->spa_checkpoint_info.sci_timestamp = checkpoint.ub_timestamp;
5120eda14cbcSMatt Macy 
5121eda14cbcSMatt Macy 	return (0);
5122eda14cbcSMatt Macy }
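/*
 * Illustrative sketch (not part of the original source): after the step
 * above, "does this pool have a checkpoint" reduces to a non-zero check on
 * the cached value, e.g.:
 *
 *	if (spa->spa_checkpoint_txg != 0)
 *		zfs_dbgmsg("pool has a checkpoint at txg %llu",
 *		    (u_longlong_t)spa->spa_checkpoint_txg);
 *
 * zfs_dbgmsg() is only used here as a familiar logging stand-in.
 */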
5123eda14cbcSMatt Macy 
5124eda14cbcSMatt Macy static int
5125eda14cbcSMatt Macy spa_ld_mos_init(spa_t *spa, spa_import_type_t type)
5126eda14cbcSMatt Macy {
5127eda14cbcSMatt Macy 	int error = 0;
5128eda14cbcSMatt Macy 
5129eda14cbcSMatt Macy 	ASSERT(MUTEX_HELD(&spa_namespace_lock));
5130eda14cbcSMatt Macy 	ASSERT(spa->spa_config_source != SPA_CONFIG_SRC_NONE);
5131eda14cbcSMatt Macy 
5132eda14cbcSMatt Macy 	/*
5133eda14cbcSMatt Macy 	 * Never trust the config that is provided unless we are assembling
5134eda14cbcSMatt Macy 	 * a pool following a split.
5135eda14cbcSMatt Macy 	 * This means don't trust blkptrs and the vdev tree in general. This
5136eda14cbcSMatt Macy 	 * also effectively puts the spa in read-only mode since
5137eda14cbcSMatt Macy 	 * spa_writeable() checks for spa_trust_config to be true.
5138eda14cbcSMatt Macy 	 * We will later load a trusted config from the MOS.
5139eda14cbcSMatt Macy 	 */
5140eda14cbcSMatt Macy 	if (type != SPA_IMPORT_ASSEMBLE)
5141eda14cbcSMatt Macy 		spa->spa_trust_config = B_FALSE;
5142eda14cbcSMatt Macy 
5143eda14cbcSMatt Macy 	/*
5144eda14cbcSMatt Macy 	 * Parse the config provided to create a vdev tree.
5145eda14cbcSMatt Macy 	 */
5146eda14cbcSMatt Macy 	error = spa_ld_parse_config(spa, type);
5147eda14cbcSMatt Macy 	if (error != 0)
5148eda14cbcSMatt Macy 		return (error);
5149eda14cbcSMatt Macy 
5150eda14cbcSMatt Macy 	spa_import_progress_add(spa);
5151eda14cbcSMatt Macy 
5152eda14cbcSMatt Macy 	/*
5153eda14cbcSMatt Macy 	 * Now that we have the vdev tree, try to open each vdev. This involves
5154eda14cbcSMatt Macy 	 * opening the underlying physical device, retrieving its geometry and
5155eda14cbcSMatt Macy 	 * probing the vdev with a dummy I/O. The state of each vdev will be set
5156eda14cbcSMatt Macy 	 * based on the success of those operations. After this we'll be ready
5157eda14cbcSMatt Macy 	 * to read from the vdevs.
5158eda14cbcSMatt Macy 	 */
5159eda14cbcSMatt Macy 	error = spa_ld_open_vdevs(spa);
5160eda14cbcSMatt Macy 	if (error != 0)
5161eda14cbcSMatt Macy 		return (error);
5162eda14cbcSMatt Macy 
5163eda14cbcSMatt Macy 	/*
5164eda14cbcSMatt Macy 	 * Read the label of each vdev and make sure that the GUIDs stored
5165eda14cbcSMatt Macy 	 * there match the GUIDs in the config provided.
5166eda14cbcSMatt Macy 	 * If we're assembling a new pool that's been split off from an
5167eda14cbcSMatt Macy 	 * existing pool, the labels haven't yet been updated so we skip
5168eda14cbcSMatt Macy 	 * validation for now.
5169eda14cbcSMatt Macy 	 */
5170eda14cbcSMatt Macy 	if (type != SPA_IMPORT_ASSEMBLE) {
5171eda14cbcSMatt Macy 		error = spa_ld_validate_vdevs(spa);
5172eda14cbcSMatt Macy 		if (error != 0)
5173eda14cbcSMatt Macy 			return (error);
5174eda14cbcSMatt Macy 	}
5175eda14cbcSMatt Macy 
5176eda14cbcSMatt Macy 	/*
5177eda14cbcSMatt Macy 	 * Read all vdev labels to find the best uberblock (i.e. latest,
5178eda14cbcSMatt Macy 	 * unless spa_load_max_txg is set) and store it in spa_uberblock. We
5179eda14cbcSMatt Macy 	 * get the list of features required to read blkptrs in the MOS from
5180eda14cbcSMatt Macy 	 * the vdev label with the best uberblock and verify that our version
5181eda14cbcSMatt Macy 	 * of zfs supports them all.
5182eda14cbcSMatt Macy 	 */
5183eda14cbcSMatt Macy 	error = spa_ld_select_uberblock(spa, type);
5184eda14cbcSMatt Macy 	if (error != 0)
5185eda14cbcSMatt Macy 		return (error);
5186eda14cbcSMatt Macy 
5187eda14cbcSMatt Macy 	/*
5188eda14cbcSMatt Macy 	 * Pass that uberblock to the dsl_pool layer which will open the root
5189eda14cbcSMatt Macy 	 * blkptr. This blkptr points to the latest version of the MOS and will
5190eda14cbcSMatt Macy 	 * allow us to read its contents.
5191eda14cbcSMatt Macy 	 */
5192eda14cbcSMatt Macy 	error = spa_ld_open_rootbp(spa);
5193eda14cbcSMatt Macy 	if (error != 0)
5194eda14cbcSMatt Macy 		return (error);
5195eda14cbcSMatt Macy 
5196eda14cbcSMatt Macy 	return (0);
5197eda14cbcSMatt Macy }
5198eda14cbcSMatt Macy 
5199eda14cbcSMatt Macy static int
5200eda14cbcSMatt Macy spa_ld_checkpoint_rewind(spa_t *spa)
5201eda14cbcSMatt Macy {
5202eda14cbcSMatt Macy 	uberblock_t checkpoint;
5203eda14cbcSMatt Macy 	int error = 0;
5204eda14cbcSMatt Macy 
5205eda14cbcSMatt Macy 	ASSERT(MUTEX_HELD(&spa_namespace_lock));
5206eda14cbcSMatt Macy 	ASSERT(spa->spa_import_flags & ZFS_IMPORT_CHECKPOINT);
5207eda14cbcSMatt Macy 
5208eda14cbcSMatt Macy 	error = zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
5209eda14cbcSMatt Macy 	    DMU_POOL_ZPOOL_CHECKPOINT, sizeof (uint64_t),
5210eda14cbcSMatt Macy 	    sizeof (uberblock_t) / sizeof (uint64_t), &checkpoint);
5211eda14cbcSMatt Macy 
5212eda14cbcSMatt Macy 	if (error != 0) {
5213eda14cbcSMatt Macy 		spa_load_failed(spa, "unable to retrieve checkpointed "
5214eda14cbcSMatt Macy 		    "uberblock from the MOS config [error=%d]", error);
5215eda14cbcSMatt Macy 
5216eda14cbcSMatt Macy 		if (error == ENOENT)
5217eda14cbcSMatt Macy 			error = ZFS_ERR_NO_CHECKPOINT;
5218eda14cbcSMatt Macy 
5219eda14cbcSMatt Macy 		return (error);
5220eda14cbcSMatt Macy 	}
5221eda14cbcSMatt Macy 
5222eda14cbcSMatt Macy 	ASSERT3U(checkpoint.ub_txg, <, spa->spa_uberblock.ub_txg);
5223eda14cbcSMatt Macy 	ASSERT3U(checkpoint.ub_txg, ==, checkpoint.ub_checkpoint_txg);
5224eda14cbcSMatt Macy 
5225eda14cbcSMatt Macy 	/*
5226eda14cbcSMatt Macy 	 * We need to update the txg and timestamp of the checkpointed
5227eda14cbcSMatt Macy 	 * uberblock to be higher than the latest one. This ensures that
5228eda14cbcSMatt Macy 	 * the checkpointed uberblock is selected if we were to close and
5229eda14cbcSMatt Macy 	 * reopen the pool right after we've written it in the vdev labels.
5230eda14cbcSMatt Macy 	 * (also see block comment in vdev_uberblock_compare)
5231eda14cbcSMatt Macy 	 */
5232eda14cbcSMatt Macy 	checkpoint.ub_txg = spa->spa_uberblock.ub_txg + 1;
5233eda14cbcSMatt Macy 	checkpoint.ub_timestamp = gethrestime_sec();
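	/*
	 * Worked example for the two assignments above (illustrative only):
	 * if the labels currently hold an uberblock with ub_txg == 250 and
	 * the checkpoint was taken at txg 200 (ub_checkpoint_txg == 200),
	 * the checkpointed uberblock now claims ub_txg == 251 and the
	 * current wall-clock time, so vdev_uberblock_compare() will prefer
	 * it over the txg-250 uberblock if the pool is closed and reopened
	 * right after it is written below.
	 */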
5234eda14cbcSMatt Macy 
5235eda14cbcSMatt Macy 	/*
5236eda14cbcSMatt Macy 	 * Set current uberblock to be the checkpointed uberblock.
5237eda14cbcSMatt Macy 	 */
5238eda14cbcSMatt Macy 	spa->spa_uberblock = checkpoint;
5239eda14cbcSMatt Macy 
5240eda14cbcSMatt Macy 	/*
5241eda14cbcSMatt Macy 	 * If we are doing a normal rewind, then the pool is open for
5242eda14cbcSMatt Macy 	 * writing and we sync the "updated" checkpointed uberblock to
5243eda14cbcSMatt Macy 	 * disk. Once this is done, we've basically rewound the whole
5244eda14cbcSMatt Macy 	 * pool and there is no way back.
5245eda14cbcSMatt Macy 	 *
5246eda14cbcSMatt Macy 	 * There are cases when we don't want to attempt to sync the
5247eda14cbcSMatt Macy 	 * checkpointed uberblock to disk because we are opening a
5248eda14cbcSMatt Macy 	 * pool as read-only. Specifically, verifying the checkpointed
5249eda14cbcSMatt Macy 	 * state with zdb, and importing the checkpointed state to get
5250eda14cbcSMatt Macy 	 * a "preview" of its content.
5251eda14cbcSMatt Macy 	 */
5252eda14cbcSMatt Macy 	if (spa_writeable(spa)) {
5253eda14cbcSMatt Macy 		vdev_t *rvd = spa->spa_root_vdev;
5254eda14cbcSMatt Macy 
5255eda14cbcSMatt Macy 		spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
5256eda14cbcSMatt Macy 		vdev_t *svd[SPA_SYNC_MIN_VDEVS] = { NULL };
5257eda14cbcSMatt Macy 		int svdcount = 0;
5258eda14cbcSMatt Macy 		int children = rvd->vdev_children;
525933b8c039SMartin Matuska 		int c0 = random_in_range(children);
5260eda14cbcSMatt Macy 
5261eda14cbcSMatt Macy 		for (int c = 0; c < children; c++) {
5262eda14cbcSMatt Macy 			vdev_t *vd = rvd->vdev_child[(c0 + c) % children];
5263eda14cbcSMatt Macy 
5264eda14cbcSMatt Macy 			/* Stop when revisiting the first vdev */
5265eda14cbcSMatt Macy 			if (c > 0 && svd[0] == vd)
5266eda14cbcSMatt Macy 				break;
5267eda14cbcSMatt Macy 
5268eda14cbcSMatt Macy 			if (vd->vdev_ms_array == 0 || vd->vdev_islog ||
5269eda14cbcSMatt Macy 			    !vdev_is_concrete(vd))
5270eda14cbcSMatt Macy 				continue;
5271eda14cbcSMatt Macy 
5272eda14cbcSMatt Macy 			svd[svdcount++] = vd;
5273eda14cbcSMatt Macy 			if (svdcount == SPA_SYNC_MIN_VDEVS)
5274eda14cbcSMatt Macy 				break;
5275eda14cbcSMatt Macy 		}
5276eda14cbcSMatt Macy 		error = vdev_config_sync(svd, svdcount, spa->spa_first_txg);
5277eda14cbcSMatt Macy 		if (error == 0)
5278eda14cbcSMatt Macy 			spa->spa_last_synced_guid = rvd->vdev_guid;
5279eda14cbcSMatt Macy 		spa_config_exit(spa, SCL_ALL, FTAG);
5280eda14cbcSMatt Macy 
5281eda14cbcSMatt Macy 		if (error != 0) {
5282eda14cbcSMatt Macy 			spa_load_failed(spa, "failed to write checkpointed "
5283eda14cbcSMatt Macy 			    "uberblock to the vdev labels [error=%d]", error);
5284eda14cbcSMatt Macy 			return (error);
5285eda14cbcSMatt Macy 		}
5286eda14cbcSMatt Macy 	}
5287eda14cbcSMatt Macy 
5288eda14cbcSMatt Macy 	return (0);
5289eda14cbcSMatt Macy }
5290eda14cbcSMatt Macy 
5291eda14cbcSMatt Macy static int
5292eda14cbcSMatt Macy spa_ld_mos_with_trusted_config(spa_t *spa, spa_import_type_t type,
5293eda14cbcSMatt Macy     boolean_t *update_config_cache)
5294eda14cbcSMatt Macy {
5295eda14cbcSMatt Macy 	int error;
5296eda14cbcSMatt Macy 
5297eda14cbcSMatt Macy 	/*
5298eda14cbcSMatt Macy 	 * Parse the config for pool, open and validate vdevs,
5299eda14cbcSMatt Macy 	 * select an uberblock, and use that uberblock to open
5300eda14cbcSMatt Macy 	 * the MOS.
5301eda14cbcSMatt Macy 	 */
5302eda14cbcSMatt Macy 	error = spa_ld_mos_init(spa, type);
5303eda14cbcSMatt Macy 	if (error != 0)
5304eda14cbcSMatt Macy 		return (error);
5305eda14cbcSMatt Macy 
5306eda14cbcSMatt Macy 	/*
5307eda14cbcSMatt Macy 	 * Retrieve the trusted config stored in the MOS and use it to create
5308eda14cbcSMatt Macy 	 * a new, exact version of the vdev tree, then reopen all vdevs.
5309eda14cbcSMatt Macy 	 */
5310eda14cbcSMatt Macy 	error = spa_ld_trusted_config(spa, type, B_FALSE);
5311eda14cbcSMatt Macy 	if (error == EAGAIN) {
5312eda14cbcSMatt Macy 		if (update_config_cache != NULL)
5313eda14cbcSMatt Macy 			*update_config_cache = B_TRUE;
5314eda14cbcSMatt Macy 
5315eda14cbcSMatt Macy 		/*
5316eda14cbcSMatt Macy 		 * Redo the loading process with the trusted config if it is
5317eda14cbcSMatt Macy 		 * too different from the untrusted config.
5318eda14cbcSMatt Macy 		 */
5319eda14cbcSMatt Macy 		spa_ld_prepare_for_reload(spa);
5320eda14cbcSMatt Macy 		spa_load_note(spa, "RELOADING");
5321eda14cbcSMatt Macy 		error = spa_ld_mos_init(spa, type);
5322eda14cbcSMatt Macy 		if (error != 0)
5323eda14cbcSMatt Macy 			return (error);
5324eda14cbcSMatt Macy 
5325eda14cbcSMatt Macy 		error = spa_ld_trusted_config(spa, type, B_TRUE);
5326eda14cbcSMatt Macy 		if (error != 0)
5327eda14cbcSMatt Macy 			return (error);
5328eda14cbcSMatt Macy 
5329eda14cbcSMatt Macy 	} else if (error != 0) {
5330eda14cbcSMatt Macy 		return (error);
5331eda14cbcSMatt Macy 	}
5332eda14cbcSMatt Macy 
5333eda14cbcSMatt Macy 	return (0);
5334eda14cbcSMatt Macy }
5335eda14cbcSMatt Macy 
5336eda14cbcSMatt Macy /*
5337eda14cbcSMatt Macy  * Load an existing storage pool, using the config provided. This config
5338eda14cbcSMatt Macy  * describes which vdevs are part of the pool and is later validated against
5339eda14cbcSMatt Macy  * partial configs present in each vdev's label and an entire copy of the
5340eda14cbcSMatt Macy  * config stored in the MOS.
5341eda14cbcSMatt Macy  */
5342eda14cbcSMatt Macy static int
5343a0b956f5SMartin Matuska spa_load_impl(spa_t *spa, spa_import_type_t type, const char **ereport)
5344eda14cbcSMatt Macy {
5345eda14cbcSMatt Macy 	int error = 0;
5346eda14cbcSMatt Macy 	boolean_t missing_feat_write = B_FALSE;
5347eda14cbcSMatt Macy 	boolean_t checkpoint_rewind =
5348eda14cbcSMatt Macy 	    (spa->spa_import_flags & ZFS_IMPORT_CHECKPOINT);
5349eda14cbcSMatt Macy 	boolean_t update_config_cache = B_FALSE;
53500d4ad640SMartin Matuska 	hrtime_t load_start = gethrtime();
5351eda14cbcSMatt Macy 
5352eda14cbcSMatt Macy 	ASSERT(MUTEX_HELD(&spa_namespace_lock));
5353eda14cbcSMatt Macy 	ASSERT(spa->spa_config_source != SPA_CONFIG_SRC_NONE);
5354eda14cbcSMatt Macy 
5355eda14cbcSMatt Macy 	spa_load_note(spa, "LOADING");
5356eda14cbcSMatt Macy 
5357eda14cbcSMatt Macy 	error = spa_ld_mos_with_trusted_config(spa, type, &update_config_cache);
5358eda14cbcSMatt Macy 	if (error != 0)
5359eda14cbcSMatt Macy 		return (error);
5360eda14cbcSMatt Macy 
5361eda14cbcSMatt Macy 	/*
5362eda14cbcSMatt Macy 	 * If we are rewinding to the checkpoint then we need to repeat
5363eda14cbcSMatt Macy 	 * everything we've done so far in this function but this time
5364eda14cbcSMatt Macy 	 * selecting the checkpointed uberblock and using that to open
5365eda14cbcSMatt Macy 	 * the MOS.
5366eda14cbcSMatt Macy 	 */
5367eda14cbcSMatt Macy 	if (checkpoint_rewind) {
5368eda14cbcSMatt Macy 		/*
5369eda14cbcSMatt Macy 		 * If we are rewinding to the checkpoint update config cache
5370eda14cbcSMatt Macy 		 * anyway.
5371eda14cbcSMatt Macy 		 */
5372eda14cbcSMatt Macy 		update_config_cache = B_TRUE;
5373eda14cbcSMatt Macy 
5374eda14cbcSMatt Macy 		/*
5375eda14cbcSMatt Macy 		 * Extract the checkpointed uberblock from the current MOS
5376eda14cbcSMatt Macy 		 * and use this as the pool's uberblock from now on. If the
5377eda14cbcSMatt Macy 		 * pool is imported as writeable we also write the checkpoint
5378eda14cbcSMatt Macy 		 * uberblock to the labels, making the rewind permanent.
5379eda14cbcSMatt Macy 		 */
5380eda14cbcSMatt Macy 		error = spa_ld_checkpoint_rewind(spa);
5381eda14cbcSMatt Macy 		if (error != 0)
5382eda14cbcSMatt Macy 			return (error);
5383eda14cbcSMatt Macy 
5384eda14cbcSMatt Macy 		/*
5385eda14cbcSMatt Macy 		 * Redo the loading process again with the
5386eda14cbcSMatt Macy 		 * checkpointed uberblock.
5387eda14cbcSMatt Macy 		 */
5388eda14cbcSMatt Macy 		spa_ld_prepare_for_reload(spa);
5389eda14cbcSMatt Macy 		spa_load_note(spa, "LOADING checkpointed uberblock");
5390eda14cbcSMatt Macy 		error = spa_ld_mos_with_trusted_config(spa, type, NULL);
5391eda14cbcSMatt Macy 		if (error != 0)
5392eda14cbcSMatt Macy 			return (error);
5393eda14cbcSMatt Macy 	}
5394eda14cbcSMatt Macy 
5395eda14cbcSMatt Macy 	/*
53960d4ad640SMartin Matuska 	 * Drop the namespace lock for the rest of the function.
53970d4ad640SMartin Matuska 	 */
53980d4ad640SMartin Matuska 	spa->spa_load_thread = curthread;
53990d4ad640SMartin Matuska 	mutex_exit(&spa_namespace_lock);
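	/*
	 * Illustrative note (pattern taken from spa_ld_read_checkpoint_txg()
	 * defined above, not new code): once the namespace lock is dropped,
	 * helpers on the load path assert ownership with
	 *
	 *	ASSERT(MUTEX_HELD(&spa_namespace_lock) ||
	 *	    spa->spa_load_thread == curthread);
	 *
	 * so that callers still holding the lock and the import thread that
	 * dropped it are both accepted.
	 */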
54000d4ad640SMartin Matuska 
54010d4ad640SMartin Matuska 	/*
5402eda14cbcSMatt Macy 	 * Retrieve the checkpoint txg if the pool has a checkpoint.
5403eda14cbcSMatt Macy 	 */
54043494f7c0SMartin Matuska 	spa_import_progress_set_notes(spa, "Loading checkpoint txg");
5405eda14cbcSMatt Macy 	error = spa_ld_read_checkpoint_txg(spa);
5406eda14cbcSMatt Macy 	if (error != 0)
54070d4ad640SMartin Matuska 		goto fail;
5408eda14cbcSMatt Macy 
5409eda14cbcSMatt Macy 	/*
5410eda14cbcSMatt Macy 	 * Retrieve the mapping of indirect vdevs. Those vdevs were removed
5411eda14cbcSMatt Macy 	 * from the pool and their contents were re-mapped to other vdevs. Note
5412eda14cbcSMatt Macy 	 * that everything that we read before this step must have been
5413eda14cbcSMatt Macy 	 * rewritten on concrete vdevs after the last device removal was
5414eda14cbcSMatt Macy 	 * initiated. Otherwise we could be reading from indirect vdevs before
5415eda14cbcSMatt Macy 	 * we have loaded their mappings.
5416eda14cbcSMatt Macy 	 */
54173494f7c0SMartin Matuska 	spa_import_progress_set_notes(spa, "Loading indirect vdev metadata");
5418eda14cbcSMatt Macy 	error = spa_ld_open_indirect_vdev_metadata(spa);
5419eda14cbcSMatt Macy 	if (error != 0)
54200d4ad640SMartin Matuska 		goto fail;
5421eda14cbcSMatt Macy 
5422eda14cbcSMatt Macy 	/*
5423eda14cbcSMatt Macy 	 * Retrieve the full list of active features from the MOS and check if
5424eda14cbcSMatt Macy 	 * they are all supported.
5425eda14cbcSMatt Macy 	 */
54263494f7c0SMartin Matuska 	spa_import_progress_set_notes(spa, "Checking feature flags");
5427eda14cbcSMatt Macy 	error = spa_ld_check_features(spa, &missing_feat_write);
5428eda14cbcSMatt Macy 	if (error != 0)
54290d4ad640SMartin Matuska 		goto fail;
5430eda14cbcSMatt Macy 
5431eda14cbcSMatt Macy 	/*
5432eda14cbcSMatt Macy 	 * Load several special directories from the MOS needed by the dsl_pool
5433eda14cbcSMatt Macy 	 * layer.
5434eda14cbcSMatt Macy 	 */
54353494f7c0SMartin Matuska 	spa_import_progress_set_notes(spa, "Loading special MOS directories");
5436eda14cbcSMatt Macy 	error = spa_ld_load_special_directories(spa);
5437eda14cbcSMatt Macy 	if (error != 0)
54380d4ad640SMartin Matuska 		goto fail;
5439eda14cbcSMatt Macy 
5440eda14cbcSMatt Macy 	/*
5441eda14cbcSMatt Macy 	 * Retrieve pool properties from the MOS.
5442eda14cbcSMatt Macy 	 */
54433494f7c0SMartin Matuska 	spa_import_progress_set_notes(spa, "Loading properties");
5444eda14cbcSMatt Macy 	error = spa_ld_get_props(spa);
5445eda14cbcSMatt Macy 	if (error != 0)
54460d4ad640SMartin Matuska 		goto fail;
5447eda14cbcSMatt Macy 
5448eda14cbcSMatt Macy 	/*
5449eda14cbcSMatt Macy 	 * Retrieve the list of auxiliary devices - cache devices and spares -
5450eda14cbcSMatt Macy 	 * and open them.
5451eda14cbcSMatt Macy 	 */
54523494f7c0SMartin Matuska 	spa_import_progress_set_notes(spa, "Loading AUX vdevs");
5453eda14cbcSMatt Macy 	error = spa_ld_open_aux_vdevs(spa, type);
5454eda14cbcSMatt Macy 	if (error != 0)
54550d4ad640SMartin Matuska 		goto fail;
5456eda14cbcSMatt Macy 
5457eda14cbcSMatt Macy 	/*
5458eda14cbcSMatt Macy 	 * Load the metadata for all vdevs. Also check if unopenable devices
5459eda14cbcSMatt Macy 	 * should be autoreplaced.
5460eda14cbcSMatt Macy 	 */
54613494f7c0SMartin Matuska 	spa_import_progress_set_notes(spa, "Loading vdev metadata");
5462eda14cbcSMatt Macy 	error = spa_ld_load_vdev_metadata(spa);
5463eda14cbcSMatt Macy 	if (error != 0)
54640d4ad640SMartin Matuska 		goto fail;
5465eda14cbcSMatt Macy 
54663494f7c0SMartin Matuska 	spa_import_progress_set_notes(spa, "Loading dedup tables");
5467eda14cbcSMatt Macy 	error = spa_ld_load_dedup_tables(spa);
5468eda14cbcSMatt Macy 	if (error != 0)
54690d4ad640SMartin Matuska 		goto fail;
5470eda14cbcSMatt Macy 
54713494f7c0SMartin Matuska 	spa_import_progress_set_notes(spa, "Loading BRT");
54722a58b312SMartin Matuska 	error = spa_ld_load_brt(spa);
54732a58b312SMartin Matuska 	if (error != 0)
54740d4ad640SMartin Matuska 		goto fail;
54752a58b312SMartin Matuska 
5476eda14cbcSMatt Macy 	/*
5477eda14cbcSMatt Macy 	 * Verify the logs now to make sure we don't have any unexpected errors
5478eda14cbcSMatt Macy 	 * when we claim log blocks later.
5479eda14cbcSMatt Macy 	 */
54803494f7c0SMartin Matuska 	spa_import_progress_set_notes(spa, "Verifying Log Devices");
5481eda14cbcSMatt Macy 	error = spa_ld_verify_logs(spa, type, ereport);
5482eda14cbcSMatt Macy 	if (error != 0)
54830d4ad640SMartin Matuska 		goto fail;
5484eda14cbcSMatt Macy 
5485eda14cbcSMatt Macy 	if (missing_feat_write) {
5486eda14cbcSMatt Macy 		ASSERT(spa->spa_load_state == SPA_LOAD_TRYIMPORT);
5487eda14cbcSMatt Macy 
5488eda14cbcSMatt Macy 		/*
5489eda14cbcSMatt Macy 		 * At this point, we know that we can open the pool in
5490eda14cbcSMatt Macy 		 * read-only mode but not read-write mode. We now have enough
5491eda14cbcSMatt Macy 		 * information and can return to userland.
5492eda14cbcSMatt Macy 		 */
54930d4ad640SMartin Matuska 		error = spa_vdev_err(spa->spa_root_vdev, VDEV_AUX_UNSUP_FEAT,
54940d4ad640SMartin Matuska 		    ENOTSUP);
54950d4ad640SMartin Matuska 		goto fail;
5496eda14cbcSMatt Macy 	}
5497eda14cbcSMatt Macy 
5498eda14cbcSMatt Macy 	/*
5499eda14cbcSMatt Macy 	 * Traverse the last txgs to make sure the pool was left in a safe
5500eda14cbcSMatt Macy 	 * state. When performing an extreme rewind, we verify the whole pool,
5501eda14cbcSMatt Macy 	 * which can take a very long time.
5502eda14cbcSMatt Macy 	 */
55033494f7c0SMartin Matuska 	spa_import_progress_set_notes(spa, "Verifying pool data");
5504eda14cbcSMatt Macy 	error = spa_ld_verify_pool_data(spa);
5505eda14cbcSMatt Macy 	if (error != 0)
55060d4ad640SMartin Matuska 		goto fail;
5507eda14cbcSMatt Macy 
5508eda14cbcSMatt Macy 	/*
5509eda14cbcSMatt Macy 	 * Calculate the deflated space for the pool. This must be done before
5510eda14cbcSMatt Macy 	 * we write anything to the pool because we'd need to update the space
5511eda14cbcSMatt Macy 	 * accounting using the deflated sizes.
5512eda14cbcSMatt Macy 	 */
55133494f7c0SMartin Matuska 	spa_import_progress_set_notes(spa, "Calculating deflated space");
5514eda14cbcSMatt Macy 	spa_update_dspace(spa);
5515eda14cbcSMatt Macy 
5516eda14cbcSMatt Macy 	/*
5517eda14cbcSMatt Macy 	 * We have now retrieved all the information we needed to open the
5518eda14cbcSMatt Macy 	 * pool. If we are importing the pool in read-write mode, a few
5519eda14cbcSMatt Macy 	 * additional steps must be performed to finish the import.
5520eda14cbcSMatt Macy 	 */
55213494f7c0SMartin Matuska 	spa_import_progress_set_notes(spa, "Starting import");
5522eda14cbcSMatt Macy 	if (spa_writeable(spa) && (spa->spa_load_state == SPA_LOAD_RECOVER ||
5523eda14cbcSMatt Macy 	    spa->spa_load_max_txg == UINT64_MAX)) {
5524eda14cbcSMatt Macy 		uint64_t config_cache_txg = spa->spa_config_txg;
5525eda14cbcSMatt Macy 
5526eda14cbcSMatt Macy 		ASSERT(spa->spa_load_state != SPA_LOAD_TRYIMPORT);
5527eda14cbcSMatt Macy 
5528eda14cbcSMatt Macy 		/*
5529e716630dSMartin Matuska 		 * Before we do any zio_write's, complete the raidz expansion
5530e716630dSMartin Matuska 		 * scratch space copying, if necessary.
5531e716630dSMartin Matuska 		 */
5532e716630dSMartin Matuska 		if (RRSS_GET_STATE(&spa->spa_uberblock) == RRSS_SCRATCH_VALID)
5533e716630dSMartin Matuska 			vdev_raidz_reflow_copy_scratch(spa);
5534e716630dSMartin Matuska 
5535e716630dSMartin Matuska 		/*
5536eda14cbcSMatt Macy 		 * In case of a checkpoint rewind, log the original txg
5537eda14cbcSMatt Macy 		 * of the checkpointed uberblock.
5538eda14cbcSMatt Macy 		 */
5539eda14cbcSMatt Macy 		if (checkpoint_rewind) {
5540eda14cbcSMatt Macy 			spa_history_log_internal(spa, "checkpoint rewind",
5541eda14cbcSMatt Macy 			    NULL, "rewound state to txg=%llu",
5542eda14cbcSMatt Macy 			    (u_longlong_t)spa->spa_uberblock.ub_checkpoint_txg);
5543eda14cbcSMatt Macy 		}
5544eda14cbcSMatt Macy 
55453494f7c0SMartin Matuska 		spa_import_progress_set_notes(spa, "Claiming ZIL blocks");
5546eda14cbcSMatt Macy 		/*
5547eda14cbcSMatt Macy 		 * Traverse the ZIL and claim all blocks.
5548eda14cbcSMatt Macy 		 */
5549eda14cbcSMatt Macy 		spa_ld_claim_log_blocks(spa);
5550eda14cbcSMatt Macy 
5551eda14cbcSMatt Macy 		/*
5552eda14cbcSMatt Macy 		 * Kick-off the syncing thread.
5553eda14cbcSMatt Macy 		 */
5554eda14cbcSMatt Macy 		spa->spa_sync_on = B_TRUE;
5555eda14cbcSMatt Macy 		txg_sync_start(spa->spa_dsl_pool);
5556eda14cbcSMatt Macy 		mmp_thread_start(spa);
5557eda14cbcSMatt Macy 
5558eda14cbcSMatt Macy 		/*
5559eda14cbcSMatt Macy 		 * Wait for all claims to sync.  We sync up to the highest
5560eda14cbcSMatt Macy 		 * claimed log block birth time so that claimed log blocks
5561eda14cbcSMatt Macy 		 * don't appear to be from the future.  spa_claim_max_txg
5562eda14cbcSMatt Macy 		 * will have been set for us by ZIL traversal operations
5563eda14cbcSMatt Macy 		 * performed above.
5564eda14cbcSMatt Macy 		 */
55653494f7c0SMartin Matuska 		spa_import_progress_set_notes(spa, "Syncing ZIL claims");
5566eda14cbcSMatt Macy 		txg_wait_synced(spa->spa_dsl_pool, spa->spa_claim_max_txg);
5567eda14cbcSMatt Macy 
5568eda14cbcSMatt Macy 		/*
5569eda14cbcSMatt Macy 		 * Check if we need to request an update of the config. On the
5570eda14cbcSMatt Macy 		 * next sync, we would update the config stored in vdev labels
5571eda14cbcSMatt Macy 		 * and the cachefile (by default /etc/zfs/zpool.cache).
5572eda14cbcSMatt Macy 		 */
55733494f7c0SMartin Matuska 		spa_import_progress_set_notes(spa, "Updating configs");
5574eda14cbcSMatt Macy 		spa_ld_check_for_config_update(spa, config_cache_txg,
5575eda14cbcSMatt Macy 		    update_config_cache);
5576eda14cbcSMatt Macy 
5577eda14cbcSMatt Macy 		/*
5578eda14cbcSMatt Macy 		 * Check if a rebuild was in progress and if so resume it.
5579eda14cbcSMatt Macy 		 * Then check all DTLs to see if anything needs resilvering.
5580eda14cbcSMatt Macy 		 * The resilver will be deferred if a rebuild was started.
5581eda14cbcSMatt Macy 		 */
55823494f7c0SMartin Matuska 		spa_import_progress_set_notes(spa, "Starting resilvers");
5583eda14cbcSMatt Macy 		if (vdev_rebuild_active(spa->spa_root_vdev)) {
5584eda14cbcSMatt Macy 			vdev_rebuild_restart(spa);
5585eda14cbcSMatt Macy 		} else if (!dsl_scan_resilvering(spa->spa_dsl_pool) &&
5586eda14cbcSMatt Macy 		    vdev_resilver_needed(spa->spa_root_vdev, NULL, NULL)) {
5587eda14cbcSMatt Macy 			spa_async_request(spa, SPA_ASYNC_RESILVER);
5588eda14cbcSMatt Macy 		}
5589eda14cbcSMatt Macy 
5590eda14cbcSMatt Macy 		/*
5591eda14cbcSMatt Macy 		 * Log the fact that we booted up (so that we can detect if
5592eda14cbcSMatt Macy 		 * we rebooted in the middle of an operation).
5593eda14cbcSMatt Macy 		 */
5594eda14cbcSMatt Macy 		spa_history_log_version(spa, "open", NULL);
5595eda14cbcSMatt Macy 
55963494f7c0SMartin Matuska 		spa_import_progress_set_notes(spa,
55973494f7c0SMartin Matuska 		    "Restarting device removals");
5598eda14cbcSMatt Macy 		spa_restart_removal(spa);
5599eda14cbcSMatt Macy 		spa_spawn_aux_threads(spa);
5600eda14cbcSMatt Macy 
5601eda14cbcSMatt Macy 		/*
5602eda14cbcSMatt Macy 		 * Delete any inconsistent datasets.
5603eda14cbcSMatt Macy 		 *
5604eda14cbcSMatt Macy 		 * Note:
5605eda14cbcSMatt Macy 		 * Since we may be issuing deletes for clones here,
5606eda14cbcSMatt Macy 		 * we make sure to do so after we've spawned all the
5607eda14cbcSMatt Macy 		 * auxiliary threads above (which the livelist
5608eda14cbcSMatt Macy 		 * deletion zthr is part of).
5609eda14cbcSMatt Macy 		 */
56103494f7c0SMartin Matuska 		spa_import_progress_set_notes(spa,
56113494f7c0SMartin Matuska 		    "Cleaning up inconsistent objsets");
5612eda14cbcSMatt Macy 		(void) dmu_objset_find(spa_name(spa),
5613eda14cbcSMatt Macy 		    dsl_destroy_inconsistent, NULL, DS_FIND_CHILDREN);
5614eda14cbcSMatt Macy 
5615eda14cbcSMatt Macy 		/*
5616eda14cbcSMatt Macy 		 * Clean up any stale temporary dataset userrefs.
5617eda14cbcSMatt Macy 		 */
56183494f7c0SMartin Matuska 		spa_import_progress_set_notes(spa,
56193494f7c0SMartin Matuska 		    "Cleaning up temporary userrefs");
5620eda14cbcSMatt Macy 		dsl_pool_clean_tmp_userrefs(spa->spa_dsl_pool);
5621eda14cbcSMatt Macy 
5622eda14cbcSMatt Macy 		spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
56233494f7c0SMartin Matuska 		spa_import_progress_set_notes(spa, "Restarting initialize");
5624eda14cbcSMatt Macy 		vdev_initialize_restart(spa->spa_root_vdev);
56253494f7c0SMartin Matuska 		spa_import_progress_set_notes(spa, "Restarting TRIM");
5626eda14cbcSMatt Macy 		vdev_trim_restart(spa->spa_root_vdev);
5627eda14cbcSMatt Macy 		vdev_autotrim_restart(spa);
5628eda14cbcSMatt Macy 		spa_config_exit(spa, SCL_CONFIG, FTAG);
56293494f7c0SMartin Matuska 		spa_import_progress_set_notes(spa, "Finished importing");
5630eda14cbcSMatt Macy 	}
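	/*
	 * Apply any artificially injected import delay (used by the test
	 * suite via zinject), based on how long this load has taken so far.
	 */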
56310d4ad640SMartin Matuska 	zio_handle_import_delay(spa, gethrtime() - load_start);
5632eda14cbcSMatt Macy 
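	/*
	 * The load is complete: drop the import progress entry and schedule
	 * an asynchronous rebuild of any persistent L2ARC devices.
	 */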
5633eda14cbcSMatt Macy 	spa_import_progress_remove(spa_guid(spa));
5634eda14cbcSMatt Macy 	spa_async_request(spa, SPA_ASYNC_L2CACHE_REBUILD);
5635eda14cbcSMatt Macy 
5636eda14cbcSMatt Macy 	spa_load_note(spa, "LOADED");
56370d4ad640SMartin Matuska fail:
56380d4ad640SMartin Matuska 	mutex_enter(&spa_namespace_lock);
56390d4ad640SMartin Matuska 	spa->spa_load_thread = NULL;
56400d4ad640SMartin Matuska 	cv_broadcast(&spa_namespace_cv);
5641eda14cbcSMatt Macy 
56420d4ad640SMartin Matuska 	return (error);
56430d4ad640SMartin Matuska 
5644eda14cbcSMatt Macy }
5645eda14cbcSMatt Macy 
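/*
 * Unload and re-activate the pool, then retry the load with the maximum
 * allowed txg set to one less than the txg of the currently loaded
 * uberblock.  Used by spa_load_best() to step backwards through txgs
 * while rewinding.
 */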
5646eda14cbcSMatt Macy static int
5647eda14cbcSMatt Macy spa_load_retry(spa_t *spa, spa_load_state_t state)
5648eda14cbcSMatt Macy {
5649eda14cbcSMatt Macy 	spa_mode_t mode = spa->spa_mode;
5650eda14cbcSMatt Macy 
5651eda14cbcSMatt Macy 	spa_unload(spa);
5652eda14cbcSMatt Macy 	spa_deactivate(spa);
5653eda14cbcSMatt Macy 
5654eda14cbcSMatt Macy 	spa->spa_load_max_txg = spa->spa_uberblock.ub_txg - 1;
5655eda14cbcSMatt Macy 
5656eda14cbcSMatt Macy 	spa_activate(spa, mode);
5657eda14cbcSMatt Macy 	spa_async_suspend(spa);
5658eda14cbcSMatt Macy 
5659eda14cbcSMatt Macy 	spa_load_note(spa, "spa_load_retry: rewind, max txg: %llu",
5660eda14cbcSMatt Macy 	    (u_longlong_t)spa->spa_load_max_txg);
5661eda14cbcSMatt Macy 
5662eda14cbcSMatt Macy 	return (spa_load(spa, state, SPA_IMPORT_EXISTING));
5663eda14cbcSMatt Macy }
5664eda14cbcSMatt Macy 
5665eda14cbcSMatt Macy /*
5666eda14cbcSMatt Macy  * If spa_load() fails, this function will try loading prior txgs. If
5667eda14cbcSMatt Macy  * 'state' is SPA_LOAD_RECOVER and one of these loads succeeds the pool
5668eda14cbcSMatt Macy  * will be rewound to that txg. If 'state' is not SPA_LOAD_RECOVER this
5669eda14cbcSMatt Macy  * function will not rewind the pool and will return the same error as
5670eda14cbcSMatt Macy  * spa_load().
5671eda14cbcSMatt Macy  */
5672eda14cbcSMatt Macy static int
5673eda14cbcSMatt Macy spa_load_best(spa_t *spa, spa_load_state_t state, uint64_t max_request,
5674eda14cbcSMatt Macy     int rewind_flags)
5675eda14cbcSMatt Macy {
5676eda14cbcSMatt Macy 	nvlist_t *loadinfo = NULL;
5677eda14cbcSMatt Macy 	nvlist_t *config = NULL;
5678eda14cbcSMatt Macy 	int load_error, rewind_error;
5679eda14cbcSMatt Macy 	uint64_t safe_rewind_txg;
5680eda14cbcSMatt Macy 	uint64_t min_txg;
5681eda14cbcSMatt Macy 
5682eda14cbcSMatt Macy 	if (spa->spa_load_txg && state == SPA_LOAD_RECOVER) {
5683eda14cbcSMatt Macy 		spa->spa_load_max_txg = spa->spa_load_txg;
5684eda14cbcSMatt Macy 		spa_set_log_state(spa, SPA_LOG_CLEAR);
5685eda14cbcSMatt Macy 	} else {
5686eda14cbcSMatt Macy 		spa->spa_load_max_txg = max_request;
5687eda14cbcSMatt Macy 		if (max_request != UINT64_MAX)
5688eda14cbcSMatt Macy 			spa->spa_extreme_rewind = B_TRUE;
5689eda14cbcSMatt Macy 	}
5690eda14cbcSMatt Macy 
5691eda14cbcSMatt Macy 	load_error = rewind_error = spa_load(spa, state, SPA_IMPORT_EXISTING);
5692eda14cbcSMatt Macy 	if (load_error == 0)
5693eda14cbcSMatt Macy 		return (0);
5694eda14cbcSMatt Macy 	if (load_error == ZFS_ERR_NO_CHECKPOINT) {
5695eda14cbcSMatt Macy 		/*
5696eda14cbcSMatt Macy 		 * When attempting checkpoint-rewind on a pool with no
5697eda14cbcSMatt Macy 		 * checkpoint, we should not attempt to load uberblocks
5698eda14cbcSMatt Macy 		 * from previous txgs when spa_load fails.
5699eda14cbcSMatt Macy 		 */
5700eda14cbcSMatt Macy 		ASSERT(spa->spa_import_flags & ZFS_IMPORT_CHECKPOINT);
5701eda14cbcSMatt Macy 		spa_import_progress_remove(spa_guid(spa));
5702eda14cbcSMatt Macy 		return (load_error);
5703eda14cbcSMatt Macy 	}
5704eda14cbcSMatt Macy 
5705eda14cbcSMatt Macy 	if (spa->spa_root_vdev != NULL)
5706eda14cbcSMatt Macy 		config = spa_config_generate(spa, NULL, -1ULL, B_TRUE);
5707eda14cbcSMatt Macy 
5708eda14cbcSMatt Macy 	spa->spa_last_ubsync_txg = spa->spa_uberblock.ub_txg;
5709eda14cbcSMatt Macy 	spa->spa_last_ubsync_txg_ts = spa->spa_uberblock.ub_timestamp;
5710eda14cbcSMatt Macy 
5711eda14cbcSMatt Macy 	if (rewind_flags & ZPOOL_NEVER_REWIND) {
5712eda14cbcSMatt Macy 		nvlist_free(config);
5713eda14cbcSMatt Macy 		spa_import_progress_remove(spa_guid(spa));
5714eda14cbcSMatt Macy 		return (load_error);
5715eda14cbcSMatt Macy 	}
5716eda14cbcSMatt Macy 
5717eda14cbcSMatt Macy 	if (state == SPA_LOAD_RECOVER) {
5718eda14cbcSMatt Macy 		/* Price of rolling back is discarding txgs, including log */
5719eda14cbcSMatt Macy 		spa_set_log_state(spa, SPA_LOG_CLEAR);
5720eda14cbcSMatt Macy 	} else {
5721eda14cbcSMatt Macy 		/*
5722eda14cbcSMatt Macy 		 * If we aren't rolling back, save the load info from our first
5723eda14cbcSMatt Macy 		 * import attempt so that we can restore it after attempting
5724eda14cbcSMatt Macy 		 * to rewind.
5725eda14cbcSMatt Macy 		 */
5726eda14cbcSMatt Macy 		loadinfo = spa->spa_load_info;
5727eda14cbcSMatt Macy 		spa->spa_load_info = fnvlist_alloc();
5728eda14cbcSMatt Macy 	}
5729eda14cbcSMatt Macy 
5730eda14cbcSMatt Macy 	spa->spa_load_max_txg = spa->spa_last_ubsync_txg;
5731eda14cbcSMatt Macy 	safe_rewind_txg = spa->spa_last_ubsync_txg - TXG_DEFER_SIZE;
5732eda14cbcSMatt Macy 	min_txg = (rewind_flags & ZPOOL_EXTREME_REWIND) ?
5733eda14cbcSMatt Macy 	    TXG_INITIAL : safe_rewind_txg;
5734eda14cbcSMatt Macy 
5735eda14cbcSMatt Macy 	/*
5736eda14cbcSMatt Macy 	 * Continue as long as we're finding errors, we're still within
5737eda14cbcSMatt Macy 	 * the acceptable rewind range, and we're still finding uberblocks
5738eda14cbcSMatt Macy 	 */
5739eda14cbcSMatt Macy 	while (rewind_error && spa->spa_uberblock.ub_txg >= min_txg &&
5740eda14cbcSMatt Macy 	    spa->spa_uberblock.ub_txg <= spa->spa_load_max_txg) {
5741eda14cbcSMatt Macy 		if (spa->spa_load_max_txg < safe_rewind_txg)
5742eda14cbcSMatt Macy 			spa->spa_extreme_rewind = B_TRUE;
5743eda14cbcSMatt Macy 		rewind_error = spa_load_retry(spa, state);
5744eda14cbcSMatt Macy 	}
5745eda14cbcSMatt Macy 
5746eda14cbcSMatt Macy 	spa->spa_extreme_rewind = B_FALSE;
5747eda14cbcSMatt Macy 	spa->spa_load_max_txg = UINT64_MAX;
5748eda14cbcSMatt Macy 
5749eda14cbcSMatt Macy 	if (config && (rewind_error || state != SPA_LOAD_RECOVER))
5750eda14cbcSMatt Macy 		spa_config_set(spa, config);
5751eda14cbcSMatt Macy 	else
5752eda14cbcSMatt Macy 		nvlist_free(config);
5753eda14cbcSMatt Macy 
5754eda14cbcSMatt Macy 	if (state == SPA_LOAD_RECOVER) {
5755eda14cbcSMatt Macy 		ASSERT3P(loadinfo, ==, NULL);
5756eda14cbcSMatt Macy 		spa_import_progress_remove(spa_guid(spa));
5757eda14cbcSMatt Macy 		return (rewind_error);
5758eda14cbcSMatt Macy 	} else {
5759eda14cbcSMatt Macy 		/* Store the rewind info as part of the initial load info */
5760eda14cbcSMatt Macy 		fnvlist_add_nvlist(loadinfo, ZPOOL_CONFIG_REWIND_INFO,
5761eda14cbcSMatt Macy 		    spa->spa_load_info);
5762eda14cbcSMatt Macy 
5763eda14cbcSMatt Macy 		/* Restore the initial load info */
5764eda14cbcSMatt Macy 		fnvlist_free(spa->spa_load_info);
5765eda14cbcSMatt Macy 		spa->spa_load_info = loadinfo;
5766eda14cbcSMatt Macy 
5767eda14cbcSMatt Macy 		spa_import_progress_remove(spa_guid(spa));
5768eda14cbcSMatt Macy 		return (load_error);
5769eda14cbcSMatt Macy 	}
5770eda14cbcSMatt Macy }
5771eda14cbcSMatt Macy 
5772eda14cbcSMatt Macy /*
5773eda14cbcSMatt Macy  * Pool Open/Import
5774eda14cbcSMatt Macy  *
5775eda14cbcSMatt Macy  * The import case is identical to an open except that the configuration is sent
5776eda14cbcSMatt Macy  * down from userland, instead of grabbed from the configuration cache.  For the
5777eda14cbcSMatt Macy  * case of an open, the pool configuration will exist in the
5778eda14cbcSMatt Macy  * POOL_STATE_UNINITIALIZED state.
5779eda14cbcSMatt Macy  *
5780eda14cbcSMatt Macy  * The stats information (gen/count/ustats) is used to gather vdev statistics at
5781eda14cbcSMatt Macy  * the same time as opening the pool, without having to keep around the spa_t in
5782eda14cbcSMatt Macy  * some ambiguous state.
5783eda14cbcSMatt Macy  */
5784eda14cbcSMatt Macy static int
5785a0b956f5SMartin Matuska spa_open_common(const char *pool, spa_t **spapp, const void *tag,
5786a0b956f5SMartin Matuska     nvlist_t *nvpolicy, nvlist_t **config)
5787eda14cbcSMatt Macy {
5788eda14cbcSMatt Macy 	spa_t *spa;
5789eda14cbcSMatt Macy 	spa_load_state_t state = SPA_LOAD_OPEN;
5790eda14cbcSMatt Macy 	int error;
5791eda14cbcSMatt Macy 	int locked = B_FALSE;
5792eda14cbcSMatt Macy 	int firstopen = B_FALSE;
5793eda14cbcSMatt Macy 
5794eda14cbcSMatt Macy 	*spapp = NULL;
5795eda14cbcSMatt Macy 
5796eda14cbcSMatt Macy 	/*
5797eda14cbcSMatt Macy 	 * As disgusting as this is, we need to support recursive calls to this
5798eda14cbcSMatt Macy 	 * function because dsl_dir_open() is called during spa_load(), and ends
5799eda14cbcSMatt Macy 	 * up calling spa_open() again.  The real fix is to figure out how to
5800eda14cbcSMatt Macy 	 * avoid dsl_dir_open() calling this in the first place.
5801eda14cbcSMatt Macy 	 */
5802eda14cbcSMatt Macy 	if (MUTEX_NOT_HELD(&spa_namespace_lock)) {
5803eda14cbcSMatt Macy 		mutex_enter(&spa_namespace_lock);
5804eda14cbcSMatt Macy 		locked = B_TRUE;
5805eda14cbcSMatt Macy 	}
5806eda14cbcSMatt Macy 
5807eda14cbcSMatt Macy 	if ((spa = spa_lookup(pool)) == NULL) {
5808eda14cbcSMatt Macy 		if (locked)
5809eda14cbcSMatt Macy 			mutex_exit(&spa_namespace_lock);
5810eda14cbcSMatt Macy 		return (SET_ERROR(ENOENT));
5811eda14cbcSMatt Macy 	}
5812eda14cbcSMatt Macy 
5813eda14cbcSMatt Macy 	if (spa->spa_state == POOL_STATE_UNINITIALIZED) {
5814eda14cbcSMatt Macy 		zpool_load_policy_t policy;
5815eda14cbcSMatt Macy 
5816eda14cbcSMatt Macy 		firstopen = B_TRUE;
5817eda14cbcSMatt Macy 
5818eda14cbcSMatt Macy 		zpool_get_load_policy(nvpolicy ? nvpolicy : spa->spa_config,
5819eda14cbcSMatt Macy 		    &policy);
5820eda14cbcSMatt Macy 		if (policy.zlp_rewind & ZPOOL_DO_REWIND)
5821eda14cbcSMatt Macy 			state = SPA_LOAD_RECOVER;
5822eda14cbcSMatt Macy 
5823eda14cbcSMatt Macy 		spa_activate(spa, spa_mode_global);
5824eda14cbcSMatt Macy 
5825eda14cbcSMatt Macy 		if (state != SPA_LOAD_RECOVER)
5826eda14cbcSMatt Macy 			spa->spa_last_ubsync_txg = spa->spa_load_txg = 0;
5827eda14cbcSMatt Macy 		spa->spa_config_source = SPA_CONFIG_SRC_CACHEFILE;
5828eda14cbcSMatt Macy 
5829eda14cbcSMatt Macy 		zfs_dbgmsg("spa_open_common: opening %s", pool);
5830eda14cbcSMatt Macy 		error = spa_load_best(spa, state, policy.zlp_txg,
5831eda14cbcSMatt Macy 		    policy.zlp_rewind);
5832eda14cbcSMatt Macy 
5833eda14cbcSMatt Macy 		if (error == EBADF) {
5834eda14cbcSMatt Macy 			/*
5835eda14cbcSMatt Macy 			 * If vdev_validate() returns failure (indicated by
5836eda14cbcSMatt Macy 			 * EBADF), it means that one of the vdevs indicates
5837eda14cbcSMatt Macy 			 * that the pool has been exported or destroyed.  If
5838eda14cbcSMatt Macy 			 * this is the case, the config cache is out of sync and
5839eda14cbcSMatt Macy 			 * we should remove the pool from the namespace.
5840eda14cbcSMatt Macy 			 */
5841eda14cbcSMatt Macy 			spa_unload(spa);
5842eda14cbcSMatt Macy 			spa_deactivate(spa);
5843be181ee2SMartin Matuska 			spa_write_cachefile(spa, B_TRUE, B_TRUE, B_FALSE);
5844eda14cbcSMatt Macy 			spa_remove(spa);
5845eda14cbcSMatt Macy 			if (locked)
5846eda14cbcSMatt Macy 				mutex_exit(&spa_namespace_lock);
5847eda14cbcSMatt Macy 			return (SET_ERROR(ENOENT));
5848eda14cbcSMatt Macy 		}
5849eda14cbcSMatt Macy 
5850eda14cbcSMatt Macy 		if (error) {
5851eda14cbcSMatt Macy 			/*
5852eda14cbcSMatt Macy 			 * We can't open the pool, but we still have useful
5853eda14cbcSMatt Macy 			 * information: the state of each vdev after the
5854eda14cbcSMatt Macy 			 * attempted vdev_open().  Return this to the user.
5855eda14cbcSMatt Macy 			 */
5856eda14cbcSMatt Macy 			if (config != NULL && spa->spa_config) {
585781b22a98SMartin Matuska 				*config = fnvlist_dup(spa->spa_config);
585881b22a98SMartin Matuska 				fnvlist_add_nvlist(*config,
5859eda14cbcSMatt Macy 				    ZPOOL_CONFIG_LOAD_INFO,
586081b22a98SMartin Matuska 				    spa->spa_load_info);
5861eda14cbcSMatt Macy 			}
5862eda14cbcSMatt Macy 			spa_unload(spa);
5863eda14cbcSMatt Macy 			spa_deactivate(spa);
5864eda14cbcSMatt Macy 			spa->spa_last_open_failed = error;
5865eda14cbcSMatt Macy 			if (locked)
5866eda14cbcSMatt Macy 				mutex_exit(&spa_namespace_lock);
5867eda14cbcSMatt Macy 			*spapp = NULL;
5868eda14cbcSMatt Macy 			return (error);
5869eda14cbcSMatt Macy 		}
5870eda14cbcSMatt Macy 	}
5871eda14cbcSMatt Macy 
5872eda14cbcSMatt Macy 	spa_open_ref(spa, tag);
5873eda14cbcSMatt Macy 
5874eda14cbcSMatt Macy 	if (config != NULL)
5875eda14cbcSMatt Macy 		*config = spa_config_generate(spa, NULL, -1ULL, B_TRUE);
5876eda14cbcSMatt Macy 
5877eda14cbcSMatt Macy 	/*
5878eda14cbcSMatt Macy 	 * If we've recovered the pool, pass back any information we
5879eda14cbcSMatt Macy 	 * gathered while doing the load.
5880eda14cbcSMatt Macy 	 */
5881dbd5678dSMartin Matuska 	if (state == SPA_LOAD_RECOVER && config != NULL) {
588281b22a98SMartin Matuska 		fnvlist_add_nvlist(*config, ZPOOL_CONFIG_LOAD_INFO,
588381b22a98SMartin Matuska 		    spa->spa_load_info);
5884eda14cbcSMatt Macy 	}
5885eda14cbcSMatt Macy 
5886eda14cbcSMatt Macy 	if (locked) {
5887eda14cbcSMatt Macy 		spa->spa_last_open_failed = 0;
5888eda14cbcSMatt Macy 		spa->spa_last_ubsync_txg = 0;
5889eda14cbcSMatt Macy 		spa->spa_load_txg = 0;
5890eda14cbcSMatt Macy 		mutex_exit(&spa_namespace_lock);
5891eda14cbcSMatt Macy 	}
5892eda14cbcSMatt Macy 
5893eda14cbcSMatt Macy 	if (firstopen)
5894eda14cbcSMatt Macy 		zvol_create_minors_recursive(spa_name(spa));
5895eda14cbcSMatt Macy 
5896eda14cbcSMatt Macy 	*spapp = spa;
5897eda14cbcSMatt Macy 
5898eda14cbcSMatt Macy 	return (0);
5899eda14cbcSMatt Macy }
5900eda14cbcSMatt Macy 
5901eda14cbcSMatt Macy int
5902a0b956f5SMartin Matuska spa_open_rewind(const char *name, spa_t **spapp, const void *tag,
5903a0b956f5SMartin Matuska     nvlist_t *policy, nvlist_t **config)
5904eda14cbcSMatt Macy {
5905eda14cbcSMatt Macy 	return (spa_open_common(name, spapp, tag, policy, config));
5906eda14cbcSMatt Macy }
5907eda14cbcSMatt Macy 
5908eda14cbcSMatt Macy int
5909a0b956f5SMartin Matuska spa_open(const char *name, spa_t **spapp, const void *tag)
5910eda14cbcSMatt Macy {
5911eda14cbcSMatt Macy 	return (spa_open_common(name, spapp, tag, NULL, NULL));
5912eda14cbcSMatt Macy }
5913eda14cbcSMatt Macy 
5914eda14cbcSMatt Macy /*
5915eda14cbcSMatt Macy  * Look up the given spa_t, incrementing the inject count in the process,
5916eda14cbcSMatt Macy  * preventing it from being exported or destroyed.
5917eda14cbcSMatt Macy  */
5918eda14cbcSMatt Macy spa_t *
5919eda14cbcSMatt Macy spa_inject_addref(char *name)
5920eda14cbcSMatt Macy {
5921eda14cbcSMatt Macy 	spa_t *spa;
5922eda14cbcSMatt Macy 
5923eda14cbcSMatt Macy 	mutex_enter(&spa_namespace_lock);
5924eda14cbcSMatt Macy 	if ((spa = spa_lookup(name)) == NULL) {
5925eda14cbcSMatt Macy 		mutex_exit(&spa_namespace_lock);
5926eda14cbcSMatt Macy 		return (NULL);
5927eda14cbcSMatt Macy 	}
5928eda14cbcSMatt Macy 	spa->spa_inject_ref++;
5929eda14cbcSMatt Macy 	mutex_exit(&spa_namespace_lock);
5930eda14cbcSMatt Macy 
5931eda14cbcSMatt Macy 	return (spa);
5932eda14cbcSMatt Macy }
5933eda14cbcSMatt Macy 
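/*
 * Drop a reference taken by spa_inject_addref().
 */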
5934eda14cbcSMatt Macy void
5935eda14cbcSMatt Macy spa_inject_delref(spa_t *spa)
5936eda14cbcSMatt Macy {
5937eda14cbcSMatt Macy 	mutex_enter(&spa_namespace_lock);
5938eda14cbcSMatt Macy 	spa->spa_inject_ref--;
5939eda14cbcSMatt Macy 	mutex_exit(&spa_namespace_lock);
5940eda14cbcSMatt Macy }
5941eda14cbcSMatt Macy 
5942eda14cbcSMatt Macy /*
5943eda14cbcSMatt Macy  * Add spare device information to the nvlist.
5944eda14cbcSMatt Macy  */
5945eda14cbcSMatt Macy static void
5946eda14cbcSMatt Macy spa_add_spares(spa_t *spa, nvlist_t *config)
5947eda14cbcSMatt Macy {
5948eda14cbcSMatt Macy 	nvlist_t **spares;
5949eda14cbcSMatt Macy 	uint_t i, nspares;
5950eda14cbcSMatt Macy 	nvlist_t *nvroot;
5951eda14cbcSMatt Macy 	uint64_t guid;
5952eda14cbcSMatt Macy 	vdev_stat_t *vs;
5953eda14cbcSMatt Macy 	uint_t vsc;
5954eda14cbcSMatt Macy 	uint64_t pool;
5955eda14cbcSMatt Macy 
5956eda14cbcSMatt Macy 	ASSERT(spa_config_held(spa, SCL_CONFIG, RW_READER));
5957eda14cbcSMatt Macy 
5958eda14cbcSMatt Macy 	if (spa->spa_spares.sav_count == 0)
5959eda14cbcSMatt Macy 		return;
5960eda14cbcSMatt Macy 
596181b22a98SMartin Matuska 	nvroot = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE);
596281b22a98SMartin Matuska 	VERIFY0(nvlist_lookup_nvlist_array(spa->spa_spares.sav_config,
596381b22a98SMartin Matuska 	    ZPOOL_CONFIG_SPARES, &spares, &nspares));
5964eda14cbcSMatt Macy 	if (nspares != 0) {
5965681ce946SMartin Matuska 		fnvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
5966681ce946SMartin Matuska 		    (const nvlist_t * const *)spares, nspares);
596781b22a98SMartin Matuska 		VERIFY0(nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
596881b22a98SMartin Matuska 		    &spares, &nspares));
5969eda14cbcSMatt Macy 
5970eda14cbcSMatt Macy 		/*
5971eda14cbcSMatt Macy 		 * Go through and find any spares which have since been
5972eda14cbcSMatt Macy 		 * repurposed as an active spare.  If this is the case, update
5973eda14cbcSMatt Macy 		 * their status appropriately.
5974eda14cbcSMatt Macy 		 */
5975eda14cbcSMatt Macy 		for (i = 0; i < nspares; i++) {
597681b22a98SMartin Matuska 			guid = fnvlist_lookup_uint64(spares[i],
597781b22a98SMartin Matuska 			    ZPOOL_CONFIG_GUID);
59782a58b312SMartin Matuska 			VERIFY0(nvlist_lookup_uint64_array(spares[i],
59792a58b312SMartin Matuska 			    ZPOOL_CONFIG_VDEV_STATS, (uint64_t **)&vs, &vsc));
5980eda14cbcSMatt Macy 			if (spa_spare_exists(guid, &pool, NULL) &&
5981eda14cbcSMatt Macy 			    pool != 0ULL) {
5982eda14cbcSMatt Macy 				vs->vs_state = VDEV_STATE_CANT_OPEN;
5983eda14cbcSMatt Macy 				vs->vs_aux = VDEV_AUX_SPARED;
59842a58b312SMartin Matuska 			} else {
59852a58b312SMartin Matuska 				vs->vs_state =
59862a58b312SMartin Matuska 				    spa->spa_spares.sav_vdevs[i]->vdev_state;
5987eda14cbcSMatt Macy 			}
5988eda14cbcSMatt Macy 		}
5989eda14cbcSMatt Macy 	}
5990eda14cbcSMatt Macy }
5991eda14cbcSMatt Macy 
5992eda14cbcSMatt Macy /*
5993eda14cbcSMatt Macy  * Add l2cache device information to the nvlist, including vdev stats.
5994eda14cbcSMatt Macy  */
5995eda14cbcSMatt Macy static void
5996eda14cbcSMatt Macy spa_add_l2cache(spa_t *spa, nvlist_t *config)
5997eda14cbcSMatt Macy {
5998eda14cbcSMatt Macy 	nvlist_t **l2cache;
5999eda14cbcSMatt Macy 	uint_t i, j, nl2cache;
6000eda14cbcSMatt Macy 	nvlist_t *nvroot;
6001eda14cbcSMatt Macy 	uint64_t guid;
6002eda14cbcSMatt Macy 	vdev_t *vd;
6003eda14cbcSMatt Macy 	vdev_stat_t *vs;
6004eda14cbcSMatt Macy 	uint_t vsc;
6005eda14cbcSMatt Macy 
6006eda14cbcSMatt Macy 	ASSERT(spa_config_held(spa, SCL_CONFIG, RW_READER));
6007eda14cbcSMatt Macy 
6008eda14cbcSMatt Macy 	if (spa->spa_l2cache.sav_count == 0)
6009eda14cbcSMatt Macy 		return;
6010eda14cbcSMatt Macy 
601181b22a98SMartin Matuska 	nvroot = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE);
601281b22a98SMartin Matuska 	VERIFY0(nvlist_lookup_nvlist_array(spa->spa_l2cache.sav_config,
601381b22a98SMartin Matuska 	    ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache));
6014eda14cbcSMatt Macy 	if (nl2cache != 0) {
6015681ce946SMartin Matuska 		fnvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
6016681ce946SMartin Matuska 		    (const nvlist_t * const *)l2cache, nl2cache);
601781b22a98SMartin Matuska 		VERIFY0(nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
601881b22a98SMartin Matuska 		    &l2cache, &nl2cache));
6019eda14cbcSMatt Macy 
6020eda14cbcSMatt Macy 		/*
6021eda14cbcSMatt Macy 		 * Update level 2 cache device stats.
6022eda14cbcSMatt Macy 		 */
6023eda14cbcSMatt Macy 
6024eda14cbcSMatt Macy 		for (i = 0; i < nl2cache; i++) {
602581b22a98SMartin Matuska 			guid = fnvlist_lookup_uint64(l2cache[i],
602681b22a98SMartin Matuska 			    ZPOOL_CONFIG_GUID);
6027eda14cbcSMatt Macy 
6028eda14cbcSMatt Macy 			vd = NULL;
6029eda14cbcSMatt Macy 			for (j = 0; j < spa->spa_l2cache.sav_count; j++) {
6030eda14cbcSMatt Macy 				if (guid ==
6031eda14cbcSMatt Macy 				    spa->spa_l2cache.sav_vdevs[j]->vdev_guid) {
6032eda14cbcSMatt Macy 					vd = spa->spa_l2cache.sav_vdevs[j];
6033eda14cbcSMatt Macy 					break;
6034eda14cbcSMatt Macy 				}
6035eda14cbcSMatt Macy 			}
6036eda14cbcSMatt Macy 			ASSERT(vd != NULL);
6037eda14cbcSMatt Macy 
603881b22a98SMartin Matuska 			VERIFY0(nvlist_lookup_uint64_array(l2cache[i],
603981b22a98SMartin Matuska 			    ZPOOL_CONFIG_VDEV_STATS, (uint64_t **)&vs, &vsc));
6040eda14cbcSMatt Macy 			vdev_get_stats(vd, vs);
6041eda14cbcSMatt Macy 			vdev_config_generate_stats(vd, l2cache[i]);
6042eda14cbcSMatt Macy 
6043eda14cbcSMatt Macy 		}
6044eda14cbcSMatt Macy 	}
6045eda14cbcSMatt Macy }
6046eda14cbcSMatt Macy 
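/*
 * Read feature reference counts from the MOS for-read and for-write feature
 * objects and add them to the 'features' nvlist.
 */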
6047eda14cbcSMatt Macy static void
6048eda14cbcSMatt Macy spa_feature_stats_from_disk(spa_t *spa, nvlist_t *features)
6049eda14cbcSMatt Macy {
6050eda14cbcSMatt Macy 	zap_cursor_t zc;
6051eda14cbcSMatt Macy 	zap_attribute_t za;
6052eda14cbcSMatt Macy 
6053eda14cbcSMatt Macy 	if (spa->spa_feat_for_read_obj != 0) {
6054eda14cbcSMatt Macy 		for (zap_cursor_init(&zc, spa->spa_meta_objset,
6055eda14cbcSMatt Macy 		    spa->spa_feat_for_read_obj);
6056eda14cbcSMatt Macy 		    zap_cursor_retrieve(&zc, &za) == 0;
6057eda14cbcSMatt Macy 		    zap_cursor_advance(&zc)) {
6058eda14cbcSMatt Macy 			ASSERT(za.za_integer_length == sizeof (uint64_t) &&
6059eda14cbcSMatt Macy 			    za.za_num_integers == 1);
6060eda14cbcSMatt Macy 			VERIFY0(nvlist_add_uint64(features, za.za_name,
6061eda14cbcSMatt Macy 			    za.za_first_integer));
6062eda14cbcSMatt Macy 		}
6063eda14cbcSMatt Macy 		zap_cursor_fini(&zc);
6064eda14cbcSMatt Macy 	}
6065eda14cbcSMatt Macy 
6066eda14cbcSMatt Macy 	if (spa->spa_feat_for_write_obj != 0) {
6067eda14cbcSMatt Macy 		for (zap_cursor_init(&zc, spa->spa_meta_objset,
6068eda14cbcSMatt Macy 		    spa->spa_feat_for_write_obj);
6069eda14cbcSMatt Macy 		    zap_cursor_retrieve(&zc, &za) == 0;
6070eda14cbcSMatt Macy 		    zap_cursor_advance(&zc)) {
6071eda14cbcSMatt Macy 			ASSERT(za.za_integer_length == sizeof (uint64_t) &&
6072eda14cbcSMatt Macy 			    za.za_num_integers == 1);
6073eda14cbcSMatt Macy 			VERIFY0(nvlist_add_uint64(features, za.za_name,
6074eda14cbcSMatt Macy 			    za.za_first_integer));
6075eda14cbcSMatt Macy 		}
6076eda14cbcSMatt Macy 		zap_cursor_fini(&zc);
6077eda14cbcSMatt Macy 	}
6078eda14cbcSMatt Macy }
6079eda14cbcSMatt Macy 
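/*
 * Fill in 'features' from the in-core feature reference counts, avoiding
 * any I/O to the MOS.
 */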
6080eda14cbcSMatt Macy static void
6081eda14cbcSMatt Macy spa_feature_stats_from_cache(spa_t *spa, nvlist_t *features)
6082eda14cbcSMatt Macy {
6083eda14cbcSMatt Macy 	int i;
6084eda14cbcSMatt Macy 
6085eda14cbcSMatt Macy 	for (i = 0; i < SPA_FEATURES; i++) {
6086eda14cbcSMatt Macy 		zfeature_info_t feature = spa_feature_table[i];
6087eda14cbcSMatt Macy 		uint64_t refcount;
6088eda14cbcSMatt Macy 
6089eda14cbcSMatt Macy 		if (feature_get_refcount(spa, &feature, &refcount) != 0)
6090eda14cbcSMatt Macy 			continue;
6091eda14cbcSMatt Macy 
6092eda14cbcSMatt Macy 		VERIFY0(nvlist_add_uint64(features, feature.fi_guid, refcount));
6093eda14cbcSMatt Macy 	}
6094eda14cbcSMatt Macy }
6095eda14cbcSMatt Macy 
6096eda14cbcSMatt Macy /*
6097eda14cbcSMatt Macy  * Store a list of pool features and their reference counts in the
6098eda14cbcSMatt Macy  * config.
6099eda14cbcSMatt Macy  *
6100eda14cbcSMatt Macy  * The first time this is called on a spa, allocate a new nvlist, fetch
6101eda14cbcSMatt Macy  * the pool features and reference counts from disk, then save the list
6102eda14cbcSMatt Macy  * in the spa. In subsequent calls on the same spa use the saved nvlist
6103eda14cbcSMatt Macy  * in the spa. In subsequent calls on the same spa, use the saved nvlist
6104eda14cbcSMatt Macy  * ensures we don't block here on I/O on a suspended pool so 'zpool
6105eda14cbcSMatt Macy  * clear' can resume the pool.
6106eda14cbcSMatt Macy  */
6107eda14cbcSMatt Macy static void
6108eda14cbcSMatt Macy spa_add_feature_stats(spa_t *spa, nvlist_t *config)
6109eda14cbcSMatt Macy {
6110eda14cbcSMatt Macy 	nvlist_t *features;
6111eda14cbcSMatt Macy 
6112eda14cbcSMatt Macy 	ASSERT(spa_config_held(spa, SCL_CONFIG, RW_READER));
6113eda14cbcSMatt Macy 
6114eda14cbcSMatt Macy 	mutex_enter(&spa->spa_feat_stats_lock);
6115eda14cbcSMatt Macy 	features = spa->spa_feat_stats;
6116eda14cbcSMatt Macy 
6117eda14cbcSMatt Macy 	if (features != NULL) {
6118eda14cbcSMatt Macy 		spa_feature_stats_from_cache(spa, features);
6119eda14cbcSMatt Macy 	} else {
6120eda14cbcSMatt Macy 		VERIFY0(nvlist_alloc(&features, NV_UNIQUE_NAME, KM_SLEEP));
6121eda14cbcSMatt Macy 		spa->spa_feat_stats = features;
6122eda14cbcSMatt Macy 		spa_feature_stats_from_disk(spa, features);
6123eda14cbcSMatt Macy 	}
6124eda14cbcSMatt Macy 
6125eda14cbcSMatt Macy 	VERIFY0(nvlist_add_nvlist(config, ZPOOL_CONFIG_FEATURE_STATS,
6126eda14cbcSMatt Macy 	    features));
6127eda14cbcSMatt Macy 
6128eda14cbcSMatt Macy 	mutex_exit(&spa->spa_feat_stats_lock);
6129eda14cbcSMatt Macy }
6130eda14cbcSMatt Macy 
6131eda14cbcSMatt Macy int
6132eda14cbcSMatt Macy spa_get_stats(const char *name, nvlist_t **config,
6133eda14cbcSMatt Macy     char *altroot, size_t buflen)
6134eda14cbcSMatt Macy {
6135eda14cbcSMatt Macy 	int error;
6136eda14cbcSMatt Macy 	spa_t *spa;
6137eda14cbcSMatt Macy 
6138eda14cbcSMatt Macy 	*config = NULL;
6139eda14cbcSMatt Macy 	error = spa_open_common(name, &spa, FTAG, NULL, config);
6140eda14cbcSMatt Macy 
6141eda14cbcSMatt Macy 	if (spa != NULL) {
6142eda14cbcSMatt Macy 		/*
6143eda14cbcSMatt Macy 		 * This still leaves a window of inconsistency where the spares
6144eda14cbcSMatt Macy 		 * or l2cache devices could change and the config would be
6145eda14cbcSMatt Macy 		 * self-inconsistent.
6146eda14cbcSMatt Macy 		 */
6147eda14cbcSMatt Macy 		spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
6148eda14cbcSMatt Macy 
6149eda14cbcSMatt Macy 		if (*config != NULL) {
6150eda14cbcSMatt Macy 			uint64_t loadtimes[2];
6151eda14cbcSMatt Macy 
6152eda14cbcSMatt Macy 			loadtimes[0] = spa->spa_loaded_ts.tv_sec;
6153eda14cbcSMatt Macy 			loadtimes[1] = spa->spa_loaded_ts.tv_nsec;
615481b22a98SMartin Matuska 			fnvlist_add_uint64_array(*config,
615581b22a98SMartin Matuska 			    ZPOOL_CONFIG_LOADED_TIME, loadtimes, 2);
6156eda14cbcSMatt Macy 
615781b22a98SMartin Matuska 			fnvlist_add_uint64(*config,
6158eda14cbcSMatt Macy 			    ZPOOL_CONFIG_ERRCOUNT,
615915f0b8c3SMartin Matuska 			    spa_approx_errlog_size(spa));
6160eda14cbcSMatt Macy 
6161eda14cbcSMatt Macy 			if (spa_suspended(spa)) {
616281b22a98SMartin Matuska 				fnvlist_add_uint64(*config,
6163eda14cbcSMatt Macy 				    ZPOOL_CONFIG_SUSPENDED,
616481b22a98SMartin Matuska 				    spa->spa_failmode);
616581b22a98SMartin Matuska 				fnvlist_add_uint64(*config,
6166eda14cbcSMatt Macy 				    ZPOOL_CONFIG_SUSPENDED_REASON,
616781b22a98SMartin Matuska 				    spa->spa_suspended);
6168eda14cbcSMatt Macy 			}
6169eda14cbcSMatt Macy 
6170eda14cbcSMatt Macy 			spa_add_spares(spa, *config);
6171eda14cbcSMatt Macy 			spa_add_l2cache(spa, *config);
6172eda14cbcSMatt Macy 			spa_add_feature_stats(spa, *config);
6173eda14cbcSMatt Macy 		}
6174eda14cbcSMatt Macy 	}
6175eda14cbcSMatt Macy 
6176eda14cbcSMatt Macy 	/*
6177eda14cbcSMatt Macy 	 * We want to get the alternate root even for faulted pools, so we cheat
6178eda14cbcSMatt Macy 	 * and call spa_lookup() directly.
6179eda14cbcSMatt Macy 	 */
6180eda14cbcSMatt Macy 	if (altroot) {
6181eda14cbcSMatt Macy 		if (spa == NULL) {
6182eda14cbcSMatt Macy 			mutex_enter(&spa_namespace_lock);
6183eda14cbcSMatt Macy 			spa = spa_lookup(name);
6184eda14cbcSMatt Macy 			if (spa)
6185eda14cbcSMatt Macy 				spa_altroot(spa, altroot, buflen);
6186eda14cbcSMatt Macy 			else
6187eda14cbcSMatt Macy 				altroot[0] = '\0';
6188eda14cbcSMatt Macy 			spa = NULL;
6189eda14cbcSMatt Macy 			mutex_exit(&spa_namespace_lock);
6190eda14cbcSMatt Macy 		} else {
6191eda14cbcSMatt Macy 			spa_altroot(spa, altroot, buflen);
6192eda14cbcSMatt Macy 		}
6193eda14cbcSMatt Macy 	}
6194eda14cbcSMatt Macy 
6195eda14cbcSMatt Macy 	if (spa != NULL) {
6196eda14cbcSMatt Macy 		spa_config_exit(spa, SCL_CONFIG, FTAG);
6197eda14cbcSMatt Macy 		spa_close(spa, FTAG);
6198eda14cbcSMatt Macy 	}
6199eda14cbcSMatt Macy 
6200eda14cbcSMatt Macy 	return (error);
6201eda14cbcSMatt Macy }
6202eda14cbcSMatt Macy 
6203eda14cbcSMatt Macy /*
6204eda14cbcSMatt Macy  * Validate that the auxiliary device array is well formed.  We must have an
6205eda14cbcSMatt Macy  * array of nvlists, each of which describes a valid leaf vdev.  If this is an
6206eda14cbcSMatt Macy  * import (mode is VDEV_ALLOC_SPARE), then we allow corrupted spares to be
6207eda14cbcSMatt Macy  * specified, as long as they are well-formed.
6208eda14cbcSMatt Macy  */
6209eda14cbcSMatt Macy static int
6210eda14cbcSMatt Macy spa_validate_aux_devs(spa_t *spa, nvlist_t *nvroot, uint64_t crtxg, int mode,
6211eda14cbcSMatt Macy     spa_aux_vdev_t *sav, const char *config, uint64_t version,
6212eda14cbcSMatt Macy     vdev_labeltype_t label)
6213eda14cbcSMatt Macy {
6214eda14cbcSMatt Macy 	nvlist_t **dev;
6215eda14cbcSMatt Macy 	uint_t i, ndev;
6216eda14cbcSMatt Macy 	vdev_t *vd;
6217eda14cbcSMatt Macy 	int error;
6218eda14cbcSMatt Macy 
6219eda14cbcSMatt Macy 	ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
6220eda14cbcSMatt Macy 
6221eda14cbcSMatt Macy 	/*
6222eda14cbcSMatt Macy 	 * It's acceptable to have no devs specified.
6223eda14cbcSMatt Macy 	 */
6224eda14cbcSMatt Macy 	if (nvlist_lookup_nvlist_array(nvroot, config, &dev, &ndev) != 0)
6225eda14cbcSMatt Macy 		return (0);
6226eda14cbcSMatt Macy 
6227eda14cbcSMatt Macy 	if (ndev == 0)
6228eda14cbcSMatt Macy 		return (SET_ERROR(EINVAL));
6229eda14cbcSMatt Macy 
6230eda14cbcSMatt Macy 	/*
6231eda14cbcSMatt Macy 	 * Make sure the pool is formatted with a version that supports this
6232eda14cbcSMatt Macy 	 * device type.
6233eda14cbcSMatt Macy 	 */
6234eda14cbcSMatt Macy 	if (spa_version(spa) < version)
6235eda14cbcSMatt Macy 		return (SET_ERROR(ENOTSUP));
6236eda14cbcSMatt Macy 
6237eda14cbcSMatt Macy 	/*
6238eda14cbcSMatt Macy 	 * Set the pending device list so we correctly handle device in-use
6239eda14cbcSMatt Macy 	 * checking.
6240eda14cbcSMatt Macy 	 */
6241eda14cbcSMatt Macy 	sav->sav_pending = dev;
6242eda14cbcSMatt Macy 	sav->sav_npending = ndev;
6243eda14cbcSMatt Macy 
6244eda14cbcSMatt Macy 	for (i = 0; i < ndev; i++) {
6245eda14cbcSMatt Macy 		if ((error = spa_config_parse(spa, &vd, dev[i], NULL, 0,
6246eda14cbcSMatt Macy 		    mode)) != 0)
6247eda14cbcSMatt Macy 			goto out;
6248eda14cbcSMatt Macy 
6249eda14cbcSMatt Macy 		if (!vd->vdev_ops->vdev_op_leaf) {
6250eda14cbcSMatt Macy 			vdev_free(vd);
6251eda14cbcSMatt Macy 			error = SET_ERROR(EINVAL);
6252eda14cbcSMatt Macy 			goto out;
6253eda14cbcSMatt Macy 		}
6254eda14cbcSMatt Macy 
6255eda14cbcSMatt Macy 		vd->vdev_top = vd;
6256eda14cbcSMatt Macy 
6257eda14cbcSMatt Macy 		if ((error = vdev_open(vd)) == 0 &&
6258eda14cbcSMatt Macy 		    (error = vdev_label_init(vd, crtxg, label)) == 0) {
625981b22a98SMartin Matuska 			fnvlist_add_uint64(dev[i], ZPOOL_CONFIG_GUID,
626081b22a98SMartin Matuska 			    vd->vdev_guid);
6261eda14cbcSMatt Macy 		}
6262eda14cbcSMatt Macy 
6263eda14cbcSMatt Macy 		vdev_free(vd);
6264eda14cbcSMatt Macy 
6265eda14cbcSMatt Macy 		if (error &&
6266eda14cbcSMatt Macy 		    (mode != VDEV_ALLOC_SPARE && mode != VDEV_ALLOC_L2CACHE))
6267eda14cbcSMatt Macy 			goto out;
6268eda14cbcSMatt Macy 		else
6269eda14cbcSMatt Macy 			error = 0;
6270eda14cbcSMatt Macy 	}
6271eda14cbcSMatt Macy 
6272eda14cbcSMatt Macy out:
6273eda14cbcSMatt Macy 	sav->sav_pending = NULL;
6274eda14cbcSMatt Macy 	sav->sav_npending = 0;
6275eda14cbcSMatt Macy 	return (error);
6276eda14cbcSMatt Macy }
6277eda14cbcSMatt Macy 
6278eda14cbcSMatt Macy static int
6279eda14cbcSMatt Macy spa_validate_aux(spa_t *spa, nvlist_t *nvroot, uint64_t crtxg, int mode)
6280eda14cbcSMatt Macy {
6281eda14cbcSMatt Macy 	int error;
6282eda14cbcSMatt Macy 
6283eda14cbcSMatt Macy 	ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
6284eda14cbcSMatt Macy 
6285eda14cbcSMatt Macy 	if ((error = spa_validate_aux_devs(spa, nvroot, crtxg, mode,
6286eda14cbcSMatt Macy 	    &spa->spa_spares, ZPOOL_CONFIG_SPARES, SPA_VERSION_SPARES,
6287eda14cbcSMatt Macy 	    VDEV_LABEL_SPARE)) != 0) {
6288eda14cbcSMatt Macy 		return (error);
6289eda14cbcSMatt Macy 	}
6290eda14cbcSMatt Macy 
6291eda14cbcSMatt Macy 	return (spa_validate_aux_devs(spa, nvroot, crtxg, mode,
6292eda14cbcSMatt Macy 	    &spa->spa_l2cache, ZPOOL_CONFIG_L2CACHE, SPA_VERSION_L2CACHE,
6293eda14cbcSMatt Macy 	    VDEV_LABEL_L2CACHE));
6294eda14cbcSMatt Macy }
6295eda14cbcSMatt Macy 
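/*
 * Set the list of auxiliary devices (spares or l2cache) in sav->sav_config
 * under the given config key, appending to any devices already present.
 */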
6296eda14cbcSMatt Macy static void
6297eda14cbcSMatt Macy spa_set_aux_vdevs(spa_aux_vdev_t *sav, nvlist_t **devs, int ndevs,
6298eda14cbcSMatt Macy     const char *config)
6299eda14cbcSMatt Macy {
6300eda14cbcSMatt Macy 	int i;
6301eda14cbcSMatt Macy 
6302eda14cbcSMatt Macy 	if (sav->sav_config != NULL) {
6303eda14cbcSMatt Macy 		nvlist_t **olddevs;
6304eda14cbcSMatt Macy 		uint_t oldndevs;
6305eda14cbcSMatt Macy 		nvlist_t **newdevs;
6306eda14cbcSMatt Macy 
6307eda14cbcSMatt Macy 		/*
6308eda14cbcSMatt Macy 		 * Generate new dev list by concatenating with the
6309eda14cbcSMatt Macy 		 * current dev list.
6310eda14cbcSMatt Macy 		 */
631181b22a98SMartin Matuska 		VERIFY0(nvlist_lookup_nvlist_array(sav->sav_config, config,
631281b22a98SMartin Matuska 		    &olddevs, &oldndevs));
6313eda14cbcSMatt Macy 
6314eda14cbcSMatt Macy 		newdevs = kmem_alloc(sizeof (void *) *
6315eda14cbcSMatt Macy 		    (ndevs + oldndevs), KM_SLEEP);
6316eda14cbcSMatt Macy 		for (i = 0; i < oldndevs; i++)
631781b22a98SMartin Matuska 			newdevs[i] = fnvlist_dup(olddevs[i]);
6318eda14cbcSMatt Macy 		for (i = 0; i < ndevs; i++)
631981b22a98SMartin Matuska 			newdevs[i + oldndevs] = fnvlist_dup(devs[i]);
6320eda14cbcSMatt Macy 
632181b22a98SMartin Matuska 		fnvlist_remove(sav->sav_config, config);
6322eda14cbcSMatt Macy 
6323681ce946SMartin Matuska 		fnvlist_add_nvlist_array(sav->sav_config, config,
6324681ce946SMartin Matuska 		    (const nvlist_t * const *)newdevs, ndevs + oldndevs);
6325eda14cbcSMatt Macy 		for (i = 0; i < oldndevs + ndevs; i++)
6326eda14cbcSMatt Macy 			nvlist_free(newdevs[i]);
6327eda14cbcSMatt Macy 		kmem_free(newdevs, (oldndevs + ndevs) * sizeof (void *));
6328eda14cbcSMatt Macy 	} else {
6329eda14cbcSMatt Macy 		/*
6330eda14cbcSMatt Macy 		 * Generate a new dev list.
6331eda14cbcSMatt Macy 		 */
633281b22a98SMartin Matuska 		sav->sav_config = fnvlist_alloc();
6333681ce946SMartin Matuska 		fnvlist_add_nvlist_array(sav->sav_config, config,
6334681ce946SMartin Matuska 		    (const nvlist_t * const *)devs, ndevs);
6335eda14cbcSMatt Macy 	}
6336eda14cbcSMatt Macy }
6337eda14cbcSMatt Macy 
6338eda14cbcSMatt Macy /*
6339eda14cbcSMatt Macy  * Stop and drop level 2 ARC devices
6340eda14cbcSMatt Macy  */
6341eda14cbcSMatt Macy void
6342eda14cbcSMatt Macy spa_l2cache_drop(spa_t *spa)
6343eda14cbcSMatt Macy {
6344eda14cbcSMatt Macy 	vdev_t *vd;
6345eda14cbcSMatt Macy 	int i;
6346eda14cbcSMatt Macy 	spa_aux_vdev_t *sav = &spa->spa_l2cache;
6347eda14cbcSMatt Macy 
6348eda14cbcSMatt Macy 	for (i = 0; i < sav->sav_count; i++) {
6349eda14cbcSMatt Macy 		uint64_t pool;
6350eda14cbcSMatt Macy 
6351eda14cbcSMatt Macy 		vd = sav->sav_vdevs[i];
6352eda14cbcSMatt Macy 		ASSERT(vd != NULL);
6353eda14cbcSMatt Macy 
6354eda14cbcSMatt Macy 		if (spa_l2cache_exists(vd->vdev_guid, &pool) &&
6355eda14cbcSMatt Macy 		    pool != 0ULL && l2arc_vdev_present(vd))
6356eda14cbcSMatt Macy 			l2arc_remove_vdev(vd);
6357eda14cbcSMatt Macy 	}
6358eda14cbcSMatt Macy }
6359eda14cbcSMatt Macy 
6360eda14cbcSMatt Macy /*
6361eda14cbcSMatt Macy  * Verify encryption parameters for spa creation. If we are encrypting, we must
6362eda14cbcSMatt Macy  * have the encryption feature flag enabled.
6363eda14cbcSMatt Macy  */
6364eda14cbcSMatt Macy static int
6365eda14cbcSMatt Macy spa_create_check_encryption_params(dsl_crypto_params_t *dcp,
6366eda14cbcSMatt Macy     boolean_t has_encryption)
6367eda14cbcSMatt Macy {
6368eda14cbcSMatt Macy 	if (dcp->cp_crypt != ZIO_CRYPT_OFF &&
6369eda14cbcSMatt Macy 	    dcp->cp_crypt != ZIO_CRYPT_INHERIT &&
6370eda14cbcSMatt Macy 	    !has_encryption)
6371eda14cbcSMatt Macy 		return (SET_ERROR(ENOTSUP));
6372eda14cbcSMatt Macy 
6373eda14cbcSMatt Macy 	return (dmu_objset_create_crypt_check(NULL, dcp, NULL));
6374eda14cbcSMatt Macy }
6375eda14cbcSMatt Macy 
6376eda14cbcSMatt Macy /*
6377eda14cbcSMatt Macy  * Pool Creation
6378eda14cbcSMatt Macy  */
6379eda14cbcSMatt Macy int
6380eda14cbcSMatt Macy spa_create(const char *pool, nvlist_t *nvroot, nvlist_t *props,
6381eda14cbcSMatt Macy     nvlist_t *zplprops, dsl_crypto_params_t *dcp)
6382eda14cbcSMatt Macy {
6383eda14cbcSMatt Macy 	spa_t *spa;
63842a58b312SMartin Matuska 	const char *altroot = NULL;
6385eda14cbcSMatt Macy 	vdev_t *rvd;
6386eda14cbcSMatt Macy 	dsl_pool_t *dp;
6387eda14cbcSMatt Macy 	dmu_tx_t *tx;
6388eda14cbcSMatt Macy 	int error = 0;
6389eda14cbcSMatt Macy 	uint64_t txg = TXG_INITIAL;
6390eda14cbcSMatt Macy 	nvlist_t **spares, **l2cache;
6391eda14cbcSMatt Macy 	uint_t nspares, nl2cache;
63927877fdebSMatt Macy 	uint64_t version, obj, ndraid = 0;
6393eda14cbcSMatt Macy 	boolean_t has_features;
6394eda14cbcSMatt Macy 	boolean_t has_encryption;
6395eda14cbcSMatt Macy 	boolean_t has_allocclass;
6396eda14cbcSMatt Macy 	spa_feature_t feat;
63972a58b312SMartin Matuska 	const char *feat_name;
63982a58b312SMartin Matuska 	const char *poolname;
6399eda14cbcSMatt Macy 	nvlist_t *nvl;
6400eda14cbcSMatt Macy 
6401eda14cbcSMatt Macy 	if (props == NULL ||
6402783d3ff6SMartin Matuska 	    nvlist_lookup_string(props,
6403783d3ff6SMartin Matuska 	    zpool_prop_to_name(ZPOOL_PROP_TNAME), &poolname) != 0)
6404eda14cbcSMatt Macy 		poolname = (char *)pool;
6405eda14cbcSMatt Macy 
6406eda14cbcSMatt Macy 	/*
6407eda14cbcSMatt Macy 	 * If this pool already exists, return failure.
6408eda14cbcSMatt Macy 	 */
6409eda14cbcSMatt Macy 	mutex_enter(&spa_namespace_lock);
6410eda14cbcSMatt Macy 	if (spa_lookup(poolname) != NULL) {
6411eda14cbcSMatt Macy 		mutex_exit(&spa_namespace_lock);
6412eda14cbcSMatt Macy 		return (SET_ERROR(EEXIST));
6413eda14cbcSMatt Macy 	}
6414eda14cbcSMatt Macy 
6415eda14cbcSMatt Macy 	/*
6416eda14cbcSMatt Macy 	 * Allocate a new spa_t structure.
6417eda14cbcSMatt Macy 	 */
6418eda14cbcSMatt Macy 	nvl = fnvlist_alloc();
6419eda14cbcSMatt Macy 	fnvlist_add_string(nvl, ZPOOL_CONFIG_POOL_NAME, pool);
6420eda14cbcSMatt Macy 	(void) nvlist_lookup_string(props,
6421eda14cbcSMatt Macy 	    zpool_prop_to_name(ZPOOL_PROP_ALTROOT), &altroot);
6422eda14cbcSMatt Macy 	spa = spa_add(poolname, nvl, altroot);
6423eda14cbcSMatt Macy 	fnvlist_free(nvl);
6424eda14cbcSMatt Macy 	spa_activate(spa, spa_mode_global);
6425eda14cbcSMatt Macy 
6426eda14cbcSMatt Macy 	if (props && (error = spa_prop_validate(spa, props))) {
6427eda14cbcSMatt Macy 		spa_deactivate(spa);
6428eda14cbcSMatt Macy 		spa_remove(spa);
6429eda14cbcSMatt Macy 		mutex_exit(&spa_namespace_lock);
6430eda14cbcSMatt Macy 		return (error);
6431eda14cbcSMatt Macy 	}
6432eda14cbcSMatt Macy 
6433eda14cbcSMatt Macy 	/*
6434eda14cbcSMatt Macy 	 * Temporary pool names should never be written to disk.
6435eda14cbcSMatt Macy 	 */
6436eda14cbcSMatt Macy 	if (poolname != pool)
6437eda14cbcSMatt Macy 		spa->spa_import_flags |= ZFS_IMPORT_TEMP_NAME;
6438eda14cbcSMatt Macy 
6439eda14cbcSMatt Macy 	has_features = B_FALSE;
6440eda14cbcSMatt Macy 	has_encryption = B_FALSE;
6441eda14cbcSMatt Macy 	has_allocclass = B_FALSE;
6442eda14cbcSMatt Macy 	for (nvpair_t *elem = nvlist_next_nvpair(props, NULL);
6443eda14cbcSMatt Macy 	    elem != NULL; elem = nvlist_next_nvpair(props, elem)) {
6444eda14cbcSMatt Macy 		if (zpool_prop_feature(nvpair_name(elem))) {
6445eda14cbcSMatt Macy 			has_features = B_TRUE;
6446eda14cbcSMatt Macy 
6447eda14cbcSMatt Macy 			feat_name = strchr(nvpair_name(elem), '@') + 1;
6448eda14cbcSMatt Macy 			VERIFY0(zfeature_lookup_name(feat_name, &feat));
6449eda14cbcSMatt Macy 			if (feat == SPA_FEATURE_ENCRYPTION)
6450eda14cbcSMatt Macy 				has_encryption = B_TRUE;
6451eda14cbcSMatt Macy 			if (feat == SPA_FEATURE_ALLOCATION_CLASSES)
6452eda14cbcSMatt Macy 				has_allocclass = B_TRUE;
6453eda14cbcSMatt Macy 		}
6454eda14cbcSMatt Macy 	}
6455eda14cbcSMatt Macy 
6456eda14cbcSMatt Macy 	/* verify encryption params, if they were provided */
6457eda14cbcSMatt Macy 	if (dcp != NULL) {
6458eda14cbcSMatt Macy 		error = spa_create_check_encryption_params(dcp, has_encryption);
6459eda14cbcSMatt Macy 		if (error != 0) {
6460eda14cbcSMatt Macy 			spa_deactivate(spa);
6461eda14cbcSMatt Macy 			spa_remove(spa);
6462eda14cbcSMatt Macy 			mutex_exit(&spa_namespace_lock);
6463eda14cbcSMatt Macy 			return (error);
6464eda14cbcSMatt Macy 		}
6465eda14cbcSMatt Macy 	}
6466eda14cbcSMatt Macy 	if (!has_allocclass && zfs_special_devs(nvroot, NULL)) {
6467eda14cbcSMatt Macy 		spa_deactivate(spa);
6468eda14cbcSMatt Macy 		spa_remove(spa);
6469eda14cbcSMatt Macy 		mutex_exit(&spa_namespace_lock);
6470eda14cbcSMatt Macy 		return (ENOTSUP);
6471eda14cbcSMatt Macy 	}
6472eda14cbcSMatt Macy 
6473eda14cbcSMatt Macy 	if (has_features || nvlist_lookup_uint64(props,
6474eda14cbcSMatt Macy 	    zpool_prop_to_name(ZPOOL_PROP_VERSION), &version) != 0) {
6475eda14cbcSMatt Macy 		version = SPA_VERSION;
6476eda14cbcSMatt Macy 	}
6477eda14cbcSMatt Macy 	ASSERT(SPA_VERSION_IS_SUPPORTED(version));
6478eda14cbcSMatt Macy 
6479eda14cbcSMatt Macy 	spa->spa_first_txg = txg;
6480eda14cbcSMatt Macy 	spa->spa_uberblock.ub_txg = txg - 1;
6481eda14cbcSMatt Macy 	spa->spa_uberblock.ub_version = version;
6482eda14cbcSMatt Macy 	spa->spa_ubsync = spa->spa_uberblock;
6483eda14cbcSMatt Macy 	spa->spa_load_state = SPA_LOAD_CREATE;
6484eda14cbcSMatt Macy 	spa->spa_removing_phys.sr_state = DSS_NONE;
6485eda14cbcSMatt Macy 	spa->spa_removing_phys.sr_removing_vdev = -1;
6486eda14cbcSMatt Macy 	spa->spa_removing_phys.sr_prev_indirect_vdev = -1;
6487eda14cbcSMatt Macy 	spa->spa_indirect_vdevs_loaded = B_TRUE;
6488eda14cbcSMatt Macy 
6489eda14cbcSMatt Macy 	/*
6490eda14cbcSMatt Macy 	 * Create "The Godfather" zio to hold all async IOs
6491eda14cbcSMatt Macy 	 */
6492eda14cbcSMatt Macy 	spa->spa_async_zio_root = kmem_alloc(max_ncpus * sizeof (void *),
6493eda14cbcSMatt Macy 	    KM_SLEEP);
6494eda14cbcSMatt Macy 	for (int i = 0; i < max_ncpus; i++) {
6495eda14cbcSMatt Macy 		spa->spa_async_zio_root[i] = zio_root(spa, NULL, NULL,
6496eda14cbcSMatt Macy 		    ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE |
6497eda14cbcSMatt Macy 		    ZIO_FLAG_GODFATHER);
6498eda14cbcSMatt Macy 	}
6499eda14cbcSMatt Macy 
6500eda14cbcSMatt Macy 	/*
6501eda14cbcSMatt Macy 	 * Create the root vdev.
6502eda14cbcSMatt Macy 	 */
6503eda14cbcSMatt Macy 	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
6504eda14cbcSMatt Macy 
6505eda14cbcSMatt Macy 	error = spa_config_parse(spa, &rvd, nvroot, NULL, 0, VDEV_ALLOC_ADD);
6506eda14cbcSMatt Macy 
6507eda14cbcSMatt Macy 	ASSERT(error != 0 || rvd != NULL);
6508eda14cbcSMatt Macy 	ASSERT(error != 0 || spa->spa_root_vdev == rvd);
6509eda14cbcSMatt Macy 
6510eda14cbcSMatt Macy 	if (error == 0 && !zfs_allocatable_devs(nvroot))
6511eda14cbcSMatt Macy 		error = SET_ERROR(EINVAL);
6512eda14cbcSMatt Macy 
6513eda14cbcSMatt Macy 	if (error == 0 &&
6514eda14cbcSMatt Macy 	    (error = vdev_create(rvd, txg, B_FALSE)) == 0 &&
65157877fdebSMatt Macy 	    (error = vdev_draid_spare_create(nvroot, rvd, &ndraid, 0)) == 0 &&
65167877fdebSMatt Macy 	    (error = spa_validate_aux(spa, nvroot, txg, VDEV_ALLOC_ADD)) == 0) {
6517eda14cbcSMatt Macy 		/*
6518eda14cbcSMatt Macy 		 * Instantiate the metaslab groups (this will dirty the vdevs);
6519eda14cbcSMatt Macy 		 * we can no longer error exit past this point.
6520eda14cbcSMatt Macy 		 */
6521eda14cbcSMatt Macy 		for (int c = 0; error == 0 && c < rvd->vdev_children; c++) {
6522eda14cbcSMatt Macy 			vdev_t *vd = rvd->vdev_child[c];
6523eda14cbcSMatt Macy 
6524eda14cbcSMatt Macy 			vdev_metaslab_set_size(vd);
6525eda14cbcSMatt Macy 			vdev_expand(vd, txg);
6526eda14cbcSMatt Macy 		}
6527eda14cbcSMatt Macy 	}
6528eda14cbcSMatt Macy 
6529eda14cbcSMatt Macy 	spa_config_exit(spa, SCL_ALL, FTAG);
6530eda14cbcSMatt Macy 
6531eda14cbcSMatt Macy 	if (error != 0) {
6532eda14cbcSMatt Macy 		spa_unload(spa);
6533eda14cbcSMatt Macy 		spa_deactivate(spa);
6534eda14cbcSMatt Macy 		spa_remove(spa);
6535eda14cbcSMatt Macy 		mutex_exit(&spa_namespace_lock);
6536eda14cbcSMatt Macy 		return (error);
6537eda14cbcSMatt Macy 	}
6538eda14cbcSMatt Macy 
6539eda14cbcSMatt Macy 	/*
6540eda14cbcSMatt Macy 	 * Get the list of spares, if specified.
6541eda14cbcSMatt Macy 	 */
6542eda14cbcSMatt Macy 	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
6543eda14cbcSMatt Macy 	    &spares, &nspares) == 0) {
654481b22a98SMartin Matuska 		spa->spa_spares.sav_config = fnvlist_alloc();
654581b22a98SMartin Matuska 		fnvlist_add_nvlist_array(spa->spa_spares.sav_config,
6546681ce946SMartin Matuska 		    ZPOOL_CONFIG_SPARES, (const nvlist_t * const *)spares,
6547681ce946SMartin Matuska 		    nspares);
6548eda14cbcSMatt Macy 		spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
6549eda14cbcSMatt Macy 		spa_load_spares(spa);
6550eda14cbcSMatt Macy 		spa_config_exit(spa, SCL_ALL, FTAG);
6551eda14cbcSMatt Macy 		spa->spa_spares.sav_sync = B_TRUE;
6552eda14cbcSMatt Macy 	}
6553eda14cbcSMatt Macy 
6554eda14cbcSMatt Macy 	/*
6555eda14cbcSMatt Macy 	 * Get the list of level 2 cache devices, if specified.
6556eda14cbcSMatt Macy 	 */
6557eda14cbcSMatt Macy 	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
6558eda14cbcSMatt Macy 	    &l2cache, &nl2cache) == 0) {
6559681ce946SMartin Matuska 		VERIFY0(nvlist_alloc(&spa->spa_l2cache.sav_config,
6560681ce946SMartin Matuska 		    NV_UNIQUE_NAME, KM_SLEEP));
656181b22a98SMartin Matuska 		fnvlist_add_nvlist_array(spa->spa_l2cache.sav_config,
6562681ce946SMartin Matuska 		    ZPOOL_CONFIG_L2CACHE, (const nvlist_t * const *)l2cache,
6563681ce946SMartin Matuska 		    nl2cache);
6564eda14cbcSMatt Macy 		spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
6565eda14cbcSMatt Macy 		spa_load_l2cache(spa);
6566eda14cbcSMatt Macy 		spa_config_exit(spa, SCL_ALL, FTAG);
6567eda14cbcSMatt Macy 		spa->spa_l2cache.sav_sync = B_TRUE;
6568eda14cbcSMatt Macy 	}
6569eda14cbcSMatt Macy 
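	/*
	 * Create the DSL pool, which in turn creates the MOS and the root
	 * dataset.
	 */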
6570eda14cbcSMatt Macy 	spa->spa_is_initializing = B_TRUE;
6571eda14cbcSMatt Macy 	spa->spa_dsl_pool = dp = dsl_pool_create(spa, zplprops, dcp, txg);
6572eda14cbcSMatt Macy 	spa->spa_is_initializing = B_FALSE;
6573eda14cbcSMatt Macy 
6574eda14cbcSMatt Macy 	/*
6575eda14cbcSMatt Macy 	 * Create DDTs (dedup tables).
6576eda14cbcSMatt Macy 	 */
6577eda14cbcSMatt Macy 	ddt_create(spa);
65782a58b312SMartin Matuska 	/*
65792a58b312SMartin Matuska 	 * Create BRT table and BRT table object.
65802a58b312SMartin Matuska 	 */
65812a58b312SMartin Matuska 	brt_create(spa);
6582eda14cbcSMatt Macy 
6583eda14cbcSMatt Macy 	spa_update_dspace(spa);
6584eda14cbcSMatt Macy 
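	/*
	 * The remaining on-disk setup is done in a single transaction
	 * assigned to the pool's creation txg.
	 */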
6585eda14cbcSMatt Macy 	tx = dmu_tx_create_assigned(dp, txg);
6586eda14cbcSMatt Macy 
6587eda14cbcSMatt Macy 	/*
6588eda14cbcSMatt Macy 	 * Create the pool's history object.
6589eda14cbcSMatt Macy 	 */
6590eda14cbcSMatt Macy 	if (version >= SPA_VERSION_ZPOOL_HISTORY && !spa->spa_history)
6591eda14cbcSMatt Macy 		spa_history_create_obj(spa, tx);
6592eda14cbcSMatt Macy 
6593eda14cbcSMatt Macy 	spa_event_notify(spa, NULL, NULL, ESC_ZFS_POOL_CREATE);
6594eda14cbcSMatt Macy 	spa_history_log_version(spa, "create", tx);
6595eda14cbcSMatt Macy 
6596eda14cbcSMatt Macy 	/*
6597eda14cbcSMatt Macy 	 * Create the pool config object.
6598eda14cbcSMatt Macy 	 */
6599eda14cbcSMatt Macy 	spa->spa_config_object = dmu_object_alloc(spa->spa_meta_objset,
6600eda14cbcSMatt Macy 	    DMU_OT_PACKED_NVLIST, SPA_CONFIG_BLOCKSIZE,
6601eda14cbcSMatt Macy 	    DMU_OT_PACKED_NVLIST_SIZE, sizeof (uint64_t), tx);
6602eda14cbcSMatt Macy 
6603eda14cbcSMatt Macy 	if (zap_add(spa->spa_meta_objset,
6604eda14cbcSMatt Macy 	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_CONFIG,
6605eda14cbcSMatt Macy 	    sizeof (uint64_t), 1, &spa->spa_config_object, tx) != 0) {
6606eda14cbcSMatt Macy 		cmn_err(CE_PANIC, "failed to add pool config");
6607eda14cbcSMatt Macy 	}
6608eda14cbcSMatt Macy 
6609eda14cbcSMatt Macy 	if (zap_add(spa->spa_meta_objset,
6610eda14cbcSMatt Macy 	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_CREATION_VERSION,
6611eda14cbcSMatt Macy 	    sizeof (uint64_t), 1, &version, tx) != 0) {
6612eda14cbcSMatt Macy 		cmn_err(CE_PANIC, "failed to add pool version");
6613eda14cbcSMatt Macy 	}
6614eda14cbcSMatt Macy 
6615eda14cbcSMatt Macy 	/* Newly created pools with the right version are always deflated. */
6616eda14cbcSMatt Macy 	if (version >= SPA_VERSION_RAIDZ_DEFLATE) {
6617eda14cbcSMatt Macy 		spa->spa_deflate = TRUE;
6618eda14cbcSMatt Macy 		if (zap_add(spa->spa_meta_objset,
6619eda14cbcSMatt Macy 		    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_DEFLATE,
6620eda14cbcSMatt Macy 		    sizeof (uint64_t), 1, &spa->spa_deflate, tx) != 0) {
6621eda14cbcSMatt Macy 			cmn_err(CE_PANIC, "failed to add deflate");
6622eda14cbcSMatt Macy 		}
6623eda14cbcSMatt Macy 	}
6624eda14cbcSMatt Macy 
6625eda14cbcSMatt Macy 	/*
6626eda14cbcSMatt Macy 	 * Create the deferred-free bpobj.  Turn off compression
6627eda14cbcSMatt Macy 	 * because sync-to-convergence takes longer if the blocksize
6628eda14cbcSMatt Macy 	 * keeps changing.
6629eda14cbcSMatt Macy 	 */
6630eda14cbcSMatt Macy 	obj = bpobj_alloc(spa->spa_meta_objset, 1 << 14, tx);
6631eda14cbcSMatt Macy 	dmu_object_set_compress(spa->spa_meta_objset, obj,
6632eda14cbcSMatt Macy 	    ZIO_COMPRESS_OFF, tx);
6633eda14cbcSMatt Macy 	if (zap_add(spa->spa_meta_objset,
6634eda14cbcSMatt Macy 	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_SYNC_BPOBJ,
6635eda14cbcSMatt Macy 	    sizeof (uint64_t), 1, &obj, tx) != 0) {
6636eda14cbcSMatt Macy 		cmn_err(CE_PANIC, "failed to add bpobj");
6637eda14cbcSMatt Macy 	}
6638eda14cbcSMatt Macy 	VERIFY3U(0, ==, bpobj_open(&spa->spa_deferred_bpobj,
6639eda14cbcSMatt Macy 	    spa->spa_meta_objset, obj));
6640eda14cbcSMatt Macy 
6641eda14cbcSMatt Macy 	/*
6642eda14cbcSMatt Macy 	 * Generate some random noise for salted checksums to operate on.
6643eda14cbcSMatt Macy 	 */
6644eda14cbcSMatt Macy 	(void) random_get_pseudo_bytes(spa->spa_cksum_salt.zcs_bytes,
6645eda14cbcSMatt Macy 	    sizeof (spa->spa_cksum_salt.zcs_bytes));
6646eda14cbcSMatt Macy 
6647eda14cbcSMatt Macy 	/*
6648eda14cbcSMatt Macy 	 * Set pool properties.
6649eda14cbcSMatt Macy 	 */
6650eda14cbcSMatt Macy 	spa->spa_bootfs = zpool_prop_default_numeric(ZPOOL_PROP_BOOTFS);
6651eda14cbcSMatt Macy 	spa->spa_delegation = zpool_prop_default_numeric(ZPOOL_PROP_DELEGATION);
6652eda14cbcSMatt Macy 	spa->spa_failmode = zpool_prop_default_numeric(ZPOOL_PROP_FAILUREMODE);
6653eda14cbcSMatt Macy 	spa->spa_autoexpand = zpool_prop_default_numeric(ZPOOL_PROP_AUTOEXPAND);
6654eda14cbcSMatt Macy 	spa->spa_multihost = zpool_prop_default_numeric(ZPOOL_PROP_MULTIHOST);
6655eda14cbcSMatt Macy 	spa->spa_autotrim = zpool_prop_default_numeric(ZPOOL_PROP_AUTOTRIM);
6656ce4dcb97SMartin Matuska 	spa->spa_dedup_table_quota =
6657ce4dcb97SMartin Matuska 	    zpool_prop_default_numeric(ZPOOL_PROP_DEDUP_TABLE_QUOTA);
6658eda14cbcSMatt Macy 
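	/*
	 * Apply any caller-supplied pool properties on top of the defaults
	 * and sync them as part of the creation txg.
	 */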
6659eda14cbcSMatt Macy 	if (props != NULL) {
6660eda14cbcSMatt Macy 		spa_configfile_set(spa, props, B_FALSE);
6661eda14cbcSMatt Macy 		spa_sync_props(props, tx);
6662eda14cbcSMatt Macy 	}
6663eda14cbcSMatt Macy 
66647877fdebSMatt Macy 	for (int i = 0; i < ndraid; i++)
66657877fdebSMatt Macy 		spa_feature_incr(spa, SPA_FEATURE_DRAID, tx);
66667877fdebSMatt Macy 
6667eda14cbcSMatt Macy 	dmu_tx_commit(tx);
6668eda14cbcSMatt Macy 
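	/*
	 * Start pool syncing and MMP, then wait for the creation txg to
	 * make it out to stable storage.
	 */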
6669eda14cbcSMatt Macy 	spa->spa_sync_on = B_TRUE;
6670eda14cbcSMatt Macy 	txg_sync_start(dp);
6671eda14cbcSMatt Macy 	mmp_thread_start(spa);
6672eda14cbcSMatt Macy 	txg_wait_synced(dp, txg);
6673eda14cbcSMatt Macy 
6674eda14cbcSMatt Macy 	spa_spawn_aux_threads(spa);
6675eda14cbcSMatt Macy 
6676be181ee2SMartin Matuska 	spa_write_cachefile(spa, B_FALSE, B_TRUE, B_TRUE);
6677eda14cbcSMatt Macy 
6678eda14cbcSMatt Macy 	/*
6679eda14cbcSMatt Macy 	 * Don't count references from objsets that are already closed
6680eda14cbcSMatt Macy 	 * and are making their way through the eviction process.
6681eda14cbcSMatt Macy 	 */
6682eda14cbcSMatt Macy 	spa_evicting_os_wait(spa);
6683eda14cbcSMatt Macy 	spa->spa_minref = zfs_refcount_count(&spa->spa_refcount);
6684eda14cbcSMatt Macy 	spa->spa_load_state = SPA_LOAD_NONE;
6685eda14cbcSMatt Macy 
6686c03c5b1cSMartin Matuska 	spa_import_os(spa);
6687c03c5b1cSMartin Matuska 
6688eda14cbcSMatt Macy 	mutex_exit(&spa_namespace_lock);
6689eda14cbcSMatt Macy 
6690eda14cbcSMatt Macy 	return (0);
6691eda14cbcSMatt Macy }
6692eda14cbcSMatt Macy 
6693eda14cbcSMatt Macy /*
6694eda14cbcSMatt Macy  * Import a non-root pool into the system.
6695eda14cbcSMatt Macy  */
6696eda14cbcSMatt Macy int
6697eda14cbcSMatt Macy spa_import(char *pool, nvlist_t *config, nvlist_t *props, uint64_t flags)
6698eda14cbcSMatt Macy {
6699eda14cbcSMatt Macy 	spa_t *spa;
67002a58b312SMartin Matuska 	const char *altroot = NULL;
6701eda14cbcSMatt Macy 	spa_load_state_t state = SPA_LOAD_IMPORT;
6702eda14cbcSMatt Macy 	zpool_load_policy_t policy;
6703eda14cbcSMatt Macy 	spa_mode_t mode = spa_mode_global;
6704eda14cbcSMatt Macy 	uint64_t readonly = B_FALSE;
6705eda14cbcSMatt Macy 	int error;
6706eda14cbcSMatt Macy 	nvlist_t *nvroot;
6707eda14cbcSMatt Macy 	nvlist_t **spares, **l2cache;
6708eda14cbcSMatt Macy 	uint_t nspares, nl2cache;
6709eda14cbcSMatt Macy 
6710eda14cbcSMatt Macy 	/*
6711eda14cbcSMatt Macy 	 * If a pool with this name exists, return failure.
6712eda14cbcSMatt Macy 	 */
6713eda14cbcSMatt Macy 	mutex_enter(&spa_namespace_lock);
6714eda14cbcSMatt Macy 	if (spa_lookup(pool) != NULL) {
6715eda14cbcSMatt Macy 		mutex_exit(&spa_namespace_lock);
6716eda14cbcSMatt Macy 		return (SET_ERROR(EEXIST));
6717eda14cbcSMatt Macy 	}
6718eda14cbcSMatt Macy 
6719eda14cbcSMatt Macy 	/*
6720eda14cbcSMatt Macy 	 * Create and initialize the spa structure.
6721eda14cbcSMatt Macy 	 */
6722eda14cbcSMatt Macy 	(void) nvlist_lookup_string(props,
6723eda14cbcSMatt Macy 	    zpool_prop_to_name(ZPOOL_PROP_ALTROOT), &altroot);
6724eda14cbcSMatt Macy 	(void) nvlist_lookup_uint64(props,
6725eda14cbcSMatt Macy 	    zpool_prop_to_name(ZPOOL_PROP_READONLY), &readonly);
6726eda14cbcSMatt Macy 	if (readonly)
6727eda14cbcSMatt Macy 		mode = SPA_MODE_READ;
6728eda14cbcSMatt Macy 	spa = spa_add(pool, config, altroot);
6729eda14cbcSMatt Macy 	spa->spa_import_flags = flags;
6730eda14cbcSMatt Macy 
6731eda14cbcSMatt Macy 	/*
6732eda14cbcSMatt Macy 	 * Verbatim import - Take a pool and insert it into the namespace
6733eda14cbcSMatt Macy 	 * as if it had been loaded at boot.
6734eda14cbcSMatt Macy 	 */
6735eda14cbcSMatt Macy 	if (spa->spa_import_flags & ZFS_IMPORT_VERBATIM) {
6736eda14cbcSMatt Macy 		if (props != NULL)
6737eda14cbcSMatt Macy 			spa_configfile_set(spa, props, B_FALSE);
6738eda14cbcSMatt Macy 
6739be181ee2SMartin Matuska 		spa_write_cachefile(spa, B_FALSE, B_TRUE, B_FALSE);
6740eda14cbcSMatt Macy 		spa_event_notify(spa, NULL, NULL, ESC_ZFS_POOL_IMPORT);
6741eda14cbcSMatt Macy 		zfs_dbgmsg("spa_import: verbatim import of %s", pool);
6742eda14cbcSMatt Macy 		mutex_exit(&spa_namespace_lock);
6743eda14cbcSMatt Macy 		return (0);
6744eda14cbcSMatt Macy 	}
6745eda14cbcSMatt Macy 
6746eda14cbcSMatt Macy 	spa_activate(spa, mode);
6747eda14cbcSMatt Macy 
6748eda14cbcSMatt Macy 	/*
6749eda14cbcSMatt Macy 	 * Don't start async tasks until we know everything is healthy.
6750eda14cbcSMatt Macy 	 */
6751eda14cbcSMatt Macy 	spa_async_suspend(spa);
6752eda14cbcSMatt Macy 
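	/*
	 * Honor any rewind request carried in the load policy; a rewind
	 * import is treated as a recovery load.
	 */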
6753eda14cbcSMatt Macy 	zpool_get_load_policy(config, &policy);
6754eda14cbcSMatt Macy 	if (policy.zlp_rewind & ZPOOL_DO_REWIND)
6755eda14cbcSMatt Macy 		state = SPA_LOAD_RECOVER;
6756eda14cbcSMatt Macy 
6757eda14cbcSMatt Macy 	spa->spa_config_source = SPA_CONFIG_SRC_TRYIMPORT;
6758eda14cbcSMatt Macy 
6759eda14cbcSMatt Macy 	if (state != SPA_LOAD_RECOVER) {
6760eda14cbcSMatt Macy 		spa->spa_last_ubsync_txg = spa->spa_load_txg = 0;
6761eda14cbcSMatt Macy 		zfs_dbgmsg("spa_import: importing %s", pool);
6762eda14cbcSMatt Macy 	} else {
6763eda14cbcSMatt Macy 		zfs_dbgmsg("spa_import: importing %s, max_txg=%lld "
6764eda14cbcSMatt Macy 		    "(RECOVERY MODE)", pool, (longlong_t)policy.zlp_txg);
6765eda14cbcSMatt Macy 	}
6766eda14cbcSMatt Macy 	error = spa_load_best(spa, state, policy.zlp_txg, policy.zlp_rewind);
6767eda14cbcSMatt Macy 
6768eda14cbcSMatt Macy 	/*
6769eda14cbcSMatt Macy 	 * Propagate anything learned while loading the pool and pass it
6770eda14cbcSMatt Macy 	 * back to the caller (e.g. rewind info, missing devices, etc).
6771eda14cbcSMatt Macy 	 */
677281b22a98SMartin Matuska 	fnvlist_add_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, spa->spa_load_info);
6773eda14cbcSMatt Macy 
6774eda14cbcSMatt Macy 	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
6775eda14cbcSMatt Macy 	/*
6776eda14cbcSMatt Macy 	 * Toss any existing sparelist, as it doesn't have any validity
6777eda14cbcSMatt Macy 	 * anymore, and conflicts with spa_has_spare().
6778eda14cbcSMatt Macy 	 */
6779eda14cbcSMatt Macy 	if (spa->spa_spares.sav_config) {
6780eda14cbcSMatt Macy 		nvlist_free(spa->spa_spares.sav_config);
6781eda14cbcSMatt Macy 		spa->spa_spares.sav_config = NULL;
6782eda14cbcSMatt Macy 		spa_load_spares(spa);
6783eda14cbcSMatt Macy 	}
6784eda14cbcSMatt Macy 	if (spa->spa_l2cache.sav_config) {
6785eda14cbcSMatt Macy 		nvlist_free(spa->spa_l2cache.sav_config);
6786eda14cbcSMatt Macy 		spa->spa_l2cache.sav_config = NULL;
6787eda14cbcSMatt Macy 		spa_load_l2cache(spa);
6788eda14cbcSMatt Macy 	}
6789eda14cbcSMatt Macy 
679081b22a98SMartin Matuska 	nvroot = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE);
6791eda14cbcSMatt Macy 	spa_config_exit(spa, SCL_ALL, FTAG);
6792eda14cbcSMatt Macy 
6793eda14cbcSMatt Macy 	if (props != NULL)
6794eda14cbcSMatt Macy 		spa_configfile_set(spa, props, B_FALSE);
6795eda14cbcSMatt Macy 
6796eda14cbcSMatt Macy 	if (error != 0 || (props && spa_writeable(spa) &&
6797eda14cbcSMatt Macy 	    (error = spa_prop_set(spa, props)))) {
6798eda14cbcSMatt Macy 		spa_unload(spa);
6799eda14cbcSMatt Macy 		spa_deactivate(spa);
6800eda14cbcSMatt Macy 		spa_remove(spa);
6801eda14cbcSMatt Macy 		mutex_exit(&spa_namespace_lock);
6802eda14cbcSMatt Macy 		return (error);
6803eda14cbcSMatt Macy 	}
6804eda14cbcSMatt Macy 
6805eda14cbcSMatt Macy 	spa_async_resume(spa);
6806eda14cbcSMatt Macy 
6807eda14cbcSMatt Macy 	/*
6808eda14cbcSMatt Macy 	 * Override any spares and level 2 cache devices as specified by
6809eda14cbcSMatt Macy 	 * the user, as these may have correct device names/devids, etc.
6810eda14cbcSMatt Macy 	 */
6811eda14cbcSMatt Macy 	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
6812eda14cbcSMatt Macy 	    &spares, &nspares) == 0) {
6813eda14cbcSMatt Macy 		if (spa->spa_spares.sav_config)
681481b22a98SMartin Matuska 			fnvlist_remove(spa->spa_spares.sav_config,
681581b22a98SMartin Matuska 			    ZPOOL_CONFIG_SPARES);
6816eda14cbcSMatt Macy 		else
681781b22a98SMartin Matuska 			spa->spa_spares.sav_config = fnvlist_alloc();
681881b22a98SMartin Matuska 		fnvlist_add_nvlist_array(spa->spa_spares.sav_config,
6819681ce946SMartin Matuska 		    ZPOOL_CONFIG_SPARES, (const nvlist_t * const *)spares,
6820681ce946SMartin Matuska 		    nspares);
6821eda14cbcSMatt Macy 		spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
6822eda14cbcSMatt Macy 		spa_load_spares(spa);
6823eda14cbcSMatt Macy 		spa_config_exit(spa, SCL_ALL, FTAG);
6824eda14cbcSMatt Macy 		spa->spa_spares.sav_sync = B_TRUE;
6825ce4dcb97SMartin Matuska 		spa->spa_spares.sav_label_sync = B_TRUE;
6826eda14cbcSMatt Macy 	}
6827eda14cbcSMatt Macy 	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
6828eda14cbcSMatt Macy 	    &l2cache, &nl2cache) == 0) {
6829eda14cbcSMatt Macy 		if (spa->spa_l2cache.sav_config)
683081b22a98SMartin Matuska 			fnvlist_remove(spa->spa_l2cache.sav_config,
683181b22a98SMartin Matuska 			    ZPOOL_CONFIG_L2CACHE);
6832eda14cbcSMatt Macy 		else
683381b22a98SMartin Matuska 			spa->spa_l2cache.sav_config = fnvlist_alloc();
683481b22a98SMartin Matuska 		fnvlist_add_nvlist_array(spa->spa_l2cache.sav_config,
6835681ce946SMartin Matuska 		    ZPOOL_CONFIG_L2CACHE, (const nvlist_t * const *)l2cache,
6836681ce946SMartin Matuska 		    nl2cache);
6837eda14cbcSMatt Macy 		spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
6838eda14cbcSMatt Macy 		spa_load_l2cache(spa);
6839eda14cbcSMatt Macy 		spa_config_exit(spa, SCL_ALL, FTAG);
6840eda14cbcSMatt Macy 		spa->spa_l2cache.sav_sync = B_TRUE;
6841ce4dcb97SMartin Matuska 		spa->spa_l2cache.sav_label_sync = B_TRUE;
6842eda14cbcSMatt Macy 	}
6843eda14cbcSMatt Macy 
6844eda14cbcSMatt Macy 	/*
6845eda14cbcSMatt Macy 	 * Check for any removed devices.
6846eda14cbcSMatt Macy 	 */
6847eda14cbcSMatt Macy 	if (spa->spa_autoreplace) {
6848eda14cbcSMatt Macy 		spa_aux_check_removed(&spa->spa_spares);
6849eda14cbcSMatt Macy 		spa_aux_check_removed(&spa->spa_l2cache);
6850eda14cbcSMatt Macy 	}
6851eda14cbcSMatt Macy 
6852eda14cbcSMatt Macy 	if (spa_writeable(spa)) {
6853eda14cbcSMatt Macy 		/*
6854eda14cbcSMatt Macy 		 * Update the config cache to include the newly-imported pool.
6855eda14cbcSMatt Macy 		 */
6856eda14cbcSMatt Macy 		spa_config_update(spa, SPA_CONFIG_UPDATE_POOL);
6857eda14cbcSMatt Macy 	}
6858eda14cbcSMatt Macy 
6859eda14cbcSMatt Macy 	/*
6860eda14cbcSMatt Macy 	 * It's possible that the pool was expanded while it was exported.
6861eda14cbcSMatt Macy 	 * We kick off an async task to handle this for us.
6862eda14cbcSMatt Macy 	 */
6863eda14cbcSMatt Macy 	spa_async_request(spa, SPA_ASYNC_AUTOEXPAND);
6864eda14cbcSMatt Macy 
6865eda14cbcSMatt Macy 	spa_history_log_version(spa, "import", NULL);
6866eda14cbcSMatt Macy 
6867eda14cbcSMatt Macy 	spa_event_notify(spa, NULL, NULL, ESC_ZFS_POOL_IMPORT);
6868eda14cbcSMatt Macy 
6869eda14cbcSMatt Macy 	mutex_exit(&spa_namespace_lock);
6870eda14cbcSMatt Macy 
6871eda14cbcSMatt Macy 	zvol_create_minors_recursive(pool);
6872eda14cbcSMatt Macy 
6873c03c5b1cSMartin Matuska 	spa_import_os(spa);
6874c03c5b1cSMartin Matuska 
6875eda14cbcSMatt Macy 	return (0);
6876eda14cbcSMatt Macy }
6877eda14cbcSMatt Macy 
6878eda14cbcSMatt Macy nvlist_t *
6879eda14cbcSMatt Macy spa_tryimport(nvlist_t *tryconfig)
6880eda14cbcSMatt Macy {
6881eda14cbcSMatt Macy 	nvlist_t *config = NULL;
68822a58b312SMartin Matuska 	const char *poolname, *cachefile;
6883eda14cbcSMatt Macy 	spa_t *spa;
6884eda14cbcSMatt Macy 	uint64_t state;
6885eda14cbcSMatt Macy 	int error;
6886eda14cbcSMatt Macy 	zpool_load_policy_t policy;
6887eda14cbcSMatt Macy 
6888eda14cbcSMatt Macy 	if (nvlist_lookup_string(tryconfig, ZPOOL_CONFIG_POOL_NAME, &poolname))
6889eda14cbcSMatt Macy 		return (NULL);
6890eda14cbcSMatt Macy 
6891eda14cbcSMatt Macy 	if (nvlist_lookup_uint64(tryconfig, ZPOOL_CONFIG_POOL_STATE, &state))
6892eda14cbcSMatt Macy 		return (NULL);
6893eda14cbcSMatt Macy 
6894eda14cbcSMatt Macy 	/*
6895eda14cbcSMatt Macy 	 * Create and initialize the spa structure.
6896eda14cbcSMatt Macy 	 */
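	/*
	 * The temporary name embeds the current thread pointer so that
	 * concurrent tryimports do not collide in the spa namespace.
	 */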
68970d4ad640SMartin Matuska 	char *name = kmem_alloc(MAXPATHLEN, KM_SLEEP);
68980d4ad640SMartin Matuska 	(void) snprintf(name, MAXPATHLEN, "%s-%llx-%s",
6899d316de24SBrooks Davis 	    TRYIMPORT_NAME, (u_longlong_t)(uintptr_t)curthread, poolname);
69000d4ad640SMartin Matuska 
6901eda14cbcSMatt Macy 	mutex_enter(&spa_namespace_lock);
69020d4ad640SMartin Matuska 	spa = spa_add(name, tryconfig, NULL);
6903eda14cbcSMatt Macy 	spa_activate(spa, SPA_MODE_READ);
69040d4ad640SMartin Matuska 	kmem_free(name, MAXPATHLEN);
6905eda14cbcSMatt Macy 
6906eda14cbcSMatt Macy 	/*
6907eda14cbcSMatt Macy 	 * Rewind pool if a max txg was provided.
6908eda14cbcSMatt Macy 	 */
6909eda14cbcSMatt Macy 	zpool_get_load_policy(spa->spa_config, &policy);
6910eda14cbcSMatt Macy 	if (policy.zlp_txg != UINT64_MAX) {
6911eda14cbcSMatt Macy 		spa->spa_load_max_txg = policy.zlp_txg;
6912eda14cbcSMatt Macy 		spa->spa_extreme_rewind = B_TRUE;
6913eda14cbcSMatt Macy 		zfs_dbgmsg("spa_tryimport: importing %s, max_txg=%lld",
6914eda14cbcSMatt Macy 		    poolname, (longlong_t)policy.zlp_txg);
6915eda14cbcSMatt Macy 	} else {
6916eda14cbcSMatt Macy 		zfs_dbgmsg("spa_tryimport: importing %s", poolname);
6917eda14cbcSMatt Macy 	}
6918eda14cbcSMatt Macy 
6919eda14cbcSMatt Macy 	if (nvlist_lookup_string(tryconfig, ZPOOL_CONFIG_CACHEFILE, &cachefile)
6920eda14cbcSMatt Macy 	    == 0) {
6921eda14cbcSMatt Macy 		zfs_dbgmsg("spa_tryimport: using cachefile '%s'", cachefile);
6922eda14cbcSMatt Macy 		spa->spa_config_source = SPA_CONFIG_SRC_CACHEFILE;
6923eda14cbcSMatt Macy 	} else {
6924eda14cbcSMatt Macy 		spa->spa_config_source = SPA_CONFIG_SRC_SCAN;
6925eda14cbcSMatt Macy 	}
6926eda14cbcSMatt Macy 
6927e639e0d2SMartin Matuska 	/*
6928e639e0d2SMartin Matuska 	 * spa_import() relies on a pool config fetched by spa_tryimport()
6929e639e0d2SMartin Matuska 	 * for spare/cache devices. Import flags are not passed to
6930e639e0d2SMartin Matuska 	 * spa_tryimport(), so it would otherwise return early on a missing
6931e639e0d2SMartin Matuska 	 * log device and never retrieve the cache and spare devices.
6932e639e0d2SMartin Matuska 	 * Passing ZFS_IMPORT_MISSING_LOG to spa_tryimport() makes it fetch
6933e639e0d2SMartin Matuska 	 * the correct configuration regardless of the missing log device.
6934e639e0d2SMartin Matuska 	 */
6935e639e0d2SMartin Matuska 	spa->spa_import_flags |= ZFS_IMPORT_MISSING_LOG;
6936e639e0d2SMartin Matuska 
6937eda14cbcSMatt Macy 	error = spa_load(spa, SPA_LOAD_TRYIMPORT, SPA_IMPORT_EXISTING);
6938eda14cbcSMatt Macy 
6939eda14cbcSMatt Macy 	/*
6940eda14cbcSMatt Macy 	 * If 'tryconfig' was at least parsable, return the current config.
6941eda14cbcSMatt Macy 	 */
6942eda14cbcSMatt Macy 	if (spa->spa_root_vdev != NULL) {
6943eda14cbcSMatt Macy 		config = spa_config_generate(spa, NULL, -1ULL, B_TRUE);
694481b22a98SMartin Matuska 		fnvlist_add_string(config, ZPOOL_CONFIG_POOL_NAME, poolname);
694581b22a98SMartin Matuska 		fnvlist_add_uint64(config, ZPOOL_CONFIG_POOL_STATE, state);
694681b22a98SMartin Matuska 		fnvlist_add_uint64(config, ZPOOL_CONFIG_TIMESTAMP,
694781b22a98SMartin Matuska 		    spa->spa_uberblock.ub_timestamp);
694881b22a98SMartin Matuska 		fnvlist_add_nvlist(config, ZPOOL_CONFIG_LOAD_INFO,
694981b22a98SMartin Matuska 		    spa->spa_load_info);
695081b22a98SMartin Matuska 		fnvlist_add_uint64(config, ZPOOL_CONFIG_ERRATA,
695181b22a98SMartin Matuska 		    spa->spa_errata);
6952eda14cbcSMatt Macy 
6953eda14cbcSMatt Macy 		/*
6954eda14cbcSMatt Macy 		 * If the bootfs property exists on this pool then we
6955eda14cbcSMatt Macy 		 * copy it out so that external consumers can tell which
6956eda14cbcSMatt Macy 		 * pools are bootable.
6957eda14cbcSMatt Macy 		 */
6958eda14cbcSMatt Macy 		if ((!error || error == EEXIST) && spa->spa_bootfs) {
6959eda14cbcSMatt Macy 			char *tmpname = kmem_alloc(MAXPATHLEN, KM_SLEEP);
6960eda14cbcSMatt Macy 
6961eda14cbcSMatt Macy 			/*
6962eda14cbcSMatt Macy 			 * We have to play games with the name since the
6963eda14cbcSMatt Macy 			 * pool was opened as TRYIMPORT_NAME.
6964eda14cbcSMatt Macy 			 */
6965eda14cbcSMatt Macy 			if (dsl_dsobj_to_dsname(spa_name(spa),
6966eda14cbcSMatt Macy 			    spa->spa_bootfs, tmpname) == 0) {
6967eda14cbcSMatt Macy 				char *cp;
6968eda14cbcSMatt Macy 				char *dsname;
6969eda14cbcSMatt Macy 
6970eda14cbcSMatt Macy 				dsname = kmem_alloc(MAXPATHLEN, KM_SLEEP);
6971eda14cbcSMatt Macy 
6972eda14cbcSMatt Macy 				cp = strchr(tmpname, '/');
6973eda14cbcSMatt Macy 				if (cp == NULL) {
6974eda14cbcSMatt Macy 					(void) strlcpy(dsname, tmpname,
6975eda14cbcSMatt Macy 					    MAXPATHLEN);
6976eda14cbcSMatt Macy 				} else {
6977eda14cbcSMatt Macy 					(void) snprintf(dsname, MAXPATHLEN,
6978eda14cbcSMatt Macy 					    "%s/%s", poolname, ++cp);
6979eda14cbcSMatt Macy 				}
698081b22a98SMartin Matuska 				fnvlist_add_string(config, ZPOOL_CONFIG_BOOTFS,
698181b22a98SMartin Matuska 				    dsname);
6982eda14cbcSMatt Macy 				kmem_free(dsname, MAXPATHLEN);
6983eda14cbcSMatt Macy 			}
6984eda14cbcSMatt Macy 			kmem_free(tmpname, MAXPATHLEN);
6985eda14cbcSMatt Macy 		}
6986eda14cbcSMatt Macy 
6987eda14cbcSMatt Macy 		/*
6988eda14cbcSMatt Macy 		 * Add the list of hot spares and level 2 cache devices.
6989eda14cbcSMatt Macy 		 */
6990eda14cbcSMatt Macy 		spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
6991eda14cbcSMatt Macy 		spa_add_spares(spa, config);
6992eda14cbcSMatt Macy 		spa_add_l2cache(spa, config);
6993eda14cbcSMatt Macy 		spa_config_exit(spa, SCL_CONFIG, FTAG);
6994eda14cbcSMatt Macy 	}
6995eda14cbcSMatt Macy 
6996eda14cbcSMatt Macy 	spa_unload(spa);
6997eda14cbcSMatt Macy 	spa_deactivate(spa);
6998eda14cbcSMatt Macy 	spa_remove(spa);
6999eda14cbcSMatt Macy 	mutex_exit(&spa_namespace_lock);
7000eda14cbcSMatt Macy 
7001eda14cbcSMatt Macy 	return (config);
7002eda14cbcSMatt Macy }
7003eda14cbcSMatt Macy 
7004eda14cbcSMatt Macy /*
7005eda14cbcSMatt Macy  * Pool export/destroy
7006eda14cbcSMatt Macy  *
7007eda14cbcSMatt Macy  * The act of destroying or exporting a pool is very simple.  We make sure there
7008eda14cbcSMatt Macy  * is no more pending I/O and that all references to the pool are gone.  Then, we
7009eda14cbcSMatt Macy  * update the pool state and sync all the labels to disk, removing the
7010eda14cbcSMatt Macy  * configuration from the cache afterwards. If the 'hardforce' flag is set, then
7011eda14cbcSMatt Macy  * we don't sync the labels or remove the configuration cache.
7012eda14cbcSMatt Macy  */
7013eda14cbcSMatt Macy static int
7014180f8225SMatt Macy spa_export_common(const char *pool, int new_state, nvlist_t **oldconfig,
7015eda14cbcSMatt Macy     boolean_t force, boolean_t hardforce)
7016eda14cbcSMatt Macy {
7017aca928a5SMartin Matuska 	int error = 0;
7018eda14cbcSMatt Macy 	spa_t *spa;
70190d4ad640SMartin Matuska 	hrtime_t export_start = gethrtime();
7020eda14cbcSMatt Macy 
7021eda14cbcSMatt Macy 	if (oldconfig)
7022eda14cbcSMatt Macy 		*oldconfig = NULL;
7023eda14cbcSMatt Macy 
7024eda14cbcSMatt Macy 	if (!(spa_mode_global & SPA_MODE_WRITE))
7025eda14cbcSMatt Macy 		return (SET_ERROR(EROFS));
7026eda14cbcSMatt Macy 
7027eda14cbcSMatt Macy 	mutex_enter(&spa_namespace_lock);
7028eda14cbcSMatt Macy 	if ((spa = spa_lookup(pool)) == NULL) {
7029eda14cbcSMatt Macy 		mutex_exit(&spa_namespace_lock);
7030eda14cbcSMatt Macy 		return (SET_ERROR(ENOENT));
7031eda14cbcSMatt Macy 	}
7032eda14cbcSMatt Macy 
7033eda14cbcSMatt Macy 	if (spa->spa_is_exporting) {
7034eda14cbcSMatt Macy 		/* the pool is being exported by another thread */
7035eda14cbcSMatt Macy 		mutex_exit(&spa_namespace_lock);
7036eda14cbcSMatt Macy 		return (SET_ERROR(ZFS_ERR_EXPORT_IN_PROGRESS));
7037eda14cbcSMatt Macy 	}
7038eda14cbcSMatt Macy 	spa->spa_is_exporting = B_TRUE;
7039eda14cbcSMatt Macy 
7040eda14cbcSMatt Macy 	/*
7041aca928a5SMartin Matuska 	 * Put a hold on the pool, drop the namespace lock, stop async tasks
7042aca928a5SMartin Matuska 	 * and see if we can export.
7043eda14cbcSMatt Macy 	 */
7044eda14cbcSMatt Macy 	spa_open_ref(spa, FTAG);
7045eda14cbcSMatt Macy 	mutex_exit(&spa_namespace_lock);
7046eda14cbcSMatt Macy 	spa_async_suspend(spa);
7047eda14cbcSMatt Macy 	if (spa->spa_zvol_taskq) {
7048eda14cbcSMatt Macy 		zvol_remove_minors(spa, spa_name(spa), B_TRUE);
7049eda14cbcSMatt Macy 		taskq_wait(spa->spa_zvol_taskq);
7050eda14cbcSMatt Macy 	}
7051eda14cbcSMatt Macy 	mutex_enter(&spa_namespace_lock);
7052aca928a5SMartin Matuska 	spa->spa_export_thread = curthread;
7053eda14cbcSMatt Macy 	spa_close(spa, FTAG);
7054eda14cbcSMatt Macy 
7055aca928a5SMartin Matuska 	if (spa->spa_state == POOL_STATE_UNINITIALIZED) {
7056aca928a5SMartin Matuska 		mutex_exit(&spa_namespace_lock);
7057eda14cbcSMatt Macy 		goto export_spa;
7058aca928a5SMartin Matuska 	}
7059aca928a5SMartin Matuska 
7060eda14cbcSMatt Macy 	/*
7061eda14cbcSMatt Macy 	 * The pool will be in core if it's openable, in which case we can
7062eda14cbcSMatt Macy 	 * modify its state.  Objsets may be open only because they're dirty,
7063eda14cbcSMatt Macy 	 * so we have to force it to sync before checking spa_refcnt.
7064eda14cbcSMatt Macy 	 */
7065eda14cbcSMatt Macy 	if (spa->spa_sync_on) {
7066eda14cbcSMatt Macy 		txg_wait_synced(spa->spa_dsl_pool, 0);
7067eda14cbcSMatt Macy 		spa_evicting_os_wait(spa);
7068eda14cbcSMatt Macy 	}
7069eda14cbcSMatt Macy 
7070eda14cbcSMatt Macy 	/*
7071eda14cbcSMatt Macy 	 * A pool cannot be exported or destroyed if there are active
7072eda14cbcSMatt Macy 	 * references.  If we are resetting a pool, allow references by
7073eda14cbcSMatt Macy 	 * fault injection handlers.
7074eda14cbcSMatt Macy 	 */
7075184c1b94SMartin Matuska 	if (!spa_refcount_zero(spa) || (spa->spa_inject_ref != 0)) {
7076184c1b94SMartin Matuska 		error = SET_ERROR(EBUSY);
7077184c1b94SMartin Matuska 		goto fail;
7078eda14cbcSMatt Macy 	}
7079eda14cbcSMatt Macy 
7080aca928a5SMartin Matuska 	mutex_exit(&spa_namespace_lock);
7081aca928a5SMartin Matuska 	/*
7082aca928a5SMartin Matuska 	 * At this point we no longer hold the spa_namespace_lock and
7083aca928a5SMartin Matuska 	 * there are no references on the spa. Future spa_lookup() calls
7084aca928a5SMartin Matuska 	 * will notice spa->spa_export_thread and wait until we signal
7085aca928a5SMartin Matuska 	 * that we are finished.
7086aca928a5SMartin Matuska 	 */
7087aca928a5SMartin Matuska 
7088eda14cbcSMatt Macy 	if (spa->spa_sync_on) {
7089be181ee2SMartin Matuska 		vdev_t *rvd = spa->spa_root_vdev;
7090eda14cbcSMatt Macy 		/*
7091eda14cbcSMatt Macy 		 * A pool cannot be exported if it has an active shared spare.
7092eda14cbcSMatt Macy 		 * This is to prevent other pools from stealing the active spare
7093eda14cbcSMatt Macy 		 * from an exported pool. If the user really wants to, such a
7094eda14cbcSMatt Macy 		 * pool can be forcibly exported.
7095eda14cbcSMatt Macy 		 */
7096eda14cbcSMatt Macy 		if (!force && new_state == POOL_STATE_EXPORTED &&
7097eda14cbcSMatt Macy 		    spa_has_active_shared_spare(spa)) {
7098184c1b94SMartin Matuska 			error = SET_ERROR(EXDEV);
7099aca928a5SMartin Matuska 			mutex_enter(&spa_namespace_lock);
7100184c1b94SMartin Matuska 			goto fail;
7101eda14cbcSMatt Macy 		}
7102eda14cbcSMatt Macy 
7103eda14cbcSMatt Macy 		/*
7104eda14cbcSMatt Macy 		 * We're about to export or destroy this pool. Make sure
7105eda14cbcSMatt Macy 		 * we stop all initialization and trim activity here before
7106eda14cbcSMatt Macy 		 * we set the spa_final_txg. This will ensure that all
7107eda14cbcSMatt Macy 		 * dirty data resulting from the initialization is
7108eda14cbcSMatt Macy 		 * committed to disk before we unload the pool.
7109eda14cbcSMatt Macy 		 */
7110eda14cbcSMatt Macy 		vdev_initialize_stop_all(rvd, VDEV_INITIALIZE_ACTIVE);
7111eda14cbcSMatt Macy 		vdev_trim_stop_all(rvd, VDEV_TRIM_ACTIVE);
7112eda14cbcSMatt Macy 		vdev_autotrim_stop_all(spa);
7113eda14cbcSMatt Macy 		vdev_rebuild_stop_all(spa);
7114eda14cbcSMatt Macy 
7115eda14cbcSMatt Macy 		/*
7116eda14cbcSMatt Macy 		 * We want this to be reflected on every label,
7117eda14cbcSMatt Macy 		 * so mark them all dirty.  spa_unload() will do the
7118eda14cbcSMatt Macy 		 * final sync that pushes these changes out.
7119eda14cbcSMatt Macy 		 */
7120eda14cbcSMatt Macy 		if (new_state != POOL_STATE_UNINITIALIZED && !hardforce) {
7121eda14cbcSMatt Macy 			spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
7122eda14cbcSMatt Macy 			spa->spa_state = new_state;
7123be181ee2SMartin Matuska 			vdev_config_dirty(rvd);
7124c03c5b1cSMartin Matuska 			spa_config_exit(spa, SCL_ALL, FTAG);
7125c03c5b1cSMartin Matuska 		}
7126c03c5b1cSMartin Matuska 
7127c03c5b1cSMartin Matuska 		/*
7128c03c5b1cSMartin Matuska 		 * If the log space map feature is enabled and the pool is
7129c03c5b1cSMartin Matuska 		 * getting exported (but not destroyed), we want to spend some
7130c03c5b1cSMartin Matuska 		 * time flushing as many metaslabs as we can in an attempt to
7131c03c5b1cSMartin Matuska 		 * destroy log space maps and save import time. This has to be
7132c03c5b1cSMartin Matuska 		 * done before we set the spa_final_txg, otherwise
7133c03c5b1cSMartin Matuska 		 * spa_sync() -> spa_flush_metaslabs() may dirty the final TXGs.
7134c03c5b1cSMartin Matuska 		 * spa_should_flush_logs_on_unload() should be called after
7135c03c5b1cSMartin Matuska 		 * spa_state has been set to the new_state.
7136c03c5b1cSMartin Matuska 		 */
7137c03c5b1cSMartin Matuska 		if (spa_should_flush_logs_on_unload(spa))
7138c03c5b1cSMartin Matuska 			spa_unload_log_sm_flush_all(spa);
7139c03c5b1cSMartin Matuska 
7140c03c5b1cSMartin Matuska 		if (new_state != POOL_STATE_UNINITIALIZED && !hardforce) {
7141c03c5b1cSMartin Matuska 			spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
7142eda14cbcSMatt Macy 			spa->spa_final_txg = spa_last_synced_txg(spa) +
7143eda14cbcSMatt Macy 			    TXG_DEFER_SIZE + 1;
7144eda14cbcSMatt Macy 			spa_config_exit(spa, SCL_ALL, FTAG);
7145eda14cbcSMatt Macy 		}
7146eda14cbcSMatt Macy 	}
7147eda14cbcSMatt Macy 
7148eda14cbcSMatt Macy export_spa:
7149c03c5b1cSMartin Matuska 	spa_export_os(spa);
7150c03c5b1cSMartin Matuska 
7151eda14cbcSMatt Macy 	if (new_state == POOL_STATE_DESTROYED)
7152eda14cbcSMatt Macy 		spa_event_notify(spa, NULL, NULL, ESC_ZFS_POOL_DESTROY);
7153eda14cbcSMatt Macy 	else if (new_state == POOL_STATE_EXPORTED)
7154eda14cbcSMatt Macy 		spa_event_notify(spa, NULL, NULL, ESC_ZFS_POOL_EXPORT);
7155eda14cbcSMatt Macy 
7156eda14cbcSMatt Macy 	if (spa->spa_state != POOL_STATE_UNINITIALIZED) {
7157eda14cbcSMatt Macy 		spa_unload(spa);
7158eda14cbcSMatt Macy 		spa_deactivate(spa);
7159eda14cbcSMatt Macy 	}
7160eda14cbcSMatt Macy 
7161eda14cbcSMatt Macy 	if (oldconfig && spa->spa_config)
716281b22a98SMartin Matuska 		*oldconfig = fnvlist_dup(spa->spa_config);
7163eda14cbcSMatt Macy 
7164aca928a5SMartin Matuska 	if (new_state == POOL_STATE_EXPORTED)
7165aca928a5SMartin Matuska 		zio_handle_export_delay(spa, gethrtime() - export_start);
7166aca928a5SMartin Matuska 
7167aca928a5SMartin Matuska 	/*
7168aca928a5SMartin Matuska 	 * Take the namespace lock for the actual spa_t removal
7169aca928a5SMartin Matuska 	 */
7170aca928a5SMartin Matuska 	mutex_enter(&spa_namespace_lock);
7171eda14cbcSMatt Macy 	if (new_state != POOL_STATE_UNINITIALIZED) {
7172eda14cbcSMatt Macy 		if (!hardforce)
7173be181ee2SMartin Matuska 			spa_write_cachefile(spa, B_TRUE, B_TRUE, B_FALSE);
7174eda14cbcSMatt Macy 		spa_remove(spa);
7175eda14cbcSMatt Macy 	} else {
7176eda14cbcSMatt Macy 		/*
7177eda14cbcSMatt Macy 		 * If spa_remove() is not called for this spa_t and
7178eda14cbcSMatt Macy 		 * there is any possibility that it can be reused,
7179eda14cbcSMatt Macy 		 * we make sure to reset the exporting flag.
7180eda14cbcSMatt Macy 		 */
7181eda14cbcSMatt Macy 		spa->spa_is_exporting = B_FALSE;
7182aca928a5SMartin Matuska 		spa->spa_export_thread = NULL;
7183eda14cbcSMatt Macy 	}
7184eda14cbcSMatt Macy 
7185aca928a5SMartin Matuska 	/*
7186aca928a5SMartin Matuska 	 * Wake up any waiters in spa_lookup()
7187aca928a5SMartin Matuska 	 */
7188aca928a5SMartin Matuska 	cv_broadcast(&spa_namespace_cv);
7189eda14cbcSMatt Macy 	mutex_exit(&spa_namespace_lock);
7190eda14cbcSMatt Macy 	return (0);
7191184c1b94SMartin Matuska 
7192184c1b94SMartin Matuska fail:
7193184c1b94SMartin Matuska 	spa->spa_is_exporting = B_FALSE;
7194aca928a5SMartin Matuska 	spa->spa_export_thread = NULL;
7195aca928a5SMartin Matuska 
7196184c1b94SMartin Matuska 	spa_async_resume(spa);
7197aca928a5SMartin Matuska 	/*
7198aca928a5SMartin Matuska 	 * Wake up any waiters in spa_lookup()
7199aca928a5SMartin Matuska 	 */
7200aca928a5SMartin Matuska 	cv_broadcast(&spa_namespace_cv);
7201184c1b94SMartin Matuska 	mutex_exit(&spa_namespace_lock);
7202184c1b94SMartin Matuska 	return (error);
7203eda14cbcSMatt Macy }
7204eda14cbcSMatt Macy 
7205eda14cbcSMatt Macy /*
7206eda14cbcSMatt Macy  * Destroy a storage pool.
7207eda14cbcSMatt Macy  */
7208eda14cbcSMatt Macy int
7209180f8225SMatt Macy spa_destroy(const char *pool)
7210eda14cbcSMatt Macy {
7211eda14cbcSMatt Macy 	return (spa_export_common(pool, POOL_STATE_DESTROYED, NULL,
7212eda14cbcSMatt Macy 	    B_FALSE, B_FALSE));
7213eda14cbcSMatt Macy }
7214eda14cbcSMatt Macy 
7215eda14cbcSMatt Macy /*
7216eda14cbcSMatt Macy  * Export a storage pool.
7217eda14cbcSMatt Macy  */
7218eda14cbcSMatt Macy int
7219180f8225SMatt Macy spa_export(const char *pool, nvlist_t **oldconfig, boolean_t force,
7220eda14cbcSMatt Macy     boolean_t hardforce)
7221eda14cbcSMatt Macy {
7222eda14cbcSMatt Macy 	return (spa_export_common(pool, POOL_STATE_EXPORTED, oldconfig,
7223eda14cbcSMatt Macy 	    force, hardforce));
7224eda14cbcSMatt Macy }
7225eda14cbcSMatt Macy 
7226eda14cbcSMatt Macy /*
7227eda14cbcSMatt Macy  * Similar to spa_export(), this unloads the spa_t without actually removing it
7228eda14cbcSMatt Macy  * from the namespace in any way.
7229eda14cbcSMatt Macy  */
7230eda14cbcSMatt Macy int
7231180f8225SMatt Macy spa_reset(const char *pool)
7232eda14cbcSMatt Macy {
7233eda14cbcSMatt Macy 	return (spa_export_common(pool, POOL_STATE_UNINITIALIZED, NULL,
7234eda14cbcSMatt Macy 	    B_FALSE, B_FALSE));
7235eda14cbcSMatt Macy }
7236eda14cbcSMatt Macy 
7237eda14cbcSMatt Macy /*
7238eda14cbcSMatt Macy  * ==========================================================================
7239eda14cbcSMatt Macy  * Device manipulation
7240eda14cbcSMatt Macy  * ==========================================================================
7241eda14cbcSMatt Macy  */
7242eda14cbcSMatt Macy 
7243eda14cbcSMatt Macy /*
72447877fdebSMatt Macy  * This is called as a synctask to increment the draid feature flag
72457877fdebSMatt Macy  */
72467877fdebSMatt Macy static void
72477877fdebSMatt Macy spa_draid_feature_incr(void *arg, dmu_tx_t *tx)
72487877fdebSMatt Macy {
72497877fdebSMatt Macy 	spa_t *spa = dmu_tx_pool(tx)->dp_spa;
72507877fdebSMatt Macy 	int draid = (int)(uintptr_t)arg;
72517877fdebSMatt Macy 
72527877fdebSMatt Macy 	for (int c = 0; c < draid; c++)
72537877fdebSMatt Macy 		spa_feature_incr(spa, SPA_FEATURE_DRAID, tx);
72547877fdebSMatt Macy }
72557877fdebSMatt Macy 
72567877fdebSMatt Macy /*
7257eda14cbcSMatt Macy  * Add a device to a storage pool.
7258eda14cbcSMatt Macy  */
7259eda14cbcSMatt Macy int
7260783d3ff6SMartin Matuska spa_vdev_add(spa_t *spa, nvlist_t *nvroot, boolean_t check_ashift)
7261eda14cbcSMatt Macy {
72627877fdebSMatt Macy 	uint64_t txg, ndraid = 0;
7263eda14cbcSMatt Macy 	int error;
7264eda14cbcSMatt Macy 	vdev_t *rvd = spa->spa_root_vdev;
7265eda14cbcSMatt Macy 	vdev_t *vd, *tvd;
7266eda14cbcSMatt Macy 	nvlist_t **spares, **l2cache;
7267eda14cbcSMatt Macy 	uint_t nspares, nl2cache;
7268eda14cbcSMatt Macy 
7269eda14cbcSMatt Macy 	ASSERT(spa_writeable(spa));
7270eda14cbcSMatt Macy 
7271eda14cbcSMatt Macy 	txg = spa_vdev_enter(spa);
7272eda14cbcSMatt Macy 
7273eda14cbcSMatt Macy 	if ((error = spa_config_parse(spa, &vd, nvroot, NULL, 0,
7274eda14cbcSMatt Macy 	    VDEV_ALLOC_ADD)) != 0)
7275eda14cbcSMatt Macy 		return (spa_vdev_exit(spa, NULL, txg, error));
7276eda14cbcSMatt Macy 
7277eda14cbcSMatt Macy 	spa->spa_pending_vdev = vd;	/* spa_vdev_exit() will clear this */
7278eda14cbcSMatt Macy 
7279eda14cbcSMatt Macy 	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES, &spares,
7280eda14cbcSMatt Macy 	    &nspares) != 0)
7281eda14cbcSMatt Macy 		nspares = 0;
7282eda14cbcSMatt Macy 
7283eda14cbcSMatt Macy 	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE, &l2cache,
7284eda14cbcSMatt Macy 	    &nl2cache) != 0)
7285eda14cbcSMatt Macy 		nl2cache = 0;
7286eda14cbcSMatt Macy 
7287eda14cbcSMatt Macy 	if (vd->vdev_children == 0 && nspares == 0 && nl2cache == 0)
7288eda14cbcSMatt Macy 		return (spa_vdev_exit(spa, vd, txg, EINVAL));
7289eda14cbcSMatt Macy 
7290eda14cbcSMatt Macy 	if (vd->vdev_children != 0 &&
72917877fdebSMatt Macy 	    (error = vdev_create(vd, txg, B_FALSE)) != 0) {
7292eda14cbcSMatt Macy 		return (spa_vdev_exit(spa, vd, txg, error));
72937877fdebSMatt Macy 	}
72947877fdebSMatt Macy 
72957877fdebSMatt Macy 	/*
72967877fdebSMatt Macy 	 * The virtual dRAID spares must be added after the vdev tree is created
729716038816SMartin Matuska 	 * and the vdev guids are generated.  The guid of their associated
72987877fdebSMatt Macy 	 * dRAID is stored in the config and used when opening the spare.
72997877fdebSMatt Macy 	 */
73007877fdebSMatt Macy 	if ((error = vdev_draid_spare_create(nvroot, vd, &ndraid,
73017877fdebSMatt Macy 	    rvd->vdev_children)) == 0) {
73027877fdebSMatt Macy 		if (ndraid > 0 && nvlist_lookup_nvlist_array(nvroot,
73037877fdebSMatt Macy 		    ZPOOL_CONFIG_SPARES, &spares, &nspares) != 0)
73047877fdebSMatt Macy 			nspares = 0;
73057877fdebSMatt Macy 	} else {
73067877fdebSMatt Macy 		return (spa_vdev_exit(spa, vd, txg, error));
73077877fdebSMatt Macy 	}
7308eda14cbcSMatt Macy 
7309eda14cbcSMatt Macy 	/*
7310eda14cbcSMatt Macy 	 * We must validate the spares and l2cache devices after checking the
7311eda14cbcSMatt Macy 	 * children.  Otherwise, vdev_inuse() will blindly overwrite the spare.
7312eda14cbcSMatt Macy 	 */
7313eda14cbcSMatt Macy 	if ((error = spa_validate_aux(spa, nvroot, txg, VDEV_ALLOC_ADD)) != 0)
7314eda14cbcSMatt Macy 		return (spa_vdev_exit(spa, vd, txg, error));
7315eda14cbcSMatt Macy 
7316eda14cbcSMatt Macy 	/*
7317eda14cbcSMatt Macy 	 * If we are in the middle of a device removal, we can only add
7318eda14cbcSMatt Macy 	 * devices which match the existing devices in the pool.
7319eda14cbcSMatt Macy 	 * If we are in the middle of a removal, or have some indirect
73207877fdebSMatt Macy 	 * vdevs, we cannot add raidz or dRAID top levels.
7321eda14cbcSMatt Macy 	 */
7322eda14cbcSMatt Macy 	if (spa->spa_vdev_removal != NULL ||
7323eda14cbcSMatt Macy 	    spa->spa_removing_phys.sr_prev_indirect_vdev != -1) {
7324eda14cbcSMatt Macy 		for (int c = 0; c < vd->vdev_children; c++) {
7325eda14cbcSMatt Macy 			tvd = vd->vdev_child[c];
7326eda14cbcSMatt Macy 			if (spa->spa_vdev_removal != NULL &&
7327eda14cbcSMatt Macy 			    tvd->vdev_ashift != spa->spa_max_ashift) {
7328eda14cbcSMatt Macy 				return (spa_vdev_exit(spa, vd, txg, EINVAL));
7329eda14cbcSMatt Macy 			}
73307877fdebSMatt Macy 			/* Fail if top level vdev is raidz or a dRAID */
73317877fdebSMatt Macy 			if (vdev_get_nparity(tvd) != 0)
7332eda14cbcSMatt Macy 				return (spa_vdev_exit(spa, vd, txg, EINVAL));
73337877fdebSMatt Macy 
7334eda14cbcSMatt Macy 			/*
7335eda14cbcSMatt Macy 			 * Need the top level mirror to be
7336eda14cbcSMatt Macy 			 * a mirror of leaf vdevs only
7337eda14cbcSMatt Macy 			 */
7338eda14cbcSMatt Macy 			if (tvd->vdev_ops == &vdev_mirror_ops) {
7339eda14cbcSMatt Macy 				for (uint64_t cid = 0;
7340eda14cbcSMatt Macy 				    cid < tvd->vdev_children; cid++) {
7341eda14cbcSMatt Macy 					vdev_t *cvd = tvd->vdev_child[cid];
7342eda14cbcSMatt Macy 					if (!cvd->vdev_ops->vdev_op_leaf) {
7343eda14cbcSMatt Macy 						return (spa_vdev_exit(spa, vd,
7344eda14cbcSMatt Macy 						    txg, EINVAL));
7345eda14cbcSMatt Macy 					}
7346eda14cbcSMatt Macy 				}
7347eda14cbcSMatt Macy 			}
7348eda14cbcSMatt Macy 		}
7349eda14cbcSMatt Macy 	}
7350eda14cbcSMatt Macy 
7351783d3ff6SMartin Matuska 	if (check_ashift && spa->spa_max_ashift == spa->spa_min_ashift) {
7352783d3ff6SMartin Matuska 		for (int c = 0; c < vd->vdev_children; c++) {
7353783d3ff6SMartin Matuska 			tvd = vd->vdev_child[c];
7354783d3ff6SMartin Matuska 			if (tvd->vdev_ashift != spa->spa_max_ashift) {
7355783d3ff6SMartin Matuska 				return (spa_vdev_exit(spa, vd, txg,
7356783d3ff6SMartin Matuska 				    ZFS_ERR_ASHIFT_MISMATCH));
7357783d3ff6SMartin Matuska 			}
7358783d3ff6SMartin Matuska 		}
7359783d3ff6SMartin Matuska 	}
7360783d3ff6SMartin Matuska 
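	/*
	 * Move each new top-level vdev from the temporary root onto the
	 * pool's root vdev and mark its config dirty.
	 */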
7361eda14cbcSMatt Macy 	for (int c = 0; c < vd->vdev_children; c++) {
7362eda14cbcSMatt Macy 		tvd = vd->vdev_child[c];
7363eda14cbcSMatt Macy 		vdev_remove_child(vd, tvd);
7364eda14cbcSMatt Macy 		tvd->vdev_id = rvd->vdev_children;
7365eda14cbcSMatt Macy 		vdev_add_child(rvd, tvd);
7366eda14cbcSMatt Macy 		vdev_config_dirty(tvd);
7367eda14cbcSMatt Macy 	}
7368eda14cbcSMatt Macy 
7369eda14cbcSMatt Macy 	if (nspares != 0) {
7370eda14cbcSMatt Macy 		spa_set_aux_vdevs(&spa->spa_spares, spares, nspares,
7371eda14cbcSMatt Macy 		    ZPOOL_CONFIG_SPARES);
7372eda14cbcSMatt Macy 		spa_load_spares(spa);
7373eda14cbcSMatt Macy 		spa->spa_spares.sav_sync = B_TRUE;
7374eda14cbcSMatt Macy 	}
7375eda14cbcSMatt Macy 
7376eda14cbcSMatt Macy 	if (nl2cache != 0) {
7377eda14cbcSMatt Macy 		spa_set_aux_vdevs(&spa->spa_l2cache, l2cache, nl2cache,
7378eda14cbcSMatt Macy 		    ZPOOL_CONFIG_L2CACHE);
7379eda14cbcSMatt Macy 		spa_load_l2cache(spa);
7380eda14cbcSMatt Macy 		spa->spa_l2cache.sav_sync = B_TRUE;
7381eda14cbcSMatt Macy 	}
7382eda14cbcSMatt Macy 
7383eda14cbcSMatt Macy 	/*
73847877fdebSMatt Macy 	 * We can't increment a feature while holding spa_vdev so we
73857877fdebSMatt Macy 	 * have to do it in a synctask.
73867877fdebSMatt Macy 	 */
73877877fdebSMatt Macy 	if (ndraid != 0) {
73887877fdebSMatt Macy 		dmu_tx_t *tx;
73897877fdebSMatt Macy 
73907877fdebSMatt Macy 		tx = dmu_tx_create_assigned(spa->spa_dsl_pool, txg);
73917877fdebSMatt Macy 		dsl_sync_task_nowait(spa->spa_dsl_pool, spa_draid_feature_incr,
73927877fdebSMatt Macy 		    (void *)(uintptr_t)ndraid, tx);
73937877fdebSMatt Macy 		dmu_tx_commit(tx);
73947877fdebSMatt Macy 	}
73957877fdebSMatt Macy 
73967877fdebSMatt Macy 	/*
7397eda14cbcSMatt Macy 	 * We have to be careful when adding new vdevs to an existing pool.
7398eda14cbcSMatt Macy 	 * If other threads start allocating from these vdevs before we
7399eda14cbcSMatt Macy 	 * sync the config cache, and we lose power, then upon reboot we may
7400eda14cbcSMatt Macy 	 * fail to open the pool because there are DVAs that the config cache
7401eda14cbcSMatt Macy 	 * can't translate.  Therefore, we first add the vdevs without
7402eda14cbcSMatt Macy 	 * initializing metaslabs; sync the config cache (via spa_vdev_exit());
7403eda14cbcSMatt Macy 	 * and then let spa_config_update() initialize the new metaslabs.
7404eda14cbcSMatt Macy 	 *
7405eda14cbcSMatt Macy 	 * spa_load() checks for added-but-not-initialized vdevs, so that
7406eda14cbcSMatt Macy 	 * if we lose power at any point in this sequence, the remaining
7407eda14cbcSMatt Macy 	 * steps will be completed the next time we load the pool.
7408eda14cbcSMatt Macy 	 */
7409eda14cbcSMatt Macy 	(void) spa_vdev_exit(spa, vd, txg, 0);
7410eda14cbcSMatt Macy 
7411eda14cbcSMatt Macy 	mutex_enter(&spa_namespace_lock);
7412eda14cbcSMatt Macy 	spa_config_update(spa, SPA_CONFIG_UPDATE_POOL);
7413eda14cbcSMatt Macy 	spa_event_notify(spa, NULL, NULL, ESC_ZFS_VDEV_ADD);
7414eda14cbcSMatt Macy 	mutex_exit(&spa_namespace_lock);
7415eda14cbcSMatt Macy 
7416eda14cbcSMatt Macy 	return (0);
7417eda14cbcSMatt Macy }
7418eda14cbcSMatt Macy 
7419eda14cbcSMatt Macy /*
7420e716630dSMartin Matuska  * Attach a device to a vdev specified by its guid.  The vdev type can be
7421e716630dSMartin Matuska  * a mirror, a raidz, or a leaf device that is also a top-level (e.g. a
7422e716630dSMartin Matuska  * single device). When the vdev is a single device, a mirror vdev will be
7423e716630dSMartin Matuska  * automatically inserted.
7424eda14cbcSMatt Macy  *
7425eda14cbcSMatt Macy  * If 'replacing' is specified, the new device is intended to replace the
7426eda14cbcSMatt Macy  * existing device; in this case the two devices are made into their own
7427eda14cbcSMatt Macy  * mirror using the 'replacing' vdev, which is functionally identical to
7428eda14cbcSMatt Macy  * the mirror vdev (it actually reuses all the same ops) but has a few
7429eda14cbcSMatt Macy  * extra rules: you can't attach to it after it's been created, and upon
7430eda14cbcSMatt Macy  * completion of resilvering, the first disk (the one being replaced)
7431eda14cbcSMatt Macy  * is automatically detached.
7432eda14cbcSMatt Macy  *
7433eda14cbcSMatt Macy  * If 'rebuild' is specified, then sequential reconstruction (a.k.a. rebuild)
7434eda14cbcSMatt Macy  * should be performed instead of traditional healing reconstruction.  From
7435eda14cbcSMatt Macy  * an administrator's perspective these are both resilver operations.
7436eda14cbcSMatt Macy  */
7437eda14cbcSMatt Macy int
7438eda14cbcSMatt Macy spa_vdev_attach(spa_t *spa, uint64_t guid, nvlist_t *nvroot, int replacing,
7439eda14cbcSMatt Macy     int rebuild)
7440eda14cbcSMatt Macy {
7441eda14cbcSMatt Macy 	uint64_t txg, dtl_max_txg;
7442eda14cbcSMatt Macy 	vdev_t *rvd = spa->spa_root_vdev;
7443eda14cbcSMatt Macy 	vdev_t *oldvd, *newvd, *newrootvd, *pvd, *tvd;
7444eda14cbcSMatt Macy 	vdev_ops_t *pvops;
7445eda14cbcSMatt Macy 	char *oldvdpath, *newvdpath;
7446e716630dSMartin Matuska 	int newvd_isspare = B_FALSE;
7447eda14cbcSMatt Macy 	int error;
7448eda14cbcSMatt Macy 
7449eda14cbcSMatt Macy 	ASSERT(spa_writeable(spa));
7450eda14cbcSMatt Macy 
7451eda14cbcSMatt Macy 	txg = spa_vdev_enter(spa);
7452eda14cbcSMatt Macy 
7453eda14cbcSMatt Macy 	oldvd = spa_lookup_by_guid(spa, guid, B_FALSE);
7454eda14cbcSMatt Macy 
7455eda14cbcSMatt Macy 	ASSERT(MUTEX_HELD(&spa_namespace_lock));
7456eda14cbcSMatt Macy 	if (spa_feature_is_active(spa, SPA_FEATURE_POOL_CHECKPOINT)) {
7457eda14cbcSMatt Macy 		error = (spa_has_checkpoint(spa)) ?
7458eda14cbcSMatt Macy 		    ZFS_ERR_CHECKPOINT_EXISTS : ZFS_ERR_DISCARDING_CHECKPOINT;
7459eda14cbcSMatt Macy 		return (spa_vdev_exit(spa, NULL, txg, error));
7460eda14cbcSMatt Macy 	}
7461eda14cbcSMatt Macy 
7462eda14cbcSMatt Macy 	if (rebuild) {
7463eda14cbcSMatt Macy 		if (!spa_feature_is_enabled(spa, SPA_FEATURE_DEVICE_REBUILD))
7464eda14cbcSMatt Macy 			return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
7465eda14cbcSMatt Macy 
74664e8d558cSMartin Matuska 		if (dsl_scan_resilvering(spa_get_dsl(spa)) ||
74674e8d558cSMartin Matuska 		    dsl_scan_resilver_scheduled(spa_get_dsl(spa))) {
7468eda14cbcSMatt Macy 			return (spa_vdev_exit(spa, NULL, txg,
7469eda14cbcSMatt Macy 			    ZFS_ERR_RESILVER_IN_PROGRESS));
74704e8d558cSMartin Matuska 		}
7471eda14cbcSMatt Macy 	} else {
7472eda14cbcSMatt Macy 		if (vdev_rebuild_active(rvd))
7473eda14cbcSMatt Macy 			return (spa_vdev_exit(spa, NULL, txg,
7474eda14cbcSMatt Macy 			    ZFS_ERR_REBUILD_IN_PROGRESS));
7475eda14cbcSMatt Macy 	}
7476eda14cbcSMatt Macy 
7477e716630dSMartin Matuska 	if (spa->spa_vdev_removal != NULL) {
7478e716630dSMartin Matuska 		return (spa_vdev_exit(spa, NULL, txg,
7479e716630dSMartin Matuska 		    ZFS_ERR_DEVRM_IN_PROGRESS));
7480e716630dSMartin Matuska 	}
7481eda14cbcSMatt Macy 
7482eda14cbcSMatt Macy 	if (oldvd == NULL)
7483eda14cbcSMatt Macy 		return (spa_vdev_exit(spa, NULL, txg, ENODEV));
7484eda14cbcSMatt Macy 
7485e716630dSMartin Matuska 	boolean_t raidz = oldvd->vdev_ops == &vdev_raidz_ops;
7486e716630dSMartin Matuska 
7487e716630dSMartin Matuska 	if (raidz) {
7488e716630dSMartin Matuska 		if (!spa_feature_is_enabled(spa, SPA_FEATURE_RAIDZ_EXPANSION))
7489eda14cbcSMatt Macy 			return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
7490eda14cbcSMatt Macy 
7491e716630dSMartin Matuska 		/*
7492e716630dSMartin Matuska 		 * Can't expand a raidz while prior expand is in progress.
7493e716630dSMartin Matuska 		 */
7494e716630dSMartin Matuska 		if (spa->spa_raidz_expand != NULL) {
7495e716630dSMartin Matuska 			return (spa_vdev_exit(spa, NULL, txg,
7496e716630dSMartin Matuska 			    ZFS_ERR_RAIDZ_EXPAND_IN_PROGRESS));
7497e716630dSMartin Matuska 		}
7498e716630dSMartin Matuska 	} else if (!oldvd->vdev_ops->vdev_op_leaf) {
7499e716630dSMartin Matuska 		return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
7500e716630dSMartin Matuska 	}
7501e716630dSMartin Matuska 
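	/*
	 * For raidz expansion the new device attaches directly under the
	 * raidz top-level vdev; otherwise it attaches beside oldvd under
	 * oldvd's parent.
	 */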
7502e716630dSMartin Matuska 	if (raidz)
7503e716630dSMartin Matuska 		pvd = oldvd;
7504e716630dSMartin Matuska 	else
7505eda14cbcSMatt Macy 		pvd = oldvd->vdev_parent;
7506eda14cbcSMatt Macy 
7507dbd5678dSMartin Matuska 	if (spa_config_parse(spa, &newrootvd, nvroot, NULL, 0,
7508dbd5678dSMartin Matuska 	    VDEV_ALLOC_ATTACH) != 0)
7509eda14cbcSMatt Macy 		return (spa_vdev_exit(spa, NULL, txg, EINVAL));
7510eda14cbcSMatt Macy 
7511eda14cbcSMatt Macy 	if (newrootvd->vdev_children != 1)
7512eda14cbcSMatt Macy 		return (spa_vdev_exit(spa, newrootvd, txg, EINVAL));
7513eda14cbcSMatt Macy 
7514eda14cbcSMatt Macy 	newvd = newrootvd->vdev_child[0];
7515eda14cbcSMatt Macy 
7516eda14cbcSMatt Macy 	if (!newvd->vdev_ops->vdev_op_leaf)
7517eda14cbcSMatt Macy 		return (spa_vdev_exit(spa, newrootvd, txg, EINVAL));
7518eda14cbcSMatt Macy 
7519eda14cbcSMatt Macy 	if ((error = vdev_create(newrootvd, txg, replacing)) != 0)
7520eda14cbcSMatt Macy 		return (spa_vdev_exit(spa, newrootvd, txg, error));
7521eda14cbcSMatt Macy 
7522eda14cbcSMatt Macy 	/*
7523dbd5678dSMartin Matuska 	 * log, dedup and special vdevs should not be replaced by spares.
7524eda14cbcSMatt Macy 	 */
7525dbd5678dSMartin Matuska 	if ((oldvd->vdev_top->vdev_alloc_bias != VDEV_BIAS_NONE ||
7526dbd5678dSMartin Matuska 	    oldvd->vdev_top->vdev_islog) && newvd->vdev_isspare) {
7527eda14cbcSMatt Macy 		return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
7528dbd5678dSMartin Matuska 	}
7529eda14cbcSMatt Macy 
75307877fdebSMatt Macy 	/*
75317877fdebSMatt Macy 	 * A dRAID spare can only replace a child of its parent dRAID vdev.
75327877fdebSMatt Macy 	 */
75337877fdebSMatt Macy 	if (newvd->vdev_ops == &vdev_draid_spare_ops &&
75347877fdebSMatt Macy 	    oldvd->vdev_top != vdev_draid_spare_get_parent(newvd)) {
75357877fdebSMatt Macy 		return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
75367877fdebSMatt Macy 	}
75377877fdebSMatt Macy 
7538eda14cbcSMatt Macy 	if (rebuild) {
7539eda14cbcSMatt Macy 		/*
75407877fdebSMatt Macy 		 * For rebuilds, the top vdev must support reconstruction
7541eda14cbcSMatt Macy 		 * using only space maps.  This means the only allowable
75427877fdebSMatt Macy 		 * vdev types are the root vdev, a mirror, or dRAID.
7543eda14cbcSMatt Macy 		 */
75447877fdebSMatt Macy 		tvd = pvd;
75457877fdebSMatt Macy 		if (pvd->vdev_top != NULL)
75467877fdebSMatt Macy 			tvd = pvd->vdev_top;
75477877fdebSMatt Macy 
75487877fdebSMatt Macy 		if (tvd->vdev_ops != &vdev_mirror_ops &&
75497877fdebSMatt Macy 		    tvd->vdev_ops != &vdev_root_ops &&
75507877fdebSMatt Macy 		    tvd->vdev_ops != &vdev_draid_ops) {
7551eda14cbcSMatt Macy 			return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
7552eda14cbcSMatt Macy 		}
7553eda14cbcSMatt Macy 	}
7554eda14cbcSMatt Macy 
7555eda14cbcSMatt Macy 	if (!replacing) {
7556eda14cbcSMatt Macy 		/*
75572276e539SMartin Matuska 		 * For attach, the only allowable parent is a mirror or
75582276e539SMartin Matuska 		 * the root vdev. A raidz vdev can be attached to, but
75592276e539SMartin Matuska 		 * you cannot attach to a raidz child.
7560eda14cbcSMatt Macy 		 */
7561eda14cbcSMatt Macy 		if (pvd->vdev_ops != &vdev_mirror_ops &&
75622276e539SMartin Matuska 		    pvd->vdev_ops != &vdev_root_ops &&
75632276e539SMartin Matuska 		    !raidz)
7564eda14cbcSMatt Macy 			return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
7565eda14cbcSMatt Macy 
7566eda14cbcSMatt Macy 		pvops = &vdev_mirror_ops;
7567eda14cbcSMatt Macy 	} else {
7568eda14cbcSMatt Macy 		/*
7569eda14cbcSMatt Macy 		 * Active hot spares can only be replaced by inactive hot
7570eda14cbcSMatt Macy 		 * spares.
7571eda14cbcSMatt Macy 		 */
7572eda14cbcSMatt Macy 		if (pvd->vdev_ops == &vdev_spare_ops &&
7573eda14cbcSMatt Macy 		    oldvd->vdev_isspare &&
7574eda14cbcSMatt Macy 		    !spa_has_spare(spa, newvd->vdev_guid))
7575eda14cbcSMatt Macy 			return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
7576eda14cbcSMatt Macy 
7577eda14cbcSMatt Macy 		/*
7578eda14cbcSMatt Macy 		 * If the source is a hot spare, and the parent isn't already a
7579eda14cbcSMatt Macy 		 * spare, then we want to create a new hot spare.  Otherwise, we
7580eda14cbcSMatt Macy 		 * want to create a replacing vdev.  The user is not allowed to
7581eda14cbcSMatt Macy 		 * attach to a spared vdev child unless the 'isspare' state is
7582eda14cbcSMatt Macy 		 * the same (spare replaces spare, non-spare replaces
7583eda14cbcSMatt Macy 		 * non-spare).
7584eda14cbcSMatt Macy 		 */
7585eda14cbcSMatt Macy 		if (pvd->vdev_ops == &vdev_replacing_ops &&
7586eda14cbcSMatt Macy 		    spa_version(spa) < SPA_VERSION_MULTI_REPLACE) {
7587eda14cbcSMatt Macy 			return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
7588eda14cbcSMatt Macy 		} else if (pvd->vdev_ops == &vdev_spare_ops &&
7589eda14cbcSMatt Macy 		    newvd->vdev_isspare != oldvd->vdev_isspare) {
7590eda14cbcSMatt Macy 			return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
7591eda14cbcSMatt Macy 		}
7592eda14cbcSMatt Macy 
7593eda14cbcSMatt Macy 		if (newvd->vdev_isspare)
7594eda14cbcSMatt Macy 			pvops = &vdev_spare_ops;
7595eda14cbcSMatt Macy 		else
7596eda14cbcSMatt Macy 			pvops = &vdev_replacing_ops;
7597eda14cbcSMatt Macy 	}
7598eda14cbcSMatt Macy 
7599eda14cbcSMatt Macy 	/*
7600eda14cbcSMatt Macy 	 * Make sure the new device is big enough.
7601eda14cbcSMatt Macy 	 */
7602e716630dSMartin Matuska 	vdev_t *min_vdev = raidz ? oldvd->vdev_child[0] : oldvd;
7603e716630dSMartin Matuska 	if (newvd->vdev_asize < vdev_get_min_asize(min_vdev))
7604eda14cbcSMatt Macy 		return (spa_vdev_exit(spa, newrootvd, txg, EOVERFLOW));
7605eda14cbcSMatt Macy 
7606eda14cbcSMatt Macy 	/*
7607eda14cbcSMatt Macy 	 * The new device cannot have a higher alignment requirement
7608eda14cbcSMatt Macy 	 * than the top-level vdev.
7609eda14cbcSMatt Macy 	 */
7610*e2df9bb4SMartin Matuska 	if (newvd->vdev_ashift > oldvd->vdev_top->vdev_ashift) {
7611*e2df9bb4SMartin Matuska 		return (spa_vdev_exit(spa, newrootvd, txg,
7612*e2df9bb4SMartin Matuska 		    ZFS_ERR_ASHIFT_MISMATCH));
7613*e2df9bb4SMartin Matuska 	}
7614eda14cbcSMatt Macy 
7615eda14cbcSMatt Macy 	/*
7616e716630dSMartin Matuska 	 * RAIDZ-expansion-specific checks.
7617e716630dSMartin Matuska 	 */
7618e716630dSMartin Matuska 	if (raidz) {
7619e716630dSMartin Matuska 		if (vdev_raidz_attach_check(newvd) != 0)
7620e716630dSMartin Matuska 			return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
7621e716630dSMartin Matuska 
7622e716630dSMartin Matuska 		/*
7623e716630dSMartin Matuska 		 * Fail early if a child is not healthy or being replaced
7624e716630dSMartin Matuska 		 */
7625e716630dSMartin Matuska 		for (int i = 0; i < oldvd->vdev_children; i++) {
7626e716630dSMartin Matuska 			if (vdev_is_dead(oldvd->vdev_child[i]) ||
7627e716630dSMartin Matuska 			    !oldvd->vdev_child[i]->vdev_ops->vdev_op_leaf) {
7628e716630dSMartin Matuska 				return (spa_vdev_exit(spa, newrootvd, txg,
7629e716630dSMartin Matuska 				    ENXIO));
7630e716630dSMartin Matuska 			}
7631e716630dSMartin Matuska 			/* Also fail if reserved boot area is in-use */
7632e716630dSMartin Matuska 			if (vdev_check_boot_reserve(spa, oldvd->vdev_child[i])
7633e716630dSMartin Matuska 			    != 0) {
7634e716630dSMartin Matuska 				return (spa_vdev_exit(spa, newrootvd, txg,
7635e716630dSMartin Matuska 				    EADDRINUSE));
7636e716630dSMartin Matuska 			}
7637e716630dSMartin Matuska 		}
7638e716630dSMartin Matuska 	}
7639e716630dSMartin Matuska 
7640e716630dSMartin Matuska 	if (raidz) {
7641e716630dSMartin Matuska 		/*
7642e716630dSMartin Matuska 		 * Note: oldvdpath is freed later by spa_strfree(), but the
7643e716630dSMartin Matuska 		 * string returned by kmem_asprintf() must be freed by
7644e716630dSMartin Matuska 		 * kmem_strfree(), so copy it into a spa_strdup()-ed string.
7645e716630dSMartin Matuska 		 */
7646e716630dSMartin Matuska 		char *tmp = kmem_asprintf("raidz%u-%u",
7647e716630dSMartin Matuska 		    (uint_t)vdev_get_nparity(oldvd), (uint_t)oldvd->vdev_id);
7648e716630dSMartin Matuska 		oldvdpath = spa_strdup(tmp);
7649e716630dSMartin Matuska 		kmem_strfree(tmp);
7650e716630dSMartin Matuska 	} else {
7651e716630dSMartin Matuska 		oldvdpath = spa_strdup(oldvd->vdev_path);
7652e716630dSMartin Matuska 	}
7653e716630dSMartin Matuska 	newvdpath = spa_strdup(newvd->vdev_path);
7654e716630dSMartin Matuska 
7655e716630dSMartin Matuska 	/*
7656eda14cbcSMatt Macy 	 * If this is an in-place replacement, update oldvd's path and devid
7657eda14cbcSMatt Macy 	 * to make it distinguishable from newvd, and unopenable from now on.
7658eda14cbcSMatt Macy 	 */
7659e716630dSMartin Matuska 	if (strcmp(oldvdpath, newvdpath) == 0) {
7660eda14cbcSMatt Macy 		spa_strfree(oldvd->vdev_path);
7661e716630dSMartin Matuska 		oldvd->vdev_path = kmem_alloc(strlen(newvdpath) + 5,
7662eda14cbcSMatt Macy 		    KM_SLEEP);
7663e716630dSMartin Matuska 		(void) sprintf(oldvd->vdev_path, "%s/old",
7664e716630dSMartin Matuska 		    newvdpath);
7665eda14cbcSMatt Macy 		if (oldvd->vdev_devid != NULL) {
7666eda14cbcSMatt Macy 			spa_strfree(oldvd->vdev_devid);
7667eda14cbcSMatt Macy 			oldvd->vdev_devid = NULL;
7668eda14cbcSMatt Macy 		}
7669e716630dSMartin Matuska 		spa_strfree(oldvdpath);
7670e716630dSMartin Matuska 		oldvdpath = spa_strdup(oldvd->vdev_path);
7671eda14cbcSMatt Macy 	}
7672eda14cbcSMatt Macy 
7673eda14cbcSMatt Macy 	/*
7674eda14cbcSMatt Macy 	 * If the parent is not a mirror, or if we're replacing, insert the new
7675eda14cbcSMatt Macy 	 * mirror/replacing/spare vdev above oldvd.
7676eda14cbcSMatt Macy 	 */
7677e716630dSMartin Matuska 	if (!raidz && pvd->vdev_ops != pvops) {
7678eda14cbcSMatt Macy 		pvd = vdev_add_parent(oldvd, pvops);
7679eda14cbcSMatt Macy 		ASSERT(pvd->vdev_ops == pvops);
7680eda14cbcSMatt Macy 		ASSERT(oldvd->vdev_parent == pvd);
7681e716630dSMartin Matuska 	}
7682e716630dSMartin Matuska 
7683e716630dSMartin Matuska 	ASSERT(pvd->vdev_top->vdev_parent == rvd);
7684eda14cbcSMatt Macy 
7685eda14cbcSMatt Macy 	/*
7686eda14cbcSMatt Macy 	 * Extract the new device from its root and add it to pvd.
7687eda14cbcSMatt Macy 	 */
7688eda14cbcSMatt Macy 	vdev_remove_child(newrootvd, newvd);
7689eda14cbcSMatt Macy 	newvd->vdev_id = pvd->vdev_children;
7690eda14cbcSMatt Macy 	newvd->vdev_crtxg = oldvd->vdev_crtxg;
7691eda14cbcSMatt Macy 	vdev_add_child(pvd, newvd);
7692eda14cbcSMatt Macy 
7693eda14cbcSMatt Macy 	/*
7694eda14cbcSMatt Macy 	 * Reevaluate the parent vdev state.
7695eda14cbcSMatt Macy 	 */
7696eda14cbcSMatt Macy 	vdev_propagate_state(pvd);
7697eda14cbcSMatt Macy 
7698eda14cbcSMatt Macy 	tvd = newvd->vdev_top;
7699eda14cbcSMatt Macy 	ASSERT(pvd->vdev_top == tvd);
7700eda14cbcSMatt Macy 	ASSERT(tvd->vdev_parent == rvd);
7701eda14cbcSMatt Macy 
7702eda14cbcSMatt Macy 	vdev_config_dirty(tvd);
7703eda14cbcSMatt Macy 
7704eda14cbcSMatt Macy 	/*
7705eda14cbcSMatt Macy 	 * Set newvd's DTL to [TXG_INITIAL, dtl_max_txg) so that we account
7706eda14cbcSMatt Macy 	 * for any dmu_sync-ed blocks.  It will propagate upward when
7707eda14cbcSMatt Macy 	 * spa_vdev_exit() calls vdev_dtl_reassess().
7708eda14cbcSMatt Macy 	 */
7709eda14cbcSMatt Macy 	dtl_max_txg = txg + TXG_CONCURRENT_STATES;
7710eda14cbcSMatt Macy 
7711e716630dSMartin Matuska 	if (raidz) {
7712e716630dSMartin Matuska 		/*
7713e716630dSMartin Matuska 		 * Wait for the youngest allocations and frees to sync,
7714e716630dSMartin Matuska 		 * and then wait for the deferral of those frees to finish.
7715e716630dSMartin Matuska 		 */
7716e716630dSMartin Matuska 		spa_vdev_config_exit(spa, NULL,
7717e716630dSMartin Matuska 		    txg + TXG_CONCURRENT_STATES + TXG_DEFER_SIZE, 0, FTAG);
7718e716630dSMartin Matuska 
7719e716630dSMartin Matuska 		vdev_initialize_stop_all(tvd, VDEV_INITIALIZE_ACTIVE);
7720e716630dSMartin Matuska 		vdev_trim_stop_all(tvd, VDEV_TRIM_ACTIVE);
7721e716630dSMartin Matuska 		vdev_autotrim_stop_wait(tvd);
7722e716630dSMartin Matuska 
7723e716630dSMartin Matuska 		dtl_max_txg = spa_vdev_config_enter(spa);
7724e716630dSMartin Matuska 
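		/*
		 * Flag the top-level vdev as expanding; initialize and TRIM
		 * start requests check this flag and return EBUSY while it
		 * is set.
		 */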
7725e716630dSMartin Matuska 		tvd->vdev_rz_expanding = B_TRUE;
7726e716630dSMartin Matuska 
7727e716630dSMartin Matuska 		vdev_dirty_leaves(tvd, VDD_DTL, dtl_max_txg);
7728e716630dSMartin Matuska 		vdev_config_dirty(tvd);
7729e716630dSMartin Matuska 
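		/*
		 * The remainder of the raidz attach work is done by
		 * vdev_raidz_attach_sync(), run as a sync task in the txg
		 * dirtied above.
		 */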
7730e716630dSMartin Matuska 		dmu_tx_t *tx = dmu_tx_create_assigned(spa->spa_dsl_pool,
7731e716630dSMartin Matuska 		    dtl_max_txg);
7732e716630dSMartin Matuska 		dsl_sync_task_nowait(spa->spa_dsl_pool, vdev_raidz_attach_sync,
7733e716630dSMartin Matuska 		    newvd, tx);
7734e716630dSMartin Matuska 		dmu_tx_commit(tx);
7735e716630dSMartin Matuska 	} else {
7736e716630dSMartin Matuska 		vdev_dtl_dirty(newvd, DTL_MISSING, TXG_INITIAL,
7737e716630dSMartin Matuska 		    dtl_max_txg - TXG_INITIAL);
7738eda14cbcSMatt Macy 
7739eda14cbcSMatt Macy 		if (newvd->vdev_isspare) {
7740eda14cbcSMatt Macy 			spa_spare_activate(newvd);
7741eda14cbcSMatt Macy 			spa_event_notify(spa, newvd, NULL, ESC_ZFS_VDEV_SPARE);
7742eda14cbcSMatt Macy 		}
7743eda14cbcSMatt Macy 
7744eda14cbcSMatt Macy 		newvd_isspare = newvd->vdev_isspare;
7745eda14cbcSMatt Macy 
7746eda14cbcSMatt Macy 		/*
7747eda14cbcSMatt Macy 		 * Mark newvd's DTL dirty in this txg.
7748eda14cbcSMatt Macy 		 */
7749eda14cbcSMatt Macy 		vdev_dirty(tvd, VDD_DTL, newvd, txg);
7750eda14cbcSMatt Macy 
7751eda14cbcSMatt Macy 		/*
7752e716630dSMartin Matuska 		 * Schedule the resilver or rebuild to restart in the future.
7753e716630dSMartin Matuska 		 * We do this to ensure that dmu_sync-ed blocks have been
7754e716630dSMartin Matuska 		 * stitched into the respective datasets.
7755eda14cbcSMatt Macy 		 */
7756eda14cbcSMatt Macy 		if (rebuild) {
7757eda14cbcSMatt Macy 			newvd->vdev_rebuild_txg = txg;
7758eda14cbcSMatt Macy 
7759eda14cbcSMatt Macy 			vdev_rebuild(tvd);
7760eda14cbcSMatt Macy 		} else {
7761eda14cbcSMatt Macy 			newvd->vdev_resilver_txg = txg;
7762eda14cbcSMatt Macy 
7763eda14cbcSMatt Macy 			if (dsl_scan_resilvering(spa_get_dsl(spa)) &&
7764e716630dSMartin Matuska 			    spa_feature_is_enabled(spa,
7765e716630dSMartin Matuska 			    SPA_FEATURE_RESILVER_DEFER)) {
7766eda14cbcSMatt Macy 				vdev_defer_resilver(newvd);
7767eda14cbcSMatt Macy 			} else {
7768eda14cbcSMatt Macy 				dsl_scan_restart_resilver(spa->spa_dsl_pool,
7769eda14cbcSMatt Macy 				    dtl_max_txg);
7770eda14cbcSMatt Macy 			}
7771eda14cbcSMatt Macy 		}
7772e716630dSMartin Matuska 	}
7773eda14cbcSMatt Macy 
7774eda14cbcSMatt Macy 	if (spa->spa_bootfs)
7775eda14cbcSMatt Macy 		spa_event_notify(spa, newvd, NULL, ESC_ZFS_BOOTFS_VDEV_ATTACH);
7776eda14cbcSMatt Macy 
7777eda14cbcSMatt Macy 	spa_event_notify(spa, newvd, NULL, ESC_ZFS_VDEV_ATTACH);
7778eda14cbcSMatt Macy 
7779eda14cbcSMatt Macy 	/*
7780eda14cbcSMatt Macy 	 * Commit the config
7781eda14cbcSMatt Macy 	 */
7782eda14cbcSMatt Macy 	(void) spa_vdev_exit(spa, newrootvd, dtl_max_txg, 0);
7783eda14cbcSMatt Macy 
7784eda14cbcSMatt Macy 	spa_history_log_internal(spa, "vdev attach", NULL,
7785eda14cbcSMatt Macy 	    "%s vdev=%s %s vdev=%s",
7786eda14cbcSMatt Macy 	    replacing && newvd_isspare ? "spare in" :
7787eda14cbcSMatt Macy 	    replacing ? "replace" : "attach", newvdpath,
7788eda14cbcSMatt Macy 	    replacing ? "for" : "to", oldvdpath);
7789eda14cbcSMatt Macy 
7790eda14cbcSMatt Macy 	spa_strfree(oldvdpath);
7791eda14cbcSMatt Macy 	spa_strfree(newvdpath);
7792eda14cbcSMatt Macy 
7793eda14cbcSMatt Macy 	return (0);
7794eda14cbcSMatt Macy }
7795eda14cbcSMatt Macy 
7796eda14cbcSMatt Macy /*
7797eda14cbcSMatt Macy  * Detach a device from a mirror or replacing vdev.
7798eda14cbcSMatt Macy  *
7799eda14cbcSMatt Macy  * If 'replace_done' is specified, only detach if the parent
7800d411c1d6SMartin Matuska  * is a replacing or a spare vdev.
7801eda14cbcSMatt Macy  */
7802eda14cbcSMatt Macy int
7803eda14cbcSMatt Macy spa_vdev_detach(spa_t *spa, uint64_t guid, uint64_t pguid, int replace_done)
7804eda14cbcSMatt Macy {
7805eda14cbcSMatt Macy 	uint64_t txg;
7806eda14cbcSMatt Macy 	int error;
7807eda14cbcSMatt Macy 	vdev_t *rvd __maybe_unused = spa->spa_root_vdev;
7808eda14cbcSMatt Macy 	vdev_t *vd, *pvd, *cvd, *tvd;
7809eda14cbcSMatt Macy 	boolean_t unspare = B_FALSE;
7810eda14cbcSMatt Macy 	uint64_t unspare_guid = 0;
7811eda14cbcSMatt Macy 	char *vdpath;
7812eda14cbcSMatt Macy 
7813eda14cbcSMatt Macy 	ASSERT(spa_writeable(spa));
7814eda14cbcSMatt Macy 
7815eda14cbcSMatt Macy 	txg = spa_vdev_detach_enter(spa, guid);
7816eda14cbcSMatt Macy 
7817eda14cbcSMatt Macy 	vd = spa_lookup_by_guid(spa, guid, B_FALSE);
7818eda14cbcSMatt Macy 
7819eda14cbcSMatt Macy 	/*
7820eda14cbcSMatt Macy 	 * Besides being called directly from the userland through the
7821eda14cbcSMatt Macy 	 * ioctl interface, spa_vdev_detach() can be potentially called
7822eda14cbcSMatt Macy 	 * at the end of spa_vdev_resilver_done().
7823eda14cbcSMatt Macy 	 *
7824eda14cbcSMatt Macy 	 * In the regular case, when we have a checkpoint this shouldn't
7825eda14cbcSMatt Macy 	 * happen as we never empty the DTLs of a vdev during the scrub
7826eda14cbcSMatt Macy 	 * [see comment in dsl_scan_done()]. Thus spa_vdev_resilver_done()
7827eda14cbcSMatt Macy 	 * should never get here when we have a checkpoint.
7828eda14cbcSMatt Macy 	 *
7829eda14cbcSMatt Macy 	 * That said, even in a case when we checkpoint the pool exactly
7830eda14cbcSMatt Macy 	 * as spa_vdev_resilver_done() calls this function everything
7831eda14cbcSMatt Macy 	 * should be fine as the resilver will return right away.
7832eda14cbcSMatt Macy 	 */
7833eda14cbcSMatt Macy 	ASSERT(MUTEX_HELD(&spa_namespace_lock));
7834eda14cbcSMatt Macy 	if (spa_feature_is_active(spa, SPA_FEATURE_POOL_CHECKPOINT)) {
7835eda14cbcSMatt Macy 		error = (spa_has_checkpoint(spa)) ?
7836eda14cbcSMatt Macy 		    ZFS_ERR_CHECKPOINT_EXISTS : ZFS_ERR_DISCARDING_CHECKPOINT;
7837eda14cbcSMatt Macy 		return (spa_vdev_exit(spa, NULL, txg, error));
7838eda14cbcSMatt Macy 	}
7839eda14cbcSMatt Macy 
7840eda14cbcSMatt Macy 	if (vd == NULL)
7841eda14cbcSMatt Macy 		return (spa_vdev_exit(spa, NULL, txg, ENODEV));
7842eda14cbcSMatt Macy 
7843eda14cbcSMatt Macy 	if (!vd->vdev_ops->vdev_op_leaf)
7844eda14cbcSMatt Macy 		return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
7845eda14cbcSMatt Macy 
7846eda14cbcSMatt Macy 	pvd = vd->vdev_parent;
7847eda14cbcSMatt Macy 
7848eda14cbcSMatt Macy 	/*
7849eda14cbcSMatt Macy 	 * If the parent/child relationship is not as expected, don't do it.
7850eda14cbcSMatt Macy 	 * Consider M(A,R(B,C)) -- that is, a mirror of A with a replacing
7851eda14cbcSMatt Macy 	 * vdev that's replacing B with C.  The user's intent in replacing
7852eda14cbcSMatt Macy 	 * is to go from M(A,B) to M(A,C).  If the user decides to cancel
7853eda14cbcSMatt Macy 	 * the replace by detaching C, the expected behavior is to end up
7854eda14cbcSMatt Macy 	 * M(A,B).  But suppose that right after deciding to detach C,
7855eda14cbcSMatt Macy 	 * the replacement of B completes.  We would have M(A,C), and then
7856eda14cbcSMatt Macy 	 * ask to detach C, which would leave us with just A -- not what
7857eda14cbcSMatt Macy 	 * the user wanted.  To prevent this, we make sure that the
7858eda14cbcSMatt Macy 	 * parent/child relationship hasn't changed -- in this example,
7859eda14cbcSMatt Macy 	 * that C's parent is still the replacing vdev R.
7860eda14cbcSMatt Macy 	 */
7861eda14cbcSMatt Macy 	if (pvd->vdev_guid != pguid && pguid != 0)
7862eda14cbcSMatt Macy 		return (spa_vdev_exit(spa, NULL, txg, EBUSY));
7863eda14cbcSMatt Macy 
7864eda14cbcSMatt Macy 	/*
7865eda14cbcSMatt Macy 	 * A 'replace done' detach requires a 'replacing' or 'spare' parent vdev.
7866eda14cbcSMatt Macy 	 */
7867eda14cbcSMatt Macy 	if (replace_done && pvd->vdev_ops != &vdev_replacing_ops &&
7868eda14cbcSMatt Macy 	    pvd->vdev_ops != &vdev_spare_ops)
7869eda14cbcSMatt Macy 		return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
7870eda14cbcSMatt Macy 
7871eda14cbcSMatt Macy 	ASSERT(pvd->vdev_ops != &vdev_spare_ops ||
7872eda14cbcSMatt Macy 	    spa_version(spa) >= SPA_VERSION_SPARES);
7873eda14cbcSMatt Macy 
7874eda14cbcSMatt Macy 	/*
7875eda14cbcSMatt Macy 	 * Only mirror, replacing, and spare vdevs support detach.
7876eda14cbcSMatt Macy 	 */
7877eda14cbcSMatt Macy 	if (pvd->vdev_ops != &vdev_replacing_ops &&
7878eda14cbcSMatt Macy 	    pvd->vdev_ops != &vdev_mirror_ops &&
7879eda14cbcSMatt Macy 	    pvd->vdev_ops != &vdev_spare_ops)
7880eda14cbcSMatt Macy 		return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
7881eda14cbcSMatt Macy 
7882eda14cbcSMatt Macy 	/*
7883eda14cbcSMatt Macy 	 * If this device has the only valid copy of some data,
7884eda14cbcSMatt Macy 	 * we cannot safely detach it.
7885eda14cbcSMatt Macy 	 */
7886eda14cbcSMatt Macy 	if (vdev_dtl_required(vd))
7887eda14cbcSMatt Macy 		return (spa_vdev_exit(spa, NULL, txg, EBUSY));
7888eda14cbcSMatt Macy 
7889eda14cbcSMatt Macy 	ASSERT(pvd->vdev_children >= 2);
7890eda14cbcSMatt Macy 
7891eda14cbcSMatt Macy 	/*
7892eda14cbcSMatt Macy 	 * If we are detaching the second disk from a replacing vdev, then
7893eda14cbcSMatt Macy 	 * check to see if we changed the original vdev's path to have "/old"
7894eda14cbcSMatt Macy 	 * at the end in spa_vdev_attach().  If so, undo that change now.
7895eda14cbcSMatt Macy 	 */
7896eda14cbcSMatt Macy 	if (pvd->vdev_ops == &vdev_replacing_ops && vd->vdev_id > 0 &&
7897eda14cbcSMatt Macy 	    vd->vdev_path != NULL) {
7898eda14cbcSMatt Macy 		size_t len = strlen(vd->vdev_path);
7899eda14cbcSMatt Macy 
7900eda14cbcSMatt Macy 		for (int c = 0; c < pvd->vdev_children; c++) {
7901eda14cbcSMatt Macy 			cvd = pvd->vdev_child[c];
7902eda14cbcSMatt Macy 
7903eda14cbcSMatt Macy 			if (cvd == vd || cvd->vdev_path == NULL)
7904eda14cbcSMatt Macy 				continue;
7905eda14cbcSMatt Macy 
7906eda14cbcSMatt Macy 			if (strncmp(cvd->vdev_path, vd->vdev_path, len) == 0 &&
7907eda14cbcSMatt Macy 			    strcmp(cvd->vdev_path + len, "/old") == 0) {
7908eda14cbcSMatt Macy 				spa_strfree(cvd->vdev_path);
7909eda14cbcSMatt Macy 				cvd->vdev_path = spa_strdup(vd->vdev_path);
7910eda14cbcSMatt Macy 				break;
7911eda14cbcSMatt Macy 			}
7912eda14cbcSMatt Macy 		}
7913eda14cbcSMatt Macy 	}
7914eda14cbcSMatt Macy 
7915eda14cbcSMatt Macy 	/*
79167877fdebSMatt Macy 	 * If we are detaching the original disk from a normal spare, then it
79177877fdebSMatt Macy 	 * implies that the spare should become a real disk, and be removed
79187877fdebSMatt Macy 	 * from the active spare list for the pool.  dRAID spares on the
79197877fdebSMatt Macy 	 * other hand are coupled to the pool and thus should never be removed
79207877fdebSMatt Macy 	 * from the spares list.
7921eda14cbcSMatt Macy 	 */
79227877fdebSMatt Macy 	if (pvd->vdev_ops == &vdev_spare_ops && vd->vdev_id == 0) {
79237877fdebSMatt Macy 		vdev_t *last_cvd = pvd->vdev_child[pvd->vdev_children - 1];
79247877fdebSMatt Macy 
79257877fdebSMatt Macy 		if (last_cvd->vdev_isspare &&
79267877fdebSMatt Macy 		    last_cvd->vdev_ops != &vdev_draid_spare_ops) {
7927eda14cbcSMatt Macy 			unspare = B_TRUE;
79287877fdebSMatt Macy 		}
79297877fdebSMatt Macy 	}
7930eda14cbcSMatt Macy 
7931eda14cbcSMatt Macy 	/*
7932eda14cbcSMatt Macy 	 * Erase the disk labels so the disk can be used for other things.
7933eda14cbcSMatt Macy 	 * This must be done after all other error cases are handled,
7934eda14cbcSMatt Macy 	 * but before we disembowel vd (so we can still do I/O to it).
7935eda14cbcSMatt Macy 	 * But if we can't do it, don't treat the error as fatal --
7936eda14cbcSMatt Macy 	 * it may be that the unwritability of the disk is the reason
7937eda14cbcSMatt Macy 	 * it's being detached!
7938eda14cbcSMatt Macy 	 */
7939dbd5678dSMartin Matuska 	(void) vdev_label_init(vd, 0, VDEV_LABEL_REMOVE);
7940eda14cbcSMatt Macy 
7941eda14cbcSMatt Macy 	/*
7942eda14cbcSMatt Macy 	 * Remove vd from its parent and compact the parent's children.
7943eda14cbcSMatt Macy 	 */
7944eda14cbcSMatt Macy 	vdev_remove_child(pvd, vd);
7945eda14cbcSMatt Macy 	vdev_compact_children(pvd);
7946eda14cbcSMatt Macy 
7947eda14cbcSMatt Macy 	/*
7948eda14cbcSMatt Macy 	 * Remember one of the remaining children so we can get tvd below.
7949eda14cbcSMatt Macy 	 */
7950eda14cbcSMatt Macy 	cvd = pvd->vdev_child[pvd->vdev_children - 1];
7951eda14cbcSMatt Macy 
7952eda14cbcSMatt Macy 	/*
7953eda14cbcSMatt Macy 	 * If we need to remove the remaining child from the list of hot spares,
7954eda14cbcSMatt Macy 	 * do it now, marking the vdev as no longer a spare in the process.
7955eda14cbcSMatt Macy 	 * We must do this before vdev_remove_parent(), because that can
7956eda14cbcSMatt Macy 	 * change the GUID if it creates a new toplevel GUID.  For a similar
7957eda14cbcSMatt Macy 	 * reason, we must remove the spare now, in the same txg as the detach;
7958eda14cbcSMatt Macy 	 * otherwise someone could attach a new sibling, change the GUID, and
7959eda14cbcSMatt Macy 	 * the subsequent attempt to spa_vdev_remove(unspare_guid) would fail.
7960eda14cbcSMatt Macy 	 */
7961eda14cbcSMatt Macy 	if (unspare) {
7962eda14cbcSMatt Macy 		ASSERT(cvd->vdev_isspare);
7963eda14cbcSMatt Macy 		spa_spare_remove(cvd);
7964eda14cbcSMatt Macy 		unspare_guid = cvd->vdev_guid;
7965eda14cbcSMatt Macy 		(void) spa_vdev_remove(spa, unspare_guid, B_TRUE);
7966eda14cbcSMatt Macy 		cvd->vdev_unspare = B_TRUE;
7967eda14cbcSMatt Macy 	}
7968eda14cbcSMatt Macy 
7969eda14cbcSMatt Macy 	/*
7970eda14cbcSMatt Macy 	 * If the parent mirror/replacing vdev only has one child,
7971eda14cbcSMatt Macy 	 * the parent is no longer needed.  Remove it from the tree.
7972eda14cbcSMatt Macy 	 */
7973eda14cbcSMatt Macy 	if (pvd->vdev_children == 1) {
7974eda14cbcSMatt Macy 		if (pvd->vdev_ops == &vdev_spare_ops)
7975eda14cbcSMatt Macy 			cvd->vdev_unspare = B_FALSE;
7976eda14cbcSMatt Macy 		vdev_remove_parent(cvd);
7977eda14cbcSMatt Macy 	}
7978eda14cbcSMatt Macy 
7979eda14cbcSMatt Macy 	/*
7980eda14cbcSMatt Macy 	 * We don't set tvd until now because the parent we just removed
7981eda14cbcSMatt Macy 	 * may have been the previous top-level vdev.
7982eda14cbcSMatt Macy 	 */
7983eda14cbcSMatt Macy 	tvd = cvd->vdev_top;
7984eda14cbcSMatt Macy 	ASSERT(tvd->vdev_parent == rvd);
7985eda14cbcSMatt Macy 
7986eda14cbcSMatt Macy 	/*
7987eda14cbcSMatt Macy 	 * Reevaluate the parent vdev state.
7988eda14cbcSMatt Macy 	 */
7989eda14cbcSMatt Macy 	vdev_propagate_state(cvd);
7990eda14cbcSMatt Macy 
7991eda14cbcSMatt Macy 	/*
7992eda14cbcSMatt Macy 	 * If the 'autoexpand' property is set on the pool then automatically
7993eda14cbcSMatt Macy 	 * try to expand the size of the pool. For example if the device we
7994eda14cbcSMatt Macy 	 * just detached was smaller than the others, it may be possible to
7995eda14cbcSMatt Macy 	 * add metaslabs (i.e. grow the pool). We need to reopen the vdev
7996eda14cbcSMatt Macy 	 * first so that we can obtain the updated sizes of the leaf vdevs.
7997eda14cbcSMatt Macy 	 */
7998eda14cbcSMatt Macy 	if (spa->spa_autoexpand) {
7999eda14cbcSMatt Macy 		vdev_reopen(tvd);
8000eda14cbcSMatt Macy 		vdev_expand(tvd, txg);
8001eda14cbcSMatt Macy 	}
8002eda14cbcSMatt Macy 
8003eda14cbcSMatt Macy 	vdev_config_dirty(tvd);
8004eda14cbcSMatt Macy 
8005eda14cbcSMatt Macy 	/*
8006eda14cbcSMatt Macy 	 * Mark vd's DTL as dirty in this txg.  vdev_dtl_sync() will see that
8007eda14cbcSMatt Macy 	 * vd->vdev_detached is set and free vd's DTL object in syncing context.
8008eda14cbcSMatt Macy 	 * But first make sure we're not on any *other* txg's DTL list, to
8009eda14cbcSMatt Macy 	 * prevent vd from being accessed after it's freed.
8010eda14cbcSMatt Macy 	 */
8011eda14cbcSMatt Macy 	vdpath = spa_strdup(vd->vdev_path ? vd->vdev_path : "none");
8012eda14cbcSMatt Macy 	for (int t = 0; t < TXG_SIZE; t++)
8013eda14cbcSMatt Macy 		(void) txg_list_remove_this(&tvd->vdev_dtl_list, vd, t);
8014eda14cbcSMatt Macy 	vd->vdev_detached = B_TRUE;
8015eda14cbcSMatt Macy 	vdev_dirty(tvd, VDD_DTL, vd, txg);
8016eda14cbcSMatt Macy 
8017eda14cbcSMatt Macy 	spa_event_notify(spa, vd, NULL, ESC_ZFS_VDEV_REMOVE);
8018eda14cbcSMatt Macy 	spa_notify_waiters(spa);
8019eda14cbcSMatt Macy 
8020eda14cbcSMatt Macy 	/* hang on to the spa before we release the lock */
8021eda14cbcSMatt Macy 	spa_open_ref(spa, FTAG);
8022eda14cbcSMatt Macy 
8023eda14cbcSMatt Macy 	error = spa_vdev_exit(spa, vd, txg, 0);
8024eda14cbcSMatt Macy 
8025eda14cbcSMatt Macy 	spa_history_log_internal(spa, "detach", NULL,
8026eda14cbcSMatt Macy 	    "vdev=%s", vdpath);
8027eda14cbcSMatt Macy 	spa_strfree(vdpath);
8028eda14cbcSMatt Macy 
8029eda14cbcSMatt Macy 	/*
8030eda14cbcSMatt Macy 	 * If this was the removal of the original device in a hot spare vdev,
8031eda14cbcSMatt Macy 	 * then we want to go through and remove the device from the hot spare
8032eda14cbcSMatt Macy 	 * list of every other pool.
8033eda14cbcSMatt Macy 	 */
8034eda14cbcSMatt Macy 	if (unspare) {
8035eda14cbcSMatt Macy 		spa_t *altspa = NULL;
8036eda14cbcSMatt Macy 
8037eda14cbcSMatt Macy 		mutex_enter(&spa_namespace_lock);
8038eda14cbcSMatt Macy 		while ((altspa = spa_next(altspa)) != NULL) {
8039eda14cbcSMatt Macy 			if (altspa->spa_state != POOL_STATE_ACTIVE ||
8040eda14cbcSMatt Macy 			    altspa == spa)
8041eda14cbcSMatt Macy 				continue;
8042eda14cbcSMatt Macy 
8043eda14cbcSMatt Macy 			spa_open_ref(altspa, FTAG);
8044eda14cbcSMatt Macy 			mutex_exit(&spa_namespace_lock);
8045eda14cbcSMatt Macy 			(void) spa_vdev_remove(altspa, unspare_guid, B_TRUE);
8046eda14cbcSMatt Macy 			mutex_enter(&spa_namespace_lock);
8047eda14cbcSMatt Macy 			spa_close(altspa, FTAG);
8048eda14cbcSMatt Macy 		}
8049eda14cbcSMatt Macy 		mutex_exit(&spa_namespace_lock);
8050eda14cbcSMatt Macy 
8051eda14cbcSMatt Macy 		/* search the rest of the vdevs for spares to remove */
8052eda14cbcSMatt Macy 		spa_vdev_resilver_done(spa);
8053eda14cbcSMatt Macy 	}
8054eda14cbcSMatt Macy 
8055eda14cbcSMatt Macy 	/* all done with the spa; OK to release */
8056eda14cbcSMatt Macy 	mutex_enter(&spa_namespace_lock);
8057eda14cbcSMatt Macy 	spa_close(spa, FTAG);
8058eda14cbcSMatt Macy 	mutex_exit(&spa_namespace_lock);
8059eda14cbcSMatt Macy 
8060eda14cbcSMatt Macy 	return (error);
8061eda14cbcSMatt Macy }
8062eda14cbcSMatt Macy 
8063eda14cbcSMatt Macy static int
8064eda14cbcSMatt Macy spa_vdev_initialize_impl(spa_t *spa, uint64_t guid, uint64_t cmd_type,
8065eda14cbcSMatt Macy     list_t *vd_list)
8066eda14cbcSMatt Macy {
8067eda14cbcSMatt Macy 	ASSERT(MUTEX_HELD(&spa_namespace_lock));
8068eda14cbcSMatt Macy 
8069eda14cbcSMatt Macy 	spa_config_enter(spa, SCL_CONFIG | SCL_STATE, FTAG, RW_READER);
8070eda14cbcSMatt Macy 
8071eda14cbcSMatt Macy 	/* Look up vdev and ensure it's a leaf. */
8072eda14cbcSMatt Macy 	vdev_t *vd = spa_lookup_by_guid(spa, guid, B_FALSE);
8073eda14cbcSMatt Macy 	if (vd == NULL || vd->vdev_detached) {
8074eda14cbcSMatt Macy 		spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
8075eda14cbcSMatt Macy 		return (SET_ERROR(ENODEV));
8076eda14cbcSMatt Macy 	} else if (!vd->vdev_ops->vdev_op_leaf || !vdev_is_concrete(vd)) {
8077eda14cbcSMatt Macy 		spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
8078eda14cbcSMatt Macy 		return (SET_ERROR(EINVAL));
8079eda14cbcSMatt Macy 	} else if (!vdev_writeable(vd)) {
8080eda14cbcSMatt Macy 		spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
8081eda14cbcSMatt Macy 		return (SET_ERROR(EROFS));
8082eda14cbcSMatt Macy 	}
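	/*
	 * Take the per-vdev initialize lock before dropping the config
	 * locks; the state checks and command dispatch below are all
	 * performed under this lock.
	 */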
8083eda14cbcSMatt Macy 	mutex_enter(&vd->vdev_initialize_lock);
8084eda14cbcSMatt Macy 	spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
8085eda14cbcSMatt Macy 
8086eda14cbcSMatt Macy 	/*
8087eda14cbcSMatt Macy 	 * When we activate an initialize action we check to see
8088eda14cbcSMatt Macy 	 * if the vdev_initialize_thread is NULL. We do this instead
8089eda14cbcSMatt Macy 	 * of using the vdev_initialize_state since there might be
8090eda14cbcSMatt Macy 	 * a previous initialization process which has completed but
8091eda14cbcSMatt Macy 	 * whose thread has not yet exited.
8092eda14cbcSMatt Macy 	 */
8093eda14cbcSMatt Macy 	if (cmd_type == POOL_INITIALIZE_START &&
8094eda14cbcSMatt Macy 	    (vd->vdev_initialize_thread != NULL ||
8095e716630dSMartin Matuska 	    vd->vdev_top->vdev_removing || vd->vdev_top->vdev_rz_expanding)) {
8096eda14cbcSMatt Macy 		mutex_exit(&vd->vdev_initialize_lock);
8097eda14cbcSMatt Macy 		return (SET_ERROR(EBUSY));
8098eda14cbcSMatt Macy 	} else if (cmd_type == POOL_INITIALIZE_CANCEL &&
8099eda14cbcSMatt Macy 	    (vd->vdev_initialize_state != VDEV_INITIALIZE_ACTIVE &&
8100eda14cbcSMatt Macy 	    vd->vdev_initialize_state != VDEV_INITIALIZE_SUSPENDED)) {
8101eda14cbcSMatt Macy 		mutex_exit(&vd->vdev_initialize_lock);
8102eda14cbcSMatt Macy 		return (SET_ERROR(ESRCH));
8103eda14cbcSMatt Macy 	} else if (cmd_type == POOL_INITIALIZE_SUSPEND &&
8104eda14cbcSMatt Macy 	    vd->vdev_initialize_state != VDEV_INITIALIZE_ACTIVE) {
8105eda14cbcSMatt Macy 		mutex_exit(&vd->vdev_initialize_lock);
8106eda14cbcSMatt Macy 		return (SET_ERROR(ESRCH));
8107c0a83fe0SMartin Matuska 	} else if (cmd_type == POOL_INITIALIZE_UNINIT &&
8108c0a83fe0SMartin Matuska 	    vd->vdev_initialize_thread != NULL) {
8109c0a83fe0SMartin Matuska 		mutex_exit(&vd->vdev_initialize_lock);
8110c0a83fe0SMartin Matuska 		return (SET_ERROR(EBUSY));
8111eda14cbcSMatt Macy 	}
8112eda14cbcSMatt Macy 
8113eda14cbcSMatt Macy 	switch (cmd_type) {
8114eda14cbcSMatt Macy 	case POOL_INITIALIZE_START:
8115eda14cbcSMatt Macy 		vdev_initialize(vd);
8116eda14cbcSMatt Macy 		break;
8117eda14cbcSMatt Macy 	case POOL_INITIALIZE_CANCEL:
8118eda14cbcSMatt Macy 		vdev_initialize_stop(vd, VDEV_INITIALIZE_CANCELED, vd_list);
8119eda14cbcSMatt Macy 		break;
8120eda14cbcSMatt Macy 	case POOL_INITIALIZE_SUSPEND:
8121eda14cbcSMatt Macy 		vdev_initialize_stop(vd, VDEV_INITIALIZE_SUSPENDED, vd_list);
8122eda14cbcSMatt Macy 		break;
8123c0a83fe0SMartin Matuska 	case POOL_INITIALIZE_UNINIT:
8124c0a83fe0SMartin Matuska 		vdev_uninitialize(vd);
8125c0a83fe0SMartin Matuska 		break;
8126eda14cbcSMatt Macy 	default:
8127eda14cbcSMatt Macy 		panic("invalid cmd_type %llu", (unsigned long long)cmd_type);
8128eda14cbcSMatt Macy 	}
8129eda14cbcSMatt Macy 	mutex_exit(&vd->vdev_initialize_lock);
8130eda14cbcSMatt Macy 
8131eda14cbcSMatt Macy 	return (0);
8132eda14cbcSMatt Macy }
8133eda14cbcSMatt Macy 
8134eda14cbcSMatt Macy int
8135eda14cbcSMatt Macy spa_vdev_initialize(spa_t *spa, nvlist_t *nv, uint64_t cmd_type,
8136eda14cbcSMatt Macy     nvlist_t *vdev_errlist)
8137eda14cbcSMatt Macy {
8138eda14cbcSMatt Macy 	int total_errors = 0;
8139eda14cbcSMatt Macy 	list_t vd_list;
8140eda14cbcSMatt Macy 
8141eda14cbcSMatt Macy 	list_create(&vd_list, sizeof (vdev_t),
8142eda14cbcSMatt Macy 	    offsetof(vdev_t, vdev_initialize_node));
8143eda14cbcSMatt Macy 
8144eda14cbcSMatt Macy 	/*
8145eda14cbcSMatt Macy 	 * We hold the namespace lock through the whole function
8146eda14cbcSMatt Macy 	 * to prevent any changes to the pool while we're starting or
8147eda14cbcSMatt Macy 	 * stopping initialization. The config and state locks are held so that
8148eda14cbcSMatt Macy 	 * we can properly assess the vdev state before we commit to
8149eda14cbcSMatt Macy 	 * the initializing operation.
8150eda14cbcSMatt Macy 	 */
8151eda14cbcSMatt Macy 	mutex_enter(&spa_namespace_lock);
8152eda14cbcSMatt Macy 
8153eda14cbcSMatt Macy 	for (nvpair_t *pair = nvlist_next_nvpair(nv, NULL);
8154eda14cbcSMatt Macy 	    pair != NULL; pair = nvlist_next_nvpair(nv, pair)) {
8155eda14cbcSMatt Macy 		uint64_t vdev_guid = fnvpair_value_uint64(pair);
8156eda14cbcSMatt Macy 
8157eda14cbcSMatt Macy 		int error = spa_vdev_initialize_impl(spa, vdev_guid, cmd_type,
8158eda14cbcSMatt Macy 		    &vd_list);
8159eda14cbcSMatt Macy 		if (error != 0) {
8160eda14cbcSMatt Macy 			char guid_as_str[MAXNAMELEN];
8161eda14cbcSMatt Macy 
8162eda14cbcSMatt Macy 			(void) snprintf(guid_as_str, sizeof (guid_as_str),
8163eda14cbcSMatt Macy 			    "%llu", (unsigned long long)vdev_guid);
8164eda14cbcSMatt Macy 			fnvlist_add_int64(vdev_errlist, guid_as_str, error);
8165eda14cbcSMatt Macy 			total_errors++;
8166eda14cbcSMatt Macy 		}
8167eda14cbcSMatt Macy 	}
8168eda14cbcSMatt Macy 
8169eda14cbcSMatt Macy 	/* Wait for all initialize threads to stop. */
8170eda14cbcSMatt Macy 	vdev_initialize_stop_wait(spa, &vd_list);
8171eda14cbcSMatt Macy 
8172eda14cbcSMatt Macy 	/* Sync out the initializing state */
8173eda14cbcSMatt Macy 	txg_wait_synced(spa->spa_dsl_pool, 0);
8174eda14cbcSMatt Macy 	mutex_exit(&spa_namespace_lock);
8175eda14cbcSMatt Macy 
8176eda14cbcSMatt Macy 	list_destroy(&vd_list);
8177eda14cbcSMatt Macy 
8178eda14cbcSMatt Macy 	return (total_errors);
8179eda14cbcSMatt Macy }
8180eda14cbcSMatt Macy 
8181eda14cbcSMatt Macy static int
8182eda14cbcSMatt Macy spa_vdev_trim_impl(spa_t *spa, uint64_t guid, uint64_t cmd_type,
8183eda14cbcSMatt Macy     uint64_t rate, boolean_t partial, boolean_t secure, list_t *vd_list)
8184eda14cbcSMatt Macy {
8185eda14cbcSMatt Macy 	ASSERT(MUTEX_HELD(&spa_namespace_lock));
8186eda14cbcSMatt Macy 
8187eda14cbcSMatt Macy 	spa_config_enter(spa, SCL_CONFIG | SCL_STATE, FTAG, RW_READER);
8188eda14cbcSMatt Macy 
8189eda14cbcSMatt Macy 	/* Look up vdev and ensure it's a leaf. */
8190eda14cbcSMatt Macy 	vdev_t *vd = spa_lookup_by_guid(spa, guid, B_FALSE);
8191eda14cbcSMatt Macy 	if (vd == NULL || vd->vdev_detached) {
8192eda14cbcSMatt Macy 		spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
8193eda14cbcSMatt Macy 		return (SET_ERROR(ENODEV));
8194eda14cbcSMatt Macy 	} else if (!vd->vdev_ops->vdev_op_leaf || !vdev_is_concrete(vd)) {
8195eda14cbcSMatt Macy 		spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
8196eda14cbcSMatt Macy 		return (SET_ERROR(EINVAL));
8197eda14cbcSMatt Macy 	} else if (!vdev_writeable(vd)) {
8198eda14cbcSMatt Macy 		spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
8199eda14cbcSMatt Macy 		return (SET_ERROR(EROFS));
8200eda14cbcSMatt Macy 	} else if (!vd->vdev_has_trim) {
8201eda14cbcSMatt Macy 		spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
8202eda14cbcSMatt Macy 		return (SET_ERROR(EOPNOTSUPP));
8203eda14cbcSMatt Macy 	} else if (secure && !vd->vdev_has_securetrim) {
8204eda14cbcSMatt Macy 		spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
8205eda14cbcSMatt Macy 		return (SET_ERROR(EOPNOTSUPP));
8206eda14cbcSMatt Macy 	}
8207eda14cbcSMatt Macy 	mutex_enter(&vd->vdev_trim_lock);
8208eda14cbcSMatt Macy 	spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
8209eda14cbcSMatt Macy 
8210eda14cbcSMatt Macy 	/*
8211eda14cbcSMatt Macy 	 * When we activate a TRIM action we check to see if the
8212eda14cbcSMatt Macy 	 * vdev_trim_thread is NULL. We do this instead of using the
8213eda14cbcSMatt Macy 	 * vdev_trim_state since there might be a previous TRIM process
8214eda14cbcSMatt Macy 	 * which has completed but whose thread has not yet exited.
8215eda14cbcSMatt Macy 	 */
8216eda14cbcSMatt Macy 	if (cmd_type == POOL_TRIM_START &&
8217e716630dSMartin Matuska 	    (vd->vdev_trim_thread != NULL || vd->vdev_top->vdev_removing ||
8218e716630dSMartin Matuska 	    vd->vdev_top->vdev_rz_expanding)) {
8219eda14cbcSMatt Macy 		mutex_exit(&vd->vdev_trim_lock);
8220eda14cbcSMatt Macy 		return (SET_ERROR(EBUSY));
8221eda14cbcSMatt Macy 	} else if (cmd_type == POOL_TRIM_CANCEL &&
8222eda14cbcSMatt Macy 	    (vd->vdev_trim_state != VDEV_TRIM_ACTIVE &&
8223eda14cbcSMatt Macy 	    vd->vdev_trim_state != VDEV_TRIM_SUSPENDED)) {
8224eda14cbcSMatt Macy 		mutex_exit(&vd->vdev_trim_lock);
8225eda14cbcSMatt Macy 		return (SET_ERROR(ESRCH));
8226eda14cbcSMatt Macy 	} else if (cmd_type == POOL_TRIM_SUSPEND &&
8227eda14cbcSMatt Macy 	    vd->vdev_trim_state != VDEV_TRIM_ACTIVE) {
8228eda14cbcSMatt Macy 		mutex_exit(&vd->vdev_trim_lock);
8229eda14cbcSMatt Macy 		return (SET_ERROR(ESRCH));
8230eda14cbcSMatt Macy 	}
8231eda14cbcSMatt Macy 
8232eda14cbcSMatt Macy 	switch (cmd_type) {
8233eda14cbcSMatt Macy 	case POOL_TRIM_START:
8234eda14cbcSMatt Macy 		vdev_trim(vd, rate, partial, secure);
8235eda14cbcSMatt Macy 		break;
8236eda14cbcSMatt Macy 	case POOL_TRIM_CANCEL:
8237eda14cbcSMatt Macy 		vdev_trim_stop(vd, VDEV_TRIM_CANCELED, vd_list);
8238eda14cbcSMatt Macy 		break;
8239eda14cbcSMatt Macy 	case POOL_TRIM_SUSPEND:
8240eda14cbcSMatt Macy 		vdev_trim_stop(vd, VDEV_TRIM_SUSPENDED, vd_list);
8241eda14cbcSMatt Macy 		break;
8242eda14cbcSMatt Macy 	default:
8243eda14cbcSMatt Macy 		panic("invalid cmd_type %llu", (unsigned long long)cmd_type);
8244eda14cbcSMatt Macy 	}
8245eda14cbcSMatt Macy 	mutex_exit(&vd->vdev_trim_lock);
8246eda14cbcSMatt Macy 
8247eda14cbcSMatt Macy 	return (0);
8248eda14cbcSMatt Macy }
8249eda14cbcSMatt Macy 
8250eda14cbcSMatt Macy /*
8251eda14cbcSMatt Macy  * Initiates a manual TRIM for the requested vdevs. This kicks off individual
8252eda14cbcSMatt Macy  * TRIM threads for each child vdev.  These threads pass over all of the free
8253eda14cbcSMatt Macy  * space in the vdev's metaslabs and issue TRIM commands for that space.
8254eda14cbcSMatt Macy  */
8255eda14cbcSMatt Macy int
8256eda14cbcSMatt Macy spa_vdev_trim(spa_t *spa, nvlist_t *nv, uint64_t cmd_type, uint64_t rate,
8257eda14cbcSMatt Macy     boolean_t partial, boolean_t secure, nvlist_t *vdev_errlist)
8258eda14cbcSMatt Macy {
8259eda14cbcSMatt Macy 	int total_errors = 0;
8260eda14cbcSMatt Macy 	list_t vd_list;
8261eda14cbcSMatt Macy 
8262eda14cbcSMatt Macy 	list_create(&vd_list, sizeof (vdev_t),
8263eda14cbcSMatt Macy 	    offsetof(vdev_t, vdev_trim_node));
8264eda14cbcSMatt Macy 
8265eda14cbcSMatt Macy 	/*
8266eda14cbcSMatt Macy 	 * We hold the namespace lock through the whole function
8267eda14cbcSMatt Macy 	 * to prevent any changes to the pool while we're starting or
8268eda14cbcSMatt Macy 	 * stopping TRIM. The config and state locks are held so that
8269eda14cbcSMatt Macy 	 * we can properly assess the vdev state before we commit to
8270eda14cbcSMatt Macy 	 * the TRIM operation.
8271eda14cbcSMatt Macy 	 */
8272eda14cbcSMatt Macy 	mutex_enter(&spa_namespace_lock);
8273eda14cbcSMatt Macy 
8274eda14cbcSMatt Macy 	for (nvpair_t *pair = nvlist_next_nvpair(nv, NULL);
8275eda14cbcSMatt Macy 	    pair != NULL; pair = nvlist_next_nvpair(nv, pair)) {
8276eda14cbcSMatt Macy 		uint64_t vdev_guid = fnvpair_value_uint64(pair);
8277eda14cbcSMatt Macy 
8278eda14cbcSMatt Macy 		int error = spa_vdev_trim_impl(spa, vdev_guid, cmd_type,
8279eda14cbcSMatt Macy 		    rate, partial, secure, &vd_list);
8280eda14cbcSMatt Macy 		if (error != 0) {
8281eda14cbcSMatt Macy 			char guid_as_str[MAXNAMELEN];
8282eda14cbcSMatt Macy 
8283eda14cbcSMatt Macy 			(void) snprintf(guid_as_str, sizeof (guid_as_str),
8284eda14cbcSMatt Macy 			    "%llu", (unsigned long long)vdev_guid);
8285eda14cbcSMatt Macy 			fnvlist_add_int64(vdev_errlist, guid_as_str, error);
8286eda14cbcSMatt Macy 			total_errors++;
8287eda14cbcSMatt Macy 		}
8288eda14cbcSMatt Macy 	}
8289eda14cbcSMatt Macy 
8290eda14cbcSMatt Macy 	/* Wait for all TRIM threads to stop. */
8291eda14cbcSMatt Macy 	vdev_trim_stop_wait(spa, &vd_list);
8292eda14cbcSMatt Macy 
8293eda14cbcSMatt Macy 	/* Sync out the TRIM state */
8294eda14cbcSMatt Macy 	txg_wait_synced(spa->spa_dsl_pool, 0);
8295eda14cbcSMatt Macy 	mutex_exit(&spa_namespace_lock);
8296eda14cbcSMatt Macy 
8297eda14cbcSMatt Macy 	list_destroy(&vd_list);
8298eda14cbcSMatt Macy 
8299eda14cbcSMatt Macy 	return (total_errors);
8300eda14cbcSMatt Macy }
8301eda14cbcSMatt Macy 
8302eda14cbcSMatt Macy /*
8303eda14cbcSMatt Macy  * Split a set of devices from their mirrors, and create a new pool from them.
8304eda14cbcSMatt Macy  */
8305eda14cbcSMatt Macy int
8306a0b956f5SMartin Matuska spa_vdev_split_mirror(spa_t *spa, const char *newname, nvlist_t *config,
8307eda14cbcSMatt Macy     nvlist_t *props, boolean_t exp)
8308eda14cbcSMatt Macy {
8309eda14cbcSMatt Macy 	int error = 0;
8310eda14cbcSMatt Macy 	uint64_t txg, *glist;
8311eda14cbcSMatt Macy 	spa_t *newspa;
8312eda14cbcSMatt Macy 	uint_t c, children, lastlog;
8313eda14cbcSMatt Macy 	nvlist_t **child, *nvl, *tmp;
8314eda14cbcSMatt Macy 	dmu_tx_t *tx;
83152a58b312SMartin Matuska 	const char *altroot = NULL;
8316eda14cbcSMatt Macy 	vdev_t *rvd, **vml = NULL;			/* vdev modify list */
8317eda14cbcSMatt Macy 	boolean_t activate_slog;
8318eda14cbcSMatt Macy 
8319eda14cbcSMatt Macy 	ASSERT(spa_writeable(spa));
8320eda14cbcSMatt Macy 
8321eda14cbcSMatt Macy 	txg = spa_vdev_enter(spa);
8322eda14cbcSMatt Macy 
8323eda14cbcSMatt Macy 	ASSERT(MUTEX_HELD(&spa_namespace_lock));
8324eda14cbcSMatt Macy 	if (spa_feature_is_active(spa, SPA_FEATURE_POOL_CHECKPOINT)) {
8325eda14cbcSMatt Macy 		error = (spa_has_checkpoint(spa)) ?
8326eda14cbcSMatt Macy 		    ZFS_ERR_CHECKPOINT_EXISTS : ZFS_ERR_DISCARDING_CHECKPOINT;
8327eda14cbcSMatt Macy 		return (spa_vdev_exit(spa, NULL, txg, error));
8328eda14cbcSMatt Macy 	}
8329eda14cbcSMatt Macy 
8330eda14cbcSMatt Macy 	/* clear the log and flush everything up to now */
8331eda14cbcSMatt Macy 	activate_slog = spa_passivate_log(spa);
8332eda14cbcSMatt Macy 	(void) spa_vdev_config_exit(spa, NULL, txg, 0, FTAG);
8333eda14cbcSMatt Macy 	error = spa_reset_logs(spa);
8334eda14cbcSMatt Macy 	txg = spa_vdev_config_enter(spa);
8335eda14cbcSMatt Macy 
8336eda14cbcSMatt Macy 	if (activate_slog)
8337eda14cbcSMatt Macy 		spa_activate_log(spa);
8338eda14cbcSMatt Macy 
8339eda14cbcSMatt Macy 	if (error != 0)
8340eda14cbcSMatt Macy 		return (spa_vdev_exit(spa, NULL, txg, error));
8341eda14cbcSMatt Macy 
8342eda14cbcSMatt Macy 	/* check new spa name before going any further */
8343eda14cbcSMatt Macy 	if (spa_lookup(newname) != NULL)
8344eda14cbcSMatt Macy 		return (spa_vdev_exit(spa, NULL, txg, EEXIST));
8345eda14cbcSMatt Macy 
8346eda14cbcSMatt Macy 	/*
8347eda14cbcSMatt Macy 	 * scan through all the children to ensure they're all mirrors
8348eda14cbcSMatt Macy 	 */
8349eda14cbcSMatt Macy 	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &nvl) != 0 ||
8350eda14cbcSMatt Macy 	    nvlist_lookup_nvlist_array(nvl, ZPOOL_CONFIG_CHILDREN, &child,
8351eda14cbcSMatt Macy 	    &children) != 0)
8352eda14cbcSMatt Macy 		return (spa_vdev_exit(spa, NULL, txg, EINVAL));
8353eda14cbcSMatt Macy 
8354eda14cbcSMatt Macy 	/* first, check to ensure we've got the right child count */
8355eda14cbcSMatt Macy 	rvd = spa->spa_root_vdev;
8356eda14cbcSMatt Macy 	lastlog = 0;
8357eda14cbcSMatt Macy 	for (c = 0; c < rvd->vdev_children; c++) {
8358eda14cbcSMatt Macy 		vdev_t *vd = rvd->vdev_child[c];
8359eda14cbcSMatt Macy 
8360eda14cbcSMatt Macy 		/* don't count the holes & logs as children */
8361eda14cbcSMatt Macy 		if (vd->vdev_islog || (vd->vdev_ops != &vdev_indirect_ops &&
8362eda14cbcSMatt Macy 		    !vdev_is_concrete(vd))) {
8363eda14cbcSMatt Macy 			if (lastlog == 0)
8364eda14cbcSMatt Macy 				lastlog = c;
8365eda14cbcSMatt Macy 			continue;
8366eda14cbcSMatt Macy 		}
8367eda14cbcSMatt Macy 
8368eda14cbcSMatt Macy 		lastlog = 0;
8369eda14cbcSMatt Macy 	}
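	/*
	 * The split config must name every top-level vdev except a trailing
	 * run of log/hole vdevs; lastlog marks where that trailing run begins.
	 */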
8370eda14cbcSMatt Macy 	if (children != (lastlog != 0 ? lastlog : rvd->vdev_children))
8371eda14cbcSMatt Macy 		return (spa_vdev_exit(spa, NULL, txg, EINVAL));
8372eda14cbcSMatt Macy 
8373eda14cbcSMatt Macy 	/* next, ensure no spare or cache devices are part of the split */
8374eda14cbcSMatt Macy 	if (nvlist_lookup_nvlist(nvl, ZPOOL_CONFIG_SPARES, &tmp) == 0 ||
8375eda14cbcSMatt Macy 	    nvlist_lookup_nvlist(nvl, ZPOOL_CONFIG_L2CACHE, &tmp) == 0)
8376eda14cbcSMatt Macy 		return (spa_vdev_exit(spa, NULL, txg, EINVAL));
8377eda14cbcSMatt Macy 
8378eda14cbcSMatt Macy 	vml = kmem_zalloc(children * sizeof (vdev_t *), KM_SLEEP);
8379eda14cbcSMatt Macy 	glist = kmem_zalloc(children * sizeof (uint64_t), KM_SLEEP);
8380eda14cbcSMatt Macy 
8381eda14cbcSMatt Macy 	/* then, loop over each vdev and validate it */
8382eda14cbcSMatt Macy 	for (c = 0; c < children; c++) {
8383eda14cbcSMatt Macy 		uint64_t is_hole = 0;
8384eda14cbcSMatt Macy 
8385eda14cbcSMatt Macy 		(void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_HOLE,
8386eda14cbcSMatt Macy 		    &is_hole);
8387eda14cbcSMatt Macy 
8388eda14cbcSMatt Macy 		if (is_hole != 0) {
8389eda14cbcSMatt Macy 			if (spa->spa_root_vdev->vdev_child[c]->vdev_ishole ||
8390eda14cbcSMatt Macy 			    spa->spa_root_vdev->vdev_child[c]->vdev_islog) {
8391eda14cbcSMatt Macy 				continue;
8392eda14cbcSMatt Macy 			} else {
8393eda14cbcSMatt Macy 				error = SET_ERROR(EINVAL);
8394eda14cbcSMatt Macy 				break;
8395eda14cbcSMatt Macy 			}
8396eda14cbcSMatt Macy 		}
8397eda14cbcSMatt Macy 
8398eda14cbcSMatt Macy 		/* deal with indirect vdevs */
8399eda14cbcSMatt Macy 		if (spa->spa_root_vdev->vdev_child[c]->vdev_ops ==
8400eda14cbcSMatt Macy 		    &vdev_indirect_ops)
8401eda14cbcSMatt Macy 			continue;
8402eda14cbcSMatt Macy 
8403eda14cbcSMatt Macy 		/* which disk is going to be split? */
8404eda14cbcSMatt Macy 		if (nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_GUID,
8405eda14cbcSMatt Macy 		    &glist[c]) != 0) {
8406eda14cbcSMatt Macy 			error = SET_ERROR(EINVAL);
8407eda14cbcSMatt Macy 			break;
8408eda14cbcSMatt Macy 		}
8409eda14cbcSMatt Macy 
8410eda14cbcSMatt Macy 		/* look it up in the spa */
8411eda14cbcSMatt Macy 		vml[c] = spa_lookup_by_guid(spa, glist[c], B_FALSE);
8412eda14cbcSMatt Macy 		if (vml[c] == NULL) {
8413eda14cbcSMatt Macy 			error = SET_ERROR(ENODEV);
8414eda14cbcSMatt Macy 			break;
8415eda14cbcSMatt Macy 		}
8416eda14cbcSMatt Macy 
8417eda14cbcSMatt Macy 		/* make sure there's nothing stopping the split */
8418eda14cbcSMatt Macy 		if (vml[c]->vdev_parent->vdev_ops != &vdev_mirror_ops ||
8419eda14cbcSMatt Macy 		    vml[c]->vdev_islog ||
8420eda14cbcSMatt Macy 		    !vdev_is_concrete(vml[c]) ||
8421eda14cbcSMatt Macy 		    vml[c]->vdev_isspare ||
8422eda14cbcSMatt Macy 		    vml[c]->vdev_isl2cache ||
8423eda14cbcSMatt Macy 		    !vdev_writeable(vml[c]) ||
8424eda14cbcSMatt Macy 		    vml[c]->vdev_children != 0 ||
8425eda14cbcSMatt Macy 		    vml[c]->vdev_state != VDEV_STATE_HEALTHY ||
8426eda14cbcSMatt Macy 		    c != spa->spa_root_vdev->vdev_child[c]->vdev_id) {
8427eda14cbcSMatt Macy 			error = SET_ERROR(EINVAL);
8428eda14cbcSMatt Macy 			break;
8429eda14cbcSMatt Macy 		}
8430eda14cbcSMatt Macy 
8431eda14cbcSMatt Macy 		if (vdev_dtl_required(vml[c]) ||
8432eda14cbcSMatt Macy 		    vdev_resilver_needed(vml[c], NULL, NULL)) {
8433eda14cbcSMatt Macy 			error = SET_ERROR(EBUSY);
8434eda14cbcSMatt Macy 			break;
8435eda14cbcSMatt Macy 		}
8436eda14cbcSMatt Macy 
8437eda14cbcSMatt Macy 		/* we need certain info from the top level */
843881b22a98SMartin Matuska 		fnvlist_add_uint64(child[c], ZPOOL_CONFIG_METASLAB_ARRAY,
843981b22a98SMartin Matuska 		    vml[c]->vdev_top->vdev_ms_array);
844081b22a98SMartin Matuska 		fnvlist_add_uint64(child[c], ZPOOL_CONFIG_METASLAB_SHIFT,
844181b22a98SMartin Matuska 		    vml[c]->vdev_top->vdev_ms_shift);
844281b22a98SMartin Matuska 		fnvlist_add_uint64(child[c], ZPOOL_CONFIG_ASIZE,
844381b22a98SMartin Matuska 		    vml[c]->vdev_top->vdev_asize);
844481b22a98SMartin Matuska 		fnvlist_add_uint64(child[c], ZPOOL_CONFIG_ASHIFT,
844581b22a98SMartin Matuska 		    vml[c]->vdev_top->vdev_ashift);
8446eda14cbcSMatt Macy 
8447eda14cbcSMatt Macy 		/* transfer per-vdev ZAPs */
8448eda14cbcSMatt Macy 		ASSERT3U(vml[c]->vdev_leaf_zap, !=, 0);
8449eda14cbcSMatt Macy 		VERIFY0(nvlist_add_uint64(child[c],
8450eda14cbcSMatt Macy 		    ZPOOL_CONFIG_VDEV_LEAF_ZAP, vml[c]->vdev_leaf_zap));
8451eda14cbcSMatt Macy 
8452eda14cbcSMatt Macy 		ASSERT3U(vml[c]->vdev_top->vdev_top_zap, !=, 0);
8453eda14cbcSMatt Macy 		VERIFY0(nvlist_add_uint64(child[c],
8454eda14cbcSMatt Macy 		    ZPOOL_CONFIG_VDEV_TOP_ZAP,
8455eda14cbcSMatt Macy 		    vml[c]->vdev_parent->vdev_top_zap));
8456eda14cbcSMatt Macy 	}
8457eda14cbcSMatt Macy 
8458eda14cbcSMatt Macy 	if (error != 0) {
8459eda14cbcSMatt Macy 		kmem_free(vml, children * sizeof (vdev_t *));
8460eda14cbcSMatt Macy 		kmem_free(glist, children * sizeof (uint64_t));
8461eda14cbcSMatt Macy 		return (spa_vdev_exit(spa, NULL, txg, error));
8462eda14cbcSMatt Macy 	}
8463eda14cbcSMatt Macy 
8464eda14cbcSMatt Macy 	/* stop writers from using the disks */
8465eda14cbcSMatt Macy 	for (c = 0; c < children; c++) {
8466eda14cbcSMatt Macy 		if (vml[c] != NULL)
8467eda14cbcSMatt Macy 			vml[c]->vdev_offline = B_TRUE;
8468eda14cbcSMatt Macy 	}
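	/* Reopen the vdev tree so the offline settings above take effect. */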
8469eda14cbcSMatt Macy 	vdev_reopen(spa->spa_root_vdev);
8470eda14cbcSMatt Macy 
8471eda14cbcSMatt Macy 	/*
8472eda14cbcSMatt Macy 	 * Temporarily record the splitting vdevs in the spa config.  This
8473eda14cbcSMatt Macy 	 * will disappear once the config is regenerated.
8474eda14cbcSMatt Macy 	 */
847581b22a98SMartin Matuska 	nvl = fnvlist_alloc();
847681b22a98SMartin Matuska 	fnvlist_add_uint64_array(nvl, ZPOOL_CONFIG_SPLIT_LIST, glist, children);
8477eda14cbcSMatt Macy 	kmem_free(glist, children * sizeof (uint64_t));
8478eda14cbcSMatt Macy 
8479eda14cbcSMatt Macy 	mutex_enter(&spa->spa_props_lock);
848081b22a98SMartin Matuska 	fnvlist_add_nvlist(spa->spa_config, ZPOOL_CONFIG_SPLIT, nvl);
8481eda14cbcSMatt Macy 	mutex_exit(&spa->spa_props_lock);
8482eda14cbcSMatt Macy 	spa->spa_config_splitting = nvl;
8483eda14cbcSMatt Macy 	vdev_config_dirty(spa->spa_root_vdev);
8484eda14cbcSMatt Macy 
8485eda14cbcSMatt Macy 	/* configure and create the new pool */
848681b22a98SMartin Matuska 	fnvlist_add_string(config, ZPOOL_CONFIG_POOL_NAME, newname);
848781b22a98SMartin Matuska 	fnvlist_add_uint64(config, ZPOOL_CONFIG_POOL_STATE,
848881b22a98SMartin Matuska 	    exp ? POOL_STATE_EXPORTED : POOL_STATE_ACTIVE);
848981b22a98SMartin Matuska 	fnvlist_add_uint64(config, ZPOOL_CONFIG_VERSION, spa_version(spa));
849081b22a98SMartin Matuska 	fnvlist_add_uint64(config, ZPOOL_CONFIG_POOL_TXG, spa->spa_config_txg);
849181b22a98SMartin Matuska 	fnvlist_add_uint64(config, ZPOOL_CONFIG_POOL_GUID,
849281b22a98SMartin Matuska 	    spa_generate_guid(NULL));
8493eda14cbcSMatt Macy 	VERIFY0(nvlist_add_boolean(config, ZPOOL_CONFIG_HAS_PER_VDEV_ZAPS));
8494eda14cbcSMatt Macy 	(void) nvlist_lookup_string(props,
8495eda14cbcSMatt Macy 	    zpool_prop_to_name(ZPOOL_PROP_ALTROOT), &altroot);
8496eda14cbcSMatt Macy 
8497eda14cbcSMatt Macy 	/* add the new pool to the namespace */
8498eda14cbcSMatt Macy 	newspa = spa_add(newname, config, altroot);
8499eda14cbcSMatt Macy 	newspa->spa_avz_action = AVZ_ACTION_REBUILD;
8500eda14cbcSMatt Macy 	newspa->spa_config_txg = spa->spa_config_txg;
8501eda14cbcSMatt Macy 	spa_set_log_state(newspa, SPA_LOG_CLEAR);
8502eda14cbcSMatt Macy 
8503eda14cbcSMatt Macy 	/* release the spa config lock, retaining the namespace lock */
8504eda14cbcSMatt Macy 	spa_vdev_config_exit(spa, NULL, txg, 0, FTAG);
8505eda14cbcSMatt Macy 
8506eda14cbcSMatt Macy 	if (zio_injection_enabled)
8507eda14cbcSMatt Macy 		zio_handle_panic_injection(spa, FTAG, 1);
8508eda14cbcSMatt Macy 
8509eda14cbcSMatt Macy 	spa_activate(newspa, spa_mode_global);
8510eda14cbcSMatt Macy 	spa_async_suspend(newspa);
8511eda14cbcSMatt Macy 
8512eda14cbcSMatt Macy 	/*
8513eda14cbcSMatt Macy 	 * Temporarily stop the initializing and TRIM activity.  We set the
8514eda14cbcSMatt Macy 	 * state to ACTIVE so that we know to resume initializing or TRIM
8515eda14cbcSMatt Macy 	 * once the split has completed.
8516eda14cbcSMatt Macy 	 */
8517eda14cbcSMatt Macy 	list_t vd_initialize_list;
8518eda14cbcSMatt Macy 	list_create(&vd_initialize_list, sizeof (vdev_t),
8519eda14cbcSMatt Macy 	    offsetof(vdev_t, vdev_initialize_node));
8520eda14cbcSMatt Macy 
8521eda14cbcSMatt Macy 	list_t vd_trim_list;
8522eda14cbcSMatt Macy 	list_create(&vd_trim_list, sizeof (vdev_t),
8523eda14cbcSMatt Macy 	    offsetof(vdev_t, vdev_trim_node));
8524eda14cbcSMatt Macy 
8525eda14cbcSMatt Macy 	for (c = 0; c < children; c++) {
8526eda14cbcSMatt Macy 		if (vml[c] != NULL && vml[c]->vdev_ops != &vdev_indirect_ops) {
8527eda14cbcSMatt Macy 			mutex_enter(&vml[c]->vdev_initialize_lock);
8528eda14cbcSMatt Macy 			vdev_initialize_stop(vml[c],
8529eda14cbcSMatt Macy 			    VDEV_INITIALIZE_ACTIVE, &vd_initialize_list);
8530eda14cbcSMatt Macy 			mutex_exit(&vml[c]->vdev_initialize_lock);
8531eda14cbcSMatt Macy 
8532eda14cbcSMatt Macy 			mutex_enter(&vml[c]->vdev_trim_lock);
8533eda14cbcSMatt Macy 			vdev_trim_stop(vml[c], VDEV_TRIM_ACTIVE, &vd_trim_list);
8534eda14cbcSMatt Macy 			mutex_exit(&vml[c]->vdev_trim_lock);
8535eda14cbcSMatt Macy 		}
8536eda14cbcSMatt Macy 	}
8537eda14cbcSMatt Macy 
8538eda14cbcSMatt Macy 	vdev_initialize_stop_wait(spa, &vd_initialize_list);
8539eda14cbcSMatt Macy 	vdev_trim_stop_wait(spa, &vd_trim_list);
8540eda14cbcSMatt Macy 
8541eda14cbcSMatt Macy 	list_destroy(&vd_initialize_list);
8542eda14cbcSMatt Macy 	list_destroy(&vd_trim_list);
8543eda14cbcSMatt Macy 
8544eda14cbcSMatt Macy 	newspa->spa_config_source = SPA_CONFIG_SRC_SPLIT;
8545eda14cbcSMatt Macy 	newspa->spa_is_splitting = B_TRUE;
8546eda14cbcSMatt Macy 
8547eda14cbcSMatt Macy 	/* create the new pool from the disks of the original pool */
8548eda14cbcSMatt Macy 	error = spa_load(newspa, SPA_LOAD_IMPORT, SPA_IMPORT_ASSEMBLE);
8549eda14cbcSMatt Macy 	if (error)
8550eda14cbcSMatt Macy 		goto out;
8551eda14cbcSMatt Macy 
8552eda14cbcSMatt Macy 	/* if that worked, generate a real config for the new pool */
8553eda14cbcSMatt Macy 	if (newspa->spa_root_vdev != NULL) {
855481b22a98SMartin Matuska 		newspa->spa_config_splitting = fnvlist_alloc();
855581b22a98SMartin Matuska 		fnvlist_add_uint64(newspa->spa_config_splitting,
855681b22a98SMartin Matuska 		    ZPOOL_CONFIG_SPLIT_GUID, spa_guid(spa));
8557eda14cbcSMatt Macy 		spa_config_set(newspa, spa_config_generate(newspa, NULL, -1ULL,
8558eda14cbcSMatt Macy 		    B_TRUE));
8559eda14cbcSMatt Macy 	}
8560eda14cbcSMatt Macy 
8561eda14cbcSMatt Macy 	/* set the props */
8562eda14cbcSMatt Macy 	if (props != NULL) {
8563eda14cbcSMatt Macy 		spa_configfile_set(newspa, props, B_FALSE);
8564eda14cbcSMatt Macy 		error = spa_prop_set(newspa, props);
8565eda14cbcSMatt Macy 		if (error)
8566eda14cbcSMatt Macy 			goto out;
8567eda14cbcSMatt Macy 	}
8568eda14cbcSMatt Macy 
8569eda14cbcSMatt Macy 	/* flush everything */
8570eda14cbcSMatt Macy 	txg = spa_vdev_config_enter(newspa);
8571eda14cbcSMatt Macy 	vdev_config_dirty(newspa->spa_root_vdev);
8572eda14cbcSMatt Macy 	(void) spa_vdev_config_exit(newspa, NULL, txg, 0, FTAG);
8573eda14cbcSMatt Macy 
8574eda14cbcSMatt Macy 	if (zio_injection_enabled)
8575eda14cbcSMatt Macy 		zio_handle_panic_injection(spa, FTAG, 2);
8576eda14cbcSMatt Macy 
8577eda14cbcSMatt Macy 	spa_async_resume(newspa);
8578eda14cbcSMatt Macy 
8579eda14cbcSMatt Macy 	/* finally, update the original pool's config */
8580eda14cbcSMatt Macy 	txg = spa_vdev_config_enter(spa);
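	/*
	 * The tx is only used to log per-vdev "detach" history records; if
	 * assignment fails the split still proceeds, but logging is skipped.
	 */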
8581eda14cbcSMatt Macy 	tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
8582eda14cbcSMatt Macy 	error = dmu_tx_assign(tx, TXG_WAIT);
8583eda14cbcSMatt Macy 	if (error != 0)
8584eda14cbcSMatt Macy 		dmu_tx_abort(tx);
8585eda14cbcSMatt Macy 	for (c = 0; c < children; c++) {
8586eda14cbcSMatt Macy 		if (vml[c] != NULL && vml[c]->vdev_ops != &vdev_indirect_ops) {
8587eda14cbcSMatt Macy 			vdev_t *tvd = vml[c]->vdev_top;
8588eda14cbcSMatt Macy 
8589eda14cbcSMatt Macy 			/*
8590eda14cbcSMatt Macy 			 * Need to be sure the detachable VDEV is not
8591eda14cbcSMatt Macy 			 * on any *other* txg's DTL list to prevent it
8592eda14cbcSMatt Macy 			 * from being accessed after it's freed.
8593eda14cbcSMatt Macy 			 */
8594eda14cbcSMatt Macy 			for (int t = 0; t < TXG_SIZE; t++) {
8595eda14cbcSMatt Macy 				(void) txg_list_remove_this(
8596eda14cbcSMatt Macy 				    &tvd->vdev_dtl_list, vml[c], t);
8597eda14cbcSMatt Macy 			}
8598eda14cbcSMatt Macy 
8599eda14cbcSMatt Macy 			vdev_split(vml[c]);
8600eda14cbcSMatt Macy 			if (error == 0)
8601eda14cbcSMatt Macy 				spa_history_log_internal(spa, "detach", tx,
8602eda14cbcSMatt Macy 				    "vdev=%s", vml[c]->vdev_path);
8603eda14cbcSMatt Macy 
8604eda14cbcSMatt Macy 			vdev_free(vml[c]);
8605eda14cbcSMatt Macy 		}
8606eda14cbcSMatt Macy 	}
8607eda14cbcSMatt Macy 	spa->spa_avz_action = AVZ_ACTION_REBUILD;
8608eda14cbcSMatt Macy 	vdev_config_dirty(spa->spa_root_vdev);
8609eda14cbcSMatt Macy 	spa->spa_config_splitting = NULL;
8610eda14cbcSMatt Macy 	nvlist_free(nvl);
8611eda14cbcSMatt Macy 	if (error == 0)
8612eda14cbcSMatt Macy 		dmu_tx_commit(tx);
8613eda14cbcSMatt Macy 	(void) spa_vdev_exit(spa, NULL, txg, 0);
8614eda14cbcSMatt Macy 
8615eda14cbcSMatt Macy 	if (zio_injection_enabled)
8616eda14cbcSMatt Macy 		zio_handle_panic_injection(spa, FTAG, 3);
8617eda14cbcSMatt Macy 
8618eda14cbcSMatt Macy 	/* split is complete; log a history record */
8619eda14cbcSMatt Macy 	spa_history_log_internal(newspa, "split", NULL,
8620eda14cbcSMatt Macy 	    "from pool %s", spa_name(spa));
8621eda14cbcSMatt Macy 
8622eda14cbcSMatt Macy 	newspa->spa_is_splitting = B_FALSE;
8623eda14cbcSMatt Macy 	kmem_free(vml, children * sizeof (vdev_t *));
8624eda14cbcSMatt Macy 
8625eda14cbcSMatt Macy 	/* if we're not going to mount the filesystems in userland, export */
8626eda14cbcSMatt Macy 	if (exp)
8627eda14cbcSMatt Macy 		error = spa_export_common(newname, POOL_STATE_EXPORTED, NULL,
8628eda14cbcSMatt Macy 		    B_FALSE, B_FALSE);
8629eda14cbcSMatt Macy 
8630eda14cbcSMatt Macy 	return (error);
8631eda14cbcSMatt Macy 
8632eda14cbcSMatt Macy out:
8633eda14cbcSMatt Macy 	spa_unload(newspa);
8634eda14cbcSMatt Macy 	spa_deactivate(newspa);
8635eda14cbcSMatt Macy 	spa_remove(newspa);
8636eda14cbcSMatt Macy 
8637eda14cbcSMatt Macy 	txg = spa_vdev_config_enter(spa);
8638eda14cbcSMatt Macy 
8639eda14cbcSMatt Macy 	/* re-online all offlined disks */
8640eda14cbcSMatt Macy 	for (c = 0; c < children; c++) {
8641eda14cbcSMatt Macy 		if (vml[c] != NULL)
8642eda14cbcSMatt Macy 			vml[c]->vdev_offline = B_FALSE;
8643eda14cbcSMatt Macy 	}
8644eda14cbcSMatt Macy 
8645eda14cbcSMatt Macy 	/* restart initializing or trimming disks as necessary */
8646eda14cbcSMatt Macy 	spa_async_request(spa, SPA_ASYNC_INITIALIZE_RESTART);
8647eda14cbcSMatt Macy 	spa_async_request(spa, SPA_ASYNC_TRIM_RESTART);
8648eda14cbcSMatt Macy 	spa_async_request(spa, SPA_ASYNC_AUTOTRIM_RESTART);
8649eda14cbcSMatt Macy 
8650eda14cbcSMatt Macy 	vdev_reopen(spa->spa_root_vdev);
8651eda14cbcSMatt Macy 
8652eda14cbcSMatt Macy 	nvlist_free(spa->spa_config_splitting);
8653eda14cbcSMatt Macy 	spa->spa_config_splitting = NULL;
8654eda14cbcSMatt Macy 	(void) spa_vdev_exit(spa, NULL, txg, error);
8655eda14cbcSMatt Macy 
8656eda14cbcSMatt Macy 	kmem_free(vml, children * sizeof (vdev_t *));
8657eda14cbcSMatt Macy 	return (error);
8658eda14cbcSMatt Macy }
8659eda14cbcSMatt Macy 
8660eda14cbcSMatt Macy /*
8661eda14cbcSMatt Macy  * Find any device that's done replacing, or a vdev marked 'unspare' that's
8662eda14cbcSMatt Macy  * currently spared, so we can detach it.
8663eda14cbcSMatt Macy  */
8664eda14cbcSMatt Macy static vdev_t *
8665eda14cbcSMatt Macy spa_vdev_resilver_done_hunt(vdev_t *vd)
8666eda14cbcSMatt Macy {
8667eda14cbcSMatt Macy 	vdev_t *newvd, *oldvd;
8668eda14cbcSMatt Macy 
8669eda14cbcSMatt Macy 	for (int c = 0; c < vd->vdev_children; c++) {
8670eda14cbcSMatt Macy 		oldvd = spa_vdev_resilver_done_hunt(vd->vdev_child[c]);
8671eda14cbcSMatt Macy 		if (oldvd != NULL)
8672eda14cbcSMatt Macy 			return (oldvd);
8673eda14cbcSMatt Macy 	}
8674eda14cbcSMatt Macy 
8675eda14cbcSMatt Macy 	/*
8676eda14cbcSMatt Macy 	 * Check for a completed replacement.  We always consider the first
8677eda14cbcSMatt Macy 	 * vdev in the list to be the oldest vdev, and the last one to be
8678eda14cbcSMatt Macy 	 * the newest (see spa_vdev_attach() for how that works).  In
8679eda14cbcSMatt Macy 	 * the case where the newest vdev is faulted, we will not automatically
8680eda14cbcSMatt Macy 	 * remove it after a resilver completes.  This is OK as it will require
8681eda14cbcSMatt Macy 	 * user intervention to determine which disk the admin wishes to keep.
8682eda14cbcSMatt Macy 	 */
8683eda14cbcSMatt Macy 	if (vd->vdev_ops == &vdev_replacing_ops) {
8684eda14cbcSMatt Macy 		ASSERT(vd->vdev_children > 1);
8685eda14cbcSMatt Macy 
8686eda14cbcSMatt Macy 		newvd = vd->vdev_child[vd->vdev_children - 1];
8687eda14cbcSMatt Macy 		oldvd = vd->vdev_child[0];
8688eda14cbcSMatt Macy 
8689eda14cbcSMatt Macy 		if (vdev_dtl_empty(newvd, DTL_MISSING) &&
8690eda14cbcSMatt Macy 		    vdev_dtl_empty(newvd, DTL_OUTAGE) &&
8691eda14cbcSMatt Macy 		    !vdev_dtl_required(oldvd))
8692eda14cbcSMatt Macy 			return (oldvd);
8693eda14cbcSMatt Macy 	}
8694eda14cbcSMatt Macy 
8695eda14cbcSMatt Macy 	/*
8696eda14cbcSMatt Macy 	 * Check for a completed resilver with the 'unspare' flag set.
8697eda14cbcSMatt Macy 	 * Also potentially update faulted state.
8698eda14cbcSMatt Macy 	 */
8699eda14cbcSMatt Macy 	if (vd->vdev_ops == &vdev_spare_ops) {
8700eda14cbcSMatt Macy 		vdev_t *first = vd->vdev_child[0];
8701eda14cbcSMatt Macy 		vdev_t *last = vd->vdev_child[vd->vdev_children - 1];
8702eda14cbcSMatt Macy 
8703eda14cbcSMatt Macy 		if (last->vdev_unspare) {
8704eda14cbcSMatt Macy 			oldvd = first;
8705eda14cbcSMatt Macy 			newvd = last;
8706eda14cbcSMatt Macy 		} else if (first->vdev_unspare) {
8707eda14cbcSMatt Macy 			oldvd = last;
8708eda14cbcSMatt Macy 			newvd = first;
8709eda14cbcSMatt Macy 		} else {
8710eda14cbcSMatt Macy 			oldvd = NULL;
8711eda14cbcSMatt Macy 		}
8712eda14cbcSMatt Macy 
8713eda14cbcSMatt Macy 		if (oldvd != NULL &&
8714eda14cbcSMatt Macy 		    vdev_dtl_empty(newvd, DTL_MISSING) &&
8715eda14cbcSMatt Macy 		    vdev_dtl_empty(newvd, DTL_OUTAGE) &&
8716eda14cbcSMatt Macy 		    !vdev_dtl_required(oldvd))
8717eda14cbcSMatt Macy 			return (oldvd);
8718eda14cbcSMatt Macy 
8719eda14cbcSMatt Macy 		vdev_propagate_state(vd);
8720eda14cbcSMatt Macy 
8721eda14cbcSMatt Macy 		/*
8722eda14cbcSMatt Macy 		 * If there are more than two spares attached to a disk,
8723eda14cbcSMatt Macy 		 * and those spares are not required, then we want to
8724eda14cbcSMatt Macy 		 * attempt to free them up now so that they can be used
8725eda14cbcSMatt Macy 		 * by other pools.  Once we're back down to a single
8726eda14cbcSMatt Macy 		 * disk+spare, we stop removing them.
8727eda14cbcSMatt Macy 		 */
8728eda14cbcSMatt Macy 		if (vd->vdev_children > 2) {
8729eda14cbcSMatt Macy 			newvd = vd->vdev_child[1];
8730eda14cbcSMatt Macy 
8731eda14cbcSMatt Macy 			if (newvd->vdev_isspare && last->vdev_isspare &&
8732eda14cbcSMatt Macy 			    vdev_dtl_empty(last, DTL_MISSING) &&
8733eda14cbcSMatt Macy 			    vdev_dtl_empty(last, DTL_OUTAGE) &&
8734eda14cbcSMatt Macy 			    !vdev_dtl_required(newvd))
8735eda14cbcSMatt Macy 				return (newvd);
8736eda14cbcSMatt Macy 		}
8737eda14cbcSMatt Macy 	}
8738eda14cbcSMatt Macy 
8739eda14cbcSMatt Macy 	return (NULL);
8740eda14cbcSMatt Macy }
8741eda14cbcSMatt Macy 
8742eda14cbcSMatt Macy static void
8743eda14cbcSMatt Macy spa_vdev_resilver_done(spa_t *spa)
8744eda14cbcSMatt Macy {
8745eda14cbcSMatt Macy 	vdev_t *vd, *pvd, *ppvd;
8746eda14cbcSMatt Macy 	uint64_t guid, sguid, pguid, ppguid;
8747eda14cbcSMatt Macy 
8748eda14cbcSMatt Macy 	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
8749eda14cbcSMatt Macy 
8750eda14cbcSMatt Macy 	while ((vd = spa_vdev_resilver_done_hunt(spa->spa_root_vdev)) != NULL) {
8751eda14cbcSMatt Macy 		pvd = vd->vdev_parent;
8752eda14cbcSMatt Macy 		ppvd = pvd->vdev_parent;
8753eda14cbcSMatt Macy 		guid = vd->vdev_guid;
8754eda14cbcSMatt Macy 		pguid = pvd->vdev_guid;
8755eda14cbcSMatt Macy 		ppguid = ppvd->vdev_guid;
8756eda14cbcSMatt Macy 		sguid = 0;
8757eda14cbcSMatt Macy 		/*
8758eda14cbcSMatt Macy 		 * If we have just finished replacing a hot spared device, then
8759eda14cbcSMatt Macy 		 * we need to detach the parent's first child (the original hot
8760eda14cbcSMatt Macy 		 * spare) as well.
8761eda14cbcSMatt Macy 		 */
8762eda14cbcSMatt Macy 		if (ppvd->vdev_ops == &vdev_spare_ops && pvd->vdev_id == 0 &&
8763eda14cbcSMatt Macy 		    ppvd->vdev_children == 2) {
8764eda14cbcSMatt Macy 			ASSERT(pvd->vdev_ops == &vdev_replacing_ops);
8765eda14cbcSMatt Macy 			sguid = ppvd->vdev_child[1]->vdev_guid;
8766eda14cbcSMatt Macy 		}
8767eda14cbcSMatt Macy 		ASSERT(vd->vdev_resilver_txg == 0 || !vdev_dtl_required(vd));
8768eda14cbcSMatt Macy 
8769eda14cbcSMatt Macy 		spa_config_exit(spa, SCL_ALL, FTAG);
8770eda14cbcSMatt Macy 		if (spa_vdev_detach(spa, guid, pguid, B_TRUE) != 0)
8771eda14cbcSMatt Macy 			return;
8772eda14cbcSMatt Macy 		if (sguid && spa_vdev_detach(spa, sguid, ppguid, B_TRUE) != 0)
8773eda14cbcSMatt Macy 			return;
8774eda14cbcSMatt Macy 		spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
8775eda14cbcSMatt Macy 	}
8776eda14cbcSMatt Macy 
8777eda14cbcSMatt Macy 	spa_config_exit(spa, SCL_ALL, FTAG);
8778eda14cbcSMatt Macy 
8779eda14cbcSMatt Macy 	/*
8780eda14cbcSMatt Macy 	 * If no detach was performed above, the replace waiters will not have
8781eda14cbcSMatt Macy 	 * been notified, in which case we must do so now.
8782eda14cbcSMatt Macy 	 */
8783eda14cbcSMatt Macy 	spa_notify_waiters(spa);
8784eda14cbcSMatt Macy }
8785eda14cbcSMatt Macy 
8786eda14cbcSMatt Macy /*
8787eda14cbcSMatt Macy  * Update the stored path or FRU for this vdev.
8788eda14cbcSMatt Macy  */
8789eda14cbcSMatt Macy static int
8790eda14cbcSMatt Macy spa_vdev_set_common(spa_t *spa, uint64_t guid, const char *value,
8791eda14cbcSMatt Macy     boolean_t ispath)
8792eda14cbcSMatt Macy {
8793eda14cbcSMatt Macy 	vdev_t *vd;
8794eda14cbcSMatt Macy 	boolean_t sync = B_FALSE;
8795eda14cbcSMatt Macy 
8796eda14cbcSMatt Macy 	ASSERT(spa_writeable(spa));
8797eda14cbcSMatt Macy 
8798eda14cbcSMatt Macy 	spa_vdev_state_enter(spa, SCL_ALL);
8799eda14cbcSMatt Macy 
8800eda14cbcSMatt Macy 	if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL)
8801eda14cbcSMatt Macy 		return (spa_vdev_state_exit(spa, NULL, ENOENT));
8802eda14cbcSMatt Macy 
8803eda14cbcSMatt Macy 	if (!vd->vdev_ops->vdev_op_leaf)
8804eda14cbcSMatt Macy 		return (spa_vdev_state_exit(spa, NULL, ENOTSUP));
8805eda14cbcSMatt Macy 
8806eda14cbcSMatt Macy 	if (ispath) {
8807eda14cbcSMatt Macy 		if (strcmp(value, vd->vdev_path) != 0) {
8808eda14cbcSMatt Macy 			spa_strfree(vd->vdev_path);
8809eda14cbcSMatt Macy 			vd->vdev_path = spa_strdup(value);
8810eda14cbcSMatt Macy 			sync = B_TRUE;
8811eda14cbcSMatt Macy 		}
8812eda14cbcSMatt Macy 	} else {
8813eda14cbcSMatt Macy 		if (vd->vdev_fru == NULL) {
8814eda14cbcSMatt Macy 			vd->vdev_fru = spa_strdup(value);
8815eda14cbcSMatt Macy 			sync = B_TRUE;
8816eda14cbcSMatt Macy 		} else if (strcmp(value, vd->vdev_fru) != 0) {
8817eda14cbcSMatt Macy 			spa_strfree(vd->vdev_fru);
8818eda14cbcSMatt Macy 			vd->vdev_fru = spa_strdup(value);
8819eda14cbcSMatt Macy 			sync = B_TRUE;
8820eda14cbcSMatt Macy 		}
8821eda14cbcSMatt Macy 	}
8822eda14cbcSMatt Macy 
8823eda14cbcSMatt Macy 	return (spa_vdev_state_exit(spa, sync ? vd : NULL, 0));
8824eda14cbcSMatt Macy }
8825eda14cbcSMatt Macy 
8826eda14cbcSMatt Macy int
8827eda14cbcSMatt Macy spa_vdev_setpath(spa_t *spa, uint64_t guid, const char *newpath)
8828eda14cbcSMatt Macy {
8829eda14cbcSMatt Macy 	return (spa_vdev_set_common(spa, guid, newpath, B_TRUE));
8830eda14cbcSMatt Macy }
8831eda14cbcSMatt Macy 
8832eda14cbcSMatt Macy int
8833eda14cbcSMatt Macy spa_vdev_setfru(spa_t *spa, uint64_t guid, const char *newfru)
8834eda14cbcSMatt Macy {
8835eda14cbcSMatt Macy 	return (spa_vdev_set_common(spa, guid, newfru, B_FALSE));
8836eda14cbcSMatt Macy }
8837eda14cbcSMatt Macy 
8838eda14cbcSMatt Macy /*
8839eda14cbcSMatt Macy  * ==========================================================================
8840eda14cbcSMatt Macy  * SPA Scanning
8841eda14cbcSMatt Macy  * ==========================================================================
8842eda14cbcSMatt Macy  */
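/*
 * Pause or resume an in-progress scrub.  This is not permitted while a
 * resilver is in progress.
 */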
8843eda14cbcSMatt Macy int
8844eda14cbcSMatt Macy spa_scrub_pause_resume(spa_t *spa, pool_scrub_cmd_t cmd)
8845eda14cbcSMatt Macy {
8846eda14cbcSMatt Macy 	ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == 0);
8847eda14cbcSMatt Macy 
8848eda14cbcSMatt Macy 	if (dsl_scan_resilvering(spa->spa_dsl_pool))
8849eda14cbcSMatt Macy 		return (SET_ERROR(EBUSY));
8850eda14cbcSMatt Macy 
8851eda14cbcSMatt Macy 	return (dsl_scrub_set_pause_resume(spa->spa_dsl_pool, cmd));
8852eda14cbcSMatt Macy }
8853eda14cbcSMatt Macy 
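/*
 * Cancel an in-progress scan.  A resilver cannot be cancelled this way;
 * EBUSY is returned instead.
 */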
8854eda14cbcSMatt Macy int
8855eda14cbcSMatt Macy spa_scan_stop(spa_t *spa)
8856eda14cbcSMatt Macy {
8857eda14cbcSMatt Macy 	ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == 0);
8858eda14cbcSMatt Macy 	if (dsl_scan_resilvering(spa->spa_dsl_pool))
8859eda14cbcSMatt Macy 		return (SET_ERROR(EBUSY));
8860c0a83fe0SMartin Matuska 
8861eda14cbcSMatt Macy 	return (dsl_scan_cancel(spa->spa_dsl_pool));
8862eda14cbcSMatt Macy }
8863eda14cbcSMatt Macy 
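/*
 * Kick off a scan of the requested type.  Deferred resilvers and error
 * scrubs are only permitted when their corresponding pool features are
 * enabled.
 */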
8864eda14cbcSMatt Macy int
8865eda14cbcSMatt Macy spa_scan(spa_t *spa, pool_scan_func_t func)
8866eda14cbcSMatt Macy {
8867eda14cbcSMatt Macy 	ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == 0);
8868eda14cbcSMatt Macy 
8869eda14cbcSMatt Macy 	if (func >= POOL_SCAN_FUNCS || func == POOL_SCAN_NONE)
8870eda14cbcSMatt Macy 		return (SET_ERROR(ENOTSUP));
8871eda14cbcSMatt Macy 
8872eda14cbcSMatt Macy 	if (func == POOL_SCAN_RESILVER &&
8873eda14cbcSMatt Macy 	    !spa_feature_is_enabled(spa, SPA_FEATURE_RESILVER_DEFER))
8874eda14cbcSMatt Macy 		return (SET_ERROR(ENOTSUP));
8875eda14cbcSMatt Macy 
8876eda14cbcSMatt Macy 	/*
8877eda14cbcSMatt Macy 	 * If a resilver was requested, but there is no DTL on a
8878eda14cbcSMatt Macy 	 * writeable leaf device, we have nothing to do.
8879eda14cbcSMatt Macy 	 */
8880eda14cbcSMatt Macy 	if (func == POOL_SCAN_RESILVER &&
8881eda14cbcSMatt Macy 	    !vdev_resilver_needed(spa->spa_root_vdev, NULL, NULL)) {
8882eda14cbcSMatt Macy 		spa_async_request(spa, SPA_ASYNC_RESILVER_DONE);
8883eda14cbcSMatt Macy 		return (0);
8884eda14cbcSMatt Macy 	}
8885eda14cbcSMatt Macy 
8886c0a83fe0SMartin Matuska 	if (func == POOL_SCAN_ERRORSCRUB &&
8887c0a83fe0SMartin Matuska 	    !spa_feature_is_enabled(spa, SPA_FEATURE_HEAD_ERRLOG))
8888c0a83fe0SMartin Matuska 		return (SET_ERROR(ENOTSUP));
8889c0a83fe0SMartin Matuska 
8890eda14cbcSMatt Macy 	return (dsl_scan(spa->spa_dsl_pool, func));
8891eda14cbcSMatt Macy }
8892eda14cbcSMatt Macy 
8893eda14cbcSMatt Macy /*
8894eda14cbcSMatt Macy  * ==========================================================================
8895eda14cbcSMatt Macy  * SPA async task processing
8896eda14cbcSMatt Macy  * ==========================================================================
8897eda14cbcSMatt Macy  */
8898eda14cbcSMatt Macy 
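/*
 * Recursively mark any vdev with vdev_remove_wanted set as REMOVED,
 * clear its error counters, and notify userspace that the device is
 * gone.
 */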
8899eda14cbcSMatt Macy static void
8900eda14cbcSMatt Macy spa_async_remove(spa_t *spa, vdev_t *vd)
8901eda14cbcSMatt Macy {
8902eda14cbcSMatt Macy 	if (vd->vdev_remove_wanted) {
8903eda14cbcSMatt Macy 		vd->vdev_remove_wanted = B_FALSE;
8904eda14cbcSMatt Macy 		vd->vdev_delayed_close = B_FALSE;
8905eda14cbcSMatt Macy 		vdev_set_state(vd, B_FALSE, VDEV_STATE_REMOVED, VDEV_AUX_NONE);
8906eda14cbcSMatt Macy 
8907eda14cbcSMatt Macy 		/*
8908eda14cbcSMatt Macy 		 * We want to clear the stats, but we don't want to do a full
8909eda14cbcSMatt Macy 		 * vdev_clear() as that will cause us to throw away
8910eda14cbcSMatt Macy 		 * degraded/faulted state as well as attempt to reopen the
8911eda14cbcSMatt Macy 		 * device, all of which is a waste.
8912eda14cbcSMatt Macy 		 */
8913eda14cbcSMatt Macy 		vd->vdev_stat.vs_read_errors = 0;
8914eda14cbcSMatt Macy 		vd->vdev_stat.vs_write_errors = 0;
8915eda14cbcSMatt Macy 		vd->vdev_stat.vs_checksum_errors = 0;
8916eda14cbcSMatt Macy 
8917eda14cbcSMatt Macy 		vdev_state_dirty(vd->vdev_top);
89187877fdebSMatt Macy 
89197877fdebSMatt Macy 		/* Tell userspace that the vdev is gone. */
89207877fdebSMatt Macy 		zfs_post_remove(spa, vd);
8921eda14cbcSMatt Macy 	}
8922eda14cbcSMatt Macy 
8923eda14cbcSMatt Macy 	for (int c = 0; c < vd->vdev_children; c++)
8924eda14cbcSMatt Macy 		spa_async_remove(spa, vd->vdev_child[c]);
8925eda14cbcSMatt Macy }
8926eda14cbcSMatt Macy 
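/*
 * Recursively mark as FAULTED any vdev with vdev_fault_wanted set.
 */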
8927eda14cbcSMatt Macy static void
8928b985c9caSMartin Matuska spa_async_fault_vdev(spa_t *spa, vdev_t *vd)
8929eda14cbcSMatt Macy {
8930b985c9caSMartin Matuska 	if (vd->vdev_fault_wanted) {
8931b985c9caSMartin Matuska 		vd->vdev_fault_wanted = B_FALSE;
8932b985c9caSMartin Matuska 		vdev_set_state(vd, B_TRUE, VDEV_STATE_FAULTED,
8933b985c9caSMartin Matuska 		    VDEV_AUX_ERR_EXCEEDED);
8934eda14cbcSMatt Macy 	}
8935eda14cbcSMatt Macy 
8936eda14cbcSMatt Macy 	for (int c = 0; c < vd->vdev_children; c++)
8937b985c9caSMartin Matuska 		spa_async_fault_vdev(spa, vd->vdev_child[c]);
8938eda14cbcSMatt Macy }
8939eda14cbcSMatt Macy 
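/*
 * If the autoexpand pool property is enabled, post an autoexpand event
 * for every leaf vdev that has a known physical path.
 */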
8940eda14cbcSMatt Macy static void
8941eda14cbcSMatt Macy spa_async_autoexpand(spa_t *spa, vdev_t *vd)
8942eda14cbcSMatt Macy {
8943eda14cbcSMatt Macy 	if (!spa->spa_autoexpand)
8944eda14cbcSMatt Macy 		return;
8945eda14cbcSMatt Macy 
8946eda14cbcSMatt Macy 	for (int c = 0; c < vd->vdev_children; c++) {
8947eda14cbcSMatt Macy 		vdev_t *cvd = vd->vdev_child[c];
8948eda14cbcSMatt Macy 		spa_async_autoexpand(spa, cvd);
8949eda14cbcSMatt Macy 	}
8950eda14cbcSMatt Macy 
8951eda14cbcSMatt Macy 	if (!vd->vdev_ops->vdev_op_leaf || vd->vdev_physpath == NULL)
8952eda14cbcSMatt Macy 		return;
8953eda14cbcSMatt Macy 
8954eda14cbcSMatt Macy 	spa_event_notify(vd->vdev_spa, vd, NULL, ESC_ZFS_VDEV_AUTOEXPAND);
8955eda14cbcSMatt Macy }
8956eda14cbcSMatt Macy 
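/*
 * Body of the async task thread: atomically claim the set of pending
 * tasks, process each one, then announce completion and exit.
 */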
8957da5137abSMartin Matuska static __attribute__((noreturn)) void
8958eda14cbcSMatt Macy spa_async_thread(void *arg)
8959eda14cbcSMatt Macy {
8960eda14cbcSMatt Macy 	spa_t *spa = (spa_t *)arg;
8961eda14cbcSMatt Macy 	dsl_pool_t *dp = spa->spa_dsl_pool;
8962eda14cbcSMatt Macy 	int tasks;
8963eda14cbcSMatt Macy 
8964eda14cbcSMatt Macy 	ASSERT(spa->spa_sync_on);
8965eda14cbcSMatt Macy 
8966eda14cbcSMatt Macy 	mutex_enter(&spa->spa_async_lock);
8967eda14cbcSMatt Macy 	tasks = spa->spa_async_tasks;
8968eda14cbcSMatt Macy 	spa->spa_async_tasks = 0;
8969eda14cbcSMatt Macy 	mutex_exit(&spa->spa_async_lock);
8970eda14cbcSMatt Macy 
8971eda14cbcSMatt Macy 	/*
8972eda14cbcSMatt Macy 	 * See if the config needs to be updated.
8973eda14cbcSMatt Macy 	 */
8974eda14cbcSMatt Macy 	if (tasks & SPA_ASYNC_CONFIG_UPDATE) {
8975eda14cbcSMatt Macy 		uint64_t old_space, new_space;
8976eda14cbcSMatt Macy 
8977eda14cbcSMatt Macy 		mutex_enter(&spa_namespace_lock);
8978eda14cbcSMatt Macy 		old_space = metaslab_class_get_space(spa_normal_class(spa));
8979eda14cbcSMatt Macy 		old_space += metaslab_class_get_space(spa_special_class(spa));
8980eda14cbcSMatt Macy 		old_space += metaslab_class_get_space(spa_dedup_class(spa));
8981184c1b94SMartin Matuska 		old_space += metaslab_class_get_space(
8982184c1b94SMartin Matuska 		    spa_embedded_log_class(spa));
8983eda14cbcSMatt Macy 
8984eda14cbcSMatt Macy 		spa_config_update(spa, SPA_CONFIG_UPDATE_POOL);
8985eda14cbcSMatt Macy 
8986eda14cbcSMatt Macy 		new_space = metaslab_class_get_space(spa_normal_class(spa));
8987eda14cbcSMatt Macy 		new_space += metaslab_class_get_space(spa_special_class(spa));
8988eda14cbcSMatt Macy 		new_space += metaslab_class_get_space(spa_dedup_class(spa));
8989184c1b94SMartin Matuska 		new_space += metaslab_class_get_space(
8990184c1b94SMartin Matuska 		    spa_embedded_log_class(spa));
8991eda14cbcSMatt Macy 		mutex_exit(&spa_namespace_lock);
8992eda14cbcSMatt Macy 
8993eda14cbcSMatt Macy 		/*
8994eda14cbcSMatt Macy 		 * If the pool grew as a result of the config update,
8995eda14cbcSMatt Macy 		 * then log an internal history event.
8996eda14cbcSMatt Macy 		 */
8997eda14cbcSMatt Macy 		if (new_space != old_space) {
8998eda14cbcSMatt Macy 			spa_history_log_internal(spa, "vdev online", NULL,
8999eda14cbcSMatt Macy 			    "pool '%s' size: %llu(+%llu)",
9000eda14cbcSMatt Macy 			    spa_name(spa), (u_longlong_t)new_space,
9001eda14cbcSMatt Macy 			    (u_longlong_t)(new_space - old_space));
9002eda14cbcSMatt Macy 		}
9003eda14cbcSMatt Macy 	}
9004eda14cbcSMatt Macy 
9005eda14cbcSMatt Macy 	/*
9006eda14cbcSMatt Macy 	 * See if any devices need to be marked REMOVED.
9007eda14cbcSMatt Macy 	 */
9008eda14cbcSMatt Macy 	if (tasks & SPA_ASYNC_REMOVE) {
9009eda14cbcSMatt Macy 		spa_vdev_state_enter(spa, SCL_NONE);
9010eda14cbcSMatt Macy 		spa_async_remove(spa, spa->spa_root_vdev);
9011eda14cbcSMatt Macy 		for (int i = 0; i < spa->spa_l2cache.sav_count; i++)
9012eda14cbcSMatt Macy 			spa_async_remove(spa, spa->spa_l2cache.sav_vdevs[i]);
9013eda14cbcSMatt Macy 		for (int i = 0; i < spa->spa_spares.sav_count; i++)
9014eda14cbcSMatt Macy 			spa_async_remove(spa, spa->spa_spares.sav_vdevs[i]);
9015eda14cbcSMatt Macy 		(void) spa_vdev_state_exit(spa, NULL, 0);
9016eda14cbcSMatt Macy 	}
9017eda14cbcSMatt Macy 
9018eda14cbcSMatt Macy 	if ((tasks & SPA_ASYNC_AUTOEXPAND) && !spa_suspended(spa)) {
9019eda14cbcSMatt Macy 		spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
9020eda14cbcSMatt Macy 		spa_async_autoexpand(spa, spa->spa_root_vdev);
9021eda14cbcSMatt Macy 		spa_config_exit(spa, SCL_CONFIG, FTAG);
9022eda14cbcSMatt Macy 	}
9023eda14cbcSMatt Macy 
9024eda14cbcSMatt Macy 	/*
9025b985c9caSMartin Matuska 	 * See if any devices need to be marked faulted.
9026eda14cbcSMatt Macy 	 */
9027b985c9caSMartin Matuska 	if (tasks & SPA_ASYNC_FAULT_VDEV) {
9028eda14cbcSMatt Macy 		spa_vdev_state_enter(spa, SCL_NONE);
9029b985c9caSMartin Matuska 		spa_async_fault_vdev(spa, spa->spa_root_vdev);
9030eda14cbcSMatt Macy 		(void) spa_vdev_state_exit(spa, NULL, 0);
9031eda14cbcSMatt Macy 	}
9032eda14cbcSMatt Macy 
9033eda14cbcSMatt Macy 	/*
9034eda14cbcSMatt Macy 	 * If any devices are done replacing, detach them.
9035eda14cbcSMatt Macy 	 */
90367877fdebSMatt Macy 	if (tasks & SPA_ASYNC_RESILVER_DONE ||
9037d411c1d6SMartin Matuska 	    tasks & SPA_ASYNC_REBUILD_DONE ||
9038d411c1d6SMartin Matuska 	    tasks & SPA_ASYNC_DETACH_SPARE) {
9039eda14cbcSMatt Macy 		spa_vdev_resilver_done(spa);
9040eda14cbcSMatt Macy 	}
9041eda14cbcSMatt Macy 
9042eda14cbcSMatt Macy 	/*
9043eda14cbcSMatt Macy 	 * Kick off a resilver.
9044eda14cbcSMatt Macy 	 */
9045eda14cbcSMatt Macy 	if (tasks & SPA_ASYNC_RESILVER &&
9046eda14cbcSMatt Macy 	    !vdev_rebuild_active(spa->spa_root_vdev) &&
9047eda14cbcSMatt Macy 	    (!dsl_scan_resilvering(dp) ||
9048eda14cbcSMatt Macy 	    !spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_RESILVER_DEFER)))
9049eda14cbcSMatt Macy 		dsl_scan_restart_resilver(dp, 0);
9050eda14cbcSMatt Macy 
9051eda14cbcSMatt Macy 	if (tasks & SPA_ASYNC_INITIALIZE_RESTART) {
9052eda14cbcSMatt Macy 		mutex_enter(&spa_namespace_lock);
9053eda14cbcSMatt Macy 		spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
9054eda14cbcSMatt Macy 		vdev_initialize_restart(spa->spa_root_vdev);
9055eda14cbcSMatt Macy 		spa_config_exit(spa, SCL_CONFIG, FTAG);
9056eda14cbcSMatt Macy 		mutex_exit(&spa_namespace_lock);
9057eda14cbcSMatt Macy 	}
9058eda14cbcSMatt Macy 
9059eda14cbcSMatt Macy 	if (tasks & SPA_ASYNC_TRIM_RESTART) {
9060eda14cbcSMatt Macy 		mutex_enter(&spa_namespace_lock);
9061eda14cbcSMatt Macy 		spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
9062eda14cbcSMatt Macy 		vdev_trim_restart(spa->spa_root_vdev);
9063eda14cbcSMatt Macy 		spa_config_exit(spa, SCL_CONFIG, FTAG);
9064eda14cbcSMatt Macy 		mutex_exit(&spa_namespace_lock);
9065eda14cbcSMatt Macy 	}
9066eda14cbcSMatt Macy 
9067eda14cbcSMatt Macy 	if (tasks & SPA_ASYNC_AUTOTRIM_RESTART) {
9068eda14cbcSMatt Macy 		mutex_enter(&spa_namespace_lock);
9069eda14cbcSMatt Macy 		spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
9070eda14cbcSMatt Macy 		vdev_autotrim_restart(spa);
9071eda14cbcSMatt Macy 		spa_config_exit(spa, SCL_CONFIG, FTAG);
9072eda14cbcSMatt Macy 		mutex_exit(&spa_namespace_lock);
9073eda14cbcSMatt Macy 	}
9074eda14cbcSMatt Macy 
9075eda14cbcSMatt Macy 	/*
9076eda14cbcSMatt Macy 	 * Kick off L2 cache whole device TRIM.
9077eda14cbcSMatt Macy 	 */
9078eda14cbcSMatt Macy 	if (tasks & SPA_ASYNC_L2CACHE_TRIM) {
9079eda14cbcSMatt Macy 		mutex_enter(&spa_namespace_lock);
9080eda14cbcSMatt Macy 		spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
9081eda14cbcSMatt Macy 		vdev_trim_l2arc(spa);
9082eda14cbcSMatt Macy 		spa_config_exit(spa, SCL_CONFIG, FTAG);
9083eda14cbcSMatt Macy 		mutex_exit(&spa_namespace_lock);
9084eda14cbcSMatt Macy 	}
9085eda14cbcSMatt Macy 
9086eda14cbcSMatt Macy 	/*
9087eda14cbcSMatt Macy 	 * Kick off L2 cache rebuilding.
9088eda14cbcSMatt Macy 	 */
9089eda14cbcSMatt Macy 	if (tasks & SPA_ASYNC_L2CACHE_REBUILD) {
9090eda14cbcSMatt Macy 		mutex_enter(&spa_namespace_lock);
9091eda14cbcSMatt Macy 		spa_config_enter(spa, SCL_L2ARC, FTAG, RW_READER);
9092eda14cbcSMatt Macy 		l2arc_spa_rebuild_start(spa);
9093eda14cbcSMatt Macy 		spa_config_exit(spa, SCL_L2ARC, FTAG);
9094eda14cbcSMatt Macy 		mutex_exit(&spa_namespace_lock);
9095eda14cbcSMatt Macy 	}
9096eda14cbcSMatt Macy 
9097eda14cbcSMatt Macy 	/*
9098eda14cbcSMatt Macy 	 * Let the world know that we're done.
9099eda14cbcSMatt Macy 	 */
9100eda14cbcSMatt Macy 	mutex_enter(&spa->spa_async_lock);
9101eda14cbcSMatt Macy 	spa->spa_async_thread = NULL;
9102eda14cbcSMatt Macy 	cv_broadcast(&spa->spa_async_cv);
9103eda14cbcSMatt Macy 	mutex_exit(&spa->spa_async_lock);
9104eda14cbcSMatt Macy 	thread_exit();
9105eda14cbcSMatt Macy }
9106eda14cbcSMatt Macy 
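/*
 * Block new async tasks, wait for any in-flight async thread to finish,
 * then suspend device removal and cancel the background zthrs (indirect
 * condense, raidz expansion, checkpoint discard, and livelist
 * processing).
 */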
9107eda14cbcSMatt Macy void
9108eda14cbcSMatt Macy spa_async_suspend(spa_t *spa)
9109eda14cbcSMatt Macy {
9110eda14cbcSMatt Macy 	mutex_enter(&spa->spa_async_lock);
9111eda14cbcSMatt Macy 	spa->spa_async_suspended++;
9112eda14cbcSMatt Macy 	while (spa->spa_async_thread != NULL)
9113eda14cbcSMatt Macy 		cv_wait(&spa->spa_async_cv, &spa->spa_async_lock);
9114eda14cbcSMatt Macy 	mutex_exit(&spa->spa_async_lock);
9115eda14cbcSMatt Macy 
9116eda14cbcSMatt Macy 	spa_vdev_remove_suspend(spa);
9117eda14cbcSMatt Macy 
9118eda14cbcSMatt Macy 	zthr_t *condense_thread = spa->spa_condense_zthr;
9119eda14cbcSMatt Macy 	if (condense_thread != NULL)
9120eda14cbcSMatt Macy 		zthr_cancel(condense_thread);
9121eda14cbcSMatt Macy 
9122e716630dSMartin Matuska 	zthr_t *raidz_expand_thread = spa->spa_raidz_expand_zthr;
9123e716630dSMartin Matuska 	if (raidz_expand_thread != NULL)
9124e716630dSMartin Matuska 		zthr_cancel(raidz_expand_thread);
9125e716630dSMartin Matuska 
9126eda14cbcSMatt Macy 	zthr_t *discard_thread = spa->spa_checkpoint_discard_zthr;
9127eda14cbcSMatt Macy 	if (discard_thread != NULL)
9128eda14cbcSMatt Macy 		zthr_cancel(discard_thread);
9129eda14cbcSMatt Macy 
9130eda14cbcSMatt Macy 	zthr_t *ll_delete_thread = spa->spa_livelist_delete_zthr;
9131eda14cbcSMatt Macy 	if (ll_delete_thread != NULL)
9132eda14cbcSMatt Macy 		zthr_cancel(ll_delete_thread);
9133eda14cbcSMatt Macy 
9134eda14cbcSMatt Macy 	zthr_t *ll_condense_thread = spa->spa_livelist_condense_zthr;
9135eda14cbcSMatt Macy 	if (ll_condense_thread != NULL)
9136eda14cbcSMatt Macy 		zthr_cancel(ll_condense_thread);
9137eda14cbcSMatt Macy }
9138eda14cbcSMatt Macy 
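/*
 * Drop the async suspend count and resume the removal and background
 * zthr activity that spa_async_suspend() stopped.
 */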
9139eda14cbcSMatt Macy void
9140eda14cbcSMatt Macy spa_async_resume(spa_t *spa)
9141eda14cbcSMatt Macy {
9142eda14cbcSMatt Macy 	mutex_enter(&spa->spa_async_lock);
9143eda14cbcSMatt Macy 	ASSERT(spa->spa_async_suspended != 0);
9144eda14cbcSMatt Macy 	spa->spa_async_suspended--;
9145eda14cbcSMatt Macy 	mutex_exit(&spa->spa_async_lock);
9146eda14cbcSMatt Macy 	spa_restart_removal(spa);
9147eda14cbcSMatt Macy 
9148eda14cbcSMatt Macy 	zthr_t *condense_thread = spa->spa_condense_zthr;
9149eda14cbcSMatt Macy 	if (condense_thread != NULL)
9150eda14cbcSMatt Macy 		zthr_resume(condense_thread);
9151eda14cbcSMatt Macy 
9152e716630dSMartin Matuska 	zthr_t *raidz_expand_thread = spa->spa_raidz_expand_zthr;
9153e716630dSMartin Matuska 	if (raidz_expand_thread != NULL)
9154e716630dSMartin Matuska 		zthr_resume(raidz_expand_thread);
9155e716630dSMartin Matuska 
9156eda14cbcSMatt Macy 	zthr_t *discard_thread = spa->spa_checkpoint_discard_zthr;
9157eda14cbcSMatt Macy 	if (discard_thread != NULL)
9158eda14cbcSMatt Macy 		zthr_resume(discard_thread);
9159eda14cbcSMatt Macy 
9160eda14cbcSMatt Macy 	zthr_t *ll_delete_thread = spa->spa_livelist_delete_zthr;
9161eda14cbcSMatt Macy 	if (ll_delete_thread != NULL)
9162eda14cbcSMatt Macy 		zthr_resume(ll_delete_thread);
9163eda14cbcSMatt Macy 
9164eda14cbcSMatt Macy 	zthr_t *ll_condense_thread = spa->spa_livelist_condense_zthr;
9165eda14cbcSMatt Macy 	if (ll_condense_thread != NULL)
9166eda14cbcSMatt Macy 		zthr_resume(ll_condense_thread);
9167eda14cbcSMatt Macy }
9168eda14cbcSMatt Macy 
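/*
 * Return B_TRUE if there is async work worth dispatching a thread for.
 * A pending config update is ignored while we are still backing off
 * from a recent config cache write failure.
 */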
9169eda14cbcSMatt Macy static boolean_t
9170eda14cbcSMatt Macy spa_async_tasks_pending(spa_t *spa)
9171eda14cbcSMatt Macy {
9172eda14cbcSMatt Macy 	uint_t non_config_tasks;
9173eda14cbcSMatt Macy 	uint_t config_task;
9174eda14cbcSMatt Macy 	boolean_t config_task_suspended;
9175eda14cbcSMatt Macy 
9176eda14cbcSMatt Macy 	non_config_tasks = spa->spa_async_tasks & ~SPA_ASYNC_CONFIG_UPDATE;
9177eda14cbcSMatt Macy 	config_task = spa->spa_async_tasks & SPA_ASYNC_CONFIG_UPDATE;
9178eda14cbcSMatt Macy 	if (spa->spa_ccw_fail_time == 0) {
9179eda14cbcSMatt Macy 		config_task_suspended = B_FALSE;
9180eda14cbcSMatt Macy 	} else {
9181eda14cbcSMatt Macy 		config_task_suspended =
9182eda14cbcSMatt Macy 		    (gethrtime() - spa->spa_ccw_fail_time) <
9183eda14cbcSMatt Macy 		    ((hrtime_t)zfs_ccw_retry_interval * NANOSEC);
9184eda14cbcSMatt Macy 	}
9185eda14cbcSMatt Macy 
9186eda14cbcSMatt Macy 	return (non_config_tasks || (config_task && !config_task_suspended));
9187eda14cbcSMatt Macy }
9188eda14cbcSMatt Macy 
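/*
 * Start the async task thread if there is pending work, async processing
 * is not suspended, and no thread is already running.
 */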
9189eda14cbcSMatt Macy static void
9190eda14cbcSMatt Macy spa_async_dispatch(spa_t *spa)
9191eda14cbcSMatt Macy {
9192eda14cbcSMatt Macy 	mutex_enter(&spa->spa_async_lock);
9193eda14cbcSMatt Macy 	if (spa_async_tasks_pending(spa) &&
9194eda14cbcSMatt Macy 	    !spa->spa_async_suspended &&
9195eda14cbcSMatt Macy 	    spa->spa_async_thread == NULL)
9196eda14cbcSMatt Macy 		spa->spa_async_thread = thread_create(NULL, 0,
9197eda14cbcSMatt Macy 		    spa_async_thread, spa, 0, &p0, TS_RUN, maxclsyspri);
9198eda14cbcSMatt Macy 	mutex_exit(&spa->spa_async_lock);
9199eda14cbcSMatt Macy }
9200eda14cbcSMatt Macy 
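/*
 * Record a request for the given async task(s); it will be picked up by
 * the next call to spa_async_dispatch().
 */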
9201eda14cbcSMatt Macy void
9202eda14cbcSMatt Macy spa_async_request(spa_t *spa, int task)
9203eda14cbcSMatt Macy {
9204eda14cbcSMatt Macy 	zfs_dbgmsg("spa=%s async request task=%u", spa->spa_name, task);
9205eda14cbcSMatt Macy 	mutex_enter(&spa->spa_async_lock);
9206eda14cbcSMatt Macy 	spa->spa_async_tasks |= task;
9207eda14cbcSMatt Macy 	mutex_exit(&spa->spa_async_lock);
9208eda14cbcSMatt Macy }
9209eda14cbcSMatt Macy 
9210eda14cbcSMatt Macy int
9211eda14cbcSMatt Macy spa_async_tasks(spa_t *spa)
9212eda14cbcSMatt Macy {
9213eda14cbcSMatt Macy 	return (spa->spa_async_tasks);
9214eda14cbcSMatt Macy }
9215eda14cbcSMatt Macy 
9216eda14cbcSMatt Macy /*
9217eda14cbcSMatt Macy  * ==========================================================================
9218eda14cbcSMatt Macy  * SPA syncing routines
9219eda14cbcSMatt Macy  * ==========================================================================
9220eda14cbcSMatt Macy  */
9221eda14cbcSMatt Macy 
9222eda14cbcSMatt Macy 
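/*
 * Callbacks used when iterating block pointer lists: enqueue each block
 * pointer onto the given bpobj, either as an allocated or as a freed
 * entry.
 */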
9223eda14cbcSMatt Macy static int
9224eda14cbcSMatt Macy bpobj_enqueue_cb(void *arg, const blkptr_t *bp, boolean_t bp_freed,
9225eda14cbcSMatt Macy     dmu_tx_t *tx)
9226eda14cbcSMatt Macy {
9227eda14cbcSMatt Macy 	bpobj_t *bpo = arg;
9228eda14cbcSMatt Macy 	bpobj_enqueue(bpo, bp, bp_freed, tx);
9229eda14cbcSMatt Macy 	return (0);
9230eda14cbcSMatt Macy }
9231eda14cbcSMatt Macy 
9232eda14cbcSMatt Macy int
9233eda14cbcSMatt Macy bpobj_enqueue_alloc_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
9234eda14cbcSMatt Macy {
9235eda14cbcSMatt Macy 	return (bpobj_enqueue_cb(arg, bp, B_FALSE, tx));
9236eda14cbcSMatt Macy }
9237eda14cbcSMatt Macy 
9238eda14cbcSMatt Macy int
9239eda14cbcSMatt Macy bpobj_enqueue_free_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
9240eda14cbcSMatt Macy {
9241eda14cbcSMatt Macy 	return (bpobj_enqueue_cb(arg, bp, B_TRUE, tx));
9242eda14cbcSMatt Macy }
9243eda14cbcSMatt Macy 
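/*
 * Callback that issues an asynchronous free for each block pointer
 * visited, as a child of the given parent zio.
 */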
9244eda14cbcSMatt Macy static int
9245eda14cbcSMatt Macy spa_free_sync_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
9246eda14cbcSMatt Macy {
9247eda14cbcSMatt Macy 	zio_t *pio = arg;
9248eda14cbcSMatt Macy 
9249eda14cbcSMatt Macy 	zio_nowait(zio_free_sync(pio, pio->io_spa, dmu_tx_get_txg(tx), bp,
9250eda14cbcSMatt Macy 	    pio->io_flags));
9251eda14cbcSMatt Macy 	return (0);
9252eda14cbcSMatt Macy }
9253eda14cbcSMatt Macy 
9254eda14cbcSMatt Macy static int
9255eda14cbcSMatt Macy bpobj_spa_free_sync_cb(void *arg, const blkptr_t *bp, boolean_t bp_freed,
9256eda14cbcSMatt Macy     dmu_tx_t *tx)
9257eda14cbcSMatt Macy {
9258eda14cbcSMatt Macy 	ASSERT(!bp_freed);
9259eda14cbcSMatt Macy 	return (spa_free_sync_cb(arg, bp, tx));
9260eda14cbcSMatt Macy }
9261eda14cbcSMatt Macy 
9262eda14cbcSMatt Macy /*
9263eda14cbcSMatt Macy  * Note: this simple function is not inlined to make it easier to dtrace the
9264eda14cbcSMatt Macy  * amount of time spent syncing frees.
9265eda14cbcSMatt Macy  */
9266eda14cbcSMatt Macy static void
9267eda14cbcSMatt Macy spa_sync_frees(spa_t *spa, bplist_t *bpl, dmu_tx_t *tx)
9268eda14cbcSMatt Macy {
9269eda14cbcSMatt Macy 	zio_t *zio = zio_root(spa, NULL, NULL, 0);
9270eda14cbcSMatt Macy 	bplist_iterate(bpl, spa_free_sync_cb, zio, tx);
9271eda14cbcSMatt Macy 	VERIFY(zio_wait(zio) == 0);
9272eda14cbcSMatt Macy }
9273eda14cbcSMatt Macy 
9274eda14cbcSMatt Macy /*
9275eda14cbcSMatt Macy  * Note: this simple function is not inlined to make it easier to dtrace the
9276eda14cbcSMatt Macy  * amount of time spent syncing deferred frees.
9277eda14cbcSMatt Macy  */
9278eda14cbcSMatt Macy static void
9279eda14cbcSMatt Macy spa_sync_deferred_frees(spa_t *spa, dmu_tx_t *tx)
9280eda14cbcSMatt Macy {
9281eda14cbcSMatt Macy 	if (spa_sync_pass(spa) != 1)
9282eda14cbcSMatt Macy 		return;
9283eda14cbcSMatt Macy 
9284eda14cbcSMatt Macy 	/*
9285eda14cbcSMatt Macy 	 * Note:
9286eda14cbcSMatt Macy 	 * If the log space map feature is active, we stop deferring
9287eda14cbcSMatt Macy 	 * frees to the next TXG and therefore running this function
9288eda14cbcSMatt Macy 	 * would be considered a no-op as spa_deferred_bpobj should
9289eda14cbcSMatt Macy 	 * not have any entries.
9290eda14cbcSMatt Macy 	 *
9291eda14cbcSMatt Macy 	 * That said we run this function anyway (instead of returning
9292eda14cbcSMatt Macy 	 * immediately) for the edge-case scenario where we just
9293eda14cbcSMatt Macy 	 * activated the log space map feature in this TXG but we have
9294eda14cbcSMatt Macy 	 * deferred frees from the previous TXG.
9295eda14cbcSMatt Macy 	 */
9296eda14cbcSMatt Macy 	zio_t *zio = zio_root(spa, NULL, NULL, 0);
9297eda14cbcSMatt Macy 	VERIFY3U(bpobj_iterate(&spa->spa_deferred_bpobj,
9298eda14cbcSMatt Macy 	    bpobj_spa_free_sync_cb, zio, tx), ==, 0);
9299eda14cbcSMatt Macy 	VERIFY0(zio_wait(zio));
9300eda14cbcSMatt Macy }
9301eda14cbcSMatt Macy 
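/*
 * Pack the given nvlist and write it to the packed-nvlist object 'obj'
 * in the MOS, recording the packed size in the object's bonus buffer.
 */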
9302eda14cbcSMatt Macy static void
9303eda14cbcSMatt Macy spa_sync_nvlist(spa_t *spa, uint64_t obj, nvlist_t *nv, dmu_tx_t *tx)
9304eda14cbcSMatt Macy {
9305eda14cbcSMatt Macy 	char *packed = NULL;
9306eda14cbcSMatt Macy 	size_t bufsize;
9307eda14cbcSMatt Macy 	size_t nvsize = 0;
9308eda14cbcSMatt Macy 	dmu_buf_t *db;
9309eda14cbcSMatt Macy 
9310eda14cbcSMatt Macy 	VERIFY(nvlist_size(nv, &nvsize, NV_ENCODE_XDR) == 0);
9311eda14cbcSMatt Macy 
9312eda14cbcSMatt Macy 	/*
9313eda14cbcSMatt Macy 	 * Write full (SPA_CONFIG_BLOCKSIZE) blocks of configuration
9314eda14cbcSMatt Macy 	 * information.  This avoids the dmu_buf_will_dirty() path and
9315eda14cbcSMatt Macy 	 * saves us a pre-read to get data we don't actually care about.
9316eda14cbcSMatt Macy 	 */
9317eda14cbcSMatt Macy 	bufsize = P2ROUNDUP((uint64_t)nvsize, SPA_CONFIG_BLOCKSIZE);
9318eda14cbcSMatt Macy 	packed = vmem_alloc(bufsize, KM_SLEEP);
9319eda14cbcSMatt Macy 
9320eda14cbcSMatt Macy 	VERIFY(nvlist_pack(nv, &packed, &nvsize, NV_ENCODE_XDR,
9321eda14cbcSMatt Macy 	    KM_SLEEP) == 0);
9322da5137abSMartin Matuska 	memset(packed + nvsize, 0, bufsize - nvsize);
9323eda14cbcSMatt Macy 
9324eda14cbcSMatt Macy 	dmu_write(spa->spa_meta_objset, obj, 0, bufsize, packed, tx);
9325eda14cbcSMatt Macy 
9326eda14cbcSMatt Macy 	vmem_free(packed, bufsize);
9327eda14cbcSMatt Macy 
9328eda14cbcSMatt Macy 	VERIFY(0 == dmu_bonus_hold(spa->spa_meta_objset, obj, FTAG, &db));
9329eda14cbcSMatt Macy 	dmu_buf_will_dirty(db, tx);
9330eda14cbcSMatt Macy 	*(uint64_t *)db->db_data = nvsize;
9331eda14cbcSMatt Macy 	dmu_buf_rele(db, FTAG);
9332eda14cbcSMatt Macy }
9333eda14cbcSMatt Macy 
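/*
 * Sync an auxiliary vdev array (spares or l2cache) to its packed-nvlist
 * object in the MOS, creating that object first if necessary.
 */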
9334eda14cbcSMatt Macy static void
9335eda14cbcSMatt Macy spa_sync_aux_dev(spa_t *spa, spa_aux_vdev_t *sav, dmu_tx_t *tx,
9336eda14cbcSMatt Macy     const char *config, const char *entry)
9337eda14cbcSMatt Macy {
9338eda14cbcSMatt Macy 	nvlist_t *nvroot;
9339eda14cbcSMatt Macy 	nvlist_t **list;
9340eda14cbcSMatt Macy 	int i;
9341eda14cbcSMatt Macy 
9342eda14cbcSMatt Macy 	if (!sav->sav_sync)
9343eda14cbcSMatt Macy 		return;
9344eda14cbcSMatt Macy 
9345eda14cbcSMatt Macy 	/*
9346eda14cbcSMatt Macy 	 * Update the MOS nvlist describing the list of available devices.
9347eda14cbcSMatt Macy 	 * spa_validate_aux() will have already made sure this nvlist is
9348eda14cbcSMatt Macy 	 * valid and the vdevs are labeled appropriately.
9349eda14cbcSMatt Macy 	 */
9350eda14cbcSMatt Macy 	if (sav->sav_object == 0) {
9351eda14cbcSMatt Macy 		sav->sav_object = dmu_object_alloc(spa->spa_meta_objset,
9352eda14cbcSMatt Macy 		    DMU_OT_PACKED_NVLIST, 1 << 14, DMU_OT_PACKED_NVLIST_SIZE,
9353eda14cbcSMatt Macy 		    sizeof (uint64_t), tx);
9354eda14cbcSMatt Macy 		VERIFY(zap_update(spa->spa_meta_objset,
9355eda14cbcSMatt Macy 		    DMU_POOL_DIRECTORY_OBJECT, entry, sizeof (uint64_t), 1,
9356eda14cbcSMatt Macy 		    &sav->sav_object, tx) == 0);
9357eda14cbcSMatt Macy 	}
9358eda14cbcSMatt Macy 
935981b22a98SMartin Matuska 	nvroot = fnvlist_alloc();
9360eda14cbcSMatt Macy 	if (sav->sav_count == 0) {
9361681ce946SMartin Matuska 		fnvlist_add_nvlist_array(nvroot, config,
9362681ce946SMartin Matuska 		    (const nvlist_t * const *)NULL, 0);
9363eda14cbcSMatt Macy 	} else {
9364eda14cbcSMatt Macy 		list = kmem_alloc(sav->sav_count*sizeof (void *), KM_SLEEP);
9365eda14cbcSMatt Macy 		for (i = 0; i < sav->sav_count; i++)
9366eda14cbcSMatt Macy 			list[i] = vdev_config_generate(spa, sav->sav_vdevs[i],
9367eda14cbcSMatt Macy 			    B_FALSE, VDEV_CONFIG_L2CACHE);
9368681ce946SMartin Matuska 		fnvlist_add_nvlist_array(nvroot, config,
9369681ce946SMartin Matuska 		    (const nvlist_t * const *)list, sav->sav_count);
9370eda14cbcSMatt Macy 		for (i = 0; i < sav->sav_count; i++)
9371eda14cbcSMatt Macy 			nvlist_free(list[i]);
9372eda14cbcSMatt Macy 		kmem_free(list, sav->sav_count * sizeof (void *));
9373eda14cbcSMatt Macy 	}
9374eda14cbcSMatt Macy 
9375eda14cbcSMatt Macy 	spa_sync_nvlist(spa, sav->sav_object, nvroot, tx);
9376eda14cbcSMatt Macy 	nvlist_free(nvroot);
9377eda14cbcSMatt Macy 
9378eda14cbcSMatt Macy 	sav->sav_sync = B_FALSE;
9379eda14cbcSMatt Macy }
9380eda14cbcSMatt Macy 
9381eda14cbcSMatt Macy /*
9382eda14cbcSMatt Macy  * Rebuild spa's all-vdev ZAP from the vdev ZAPs indicated in each vdev_t.
9383eda14cbcSMatt Macy  * The all-vdev ZAP must be empty.
9384eda14cbcSMatt Macy  */
9385eda14cbcSMatt Macy static void
9386eda14cbcSMatt Macy spa_avz_build(vdev_t *vd, uint64_t avz, dmu_tx_t *tx)
9387eda14cbcSMatt Macy {
9388eda14cbcSMatt Macy 	spa_t *spa = vd->vdev_spa;
9389eda14cbcSMatt Macy 
9390d411c1d6SMartin Matuska 	if (vd->vdev_root_zap != 0 &&
9391d411c1d6SMartin Matuska 	    spa_feature_is_active(spa, SPA_FEATURE_AVZ_V2)) {
9392d411c1d6SMartin Matuska 		VERIFY0(zap_add_int(spa->spa_meta_objset, avz,
9393d411c1d6SMartin Matuska 		    vd->vdev_root_zap, tx));
9394d411c1d6SMartin Matuska 	}
9395eda14cbcSMatt Macy 	if (vd->vdev_top_zap != 0) {
9396eda14cbcSMatt Macy 		VERIFY0(zap_add_int(spa->spa_meta_objset, avz,
9397eda14cbcSMatt Macy 		    vd->vdev_top_zap, tx));
9398eda14cbcSMatt Macy 	}
9399eda14cbcSMatt Macy 	if (vd->vdev_leaf_zap != 0) {
9400eda14cbcSMatt Macy 		VERIFY0(zap_add_int(spa->spa_meta_objset, avz,
9401eda14cbcSMatt Macy 		    vd->vdev_leaf_zap, tx));
9402eda14cbcSMatt Macy 	}
9403eda14cbcSMatt Macy 	for (uint64_t i = 0; i < vd->vdev_children; i++) {
9404eda14cbcSMatt Macy 		spa_avz_build(vd->vdev_child[i], avz, tx);
9405eda14cbcSMatt Macy 	}
9406eda14cbcSMatt Macy }
9407eda14cbcSMatt Macy 
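/*
 * Sync the pool configuration to the MOS config object, first rebuilding
 * or destroying the all-vdev ZAP if spa_avz_action calls for it.
 */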
9408eda14cbcSMatt Macy static void
9409eda14cbcSMatt Macy spa_sync_config_object(spa_t *spa, dmu_tx_t *tx)
9410eda14cbcSMatt Macy {
9411eda14cbcSMatt Macy 	nvlist_t *config;
9412eda14cbcSMatt Macy 
9413eda14cbcSMatt Macy 	/*
9414eda14cbcSMatt Macy 	 * If the pool is being imported from a pre-per-vdev-ZAP version of ZFS,
9415eda14cbcSMatt Macy 	 * its config may not be dirty but we still need to build per-vdev ZAPs.
9416eda14cbcSMatt Macy 	 * Similarly, if the pool is being assembled (e.g. after a split), we
9417eda14cbcSMatt Macy 	 * need to rebuild the AVZ although the config may not be dirty.
9418eda14cbcSMatt Macy 	 */
9419eda14cbcSMatt Macy 	if (list_is_empty(&spa->spa_config_dirty_list) &&
9420eda14cbcSMatt Macy 	    spa->spa_avz_action == AVZ_ACTION_NONE)
9421eda14cbcSMatt Macy 		return;
9422eda14cbcSMatt Macy 
9423eda14cbcSMatt Macy 	spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
9424eda14cbcSMatt Macy 
9425eda14cbcSMatt Macy 	ASSERT(spa->spa_avz_action == AVZ_ACTION_NONE ||
9426eda14cbcSMatt Macy 	    spa->spa_avz_action == AVZ_ACTION_INITIALIZE ||
9427eda14cbcSMatt Macy 	    spa->spa_all_vdev_zaps != 0);
9428eda14cbcSMatt Macy 
9429eda14cbcSMatt Macy 	if (spa->spa_avz_action == AVZ_ACTION_REBUILD) {
9430eda14cbcSMatt Macy 		/* Make and build the new AVZ */
9431eda14cbcSMatt Macy 		uint64_t new_avz = zap_create(spa->spa_meta_objset,
9432eda14cbcSMatt Macy 		    DMU_OTN_ZAP_METADATA, DMU_OT_NONE, 0, tx);
9433eda14cbcSMatt Macy 		spa_avz_build(spa->spa_root_vdev, new_avz, tx);
9434eda14cbcSMatt Macy 
9435eda14cbcSMatt Macy 		/* Diff old AVZ with new one */
9436eda14cbcSMatt Macy 		zap_cursor_t zc;
9437eda14cbcSMatt Macy 		zap_attribute_t za;
9438eda14cbcSMatt Macy 
9439eda14cbcSMatt Macy 		for (zap_cursor_init(&zc, spa->spa_meta_objset,
9440eda14cbcSMatt Macy 		    spa->spa_all_vdev_zaps);
9441eda14cbcSMatt Macy 		    zap_cursor_retrieve(&zc, &za) == 0;
9442eda14cbcSMatt Macy 		    zap_cursor_advance(&zc)) {
9443eda14cbcSMatt Macy 			uint64_t vdzap = za.za_first_integer;
9444eda14cbcSMatt Macy 			if (zap_lookup_int(spa->spa_meta_objset, new_avz,
9445eda14cbcSMatt Macy 			    vdzap) == ENOENT) {
9446eda14cbcSMatt Macy 				/*
9447eda14cbcSMatt Macy 				 * ZAP is listed in old AVZ but not in new one;
9448eda14cbcSMatt Macy 				 * destroy it
9449eda14cbcSMatt Macy 				 */
9450eda14cbcSMatt Macy 				VERIFY0(zap_destroy(spa->spa_meta_objset, vdzap,
9451eda14cbcSMatt Macy 				    tx));
9452eda14cbcSMatt Macy 			}
9453eda14cbcSMatt Macy 		}
9454eda14cbcSMatt Macy 
9455eda14cbcSMatt Macy 		zap_cursor_fini(&zc);
9456eda14cbcSMatt Macy 
9457eda14cbcSMatt Macy 		/* Destroy the old AVZ */
9458eda14cbcSMatt Macy 		VERIFY0(zap_destroy(spa->spa_meta_objset,
9459eda14cbcSMatt Macy 		    spa->spa_all_vdev_zaps, tx));
9460eda14cbcSMatt Macy 
9461eda14cbcSMatt Macy 		/* Replace the old AVZ in the dir obj with the new one */
9462eda14cbcSMatt Macy 		VERIFY0(zap_update(spa->spa_meta_objset,
9463eda14cbcSMatt Macy 		    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_VDEV_ZAP_MAP,
9464eda14cbcSMatt Macy 		    sizeof (new_avz), 1, &new_avz, tx));
9465eda14cbcSMatt Macy 
9466eda14cbcSMatt Macy 		spa->spa_all_vdev_zaps = new_avz;
9467eda14cbcSMatt Macy 	} else if (spa->spa_avz_action == AVZ_ACTION_DESTROY) {
9468eda14cbcSMatt Macy 		zap_cursor_t zc;
9469eda14cbcSMatt Macy 		zap_attribute_t za;
9470eda14cbcSMatt Macy 
9471eda14cbcSMatt Macy 		/* Walk through the AVZ and destroy all listed ZAPs */
9472eda14cbcSMatt Macy 		for (zap_cursor_init(&zc, spa->spa_meta_objset,
9473eda14cbcSMatt Macy 		    spa->spa_all_vdev_zaps);
9474eda14cbcSMatt Macy 		    zap_cursor_retrieve(&zc, &za) == 0;
9475eda14cbcSMatt Macy 		    zap_cursor_advance(&zc)) {
9476eda14cbcSMatt Macy 			uint64_t zap = za.za_first_integer;
9477eda14cbcSMatt Macy 			VERIFY0(zap_destroy(spa->spa_meta_objset, zap, tx));
9478eda14cbcSMatt Macy 		}
9479eda14cbcSMatt Macy 
9480eda14cbcSMatt Macy 		zap_cursor_fini(&zc);
9481eda14cbcSMatt Macy 
9482eda14cbcSMatt Macy 		/* Destroy and unlink the AVZ itself */
9483eda14cbcSMatt Macy 		VERIFY0(zap_destroy(spa->spa_meta_objset,
9484eda14cbcSMatt Macy 		    spa->spa_all_vdev_zaps, tx));
9485eda14cbcSMatt Macy 		VERIFY0(zap_remove(spa->spa_meta_objset,
9486eda14cbcSMatt Macy 		    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_VDEV_ZAP_MAP, tx));
9487eda14cbcSMatt Macy 		spa->spa_all_vdev_zaps = 0;
9488eda14cbcSMatt Macy 	}
9489eda14cbcSMatt Macy 
9490eda14cbcSMatt Macy 	if (spa->spa_all_vdev_zaps == 0) {
9491eda14cbcSMatt Macy 		spa->spa_all_vdev_zaps = zap_create_link(spa->spa_meta_objset,
9492eda14cbcSMatt Macy 		    DMU_OTN_ZAP_METADATA, DMU_POOL_DIRECTORY_OBJECT,
9493eda14cbcSMatt Macy 		    DMU_POOL_VDEV_ZAP_MAP, tx);
9494eda14cbcSMatt Macy 	}
9495eda14cbcSMatt Macy 	spa->spa_avz_action = AVZ_ACTION_NONE;
9496eda14cbcSMatt Macy 
9497eda14cbcSMatt Macy 	/* Create ZAPs for vdevs that don't have them. */
9498eda14cbcSMatt Macy 	vdev_construct_zaps(spa->spa_root_vdev, tx);
9499eda14cbcSMatt Macy 
9500eda14cbcSMatt Macy 	config = spa_config_generate(spa, spa->spa_root_vdev,
9501eda14cbcSMatt Macy 	    dmu_tx_get_txg(tx), B_FALSE);
9502eda14cbcSMatt Macy 
9503eda14cbcSMatt Macy 	/*
9504eda14cbcSMatt Macy 	 * If we're upgrading the spa version then make sure that
9505eda14cbcSMatt Macy 	 * the config object gets updated with the correct version.
9506eda14cbcSMatt Macy 	 */
9507eda14cbcSMatt Macy 	if (spa->spa_ubsync.ub_version < spa->spa_uberblock.ub_version)
9508eda14cbcSMatt Macy 		fnvlist_add_uint64(config, ZPOOL_CONFIG_VERSION,
9509eda14cbcSMatt Macy 		    spa->spa_uberblock.ub_version);
9510eda14cbcSMatt Macy 
9511eda14cbcSMatt Macy 	spa_config_exit(spa, SCL_STATE, FTAG);
9512eda14cbcSMatt Macy 
9513eda14cbcSMatt Macy 	nvlist_free(spa->spa_config_syncing);
9514eda14cbcSMatt Macy 	spa->spa_config_syncing = config;
9515eda14cbcSMatt Macy 
9516eda14cbcSMatt Macy 	spa_sync_nvlist(spa, spa->spa_config_object, config, tx);
9517eda14cbcSMatt Macy }
9518eda14cbcSMatt Macy 
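/*
 * Sync task that upgrades the pool to a new on-disk version.
 */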
9519eda14cbcSMatt Macy static void
9520eda14cbcSMatt Macy spa_sync_version(void *arg, dmu_tx_t *tx)
9521eda14cbcSMatt Macy {
9522eda14cbcSMatt Macy 	uint64_t *versionp = arg;
9523eda14cbcSMatt Macy 	uint64_t version = *versionp;
9524eda14cbcSMatt Macy 	spa_t *spa = dmu_tx_pool(tx)->dp_spa;
9525eda14cbcSMatt Macy 
9526eda14cbcSMatt Macy 	/*
9527eda14cbcSMatt Macy 	 * Setting the version is special cased when first creating the pool.
9528eda14cbcSMatt Macy 	 */
9529eda14cbcSMatt Macy 	ASSERT(tx->tx_txg != TXG_INITIAL);
9530eda14cbcSMatt Macy 
9531eda14cbcSMatt Macy 	ASSERT(SPA_VERSION_IS_SUPPORTED(version));
9532eda14cbcSMatt Macy 	ASSERT(version >= spa_version(spa));
9533eda14cbcSMatt Macy 
9534eda14cbcSMatt Macy 	spa->spa_uberblock.ub_version = version;
9535eda14cbcSMatt Macy 	vdev_config_dirty(spa->spa_root_vdev);
9536eda14cbcSMatt Macy 	spa_history_log_internal(spa, "set", tx, "version=%lld",
9537eda14cbcSMatt Macy 	    (longlong_t)version);
9538eda14cbcSMatt Macy }
9539eda14cbcSMatt Macy 
9540eda14cbcSMatt Macy /*
9541eda14cbcSMatt Macy  * Set zpool properties.
9542eda14cbcSMatt Macy  */
9543eda14cbcSMatt Macy static void
9544eda14cbcSMatt Macy spa_sync_props(void *arg, dmu_tx_t *tx)
9545eda14cbcSMatt Macy {
9546eda14cbcSMatt Macy 	nvlist_t *nvp = arg;
9547eda14cbcSMatt Macy 	spa_t *spa = dmu_tx_pool(tx)->dp_spa;
9548eda14cbcSMatt Macy 	objset_t *mos = spa->spa_meta_objset;
9549eda14cbcSMatt Macy 	nvpair_t *elem = NULL;
9550eda14cbcSMatt Macy 
9551eda14cbcSMatt Macy 	mutex_enter(&spa->spa_props_lock);
9552eda14cbcSMatt Macy 
9553eda14cbcSMatt Macy 	while ((elem = nvlist_next_nvpair(nvp, elem))) {
9554eda14cbcSMatt Macy 		uint64_t intval;
95552a58b312SMartin Matuska 		const char *strval, *fname;
9556eda14cbcSMatt Macy 		zpool_prop_t prop;
9557eda14cbcSMatt Macy 		const char *propname;
9558c98ecfceSAllan Jude 		const char *elemname = nvpair_name(elem);
9559eda14cbcSMatt Macy 		zprop_type_t proptype;
9560eda14cbcSMatt Macy 		spa_feature_t fid;
9561eda14cbcSMatt Macy 
9562c98ecfceSAllan Jude 		switch (prop = zpool_name_to_prop(elemname)) {
9563eda14cbcSMatt Macy 		case ZPOOL_PROP_VERSION:
9564eda14cbcSMatt Macy 			intval = fnvpair_value_uint64(elem);
9565eda14cbcSMatt Macy 			/*
9566eda14cbcSMatt Macy 			 * The version is synced separately before other
9567eda14cbcSMatt Macy 			 * properties and should be correct by now.
9568eda14cbcSMatt Macy 			 */
9569eda14cbcSMatt Macy 			ASSERT3U(spa_version(spa), >=, intval);
9570eda14cbcSMatt Macy 			break;
9571eda14cbcSMatt Macy 
9572eda14cbcSMatt Macy 		case ZPOOL_PROP_ALTROOT:
9573eda14cbcSMatt Macy 			/*
9574eda14cbcSMatt Macy 			 * 'altroot' is a non-persistent property. It should
9575eda14cbcSMatt Macy 			 * have been set temporarily at creation or import time.
9576eda14cbcSMatt Macy 			 */
9577eda14cbcSMatt Macy 			ASSERT(spa->spa_root != NULL);
9578eda14cbcSMatt Macy 			break;
9579eda14cbcSMatt Macy 
9580eda14cbcSMatt Macy 		case ZPOOL_PROP_READONLY:
9581eda14cbcSMatt Macy 		case ZPOOL_PROP_CACHEFILE:
9582eda14cbcSMatt Macy 			/*
9583eda14cbcSMatt Macy 			 * 'readonly' and 'cachefile' are also non-persistent
9584eda14cbcSMatt Macy 			 * properties.
9585eda14cbcSMatt Macy 			 */
9586eda14cbcSMatt Macy 			break;
9587eda14cbcSMatt Macy 		case ZPOOL_PROP_COMMENT:
9588eda14cbcSMatt Macy 			strval = fnvpair_value_string(elem);
9589eda14cbcSMatt Macy 			if (spa->spa_comment != NULL)
9590eda14cbcSMatt Macy 				spa_strfree(spa->spa_comment);
9591eda14cbcSMatt Macy 			spa->spa_comment = spa_strdup(strval);
9592eda14cbcSMatt Macy 			/*
9593eda14cbcSMatt Macy 			 * We need to dirty the configuration on all the vdevs
959433b8c039SMartin Matuska 			 * so that their labels get updated.  We also need to
959533b8c039SMartin Matuska 			 * update the cache file to keep it in sync with the
959633b8c039SMartin Matuska 			 * MOS version. It's unnecessary to do this for pool
959733b8c039SMartin Matuska 			 * creation since the vdev's configuration has already
959833b8c039SMartin Matuska 			 * been dirtied.
9599eda14cbcSMatt Macy 			 */
960033b8c039SMartin Matuska 			if (tx->tx_txg != TXG_INITIAL) {
9601eda14cbcSMatt Macy 				vdev_config_dirty(spa->spa_root_vdev);
960233b8c039SMartin Matuska 				spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE);
960333b8c039SMartin Matuska 			}
9604eda14cbcSMatt Macy 			spa_history_log_internal(spa, "set", tx,
9605c98ecfceSAllan Jude 			    "%s=%s", elemname, strval);
9606eda14cbcSMatt Macy 			break;
9607ee36e25aSMartin Matuska 		case ZPOOL_PROP_COMPATIBILITY:
9608ee36e25aSMartin Matuska 			strval = fnvpair_value_string(elem);
9609ee36e25aSMartin Matuska 			if (spa->spa_compatibility != NULL)
9610ee36e25aSMartin Matuska 				spa_strfree(spa->spa_compatibility);
9611ee36e25aSMartin Matuska 			spa->spa_compatibility = spa_strdup(strval);
9612ee36e25aSMartin Matuska 			/*
9613ee36e25aSMartin Matuska 			 * Dirty the configuration on vdevs as above.
9614ee36e25aSMartin Matuska 			 */
961533b8c039SMartin Matuska 			if (tx->tx_txg != TXG_INITIAL) {
9616ee36e25aSMartin Matuska 				vdev_config_dirty(spa->spa_root_vdev);
961733b8c039SMartin Matuska 				spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE);
961833b8c039SMartin Matuska 			}
961933b8c039SMartin Matuska 
9620ee36e25aSMartin Matuska 			spa_history_log_internal(spa, "set", tx,
9621ee36e25aSMartin Matuska 			    "%s=%s", nvpair_name(elem), strval);
9622ee36e25aSMartin Matuska 			break;
9623ee36e25aSMartin Matuska 
9624c98ecfceSAllan Jude 		case ZPOOL_PROP_INVAL:
9625c98ecfceSAllan Jude 			if (zpool_prop_feature(elemname)) {
9626c98ecfceSAllan Jude 				fname = strchr(elemname, '@') + 1;
9627c98ecfceSAllan Jude 				VERIFY0(zfeature_lookup_name(fname, &fid));
9628c98ecfceSAllan Jude 
9629c98ecfceSAllan Jude 				spa_feature_enable(spa, fid, tx);
9630c98ecfceSAllan Jude 				spa_history_log_internal(spa, "set", tx,
9631c98ecfceSAllan Jude 				    "%s=enabled", elemname);
9632c98ecfceSAllan Jude 				break;
9633c98ecfceSAllan Jude 			} else if (!zfs_prop_user(elemname)) {
9634c98ecfceSAllan Jude 				ASSERT(zpool_prop_feature(elemname));
9635c98ecfceSAllan Jude 				break;
9636c98ecfceSAllan Jude 			}
9637c98ecfceSAllan Jude 			zfs_fallthrough;
9638eda14cbcSMatt Macy 		default:
9639eda14cbcSMatt Macy 			/*
9640eda14cbcSMatt Macy 			 * Set pool property values in the poolprops mos object.
9641eda14cbcSMatt Macy 			 */
9642eda14cbcSMatt Macy 			if (spa->spa_pool_props_object == 0) {
9643eda14cbcSMatt Macy 				spa->spa_pool_props_object =
9644eda14cbcSMatt Macy 				    zap_create_link(mos, DMU_OT_POOL_PROPS,
9645eda14cbcSMatt Macy 				    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_PROPS,
9646eda14cbcSMatt Macy 				    tx);
9647eda14cbcSMatt Macy 			}
9648eda14cbcSMatt Macy 
9649eda14cbcSMatt Macy 			/* normalize the property name */
9650c0a83fe0SMartin Matuska 			if (prop == ZPOOL_PROP_INVAL) {
9651c98ecfceSAllan Jude 				propname = elemname;
9652c98ecfceSAllan Jude 				proptype = PROP_TYPE_STRING;
9653c0a83fe0SMartin Matuska 			} else {
9654c0a83fe0SMartin Matuska 				propname = zpool_prop_to_name(prop);
9655c0a83fe0SMartin Matuska 				proptype = zpool_prop_get_type(prop);
9656c98ecfceSAllan Jude 			}
9657eda14cbcSMatt Macy 
9658eda14cbcSMatt Macy 			if (nvpair_type(elem) == DATA_TYPE_STRING) {
9659eda14cbcSMatt Macy 				ASSERT(proptype == PROP_TYPE_STRING);
9660eda14cbcSMatt Macy 				strval = fnvpair_value_string(elem);
9661eda14cbcSMatt Macy 				VERIFY0(zap_update(mos,
9662eda14cbcSMatt Macy 				    spa->spa_pool_props_object, propname,
9663eda14cbcSMatt Macy 				    1, strlen(strval) + 1, strval, tx));
9664eda14cbcSMatt Macy 				spa_history_log_internal(spa, "set", tx,
9665c98ecfceSAllan Jude 				    "%s=%s", elemname, strval);
9666eda14cbcSMatt Macy 			} else if (nvpair_type(elem) == DATA_TYPE_UINT64) {
9667eda14cbcSMatt Macy 				intval = fnvpair_value_uint64(elem);
9668eda14cbcSMatt Macy 
9669eda14cbcSMatt Macy 				if (proptype == PROP_TYPE_INDEX) {
9670eda14cbcSMatt Macy 					const char *unused;
9671eda14cbcSMatt Macy 					VERIFY0(zpool_prop_index_to_string(
9672eda14cbcSMatt Macy 					    prop, intval, &unused));
9673eda14cbcSMatt Macy 				}
9674eda14cbcSMatt Macy 				VERIFY0(zap_update(mos,
9675eda14cbcSMatt Macy 				    spa->spa_pool_props_object, propname,
9676eda14cbcSMatt Macy 				    8, 1, &intval, tx));
9677eda14cbcSMatt Macy 				spa_history_log_internal(spa, "set", tx,
9678c98ecfceSAllan Jude 				    "%s=%lld", elemname,
9679eda14cbcSMatt Macy 				    (longlong_t)intval);
9680eda14cbcSMatt Macy 
9681eda14cbcSMatt Macy 				switch (prop) {
9682eda14cbcSMatt Macy 				case ZPOOL_PROP_DELEGATION:
9683eda14cbcSMatt Macy 					spa->spa_delegation = intval;
9684eda14cbcSMatt Macy 					break;
9685eda14cbcSMatt Macy 				case ZPOOL_PROP_BOOTFS:
9686eda14cbcSMatt Macy 					spa->spa_bootfs = intval;
9687eda14cbcSMatt Macy 					break;
9688eda14cbcSMatt Macy 				case ZPOOL_PROP_FAILUREMODE:
9689eda14cbcSMatt Macy 					spa->spa_failmode = intval;
9690eda14cbcSMatt Macy 					break;
9691eda14cbcSMatt Macy 				case ZPOOL_PROP_AUTOTRIM:
9692eda14cbcSMatt Macy 					spa->spa_autotrim = intval;
9693eda14cbcSMatt Macy 					spa_async_request(spa,
9694eda14cbcSMatt Macy 					    SPA_ASYNC_AUTOTRIM_RESTART);
9695eda14cbcSMatt Macy 					break;
9696eda14cbcSMatt Macy 				case ZPOOL_PROP_AUTOEXPAND:
9697eda14cbcSMatt Macy 					spa->spa_autoexpand = intval;
9698eda14cbcSMatt Macy 					if (tx->tx_txg != TXG_INITIAL)
9699eda14cbcSMatt Macy 						spa_async_request(spa,
9700eda14cbcSMatt Macy 						    SPA_ASYNC_AUTOEXPAND);
9701eda14cbcSMatt Macy 					break;
9702eda14cbcSMatt Macy 				case ZPOOL_PROP_MULTIHOST:
9703eda14cbcSMatt Macy 					spa->spa_multihost = intval;
9704eda14cbcSMatt Macy 					break;
9705ce4dcb97SMartin Matuska 				case ZPOOL_PROP_DEDUP_TABLE_QUOTA:
9706ce4dcb97SMartin Matuska 					spa->spa_dedup_table_quota = intval;
9707ce4dcb97SMartin Matuska 					break;
9708eda14cbcSMatt Macy 				default:
9709eda14cbcSMatt Macy 					break;
9710eda14cbcSMatt Macy 				}
9711dbd5678dSMartin Matuska 			} else {
9712dbd5678dSMartin Matuska 				ASSERT(0); /* not allowed */
9713dbd5678dSMartin Matuska 			}
9714eda14cbcSMatt Macy 		}
9715eda14cbcSMatt Macy 
9716eda14cbcSMatt Macy 	}
9717eda14cbcSMatt Macy 
9718eda14cbcSMatt Macy 	mutex_exit(&spa->spa_props_lock);
9719eda14cbcSMatt Macy }
9720eda14cbcSMatt Macy 
9721eda14cbcSMatt Macy /*
9722eda14cbcSMatt Macy  * Perform one-time upgrade on-disk changes.  spa_version() does not
9723eda14cbcSMatt Macy  * reflect the new version this txg, so there must be no changes this
9724eda14cbcSMatt Macy  * txg to anything that the upgrade code depends on after it executes.
9725eda14cbcSMatt Macy  * Therefore this must be called after dsl_pool_sync() does the sync
9726eda14cbcSMatt Macy  * tasks.
9727eda14cbcSMatt Macy  */
9728eda14cbcSMatt Macy static void
9729eda14cbcSMatt Macy spa_sync_upgrades(spa_t *spa, dmu_tx_t *tx)
9730eda14cbcSMatt Macy {
9731eda14cbcSMatt Macy 	if (spa_sync_pass(spa) != 1)
9732eda14cbcSMatt Macy 		return;
9733eda14cbcSMatt Macy 
9734eda14cbcSMatt Macy 	dsl_pool_t *dp = spa->spa_dsl_pool;
9735eda14cbcSMatt Macy 	rrw_enter(&dp->dp_config_rwlock, RW_WRITER, FTAG);
9736eda14cbcSMatt Macy 
9737eda14cbcSMatt Macy 	if (spa->spa_ubsync.ub_version < SPA_VERSION_ORIGIN &&
9738eda14cbcSMatt Macy 	    spa->spa_uberblock.ub_version >= SPA_VERSION_ORIGIN) {
9739eda14cbcSMatt Macy 		dsl_pool_create_origin(dp, tx);
9740eda14cbcSMatt Macy 
9741eda14cbcSMatt Macy 		/* Keeping the origin open increases spa_minref */
9742eda14cbcSMatt Macy 		spa->spa_minref += 3;
9743eda14cbcSMatt Macy 	}
9744eda14cbcSMatt Macy 
9745eda14cbcSMatt Macy 	if (spa->spa_ubsync.ub_version < SPA_VERSION_NEXT_CLONES &&
9746eda14cbcSMatt Macy 	    spa->spa_uberblock.ub_version >= SPA_VERSION_NEXT_CLONES) {
9747eda14cbcSMatt Macy 		dsl_pool_upgrade_clones(dp, tx);
9748eda14cbcSMatt Macy 	}
9749eda14cbcSMatt Macy 
9750eda14cbcSMatt Macy 	if (spa->spa_ubsync.ub_version < SPA_VERSION_DIR_CLONES &&
9751eda14cbcSMatt Macy 	    spa->spa_uberblock.ub_version >= SPA_VERSION_DIR_CLONES) {
9752eda14cbcSMatt Macy 		dsl_pool_upgrade_dir_clones(dp, tx);
9753eda14cbcSMatt Macy 
9754eda14cbcSMatt Macy 		/* Keeping the freedir open increases spa_minref */
9755eda14cbcSMatt Macy 		spa->spa_minref += 3;
9756eda14cbcSMatt Macy 	}
9757eda14cbcSMatt Macy 
9758eda14cbcSMatt Macy 	if (spa->spa_ubsync.ub_version < SPA_VERSION_FEATURES &&
9759eda14cbcSMatt Macy 	    spa->spa_uberblock.ub_version >= SPA_VERSION_FEATURES) {
9760eda14cbcSMatt Macy 		spa_feature_create_zap_objects(spa, tx);
9761eda14cbcSMatt Macy 	}
9762eda14cbcSMatt Macy 
9763eda14cbcSMatt Macy 	/*
9764eda14cbcSMatt Macy 	 * The LZ4_COMPRESS feature's behaviour was changed to activate_on_enable
9765eda14cbcSMatt Macy 	 * when the ability to use lz4 compression for metadata was added.  Old
9766eda14cbcSMatt Macy 	 * pools that have this feature enabled must be upgraded to have this
9767eda14cbcSMatt Macy 	 * feature active.
9768eda14cbcSMatt Macy 	 */
9769eda14cbcSMatt Macy 	if (spa->spa_uberblock.ub_version >= SPA_VERSION_FEATURES) {
9770eda14cbcSMatt Macy 		boolean_t lz4_en = spa_feature_is_enabled(spa,
9771eda14cbcSMatt Macy 		    SPA_FEATURE_LZ4_COMPRESS);
9772eda14cbcSMatt Macy 		boolean_t lz4_ac = spa_feature_is_active(spa,
9773eda14cbcSMatt Macy 		    SPA_FEATURE_LZ4_COMPRESS);
9774eda14cbcSMatt Macy 
9775eda14cbcSMatt Macy 		if (lz4_en && !lz4_ac)
9776eda14cbcSMatt Macy 			spa_feature_incr(spa, SPA_FEATURE_LZ4_COMPRESS, tx);
9777eda14cbcSMatt Macy 	}
9778eda14cbcSMatt Macy 
9779eda14cbcSMatt Macy 	/*
9780eda14cbcSMatt Macy 	 * If we haven't written the salt, do so now.  Note that the
9781eda14cbcSMatt Macy 	 * feature may not be activated yet, but that's fine since
9782eda14cbcSMatt Macy 	 * the presence of this ZAP entry is backwards compatible.
9783eda14cbcSMatt Macy 	 */
9784eda14cbcSMatt Macy 	if (zap_contains(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
9785eda14cbcSMatt Macy 	    DMU_POOL_CHECKSUM_SALT) == ENOENT) {
9786eda14cbcSMatt Macy 		VERIFY0(zap_add(spa->spa_meta_objset,
9787eda14cbcSMatt Macy 		    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_CHECKSUM_SALT, 1,
9788eda14cbcSMatt Macy 		    sizeof (spa->spa_cksum_salt.zcs_bytes),
9789eda14cbcSMatt Macy 		    spa->spa_cksum_salt.zcs_bytes, tx));
9790eda14cbcSMatt Macy 	}
9791eda14cbcSMatt Macy 
9792eda14cbcSMatt Macy 	rrw_exit(&dp->dp_config_rwlock, FTAG);
9793eda14cbcSMatt Macy }
9794eda14cbcSMatt Macy 
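/*
 * Assert that a vdev's indirect mapping, births, and obsolete space map
 * are in a consistent state before we start syncing.
 */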
9795eda14cbcSMatt Macy static void
9796eda14cbcSMatt Macy vdev_indirect_state_sync_verify(vdev_t *vd)
9797eda14cbcSMatt Macy {
9798eda14cbcSMatt Macy 	vdev_indirect_mapping_t *vim __maybe_unused = vd->vdev_indirect_mapping;
9799eda14cbcSMatt Macy 	vdev_indirect_births_t *vib __maybe_unused = vd->vdev_indirect_births;
9800eda14cbcSMatt Macy 
9801eda14cbcSMatt Macy 	if (vd->vdev_ops == &vdev_indirect_ops) {
9802eda14cbcSMatt Macy 		ASSERT(vim != NULL);
9803eda14cbcSMatt Macy 		ASSERT(vib != NULL);
9804eda14cbcSMatt Macy 	}
9805eda14cbcSMatt Macy 
9806eda14cbcSMatt Macy 	uint64_t obsolete_sm_object = 0;
9807eda14cbcSMatt Macy 	ASSERT0(vdev_obsolete_sm_object(vd, &obsolete_sm_object));
9808eda14cbcSMatt Macy 	if (obsolete_sm_object != 0) {
9809eda14cbcSMatt Macy 		ASSERT(vd->vdev_obsolete_sm != NULL);
9810eda14cbcSMatt Macy 		ASSERT(vd->vdev_removing ||
9811eda14cbcSMatt Macy 		    vd->vdev_ops == &vdev_indirect_ops);
9812eda14cbcSMatt Macy 		ASSERT(vdev_indirect_mapping_num_entries(vim) > 0);
9813eda14cbcSMatt Macy 		ASSERT(vdev_indirect_mapping_bytes_mapped(vim) > 0);
9814eda14cbcSMatt Macy 		ASSERT3U(obsolete_sm_object, ==,
9815eda14cbcSMatt Macy 		    space_map_object(vd->vdev_obsolete_sm));
9816eda14cbcSMatt Macy 		ASSERT3U(vdev_indirect_mapping_bytes_mapped(vim), >=,
9817eda14cbcSMatt Macy 		    space_map_allocated(vd->vdev_obsolete_sm));
9818eda14cbcSMatt Macy 	}
9819eda14cbcSMatt Macy 	ASSERT(vd->vdev_obsolete_segments != NULL);
9820eda14cbcSMatt Macy 
9821eda14cbcSMatt Macy 	/*
9822eda14cbcSMatt Macy 	 * Since frees / remaps to an indirect vdev can only
9823eda14cbcSMatt Macy 	 * happen in syncing context, the obsolete segments
9824eda14cbcSMatt Macy 	 * tree must be empty when we start syncing.
9825eda14cbcSMatt Macy 	 */
9826eda14cbcSMatt Macy 	ASSERT0(range_tree_space(vd->vdev_obsolete_segments));
9827eda14cbcSMatt Macy }
9828eda14cbcSMatt Macy 
9829eda14cbcSMatt Macy /*
9830eda14cbcSMatt Macy  * Set the top-level vdev's max queue depth. Evaluate each top-level's
9831eda14cbcSMatt Macy  * async write queue depth in case it changed. The max queue depth will
9832eda14cbcSMatt Macy  * not change in the middle of syncing out this txg.
9833eda14cbcSMatt Macy  */
9834eda14cbcSMatt Macy static void
9835eda14cbcSMatt Macy spa_sync_adjust_vdev_max_queue_depth(spa_t *spa)
9836eda14cbcSMatt Macy {
9837eda14cbcSMatt Macy 	ASSERT(spa_writeable(spa));
9838eda14cbcSMatt Macy 
9839eda14cbcSMatt Macy 	vdev_t *rvd = spa->spa_root_vdev;
9840eda14cbcSMatt Macy 	uint32_t max_queue_depth = zfs_vdev_async_write_max_active *
9841eda14cbcSMatt Macy 	    zfs_vdev_queue_depth_pct / 100;
9842eda14cbcSMatt Macy 	metaslab_class_t *normal = spa_normal_class(spa);
9843eda14cbcSMatt Macy 	metaslab_class_t *special = spa_special_class(spa);
9844eda14cbcSMatt Macy 	metaslab_class_t *dedup = spa_dedup_class(spa);
9845eda14cbcSMatt Macy 
9846eda14cbcSMatt Macy 	uint64_t slots_per_allocator = 0;
9847eda14cbcSMatt Macy 	for (int c = 0; c < rvd->vdev_children; c++) {
9848eda14cbcSMatt Macy 		vdev_t *tvd = rvd->vdev_child[c];
9849eda14cbcSMatt Macy 
9850eda14cbcSMatt Macy 		metaslab_group_t *mg = tvd->vdev_mg;
9851eda14cbcSMatt Macy 		if (mg == NULL || !metaslab_group_initialized(mg))
9852eda14cbcSMatt Macy 			continue;
9853eda14cbcSMatt Macy 
9854eda14cbcSMatt Macy 		metaslab_class_t *mc = mg->mg_class;
9855eda14cbcSMatt Macy 		if (mc != normal && mc != special && mc != dedup)
9856eda14cbcSMatt Macy 			continue;
9857eda14cbcSMatt Macy 
9858eda14cbcSMatt Macy 		/*
9859eda14cbcSMatt Macy 		 * It is safe to do a lock-free check here because only async
9860eda14cbcSMatt Macy 		 * allocations look at mg_max_alloc_queue_depth, and async
9861eda14cbcSMatt Macy 		 * allocations all happen from spa_sync().
9862eda14cbcSMatt Macy 		 */
9863eda14cbcSMatt Macy 		for (int i = 0; i < mg->mg_allocators; i++) {
9864eda14cbcSMatt Macy 			ASSERT0(zfs_refcount_count(
9865eda14cbcSMatt Macy 			    &(mg->mg_allocator[i].mga_alloc_queue_depth)));
9866eda14cbcSMatt Macy 		}
9867eda14cbcSMatt Macy 		mg->mg_max_alloc_queue_depth = max_queue_depth;
9868eda14cbcSMatt Macy 
9869eda14cbcSMatt Macy 		for (int i = 0; i < mg->mg_allocators; i++) {
9870eda14cbcSMatt Macy 			mg->mg_allocator[i].mga_cur_max_alloc_queue_depth =
9871eda14cbcSMatt Macy 			    zfs_vdev_def_queue_depth;
9872eda14cbcSMatt Macy 		}
9873eda14cbcSMatt Macy 		slots_per_allocator += zfs_vdev_def_queue_depth;
9874eda14cbcSMatt Macy 	}
9875eda14cbcSMatt Macy 
9876eda14cbcSMatt Macy 	for (int i = 0; i < spa->spa_alloc_count; i++) {
98777877fdebSMatt Macy 		ASSERT0(zfs_refcount_count(&normal->mc_allocator[i].
98787877fdebSMatt Macy 		    mca_alloc_slots));
98797877fdebSMatt Macy 		ASSERT0(zfs_refcount_count(&special->mc_allocator[i].
98807877fdebSMatt Macy 		    mca_alloc_slots));
98817877fdebSMatt Macy 		ASSERT0(zfs_refcount_count(&dedup->mc_allocator[i].
98827877fdebSMatt Macy 		    mca_alloc_slots));
98837877fdebSMatt Macy 		normal->mc_allocator[i].mca_alloc_max_slots =
98847877fdebSMatt Macy 		    slots_per_allocator;
98857877fdebSMatt Macy 		special->mc_allocator[i].mca_alloc_max_slots =
98867877fdebSMatt Macy 		    slots_per_allocator;
98877877fdebSMatt Macy 		dedup->mc_allocator[i].mca_alloc_max_slots =
98887877fdebSMatt Macy 		    slots_per_allocator;
9889eda14cbcSMatt Macy 	}
9890eda14cbcSMatt Macy 	normal->mc_alloc_throttle_enabled = zio_dva_throttle_enabled;
9891eda14cbcSMatt Macy 	special->mc_alloc_throttle_enabled = zio_dva_throttle_enabled;
9892eda14cbcSMatt Macy 	dedup->mc_alloc_throttle_enabled = zio_dva_throttle_enabled;
9893eda14cbcSMatt Macy }
9894eda14cbcSMatt Macy 
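/*
 * Verify the indirect mapping/births state of every top-level vdev and, if
 * any indirect vdev needs it, start condensing its mapping in this txg.
 * At most one condense is started per sync.
 */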
9895eda14cbcSMatt Macy static void
9896eda14cbcSMatt Macy spa_sync_condense_indirect(spa_t *spa, dmu_tx_t *tx)
9897eda14cbcSMatt Macy {
9898eda14cbcSMatt Macy 	ASSERT(spa_writeable(spa));
9899eda14cbcSMatt Macy 
9900eda14cbcSMatt Macy 	vdev_t *rvd = spa->spa_root_vdev;
9901eda14cbcSMatt Macy 	for (int c = 0; c < rvd->vdev_children; c++) {
9902eda14cbcSMatt Macy 		vdev_t *vd = rvd->vdev_child[c];
9903eda14cbcSMatt Macy 		vdev_indirect_state_sync_verify(vd);
9904eda14cbcSMatt Macy 
9905eda14cbcSMatt Macy 		if (vdev_indirect_should_condense(vd)) {
9906eda14cbcSMatt Macy 			spa_condense_indirect_start_sync(vd, tx);
9907eda14cbcSMatt Macy 			break;
9908eda14cbcSMatt Macy 		}
9909eda14cbcSMatt Macy 	}
9910eda14cbcSMatt Macy }
9911eda14cbcSMatt Macy 
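/*
 * Run sync passes until the pool converges: each pass syncs the config, aux
 * devices, error log, DSL pool, frees (or deferred frees), BRT, DDT, scan
 * state, and dirty vdevs, and we repeat while the MOS is still dirty.
 * Pass 1 may detect that nothing changed, in which case the txg is a no-op
 * and we stop early.
 */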
9912eda14cbcSMatt Macy static void
9913eda14cbcSMatt Macy spa_sync_iterate_to_convergence(spa_t *spa, dmu_tx_t *tx)
9914eda14cbcSMatt Macy {
9915eda14cbcSMatt Macy 	objset_t *mos = spa->spa_meta_objset;
9916eda14cbcSMatt Macy 	dsl_pool_t *dp = spa->spa_dsl_pool;
9917eda14cbcSMatt Macy 	uint64_t txg = tx->tx_txg;
9918eda14cbcSMatt Macy 	bplist_t *free_bpl = &spa->spa_free_bplist[txg & TXG_MASK];
9919eda14cbcSMatt Macy 
9920eda14cbcSMatt Macy 	do {
9921eda14cbcSMatt Macy 		int pass = ++spa->spa_sync_pass;
9922eda14cbcSMatt Macy 
9923eda14cbcSMatt Macy 		spa_sync_config_object(spa, tx);
9924eda14cbcSMatt Macy 		spa_sync_aux_dev(spa, &spa->spa_spares, tx,
9925eda14cbcSMatt Macy 		    ZPOOL_CONFIG_SPARES, DMU_POOL_SPARES);
9926eda14cbcSMatt Macy 		spa_sync_aux_dev(spa, &spa->spa_l2cache, tx,
9927eda14cbcSMatt Macy 		    ZPOOL_CONFIG_L2CACHE, DMU_POOL_L2CACHE);
9928eda14cbcSMatt Macy 		spa_errlog_sync(spa, txg);
9929eda14cbcSMatt Macy 		dsl_pool_sync(dp, txg);
9930eda14cbcSMatt Macy 
9931eda14cbcSMatt Macy 		if (pass < zfs_sync_pass_deferred_free ||
9932eda14cbcSMatt Macy 		    spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP)) {
9933eda14cbcSMatt Macy 			/*
9934eda14cbcSMatt Macy 			 * If the log space map feature is active we don't
9935eda14cbcSMatt Macy 			 * care about deferred frees and the deferred bpobj
9936eda14cbcSMatt Macy 			 * as the log space map should effectively have the
9937eda14cbcSMatt Macy 			 * same results (i.e. appending only to one object).
9938eda14cbcSMatt Macy 			 */
9939eda14cbcSMatt Macy 			spa_sync_frees(spa, free_bpl, tx);
9940eda14cbcSMatt Macy 		} else {
9941eda14cbcSMatt Macy 			/*
9942eda14cbcSMatt Macy 			 * We cannot defer frees in pass 1, because
9943eda14cbcSMatt Macy 			 * we sync the deferred frees later in pass 1.
9944eda14cbcSMatt Macy 			 */
9945eda14cbcSMatt Macy 			ASSERT3U(pass, >, 1);
9946eda14cbcSMatt Macy 			bplist_iterate(free_bpl, bpobj_enqueue_alloc_cb,
9947eda14cbcSMatt Macy 			    &spa->spa_deferred_bpobj, tx);
9948eda14cbcSMatt Macy 		}
9949eda14cbcSMatt Macy 
99502a58b312SMartin Matuska 		brt_sync(spa, txg);
9951eda14cbcSMatt Macy 		ddt_sync(spa, txg);
9952eda14cbcSMatt Macy 		dsl_scan_sync(dp, tx);
9953c0a83fe0SMartin Matuska 		dsl_errorscrub_sync(dp, tx);
9954eda14cbcSMatt Macy 		svr_sync(spa, tx);
9955eda14cbcSMatt Macy 		spa_sync_upgrades(spa, tx);
9956eda14cbcSMatt Macy 
9957eda14cbcSMatt Macy 		spa_flush_metaslabs(spa, tx);
9958eda14cbcSMatt Macy 
9959eda14cbcSMatt Macy 		vdev_t *vd = NULL;
9960eda14cbcSMatt Macy 		while ((vd = txg_list_remove(&spa->spa_vdev_txg_list, txg))
9961eda14cbcSMatt Macy 		    != NULL)
9962eda14cbcSMatt Macy 			vdev_sync(vd, txg);
9963eda14cbcSMatt Macy 
9964e716630dSMartin Matuska 		if (pass == 1) {
9965e716630dSMartin Matuska 			/*
9966e716630dSMartin Matuska 			 * dsl_pool_sync() -> dp_sync_tasks may have dirtied
9967e716630dSMartin Matuska 			 * the config. If that happens, this txg should not
9968e716630dSMartin Matuska 			 * be a no-op. So we must sync the config to the MOS
9969e716630dSMartin Matuska 			 * before checking for no-op.
9970e716630dSMartin Matuska 			 *
9971e716630dSMartin Matuska 			 * Note that when the config is dirty, it will
9972e716630dSMartin Matuska 			 * be written to the MOS (i.e. the MOS will be
9973e716630dSMartin Matuska 			 * dirtied) every time we call spa_sync_config_object()
9974e716630dSMartin Matuska 			 * in this txg.  Therefore we can't call this after
9975e716630dSMartin Matuska 			 * dsl_pool_sync() every pass, because it would
9976e716630dSMartin Matuska 			 * prevent us from converging, since we'd dirty
9977e716630dSMartin Matuska 			 * the MOS every pass.
9978e716630dSMartin Matuska 			 *
9979e716630dSMartin Matuska 			 * Sync tasks can only be processed in pass 1, so
9980e716630dSMartin Matuska 			 * there's no need to do this in later passes.
9981e716630dSMartin Matuska 			 */
9982e716630dSMartin Matuska 			spa_sync_config_object(spa, tx);
9983e716630dSMartin Matuska 		}
9984e716630dSMartin Matuska 
9985eda14cbcSMatt Macy 		/*
9986eda14cbcSMatt Macy 		 * Note: We need to check if the MOS is dirty because we could
9987eda14cbcSMatt Macy 		 * have marked the MOS dirty without updating the uberblock
9988eda14cbcSMatt Macy 		 * (e.g. if we have sync tasks but no dirty user data). We need
9989eda14cbcSMatt Macy 		 * to check the uberblock's rootbp because it is updated if we
9990eda14cbcSMatt Macy 		 * have synced out dirty data (though in this case the MOS will
9991eda14cbcSMatt Macy 		 * most likely also be dirty due to second order effects, we
9992eda14cbcSMatt Macy 		 * don't want to rely on that here).
9993eda14cbcSMatt Macy 		 */
9994eda14cbcSMatt Macy 		if (pass == 1 &&
9995783d3ff6SMartin Matuska 		    BP_GET_LOGICAL_BIRTH(&spa->spa_uberblock.ub_rootbp) < txg &&
9996eda14cbcSMatt Macy 		    !dmu_objset_is_dirty(mos, txg)) {
9997eda14cbcSMatt Macy 			/*
9998eda14cbcSMatt Macy 			 * Nothing changed on the first pass, therefore this
9999eda14cbcSMatt Macy 			 * TXG is a no-op. Avoid syncing deferred frees, so
10000eda14cbcSMatt Macy 			 * that we can keep this TXG as a no-op.
10001eda14cbcSMatt Macy 			 */
10002eda14cbcSMatt Macy 			ASSERT(txg_list_empty(&dp->dp_dirty_datasets, txg));
10003eda14cbcSMatt Macy 			ASSERT(txg_list_empty(&dp->dp_dirty_dirs, txg));
10004eda14cbcSMatt Macy 			ASSERT(txg_list_empty(&dp->dp_sync_tasks, txg));
10005eda14cbcSMatt Macy 			ASSERT(txg_list_empty(&dp->dp_early_sync_tasks, txg));
10006eda14cbcSMatt Macy 			break;
10007eda14cbcSMatt Macy 		}
10008eda14cbcSMatt Macy 
10009eda14cbcSMatt Macy 		spa_sync_deferred_frees(spa, tx);
10010eda14cbcSMatt Macy 	} while (dmu_objset_is_dirty(mos, txg));
10011eda14cbcSMatt Macy }
10012eda14cbcSMatt Macy 
10013eda14cbcSMatt Macy /*
10014eda14cbcSMatt Macy  * Rewrite the vdev configuration (which includes the uberblock) to
10015eda14cbcSMatt Macy  * commit the transaction group.
10016eda14cbcSMatt Macy  *
10017eda14cbcSMatt Macy  * If there are no dirty vdevs, we sync the uberblock to a few random
10018eda14cbcSMatt Macy  * top-level vdevs that are known to be visible in the config cache
10019eda14cbcSMatt Macy  * (see spa_vdev_add() for a complete description). If there *are* dirty
10020eda14cbcSMatt Macy  * vdevs, sync the uberblock to all vdevs.
10021eda14cbcSMatt Macy  */
10022eda14cbcSMatt Macy static void
10023eda14cbcSMatt Macy spa_sync_rewrite_vdev_config(spa_t *spa, dmu_tx_t *tx)
10024eda14cbcSMatt Macy {
10025eda14cbcSMatt Macy 	vdev_t *rvd = spa->spa_root_vdev;
10026eda14cbcSMatt Macy 	uint64_t txg = tx->tx_txg;
10027eda14cbcSMatt Macy 
10028eda14cbcSMatt Macy 	for (;;) {
10029eda14cbcSMatt Macy 		int error = 0;
10030eda14cbcSMatt Macy 
10031eda14cbcSMatt Macy 		/*
10032eda14cbcSMatt Macy 		 * We hold SCL_STATE to prevent vdev open/close/etc.
10033eda14cbcSMatt Macy 		 * while we're attempting to write the vdev labels.
10034eda14cbcSMatt Macy 		 */
10035eda14cbcSMatt Macy 		spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
10036eda14cbcSMatt Macy 
10037eda14cbcSMatt Macy 		if (list_is_empty(&spa->spa_config_dirty_list)) {
10038eda14cbcSMatt Macy 			vdev_t *svd[SPA_SYNC_MIN_VDEVS] = { NULL };
10039eda14cbcSMatt Macy 			int svdcount = 0;
10040eda14cbcSMatt Macy 			int children = rvd->vdev_children;
1004133b8c039SMartin Matuska 			int c0 = random_in_range(children);
10042eda14cbcSMatt Macy 
10043eda14cbcSMatt Macy 			for (int c = 0; c < children; c++) {
10044eda14cbcSMatt Macy 				vdev_t *vd =
10045eda14cbcSMatt Macy 				    rvd->vdev_child[(c0 + c) % children];
10046eda14cbcSMatt Macy 
10047eda14cbcSMatt Macy 				/* Stop when revisiting the first vdev */
10048eda14cbcSMatt Macy 				if (c > 0 && svd[0] == vd)
10049eda14cbcSMatt Macy 					break;
10050eda14cbcSMatt Macy 
10051eda14cbcSMatt Macy 				if (vd->vdev_ms_array == 0 ||
10052eda14cbcSMatt Macy 				    vd->vdev_islog ||
10053eda14cbcSMatt Macy 				    !vdev_is_concrete(vd))
10054eda14cbcSMatt Macy 					continue;
10055eda14cbcSMatt Macy 
10056eda14cbcSMatt Macy 				svd[svdcount++] = vd;
10057eda14cbcSMatt Macy 				if (svdcount == SPA_SYNC_MIN_VDEVS)
10058eda14cbcSMatt Macy 					break;
10059eda14cbcSMatt Macy 			}
10060eda14cbcSMatt Macy 			error = vdev_config_sync(svd, svdcount, txg);
10061eda14cbcSMatt Macy 		} else {
10062eda14cbcSMatt Macy 			error = vdev_config_sync(rvd->vdev_child,
10063eda14cbcSMatt Macy 			    rvd->vdev_children, txg);
10064eda14cbcSMatt Macy 		}
10065eda14cbcSMatt Macy 
10066eda14cbcSMatt Macy 		if (error == 0)
10067eda14cbcSMatt Macy 			spa->spa_last_synced_guid = rvd->vdev_guid;
10068eda14cbcSMatt Macy 
10069eda14cbcSMatt Macy 		spa_config_exit(spa, SCL_STATE, FTAG);
10070eda14cbcSMatt Macy 
10071eda14cbcSMatt Macy 		if (error == 0)
10072eda14cbcSMatt Macy 			break;
10073eda14cbcSMatt Macy 		zio_suspend(spa, NULL, ZIO_SUSPEND_IOERR);
10074eda14cbcSMatt Macy 		zio_resume_wait(spa);
10075eda14cbcSMatt Macy 	}
10076eda14cbcSMatt Macy }
10077eda14cbcSMatt Macy 
10078eda14cbcSMatt Macy /*
10079eda14cbcSMatt Macy  * Sync the specified transaction group.  New blocks may be dirtied as
10080eda14cbcSMatt Macy  * part of the process, so we iterate until it converges.
10081eda14cbcSMatt Macy  */
10082eda14cbcSMatt Macy void
10083eda14cbcSMatt Macy spa_sync(spa_t *spa, uint64_t txg)
10084eda14cbcSMatt Macy {
10085eda14cbcSMatt Macy 	vdev_t *vd = NULL;
10086eda14cbcSMatt Macy 
10087eda14cbcSMatt Macy 	VERIFY(spa_writeable(spa));
10088eda14cbcSMatt Macy 
10089eda14cbcSMatt Macy 	/*
10090eda14cbcSMatt Macy 	 * Wait for i/os issued in open context that need to complete
10091eda14cbcSMatt Macy 	 * before this txg syncs.
10092eda14cbcSMatt Macy 	 */
10093eda14cbcSMatt Macy 	(void) zio_wait(spa->spa_txg_zio[txg & TXG_MASK]);
10094eda14cbcSMatt Macy 	spa->spa_txg_zio[txg & TXG_MASK] = zio_root(spa, NULL, NULL,
10095eda14cbcSMatt Macy 	    ZIO_FLAG_CANFAIL);
10096eda14cbcSMatt Macy 
10097eda14cbcSMatt Macy 	/*
100982a58b312SMartin Matuska 	 * Now that there can be no more cloning in this transaction group,
100992a58b312SMartin Matuska 	 * and we have not yet issued any frees, we can process the pending
101002a58b312SMartin Matuska 	 * BRT updates.
101012a58b312SMartin Matuska 	 */
101022a58b312SMartin Matuska 	brt_pending_apply(spa, txg);
101032a58b312SMartin Matuska 
101042a58b312SMartin Matuska 	/*
10105eda14cbcSMatt Macy 	 * Lock out configuration changes.
10106eda14cbcSMatt Macy 	 */
10107eda14cbcSMatt Macy 	spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
10108eda14cbcSMatt Macy 
10109eda14cbcSMatt Macy 	spa->spa_syncing_txg = txg;
10110eda14cbcSMatt Macy 	spa->spa_sync_pass = 0;
10111eda14cbcSMatt Macy 
10112eda14cbcSMatt Macy 	for (int i = 0; i < spa->spa_alloc_count; i++) {
101133f9d360cSMartin Matuska 		mutex_enter(&spa->spa_allocs[i].spaa_lock);
101143f9d360cSMartin Matuska 		VERIFY0(avl_numnodes(&spa->spa_allocs[i].spaa_tree));
101153f9d360cSMartin Matuska 		mutex_exit(&spa->spa_allocs[i].spaa_lock);
10116eda14cbcSMatt Macy 	}
10117eda14cbcSMatt Macy 
10118eda14cbcSMatt Macy 	/*
10119eda14cbcSMatt Macy 	 * If there are any pending vdev state changes, convert them
10120eda14cbcSMatt Macy 	 * into config changes that go out with this transaction group.
10121eda14cbcSMatt Macy 	 */
10122eda14cbcSMatt Macy 	spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
101232a58b312SMartin Matuska 	while ((vd = list_head(&spa->spa_state_dirty_list)) != NULL) {
101242a58b312SMartin Matuska 		/* Avoid holding the write lock unless actually necessary */
101252a58b312SMartin Matuska 		if (vd->vdev_aux == NULL) {
101262a58b312SMartin Matuska 			vdev_state_clean(vd);
101272a58b312SMartin Matuska 			vdev_config_dirty(vd);
101282a58b312SMartin Matuska 			continue;
101292a58b312SMartin Matuska 		}
10130eda14cbcSMatt Macy 		/*
10131eda14cbcSMatt Macy 		 * We need the write lock here because, for aux vdevs,
10132eda14cbcSMatt Macy 		 * calling vdev_config_dirty() modifies sav_config.
10133eda14cbcSMatt Macy 		 * This is ugly and will become unnecessary when we
10134eda14cbcSMatt Macy 		 * eliminate the aux vdev wart by integrating all vdevs
10135eda14cbcSMatt Macy 		 * into the root vdev tree.
10136eda14cbcSMatt Macy 		 */
10137eda14cbcSMatt Macy 		spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
10138eda14cbcSMatt Macy 		spa_config_enter(spa, SCL_CONFIG | SCL_STATE, FTAG, RW_WRITER);
10139eda14cbcSMatt Macy 		while ((vd = list_head(&spa->spa_state_dirty_list)) != NULL) {
10140eda14cbcSMatt Macy 			vdev_state_clean(vd);
10141eda14cbcSMatt Macy 			vdev_config_dirty(vd);
10142eda14cbcSMatt Macy 		}
10143eda14cbcSMatt Macy 		spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
10144eda14cbcSMatt Macy 		spa_config_enter(spa, SCL_CONFIG | SCL_STATE, FTAG, RW_READER);
10145eda14cbcSMatt Macy 	}
10146eda14cbcSMatt Macy 	spa_config_exit(spa, SCL_STATE, FTAG);
10147eda14cbcSMatt Macy 
10148eda14cbcSMatt Macy 	dsl_pool_t *dp = spa->spa_dsl_pool;
10149eda14cbcSMatt Macy 	dmu_tx_t *tx = dmu_tx_create_assigned(dp, txg);
10150eda14cbcSMatt Macy 
10151eda14cbcSMatt Macy 	spa->spa_sync_starttime = gethrtime();
10152eda14cbcSMatt Macy 	taskq_cancel_id(system_delay_taskq, spa->spa_deadman_tqid);
10153eda14cbcSMatt Macy 	spa->spa_deadman_tqid = taskq_dispatch_delay(system_delay_taskq,
10154eda14cbcSMatt Macy 	    spa_deadman, spa, TQ_SLEEP, ddi_get_lbolt() +
10155eda14cbcSMatt Macy 	    NSEC_TO_TICK(spa->spa_deadman_synctime));
10156eda14cbcSMatt Macy 
10157eda14cbcSMatt Macy 	/*
10158eda14cbcSMatt Macy 	 * If we are upgrading to SPA_VERSION_RAIDZ_DEFLATE this txg,
10159eda14cbcSMatt Macy 	 * set spa_deflate if we have no raid-z vdevs.
10160eda14cbcSMatt Macy 	 */
10161eda14cbcSMatt Macy 	if (spa->spa_ubsync.ub_version < SPA_VERSION_RAIDZ_DEFLATE &&
10162eda14cbcSMatt Macy 	    spa->spa_uberblock.ub_version >= SPA_VERSION_RAIDZ_DEFLATE) {
10163eda14cbcSMatt Macy 		vdev_t *rvd = spa->spa_root_vdev;
10164eda14cbcSMatt Macy 
10165eda14cbcSMatt Macy 		int i;
10166eda14cbcSMatt Macy 		for (i = 0; i < rvd->vdev_children; i++) {
10167eda14cbcSMatt Macy 			vd = rvd->vdev_child[i];
10168eda14cbcSMatt Macy 			if (vd->vdev_deflate_ratio != SPA_MINBLOCKSIZE)
10169eda14cbcSMatt Macy 				break;
10170eda14cbcSMatt Macy 		}
10171eda14cbcSMatt Macy 		if (i == rvd->vdev_children) {
10172eda14cbcSMatt Macy 			spa->spa_deflate = TRUE;
10173eda14cbcSMatt Macy 			VERIFY0(zap_add(spa->spa_meta_objset,
10174eda14cbcSMatt Macy 			    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_DEFLATE,
10175eda14cbcSMatt Macy 			    sizeof (uint64_t), 1, &spa->spa_deflate, tx));
10176eda14cbcSMatt Macy 		}
10177eda14cbcSMatt Macy 	}
10178eda14cbcSMatt Macy 
10179eda14cbcSMatt Macy 	spa_sync_adjust_vdev_max_queue_depth(spa);
10180eda14cbcSMatt Macy 
10181eda14cbcSMatt Macy 	spa_sync_condense_indirect(spa, tx);
10182eda14cbcSMatt Macy 
10183eda14cbcSMatt Macy 	spa_sync_iterate_to_convergence(spa, tx);
10184eda14cbcSMatt Macy 
10185eda14cbcSMatt Macy #ifdef ZFS_DEBUG
10186eda14cbcSMatt Macy 	if (!list_is_empty(&spa->spa_config_dirty_list)) {
10187eda14cbcSMatt Macy 		/*
10188eda14cbcSMatt Macy 		 * Make sure that the number of ZAPs for all the vdevs matches
10189eda14cbcSMatt Macy 		 * the number of ZAPs in the per-vdev ZAP list. This only gets
10190eda14cbcSMatt Macy 		 * called if the config is dirty; otherwise there may be
10191eda14cbcSMatt Macy 		 * outstanding AVZ operations that weren't completed in
10192eda14cbcSMatt Macy 		 * spa_sync_config_object.
10193eda14cbcSMatt Macy 		 */
10194eda14cbcSMatt Macy 		uint64_t all_vdev_zap_entry_count;
10195eda14cbcSMatt Macy 		ASSERT0(zap_count(spa->spa_meta_objset,
10196eda14cbcSMatt Macy 		    spa->spa_all_vdev_zaps, &all_vdev_zap_entry_count));
10197eda14cbcSMatt Macy 		ASSERT3U(vdev_count_verify_zaps(spa->spa_root_vdev), ==,
10198eda14cbcSMatt Macy 		    all_vdev_zap_entry_count);
10199eda14cbcSMatt Macy 	}
10200eda14cbcSMatt Macy #endif
10201eda14cbcSMatt Macy 
10202eda14cbcSMatt Macy 	if (spa->spa_vdev_removal != NULL) {
10203eda14cbcSMatt Macy 		ASSERT0(spa->spa_vdev_removal->svr_bytes_done[txg & TXG_MASK]);
10204eda14cbcSMatt Macy 	}
10205eda14cbcSMatt Macy 
10206eda14cbcSMatt Macy 	spa_sync_rewrite_vdev_config(spa, tx);
10207eda14cbcSMatt Macy 	dmu_tx_commit(tx);
10208eda14cbcSMatt Macy 
10209eda14cbcSMatt Macy 	taskq_cancel_id(system_delay_taskq, spa->spa_deadman_tqid);
10210eda14cbcSMatt Macy 	spa->spa_deadman_tqid = 0;
10211eda14cbcSMatt Macy 
10212eda14cbcSMatt Macy 	/*
10213eda14cbcSMatt Macy 	 * Clear the dirty config list.
10214eda14cbcSMatt Macy 	 */
10215eda14cbcSMatt Macy 	while ((vd = list_head(&spa->spa_config_dirty_list)) != NULL)
10216eda14cbcSMatt Macy 		vdev_config_clean(vd);
10217eda14cbcSMatt Macy 
10218eda14cbcSMatt Macy 	/*
10219eda14cbcSMatt Macy 	 * Now that the new config has synced transactionally,
10220eda14cbcSMatt Macy 	 * let it become visible to the config cache.
10221eda14cbcSMatt Macy 	 */
10222eda14cbcSMatt Macy 	if (spa->spa_config_syncing != NULL) {
10223eda14cbcSMatt Macy 		spa_config_set(spa, spa->spa_config_syncing);
10224eda14cbcSMatt Macy 		spa->spa_config_txg = txg;
10225eda14cbcSMatt Macy 		spa->spa_config_syncing = NULL;
10226eda14cbcSMatt Macy 	}
10227eda14cbcSMatt Macy 
10228eda14cbcSMatt Macy 	dsl_pool_sync_done(dp, txg);
10229eda14cbcSMatt Macy 
10230eda14cbcSMatt Macy 	for (int i = 0; i < spa->spa_alloc_count; i++) {
102313f9d360cSMartin Matuska 		mutex_enter(&spa->spa_allocs[i].spaa_lock);
102323f9d360cSMartin Matuska 		VERIFY0(avl_numnodes(&spa->spa_allocs[i].spaa_tree));
102333f9d360cSMartin Matuska 		mutex_exit(&spa->spa_allocs[i].spaa_lock);
10234eda14cbcSMatt Macy 	}
10235eda14cbcSMatt Macy 
10236eda14cbcSMatt Macy 	/*
10237eda14cbcSMatt Macy 	 * Update usable space statistics.
10238eda14cbcSMatt Macy 	 */
10239eda14cbcSMatt Macy 	while ((vd = txg_list_remove(&spa->spa_vdev_txg_list, TXG_CLEAN(txg)))
10240eda14cbcSMatt Macy 	    != NULL)
10241eda14cbcSMatt Macy 		vdev_sync_done(vd, txg);
10242eda14cbcSMatt Macy 
10243eda14cbcSMatt Macy 	metaslab_class_evict_old(spa->spa_normal_class, txg);
10244eda14cbcSMatt Macy 	metaslab_class_evict_old(spa->spa_log_class, txg);
10245aca928a5SMartin Matuska 	/* spa_embedded_log_class has only one metaslab per vdev. */
10246aca928a5SMartin Matuska 	metaslab_class_evict_old(spa->spa_special_class, txg);
10247aca928a5SMartin Matuska 	metaslab_class_evict_old(spa->spa_dedup_class, txg);
10248eda14cbcSMatt Macy 
10249eda14cbcSMatt Macy 	spa_sync_close_syncing_log_sm(spa);
10250eda14cbcSMatt Macy 
10251eda14cbcSMatt Macy 	spa_update_dspace(spa);
10252eda14cbcSMatt Macy 
102532a58b312SMartin Matuska 	if (spa_get_autotrim(spa) == SPA_AUTOTRIM_ON)
102542a58b312SMartin Matuska 		vdev_autotrim_kick(spa);
102552a58b312SMartin Matuska 
10256eda14cbcSMatt Macy 	/*
10257eda14cbcSMatt Macy 	 * It had better be the case that we didn't dirty anything
10258eda14cbcSMatt Macy 	 * since vdev_config_sync().
10259eda14cbcSMatt Macy 	 */
10260eda14cbcSMatt Macy 	ASSERT(txg_list_empty(&dp->dp_dirty_datasets, txg));
10261eda14cbcSMatt Macy 	ASSERT(txg_list_empty(&dp->dp_dirty_dirs, txg));
10262eda14cbcSMatt Macy 	ASSERT(txg_list_empty(&spa->spa_vdev_txg_list, txg));
10263eda14cbcSMatt Macy 
10264eda14cbcSMatt Macy 	while (zfs_pause_spa_sync)
10265eda14cbcSMatt Macy 		delay(1);
10266eda14cbcSMatt Macy 
10267eda14cbcSMatt Macy 	spa->spa_sync_pass = 0;
10268eda14cbcSMatt Macy 
10269eda14cbcSMatt Macy 	/*
10270eda14cbcSMatt Macy 	 * Update the last synced uberblock here. We want to do this at
10271eda14cbcSMatt Macy 	 * the end of spa_sync() so that consumers of spa_last_synced_txg()
10272eda14cbcSMatt Macy 	 * will be guaranteed that all the processing associated with
10273eda14cbcSMatt Macy 	 * that txg has been completed.
10274eda14cbcSMatt Macy 	 */
10275eda14cbcSMatt Macy 	spa->spa_ubsync = spa->spa_uberblock;
10276eda14cbcSMatt Macy 	spa_config_exit(spa, SCL_CONFIG, FTAG);
10277eda14cbcSMatt Macy 
10278eda14cbcSMatt Macy 	spa_handle_ignored_writes(spa);
10279eda14cbcSMatt Macy 
10280eda14cbcSMatt Macy 	/*
10281eda14cbcSMatt Macy 	 * If any async tasks have been requested, kick them off.
10282eda14cbcSMatt Macy 	 */
10283eda14cbcSMatt Macy 	spa_async_dispatch(spa);
10284eda14cbcSMatt Macy }
10285eda14cbcSMatt Macy 
10286eda14cbcSMatt Macy /*
10287eda14cbcSMatt Macy  * Sync all pools.  We don't want to hold the namespace lock across these
10288eda14cbcSMatt Macy  * operations, so we take a reference on the spa_t and drop the lock during the
10289eda14cbcSMatt Macy  * sync.
10290eda14cbcSMatt Macy  */
10291eda14cbcSMatt Macy void
10292eda14cbcSMatt Macy spa_sync_allpools(void)
10293eda14cbcSMatt Macy {
10294eda14cbcSMatt Macy 	spa_t *spa = NULL;
10295eda14cbcSMatt Macy 	mutex_enter(&spa_namespace_lock);
10296eda14cbcSMatt Macy 	while ((spa = spa_next(spa)) != NULL) {
10297eda14cbcSMatt Macy 		if (spa_state(spa) != POOL_STATE_ACTIVE ||
10298eda14cbcSMatt Macy 		    !spa_writeable(spa) || spa_suspended(spa))
10299eda14cbcSMatt Macy 			continue;
10300eda14cbcSMatt Macy 		spa_open_ref(spa, FTAG);
10301eda14cbcSMatt Macy 		mutex_exit(&spa_namespace_lock);
10302eda14cbcSMatt Macy 		txg_wait_synced(spa_get_dsl(spa), 0);
10303eda14cbcSMatt Macy 		mutex_enter(&spa_namespace_lock);
10304eda14cbcSMatt Macy 		spa_close(spa, FTAG);
10305eda14cbcSMatt Macy 	}
10306eda14cbcSMatt Macy 	mutex_exit(&spa_namespace_lock);
10307eda14cbcSMatt Macy }
10308eda14cbcSMatt Macy 
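/*
 * Create the pool's sync taskq with one thread per allocator, and remember
 * each worker kthread so that writes issued from sync context can later be
 * matched to the allocator owned by the issuing thread.
 */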
1030914c2e0a0SMartin Matuska taskq_t *
1031014c2e0a0SMartin Matuska spa_sync_tq_create(spa_t *spa, const char *name)
1031114c2e0a0SMartin Matuska {
1031214c2e0a0SMartin Matuska 	kthread_t **kthreads;
1031314c2e0a0SMartin Matuska 
1031414c2e0a0SMartin Matuska 	ASSERT(spa->spa_sync_tq == NULL);
1031514c2e0a0SMartin Matuska 	ASSERT3S(spa->spa_alloc_count, <=, boot_ncpus);
1031614c2e0a0SMartin Matuska 
1031714c2e0a0SMartin Matuska 	/*
1031814c2e0a0SMartin Matuska 	 * - do not allow more allocators than cpus.
1031914c2e0a0SMartin Matuska 	 * - there may be more cpus than allocators.
1032014c2e0a0SMartin Matuska 	 * - do not allow more sync taskq threads than allocators or cpus.
1032114c2e0a0SMartin Matuska 	 */
1032214c2e0a0SMartin Matuska 	int nthreads = spa->spa_alloc_count;
1032314c2e0a0SMartin Matuska 	spa->spa_syncthreads = kmem_zalloc(sizeof (spa_syncthread_info_t) *
1032414c2e0a0SMartin Matuska 	    nthreads, KM_SLEEP);
1032514c2e0a0SMartin Matuska 
1032614c2e0a0SMartin Matuska 	spa->spa_sync_tq = taskq_create_synced(name, nthreads, minclsyspri,
1032714c2e0a0SMartin Matuska 	    nthreads, INT_MAX, TASKQ_PREPOPULATE, &kthreads);
1032814c2e0a0SMartin Matuska 	VERIFY(spa->spa_sync_tq != NULL);
1032914c2e0a0SMartin Matuska 	VERIFY(kthreads != NULL);
1033014c2e0a0SMartin Matuska 
1033114c2e0a0SMartin Matuska 	spa_syncthread_info_t *ti = spa->spa_syncthreads;
10332b985c9caSMartin Matuska 	for (int i = 0; i < nthreads; i++, ti++) {
1033314c2e0a0SMartin Matuska 		ti->sti_thread = kthreads[i];
10334b985c9caSMartin Matuska 		ti->sti_allocator = i;
1033514c2e0a0SMartin Matuska 	}
1033614c2e0a0SMartin Matuska 
1033714c2e0a0SMartin Matuska 	kmem_free(kthreads, sizeof (*kthreads) * nthreads);
1033814c2e0a0SMartin Matuska 	return (spa->spa_sync_tq);
1033914c2e0a0SMartin Matuska }
1034014c2e0a0SMartin Matuska 
1034114c2e0a0SMartin Matuska void
1034214c2e0a0SMartin Matuska spa_sync_tq_destroy(spa_t *spa)
1034314c2e0a0SMartin Matuska {
1034414c2e0a0SMartin Matuska 	ASSERT(spa->spa_sync_tq != NULL);
1034514c2e0a0SMartin Matuska 
1034614c2e0a0SMartin Matuska 	taskq_wait(spa->spa_sync_tq);
1034714c2e0a0SMartin Matuska 	taskq_destroy(spa->spa_sync_tq);
1034814c2e0a0SMartin Matuska 	kmem_free(spa->spa_syncthreads,
1034914c2e0a0SMartin Matuska 	    sizeof (spa_syncthread_info_t) * spa->spa_alloc_count);
1035014c2e0a0SMartin Matuska 	spa->spa_sync_tq = NULL;
1035114c2e0a0SMartin Matuska }
1035214c2e0a0SMartin Matuska 
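/*
 * Acquire an unused allocator for the calling sync thread.  A rotor spreads
 * the allocators across callers; the chosen allocator is marked in use and
 * recorded in the caller's spa_syncthread_info_t so that
 * spa_select_allocator() can find it.
 */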
10353b985c9caSMartin Matuska uint_t
10354b985c9caSMartin Matuska spa_acq_allocator(spa_t *spa)
10355b985c9caSMartin Matuska {
10356b985c9caSMartin Matuska 	int i;
10357b985c9caSMartin Matuska 
10358b985c9caSMartin Matuska 	if (spa->spa_alloc_count == 1)
10359b985c9caSMartin Matuska 		return (0);
10360b985c9caSMartin Matuska 
10361b985c9caSMartin Matuska 	mutex_enter(&spa->spa_allocs_use->sau_lock);
10362b985c9caSMartin Matuska 	uint_t r = spa->spa_allocs_use->sau_rotor;
10363b985c9caSMartin Matuska 	do {
10364b985c9caSMartin Matuska 		if (++r == spa->spa_alloc_count)
10365b985c9caSMartin Matuska 			r = 0;
10366b985c9caSMartin Matuska 	} while (spa->spa_allocs_use->sau_inuse[r]);
10367b985c9caSMartin Matuska 	spa->spa_allocs_use->sau_inuse[r] = B_TRUE;
10368b985c9caSMartin Matuska 	spa->spa_allocs_use->sau_rotor = r;
10369b985c9caSMartin Matuska 	mutex_exit(&spa->spa_allocs_use->sau_lock);
10370b985c9caSMartin Matuska 
10371b985c9caSMartin Matuska 	spa_syncthread_info_t *ti = spa->spa_syncthreads;
10372b985c9caSMartin Matuska 	for (i = 0; i < spa->spa_alloc_count; i++, ti++) {
10373b985c9caSMartin Matuska 		if (ti->sti_thread == curthread) {
10374b985c9caSMartin Matuska 			ti->sti_allocator = r;
10375b985c9caSMartin Matuska 			break;
10376b985c9caSMartin Matuska 		}
10377b985c9caSMartin Matuska 	}
10378b985c9caSMartin Matuska 	ASSERT3S(i, <, spa->spa_alloc_count);
10379b985c9caSMartin Matuska 	return (r);
10380b985c9caSMartin Matuska }
10381b985c9caSMartin Matuska 
10382b985c9caSMartin Matuska void
10383b985c9caSMartin Matuska spa_rel_allocator(spa_t *spa, uint_t allocator)
10384b985c9caSMartin Matuska {
10385b985c9caSMartin Matuska 	if (spa->spa_alloc_count > 1)
10386b985c9caSMartin Matuska 		spa->spa_allocs_use->sau_inuse[allocator] = B_FALSE;
10387b985c9caSMartin Matuska }
10388b985c9caSMartin Matuska 
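/*
 * Pick an allocator for a write zio that does not already have one: prefer
 * the allocator bound to the calling sync thread (if the pool's sync taskq
 * is running), otherwise fall back to hashing the zio's bookmark, as
 * described below.
 */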
1038914c2e0a0SMartin Matuska void
1039014c2e0a0SMartin Matuska spa_select_allocator(zio_t *zio)
1039114c2e0a0SMartin Matuska {
1039214c2e0a0SMartin Matuska 	zbookmark_phys_t *bm = &zio->io_bookmark;
1039314c2e0a0SMartin Matuska 	spa_t *spa = zio->io_spa;
1039414c2e0a0SMartin Matuska 
1039514c2e0a0SMartin Matuska 	ASSERT(zio->io_type == ZIO_TYPE_WRITE);
1039614c2e0a0SMartin Matuska 
1039714c2e0a0SMartin Matuska 	/*
1039814c2e0a0SMartin Matuska 	 * A gang block (for example) may have inherited its parent's
1039914c2e0a0SMartin Matuska 	 * allocator, in which case there is nothing further to do here.
1040014c2e0a0SMartin Matuska 	 */
1040114c2e0a0SMartin Matuska 	if (ZIO_HAS_ALLOCATOR(zio))
1040214c2e0a0SMartin Matuska 		return;
1040314c2e0a0SMartin Matuska 
1040414c2e0a0SMartin Matuska 	ASSERT(spa != NULL);
1040514c2e0a0SMartin Matuska 	ASSERT(bm != NULL);
1040614c2e0a0SMartin Matuska 
1040714c2e0a0SMartin Matuska 	/*
1040814c2e0a0SMartin Matuska 	 * First try to use an allocator assigned to the syncthread, and set
1040914c2e0a0SMartin Matuska 	 * the corresponding write issue taskq for the allocator.
1041014c2e0a0SMartin Matuska 	 * Note, we must have an open pool to do this.
1041114c2e0a0SMartin Matuska 	 */
1041214c2e0a0SMartin Matuska 	if (spa->spa_sync_tq != NULL) {
1041314c2e0a0SMartin Matuska 		spa_syncthread_info_t *ti = spa->spa_syncthreads;
1041414c2e0a0SMartin Matuska 		for (int i = 0; i < spa->spa_alloc_count; i++, ti++) {
1041514c2e0a0SMartin Matuska 			if (ti->sti_thread == curthread) {
10416b985c9caSMartin Matuska 				zio->io_allocator = ti->sti_allocator;
1041714c2e0a0SMartin Matuska 				return;
1041814c2e0a0SMartin Matuska 			}
1041914c2e0a0SMartin Matuska 		}
1042014c2e0a0SMartin Matuska 	}
1042114c2e0a0SMartin Matuska 
1042214c2e0a0SMartin Matuska 	/*
1042314c2e0a0SMartin Matuska 	 * We want to try to use as many allocators as possible to help improve
1042414c2e0a0SMartin Matuska 	 * performance, but we also want logically adjacent IOs to be physically
1042514c2e0a0SMartin Matuska 	 * adjacent to improve sequential read performance. We chunk each object
1042614c2e0a0SMartin Matuska 	 * into 2^20 block regions, and then hash based on the objset, object,
1042714c2e0a0SMartin Matuska 	 * level, and region to accomplish both of these goals.
1042814c2e0a0SMartin Matuska 	 */
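	/*
	 * For example (hypothetical numbers): writes to blkids 100 and
	 * 200000 of the same objset/object/level fall in the same
	 * 2^20-block region (both have blkid >> 20 == 0), so they hash to
	 * the same allocator; a write to blkid 1500000 (blkid >> 20 == 1)
	 * may hash to a different one.
	 */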
1042914c2e0a0SMartin Matuska 	uint64_t hv = cityhash4(bm->zb_objset, bm->zb_object, bm->zb_level,
1043014c2e0a0SMartin Matuska 	    bm->zb_blkid >> 20);
1043114c2e0a0SMartin Matuska 
1043214c2e0a0SMartin Matuska 	zio->io_allocator = (uint_t)hv % spa->spa_alloc_count;
1043314c2e0a0SMartin Matuska }
1043414c2e0a0SMartin Matuska 
10435eda14cbcSMatt Macy /*
10436eda14cbcSMatt Macy  * ==========================================================================
10437eda14cbcSMatt Macy  * Miscellaneous routines
10438eda14cbcSMatt Macy  * ==========================================================================
10439eda14cbcSMatt Macy  */
10440eda14cbcSMatt Macy 
10441eda14cbcSMatt Macy /*
10442eda14cbcSMatt Macy  * Remove all pools in the system.
10443eda14cbcSMatt Macy  */
10444eda14cbcSMatt Macy void
10445eda14cbcSMatt Macy spa_evict_all(void)
10446eda14cbcSMatt Macy {
10447eda14cbcSMatt Macy 	spa_t *spa;
10448eda14cbcSMatt Macy 
10449eda14cbcSMatt Macy 	/*
10450eda14cbcSMatt Macy 	 * Remove all cached state.  All pools should be closed now,
10451eda14cbcSMatt Macy 	 * so every spa in the AVL tree should be unreferenced.
10452eda14cbcSMatt Macy 	 */
10453eda14cbcSMatt Macy 	mutex_enter(&spa_namespace_lock);
10454eda14cbcSMatt Macy 	while ((spa = spa_next(NULL)) != NULL) {
10455eda14cbcSMatt Macy 		/*
10456eda14cbcSMatt Macy 		 * Stop async tasks.  The async thread may need to detach
10457eda14cbcSMatt Macy 		 * a device that's been replaced, which requires grabbing
10458eda14cbcSMatt Macy 		 * spa_namespace_lock, so we must drop it here.
10459eda14cbcSMatt Macy 		 */
10460eda14cbcSMatt Macy 		spa_open_ref(spa, FTAG);
10461eda14cbcSMatt Macy 		mutex_exit(&spa_namespace_lock);
10462eda14cbcSMatt Macy 		spa_async_suspend(spa);
10463eda14cbcSMatt Macy 		mutex_enter(&spa_namespace_lock);
10464eda14cbcSMatt Macy 		spa_close(spa, FTAG);
10465eda14cbcSMatt Macy 
10466eda14cbcSMatt Macy 		if (spa->spa_state != POOL_STATE_UNINITIALIZED) {
10467eda14cbcSMatt Macy 			spa_unload(spa);
10468eda14cbcSMatt Macy 			spa_deactivate(spa);
10469eda14cbcSMatt Macy 		}
10470eda14cbcSMatt Macy 		spa_remove(spa);
10471eda14cbcSMatt Macy 	}
10472eda14cbcSMatt Macy 	mutex_exit(&spa_namespace_lock);
10473eda14cbcSMatt Macy }
10474eda14cbcSMatt Macy 
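/*
 * Look up a vdev by guid in the pool's vdev tree.  If 'aux' is set, also
 * search the pool's L2ARC and spare aux vdev lists.  Returns NULL if no
 * matching vdev is found.
 */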
10475eda14cbcSMatt Macy vdev_t *
10476eda14cbcSMatt Macy spa_lookup_by_guid(spa_t *spa, uint64_t guid, boolean_t aux)
10477eda14cbcSMatt Macy {
10478eda14cbcSMatt Macy 	vdev_t *vd;
10479eda14cbcSMatt Macy 	int i;
10480eda14cbcSMatt Macy 
10481eda14cbcSMatt Macy 	if ((vd = vdev_lookup_by_guid(spa->spa_root_vdev, guid)) != NULL)
10482eda14cbcSMatt Macy 		return (vd);
10483eda14cbcSMatt Macy 
10484eda14cbcSMatt Macy 	if (aux) {
10485eda14cbcSMatt Macy 		for (i = 0; i < spa->spa_l2cache.sav_count; i++) {
10486eda14cbcSMatt Macy 			vd = spa->spa_l2cache.sav_vdevs[i];
10487eda14cbcSMatt Macy 			if (vd->vdev_guid == guid)
10488eda14cbcSMatt Macy 				return (vd);
10489eda14cbcSMatt Macy 		}
10490eda14cbcSMatt Macy 
10491eda14cbcSMatt Macy 		for (i = 0; i < spa->spa_spares.sav_count; i++) {
10492eda14cbcSMatt Macy 			vd = spa->spa_spares.sav_vdevs[i];
10493eda14cbcSMatt Macy 			if (vd->vdev_guid == guid)
10494eda14cbcSMatt Macy 				return (vd);
10495eda14cbcSMatt Macy 		}
10496eda14cbcSMatt Macy 	}
10497eda14cbcSMatt Macy 
10498eda14cbcSMatt Macy 	return (NULL);
10499eda14cbcSMatt Macy }
10500eda14cbcSMatt Macy 
10501eda14cbcSMatt Macy void
10502eda14cbcSMatt Macy spa_upgrade(spa_t *spa, uint64_t version)
10503eda14cbcSMatt Macy {
10504eda14cbcSMatt Macy 	ASSERT(spa_writeable(spa));
10505eda14cbcSMatt Macy 
10506eda14cbcSMatt Macy 	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
10507eda14cbcSMatt Macy 
10508eda14cbcSMatt Macy 	/*
10509eda14cbcSMatt Macy 	 * This should only be called for a non-faulted pool, and since a
10510eda14cbcSMatt Macy 	 * future version would result in an unopenable pool, this shouldn't be
10511eda14cbcSMatt Macy 	 * possible.
10512eda14cbcSMatt Macy 	 */
10513eda14cbcSMatt Macy 	ASSERT(SPA_VERSION_IS_SUPPORTED(spa->spa_uberblock.ub_version));
10514eda14cbcSMatt Macy 	ASSERT3U(version, >=, spa->spa_uberblock.ub_version);
10515eda14cbcSMatt Macy 
10516eda14cbcSMatt Macy 	spa->spa_uberblock.ub_version = version;
10517eda14cbcSMatt Macy 	vdev_config_dirty(spa->spa_root_vdev);
10518eda14cbcSMatt Macy 
10519eda14cbcSMatt Macy 	spa_config_exit(spa, SCL_ALL, FTAG);
10520eda14cbcSMatt Macy 
10521eda14cbcSMatt Macy 	txg_wait_synced(spa_get_dsl(spa), 0);
10522eda14cbcSMatt Macy }
10523eda14cbcSMatt Macy 
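/*
 * Check whether the given guid is present in the given aux vdev list,
 * either as an active aux vdev or as one that is pending addition.
 */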
10524dae17134SMartin Matuska static boolean_t
10525dae17134SMartin Matuska spa_has_aux_vdev(spa_t *spa, uint64_t guid, spa_aux_vdev_t *sav)
10526eda14cbcSMatt Macy {
10527e92ffd9bSMartin Matuska 	(void) spa;
10528eda14cbcSMatt Macy 	int i;
10529dae17134SMartin Matuska 	uint64_t vdev_guid;
10530eda14cbcSMatt Macy 
10531eda14cbcSMatt Macy 	for (i = 0; i < sav->sav_count; i++)
10532eda14cbcSMatt Macy 		if (sav->sav_vdevs[i]->vdev_guid == guid)
10533eda14cbcSMatt Macy 			return (B_TRUE);
10534eda14cbcSMatt Macy 
10535eda14cbcSMatt Macy 	for (i = 0; i < sav->sav_npending; i++) {
10536eda14cbcSMatt Macy 		if (nvlist_lookup_uint64(sav->sav_pending[i], ZPOOL_CONFIG_GUID,
10537dae17134SMartin Matuska 		    &vdev_guid) == 0 && vdev_guid == guid)
10538eda14cbcSMatt Macy 			return (B_TRUE);
10539eda14cbcSMatt Macy 	}
10540eda14cbcSMatt Macy 
10541eda14cbcSMatt Macy 	return (B_FALSE);
10542eda14cbcSMatt Macy }
10543eda14cbcSMatt Macy 
10544dae17134SMartin Matuska boolean_t
10545dae17134SMartin Matuska spa_has_l2cache(spa_t *spa, uint64_t guid)
10546dae17134SMartin Matuska {
10547dae17134SMartin Matuska 	return (spa_has_aux_vdev(spa, guid, &spa->spa_l2cache));
10548dae17134SMartin Matuska }
10549dae17134SMartin Matuska 
10550dae17134SMartin Matuska boolean_t
10551dae17134SMartin Matuska spa_has_spare(spa_t *spa, uint64_t guid)
10552dae17134SMartin Matuska {
10553dae17134SMartin Matuska 	return (spa_has_aux_vdev(spa, guid, &spa->spa_spares));
10554dae17134SMartin Matuska }
10555dae17134SMartin Matuska 
10556eda14cbcSMatt Macy /*
10557eda14cbcSMatt Macy  * Check if a pool has an active shared spare device.
10557eda14cbcSMatt Macy  * Note: an active spare's reference count is 2: as a spare and as a replacement
10559eda14cbcSMatt Macy  */
10560eda14cbcSMatt Macy static boolean_t
10561eda14cbcSMatt Macy spa_has_active_shared_spare(spa_t *spa)
10562eda14cbcSMatt Macy {
10563eda14cbcSMatt Macy 	int i, refcnt;
10564eda14cbcSMatt Macy 	uint64_t pool;
10565eda14cbcSMatt Macy 	spa_aux_vdev_t *sav = &spa->spa_spares;
10566eda14cbcSMatt Macy 
10567eda14cbcSMatt Macy 	for (i = 0; i < sav->sav_count; i++) {
10568eda14cbcSMatt Macy 		if (spa_spare_exists(sav->sav_vdevs[i]->vdev_guid, &pool,
10569eda14cbcSMatt Macy 		    &refcnt) && pool != 0ULL && pool == spa_guid(spa) &&
10570eda14cbcSMatt Macy 		    refcnt > 2)
10571eda14cbcSMatt Macy 			return (B_TRUE);
10572eda14cbcSMatt Macy 	}
10573eda14cbcSMatt Macy 
10574eda14cbcSMatt Macy 	return (B_FALSE);
10575eda14cbcSMatt Macy }
10576eda14cbcSMatt Macy 
10577eda14cbcSMatt Macy uint64_t
10578eda14cbcSMatt Macy spa_total_metaslabs(spa_t *spa)
10579eda14cbcSMatt Macy {
10580eda14cbcSMatt Macy 	vdev_t *rvd = spa->spa_root_vdev;
10581eda14cbcSMatt Macy 
10582eda14cbcSMatt Macy 	uint64_t m = 0;
10583eda14cbcSMatt Macy 	for (uint64_t c = 0; c < rvd->vdev_children; c++) {
10584eda14cbcSMatt Macy 		vdev_t *vd = rvd->vdev_child[c];
10585eda14cbcSMatt Macy 		if (!vdev_is_concrete(vd))
10586eda14cbcSMatt Macy 			continue;
10587eda14cbcSMatt Macy 		m += vd->vdev_ms_count;
10588eda14cbcSMatt Macy 	}
10589eda14cbcSMatt Macy 	return (m);
10590eda14cbcSMatt Macy }
10591eda14cbcSMatt Macy 
10592eda14cbcSMatt Macy /*
10593eda14cbcSMatt Macy  * Notify any waiting threads that some activity has switched from being in-
10594eda14cbcSMatt Macy  * progress to not-in-progress so that the thread can wake up and determine
10595eda14cbcSMatt Macy  * whether it is finished waiting.
10596eda14cbcSMatt Macy  */
10597eda14cbcSMatt Macy void
10598eda14cbcSMatt Macy spa_notify_waiters(spa_t *spa)
10599eda14cbcSMatt Macy {
10600eda14cbcSMatt Macy 	/*
10601eda14cbcSMatt Macy 	 * Acquiring spa_activities_lock here prevents the cv_broadcast from
10602eda14cbcSMatt Macy 	 * happening between the waiting thread's check and cv_wait.
10603eda14cbcSMatt Macy 	 */
10604eda14cbcSMatt Macy 	mutex_enter(&spa->spa_activities_lock);
10605eda14cbcSMatt Macy 	cv_broadcast(&spa->spa_activities_cv);
10606eda14cbcSMatt Macy 	mutex_exit(&spa->spa_activities_lock);
10607eda14cbcSMatt Macy }
10608eda14cbcSMatt Macy 
10609eda14cbcSMatt Macy /*
10610eda14cbcSMatt Macy  * Notify any waiting threads that the pool is exporting, and then block until
10611eda14cbcSMatt Macy  * they are finished using the spa_t.
10612eda14cbcSMatt Macy  */
10613eda14cbcSMatt Macy void
10614eda14cbcSMatt Macy spa_wake_waiters(spa_t *spa)
10615eda14cbcSMatt Macy {
10616eda14cbcSMatt Macy 	mutex_enter(&spa->spa_activities_lock);
10617eda14cbcSMatt Macy 	spa->spa_waiters_cancel = B_TRUE;
10618eda14cbcSMatt Macy 	cv_broadcast(&spa->spa_activities_cv);
10619eda14cbcSMatt Macy 	while (spa->spa_waiters != 0)
10620eda14cbcSMatt Macy 		cv_wait(&spa->spa_waiters_cv, &spa->spa_activities_lock);
10621eda14cbcSMatt Macy 	spa->spa_waiters_cancel = B_FALSE;
10622eda14cbcSMatt Macy 	mutex_exit(&spa->spa_activities_lock);
10623eda14cbcSMatt Macy }
10624eda14cbcSMatt Macy 
10625eda14cbcSMatt Macy /* Whether the vdev or any of its descendants are being initialized/trimmed. */
10626eda14cbcSMatt Macy static boolean_t
10627eda14cbcSMatt Macy spa_vdev_activity_in_progress_impl(vdev_t *vd, zpool_wait_activity_t activity)
10628eda14cbcSMatt Macy {
10629eda14cbcSMatt Macy 	spa_t *spa = vd->vdev_spa;
10630eda14cbcSMatt Macy 
10631eda14cbcSMatt Macy 	ASSERT(spa_config_held(spa, SCL_CONFIG | SCL_STATE, RW_READER));
10632eda14cbcSMatt Macy 	ASSERT(MUTEX_HELD(&spa->spa_activities_lock));
10633eda14cbcSMatt Macy 	ASSERT(activity == ZPOOL_WAIT_INITIALIZE ||
10634eda14cbcSMatt Macy 	    activity == ZPOOL_WAIT_TRIM);
10635eda14cbcSMatt Macy 
10636eda14cbcSMatt Macy 	kmutex_t *lock = activity == ZPOOL_WAIT_INITIALIZE ?
10637eda14cbcSMatt Macy 	    &vd->vdev_initialize_lock : &vd->vdev_trim_lock;
10638eda14cbcSMatt Macy 
10639eda14cbcSMatt Macy 	mutex_exit(&spa->spa_activities_lock);
10640eda14cbcSMatt Macy 	mutex_enter(lock);
10641eda14cbcSMatt Macy 	mutex_enter(&spa->spa_activities_lock);
10642eda14cbcSMatt Macy 
10643eda14cbcSMatt Macy 	boolean_t in_progress = (activity == ZPOOL_WAIT_INITIALIZE) ?
10644eda14cbcSMatt Macy 	    (vd->vdev_initialize_state == VDEV_INITIALIZE_ACTIVE) :
10645eda14cbcSMatt Macy 	    (vd->vdev_trim_state == VDEV_TRIM_ACTIVE);
10646eda14cbcSMatt Macy 	mutex_exit(lock);
10647eda14cbcSMatt Macy 
10648eda14cbcSMatt Macy 	if (in_progress)
10649eda14cbcSMatt Macy 		return (B_TRUE);
10650eda14cbcSMatt Macy 
10651eda14cbcSMatt Macy 	for (int i = 0; i < vd->vdev_children; i++) {
10652eda14cbcSMatt Macy 		if (spa_vdev_activity_in_progress_impl(vd->vdev_child[i],
10653eda14cbcSMatt Macy 		    activity))
10654eda14cbcSMatt Macy 			return (B_TRUE);
10655eda14cbcSMatt Macy 	}
10656eda14cbcSMatt Macy 
10657eda14cbcSMatt Macy 	return (B_FALSE);
10658eda14cbcSMatt Macy }
10659eda14cbcSMatt Macy 
10660eda14cbcSMatt Macy /*
10661eda14cbcSMatt Macy  * If use_guid is true, this checks whether the vdev specified by guid is
10662eda14cbcSMatt Macy  * being initialized/trimmed. Otherwise, it checks whether any vdev in the pool
10663eda14cbcSMatt Macy  * is being initialized/trimmed. The caller must hold the config lock and
10664eda14cbcSMatt Macy  * spa_activities_lock.
10665eda14cbcSMatt Macy  */
10666eda14cbcSMatt Macy static int
10667eda14cbcSMatt Macy spa_vdev_activity_in_progress(spa_t *spa, boolean_t use_guid, uint64_t guid,
10668eda14cbcSMatt Macy     zpool_wait_activity_t activity, boolean_t *in_progress)
10669eda14cbcSMatt Macy {
10670eda14cbcSMatt Macy 	mutex_exit(&spa->spa_activities_lock);
10671eda14cbcSMatt Macy 	spa_config_enter(spa, SCL_CONFIG | SCL_STATE, FTAG, RW_READER);
10672eda14cbcSMatt Macy 	mutex_enter(&spa->spa_activities_lock);
10673eda14cbcSMatt Macy 
10674eda14cbcSMatt Macy 	vdev_t *vd;
10675eda14cbcSMatt Macy 	if (use_guid) {
10676eda14cbcSMatt Macy 		vd = spa_lookup_by_guid(spa, guid, B_FALSE);
10677eda14cbcSMatt Macy 		if (vd == NULL || !vd->vdev_ops->vdev_op_leaf) {
10678eda14cbcSMatt Macy 			spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
10679eda14cbcSMatt Macy 			return (EINVAL);
10680eda14cbcSMatt Macy 		}
10681eda14cbcSMatt Macy 	} else {
10682eda14cbcSMatt Macy 		vd = spa->spa_root_vdev;
10683eda14cbcSMatt Macy 	}
10684eda14cbcSMatt Macy 
10685eda14cbcSMatt Macy 	*in_progress = spa_vdev_activity_in_progress_impl(vd, activity);
10686eda14cbcSMatt Macy 
10687eda14cbcSMatt Macy 	spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
10688eda14cbcSMatt Macy 	return (0);
10689eda14cbcSMatt Macy }
10690eda14cbcSMatt Macy 
10691eda14cbcSMatt Macy /*
10692eda14cbcSMatt Macy  * Locking for waiting threads
10693eda14cbcSMatt Macy  * ---------------------------
10694eda14cbcSMatt Macy  *
10695eda14cbcSMatt Macy  * Waiting threads need a way to check whether a given activity is in progress,
10696eda14cbcSMatt Macy  * and then, if it is, wait for it to complete. Each activity will have some
10697eda14cbcSMatt Macy  * in-memory representation of the relevant on-disk state which can be used to
10698eda14cbcSMatt Macy  * determine whether or not the activity is in progress. The in-memory state and
10699eda14cbcSMatt Macy  * the locking used to protect it will be different for each activity, and may
10700eda14cbcSMatt Macy  * not be suitable for use with a cvar (e.g., some state is protected by the
10701eda14cbcSMatt Macy  * config lock). To allow waiting threads to wait without any races, another
10702eda14cbcSMatt Macy  * lock, spa_activities_lock, is used.
10703eda14cbcSMatt Macy  *
10704eda14cbcSMatt Macy  * When the state is checked, both the activity-specific lock (if there is one)
10705eda14cbcSMatt Macy  * and spa_activities_lock are held. In some cases, the activity-specific lock
10706eda14cbcSMatt Macy  * is acquired explicitly (e.g. the config lock). In others, the locking is
10707eda14cbcSMatt Macy  * internal to some check (e.g. bpobj_is_empty). After checking, the waiting
10708eda14cbcSMatt Macy  * thread releases the activity-specific lock and, if the activity is in
10709eda14cbcSMatt Macy  * progress, then cv_waits using spa_activities_lock.
10710eda14cbcSMatt Macy  *
10711eda14cbcSMatt Macy  * The waiting thread is woken when another thread, one completing some
10712eda14cbcSMatt Macy  * activity, updates the state of the activity and then calls
10713eda14cbcSMatt Macy  * spa_notify_waiters, which will cv_broadcast. This 'completing' thread only
10714eda14cbcSMatt Macy  * needs to hold its activity-specific lock when updating the state, and this
10715eda14cbcSMatt Macy  * lock can (but doesn't have to) be dropped before calling spa_notify_waiters.
10716eda14cbcSMatt Macy  *
10717eda14cbcSMatt Macy  * Because spa_notify_waiters acquires spa_activities_lock before broadcasting,
10718eda14cbcSMatt Macy  * and because it is held when the waiting thread checks the state of the
10719eda14cbcSMatt Macy  * activity, it can never be the case that the completing thread both updates
10720eda14cbcSMatt Macy  * the activity state and cv_broadcasts in between the waiting thread's check
10721eda14cbcSMatt Macy  * and cv_wait. Thus, a waiting thread can never miss a wakeup.
10722eda14cbcSMatt Macy  *
10723eda14cbcSMatt Macy  * In order to prevent deadlock, when the waiting thread does its check, in some
10724eda14cbcSMatt Macy  * cases it will temporarily drop spa_activities_lock in order to acquire the
10725eda14cbcSMatt Macy  * activity-specific lock. The order in which spa_activities_lock and the
10726eda14cbcSMatt Macy  * activity specific lock are acquired in the waiting thread is determined by
10727eda14cbcSMatt Macy  * the order in which they are acquired in the completing thread; if the
10728eda14cbcSMatt Macy  * completing thread calls spa_notify_waiters with the activity-specific lock
10729eda14cbcSMatt Macy  * held, then the waiting thread must also acquire the activity-specific lock
10730eda14cbcSMatt Macy  * first.
10731eda14cbcSMatt Macy  */
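
/*
 * A minimal sketch of the two sides of this protocol ("activity lock" and
 * "activity state" stand in for whatever a particular activity actually
 * uses):
 *
 *	Waiting thread:
 *		mutex_enter(&spa->spa_activities_lock);
 *		(take the activity lock, dropping spa_activities_lock first
 *		 if the completing thread notifies with it held)
 *		in_progress = <check activity state>;
 *		(drop the activity lock)
 *		if (in_progress)
 *			cv_wait_sig(&spa->spa_activities_cv,
 *			    &spa->spa_activities_lock);
 *
 *	Completing thread:
 *		(take the activity lock)
 *		<update activity state>;
 *		(optionally drop the activity lock)
 *		spa_notify_waiters(spa);
 */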
10732eda14cbcSMatt Macy 
10733eda14cbcSMatt Macy static int
10734eda14cbcSMatt Macy spa_activity_in_progress(spa_t *spa, zpool_wait_activity_t activity,
10735eda14cbcSMatt Macy     boolean_t use_tag, uint64_t tag, boolean_t *in_progress)
10736eda14cbcSMatt Macy {
10737eda14cbcSMatt Macy 	int error = 0;
10738eda14cbcSMatt Macy 
10739eda14cbcSMatt Macy 	ASSERT(MUTEX_HELD(&spa->spa_activities_lock));
10740eda14cbcSMatt Macy 
10741eda14cbcSMatt Macy 	switch (activity) {
10742eda14cbcSMatt Macy 	case ZPOOL_WAIT_CKPT_DISCARD:
10743eda14cbcSMatt Macy 		*in_progress =
10744eda14cbcSMatt Macy 		    (spa_feature_is_active(spa, SPA_FEATURE_POOL_CHECKPOINT) &&
10745eda14cbcSMatt Macy 		    zap_contains(spa_meta_objset(spa),
10746eda14cbcSMatt Macy 		    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_ZPOOL_CHECKPOINT) ==
10747eda14cbcSMatt Macy 		    ENOENT);
10748eda14cbcSMatt Macy 		break;
10749eda14cbcSMatt Macy 	case ZPOOL_WAIT_FREE:
10750eda14cbcSMatt Macy 		*in_progress = ((spa_version(spa) >= SPA_VERSION_DEADLISTS &&
10751eda14cbcSMatt Macy 		    !bpobj_is_empty(&spa->spa_dsl_pool->dp_free_bpobj)) ||
10752eda14cbcSMatt Macy 		    spa_feature_is_active(spa, SPA_FEATURE_ASYNC_DESTROY) ||
10753eda14cbcSMatt Macy 		    spa_livelist_delete_check(spa));
10754eda14cbcSMatt Macy 		break;
10755eda14cbcSMatt Macy 	case ZPOOL_WAIT_INITIALIZE:
10756eda14cbcSMatt Macy 	case ZPOOL_WAIT_TRIM:
10757eda14cbcSMatt Macy 		error = spa_vdev_activity_in_progress(spa, use_tag, tag,
10758eda14cbcSMatt Macy 		    activity, in_progress);
10759eda14cbcSMatt Macy 		break;
10760eda14cbcSMatt Macy 	case ZPOOL_WAIT_REPLACE:
10761eda14cbcSMatt Macy 		mutex_exit(&spa->spa_activities_lock);
10762eda14cbcSMatt Macy 		spa_config_enter(spa, SCL_CONFIG | SCL_STATE, FTAG, RW_READER);
10763eda14cbcSMatt Macy 		mutex_enter(&spa->spa_activities_lock);
10764eda14cbcSMatt Macy 
10765eda14cbcSMatt Macy 		*in_progress = vdev_replace_in_progress(spa->spa_root_vdev);
10766eda14cbcSMatt Macy 		spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
10767eda14cbcSMatt Macy 		break;
10768eda14cbcSMatt Macy 	case ZPOOL_WAIT_REMOVE:
10769eda14cbcSMatt Macy 		*in_progress = (spa->spa_removing_phys.sr_state ==
10770eda14cbcSMatt Macy 		    DSS_SCANNING);
10771eda14cbcSMatt Macy 		break;
10772eda14cbcSMatt Macy 	case ZPOOL_WAIT_RESILVER:
10773e716630dSMartin Matuska 		*in_progress = vdev_rebuild_active(spa->spa_root_vdev);
10774e716630dSMartin Matuska 		if (*in_progress)
10775eda14cbcSMatt Macy 			break;
10776c03c5b1cSMartin Matuska 		zfs_fallthrough;
10777eda14cbcSMatt Macy 	case ZPOOL_WAIT_SCRUB:
10778eda14cbcSMatt Macy 	{
10779eda14cbcSMatt Macy 		boolean_t scanning, paused, is_scrub;
10780eda14cbcSMatt Macy 		dsl_scan_t *scn =  spa->spa_dsl_pool->dp_scan;
10781eda14cbcSMatt Macy 
10782eda14cbcSMatt Macy 		is_scrub = (scn->scn_phys.scn_func == POOL_SCAN_SCRUB);
10783eda14cbcSMatt Macy 		scanning = (scn->scn_phys.scn_state == DSS_SCANNING);
10784eda14cbcSMatt Macy 		paused = dsl_scan_is_paused_scrub(scn);
10785eda14cbcSMatt Macy 		*in_progress = (scanning && !paused &&
10786eda14cbcSMatt Macy 		    is_scrub == (activity == ZPOOL_WAIT_SCRUB));
10787eda14cbcSMatt Macy 		break;
10788eda14cbcSMatt Macy 	}
10789e716630dSMartin Matuska 	case ZPOOL_WAIT_RAIDZ_EXPAND:
10790e716630dSMartin Matuska 	{
10791e716630dSMartin Matuska 		vdev_raidz_expand_t *vre = spa->spa_raidz_expand;
10792e716630dSMartin Matuska 		*in_progress = (vre != NULL && vre->vre_state == DSS_SCANNING);
10793e716630dSMartin Matuska 		break;
10794e716630dSMartin Matuska 	}
10795eda14cbcSMatt Macy 	default:
10796eda14cbcSMatt Macy 		panic("unrecognized value for activity %d", activity);
10797eda14cbcSMatt Macy 	}
10798eda14cbcSMatt Macy 
10799eda14cbcSMatt Macy 	return (error);
10800eda14cbcSMatt Macy }
10801eda14cbcSMatt Macy 
10802eda14cbcSMatt Macy static int
10803eda14cbcSMatt Macy spa_wait_common(const char *pool, zpool_wait_activity_t activity,
10804eda14cbcSMatt Macy     boolean_t use_tag, uint64_t tag, boolean_t *waited)
10805eda14cbcSMatt Macy {
10806eda14cbcSMatt Macy 	/*
10807eda14cbcSMatt Macy 	 * The tag is used to distinguish between instances of an activity.
10808eda14cbcSMatt Macy 	 * 'initialize' and 'trim' are the only activities that we use this for.
10809eda14cbcSMatt Macy 	 * The other activities can only have a single instance in progress in a
10810eda14cbcSMatt Macy 	 * pool at one time, making the tag unnecessary.
10811eda14cbcSMatt Macy 	 *
10812eda14cbcSMatt Macy 	 * There can be multiple devices being replaced at once, but since they
10813eda14cbcSMatt Macy 	 * all finish once resilvering finishes, we don't bother keeping track
10814eda14cbcSMatt Macy 	 * of them individually; we just wait for them all to finish.
10815eda14cbcSMatt Macy 	 */
10816eda14cbcSMatt Macy 	if (use_tag && activity != ZPOOL_WAIT_INITIALIZE &&
10817eda14cbcSMatt Macy 	    activity != ZPOOL_WAIT_TRIM)
10818eda14cbcSMatt Macy 		return (EINVAL);
10819eda14cbcSMatt Macy 
10820eda14cbcSMatt Macy 	if (activity < 0 || activity >= ZPOOL_WAIT_NUM_ACTIVITIES)
10821eda14cbcSMatt Macy 		return (EINVAL);
10822eda14cbcSMatt Macy 
10823eda14cbcSMatt Macy 	spa_t *spa;
10824eda14cbcSMatt Macy 	int error = spa_open(pool, &spa, FTAG);
10825eda14cbcSMatt Macy 	if (error != 0)
10826eda14cbcSMatt Macy 		return (error);
10827eda14cbcSMatt Macy 
10828eda14cbcSMatt Macy 	/*
10829eda14cbcSMatt Macy 	 * Increment the spa's waiter count so that we can call spa_close and
10830eda14cbcSMatt Macy 	 * still ensure that the spa_t doesn't get freed before this thread is
10831eda14cbcSMatt Macy 	 * finished with it when the pool is exported. We want to call spa_close
10832eda14cbcSMatt Macy 	 * before we start waiting because otherwise the additional ref would
10833eda14cbcSMatt Macy 	 * prevent the pool from being exported or destroyed throughout the
10834eda14cbcSMatt Macy 	 * potentially long wait.
10835eda14cbcSMatt Macy 	 */
10836eda14cbcSMatt Macy 	mutex_enter(&spa->spa_activities_lock);
10837eda14cbcSMatt Macy 	spa->spa_waiters++;
10838eda14cbcSMatt Macy 	spa_close(spa, FTAG);
10839eda14cbcSMatt Macy 
10840eda14cbcSMatt Macy 	*waited = B_FALSE;
10841eda14cbcSMatt Macy 	for (;;) {
10842eda14cbcSMatt Macy 		boolean_t in_progress;
10843eda14cbcSMatt Macy 		error = spa_activity_in_progress(spa, activity, use_tag, tag,
10844eda14cbcSMatt Macy 		    &in_progress);
10845eda14cbcSMatt Macy 
10846eda14cbcSMatt Macy 		if (error || !in_progress || spa->spa_waiters_cancel)
10847eda14cbcSMatt Macy 			break;
10848eda14cbcSMatt Macy 
10849eda14cbcSMatt Macy 		*waited = B_TRUE;
10850eda14cbcSMatt Macy 
10851eda14cbcSMatt Macy 		if (cv_wait_sig(&spa->spa_activities_cv,
10852eda14cbcSMatt Macy 		    &spa->spa_activities_lock) == 0) {
10853eda14cbcSMatt Macy 			error = EINTR;
10854eda14cbcSMatt Macy 			break;
10855eda14cbcSMatt Macy 		}
10856eda14cbcSMatt Macy 	}
10857eda14cbcSMatt Macy 
10858eda14cbcSMatt Macy 	spa->spa_waiters--;
10859eda14cbcSMatt Macy 	cv_signal(&spa->spa_waiters_cv);
10860eda14cbcSMatt Macy 	mutex_exit(&spa->spa_activities_lock);
10861eda14cbcSMatt Macy 
10862eda14cbcSMatt Macy 	return (error);
10863eda14cbcSMatt Macy }
10864eda14cbcSMatt Macy 
10865eda14cbcSMatt Macy /*
10866eda14cbcSMatt Macy  * Wait for a particular instance of the specified activity to complete, where
10867eda14cbcSMatt Macy  * the instance is identified by 'tag'
10868eda14cbcSMatt Macy  */
10869eda14cbcSMatt Macy int
10870eda14cbcSMatt Macy spa_wait_tag(const char *pool, zpool_wait_activity_t activity, uint64_t tag,
10871eda14cbcSMatt Macy     boolean_t *waited)
10872eda14cbcSMatt Macy {
10873eda14cbcSMatt Macy 	return (spa_wait_common(pool, activity, B_TRUE, tag, waited));
10874eda14cbcSMatt Macy }
10875eda14cbcSMatt Macy 
10876eda14cbcSMatt Macy /*
10877eda14cbcSMatt Macy  * Wait for all instances of the specified activity to complete
10878eda14cbcSMatt Macy  */
10879eda14cbcSMatt Macy int
10880eda14cbcSMatt Macy spa_wait(const char *pool, zpool_wait_activity_t activity, boolean_t *waited)
10881eda14cbcSMatt Macy {
10882eda14cbcSMatt Macy 
10883eda14cbcSMatt Macy 	return (spa_wait_common(pool, activity, B_FALSE, 0, waited));
10884eda14cbcSMatt Macy }
10885eda14cbcSMatt Macy 
10886eda14cbcSMatt Macy sysevent_t *
10887eda14cbcSMatt Macy spa_event_create(spa_t *spa, vdev_t *vd, nvlist_t *hist_nvl, const char *name)
10888eda14cbcSMatt Macy {
10889eda14cbcSMatt Macy 	sysevent_t *ev = NULL;
10890eda14cbcSMatt Macy #ifdef _KERNEL
10891eda14cbcSMatt Macy 	nvlist_t *resource;
10892eda14cbcSMatt Macy 
10893eda14cbcSMatt Macy 	resource = zfs_event_create(spa, vd, FM_SYSEVENT_CLASS, name, hist_nvl);
10894eda14cbcSMatt Macy 	if (resource) {
10895eda14cbcSMatt Macy 		ev = kmem_alloc(sizeof (sysevent_t), KM_SLEEP);
10896eda14cbcSMatt Macy 		ev->resource = resource;
10897eda14cbcSMatt Macy 	}
10898e92ffd9bSMartin Matuska #else
10899e92ffd9bSMartin Matuska 	(void) spa, (void) vd, (void) hist_nvl, (void) name;
10900eda14cbcSMatt Macy #endif
10901eda14cbcSMatt Macy 	return (ev);
10902eda14cbcSMatt Macy }
10903eda14cbcSMatt Macy 
10904eda14cbcSMatt Macy void
10905eda14cbcSMatt Macy spa_event_post(sysevent_t *ev)
10906eda14cbcSMatt Macy {
10907eda14cbcSMatt Macy #ifdef _KERNEL
10908eda14cbcSMatt Macy 	if (ev) {
10909eda14cbcSMatt Macy 		zfs_zevent_post(ev->resource, NULL, zfs_zevent_post_cb);
10910eda14cbcSMatt Macy 		kmem_free(ev, sizeof (*ev));
10911eda14cbcSMatt Macy 	}
10912e92ffd9bSMartin Matuska #else
10913e92ffd9bSMartin Matuska 	(void) ev;
10914eda14cbcSMatt Macy #endif
10915eda14cbcSMatt Macy }
10916eda14cbcSMatt Macy 
10917eda14cbcSMatt Macy /*
10918eda14cbcSMatt Macy  * Post a zevent corresponding to the given sysevent.   The 'name' must be one
10919eda14cbcSMatt Macy  * of the event definitions in sys/sysevent/eventdefs.h.  The payload will be
10920eda14cbcSMatt Macy  * filled in from the spa and (optionally) the vdev.  This doesn't do anything
10921eda14cbcSMatt Macy  * in the userland libzpool, as we don't want consumers to misinterpret ztest
10922eda14cbcSMatt Macy  * or zdb as real changes.
10923eda14cbcSMatt Macy  */
10924eda14cbcSMatt Macy void
10925eda14cbcSMatt Macy spa_event_notify(spa_t *spa, vdev_t *vd, nvlist_t *hist_nvl, const char *name)
10926eda14cbcSMatt Macy {
10927eda14cbcSMatt Macy 	spa_event_post(spa_event_create(spa, vd, hist_nvl, name));
10928eda14cbcSMatt Macy }
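
/*
 * Illustrative sketch (not part of the build): the two ways these helpers
 * are typically combined.  The event names come from
 * sys/sysevent/eventdefs.h; the specific names and call sites below are
 * assumptions made for this example only.
 *
 * One-shot form, building and posting the zevent in a single call:
 *
 *	spa_event_notify(spa, NULL, NULL, ESC_ZFS_RESILVER_START);
 *
 * Split form, creating the event while configuration locks are still held
 * and posting it once they have been dropped:
 *
 *	sysevent_t *ev = spa_event_create(spa, vd, NULL, ESC_ZFS_VDEV_REMOVE);
 *	...
 *	spa_event_post(ev);
 *
 * In the userland libzpool build (ztest, zdb) both paths compile to no-ops,
 * as the #ifdef _KERNEL blocks above show.
 */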
10929eda14cbcSMatt Macy 
10930eda14cbcSMatt Macy /* state manipulation functions */
10931eda14cbcSMatt Macy EXPORT_SYMBOL(spa_open);
10932eda14cbcSMatt Macy EXPORT_SYMBOL(spa_open_rewind);
10933eda14cbcSMatt Macy EXPORT_SYMBOL(spa_get_stats);
10934eda14cbcSMatt Macy EXPORT_SYMBOL(spa_create);
10935eda14cbcSMatt Macy EXPORT_SYMBOL(spa_import);
10936eda14cbcSMatt Macy EXPORT_SYMBOL(spa_tryimport);
10937eda14cbcSMatt Macy EXPORT_SYMBOL(spa_destroy);
10938eda14cbcSMatt Macy EXPORT_SYMBOL(spa_export);
10939eda14cbcSMatt Macy EXPORT_SYMBOL(spa_reset);
10940eda14cbcSMatt Macy EXPORT_SYMBOL(spa_async_request);
10941eda14cbcSMatt Macy EXPORT_SYMBOL(spa_async_suspend);
10942eda14cbcSMatt Macy EXPORT_SYMBOL(spa_async_resume);
10943eda14cbcSMatt Macy EXPORT_SYMBOL(spa_inject_addref);
10944eda14cbcSMatt Macy EXPORT_SYMBOL(spa_inject_delref);
10945eda14cbcSMatt Macy EXPORT_SYMBOL(spa_scan_stat_init);
10946eda14cbcSMatt Macy EXPORT_SYMBOL(spa_scan_get_stats);
10947eda14cbcSMatt Macy 
10948eda14cbcSMatt Macy /* device manipulation */
10949eda14cbcSMatt Macy EXPORT_SYMBOL(spa_vdev_add);
10950eda14cbcSMatt Macy EXPORT_SYMBOL(spa_vdev_attach);
10951eda14cbcSMatt Macy EXPORT_SYMBOL(spa_vdev_detach);
10952eda14cbcSMatt Macy EXPORT_SYMBOL(spa_vdev_setpath);
10953eda14cbcSMatt Macy EXPORT_SYMBOL(spa_vdev_setfru);
10954eda14cbcSMatt Macy EXPORT_SYMBOL(spa_vdev_split_mirror);
10955eda14cbcSMatt Macy 
10956eda14cbcSMatt Macy /* spare state (which is global across all pools) */
10957eda14cbcSMatt Macy EXPORT_SYMBOL(spa_spare_add);
10958eda14cbcSMatt Macy EXPORT_SYMBOL(spa_spare_remove);
10959eda14cbcSMatt Macy EXPORT_SYMBOL(spa_spare_exists);
10960eda14cbcSMatt Macy EXPORT_SYMBOL(spa_spare_activate);
10961eda14cbcSMatt Macy 
10962eda14cbcSMatt Macy /* L2ARC state (which is global across all pools) */
10963eda14cbcSMatt Macy EXPORT_SYMBOL(spa_l2cache_add);
10964eda14cbcSMatt Macy EXPORT_SYMBOL(spa_l2cache_remove);
10965eda14cbcSMatt Macy EXPORT_SYMBOL(spa_l2cache_exists);
10966eda14cbcSMatt Macy EXPORT_SYMBOL(spa_l2cache_activate);
10967eda14cbcSMatt Macy EXPORT_SYMBOL(spa_l2cache_drop);
10968eda14cbcSMatt Macy 
10969eda14cbcSMatt Macy /* scanning */
10970eda14cbcSMatt Macy EXPORT_SYMBOL(spa_scan);
10971eda14cbcSMatt Macy EXPORT_SYMBOL(spa_scan_stop);
10972eda14cbcSMatt Macy 
10973eda14cbcSMatt Macy /* spa syncing */
10974eda14cbcSMatt Macy EXPORT_SYMBOL(spa_sync); /* only for DMU use */
10975eda14cbcSMatt Macy EXPORT_SYMBOL(spa_sync_allpools);
10976eda14cbcSMatt Macy 
10977eda14cbcSMatt Macy /* properties */
10978eda14cbcSMatt Macy EXPORT_SYMBOL(spa_prop_set);
10979eda14cbcSMatt Macy EXPORT_SYMBOL(spa_prop_get);
10980eda14cbcSMatt Macy EXPORT_SYMBOL(spa_prop_clear_bootfs);
10981eda14cbcSMatt Macy 
10982eda14cbcSMatt Macy /* asynchronous event notification */
10983eda14cbcSMatt Macy EXPORT_SYMBOL(spa_event_notify);
10984eda14cbcSMatt Macy 
10985b2526e8bSMartin Matuska ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, preload_pct, UINT, ZMOD_RW,
10986b2526e8bSMartin Matuska 	"Percentage of CPUs to run a metaslab preload taskq");
10987b2526e8bSMartin Matuska 
10988eda14cbcSMatt Macy /* BEGIN CSTYLED */
10989be181ee2SMartin Matuska ZFS_MODULE_PARAM(zfs_spa, spa_, load_verify_shift, UINT, ZMOD_RW,
1099016038816SMartin Matuska 	"log2 fraction of arc that can be used by inflight I/Os when "
10991eda14cbcSMatt Macy 	"verifying pool during import");
10992c03c5b1cSMartin Matuska /* END CSTYLED */
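
/*
 * Worked example of the shift semantics above (illustrative; the default
 * value and the exact cap are assumptions based on spa_load_verify()):
 * with spa_load_verify_shift = 4, in-flight verification I/O during import
 * is limited to roughly arc_target >> 4, i.e. 1/16 of the ARC target size.
 */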
10993eda14cbcSMatt Macy 
10994eda14cbcSMatt Macy ZFS_MODULE_PARAM(zfs_spa, spa_, load_verify_metadata, INT, ZMOD_RW,
10995eda14cbcSMatt Macy 	"Set to traverse metadata on pool import");
10996eda14cbcSMatt Macy 
10997eda14cbcSMatt Macy ZFS_MODULE_PARAM(zfs_spa, spa_, load_verify_data, INT, ZMOD_RW,
10998eda14cbcSMatt Macy 	"Set to traverse data on pool import");
10999eda14cbcSMatt Macy 
11000eda14cbcSMatt Macy ZFS_MODULE_PARAM(zfs_spa, spa_, load_print_vdev_tree, INT, ZMOD_RW,
11001eda14cbcSMatt Macy 	"Print vdev tree to zfs_dbgmsg during pool import");
11002eda14cbcSMatt Macy 
11003b985c9caSMartin Matuska ZFS_MODULE_PARAM(zfs_zio, zio_, taskq_batch_pct, UINT, ZMOD_RW,
11004eda14cbcSMatt Macy 	"Percentage of CPUs to run an IO worker thread");
11005eda14cbcSMatt Macy 
11006b985c9caSMartin Matuska ZFS_MODULE_PARAM(zfs_zio, zio_, taskq_batch_tpq, UINT, ZMOD_RW,
1100716038816SMartin Matuska 	"Number of threads per IO worker taskqueue");
1100816038816SMartin Matuska 
11009c03c5b1cSMartin Matuska /* BEGIN CSTYLED */
11010dbd5678dSMartin Matuska ZFS_MODULE_PARAM(zfs, zfs_, max_missing_tvds, U64, ZMOD_RW,
11011eda14cbcSMatt Macy 	"Allow importing pool with up to this number of missing top-level "
11012eda14cbcSMatt Macy 	"vdevs (in read-only mode)");
11013c03c5b1cSMartin Matuska /* END CSTYLED */
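
/*
 * Usage note (an assumption about the platform glue, not something defined
 * in this file): ZFS_MODULE_PARAM tunables such as zfs_max_missing_tvds are
 * exposed as ordinary module parameters on Linux and as sysctls on FreeBSD,
 * so a read-only import with one missing top-level vdev might be attempted
 * after something like:
 *
 *	echo 1 > /sys/module/zfs/parameters/zfs_max_missing_tvds	(Linux)
 *	sysctl vfs.zfs.max_missing_tvds=1				(FreeBSD)
 */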
11014eda14cbcSMatt Macy 
11015c03c5b1cSMartin Matuska ZFS_MODULE_PARAM(zfs_livelist_condense, zfs_livelist_condense_, zthr_pause, INT,
11016c03c5b1cSMartin Matuska 	ZMOD_RW, "Set the livelist condense zthr to pause");
11017eda14cbcSMatt Macy 
11018c03c5b1cSMartin Matuska ZFS_MODULE_PARAM(zfs_livelist_condense, zfs_livelist_condense_, sync_pause, INT,
11019c03c5b1cSMartin Matuska 	ZMOD_RW, "Set the livelist condense synctask to pause");
11020eda14cbcSMatt Macy 
11021c03c5b1cSMartin Matuska /* BEGIN CSTYLED */
11022c03c5b1cSMartin Matuska ZFS_MODULE_PARAM(zfs_livelist_condense, zfs_livelist_condense_, sync_cancel,
11023c03c5b1cSMartin Matuska 	INT, ZMOD_RW,
11024eda14cbcSMatt Macy 	"Whether livelist condensing was canceled in the synctask");
11025eda14cbcSMatt Macy 
11026c03c5b1cSMartin Matuska ZFS_MODULE_PARAM(zfs_livelist_condense, zfs_livelist_condense_, zthr_cancel,
11027c03c5b1cSMartin Matuska 	INT, ZMOD_RW,
11028eda14cbcSMatt Macy 	"Whether livelist condensing was canceled in the zthr function");
11029eda14cbcSMatt Macy 
11030c03c5b1cSMartin Matuska ZFS_MODULE_PARAM(zfs_livelist_condense, zfs_livelist_condense_, new_alloc, INT,
11031c03c5b1cSMartin Matuska 	ZMOD_RW,
11032eda14cbcSMatt Macy 	"Whether extra ALLOC blkptrs were added to a livelist entry while it "
11033eda14cbcSMatt Macy 	"was being condensed");
11034b356da80SMartin Matuska 
11035b356da80SMartin Matuska #ifdef _KERNEL
11036b356da80SMartin Matuska ZFS_MODULE_VIRTUAL_PARAM_CALL(zfs_zio, zio_, taskq_read,
11037b985c9caSMartin Matuska 	spa_taskq_read_param_set, spa_taskq_read_param_get, ZMOD_RW,
11038b356da80SMartin Matuska 	"Configure IO queues for read IO");
11039b356da80SMartin Matuska ZFS_MODULE_VIRTUAL_PARAM_CALL(zfs_zio, zio_, taskq_write,
11040b985c9caSMartin Matuska 	spa_taskq_write_param_set, spa_taskq_write_param_get, ZMOD_RW,
11041b356da80SMartin Matuska 	"Configure IO queues for write IO");
11042b356da80SMartin Matuska #endif
11043eda14cbcSMatt Macy /* END CSTYLED */
1104414c2e0a0SMartin Matuska 
11045b985c9caSMartin Matuska ZFS_MODULE_PARAM(zfs_zio, zio_, taskq_write_tpq, UINT, ZMOD_RW,
11046b985c9caSMartin Matuska 	"Number of CPUs per write issue taskq");
11047