xref: /freebsd/sys/contrib/openzfs/module/zfs/spa.c (revision 1603881667360c015f6685131f2f25474fa67a72)
1eda14cbcSMatt Macy /*
2eda14cbcSMatt Macy  * CDDL HEADER START
3eda14cbcSMatt Macy  *
4eda14cbcSMatt Macy  * The contents of this file are subject to the terms of the
5eda14cbcSMatt Macy  * Common Development and Distribution License (the "License").
6eda14cbcSMatt Macy  * You may not use this file except in compliance with the License.
7eda14cbcSMatt Macy  *
8eda14cbcSMatt Macy  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9eda14cbcSMatt Macy  * or http://www.opensolaris.org/os/licensing.
10eda14cbcSMatt Macy  * See the License for the specific language governing permissions
11eda14cbcSMatt Macy  * and limitations under the License.
12eda14cbcSMatt Macy  *
13eda14cbcSMatt Macy  * When distributing Covered Code, include this CDDL HEADER in each
14eda14cbcSMatt Macy  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15eda14cbcSMatt Macy  * If applicable, add the following below this CDDL HEADER, with the
16eda14cbcSMatt Macy  * fields enclosed by brackets "[]" replaced with your own identifying
17eda14cbcSMatt Macy  * information: Portions Copyright [yyyy] [name of copyright owner]
18eda14cbcSMatt Macy  *
19eda14cbcSMatt Macy  * CDDL HEADER END
20eda14cbcSMatt Macy  */
21eda14cbcSMatt Macy 
22eda14cbcSMatt Macy /*
23eda14cbcSMatt Macy  * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
242c48331dSMatt Macy  * Copyright (c) 2011, 2020 by Delphix. All rights reserved.
25eda14cbcSMatt Macy  * Copyright (c) 2018, Nexenta Systems, Inc.  All rights reserved.
26eda14cbcSMatt Macy  * Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
27eda14cbcSMatt Macy  * Copyright 2013 Saso Kiselkov. All rights reserved.
28eda14cbcSMatt Macy  * Copyright (c) 2014 Integros [integros.com]
29eda14cbcSMatt Macy  * Copyright 2016 Toomas Soome <tsoome@me.com>
30eda14cbcSMatt Macy  * Copyright (c) 2016 Actifio, Inc. All rights reserved.
31eda14cbcSMatt Macy  * Copyright 2018 Joyent, Inc.
32eda14cbcSMatt Macy  * Copyright (c) 2017, 2019, Datto Inc. All rights reserved.
33eda14cbcSMatt Macy  * Copyright 2017 Joyent, Inc.
34eda14cbcSMatt Macy  * Copyright (c) 2017, Intel Corporation.
35ee36e25aSMartin Matuska  * Copyright (c) 2021, Colm Buckley <colm@tuatha.org>
36eda14cbcSMatt Macy  */
37eda14cbcSMatt Macy 
38eda14cbcSMatt Macy /*
39eda14cbcSMatt Macy  * SPA: Storage Pool Allocator
40eda14cbcSMatt Macy  *
41eda14cbcSMatt Macy  * This file contains all the routines used when modifying on-disk SPA state.
42eda14cbcSMatt Macy  * This includes opening, importing, destroying, exporting, and syncing a
43eda14cbcSMatt Macy  * pool.
44eda14cbcSMatt Macy  */
45eda14cbcSMatt Macy 
46eda14cbcSMatt Macy #include <sys/zfs_context.h>
47eda14cbcSMatt Macy #include <sys/fm/fs/zfs.h>
48eda14cbcSMatt Macy #include <sys/spa_impl.h>
49eda14cbcSMatt Macy #include <sys/zio.h>
50eda14cbcSMatt Macy #include <sys/zio_checksum.h>
51eda14cbcSMatt Macy #include <sys/dmu.h>
52eda14cbcSMatt Macy #include <sys/dmu_tx.h>
53eda14cbcSMatt Macy #include <sys/zap.h>
54eda14cbcSMatt Macy #include <sys/zil.h>
55eda14cbcSMatt Macy #include <sys/ddt.h>
56eda14cbcSMatt Macy #include <sys/vdev_impl.h>
57eda14cbcSMatt Macy #include <sys/vdev_removal.h>
58eda14cbcSMatt Macy #include <sys/vdev_indirect_mapping.h>
59eda14cbcSMatt Macy #include <sys/vdev_indirect_births.h>
60eda14cbcSMatt Macy #include <sys/vdev_initialize.h>
61eda14cbcSMatt Macy #include <sys/vdev_rebuild.h>
62eda14cbcSMatt Macy #include <sys/vdev_trim.h>
63eda14cbcSMatt Macy #include <sys/vdev_disk.h>
647877fdebSMatt Macy #include <sys/vdev_draid.h>
65eda14cbcSMatt Macy #include <sys/metaslab.h>
66eda14cbcSMatt Macy #include <sys/metaslab_impl.h>
67eda14cbcSMatt Macy #include <sys/mmp.h>
68eda14cbcSMatt Macy #include <sys/uberblock_impl.h>
69eda14cbcSMatt Macy #include <sys/txg.h>
70eda14cbcSMatt Macy #include <sys/avl.h>
71eda14cbcSMatt Macy #include <sys/bpobj.h>
72eda14cbcSMatt Macy #include <sys/dmu_traverse.h>
73eda14cbcSMatt Macy #include <sys/dmu_objset.h>
74eda14cbcSMatt Macy #include <sys/unique.h>
75eda14cbcSMatt Macy #include <sys/dsl_pool.h>
76eda14cbcSMatt Macy #include <sys/dsl_dataset.h>
77eda14cbcSMatt Macy #include <sys/dsl_dir.h>
78eda14cbcSMatt Macy #include <sys/dsl_prop.h>
79eda14cbcSMatt Macy #include <sys/dsl_synctask.h>
80eda14cbcSMatt Macy #include <sys/fs/zfs.h>
81eda14cbcSMatt Macy #include <sys/arc.h>
82eda14cbcSMatt Macy #include <sys/callb.h>
83eda14cbcSMatt Macy #include <sys/systeminfo.h>
84eda14cbcSMatt Macy #include <sys/spa_boot.h>
85eda14cbcSMatt Macy #include <sys/zfs_ioctl.h>
86eda14cbcSMatt Macy #include <sys/dsl_scan.h>
87eda14cbcSMatt Macy #include <sys/zfeature.h>
88eda14cbcSMatt Macy #include <sys/dsl_destroy.h>
89eda14cbcSMatt Macy #include <sys/zvol.h>
90eda14cbcSMatt Macy 
91eda14cbcSMatt Macy #ifdef	_KERNEL
92eda14cbcSMatt Macy #include <sys/fm/protocol.h>
93eda14cbcSMatt Macy #include <sys/fm/util.h>
94eda14cbcSMatt Macy #include <sys/callb.h>
95eda14cbcSMatt Macy #include <sys/zone.h>
96eda14cbcSMatt Macy #include <sys/vmsystm.h>
97eda14cbcSMatt Macy #endif	/* _KERNEL */
98eda14cbcSMatt Macy 
99eda14cbcSMatt Macy #include "zfs_prop.h"
100eda14cbcSMatt Macy #include "zfs_comutil.h"
101eda14cbcSMatt Macy 
102eda14cbcSMatt Macy /*
103eda14cbcSMatt Macy  * The interval, in seconds, at which failed configuration cache file writes
104eda14cbcSMatt Macy  * should be retried.
105eda14cbcSMatt Macy  */
106eda14cbcSMatt Macy int zfs_ccw_retry_interval = 300;
107eda14cbcSMatt Macy 
108eda14cbcSMatt Macy typedef enum zti_modes {
109eda14cbcSMatt Macy 	ZTI_MODE_FIXED,			/* value is # of threads (min 1) */
110eda14cbcSMatt Macy 	ZTI_MODE_BATCH,			/* cpu-intensive; value is ignored */
111*16038816SMartin Matuska 	ZTI_MODE_SCALE,			/* Taskqs scale with CPUs. */
112eda14cbcSMatt Macy 	ZTI_MODE_NULL,			/* don't create a taskq */
113eda14cbcSMatt Macy 	ZTI_NMODES
114eda14cbcSMatt Macy } zti_modes_t;
115eda14cbcSMatt Macy 
116eda14cbcSMatt Macy #define	ZTI_P(n, q)	{ ZTI_MODE_FIXED, (n), (q) }
117eda14cbcSMatt Macy #define	ZTI_PCT(n)	{ ZTI_MODE_ONLINE_PERCENT, (n), 1 } /* note: ZTI_MODE_ONLINE_PERCENT is not defined in zti_modes_t above */
118eda14cbcSMatt Macy #define	ZTI_BATCH	{ ZTI_MODE_BATCH, 0, 1 }
119*16038816SMartin Matuska #define	ZTI_SCALE	{ ZTI_MODE_SCALE, 0, 1 }
120eda14cbcSMatt Macy #define	ZTI_NULL	{ ZTI_MODE_NULL, 0, 0 }
121eda14cbcSMatt Macy 
122eda14cbcSMatt Macy #define	ZTI_N(n)	ZTI_P(n, 1)
123eda14cbcSMatt Macy #define	ZTI_ONE		ZTI_N(1)
124eda14cbcSMatt Macy 
125eda14cbcSMatt Macy typedef struct zio_taskq_info {
126eda14cbcSMatt Macy 	zti_modes_t zti_mode;
127eda14cbcSMatt Macy 	uint_t zti_value;
128eda14cbcSMatt Macy 	uint_t zti_count;
129eda14cbcSMatt Macy } zio_taskq_info_t;
130eda14cbcSMatt Macy 
131eda14cbcSMatt Macy static const char *const zio_taskq_types[ZIO_TASKQ_TYPES] = {
132eda14cbcSMatt Macy 	"iss", "iss_h", "int", "int_h"
133eda14cbcSMatt Macy };
134eda14cbcSMatt Macy 
135eda14cbcSMatt Macy /*
136eda14cbcSMatt Macy  * This table defines the taskq settings for each ZFS I/O type. When
137eda14cbcSMatt Macy  * initializing a pool, we use this table to create an appropriately sized
138eda14cbcSMatt Macy  * taskq. Some operations are low volume and therefore have a small, static
139eda14cbcSMatt Macy  * number of threads assigned to their taskqs using the ZTI_N(#) or ZTI_ONE
140eda14cbcSMatt Macy  * macros. Other operations process a large amount of data; the ZTI_BATCH
141eda14cbcSMatt Macy  * macro causes us to create a taskq oriented for throughput. Some operations
142eda14cbcSMatt Macy  * are so high frequency and short-lived that the taskq itself can become a
143eda14cbcSMatt Macy  * point of lock contention. The ZTI_P(#, #) macro indicates that we need an
144eda14cbcSMatt Macy  * additional degree of parallelism specified by the number of threads per-
145eda14cbcSMatt Macy  * taskq and the number of taskqs; when dispatching an event in this case, the
146*16038816SMartin Matuska  * particular taskq is chosen at random. ZTI_SCALE is similar to ZTI_BATCH,
147*16038816SMartin Matuska  * but the number of taskqs also scales with the number of CPUs.
148eda14cbcSMatt Macy  *
149eda14cbcSMatt Macy  * The different taskq priorities are to handle the different contexts (issue
150eda14cbcSMatt Macy  * and interrupt) and then to reserve threads for ZIO_PRIORITY_NOW I/Os that
151eda14cbcSMatt Macy  * need to be handled with minimum delay.
152eda14cbcSMatt Macy  */
153eda14cbcSMatt Macy const zio_taskq_info_t zio_taskqs[ZIO_TYPES][ZIO_TASKQ_TYPES] = {
154eda14cbcSMatt Macy 	/* ISSUE	ISSUE_HIGH	INTR		INTR_HIGH */
155eda14cbcSMatt Macy 	{ ZTI_ONE,	ZTI_NULL,	ZTI_ONE,	ZTI_NULL }, /* NULL */
156*16038816SMartin Matuska 	{ ZTI_N(8),	ZTI_NULL,	ZTI_SCALE,	ZTI_NULL }, /* READ */
157*16038816SMartin Matuska 	{ ZTI_BATCH,	ZTI_N(5),	ZTI_SCALE,	ZTI_N(5) }, /* WRITE */
158*16038816SMartin Matuska 	{ ZTI_SCALE,	ZTI_NULL,	ZTI_ONE,	ZTI_NULL }, /* FREE */
159eda14cbcSMatt Macy 	{ ZTI_ONE,	ZTI_NULL,	ZTI_ONE,	ZTI_NULL }, /* CLAIM */
160eda14cbcSMatt Macy 	{ ZTI_ONE,	ZTI_NULL,	ZTI_ONE,	ZTI_NULL }, /* IOCTL */
161eda14cbcSMatt Macy 	{ ZTI_N(4),	ZTI_NULL,	ZTI_ONE,	ZTI_NULL }, /* TRIM */
162eda14cbcSMatt Macy };
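/*
 * Illustrative reading of the table above (hypothetical values, not an
 * additional entry): the READ row uses ZTI_N(8) for ISSUE, which expands to
 * { ZTI_MODE_FIXED, 8, 1 }, i.e. a single taskq with eight threads, and
 * ZTI_SCALE for INTR, so both the number of interrupt taskqs and their
 * thread percentage scale with the CPU count (see the ZTI_MODE_SCALE case
 * in spa_taskqs_init() below).  An entry such as ZTI_P(4, 8) would expand
 * to { ZTI_MODE_FIXED, 4, 8 }: eight taskqs of four threads each, with
 * spa_taskq_dispatch_ent() picking one of the eight at random.
 */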
163eda14cbcSMatt Macy 
164eda14cbcSMatt Macy static void spa_sync_version(void *arg, dmu_tx_t *tx);
165eda14cbcSMatt Macy static void spa_sync_props(void *arg, dmu_tx_t *tx);
166eda14cbcSMatt Macy static boolean_t spa_has_active_shared_spare(spa_t *spa);
167eda14cbcSMatt Macy static int spa_load_impl(spa_t *spa, spa_import_type_t type, char **ereport);
168eda14cbcSMatt Macy static void spa_vdev_resilver_done(spa_t *spa);
169eda14cbcSMatt Macy 
170*16038816SMartin Matuska uint_t		zio_taskq_batch_pct = 80;	/* 1 thread per cpu in pset */
171*16038816SMartin Matuska uint_t		zio_taskq_batch_tpq;		/* threads per taskq */
172eda14cbcSMatt Macy boolean_t	zio_taskq_sysdc = B_TRUE;	/* use SDC scheduling class */
173eda14cbcSMatt Macy uint_t		zio_taskq_basedc = 80;		/* base duty cycle */
174eda14cbcSMatt Macy 
175eda14cbcSMatt Macy boolean_t	spa_create_process = B_TRUE;	/* no process ==> no sysdc */
176eda14cbcSMatt Macy 
177eda14cbcSMatt Macy /*
178eda14cbcSMatt Macy  * Report any spa_load_verify errors found, but do not fail spa_load.
179eda14cbcSMatt Macy  * This is used by zdb to analyze non-idle pools.
180eda14cbcSMatt Macy  */
181eda14cbcSMatt Macy boolean_t	spa_load_verify_dryrun = B_FALSE;
182eda14cbcSMatt Macy 
183eda14cbcSMatt Macy /*
184eda14cbcSMatt Macy  * This (illegal) pool name is used when temporarily importing a spa_t in order
185eda14cbcSMatt Macy  * to get the vdev stats associated with the imported devices.
186eda14cbcSMatt Macy  */
187eda14cbcSMatt Macy #define	TRYIMPORT_NAME	"$import"
188eda14cbcSMatt Macy 
189eda14cbcSMatt Macy /*
190eda14cbcSMatt Macy  * For debugging purposes: print out vdev tree during pool import.
191eda14cbcSMatt Macy  */
192eda14cbcSMatt Macy int		spa_load_print_vdev_tree = B_FALSE;
193eda14cbcSMatt Macy 
194eda14cbcSMatt Macy /*
195eda14cbcSMatt Macy  * A non-zero value for zfs_max_missing_tvds means that we allow importing
196eda14cbcSMatt Macy  * pools with missing top-level vdevs. This is strictly intended for advanced
197eda14cbcSMatt Macy  * pool recovery cases since missing data is almost inevitable. Pools with
198eda14cbcSMatt Macy  * missing devices can only be imported read-only for safety reasons, and their
199eda14cbcSMatt Macy  * fail-mode will be automatically set to "continue".
200eda14cbcSMatt Macy  *
201eda14cbcSMatt Macy  * With 1 missing vdev we should be able to import the pool and mount all
202eda14cbcSMatt Macy  * datasets. User data that was not modified after the missing device has been
203eda14cbcSMatt Macy  * added should be recoverable. This means that snapshots created prior to the
204eda14cbcSMatt Macy  * addition of that device should be completely intact.
205eda14cbcSMatt Macy  *
206eda14cbcSMatt Macy  * With 2 missing vdevs, some datasets may fail to mount since there are
207eda14cbcSMatt Macy  * dataset statistics that are stored as regular metadata. Some data might be
208eda14cbcSMatt Macy  * recoverable if those vdevs were added recently.
209eda14cbcSMatt Macy  *
210eda14cbcSMatt Macy  * With 3 or more missing vdevs, the pool is severely damaged and MOS entries
211eda14cbcSMatt Macy  * may be missing entirely. Chances of data recovery are very low. Note that
212eda14cbcSMatt Macy  * there are also risks of performing an inadvertent rewind as we might be
213eda14cbcSMatt Macy  * missing all the vdevs with the latest uberblocks.
214eda14cbcSMatt Macy  */
215eda14cbcSMatt Macy unsigned long	zfs_max_missing_tvds = 0;
216eda14cbcSMatt Macy 
217eda14cbcSMatt Macy /*
218eda14cbcSMatt Macy  * The parameters below are similar to zfs_max_missing_tvds but are only
219eda14cbcSMatt Macy  * intended for a preliminary open of the pool with an untrusted config which
220eda14cbcSMatt Macy  * might be incomplete or out-dated.
221eda14cbcSMatt Macy  *
222eda14cbcSMatt Macy  * We are more tolerant for pools opened from a cachefile since we could have
223eda14cbcSMatt Macy  * an out-dated cachefile where a device removal was not registered.
224eda14cbcSMatt Macy  * We could have set the limit arbitrarily high but in the case where devices
225eda14cbcSMatt Macy  * are really missing we would want to return the proper error codes; we chose
226eda14cbcSMatt Macy  * SPA_DVAS_PER_BP - 1 so that some copies of the MOS would still be available
227eda14cbcSMatt Macy  * and we get a chance to retrieve the trusted config.
228eda14cbcSMatt Macy  */
229eda14cbcSMatt Macy uint64_t	zfs_max_missing_tvds_cachefile = SPA_DVAS_PER_BP - 1;
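/*
 * Arithmetic note (assuming the usual SPA_DVAS_PER_BP value of 3): the
 * default above works out to 2, i.e. up to two missing top-level vdevs are
 * tolerated during a cachefile-based preliminary open, so at least one copy
 * of the MOS blocks should remain reachable and the trusted config can
 * still be retrieved.
 */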
230eda14cbcSMatt Macy 
231eda14cbcSMatt Macy /*
232eda14cbcSMatt Macy  * In the case where config was assembled by scanning device paths (/dev/dsks
233eda14cbcSMatt Macy  * by default) we are less tolerant since all the existing devices should have
234eda14cbcSMatt Macy  * been detected and we want spa_load to return the right error codes.
235eda14cbcSMatt Macy  */
236eda14cbcSMatt Macy uint64_t	zfs_max_missing_tvds_scan = 0;
237eda14cbcSMatt Macy 
238eda14cbcSMatt Macy /*
239eda14cbcSMatt Macy  * Debugging aid that pauses spa_sync() towards the end.
240eda14cbcSMatt Macy  */
241eda14cbcSMatt Macy boolean_t	zfs_pause_spa_sync = B_FALSE;
242eda14cbcSMatt Macy 
243eda14cbcSMatt Macy /*
244eda14cbcSMatt Macy  * Variables to indicate the livelist condense zthr func should wait at certain
245eda14cbcSMatt Macy  * points for the livelist to be removed - used to test condense/destroy races
246eda14cbcSMatt Macy  */
247eda14cbcSMatt Macy int zfs_livelist_condense_zthr_pause = 0;
248eda14cbcSMatt Macy int zfs_livelist_condense_sync_pause = 0;
249eda14cbcSMatt Macy 
250eda14cbcSMatt Macy /*
251eda14cbcSMatt Macy  * Variables to track whether or not condense cancellation has been
252eda14cbcSMatt Macy  * triggered in testing.
253eda14cbcSMatt Macy  */
254eda14cbcSMatt Macy int zfs_livelist_condense_sync_cancel = 0;
255eda14cbcSMatt Macy int zfs_livelist_condense_zthr_cancel = 0;
256eda14cbcSMatt Macy 
257eda14cbcSMatt Macy /*
258eda14cbcSMatt Macy  * Variable to track whether or not extra ALLOC blkptrs were added to a
259eda14cbcSMatt Macy  * livelist entry while it was being condensed (caused by the way we track
260eda14cbcSMatt Macy  * remapped blkptrs in dbuf_remap_impl)
261eda14cbcSMatt Macy  */
262eda14cbcSMatt Macy int zfs_livelist_condense_new_alloc = 0;
263eda14cbcSMatt Macy 
264eda14cbcSMatt Macy /*
265eda14cbcSMatt Macy  * ==========================================================================
266eda14cbcSMatt Macy  * SPA properties routines
267eda14cbcSMatt Macy  * ==========================================================================
268eda14cbcSMatt Macy  */
269eda14cbcSMatt Macy 
270eda14cbcSMatt Macy /*
271eda14cbcSMatt Macy  * Add a (source=src, propname=propval) list to an nvlist.
272eda14cbcSMatt Macy  */
273eda14cbcSMatt Macy static void
274eda14cbcSMatt Macy spa_prop_add_list(nvlist_t *nvl, zpool_prop_t prop, char *strval,
275eda14cbcSMatt Macy     uint64_t intval, zprop_source_t src)
276eda14cbcSMatt Macy {
277eda14cbcSMatt Macy 	const char *propname = zpool_prop_to_name(prop);
278eda14cbcSMatt Macy 	nvlist_t *propval;
279eda14cbcSMatt Macy 
280eda14cbcSMatt Macy 	VERIFY(nvlist_alloc(&propval, NV_UNIQUE_NAME, KM_SLEEP) == 0);
281eda14cbcSMatt Macy 	VERIFY(nvlist_add_uint64(propval, ZPROP_SOURCE, src) == 0);
282eda14cbcSMatt Macy 
283eda14cbcSMatt Macy 	if (strval != NULL)
284eda14cbcSMatt Macy 		VERIFY(nvlist_add_string(propval, ZPROP_VALUE, strval) == 0);
285eda14cbcSMatt Macy 	else
286eda14cbcSMatt Macy 		VERIFY(nvlist_add_uint64(propval, ZPROP_VALUE, intval) == 0);
287eda14cbcSMatt Macy 
288eda14cbcSMatt Macy 	VERIFY(nvlist_add_nvlist(nvl, propname, propval) == 0);
289eda14cbcSMatt Macy 	nvlist_free(propval);
290eda14cbcSMatt Macy }
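/*
 * A minimal sketch of the resulting layout (values are illustrative):
 * adding ZPOOL_PROP_SIZE with intval = 1000 and src = ZPROP_SRC_NONE
 * produces an entry of roughly the form
 *
 *	"size" -> { ZPROP_SOURCE = ZPROP_SRC_NONE, ZPROP_VALUE = 1000 }
 *
 * in the caller-supplied nvlist; spa_prop_get_config() below builds the
 * whole property list this way.
 */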
291eda14cbcSMatt Macy 
292eda14cbcSMatt Macy /*
293eda14cbcSMatt Macy  * Get property values from the spa configuration.
294eda14cbcSMatt Macy  */
295eda14cbcSMatt Macy static void
296eda14cbcSMatt Macy spa_prop_get_config(spa_t *spa, nvlist_t **nvp)
297eda14cbcSMatt Macy {
298eda14cbcSMatt Macy 	vdev_t *rvd = spa->spa_root_vdev;
299eda14cbcSMatt Macy 	dsl_pool_t *pool = spa->spa_dsl_pool;
300eda14cbcSMatt Macy 	uint64_t size, alloc, cap, version;
301eda14cbcSMatt Macy 	const zprop_source_t src = ZPROP_SRC_NONE;
302eda14cbcSMatt Macy 	spa_config_dirent_t *dp;
303eda14cbcSMatt Macy 	metaslab_class_t *mc = spa_normal_class(spa);
304eda14cbcSMatt Macy 
305eda14cbcSMatt Macy 	ASSERT(MUTEX_HELD(&spa->spa_props_lock));
306eda14cbcSMatt Macy 
307eda14cbcSMatt Macy 	if (rvd != NULL) {
308eda14cbcSMatt Macy 		alloc = metaslab_class_get_alloc(mc);
309eda14cbcSMatt Macy 		alloc += metaslab_class_get_alloc(spa_special_class(spa));
310eda14cbcSMatt Macy 		alloc += metaslab_class_get_alloc(spa_dedup_class(spa));
311184c1b94SMartin Matuska 		alloc += metaslab_class_get_alloc(spa_embedded_log_class(spa));
312eda14cbcSMatt Macy 
313eda14cbcSMatt Macy 		size = metaslab_class_get_space(mc);
314eda14cbcSMatt Macy 		size += metaslab_class_get_space(spa_special_class(spa));
315eda14cbcSMatt Macy 		size += metaslab_class_get_space(spa_dedup_class(spa));
316184c1b94SMartin Matuska 		size += metaslab_class_get_space(spa_embedded_log_class(spa));
317eda14cbcSMatt Macy 
318eda14cbcSMatt Macy 		spa_prop_add_list(*nvp, ZPOOL_PROP_NAME, spa_name(spa), 0, src);
319eda14cbcSMatt Macy 		spa_prop_add_list(*nvp, ZPOOL_PROP_SIZE, NULL, size, src);
320eda14cbcSMatt Macy 		spa_prop_add_list(*nvp, ZPOOL_PROP_ALLOCATED, NULL, alloc, src);
321eda14cbcSMatt Macy 		spa_prop_add_list(*nvp, ZPOOL_PROP_FREE, NULL,
322eda14cbcSMatt Macy 		    size - alloc, src);
323eda14cbcSMatt Macy 		spa_prop_add_list(*nvp, ZPOOL_PROP_CHECKPOINT, NULL,
324eda14cbcSMatt Macy 		    spa->spa_checkpoint_info.sci_dspace, src);
325eda14cbcSMatt Macy 
326eda14cbcSMatt Macy 		spa_prop_add_list(*nvp, ZPOOL_PROP_FRAGMENTATION, NULL,
327eda14cbcSMatt Macy 		    metaslab_class_fragmentation(mc), src);
328eda14cbcSMatt Macy 		spa_prop_add_list(*nvp, ZPOOL_PROP_EXPANDSZ, NULL,
329eda14cbcSMatt Macy 		    metaslab_class_expandable_space(mc), src);
330eda14cbcSMatt Macy 		spa_prop_add_list(*nvp, ZPOOL_PROP_READONLY, NULL,
331eda14cbcSMatt Macy 		    (spa_mode(spa) == SPA_MODE_READ), src);
332eda14cbcSMatt Macy 
333eda14cbcSMatt Macy 		cap = (size == 0) ? 0 : (alloc * 100 / size);
334eda14cbcSMatt Macy 		spa_prop_add_list(*nvp, ZPOOL_PROP_CAPACITY, NULL, cap, src);
335eda14cbcSMatt Macy 
336eda14cbcSMatt Macy 		spa_prop_add_list(*nvp, ZPOOL_PROP_DEDUPRATIO, NULL,
337eda14cbcSMatt Macy 		    ddt_get_pool_dedup_ratio(spa), src);
338eda14cbcSMatt Macy 
339eda14cbcSMatt Macy 		spa_prop_add_list(*nvp, ZPOOL_PROP_HEALTH, NULL,
340eda14cbcSMatt Macy 		    rvd->vdev_state, src);
341eda14cbcSMatt Macy 
342eda14cbcSMatt Macy 		version = spa_version(spa);
343eda14cbcSMatt Macy 		if (version == zpool_prop_default_numeric(ZPOOL_PROP_VERSION)) {
344eda14cbcSMatt Macy 			spa_prop_add_list(*nvp, ZPOOL_PROP_VERSION, NULL,
345eda14cbcSMatt Macy 			    version, ZPROP_SRC_DEFAULT);
346eda14cbcSMatt Macy 		} else {
347eda14cbcSMatt Macy 			spa_prop_add_list(*nvp, ZPOOL_PROP_VERSION, NULL,
348eda14cbcSMatt Macy 			    version, ZPROP_SRC_LOCAL);
349eda14cbcSMatt Macy 		}
350eda14cbcSMatt Macy 		spa_prop_add_list(*nvp, ZPOOL_PROP_LOAD_GUID,
351eda14cbcSMatt Macy 		    NULL, spa_load_guid(spa), src);
352eda14cbcSMatt Macy 	}
353eda14cbcSMatt Macy 
354eda14cbcSMatt Macy 	if (pool != NULL) {
355eda14cbcSMatt Macy 		/*
356eda14cbcSMatt Macy 		 * The $FREE directory was introduced in SPA_VERSION_DEADLISTS;
357eda14cbcSMatt Macy 		 * when opening pools created before this version, freedir will be NULL.
358eda14cbcSMatt Macy 		 */
359eda14cbcSMatt Macy 		if (pool->dp_free_dir != NULL) {
360eda14cbcSMatt Macy 			spa_prop_add_list(*nvp, ZPOOL_PROP_FREEING, NULL,
361eda14cbcSMatt Macy 			    dsl_dir_phys(pool->dp_free_dir)->dd_used_bytes,
362eda14cbcSMatt Macy 			    src);
363eda14cbcSMatt Macy 		} else {
364eda14cbcSMatt Macy 			spa_prop_add_list(*nvp, ZPOOL_PROP_FREEING,
365eda14cbcSMatt Macy 			    NULL, 0, src);
366eda14cbcSMatt Macy 		}
367eda14cbcSMatt Macy 
368eda14cbcSMatt Macy 		if (pool->dp_leak_dir != NULL) {
369eda14cbcSMatt Macy 			spa_prop_add_list(*nvp, ZPOOL_PROP_LEAKED, NULL,
370eda14cbcSMatt Macy 			    dsl_dir_phys(pool->dp_leak_dir)->dd_used_bytes,
371eda14cbcSMatt Macy 			    src);
372eda14cbcSMatt Macy 		} else {
373eda14cbcSMatt Macy 			spa_prop_add_list(*nvp, ZPOOL_PROP_LEAKED,
374eda14cbcSMatt Macy 			    NULL, 0, src);
375eda14cbcSMatt Macy 		}
376eda14cbcSMatt Macy 	}
377eda14cbcSMatt Macy 
378eda14cbcSMatt Macy 	spa_prop_add_list(*nvp, ZPOOL_PROP_GUID, NULL, spa_guid(spa), src);
379eda14cbcSMatt Macy 
380eda14cbcSMatt Macy 	if (spa->spa_comment != NULL) {
381eda14cbcSMatt Macy 		spa_prop_add_list(*nvp, ZPOOL_PROP_COMMENT, spa->spa_comment,
382eda14cbcSMatt Macy 		    0, ZPROP_SRC_LOCAL);
383eda14cbcSMatt Macy 	}
384eda14cbcSMatt Macy 
385ee36e25aSMartin Matuska 	if (spa->spa_compatibility != NULL) {
386ee36e25aSMartin Matuska 		spa_prop_add_list(*nvp, ZPOOL_PROP_COMPATIBILITY,
387ee36e25aSMartin Matuska 		    spa->spa_compatibility, 0, ZPROP_SRC_LOCAL);
388ee36e25aSMartin Matuska 	}
389ee36e25aSMartin Matuska 
390eda14cbcSMatt Macy 	if (spa->spa_root != NULL)
391eda14cbcSMatt Macy 		spa_prop_add_list(*nvp, ZPOOL_PROP_ALTROOT, spa->spa_root,
392eda14cbcSMatt Macy 		    0, ZPROP_SRC_LOCAL);
393eda14cbcSMatt Macy 
394eda14cbcSMatt Macy 	if (spa_feature_is_enabled(spa, SPA_FEATURE_LARGE_BLOCKS)) {
395eda14cbcSMatt Macy 		spa_prop_add_list(*nvp, ZPOOL_PROP_MAXBLOCKSIZE, NULL,
396eda14cbcSMatt Macy 		    MIN(zfs_max_recordsize, SPA_MAXBLOCKSIZE), ZPROP_SRC_NONE);
397eda14cbcSMatt Macy 	} else {
398eda14cbcSMatt Macy 		spa_prop_add_list(*nvp, ZPOOL_PROP_MAXBLOCKSIZE, NULL,
399eda14cbcSMatt Macy 		    SPA_OLD_MAXBLOCKSIZE, ZPROP_SRC_NONE);
400eda14cbcSMatt Macy 	}
401eda14cbcSMatt Macy 
402eda14cbcSMatt Macy 	if (spa_feature_is_enabled(spa, SPA_FEATURE_LARGE_DNODE)) {
403eda14cbcSMatt Macy 		spa_prop_add_list(*nvp, ZPOOL_PROP_MAXDNODESIZE, NULL,
404eda14cbcSMatt Macy 		    DNODE_MAX_SIZE, ZPROP_SRC_NONE);
405eda14cbcSMatt Macy 	} else {
406eda14cbcSMatt Macy 		spa_prop_add_list(*nvp, ZPOOL_PROP_MAXDNODESIZE, NULL,
407eda14cbcSMatt Macy 		    DNODE_MIN_SIZE, ZPROP_SRC_NONE);
408eda14cbcSMatt Macy 	}
409eda14cbcSMatt Macy 
410eda14cbcSMatt Macy 	if ((dp = list_head(&spa->spa_config_list)) != NULL) {
411eda14cbcSMatt Macy 		if (dp->scd_path == NULL) {
412eda14cbcSMatt Macy 			spa_prop_add_list(*nvp, ZPOOL_PROP_CACHEFILE,
413eda14cbcSMatt Macy 			    "none", 0, ZPROP_SRC_LOCAL);
414eda14cbcSMatt Macy 		} else if (strcmp(dp->scd_path, spa_config_path) != 0) {
415eda14cbcSMatt Macy 			spa_prop_add_list(*nvp, ZPOOL_PROP_CACHEFILE,
416eda14cbcSMatt Macy 			    dp->scd_path, 0, ZPROP_SRC_LOCAL);
417eda14cbcSMatt Macy 		}
418eda14cbcSMatt Macy 	}
419eda14cbcSMatt Macy }
420eda14cbcSMatt Macy 
421eda14cbcSMatt Macy /*
422eda14cbcSMatt Macy  * Get zpool property values.
423eda14cbcSMatt Macy  */
424eda14cbcSMatt Macy int
425eda14cbcSMatt Macy spa_prop_get(spa_t *spa, nvlist_t **nvp)
426eda14cbcSMatt Macy {
427eda14cbcSMatt Macy 	objset_t *mos = spa->spa_meta_objset;
428eda14cbcSMatt Macy 	zap_cursor_t zc;
429eda14cbcSMatt Macy 	zap_attribute_t za;
430eda14cbcSMatt Macy 	dsl_pool_t *dp;
431eda14cbcSMatt Macy 	int err;
432eda14cbcSMatt Macy 
433eda14cbcSMatt Macy 	err = nvlist_alloc(nvp, NV_UNIQUE_NAME, KM_SLEEP);
434eda14cbcSMatt Macy 	if (err)
435eda14cbcSMatt Macy 		return (err);
436eda14cbcSMatt Macy 
437eda14cbcSMatt Macy 	dp = spa_get_dsl(spa);
438eda14cbcSMatt Macy 	dsl_pool_config_enter(dp, FTAG);
439eda14cbcSMatt Macy 	mutex_enter(&spa->spa_props_lock);
440eda14cbcSMatt Macy 
441eda14cbcSMatt Macy 	/*
442eda14cbcSMatt Macy 	 * Get properties from the spa config.
443eda14cbcSMatt Macy 	 */
444eda14cbcSMatt Macy 	spa_prop_get_config(spa, nvp);
445eda14cbcSMatt Macy 
446eda14cbcSMatt Macy 	/* If no pool property object, no more prop to get. */
447eda14cbcSMatt Macy 	if (mos == NULL || spa->spa_pool_props_object == 0)
448eda14cbcSMatt Macy 		goto out;
449eda14cbcSMatt Macy 
450eda14cbcSMatt Macy 	/*
451eda14cbcSMatt Macy 	 * Get properties from the MOS pool property object.
452eda14cbcSMatt Macy 	 */
453eda14cbcSMatt Macy 	for (zap_cursor_init(&zc, mos, spa->spa_pool_props_object);
454eda14cbcSMatt Macy 	    (err = zap_cursor_retrieve(&zc, &za)) == 0;
455eda14cbcSMatt Macy 	    zap_cursor_advance(&zc)) {
456eda14cbcSMatt Macy 		uint64_t intval = 0;
457eda14cbcSMatt Macy 		char *strval = NULL;
458eda14cbcSMatt Macy 		zprop_source_t src = ZPROP_SRC_DEFAULT;
459eda14cbcSMatt Macy 		zpool_prop_t prop;
460eda14cbcSMatt Macy 
461eda14cbcSMatt Macy 		if ((prop = zpool_name_to_prop(za.za_name)) == ZPOOL_PROP_INVAL)
462eda14cbcSMatt Macy 			continue;
463eda14cbcSMatt Macy 
464eda14cbcSMatt Macy 		switch (za.za_integer_length) {
465eda14cbcSMatt Macy 		case 8:
466eda14cbcSMatt Macy 			/* integer property */
467eda14cbcSMatt Macy 			if (za.za_first_integer !=
468eda14cbcSMatt Macy 			    zpool_prop_default_numeric(prop))
469eda14cbcSMatt Macy 				src = ZPROP_SRC_LOCAL;
470eda14cbcSMatt Macy 
471eda14cbcSMatt Macy 			if (prop == ZPOOL_PROP_BOOTFS) {
472eda14cbcSMatt Macy 				dsl_dataset_t *ds = NULL;
473eda14cbcSMatt Macy 
474eda14cbcSMatt Macy 				err = dsl_dataset_hold_obj(dp,
475eda14cbcSMatt Macy 				    za.za_first_integer, FTAG, &ds);
476eda14cbcSMatt Macy 				if (err != 0)
477eda14cbcSMatt Macy 					break;
478eda14cbcSMatt Macy 
479eda14cbcSMatt Macy 				strval = kmem_alloc(ZFS_MAX_DATASET_NAME_LEN,
480eda14cbcSMatt Macy 				    KM_SLEEP);
481eda14cbcSMatt Macy 				dsl_dataset_name(ds, strval);
482eda14cbcSMatt Macy 				dsl_dataset_rele(ds, FTAG);
483eda14cbcSMatt Macy 			} else {
484eda14cbcSMatt Macy 				strval = NULL;
485eda14cbcSMatt Macy 				intval = za.za_first_integer;
486eda14cbcSMatt Macy 			}
487eda14cbcSMatt Macy 
488eda14cbcSMatt Macy 			spa_prop_add_list(*nvp, prop, strval, intval, src);
489eda14cbcSMatt Macy 
490eda14cbcSMatt Macy 			if (strval != NULL)
491eda14cbcSMatt Macy 				kmem_free(strval, ZFS_MAX_DATASET_NAME_LEN);
492eda14cbcSMatt Macy 
493eda14cbcSMatt Macy 			break;
494eda14cbcSMatt Macy 
495eda14cbcSMatt Macy 		case 1:
496eda14cbcSMatt Macy 			/* string property */
497eda14cbcSMatt Macy 			strval = kmem_alloc(za.za_num_integers, KM_SLEEP);
498eda14cbcSMatt Macy 			err = zap_lookup(mos, spa->spa_pool_props_object,
499eda14cbcSMatt Macy 			    za.za_name, 1, za.za_num_integers, strval);
500eda14cbcSMatt Macy 			if (err) {
501eda14cbcSMatt Macy 				kmem_free(strval, za.za_num_integers);
502eda14cbcSMatt Macy 				break;
503eda14cbcSMatt Macy 			}
504eda14cbcSMatt Macy 			spa_prop_add_list(*nvp, prop, strval, 0, src);
505eda14cbcSMatt Macy 			kmem_free(strval, za.za_num_integers);
506eda14cbcSMatt Macy 			break;
507eda14cbcSMatt Macy 
508eda14cbcSMatt Macy 		default:
509eda14cbcSMatt Macy 			break;
510eda14cbcSMatt Macy 		}
511eda14cbcSMatt Macy 	}
512eda14cbcSMatt Macy 	zap_cursor_fini(&zc);
513eda14cbcSMatt Macy out:
514eda14cbcSMatt Macy 	mutex_exit(&spa->spa_props_lock);
515eda14cbcSMatt Macy 	dsl_pool_config_exit(dp, FTAG);
516eda14cbcSMatt Macy 	if (err && err != ENOENT) {
517eda14cbcSMatt Macy 		nvlist_free(*nvp);
518eda14cbcSMatt Macy 		*nvp = NULL;
519eda14cbcSMatt Macy 		return (err);
520eda14cbcSMatt Macy 	}
521eda14cbcSMatt Macy 
522eda14cbcSMatt Macy 	return (0);
523eda14cbcSMatt Macy }
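/*
 * A minimal usage sketch (hypothetical caller):
 *
 *	nvlist_t *props = NULL;
 *	if (spa_prop_get(spa, &props) == 0) {
 *		... inspect the returned properties ...
 *		nvlist_free(props);
 *	}
 *
 * On success the caller owns and must free the nvlist.  Note that ENOENT
 * from the ZAP cursor simply marks the end of iteration and is treated as
 * success; any other error frees *nvp and returns the error as above.
 */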
524eda14cbcSMatt Macy 
525eda14cbcSMatt Macy /*
526eda14cbcSMatt Macy  * Validate the given pool properties nvlist and modify the list
527eda14cbcSMatt Macy  * for the property values to be set.
528eda14cbcSMatt Macy  */
529eda14cbcSMatt Macy static int
530eda14cbcSMatt Macy spa_prop_validate(spa_t *spa, nvlist_t *props)
531eda14cbcSMatt Macy {
532eda14cbcSMatt Macy 	nvpair_t *elem;
533eda14cbcSMatt Macy 	int error = 0, reset_bootfs = 0;
534eda14cbcSMatt Macy 	uint64_t objnum = 0;
535eda14cbcSMatt Macy 	boolean_t has_feature = B_FALSE;
536eda14cbcSMatt Macy 
537eda14cbcSMatt Macy 	elem = NULL;
538eda14cbcSMatt Macy 	while ((elem = nvlist_next_nvpair(props, elem)) != NULL) {
539eda14cbcSMatt Macy 		uint64_t intval;
540eda14cbcSMatt Macy 		char *strval, *slash, *check, *fname;
541eda14cbcSMatt Macy 		const char *propname = nvpair_name(elem);
542eda14cbcSMatt Macy 		zpool_prop_t prop = zpool_name_to_prop(propname);
543eda14cbcSMatt Macy 
544eda14cbcSMatt Macy 		switch (prop) {
545eda14cbcSMatt Macy 		case ZPOOL_PROP_INVAL:
546eda14cbcSMatt Macy 			if (!zpool_prop_feature(propname)) {
547eda14cbcSMatt Macy 				error = SET_ERROR(EINVAL);
548eda14cbcSMatt Macy 				break;
549eda14cbcSMatt Macy 			}
550eda14cbcSMatt Macy 
551eda14cbcSMatt Macy 			/*
552eda14cbcSMatt Macy 			 * Sanitize the input.
553eda14cbcSMatt Macy 			 */
554eda14cbcSMatt Macy 			if (nvpair_type(elem) != DATA_TYPE_UINT64) {
555eda14cbcSMatt Macy 				error = SET_ERROR(EINVAL);
556eda14cbcSMatt Macy 				break;
557eda14cbcSMatt Macy 			}
558eda14cbcSMatt Macy 
559eda14cbcSMatt Macy 			if (nvpair_value_uint64(elem, &intval) != 0) {
560eda14cbcSMatt Macy 				error = SET_ERROR(EINVAL);
561eda14cbcSMatt Macy 				break;
562eda14cbcSMatt Macy 			}
563eda14cbcSMatt Macy 
564eda14cbcSMatt Macy 			if (intval != 0) {
565eda14cbcSMatt Macy 				error = SET_ERROR(EINVAL);
566eda14cbcSMatt Macy 				break;
567eda14cbcSMatt Macy 			}
568eda14cbcSMatt Macy 
569eda14cbcSMatt Macy 			fname = strchr(propname, '@') + 1;
570eda14cbcSMatt Macy 			if (zfeature_lookup_name(fname, NULL) != 0) {
571eda14cbcSMatt Macy 				error = SET_ERROR(EINVAL);
572eda14cbcSMatt Macy 				break;
573eda14cbcSMatt Macy 			}
574eda14cbcSMatt Macy 
575eda14cbcSMatt Macy 			has_feature = B_TRUE;
576eda14cbcSMatt Macy 			break;
577eda14cbcSMatt Macy 
578eda14cbcSMatt Macy 		case ZPOOL_PROP_VERSION:
579eda14cbcSMatt Macy 			error = nvpair_value_uint64(elem, &intval);
580eda14cbcSMatt Macy 			if (!error &&
581eda14cbcSMatt Macy 			    (intval < spa_version(spa) ||
582eda14cbcSMatt Macy 			    intval > SPA_VERSION_BEFORE_FEATURES ||
583eda14cbcSMatt Macy 			    has_feature))
584eda14cbcSMatt Macy 				error = SET_ERROR(EINVAL);
585eda14cbcSMatt Macy 			break;
586eda14cbcSMatt Macy 
587eda14cbcSMatt Macy 		case ZPOOL_PROP_DELEGATION:
588eda14cbcSMatt Macy 		case ZPOOL_PROP_AUTOREPLACE:
589eda14cbcSMatt Macy 		case ZPOOL_PROP_LISTSNAPS:
590eda14cbcSMatt Macy 		case ZPOOL_PROP_AUTOEXPAND:
591eda14cbcSMatt Macy 		case ZPOOL_PROP_AUTOTRIM:
592eda14cbcSMatt Macy 			error = nvpair_value_uint64(elem, &intval);
593eda14cbcSMatt Macy 			if (!error && intval > 1)
594eda14cbcSMatt Macy 				error = SET_ERROR(EINVAL);
595eda14cbcSMatt Macy 			break;
596eda14cbcSMatt Macy 
597eda14cbcSMatt Macy 		case ZPOOL_PROP_MULTIHOST:
598eda14cbcSMatt Macy 			error = nvpair_value_uint64(elem, &intval);
599eda14cbcSMatt Macy 			if (!error && intval > 1)
600eda14cbcSMatt Macy 				error = SET_ERROR(EINVAL);
601eda14cbcSMatt Macy 
602eda14cbcSMatt Macy 			if (!error) {
603eda14cbcSMatt Macy 				uint32_t hostid = zone_get_hostid(NULL);
604eda14cbcSMatt Macy 				if (hostid)
605eda14cbcSMatt Macy 					spa->spa_hostid = hostid;
606eda14cbcSMatt Macy 				else
607eda14cbcSMatt Macy 					error = SET_ERROR(ENOTSUP);
608eda14cbcSMatt Macy 			}
609eda14cbcSMatt Macy 
610eda14cbcSMatt Macy 			break;
611eda14cbcSMatt Macy 
612eda14cbcSMatt Macy 		case ZPOOL_PROP_BOOTFS:
613eda14cbcSMatt Macy 			/*
614eda14cbcSMatt Macy 			 * If the pool version is less than SPA_VERSION_BOOTFS,
615eda14cbcSMatt Macy 			 * or the pool is still being created (version == 0),
616eda14cbcSMatt Macy 			 * the bootfs property cannot be set.
617eda14cbcSMatt Macy 			 */
618eda14cbcSMatt Macy 			if (spa_version(spa) < SPA_VERSION_BOOTFS) {
619eda14cbcSMatt Macy 				error = SET_ERROR(ENOTSUP);
620eda14cbcSMatt Macy 				break;
621eda14cbcSMatt Macy 			}
622eda14cbcSMatt Macy 
623eda14cbcSMatt Macy 			/*
624eda14cbcSMatt Macy 			 * Make sure the vdev config is bootable
625eda14cbcSMatt Macy 			 */
626eda14cbcSMatt Macy 			if (!vdev_is_bootable(spa->spa_root_vdev)) {
627eda14cbcSMatt Macy 				error = SET_ERROR(ENOTSUP);
628eda14cbcSMatt Macy 				break;
629eda14cbcSMatt Macy 			}
630eda14cbcSMatt Macy 
631eda14cbcSMatt Macy 			reset_bootfs = 1;
632eda14cbcSMatt Macy 
633eda14cbcSMatt Macy 			error = nvpair_value_string(elem, &strval);
634eda14cbcSMatt Macy 
635eda14cbcSMatt Macy 			if (!error) {
636eda14cbcSMatt Macy 				objset_t *os;
637eda14cbcSMatt Macy 
638eda14cbcSMatt Macy 				if (strval == NULL || strval[0] == '\0') {
639eda14cbcSMatt Macy 					objnum = zpool_prop_default_numeric(
640eda14cbcSMatt Macy 					    ZPOOL_PROP_BOOTFS);
641eda14cbcSMatt Macy 					break;
642eda14cbcSMatt Macy 				}
643eda14cbcSMatt Macy 
644eda14cbcSMatt Macy 				error = dmu_objset_hold(strval, FTAG, &os);
645eda14cbcSMatt Macy 				if (error != 0)
646eda14cbcSMatt Macy 					break;
647eda14cbcSMatt Macy 
648eda14cbcSMatt Macy 				/* Must be ZPL. */
649eda14cbcSMatt Macy 				if (dmu_objset_type(os) != DMU_OST_ZFS) {
650eda14cbcSMatt Macy 					error = SET_ERROR(ENOTSUP);
651eda14cbcSMatt Macy 				} else {
652eda14cbcSMatt Macy 					objnum = dmu_objset_id(os);
653eda14cbcSMatt Macy 				}
654eda14cbcSMatt Macy 				dmu_objset_rele(os, FTAG);
655eda14cbcSMatt Macy 			}
656eda14cbcSMatt Macy 			break;
657eda14cbcSMatt Macy 
658eda14cbcSMatt Macy 		case ZPOOL_PROP_FAILUREMODE:
659eda14cbcSMatt Macy 			error = nvpair_value_uint64(elem, &intval);
660eda14cbcSMatt Macy 			if (!error && intval > ZIO_FAILURE_MODE_PANIC)
661eda14cbcSMatt Macy 				error = SET_ERROR(EINVAL);
662eda14cbcSMatt Macy 
663eda14cbcSMatt Macy 			/*
664eda14cbcSMatt Macy 			 * This is a special case which only occurs when
665eda14cbcSMatt Macy 			 * the pool has completely failed. This allows
666eda14cbcSMatt Macy 			 * the user to change the in-core failmode property
667eda14cbcSMatt Macy 			 * without syncing it out to disk (I/Os might
668eda14cbcSMatt Macy 			 * currently be blocked). We do this by returning
669eda14cbcSMatt Macy 			 * EIO to the caller (spa_prop_set) to trick it
670eda14cbcSMatt Macy 			 * into thinking we encountered a property validation
671eda14cbcSMatt Macy 			 * error.
672eda14cbcSMatt Macy 			 */
673eda14cbcSMatt Macy 			if (!error && spa_suspended(spa)) {
674eda14cbcSMatt Macy 				spa->spa_failmode = intval;
675eda14cbcSMatt Macy 				error = SET_ERROR(EIO);
676eda14cbcSMatt Macy 			}
677eda14cbcSMatt Macy 			break;
678eda14cbcSMatt Macy 
679eda14cbcSMatt Macy 		case ZPOOL_PROP_CACHEFILE:
680eda14cbcSMatt Macy 			if ((error = nvpair_value_string(elem, &strval)) != 0)
681eda14cbcSMatt Macy 				break;
682eda14cbcSMatt Macy 
683eda14cbcSMatt Macy 			if (strval[0] == '\0')
684eda14cbcSMatt Macy 				break;
685eda14cbcSMatt Macy 
686eda14cbcSMatt Macy 			if (strcmp(strval, "none") == 0)
687eda14cbcSMatt Macy 				break;
688eda14cbcSMatt Macy 
689eda14cbcSMatt Macy 			if (strval[0] != '/') {
690eda14cbcSMatt Macy 				error = SET_ERROR(EINVAL);
691eda14cbcSMatt Macy 				break;
692eda14cbcSMatt Macy 			}
693eda14cbcSMatt Macy 
694eda14cbcSMatt Macy 			slash = strrchr(strval, '/');
695eda14cbcSMatt Macy 			ASSERT(slash != NULL);
696eda14cbcSMatt Macy 
697eda14cbcSMatt Macy 			if (slash[1] == '\0' || strcmp(slash, "/.") == 0 ||
698eda14cbcSMatt Macy 			    strcmp(slash, "/..") == 0)
699eda14cbcSMatt Macy 				error = SET_ERROR(EINVAL);
700eda14cbcSMatt Macy 			break;
701eda14cbcSMatt Macy 
702eda14cbcSMatt Macy 		case ZPOOL_PROP_COMMENT:
703eda14cbcSMatt Macy 			if ((error = nvpair_value_string(elem, &strval)) != 0)
704eda14cbcSMatt Macy 				break;
705eda14cbcSMatt Macy 			for (check = strval; *check != '\0'; check++) {
706eda14cbcSMatt Macy 				if (!isprint(*check)) {
707eda14cbcSMatt Macy 					error = SET_ERROR(EINVAL);
708eda14cbcSMatt Macy 					break;
709eda14cbcSMatt Macy 				}
710eda14cbcSMatt Macy 			}
711eda14cbcSMatt Macy 			if (strlen(strval) > ZPROP_MAX_COMMENT)
712eda14cbcSMatt Macy 				error = SET_ERROR(E2BIG);
713eda14cbcSMatt Macy 			break;
714eda14cbcSMatt Macy 
715eda14cbcSMatt Macy 		default:
716eda14cbcSMatt Macy 			break;
717eda14cbcSMatt Macy 		}
718eda14cbcSMatt Macy 
719eda14cbcSMatt Macy 		if (error)
720eda14cbcSMatt Macy 			break;
721eda14cbcSMatt Macy 	}
722eda14cbcSMatt Macy 
723eda14cbcSMatt Macy 	(void) nvlist_remove_all(props,
724eda14cbcSMatt Macy 	    zpool_prop_to_name(ZPOOL_PROP_DEDUPDITTO));
725eda14cbcSMatt Macy 
726eda14cbcSMatt Macy 	if (!error && reset_bootfs) {
727eda14cbcSMatt Macy 		error = nvlist_remove(props,
728eda14cbcSMatt Macy 		    zpool_prop_to_name(ZPOOL_PROP_BOOTFS), DATA_TYPE_STRING);
729eda14cbcSMatt Macy 
730eda14cbcSMatt Macy 		if (!error) {
731eda14cbcSMatt Macy 			error = nvlist_add_uint64(props,
732eda14cbcSMatt Macy 			    zpool_prop_to_name(ZPOOL_PROP_BOOTFS), objnum);
733eda14cbcSMatt Macy 		}
734eda14cbcSMatt Macy 	}
735eda14cbcSMatt Macy 
736eda14cbcSMatt Macy 	return (error);
737eda14cbcSMatt Macy }
738eda14cbcSMatt Macy 
739eda14cbcSMatt Macy void
740eda14cbcSMatt Macy spa_configfile_set(spa_t *spa, nvlist_t *nvp, boolean_t need_sync)
741eda14cbcSMatt Macy {
742eda14cbcSMatt Macy 	char *cachefile;
743eda14cbcSMatt Macy 	spa_config_dirent_t *dp;
744eda14cbcSMatt Macy 
745eda14cbcSMatt Macy 	if (nvlist_lookup_string(nvp, zpool_prop_to_name(ZPOOL_PROP_CACHEFILE),
746eda14cbcSMatt Macy 	    &cachefile) != 0)
747eda14cbcSMatt Macy 		return;
748eda14cbcSMatt Macy 
749eda14cbcSMatt Macy 	dp = kmem_alloc(sizeof (spa_config_dirent_t),
750eda14cbcSMatt Macy 	    KM_SLEEP);
751eda14cbcSMatt Macy 
752eda14cbcSMatt Macy 	if (cachefile[0] == '\0')
753eda14cbcSMatt Macy 		dp->scd_path = spa_strdup(spa_config_path);
754eda14cbcSMatt Macy 	else if (strcmp(cachefile, "none") == 0)
755eda14cbcSMatt Macy 		dp->scd_path = NULL;
756eda14cbcSMatt Macy 	else
757eda14cbcSMatt Macy 		dp->scd_path = spa_strdup(cachefile);
758eda14cbcSMatt Macy 
759eda14cbcSMatt Macy 	list_insert_head(&spa->spa_config_list, dp);
760eda14cbcSMatt Macy 	if (need_sync)
761eda14cbcSMatt Macy 		spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE);
762eda14cbcSMatt Macy }
763eda14cbcSMatt Macy 
764eda14cbcSMatt Macy int
765eda14cbcSMatt Macy spa_prop_set(spa_t *spa, nvlist_t *nvp)
766eda14cbcSMatt Macy {
767eda14cbcSMatt Macy 	int error;
768eda14cbcSMatt Macy 	nvpair_t *elem = NULL;
769eda14cbcSMatt Macy 	boolean_t need_sync = B_FALSE;
770eda14cbcSMatt Macy 
771eda14cbcSMatt Macy 	if ((error = spa_prop_validate(spa, nvp)) != 0)
772eda14cbcSMatt Macy 		return (error);
773eda14cbcSMatt Macy 
774eda14cbcSMatt Macy 	while ((elem = nvlist_next_nvpair(nvp, elem)) != NULL) {
775eda14cbcSMatt Macy 		zpool_prop_t prop = zpool_name_to_prop(nvpair_name(elem));
776eda14cbcSMatt Macy 
777eda14cbcSMatt Macy 		if (prop == ZPOOL_PROP_CACHEFILE ||
778eda14cbcSMatt Macy 		    prop == ZPOOL_PROP_ALTROOT ||
779eda14cbcSMatt Macy 		    prop == ZPOOL_PROP_READONLY)
780eda14cbcSMatt Macy 			continue;
781eda14cbcSMatt Macy 
782eda14cbcSMatt Macy 		if (prop == ZPOOL_PROP_VERSION || prop == ZPOOL_PROP_INVAL) {
783eda14cbcSMatt Macy 			uint64_t ver;
784eda14cbcSMatt Macy 
785eda14cbcSMatt Macy 			if (prop == ZPOOL_PROP_VERSION) {
786eda14cbcSMatt Macy 				VERIFY(nvpair_value_uint64(elem, &ver) == 0);
787eda14cbcSMatt Macy 			} else {
788eda14cbcSMatt Macy 				ASSERT(zpool_prop_feature(nvpair_name(elem)));
789eda14cbcSMatt Macy 				ver = SPA_VERSION_FEATURES;
790eda14cbcSMatt Macy 				need_sync = B_TRUE;
791eda14cbcSMatt Macy 			}
792eda14cbcSMatt Macy 
793eda14cbcSMatt Macy 			/* Save time if the version is already set. */
794eda14cbcSMatt Macy 			if (ver == spa_version(spa))
795eda14cbcSMatt Macy 				continue;
796eda14cbcSMatt Macy 
797eda14cbcSMatt Macy 			/*
798eda14cbcSMatt Macy 			 * In addition to the pool directory object, we might
799eda14cbcSMatt Macy 			 * create the pool properties object, the features for
800eda14cbcSMatt Macy 			 * read object, the features for write object, or the
801eda14cbcSMatt Macy 			 * feature descriptions object.
802eda14cbcSMatt Macy 			 */
803eda14cbcSMatt Macy 			error = dsl_sync_task(spa->spa_name, NULL,
804eda14cbcSMatt Macy 			    spa_sync_version, &ver,
805eda14cbcSMatt Macy 			    6, ZFS_SPACE_CHECK_RESERVED);
806eda14cbcSMatt Macy 			if (error)
807eda14cbcSMatt Macy 				return (error);
808eda14cbcSMatt Macy 			continue;
809eda14cbcSMatt Macy 		}
810eda14cbcSMatt Macy 
811eda14cbcSMatt Macy 		need_sync = B_TRUE;
812eda14cbcSMatt Macy 		break;
813eda14cbcSMatt Macy 	}
814eda14cbcSMatt Macy 
815eda14cbcSMatt Macy 	if (need_sync) {
816eda14cbcSMatt Macy 		return (dsl_sync_task(spa->spa_name, NULL, spa_sync_props,
817eda14cbcSMatt Macy 		    nvp, 6, ZFS_SPACE_CHECK_RESERVED));
818eda14cbcSMatt Macy 	}
819eda14cbcSMatt Macy 
820eda14cbcSMatt Macy 	return (0);
821eda14cbcSMatt Macy }
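/*
 * A minimal sketch of setting one property (hypothetical caller, using the
 * fnvlist_* convenience wrappers):
 *
 *	nvlist_t *nvp = fnvlist_alloc();
 *	fnvlist_add_uint64(nvp,
 *	    zpool_prop_to_name(ZPOOL_PROP_AUTOTRIM), 1);
 *	error = spa_prop_set(spa, nvp);
 *	fnvlist_free(nvp);
 *
 * Anything that is not handled specially above (cachefile, altroot,
 * readonly, version/features) is pushed through the spa_sync_props() sync
 * task.
 */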
822eda14cbcSMatt Macy 
823eda14cbcSMatt Macy /*
824eda14cbcSMatt Macy  * If the bootfs property value is dsobj, clear it.
825eda14cbcSMatt Macy  */
826eda14cbcSMatt Macy void
827eda14cbcSMatt Macy spa_prop_clear_bootfs(spa_t *spa, uint64_t dsobj, dmu_tx_t *tx)
828eda14cbcSMatt Macy {
829eda14cbcSMatt Macy 	if (spa->spa_bootfs == dsobj && spa->spa_pool_props_object != 0) {
830eda14cbcSMatt Macy 		VERIFY(zap_remove(spa->spa_meta_objset,
831eda14cbcSMatt Macy 		    spa->spa_pool_props_object,
832eda14cbcSMatt Macy 		    zpool_prop_to_name(ZPOOL_PROP_BOOTFS), tx) == 0);
833eda14cbcSMatt Macy 		spa->spa_bootfs = 0;
834eda14cbcSMatt Macy 	}
835eda14cbcSMatt Macy }
836eda14cbcSMatt Macy 
837eda14cbcSMatt Macy /*ARGSUSED*/
838eda14cbcSMatt Macy static int
839eda14cbcSMatt Macy spa_change_guid_check(void *arg, dmu_tx_t *tx)
840eda14cbcSMatt Macy {
841eda14cbcSMatt Macy 	uint64_t *newguid __maybe_unused = arg;
842eda14cbcSMatt Macy 	spa_t *spa = dmu_tx_pool(tx)->dp_spa;
843eda14cbcSMatt Macy 	vdev_t *rvd = spa->spa_root_vdev;
844eda14cbcSMatt Macy 	uint64_t vdev_state;
845eda14cbcSMatt Macy 
846eda14cbcSMatt Macy 	if (spa_feature_is_active(spa, SPA_FEATURE_POOL_CHECKPOINT)) {
847eda14cbcSMatt Macy 		int error = (spa_has_checkpoint(spa)) ?
848eda14cbcSMatt Macy 		    ZFS_ERR_CHECKPOINT_EXISTS : ZFS_ERR_DISCARDING_CHECKPOINT;
849eda14cbcSMatt Macy 		return (SET_ERROR(error));
850eda14cbcSMatt Macy 	}
851eda14cbcSMatt Macy 
852eda14cbcSMatt Macy 	spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
853eda14cbcSMatt Macy 	vdev_state = rvd->vdev_state;
854eda14cbcSMatt Macy 	spa_config_exit(spa, SCL_STATE, FTAG);
855eda14cbcSMatt Macy 
856eda14cbcSMatt Macy 	if (vdev_state != VDEV_STATE_HEALTHY)
857eda14cbcSMatt Macy 		return (SET_ERROR(ENXIO));
858eda14cbcSMatt Macy 
859eda14cbcSMatt Macy 	ASSERT3U(spa_guid(spa), !=, *newguid);
860eda14cbcSMatt Macy 
861eda14cbcSMatt Macy 	return (0);
862eda14cbcSMatt Macy }
863eda14cbcSMatt Macy 
864eda14cbcSMatt Macy static void
865eda14cbcSMatt Macy spa_change_guid_sync(void *arg, dmu_tx_t *tx)
866eda14cbcSMatt Macy {
867eda14cbcSMatt Macy 	uint64_t *newguid = arg;
868eda14cbcSMatt Macy 	spa_t *spa = dmu_tx_pool(tx)->dp_spa;
869eda14cbcSMatt Macy 	uint64_t oldguid;
870eda14cbcSMatt Macy 	vdev_t *rvd = spa->spa_root_vdev;
871eda14cbcSMatt Macy 
872eda14cbcSMatt Macy 	oldguid = spa_guid(spa);
873eda14cbcSMatt Macy 
874eda14cbcSMatt Macy 	spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
875eda14cbcSMatt Macy 	rvd->vdev_guid = *newguid;
876eda14cbcSMatt Macy 	rvd->vdev_guid_sum += (*newguid - oldguid);
877eda14cbcSMatt Macy 	vdev_config_dirty(rvd);
878eda14cbcSMatt Macy 	spa_config_exit(spa, SCL_STATE, FTAG);
879eda14cbcSMatt Macy 
880eda14cbcSMatt Macy 	spa_history_log_internal(spa, "guid change", tx, "old=%llu new=%llu",
881eda14cbcSMatt Macy 	    (u_longlong_t)oldguid, (u_longlong_t)*newguid);
882eda14cbcSMatt Macy }
883eda14cbcSMatt Macy 
884eda14cbcSMatt Macy /*
885eda14cbcSMatt Macy  * Change the GUID for the pool.  This is done so that we can later
886eda14cbcSMatt Macy  * re-import a pool built from a clone of our own vdevs.  We will modify
887eda14cbcSMatt Macy  * the root vdev's guid, our own pool guid, and then mark all of our
888eda14cbcSMatt Macy  * vdevs dirty.  Note that we must make sure that all our vdevs are
889eda14cbcSMatt Macy  * online when we do this, or else any vdevs that weren't present
890eda14cbcSMatt Macy  * would be orphaned from our pool.  We are also going to issue a
891eda14cbcSMatt Macy  * sysevent to update any watchers.
892eda14cbcSMatt Macy  */
893eda14cbcSMatt Macy int
894eda14cbcSMatt Macy spa_change_guid(spa_t *spa)
895eda14cbcSMatt Macy {
896eda14cbcSMatt Macy 	int error;
897eda14cbcSMatt Macy 	uint64_t guid;
898eda14cbcSMatt Macy 
899eda14cbcSMatt Macy 	mutex_enter(&spa->spa_vdev_top_lock);
900eda14cbcSMatt Macy 	mutex_enter(&spa_namespace_lock);
901eda14cbcSMatt Macy 	guid = spa_generate_guid(NULL);
902eda14cbcSMatt Macy 
903eda14cbcSMatt Macy 	error = dsl_sync_task(spa->spa_name, spa_change_guid_check,
904eda14cbcSMatt Macy 	    spa_change_guid_sync, &guid, 5, ZFS_SPACE_CHECK_RESERVED);
905eda14cbcSMatt Macy 
906eda14cbcSMatt Macy 	if (error == 0) {
907eda14cbcSMatt Macy 		spa_write_cachefile(spa, B_FALSE, B_TRUE);
908eda14cbcSMatt Macy 		spa_event_notify(spa, NULL, NULL, ESC_ZFS_POOL_REGUID);
909eda14cbcSMatt Macy 	}
910eda14cbcSMatt Macy 
911eda14cbcSMatt Macy 	mutex_exit(&spa_namespace_lock);
912eda14cbcSMatt Macy 	mutex_exit(&spa->spa_vdev_top_lock);
913eda14cbcSMatt Macy 
914eda14cbcSMatt Macy 	return (error);
915eda14cbcSMatt Macy }
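/*
 * Note: roughly speaking, this routine is what backs the "zpool reguid"
 * command; the ioctl handler opens the named pool and calls
 * spa_change_guid().
 */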
916eda14cbcSMatt Macy 
917eda14cbcSMatt Macy /*
918eda14cbcSMatt Macy  * ==========================================================================
919eda14cbcSMatt Macy  * SPA state manipulation (open/create/destroy/import/export)
920eda14cbcSMatt Macy  * ==========================================================================
921eda14cbcSMatt Macy  */
922eda14cbcSMatt Macy 
923eda14cbcSMatt Macy static int
924eda14cbcSMatt Macy spa_error_entry_compare(const void *a, const void *b)
925eda14cbcSMatt Macy {
926eda14cbcSMatt Macy 	const spa_error_entry_t *sa = (const spa_error_entry_t *)a;
927eda14cbcSMatt Macy 	const spa_error_entry_t *sb = (const spa_error_entry_t *)b;
928eda14cbcSMatt Macy 	int ret;
929eda14cbcSMatt Macy 
930eda14cbcSMatt Macy 	ret = memcmp(&sa->se_bookmark, &sb->se_bookmark,
931eda14cbcSMatt Macy 	    sizeof (zbookmark_phys_t));
932eda14cbcSMatt Macy 
933eda14cbcSMatt Macy 	return (TREE_ISIGN(ret));
934eda14cbcSMatt Macy }
935eda14cbcSMatt Macy 
936eda14cbcSMatt Macy /*
937eda14cbcSMatt Macy  * Utility function which retrieves copies of the current logs and
938eda14cbcSMatt Macy  * re-initializes them in the process.
939eda14cbcSMatt Macy  */
940eda14cbcSMatt Macy void
941eda14cbcSMatt Macy spa_get_errlists(spa_t *spa, avl_tree_t *last, avl_tree_t *scrub)
942eda14cbcSMatt Macy {
943eda14cbcSMatt Macy 	ASSERT(MUTEX_HELD(&spa->spa_errlist_lock));
944eda14cbcSMatt Macy 
945eda14cbcSMatt Macy 	bcopy(&spa->spa_errlist_last, last, sizeof (avl_tree_t));
946eda14cbcSMatt Macy 	bcopy(&spa->spa_errlist_scrub, scrub, sizeof (avl_tree_t));
947eda14cbcSMatt Macy 
948eda14cbcSMatt Macy 	avl_create(&spa->spa_errlist_scrub,
949eda14cbcSMatt Macy 	    spa_error_entry_compare, sizeof (spa_error_entry_t),
950eda14cbcSMatt Macy 	    offsetof(spa_error_entry_t, se_avl));
951eda14cbcSMatt Macy 	avl_create(&spa->spa_errlist_last,
952eda14cbcSMatt Macy 	    spa_error_entry_compare, sizeof (spa_error_entry_t),
953eda14cbcSMatt Macy 	    offsetof(spa_error_entry_t, se_avl));
954eda14cbcSMatt Macy }
955eda14cbcSMatt Macy 
956eda14cbcSMatt Macy static void
957eda14cbcSMatt Macy spa_taskqs_init(spa_t *spa, zio_type_t t, zio_taskq_type_t q)
958eda14cbcSMatt Macy {
959eda14cbcSMatt Macy 	const zio_taskq_info_t *ztip = &zio_taskqs[t][q];
960eda14cbcSMatt Macy 	enum zti_modes mode = ztip->zti_mode;
961eda14cbcSMatt Macy 	uint_t value = ztip->zti_value;
962eda14cbcSMatt Macy 	uint_t count = ztip->zti_count;
963eda14cbcSMatt Macy 	spa_taskqs_t *tqs = &spa->spa_zio_taskq[t][q];
964*16038816SMartin Matuska 	uint_t cpus, flags = TASKQ_DYNAMIC;
965eda14cbcSMatt Macy 	boolean_t batch = B_FALSE;
966eda14cbcSMatt Macy 
967eda14cbcSMatt Macy 	switch (mode) {
968eda14cbcSMatt Macy 	case ZTI_MODE_FIXED:
969*16038816SMartin Matuska 		ASSERT3U(value, >, 0);
970eda14cbcSMatt Macy 		break;
971eda14cbcSMatt Macy 
972eda14cbcSMatt Macy 	case ZTI_MODE_BATCH:
973eda14cbcSMatt Macy 		batch = B_TRUE;
974eda14cbcSMatt Macy 		flags |= TASKQ_THREADS_CPU_PCT;
975eda14cbcSMatt Macy 		value = MIN(zio_taskq_batch_pct, 100);
976eda14cbcSMatt Macy 		break;
977eda14cbcSMatt Macy 
978*16038816SMartin Matuska 	case ZTI_MODE_SCALE:
979*16038816SMartin Matuska 		flags |= TASKQ_THREADS_CPU_PCT;
980*16038816SMartin Matuska 		/*
981*16038816SMartin Matuska 		 * We want more taskqs to reduce lock contention, but we want
982*16038816SMartin Matuska 		 * fewer for better request ordering and CPU utilization.
983*16038816SMartin Matuska 		 */
984*16038816SMartin Matuska 		cpus = MAX(1, boot_ncpus * zio_taskq_batch_pct / 100);
985*16038816SMartin Matuska 		if (zio_taskq_batch_tpq > 0) {
986*16038816SMartin Matuska 			count = MAX(1, (cpus + zio_taskq_batch_tpq / 2) /
987*16038816SMartin Matuska 			    zio_taskq_batch_tpq);
988*16038816SMartin Matuska 		} else {
989*16038816SMartin Matuska 			/*
990*16038816SMartin Matuska 			 * Prefer 6 threads per taskq, but no more taskqs
991*16038816SMartin Matuska 			 * than threads in them on large systems. For 80%:
992*16038816SMartin Matuska 			 *
993*16038816SMartin Matuska 			 *                 taskq   taskq   total
994*16038816SMartin Matuska 			 * cpus    taskqs  percent threads threads
995*16038816SMartin Matuska 			 * ------- ------- ------- ------- -------
996*16038816SMartin Matuska 			 * 1       1       80%     1       1
997*16038816SMartin Matuska 			 * 2       1       80%     1       1
998*16038816SMartin Matuska 			 * 4       1       80%     3       3
999*16038816SMartin Matuska 			 * 8       2       40%     3       6
1000*16038816SMartin Matuska 			 * 16      3       27%     4       12
1001*16038816SMartin Matuska 			 * 32      5       16%     5       25
1002*16038816SMartin Matuska 			 * 64      7       11%     7       49
1003*16038816SMartin Matuska 			 * 128     10      8%      10      100
1004*16038816SMartin Matuska 			 * 256     14      6%      15      210
1005*16038816SMartin Matuska 			 */
1006*16038816SMartin Matuska 			count = 1 + cpus / 6;
1007*16038816SMartin Matuska 			while (count * count > cpus)
1008*16038816SMartin Matuska 				count--;
1009*16038816SMartin Matuska 		}
1010*16038816SMartin Matuska 		/* Limit each taskq to at most 100% so as not to trip an assertion. */
1011*16038816SMartin Matuska 		count = MAX(count, (zio_taskq_batch_pct + 99) / 100);
1012*16038816SMartin Matuska 		value = (zio_taskq_batch_pct + count / 2) / count;
1013*16038816SMartin Matuska 		break;
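	/*
	 * Worked example of the ZTI_MODE_SCALE math above, assuming the
	 * default zio_taskq_batch_pct of 80 and zio_taskq_batch_tpq of 0 on
	 * a 32-CPU system: cpus = 32 * 80 / 100 = 25; count = 1 + 25 / 6 = 5
	 * (5 * 5 is not > 25, so no decrement); value = (80 + 5/2) / 5 = 16.
	 * That gives 5 taskqs of 5 threads each (16% of 32 CPUs), matching
	 * the 32-CPU row of the table above.
	 */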
1014*16038816SMartin Matuska 
1015*16038816SMartin Matuska 	case ZTI_MODE_NULL:
1016*16038816SMartin Matuska 		tqs->stqs_count = 0;
1017*16038816SMartin Matuska 		tqs->stqs_taskq = NULL;
1018*16038816SMartin Matuska 		return;
1019*16038816SMartin Matuska 
1020eda14cbcSMatt Macy 	default:
1021eda14cbcSMatt Macy 		panic("unrecognized mode for %s_%s taskq (%u:%u) in "
1022eda14cbcSMatt Macy 		    "spa_activate()",
1023eda14cbcSMatt Macy 		    zio_type_name[t], zio_taskq_types[q], mode, value);
1024eda14cbcSMatt Macy 		break;
1025eda14cbcSMatt Macy 	}
1026eda14cbcSMatt Macy 
1027*16038816SMartin Matuska 	ASSERT3U(count, >, 0);
1028*16038816SMartin Matuska 	tqs->stqs_count = count;
1029*16038816SMartin Matuska 	tqs->stqs_taskq = kmem_alloc(count * sizeof (taskq_t *), KM_SLEEP);
1030*16038816SMartin Matuska 
1031eda14cbcSMatt Macy 	for (uint_t i = 0; i < count; i++) {
1032eda14cbcSMatt Macy 		taskq_t *tq;
1033eda14cbcSMatt Macy 		char name[32];
1034eda14cbcSMatt Macy 
1035*16038816SMartin Matuska 		if (count > 1)
1036*16038816SMartin Matuska 			(void) snprintf(name, sizeof (name), "%s_%s_%u",
1037*16038816SMartin Matuska 			    zio_type_name[t], zio_taskq_types[q], i);
1038*16038816SMartin Matuska 		else
1039eda14cbcSMatt Macy 			(void) snprintf(name, sizeof (name), "%s_%s",
1040eda14cbcSMatt Macy 			    zio_type_name[t], zio_taskq_types[q]);
1041eda14cbcSMatt Macy 
1042eda14cbcSMatt Macy 		if (zio_taskq_sysdc && spa->spa_proc != &p0) {
1043eda14cbcSMatt Macy 			if (batch)
1044eda14cbcSMatt Macy 				flags |= TASKQ_DC_BATCH;
1045eda14cbcSMatt Macy 
1046eda14cbcSMatt Macy 			tq = taskq_create_sysdc(name, value, 50, INT_MAX,
1047eda14cbcSMatt Macy 			    spa->spa_proc, zio_taskq_basedc, flags);
1048eda14cbcSMatt Macy 		} else {
1049eda14cbcSMatt Macy 			pri_t pri = maxclsyspri;
1050eda14cbcSMatt Macy 			/*
1051eda14cbcSMatt Macy 			 * The write issue taskq can be extremely CPU
1052eda14cbcSMatt Macy 			 * intensive.  Run it at slightly less important
10532c48331dSMatt Macy 			 * priority than the other taskqs.
10542c48331dSMatt Macy 			 *
10552c48331dSMatt Macy 			 * Under Linux and FreeBSD this means incrementing
10562c48331dSMatt Macy 			 * the priority value as opposed to platforms like
10572c48331dSMatt Macy 			 * illumos where it should be decremented.
10582c48331dSMatt Macy 			 *
10592c48331dSMatt Macy 			 * On FreeBSD, if priorities divided by four (RQ_PPQ)
10602c48331dSMatt Macy 			 * are equal then a difference between them is
10612c48331dSMatt Macy 			 * insignificant.
1062eda14cbcSMatt Macy 			 */
10632c48331dSMatt Macy 			if (t == ZIO_TYPE_WRITE && q == ZIO_TASKQ_ISSUE) {
10642c48331dSMatt Macy #if defined(__linux__)
1065eda14cbcSMatt Macy 				pri++;
10662c48331dSMatt Macy #elif defined(__FreeBSD__)
10672c48331dSMatt Macy 				pri += 4;
10682c48331dSMatt Macy #else
10692c48331dSMatt Macy #error "unknown OS"
10702c48331dSMatt Macy #endif
10712c48331dSMatt Macy 			}
1072eda14cbcSMatt Macy 			tq = taskq_create_proc(name, value, pri, 50,
1073eda14cbcSMatt Macy 			    INT_MAX, spa->spa_proc, flags);
1074eda14cbcSMatt Macy 		}
1075eda14cbcSMatt Macy 
1076eda14cbcSMatt Macy 		tqs->stqs_taskq[i] = tq;
1077eda14cbcSMatt Macy 	}
1078eda14cbcSMatt Macy }
1079eda14cbcSMatt Macy 
1080eda14cbcSMatt Macy static void
1081eda14cbcSMatt Macy spa_taskqs_fini(spa_t *spa, zio_type_t t, zio_taskq_type_t q)
1082eda14cbcSMatt Macy {
1083eda14cbcSMatt Macy 	spa_taskqs_t *tqs = &spa->spa_zio_taskq[t][q];
1084eda14cbcSMatt Macy 
1085eda14cbcSMatt Macy 	if (tqs->stqs_taskq == NULL) {
1086eda14cbcSMatt Macy 		ASSERT3U(tqs->stqs_count, ==, 0);
1087eda14cbcSMatt Macy 		return;
1088eda14cbcSMatt Macy 	}
1089eda14cbcSMatt Macy 
1090eda14cbcSMatt Macy 	for (uint_t i = 0; i < tqs->stqs_count; i++) {
1091eda14cbcSMatt Macy 		ASSERT3P(tqs->stqs_taskq[i], !=, NULL);
1092eda14cbcSMatt Macy 		taskq_destroy(tqs->stqs_taskq[i]);
1093eda14cbcSMatt Macy 	}
1094eda14cbcSMatt Macy 
1095eda14cbcSMatt Macy 	kmem_free(tqs->stqs_taskq, tqs->stqs_count * sizeof (taskq_t *));
1096eda14cbcSMatt Macy 	tqs->stqs_taskq = NULL;
1097eda14cbcSMatt Macy }
1098eda14cbcSMatt Macy 
1099eda14cbcSMatt Macy /*
1100eda14cbcSMatt Macy  * Dispatch a task to the appropriate taskq for the ZFS I/O type and priority.
1101eda14cbcSMatt Macy  * Note that a type may have multiple discrete taskqs to avoid lock contention
1102eda14cbcSMatt Macy  * on the taskq itself. In that case we choose which taskq at random by using
1103eda14cbcSMatt Macy  * the low bits of gethrtime().
1104eda14cbcSMatt Macy  */
1105eda14cbcSMatt Macy void
1106eda14cbcSMatt Macy spa_taskq_dispatch_ent(spa_t *spa, zio_type_t t, zio_taskq_type_t q,
1107eda14cbcSMatt Macy     task_func_t *func, void *arg, uint_t flags, taskq_ent_t *ent)
1108eda14cbcSMatt Macy {
1109eda14cbcSMatt Macy 	spa_taskqs_t *tqs = &spa->spa_zio_taskq[t][q];
1110eda14cbcSMatt Macy 	taskq_t *tq;
1111eda14cbcSMatt Macy 
1112eda14cbcSMatt Macy 	ASSERT3P(tqs->stqs_taskq, !=, NULL);
1113eda14cbcSMatt Macy 	ASSERT3U(tqs->stqs_count, !=, 0);
1114eda14cbcSMatt Macy 
1115eda14cbcSMatt Macy 	if (tqs->stqs_count == 1) {
1116eda14cbcSMatt Macy 		tq = tqs->stqs_taskq[0];
1117eda14cbcSMatt Macy 	} else {
1118eda14cbcSMatt Macy 		tq = tqs->stqs_taskq[((uint64_t)gethrtime()) % tqs->stqs_count];
1119eda14cbcSMatt Macy 	}
1120eda14cbcSMatt Macy 
1121eda14cbcSMatt Macy 	taskq_dispatch_ent(tq, func, arg, flags, ent);
1122eda14cbcSMatt Macy }
1123eda14cbcSMatt Macy 
1124eda14cbcSMatt Macy /*
1125eda14cbcSMatt Macy  * Same as spa_taskq_dispatch_ent() but block on the task until completion.
1126eda14cbcSMatt Macy  */
1127eda14cbcSMatt Macy void
1128eda14cbcSMatt Macy spa_taskq_dispatch_sync(spa_t *spa, zio_type_t t, zio_taskq_type_t q,
1129eda14cbcSMatt Macy     task_func_t *func, void *arg, uint_t flags)
1130eda14cbcSMatt Macy {
1131eda14cbcSMatt Macy 	spa_taskqs_t *tqs = &spa->spa_zio_taskq[t][q];
1132eda14cbcSMatt Macy 	taskq_t *tq;
1133eda14cbcSMatt Macy 	taskqid_t id;
1134eda14cbcSMatt Macy 
1135eda14cbcSMatt Macy 	ASSERT3P(tqs->stqs_taskq, !=, NULL);
1136eda14cbcSMatt Macy 	ASSERT3U(tqs->stqs_count, !=, 0);
1137eda14cbcSMatt Macy 
1138eda14cbcSMatt Macy 	if (tqs->stqs_count == 1) {
1139eda14cbcSMatt Macy 		tq = tqs->stqs_taskq[0];
1140eda14cbcSMatt Macy 	} else {
1141eda14cbcSMatt Macy 		tq = tqs->stqs_taskq[((uint64_t)gethrtime()) % tqs->stqs_count];
1142eda14cbcSMatt Macy 	}
1143eda14cbcSMatt Macy 
1144eda14cbcSMatt Macy 	id = taskq_dispatch(tq, func, arg, flags);
1145eda14cbcSMatt Macy 	if (id)
1146eda14cbcSMatt Macy 		taskq_wait_id(tq, id);
1147eda14cbcSMatt Macy }
1148eda14cbcSMatt Macy 
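/*
 * Create the full set of zio taskqs for the pool, one group per
 * (zio type, taskq type) pair.
 */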
1149eda14cbcSMatt Macy static void
1150eda14cbcSMatt Macy spa_create_zio_taskqs(spa_t *spa)
1151eda14cbcSMatt Macy {
1152eda14cbcSMatt Macy 	for (int t = 0; t < ZIO_TYPES; t++) {
1153eda14cbcSMatt Macy 		for (int q = 0; q < ZIO_TASKQ_TYPES; q++) {
1154eda14cbcSMatt Macy 			spa_taskqs_init(spa, t, q);
1155eda14cbcSMatt Macy 		}
1156eda14cbcSMatt Macy 	}
1157eda14cbcSMatt Macy }
1158eda14cbcSMatt Macy 
1159eda14cbcSMatt Macy /*
1160eda14cbcSMatt Macy  * Disabled until spa_thread() can be adapted for Linux.
1161eda14cbcSMatt Macy  */
1162eda14cbcSMatt Macy #undef HAVE_SPA_THREAD
1163eda14cbcSMatt Macy 
1164eda14cbcSMatt Macy #if defined(_KERNEL) && defined(HAVE_SPA_THREAD)
1165eda14cbcSMatt Macy static void
1166eda14cbcSMatt Macy spa_thread(void *arg)
1167eda14cbcSMatt Macy {
1168eda14cbcSMatt Macy 	psetid_t zio_taskq_psrset_bind = PS_NONE;
1169eda14cbcSMatt Macy 	callb_cpr_t cprinfo;
1170eda14cbcSMatt Macy 
1171eda14cbcSMatt Macy 	spa_t *spa = arg;
1172eda14cbcSMatt Macy 	user_t *pu = PTOU(curproc);
1173eda14cbcSMatt Macy 
1174eda14cbcSMatt Macy 	CALLB_CPR_INIT(&cprinfo, &spa->spa_proc_lock, callb_generic_cpr,
1175eda14cbcSMatt Macy 	    spa->spa_name);
1176eda14cbcSMatt Macy 
1177eda14cbcSMatt Macy 	ASSERT(curproc != &p0);
1178eda14cbcSMatt Macy 	(void) snprintf(pu->u_psargs, sizeof (pu->u_psargs),
1179eda14cbcSMatt Macy 	    "zpool-%s", spa->spa_name);
1180eda14cbcSMatt Macy 	(void) strlcpy(pu->u_comm, pu->u_psargs, sizeof (pu->u_comm));
1181eda14cbcSMatt Macy 
1182eda14cbcSMatt Macy 	/* bind this thread to the requested psrset */
1183eda14cbcSMatt Macy 	if (zio_taskq_psrset_bind != PS_NONE) {
1184eda14cbcSMatt Macy 		pool_lock();
1185eda14cbcSMatt Macy 		mutex_enter(&cpu_lock);
1186eda14cbcSMatt Macy 		mutex_enter(&pidlock);
1187eda14cbcSMatt Macy 		mutex_enter(&curproc->p_lock);
1188eda14cbcSMatt Macy 
1189eda14cbcSMatt Macy 		if (cpupart_bind_thread(curthread, zio_taskq_psrset_bind,
1190eda14cbcSMatt Macy 		    0, NULL, NULL) == 0)  {
1191eda14cbcSMatt Macy 			curthread->t_bind_pset = zio_taskq_psrset_bind;
1192eda14cbcSMatt Macy 		} else {
1193eda14cbcSMatt Macy 			cmn_err(CE_WARN,
1194eda14cbcSMatt Macy 			    "Couldn't bind process for zfs pool \"%s\" to "
1195eda14cbcSMatt Macy 			    "pset %d\n", spa->spa_name, zio_taskq_psrset_bind);
1196eda14cbcSMatt Macy 		}
1197eda14cbcSMatt Macy 
1198eda14cbcSMatt Macy 		mutex_exit(&curproc->p_lock);
1199eda14cbcSMatt Macy 		mutex_exit(&pidlock);
1200eda14cbcSMatt Macy 		mutex_exit(&cpu_lock);
1201eda14cbcSMatt Macy 		pool_unlock();
1202eda14cbcSMatt Macy 	}
1203eda14cbcSMatt Macy 
1204eda14cbcSMatt Macy 	if (zio_taskq_sysdc) {
1205eda14cbcSMatt Macy 		sysdc_thread_enter(curthread, 100, 0);
1206eda14cbcSMatt Macy 	}
1207eda14cbcSMatt Macy 
1208eda14cbcSMatt Macy 	spa->spa_proc = curproc;
1209eda14cbcSMatt Macy 	spa->spa_did = curthread->t_did;
1210eda14cbcSMatt Macy 
1211eda14cbcSMatt Macy 	spa_create_zio_taskqs(spa);
1212eda14cbcSMatt Macy 
1213eda14cbcSMatt Macy 	mutex_enter(&spa->spa_proc_lock);
1214eda14cbcSMatt Macy 	ASSERT(spa->spa_proc_state == SPA_PROC_CREATED);
1215eda14cbcSMatt Macy 
1216eda14cbcSMatt Macy 	spa->spa_proc_state = SPA_PROC_ACTIVE;
1217eda14cbcSMatt Macy 	cv_broadcast(&spa->spa_proc_cv);
1218eda14cbcSMatt Macy 
1219eda14cbcSMatt Macy 	CALLB_CPR_SAFE_BEGIN(&cprinfo);
1220eda14cbcSMatt Macy 	while (spa->spa_proc_state == SPA_PROC_ACTIVE)
1221eda14cbcSMatt Macy 		cv_wait(&spa->spa_proc_cv, &spa->spa_proc_lock);
1222eda14cbcSMatt Macy 	CALLB_CPR_SAFE_END(&cprinfo, &spa->spa_proc_lock);
1223eda14cbcSMatt Macy 
1224eda14cbcSMatt Macy 	ASSERT(spa->spa_proc_state == SPA_PROC_DEACTIVATE);
1225eda14cbcSMatt Macy 	spa->spa_proc_state = SPA_PROC_GONE;
1226eda14cbcSMatt Macy 	spa->spa_proc = &p0;
1227eda14cbcSMatt Macy 	cv_broadcast(&spa->spa_proc_cv);
1228eda14cbcSMatt Macy 	CALLB_CPR_EXIT(&cprinfo);	/* drops spa_proc_lock */
1229eda14cbcSMatt Macy 
1230eda14cbcSMatt Macy 	mutex_enter(&curproc->p_lock);
1231eda14cbcSMatt Macy 	lwp_exit();
1232eda14cbcSMatt Macy }
1233eda14cbcSMatt Macy #endif
1234eda14cbcSMatt Macy 
1235eda14cbcSMatt Macy /*
1236eda14cbcSMatt Macy  * Activate an uninitialized pool.
1237eda14cbcSMatt Macy  */
1238eda14cbcSMatt Macy static void
1239eda14cbcSMatt Macy spa_activate(spa_t *spa, spa_mode_t mode)
1240eda14cbcSMatt Macy {
1241eda14cbcSMatt Macy 	ASSERT(spa->spa_state == POOL_STATE_UNINITIALIZED);
1242eda14cbcSMatt Macy 
1243eda14cbcSMatt Macy 	spa->spa_state = POOL_STATE_ACTIVE;
1244eda14cbcSMatt Macy 	spa->spa_mode = mode;
1245eda14cbcSMatt Macy 
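	/*
	 * Create a metaslab allocation class for each class of storage
	 * the pool can use: normal, log, embedded log, special, and dedup.
	 * These are destroyed again in spa_deactivate().
	 */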
1246eda14cbcSMatt Macy 	spa->spa_normal_class = metaslab_class_create(spa, zfs_metaslab_ops);
1247eda14cbcSMatt Macy 	spa->spa_log_class = metaslab_class_create(spa, zfs_metaslab_ops);
1248184c1b94SMartin Matuska 	spa->spa_embedded_log_class =
1249184c1b94SMartin Matuska 	    metaslab_class_create(spa, zfs_metaslab_ops);
1250eda14cbcSMatt Macy 	spa->spa_special_class = metaslab_class_create(spa, zfs_metaslab_ops);
1251eda14cbcSMatt Macy 	spa->spa_dedup_class = metaslab_class_create(spa, zfs_metaslab_ops);
1252eda14cbcSMatt Macy 
1253eda14cbcSMatt Macy 	/* Try to create a covering process */
1254eda14cbcSMatt Macy 	mutex_enter(&spa->spa_proc_lock);
1255eda14cbcSMatt Macy 	ASSERT(spa->spa_proc_state == SPA_PROC_NONE);
1256eda14cbcSMatt Macy 	ASSERT(spa->spa_proc == &p0);
1257eda14cbcSMatt Macy 	spa->spa_did = 0;
1258eda14cbcSMatt Macy 
1259eda14cbcSMatt Macy #ifdef HAVE_SPA_THREAD
1260eda14cbcSMatt Macy 	/* Only create a process if we're going to be around a while. */
1261eda14cbcSMatt Macy 	if (spa_create_process && strcmp(spa->spa_name, TRYIMPORT_NAME) != 0) {
1262eda14cbcSMatt Macy 		if (newproc(spa_thread, (caddr_t)spa, syscid, maxclsyspri,
1263eda14cbcSMatt Macy 		    NULL, 0) == 0) {
1264eda14cbcSMatt Macy 			spa->spa_proc_state = SPA_PROC_CREATED;
1265eda14cbcSMatt Macy 			while (spa->spa_proc_state == SPA_PROC_CREATED) {
1266eda14cbcSMatt Macy 				cv_wait(&spa->spa_proc_cv,
1267eda14cbcSMatt Macy 				    &spa->spa_proc_lock);
1268eda14cbcSMatt Macy 			}
1269eda14cbcSMatt Macy 			ASSERT(spa->spa_proc_state == SPA_PROC_ACTIVE);
1270eda14cbcSMatt Macy 			ASSERT(spa->spa_proc != &p0);
1271eda14cbcSMatt Macy 			ASSERT(spa->spa_did != 0);
1272eda14cbcSMatt Macy 		} else {
1273eda14cbcSMatt Macy #ifdef _KERNEL
1274eda14cbcSMatt Macy 			cmn_err(CE_WARN,
1275eda14cbcSMatt Macy 			    "Couldn't create process for zfs pool \"%s\"\n",
1276eda14cbcSMatt Macy 			    spa->spa_name);
1277eda14cbcSMatt Macy #endif
1278eda14cbcSMatt Macy 		}
1279eda14cbcSMatt Macy 	}
1280eda14cbcSMatt Macy #endif /* HAVE_SPA_THREAD */
1281eda14cbcSMatt Macy 	mutex_exit(&spa->spa_proc_lock);
1282eda14cbcSMatt Macy 
1283eda14cbcSMatt Macy 	/* If we didn't create a process, we need to create our taskqs. */
1284eda14cbcSMatt Macy 	if (spa->spa_proc == &p0) {
1285eda14cbcSMatt Macy 		spa_create_zio_taskqs(spa);
1286eda14cbcSMatt Macy 	}
1287eda14cbcSMatt Macy 
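	/*
	 * Pre-create a root zio for each txg slot.  These act as parents
	 * for asynchronous I/O issued on behalf of the corresponding txg
	 * and are waited on (and released) in spa_deactivate().
	 */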
1288eda14cbcSMatt Macy 	for (size_t i = 0; i < TXG_SIZE; i++) {
1289eda14cbcSMatt Macy 		spa->spa_txg_zio[i] = zio_root(spa, NULL, NULL,
1290eda14cbcSMatt Macy 		    ZIO_FLAG_CANFAIL);
1291eda14cbcSMatt Macy 	}
1292eda14cbcSMatt Macy 
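	/*
	 * Per-pool lists tracking vdevs with dirty config, vdevs with
	 * dirty state, and objsets being evicted.
	 */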
1293eda14cbcSMatt Macy 	list_create(&spa->spa_config_dirty_list, sizeof (vdev_t),
1294eda14cbcSMatt Macy 	    offsetof(vdev_t, vdev_config_dirty_node));
1295eda14cbcSMatt Macy 	list_create(&spa->spa_evicting_os_list, sizeof (objset_t),
1296eda14cbcSMatt Macy 	    offsetof(objset_t, os_evicting_node));
1297eda14cbcSMatt Macy 	list_create(&spa->spa_state_dirty_list, sizeof (vdev_t),
1298eda14cbcSMatt Macy 	    offsetof(vdev_t, vdev_state_dirty_node));
1299eda14cbcSMatt Macy 
1300eda14cbcSMatt Macy 	txg_list_create(&spa->spa_vdev_txg_list, spa,
1301eda14cbcSMatt Macy 	    offsetof(struct vdev, vdev_txg_node));
1302eda14cbcSMatt Macy 
1303eda14cbcSMatt Macy 	avl_create(&spa->spa_errlist_scrub,
1304eda14cbcSMatt Macy 	    spa_error_entry_compare, sizeof (spa_error_entry_t),
1305eda14cbcSMatt Macy 	    offsetof(spa_error_entry_t, se_avl));
1306eda14cbcSMatt Macy 	avl_create(&spa->spa_errlist_last,
1307eda14cbcSMatt Macy 	    spa_error_entry_compare, sizeof (spa_error_entry_t),
1308eda14cbcSMatt Macy 	    offsetof(spa_error_entry_t, se_avl));
1309eda14cbcSMatt Macy 
1310eda14cbcSMatt Macy 	spa_keystore_init(&spa->spa_keystore);
1311eda14cbcSMatt Macy 
1312eda14cbcSMatt Macy 	/*
1313eda14cbcSMatt Macy 	 * This taskq is used to perform zvol-minor-related tasks
1314eda14cbcSMatt Macy 	 * asynchronously. This has several advantages, including easy
1315180f8225SMatt Macy 	 * resolution of various deadlocks.
1316eda14cbcSMatt Macy 	 *
1317eda14cbcSMatt Macy 	 * The taskq must be single threaded to ensure tasks are always
1318eda14cbcSMatt Macy 	 * processed in the order in which they were dispatched.
1319eda14cbcSMatt Macy 	 *
1320eda14cbcSMatt Macy 	 * A taskq per pool allows one to keep the pools independent.
1321eda14cbcSMatt Macy 	 * This way if one pool is suspended, it will not impact another.
1322eda14cbcSMatt Macy 	 *
1323eda14cbcSMatt Macy 	 * The preferred location to dispatch a zvol minor task is a sync
1324eda14cbcSMatt Macy 	 * task. In this context, there is easy access to the spa_t and minimal
1325eda14cbcSMatt Macy 	 * error handling is required because the sync task must succeed.
1326eda14cbcSMatt Macy 	 */
1327eda14cbcSMatt Macy 	spa->spa_zvol_taskq = taskq_create("z_zvol", 1, defclsyspri,
1328eda14cbcSMatt Macy 	    1, INT_MAX, 0);
1329eda14cbcSMatt Macy 
1330eda14cbcSMatt Macy 	/*
1331eda14cbcSMatt Macy 	 * Taskq dedicated to prefetcher threads: this is used to prevent the
1332eda14cbcSMatt Macy 	 * pool traverse code from monopolizing the global (and limited)
1333eda14cbcSMatt Macy 	 * system_taskq by inappropriately scheduling long running tasks on it.
1334eda14cbcSMatt Macy 	 */
13357877fdebSMatt Macy 	spa->spa_prefetch_taskq = taskq_create("z_prefetch", 100,
13367877fdebSMatt Macy 	    defclsyspri, 1, INT_MAX, TASKQ_DYNAMIC | TASKQ_THREADS_CPU_PCT);
1337eda14cbcSMatt Macy 
1338eda14cbcSMatt Macy 	/*
1339eda14cbcSMatt Macy 	 * The taskq to upgrade datasets in this pool. Currently used by
1340eda14cbcSMatt Macy 	 * feature SPA_FEATURE_USEROBJ_ACCOUNTING/SPA_FEATURE_PROJECT_QUOTA.
1341eda14cbcSMatt Macy 	 */
13427877fdebSMatt Macy 	spa->spa_upgrade_taskq = taskq_create("z_upgrade", 100,
13437877fdebSMatt Macy 	    defclsyspri, 1, INT_MAX, TASKQ_DYNAMIC | TASKQ_THREADS_CPU_PCT);
1344eda14cbcSMatt Macy }
1345eda14cbcSMatt Macy 
1346eda14cbcSMatt Macy /*
1347eda14cbcSMatt Macy  * Opposite of spa_activate().
1348eda14cbcSMatt Macy  */
1349eda14cbcSMatt Macy static void
1350eda14cbcSMatt Macy spa_deactivate(spa_t *spa)
1351eda14cbcSMatt Macy {
1352eda14cbcSMatt Macy 	ASSERT(spa->spa_sync_on == B_FALSE);
1353eda14cbcSMatt Macy 	ASSERT(spa->spa_dsl_pool == NULL);
1354eda14cbcSMatt Macy 	ASSERT(spa->spa_root_vdev == NULL);
1355eda14cbcSMatt Macy 	ASSERT(spa->spa_async_zio_root == NULL);
1356eda14cbcSMatt Macy 	ASSERT(spa->spa_state != POOL_STATE_UNINITIALIZED);
1357eda14cbcSMatt Macy 
1358eda14cbcSMatt Macy 	spa_evicting_os_wait(spa);
1359eda14cbcSMatt Macy 
1360eda14cbcSMatt Macy 	if (spa->spa_zvol_taskq) {
1361eda14cbcSMatt Macy 		taskq_destroy(spa->spa_zvol_taskq);
1362eda14cbcSMatt Macy 		spa->spa_zvol_taskq = NULL;
1363eda14cbcSMatt Macy 	}
1364eda14cbcSMatt Macy 
1365eda14cbcSMatt Macy 	if (spa->spa_prefetch_taskq) {
1366eda14cbcSMatt Macy 		taskq_destroy(spa->spa_prefetch_taskq);
1367eda14cbcSMatt Macy 		spa->spa_prefetch_taskq = NULL;
1368eda14cbcSMatt Macy 	}
1369eda14cbcSMatt Macy 
1370eda14cbcSMatt Macy 	if (spa->spa_upgrade_taskq) {
1371eda14cbcSMatt Macy 		taskq_destroy(spa->spa_upgrade_taskq);
1372eda14cbcSMatt Macy 		spa->spa_upgrade_taskq = NULL;
1373eda14cbcSMatt Macy 	}
1374eda14cbcSMatt Macy 
1375eda14cbcSMatt Macy 	txg_list_destroy(&spa->spa_vdev_txg_list);
1376eda14cbcSMatt Macy 
1377eda14cbcSMatt Macy 	list_destroy(&spa->spa_config_dirty_list);
1378eda14cbcSMatt Macy 	list_destroy(&spa->spa_evicting_os_list);
1379eda14cbcSMatt Macy 	list_destroy(&spa->spa_state_dirty_list);
1380eda14cbcSMatt Macy 
1381eda14cbcSMatt Macy 	taskq_cancel_id(system_delay_taskq, spa->spa_deadman_tqid);
1382eda14cbcSMatt Macy 
1383eda14cbcSMatt Macy 	for (int t = 0; t < ZIO_TYPES; t++) {
1384eda14cbcSMatt Macy 		for (int q = 0; q < ZIO_TASKQ_TYPES; q++) {
1385eda14cbcSMatt Macy 			spa_taskqs_fini(spa, t, q);
1386eda14cbcSMatt Macy 		}
1387eda14cbcSMatt Macy 	}
1388eda14cbcSMatt Macy 
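	/*
	 * Wait for (and release) the per-txg root zios created in
	 * spa_activate().
	 */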
1389eda14cbcSMatt Macy 	for (size_t i = 0; i < TXG_SIZE; i++) {
1390eda14cbcSMatt Macy 		ASSERT3P(spa->spa_txg_zio[i], !=, NULL);
1391eda14cbcSMatt Macy 		VERIFY0(zio_wait(spa->spa_txg_zio[i]));
1392eda14cbcSMatt Macy 		spa->spa_txg_zio[i] = NULL;
1393eda14cbcSMatt Macy 	}
1394eda14cbcSMatt Macy 
1395eda14cbcSMatt Macy 	metaslab_class_destroy(spa->spa_normal_class);
1396eda14cbcSMatt Macy 	spa->spa_normal_class = NULL;
1397eda14cbcSMatt Macy 
1398eda14cbcSMatt Macy 	metaslab_class_destroy(spa->spa_log_class);
1399eda14cbcSMatt Macy 	spa->spa_log_class = NULL;
1400eda14cbcSMatt Macy 
1401184c1b94SMartin Matuska 	metaslab_class_destroy(spa->spa_embedded_log_class);
1402184c1b94SMartin Matuska 	spa->spa_embedded_log_class = NULL;
1403184c1b94SMartin Matuska 
1404eda14cbcSMatt Macy 	metaslab_class_destroy(spa->spa_special_class);
1405eda14cbcSMatt Macy 	spa->spa_special_class = NULL;
1406eda14cbcSMatt Macy 
1407eda14cbcSMatt Macy 	metaslab_class_destroy(spa->spa_dedup_class);
1408eda14cbcSMatt Macy 	spa->spa_dedup_class = NULL;
1409eda14cbcSMatt Macy 
1410eda14cbcSMatt Macy 	/*
1411eda14cbcSMatt Macy 	 * If this was part of an import or the open otherwise failed, we may
1412eda14cbcSMatt Macy 	 * still have errors left in the queues.  Empty them just in case.
1413eda14cbcSMatt Macy 	 */
1414eda14cbcSMatt Macy 	spa_errlog_drain(spa);
1415eda14cbcSMatt Macy 	avl_destroy(&spa->spa_errlist_scrub);
1416eda14cbcSMatt Macy 	avl_destroy(&spa->spa_errlist_last);
1417eda14cbcSMatt Macy 
1418eda14cbcSMatt Macy 	spa_keystore_fini(&spa->spa_keystore);
1419eda14cbcSMatt Macy 
1420eda14cbcSMatt Macy 	spa->spa_state = POOL_STATE_UNINITIALIZED;
1421eda14cbcSMatt Macy 
1422eda14cbcSMatt Macy 	mutex_enter(&spa->spa_proc_lock);
1423eda14cbcSMatt Macy 	if (spa->spa_proc_state != SPA_PROC_NONE) {
1424eda14cbcSMatt Macy 		ASSERT(spa->spa_proc_state == SPA_PROC_ACTIVE);
1425eda14cbcSMatt Macy 		spa->spa_proc_state = SPA_PROC_DEACTIVATE;
1426eda14cbcSMatt Macy 		cv_broadcast(&spa->spa_proc_cv);
1427eda14cbcSMatt Macy 		while (spa->spa_proc_state == SPA_PROC_DEACTIVATE) {
1428eda14cbcSMatt Macy 			ASSERT(spa->spa_proc != &p0);
1429eda14cbcSMatt Macy 			cv_wait(&spa->spa_proc_cv, &spa->spa_proc_lock);
1430eda14cbcSMatt Macy 		}
1431eda14cbcSMatt Macy 		ASSERT(spa->spa_proc_state == SPA_PROC_GONE);
1432eda14cbcSMatt Macy 		spa->spa_proc_state = SPA_PROC_NONE;
1433eda14cbcSMatt Macy 	}
1434eda14cbcSMatt Macy 	ASSERT(spa->spa_proc == &p0);
1435eda14cbcSMatt Macy 	mutex_exit(&spa->spa_proc_lock);
1436eda14cbcSMatt Macy 
1437eda14cbcSMatt Macy 	/*
1438eda14cbcSMatt Macy 	 * We want to make sure spa_thread() has actually exited the ZFS
1439eda14cbcSMatt Macy 	 * module, so that the module can't be unloaded out from underneath
1440eda14cbcSMatt Macy 	 * it.
1441eda14cbcSMatt Macy 	 */
1442eda14cbcSMatt Macy 	if (spa->spa_did != 0) {
1443eda14cbcSMatt Macy 		thread_join(spa->spa_did);
1444eda14cbcSMatt Macy 		spa->spa_did = 0;
1445eda14cbcSMatt Macy 	}
1446eda14cbcSMatt Macy }
1447eda14cbcSMatt Macy 
1448eda14cbcSMatt Macy /*
1449eda14cbcSMatt Macy  * Verify a pool configuration and construct the vdev tree appropriately.  This
1450eda14cbcSMatt Macy  * creates all the necessary vdevs in the specified layout, with each vdev in
1451eda14cbcSMatt Macy  * the CLOSED state.  This preps the pool before open/creation/import.
1452eda14cbcSMatt Macy  * All vdev validation is done by the vdev_alloc() routine.
1453eda14cbcSMatt Macy  */
1454eda14cbcSMatt Macy int
1455eda14cbcSMatt Macy spa_config_parse(spa_t *spa, vdev_t **vdp, nvlist_t *nv, vdev_t *parent,
1456eda14cbcSMatt Macy     uint_t id, int atype)
1457eda14cbcSMatt Macy {
1458eda14cbcSMatt Macy 	nvlist_t **child;
1459eda14cbcSMatt Macy 	uint_t children;
1460eda14cbcSMatt Macy 	int error;
1461eda14cbcSMatt Macy 
1462eda14cbcSMatt Macy 	if ((error = vdev_alloc(spa, vdp, nv, parent, id, atype)) != 0)
1463eda14cbcSMatt Macy 		return (error);
1464eda14cbcSMatt Macy 
1465eda14cbcSMatt Macy 	if ((*vdp)->vdev_ops->vdev_op_leaf)
1466eda14cbcSMatt Macy 		return (0);
1467eda14cbcSMatt Macy 
1468eda14cbcSMatt Macy 	error = nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
1469eda14cbcSMatt Macy 	    &child, &children);
1470eda14cbcSMatt Macy 
1471eda14cbcSMatt Macy 	if (error == ENOENT)
1472eda14cbcSMatt Macy 		return (0);
1473eda14cbcSMatt Macy 
1474eda14cbcSMatt Macy 	if (error) {
1475eda14cbcSMatt Macy 		vdev_free(*vdp);
1476eda14cbcSMatt Macy 		*vdp = NULL;
1477eda14cbcSMatt Macy 		return (SET_ERROR(EINVAL));
1478eda14cbcSMatt Macy 	}
1479eda14cbcSMatt Macy 
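	/*
	 * Recursively parse each child vdev; if any child fails, free the
	 * partially constructed tree and propagate the error.
	 */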
1480eda14cbcSMatt Macy 	for (int c = 0; c < children; c++) {
1481eda14cbcSMatt Macy 		vdev_t *vd;
1482eda14cbcSMatt Macy 		if ((error = spa_config_parse(spa, &vd, child[c], *vdp, c,
1483eda14cbcSMatt Macy 		    atype)) != 0) {
1484eda14cbcSMatt Macy 			vdev_free(*vdp);
1485eda14cbcSMatt Macy 			*vdp = NULL;
1486eda14cbcSMatt Macy 			return (error);
1487eda14cbcSMatt Macy 		}
1488eda14cbcSMatt Macy 	}
1489eda14cbcSMatt Macy 
1490eda14cbcSMatt Macy 	ASSERT(*vdp != NULL);
1491eda14cbcSMatt Macy 
1492eda14cbcSMatt Macy 	return (0);
1493eda14cbcSMatt Macy }
1494eda14cbcSMatt Macy 
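/*
 * Decide whether log space maps should be flushed when unloading this pool:
 * only if the log spacemap feature is active, the pool is writeable and
 * still syncing, the pool is being exported (not destroyed), and the
 * administrator has not asked to keep log space maps at export.
 */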
1495eda14cbcSMatt Macy static boolean_t
1496eda14cbcSMatt Macy spa_should_flush_logs_on_unload(spa_t *spa)
1497eda14cbcSMatt Macy {
1498eda14cbcSMatt Macy 	if (!spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP))
1499eda14cbcSMatt Macy 		return (B_FALSE);
1500eda14cbcSMatt Macy 
1501eda14cbcSMatt Macy 	if (!spa_writeable(spa))
1502eda14cbcSMatt Macy 		return (B_FALSE);
1503eda14cbcSMatt Macy 
1504eda14cbcSMatt Macy 	if (!spa->spa_sync_on)
1505eda14cbcSMatt Macy 		return (B_FALSE);
1506eda14cbcSMatt Macy 
1507eda14cbcSMatt Macy 	if (spa_state(spa) != POOL_STATE_EXPORTED)
1508eda14cbcSMatt Macy 		return (B_FALSE);
1509eda14cbcSMatt Macy 
1510eda14cbcSMatt Macy 	if (zfs_keep_log_spacemaps_at_export)
1511eda14cbcSMatt Macy 		return (B_FALSE);
1512eda14cbcSMatt Macy 
1513eda14cbcSMatt Macy 	return (B_TRUE);
1514eda14cbcSMatt Macy }
1515eda14cbcSMatt Macy 
1516eda14cbcSMatt Macy /*
1517eda14cbcSMatt Macy  * Opens a transaction and sets the flag that instructs spa_sync to
1518eda14cbcSMatt Macy  * attempt to flush all the metaslabs for that txg.
1519eda14cbcSMatt Macy  */
1520eda14cbcSMatt Macy static void
1521eda14cbcSMatt Macy spa_unload_log_sm_flush_all(spa_t *spa)
1522eda14cbcSMatt Macy {
1523eda14cbcSMatt Macy 	dmu_tx_t *tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
1524eda14cbcSMatt Macy 	VERIFY0(dmu_tx_assign(tx, TXG_WAIT));
1525eda14cbcSMatt Macy 
1526eda14cbcSMatt Macy 	ASSERT3U(spa->spa_log_flushall_txg, ==, 0);
1527eda14cbcSMatt Macy 	spa->spa_log_flushall_txg = dmu_tx_get_txg(tx);
1528eda14cbcSMatt Macy 
1529eda14cbcSMatt Macy 	dmu_tx_commit(tx);
1530eda14cbcSMatt Macy 	txg_wait_synced(spa_get_dsl(spa), spa->spa_log_flushall_txg);
1531eda14cbcSMatt Macy }
1532eda14cbcSMatt Macy 
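/*
 * Release the in-core log space map metadata: the per-txg entries in
 * spa_sm_logs_by_txg, the log summary list, and the unflushed statistics.
 */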
1533eda14cbcSMatt Macy static void
1534eda14cbcSMatt Macy spa_unload_log_sm_metadata(spa_t *spa)
1535eda14cbcSMatt Macy {
1536eda14cbcSMatt Macy 	void *cookie = NULL;
1537eda14cbcSMatt Macy 	spa_log_sm_t *sls;
1538eda14cbcSMatt Macy 	while ((sls = avl_destroy_nodes(&spa->spa_sm_logs_by_txg,
1539eda14cbcSMatt Macy 	    &cookie)) != NULL) {
1540eda14cbcSMatt Macy 		VERIFY0(sls->sls_mscount);
1541eda14cbcSMatt Macy 		kmem_free(sls, sizeof (spa_log_sm_t));
1542eda14cbcSMatt Macy 	}
1543eda14cbcSMatt Macy 
1544eda14cbcSMatt Macy 	for (log_summary_entry_t *e = list_head(&spa->spa_log_summary);
1545eda14cbcSMatt Macy 	    e != NULL; e = list_head(&spa->spa_log_summary)) {
1546eda14cbcSMatt Macy 		VERIFY0(e->lse_mscount);
1547eda14cbcSMatt Macy 		list_remove(&spa->spa_log_summary, e);
1548eda14cbcSMatt Macy 		kmem_free(e, sizeof (log_summary_entry_t));
1549eda14cbcSMatt Macy 	}
1550eda14cbcSMatt Macy 
1551eda14cbcSMatt Macy 	spa->spa_unflushed_stats.sus_nblocks = 0;
1552eda14cbcSMatt Macy 	spa->spa_unflushed_stats.sus_memused = 0;
1553eda14cbcSMatt Macy 	spa->spa_unflushed_stats.sus_blocklimit = 0;
1554eda14cbcSMatt Macy }
1555eda14cbcSMatt Macy 
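/*
 * Destroy the pool's auxiliary zthrs: condense, checkpoint discard,
 * livelist delete, and livelist condense.
 */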
1556eda14cbcSMatt Macy static void
1557eda14cbcSMatt Macy spa_destroy_aux_threads(spa_t *spa)
1558eda14cbcSMatt Macy {
1559eda14cbcSMatt Macy 	if (spa->spa_condense_zthr != NULL) {
1560eda14cbcSMatt Macy 		zthr_destroy(spa->spa_condense_zthr);
1561eda14cbcSMatt Macy 		spa->spa_condense_zthr = NULL;
1562eda14cbcSMatt Macy 	}
1563eda14cbcSMatt Macy 	if (spa->spa_checkpoint_discard_zthr != NULL) {
1564eda14cbcSMatt Macy 		zthr_destroy(spa->spa_checkpoint_discard_zthr);
1565eda14cbcSMatt Macy 		spa->spa_checkpoint_discard_zthr = NULL;
1566eda14cbcSMatt Macy 	}
1567eda14cbcSMatt Macy 	if (spa->spa_livelist_delete_zthr != NULL) {
1568eda14cbcSMatt Macy 		zthr_destroy(spa->spa_livelist_delete_zthr);
1569eda14cbcSMatt Macy 		spa->spa_livelist_delete_zthr = NULL;
1570eda14cbcSMatt Macy 	}
1571eda14cbcSMatt Macy 	if (spa->spa_livelist_condense_zthr != NULL) {
1572eda14cbcSMatt Macy 		zthr_destroy(spa->spa_livelist_condense_zthr);
1573eda14cbcSMatt Macy 		spa->spa_livelist_condense_zthr = NULL;
1574eda14cbcSMatt Macy 	}
1575eda14cbcSMatt Macy }
1576eda14cbcSMatt Macy 
1577eda14cbcSMatt Macy /*
1578eda14cbcSMatt Macy  * Opposite of spa_load().
1579eda14cbcSMatt Macy  */
1580eda14cbcSMatt Macy static void
1581eda14cbcSMatt Macy spa_unload(spa_t *spa)
1582eda14cbcSMatt Macy {
1583eda14cbcSMatt Macy 	ASSERT(MUTEX_HELD(&spa_namespace_lock));
1584eda14cbcSMatt Macy 	ASSERT(spa_state(spa) != POOL_STATE_UNINITIALIZED);
1585eda14cbcSMatt Macy 
1586eda14cbcSMatt Macy 	spa_import_progress_remove(spa_guid(spa));
1587eda14cbcSMatt Macy 	spa_load_note(spa, "UNLOADING");
1588eda14cbcSMatt Macy 
1589eda14cbcSMatt Macy 	spa_wake_waiters(spa);
1590eda14cbcSMatt Macy 
1591eda14cbcSMatt Macy 	/*
1592eda14cbcSMatt Macy 	 * If the log space map feature is enabled and the pool is getting
1593eda14cbcSMatt Macy 	 * exported (but not destroyed), we want to spend some time flushing
1594eda14cbcSMatt Macy 	 * as many metaslabs as we can in an attempt to destroy log space
1595eda14cbcSMatt Macy 	 * maps and save import time.
1596eda14cbcSMatt Macy 	 */
1597eda14cbcSMatt Macy 	if (spa_should_flush_logs_on_unload(spa))
1598eda14cbcSMatt Macy 		spa_unload_log_sm_flush_all(spa);
1599eda14cbcSMatt Macy 
1600eda14cbcSMatt Macy 	/*
1601eda14cbcSMatt Macy 	 * Stop async tasks.
1602eda14cbcSMatt Macy 	 */
1603eda14cbcSMatt Macy 	spa_async_suspend(spa);
1604eda14cbcSMatt Macy 
1605eda14cbcSMatt Macy 	if (spa->spa_root_vdev) {
1606eda14cbcSMatt Macy 		vdev_t *root_vdev = spa->spa_root_vdev;
1607eda14cbcSMatt Macy 		vdev_initialize_stop_all(root_vdev, VDEV_INITIALIZE_ACTIVE);
1608eda14cbcSMatt Macy 		vdev_trim_stop_all(root_vdev, VDEV_TRIM_ACTIVE);
1609eda14cbcSMatt Macy 		vdev_autotrim_stop_all(spa);
1610eda14cbcSMatt Macy 		vdev_rebuild_stop_all(spa);
1611eda14cbcSMatt Macy 	}
1612eda14cbcSMatt Macy 
1613eda14cbcSMatt Macy 	/*
1614eda14cbcSMatt Macy 	 * Stop syncing.
1615eda14cbcSMatt Macy 	 */
1616eda14cbcSMatt Macy 	if (spa->spa_sync_on) {
1617eda14cbcSMatt Macy 		txg_sync_stop(spa->spa_dsl_pool);
1618eda14cbcSMatt Macy 		spa->spa_sync_on = B_FALSE;
1619eda14cbcSMatt Macy 	}
1620eda14cbcSMatt Macy 
1621eda14cbcSMatt Macy 	/*
1622eda14cbcSMatt Macy 	 * This ensures that there is no async metaslab prefetching
1623eda14cbcSMatt Macy 	 * while we attempt to unload the spa.
1624eda14cbcSMatt Macy 	 */
1625eda14cbcSMatt Macy 	if (spa->spa_root_vdev != NULL) {
1626eda14cbcSMatt Macy 		for (int c = 0; c < spa->spa_root_vdev->vdev_children; c++) {
1627eda14cbcSMatt Macy 			vdev_t *vc = spa->spa_root_vdev->vdev_child[c];
1628eda14cbcSMatt Macy 			if (vc->vdev_mg != NULL)
1629eda14cbcSMatt Macy 				taskq_wait(vc->vdev_mg->mg_taskq);
1630eda14cbcSMatt Macy 		}
1631eda14cbcSMatt Macy 	}
1632eda14cbcSMatt Macy 
1633eda14cbcSMatt Macy 	if (spa->spa_mmp.mmp_thread)
1634eda14cbcSMatt Macy 		mmp_thread_stop(spa);
1635eda14cbcSMatt Macy 
1636eda14cbcSMatt Macy 	/*
1637eda14cbcSMatt Macy 	 * Wait for any outstanding async I/O to complete.
1638eda14cbcSMatt Macy 	 */
1639eda14cbcSMatt Macy 	if (spa->spa_async_zio_root != NULL) {
1640eda14cbcSMatt Macy 		for (int i = 0; i < max_ncpus; i++)
1641eda14cbcSMatt Macy 			(void) zio_wait(spa->spa_async_zio_root[i]);
1642eda14cbcSMatt Macy 		kmem_free(spa->spa_async_zio_root, max_ncpus * sizeof (void *));
1643eda14cbcSMatt Macy 		spa->spa_async_zio_root = NULL;
1644eda14cbcSMatt Macy 	}
1645eda14cbcSMatt Macy 
1646eda14cbcSMatt Macy 	if (spa->spa_vdev_removal != NULL) {
1647eda14cbcSMatt Macy 		spa_vdev_removal_destroy(spa->spa_vdev_removal);
1648eda14cbcSMatt Macy 		spa->spa_vdev_removal = NULL;
1649eda14cbcSMatt Macy 	}
1650eda14cbcSMatt Macy 
1651eda14cbcSMatt Macy 	spa_destroy_aux_threads(spa);
1652eda14cbcSMatt Macy 
1653eda14cbcSMatt Macy 	spa_condense_fini(spa);
1654eda14cbcSMatt Macy 
1655eda14cbcSMatt Macy 	bpobj_close(&spa->spa_deferred_bpobj);
1656eda14cbcSMatt Macy 
1657eda14cbcSMatt Macy 	spa_config_enter(spa, SCL_ALL, spa, RW_WRITER);
1658eda14cbcSMatt Macy 
1659eda14cbcSMatt Macy 	/*
1660eda14cbcSMatt Macy 	 * Close all vdevs.
1661eda14cbcSMatt Macy 	 */
1662eda14cbcSMatt Macy 	if (spa->spa_root_vdev)
1663eda14cbcSMatt Macy 		vdev_free(spa->spa_root_vdev);
1664eda14cbcSMatt Macy 	ASSERT(spa->spa_root_vdev == NULL);
1665eda14cbcSMatt Macy 
1666eda14cbcSMatt Macy 	/*
1667eda14cbcSMatt Macy 	 * Close the dsl pool.
1668eda14cbcSMatt Macy 	 */
1669eda14cbcSMatt Macy 	if (spa->spa_dsl_pool) {
1670eda14cbcSMatt Macy 		dsl_pool_close(spa->spa_dsl_pool);
1671eda14cbcSMatt Macy 		spa->spa_dsl_pool = NULL;
1672eda14cbcSMatt Macy 		spa->spa_meta_objset = NULL;
1673eda14cbcSMatt Macy 	}
1674eda14cbcSMatt Macy 
1675eda14cbcSMatt Macy 	ddt_unload(spa);
1676eda14cbcSMatt Macy 	spa_unload_log_sm_metadata(spa);
1677eda14cbcSMatt Macy 
1678eda14cbcSMatt Macy 	/*
1679eda14cbcSMatt Macy 	 * Drop and purge level 2 cache
1680eda14cbcSMatt Macy 	 */
1681eda14cbcSMatt Macy 	spa_l2cache_drop(spa);
1682eda14cbcSMatt Macy 
1683eda14cbcSMatt Macy 	for (int i = 0; i < spa->spa_spares.sav_count; i++)
1684eda14cbcSMatt Macy 		vdev_free(spa->spa_spares.sav_vdevs[i]);
1685eda14cbcSMatt Macy 	if (spa->spa_spares.sav_vdevs) {
1686eda14cbcSMatt Macy 		kmem_free(spa->spa_spares.sav_vdevs,
1687eda14cbcSMatt Macy 		    spa->spa_spares.sav_count * sizeof (void *));
1688eda14cbcSMatt Macy 		spa->spa_spares.sav_vdevs = NULL;
1689eda14cbcSMatt Macy 	}
1690eda14cbcSMatt Macy 	if (spa->spa_spares.sav_config) {
1691eda14cbcSMatt Macy 		nvlist_free(spa->spa_spares.sav_config);
1692eda14cbcSMatt Macy 		spa->spa_spares.sav_config = NULL;
1693eda14cbcSMatt Macy 	}
1694eda14cbcSMatt Macy 	spa->spa_spares.sav_count = 0;
1695eda14cbcSMatt Macy 
1696eda14cbcSMatt Macy 	for (int i = 0; i < spa->spa_l2cache.sav_count; i++) {
1697eda14cbcSMatt Macy 		vdev_clear_stats(spa->spa_l2cache.sav_vdevs[i]);
1698eda14cbcSMatt Macy 		vdev_free(spa->spa_l2cache.sav_vdevs[i]);
1699eda14cbcSMatt Macy 	}
1700eda14cbcSMatt Macy 	if (spa->spa_l2cache.sav_vdevs) {
1701eda14cbcSMatt Macy 		kmem_free(spa->spa_l2cache.sav_vdevs,
1702eda14cbcSMatt Macy 		    spa->spa_l2cache.sav_count * sizeof (void *));
1703eda14cbcSMatt Macy 		spa->spa_l2cache.sav_vdevs = NULL;
1704eda14cbcSMatt Macy 	}
1705eda14cbcSMatt Macy 	if (spa->spa_l2cache.sav_config) {
1706eda14cbcSMatt Macy 		nvlist_free(spa->spa_l2cache.sav_config);
1707eda14cbcSMatt Macy 		spa->spa_l2cache.sav_config = NULL;
1708eda14cbcSMatt Macy 	}
1709eda14cbcSMatt Macy 	spa->spa_l2cache.sav_count = 0;
1710eda14cbcSMatt Macy 
1711eda14cbcSMatt Macy 	spa->spa_async_suspended = 0;
1712eda14cbcSMatt Macy 
1713eda14cbcSMatt Macy 	spa->spa_indirect_vdevs_loaded = B_FALSE;
1714eda14cbcSMatt Macy 
1715eda14cbcSMatt Macy 	if (spa->spa_comment != NULL) {
1716eda14cbcSMatt Macy 		spa_strfree(spa->spa_comment);
1717eda14cbcSMatt Macy 		spa->spa_comment = NULL;
1718eda14cbcSMatt Macy 	}
1719ee36e25aSMartin Matuska 	if (spa->spa_compatibility != NULL) {
1720ee36e25aSMartin Matuska 		spa_strfree(spa->spa_compatibility);
1721ee36e25aSMartin Matuska 		spa->spa_compatibility = NULL;
1722ee36e25aSMartin Matuska 	}
1723eda14cbcSMatt Macy 
1724eda14cbcSMatt Macy 	spa_config_exit(spa, SCL_ALL, spa);
1725eda14cbcSMatt Macy }
1726eda14cbcSMatt Macy 
1727eda14cbcSMatt Macy /*
1728eda14cbcSMatt Macy  * Load (or re-load) the current list of vdevs describing the active spares for
1729eda14cbcSMatt Macy  * this pool.  When this is called, we have some form of basic information in
1730eda14cbcSMatt Macy  * 'spa_spares.sav_config'.  We parse this into vdevs, try to open them, and
1731eda14cbcSMatt Macy  * then re-generate a more complete list including status information.
1732eda14cbcSMatt Macy  */
1733eda14cbcSMatt Macy void
1734eda14cbcSMatt Macy spa_load_spares(spa_t *spa)
1735eda14cbcSMatt Macy {
1736eda14cbcSMatt Macy 	nvlist_t **spares;
1737eda14cbcSMatt Macy 	uint_t nspares;
1738eda14cbcSMatt Macy 	int i;
1739eda14cbcSMatt Macy 	vdev_t *vd, *tvd;
1740eda14cbcSMatt Macy 
1741eda14cbcSMatt Macy #ifndef _KERNEL
1742eda14cbcSMatt Macy 	/*
1743eda14cbcSMatt Macy 	 * zdb opens both the current state of the pool and the
1744eda14cbcSMatt Macy 	 * checkpointed state (if present), with a different spa_t.
1745eda14cbcSMatt Macy 	 *
1746eda14cbcSMatt Macy 	 * As spare vdevs are shared among open pools, we skip loading
1747eda14cbcSMatt Macy 	 * them when we load the checkpointed state of the pool.
1748eda14cbcSMatt Macy 	 */
1749eda14cbcSMatt Macy 	if (!spa_writeable(spa))
1750eda14cbcSMatt Macy 		return;
1751eda14cbcSMatt Macy #endif
1752eda14cbcSMatt Macy 
1753eda14cbcSMatt Macy 	ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
1754eda14cbcSMatt Macy 
1755eda14cbcSMatt Macy 	/*
1756eda14cbcSMatt Macy 	 * First, close and free any existing spare vdevs.
1757eda14cbcSMatt Macy 	 */
1758eda14cbcSMatt Macy 	for (i = 0; i < spa->spa_spares.sav_count; i++) {
1759eda14cbcSMatt Macy 		vd = spa->spa_spares.sav_vdevs[i];
1760eda14cbcSMatt Macy 
1761eda14cbcSMatt Macy 		/* Undo the call to spa_activate() below */
1762eda14cbcSMatt Macy 		if ((tvd = spa_lookup_by_guid(spa, vd->vdev_guid,
1763eda14cbcSMatt Macy 		    B_FALSE)) != NULL && tvd->vdev_isspare)
1764eda14cbcSMatt Macy 			spa_spare_remove(tvd);
1765eda14cbcSMatt Macy 		vdev_close(vd);
1766eda14cbcSMatt Macy 		vdev_free(vd);
1767eda14cbcSMatt Macy 	}
1768eda14cbcSMatt Macy 
1769eda14cbcSMatt Macy 	if (spa->spa_spares.sav_vdevs)
1770eda14cbcSMatt Macy 		kmem_free(spa->spa_spares.sav_vdevs,
1771eda14cbcSMatt Macy 		    spa->spa_spares.sav_count * sizeof (void *));
1772eda14cbcSMatt Macy 
1773eda14cbcSMatt Macy 	if (spa->spa_spares.sav_config == NULL)
1774eda14cbcSMatt Macy 		nspares = 0;
1775eda14cbcSMatt Macy 	else
1776eda14cbcSMatt Macy 		VERIFY(nvlist_lookup_nvlist_array(spa->spa_spares.sav_config,
1777eda14cbcSMatt Macy 		    ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0);
1778eda14cbcSMatt Macy 
1779eda14cbcSMatt Macy 	spa->spa_spares.sav_count = (int)nspares;
1780eda14cbcSMatt Macy 	spa->spa_spares.sav_vdevs = NULL;
1781eda14cbcSMatt Macy 
1782eda14cbcSMatt Macy 	if (nspares == 0)
1783eda14cbcSMatt Macy 		return;
1784eda14cbcSMatt Macy 
1785eda14cbcSMatt Macy 	/*
1786eda14cbcSMatt Macy 	 * Construct the array of vdevs, opening them to get status in the
1787eda14cbcSMatt Macy 	 * process.  For each spare, there are potentially two different vdev_t
1788eda14cbcSMatt Macy 	 * structures associated with it: one in the list of spares (used only
1789eda14cbcSMatt Macy 	 * for basic validation purposes) and one in the active vdev
1790eda14cbcSMatt Macy 	 * configuration (if it's spared in).  During this phase we open and
1791eda14cbcSMatt Macy 	 * validate each vdev on the spare list.  If the vdev also exists in the
1792eda14cbcSMatt Macy 	 * active configuration, then we also mark this vdev as an active spare.
1793eda14cbcSMatt Macy 	 */
1794eda14cbcSMatt Macy 	spa->spa_spares.sav_vdevs = kmem_zalloc(nspares * sizeof (void *),
1795eda14cbcSMatt Macy 	    KM_SLEEP);
1796eda14cbcSMatt Macy 	for (i = 0; i < spa->spa_spares.sav_count; i++) {
1797eda14cbcSMatt Macy 		VERIFY(spa_config_parse(spa, &vd, spares[i], NULL, 0,
1798eda14cbcSMatt Macy 		    VDEV_ALLOC_SPARE) == 0);
1799eda14cbcSMatt Macy 		ASSERT(vd != NULL);
1800eda14cbcSMatt Macy 
1801eda14cbcSMatt Macy 		spa->spa_spares.sav_vdevs[i] = vd;
1802eda14cbcSMatt Macy 
1803eda14cbcSMatt Macy 		if ((tvd = spa_lookup_by_guid(spa, vd->vdev_guid,
1804eda14cbcSMatt Macy 		    B_FALSE)) != NULL) {
1805eda14cbcSMatt Macy 			if (!tvd->vdev_isspare)
1806eda14cbcSMatt Macy 				spa_spare_add(tvd);
1807eda14cbcSMatt Macy 
1808eda14cbcSMatt Macy 			/*
1809eda14cbcSMatt Macy 			 * We only mark the spare active if we were successfully
1810eda14cbcSMatt Macy 			 * able to load the vdev.  Otherwise, importing a pool
1811eda14cbcSMatt Macy 			 * with a bad active spare would result in strange
1812eda14cbcSMatt Macy 			 * behavior, because multiple pools would think the spare
1813eda14cbcSMatt Macy 			 * is actively in use.
1814eda14cbcSMatt Macy 			 *
1815eda14cbcSMatt Macy 			 * There is a vulnerability here to an equally bizarre
1816eda14cbcSMatt Macy 			 * circumstance, where a dead active spare is later
1817eda14cbcSMatt Macy 			 * brought back to life (onlined or otherwise).  Given
1818eda14cbcSMatt Macy 			 * the rarity of this scenario, and the extra complexity
1819eda14cbcSMatt Macy 			 * it adds, we ignore the possibility.
1820eda14cbcSMatt Macy 			 */
1821eda14cbcSMatt Macy 			if (!vdev_is_dead(tvd))
1822eda14cbcSMatt Macy 				spa_spare_activate(tvd);
1823eda14cbcSMatt Macy 		}
1824eda14cbcSMatt Macy 
1825eda14cbcSMatt Macy 		vd->vdev_top = vd;
1826eda14cbcSMatt Macy 		vd->vdev_aux = &spa->spa_spares;
1827eda14cbcSMatt Macy 
1828eda14cbcSMatt Macy 		if (vdev_open(vd) != 0)
1829eda14cbcSMatt Macy 			continue;
1830eda14cbcSMatt Macy 
1831eda14cbcSMatt Macy 		if (vdev_validate_aux(vd) == 0)
1832eda14cbcSMatt Macy 			spa_spare_add(vd);
1833eda14cbcSMatt Macy 	}
1834eda14cbcSMatt Macy 
1835eda14cbcSMatt Macy 	/*
1836eda14cbcSMatt Macy 	 * Recompute the stashed list of spares, with status information
1837eda14cbcSMatt Macy 	 * this time.
1838eda14cbcSMatt Macy 	 */
1839eda14cbcSMatt Macy 	VERIFY(nvlist_remove(spa->spa_spares.sav_config, ZPOOL_CONFIG_SPARES,
1840eda14cbcSMatt Macy 	    DATA_TYPE_NVLIST_ARRAY) == 0);
1841eda14cbcSMatt Macy 
1842eda14cbcSMatt Macy 	spares = kmem_alloc(spa->spa_spares.sav_count * sizeof (void *),
1843eda14cbcSMatt Macy 	    KM_SLEEP);
1844eda14cbcSMatt Macy 	for (i = 0; i < spa->spa_spares.sav_count; i++)
1845eda14cbcSMatt Macy 		spares[i] = vdev_config_generate(spa,
1846eda14cbcSMatt Macy 		    spa->spa_spares.sav_vdevs[i], B_TRUE, VDEV_CONFIG_SPARE);
1847eda14cbcSMatt Macy 	VERIFY(nvlist_add_nvlist_array(spa->spa_spares.sav_config,
1848eda14cbcSMatt Macy 	    ZPOOL_CONFIG_SPARES, spares, spa->spa_spares.sav_count) == 0);
1849eda14cbcSMatt Macy 	for (i = 0; i < spa->spa_spares.sav_count; i++)
1850eda14cbcSMatt Macy 		nvlist_free(spares[i]);
1851eda14cbcSMatt Macy 	kmem_free(spares, spa->spa_spares.sav_count * sizeof (void *));
1852eda14cbcSMatt Macy }
1853eda14cbcSMatt Macy 
1854eda14cbcSMatt Macy /*
1855eda14cbcSMatt Macy  * Load (or re-load) the current list of vdevs describing the active l2cache for
1856eda14cbcSMatt Macy  * this pool.  When this is called, we have some form of basic information in
1857eda14cbcSMatt Macy  * 'spa_l2cache.sav_config'.  We parse this into vdevs, try to open them, and
1858eda14cbcSMatt Macy  * then re-generate a more complete list including status information.
1859eda14cbcSMatt Macy  * Devices which are already active have their details maintained, and are
1860eda14cbcSMatt Macy  * not re-opened.
1861eda14cbcSMatt Macy  */
1862eda14cbcSMatt Macy void
1863eda14cbcSMatt Macy spa_load_l2cache(spa_t *spa)
1864eda14cbcSMatt Macy {
1865eda14cbcSMatt Macy 	nvlist_t **l2cache = NULL;
1866eda14cbcSMatt Macy 	uint_t nl2cache;
1867eda14cbcSMatt Macy 	int i, j, oldnvdevs;
1868eda14cbcSMatt Macy 	uint64_t guid;
1869eda14cbcSMatt Macy 	vdev_t *vd, **oldvdevs, **newvdevs;
1870eda14cbcSMatt Macy 	spa_aux_vdev_t *sav = &spa->spa_l2cache;
1871eda14cbcSMatt Macy 
1872eda14cbcSMatt Macy #ifndef _KERNEL
1873eda14cbcSMatt Macy 	/*
1874eda14cbcSMatt Macy 	 * zdb opens both the current state of the pool and the
1875eda14cbcSMatt Macy 	 * checkpointed state (if present), with a different spa_t.
1876eda14cbcSMatt Macy 	 *
1877eda14cbcSMatt Macy 	 * As L2 caches are part of the ARC which is shared among open
1878eda14cbcSMatt Macy 	 * pools, we skip loading them when we load the checkpointed
1879eda14cbcSMatt Macy 	 * state of the pool.
1880eda14cbcSMatt Macy 	 */
1881eda14cbcSMatt Macy 	if (!spa_writeable(spa))
1882eda14cbcSMatt Macy 		return;
1883eda14cbcSMatt Macy #endif
1884eda14cbcSMatt Macy 
1885eda14cbcSMatt Macy 	ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
1886eda14cbcSMatt Macy 
1887eda14cbcSMatt Macy 	oldvdevs = sav->sav_vdevs;
1888eda14cbcSMatt Macy 	oldnvdevs = sav->sav_count;
1889eda14cbcSMatt Macy 	sav->sav_vdevs = NULL;
1890eda14cbcSMatt Macy 	sav->sav_count = 0;
1891eda14cbcSMatt Macy 
1892eda14cbcSMatt Macy 	if (sav->sav_config == NULL) {
1893eda14cbcSMatt Macy 		nl2cache = 0;
1894eda14cbcSMatt Macy 		newvdevs = NULL;
1895eda14cbcSMatt Macy 		goto out;
1896eda14cbcSMatt Macy 	}
1897eda14cbcSMatt Macy 
1898eda14cbcSMatt Macy 	VERIFY(nvlist_lookup_nvlist_array(sav->sav_config,
1899eda14cbcSMatt Macy 	    ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0);
1900eda14cbcSMatt Macy 	newvdevs = kmem_alloc(nl2cache * sizeof (void *), KM_SLEEP);
1901eda14cbcSMatt Macy 
1902eda14cbcSMatt Macy 	/*
1903eda14cbcSMatt Macy 	 * Process new nvlist of vdevs.
1904eda14cbcSMatt Macy 	 */
1905eda14cbcSMatt Macy 	for (i = 0; i < nl2cache; i++) {
1906eda14cbcSMatt Macy 		VERIFY(nvlist_lookup_uint64(l2cache[i], ZPOOL_CONFIG_GUID,
1907eda14cbcSMatt Macy 		    &guid) == 0);
1908eda14cbcSMatt Macy 
1909eda14cbcSMatt Macy 		newvdevs[i] = NULL;
1910eda14cbcSMatt Macy 		for (j = 0; j < oldnvdevs; j++) {
1911eda14cbcSMatt Macy 			vd = oldvdevs[j];
1912eda14cbcSMatt Macy 			if (vd != NULL && guid == vd->vdev_guid) {
1913eda14cbcSMatt Macy 				/*
1914eda14cbcSMatt Macy 				 * Retain previous vdev for add/remove ops.
1915eda14cbcSMatt Macy 				 */
1916eda14cbcSMatt Macy 				newvdevs[i] = vd;
1917eda14cbcSMatt Macy 				oldvdevs[j] = NULL;
1918eda14cbcSMatt Macy 				break;
1919eda14cbcSMatt Macy 			}
1920eda14cbcSMatt Macy 		}
1921eda14cbcSMatt Macy 
1922eda14cbcSMatt Macy 		if (newvdevs[i] == NULL) {
1923eda14cbcSMatt Macy 			/*
1924eda14cbcSMatt Macy 			 * Create new vdev
1925eda14cbcSMatt Macy 			 */
1926eda14cbcSMatt Macy 			VERIFY(spa_config_parse(spa, &vd, l2cache[i], NULL, 0,
1927eda14cbcSMatt Macy 			    VDEV_ALLOC_L2CACHE) == 0);
1928eda14cbcSMatt Macy 			ASSERT(vd != NULL);
1929eda14cbcSMatt Macy 			newvdevs[i] = vd;
1930eda14cbcSMatt Macy 
1931eda14cbcSMatt Macy 			/*
1932eda14cbcSMatt Macy 			 * Commit this vdev as an l2cache device,
1933eda14cbcSMatt Macy 			 * even if it fails to open.
1934eda14cbcSMatt Macy 			 */
1935eda14cbcSMatt Macy 			spa_l2cache_add(vd);
1936eda14cbcSMatt Macy 
1937eda14cbcSMatt Macy 			vd->vdev_top = vd;
1938eda14cbcSMatt Macy 			vd->vdev_aux = sav;
1939eda14cbcSMatt Macy 
1940eda14cbcSMatt Macy 			spa_l2cache_activate(vd);
1941eda14cbcSMatt Macy 
1942eda14cbcSMatt Macy 			if (vdev_open(vd) != 0)
1943eda14cbcSMatt Macy 				continue;
1944eda14cbcSMatt Macy 
1945eda14cbcSMatt Macy 			(void) vdev_validate_aux(vd);
1946eda14cbcSMatt Macy 
1947eda14cbcSMatt Macy 			if (!vdev_is_dead(vd))
1948eda14cbcSMatt Macy 				l2arc_add_vdev(spa, vd);
1949eda14cbcSMatt Macy 
1950eda14cbcSMatt Macy 			/*
1951eda14cbcSMatt Macy 			 * When a cache device is added to a pool, when a
1952eda14cbcSMatt Macy 			 * pool is created with a cache device, or when the
1953eda14cbcSMatt Macy 			 * device's header is invalid, we issue an async
1954eda14cbcSMatt Macy 			 * TRIM command for the whole device, which will
1955eda14cbcSMatt Macy 			 * only execute if l2arc_trim_ahead > 0.
1956eda14cbcSMatt Macy 			 */
1957eda14cbcSMatt Macy 			spa_async_request(spa, SPA_ASYNC_L2CACHE_TRIM);
1958eda14cbcSMatt Macy 		}
1959eda14cbcSMatt Macy 	}
1960eda14cbcSMatt Macy 
1961eda14cbcSMatt Macy 	sav->sav_vdevs = newvdevs;
1962eda14cbcSMatt Macy 	sav->sav_count = (int)nl2cache;
1963eda14cbcSMatt Macy 
1964eda14cbcSMatt Macy 	/*
1965eda14cbcSMatt Macy 	 * Recompute the stashed list of l2cache devices, with status
1966eda14cbcSMatt Macy 	 * information this time.
1967eda14cbcSMatt Macy 	 */
1968eda14cbcSMatt Macy 	VERIFY(nvlist_remove(sav->sav_config, ZPOOL_CONFIG_L2CACHE,
1969eda14cbcSMatt Macy 	    DATA_TYPE_NVLIST_ARRAY) == 0);
1970eda14cbcSMatt Macy 
1971eda14cbcSMatt Macy 	if (sav->sav_count > 0)
1972eda14cbcSMatt Macy 		l2cache = kmem_alloc(sav->sav_count * sizeof (void *),
1973eda14cbcSMatt Macy 		    KM_SLEEP);
1974eda14cbcSMatt Macy 	for (i = 0; i < sav->sav_count; i++)
1975eda14cbcSMatt Macy 		l2cache[i] = vdev_config_generate(spa,
1976eda14cbcSMatt Macy 		    sav->sav_vdevs[i], B_TRUE, VDEV_CONFIG_L2CACHE);
1977eda14cbcSMatt Macy 	VERIFY(nvlist_add_nvlist_array(sav->sav_config,
1978eda14cbcSMatt Macy 	    ZPOOL_CONFIG_L2CACHE, l2cache, sav->sav_count) == 0);
1979eda14cbcSMatt Macy 
1980eda14cbcSMatt Macy out:
1981eda14cbcSMatt Macy 	/*
1982eda14cbcSMatt Macy 	 * Purge vdevs that were dropped
1983eda14cbcSMatt Macy 	 */
1984eda14cbcSMatt Macy 	for (i = 0; i < oldnvdevs; i++) {
1985eda14cbcSMatt Macy 		uint64_t pool;
1986eda14cbcSMatt Macy 
1987eda14cbcSMatt Macy 		vd = oldvdevs[i];
1988eda14cbcSMatt Macy 		if (vd != NULL) {
1989eda14cbcSMatt Macy 			ASSERT(vd->vdev_isl2cache);
1990eda14cbcSMatt Macy 
1991eda14cbcSMatt Macy 			if (spa_l2cache_exists(vd->vdev_guid, &pool) &&
1992eda14cbcSMatt Macy 			    pool != 0ULL && l2arc_vdev_present(vd))
1993eda14cbcSMatt Macy 				l2arc_remove_vdev(vd);
1994eda14cbcSMatt Macy 			vdev_clear_stats(vd);
1995eda14cbcSMatt Macy 			vdev_free(vd);
1996eda14cbcSMatt Macy 		}
1997eda14cbcSMatt Macy 	}
1998eda14cbcSMatt Macy 
1999eda14cbcSMatt Macy 	if (oldvdevs)
2000eda14cbcSMatt Macy 		kmem_free(oldvdevs, oldnvdevs * sizeof (void *));
2001eda14cbcSMatt Macy 
2002eda14cbcSMatt Macy 	for (i = 0; i < sav->sav_count; i++)
2003eda14cbcSMatt Macy 		nvlist_free(l2cache[i]);
2004eda14cbcSMatt Macy 	if (sav->sav_count)
2005eda14cbcSMatt Macy 		kmem_free(l2cache, sav->sav_count * sizeof (void *));
2006eda14cbcSMatt Macy }
2007eda14cbcSMatt Macy 
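/*
 * Read a packed nvlist from MOS object 'obj' and unpack it into 'value'.
 * The object's bonus buffer holds the packed size.
 */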
2008eda14cbcSMatt Macy static int
2009eda14cbcSMatt Macy load_nvlist(spa_t *spa, uint64_t obj, nvlist_t **value)
2010eda14cbcSMatt Macy {
2011eda14cbcSMatt Macy 	dmu_buf_t *db;
2012eda14cbcSMatt Macy 	char *packed = NULL;
2013eda14cbcSMatt Macy 	size_t nvsize = 0;
2014eda14cbcSMatt Macy 	int error;
2015eda14cbcSMatt Macy 	*value = NULL;
2016eda14cbcSMatt Macy 
2017eda14cbcSMatt Macy 	error = dmu_bonus_hold(spa->spa_meta_objset, obj, FTAG, &db);
2018eda14cbcSMatt Macy 	if (error)
2019eda14cbcSMatt Macy 		return (error);
2020eda14cbcSMatt Macy 
2021eda14cbcSMatt Macy 	nvsize = *(uint64_t *)db->db_data;
2022eda14cbcSMatt Macy 	dmu_buf_rele(db, FTAG);
2023eda14cbcSMatt Macy 
2024eda14cbcSMatt Macy 	packed = vmem_alloc(nvsize, KM_SLEEP);
2025eda14cbcSMatt Macy 	error = dmu_read(spa->spa_meta_objset, obj, 0, nvsize, packed,
2026eda14cbcSMatt Macy 	    DMU_READ_PREFETCH);
2027eda14cbcSMatt Macy 	if (error == 0)
2028eda14cbcSMatt Macy 		error = nvlist_unpack(packed, nvsize, value, 0);
2029eda14cbcSMatt Macy 	vmem_free(packed, nvsize);
2030eda14cbcSMatt Macy 
2031eda14cbcSMatt Macy 	return (error);
2032eda14cbcSMatt Macy }
2033eda14cbcSMatt Macy 
2034eda14cbcSMatt Macy /*
2035eda14cbcSMatt Macy  * Concrete top-level vdevs that are not missing and are not logs. At every
2036eda14cbcSMatt Macy  * spa_sync we write new uberblocks to at least SPA_SYNC_MIN_VDEVS core tvds.
2037eda14cbcSMatt Macy  */
2038eda14cbcSMatt Macy static uint64_t
2039eda14cbcSMatt Macy spa_healthy_core_tvds(spa_t *spa)
2040eda14cbcSMatt Macy {
2041eda14cbcSMatt Macy 	vdev_t *rvd = spa->spa_root_vdev;
2042eda14cbcSMatt Macy 	uint64_t tvds = 0;
2043eda14cbcSMatt Macy 
2044eda14cbcSMatt Macy 	for (uint64_t i = 0; i < rvd->vdev_children; i++) {
2045eda14cbcSMatt Macy 		vdev_t *vd = rvd->vdev_child[i];
2046eda14cbcSMatt Macy 		if (vd->vdev_islog)
2047eda14cbcSMatt Macy 			continue;
2048eda14cbcSMatt Macy 		if (vdev_is_concrete(vd) && !vdev_is_dead(vd))
2049eda14cbcSMatt Macy 			tvds++;
2050eda14cbcSMatt Macy 	}
2051eda14cbcSMatt Macy 
2052eda14cbcSMatt Macy 	return (tvds);
2053eda14cbcSMatt Macy }
2054eda14cbcSMatt Macy 
2055eda14cbcSMatt Macy /*
2056eda14cbcSMatt Macy  * Checks to see if the given vdev could not be opened, in which case we post a
2057eda14cbcSMatt Macy  * sysevent to notify the autoreplace code that the device has been removed.
2058eda14cbcSMatt Macy  */
2059eda14cbcSMatt Macy static void
2060eda14cbcSMatt Macy spa_check_removed(vdev_t *vd)
2061eda14cbcSMatt Macy {
2062eda14cbcSMatt Macy 	for (uint64_t c = 0; c < vd->vdev_children; c++)
2063eda14cbcSMatt Macy 		spa_check_removed(vd->vdev_child[c]);
2064eda14cbcSMatt Macy 
2065eda14cbcSMatt Macy 	if (vd->vdev_ops->vdev_op_leaf && vdev_is_dead(vd) &&
2066eda14cbcSMatt Macy 	    vdev_is_concrete(vd)) {
2067eda14cbcSMatt Macy 		zfs_post_autoreplace(vd->vdev_spa, vd);
2068eda14cbcSMatt Macy 		spa_event_notify(vd->vdev_spa, vd, NULL, ESC_ZFS_VDEV_CHECK);
2069eda14cbcSMatt Macy 	}
2070eda14cbcSMatt Macy }
2071eda14cbcSMatt Macy 
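/*
 * Check for top-level log vdevs that failed to open.  On a normal import
 * this gathers them into a ZPOOL_CONFIG_MISSING_DEVICES nvlist for the
 * caller and fails with ENXIO; with ZFS_IMPORT_MISSING_LOG set we instead
 * note that the ZIL will be dropped and set the log state to SPA_LOG_CLEAR.
 */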
2072eda14cbcSMatt Macy static int
2073eda14cbcSMatt Macy spa_check_for_missing_logs(spa_t *spa)
2074eda14cbcSMatt Macy {
2075eda14cbcSMatt Macy 	vdev_t *rvd = spa->spa_root_vdev;
2076eda14cbcSMatt Macy 
2077eda14cbcSMatt Macy 	/*
2078eda14cbcSMatt Macy 	 * If we're doing a normal import, then build up any additional
2079eda14cbcSMatt Macy 	 * diagnostic information about missing log devices.
2080eda14cbcSMatt Macy 	 * We'll pass this up to the user for further processing.
2081eda14cbcSMatt Macy 	 */
2082eda14cbcSMatt Macy 	if (!(spa->spa_import_flags & ZFS_IMPORT_MISSING_LOG)) {
2083eda14cbcSMatt Macy 		nvlist_t **child, *nv;
2084eda14cbcSMatt Macy 		uint64_t idx = 0;
2085eda14cbcSMatt Macy 
2086eda14cbcSMatt Macy 		child = kmem_alloc(rvd->vdev_children * sizeof (nvlist_t *),
2087eda14cbcSMatt Macy 		    KM_SLEEP);
2088eda14cbcSMatt Macy 		VERIFY(nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) == 0);
2089eda14cbcSMatt Macy 
2090eda14cbcSMatt Macy 		for (uint64_t c = 0; c < rvd->vdev_children; c++) {
2091eda14cbcSMatt Macy 			vdev_t *tvd = rvd->vdev_child[c];
2092eda14cbcSMatt Macy 
2093eda14cbcSMatt Macy 			/*
2094eda14cbcSMatt Macy 			 * We consider a device as missing only if it failed
2095eda14cbcSMatt Macy 			 * to open (i.e. a device that is offline or faulted
2096eda14cbcSMatt Macy 			 * is not considered missing).
2097eda14cbcSMatt Macy 			 */
2098eda14cbcSMatt Macy 			if (tvd->vdev_islog &&
2099eda14cbcSMatt Macy 			    tvd->vdev_state == VDEV_STATE_CANT_OPEN) {
2100eda14cbcSMatt Macy 				child[idx++] = vdev_config_generate(spa, tvd,
2101eda14cbcSMatt Macy 				    B_FALSE, VDEV_CONFIG_MISSING);
2102eda14cbcSMatt Macy 			}
2103eda14cbcSMatt Macy 		}
2104eda14cbcSMatt Macy 
2105eda14cbcSMatt Macy 		if (idx > 0) {
2106eda14cbcSMatt Macy 			fnvlist_add_nvlist_array(nv,
2107eda14cbcSMatt Macy 			    ZPOOL_CONFIG_CHILDREN, child, idx);
2108eda14cbcSMatt Macy 			fnvlist_add_nvlist(spa->spa_load_info,
2109eda14cbcSMatt Macy 			    ZPOOL_CONFIG_MISSING_DEVICES, nv);
2110eda14cbcSMatt Macy 
2111eda14cbcSMatt Macy 			for (uint64_t i = 0; i < idx; i++)
2112eda14cbcSMatt Macy 				nvlist_free(child[i]);
2113eda14cbcSMatt Macy 		}
2114eda14cbcSMatt Macy 		nvlist_free(nv);
2115eda14cbcSMatt Macy 		kmem_free(child, rvd->vdev_children * sizeof (nvlist_t *));
2116eda14cbcSMatt Macy 
2117eda14cbcSMatt Macy 		if (idx > 0) {
2118eda14cbcSMatt Macy 			spa_load_failed(spa, "some log devices are missing");
2119eda14cbcSMatt Macy 			vdev_dbgmsg_print_tree(rvd, 2);
2120eda14cbcSMatt Macy 			return (SET_ERROR(ENXIO));
2121eda14cbcSMatt Macy 		}
2122eda14cbcSMatt Macy 	} else {
2123eda14cbcSMatt Macy 		for (uint64_t c = 0; c < rvd->vdev_children; c++) {
2124eda14cbcSMatt Macy 			vdev_t *tvd = rvd->vdev_child[c];
2125eda14cbcSMatt Macy 
2126eda14cbcSMatt Macy 			if (tvd->vdev_islog &&
2127eda14cbcSMatt Macy 			    tvd->vdev_state == VDEV_STATE_CANT_OPEN) {
2128eda14cbcSMatt Macy 				spa_set_log_state(spa, SPA_LOG_CLEAR);
2129eda14cbcSMatt Macy 				spa_load_note(spa, "some log devices are "
2130eda14cbcSMatt Macy 				    "missing, ZIL is dropped.");
2131eda14cbcSMatt Macy 				vdev_dbgmsg_print_tree(rvd, 2);
2132eda14cbcSMatt Macy 				break;
2133eda14cbcSMatt Macy 			}
2134eda14cbcSMatt Macy 		}
2135eda14cbcSMatt Macy 	}
2136eda14cbcSMatt Macy 
2137eda14cbcSMatt Macy 	return (0);
2138eda14cbcSMatt Macy }
2139eda14cbcSMatt Macy 
2140eda14cbcSMatt Macy /*
2141eda14cbcSMatt Macy  * Check for missing log devices
2142eda14cbcSMatt Macy  */
2143eda14cbcSMatt Macy static boolean_t
2144eda14cbcSMatt Macy spa_check_logs(spa_t *spa)
2145eda14cbcSMatt Macy {
2146eda14cbcSMatt Macy 	boolean_t rv = B_FALSE;
2147eda14cbcSMatt Macy 	dsl_pool_t *dp = spa_get_dsl(spa);
2148eda14cbcSMatt Macy 
2149eda14cbcSMatt Macy 	switch (spa->spa_log_state) {
2150eda14cbcSMatt Macy 	default:
2151eda14cbcSMatt Macy 		break;
2152eda14cbcSMatt Macy 	case SPA_LOG_MISSING:
2153eda14cbcSMatt Macy 		/* need to recheck in case slog has been restored */
2154eda14cbcSMatt Macy 	case SPA_LOG_UNKNOWN:
2155eda14cbcSMatt Macy 		rv = (dmu_objset_find_dp(dp, dp->dp_root_dir_obj,
2156eda14cbcSMatt Macy 		    zil_check_log_chain, NULL, DS_FIND_CHILDREN) != 0);
2157eda14cbcSMatt Macy 		if (rv)
2158eda14cbcSMatt Macy 			spa_set_log_state(spa, SPA_LOG_MISSING);
2159eda14cbcSMatt Macy 		break;
2160eda14cbcSMatt Macy 	}
2161eda14cbcSMatt Macy 	return (rv);
2162eda14cbcSMatt Macy }
2163eda14cbcSMatt Macy 
2164184c1b94SMartin Matuska /*
2165184c1b94SMartin Matuska  * Passivate any log vdevs (note, does not apply to embedded log metaslabs).
2166184c1b94SMartin Matuska  */
2167eda14cbcSMatt Macy static boolean_t
2168eda14cbcSMatt Macy spa_passivate_log(spa_t *spa)
2169eda14cbcSMatt Macy {
2170eda14cbcSMatt Macy 	vdev_t *rvd = spa->spa_root_vdev;
2171eda14cbcSMatt Macy 	boolean_t slog_found = B_FALSE;
2172eda14cbcSMatt Macy 
2173eda14cbcSMatt Macy 	ASSERT(spa_config_held(spa, SCL_ALLOC, RW_WRITER));
2174eda14cbcSMatt Macy 
2175eda14cbcSMatt Macy 	for (int c = 0; c < rvd->vdev_children; c++) {
2176eda14cbcSMatt Macy 		vdev_t *tvd = rvd->vdev_child[c];
2177eda14cbcSMatt Macy 
2178eda14cbcSMatt Macy 		if (tvd->vdev_islog) {
2179184c1b94SMartin Matuska 			ASSERT3P(tvd->vdev_log_mg, ==, NULL);
2180184c1b94SMartin Matuska 			metaslab_group_passivate(tvd->vdev_mg);
2181eda14cbcSMatt Macy 			slog_found = B_TRUE;
2182eda14cbcSMatt Macy 		}
2183eda14cbcSMatt Macy 	}
2184eda14cbcSMatt Macy 
2185eda14cbcSMatt Macy 	return (slog_found);
2186eda14cbcSMatt Macy }
2187eda14cbcSMatt Macy 
2188184c1b94SMartin Matuska /*
2189184c1b94SMartin Matuska  * Activate any log vdevs (note, does not apply to embedded log metaslabs).
2190184c1b94SMartin Matuska  */
2191eda14cbcSMatt Macy static void
2192eda14cbcSMatt Macy spa_activate_log(spa_t *spa)
2193eda14cbcSMatt Macy {
2194eda14cbcSMatt Macy 	vdev_t *rvd = spa->spa_root_vdev;
2195eda14cbcSMatt Macy 
2196eda14cbcSMatt Macy 	ASSERT(spa_config_held(spa, SCL_ALLOC, RW_WRITER));
2197eda14cbcSMatt Macy 
2198eda14cbcSMatt Macy 	for (int c = 0; c < rvd->vdev_children; c++) {
2199eda14cbcSMatt Macy 		vdev_t *tvd = rvd->vdev_child[c];
2200eda14cbcSMatt Macy 
2201184c1b94SMartin Matuska 		if (tvd->vdev_islog) {
2202184c1b94SMartin Matuska 			ASSERT3P(tvd->vdev_log_mg, ==, NULL);
2203184c1b94SMartin Matuska 			metaslab_group_activate(tvd->vdev_mg);
2204184c1b94SMartin Matuska 		}
2205eda14cbcSMatt Macy 	}
2206eda14cbcSMatt Macy }
2207eda14cbcSMatt Macy 
2208eda14cbcSMatt Macy int
2209eda14cbcSMatt Macy spa_reset_logs(spa_t *spa)
2210eda14cbcSMatt Macy {
2211eda14cbcSMatt Macy 	int error;
2212eda14cbcSMatt Macy 
2213eda14cbcSMatt Macy 	error = dmu_objset_find(spa_name(spa), zil_reset,
2214eda14cbcSMatt Macy 	    NULL, DS_FIND_CHILDREN);
2215eda14cbcSMatt Macy 	if (error == 0) {
2216eda14cbcSMatt Macy 		/*
2217eda14cbcSMatt Macy 		 * We successfully offlined the log device; sync out the
2218eda14cbcSMatt Macy 		 * current txg so that the "stubby" block can be removed
2219eda14cbcSMatt Macy 		 * by zil_sync().
2220eda14cbcSMatt Macy 		 */
2221eda14cbcSMatt Macy 		txg_wait_synced(spa->spa_dsl_pool, 0);
2222eda14cbcSMatt Macy 	}
2223eda14cbcSMatt Macy 	return (error);
2224eda14cbcSMatt Macy }
2225eda14cbcSMatt Macy 
2226eda14cbcSMatt Macy static void
2227eda14cbcSMatt Macy spa_aux_check_removed(spa_aux_vdev_t *sav)
2228eda14cbcSMatt Macy {
2229eda14cbcSMatt Macy 	for (int i = 0; i < sav->sav_count; i++)
2230eda14cbcSMatt Macy 		spa_check_removed(sav->sav_vdevs[i]);
2231eda14cbcSMatt Macy }
2232eda14cbcSMatt Macy 
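/*
 * Completion notification for claim zios: record the largest block birth
 * txg seen among successfully claimed blocks in spa_claim_max_txg.
 */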
2233eda14cbcSMatt Macy void
2234eda14cbcSMatt Macy spa_claim_notify(zio_t *zio)
2235eda14cbcSMatt Macy {
2236eda14cbcSMatt Macy 	spa_t *spa = zio->io_spa;
2237eda14cbcSMatt Macy 
2238eda14cbcSMatt Macy 	if (zio->io_error)
2239eda14cbcSMatt Macy 		return;
2240eda14cbcSMatt Macy 
2241eda14cbcSMatt Macy 	mutex_enter(&spa->spa_props_lock);	/* any mutex will do */
2242eda14cbcSMatt Macy 	if (spa->spa_claim_max_txg < zio->io_bp->blk_birth)
2243eda14cbcSMatt Macy 		spa->spa_claim_max_txg = zio->io_bp->blk_birth;
2244eda14cbcSMatt Macy 	mutex_exit(&spa->spa_props_lock);
2245eda14cbcSMatt Macy }
2246eda14cbcSMatt Macy 
2247eda14cbcSMatt Macy typedef struct spa_load_error {
2248eda14cbcSMatt Macy 	uint64_t	sle_meta_count;
2249eda14cbcSMatt Macy 	uint64_t	sle_data_count;
2250eda14cbcSMatt Macy } spa_load_error_t;
2251eda14cbcSMatt Macy 
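/*
 * Completion callback for the verification reads issued by
 * spa_load_verify_cb(): tally metadata vs. data errors and credit the
 * read's size back to the inflight-bytes throttle.
 */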
2252eda14cbcSMatt Macy static void
2253eda14cbcSMatt Macy spa_load_verify_done(zio_t *zio)
2254eda14cbcSMatt Macy {
2255eda14cbcSMatt Macy 	blkptr_t *bp = zio->io_bp;
2256eda14cbcSMatt Macy 	spa_load_error_t *sle = zio->io_private;
2257eda14cbcSMatt Macy 	dmu_object_type_t type = BP_GET_TYPE(bp);
2258eda14cbcSMatt Macy 	int error = zio->io_error;
2259eda14cbcSMatt Macy 	spa_t *spa = zio->io_spa;
2260eda14cbcSMatt Macy 
2261eda14cbcSMatt Macy 	abd_free(zio->io_abd);
2262eda14cbcSMatt Macy 	if (error) {
2263eda14cbcSMatt Macy 		if ((BP_GET_LEVEL(bp) != 0 || DMU_OT_IS_METADATA(type)) &&
2264eda14cbcSMatt Macy 		    type != DMU_OT_INTENT_LOG)
2265eda14cbcSMatt Macy 			atomic_inc_64(&sle->sle_meta_count);
2266eda14cbcSMatt Macy 		else
2267eda14cbcSMatt Macy 			atomic_inc_64(&sle->sle_data_count);
2268eda14cbcSMatt Macy 	}
2269eda14cbcSMatt Macy 
2270eda14cbcSMatt Macy 	mutex_enter(&spa->spa_scrub_lock);
2271eda14cbcSMatt Macy 	spa->spa_load_verify_bytes -= BP_GET_PSIZE(bp);
2272eda14cbcSMatt Macy 	cv_broadcast(&spa->spa_scrub_io_cv);
2273eda14cbcSMatt Macy 	mutex_exit(&spa->spa_scrub_lock);
2274eda14cbcSMatt Macy }
2275eda14cbcSMatt Macy 
2276eda14cbcSMatt Macy /*
2277eda14cbcSMatt Macy  * The maximum number of inflight bytes is a log2 fraction of the ARC size
2278eda14cbcSMatt Macy  * (arc_target_bytes() >> spa_load_verify_shift); by default 1/16th of the ARC.
2279eda14cbcSMatt Macy  */
2280eda14cbcSMatt Macy int spa_load_verify_shift = 4;
2281eda14cbcSMatt Macy int spa_load_verify_metadata = B_TRUE;
2282eda14cbcSMatt Macy int spa_load_verify_data = B_TRUE;
2283eda14cbcSMatt Macy 
2284eda14cbcSMatt Macy /*ARGSUSED*/
2285eda14cbcSMatt Macy static int
2286eda14cbcSMatt Macy spa_load_verify_cb(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
2287eda14cbcSMatt Macy     const zbookmark_phys_t *zb, const dnode_phys_t *dnp, void *arg)
2288eda14cbcSMatt Macy {
2289eda14cbcSMatt Macy 	if (zb->zb_level == ZB_DNODE_LEVEL || BP_IS_HOLE(bp) ||
2290eda14cbcSMatt Macy 	    BP_IS_EMBEDDED(bp) || BP_IS_REDACTED(bp))
2291eda14cbcSMatt Macy 		return (0);
2292eda14cbcSMatt Macy 	/*
2293eda14cbcSMatt Macy 	 * Note: normally this routine will not be called if
2294eda14cbcSMatt Macy 	 * spa_load_verify_metadata is not set.  However, it may be useful
2295eda14cbcSMatt Macy 	 * to manually set the flag after the traversal has begun.
2296eda14cbcSMatt Macy 	 */
2297eda14cbcSMatt Macy 	if (!spa_load_verify_metadata)
2298eda14cbcSMatt Macy 		return (0);
2299eda14cbcSMatt Macy 	if (!BP_IS_METADATA(bp) && !spa_load_verify_data)
2300eda14cbcSMatt Macy 		return (0);
2301eda14cbcSMatt Macy 
2302eda14cbcSMatt Macy 	uint64_t maxinflight_bytes =
2303eda14cbcSMatt Macy 	    arc_target_bytes() >> spa_load_verify_shift;
2304eda14cbcSMatt Macy 	zio_t *rio = arg;
2305eda14cbcSMatt Macy 	size_t size = BP_GET_PSIZE(bp);
2306eda14cbcSMatt Macy 
2307eda14cbcSMatt Macy 	mutex_enter(&spa->spa_scrub_lock);
2308eda14cbcSMatt Macy 	while (spa->spa_load_verify_bytes >= maxinflight_bytes)
2309eda14cbcSMatt Macy 		cv_wait(&spa->spa_scrub_io_cv, &spa->spa_scrub_lock);
2310eda14cbcSMatt Macy 	spa->spa_load_verify_bytes += size;
2311eda14cbcSMatt Macy 	mutex_exit(&spa->spa_scrub_lock);
2312eda14cbcSMatt Macy 
2313eda14cbcSMatt Macy 	zio_nowait(zio_read(rio, spa, bp, abd_alloc_for_io(size, B_FALSE), size,
2314eda14cbcSMatt Macy 	    spa_load_verify_done, rio->io_private, ZIO_PRIORITY_SCRUB,
2315eda14cbcSMatt Macy 	    ZIO_FLAG_SPECULATIVE | ZIO_FLAG_CANFAIL |
2316eda14cbcSMatt Macy 	    ZIO_FLAG_SCRUB | ZIO_FLAG_RAW, zb));
2317eda14cbcSMatt Macy 	return (0);
2318eda14cbcSMatt Macy }
2319eda14cbcSMatt Macy 
2320eda14cbcSMatt Macy /* ARGSUSED */
2321eda14cbcSMatt Macy static int
2322eda14cbcSMatt Macy verify_dataset_name_len(dsl_pool_t *dp, dsl_dataset_t *ds, void *arg)
2323eda14cbcSMatt Macy {
2324eda14cbcSMatt Macy 	if (dsl_dataset_namelen(ds) >= ZFS_MAX_DATASET_NAME_LEN)
2325eda14cbcSMatt Macy 		return (SET_ERROR(ENAMETOOLONG));
2326eda14cbcSMatt Macy 
2327eda14cbcSMatt Macy 	return (0);
2328eda14cbcSMatt Macy }
2329eda14cbcSMatt Macy 
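/*
 * Verify the pool to the extent requested by the load policy: check dataset
 * name lengths, optionally traverse the pool issuing speculative reads for
 * metadata (and data, if enabled), and compare the resulting error counts
 * against the policy limits to decide whether the load may proceed.
 */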
2330eda14cbcSMatt Macy static int
2331eda14cbcSMatt Macy spa_load_verify(spa_t *spa)
2332eda14cbcSMatt Macy {
2333eda14cbcSMatt Macy 	zio_t *rio;
2334eda14cbcSMatt Macy 	spa_load_error_t sle = { 0 };
2335eda14cbcSMatt Macy 	zpool_load_policy_t policy;
2336eda14cbcSMatt Macy 	boolean_t verify_ok = B_FALSE;
2337eda14cbcSMatt Macy 	int error = 0;
2338eda14cbcSMatt Macy 
2339eda14cbcSMatt Macy 	zpool_get_load_policy(spa->spa_config, &policy);
2340eda14cbcSMatt Macy 
2341eda14cbcSMatt Macy 	if (policy.zlp_rewind & ZPOOL_NEVER_REWIND)
2342eda14cbcSMatt Macy 		return (0);
2343eda14cbcSMatt Macy 
2344eda14cbcSMatt Macy 	dsl_pool_config_enter(spa->spa_dsl_pool, FTAG);
2345eda14cbcSMatt Macy 	error = dmu_objset_find_dp(spa->spa_dsl_pool,
2346eda14cbcSMatt Macy 	    spa->spa_dsl_pool->dp_root_dir_obj, verify_dataset_name_len, NULL,
2347eda14cbcSMatt Macy 	    DS_FIND_CHILDREN);
2348eda14cbcSMatt Macy 	dsl_pool_config_exit(spa->spa_dsl_pool, FTAG);
2349eda14cbcSMatt Macy 	if (error != 0)
2350eda14cbcSMatt Macy 		return (error);
2351eda14cbcSMatt Macy 
2352eda14cbcSMatt Macy 	rio = zio_root(spa, NULL, &sle,
2353eda14cbcSMatt Macy 	    ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE);
2354eda14cbcSMatt Macy 
2355eda14cbcSMatt Macy 	if (spa_load_verify_metadata) {
2356eda14cbcSMatt Macy 		if (spa->spa_extreme_rewind) {
2357eda14cbcSMatt Macy 			spa_load_note(spa, "performing a complete scan of the "
2358eda14cbcSMatt Macy 			    "pool since extreme rewind is on. This may take "
2359eda14cbcSMatt Macy 			    "a very long time.\n  (spa_load_verify_data=%u, "
2360eda14cbcSMatt Macy 			    "spa_load_verify_metadata=%u)",
2361eda14cbcSMatt Macy 			    spa_load_verify_data, spa_load_verify_metadata);
2362eda14cbcSMatt Macy 		}
2363eda14cbcSMatt Macy 
2364eda14cbcSMatt Macy 		error = traverse_pool(spa, spa->spa_verify_min_txg,
2365eda14cbcSMatt Macy 		    TRAVERSE_PRE | TRAVERSE_PREFETCH_METADATA |
2366eda14cbcSMatt Macy 		    TRAVERSE_NO_DECRYPT, spa_load_verify_cb, rio);
2367eda14cbcSMatt Macy 	}
2368eda14cbcSMatt Macy 
2369eda14cbcSMatt Macy 	(void) zio_wait(rio);
2370eda14cbcSMatt Macy 	ASSERT0(spa->spa_load_verify_bytes);
2371eda14cbcSMatt Macy 
2372eda14cbcSMatt Macy 	spa->spa_load_meta_errors = sle.sle_meta_count;
2373eda14cbcSMatt Macy 	spa->spa_load_data_errors = sle.sle_data_count;
2374eda14cbcSMatt Macy 
2375eda14cbcSMatt Macy 	if (sle.sle_meta_count != 0 || sle.sle_data_count != 0) {
2376eda14cbcSMatt Macy 		spa_load_note(spa, "spa_load_verify found %llu metadata errors "
2377eda14cbcSMatt Macy 		    "and %llu data errors", (u_longlong_t)sle.sle_meta_count,
2378eda14cbcSMatt Macy 		    (u_longlong_t)sle.sle_data_count);
2379eda14cbcSMatt Macy 	}
2380eda14cbcSMatt Macy 
2381eda14cbcSMatt Macy 	if (spa_load_verify_dryrun ||
2382eda14cbcSMatt Macy 	    (!error && sle.sle_meta_count <= policy.zlp_maxmeta &&
2383eda14cbcSMatt Macy 	    sle.sle_data_count <= policy.zlp_maxdata)) {
2384eda14cbcSMatt Macy 		int64_t loss = 0;
2385eda14cbcSMatt Macy 
2386eda14cbcSMatt Macy 		verify_ok = B_TRUE;
2387eda14cbcSMatt Macy 		spa->spa_load_txg = spa->spa_uberblock.ub_txg;
2388eda14cbcSMatt Macy 		spa->spa_load_txg_ts = spa->spa_uberblock.ub_timestamp;
2389eda14cbcSMatt Macy 
2390eda14cbcSMatt Macy 		loss = spa->spa_last_ubsync_txg_ts - spa->spa_load_txg_ts;
2391eda14cbcSMatt Macy 		VERIFY(nvlist_add_uint64(spa->spa_load_info,
2392eda14cbcSMatt Macy 		    ZPOOL_CONFIG_LOAD_TIME, spa->spa_load_txg_ts) == 0);
2393eda14cbcSMatt Macy 		VERIFY(nvlist_add_int64(spa->spa_load_info,
2394eda14cbcSMatt Macy 		    ZPOOL_CONFIG_REWIND_TIME, loss) == 0);
2395eda14cbcSMatt Macy 		VERIFY(nvlist_add_uint64(spa->spa_load_info,
2396eda14cbcSMatt Macy 		    ZPOOL_CONFIG_LOAD_DATA_ERRORS, sle.sle_data_count) == 0);
2397eda14cbcSMatt Macy 	} else {
2398eda14cbcSMatt Macy 		spa->spa_load_max_txg = spa->spa_uberblock.ub_txg;
2399eda14cbcSMatt Macy 	}
2400eda14cbcSMatt Macy 
2401eda14cbcSMatt Macy 	if (spa_load_verify_dryrun)
2402eda14cbcSMatt Macy 		return (0);
2403eda14cbcSMatt Macy 
2404eda14cbcSMatt Macy 	if (error) {
2405eda14cbcSMatt Macy 		if (error != ENXIO && error != EIO)
2406eda14cbcSMatt Macy 			error = SET_ERROR(EIO);
2407eda14cbcSMatt Macy 		return (error);
2408eda14cbcSMatt Macy 	}
2409eda14cbcSMatt Macy 
2410eda14cbcSMatt Macy 	return (verify_ok ? 0 : EIO);
2411eda14cbcSMatt Macy }
2412eda14cbcSMatt Macy 
2413eda14cbcSMatt Macy /*
2414eda14cbcSMatt Macy  * Find a value in the pool props object.
2415eda14cbcSMatt Macy  */
2416eda14cbcSMatt Macy static void
2417eda14cbcSMatt Macy spa_prop_find(spa_t *spa, zpool_prop_t prop, uint64_t *val)
2418eda14cbcSMatt Macy {
2419eda14cbcSMatt Macy 	(void) zap_lookup(spa->spa_meta_objset, spa->spa_pool_props_object,
2420eda14cbcSMatt Macy 	    zpool_prop_to_name(prop), sizeof (uint64_t), 1, val);
2421eda14cbcSMatt Macy }
2422eda14cbcSMatt Macy 
2423eda14cbcSMatt Macy /*
2424eda14cbcSMatt Macy  * Find a value in the pool directory object.
2425eda14cbcSMatt Macy  */
2426eda14cbcSMatt Macy static int
2427eda14cbcSMatt Macy spa_dir_prop(spa_t *spa, const char *name, uint64_t *val, boolean_t log_enoent)
2428eda14cbcSMatt Macy {
2429eda14cbcSMatt Macy 	int error = zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
2430eda14cbcSMatt Macy 	    name, sizeof (uint64_t), 1, val);
2431eda14cbcSMatt Macy 
2432eda14cbcSMatt Macy 	if (error != 0 && (error != ENOENT || log_enoent)) {
2433eda14cbcSMatt Macy 		spa_load_failed(spa, "couldn't get '%s' value in MOS directory "
2434eda14cbcSMatt Macy 		    "[error=%d]", name, error);
2435eda14cbcSMatt Macy 	}
2436eda14cbcSMatt Macy 
2437eda14cbcSMatt Macy 	return (error);
2438eda14cbcSMatt Macy }
2439eda14cbcSMatt Macy 
2440eda14cbcSMatt Macy static int
2441eda14cbcSMatt Macy spa_vdev_err(vdev_t *vdev, vdev_aux_t aux, int err)
2442eda14cbcSMatt Macy {
2443eda14cbcSMatt Macy 	vdev_set_state(vdev, B_TRUE, VDEV_STATE_CANT_OPEN, aux);
2444eda14cbcSMatt Macy 	return (SET_ERROR(err));
2445eda14cbcSMatt Macy }
2446eda14cbcSMatt Macy 
2447eda14cbcSMatt Macy boolean_t
2448eda14cbcSMatt Macy spa_livelist_delete_check(spa_t *spa)
2449eda14cbcSMatt Macy {
2450eda14cbcSMatt Macy 	return (spa->spa_livelists_to_delete != 0);
2451eda14cbcSMatt Macy }
2452eda14cbcSMatt Macy 
2453eda14cbcSMatt Macy /* ARGSUSED */
2454eda14cbcSMatt Macy static boolean_t
2455eda14cbcSMatt Macy spa_livelist_delete_cb_check(void *arg, zthr_t *z)
2456eda14cbcSMatt Macy {
2457eda14cbcSMatt Macy 	spa_t *spa = arg;
2458eda14cbcSMatt Macy 	return (spa_livelist_delete_check(spa));
2459eda14cbcSMatt Macy }
2460eda14cbcSMatt Macy 
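/*
 * Free one block pointer from a livelist being deleted and deduct its space
 * from the pool's dp_free_dir accounting.
 */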
2461eda14cbcSMatt Macy static int
2462eda14cbcSMatt Macy delete_blkptr_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
2463eda14cbcSMatt Macy {
2464eda14cbcSMatt Macy 	spa_t *spa = arg;
2465eda14cbcSMatt Macy 	zio_free(spa, tx->tx_txg, bp);
2466eda14cbcSMatt Macy 	dsl_dir_diduse_space(tx->tx_pool->dp_free_dir, DD_USED_HEAD,
2467eda14cbcSMatt Macy 	    -bp_get_dsize_sync(spa, bp),
2468eda14cbcSMatt Macy 	    -BP_GET_PSIZE(bp), -BP_GET_UCSIZE(bp), tx);
2469eda14cbcSMatt Macy 	return (0);
2470eda14cbcSMatt Macy }
2471eda14cbcSMatt Macy 
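/*
 * Retrieve the object number of the first livelist in the given ZAP of
 * livelists that are waiting to be deleted.
 */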
2472eda14cbcSMatt Macy static int
2473eda14cbcSMatt Macy dsl_get_next_livelist_obj(objset_t *os, uint64_t zap_obj, uint64_t *llp)
2474eda14cbcSMatt Macy {
2475eda14cbcSMatt Macy 	int err;
2476eda14cbcSMatt Macy 	zap_cursor_t zc;
2477eda14cbcSMatt Macy 	zap_attribute_t za;
2478eda14cbcSMatt Macy 	zap_cursor_init(&zc, os, zap_obj);
2479eda14cbcSMatt Macy 	err = zap_cursor_retrieve(&zc, &za);
2480eda14cbcSMatt Macy 	zap_cursor_fini(&zc);
2481eda14cbcSMatt Macy 	if (err == 0)
2482eda14cbcSMatt Macy 		*llp = za.za_first_integer;
2483eda14cbcSMatt Macy 	return (err);
2484eda14cbcSMatt Macy }
2485eda14cbcSMatt Macy 
2486eda14cbcSMatt Macy /*
2487eda14cbcSMatt Macy  * Components of livelist deletion that must be performed in syncing
2488eda14cbcSMatt Macy  * context: freeing block pointers and updating the pool-wide data
2489eda14cbcSMatt Macy  * structures to indicate how much work is left to do
2490eda14cbcSMatt Macy  */
2491eda14cbcSMatt Macy typedef struct sublist_delete_arg {
2492eda14cbcSMatt Macy 	spa_t *spa;
2493eda14cbcSMatt Macy 	dsl_deadlist_t *ll;
2494eda14cbcSMatt Macy 	uint64_t key;
2495eda14cbcSMatt Macy 	bplist_t *to_free;
2496eda14cbcSMatt Macy } sublist_delete_arg_t;
2497eda14cbcSMatt Macy 
2498eda14cbcSMatt Macy static void
2499eda14cbcSMatt Macy sublist_delete_sync(void *arg, dmu_tx_t *tx)
2500eda14cbcSMatt Macy {
2501eda14cbcSMatt Macy 	sublist_delete_arg_t *sda = arg;
2502eda14cbcSMatt Macy 	spa_t *spa = sda->spa;
2503eda14cbcSMatt Macy 	dsl_deadlist_t *ll = sda->ll;
2504eda14cbcSMatt Macy 	uint64_t key = sda->key;
2505eda14cbcSMatt Macy 	bplist_t *to_free = sda->to_free;
2506eda14cbcSMatt Macy 
2507eda14cbcSMatt Macy 	bplist_iterate(to_free, delete_blkptr_cb, spa, tx);
2508eda14cbcSMatt Macy 	dsl_deadlist_remove_entry(ll, key, tx);
2509eda14cbcSMatt Macy }
2510eda14cbcSMatt Macy 
2511eda14cbcSMatt Macy typedef struct livelist_delete_arg {
2512eda14cbcSMatt Macy 	spa_t *spa;
2513eda14cbcSMatt Macy 	uint64_t ll_obj;
2514eda14cbcSMatt Macy 	uint64_t zap_obj;
2515eda14cbcSMatt Macy } livelist_delete_arg_t;
2516eda14cbcSMatt Macy 
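/*
 * Syncing-context portion of deleting an entire livelist once all of its
 * sublists have been processed: remove it from the deleted-clones ZAP, free
 * the deadlist, and decrement the livelist feature count.  When the ZAP
 * becomes empty, the pool-wide bookkeeping is torn down as well.
 */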
2517eda14cbcSMatt Macy static void
2518eda14cbcSMatt Macy livelist_delete_sync(void *arg, dmu_tx_t *tx)
2519eda14cbcSMatt Macy {
2520eda14cbcSMatt Macy 	livelist_delete_arg_t *lda = arg;
2521eda14cbcSMatt Macy 	spa_t *spa = lda->spa;
2522eda14cbcSMatt Macy 	uint64_t ll_obj = lda->ll_obj;
2523eda14cbcSMatt Macy 	uint64_t zap_obj = lda->zap_obj;
2524eda14cbcSMatt Macy 	objset_t *mos = spa->spa_meta_objset;
2525eda14cbcSMatt Macy 	uint64_t count;
2526eda14cbcSMatt Macy 
2527eda14cbcSMatt Macy 	/* free the livelist and decrement the feature count */
2528eda14cbcSMatt Macy 	VERIFY0(zap_remove_int(mos, zap_obj, ll_obj, tx));
2529eda14cbcSMatt Macy 	dsl_deadlist_free(mos, ll_obj, tx);
2530eda14cbcSMatt Macy 	spa_feature_decr(spa, SPA_FEATURE_LIVELIST, tx);
2531eda14cbcSMatt Macy 	VERIFY0(zap_count(mos, zap_obj, &count));
2532eda14cbcSMatt Macy 	if (count == 0) {
2533eda14cbcSMatt Macy 		/* no more livelists to delete */
2534eda14cbcSMatt Macy 		VERIFY0(zap_remove(mos, DMU_POOL_DIRECTORY_OBJECT,
2535eda14cbcSMatt Macy 		    DMU_POOL_DELETED_CLONES, tx));
2536eda14cbcSMatt Macy 		VERIFY0(zap_destroy(mos, zap_obj, tx));
2537eda14cbcSMatt Macy 		spa->spa_livelists_to_delete = 0;
2538eda14cbcSMatt Macy 		spa_notify_waiters(spa);
2539eda14cbcSMatt Macy 	}
2540eda14cbcSMatt Macy }
2541eda14cbcSMatt Macy 
2542eda14cbcSMatt Macy /*
2543eda14cbcSMatt Macy  * Look up the next livelist to be removed and open it. Load its first
2544eda14cbcSMatt Macy  * sublist and determine which block pointers should actually be freed,
2545eda14cbcSMatt Macy  * then call a synctask that performs the actual frees and updates the
2546eda14cbcSMatt Macy  * pool-wide livelist data.
2547eda14cbcSMatt Macy  */
2548eda14cbcSMatt Macy /* ARGSUSED */
2549eda14cbcSMatt Macy static void
2550eda14cbcSMatt Macy spa_livelist_delete_cb(void *arg, zthr_t *z)
2551eda14cbcSMatt Macy {
2552eda14cbcSMatt Macy 	spa_t *spa = arg;
2553eda14cbcSMatt Macy 	uint64_t ll_obj = 0, count;
2554eda14cbcSMatt Macy 	objset_t *mos = spa->spa_meta_objset;
2555eda14cbcSMatt Macy 	uint64_t zap_obj = spa->spa_livelists_to_delete;
2556eda14cbcSMatt Macy 	/*
2557eda14cbcSMatt Macy 	 * Determine the next livelist to delete. This function should only
2558eda14cbcSMatt Macy 	 * be called if there is at least one deleted clone.
2559eda14cbcSMatt Macy 	 */
2560eda14cbcSMatt Macy 	VERIFY0(dsl_get_next_livelist_obj(mos, zap_obj, &ll_obj));
2561eda14cbcSMatt Macy 	VERIFY0(zap_count(mos, ll_obj, &count));
2562eda14cbcSMatt Macy 	if (count > 0) {
25632c48331dSMatt Macy 		dsl_deadlist_t *ll;
2564eda14cbcSMatt Macy 		dsl_deadlist_entry_t *dle;
2565eda14cbcSMatt Macy 		bplist_t to_free;
25662c48331dSMatt Macy 		ll = kmem_zalloc(sizeof (dsl_deadlist_t), KM_SLEEP);
25672c48331dSMatt Macy 		dsl_deadlist_open(ll, mos, ll_obj);
25682c48331dSMatt Macy 		dle = dsl_deadlist_first(ll);
2569eda14cbcSMatt Macy 		ASSERT3P(dle, !=, NULL);
2570eda14cbcSMatt Macy 		bplist_create(&to_free);
2571eda14cbcSMatt Macy 		int err = dsl_process_sub_livelist(&dle->dle_bpobj, &to_free,
2572eda14cbcSMatt Macy 		    z, NULL);
2573eda14cbcSMatt Macy 		if (err == 0) {
2574eda14cbcSMatt Macy 			sublist_delete_arg_t sync_arg = {
2575eda14cbcSMatt Macy 			    .spa = spa,
25762c48331dSMatt Macy 			    .ll = ll,
2577eda14cbcSMatt Macy 			    .key = dle->dle_mintxg,
2578eda14cbcSMatt Macy 			    .to_free = &to_free
2579eda14cbcSMatt Macy 			};
2580eda14cbcSMatt Macy 			zfs_dbgmsg("deleting sublist (id %llu) from"
2581eda14cbcSMatt Macy 			    " livelist %llu, %d remaining",
2582eda14cbcSMatt Macy 			    dle->dle_bpobj.bpo_object, ll_obj, count - 1);
2583eda14cbcSMatt Macy 			VERIFY0(dsl_sync_task(spa_name(spa), NULL,
2584eda14cbcSMatt Macy 			    sublist_delete_sync, &sync_arg, 0,
2585eda14cbcSMatt Macy 			    ZFS_SPACE_CHECK_DESTROY));
2586eda14cbcSMatt Macy 		} else {
2587eda14cbcSMatt Macy 			VERIFY3U(err, ==, EINTR);
2588eda14cbcSMatt Macy 		}
2589eda14cbcSMatt Macy 		bplist_clear(&to_free);
2590eda14cbcSMatt Macy 		bplist_destroy(&to_free);
25912c48331dSMatt Macy 		dsl_deadlist_close(ll);
25922c48331dSMatt Macy 		kmem_free(ll, sizeof (dsl_deadlist_t));
2593eda14cbcSMatt Macy 	} else {
2594eda14cbcSMatt Macy 		livelist_delete_arg_t sync_arg = {
2595eda14cbcSMatt Macy 		    .spa = spa,
2596eda14cbcSMatt Macy 		    .ll_obj = ll_obj,
2597eda14cbcSMatt Macy 		    .zap_obj = zap_obj
2598eda14cbcSMatt Macy 		};
2599eda14cbcSMatt Macy 		zfs_dbgmsg("deletion of livelist %llu completed", ll_obj);
2600eda14cbcSMatt Macy 		VERIFY0(dsl_sync_task(spa_name(spa), NULL, livelist_delete_sync,
2601eda14cbcSMatt Macy 		    &sync_arg, 0, ZFS_SPACE_CHECK_DESTROY));
2602eda14cbcSMatt Macy 	}
2603eda14cbcSMatt Macy }
2604eda14cbcSMatt Macy 
2605eda14cbcSMatt Macy static void
2606eda14cbcSMatt Macy spa_start_livelist_destroy_thread(spa_t *spa)
2607eda14cbcSMatt Macy {
2608eda14cbcSMatt Macy 	ASSERT3P(spa->spa_livelist_delete_zthr, ==, NULL);
2609eda14cbcSMatt Macy 	spa->spa_livelist_delete_zthr =
2610eda14cbcSMatt Macy 	    zthr_create("z_livelist_destroy",
2611eda14cbcSMatt Macy 	    spa_livelist_delete_cb_check, spa_livelist_delete_cb, spa);
2612eda14cbcSMatt Macy }
2613eda14cbcSMatt Macy 
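/*
 * Holds the bplists into which livelist_track_new_cb() sorts block pointers
 * that were appended to a livelist after the open-context condense scan
 * began: frees and allocs are collected separately.
 */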
2614eda14cbcSMatt Macy typedef struct livelist_new_arg {
2615eda14cbcSMatt Macy 	bplist_t *allocs;
2616eda14cbcSMatt Macy 	bplist_t *frees;
2617eda14cbcSMatt Macy } livelist_new_arg_t;
2618eda14cbcSMatt Macy 
2619eda14cbcSMatt Macy static int
2620eda14cbcSMatt Macy livelist_track_new_cb(void *arg, const blkptr_t *bp, boolean_t bp_freed,
2621eda14cbcSMatt Macy     dmu_tx_t *tx)
2622eda14cbcSMatt Macy {
2623eda14cbcSMatt Macy 	ASSERT(tx == NULL);
2624eda14cbcSMatt Macy 	livelist_new_arg_t *lna = arg;
2625eda14cbcSMatt Macy 	if (bp_freed) {
2626eda14cbcSMatt Macy 		bplist_append(lna->frees, bp);
2627eda14cbcSMatt Macy 	} else {
2628eda14cbcSMatt Macy 		bplist_append(lna->allocs, bp);
2629eda14cbcSMatt Macy 		zfs_livelist_condense_new_alloc++;
2630eda14cbcSMatt Macy 	}
2631eda14cbcSMatt Macy 	return (0);
2632eda14cbcSMatt Macy }
2633eda14cbcSMatt Macy 
2634eda14cbcSMatt Macy typedef struct livelist_condense_arg {
2635eda14cbcSMatt Macy 	spa_t *spa;
2636eda14cbcSMatt Macy 	bplist_t to_keep;
2637eda14cbcSMatt Macy 	uint64_t first_size;
2638eda14cbcSMatt Macy 	uint64_t next_size;
2639eda14cbcSMatt Macy } livelist_condense_arg_t;
2640eda14cbcSMatt Macy 
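/*
 * Syncing-context half of livelist condensing: pick up any block pointers
 * added to the two entries while the zthr ran, fold the surviving ALLOCs
 * and the newly appeared FREEs back into the livelist, and remove the
 * now-redundant second entry.
 */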
2641eda14cbcSMatt Macy static void
2642eda14cbcSMatt Macy spa_livelist_condense_sync(void *arg, dmu_tx_t *tx)
2643eda14cbcSMatt Macy {
2644eda14cbcSMatt Macy 	livelist_condense_arg_t *lca = arg;
2645eda14cbcSMatt Macy 	spa_t *spa = lca->spa;
2646eda14cbcSMatt Macy 	bplist_t new_frees;
2647eda14cbcSMatt Macy 	dsl_dataset_t *ds = spa->spa_to_condense.ds;
2648eda14cbcSMatt Macy 
2649eda14cbcSMatt Macy 	/* Have we been cancelled? */
2650eda14cbcSMatt Macy 	if (spa->spa_to_condense.cancelled) {
2651eda14cbcSMatt Macy 		zfs_livelist_condense_sync_cancel++;
2652eda14cbcSMatt Macy 		goto out;
2653eda14cbcSMatt Macy 	}
2654eda14cbcSMatt Macy 
2655eda14cbcSMatt Macy 	dsl_deadlist_entry_t *first = spa->spa_to_condense.first;
2656eda14cbcSMatt Macy 	dsl_deadlist_entry_t *next = spa->spa_to_condense.next;
2657eda14cbcSMatt Macy 	dsl_deadlist_t *ll = &ds->ds_dir->dd_livelist;
2658eda14cbcSMatt Macy 
2659eda14cbcSMatt Macy 	/*
2660eda14cbcSMatt Macy 	 * It's possible that the livelist was changed while the zthr was
2661eda14cbcSMatt Macy 	 * running. Therefore, we need to check for new blkptrs in the two
2662eda14cbcSMatt Macy 	 * entries being condensed and continue to track them in the livelist.
2663eda14cbcSMatt Macy 	 * Because of the way we handle remapped blkptrs (see dbuf_remap_impl),
2664eda14cbcSMatt Macy 	 * it's possible that the newly added blkptrs are FREEs or ALLOCs so
2665eda14cbcSMatt Macy 	 * we need to sort them into two different bplists.
2666eda14cbcSMatt Macy 	 */
2667eda14cbcSMatt Macy 	uint64_t first_obj = first->dle_bpobj.bpo_object;
2668eda14cbcSMatt Macy 	uint64_t next_obj = next->dle_bpobj.bpo_object;
2669eda14cbcSMatt Macy 	uint64_t cur_first_size = first->dle_bpobj.bpo_phys->bpo_num_blkptrs;
2670eda14cbcSMatt Macy 	uint64_t cur_next_size = next->dle_bpobj.bpo_phys->bpo_num_blkptrs;
2671eda14cbcSMatt Macy 
2672eda14cbcSMatt Macy 	bplist_create(&new_frees);
2673eda14cbcSMatt Macy 	livelist_new_arg_t new_bps = {
2674eda14cbcSMatt Macy 	    .allocs = &lca->to_keep,
2675eda14cbcSMatt Macy 	    .frees = &new_frees,
2676eda14cbcSMatt Macy 	};
2677eda14cbcSMatt Macy 
2678eda14cbcSMatt Macy 	if (cur_first_size > lca->first_size) {
2679eda14cbcSMatt Macy 		VERIFY0(livelist_bpobj_iterate_from_nofree(&first->dle_bpobj,
2680eda14cbcSMatt Macy 		    livelist_track_new_cb, &new_bps, lca->first_size));
2681eda14cbcSMatt Macy 	}
2682eda14cbcSMatt Macy 	if (cur_next_size > lca->next_size) {
2683eda14cbcSMatt Macy 		VERIFY0(livelist_bpobj_iterate_from_nofree(&next->dle_bpobj,
2684eda14cbcSMatt Macy 		    livelist_track_new_cb, &new_bps, lca->next_size));
2685eda14cbcSMatt Macy 	}
2686eda14cbcSMatt Macy 
2687eda14cbcSMatt Macy 	dsl_deadlist_clear_entry(first, ll, tx);
2688eda14cbcSMatt Macy 	ASSERT(bpobj_is_empty(&first->dle_bpobj));
2689eda14cbcSMatt Macy 	dsl_deadlist_remove_entry(ll, next->dle_mintxg, tx);
2690eda14cbcSMatt Macy 
2691eda14cbcSMatt Macy 	bplist_iterate(&lca->to_keep, dsl_deadlist_insert_alloc_cb, ll, tx);
2692eda14cbcSMatt Macy 	bplist_iterate(&new_frees, dsl_deadlist_insert_free_cb, ll, tx);
2693eda14cbcSMatt Macy 	bplist_destroy(&new_frees);
2694eda14cbcSMatt Macy 
2695eda14cbcSMatt Macy 	char dsname[ZFS_MAX_DATASET_NAME_LEN];
2696eda14cbcSMatt Macy 	dsl_dataset_name(ds, dsname);
2697eda14cbcSMatt Macy 	zfs_dbgmsg("txg %llu condensing livelist of %s (id %llu), bpobj %llu "
2698eda14cbcSMatt Macy 	    "(%llu blkptrs) and bpobj %llu (%llu blkptrs) -> bpobj %llu "
2699eda14cbcSMatt Macy 	    "(%llu blkptrs)", tx->tx_txg, dsname, ds->ds_object, first_obj,
2700eda14cbcSMatt Macy 	    cur_first_size, next_obj, cur_next_size,
2701eda14cbcSMatt Macy 	    first->dle_bpobj.bpo_object,
2702eda14cbcSMatt Macy 	    first->dle_bpobj.bpo_phys->bpo_num_blkptrs);
2703eda14cbcSMatt Macy out:
2704eda14cbcSMatt Macy 	dmu_buf_rele(ds->ds_dbuf, spa);
2705eda14cbcSMatt Macy 	spa->spa_to_condense.ds = NULL;
2706eda14cbcSMatt Macy 	bplist_clear(&lca->to_keep);
2707eda14cbcSMatt Macy 	bplist_destroy(&lca->to_keep);
2708eda14cbcSMatt Macy 	kmem_free(lca, sizeof (livelist_condense_arg_t));
2709eda14cbcSMatt Macy 	spa->spa_to_condense.syncing = B_FALSE;
2710eda14cbcSMatt Macy }
2711eda14cbcSMatt Macy 
2712eda14cbcSMatt Macy static void
2713eda14cbcSMatt Macy spa_livelist_condense_cb(void *arg, zthr_t *t)
2714eda14cbcSMatt Macy {
2715eda14cbcSMatt Macy 	while (zfs_livelist_condense_zthr_pause &&
2716eda14cbcSMatt Macy 	    !(zthr_has_waiters(t) || zthr_iscancelled(t)))
2717eda14cbcSMatt Macy 		delay(1);
2718eda14cbcSMatt Macy 
2719eda14cbcSMatt Macy 	spa_t *spa = arg;
2720eda14cbcSMatt Macy 	dsl_deadlist_entry_t *first = spa->spa_to_condense.first;
2721eda14cbcSMatt Macy 	dsl_deadlist_entry_t *next = spa->spa_to_condense.next;
2722eda14cbcSMatt Macy 	uint64_t first_size, next_size;
2723eda14cbcSMatt Macy 
2724eda14cbcSMatt Macy 	livelist_condense_arg_t *lca =
2725eda14cbcSMatt Macy 	    kmem_alloc(sizeof (livelist_condense_arg_t), KM_SLEEP);
2726eda14cbcSMatt Macy 	bplist_create(&lca->to_keep);
2727eda14cbcSMatt Macy 
2728eda14cbcSMatt Macy 	/*
2729eda14cbcSMatt Macy 	 * Process the livelists (matching FREEs and ALLOCs) in open context
2730eda14cbcSMatt Macy 	 * so we have minimal work in syncing context to condense.
2731eda14cbcSMatt Macy 	 *
2732eda14cbcSMatt Macy 	 * We save bpobj sizes (first_size and next_size) to use later in
2733eda14cbcSMatt Macy 	 * syncing context to determine if entries were added to these sublists
2734eda14cbcSMatt Macy 	 * while in open context. This is possible because the clone is still
2735eda14cbcSMatt Macy 	 * active and open for normal writes and we want to make sure the new,
2736eda14cbcSMatt Macy 	 * unprocessed blockpointers are inserted into the livelist normally.
2737eda14cbcSMatt Macy 	 *
2738eda14cbcSMatt Macy 	 * Note that dsl_process_sub_livelist() both stores the number of
2739eda14cbcSMatt Macy 	 * blockpointers and iterates over them while holding the bpobj's lock,
2740eda14cbcSMatt Macy 	 * so the sizes returned to us are consistent with what was actually
2741eda14cbcSMatt Macy 	 * processed.
2742eda14cbcSMatt Macy 	 */
2743eda14cbcSMatt Macy 	int err = dsl_process_sub_livelist(&first->dle_bpobj, &lca->to_keep, t,
2744eda14cbcSMatt Macy 	    &first_size);
2745eda14cbcSMatt Macy 	if (err == 0)
2746eda14cbcSMatt Macy 		err = dsl_process_sub_livelist(&next->dle_bpobj, &lca->to_keep,
2747eda14cbcSMatt Macy 		    t, &next_size);
2748eda14cbcSMatt Macy 
2749eda14cbcSMatt Macy 	if (err == 0) {
2750eda14cbcSMatt Macy 		while (zfs_livelist_condense_sync_pause &&
2751eda14cbcSMatt Macy 		    !(zthr_has_waiters(t) || zthr_iscancelled(t)))
2752eda14cbcSMatt Macy 			delay(1);
2753eda14cbcSMatt Macy 
2754eda14cbcSMatt Macy 		dmu_tx_t *tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
2755eda14cbcSMatt Macy 		dmu_tx_mark_netfree(tx);
2756eda14cbcSMatt Macy 		dmu_tx_hold_space(tx, 1);
2757eda14cbcSMatt Macy 		err = dmu_tx_assign(tx, TXG_NOWAIT | TXG_NOTHROTTLE);
2758eda14cbcSMatt Macy 		if (err == 0) {
2759eda14cbcSMatt Macy 			/*
2760eda14cbcSMatt Macy 			 * Prevent the condense zthr from restarting before
2761eda14cbcSMatt Macy 			 * the synctask completes.
2762eda14cbcSMatt Macy 			 */
2763eda14cbcSMatt Macy 			spa->spa_to_condense.syncing = B_TRUE;
2764eda14cbcSMatt Macy 			lca->spa = spa;
2765eda14cbcSMatt Macy 			lca->first_size = first_size;
2766eda14cbcSMatt Macy 			lca->next_size = next_size;
2767eda14cbcSMatt Macy 			dsl_sync_task_nowait(spa_get_dsl(spa),
27682c48331dSMatt Macy 			    spa_livelist_condense_sync, lca, tx);
2769eda14cbcSMatt Macy 			dmu_tx_commit(tx);
2770eda14cbcSMatt Macy 			return;
2771eda14cbcSMatt Macy 		}
2772eda14cbcSMatt Macy 	}
2773eda14cbcSMatt Macy 	/*
2774eda14cbcSMatt Macy 	 * Condensing cannot continue: either it was externally stopped or
2775eda14cbcSMatt Macy 	 * we were unable to assign a tx because the pool has run out of
2776eda14cbcSMatt Macy 	 * space. In the second case, we'll just end up trying to condense
2777eda14cbcSMatt Macy 	 * again in a later txg.
2778eda14cbcSMatt Macy 	 */
2779eda14cbcSMatt Macy 	ASSERT(err != 0);
2780eda14cbcSMatt Macy 	bplist_clear(&lca->to_keep);
2781eda14cbcSMatt Macy 	bplist_destroy(&lca->to_keep);
2782eda14cbcSMatt Macy 	kmem_free(lca, sizeof (livelist_condense_arg_t));
2783eda14cbcSMatt Macy 	dmu_buf_rele(spa->spa_to_condense.ds->ds_dbuf, spa);
2784eda14cbcSMatt Macy 	spa->spa_to_condense.ds = NULL;
2785eda14cbcSMatt Macy 	if (err == EINTR)
2786eda14cbcSMatt Macy 		zfs_livelist_condense_zthr_cancel++;
2787eda14cbcSMatt Macy }
2788eda14cbcSMatt Macy 
2789eda14cbcSMatt Macy /* ARGSUSED */
2790eda14cbcSMatt Macy /*
2791eda14cbcSMatt Macy  * Check that there is something to condense but that a condense is not
2792eda14cbcSMatt Macy  * already in progress and that condensing has not been cancelled.
2793eda14cbcSMatt Macy  */
2794eda14cbcSMatt Macy static boolean_t
2795eda14cbcSMatt Macy spa_livelist_condense_cb_check(void *arg, zthr_t *z)
2796eda14cbcSMatt Macy {
2797eda14cbcSMatt Macy 	spa_t *spa = arg;
2798eda14cbcSMatt Macy 	if ((spa->spa_to_condense.ds != NULL) &&
2799eda14cbcSMatt Macy 	    (spa->spa_to_condense.syncing == B_FALSE) &&
2800eda14cbcSMatt Macy 	    (spa->spa_to_condense.cancelled == B_FALSE)) {
2801eda14cbcSMatt Macy 		return (B_TRUE);
2802eda14cbcSMatt Macy 	}
2803eda14cbcSMatt Macy 	return (B_FALSE);
2804eda14cbcSMatt Macy }
2805eda14cbcSMatt Macy 
2806eda14cbcSMatt Macy static void
2807eda14cbcSMatt Macy spa_start_livelist_condensing_thread(spa_t *spa)
2808eda14cbcSMatt Macy {
2809eda14cbcSMatt Macy 	spa->spa_to_condense.ds = NULL;
2810eda14cbcSMatt Macy 	spa->spa_to_condense.first = NULL;
2811eda14cbcSMatt Macy 	spa->spa_to_condense.next = NULL;
2812eda14cbcSMatt Macy 	spa->spa_to_condense.syncing = B_FALSE;
2813eda14cbcSMatt Macy 	spa->spa_to_condense.cancelled = B_FALSE;
2814eda14cbcSMatt Macy 
2815eda14cbcSMatt Macy 	ASSERT3P(spa->spa_livelist_condense_zthr, ==, NULL);
2816eda14cbcSMatt Macy 	spa->spa_livelist_condense_zthr =
2817eda14cbcSMatt Macy 	    zthr_create("z_livelist_condense",
2818eda14cbcSMatt Macy 	    spa_livelist_condense_cb_check,
2819eda14cbcSMatt Macy 	    spa_livelist_condense_cb, spa);
2820eda14cbcSMatt Macy }
2821eda14cbcSMatt Macy 
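/*
 * Start the background zthrs that maintain a writeable pool: indirect vdev
 * condensing, livelist destruction, livelist condensing, and checkpoint
 * discarding.
 */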
2822eda14cbcSMatt Macy static void
2823eda14cbcSMatt Macy spa_spawn_aux_threads(spa_t *spa)
2824eda14cbcSMatt Macy {
2825eda14cbcSMatt Macy 	ASSERT(spa_writeable(spa));
2826eda14cbcSMatt Macy 
2827eda14cbcSMatt Macy 	ASSERT(MUTEX_HELD(&spa_namespace_lock));
2828eda14cbcSMatt Macy 
2829eda14cbcSMatt Macy 	spa_start_indirect_condensing_thread(spa);
2830eda14cbcSMatt Macy 	spa_start_livelist_destroy_thread(spa);
2831eda14cbcSMatt Macy 	spa_start_livelist_condensing_thread(spa);
2832eda14cbcSMatt Macy 
2833eda14cbcSMatt Macy 	ASSERT3P(spa->spa_checkpoint_discard_zthr, ==, NULL);
2834eda14cbcSMatt Macy 	spa->spa_checkpoint_discard_zthr =
2835eda14cbcSMatt Macy 	    zthr_create("z_checkpoint_discard",
2836eda14cbcSMatt Macy 	    spa_checkpoint_discard_thread_check,
2837eda14cbcSMatt Macy 	    spa_checkpoint_discard_thread, spa);
2838eda14cbcSMatt Macy }
2839eda14cbcSMatt Macy 
2840eda14cbcSMatt Macy /*
2841eda14cbcSMatt Macy  * Fix up config after a partly-completed split.  This is done with the
2842eda14cbcSMatt Macy  * ZPOOL_CONFIG_SPLIT nvlist.  Both the splitting pool and the split-off
2843eda14cbcSMatt Macy  * pool have that entry in their config, but only the splitting one contains
2844eda14cbcSMatt Macy  * a list of all the guids of the vdevs that are being split off.
2845eda14cbcSMatt Macy  *
2846eda14cbcSMatt Macy  * This function determines what to do with that list: either rejoin
2847eda14cbcSMatt Macy  * all the disks to the pool, or complete the splitting process.  To attempt
2848eda14cbcSMatt Macy  * the rejoin, each disk that is offlined is marked online again, and
2849eda14cbcSMatt Macy  * we do a reopen() call.  If the vdev label for every disk that was
2850eda14cbcSMatt Macy  * marked online indicates it was successfully split off (VDEV_AUX_SPLIT_POOL)
2851eda14cbcSMatt Macy  * then we call vdev_split() on each disk, and complete the split.
2852eda14cbcSMatt Macy  *
2853eda14cbcSMatt Macy  * Otherwise we leave the config alone, with all the vdevs in place in
2854eda14cbcSMatt Macy  * the original pool.
2855eda14cbcSMatt Macy  */
2856eda14cbcSMatt Macy static void
2857eda14cbcSMatt Macy spa_try_repair(spa_t *spa, nvlist_t *config)
2858eda14cbcSMatt Macy {
2859eda14cbcSMatt Macy 	uint_t extracted;
2860eda14cbcSMatt Macy 	uint64_t *glist;
2861eda14cbcSMatt Macy 	uint_t i, gcount;
2862eda14cbcSMatt Macy 	nvlist_t *nvl;
2863eda14cbcSMatt Macy 	vdev_t **vd;
2864eda14cbcSMatt Macy 	boolean_t attempt_reopen;
2865eda14cbcSMatt Macy 
2866eda14cbcSMatt Macy 	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_SPLIT, &nvl) != 0)
2867eda14cbcSMatt Macy 		return;
2868eda14cbcSMatt Macy 
2869eda14cbcSMatt Macy 	/* check that the config is complete */
2870eda14cbcSMatt Macy 	if (nvlist_lookup_uint64_array(nvl, ZPOOL_CONFIG_SPLIT_LIST,
2871eda14cbcSMatt Macy 	    &glist, &gcount) != 0)
2872eda14cbcSMatt Macy 		return;
2873eda14cbcSMatt Macy 
2874eda14cbcSMatt Macy 	vd = kmem_zalloc(gcount * sizeof (vdev_t *), KM_SLEEP);
2875eda14cbcSMatt Macy 
2876eda14cbcSMatt Macy 	/* attempt to online all the vdevs & validate */
2877eda14cbcSMatt Macy 	attempt_reopen = B_TRUE;
2878eda14cbcSMatt Macy 	for (i = 0; i < gcount; i++) {
2879eda14cbcSMatt Macy 		if (glist[i] == 0)	/* vdev is hole */
2880eda14cbcSMatt Macy 			continue;
2881eda14cbcSMatt Macy 
2882eda14cbcSMatt Macy 		vd[i] = spa_lookup_by_guid(spa, glist[i], B_FALSE);
2883eda14cbcSMatt Macy 		if (vd[i] == NULL) {
2884eda14cbcSMatt Macy 			/*
2885eda14cbcSMatt Macy 			 * Don't bother attempting to reopen the disks;
2886eda14cbcSMatt Macy 			 * just do the split.
2887eda14cbcSMatt Macy 			 */
2888eda14cbcSMatt Macy 			attempt_reopen = B_FALSE;
2889eda14cbcSMatt Macy 		} else {
2890eda14cbcSMatt Macy 			/* attempt to re-online it */
2891eda14cbcSMatt Macy 			vd[i]->vdev_offline = B_FALSE;
2892eda14cbcSMatt Macy 		}
2893eda14cbcSMatt Macy 	}
2894eda14cbcSMatt Macy 
2895eda14cbcSMatt Macy 	if (attempt_reopen) {
2896eda14cbcSMatt Macy 		vdev_reopen(spa->spa_root_vdev);
2897eda14cbcSMatt Macy 
2898eda14cbcSMatt Macy 		/* check each device to see what state it's in */
2899eda14cbcSMatt Macy 		for (extracted = 0, i = 0; i < gcount; i++) {
2900eda14cbcSMatt Macy 			if (vd[i] != NULL &&
2901eda14cbcSMatt Macy 			    vd[i]->vdev_stat.vs_aux != VDEV_AUX_SPLIT_POOL)
2902eda14cbcSMatt Macy 				break;
2903eda14cbcSMatt Macy 			++extracted;
2904eda14cbcSMatt Macy 		}
2905eda14cbcSMatt Macy 	}
2906eda14cbcSMatt Macy 
2907eda14cbcSMatt Macy 	/*
2908eda14cbcSMatt Macy 	 * If every disk has been moved to the new pool, or if we never
2909eda14cbcSMatt Macy 	 * even attempted to look at them, then we split them off for
2910eda14cbcSMatt Macy 	 * good.
2911eda14cbcSMatt Macy 	 */
2912eda14cbcSMatt Macy 	if (!attempt_reopen || gcount == extracted) {
2913eda14cbcSMatt Macy 		for (i = 0; i < gcount; i++)
2914eda14cbcSMatt Macy 			if (vd[i] != NULL)
2915eda14cbcSMatt Macy 				vdev_split(vd[i]);
2916eda14cbcSMatt Macy 		vdev_reopen(spa->spa_root_vdev);
2917eda14cbcSMatt Macy 	}
2918eda14cbcSMatt Macy 
2919eda14cbcSMatt Macy 	kmem_free(vd, gcount * sizeof (vdev_t *));
2920eda14cbcSMatt Macy }
2921eda14cbcSMatt Macy 
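/*
 * Wrapper around spa_load_impl() that records the load state and import
 * progress, waits for evicting objsets to drain, and posts an ereport when
 * the load fails (except for EBADF).
 */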
2922eda14cbcSMatt Macy static int
2923eda14cbcSMatt Macy spa_load(spa_t *spa, spa_load_state_t state, spa_import_type_t type)
2924eda14cbcSMatt Macy {
2925eda14cbcSMatt Macy 	char *ereport = FM_EREPORT_ZFS_POOL;
2926eda14cbcSMatt Macy 	int error;
2927eda14cbcSMatt Macy 
2928eda14cbcSMatt Macy 	spa->spa_load_state = state;
2929eda14cbcSMatt Macy 	(void) spa_import_progress_set_state(spa_guid(spa),
2930eda14cbcSMatt Macy 	    spa_load_state(spa));
2931eda14cbcSMatt Macy 
2932eda14cbcSMatt Macy 	gethrestime(&spa->spa_loaded_ts);
2933eda14cbcSMatt Macy 	error = spa_load_impl(spa, type, &ereport);
2934eda14cbcSMatt Macy 
2935eda14cbcSMatt Macy 	/*
2936eda14cbcSMatt Macy 	 * Don't count references from objsets that are already closed
2937eda14cbcSMatt Macy 	 * and are making their way through the eviction process.
2938eda14cbcSMatt Macy 	 */
2939eda14cbcSMatt Macy 	spa_evicting_os_wait(spa);
2940eda14cbcSMatt Macy 	spa->spa_minref = zfs_refcount_count(&spa->spa_refcount);
2941eda14cbcSMatt Macy 	if (error) {
2942eda14cbcSMatt Macy 		if (error != EEXIST) {
2943eda14cbcSMatt Macy 			spa->spa_loaded_ts.tv_sec = 0;
2944eda14cbcSMatt Macy 			spa->spa_loaded_ts.tv_nsec = 0;
2945eda14cbcSMatt Macy 		}
2946eda14cbcSMatt Macy 		if (error != EBADF) {
2947eac7052fSMatt Macy 			(void) zfs_ereport_post(ereport, spa,
29482c48331dSMatt Macy 			    NULL, NULL, NULL, 0);
2949eda14cbcSMatt Macy 		}
2950eda14cbcSMatt Macy 	}
2951eda14cbcSMatt Macy 	spa->spa_load_state = error ? SPA_LOAD_ERROR : SPA_LOAD_NONE;
2952eda14cbcSMatt Macy 	spa->spa_ena = 0;
2953eda14cbcSMatt Macy 
2954eda14cbcSMatt Macy 	(void) spa_import_progress_set_state(spa_guid(spa),
2955eda14cbcSMatt Macy 	    spa_load_state(spa));
2956eda14cbcSMatt Macy 
2957eda14cbcSMatt Macy 	return (error);
2958eda14cbcSMatt Macy }
2959eda14cbcSMatt Macy 
2960eda14cbcSMatt Macy #ifdef ZFS_DEBUG
2961eda14cbcSMatt Macy /*
2962eda14cbcSMatt Macy  * Count the number of per-vdev ZAPs associated with all of the vdevs in the
2963eda14cbcSMatt Macy  * vdev tree rooted in the given vd, and ensure that each ZAP is present in the
2964eda14cbcSMatt Macy  * spa's per-vdev ZAP list.
2965eda14cbcSMatt Macy  */
2966eda14cbcSMatt Macy static uint64_t
2967eda14cbcSMatt Macy vdev_count_verify_zaps(vdev_t *vd)
2968eda14cbcSMatt Macy {
2969eda14cbcSMatt Macy 	spa_t *spa = vd->vdev_spa;
2970eda14cbcSMatt Macy 	uint64_t total = 0;
2971eda14cbcSMatt Macy 
2972eda14cbcSMatt Macy 	if (vd->vdev_top_zap != 0) {
2973eda14cbcSMatt Macy 		total++;
2974eda14cbcSMatt Macy 		ASSERT0(zap_lookup_int(spa->spa_meta_objset,
2975eda14cbcSMatt Macy 		    spa->spa_all_vdev_zaps, vd->vdev_top_zap));
2976eda14cbcSMatt Macy 	}
2977eda14cbcSMatt Macy 	if (vd->vdev_leaf_zap != 0) {
2978eda14cbcSMatt Macy 		total++;
2979eda14cbcSMatt Macy 		ASSERT0(zap_lookup_int(spa->spa_meta_objset,
2980eda14cbcSMatt Macy 		    spa->spa_all_vdev_zaps, vd->vdev_leaf_zap));
2981eda14cbcSMatt Macy 	}
2982eda14cbcSMatt Macy 
2983eda14cbcSMatt Macy 	for (uint64_t i = 0; i < vd->vdev_children; i++) {
2984eda14cbcSMatt Macy 		total += vdev_count_verify_zaps(vd->vdev_child[i]);
2985eda14cbcSMatt Macy 	}
2986eda14cbcSMatt Macy 
2987eda14cbcSMatt Macy 	return (total);
2988eda14cbcSMatt Macy }
2989eda14cbcSMatt Macy #endif
2990eda14cbcSMatt Macy 
2991eda14cbcSMatt Macy /*
2992eda14cbcSMatt Macy  * Determine whether the activity check is required.
2993eda14cbcSMatt Macy  */
2994eda14cbcSMatt Macy static boolean_t
2995eda14cbcSMatt Macy spa_activity_check_required(spa_t *spa, uberblock_t *ub, nvlist_t *label,
2996eda14cbcSMatt Macy     nvlist_t *config)
2997eda14cbcSMatt Macy {
2998eda14cbcSMatt Macy 	uint64_t state = 0;
2999eda14cbcSMatt Macy 	uint64_t hostid = 0;
3000eda14cbcSMatt Macy 	uint64_t tryconfig_txg = 0;
3001eda14cbcSMatt Macy 	uint64_t tryconfig_timestamp = 0;
3002eda14cbcSMatt Macy 	uint16_t tryconfig_mmp_seq = 0;
3003eda14cbcSMatt Macy 	nvlist_t *nvinfo;
3004eda14cbcSMatt Macy 
3005eda14cbcSMatt Macy 	if (nvlist_exists(config, ZPOOL_CONFIG_LOAD_INFO)) {
3006eda14cbcSMatt Macy 		nvinfo = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO);
3007eda14cbcSMatt Macy 		(void) nvlist_lookup_uint64(nvinfo, ZPOOL_CONFIG_MMP_TXG,
3008eda14cbcSMatt Macy 		    &tryconfig_txg);
3009eda14cbcSMatt Macy 		(void) nvlist_lookup_uint64(config, ZPOOL_CONFIG_TIMESTAMP,
3010eda14cbcSMatt Macy 		    &tryconfig_timestamp);
3011eda14cbcSMatt Macy 		(void) nvlist_lookup_uint16(nvinfo, ZPOOL_CONFIG_MMP_SEQ,
3012eda14cbcSMatt Macy 		    &tryconfig_mmp_seq);
3013eda14cbcSMatt Macy 	}
3014eda14cbcSMatt Macy 
3015eda14cbcSMatt Macy 	(void) nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE, &state);
3016eda14cbcSMatt Macy 
3017eda14cbcSMatt Macy 	/*
3018eda14cbcSMatt Macy 	 * Disable the MMP activity check; this is used by zdb, which
3019eda14cbcSMatt Macy 	 * is intended to be used on potentially active pools.
3020eda14cbcSMatt Macy 	 */
3021eda14cbcSMatt Macy 	if (spa->spa_import_flags & ZFS_IMPORT_SKIP_MMP)
3022eda14cbcSMatt Macy 		return (B_FALSE);
3023eda14cbcSMatt Macy 
3024eda14cbcSMatt Macy 	/*
3025eda14cbcSMatt Macy 	 * Skip the activity check when the MMP feature is disabled.
3026eda14cbcSMatt Macy 	 */
3027eda14cbcSMatt Macy 	if (ub->ub_mmp_magic == MMP_MAGIC && ub->ub_mmp_delay == 0)
3028eda14cbcSMatt Macy 		return (B_FALSE);
3029eda14cbcSMatt Macy 
3030eda14cbcSMatt Macy 	/*
3031eda14cbcSMatt Macy 	 * If the tryconfig_ values are nonzero, they are the results of an
3032eda14cbcSMatt Macy 	 * earlier tryimport.  If they all match the uberblock we just found,
3033eda14cbcSMatt Macy 	 * then the pool has not changed and we return false so we do not test
3034eda14cbcSMatt Macy 	 * a second time.
3035eda14cbcSMatt Macy 	 */
3036eda14cbcSMatt Macy 	if (tryconfig_txg && tryconfig_txg == ub->ub_txg &&
3037eda14cbcSMatt Macy 	    tryconfig_timestamp && tryconfig_timestamp == ub->ub_timestamp &&
3038eda14cbcSMatt Macy 	    tryconfig_mmp_seq && tryconfig_mmp_seq ==
3039eda14cbcSMatt Macy 	    (MMP_SEQ_VALID(ub) ? MMP_SEQ(ub) : 0))
3040eda14cbcSMatt Macy 		return (B_FALSE);
3041eda14cbcSMatt Macy 
3042eda14cbcSMatt Macy 	/*
3043eda14cbcSMatt Macy 	 * Allow the activity check to be skipped when importing the pool
3044eda14cbcSMatt Macy 	 * on the same host that last imported it.  Since the hostid from the
3045eda14cbcSMatt Macy 	 * configuration may be stale, use the one read from the label.
3046eda14cbcSMatt Macy 	 */
3047eda14cbcSMatt Macy 	if (nvlist_exists(label, ZPOOL_CONFIG_HOSTID))
3048eda14cbcSMatt Macy 		hostid = fnvlist_lookup_uint64(label, ZPOOL_CONFIG_HOSTID);
3049eda14cbcSMatt Macy 
3050eda14cbcSMatt Macy 	if (hostid == spa_get_hostid(spa))
3051eda14cbcSMatt Macy 		return (B_FALSE);
3052eda14cbcSMatt Macy 
3053eda14cbcSMatt Macy 	/*
3054eda14cbcSMatt Macy 	 * Skip the activity test when the pool was cleanly exported.
3055eda14cbcSMatt Macy 	 */
3056eda14cbcSMatt Macy 	if (state != POOL_STATE_ACTIVE)
3057eda14cbcSMatt Macy 		return (B_FALSE);
3058eda14cbcSMatt Macy 
3059eda14cbcSMatt Macy 	return (B_TRUE);
3060eda14cbcSMatt Macy }
3061eda14cbcSMatt Macy 
3062eda14cbcSMatt Macy /*
3063eda14cbcSMatt Macy  * Nanoseconds the activity check must watch for changes on-disk.
3064eda14cbcSMatt Macy  */
3065eda14cbcSMatt Macy static uint64_t
3066eda14cbcSMatt Macy spa_activity_check_duration(spa_t *spa, uberblock_t *ub)
3067eda14cbcSMatt Macy {
3068eda14cbcSMatt Macy 	uint64_t import_intervals = MAX(zfs_multihost_import_intervals, 1);
3069eda14cbcSMatt Macy 	uint64_t multihost_interval = MSEC2NSEC(
3070eda14cbcSMatt Macy 	    MMP_INTERVAL_OK(zfs_multihost_interval));
3071eda14cbcSMatt Macy 	uint64_t import_delay = MAX(NANOSEC, import_intervals *
3072eda14cbcSMatt Macy 	    multihost_interval);
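	/*
	 * For example, with zfs_multihost_interval set to 1000 ms and
	 * zfs_multihost_import_intervals set to 20, the baseline delay
	 * computed above is 20 seconds (and never less than one second).
	 */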
3073eda14cbcSMatt Macy 
3074eda14cbcSMatt Macy 	/*
3075eda14cbcSMatt Macy 	 * Local tunables determine a minimum duration except for the case
3076eda14cbcSMatt Macy 	 * where we know when the remote host will suspend the pool if MMP
3077eda14cbcSMatt Macy 	 * writes do not land.
3078eda14cbcSMatt Macy 	 *
3079eda14cbcSMatt Macy 	 * See Big Theory comment at the top of mmp.c for the reasoning behind
3080eda14cbcSMatt Macy 	 * these cases and times.
3081eda14cbcSMatt Macy 	 */
3082eda14cbcSMatt Macy 
3083eda14cbcSMatt Macy 	ASSERT(MMP_IMPORT_SAFETY_FACTOR >= 100);
3084eda14cbcSMatt Macy 
3085eda14cbcSMatt Macy 	if (MMP_INTERVAL_VALID(ub) && MMP_FAIL_INT_VALID(ub) &&
3086eda14cbcSMatt Macy 	    MMP_FAIL_INT(ub) > 0) {
3087eda14cbcSMatt Macy 
3088eda14cbcSMatt Macy 		/* MMP on remote host will suspend pool after failed writes */
3089eda14cbcSMatt Macy 		import_delay = MMP_FAIL_INT(ub) * MSEC2NSEC(MMP_INTERVAL(ub)) *
3090eda14cbcSMatt Macy 		    MMP_IMPORT_SAFETY_FACTOR / 100;
3091eda14cbcSMatt Macy 
3092eda14cbcSMatt Macy 		zfs_dbgmsg("fail_intvals>0 import_delay=%llu ub_mmp "
3093eda14cbcSMatt Macy 		    "mmp_fails=%llu ub_mmp mmp_interval=%llu "
3094eda14cbcSMatt Macy 		    "import_intervals=%u", import_delay, MMP_FAIL_INT(ub),
3095eda14cbcSMatt Macy 		    MMP_INTERVAL(ub), import_intervals);
3096eda14cbcSMatt Macy 
3097eda14cbcSMatt Macy 	} else if (MMP_INTERVAL_VALID(ub) && MMP_FAIL_INT_VALID(ub) &&
3098eda14cbcSMatt Macy 	    MMP_FAIL_INT(ub) == 0) {
3099eda14cbcSMatt Macy 
3100eda14cbcSMatt Macy 		/* MMP on remote host will never suspend pool */
3101eda14cbcSMatt Macy 		import_delay = MAX(import_delay, (MSEC2NSEC(MMP_INTERVAL(ub)) +
3102eda14cbcSMatt Macy 		    ub->ub_mmp_delay) * import_intervals);
3103eda14cbcSMatt Macy 
3104eda14cbcSMatt Macy 		zfs_dbgmsg("fail_intvals=0 import_delay=%llu ub_mmp "
3105eda14cbcSMatt Macy 		    "mmp_interval=%llu ub_mmp_delay=%llu "
3106eda14cbcSMatt Macy 		    "import_intervals=%u", import_delay, MMP_INTERVAL(ub),
3107eda14cbcSMatt Macy 		    ub->ub_mmp_delay, import_intervals);
3108eda14cbcSMatt Macy 
3109eda14cbcSMatt Macy 	} else if (MMP_VALID(ub)) {
3110eda14cbcSMatt Macy 		/*
3111eda14cbcSMatt Macy 		 * zfs-0.7 compatibility case
3112eda14cbcSMatt Macy 		 */
3113eda14cbcSMatt Macy 
3114eda14cbcSMatt Macy 		import_delay = MAX(import_delay, (multihost_interval +
3115eda14cbcSMatt Macy 		    ub->ub_mmp_delay) * import_intervals);
3116eda14cbcSMatt Macy 
3117eda14cbcSMatt Macy 		zfs_dbgmsg("import_delay=%llu ub_mmp_delay=%llu "
3118eda14cbcSMatt Macy 		    "import_intervals=%u leaves=%u", import_delay,
3119eda14cbcSMatt Macy 		    ub->ub_mmp_delay, import_intervals,
3120eda14cbcSMatt Macy 		    vdev_count_leaves(spa));
3121eda14cbcSMatt Macy 	} else {
3122eda14cbcSMatt Macy 		/* Using local tunings is the only reasonable option */
3123eda14cbcSMatt Macy 		zfs_dbgmsg("pool last imported on non-MMP aware "
3124eda14cbcSMatt Macy 		    "host using import_delay=%llu multihost_interval=%llu "
3125eda14cbcSMatt Macy 		    "import_intervals=%u", import_delay, multihost_interval,
3126eda14cbcSMatt Macy 		    import_intervals);
3127eda14cbcSMatt Macy 	}
3128eda14cbcSMatt Macy 
3129eda14cbcSMatt Macy 	return (import_delay);
3130eda14cbcSMatt Macy }
3131eda14cbcSMatt Macy 
3132eda14cbcSMatt Macy /*
3133eda14cbcSMatt Macy  * Perform the import activity check.  If the user canceled the import or
3134eda14cbcSMatt Macy  * we detected activity then fail.
3135eda14cbcSMatt Macy  */
3136eda14cbcSMatt Macy static int
3137eda14cbcSMatt Macy spa_activity_check(spa_t *spa, uberblock_t *ub, nvlist_t *config)
3138eda14cbcSMatt Macy {
3139eda14cbcSMatt Macy 	uint64_t txg = ub->ub_txg;
3140eda14cbcSMatt Macy 	uint64_t timestamp = ub->ub_timestamp;
3141eda14cbcSMatt Macy 	uint64_t mmp_config = ub->ub_mmp_config;
3142eda14cbcSMatt Macy 	uint16_t mmp_seq = MMP_SEQ_VALID(ub) ? MMP_SEQ(ub) : 0;
3143eda14cbcSMatt Macy 	uint64_t import_delay;
3144eda14cbcSMatt Macy 	hrtime_t import_expire;
3145eda14cbcSMatt Macy 	nvlist_t *mmp_label = NULL;
3146eda14cbcSMatt Macy 	vdev_t *rvd = spa->spa_root_vdev;
3147eda14cbcSMatt Macy 	kcondvar_t cv;
3148eda14cbcSMatt Macy 	kmutex_t mtx;
3149eda14cbcSMatt Macy 	int error = 0;
3150eda14cbcSMatt Macy 
3151eda14cbcSMatt Macy 	cv_init(&cv, NULL, CV_DEFAULT, NULL);
3152eda14cbcSMatt Macy 	mutex_init(&mtx, NULL, MUTEX_DEFAULT, NULL);
3153eda14cbcSMatt Macy 	mutex_enter(&mtx);
3154eda14cbcSMatt Macy 
3155eda14cbcSMatt Macy 	/*
3156eda14cbcSMatt Macy 	 * If ZPOOL_CONFIG_MMP_TXG is present an activity check was performed
3157eda14cbcSMatt Macy 	 * during the earlier tryimport.  If the txg recorded there is 0 then
3158eda14cbcSMatt Macy 	 * the pool is known to be active on another host.
3159eda14cbcSMatt Macy 	 *
3160eda14cbcSMatt Macy 	 * Otherwise, the pool might be in use on another host.  Check for
3161eda14cbcSMatt Macy 	 * changes in the uberblocks on disk if necessary.
3162eda14cbcSMatt Macy 	 */
3163eda14cbcSMatt Macy 	if (nvlist_exists(config, ZPOOL_CONFIG_LOAD_INFO)) {
3164eda14cbcSMatt Macy 		nvlist_t *nvinfo = fnvlist_lookup_nvlist(config,
3165eda14cbcSMatt Macy 		    ZPOOL_CONFIG_LOAD_INFO);
3166eda14cbcSMatt Macy 
3167eda14cbcSMatt Macy 		if (nvlist_exists(nvinfo, ZPOOL_CONFIG_MMP_TXG) &&
3168eda14cbcSMatt Macy 		    fnvlist_lookup_uint64(nvinfo, ZPOOL_CONFIG_MMP_TXG) == 0) {
3169eda14cbcSMatt Macy 			vdev_uberblock_load(rvd, ub, &mmp_label);
3170eda14cbcSMatt Macy 			error = SET_ERROR(EREMOTEIO);
3171eda14cbcSMatt Macy 			goto out;
3172eda14cbcSMatt Macy 		}
3173eda14cbcSMatt Macy 	}
3174eda14cbcSMatt Macy 
3175eda14cbcSMatt Macy 	import_delay = spa_activity_check_duration(spa, ub);
3176eda14cbcSMatt Macy 
3177eda14cbcSMatt Macy 	/* Add a small random factor in case of simultaneous imports (0-25%) */
3178eda14cbcSMatt Macy 	import_delay += import_delay * spa_get_random(250) / 1000;
3179eda14cbcSMatt Macy 
3180eda14cbcSMatt Macy 	import_expire = gethrtime() + import_delay;
3181eda14cbcSMatt Macy 
3182eda14cbcSMatt Macy 	while (gethrtime() < import_expire) {
3183eda14cbcSMatt Macy 		(void) spa_import_progress_set_mmp_check(spa_guid(spa),
3184eda14cbcSMatt Macy 		    NSEC2SEC(import_expire - gethrtime()));
3185eda14cbcSMatt Macy 
3186eda14cbcSMatt Macy 		vdev_uberblock_load(rvd, ub, &mmp_label);
3187eda14cbcSMatt Macy 
3188eda14cbcSMatt Macy 		if (txg != ub->ub_txg || timestamp != ub->ub_timestamp ||
3189eda14cbcSMatt Macy 		    mmp_seq != (MMP_SEQ_VALID(ub) ? MMP_SEQ(ub) : 0)) {
3190eda14cbcSMatt Macy 			zfs_dbgmsg("multihost activity detected "
3191eda14cbcSMatt Macy 			    "txg %llu ub_txg  %llu "
3192eda14cbcSMatt Macy 			    "timestamp %llu ub_timestamp  %llu "
3193eda14cbcSMatt Macy 			    "mmp_config %#llx ub_mmp_config %#llx",
3194eda14cbcSMatt Macy 			    txg, ub->ub_txg, timestamp, ub->ub_timestamp,
3195eda14cbcSMatt Macy 			    mmp_config, ub->ub_mmp_config);
3196eda14cbcSMatt Macy 
3197eda14cbcSMatt Macy 			error = SET_ERROR(EREMOTEIO);
3198eda14cbcSMatt Macy 			break;
3199eda14cbcSMatt Macy 		}
3200eda14cbcSMatt Macy 
3201eda14cbcSMatt Macy 		if (mmp_label) {
3202eda14cbcSMatt Macy 			nvlist_free(mmp_label);
3203eda14cbcSMatt Macy 			mmp_label = NULL;
3204eda14cbcSMatt Macy 		}
3205eda14cbcSMatt Macy 
3206eda14cbcSMatt Macy 		error = cv_timedwait_sig(&cv, &mtx, ddi_get_lbolt() + hz);
3207eda14cbcSMatt Macy 		if (error != -1) {
3208eda14cbcSMatt Macy 			error = SET_ERROR(EINTR);
3209eda14cbcSMatt Macy 			break;
3210eda14cbcSMatt Macy 		}
3211eda14cbcSMatt Macy 		error = 0;
3212eda14cbcSMatt Macy 	}
3213eda14cbcSMatt Macy 
3214eda14cbcSMatt Macy out:
3215eda14cbcSMatt Macy 	mutex_exit(&mtx);
3216eda14cbcSMatt Macy 	mutex_destroy(&mtx);
3217eda14cbcSMatt Macy 	cv_destroy(&cv);
3218eda14cbcSMatt Macy 
3219eda14cbcSMatt Macy 	/*
3220eda14cbcSMatt Macy 	 * If the pool is determined to be active, store the status in the
3221eda14cbcSMatt Macy 	 * spa->spa_load_info nvlist.  If the remote hostname or hostid are
3222eda14cbcSMatt Macy 	 * available from the configuration read from disk, store them as well.
3223eda14cbcSMatt Macy 	 * This allows 'zpool import' to generate a more useful message.
3224eda14cbcSMatt Macy 	 *
3225eda14cbcSMatt Macy 	 * ZPOOL_CONFIG_MMP_STATE    - observed pool status (mandatory)
3226eda14cbcSMatt Macy 	 * ZPOOL_CONFIG_MMP_HOSTNAME - hostname from the active pool
3227eda14cbcSMatt Macy 	 * ZPOOL_CONFIG_MMP_HOSTID   - hostid from the active pool
3228eda14cbcSMatt Macy 	 */
3229eda14cbcSMatt Macy 	if (error == EREMOTEIO) {
3230eda14cbcSMatt Macy 		char *hostname = "<unknown>";
3231eda14cbcSMatt Macy 		uint64_t hostid = 0;
3232eda14cbcSMatt Macy 
3233eda14cbcSMatt Macy 		if (mmp_label) {
3234eda14cbcSMatt Macy 			if (nvlist_exists(mmp_label, ZPOOL_CONFIG_HOSTNAME)) {
3235eda14cbcSMatt Macy 				hostname = fnvlist_lookup_string(mmp_label,
3236eda14cbcSMatt Macy 				    ZPOOL_CONFIG_HOSTNAME);
3237eda14cbcSMatt Macy 				fnvlist_add_string(spa->spa_load_info,
3238eda14cbcSMatt Macy 				    ZPOOL_CONFIG_MMP_HOSTNAME, hostname);
3239eda14cbcSMatt Macy 			}
3240eda14cbcSMatt Macy 
3241eda14cbcSMatt Macy 			if (nvlist_exists(mmp_label, ZPOOL_CONFIG_HOSTID)) {
3242eda14cbcSMatt Macy 				hostid = fnvlist_lookup_uint64(mmp_label,
3243eda14cbcSMatt Macy 				    ZPOOL_CONFIG_HOSTID);
3244eda14cbcSMatt Macy 				fnvlist_add_uint64(spa->spa_load_info,
3245eda14cbcSMatt Macy 				    ZPOOL_CONFIG_MMP_HOSTID, hostid);
3246eda14cbcSMatt Macy 			}
3247eda14cbcSMatt Macy 		}
3248eda14cbcSMatt Macy 
3249eda14cbcSMatt Macy 		fnvlist_add_uint64(spa->spa_load_info,
3250eda14cbcSMatt Macy 		    ZPOOL_CONFIG_MMP_STATE, MMP_STATE_ACTIVE);
3251eda14cbcSMatt Macy 		fnvlist_add_uint64(spa->spa_load_info,
3252eda14cbcSMatt Macy 		    ZPOOL_CONFIG_MMP_TXG, 0);
3253eda14cbcSMatt Macy 
3254eda14cbcSMatt Macy 		error = spa_vdev_err(rvd, VDEV_AUX_ACTIVE, EREMOTEIO);
3255eda14cbcSMatt Macy 	}
3256eda14cbcSMatt Macy 
3257eda14cbcSMatt Macy 	if (mmp_label)
3258eda14cbcSMatt Macy 		nvlist_free(mmp_label);
3259eda14cbcSMatt Macy 
3260eda14cbcSMatt Macy 	return (error);
3261eda14cbcSMatt Macy }
3262eda14cbcSMatt Macy 
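/*
 * Fail the load of a non-root pool whose MOS config records a hostid
 * belonging to a different system, i.e. the pool was last accessed by
 * another host.
 */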
3263eda14cbcSMatt Macy static int
3264eda14cbcSMatt Macy spa_verify_host(spa_t *spa, nvlist_t *mos_config)
3265eda14cbcSMatt Macy {
3266eda14cbcSMatt Macy 	uint64_t hostid;
3267eda14cbcSMatt Macy 	char *hostname;
3268eda14cbcSMatt Macy 	uint64_t myhostid = 0;
3269eda14cbcSMatt Macy 
3270eda14cbcSMatt Macy 	if (!spa_is_root(spa) && nvlist_lookup_uint64(mos_config,
3271eda14cbcSMatt Macy 	    ZPOOL_CONFIG_HOSTID, &hostid) == 0) {
3272eda14cbcSMatt Macy 		hostname = fnvlist_lookup_string(mos_config,
3273eda14cbcSMatt Macy 		    ZPOOL_CONFIG_HOSTNAME);
3274eda14cbcSMatt Macy 
3275eda14cbcSMatt Macy 		myhostid = zone_get_hostid(NULL);
3276eda14cbcSMatt Macy 
3277eda14cbcSMatt Macy 		if (hostid != 0 && myhostid != 0 && hostid != myhostid) {
3278eda14cbcSMatt Macy 			cmn_err(CE_WARN, "pool '%s' could not be "
3279eda14cbcSMatt Macy 			    "loaded as it was last accessed by "
3280eda14cbcSMatt Macy 			    "another system (host: %s hostid: 0x%llx). "
3281ac0bf12eSMatt Macy 			    "See: https://openzfs.github.io/openzfs-docs/msg/"
3282ac0bf12eSMatt Macy 			    "ZFS-8000-EY",
3283eda14cbcSMatt Macy 			    spa_name(spa), hostname, (u_longlong_t)hostid);
3284eda14cbcSMatt Macy 			spa_load_failed(spa, "hostid verification failed: pool "
3285eda14cbcSMatt Macy 			    "last accessed by host: %s (hostid: 0x%llx)",
3286eda14cbcSMatt Macy 			    hostname, (u_longlong_t)hostid);
3287eda14cbcSMatt Macy 			return (SET_ERROR(EBADF));
3288eda14cbcSMatt Macy 		}
3289eda14cbcSMatt Macy 	}
3290eda14cbcSMatt Macy 
3291eda14cbcSMatt Macy 	return (0);
3292eda14cbcSMatt Macy }
3293eda14cbcSMatt Macy 
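/*
 * First stage of pool load: extract the pool guid, comment, compatibility,
 * split information and vdev tree from the provided config, make sure no
 * pool with the same guid is already imported, and parse the vdev tree
 * into the spa's in-core vdev state.
 */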
3294eda14cbcSMatt Macy static int
3295eda14cbcSMatt Macy spa_ld_parse_config(spa_t *spa, spa_import_type_t type)
3296eda14cbcSMatt Macy {
3297eda14cbcSMatt Macy 	int error = 0;
3298eda14cbcSMatt Macy 	nvlist_t *nvtree, *nvl, *config = spa->spa_config;
3299eda14cbcSMatt Macy 	int parse;
3300eda14cbcSMatt Macy 	vdev_t *rvd;
3301eda14cbcSMatt Macy 	uint64_t pool_guid;
3302eda14cbcSMatt Macy 	char *comment;
3303ee36e25aSMartin Matuska 	char *compatibility;
3304eda14cbcSMatt Macy 
3305eda14cbcSMatt Macy 	/*
3306eda14cbcSMatt Macy 	 * Versioning wasn't explicitly added to the label until later, so if
3307eda14cbcSMatt Macy 	 * it's not present, treat it as the initial version.
3308eda14cbcSMatt Macy 	 */
3309eda14cbcSMatt Macy 	if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
3310eda14cbcSMatt Macy 	    &spa->spa_ubsync.ub_version) != 0)
3311eda14cbcSMatt Macy 		spa->spa_ubsync.ub_version = SPA_VERSION_INITIAL;
3312eda14cbcSMatt Macy 
3313eda14cbcSMatt Macy 	if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID, &pool_guid)) {
3314eda14cbcSMatt Macy 		spa_load_failed(spa, "invalid config provided: '%s' missing",
3315eda14cbcSMatt Macy 		    ZPOOL_CONFIG_POOL_GUID);
3316eda14cbcSMatt Macy 		return (SET_ERROR(EINVAL));
3317eda14cbcSMatt Macy 	}
3318eda14cbcSMatt Macy 
3319eda14cbcSMatt Macy 	/*
3320eda14cbcSMatt Macy 	 * If we are doing an import, ensure that the pool is not already
3321eda14cbcSMatt Macy 	 * imported by checking if its pool guid already exists in the
3322eda14cbcSMatt Macy 	 * spa namespace.
3323eda14cbcSMatt Macy 	 *
3324eda14cbcSMatt Macy 	 * The only case in which we allow an already imported pool to be
3325eda14cbcSMatt Macy 	 * imported again is when the pool is checkpointed and we want to
3326eda14cbcSMatt Macy 	 * look at its checkpointed state from userland tools like zdb.
3327eda14cbcSMatt Macy 	 */
3328eda14cbcSMatt Macy #ifdef _KERNEL
3329eda14cbcSMatt Macy 	if ((spa->spa_load_state == SPA_LOAD_IMPORT ||
3330eda14cbcSMatt Macy 	    spa->spa_load_state == SPA_LOAD_TRYIMPORT) &&
3331eda14cbcSMatt Macy 	    spa_guid_exists(pool_guid, 0)) {
3332eda14cbcSMatt Macy #else
3333eda14cbcSMatt Macy 	if ((spa->spa_load_state == SPA_LOAD_IMPORT ||
3334eda14cbcSMatt Macy 	    spa->spa_load_state == SPA_LOAD_TRYIMPORT) &&
3335eda14cbcSMatt Macy 	    spa_guid_exists(pool_guid, 0) &&
3336eda14cbcSMatt Macy 	    !spa_importing_readonly_checkpoint(spa)) {
3337eda14cbcSMatt Macy #endif
3338eda14cbcSMatt Macy 		spa_load_failed(spa, "a pool with guid %llu is already open",
3339eda14cbcSMatt Macy 		    (u_longlong_t)pool_guid);
3340eda14cbcSMatt Macy 		return (SET_ERROR(EEXIST));
3341eda14cbcSMatt Macy 	}
3342eda14cbcSMatt Macy 
3343eda14cbcSMatt Macy 	spa->spa_config_guid = pool_guid;
3344eda14cbcSMatt Macy 
3345eda14cbcSMatt Macy 	nvlist_free(spa->spa_load_info);
3346eda14cbcSMatt Macy 	spa->spa_load_info = fnvlist_alloc();
3347eda14cbcSMatt Macy 
3348eda14cbcSMatt Macy 	ASSERT(spa->spa_comment == NULL);
3349eda14cbcSMatt Macy 	if (nvlist_lookup_string(config, ZPOOL_CONFIG_COMMENT, &comment) == 0)
3350eda14cbcSMatt Macy 		spa->spa_comment = spa_strdup(comment);
3351eda14cbcSMatt Macy 
3352ee36e25aSMartin Matuska 	ASSERT(spa->spa_compatibility == NULL);
3353ee36e25aSMartin Matuska 	if (nvlist_lookup_string(config, ZPOOL_CONFIG_COMPATIBILITY,
3354ee36e25aSMartin Matuska 	    &compatibility) == 0)
3355ee36e25aSMartin Matuska 		spa->spa_compatibility = spa_strdup(compatibility);
3356ee36e25aSMartin Matuska 
3357eda14cbcSMatt Macy 	(void) nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_TXG,
3358eda14cbcSMatt Macy 	    &spa->spa_config_txg);
3359eda14cbcSMatt Macy 
3360eda14cbcSMatt Macy 	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_SPLIT, &nvl) == 0)
3361eda14cbcSMatt Macy 		spa->spa_config_splitting = fnvlist_dup(nvl);
3362eda14cbcSMatt Macy 
3363eda14cbcSMatt Macy 	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &nvtree)) {
3364eda14cbcSMatt Macy 		spa_load_failed(spa, "invalid config provided: '%s' missing",
3365eda14cbcSMatt Macy 		    ZPOOL_CONFIG_VDEV_TREE);
3366eda14cbcSMatt Macy 		return (SET_ERROR(EINVAL));
3367eda14cbcSMatt Macy 	}
3368eda14cbcSMatt Macy 
3369eda14cbcSMatt Macy 	/*
3370eda14cbcSMatt Macy 	 * Create "The Godfather" zio to hold all async IOs
3371eda14cbcSMatt Macy 	 */
3372eda14cbcSMatt Macy 	spa->spa_async_zio_root = kmem_alloc(max_ncpus * sizeof (void *),
3373eda14cbcSMatt Macy 	    KM_SLEEP);
3374eda14cbcSMatt Macy 	for (int i = 0; i < max_ncpus; i++) {
3375eda14cbcSMatt Macy 		spa->spa_async_zio_root[i] = zio_root(spa, NULL, NULL,
3376eda14cbcSMatt Macy 		    ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE |
3377eda14cbcSMatt Macy 		    ZIO_FLAG_GODFATHER);
3378eda14cbcSMatt Macy 	}
3379eda14cbcSMatt Macy 
3380eda14cbcSMatt Macy 	/*
3381eda14cbcSMatt Macy 	 * Parse the configuration into a vdev tree.  We explicitly set the
3382eda14cbcSMatt Macy 	 * value that will be returned by spa_version() since parsing the
3383eda14cbcSMatt Macy 	 * configuration requires knowing the version number.
3384eda14cbcSMatt Macy 	 */
3385eda14cbcSMatt Macy 	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
3386eda14cbcSMatt Macy 	parse = (type == SPA_IMPORT_EXISTING ?
3387eda14cbcSMatt Macy 	    VDEV_ALLOC_LOAD : VDEV_ALLOC_SPLIT);
3388eda14cbcSMatt Macy 	error = spa_config_parse(spa, &rvd, nvtree, NULL, 0, parse);
3389eda14cbcSMatt Macy 	spa_config_exit(spa, SCL_ALL, FTAG);
3390eda14cbcSMatt Macy 
3391eda14cbcSMatt Macy 	if (error != 0) {
3392eda14cbcSMatt Macy 		spa_load_failed(spa, "unable to parse config [error=%d]",
3393eda14cbcSMatt Macy 		    error);
3394eda14cbcSMatt Macy 		return (error);
3395eda14cbcSMatt Macy 	}
3396eda14cbcSMatt Macy 
3397eda14cbcSMatt Macy 	ASSERT(spa->spa_root_vdev == rvd);
3398eda14cbcSMatt Macy 	ASSERT3U(spa->spa_min_ashift, >=, SPA_MINBLOCKSHIFT);
3399eda14cbcSMatt Macy 	ASSERT3U(spa->spa_max_ashift, <=, SPA_MAXBLOCKSHIFT);
3400eda14cbcSMatt Macy 
3401eda14cbcSMatt Macy 	if (type != SPA_IMPORT_ASSEMBLE) {
3402eda14cbcSMatt Macy 		ASSERT(spa_guid(spa) == pool_guid);
3403eda14cbcSMatt Macy 	}
3404eda14cbcSMatt Macy 
3405eda14cbcSMatt Macy 	return (0);
3406eda14cbcSMatt Macy }
3407eda14cbcSMatt Macy 
3408eda14cbcSMatt Macy /*
3409eda14cbcSMatt Macy  * Recursively open all vdevs in the vdev tree. This function is called twice:
3410eda14cbcSMatt Macy  * first with the untrusted config, then with the trusted config.
3411eda14cbcSMatt Macy  */
3412eda14cbcSMatt Macy static int
3413eda14cbcSMatt Macy spa_ld_open_vdevs(spa_t *spa)
3414eda14cbcSMatt Macy {
3415eda14cbcSMatt Macy 	int error = 0;
3416eda14cbcSMatt Macy 
3417eda14cbcSMatt Macy 	/*
3418eda14cbcSMatt Macy 	 * spa_missing_tvds_allowed defines how many top-level vdevs can be
3419eda14cbcSMatt Macy 	 * missing/unopenable for the root vdev to still be considered openable.
3420eda14cbcSMatt Macy 	 */
3421eda14cbcSMatt Macy 	if (spa->spa_trust_config) {
3422eda14cbcSMatt Macy 		spa->spa_missing_tvds_allowed = zfs_max_missing_tvds;
3423eda14cbcSMatt Macy 	} else if (spa->spa_config_source == SPA_CONFIG_SRC_CACHEFILE) {
3424eda14cbcSMatt Macy 		spa->spa_missing_tvds_allowed = zfs_max_missing_tvds_cachefile;
3425eda14cbcSMatt Macy 	} else if (spa->spa_config_source == SPA_CONFIG_SRC_SCAN) {
3426eda14cbcSMatt Macy 		spa->spa_missing_tvds_allowed = zfs_max_missing_tvds_scan;
3427eda14cbcSMatt Macy 	} else {
3428eda14cbcSMatt Macy 		spa->spa_missing_tvds_allowed = 0;
3429eda14cbcSMatt Macy 	}
3430eda14cbcSMatt Macy 
3431eda14cbcSMatt Macy 	spa->spa_missing_tvds_allowed =
3432eda14cbcSMatt Macy 	    MAX(zfs_max_missing_tvds, spa->spa_missing_tvds_allowed);
3433eda14cbcSMatt Macy 
3434eda14cbcSMatt Macy 	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
3435eda14cbcSMatt Macy 	error = vdev_open(spa->spa_root_vdev);
3436eda14cbcSMatt Macy 	spa_config_exit(spa, SCL_ALL, FTAG);
3437eda14cbcSMatt Macy 
3438eda14cbcSMatt Macy 	if (spa->spa_missing_tvds != 0) {
3439eda14cbcSMatt Macy 		spa_load_note(spa, "vdev tree has %lld missing top-level "
3440eda14cbcSMatt Macy 		    "vdevs.", (u_longlong_t)spa->spa_missing_tvds);
3441eda14cbcSMatt Macy 		if (spa->spa_trust_config && (spa->spa_mode & SPA_MODE_WRITE)) {
3442eda14cbcSMatt Macy 			/*
3443eda14cbcSMatt Macy 			 * Although theoretically we could allow users to open
3444eda14cbcSMatt Macy 			 * incomplete pools in RW mode, we'd need to add a lot
3445eda14cbcSMatt Macy 			 * of extra logic (e.g. adjust pool space to account
3446eda14cbcSMatt Macy 			 * for missing vdevs).
3447eda14cbcSMatt Macy 			 * This limitation also prevents users from accidentally
3448eda14cbcSMatt Macy 			 * opening the pool in RW mode during data recovery and
3449eda14cbcSMatt Macy 			 * damaging it further.
3450eda14cbcSMatt Macy 			 */
3451eda14cbcSMatt Macy 			spa_load_note(spa, "pools with missing top-level "
3452eda14cbcSMatt Macy 			    "vdevs can only be opened in read-only mode.");
3453eda14cbcSMatt Macy 			error = SET_ERROR(ENXIO);
3454eda14cbcSMatt Macy 		} else {
3455eda14cbcSMatt Macy 			spa_load_note(spa, "current settings allow for maximum "
3456eda14cbcSMatt Macy 			    "%lld missing top-level vdevs at this stage.",
3457eda14cbcSMatt Macy 			    (u_longlong_t)spa->spa_missing_tvds_allowed);
3458eda14cbcSMatt Macy 		}
3459eda14cbcSMatt Macy 	}
3460eda14cbcSMatt Macy 	if (error != 0) {
3461eda14cbcSMatt Macy 		spa_load_failed(spa, "unable to open vdev tree [error=%d]",
3462eda14cbcSMatt Macy 		    error);
3463eda14cbcSMatt Macy 	}
3464eda14cbcSMatt Macy 	if (spa->spa_missing_tvds != 0 || error != 0)
3465eda14cbcSMatt Macy 		vdev_dbgmsg_print_tree(spa->spa_root_vdev, 2);
3466eda14cbcSMatt Macy 
3467eda14cbcSMatt Macy 	return (error);
3468eda14cbcSMatt Macy }
3469eda14cbcSMatt Macy 
3470eda14cbcSMatt Macy /*
3471eda14cbcSMatt Macy  * We need to validate the vdev labels against the configuration that
3472eda14cbcSMatt Macy  * we have in hand. This function is called twice: first with an untrusted
3473eda14cbcSMatt Macy  * config, then with a trusted config. The validation is more strict when the
3474eda14cbcSMatt Macy  * config is trusted.
3475eda14cbcSMatt Macy  */
3476eda14cbcSMatt Macy static int
3477eda14cbcSMatt Macy spa_ld_validate_vdevs(spa_t *spa)
3478eda14cbcSMatt Macy {
3479eda14cbcSMatt Macy 	int error = 0;
3480eda14cbcSMatt Macy 	vdev_t *rvd = spa->spa_root_vdev;
3481eda14cbcSMatt Macy 
3482eda14cbcSMatt Macy 	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
3483eda14cbcSMatt Macy 	error = vdev_validate(rvd);
3484eda14cbcSMatt Macy 	spa_config_exit(spa, SCL_ALL, FTAG);
3485eda14cbcSMatt Macy 
3486eda14cbcSMatt Macy 	if (error != 0) {
3487eda14cbcSMatt Macy 		spa_load_failed(spa, "vdev_validate failed [error=%d]", error);
3488eda14cbcSMatt Macy 		return (error);
3489eda14cbcSMatt Macy 	}
3490eda14cbcSMatt Macy 
3491eda14cbcSMatt Macy 	if (rvd->vdev_state <= VDEV_STATE_CANT_OPEN) {
3492eda14cbcSMatt Macy 		spa_load_failed(spa, "cannot open vdev tree after invalidating "
3493eda14cbcSMatt Macy 		    "some vdevs");
3494eda14cbcSMatt Macy 		vdev_dbgmsg_print_tree(rvd, 2);
3495eda14cbcSMatt Macy 		return (SET_ERROR(ENXIO));
3496eda14cbcSMatt Macy 	}
3497eda14cbcSMatt Macy 
3498eda14cbcSMatt Macy 	return (0);
3499eda14cbcSMatt Macy }
3500eda14cbcSMatt Macy 
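/*
 * Record the selected uberblock in the in-core SPA state: mark the pool
 * active, copy the uberblock into spa_ubsync, and derive the txg
 * bookkeeping (verify_min_txg, first_txg, claim_max_txg) from it.
 */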
3501eda14cbcSMatt Macy static void
3502eda14cbcSMatt Macy spa_ld_select_uberblock_done(spa_t *spa, uberblock_t *ub)
3503eda14cbcSMatt Macy {
3504eda14cbcSMatt Macy 	spa->spa_state = POOL_STATE_ACTIVE;
3505eda14cbcSMatt Macy 	spa->spa_ubsync = spa->spa_uberblock;
3506eda14cbcSMatt Macy 	spa->spa_verify_min_txg = spa->spa_extreme_rewind ?
3507eda14cbcSMatt Macy 	    TXG_INITIAL - 1 : spa_last_synced_txg(spa) - TXG_DEFER_SIZE - 1;
3508eda14cbcSMatt Macy 	spa->spa_first_txg = spa->spa_last_ubsync_txg ?
3509eda14cbcSMatt Macy 	    spa->spa_last_ubsync_txg : spa_last_synced_txg(spa) + 1;
3510eda14cbcSMatt Macy 	spa->spa_claim_max_txg = spa->spa_first_txg;
3511eda14cbcSMatt Macy 	spa->spa_prev_software_version = ub->ub_software_version;
3512eda14cbcSMatt Macy }
3513eda14cbcSMatt Macy 
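/*
 * Select the uberblock to load the pool from.  Normally this is the best
 * uberblock found in the vdev labels, but when importing the checkpointed
 * state of a pool read-only we keep the uberblock set up by the checkpoint
 * rewind code.  This is also where the multihost (MMP) activity check is
 * performed and where the label's features_for_read are validated.
 */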
3514eda14cbcSMatt Macy static int
3515eda14cbcSMatt Macy spa_ld_select_uberblock(spa_t *spa, spa_import_type_t type)
3516eda14cbcSMatt Macy {
3517eda14cbcSMatt Macy 	vdev_t *rvd = spa->spa_root_vdev;
3518eda14cbcSMatt Macy 	nvlist_t *label;
3519eda14cbcSMatt Macy 	uberblock_t *ub = &spa->spa_uberblock;
3520eda14cbcSMatt Macy 	boolean_t activity_check = B_FALSE;
3521eda14cbcSMatt Macy 
3522eda14cbcSMatt Macy 	/*
3523eda14cbcSMatt Macy 	 * If we are opening the checkpointed state of the pool by
3524eda14cbcSMatt Macy 	 * rewinding to it, at this point we will have written the
3525eda14cbcSMatt Macy 	 * checkpointed uberblock to the vdev labels, so searching
3526eda14cbcSMatt Macy 	 * the labels will find the right uberblock.  However, if
3527eda14cbcSMatt Macy 	 * we are opening the checkpointed state read-only, we have
3528eda14cbcSMatt Macy 	 * not modified the labels. Therefore, we must ignore the
3529eda14cbcSMatt Macy 	 * labels and continue using the spa_uberblock that was set
3530eda14cbcSMatt Macy 	 * by spa_ld_checkpoint_rewind.
3531eda14cbcSMatt Macy 	 *
3532eda14cbcSMatt Macy 	 * Note that it would be fine to ignore the labels when
3533eda14cbcSMatt Macy 	 * rewinding (opening writeable) as well. However, if we
3534eda14cbcSMatt Macy 	 * crash just after writing the labels, we will end up
3535eda14cbcSMatt Macy 	 * searching the labels. Doing so in the common case means
3536eda14cbcSMatt Macy 	 * that this code path gets exercised normally, rather than
3537eda14cbcSMatt Macy 	 * just in the edge case.
3538eda14cbcSMatt Macy 	 */
3539eda14cbcSMatt Macy 	if (ub->ub_checkpoint_txg != 0 &&
3540eda14cbcSMatt Macy 	    spa_importing_readonly_checkpoint(spa)) {
3541eda14cbcSMatt Macy 		spa_ld_select_uberblock_done(spa, ub);
3542eda14cbcSMatt Macy 		return (0);
3543eda14cbcSMatt Macy 	}
3544eda14cbcSMatt Macy 
3545eda14cbcSMatt Macy 	/*
3546eda14cbcSMatt Macy 	 * Find the best uberblock.
3547eda14cbcSMatt Macy 	 */
3548eda14cbcSMatt Macy 	vdev_uberblock_load(rvd, ub, &label);
3549eda14cbcSMatt Macy 
3550eda14cbcSMatt Macy 	/*
3551eda14cbcSMatt Macy 	 * If we weren't able to find a single valid uberblock, return failure.
3552eda14cbcSMatt Macy 	 */
3553eda14cbcSMatt Macy 	if (ub->ub_txg == 0) {
3554eda14cbcSMatt Macy 		nvlist_free(label);
3555eda14cbcSMatt Macy 		spa_load_failed(spa, "no valid uberblock found");
3556eda14cbcSMatt Macy 		return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, ENXIO));
3557eda14cbcSMatt Macy 	}
3558eda14cbcSMatt Macy 
3559eda14cbcSMatt Macy 	if (spa->spa_load_max_txg != UINT64_MAX) {
3560eda14cbcSMatt Macy 		(void) spa_import_progress_set_max_txg(spa_guid(spa),
3561eda14cbcSMatt Macy 		    (u_longlong_t)spa->spa_load_max_txg);
3562eda14cbcSMatt Macy 	}
3563eda14cbcSMatt Macy 	spa_load_note(spa, "using uberblock with txg=%llu",
3564eda14cbcSMatt Macy 	    (u_longlong_t)ub->ub_txg);
3565eda14cbcSMatt Macy 
3566eda14cbcSMatt Macy 
3567eda14cbcSMatt Macy 	/*
3568eda14cbcSMatt Macy 	 * For pools which have the multihost property on, determine if the
3569eda14cbcSMatt Macy 	 * pool is truly inactive and can be safely imported.  Prevent
3570eda14cbcSMatt Macy 	 * hosts which don't have a hostid set from importing the pool.
3571eda14cbcSMatt Macy 	 */
3572eda14cbcSMatt Macy 	activity_check = spa_activity_check_required(spa, ub, label,
3573eda14cbcSMatt Macy 	    spa->spa_config);
3574eda14cbcSMatt Macy 	if (activity_check) {
3575eda14cbcSMatt Macy 		if (ub->ub_mmp_magic == MMP_MAGIC && ub->ub_mmp_delay &&
3576eda14cbcSMatt Macy 		    spa_get_hostid(spa) == 0) {
3577eda14cbcSMatt Macy 			nvlist_free(label);
3578eda14cbcSMatt Macy 			fnvlist_add_uint64(spa->spa_load_info,
3579eda14cbcSMatt Macy 			    ZPOOL_CONFIG_MMP_STATE, MMP_STATE_NO_HOSTID);
3580eda14cbcSMatt Macy 			return (spa_vdev_err(rvd, VDEV_AUX_ACTIVE, EREMOTEIO));
3581eda14cbcSMatt Macy 		}
3582eda14cbcSMatt Macy 
3583eda14cbcSMatt Macy 		int error = spa_activity_check(spa, ub, spa->spa_config);
3584eda14cbcSMatt Macy 		if (error) {
3585eda14cbcSMatt Macy 			nvlist_free(label);
3586eda14cbcSMatt Macy 			return (error);
3587eda14cbcSMatt Macy 		}
3588eda14cbcSMatt Macy 
3589eda14cbcSMatt Macy 		fnvlist_add_uint64(spa->spa_load_info,
3590eda14cbcSMatt Macy 		    ZPOOL_CONFIG_MMP_STATE, MMP_STATE_INACTIVE);
3591eda14cbcSMatt Macy 		fnvlist_add_uint64(spa->spa_load_info,
3592eda14cbcSMatt Macy 		    ZPOOL_CONFIG_MMP_TXG, ub->ub_txg);
3593eda14cbcSMatt Macy 		fnvlist_add_uint16(spa->spa_load_info,
3594eda14cbcSMatt Macy 		    ZPOOL_CONFIG_MMP_SEQ,
3595eda14cbcSMatt Macy 		    (MMP_SEQ_VALID(ub) ? MMP_SEQ(ub) : 0));
3596eda14cbcSMatt Macy 	}
3597eda14cbcSMatt Macy 
3598eda14cbcSMatt Macy 	/*
3599eda14cbcSMatt Macy 	 * If the pool has an unsupported version, we can't open it.
3600eda14cbcSMatt Macy 	 */
3601eda14cbcSMatt Macy 	if (!SPA_VERSION_IS_SUPPORTED(ub->ub_version)) {
3602eda14cbcSMatt Macy 		nvlist_free(label);
3603eda14cbcSMatt Macy 		spa_load_failed(spa, "version %llu is not supported",
3604eda14cbcSMatt Macy 		    (u_longlong_t)ub->ub_version);
3605eda14cbcSMatt Macy 		return (spa_vdev_err(rvd, VDEV_AUX_VERSION_NEWER, ENOTSUP));
3606eda14cbcSMatt Macy 	}
3607eda14cbcSMatt Macy 
3608eda14cbcSMatt Macy 	if (ub->ub_version >= SPA_VERSION_FEATURES) {
3609eda14cbcSMatt Macy 		nvlist_t *features;
3610eda14cbcSMatt Macy 
3611eda14cbcSMatt Macy 		/*
3612eda14cbcSMatt Macy 		 * If we weren't able to find what's necessary for reading the
3613eda14cbcSMatt Macy 		 * MOS in the label, return failure.
3614eda14cbcSMatt Macy 		 */
3615eda14cbcSMatt Macy 		if (label == NULL) {
3616eda14cbcSMatt Macy 			spa_load_failed(spa, "label config unavailable");
3617eda14cbcSMatt Macy 			return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA,
3618eda14cbcSMatt Macy 			    ENXIO));
3619eda14cbcSMatt Macy 		}
3620eda14cbcSMatt Macy 
3621eda14cbcSMatt Macy 		if (nvlist_lookup_nvlist(label, ZPOOL_CONFIG_FEATURES_FOR_READ,
3622eda14cbcSMatt Macy 		    &features) != 0) {
3623eda14cbcSMatt Macy 			nvlist_free(label);
3624eda14cbcSMatt Macy 			spa_load_failed(spa, "invalid label: '%s' missing",
3625eda14cbcSMatt Macy 			    ZPOOL_CONFIG_FEATURES_FOR_READ);
3626eda14cbcSMatt Macy 			return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA,
3627eda14cbcSMatt Macy 			    ENXIO));
3628eda14cbcSMatt Macy 		}
3629eda14cbcSMatt Macy 
3630eda14cbcSMatt Macy 		/*
3631eda14cbcSMatt Macy 		 * Update our in-core representation with the definitive values
3632eda14cbcSMatt Macy 		 * from the label.
3633eda14cbcSMatt Macy 		 */
3634eda14cbcSMatt Macy 		nvlist_free(spa->spa_label_features);
3635eda14cbcSMatt Macy 		VERIFY(nvlist_dup(features, &spa->spa_label_features, 0) == 0);
3636eda14cbcSMatt Macy 	}
3637eda14cbcSMatt Macy 
3638eda14cbcSMatt Macy 	nvlist_free(label);
3639eda14cbcSMatt Macy 
3640eda14cbcSMatt Macy 	/*
3641eda14cbcSMatt Macy 	 * Look through entries in the label nvlist's features_for_read. If
3642eda14cbcSMatt Macy 	 * there is a feature listed there which we don't understand, then we
3643eda14cbcSMatt Macy 	 * cannot open the pool.
3644eda14cbcSMatt Macy 	 */
3645eda14cbcSMatt Macy 	if (ub->ub_version >= SPA_VERSION_FEATURES) {
3646eda14cbcSMatt Macy 		nvlist_t *unsup_feat;
3647eda14cbcSMatt Macy 
3648eda14cbcSMatt Macy 		VERIFY(nvlist_alloc(&unsup_feat, NV_UNIQUE_NAME, KM_SLEEP) ==
3649eda14cbcSMatt Macy 		    0);
3650eda14cbcSMatt Macy 
3651eda14cbcSMatt Macy 		for (nvpair_t *nvp = nvlist_next_nvpair(spa->spa_label_features,
3652eda14cbcSMatt Macy 		    NULL); nvp != NULL;
3653eda14cbcSMatt Macy 		    nvp = nvlist_next_nvpair(spa->spa_label_features, nvp)) {
3654eda14cbcSMatt Macy 			if (!zfeature_is_supported(nvpair_name(nvp))) {
3655eda14cbcSMatt Macy 				VERIFY(nvlist_add_string(unsup_feat,
3656eda14cbcSMatt Macy 				    nvpair_name(nvp), "") == 0);
3657eda14cbcSMatt Macy 			}
3658eda14cbcSMatt Macy 		}
3659eda14cbcSMatt Macy 
3660eda14cbcSMatt Macy 		if (!nvlist_empty(unsup_feat)) {
3661eda14cbcSMatt Macy 			VERIFY(nvlist_add_nvlist(spa->spa_load_info,
3662eda14cbcSMatt Macy 			    ZPOOL_CONFIG_UNSUP_FEAT, unsup_feat) == 0);
3663eda14cbcSMatt Macy 			nvlist_free(unsup_feat);
3664eda14cbcSMatt Macy 			spa_load_failed(spa, "some features are unsupported");
3665eda14cbcSMatt Macy 			return (spa_vdev_err(rvd, VDEV_AUX_UNSUP_FEAT,
3666eda14cbcSMatt Macy 			    ENOTSUP));
3667eda14cbcSMatt Macy 		}
3668eda14cbcSMatt Macy 
3669eda14cbcSMatt Macy 		nvlist_free(unsup_feat);
3670eda14cbcSMatt Macy 	}
3671eda14cbcSMatt Macy 
3672eda14cbcSMatt Macy 	if (type != SPA_IMPORT_ASSEMBLE && spa->spa_config_splitting) {
3673eda14cbcSMatt Macy 		spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
3674eda14cbcSMatt Macy 		spa_try_repair(spa, spa->spa_config);
3675eda14cbcSMatt Macy 		spa_config_exit(spa, SCL_ALL, FTAG);
3676eda14cbcSMatt Macy 		nvlist_free(spa->spa_config_splitting);
3677eda14cbcSMatt Macy 		spa->spa_config_splitting = NULL;
3678eda14cbcSMatt Macy 	}
3679eda14cbcSMatt Macy 
3680eda14cbcSMatt Macy 	/*
3681eda14cbcSMatt Macy 	 * Initialize internal SPA structures.
3682eda14cbcSMatt Macy 	 */
3683eda14cbcSMatt Macy 	spa_ld_select_uberblock_done(spa, ub);
3684eda14cbcSMatt Macy 
3685eda14cbcSMatt Macy 	return (0);
3686eda14cbcSMatt Macy }
3687eda14cbcSMatt Macy 
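/*
 * Open the DSL pool rooted at the selected uberblock's root block pointer
 * and cache a pointer to the MOS (meta objset).
 */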
3688eda14cbcSMatt Macy static int
3689eda14cbcSMatt Macy spa_ld_open_rootbp(spa_t *spa)
3690eda14cbcSMatt Macy {
3691eda14cbcSMatt Macy 	int error = 0;
3692eda14cbcSMatt Macy 	vdev_t *rvd = spa->spa_root_vdev;
3693eda14cbcSMatt Macy 
3694eda14cbcSMatt Macy 	error = dsl_pool_init(spa, spa->spa_first_txg, &spa->spa_dsl_pool);
3695eda14cbcSMatt Macy 	if (error != 0) {
3696eda14cbcSMatt Macy 		spa_load_failed(spa, "unable to open rootbp in dsl_pool_init "
3697eda14cbcSMatt Macy 		    "[error=%d]", error);
3698eda14cbcSMatt Macy 		return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
3699eda14cbcSMatt Macy 	}
3700eda14cbcSMatt Macy 	spa->spa_meta_objset = spa->spa_dsl_pool->dp_meta_objset;
3701eda14cbcSMatt Macy 
3702eda14cbcSMatt Macy 	return (0);
3703eda14cbcSMatt Macy }
3704eda14cbcSMatt Macy 
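/*
 * Retrieve the trusted config from the MOS, rebuild the vdev tree from it
 * (carrying over the vdev paths from the provided config), regenerate
 * spa_config, and re-open and re-validate the vdevs.  Also sanity check
 * that the provided config did not hide too many healthy top-level vdevs,
 * that no log devices are missing, and that the vdev guid sum matches the
 * uberblock.
 */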
3705eda14cbcSMatt Macy static int
3706eda14cbcSMatt Macy spa_ld_trusted_config(spa_t *spa, spa_import_type_t type,
3707eda14cbcSMatt Macy     boolean_t reloading)
3708eda14cbcSMatt Macy {
3709eda14cbcSMatt Macy 	vdev_t *mrvd, *rvd = spa->spa_root_vdev;
3710eda14cbcSMatt Macy 	nvlist_t *nv, *mos_config, *policy;
3711eda14cbcSMatt Macy 	int error = 0, copy_error;
3712eda14cbcSMatt Macy 	uint64_t healthy_tvds, healthy_tvds_mos;
3713eda14cbcSMatt Macy 	uint64_t mos_config_txg;
3714eda14cbcSMatt Macy 
3715eda14cbcSMatt Macy 	if (spa_dir_prop(spa, DMU_POOL_CONFIG, &spa->spa_config_object, B_TRUE)
3716eda14cbcSMatt Macy 	    != 0)
3717eda14cbcSMatt Macy 		return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
3718eda14cbcSMatt Macy 
3719eda14cbcSMatt Macy 	/*
3720eda14cbcSMatt Macy 	 * If we're assembling a pool from a split, the config provided is
3721eda14cbcSMatt Macy 	 * already trusted so there is nothing to do.
3722eda14cbcSMatt Macy 	 */
3723eda14cbcSMatt Macy 	if (type == SPA_IMPORT_ASSEMBLE)
3724eda14cbcSMatt Macy 		return (0);
3725eda14cbcSMatt Macy 
3726eda14cbcSMatt Macy 	healthy_tvds = spa_healthy_core_tvds(spa);
3727eda14cbcSMatt Macy 
3728eda14cbcSMatt Macy 	if (load_nvlist(spa, spa->spa_config_object, &mos_config)
3729eda14cbcSMatt Macy 	    != 0) {
3730eda14cbcSMatt Macy 		spa_load_failed(spa, "unable to retrieve MOS config");
3731eda14cbcSMatt Macy 		return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
3732eda14cbcSMatt Macy 	}
3733eda14cbcSMatt Macy 
3734eda14cbcSMatt Macy 	/*
3735eda14cbcSMatt Macy 	 * If we are doing an open, the pool owner hasn't been verified yet,
3736eda14cbcSMatt Macy 	 * so do the verification here.
3737eda14cbcSMatt Macy 	 */
3738eda14cbcSMatt Macy 	if (spa->spa_load_state == SPA_LOAD_OPEN) {
3739eda14cbcSMatt Macy 		error = spa_verify_host(spa, mos_config);
3740eda14cbcSMatt Macy 		if (error != 0) {
3741eda14cbcSMatt Macy 			nvlist_free(mos_config);
3742eda14cbcSMatt Macy 			return (error);
3743eda14cbcSMatt Macy 		}
3744eda14cbcSMatt Macy 	}
3745eda14cbcSMatt Macy 
3746eda14cbcSMatt Macy 	nv = fnvlist_lookup_nvlist(mos_config, ZPOOL_CONFIG_VDEV_TREE);
3747eda14cbcSMatt Macy 
3748eda14cbcSMatt Macy 	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
3749eda14cbcSMatt Macy 
3750eda14cbcSMatt Macy 	/*
3751eda14cbcSMatt Macy 	 * Build a new vdev tree from the trusted config
3752eda14cbcSMatt Macy 	 */
37537877fdebSMatt Macy 	error = spa_config_parse(spa, &mrvd, nv, NULL, 0, VDEV_ALLOC_LOAD);
37547877fdebSMatt Macy 	if (error != 0) {
37557877fdebSMatt Macy 		nvlist_free(mos_config);
37567877fdebSMatt Macy 		spa_config_exit(spa, SCL_ALL, FTAG);
37577877fdebSMatt Macy 		spa_load_failed(spa, "spa_config_parse failed [error=%d]",
37587877fdebSMatt Macy 		    error);
37597877fdebSMatt Macy 		return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, error));
37607877fdebSMatt Macy 	}
3761eda14cbcSMatt Macy 
3762eda14cbcSMatt Macy 	/*
3763eda14cbcSMatt Macy 	 * Vdev paths in the MOS may be obsolete. If the untrusted config was
3764eda14cbcSMatt Macy 	 * obtained by scanning /dev/dsk, then it will have the right vdev
3765eda14cbcSMatt Macy 	 * paths. We update the trusted MOS config with this information.
3766eda14cbcSMatt Macy 	 * We first try to copy the paths with vdev_copy_path_strict, which
3767eda14cbcSMatt Macy 	 * succeeds only when both configs have exactly the same vdev tree.
3768eda14cbcSMatt Macy 	 * If that fails, we fall back to a more flexible method that has a
3769eda14cbcSMatt Macy 	 * best-effort policy.
3770eda14cbcSMatt Macy 	 */
3771eda14cbcSMatt Macy 	copy_error = vdev_copy_path_strict(rvd, mrvd);
3772eda14cbcSMatt Macy 	if (copy_error != 0 || spa_load_print_vdev_tree) {
3773eda14cbcSMatt Macy 		spa_load_note(spa, "provided vdev tree:");
3774eda14cbcSMatt Macy 		vdev_dbgmsg_print_tree(rvd, 2);
3775eda14cbcSMatt Macy 		spa_load_note(spa, "MOS vdev tree:");
3776eda14cbcSMatt Macy 		vdev_dbgmsg_print_tree(mrvd, 2);
3777eda14cbcSMatt Macy 	}
3778eda14cbcSMatt Macy 	if (copy_error != 0) {
3779eda14cbcSMatt Macy 		spa_load_note(spa, "vdev_copy_path_strict failed, falling "
3780eda14cbcSMatt Macy 		    "back to vdev_copy_path_relaxed");
3781eda14cbcSMatt Macy 		vdev_copy_path_relaxed(rvd, mrvd);
3782eda14cbcSMatt Macy 	}
3783eda14cbcSMatt Macy 
3784eda14cbcSMatt Macy 	vdev_close(rvd);
3785eda14cbcSMatt Macy 	vdev_free(rvd);
3786eda14cbcSMatt Macy 	spa->spa_root_vdev = mrvd;
3787eda14cbcSMatt Macy 	rvd = mrvd;
3788eda14cbcSMatt Macy 	spa_config_exit(spa, SCL_ALL, FTAG);
3789eda14cbcSMatt Macy 
3790eda14cbcSMatt Macy 	/*
3791eda14cbcSMatt Macy 	 * We will use spa_config if we decide to reload the spa or if spa_load
3792eda14cbcSMatt Macy 	 * fails and we rewind. We must thus regenerate the config using the
3793eda14cbcSMatt Macy 	 * MOS information with the updated paths. ZPOOL_LOAD_POLICY is used to
3794eda14cbcSMatt Macy 	 * pass settings on how to load the pool and is not stored in the MOS.
3795eda14cbcSMatt Macy 	 * We copy it over to our new, trusted config.
3796eda14cbcSMatt Macy 	 */
3797eda14cbcSMatt Macy 	mos_config_txg = fnvlist_lookup_uint64(mos_config,
3798eda14cbcSMatt Macy 	    ZPOOL_CONFIG_POOL_TXG);
3799eda14cbcSMatt Macy 	nvlist_free(mos_config);
3800eda14cbcSMatt Macy 	mos_config = spa_config_generate(spa, NULL, mos_config_txg, B_FALSE);
3801eda14cbcSMatt Macy 	if (nvlist_lookup_nvlist(spa->spa_config, ZPOOL_LOAD_POLICY,
3802eda14cbcSMatt Macy 	    &policy) == 0)
3803eda14cbcSMatt Macy 		fnvlist_add_nvlist(mos_config, ZPOOL_LOAD_POLICY, policy);
3804eda14cbcSMatt Macy 	spa_config_set(spa, mos_config);
3805eda14cbcSMatt Macy 	spa->spa_config_source = SPA_CONFIG_SRC_MOS;
3806eda14cbcSMatt Macy 
3807eda14cbcSMatt Macy 	/*
3808eda14cbcSMatt Macy 	 * Now that we got the config from the MOS, we should be more strict
3809eda14cbcSMatt Macy 	 * Now that we have the config from the MOS, we should be more strict
3810eda14cbcSMatt Macy 	 * of the vdev tree. spa_trust_config must be set to true before opening
3811eda14cbcSMatt Macy 	 * vdevs in order for them to be writeable.
3812eda14cbcSMatt Macy 	 */
3813eda14cbcSMatt Macy 	spa->spa_trust_config = B_TRUE;
3814eda14cbcSMatt Macy 
3815eda14cbcSMatt Macy 	/*
3816eda14cbcSMatt Macy 	 * Open and validate the new vdev tree
3817eda14cbcSMatt Macy 	 */
3818eda14cbcSMatt Macy 	error = spa_ld_open_vdevs(spa);
3819eda14cbcSMatt Macy 	if (error != 0)
3820eda14cbcSMatt Macy 		return (error);
3821eda14cbcSMatt Macy 
3822eda14cbcSMatt Macy 	error = spa_ld_validate_vdevs(spa);
3823eda14cbcSMatt Macy 	if (error != 0)
3824eda14cbcSMatt Macy 		return (error);
3825eda14cbcSMatt Macy 
3826eda14cbcSMatt Macy 	if (copy_error != 0 || spa_load_print_vdev_tree) {
3827eda14cbcSMatt Macy 		spa_load_note(spa, "final vdev tree:");
3828eda14cbcSMatt Macy 		vdev_dbgmsg_print_tree(rvd, 2);
3829eda14cbcSMatt Macy 	}
3830eda14cbcSMatt Macy 
3831eda14cbcSMatt Macy 	if (spa->spa_load_state != SPA_LOAD_TRYIMPORT &&
3832eda14cbcSMatt Macy 	    !spa->spa_extreme_rewind && zfs_max_missing_tvds == 0) {
3833eda14cbcSMatt Macy 		/*
3834eda14cbcSMatt Macy 		 * Sanity check to make sure that we are indeed loading the
3835eda14cbcSMatt Macy 		 * latest uberblock. If we missed SPA_SYNC_MIN_VDEVS tvds
3836eda14cbcSMatt Macy 		 * in the config provided and they happened to be the only ones
3837eda14cbcSMatt Macy 		 * to have the latest uberblock, we could involuntarily perform
3838eda14cbcSMatt Macy 		 * an extreme rewind.
3839eda14cbcSMatt Macy 		 */
3840eda14cbcSMatt Macy 		healthy_tvds_mos = spa_healthy_core_tvds(spa);
3841eda14cbcSMatt Macy 		if (healthy_tvds_mos - healthy_tvds >=
3842eda14cbcSMatt Macy 		    SPA_SYNC_MIN_VDEVS) {
3843eda14cbcSMatt Macy 			spa_load_note(spa, "config provided misses too many "
3844eda14cbcSMatt Macy 			    "top-level vdevs compared to MOS (%lld vs %lld). ",
3845eda14cbcSMatt Macy 			    (u_longlong_t)healthy_tvds,
3846eda14cbcSMatt Macy 			    (u_longlong_t)healthy_tvds_mos);
3847eda14cbcSMatt Macy 			spa_load_note(spa, "vdev tree:");
3848eda14cbcSMatt Macy 			vdev_dbgmsg_print_tree(rvd, 2);
3849eda14cbcSMatt Macy 			if (reloading) {
3850eda14cbcSMatt Macy 				spa_load_failed(spa, "config was already "
3851eda14cbcSMatt Macy 				    "provided from MOS. Aborting.");
3852eda14cbcSMatt Macy 				return (spa_vdev_err(rvd,
3853eda14cbcSMatt Macy 				    VDEV_AUX_CORRUPT_DATA, EIO));
3854eda14cbcSMatt Macy 			}
3855eda14cbcSMatt Macy 			spa_load_note(spa, "spa must be reloaded using MOS "
3856eda14cbcSMatt Macy 			    "config");
3857eda14cbcSMatt Macy 			return (SET_ERROR(EAGAIN));
3858eda14cbcSMatt Macy 		}
3859eda14cbcSMatt Macy 	}
3860eda14cbcSMatt Macy 
3861eda14cbcSMatt Macy 	error = spa_check_for_missing_logs(spa);
3862eda14cbcSMatt Macy 	if (error != 0)
3863eda14cbcSMatt Macy 		return (spa_vdev_err(rvd, VDEV_AUX_BAD_GUID_SUM, ENXIO));
3864eda14cbcSMatt Macy 
3865eda14cbcSMatt Macy 	if (rvd->vdev_guid_sum != spa->spa_uberblock.ub_guid_sum) {
3866eda14cbcSMatt Macy 		spa_load_failed(spa, "uberblock guid sum doesn't match MOS "
3867eda14cbcSMatt Macy 		    "guid sum (%llu != %llu)",
3868eda14cbcSMatt Macy 		    (u_longlong_t)spa->spa_uberblock.ub_guid_sum,
3869eda14cbcSMatt Macy 		    (u_longlong_t)rvd->vdev_guid_sum);
3870eda14cbcSMatt Macy 		return (spa_vdev_err(rvd, VDEV_AUX_BAD_GUID_SUM,
3871eda14cbcSMatt Macy 		    ENXIO));
3872eda14cbcSMatt Macy 	}
3873eda14cbcSMatt Macy 
3874eda14cbcSMatt Macy 	return (0);
3875eda14cbcSMatt Macy }
3876eda14cbcSMatt Macy 
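/*
 * Load the metadata associated with device removal: the indirect vdev
 * mappings (spa_remove_init) and the state needed to condense them
 * (spa_condense_init).
 */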
3877eda14cbcSMatt Macy static int
3878eda14cbcSMatt Macy spa_ld_open_indirect_vdev_metadata(spa_t *spa)
3879eda14cbcSMatt Macy {
3880eda14cbcSMatt Macy 	int error = 0;
3881eda14cbcSMatt Macy 	vdev_t *rvd = spa->spa_root_vdev;
3882eda14cbcSMatt Macy 
3883eda14cbcSMatt Macy 	/*
3884eda14cbcSMatt Macy 	 * Everything that we read before spa_remove_init() must be stored
3885eda14cbcSMatt Macy 	 * on concrete vdevs.  Therefore we do this as early as possible.
3886eda14cbcSMatt Macy 	 */
3887eda14cbcSMatt Macy 	error = spa_remove_init(spa);
3888eda14cbcSMatt Macy 	if (error != 0) {
3889eda14cbcSMatt Macy 		spa_load_failed(spa, "spa_remove_init failed [error=%d]",
3890eda14cbcSMatt Macy 		    error);
3891eda14cbcSMatt Macy 		return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
3892eda14cbcSMatt Macy 	}
3893eda14cbcSMatt Macy 
3894eda14cbcSMatt Macy 	/*
3895eda14cbcSMatt Macy 	 * Retrieve information needed to condense indirect vdev mappings.
3896eda14cbcSMatt Macy 	 */
3897eda14cbcSMatt Macy 	error = spa_condense_init(spa);
3898eda14cbcSMatt Macy 	if (error != 0) {
3899eda14cbcSMatt Macy 		spa_load_failed(spa, "spa_condense_init failed [error=%d]",
3900eda14cbcSMatt Macy 		    error);
3901eda14cbcSMatt Macy 		return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, error));
3902eda14cbcSMatt Macy 	}
3903eda14cbcSMatt Macy 
3904eda14cbcSMatt Macy 	return (0);
3905eda14cbcSMatt Macy }
3906eda14cbcSMatt Macy 
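/*
 * Check the feature flags recorded in the MOS: fail the load if a feature
 * required for read is unsupported, note in spa_load_info whether the pool
 * could still be opened read-only, and populate the in-core feature
 * refcount cache.
 */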
3907eda14cbcSMatt Macy static int
3908eda14cbcSMatt Macy spa_ld_check_features(spa_t *spa, boolean_t *missing_feat_writep)
3909eda14cbcSMatt Macy {
3910eda14cbcSMatt Macy 	int error = 0;
3911eda14cbcSMatt Macy 	vdev_t *rvd = spa->spa_root_vdev;
3912eda14cbcSMatt Macy 
3913eda14cbcSMatt Macy 	if (spa_version(spa) >= SPA_VERSION_FEATURES) {
3914eda14cbcSMatt Macy 		boolean_t missing_feat_read = B_FALSE;
3915eda14cbcSMatt Macy 		nvlist_t *unsup_feat, *enabled_feat;
3916eda14cbcSMatt Macy 
3917eda14cbcSMatt Macy 		if (spa_dir_prop(spa, DMU_POOL_FEATURES_FOR_READ,
3918eda14cbcSMatt Macy 		    &spa->spa_feat_for_read_obj, B_TRUE) != 0) {
3919eda14cbcSMatt Macy 			return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
3920eda14cbcSMatt Macy 		}
3921eda14cbcSMatt Macy 
3922eda14cbcSMatt Macy 		if (spa_dir_prop(spa, DMU_POOL_FEATURES_FOR_WRITE,
3923eda14cbcSMatt Macy 		    &spa->spa_feat_for_write_obj, B_TRUE) != 0) {
3924eda14cbcSMatt Macy 			return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
3925eda14cbcSMatt Macy 		}
3926eda14cbcSMatt Macy 
3927eda14cbcSMatt Macy 		if (spa_dir_prop(spa, DMU_POOL_FEATURE_DESCRIPTIONS,
3928eda14cbcSMatt Macy 		    &spa->spa_feat_desc_obj, B_TRUE) != 0) {
3929eda14cbcSMatt Macy 			return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
3930eda14cbcSMatt Macy 		}
3931eda14cbcSMatt Macy 
3932eda14cbcSMatt Macy 		enabled_feat = fnvlist_alloc();
3933eda14cbcSMatt Macy 		unsup_feat = fnvlist_alloc();
3934eda14cbcSMatt Macy 
3935eda14cbcSMatt Macy 		if (!spa_features_check(spa, B_FALSE,
3936eda14cbcSMatt Macy 		    unsup_feat, enabled_feat))
3937eda14cbcSMatt Macy 			missing_feat_read = B_TRUE;
3938eda14cbcSMatt Macy 
3939eda14cbcSMatt Macy 		if (spa_writeable(spa) ||
3940eda14cbcSMatt Macy 		    spa->spa_load_state == SPA_LOAD_TRYIMPORT) {
3941eda14cbcSMatt Macy 			if (!spa_features_check(spa, B_TRUE,
3942eda14cbcSMatt Macy 			    unsup_feat, enabled_feat)) {
3943eda14cbcSMatt Macy 				*missing_feat_writep = B_TRUE;
3944eda14cbcSMatt Macy 			}
3945eda14cbcSMatt Macy 		}
3946eda14cbcSMatt Macy 
3947eda14cbcSMatt Macy 		fnvlist_add_nvlist(spa->spa_load_info,
3948eda14cbcSMatt Macy 		    ZPOOL_CONFIG_ENABLED_FEAT, enabled_feat);
3949eda14cbcSMatt Macy 
3950eda14cbcSMatt Macy 		if (!nvlist_empty(unsup_feat)) {
3951eda14cbcSMatt Macy 			fnvlist_add_nvlist(spa->spa_load_info,
3952eda14cbcSMatt Macy 			    ZPOOL_CONFIG_UNSUP_FEAT, unsup_feat);
3953eda14cbcSMatt Macy 		}
3954eda14cbcSMatt Macy 
3955eda14cbcSMatt Macy 		fnvlist_free(enabled_feat);
3956eda14cbcSMatt Macy 		fnvlist_free(unsup_feat);
3957eda14cbcSMatt Macy 
3958eda14cbcSMatt Macy 		if (!missing_feat_read) {
3959eda14cbcSMatt Macy 			fnvlist_add_boolean(spa->spa_load_info,
3960eda14cbcSMatt Macy 			    ZPOOL_CONFIG_CAN_RDONLY);
3961eda14cbcSMatt Macy 		}
3962eda14cbcSMatt Macy 
3963eda14cbcSMatt Macy 		/*
3964eda14cbcSMatt Macy 		 * If the state is SPA_LOAD_TRYIMPORT, our objective is
3965eda14cbcSMatt Macy 		 * twofold: to determine whether the pool is available for
3966eda14cbcSMatt Macy 		 * import in read-write mode and (if it is not) whether the
3967eda14cbcSMatt Macy 		 * pool is available for import in read-only mode. If the pool
3968eda14cbcSMatt Macy 		 * is available for import in read-write mode, it is displayed
3969eda14cbcSMatt Macy 		 * as available in userland; if it is not available for import
3970eda14cbcSMatt Macy 		 * in read-only mode, it is displayed as unavailable in
3971eda14cbcSMatt Macy 		 * userland. If the pool is available for import in read-only
3972eda14cbcSMatt Macy 		 * mode but not read-write mode, it is displayed as unavailable
3973eda14cbcSMatt Macy 		 * in userland with a special note that the pool is actually
3974eda14cbcSMatt Macy 		 * available for open in read-only mode.
3975eda14cbcSMatt Macy 		 *
3976eda14cbcSMatt Macy 		 * As a result, if the state is SPA_LOAD_TRYIMPORT and we are
3977eda14cbcSMatt Macy 		 * missing a feature for write, we must first determine whether
3978eda14cbcSMatt Macy 		 * the pool can be opened read-only before returning to
3979eda14cbcSMatt Macy 		 * userland in order to know whether to display the
3980eda14cbcSMatt Macy 		 * abovementioned note.
3981eda14cbcSMatt Macy 		 */
3982eda14cbcSMatt Macy 		if (missing_feat_read || (*missing_feat_writep &&
3983eda14cbcSMatt Macy 		    spa_writeable(spa))) {
3984eda14cbcSMatt Macy 			spa_load_failed(spa, "pool uses unsupported features");
3985eda14cbcSMatt Macy 			return (spa_vdev_err(rvd, VDEV_AUX_UNSUP_FEAT,
3986eda14cbcSMatt Macy 			    ENOTSUP));
3987eda14cbcSMatt Macy 		}
3988eda14cbcSMatt Macy 
3989eda14cbcSMatt Macy 		/*
3990eda14cbcSMatt Macy 		 * Load refcounts for ZFS features from disk into an in-memory
3991eda14cbcSMatt Macy 		 * cache during SPA initialization.
3992eda14cbcSMatt Macy 		 */
3993eda14cbcSMatt Macy 		for (spa_feature_t i = 0; i < SPA_FEATURES; i++) {
3994eda14cbcSMatt Macy 			uint64_t refcount;
3995eda14cbcSMatt Macy 
3996eda14cbcSMatt Macy 			error = feature_get_refcount_from_disk(spa,
3997eda14cbcSMatt Macy 			    &spa_feature_table[i], &refcount);
3998eda14cbcSMatt Macy 			if (error == 0) {
3999eda14cbcSMatt Macy 				spa->spa_feat_refcount_cache[i] = refcount;
4000eda14cbcSMatt Macy 			} else if (error == ENOTSUP) {
4001eda14cbcSMatt Macy 				spa->spa_feat_refcount_cache[i] =
4002eda14cbcSMatt Macy 				    SPA_FEATURE_DISABLED;
4003eda14cbcSMatt Macy 			} else {
4004eda14cbcSMatt Macy 				spa_load_failed(spa, "error getting refcount "
4005eda14cbcSMatt Macy 				    "for feature %s [error=%d]",
4006eda14cbcSMatt Macy 				    spa_feature_table[i].fi_guid, error);
4007eda14cbcSMatt Macy 				return (spa_vdev_err(rvd,
4008eda14cbcSMatt Macy 				    VDEV_AUX_CORRUPT_DATA, EIO));
4009eda14cbcSMatt Macy 			}
4010eda14cbcSMatt Macy 		}
4011eda14cbcSMatt Macy 	}
4012eda14cbcSMatt Macy 
4013eda14cbcSMatt Macy 	if (spa_feature_is_active(spa, SPA_FEATURE_ENABLED_TXG)) {
4014eda14cbcSMatt Macy 		if (spa_dir_prop(spa, DMU_POOL_FEATURE_ENABLED_TXG,
4015eda14cbcSMatt Macy 		    &spa->spa_feat_enabled_txg_obj, B_TRUE) != 0)
4016eda14cbcSMatt Macy 			return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
4017eda14cbcSMatt Macy 	}
4018eda14cbcSMatt Macy 
4019eda14cbcSMatt Macy 	/*
4020eda14cbcSMatt Macy 	 * Encryption was added before bookmark_v2, even though bookmark_v2
4021eda14cbcSMatt Macy 	 * is now a dependency. If this pool has encryption enabled without
4022eda14cbcSMatt Macy 	 * bookmark_v2, trigger an errata message.
4023eda14cbcSMatt Macy 	 */
4024eda14cbcSMatt Macy 	if (spa_feature_is_enabled(spa, SPA_FEATURE_ENCRYPTION) &&
4025eda14cbcSMatt Macy 	    !spa_feature_is_enabled(spa, SPA_FEATURE_BOOKMARK_V2)) {
4026eda14cbcSMatt Macy 		spa->spa_errata = ZPOOL_ERRATA_ZOL_8308_ENCRYPTION;
4027eda14cbcSMatt Macy 	}
4028eda14cbcSMatt Macy 
4029eda14cbcSMatt Macy 	return (0);
4030eda14cbcSMatt Macy }
4031eda14cbcSMatt Macy 
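/*
 * Finish opening the DSL pool by loading its special directories
 * (dsl_pool_open).
 */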
4032eda14cbcSMatt Macy static int
4033eda14cbcSMatt Macy spa_ld_load_special_directories(spa_t *spa)
4034eda14cbcSMatt Macy {
4035eda14cbcSMatt Macy 	int error = 0;
4036eda14cbcSMatt Macy 	vdev_t *rvd = spa->spa_root_vdev;
4037eda14cbcSMatt Macy 
4038eda14cbcSMatt Macy 	spa->spa_is_initializing = B_TRUE;
4039eda14cbcSMatt Macy 	error = dsl_pool_open(spa->spa_dsl_pool);
4040eda14cbcSMatt Macy 	spa->spa_is_initializing = B_FALSE;
4041eda14cbcSMatt Macy 	if (error != 0) {
4042eda14cbcSMatt Macy 		spa_load_failed(spa, "dsl_pool_open failed [error=%d]", error);
4043eda14cbcSMatt Macy 		return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
4044eda14cbcSMatt Macy 	}
4045eda14cbcSMatt Macy 
4046eda14cbcSMatt Macy 	return (0);
4047eda14cbcSMatt Macy }
4048eda14cbcSMatt Macy 
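/*
 * Load assorted pool-wide objects and properties from the MOS: the
 * checksum salt, the deferred-frees bpobj, the deflate flag, the error
 * logs, any queued livelist deletions, the history object, the per-vdev
 * ZAP map, and the pool properties object.
 */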
4049eda14cbcSMatt Macy static int
4050eda14cbcSMatt Macy spa_ld_get_props(spa_t *spa)
4051eda14cbcSMatt Macy {
4052eda14cbcSMatt Macy 	int error = 0;
4053eda14cbcSMatt Macy 	uint64_t obj;
4054eda14cbcSMatt Macy 	vdev_t *rvd = spa->spa_root_vdev;
4055eda14cbcSMatt Macy 
4056eda14cbcSMatt Macy 	/* Grab the checksum salt from the MOS. */
4057eda14cbcSMatt Macy 	error = zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
4058eda14cbcSMatt Macy 	    DMU_POOL_CHECKSUM_SALT, 1,
4059eda14cbcSMatt Macy 	    sizeof (spa->spa_cksum_salt.zcs_bytes),
4060eda14cbcSMatt Macy 	    spa->spa_cksum_salt.zcs_bytes);
4061eda14cbcSMatt Macy 	if (error == ENOENT) {
4062eda14cbcSMatt Macy 		/* Generate a new salt for subsequent use */
4063eda14cbcSMatt Macy 		(void) random_get_pseudo_bytes(spa->spa_cksum_salt.zcs_bytes,
4064eda14cbcSMatt Macy 		    sizeof (spa->spa_cksum_salt.zcs_bytes));
4065eda14cbcSMatt Macy 	} else if (error != 0) {
4066eda14cbcSMatt Macy 		spa_load_failed(spa, "unable to retrieve checksum salt from "
4067eda14cbcSMatt Macy 		    "MOS [error=%d]", error);
4068eda14cbcSMatt Macy 		return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
4069eda14cbcSMatt Macy 	}
4070eda14cbcSMatt Macy 
4071eda14cbcSMatt Macy 	if (spa_dir_prop(spa, DMU_POOL_SYNC_BPOBJ, &obj, B_TRUE) != 0)
4072eda14cbcSMatt Macy 		return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
4073eda14cbcSMatt Macy 	error = bpobj_open(&spa->spa_deferred_bpobj, spa->spa_meta_objset, obj);
4074eda14cbcSMatt Macy 	if (error != 0) {
4075eda14cbcSMatt Macy 		spa_load_failed(spa, "error opening deferred-frees bpobj "
4076eda14cbcSMatt Macy 		    "[error=%d]", error);
4077eda14cbcSMatt Macy 		return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
4078eda14cbcSMatt Macy 	}
4079eda14cbcSMatt Macy 
4080eda14cbcSMatt Macy 	/*
4081eda14cbcSMatt Macy 	 * Load the bit that tells us to use the new accounting function
4082eda14cbcSMatt Macy 	 * (raid-z deflation).  If we have an older pool, this will not
4083eda14cbcSMatt Macy 	 * be present.
4084eda14cbcSMatt Macy 	 */
4085eda14cbcSMatt Macy 	error = spa_dir_prop(spa, DMU_POOL_DEFLATE, &spa->spa_deflate, B_FALSE);
4086eda14cbcSMatt Macy 	if (error != 0 && error != ENOENT)
4087eda14cbcSMatt Macy 		return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
4088eda14cbcSMatt Macy 
4089eda14cbcSMatt Macy 	error = spa_dir_prop(spa, DMU_POOL_CREATION_VERSION,
4090eda14cbcSMatt Macy 	    &spa->spa_creation_version, B_FALSE);
4091eda14cbcSMatt Macy 	if (error != 0 && error != ENOENT)
4092eda14cbcSMatt Macy 		return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
4093eda14cbcSMatt Macy 
4094eda14cbcSMatt Macy 	/*
4095eda14cbcSMatt Macy 	 * Load the persistent error log.  If we have an older pool, this will
4096eda14cbcSMatt Macy 	 * not be present.
4097eda14cbcSMatt Macy 	 */
4098eda14cbcSMatt Macy 	error = spa_dir_prop(spa, DMU_POOL_ERRLOG_LAST, &spa->spa_errlog_last,
4099eda14cbcSMatt Macy 	    B_FALSE);
4100eda14cbcSMatt Macy 	if (error != 0 && error != ENOENT)
4101eda14cbcSMatt Macy 		return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
4102eda14cbcSMatt Macy 
4103eda14cbcSMatt Macy 	error = spa_dir_prop(spa, DMU_POOL_ERRLOG_SCRUB,
4104eda14cbcSMatt Macy 	    &spa->spa_errlog_scrub, B_FALSE);
4105eda14cbcSMatt Macy 	if (error != 0 && error != ENOENT)
4106eda14cbcSMatt Macy 		return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
4107eda14cbcSMatt Macy 
4108eda14cbcSMatt Macy 	/*
4109eda14cbcSMatt Macy 	 * Load the livelist deletion field. If a livelist is queued for
4110eda14cbcSMatt Macy 	 * deletion, indicate that in the spa.
4111eda14cbcSMatt Macy 	 */
4112eda14cbcSMatt Macy 	error = spa_dir_prop(spa, DMU_POOL_DELETED_CLONES,
4113eda14cbcSMatt Macy 	    &spa->spa_livelists_to_delete, B_FALSE);
4114eda14cbcSMatt Macy 	if (error != 0 && error != ENOENT)
4115eda14cbcSMatt Macy 		return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
4116eda14cbcSMatt Macy 
4117eda14cbcSMatt Macy 	/*
4118eda14cbcSMatt Macy 	 * Load the history object.  If we have an older pool, this
4119eda14cbcSMatt Macy 	 * will not be present.
4120eda14cbcSMatt Macy 	 */
4121eda14cbcSMatt Macy 	error = spa_dir_prop(spa, DMU_POOL_HISTORY, &spa->spa_history, B_FALSE);
4122eda14cbcSMatt Macy 	if (error != 0 && error != ENOENT)
4123eda14cbcSMatt Macy 		return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
4124eda14cbcSMatt Macy 
4125eda14cbcSMatt Macy 	/*
4126eda14cbcSMatt Macy 	 * Load the per-vdev ZAP map. If we have an older pool, this will not
4127eda14cbcSMatt Macy 	 * be present; in this case, defer its creation to a later time to
4128eda14cbcSMatt Macy 	 * avoid dirtying the MOS this early / out of sync context. See
4129eda14cbcSMatt Macy 	 * spa_sync_config_object.
4130eda14cbcSMatt Macy 	 */
4131eda14cbcSMatt Macy 
4132eda14cbcSMatt Macy 	/* The sentinel is only available in the MOS config. */
4133eda14cbcSMatt Macy 	nvlist_t *mos_config;
4134eda14cbcSMatt Macy 	if (load_nvlist(spa, spa->spa_config_object, &mos_config) != 0) {
4135eda14cbcSMatt Macy 		spa_load_failed(spa, "unable to retrieve MOS config");
4136eda14cbcSMatt Macy 		return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
4137eda14cbcSMatt Macy 	}
4138eda14cbcSMatt Macy 
4139eda14cbcSMatt Macy 	error = spa_dir_prop(spa, DMU_POOL_VDEV_ZAP_MAP,
4140eda14cbcSMatt Macy 	    &spa->spa_all_vdev_zaps, B_FALSE);
4141eda14cbcSMatt Macy 
4142eda14cbcSMatt Macy 	if (error == ENOENT) {
4143eda14cbcSMatt Macy 		VERIFY(!nvlist_exists(mos_config,
4144eda14cbcSMatt Macy 		    ZPOOL_CONFIG_HAS_PER_VDEV_ZAPS));
4145eda14cbcSMatt Macy 		spa->spa_avz_action = AVZ_ACTION_INITIALIZE;
4146eda14cbcSMatt Macy 		ASSERT0(vdev_count_verify_zaps(spa->spa_root_vdev));
4147eda14cbcSMatt Macy 	} else if (error != 0) {
4148eda14cbcSMatt Macy 		return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
4149eda14cbcSMatt Macy 	} else if (!nvlist_exists(mos_config, ZPOOL_CONFIG_HAS_PER_VDEV_ZAPS)) {
4150eda14cbcSMatt Macy 		/*
4151eda14cbcSMatt Macy 		 * An older version of ZFS overwrote the sentinel value, so
4152eda14cbcSMatt Macy 		 * we have orphaned per-vdev ZAPs in the MOS. Defer their
4153eda14cbcSMatt Macy 		 * destruction to later; see spa_sync_config_object.
4154eda14cbcSMatt Macy 		 */
4155eda14cbcSMatt Macy 		spa->spa_avz_action = AVZ_ACTION_DESTROY;
4156eda14cbcSMatt Macy 		/*
4157eda14cbcSMatt Macy 		 * We're assuming that no vdevs have had their ZAPs created
4158eda14cbcSMatt Macy 		 * before this. Better be sure of it.
4159eda14cbcSMatt Macy 		 */
4160eda14cbcSMatt Macy 		ASSERT0(vdev_count_verify_zaps(spa->spa_root_vdev));
4161eda14cbcSMatt Macy 	}
4162eda14cbcSMatt Macy 	nvlist_free(mos_config);
4163eda14cbcSMatt Macy 
4164eda14cbcSMatt Macy 	spa->spa_delegation = zpool_prop_default_numeric(ZPOOL_PROP_DELEGATION);
4165eda14cbcSMatt Macy 
4166eda14cbcSMatt Macy 	error = spa_dir_prop(spa, DMU_POOL_PROPS, &spa->spa_pool_props_object,
4167eda14cbcSMatt Macy 	    B_FALSE);
4168eda14cbcSMatt Macy 	if (error && error != ENOENT)
4169eda14cbcSMatt Macy 		return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
4170eda14cbcSMatt Macy 
4171eda14cbcSMatt Macy 	if (error == 0) {
4172eda14cbcSMatt Macy 		uint64_t autoreplace;
4173eda14cbcSMatt Macy 
4174eda14cbcSMatt Macy 		spa_prop_find(spa, ZPOOL_PROP_BOOTFS, &spa->spa_bootfs);
4175eda14cbcSMatt Macy 		spa_prop_find(spa, ZPOOL_PROP_AUTOREPLACE, &autoreplace);
4176eda14cbcSMatt Macy 		spa_prop_find(spa, ZPOOL_PROP_DELEGATION, &spa->spa_delegation);
4177eda14cbcSMatt Macy 		spa_prop_find(spa, ZPOOL_PROP_FAILUREMODE, &spa->spa_failmode);
4178eda14cbcSMatt Macy 		spa_prop_find(spa, ZPOOL_PROP_AUTOEXPAND, &spa->spa_autoexpand);
4179eda14cbcSMatt Macy 		spa_prop_find(spa, ZPOOL_PROP_MULTIHOST, &spa->spa_multihost);
4180eda14cbcSMatt Macy 		spa_prop_find(spa, ZPOOL_PROP_AUTOTRIM, &spa->spa_autotrim);
4181eda14cbcSMatt Macy 		spa->spa_autoreplace = (autoreplace != 0);
4182eda14cbcSMatt Macy 	}
4183eda14cbcSMatt Macy 
4184eda14cbcSMatt Macy 	/*
4185eda14cbcSMatt Macy 	 * If we are importing a pool with missing top-level vdevs,
4186eda14cbcSMatt Macy 	 * we enforce that the pool doesn't panic or get suspended on
4187eda14cbcSMatt Macy 	 * error since the likelihood of missing data is extremely high.
4188eda14cbcSMatt Macy 	 */
4189eda14cbcSMatt Macy 	if (spa->spa_missing_tvds > 0 &&
4190eda14cbcSMatt Macy 	    spa->spa_failmode != ZIO_FAILURE_MODE_CONTINUE &&
4191eda14cbcSMatt Macy 	    spa->spa_load_state != SPA_LOAD_TRYIMPORT) {
4192eda14cbcSMatt Macy 		spa_load_note(spa, "forcing failmode to 'continue' "
4193eda14cbcSMatt Macy 		    "as some top level vdevs are missing");
4194eda14cbcSMatt Macy 		spa->spa_failmode = ZIO_FAILURE_MODE_CONTINUE;
4195eda14cbcSMatt Macy 	}
4196eda14cbcSMatt Macy 
4197eda14cbcSMatt Macy 	return (0);
4198eda14cbcSMatt Macy }
4199eda14cbcSMatt Macy 
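/*
 * Load the hot spare and level 2 ARC device configurations from the MOS
 * and open those auxiliary vdevs (skipped when assembling a pool from a
 * split).
 */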
4200eda14cbcSMatt Macy static int
4201eda14cbcSMatt Macy spa_ld_open_aux_vdevs(spa_t *spa, spa_import_type_t type)
4202eda14cbcSMatt Macy {
4203eda14cbcSMatt Macy 	int error = 0;
4204eda14cbcSMatt Macy 	vdev_t *rvd = spa->spa_root_vdev;
4205eda14cbcSMatt Macy 
4206eda14cbcSMatt Macy 	/*
4207eda14cbcSMatt Macy 	 * If we're assembling the pool from the split-off vdevs of
4208eda14cbcSMatt Macy 	 * an existing pool, we don't want to attach the spares & cache
4209eda14cbcSMatt Macy 	 * devices.
4210eda14cbcSMatt Macy 	 */
4211eda14cbcSMatt Macy 
4212eda14cbcSMatt Macy 	/*
4213eda14cbcSMatt Macy 	 * Load any hot spares for this pool.
4214eda14cbcSMatt Macy 	 */
4215eda14cbcSMatt Macy 	error = spa_dir_prop(spa, DMU_POOL_SPARES, &spa->spa_spares.sav_object,
4216eda14cbcSMatt Macy 	    B_FALSE);
4217eda14cbcSMatt Macy 	if (error != 0 && error != ENOENT)
4218eda14cbcSMatt Macy 		return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
4219eda14cbcSMatt Macy 	if (error == 0 && type != SPA_IMPORT_ASSEMBLE) {
4220eda14cbcSMatt Macy 		ASSERT(spa_version(spa) >= SPA_VERSION_SPARES);
4221eda14cbcSMatt Macy 		if (load_nvlist(spa, spa->spa_spares.sav_object,
4222eda14cbcSMatt Macy 		    &spa->spa_spares.sav_config) != 0) {
4223eda14cbcSMatt Macy 			spa_load_failed(spa, "error loading spares nvlist");
4224eda14cbcSMatt Macy 			return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
4225eda14cbcSMatt Macy 		}
4226eda14cbcSMatt Macy 
4227eda14cbcSMatt Macy 		spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
4228eda14cbcSMatt Macy 		spa_load_spares(spa);
4229eda14cbcSMatt Macy 		spa_config_exit(spa, SCL_ALL, FTAG);
4230eda14cbcSMatt Macy 	} else if (error == 0) {
4231eda14cbcSMatt Macy 		spa->spa_spares.sav_sync = B_TRUE;
4232eda14cbcSMatt Macy 	}
4233eda14cbcSMatt Macy 
4234eda14cbcSMatt Macy 	/*
4235eda14cbcSMatt Macy 	 * Load any level 2 ARC devices for this pool.
4236eda14cbcSMatt Macy 	 */
4237eda14cbcSMatt Macy 	error = spa_dir_prop(spa, DMU_POOL_L2CACHE,
4238eda14cbcSMatt Macy 	    &spa->spa_l2cache.sav_object, B_FALSE);
4239eda14cbcSMatt Macy 	if (error != 0 && error != ENOENT)
4240eda14cbcSMatt Macy 		return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
4241eda14cbcSMatt Macy 	if (error == 0 && type != SPA_IMPORT_ASSEMBLE) {
4242eda14cbcSMatt Macy 		ASSERT(spa_version(spa) >= SPA_VERSION_L2CACHE);
4243eda14cbcSMatt Macy 		if (load_nvlist(spa, spa->spa_l2cache.sav_object,
4244eda14cbcSMatt Macy 		    &spa->spa_l2cache.sav_config) != 0) {
4245eda14cbcSMatt Macy 			spa_load_failed(spa, "error loading l2cache nvlist");
4246eda14cbcSMatt Macy 			return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
4247eda14cbcSMatt Macy 		}
4248eda14cbcSMatt Macy 
4249eda14cbcSMatt Macy 		spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
4250eda14cbcSMatt Macy 		spa_load_l2cache(spa);
4251eda14cbcSMatt Macy 		spa_config_exit(spa, SCL_ALL, FTAG);
4252eda14cbcSMatt Macy 	} else if (error == 0) {
4253eda14cbcSMatt Macy 		spa->spa_l2cache.sav_sync = B_TRUE;
4254eda14cbcSMatt Macy 	}
4255eda14cbcSMatt Macy 
4256eda14cbcSMatt Macy 	return (0);
4257eda14cbcSMatt Macy }
4258eda14cbcSMatt Macy 
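/*
 * Load per-vdev metadata (metaslabs, DTLs, space maps, etc.) after
 * enforcing the multihost hostid requirement and posting autoreplace
 * events for unopenable vdevs, then propagate the leaf DTLs up the
 * vdev tree.
 */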
4259eda14cbcSMatt Macy static int
4260eda14cbcSMatt Macy spa_ld_load_vdev_metadata(spa_t *spa)
4261eda14cbcSMatt Macy {
4262eda14cbcSMatt Macy 	int error = 0;
4263eda14cbcSMatt Macy 	vdev_t *rvd = spa->spa_root_vdev;
4264eda14cbcSMatt Macy 
4265eda14cbcSMatt Macy 	/*
4266eda14cbcSMatt Macy 	 * If the 'multihost' property is set, then never allow a pool to
4267eda14cbcSMatt Macy 	 * be imported when the system hostid is zero.  The exception to
4268eda14cbcSMatt Macy 	 * this rule is zdb, which is always allowed to access pools.
4269eda14cbcSMatt Macy 	 */
4270eda14cbcSMatt Macy 	if (spa_multihost(spa) && spa_get_hostid(spa) == 0 &&
4271eda14cbcSMatt Macy 	    (spa->spa_import_flags & ZFS_IMPORT_SKIP_MMP) == 0) {
4272eda14cbcSMatt Macy 		fnvlist_add_uint64(spa->spa_load_info,
4273eda14cbcSMatt Macy 		    ZPOOL_CONFIG_MMP_STATE, MMP_STATE_NO_HOSTID);
4274eda14cbcSMatt Macy 		return (spa_vdev_err(rvd, VDEV_AUX_ACTIVE, EREMOTEIO));
4275eda14cbcSMatt Macy 	}
4276eda14cbcSMatt Macy 
4277eda14cbcSMatt Macy 	/*
4278eda14cbcSMatt Macy 	 * If the 'autoreplace' property is set, then post a resource notifying
4279eda14cbcSMatt Macy 	 * the ZFS DE that it should not issue any faults for unopenable
4280eda14cbcSMatt Macy 	 * devices.  We also iterate over the vdevs, and post a sysevent for any
4281eda14cbcSMatt Macy 	 * unopenable vdevs so that the normal autoreplace handler can take
4282eda14cbcSMatt Macy 	 * over.
4283eda14cbcSMatt Macy 	 */
4284eda14cbcSMatt Macy 	if (spa->spa_autoreplace && spa->spa_load_state != SPA_LOAD_TRYIMPORT) {
4285eda14cbcSMatt Macy 		spa_check_removed(spa->spa_root_vdev);
4286eda14cbcSMatt Macy 		/*
4287eda14cbcSMatt Macy 		 * For the import case, this is done in spa_import(), because
4288eda14cbcSMatt Macy 		 * at this point we're using the spare definitions from
4289eda14cbcSMatt Macy 		 * the MOS config, not necessarily from the userland config.
4290eda14cbcSMatt Macy 		 */
4291eda14cbcSMatt Macy 		if (spa->spa_load_state != SPA_LOAD_IMPORT) {
4292eda14cbcSMatt Macy 			spa_aux_check_removed(&spa->spa_spares);
4293eda14cbcSMatt Macy 			spa_aux_check_removed(&spa->spa_l2cache);
4294eda14cbcSMatt Macy 		}
4295eda14cbcSMatt Macy 	}
4296eda14cbcSMatt Macy 
4297eda14cbcSMatt Macy 	/*
4298eda14cbcSMatt Macy 	 * Load the vdev metadata such as metaslabs, DTLs, spacemap object, etc.
4299eda14cbcSMatt Macy 	 */
4300eda14cbcSMatt Macy 	error = vdev_load(rvd);
4301eda14cbcSMatt Macy 	if (error != 0) {
4302eda14cbcSMatt Macy 		spa_load_failed(spa, "vdev_load failed [error=%d]", error);
4303eda14cbcSMatt Macy 		return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, error));
4304eda14cbcSMatt Macy 	}
4305eda14cbcSMatt Macy 
4306eda14cbcSMatt Macy 	error = spa_ld_log_spacemaps(spa);
4307eda14cbcSMatt Macy 	if (error != 0) {
4308eda14cbcSMatt Macy 		spa_load_failed(spa, "spa_ld_log_sm_data failed [error=%d]",
4309eda14cbcSMatt Macy 		    error);
4310eda14cbcSMatt Macy 		return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, error));
4311eda14cbcSMatt Macy 	}
4312eda14cbcSMatt Macy 
4313eda14cbcSMatt Macy 	/*
4314eda14cbcSMatt Macy 	 * Propagate the leaf DTLs we just loaded all the way up the vdev tree.
4315eda14cbcSMatt Macy 	 */
4316eda14cbcSMatt Macy 	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
4317eda14cbcSMatt Macy 	vdev_dtl_reassess(rvd, 0, 0, B_FALSE, B_FALSE);
4318eda14cbcSMatt Macy 	spa_config_exit(spa, SCL_ALL, FTAG);
4319eda14cbcSMatt Macy 
4320eda14cbcSMatt Macy 	return (0);
4321eda14cbcSMatt Macy }
4322eda14cbcSMatt Macy 
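/*
 * Load the on-disk deduplication tables (ddt_load).
 */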
4323eda14cbcSMatt Macy static int
4324eda14cbcSMatt Macy spa_ld_load_dedup_tables(spa_t *spa)
4325eda14cbcSMatt Macy {
4326eda14cbcSMatt Macy 	int error = 0;
4327eda14cbcSMatt Macy 	vdev_t *rvd = spa->spa_root_vdev;
4328eda14cbcSMatt Macy 
4329eda14cbcSMatt Macy 	error = ddt_load(spa);
4330eda14cbcSMatt Macy 	if (error != 0) {
4331eda14cbcSMatt Macy 		spa_load_failed(spa, "ddt_load failed [error=%d]", error);
4332eda14cbcSMatt Macy 		return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
4333eda14cbcSMatt Macy 	}
4334eda14cbcSMatt Macy 
4335eda14cbcSMatt Macy 	return (0);
4336eda14cbcSMatt Macy }
4337eda14cbcSMatt Macy 
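/*
 * Check for missing intent log devices.  If top-level vdevs are already
 * missing we drop the logs; otherwise the load fails with a log-replay
 * ereport.
 */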
4338eda14cbcSMatt Macy static int
4339eda14cbcSMatt Macy spa_ld_verify_logs(spa_t *spa, spa_import_type_t type, char **ereport)
4340eda14cbcSMatt Macy {
4341eda14cbcSMatt Macy 	vdev_t *rvd = spa->spa_root_vdev;
4342eda14cbcSMatt Macy 
4343eda14cbcSMatt Macy 	if (type != SPA_IMPORT_ASSEMBLE && spa_writeable(spa)) {
4344eda14cbcSMatt Macy 		boolean_t missing = spa_check_logs(spa);
4345eda14cbcSMatt Macy 		if (missing) {
4346eda14cbcSMatt Macy 			if (spa->spa_missing_tvds != 0) {
4347eda14cbcSMatt Macy 				spa_load_note(spa, "spa_check_logs failed "
4348eda14cbcSMatt Macy 				    "so dropping the logs");
4349eda14cbcSMatt Macy 			} else {
4350eda14cbcSMatt Macy 				*ereport = FM_EREPORT_ZFS_LOG_REPLAY;
4351eda14cbcSMatt Macy 				spa_load_failed(spa, "spa_check_logs failed");
4352eda14cbcSMatt Macy 				return (spa_vdev_err(rvd, VDEV_AUX_BAD_LOG,
4353eda14cbcSMatt Macy 				    ENXIO));
4354eda14cbcSMatt Macy 			}
4355eda14cbcSMatt Macy 		}
4356eda14cbcSMatt Macy 	}
4357eda14cbcSMatt Macy 
4358eda14cbcSMatt Macy 	return (0);
4359eda14cbcSMatt Macy }
4360eda14cbcSMatt Macy 
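/*
 * Run spa_load_verify() to confirm the pool is ready to start pushing
 * transactions (skipped for tryimport).
 */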
4361eda14cbcSMatt Macy static int
4362eda14cbcSMatt Macy spa_ld_verify_pool_data(spa_t *spa)
4363eda14cbcSMatt Macy {
4364eda14cbcSMatt Macy 	int error = 0;
4365eda14cbcSMatt Macy 	vdev_t *rvd = spa->spa_root_vdev;
4366eda14cbcSMatt Macy 
4367eda14cbcSMatt Macy 	/*
4368eda14cbcSMatt Macy 	 * We've successfully opened the pool; verify that we're ready
4369eda14cbcSMatt Macy 	 * to start pushing transactions.
4370eda14cbcSMatt Macy 	 */
4371eda14cbcSMatt Macy 	if (spa->spa_load_state != SPA_LOAD_TRYIMPORT) {
4372eda14cbcSMatt Macy 		error = spa_load_verify(spa);
4373eda14cbcSMatt Macy 		if (error != 0) {
4374eda14cbcSMatt Macy 			spa_load_failed(spa, "spa_load_verify failed "
4375eda14cbcSMatt Macy 			    "[error=%d]", error);
4376eda14cbcSMatt Macy 			return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA,
4377eda14cbcSMatt Macy 			    error));
4378eda14cbcSMatt Macy 		}
4379eda14cbcSMatt Macy 	}
4380eda14cbcSMatt Macy 
4381eda14cbcSMatt Macy 	return (0);
4382eda14cbcSMatt Macy }
4383eda14cbcSMatt Macy 
4384eda14cbcSMatt Macy static void
4385eda14cbcSMatt Macy spa_ld_claim_log_blocks(spa_t *spa)
4386eda14cbcSMatt Macy {
4387eda14cbcSMatt Macy 	dmu_tx_t *tx;
4388eda14cbcSMatt Macy 	dsl_pool_t *dp = spa_get_dsl(spa);
4389eda14cbcSMatt Macy 
4390eda14cbcSMatt Macy 	/*
4391eda14cbcSMatt Macy 	 * Claim log blocks that haven't been committed yet.
4392eda14cbcSMatt Macy 	 * This must all happen in a single txg.
4393eda14cbcSMatt Macy 	 * Note: spa_claim_max_txg is updated by spa_claim_notify(),
4394eda14cbcSMatt Macy 	 * invoked from zil_claim_log_block()'s i/o done callback.
4395eda14cbcSMatt Macy 	 * Price of rollback is that we abandon the log.
4396eda14cbcSMatt Macy 	 */
4397eda14cbcSMatt Macy 	spa->spa_claiming = B_TRUE;
4398eda14cbcSMatt Macy 
4399eda14cbcSMatt Macy 	tx = dmu_tx_create_assigned(dp, spa_first_txg(spa));
4400eda14cbcSMatt Macy 	(void) dmu_objset_find_dp(dp, dp->dp_root_dir_obj,
4401eda14cbcSMatt Macy 	    zil_claim, tx, DS_FIND_CHILDREN);
4402eda14cbcSMatt Macy 	dmu_tx_commit(tx);
4403eda14cbcSMatt Macy 
4404eda14cbcSMatt Macy 	spa->spa_claiming = B_FALSE;
4405eda14cbcSMatt Macy 
4406eda14cbcSMatt Macy 	spa_set_log_state(spa, SPA_LOG_GOOD);
4407eda14cbcSMatt Macy }
4408eda14cbcSMatt Macy 
4409eda14cbcSMatt Macy static void
4410eda14cbcSMatt Macy spa_ld_check_for_config_update(spa_t *spa, uint64_t config_cache_txg,
4411eda14cbcSMatt Macy     boolean_t update_config_cache)
4412eda14cbcSMatt Macy {
4413eda14cbcSMatt Macy 	vdev_t *rvd = spa->spa_root_vdev;
4414eda14cbcSMatt Macy 	int need_update = B_FALSE;
4415eda14cbcSMatt Macy 
4416eda14cbcSMatt Macy 	/*
4417eda14cbcSMatt Macy 	 * If the config cache is stale, or we have uninitialized
4418eda14cbcSMatt Macy 	 * metaslabs (see spa_vdev_add()), then update the config.
4419eda14cbcSMatt Macy 	 *
4420eda14cbcSMatt Macy 	 * If this is a verbatim import, trust the current
4421eda14cbcSMatt Macy 	 * in-core spa_config and update the disk labels.
4422eda14cbcSMatt Macy 	 */
4423eda14cbcSMatt Macy 	if (update_config_cache || config_cache_txg != spa->spa_config_txg ||
4424eda14cbcSMatt Macy 	    spa->spa_load_state == SPA_LOAD_IMPORT ||
4425eda14cbcSMatt Macy 	    spa->spa_load_state == SPA_LOAD_RECOVER ||
4426eda14cbcSMatt Macy 	    (spa->spa_import_flags & ZFS_IMPORT_VERBATIM))
4427eda14cbcSMatt Macy 		need_update = B_TRUE;
4428eda14cbcSMatt Macy 
4429eda14cbcSMatt Macy 	for (int c = 0; c < rvd->vdev_children; c++)
4430eda14cbcSMatt Macy 		if (rvd->vdev_child[c]->vdev_ms_array == 0)
4431eda14cbcSMatt Macy 			need_update = B_TRUE;
4432eda14cbcSMatt Macy 
4433eda14cbcSMatt Macy 	/*
4434eda14cbcSMatt Macy 	 * Update the config cache asynchronously in case we're the
4435eda14cbcSMatt Macy 	 * root pool, in which case the config cache isn't writable yet.
4436eda14cbcSMatt Macy 	 */
4437eda14cbcSMatt Macy 	if (need_update)
4438eda14cbcSMatt Macy 		spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE);
4439eda14cbcSMatt Macy }
4440eda14cbcSMatt Macy 
4441eda14cbcSMatt Macy static void
4442eda14cbcSMatt Macy spa_ld_prepare_for_reload(spa_t *spa)
4443eda14cbcSMatt Macy {
4444eda14cbcSMatt Macy 	spa_mode_t mode = spa->spa_mode;
4445eda14cbcSMatt Macy 	int async_suspended = spa->spa_async_suspended;
4446eda14cbcSMatt Macy 
4447eda14cbcSMatt Macy 	spa_unload(spa);
4448eda14cbcSMatt Macy 	spa_deactivate(spa);
4449eda14cbcSMatt Macy 	spa_activate(spa, mode);
4450eda14cbcSMatt Macy 
4451eda14cbcSMatt Macy 	/*
4452eda14cbcSMatt Macy 	 * We save the value of spa_async_suspended as it gets reset to 0 by
4453eda14cbcSMatt Macy 	 * spa_unload(). We want to restore it to its original value before
4454eda14cbcSMatt Macy 	 * returning as we might call spa_async_resume() later.
4455eda14cbcSMatt Macy 	 */
4456eda14cbcSMatt Macy 	spa->spa_async_suspended = async_suspended;
4457eda14cbcSMatt Macy }
4458eda14cbcSMatt Macy 
4459eda14cbcSMatt Macy static int
4460eda14cbcSMatt Macy spa_ld_read_checkpoint_txg(spa_t *spa)
4461eda14cbcSMatt Macy {
4462eda14cbcSMatt Macy 	uberblock_t checkpoint;
4463eda14cbcSMatt Macy 	int error = 0;
4464eda14cbcSMatt Macy 
4465eda14cbcSMatt Macy 	ASSERT0(spa->spa_checkpoint_txg);
4466eda14cbcSMatt Macy 	ASSERT(MUTEX_HELD(&spa_namespace_lock));
4467eda14cbcSMatt Macy 
4468eda14cbcSMatt Macy 	error = zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
4469eda14cbcSMatt Macy 	    DMU_POOL_ZPOOL_CHECKPOINT, sizeof (uint64_t),
4470eda14cbcSMatt Macy 	    sizeof (uberblock_t) / sizeof (uint64_t), &checkpoint);
4471eda14cbcSMatt Macy 
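	/*
	 * ENOENT simply means the pool has no checkpoint; treat that as
	 * success and leave spa_checkpoint_txg at 0.  Any other lookup
	 * error is propagated to the caller.
	 */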
4472eda14cbcSMatt Macy 	if (error == ENOENT)
4473eda14cbcSMatt Macy 		return (0);
4474eda14cbcSMatt Macy 
4475eda14cbcSMatt Macy 	if (error != 0)
4476eda14cbcSMatt Macy 		return (error);
4477eda14cbcSMatt Macy 
4478eda14cbcSMatt Macy 	ASSERT3U(checkpoint.ub_txg, !=, 0);
4479eda14cbcSMatt Macy 	ASSERT3U(checkpoint.ub_checkpoint_txg, !=, 0);
4480eda14cbcSMatt Macy 	ASSERT3U(checkpoint.ub_timestamp, !=, 0);
4481eda14cbcSMatt Macy 	spa->spa_checkpoint_txg = checkpoint.ub_txg;
4482eda14cbcSMatt Macy 	spa->spa_checkpoint_info.sci_timestamp = checkpoint.ub_timestamp;
4483eda14cbcSMatt Macy 
4484eda14cbcSMatt Macy 	return (0);
4485eda14cbcSMatt Macy }
4486eda14cbcSMatt Macy 
4487eda14cbcSMatt Macy static int
4488eda14cbcSMatt Macy spa_ld_mos_init(spa_t *spa, spa_import_type_t type)
4489eda14cbcSMatt Macy {
4490eda14cbcSMatt Macy 	int error = 0;
4491eda14cbcSMatt Macy 
4492eda14cbcSMatt Macy 	ASSERT(MUTEX_HELD(&spa_namespace_lock));
4493eda14cbcSMatt Macy 	ASSERT(spa->spa_config_source != SPA_CONFIG_SRC_NONE);
4494eda14cbcSMatt Macy 
4495eda14cbcSMatt Macy 	/*
4496eda14cbcSMatt Macy 	 * Never trust the config that is provided unless we are assembling
4497eda14cbcSMatt Macy 	 * a pool following a split.
4498eda14cbcSMatt Macy 	 * This means don't trust blkptrs and the vdev tree in general. This
4499eda14cbcSMatt Macy 	 * also effectively puts the spa in read-only mode since
4500eda14cbcSMatt Macy 	 * spa_writeable() checks that spa_trust_config is true.
4501eda14cbcSMatt Macy 	 * We will later load a trusted config from the MOS.
4502eda14cbcSMatt Macy 	 */
4503eda14cbcSMatt Macy 	if (type != SPA_IMPORT_ASSEMBLE)
4504eda14cbcSMatt Macy 		spa->spa_trust_config = B_FALSE;
4505eda14cbcSMatt Macy 
4506eda14cbcSMatt Macy 	/*
4507eda14cbcSMatt Macy 	 * Parse the config provided to create a vdev tree.
4508eda14cbcSMatt Macy 	 */
4509eda14cbcSMatt Macy 	error = spa_ld_parse_config(spa, type);
4510eda14cbcSMatt Macy 	if (error != 0)
4511eda14cbcSMatt Macy 		return (error);
4512eda14cbcSMatt Macy 
4513eda14cbcSMatt Macy 	spa_import_progress_add(spa);
4514eda14cbcSMatt Macy 
4515eda14cbcSMatt Macy 	/*
4516eda14cbcSMatt Macy 	 * Now that we have the vdev tree, try to open each vdev. This involves
4517eda14cbcSMatt Macy 	 * opening the underlying physical device, retrieving its geometry and
4518eda14cbcSMatt Macy 	 * probing the vdev with a dummy I/O. The state of each vdev will be set
4519eda14cbcSMatt Macy 	 * based on the success of those operations. After this we'll be ready
4520eda14cbcSMatt Macy 	 * to read from the vdevs.
4521eda14cbcSMatt Macy 	 */
4522eda14cbcSMatt Macy 	error = spa_ld_open_vdevs(spa);
4523eda14cbcSMatt Macy 	if (error != 0)
4524eda14cbcSMatt Macy 		return (error);
4525eda14cbcSMatt Macy 
4526eda14cbcSMatt Macy 	/*
4527eda14cbcSMatt Macy 	 * Read the label of each vdev and make sure that the GUIDs stored
4528eda14cbcSMatt Macy 	 * there match the GUIDs in the config provided.
4529eda14cbcSMatt Macy 	 * If we're assembling a new pool that's been split off from an
4530eda14cbcSMatt Macy 	 * existing pool, the labels haven't yet been updated so we skip
4531eda14cbcSMatt Macy 	 * validation for now.
4532eda14cbcSMatt Macy 	 */
4533eda14cbcSMatt Macy 	if (type != SPA_IMPORT_ASSEMBLE) {
4534eda14cbcSMatt Macy 		error = spa_ld_validate_vdevs(spa);
4535eda14cbcSMatt Macy 		if (error != 0)
4536eda14cbcSMatt Macy 			return (error);
4537eda14cbcSMatt Macy 	}
4538eda14cbcSMatt Macy 
4539eda14cbcSMatt Macy 	/*
4540eda14cbcSMatt Macy 	 * Read all vdev labels to find the best uberblock (i.e. latest,
4541eda14cbcSMatt Macy 	 * unless spa_load_max_txg is set) and store it in spa_uberblock. We
4542eda14cbcSMatt Macy 	 * get the list of features required to read blkptrs in the MOS from
4543eda14cbcSMatt Macy 	 * the vdev label with the best uberblock and verify that our version
4544eda14cbcSMatt Macy 	 * of zfs supports them all.
4545eda14cbcSMatt Macy 	 */
4546eda14cbcSMatt Macy 	error = spa_ld_select_uberblock(spa, type);
4547eda14cbcSMatt Macy 	if (error != 0)
4548eda14cbcSMatt Macy 		return (error);
4549eda14cbcSMatt Macy 
4550eda14cbcSMatt Macy 	/*
4551eda14cbcSMatt Macy 	 * Pass that uberblock to the dsl_pool layer which will open the root
4552eda14cbcSMatt Macy 	 * blkptr. This blkptr points to the latest version of the MOS and will
4553eda14cbcSMatt Macy 	 * allow us to read its contents.
4554eda14cbcSMatt Macy 	 */
4555eda14cbcSMatt Macy 	error = spa_ld_open_rootbp(spa);
4556eda14cbcSMatt Macy 	if (error != 0)
4557eda14cbcSMatt Macy 		return (error);
4558eda14cbcSMatt Macy 
4559eda14cbcSMatt Macy 	return (0);
4560eda14cbcSMatt Macy }
4561eda14cbcSMatt Macy 
4562eda14cbcSMatt Macy static int
4563eda14cbcSMatt Macy spa_ld_checkpoint_rewind(spa_t *spa)
4564eda14cbcSMatt Macy {
4565eda14cbcSMatt Macy 	uberblock_t checkpoint;
4566eda14cbcSMatt Macy 	int error = 0;
4567eda14cbcSMatt Macy 
4568eda14cbcSMatt Macy 	ASSERT(MUTEX_HELD(&spa_namespace_lock));
4569eda14cbcSMatt Macy 	ASSERT(spa->spa_import_flags & ZFS_IMPORT_CHECKPOINT);
4570eda14cbcSMatt Macy 
4571eda14cbcSMatt Macy 	error = zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
4572eda14cbcSMatt Macy 	    DMU_POOL_ZPOOL_CHECKPOINT, sizeof (uint64_t),
4573eda14cbcSMatt Macy 	    sizeof (uberblock_t) / sizeof (uint64_t), &checkpoint);
4574eda14cbcSMatt Macy 
4575eda14cbcSMatt Macy 	if (error != 0) {
4576eda14cbcSMatt Macy 		spa_load_failed(spa, "unable to retrieve checkpointed "
4577eda14cbcSMatt Macy 		    "uberblock from the MOS config [error=%d]", error);
4578eda14cbcSMatt Macy 
4579eda14cbcSMatt Macy 		if (error == ENOENT)
4580eda14cbcSMatt Macy 			error = ZFS_ERR_NO_CHECKPOINT;
4581eda14cbcSMatt Macy 
4582eda14cbcSMatt Macy 		return (error);
4583eda14cbcSMatt Macy 	}
4584eda14cbcSMatt Macy 
4585eda14cbcSMatt Macy 	ASSERT3U(checkpoint.ub_txg, <, spa->spa_uberblock.ub_txg);
4586eda14cbcSMatt Macy 	ASSERT3U(checkpoint.ub_txg, ==, checkpoint.ub_checkpoint_txg);
4587eda14cbcSMatt Macy 
4588eda14cbcSMatt Macy 	/*
4589eda14cbcSMatt Macy 	 * We need to update the txg and timestamp of the checkpointed
4590eda14cbcSMatt Macy 	 * uberblock to be higher than the latest one. This ensures that
4591eda14cbcSMatt Macy 	 * the checkpointed uberblock is selected if we were to close and
4592eda14cbcSMatt Macy 	 * reopen the pool right after we've written it in the vdev labels.
4593eda14cbcSMatt Macy 	 * (also see block comment in vdev_uberblock_compare)
4594eda14cbcSMatt Macy 	 */
4595eda14cbcSMatt Macy 	checkpoint.ub_txg = spa->spa_uberblock.ub_txg + 1;
4596eda14cbcSMatt Macy 	checkpoint.ub_timestamp = gethrestime_sec();
4597eda14cbcSMatt Macy 
4598eda14cbcSMatt Macy 	/*
4599eda14cbcSMatt Macy 	 * Set current uberblock to be the checkpointed uberblock.
4600eda14cbcSMatt Macy 	 */
4601eda14cbcSMatt Macy 	spa->spa_uberblock = checkpoint;
4602eda14cbcSMatt Macy 
4603eda14cbcSMatt Macy 	/*
4604eda14cbcSMatt Macy 	 * If we are doing a normal rewind, then the pool is open for
4605eda14cbcSMatt Macy 	 * writing and we sync the "updated" checkpointed uberblock to
4606eda14cbcSMatt Macy 	 * disk. Once this is done, we've basically rewound the whole
4607eda14cbcSMatt Macy 	 * pool and there is no way back.
4608eda14cbcSMatt Macy 	 *
4609eda14cbcSMatt Macy 	 * There are cases when we don't want to attempt to sync the
4610eda14cbcSMatt Macy 	 * checkpointed uberblock to disk because we are opening a
4611eda14cbcSMatt Macy 	 * pool as read-only. Specifically, verifying the checkpointed
4612eda14cbcSMatt Macy 	 * state with zdb, and importing the checkpointed state to get
4613eda14cbcSMatt Macy 	 * a "preview" of its content.
4614eda14cbcSMatt Macy 	 */
4615eda14cbcSMatt Macy 	if (spa_writeable(spa)) {
4616eda14cbcSMatt Macy 		vdev_t *rvd = spa->spa_root_vdev;
4617eda14cbcSMatt Macy 
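		/*
		 * Pick up to SPA_SYNC_MIN_VDEVS concrete, non-log top-level
		 * vdevs with initialized metaslabs, starting from a random
		 * child, and sync the rewound uberblock to their labels.
		 */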
4618eda14cbcSMatt Macy 		spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
4619eda14cbcSMatt Macy 		vdev_t *svd[SPA_SYNC_MIN_VDEVS] = { NULL };
4620eda14cbcSMatt Macy 		int svdcount = 0;
4621eda14cbcSMatt Macy 		int children = rvd->vdev_children;
4622eda14cbcSMatt Macy 		int c0 = spa_get_random(children);
4623eda14cbcSMatt Macy 
4624eda14cbcSMatt Macy 		for (int c = 0; c < children; c++) {
4625eda14cbcSMatt Macy 			vdev_t *vd = rvd->vdev_child[(c0 + c) % children];
4626eda14cbcSMatt Macy 
4627eda14cbcSMatt Macy 			/* Stop when revisiting the first vdev */
4628eda14cbcSMatt Macy 			if (c > 0 && svd[0] == vd)
4629eda14cbcSMatt Macy 				break;
4630eda14cbcSMatt Macy 
4631eda14cbcSMatt Macy 			if (vd->vdev_ms_array == 0 || vd->vdev_islog ||
4632eda14cbcSMatt Macy 			    !vdev_is_concrete(vd))
4633eda14cbcSMatt Macy 				continue;
4634eda14cbcSMatt Macy 
4635eda14cbcSMatt Macy 			svd[svdcount++] = vd;
4636eda14cbcSMatt Macy 			if (svdcount == SPA_SYNC_MIN_VDEVS)
4637eda14cbcSMatt Macy 				break;
4638eda14cbcSMatt Macy 		}
4639eda14cbcSMatt Macy 		error = vdev_config_sync(svd, svdcount, spa->spa_first_txg);
4640eda14cbcSMatt Macy 		if (error == 0)
4641eda14cbcSMatt Macy 			spa->spa_last_synced_guid = rvd->vdev_guid;
4642eda14cbcSMatt Macy 		spa_config_exit(spa, SCL_ALL, FTAG);
4643eda14cbcSMatt Macy 
4644eda14cbcSMatt Macy 		if (error != 0) {
4645eda14cbcSMatt Macy 			spa_load_failed(spa, "failed to write checkpointed "
4646eda14cbcSMatt Macy 			    "uberblock to the vdev labels [error=%d]", error);
4647eda14cbcSMatt Macy 			return (error);
4648eda14cbcSMatt Macy 		}
4649eda14cbcSMatt Macy 	}
4650eda14cbcSMatt Macy 
4651eda14cbcSMatt Macy 	return (0);
4652eda14cbcSMatt Macy }
4653eda14cbcSMatt Macy 
4654eda14cbcSMatt Macy static int
4655eda14cbcSMatt Macy spa_ld_mos_with_trusted_config(spa_t *spa, spa_import_type_t type,
4656eda14cbcSMatt Macy     boolean_t *update_config_cache)
4657eda14cbcSMatt Macy {
4658eda14cbcSMatt Macy 	int error;
4659eda14cbcSMatt Macy 
4660eda14cbcSMatt Macy 	/*
4661eda14cbcSMatt Macy 	 * Parse the config for pool, open and validate vdevs,
4662eda14cbcSMatt Macy 	 * select an uberblock, and use that uberblock to open
4663eda14cbcSMatt Macy 	 * the MOS.
4664eda14cbcSMatt Macy 	 */
4665eda14cbcSMatt Macy 	error = spa_ld_mos_init(spa, type);
4666eda14cbcSMatt Macy 	if (error != 0)
4667eda14cbcSMatt Macy 		return (error);
4668eda14cbcSMatt Macy 
4669eda14cbcSMatt Macy 	/*
4670eda14cbcSMatt Macy 	 * Retrieve the trusted config stored in the MOS and use it to create
4671eda14cbcSMatt Macy 	 * a new, exact version of the vdev tree, then reopen all vdevs.
4672eda14cbcSMatt Macy 	 */
4673eda14cbcSMatt Macy 	error = spa_ld_trusted_config(spa, type, B_FALSE);
4674eda14cbcSMatt Macy 	if (error == EAGAIN) {
4675eda14cbcSMatt Macy 		if (update_config_cache != NULL)
4676eda14cbcSMatt Macy 			*update_config_cache = B_TRUE;
4677eda14cbcSMatt Macy 
4678eda14cbcSMatt Macy 		/*
4679eda14cbcSMatt Macy 		 * Redo the loading process with the trusted config if it is
4680eda14cbcSMatt Macy 		 * too different from the untrusted config.
4681eda14cbcSMatt Macy 		 */
4682eda14cbcSMatt Macy 		spa_ld_prepare_for_reload(spa);
4683eda14cbcSMatt Macy 		spa_load_note(spa, "RELOADING");
4684eda14cbcSMatt Macy 		error = spa_ld_mos_init(spa, type);
4685eda14cbcSMatt Macy 		if (error != 0)
4686eda14cbcSMatt Macy 			return (error);
4687eda14cbcSMatt Macy 
4688eda14cbcSMatt Macy 		error = spa_ld_trusted_config(spa, type, B_TRUE);
4689eda14cbcSMatt Macy 		if (error != 0)
4690eda14cbcSMatt Macy 			return (error);
4691eda14cbcSMatt Macy 
4692eda14cbcSMatt Macy 	} else if (error != 0) {
4693eda14cbcSMatt Macy 		return (error);
4694eda14cbcSMatt Macy 	}
4695eda14cbcSMatt Macy 
4696eda14cbcSMatt Macy 	return (0);
4697eda14cbcSMatt Macy }
4698eda14cbcSMatt Macy 
4699eda14cbcSMatt Macy /*
4700eda14cbcSMatt Macy  * Load an existing storage pool, using the config provided. This config
4701eda14cbcSMatt Macy  * describes which vdevs are part of the pool and is later validated against
4702eda14cbcSMatt Macy  * partial configs present in each vdev's label and an entire copy of the
4703eda14cbcSMatt Macy  * config stored in the MOS.
4704eda14cbcSMatt Macy  */
4705eda14cbcSMatt Macy static int
4706eda14cbcSMatt Macy spa_load_impl(spa_t *spa, spa_import_type_t type, char **ereport)
4707eda14cbcSMatt Macy {
4708eda14cbcSMatt Macy 	int error = 0;
4709eda14cbcSMatt Macy 	boolean_t missing_feat_write = B_FALSE;
4710eda14cbcSMatt Macy 	boolean_t checkpoint_rewind =
4711eda14cbcSMatt Macy 	    (spa->spa_import_flags & ZFS_IMPORT_CHECKPOINT);
4712eda14cbcSMatt Macy 	boolean_t update_config_cache = B_FALSE;
4713eda14cbcSMatt Macy 
4714eda14cbcSMatt Macy 	ASSERT(MUTEX_HELD(&spa_namespace_lock));
4715eda14cbcSMatt Macy 	ASSERT(spa->spa_config_source != SPA_CONFIG_SRC_NONE);
4716eda14cbcSMatt Macy 
4717eda14cbcSMatt Macy 	spa_load_note(spa, "LOADING");
4718eda14cbcSMatt Macy 
4719eda14cbcSMatt Macy 	error = spa_ld_mos_with_trusted_config(spa, type, &update_config_cache);
4720eda14cbcSMatt Macy 	if (error != 0)
4721eda14cbcSMatt Macy 		return (error);
4722eda14cbcSMatt Macy 
4723eda14cbcSMatt Macy 	/*
4724eda14cbcSMatt Macy 	 * If we are rewinding to the checkpoint then we need to repeat
4725eda14cbcSMatt Macy 	 * everything we've done so far in this function but this time
4726eda14cbcSMatt Macy 	 * selecting the checkpointed uberblock and using that to open
4727eda14cbcSMatt Macy 	 * the MOS.
4728eda14cbcSMatt Macy 	 */
4729eda14cbcSMatt Macy 	if (checkpoint_rewind) {
4730eda14cbcSMatt Macy 		/*
4731eda14cbcSMatt Macy 		 * If we are rewinding to the checkpoint, update the config
4732eda14cbcSMatt Macy 		 * cache anyway.
4733eda14cbcSMatt Macy 		 */
4734eda14cbcSMatt Macy 		update_config_cache = B_TRUE;
4735eda14cbcSMatt Macy 
4736eda14cbcSMatt Macy 		/*
4737eda14cbcSMatt Macy 		 * Extract the checkpointed uberblock from the current MOS
4738eda14cbcSMatt Macy 		 * and use this as the pool's uberblock from now on. If the
4739eda14cbcSMatt Macy 		 * pool is imported as writeable we also write the checkpoint
4740eda14cbcSMatt Macy 		 * uberblock to the labels, making the rewind permanent.
4741eda14cbcSMatt Macy 		 */
4742eda14cbcSMatt Macy 		error = spa_ld_checkpoint_rewind(spa);
4743eda14cbcSMatt Macy 		if (error != 0)
4744eda14cbcSMatt Macy 			return (error);
4745eda14cbcSMatt Macy 
4746eda14cbcSMatt Macy 		/*
4747eda14cbcSMatt Macy 		 * Redo the loading process again with the
4748eda14cbcSMatt Macy 		 * checkpointed uberblock.
4749eda14cbcSMatt Macy 		 */
4750eda14cbcSMatt Macy 		spa_ld_prepare_for_reload(spa);
4751eda14cbcSMatt Macy 		spa_load_note(spa, "LOADING checkpointed uberblock");
4752eda14cbcSMatt Macy 		error = spa_ld_mos_with_trusted_config(spa, type, NULL);
4753eda14cbcSMatt Macy 		if (error != 0)
4754eda14cbcSMatt Macy 			return (error);
4755eda14cbcSMatt Macy 	}
4756eda14cbcSMatt Macy 
4757eda14cbcSMatt Macy 	/*
4758eda14cbcSMatt Macy 	 * Retrieve the checkpoint txg if the pool has a checkpoint.
4759eda14cbcSMatt Macy 	 */
4760eda14cbcSMatt Macy 	error = spa_ld_read_checkpoint_txg(spa);
4761eda14cbcSMatt Macy 	if (error != 0)
4762eda14cbcSMatt Macy 		return (error);
4763eda14cbcSMatt Macy 
4764eda14cbcSMatt Macy 	/*
4765eda14cbcSMatt Macy 	 * Retrieve the mapping of indirect vdevs. Those vdevs were removed
4766eda14cbcSMatt Macy 	 * from the pool and their contents were re-mapped to other vdevs. Note
4767eda14cbcSMatt Macy 	 * that everything that we read before this step must have been
4768eda14cbcSMatt Macy 	 * rewritten on concrete vdevs after the last device removal was
4769eda14cbcSMatt Macy 	 * initiated. Otherwise we could be reading from indirect vdevs before
4770eda14cbcSMatt Macy 	 * we have loaded their mappings.
4771eda14cbcSMatt Macy 	 */
4772eda14cbcSMatt Macy 	error = spa_ld_open_indirect_vdev_metadata(spa);
4773eda14cbcSMatt Macy 	if (error != 0)
4774eda14cbcSMatt Macy 		return (error);
4775eda14cbcSMatt Macy 
4776eda14cbcSMatt Macy 	/*
4777eda14cbcSMatt Macy 	 * Retrieve the full list of active features from the MOS and check if
4778eda14cbcSMatt Macy 	 * they are all supported.
4779eda14cbcSMatt Macy 	 */
4780eda14cbcSMatt Macy 	error = spa_ld_check_features(spa, &missing_feat_write);
4781eda14cbcSMatt Macy 	if (error != 0)
4782eda14cbcSMatt Macy 		return (error);
4783eda14cbcSMatt Macy 
4784eda14cbcSMatt Macy 	/*
4785eda14cbcSMatt Macy 	 * Load several special directories from the MOS needed by the dsl_pool
4786eda14cbcSMatt Macy 	 * layer.
4787eda14cbcSMatt Macy 	 */
4788eda14cbcSMatt Macy 	error = spa_ld_load_special_directories(spa);
4789eda14cbcSMatt Macy 	if (error != 0)
4790eda14cbcSMatt Macy 		return (error);
4791eda14cbcSMatt Macy 
4792eda14cbcSMatt Macy 	/*
4793eda14cbcSMatt Macy 	 * Retrieve pool properties from the MOS.
4794eda14cbcSMatt Macy 	 */
4795eda14cbcSMatt Macy 	error = spa_ld_get_props(spa);
4796eda14cbcSMatt Macy 	if (error != 0)
4797eda14cbcSMatt Macy 		return (error);
4798eda14cbcSMatt Macy 
4799eda14cbcSMatt Macy 	/*
4800eda14cbcSMatt Macy 	 * Retrieve the list of auxiliary devices - cache devices and spares -
4801eda14cbcSMatt Macy 	 * and open them.
4802eda14cbcSMatt Macy 	 */
4803eda14cbcSMatt Macy 	error = spa_ld_open_aux_vdevs(spa, type);
4804eda14cbcSMatt Macy 	if (error != 0)
4805eda14cbcSMatt Macy 		return (error);
4806eda14cbcSMatt Macy 
4807eda14cbcSMatt Macy 	/*
4808eda14cbcSMatt Macy 	 * Load the metadata for all vdevs. Also check if unopenable devices
4809eda14cbcSMatt Macy 	 * should be autoreplaced.
4810eda14cbcSMatt Macy 	 */
4811eda14cbcSMatt Macy 	error = spa_ld_load_vdev_metadata(spa);
4812eda14cbcSMatt Macy 	if (error != 0)
4813eda14cbcSMatt Macy 		return (error);
4814eda14cbcSMatt Macy 
4815eda14cbcSMatt Macy 	error = spa_ld_load_dedup_tables(spa);
4816eda14cbcSMatt Macy 	if (error != 0)
4817eda14cbcSMatt Macy 		return (error);
4818eda14cbcSMatt Macy 
4819eda14cbcSMatt Macy 	/*
4820eda14cbcSMatt Macy 	 * Verify the logs now to make sure we don't have any unexpected errors
4821eda14cbcSMatt Macy 	 * when we claim log blocks later.
4822eda14cbcSMatt Macy 	 */
4823eda14cbcSMatt Macy 	error = spa_ld_verify_logs(spa, type, ereport);
4824eda14cbcSMatt Macy 	if (error != 0)
4825eda14cbcSMatt Macy 		return (error);
4826eda14cbcSMatt Macy 
4827eda14cbcSMatt Macy 	if (missing_feat_write) {
4828eda14cbcSMatt Macy 		ASSERT(spa->spa_load_state == SPA_LOAD_TRYIMPORT);
4829eda14cbcSMatt Macy 
4830eda14cbcSMatt Macy 		/*
4831eda14cbcSMatt Macy 		 * At this point, we know that we can open the pool in
4832eda14cbcSMatt Macy 		 * read-only mode but not read-write mode. We now have enough
4833eda14cbcSMatt Macy 		 * information and can return to userland.
4834eda14cbcSMatt Macy 		 */
4835eda14cbcSMatt Macy 		return (spa_vdev_err(spa->spa_root_vdev, VDEV_AUX_UNSUP_FEAT,
4836eda14cbcSMatt Macy 		    ENOTSUP));
4837eda14cbcSMatt Macy 	}
4838eda14cbcSMatt Macy 
4839eda14cbcSMatt Macy 	/*
4840eda14cbcSMatt Macy 	 * Traverse the last txgs to make sure the pool was left off in a safe
4841eda14cbcSMatt Macy 	 * state. When performing an extreme rewind, we verify the whole pool,
4842eda14cbcSMatt Macy 	 * which can take a very long time.
4843eda14cbcSMatt Macy 	 */
4844eda14cbcSMatt Macy 	error = spa_ld_verify_pool_data(spa);
4845eda14cbcSMatt Macy 	if (error != 0)
4846eda14cbcSMatt Macy 		return (error);
4847eda14cbcSMatt Macy 
4848eda14cbcSMatt Macy 	/*
4849eda14cbcSMatt Macy 	 * Calculate the deflated space for the pool. This must be done before
4850eda14cbcSMatt Macy 	 * we write anything to the pool because we'd need to update the space
4851eda14cbcSMatt Macy 	 * accounting using the deflated sizes.
4852eda14cbcSMatt Macy 	 */
4853eda14cbcSMatt Macy 	spa_update_dspace(spa);
4854eda14cbcSMatt Macy 
4855eda14cbcSMatt Macy 	/*
4856eda14cbcSMatt Macy 	 * We have now retrieved all the information we needed to open the
4857eda14cbcSMatt Macy 	 * pool. If we are importing the pool in read-write mode, a few
4858eda14cbcSMatt Macy 	 * additional steps must be performed to finish the import.
4859eda14cbcSMatt Macy 	 */
4860eda14cbcSMatt Macy 	if (spa_writeable(spa) && (spa->spa_load_state == SPA_LOAD_RECOVER ||
4861eda14cbcSMatt Macy 	    spa->spa_load_max_txg == UINT64_MAX)) {
4862eda14cbcSMatt Macy 		uint64_t config_cache_txg = spa->spa_config_txg;
4863eda14cbcSMatt Macy 
4864eda14cbcSMatt Macy 		ASSERT(spa->spa_load_state != SPA_LOAD_TRYIMPORT);
4865eda14cbcSMatt Macy 
4866eda14cbcSMatt Macy 		/*
4867eda14cbcSMatt Macy 		 * In case of a checkpoint rewind, log the original txg
4868eda14cbcSMatt Macy 		 * of the checkpointed uberblock.
4869eda14cbcSMatt Macy 		 */
4870eda14cbcSMatt Macy 		if (checkpoint_rewind) {
4871eda14cbcSMatt Macy 			spa_history_log_internal(spa, "checkpoint rewind",
4872eda14cbcSMatt Macy 			    NULL, "rewound state to txg=%llu",
4873eda14cbcSMatt Macy 			    (u_longlong_t)spa->spa_uberblock.ub_checkpoint_txg);
4874eda14cbcSMatt Macy 		}
4875eda14cbcSMatt Macy 
4876eda14cbcSMatt Macy 		/*
4877eda14cbcSMatt Macy 		 * Traverse the ZIL and claim all blocks.
4878eda14cbcSMatt Macy 		 */
4879eda14cbcSMatt Macy 		spa_ld_claim_log_blocks(spa);
4880eda14cbcSMatt Macy 
4881eda14cbcSMatt Macy 		/*
4882eda14cbcSMatt Macy 		 * Kick-off the syncing thread.
4883eda14cbcSMatt Macy 		 */
4884eda14cbcSMatt Macy 		spa->spa_sync_on = B_TRUE;
4885eda14cbcSMatt Macy 		txg_sync_start(spa->spa_dsl_pool);
4886eda14cbcSMatt Macy 		mmp_thread_start(spa);
4887eda14cbcSMatt Macy 
4888eda14cbcSMatt Macy 		/*
4889eda14cbcSMatt Macy 		 * Wait for all claims to sync.  We sync up to the highest
4890eda14cbcSMatt Macy 		 * claimed log block birth time so that claimed log blocks
4891eda14cbcSMatt Macy 		 * don't appear to be from the future.  spa_claim_max_txg
4892eda14cbcSMatt Macy 		 * will have been set for us by ZIL traversal operations
4893eda14cbcSMatt Macy 		 * performed above.
4894eda14cbcSMatt Macy 		 */
4895eda14cbcSMatt Macy 		txg_wait_synced(spa->spa_dsl_pool, spa->spa_claim_max_txg);
4896eda14cbcSMatt Macy 
4897eda14cbcSMatt Macy 		/*
4898eda14cbcSMatt Macy 		 * Check if we need to request an update of the config. On the
4899eda14cbcSMatt Macy 		 * next sync, we would update the config stored in vdev labels
4900eda14cbcSMatt Macy 		 * and the cachefile (by default /etc/zfs/zpool.cache).
4901eda14cbcSMatt Macy 		 */
4902eda14cbcSMatt Macy 		spa_ld_check_for_config_update(spa, config_cache_txg,
4903eda14cbcSMatt Macy 		    update_config_cache);
4904eda14cbcSMatt Macy 
4905eda14cbcSMatt Macy 		/*
4906eda14cbcSMatt Macy 		 * Check if a rebuild was in progress and if so resume it.
4907eda14cbcSMatt Macy 		 * Then check all DTLs to see if anything needs resilvering.
4908eda14cbcSMatt Macy 		 * The resilver will be deferred if a rebuild was started.
4909eda14cbcSMatt Macy 		 */
4910eda14cbcSMatt Macy 		if (vdev_rebuild_active(spa->spa_root_vdev)) {
4911eda14cbcSMatt Macy 			vdev_rebuild_restart(spa);
4912eda14cbcSMatt Macy 		} else if (!dsl_scan_resilvering(spa->spa_dsl_pool) &&
4913eda14cbcSMatt Macy 		    vdev_resilver_needed(spa->spa_root_vdev, NULL, NULL)) {
4914eda14cbcSMatt Macy 			spa_async_request(spa, SPA_ASYNC_RESILVER);
4915eda14cbcSMatt Macy 		}
4916eda14cbcSMatt Macy 
4917eda14cbcSMatt Macy 		/*
4918eda14cbcSMatt Macy 		 * Log the fact that we booted up (so that we can detect if
4919eda14cbcSMatt Macy 		 * we rebooted in the middle of an operation).
4920eda14cbcSMatt Macy 		 */
4921eda14cbcSMatt Macy 		spa_history_log_version(spa, "open", NULL);
4922eda14cbcSMatt Macy 
4923eda14cbcSMatt Macy 		spa_restart_removal(spa);
4924eda14cbcSMatt Macy 		spa_spawn_aux_threads(spa);
4925eda14cbcSMatt Macy 
4926eda14cbcSMatt Macy 		/*
4927eda14cbcSMatt Macy 		 * Delete any inconsistent datasets.
4928eda14cbcSMatt Macy 		 *
4929eda14cbcSMatt Macy 		 * Note:
4930eda14cbcSMatt Macy 		 * Since we may be issuing deletes for clones here,
4931eda14cbcSMatt Macy 		 * we make sure to do so after we've spawned all the
4932eda14cbcSMatt Macy 		 * auxiliary threads above (which the livelist
4933eda14cbcSMatt Macy 		 * deletion zthr is part of).
4934eda14cbcSMatt Macy 		 */
4935eda14cbcSMatt Macy 		(void) dmu_objset_find(spa_name(spa),
4936eda14cbcSMatt Macy 		    dsl_destroy_inconsistent, NULL, DS_FIND_CHILDREN);
4937eda14cbcSMatt Macy 
4938eda14cbcSMatt Macy 		/*
4939eda14cbcSMatt Macy 		 * Clean up any stale temporary dataset userrefs.
4940eda14cbcSMatt Macy 		 */
4941eda14cbcSMatt Macy 		dsl_pool_clean_tmp_userrefs(spa->spa_dsl_pool);
4942eda14cbcSMatt Macy 
4943eda14cbcSMatt Macy 		spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
4944eda14cbcSMatt Macy 		vdev_initialize_restart(spa->spa_root_vdev);
4945eda14cbcSMatt Macy 		vdev_trim_restart(spa->spa_root_vdev);
4946eda14cbcSMatt Macy 		vdev_autotrim_restart(spa);
4947eda14cbcSMatt Macy 		spa_config_exit(spa, SCL_CONFIG, FTAG);
4948eda14cbcSMatt Macy 	}
4949eda14cbcSMatt Macy 
4950eda14cbcSMatt Macy 	spa_import_progress_remove(spa_guid(spa));
4951eda14cbcSMatt Macy 	spa_async_request(spa, SPA_ASYNC_L2CACHE_REBUILD);
4952eda14cbcSMatt Macy 
4953eda14cbcSMatt Macy 	spa_load_note(spa, "LOADED");
4954eda14cbcSMatt Macy 
4955eda14cbcSMatt Macy 	return (0);
4956eda14cbcSMatt Macy }
4957eda14cbcSMatt Macy 
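/*
 * Unload and reload the pool with spa_load_max_txg capped to one txg below
 * the currently selected uberblock, forcing an earlier uberblock to be
 * chosen on the next load attempt.
 */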
4958eda14cbcSMatt Macy static int
4959eda14cbcSMatt Macy spa_load_retry(spa_t *spa, spa_load_state_t state)
4960eda14cbcSMatt Macy {
4961eda14cbcSMatt Macy 	spa_mode_t mode = spa->spa_mode;
4962eda14cbcSMatt Macy 
4963eda14cbcSMatt Macy 	spa_unload(spa);
4964eda14cbcSMatt Macy 	spa_deactivate(spa);
4965eda14cbcSMatt Macy 
4966eda14cbcSMatt Macy 	spa->spa_load_max_txg = spa->spa_uberblock.ub_txg - 1;
4967eda14cbcSMatt Macy 
4968eda14cbcSMatt Macy 	spa_activate(spa, mode);
4969eda14cbcSMatt Macy 	spa_async_suspend(spa);
4970eda14cbcSMatt Macy 
4971eda14cbcSMatt Macy 	spa_load_note(spa, "spa_load_retry: rewind, max txg: %llu",
4972eda14cbcSMatt Macy 	    (u_longlong_t)spa->spa_load_max_txg);
4973eda14cbcSMatt Macy 
4974eda14cbcSMatt Macy 	return (spa_load(spa, state, SPA_IMPORT_EXISTING));
4975eda14cbcSMatt Macy }
4976eda14cbcSMatt Macy 
4977eda14cbcSMatt Macy /*
4978eda14cbcSMatt Macy  * If spa_load() fails this function will try loading prior txg's. If
4979eda14cbcSMatt Macy  * If spa_load() fails, this function will try loading prior txgs. If
4980eda14cbcSMatt Macy  * 'state' is SPA_LOAD_RECOVER and one of these loads succeeds, the pool
4981eda14cbcSMatt Macy  * will be rewound to that txg. If 'state' is not SPA_LOAD_RECOVER, this
4982eda14cbcSMatt Macy  * spa_load().
4983eda14cbcSMatt Macy  */
4984eda14cbcSMatt Macy static int
4985eda14cbcSMatt Macy spa_load_best(spa_t *spa, spa_load_state_t state, uint64_t max_request,
4986eda14cbcSMatt Macy     int rewind_flags)
4987eda14cbcSMatt Macy {
4988eda14cbcSMatt Macy 	nvlist_t *loadinfo = NULL;
4989eda14cbcSMatt Macy 	nvlist_t *config = NULL;
4990eda14cbcSMatt Macy 	int load_error, rewind_error;
4991eda14cbcSMatt Macy 	uint64_t safe_rewind_txg;
4992eda14cbcSMatt Macy 	uint64_t min_txg;
4993eda14cbcSMatt Macy 
4994eda14cbcSMatt Macy 	if (spa->spa_load_txg && state == SPA_LOAD_RECOVER) {
4995eda14cbcSMatt Macy 		spa->spa_load_max_txg = spa->spa_load_txg;
4996eda14cbcSMatt Macy 		spa_set_log_state(spa, SPA_LOG_CLEAR);
4997eda14cbcSMatt Macy 	} else {
4998eda14cbcSMatt Macy 		spa->spa_load_max_txg = max_request;
4999eda14cbcSMatt Macy 		if (max_request != UINT64_MAX)
5000eda14cbcSMatt Macy 			spa->spa_extreme_rewind = B_TRUE;
5001eda14cbcSMatt Macy 	}
5002eda14cbcSMatt Macy 
5003eda14cbcSMatt Macy 	load_error = rewind_error = spa_load(spa, state, SPA_IMPORT_EXISTING);
5004eda14cbcSMatt Macy 	if (load_error == 0)
5005eda14cbcSMatt Macy 		return (0);
5006eda14cbcSMatt Macy 	if (load_error == ZFS_ERR_NO_CHECKPOINT) {
5007eda14cbcSMatt Macy 		/*
5008eda14cbcSMatt Macy 		 * When attempting checkpoint-rewind on a pool with no
5009eda14cbcSMatt Macy 		 * checkpoint, we should not attempt to load uberblocks
5010eda14cbcSMatt Macy 		 * from previous txgs when spa_load fails.
5011eda14cbcSMatt Macy 		 */
5012eda14cbcSMatt Macy 		ASSERT(spa->spa_import_flags & ZFS_IMPORT_CHECKPOINT);
5013eda14cbcSMatt Macy 		spa_import_progress_remove(spa_guid(spa));
5014eda14cbcSMatt Macy 		return (load_error);
5015eda14cbcSMatt Macy 	}
5016eda14cbcSMatt Macy 
5017eda14cbcSMatt Macy 	if (spa->spa_root_vdev != NULL)
5018eda14cbcSMatt Macy 		config = spa_config_generate(spa, NULL, -1ULL, B_TRUE);
5019eda14cbcSMatt Macy 
5020eda14cbcSMatt Macy 	spa->spa_last_ubsync_txg = spa->spa_uberblock.ub_txg;
5021eda14cbcSMatt Macy 	spa->spa_last_ubsync_txg_ts = spa->spa_uberblock.ub_timestamp;
5022eda14cbcSMatt Macy 
5023eda14cbcSMatt Macy 	if (rewind_flags & ZPOOL_NEVER_REWIND) {
5024eda14cbcSMatt Macy 		nvlist_free(config);
5025eda14cbcSMatt Macy 		spa_import_progress_remove(spa_guid(spa));
5026eda14cbcSMatt Macy 		return (load_error);
5027eda14cbcSMatt Macy 	}
5028eda14cbcSMatt Macy 
5029eda14cbcSMatt Macy 	if (state == SPA_LOAD_RECOVER) {
5030eda14cbcSMatt Macy 		/* Price of rolling back is discarding txgs, including log */
5031eda14cbcSMatt Macy 		spa_set_log_state(spa, SPA_LOG_CLEAR);
5032eda14cbcSMatt Macy 	} else {
5033eda14cbcSMatt Macy 		/*
5034eda14cbcSMatt Macy 		 * If we aren't rolling back, save the load info from our first
5035eda14cbcSMatt Macy 		 * import attempt so that we can restore it after attempting
5036eda14cbcSMatt Macy 		 * to rewind.
5037eda14cbcSMatt Macy 		 */
5038eda14cbcSMatt Macy 		loadinfo = spa->spa_load_info;
5039eda14cbcSMatt Macy 		spa->spa_load_info = fnvlist_alloc();
5040eda14cbcSMatt Macy 	}
5041eda14cbcSMatt Macy 
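	/*
	 * Set up the rewind window: a normal rewind only goes back
	 * TXG_DEFER_SIZE txgs from the last-synced uberblock, while an
	 * extreme rewind (ZPOOL_EXTREME_REWIND) may go back to TXG_INITIAL.
	 */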
5042eda14cbcSMatt Macy 	spa->spa_load_max_txg = spa->spa_last_ubsync_txg;
5043eda14cbcSMatt Macy 	safe_rewind_txg = spa->spa_last_ubsync_txg - TXG_DEFER_SIZE;
5044eda14cbcSMatt Macy 	min_txg = (rewind_flags & ZPOOL_EXTREME_REWIND) ?
5045eda14cbcSMatt Macy 	    TXG_INITIAL : safe_rewind_txg;
5046eda14cbcSMatt Macy 
5047eda14cbcSMatt Macy 	/*
5048eda14cbcSMatt Macy 	 * Continue as long as we're finding errors, we're still within
5049eda14cbcSMatt Macy 	 * the acceptable rewind range, and we're still finding uberblocks.
5050eda14cbcSMatt Macy 	 */
5051eda14cbcSMatt Macy 	while (rewind_error && spa->spa_uberblock.ub_txg >= min_txg &&
5052eda14cbcSMatt Macy 	    spa->spa_uberblock.ub_txg <= spa->spa_load_max_txg) {
5053eda14cbcSMatt Macy 		if (spa->spa_load_max_txg < safe_rewind_txg)
5054eda14cbcSMatt Macy 			spa->spa_extreme_rewind = B_TRUE;
5055eda14cbcSMatt Macy 		rewind_error = spa_load_retry(spa, state);
5056eda14cbcSMatt Macy 	}
5057eda14cbcSMatt Macy 
5058eda14cbcSMatt Macy 	spa->spa_extreme_rewind = B_FALSE;
5059eda14cbcSMatt Macy 	spa->spa_load_max_txg = UINT64_MAX;
5060eda14cbcSMatt Macy 
5061eda14cbcSMatt Macy 	if (config && (rewind_error || state != SPA_LOAD_RECOVER))
5062eda14cbcSMatt Macy 		spa_config_set(spa, config);
5063eda14cbcSMatt Macy 	else
5064eda14cbcSMatt Macy 		nvlist_free(config);
5065eda14cbcSMatt Macy 
5066eda14cbcSMatt Macy 	if (state == SPA_LOAD_RECOVER) {
5067eda14cbcSMatt Macy 		ASSERT3P(loadinfo, ==, NULL);
5068eda14cbcSMatt Macy 		spa_import_progress_remove(spa_guid(spa));
5069eda14cbcSMatt Macy 		return (rewind_error);
5070eda14cbcSMatt Macy 	} else {
5071eda14cbcSMatt Macy 		/* Store the rewind info as part of the initial load info */
5072eda14cbcSMatt Macy 		fnvlist_add_nvlist(loadinfo, ZPOOL_CONFIG_REWIND_INFO,
5073eda14cbcSMatt Macy 		    spa->spa_load_info);
5074eda14cbcSMatt Macy 
5075eda14cbcSMatt Macy 		/* Restore the initial load info */
5076eda14cbcSMatt Macy 		fnvlist_free(spa->spa_load_info);
5077eda14cbcSMatt Macy 		spa->spa_load_info = loadinfo;
5078eda14cbcSMatt Macy 
5079eda14cbcSMatt Macy 		spa_import_progress_remove(spa_guid(spa));
5080eda14cbcSMatt Macy 		return (load_error);
5081eda14cbcSMatt Macy 	}
5082eda14cbcSMatt Macy }
5083eda14cbcSMatt Macy 
5084eda14cbcSMatt Macy /*
5085eda14cbcSMatt Macy  * Pool Open/Import
5086eda14cbcSMatt Macy  *
5087eda14cbcSMatt Macy  * The import case is identical to an open except that the configuration is sent
5088eda14cbcSMatt Macy  * down from userland, instead of grabbed from the configuration cache.  For the
5089eda14cbcSMatt Macy  * case of an open, the pool configuration will exist in the
5090eda14cbcSMatt Macy  * POOL_STATE_UNINITIALIZED state.
5091eda14cbcSMatt Macy  *
5092eda14cbcSMatt Macy  * The stats information (gen/count/ustats) is used to gather vdev statistics at
5093eda14cbcSMatt Macy  * the same time we open the pool, without having to keep around the spa_t in
5094eda14cbcSMatt Macy  * some ambiguous state.
5095eda14cbcSMatt Macy  */
5096eda14cbcSMatt Macy static int
5097eda14cbcSMatt Macy spa_open_common(const char *pool, spa_t **spapp, void *tag, nvlist_t *nvpolicy,
5098eda14cbcSMatt Macy     nvlist_t **config)
5099eda14cbcSMatt Macy {
5100eda14cbcSMatt Macy 	spa_t *spa;
5101eda14cbcSMatt Macy 	spa_load_state_t state = SPA_LOAD_OPEN;
5102eda14cbcSMatt Macy 	int error;
5103eda14cbcSMatt Macy 	int locked = B_FALSE;
5104eda14cbcSMatt Macy 	int firstopen = B_FALSE;
5105eda14cbcSMatt Macy 
5106eda14cbcSMatt Macy 	*spapp = NULL;
5107eda14cbcSMatt Macy 
5108eda14cbcSMatt Macy 	/*
5109eda14cbcSMatt Macy 	 * As disgusting as this is, we need to support recursive calls to this
5110eda14cbcSMatt Macy 	 * function because dsl_dir_open() is called during spa_load(), and ends
5111eda14cbcSMatt Macy 	 * up calling spa_open() again.  The real fix is to figure out how to
5112eda14cbcSMatt Macy 	 * avoid dsl_dir_open() calling this in the first place.
5113eda14cbcSMatt Macy 	 */
5114eda14cbcSMatt Macy 	if (MUTEX_NOT_HELD(&spa_namespace_lock)) {
5115eda14cbcSMatt Macy 		mutex_enter(&spa_namespace_lock);
5116eda14cbcSMatt Macy 		locked = B_TRUE;
5117eda14cbcSMatt Macy 	}
5118eda14cbcSMatt Macy 
5119eda14cbcSMatt Macy 	if ((spa = spa_lookup(pool)) == NULL) {
5120eda14cbcSMatt Macy 		if (locked)
5121eda14cbcSMatt Macy 			mutex_exit(&spa_namespace_lock);
5122eda14cbcSMatt Macy 		return (SET_ERROR(ENOENT));
5123eda14cbcSMatt Macy 	}
5124eda14cbcSMatt Macy 
5125eda14cbcSMatt Macy 	if (spa->spa_state == POOL_STATE_UNINITIALIZED) {
5126eda14cbcSMatt Macy 		zpool_load_policy_t policy;
5127eda14cbcSMatt Macy 
5128eda14cbcSMatt Macy 		firstopen = B_TRUE;
5129eda14cbcSMatt Macy 
5130eda14cbcSMatt Macy 		zpool_get_load_policy(nvpolicy ? nvpolicy : spa->spa_config,
5131eda14cbcSMatt Macy 		    &policy);
5132eda14cbcSMatt Macy 		if (policy.zlp_rewind & ZPOOL_DO_REWIND)
5133eda14cbcSMatt Macy 			state = SPA_LOAD_RECOVER;
5134eda14cbcSMatt Macy 
5135eda14cbcSMatt Macy 		spa_activate(spa, spa_mode_global);
5136eda14cbcSMatt Macy 
5137eda14cbcSMatt Macy 		if (state != SPA_LOAD_RECOVER)
5138eda14cbcSMatt Macy 			spa->spa_last_ubsync_txg = spa->spa_load_txg = 0;
5139eda14cbcSMatt Macy 		spa->spa_config_source = SPA_CONFIG_SRC_CACHEFILE;
5140eda14cbcSMatt Macy 
5141eda14cbcSMatt Macy 		zfs_dbgmsg("spa_open_common: opening %s", pool);
5142eda14cbcSMatt Macy 		error = spa_load_best(spa, state, policy.zlp_txg,
5143eda14cbcSMatt Macy 		    policy.zlp_rewind);
5144eda14cbcSMatt Macy 
5145eda14cbcSMatt Macy 		if (error == EBADF) {
5146eda14cbcSMatt Macy 			/*
5147eda14cbcSMatt Macy 			 * If vdev_validate() returns failure (indicated by
5148eda14cbcSMatt Macy 			 * EBADF), it means one of the vdevs reports that the
5149eda14cbcSMatt Macy 			 * pool has been exported or destroyed.  If this is the
5150eda14cbcSMatt Macy 			 * case, the config cache is out of sync and we should
5151eda14cbcSMatt Macy 			 * remove the pool from the namespace.
5152eda14cbcSMatt Macy 			 */
5153eda14cbcSMatt Macy 			spa_unload(spa);
5154eda14cbcSMatt Macy 			spa_deactivate(spa);
5155eda14cbcSMatt Macy 			spa_write_cachefile(spa, B_TRUE, B_TRUE);
5156eda14cbcSMatt Macy 			spa_remove(spa);
5157eda14cbcSMatt Macy 			if (locked)
5158eda14cbcSMatt Macy 				mutex_exit(&spa_namespace_lock);
5159eda14cbcSMatt Macy 			return (SET_ERROR(ENOENT));
5160eda14cbcSMatt Macy 		}
5161eda14cbcSMatt Macy 
5162eda14cbcSMatt Macy 		if (error) {
5163eda14cbcSMatt Macy 			/*
5164eda14cbcSMatt Macy 			 * We can't open the pool, but we still have useful
5165eda14cbcSMatt Macy 			 * information: the state of each vdev after the
5166eda14cbcSMatt Macy 			 * attempted vdev_open().  Return this to the user.
5167eda14cbcSMatt Macy 			 */
5168eda14cbcSMatt Macy 			if (config != NULL && spa->spa_config) {
5169eda14cbcSMatt Macy 				VERIFY(nvlist_dup(spa->spa_config, config,
5170eda14cbcSMatt Macy 				    KM_SLEEP) == 0);
5171eda14cbcSMatt Macy 				VERIFY(nvlist_add_nvlist(*config,
5172eda14cbcSMatt Macy 				    ZPOOL_CONFIG_LOAD_INFO,
5173eda14cbcSMatt Macy 				    spa->spa_load_info) == 0);
5174eda14cbcSMatt Macy 			}
5175eda14cbcSMatt Macy 			spa_unload(spa);
5176eda14cbcSMatt Macy 			spa_deactivate(spa);
5177eda14cbcSMatt Macy 			spa->spa_last_open_failed = error;
5178eda14cbcSMatt Macy 			if (locked)
5179eda14cbcSMatt Macy 				mutex_exit(&spa_namespace_lock);
5180eda14cbcSMatt Macy 			*spapp = NULL;
5181eda14cbcSMatt Macy 			return (error);
5182eda14cbcSMatt Macy 		}
5183eda14cbcSMatt Macy 	}
5184eda14cbcSMatt Macy 
5185eda14cbcSMatt Macy 	spa_open_ref(spa, tag);
5186eda14cbcSMatt Macy 
5187eda14cbcSMatt Macy 	if (config != NULL)
5188eda14cbcSMatt Macy 		*config = spa_config_generate(spa, NULL, -1ULL, B_TRUE);
5189eda14cbcSMatt Macy 
5190eda14cbcSMatt Macy 	/*
5191eda14cbcSMatt Macy 	 * If we've recovered the pool, pass back any information we
5192eda14cbcSMatt Macy 	 * gathered while doing the load.
5193eda14cbcSMatt Macy 	 */
5194eda14cbcSMatt Macy 	if (state == SPA_LOAD_RECOVER) {
5195eda14cbcSMatt Macy 		VERIFY(nvlist_add_nvlist(*config, ZPOOL_CONFIG_LOAD_INFO,
5196eda14cbcSMatt Macy 		    spa->spa_load_info) == 0);
5197eda14cbcSMatt Macy 	}
5198eda14cbcSMatt Macy 
5199eda14cbcSMatt Macy 	if (locked) {
5200eda14cbcSMatt Macy 		spa->spa_last_open_failed = 0;
5201eda14cbcSMatt Macy 		spa->spa_last_ubsync_txg = 0;
5202eda14cbcSMatt Macy 		spa->spa_load_txg = 0;
5203eda14cbcSMatt Macy 		mutex_exit(&spa_namespace_lock);
5204eda14cbcSMatt Macy 	}
5205eda14cbcSMatt Macy 
5206eda14cbcSMatt Macy 	if (firstopen)
5207eda14cbcSMatt Macy 		zvol_create_minors_recursive(spa_name(spa));
5208eda14cbcSMatt Macy 
5209eda14cbcSMatt Macy 	*spapp = spa;
5210eda14cbcSMatt Macy 
5211eda14cbcSMatt Macy 	return (0);
5212eda14cbcSMatt Macy }
5213eda14cbcSMatt Macy 
5214eda14cbcSMatt Macy int
5215eda14cbcSMatt Macy spa_open_rewind(const char *name, spa_t **spapp, void *tag, nvlist_t *policy,
5216eda14cbcSMatt Macy     nvlist_t **config)
5217eda14cbcSMatt Macy {
5218eda14cbcSMatt Macy 	return (spa_open_common(name, spapp, tag, policy, config));
5219eda14cbcSMatt Macy }
5220eda14cbcSMatt Macy 
5221eda14cbcSMatt Macy int
5222eda14cbcSMatt Macy spa_open(const char *name, spa_t **spapp, void *tag)
5223eda14cbcSMatt Macy {
5224eda14cbcSMatt Macy 	return (spa_open_common(name, spapp, tag, NULL, NULL));
5225eda14cbcSMatt Macy }
5226eda14cbcSMatt Macy 
5227eda14cbcSMatt Macy /*
5228eda14cbcSMatt Macy  * Lookup the given spa_t, incrementing the inject count in the process,
5229eda14cbcSMatt Macy  * Look up the given spa_t, incrementing the inject count in the process,
5230eda14cbcSMatt Macy  */
5231eda14cbcSMatt Macy spa_t *
5232eda14cbcSMatt Macy spa_inject_addref(char *name)
5233eda14cbcSMatt Macy {
5234eda14cbcSMatt Macy 	spa_t *spa;
5235eda14cbcSMatt Macy 
5236eda14cbcSMatt Macy 	mutex_enter(&spa_namespace_lock);
5237eda14cbcSMatt Macy 	if ((spa = spa_lookup(name)) == NULL) {
5238eda14cbcSMatt Macy 		mutex_exit(&spa_namespace_lock);
5239eda14cbcSMatt Macy 		return (NULL);
5240eda14cbcSMatt Macy 	}
5241eda14cbcSMatt Macy 	spa->spa_inject_ref++;
5242eda14cbcSMatt Macy 	mutex_exit(&spa_namespace_lock);
5243eda14cbcSMatt Macy 
5244eda14cbcSMatt Macy 	return (spa);
5245eda14cbcSMatt Macy }
5246eda14cbcSMatt Macy 
5247eda14cbcSMatt Macy void
5248eda14cbcSMatt Macy spa_inject_delref(spa_t *spa)
5249eda14cbcSMatt Macy {
5250eda14cbcSMatt Macy 	mutex_enter(&spa_namespace_lock);
5251eda14cbcSMatt Macy 	spa->spa_inject_ref--;
5252eda14cbcSMatt Macy 	mutex_exit(&spa_namespace_lock);
5253eda14cbcSMatt Macy }
5254eda14cbcSMatt Macy 
5255eda14cbcSMatt Macy /*
5256eda14cbcSMatt Macy  * Add spares device information to the nvlist.
5257eda14cbcSMatt Macy  */
5258eda14cbcSMatt Macy static void
5259eda14cbcSMatt Macy spa_add_spares(spa_t *spa, nvlist_t *config)
5260eda14cbcSMatt Macy {
5261eda14cbcSMatt Macy 	nvlist_t **spares;
5262eda14cbcSMatt Macy 	uint_t i, nspares;
5263eda14cbcSMatt Macy 	nvlist_t *nvroot;
5264eda14cbcSMatt Macy 	uint64_t guid;
5265eda14cbcSMatt Macy 	vdev_stat_t *vs;
5266eda14cbcSMatt Macy 	uint_t vsc;
5267eda14cbcSMatt Macy 	uint64_t pool;
5268eda14cbcSMatt Macy 
5269eda14cbcSMatt Macy 	ASSERT(spa_config_held(spa, SCL_CONFIG, RW_READER));
5270eda14cbcSMatt Macy 
5271eda14cbcSMatt Macy 	if (spa->spa_spares.sav_count == 0)
5272eda14cbcSMatt Macy 		return;
5273eda14cbcSMatt Macy 
5274eda14cbcSMatt Macy 	VERIFY(nvlist_lookup_nvlist(config,
5275eda14cbcSMatt Macy 	    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
5276eda14cbcSMatt Macy 	VERIFY(nvlist_lookup_nvlist_array(spa->spa_spares.sav_config,
5277eda14cbcSMatt Macy 	    ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0);
5278eda14cbcSMatt Macy 	if (nspares != 0) {
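		/*
		 * Add the spares to the config's vdev tree, then look the
		 * array back up so the status updates below modify the copies
		 * stored in the config rather than in sav_config.
		 */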
5279eda14cbcSMatt Macy 		VERIFY(nvlist_add_nvlist_array(nvroot,
5280eda14cbcSMatt Macy 		    ZPOOL_CONFIG_SPARES, spares, nspares) == 0);
5281eda14cbcSMatt Macy 		VERIFY(nvlist_lookup_nvlist_array(nvroot,
5282eda14cbcSMatt Macy 		    ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0);
5283eda14cbcSMatt Macy 
5284eda14cbcSMatt Macy 		/*
5285eda14cbcSMatt Macy 		 * Go through and find any spares which have since been
5286eda14cbcSMatt Macy 		 * repurposed as active spares.  If this is the case, update
5287eda14cbcSMatt Macy 		 * their status appropriately.
5288eda14cbcSMatt Macy 		 */
5289eda14cbcSMatt Macy 		for (i = 0; i < nspares; i++) {
5290eda14cbcSMatt Macy 			VERIFY(nvlist_lookup_uint64(spares[i],
5291eda14cbcSMatt Macy 			    ZPOOL_CONFIG_GUID, &guid) == 0);
5292eda14cbcSMatt Macy 			if (spa_spare_exists(guid, &pool, NULL) &&
5293eda14cbcSMatt Macy 			    pool != 0ULL) {
5294eda14cbcSMatt Macy 				VERIFY(nvlist_lookup_uint64_array(
5295eda14cbcSMatt Macy 				    spares[i], ZPOOL_CONFIG_VDEV_STATS,
5296eda14cbcSMatt Macy 				    (uint64_t **)&vs, &vsc) == 0);
5297eda14cbcSMatt Macy 				vs->vs_state = VDEV_STATE_CANT_OPEN;
5298eda14cbcSMatt Macy 				vs->vs_aux = VDEV_AUX_SPARED;
5299eda14cbcSMatt Macy 			}
5300eda14cbcSMatt Macy 		}
5301eda14cbcSMatt Macy 	}
5302eda14cbcSMatt Macy }
5303eda14cbcSMatt Macy 
5304eda14cbcSMatt Macy /*
5305eda14cbcSMatt Macy  * Add l2cache device information to the nvlist, including vdev stats.
5306eda14cbcSMatt Macy  */
5307eda14cbcSMatt Macy static void
5308eda14cbcSMatt Macy spa_add_l2cache(spa_t *spa, nvlist_t *config)
5309eda14cbcSMatt Macy {
5310eda14cbcSMatt Macy 	nvlist_t **l2cache;
5311eda14cbcSMatt Macy 	uint_t i, j, nl2cache;
5312eda14cbcSMatt Macy 	nvlist_t *nvroot;
5313eda14cbcSMatt Macy 	uint64_t guid;
5314eda14cbcSMatt Macy 	vdev_t *vd;
5315eda14cbcSMatt Macy 	vdev_stat_t *vs;
5316eda14cbcSMatt Macy 	uint_t vsc;
5317eda14cbcSMatt Macy 
5318eda14cbcSMatt Macy 	ASSERT(spa_config_held(spa, SCL_CONFIG, RW_READER));
5319eda14cbcSMatt Macy 
5320eda14cbcSMatt Macy 	if (spa->spa_l2cache.sav_count == 0)
5321eda14cbcSMatt Macy 		return;
5322eda14cbcSMatt Macy 
5323eda14cbcSMatt Macy 	VERIFY(nvlist_lookup_nvlist(config,
5324eda14cbcSMatt Macy 	    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
5325eda14cbcSMatt Macy 	VERIFY(nvlist_lookup_nvlist_array(spa->spa_l2cache.sav_config,
5326eda14cbcSMatt Macy 	    ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0);
5327eda14cbcSMatt Macy 	if (nl2cache != 0) {
5328eda14cbcSMatt Macy 		VERIFY(nvlist_add_nvlist_array(nvroot,
5329eda14cbcSMatt Macy 		    ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache) == 0);
5330eda14cbcSMatt Macy 		VERIFY(nvlist_lookup_nvlist_array(nvroot,
5331eda14cbcSMatt Macy 		    ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0);
5332eda14cbcSMatt Macy 
5333eda14cbcSMatt Macy 		/*
5334eda14cbcSMatt Macy 		 * Update level 2 cache device stats.
5335eda14cbcSMatt Macy 		 */
5336eda14cbcSMatt Macy 
5337eda14cbcSMatt Macy 		for (i = 0; i < nl2cache; i++) {
5338eda14cbcSMatt Macy 			VERIFY(nvlist_lookup_uint64(l2cache[i],
5339eda14cbcSMatt Macy 			    ZPOOL_CONFIG_GUID, &guid) == 0);
5340eda14cbcSMatt Macy 
5341eda14cbcSMatt Macy 			vd = NULL;
5342eda14cbcSMatt Macy 			for (j = 0; j < spa->spa_l2cache.sav_count; j++) {
5343eda14cbcSMatt Macy 				if (guid ==
5344eda14cbcSMatt Macy 				    spa->spa_l2cache.sav_vdevs[j]->vdev_guid) {
5345eda14cbcSMatt Macy 					vd = spa->spa_l2cache.sav_vdevs[j];
5346eda14cbcSMatt Macy 					break;
5347eda14cbcSMatt Macy 				}
5348eda14cbcSMatt Macy 			}
5349eda14cbcSMatt Macy 			ASSERT(vd != NULL);
5350eda14cbcSMatt Macy 
5351eda14cbcSMatt Macy 			VERIFY(nvlist_lookup_uint64_array(l2cache[i],
5352eda14cbcSMatt Macy 			    ZPOOL_CONFIG_VDEV_STATS, (uint64_t **)&vs, &vsc)
5353eda14cbcSMatt Macy 			    == 0);
5354eda14cbcSMatt Macy 			vdev_get_stats(vd, vs);
5355eda14cbcSMatt Macy 			vdev_config_generate_stats(vd, l2cache[i]);
5356eda14cbcSMatt Macy 
5357eda14cbcSMatt Macy 		}
5358eda14cbcSMatt Macy 	}
5359eda14cbcSMatt Macy }
5360eda14cbcSMatt Macy 
5361eda14cbcSMatt Macy static void
5362eda14cbcSMatt Macy spa_feature_stats_from_disk(spa_t *spa, nvlist_t *features)
5363eda14cbcSMatt Macy {
5364eda14cbcSMatt Macy 	zap_cursor_t zc;
5365eda14cbcSMatt Macy 	zap_attribute_t za;
5366eda14cbcSMatt Macy 
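	/*
	 * Walk the for-read and for-write feature ZAP objects in the MOS and
	 * record each feature's reference count in the nvlist.
	 */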
5367eda14cbcSMatt Macy 	if (spa->spa_feat_for_read_obj != 0) {
5368eda14cbcSMatt Macy 		for (zap_cursor_init(&zc, spa->spa_meta_objset,
5369eda14cbcSMatt Macy 		    spa->spa_feat_for_read_obj);
5370eda14cbcSMatt Macy 		    zap_cursor_retrieve(&zc, &za) == 0;
5371eda14cbcSMatt Macy 		    zap_cursor_advance(&zc)) {
5372eda14cbcSMatt Macy 			ASSERT(za.za_integer_length == sizeof (uint64_t) &&
5373eda14cbcSMatt Macy 			    za.za_num_integers == 1);
5374eda14cbcSMatt Macy 			VERIFY0(nvlist_add_uint64(features, za.za_name,
5375eda14cbcSMatt Macy 			    za.za_first_integer));
5376eda14cbcSMatt Macy 		}
5377eda14cbcSMatt Macy 		zap_cursor_fini(&zc);
5378eda14cbcSMatt Macy 	}
5379eda14cbcSMatt Macy 
5380eda14cbcSMatt Macy 	if (spa->spa_feat_for_write_obj != 0) {
5381eda14cbcSMatt Macy 		for (zap_cursor_init(&zc, spa->spa_meta_objset,
5382eda14cbcSMatt Macy 		    spa->spa_feat_for_write_obj);
5383eda14cbcSMatt Macy 		    zap_cursor_retrieve(&zc, &za) == 0;
5384eda14cbcSMatt Macy 		    zap_cursor_advance(&zc)) {
5385eda14cbcSMatt Macy 			ASSERT(za.za_integer_length == sizeof (uint64_t) &&
5386eda14cbcSMatt Macy 			    za.za_num_integers == 1);
5387eda14cbcSMatt Macy 			VERIFY0(nvlist_add_uint64(features, za.za_name,
5388eda14cbcSMatt Macy 			    za.za_first_integer));
5389eda14cbcSMatt Macy 		}
5390eda14cbcSMatt Macy 		zap_cursor_fini(&zc);
5391eda14cbcSMatt Macy 	}
5392eda14cbcSMatt Macy }
5393eda14cbcSMatt Macy 
5394eda14cbcSMatt Macy static void
5395eda14cbcSMatt Macy spa_feature_stats_from_cache(spa_t *spa, nvlist_t *features)
5396eda14cbcSMatt Macy {
5397eda14cbcSMatt Macy 	int i;
5398eda14cbcSMatt Macy 
5399eda14cbcSMatt Macy 	for (i = 0; i < SPA_FEATURES; i++) {
5400eda14cbcSMatt Macy 		zfeature_info_t feature = spa_feature_table[i];
5401eda14cbcSMatt Macy 		uint64_t refcount;
5402eda14cbcSMatt Macy 
5403eda14cbcSMatt Macy 		if (feature_get_refcount(spa, &feature, &refcount) != 0)
5404eda14cbcSMatt Macy 			continue;
5405eda14cbcSMatt Macy 
5406eda14cbcSMatt Macy 		VERIFY0(nvlist_add_uint64(features, feature.fi_guid, refcount));
5407eda14cbcSMatt Macy 	}
5408eda14cbcSMatt Macy }
5409eda14cbcSMatt Macy 
5410eda14cbcSMatt Macy /*
5411eda14cbcSMatt Macy  * Store a list of pool features and their reference counts in the
5412eda14cbcSMatt Macy  * config.
5413eda14cbcSMatt Macy  *
5414eda14cbcSMatt Macy  * The first time this is called on a spa, allocate a new nvlist, fetch
5415eda14cbcSMatt Macy  * the pool features and reference counts from disk, then save the list
5416eda14cbcSMatt Macy  * in the spa. In subsequent calls on the same spa use the saved nvlist
5417eda14cbcSMatt Macy  * and refresh its values from the cached reference counts.  This
5418eda14cbcSMatt Macy  * ensures we don't block here on I/O on a suspended pool so 'zpool
5419eda14cbcSMatt Macy  * clear' can resume the pool.
5420eda14cbcSMatt Macy  */
5421eda14cbcSMatt Macy static void
5422eda14cbcSMatt Macy spa_add_feature_stats(spa_t *spa, nvlist_t *config)
5423eda14cbcSMatt Macy {
5424eda14cbcSMatt Macy 	nvlist_t *features;
5425eda14cbcSMatt Macy 
5426eda14cbcSMatt Macy 	ASSERT(spa_config_held(spa, SCL_CONFIG, RW_READER));
5427eda14cbcSMatt Macy 
5428eda14cbcSMatt Macy 	mutex_enter(&spa->spa_feat_stats_lock);
5429eda14cbcSMatt Macy 	features = spa->spa_feat_stats;
5430eda14cbcSMatt Macy 
5431eda14cbcSMatt Macy 	if (features != NULL) {
5432eda14cbcSMatt Macy 		spa_feature_stats_from_cache(spa, features);
5433eda14cbcSMatt Macy 	} else {
5434eda14cbcSMatt Macy 		VERIFY0(nvlist_alloc(&features, NV_UNIQUE_NAME, KM_SLEEP));
5435eda14cbcSMatt Macy 		spa->spa_feat_stats = features;
5436eda14cbcSMatt Macy 		spa_feature_stats_from_disk(spa, features);
5437eda14cbcSMatt Macy 	}
5438eda14cbcSMatt Macy 
5439eda14cbcSMatt Macy 	VERIFY0(nvlist_add_nvlist(config, ZPOOL_CONFIG_FEATURE_STATS,
5440eda14cbcSMatt Macy 	    features));
5441eda14cbcSMatt Macy 
5442eda14cbcSMatt Macy 	mutex_exit(&spa->spa_feat_stats_lock);
5443eda14cbcSMatt Macy }
5444eda14cbcSMatt Macy 
5445eda14cbcSMatt Macy int
5446eda14cbcSMatt Macy spa_get_stats(const char *name, nvlist_t **config,
5447eda14cbcSMatt Macy     char *altroot, size_t buflen)
5448eda14cbcSMatt Macy {
5449eda14cbcSMatt Macy 	int error;
5450eda14cbcSMatt Macy 	spa_t *spa;
5451eda14cbcSMatt Macy 
5452eda14cbcSMatt Macy 	*config = NULL;
5453eda14cbcSMatt Macy 	error = spa_open_common(name, &spa, FTAG, NULL, config);
5454eda14cbcSMatt Macy 
5455eda14cbcSMatt Macy 	if (spa != NULL) {
5456eda14cbcSMatt Macy 		/*
5457eda14cbcSMatt Macy 		 * This still leaves a window of inconsistency where the spares
5458eda14cbcSMatt Macy 		 * or l2cache devices could change and the config would be
5459eda14cbcSMatt Macy 		 * self-inconsistent.
5460eda14cbcSMatt Macy 		 */
5461eda14cbcSMatt Macy 		spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
5462eda14cbcSMatt Macy 
5463eda14cbcSMatt Macy 		if (*config != NULL) {
5464eda14cbcSMatt Macy 			uint64_t loadtimes[2];
5465eda14cbcSMatt Macy 
5466eda14cbcSMatt Macy 			loadtimes[0] = spa->spa_loaded_ts.tv_sec;
5467eda14cbcSMatt Macy 			loadtimes[1] = spa->spa_loaded_ts.tv_nsec;
5468eda14cbcSMatt Macy 			VERIFY(nvlist_add_uint64_array(*config,
5469eda14cbcSMatt Macy 			    ZPOOL_CONFIG_LOADED_TIME, loadtimes, 2) == 0);
5470eda14cbcSMatt Macy 
5471eda14cbcSMatt Macy 			VERIFY(nvlist_add_uint64(*config,
5472eda14cbcSMatt Macy 			    ZPOOL_CONFIG_ERRCOUNT,
5473eda14cbcSMatt Macy 			    spa_get_errlog_size(spa)) == 0);
5474eda14cbcSMatt Macy 
5475eda14cbcSMatt Macy 			if (spa_suspended(spa)) {
5476eda14cbcSMatt Macy 				VERIFY(nvlist_add_uint64(*config,
5477eda14cbcSMatt Macy 				    ZPOOL_CONFIG_SUSPENDED,
5478eda14cbcSMatt Macy 				    spa->spa_failmode) == 0);
5479eda14cbcSMatt Macy 				VERIFY(nvlist_add_uint64(*config,
5480eda14cbcSMatt Macy 				    ZPOOL_CONFIG_SUSPENDED_REASON,
5481eda14cbcSMatt Macy 				    spa->spa_suspended) == 0);
5482eda14cbcSMatt Macy 			}
5483eda14cbcSMatt Macy 
5484eda14cbcSMatt Macy 			spa_add_spares(spa, *config);
5485eda14cbcSMatt Macy 			spa_add_l2cache(spa, *config);
5486eda14cbcSMatt Macy 			spa_add_feature_stats(spa, *config);
5487eda14cbcSMatt Macy 		}
5488eda14cbcSMatt Macy 	}
5489eda14cbcSMatt Macy 
5490eda14cbcSMatt Macy 	/*
5491eda14cbcSMatt Macy 	 * We want to get the alternate root even for faulted pools, so we cheat
5492eda14cbcSMatt Macy 	 * and call spa_lookup() directly.
5493eda14cbcSMatt Macy 	 */
5494eda14cbcSMatt Macy 	if (altroot) {
5495eda14cbcSMatt Macy 		if (spa == NULL) {
5496eda14cbcSMatt Macy 			mutex_enter(&spa_namespace_lock);
5497eda14cbcSMatt Macy 			spa = spa_lookup(name);
5498eda14cbcSMatt Macy 			if (spa)
5499eda14cbcSMatt Macy 				spa_altroot(spa, altroot, buflen);
5500eda14cbcSMatt Macy 			else
5501eda14cbcSMatt Macy 				altroot[0] = '\0';
5502eda14cbcSMatt Macy 			spa = NULL;
5503eda14cbcSMatt Macy 			mutex_exit(&spa_namespace_lock);
5504eda14cbcSMatt Macy 		} else {
5505eda14cbcSMatt Macy 			spa_altroot(spa, altroot, buflen);
5506eda14cbcSMatt Macy 		}
5507eda14cbcSMatt Macy 	}
5508eda14cbcSMatt Macy 
5509eda14cbcSMatt Macy 	if (spa != NULL) {
5510eda14cbcSMatt Macy 		spa_config_exit(spa, SCL_CONFIG, FTAG);
5511eda14cbcSMatt Macy 		spa_close(spa, FTAG);
5512eda14cbcSMatt Macy 	}
5513eda14cbcSMatt Macy 
5514eda14cbcSMatt Macy 	return (error);
5515eda14cbcSMatt Macy }
5516eda14cbcSMatt Macy 
5517eda14cbcSMatt Macy /*
5518eda14cbcSMatt Macy  * Validate that the auxiliary device array is well formed.  We must have an
5519eda14cbcSMatt Macy  * array of nvlists, each which describes a valid leaf vdev.  If this is an
5520eda14cbcSMatt Macy  * array of nvlists, each of which describes a valid leaf vdev.  If this is
5521eda14cbcSMatt Macy  * an import (mode is VDEV_ALLOC_SPARE), then we allow corrupted spares to be
5522eda14cbcSMatt Macy  */
5523eda14cbcSMatt Macy static int
5524eda14cbcSMatt Macy spa_validate_aux_devs(spa_t *spa, nvlist_t *nvroot, uint64_t crtxg, int mode,
5525eda14cbcSMatt Macy     spa_aux_vdev_t *sav, const char *config, uint64_t version,
5526eda14cbcSMatt Macy     vdev_labeltype_t label)
5527eda14cbcSMatt Macy {
5528eda14cbcSMatt Macy 	nvlist_t **dev;
5529eda14cbcSMatt Macy 	uint_t i, ndev;
5530eda14cbcSMatt Macy 	vdev_t *vd;
5531eda14cbcSMatt Macy 	int error;
5532eda14cbcSMatt Macy 
5533eda14cbcSMatt Macy 	ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
5534eda14cbcSMatt Macy 
5535eda14cbcSMatt Macy 	/*
5536eda14cbcSMatt Macy 	 * It's acceptable to have no devs specified.
5537eda14cbcSMatt Macy 	 */
5538eda14cbcSMatt Macy 	if (nvlist_lookup_nvlist_array(nvroot, config, &dev, &ndev) != 0)
5539eda14cbcSMatt Macy 		return (0);
5540eda14cbcSMatt Macy 
5541eda14cbcSMatt Macy 	if (ndev == 0)
5542eda14cbcSMatt Macy 		return (SET_ERROR(EINVAL));
5543eda14cbcSMatt Macy 
5544eda14cbcSMatt Macy 	/*
5545eda14cbcSMatt Macy 	 * Make sure the pool is formatted with a version that supports this
5546eda14cbcSMatt Macy 	 * device type.
5547eda14cbcSMatt Macy 	 */
5548eda14cbcSMatt Macy 	if (spa_version(spa) < version)
5549eda14cbcSMatt Macy 		return (SET_ERROR(ENOTSUP));
5550eda14cbcSMatt Macy 
5551eda14cbcSMatt Macy 	/*
5552eda14cbcSMatt Macy 	 * Set the pending device list so we correctly handle device in-use
5553eda14cbcSMatt Macy 	 * checking.
5554eda14cbcSMatt Macy 	 */
5555eda14cbcSMatt Macy 	sav->sav_pending = dev;
5556eda14cbcSMatt Macy 	sav->sav_npending = ndev;
5557eda14cbcSMatt Macy 
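	/*
	 * Parse and open each aux device and initialize its label, recording
	 * the resulting GUID.  For spares and l2cache devices an open or
	 * label failure is tolerated as long as the nvlist is well-formed.
	 */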
5558eda14cbcSMatt Macy 	for (i = 0; i < ndev; i++) {
5559eda14cbcSMatt Macy 		if ((error = spa_config_parse(spa, &vd, dev[i], NULL, 0,
5560eda14cbcSMatt Macy 		    mode)) != 0)
5561eda14cbcSMatt Macy 			goto out;
5562eda14cbcSMatt Macy 
5563eda14cbcSMatt Macy 		if (!vd->vdev_ops->vdev_op_leaf) {
5564eda14cbcSMatt Macy 			vdev_free(vd);
5565eda14cbcSMatt Macy 			error = SET_ERROR(EINVAL);
5566eda14cbcSMatt Macy 			goto out;
5567eda14cbcSMatt Macy 		}
5568eda14cbcSMatt Macy 
5569eda14cbcSMatt Macy 		vd->vdev_top = vd;
5570eda14cbcSMatt Macy 
5571eda14cbcSMatt Macy 		if ((error = vdev_open(vd)) == 0 &&
5572eda14cbcSMatt Macy 		    (error = vdev_label_init(vd, crtxg, label)) == 0) {
5573eda14cbcSMatt Macy 			VERIFY(nvlist_add_uint64(dev[i], ZPOOL_CONFIG_GUID,
5574eda14cbcSMatt Macy 			    vd->vdev_guid) == 0);
5575eda14cbcSMatt Macy 		}
5576eda14cbcSMatt Macy 
5577eda14cbcSMatt Macy 		vdev_free(vd);
5578eda14cbcSMatt Macy 
5579eda14cbcSMatt Macy 		if (error &&
5580eda14cbcSMatt Macy 		    (mode != VDEV_ALLOC_SPARE && mode != VDEV_ALLOC_L2CACHE))
5581eda14cbcSMatt Macy 			goto out;
5582eda14cbcSMatt Macy 		else
5583eda14cbcSMatt Macy 			error = 0;
5584eda14cbcSMatt Macy 	}
5585eda14cbcSMatt Macy 
5586eda14cbcSMatt Macy out:
5587eda14cbcSMatt Macy 	sav->sav_pending = NULL;
5588eda14cbcSMatt Macy 	sav->sav_npending = 0;
5589eda14cbcSMatt Macy 	return (error);
5590eda14cbcSMatt Macy }
5591eda14cbcSMatt Macy 
5592eda14cbcSMatt Macy static int
5593eda14cbcSMatt Macy spa_validate_aux(spa_t *spa, nvlist_t *nvroot, uint64_t crtxg, int mode)
5594eda14cbcSMatt Macy {
5595eda14cbcSMatt Macy 	int error;
5596eda14cbcSMatt Macy 
5597eda14cbcSMatt Macy 	ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
5598eda14cbcSMatt Macy 
5599eda14cbcSMatt Macy 	if ((error = spa_validate_aux_devs(spa, nvroot, crtxg, mode,
5600eda14cbcSMatt Macy 	    &spa->spa_spares, ZPOOL_CONFIG_SPARES, SPA_VERSION_SPARES,
5601eda14cbcSMatt Macy 	    VDEV_LABEL_SPARE)) != 0) {
5602eda14cbcSMatt Macy 		return (error);
5603eda14cbcSMatt Macy 	}
5604eda14cbcSMatt Macy 
5605eda14cbcSMatt Macy 	return (spa_validate_aux_devs(spa, nvroot, crtxg, mode,
5606eda14cbcSMatt Macy 	    &spa->spa_l2cache, ZPOOL_CONFIG_L2CACHE, SPA_VERSION_L2CACHE,
5607eda14cbcSMatt Macy 	    VDEV_LABEL_L2CACHE));
5608eda14cbcSMatt Macy }
5609eda14cbcSMatt Macy 
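/*
 * Illustrative sketch: the shape of the auxiliary device entries that
 * spa_validate_aux_devs() expects inside 'nvroot'.  Each entry under
 * ZPOOL_CONFIG_SPARES or ZPOOL_CONFIG_L2CACHE must be a leaf vdev nvlist;
 * the device path below is a hypothetical example.
 *
 *	nvlist_t *spare = fnvlist_alloc();
 *	fnvlist_add_string(spare, ZPOOL_CONFIG_TYPE, VDEV_TYPE_DISK);
 *	fnvlist_add_string(spare, ZPOOL_CONFIG_PATH, "/dev/sdX");
 *	fnvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES, &spare, 1);
 *	fnvlist_free(spare);
 *
 * spa_validate_aux() then walks this array, opening and labeling each leaf.
 */
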
5610eda14cbcSMatt Macy static void
5611eda14cbcSMatt Macy spa_set_aux_vdevs(spa_aux_vdev_t *sav, nvlist_t **devs, int ndevs,
5612eda14cbcSMatt Macy     const char *config)
5613eda14cbcSMatt Macy {
5614eda14cbcSMatt Macy 	int i;
5615eda14cbcSMatt Macy 
5616eda14cbcSMatt Macy 	if (sav->sav_config != NULL) {
5617eda14cbcSMatt Macy 		nvlist_t **olddevs;
5618eda14cbcSMatt Macy 		uint_t oldndevs;
5619eda14cbcSMatt Macy 		nvlist_t **newdevs;
5620eda14cbcSMatt Macy 
5621eda14cbcSMatt Macy 		/*
5622eda14cbcSMatt Macy 		 * Generate new dev list by concatenating with the
5623eda14cbcSMatt Macy 		 * current dev list.
5624eda14cbcSMatt Macy 		 */
5625eda14cbcSMatt Macy 		VERIFY(nvlist_lookup_nvlist_array(sav->sav_config, config,
5626eda14cbcSMatt Macy 		    &olddevs, &oldndevs) == 0);
5627eda14cbcSMatt Macy 
5628eda14cbcSMatt Macy 		newdevs = kmem_alloc(sizeof (void *) *
5629eda14cbcSMatt Macy 		    (ndevs + oldndevs), KM_SLEEP);
5630eda14cbcSMatt Macy 		for (i = 0; i < oldndevs; i++)
5631eda14cbcSMatt Macy 			VERIFY(nvlist_dup(olddevs[i], &newdevs[i],
5632eda14cbcSMatt Macy 			    KM_SLEEP) == 0);
5633eda14cbcSMatt Macy 		for (i = 0; i < ndevs; i++)
5634eda14cbcSMatt Macy 			VERIFY(nvlist_dup(devs[i], &newdevs[i + oldndevs],
5635eda14cbcSMatt Macy 			    KM_SLEEP) == 0);
5636eda14cbcSMatt Macy 
5637eda14cbcSMatt Macy 		VERIFY(nvlist_remove(sav->sav_config, config,
5638eda14cbcSMatt Macy 		    DATA_TYPE_NVLIST_ARRAY) == 0);
5639eda14cbcSMatt Macy 
5640eda14cbcSMatt Macy 		VERIFY(nvlist_add_nvlist_array(sav->sav_config,
5641eda14cbcSMatt Macy 		    config, newdevs, ndevs + oldndevs) == 0);
5642eda14cbcSMatt Macy 		for (i = 0; i < oldndevs + ndevs; i++)
5643eda14cbcSMatt Macy 			nvlist_free(newdevs[i]);
5644eda14cbcSMatt Macy 		kmem_free(newdevs, (oldndevs + ndevs) * sizeof (void *));
5645eda14cbcSMatt Macy 	} else {
5646eda14cbcSMatt Macy 		/*
5647eda14cbcSMatt Macy 		 * Generate a new dev list.
5648eda14cbcSMatt Macy 		 */
5649eda14cbcSMatt Macy 		VERIFY(nvlist_alloc(&sav->sav_config, NV_UNIQUE_NAME,
5650eda14cbcSMatt Macy 		    KM_SLEEP) == 0);
5651eda14cbcSMatt Macy 		VERIFY(nvlist_add_nvlist_array(sav->sav_config, config,
5652eda14cbcSMatt Macy 		    devs, ndevs) == 0);
5653eda14cbcSMatt Macy 	}
5654eda14cbcSMatt Macy }
5655eda14cbcSMatt Macy 
5656eda14cbcSMatt Macy /*
5657eda14cbcSMatt Macy  * Stop and drop level 2 ARC devices
5658eda14cbcSMatt Macy  */
5659eda14cbcSMatt Macy void
5660eda14cbcSMatt Macy spa_l2cache_drop(spa_t *spa)
5661eda14cbcSMatt Macy {
5662eda14cbcSMatt Macy 	vdev_t *vd;
5663eda14cbcSMatt Macy 	int i;
5664eda14cbcSMatt Macy 	spa_aux_vdev_t *sav = &spa->spa_l2cache;
5665eda14cbcSMatt Macy 
5666eda14cbcSMatt Macy 	for (i = 0; i < sav->sav_count; i++) {
5667eda14cbcSMatt Macy 		uint64_t pool;
5668eda14cbcSMatt Macy 
5669eda14cbcSMatt Macy 		vd = sav->sav_vdevs[i];
5670eda14cbcSMatt Macy 		ASSERT(vd != NULL);
5671eda14cbcSMatt Macy 
5672eda14cbcSMatt Macy 		if (spa_l2cache_exists(vd->vdev_guid, &pool) &&
5673eda14cbcSMatt Macy 		    pool != 0ULL && l2arc_vdev_present(vd))
5674eda14cbcSMatt Macy 			l2arc_remove_vdev(vd);
5675eda14cbcSMatt Macy 	}
5676eda14cbcSMatt Macy }
5677eda14cbcSMatt Macy 
5678eda14cbcSMatt Macy /*
5679eda14cbcSMatt Macy  * Verify encryption parameters for spa creation. If we are encrypting, we must
5680eda14cbcSMatt Macy  * have the encryption feature flag enabled.
5681eda14cbcSMatt Macy  */
5682eda14cbcSMatt Macy static int
5683eda14cbcSMatt Macy spa_create_check_encryption_params(dsl_crypto_params_t *dcp,
5684eda14cbcSMatt Macy     boolean_t has_encryption)
5685eda14cbcSMatt Macy {
5686eda14cbcSMatt Macy 	if (dcp->cp_crypt != ZIO_CRYPT_OFF &&
5687eda14cbcSMatt Macy 	    dcp->cp_crypt != ZIO_CRYPT_INHERIT &&
5688eda14cbcSMatt Macy 	    !has_encryption)
5689eda14cbcSMatt Macy 		return (SET_ERROR(ENOTSUP));
5690eda14cbcSMatt Macy 
5691eda14cbcSMatt Macy 	return (dmu_objset_create_crypt_check(NULL, dcp, NULL));
5692eda14cbcSMatt Macy }
5693eda14cbcSMatt Macy 
5694eda14cbcSMatt Macy /*
5695eda14cbcSMatt Macy  * Pool Creation
5696eda14cbcSMatt Macy  */
5697eda14cbcSMatt Macy int
5698eda14cbcSMatt Macy spa_create(const char *pool, nvlist_t *nvroot, nvlist_t *props,
5699eda14cbcSMatt Macy     nvlist_t *zplprops, dsl_crypto_params_t *dcp)
5700eda14cbcSMatt Macy {
5701eda14cbcSMatt Macy 	spa_t *spa;
5702eda14cbcSMatt Macy 	char *altroot = NULL;
5703eda14cbcSMatt Macy 	vdev_t *rvd;
5704eda14cbcSMatt Macy 	dsl_pool_t *dp;
5705eda14cbcSMatt Macy 	dmu_tx_t *tx;
5706eda14cbcSMatt Macy 	int error = 0;
5707eda14cbcSMatt Macy 	uint64_t txg = TXG_INITIAL;
5708eda14cbcSMatt Macy 	nvlist_t **spares, **l2cache;
5709eda14cbcSMatt Macy 	uint_t nspares, nl2cache;
57107877fdebSMatt Macy 	uint64_t version, obj, ndraid = 0;
5711eda14cbcSMatt Macy 	boolean_t has_features;
5712eda14cbcSMatt Macy 	boolean_t has_encryption;
5713eda14cbcSMatt Macy 	boolean_t has_allocclass;
5714eda14cbcSMatt Macy 	spa_feature_t feat;
5715eda14cbcSMatt Macy 	char *feat_name;
5716eda14cbcSMatt Macy 	char *poolname;
5717eda14cbcSMatt Macy 	nvlist_t *nvl;
5718eda14cbcSMatt Macy 
5719eda14cbcSMatt Macy 	if (props == NULL ||
5720eda14cbcSMatt Macy 	    nvlist_lookup_string(props, "tname", &poolname) != 0)
5721eda14cbcSMatt Macy 		poolname = (char *)pool;
5722eda14cbcSMatt Macy 
5723eda14cbcSMatt Macy 	/*
5724eda14cbcSMatt Macy 	 * If this pool already exists, return failure.
5725eda14cbcSMatt Macy 	 */
5726eda14cbcSMatt Macy 	mutex_enter(&spa_namespace_lock);
5727eda14cbcSMatt Macy 	if (spa_lookup(poolname) != NULL) {
5728eda14cbcSMatt Macy 		mutex_exit(&spa_namespace_lock);
5729eda14cbcSMatt Macy 		return (SET_ERROR(EEXIST));
5730eda14cbcSMatt Macy 	}
5731eda14cbcSMatt Macy 
5732eda14cbcSMatt Macy 	/*
5733eda14cbcSMatt Macy 	 * Allocate a new spa_t structure.
5734eda14cbcSMatt Macy 	 */
5735eda14cbcSMatt Macy 	nvl = fnvlist_alloc();
5736eda14cbcSMatt Macy 	fnvlist_add_string(nvl, ZPOOL_CONFIG_POOL_NAME, pool);
5737eda14cbcSMatt Macy 	(void) nvlist_lookup_string(props,
5738eda14cbcSMatt Macy 	    zpool_prop_to_name(ZPOOL_PROP_ALTROOT), &altroot);
5739eda14cbcSMatt Macy 	spa = spa_add(poolname, nvl, altroot);
5740eda14cbcSMatt Macy 	fnvlist_free(nvl);
5741eda14cbcSMatt Macy 	spa_activate(spa, spa_mode_global);
5742eda14cbcSMatt Macy 
5743eda14cbcSMatt Macy 	if (props && (error = spa_prop_validate(spa, props))) {
5744eda14cbcSMatt Macy 		spa_deactivate(spa);
5745eda14cbcSMatt Macy 		spa_remove(spa);
5746eda14cbcSMatt Macy 		mutex_exit(&spa_namespace_lock);
5747eda14cbcSMatt Macy 		return (error);
5748eda14cbcSMatt Macy 	}
5749eda14cbcSMatt Macy 
5750eda14cbcSMatt Macy 	/*
5751eda14cbcSMatt Macy 	 * Temporary pool names should never be written to disk.
5752eda14cbcSMatt Macy 	 */
5753eda14cbcSMatt Macy 	if (poolname != pool)
5754eda14cbcSMatt Macy 		spa->spa_import_flags |= ZFS_IMPORT_TEMP_NAME;
5755eda14cbcSMatt Macy 
5756eda14cbcSMatt Macy 	has_features = B_FALSE;
5757eda14cbcSMatt Macy 	has_encryption = B_FALSE;
5758eda14cbcSMatt Macy 	has_allocclass = B_FALSE;
5759eda14cbcSMatt Macy 	for (nvpair_t *elem = nvlist_next_nvpair(props, NULL);
5760eda14cbcSMatt Macy 	    elem != NULL; elem = nvlist_next_nvpair(props, elem)) {
5761eda14cbcSMatt Macy 		if (zpool_prop_feature(nvpair_name(elem))) {
5762eda14cbcSMatt Macy 			has_features = B_TRUE;
5763eda14cbcSMatt Macy 
5764eda14cbcSMatt Macy 			feat_name = strchr(nvpair_name(elem), '@') + 1;
5765eda14cbcSMatt Macy 			VERIFY0(zfeature_lookup_name(feat_name, &feat));
5766eda14cbcSMatt Macy 			if (feat == SPA_FEATURE_ENCRYPTION)
5767eda14cbcSMatt Macy 				has_encryption = B_TRUE;
5768eda14cbcSMatt Macy 			if (feat == SPA_FEATURE_ALLOCATION_CLASSES)
5769eda14cbcSMatt Macy 				has_allocclass = B_TRUE;
5770eda14cbcSMatt Macy 		}
5771eda14cbcSMatt Macy 	}
5772eda14cbcSMatt Macy 
5773eda14cbcSMatt Macy 	/* verify encryption params, if they were provided */
5774eda14cbcSMatt Macy 	if (dcp != NULL) {
5775eda14cbcSMatt Macy 		error = spa_create_check_encryption_params(dcp, has_encryption);
5776eda14cbcSMatt Macy 		if (error != 0) {
5777eda14cbcSMatt Macy 			spa_deactivate(spa);
5778eda14cbcSMatt Macy 			spa_remove(spa);
5779eda14cbcSMatt Macy 			mutex_exit(&spa_namespace_lock);
5780eda14cbcSMatt Macy 			return (error);
5781eda14cbcSMatt Macy 		}
5782eda14cbcSMatt Macy 	}
5783eda14cbcSMatt Macy 	if (!has_allocclass && zfs_special_devs(nvroot, NULL)) {
5784eda14cbcSMatt Macy 		spa_deactivate(spa);
5785eda14cbcSMatt Macy 		spa_remove(spa);
5786eda14cbcSMatt Macy 		mutex_exit(&spa_namespace_lock);
5787eda14cbcSMatt Macy 		return (ENOTSUP);
5788eda14cbcSMatt Macy 	}
5789eda14cbcSMatt Macy 
5790eda14cbcSMatt Macy 	if (has_features || nvlist_lookup_uint64(props,
5791eda14cbcSMatt Macy 	    zpool_prop_to_name(ZPOOL_PROP_VERSION), &version) != 0) {
5792eda14cbcSMatt Macy 		version = SPA_VERSION;
5793eda14cbcSMatt Macy 	}
5794eda14cbcSMatt Macy 	ASSERT(SPA_VERSION_IS_SUPPORTED(version));
5795eda14cbcSMatt Macy 
5796eda14cbcSMatt Macy 	spa->spa_first_txg = txg;
5797eda14cbcSMatt Macy 	spa->spa_uberblock.ub_txg = txg - 1;
5798eda14cbcSMatt Macy 	spa->spa_uberblock.ub_version = version;
5799eda14cbcSMatt Macy 	spa->spa_ubsync = spa->spa_uberblock;
5800eda14cbcSMatt Macy 	spa->spa_load_state = SPA_LOAD_CREATE;
5801eda14cbcSMatt Macy 	spa->spa_removing_phys.sr_state = DSS_NONE;
5802eda14cbcSMatt Macy 	spa->spa_removing_phys.sr_removing_vdev = -1;
5803eda14cbcSMatt Macy 	spa->spa_removing_phys.sr_prev_indirect_vdev = -1;
5804eda14cbcSMatt Macy 	spa->spa_indirect_vdevs_loaded = B_TRUE;
5805eda14cbcSMatt Macy 
5806eda14cbcSMatt Macy 	/*
5807eda14cbcSMatt Macy 	 * Create "The Godfather" zio to hold all async IOs
5808eda14cbcSMatt Macy 	 */
5809eda14cbcSMatt Macy 	spa->spa_async_zio_root = kmem_alloc(max_ncpus * sizeof (void *),
5810eda14cbcSMatt Macy 	    KM_SLEEP);
5811eda14cbcSMatt Macy 	for (int i = 0; i < max_ncpus; i++) {
5812eda14cbcSMatt Macy 		spa->spa_async_zio_root[i] = zio_root(spa, NULL, NULL,
5813eda14cbcSMatt Macy 		    ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE |
5814eda14cbcSMatt Macy 		    ZIO_FLAG_GODFATHER);
5815eda14cbcSMatt Macy 	}
5816eda14cbcSMatt Macy 
5817eda14cbcSMatt Macy 	/*
5818eda14cbcSMatt Macy 	 * Create the root vdev.
5819eda14cbcSMatt Macy 	 */
5820eda14cbcSMatt Macy 	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
5821eda14cbcSMatt Macy 
5822eda14cbcSMatt Macy 	error = spa_config_parse(spa, &rvd, nvroot, NULL, 0, VDEV_ALLOC_ADD);
5823eda14cbcSMatt Macy 
5824eda14cbcSMatt Macy 	ASSERT(error != 0 || rvd != NULL);
5825eda14cbcSMatt Macy 	ASSERT(error != 0 || spa->spa_root_vdev == rvd);
5826eda14cbcSMatt Macy 
5827eda14cbcSMatt Macy 	if (error == 0 && !zfs_allocatable_devs(nvroot))
5828eda14cbcSMatt Macy 		error = SET_ERROR(EINVAL);
5829eda14cbcSMatt Macy 
5830eda14cbcSMatt Macy 	if (error == 0 &&
5831eda14cbcSMatt Macy 	    (error = vdev_create(rvd, txg, B_FALSE)) == 0 &&
58327877fdebSMatt Macy 	    (error = vdev_draid_spare_create(nvroot, rvd, &ndraid, 0)) == 0 &&
58337877fdebSMatt Macy 	    (error = spa_validate_aux(spa, nvroot, txg, VDEV_ALLOC_ADD)) == 0) {
5834eda14cbcSMatt Macy 		/*
5835eda14cbcSMatt Macy 		 * Instantiate the metaslab groups (this will dirty the vdevs);
5836eda14cbcSMatt Macy 		 * we can no longer error exit past this point.
5837eda14cbcSMatt Macy 		 */
5838eda14cbcSMatt Macy 		for (int c = 0; error == 0 && c < rvd->vdev_children; c++) {
5839eda14cbcSMatt Macy 			vdev_t *vd = rvd->vdev_child[c];
5840eda14cbcSMatt Macy 
5841eda14cbcSMatt Macy 			vdev_metaslab_set_size(vd);
5842eda14cbcSMatt Macy 			vdev_expand(vd, txg);
5843eda14cbcSMatt Macy 		}
5844eda14cbcSMatt Macy 	}
5845eda14cbcSMatt Macy 
5846eda14cbcSMatt Macy 	spa_config_exit(spa, SCL_ALL, FTAG);
5847eda14cbcSMatt Macy 
5848eda14cbcSMatt Macy 	if (error != 0) {
5849eda14cbcSMatt Macy 		spa_unload(spa);
5850eda14cbcSMatt Macy 		spa_deactivate(spa);
5851eda14cbcSMatt Macy 		spa_remove(spa);
5852eda14cbcSMatt Macy 		mutex_exit(&spa_namespace_lock);
5853eda14cbcSMatt Macy 		return (error);
5854eda14cbcSMatt Macy 	}
5855eda14cbcSMatt Macy 
5856eda14cbcSMatt Macy 	/*
5857eda14cbcSMatt Macy 	 * Get the list of spares, if specified.
5858eda14cbcSMatt Macy 	 */
5859eda14cbcSMatt Macy 	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
5860eda14cbcSMatt Macy 	    &spares, &nspares) == 0) {
5861eda14cbcSMatt Macy 		VERIFY(nvlist_alloc(&spa->spa_spares.sav_config, NV_UNIQUE_NAME,
5862eda14cbcSMatt Macy 		    KM_SLEEP) == 0);
5863eda14cbcSMatt Macy 		VERIFY(nvlist_add_nvlist_array(spa->spa_spares.sav_config,
5864eda14cbcSMatt Macy 		    ZPOOL_CONFIG_SPARES, spares, nspares) == 0);
5865eda14cbcSMatt Macy 		spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
5866eda14cbcSMatt Macy 		spa_load_spares(spa);
5867eda14cbcSMatt Macy 		spa_config_exit(spa, SCL_ALL, FTAG);
5868eda14cbcSMatt Macy 		spa->spa_spares.sav_sync = B_TRUE;
5869eda14cbcSMatt Macy 	}
5870eda14cbcSMatt Macy 
5871eda14cbcSMatt Macy 	/*
5872eda14cbcSMatt Macy 	 * Get the list of level 2 cache devices, if specified.
5873eda14cbcSMatt Macy 	 */
5874eda14cbcSMatt Macy 	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
5875eda14cbcSMatt Macy 	    &l2cache, &nl2cache) == 0) {
5876eda14cbcSMatt Macy 		VERIFY(nvlist_alloc(&spa->spa_l2cache.sav_config,
5877eda14cbcSMatt Macy 		    NV_UNIQUE_NAME, KM_SLEEP) == 0);
5878eda14cbcSMatt Macy 		VERIFY(nvlist_add_nvlist_array(spa->spa_l2cache.sav_config,
5879eda14cbcSMatt Macy 		    ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache) == 0);
5880eda14cbcSMatt Macy 		spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
5881eda14cbcSMatt Macy 		spa_load_l2cache(spa);
5882eda14cbcSMatt Macy 		spa_config_exit(spa, SCL_ALL, FTAG);
5883eda14cbcSMatt Macy 		spa->spa_l2cache.sav_sync = B_TRUE;
5884eda14cbcSMatt Macy 	}
5885eda14cbcSMatt Macy 
5886eda14cbcSMatt Macy 	spa->spa_is_initializing = B_TRUE;
5887eda14cbcSMatt Macy 	spa->spa_dsl_pool = dp = dsl_pool_create(spa, zplprops, dcp, txg);
5888eda14cbcSMatt Macy 	spa->spa_is_initializing = B_FALSE;
5889eda14cbcSMatt Macy 
5890eda14cbcSMatt Macy 	/*
5891eda14cbcSMatt Macy 	 * Create DDTs (dedup tables).
5892eda14cbcSMatt Macy 	 */
5893eda14cbcSMatt Macy 	ddt_create(spa);
5894eda14cbcSMatt Macy 
5895eda14cbcSMatt Macy 	spa_update_dspace(spa);
5896eda14cbcSMatt Macy 
5897eda14cbcSMatt Macy 	tx = dmu_tx_create_assigned(dp, txg);
5898eda14cbcSMatt Macy 
5899eda14cbcSMatt Macy 	/*
5900eda14cbcSMatt Macy 	 * Create the pool's history object.
5901eda14cbcSMatt Macy 	 */
5902eda14cbcSMatt Macy 	if (version >= SPA_VERSION_ZPOOL_HISTORY && !spa->spa_history)
5903eda14cbcSMatt Macy 		spa_history_create_obj(spa, tx);
5904eda14cbcSMatt Macy 
5905eda14cbcSMatt Macy 	spa_event_notify(spa, NULL, NULL, ESC_ZFS_POOL_CREATE);
5906eda14cbcSMatt Macy 	spa_history_log_version(spa, "create", tx);
5907eda14cbcSMatt Macy 
5908eda14cbcSMatt Macy 	/*
5909eda14cbcSMatt Macy 	 * Create the pool config object.
5910eda14cbcSMatt Macy 	 */
5911eda14cbcSMatt Macy 	spa->spa_config_object = dmu_object_alloc(spa->spa_meta_objset,
5912eda14cbcSMatt Macy 	    DMU_OT_PACKED_NVLIST, SPA_CONFIG_BLOCKSIZE,
5913eda14cbcSMatt Macy 	    DMU_OT_PACKED_NVLIST_SIZE, sizeof (uint64_t), tx);
5914eda14cbcSMatt Macy 
5915eda14cbcSMatt Macy 	if (zap_add(spa->spa_meta_objset,
5916eda14cbcSMatt Macy 	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_CONFIG,
5917eda14cbcSMatt Macy 	    sizeof (uint64_t), 1, &spa->spa_config_object, tx) != 0) {
5918eda14cbcSMatt Macy 		cmn_err(CE_PANIC, "failed to add pool config");
5919eda14cbcSMatt Macy 	}
5920eda14cbcSMatt Macy 
5921eda14cbcSMatt Macy 	if (zap_add(spa->spa_meta_objset,
5922eda14cbcSMatt Macy 	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_CREATION_VERSION,
5923eda14cbcSMatt Macy 	    sizeof (uint64_t), 1, &version, tx) != 0) {
5924eda14cbcSMatt Macy 		cmn_err(CE_PANIC, "failed to add pool version");
5925eda14cbcSMatt Macy 	}
5926eda14cbcSMatt Macy 
5927eda14cbcSMatt Macy 	/* Newly created pools with the right version are always deflated. */
5928eda14cbcSMatt Macy 	if (version >= SPA_VERSION_RAIDZ_DEFLATE) {
5929eda14cbcSMatt Macy 		spa->spa_deflate = TRUE;
5930eda14cbcSMatt Macy 		if (zap_add(spa->spa_meta_objset,
5931eda14cbcSMatt Macy 		    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_DEFLATE,
5932eda14cbcSMatt Macy 		    sizeof (uint64_t), 1, &spa->spa_deflate, tx) != 0) {
5933eda14cbcSMatt Macy 			cmn_err(CE_PANIC, "failed to add deflate");
5934eda14cbcSMatt Macy 		}
5935eda14cbcSMatt Macy 	}
5936eda14cbcSMatt Macy 
5937eda14cbcSMatt Macy 	/*
5938eda14cbcSMatt Macy 	 * Create the deferred-free bpobj.  Turn off compression
5939eda14cbcSMatt Macy 	 * because sync-to-convergence takes longer if the blocksize
5940eda14cbcSMatt Macy 	 * keeps changing.
5941eda14cbcSMatt Macy 	 */
5942eda14cbcSMatt Macy 	obj = bpobj_alloc(spa->spa_meta_objset, 1 << 14, tx);
5943eda14cbcSMatt Macy 	dmu_object_set_compress(spa->spa_meta_objset, obj,
5944eda14cbcSMatt Macy 	    ZIO_COMPRESS_OFF, tx);
5945eda14cbcSMatt Macy 	if (zap_add(spa->spa_meta_objset,
5946eda14cbcSMatt Macy 	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_SYNC_BPOBJ,
5947eda14cbcSMatt Macy 	    sizeof (uint64_t), 1, &obj, tx) != 0) {
5948eda14cbcSMatt Macy 		cmn_err(CE_PANIC, "failed to add bpobj");
5949eda14cbcSMatt Macy 	}
5950eda14cbcSMatt Macy 	VERIFY3U(0, ==, bpobj_open(&spa->spa_deferred_bpobj,
5951eda14cbcSMatt Macy 	    spa->spa_meta_objset, obj));
5952eda14cbcSMatt Macy 
5953eda14cbcSMatt Macy 	/*
5954eda14cbcSMatt Macy 	 * Generate some random noise for salted checksums to operate on.
5955eda14cbcSMatt Macy 	 */
5956eda14cbcSMatt Macy 	(void) random_get_pseudo_bytes(spa->spa_cksum_salt.zcs_bytes,
5957eda14cbcSMatt Macy 	    sizeof (spa->spa_cksum_salt.zcs_bytes));
5958eda14cbcSMatt Macy 
5959eda14cbcSMatt Macy 	/*
5960eda14cbcSMatt Macy 	 * Set pool properties.
5961eda14cbcSMatt Macy 	 */
5962eda14cbcSMatt Macy 	spa->spa_bootfs = zpool_prop_default_numeric(ZPOOL_PROP_BOOTFS);
5963eda14cbcSMatt Macy 	spa->spa_delegation = zpool_prop_default_numeric(ZPOOL_PROP_DELEGATION);
5964eda14cbcSMatt Macy 	spa->spa_failmode = zpool_prop_default_numeric(ZPOOL_PROP_FAILUREMODE);
5965eda14cbcSMatt Macy 	spa->spa_autoexpand = zpool_prop_default_numeric(ZPOOL_PROP_AUTOEXPAND);
5966eda14cbcSMatt Macy 	spa->spa_multihost = zpool_prop_default_numeric(ZPOOL_PROP_MULTIHOST);
5967eda14cbcSMatt Macy 	spa->spa_autotrim = zpool_prop_default_numeric(ZPOOL_PROP_AUTOTRIM);
5968eda14cbcSMatt Macy 
5969eda14cbcSMatt Macy 	if (props != NULL) {
5970eda14cbcSMatt Macy 		spa_configfile_set(spa, props, B_FALSE);
5971eda14cbcSMatt Macy 		spa_sync_props(props, tx);
5972eda14cbcSMatt Macy 	}
5973eda14cbcSMatt Macy 
59747877fdebSMatt Macy 	for (int i = 0; i < ndraid; i++)
59757877fdebSMatt Macy 		spa_feature_incr(spa, SPA_FEATURE_DRAID, tx);
59767877fdebSMatt Macy 
5977eda14cbcSMatt Macy 	dmu_tx_commit(tx);
5978eda14cbcSMatt Macy 
5979eda14cbcSMatt Macy 	spa->spa_sync_on = B_TRUE;
5980eda14cbcSMatt Macy 	txg_sync_start(dp);
5981eda14cbcSMatt Macy 	mmp_thread_start(spa);
5982eda14cbcSMatt Macy 	txg_wait_synced(dp, txg);
5983eda14cbcSMatt Macy 
5984eda14cbcSMatt Macy 	spa_spawn_aux_threads(spa);
5985eda14cbcSMatt Macy 
5986eda14cbcSMatt Macy 	spa_write_cachefile(spa, B_FALSE, B_TRUE);
5987eda14cbcSMatt Macy 
5988eda14cbcSMatt Macy 	/*
5989eda14cbcSMatt Macy 	 * Don't count references from objsets that are already closed
5990eda14cbcSMatt Macy 	 * and are making their way through the eviction process.
5991eda14cbcSMatt Macy 	 */
5992eda14cbcSMatt Macy 	spa_evicting_os_wait(spa);
5993eda14cbcSMatt Macy 	spa->spa_minref = zfs_refcount_count(&spa->spa_refcount);
5994eda14cbcSMatt Macy 	spa->spa_load_state = SPA_LOAD_NONE;
5995eda14cbcSMatt Macy 
5996eda14cbcSMatt Macy 	mutex_exit(&spa_namespace_lock);
5997eda14cbcSMatt Macy 
5998eda14cbcSMatt Macy 	return (0);
5999eda14cbcSMatt Macy }
6000eda14cbcSMatt Macy 
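/*
 * Illustrative sketch (hypothetical caller, simplified): the minimal nvlists
 * a consumer such as the pool-create ioctl path could hand to spa_create().
 * The pool name, device path and altroot are made-up examples; 'zplprops'
 * and 'dcp' are assumed to have been prepared by that caller.
 *
 *	nvlist_t *disk = fnvlist_alloc();
 *	fnvlist_add_string(disk, ZPOOL_CONFIG_TYPE, VDEV_TYPE_DISK);
 *	fnvlist_add_string(disk, ZPOOL_CONFIG_PATH, "/dev/sdX");
 *
 *	nvlist_t *nvroot = fnvlist_alloc();
 *	fnvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE, VDEV_TYPE_ROOT);
 *	fnvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN, &disk, 1);
 *
 *	nvlist_t *props = fnvlist_alloc();
 *	fnvlist_add_string(props,
 *	    zpool_prop_to_name(ZPOOL_PROP_ALTROOT), "/mnt");
 *
 *	error = spa_create("tank", nvroot, props, zplprops, dcp);
 *
 *	fnvlist_free(props);
 *	fnvlist_free(nvroot);
 *	fnvlist_free(disk);
 */
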
6001eda14cbcSMatt Macy /*
6002eda14cbcSMatt Macy  * Import a non-root pool into the system.
6003eda14cbcSMatt Macy  */
6004eda14cbcSMatt Macy int
6005eda14cbcSMatt Macy spa_import(char *pool, nvlist_t *config, nvlist_t *props, uint64_t flags)
6006eda14cbcSMatt Macy {
6007eda14cbcSMatt Macy 	spa_t *spa;
6008eda14cbcSMatt Macy 	char *altroot = NULL;
6009eda14cbcSMatt Macy 	spa_load_state_t state = SPA_LOAD_IMPORT;
6010eda14cbcSMatt Macy 	zpool_load_policy_t policy;
6011eda14cbcSMatt Macy 	spa_mode_t mode = spa_mode_global;
6012eda14cbcSMatt Macy 	uint64_t readonly = B_FALSE;
6013eda14cbcSMatt Macy 	int error;
6014eda14cbcSMatt Macy 	nvlist_t *nvroot;
6015eda14cbcSMatt Macy 	nvlist_t **spares, **l2cache;
6016eda14cbcSMatt Macy 	uint_t nspares, nl2cache;
6017eda14cbcSMatt Macy 
6018eda14cbcSMatt Macy 	/*
6019eda14cbcSMatt Macy 	 * If a pool with this name exists, return failure.
6020eda14cbcSMatt Macy 	 */
6021eda14cbcSMatt Macy 	mutex_enter(&spa_namespace_lock);
6022eda14cbcSMatt Macy 	if (spa_lookup(pool) != NULL) {
6023eda14cbcSMatt Macy 		mutex_exit(&spa_namespace_lock);
6024eda14cbcSMatt Macy 		return (SET_ERROR(EEXIST));
6025eda14cbcSMatt Macy 	}
6026eda14cbcSMatt Macy 
6027eda14cbcSMatt Macy 	/*
6028eda14cbcSMatt Macy 	 * Create and initialize the spa structure.
6029eda14cbcSMatt Macy 	 */
6030eda14cbcSMatt Macy 	(void) nvlist_lookup_string(props,
6031eda14cbcSMatt Macy 	    zpool_prop_to_name(ZPOOL_PROP_ALTROOT), &altroot);
6032eda14cbcSMatt Macy 	(void) nvlist_lookup_uint64(props,
6033eda14cbcSMatt Macy 	    zpool_prop_to_name(ZPOOL_PROP_READONLY), &readonly);
6034eda14cbcSMatt Macy 	if (readonly)
6035eda14cbcSMatt Macy 		mode = SPA_MODE_READ;
6036eda14cbcSMatt Macy 	spa = spa_add(pool, config, altroot);
6037eda14cbcSMatt Macy 	spa->spa_import_flags = flags;
6038eda14cbcSMatt Macy 
6039eda14cbcSMatt Macy 	/*
6040eda14cbcSMatt Macy 	 * Verbatim import - Take a pool and insert it into the namespace
6041eda14cbcSMatt Macy 	 * as if it had been loaded at boot.
6042eda14cbcSMatt Macy 	 */
6043eda14cbcSMatt Macy 	if (spa->spa_import_flags & ZFS_IMPORT_VERBATIM) {
6044eda14cbcSMatt Macy 		if (props != NULL)
6045eda14cbcSMatt Macy 			spa_configfile_set(spa, props, B_FALSE);
6046eda14cbcSMatt Macy 
6047eda14cbcSMatt Macy 		spa_write_cachefile(spa, B_FALSE, B_TRUE);
6048eda14cbcSMatt Macy 		spa_event_notify(spa, NULL, NULL, ESC_ZFS_POOL_IMPORT);
6049eda14cbcSMatt Macy 		zfs_dbgmsg("spa_import: verbatim import of %s", pool);
6050eda14cbcSMatt Macy 		mutex_exit(&spa_namespace_lock);
6051eda14cbcSMatt Macy 		return (0);
6052eda14cbcSMatt Macy 	}
6053eda14cbcSMatt Macy 
6054eda14cbcSMatt Macy 	spa_activate(spa, mode);
6055eda14cbcSMatt Macy 
6056eda14cbcSMatt Macy 	/*
6057eda14cbcSMatt Macy 	 * Don't start async tasks until we know everything is healthy.
6058eda14cbcSMatt Macy 	 */
6059eda14cbcSMatt Macy 	spa_async_suspend(spa);
6060eda14cbcSMatt Macy 
6061eda14cbcSMatt Macy 	zpool_get_load_policy(config, &policy);
6062eda14cbcSMatt Macy 	if (policy.zlp_rewind & ZPOOL_DO_REWIND)
6063eda14cbcSMatt Macy 		state = SPA_LOAD_RECOVER;
6064eda14cbcSMatt Macy 
6065eda14cbcSMatt Macy 	spa->spa_config_source = SPA_CONFIG_SRC_TRYIMPORT;
6066eda14cbcSMatt Macy 
6067eda14cbcSMatt Macy 	if (state != SPA_LOAD_RECOVER) {
6068eda14cbcSMatt Macy 		spa->spa_last_ubsync_txg = spa->spa_load_txg = 0;
6069eda14cbcSMatt Macy 		zfs_dbgmsg("spa_import: importing %s", pool);
6070eda14cbcSMatt Macy 	} else {
6071eda14cbcSMatt Macy 		zfs_dbgmsg("spa_import: importing %s, max_txg=%lld "
6072eda14cbcSMatt Macy 		    "(RECOVERY MODE)", pool, (longlong_t)policy.zlp_txg);
6073eda14cbcSMatt Macy 	}
6074eda14cbcSMatt Macy 	error = spa_load_best(spa, state, policy.zlp_txg, policy.zlp_rewind);
6075eda14cbcSMatt Macy 
6076eda14cbcSMatt Macy 	/*
6077eda14cbcSMatt Macy 	 * Propagate anything learned while loading the pool and pass it
6078eda14cbcSMatt Macy 	 * back to the caller (i.e. rewind info, missing devices, etc.).
6079eda14cbcSMatt Macy 	 */
6080eda14cbcSMatt Macy 	VERIFY(nvlist_add_nvlist(config, ZPOOL_CONFIG_LOAD_INFO,
6081eda14cbcSMatt Macy 	    spa->spa_load_info) == 0);
6082eda14cbcSMatt Macy 
6083eda14cbcSMatt Macy 	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
6084eda14cbcSMatt Macy 	/*
6085eda14cbcSMatt Macy 	 * Toss any existing sparelist, as it doesn't have any validity
6086eda14cbcSMatt Macy 	 * anymore, and conflicts with spa_has_spare().
6087eda14cbcSMatt Macy 	 */
6088eda14cbcSMatt Macy 	if (spa->spa_spares.sav_config) {
6089eda14cbcSMatt Macy 		nvlist_free(spa->spa_spares.sav_config);
6090eda14cbcSMatt Macy 		spa->spa_spares.sav_config = NULL;
6091eda14cbcSMatt Macy 		spa_load_spares(spa);
6092eda14cbcSMatt Macy 	}
6093eda14cbcSMatt Macy 	if (spa->spa_l2cache.sav_config) {
6094eda14cbcSMatt Macy 		nvlist_free(spa->spa_l2cache.sav_config);
6095eda14cbcSMatt Macy 		spa->spa_l2cache.sav_config = NULL;
6096eda14cbcSMatt Macy 		spa_load_l2cache(spa);
6097eda14cbcSMatt Macy 	}
6098eda14cbcSMatt Macy 
6099eda14cbcSMatt Macy 	VERIFY(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
6100eda14cbcSMatt Macy 	    &nvroot) == 0);
6101eda14cbcSMatt Macy 	spa_config_exit(spa, SCL_ALL, FTAG);
6102eda14cbcSMatt Macy 
6103eda14cbcSMatt Macy 	if (props != NULL)
6104eda14cbcSMatt Macy 		spa_configfile_set(spa, props, B_FALSE);
6105eda14cbcSMatt Macy 
6106eda14cbcSMatt Macy 	if (error != 0 || (props && spa_writeable(spa) &&
6107eda14cbcSMatt Macy 	    (error = spa_prop_set(spa, props)))) {
6108eda14cbcSMatt Macy 		spa_unload(spa);
6109eda14cbcSMatt Macy 		spa_deactivate(spa);
6110eda14cbcSMatt Macy 		spa_remove(spa);
6111eda14cbcSMatt Macy 		mutex_exit(&spa_namespace_lock);
6112eda14cbcSMatt Macy 		return (error);
6113eda14cbcSMatt Macy 	}
6114eda14cbcSMatt Macy 
6115eda14cbcSMatt Macy 	spa_async_resume(spa);
6116eda14cbcSMatt Macy 
6117eda14cbcSMatt Macy 	/*
6118eda14cbcSMatt Macy 	 * Override any spares and level 2 cache devices as specified by
6119eda14cbcSMatt Macy 	 * the user, as these may have correct device names/devids, etc.
6120eda14cbcSMatt Macy 	 */
6121eda14cbcSMatt Macy 	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
6122eda14cbcSMatt Macy 	    &spares, &nspares) == 0) {
6123eda14cbcSMatt Macy 		if (spa->spa_spares.sav_config)
6124eda14cbcSMatt Macy 			VERIFY(nvlist_remove(spa->spa_spares.sav_config,
6125eda14cbcSMatt Macy 			    ZPOOL_CONFIG_SPARES, DATA_TYPE_NVLIST_ARRAY) == 0);
6126eda14cbcSMatt Macy 		else
6127eda14cbcSMatt Macy 			VERIFY(nvlist_alloc(&spa->spa_spares.sav_config,
6128eda14cbcSMatt Macy 			    NV_UNIQUE_NAME, KM_SLEEP) == 0);
6129eda14cbcSMatt Macy 		VERIFY(nvlist_add_nvlist_array(spa->spa_spares.sav_config,
6130eda14cbcSMatt Macy 		    ZPOOL_CONFIG_SPARES, spares, nspares) == 0);
6131eda14cbcSMatt Macy 		spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
6132eda14cbcSMatt Macy 		spa_load_spares(spa);
6133eda14cbcSMatt Macy 		spa_config_exit(spa, SCL_ALL, FTAG);
6134eda14cbcSMatt Macy 		spa->spa_spares.sav_sync = B_TRUE;
6135eda14cbcSMatt Macy 	}
6136eda14cbcSMatt Macy 	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
6137eda14cbcSMatt Macy 	    &l2cache, &nl2cache) == 0) {
6138eda14cbcSMatt Macy 		if (spa->spa_l2cache.sav_config)
6139eda14cbcSMatt Macy 			VERIFY(nvlist_remove(spa->spa_l2cache.sav_config,
6140eda14cbcSMatt Macy 			    ZPOOL_CONFIG_L2CACHE, DATA_TYPE_NVLIST_ARRAY) == 0);
6141eda14cbcSMatt Macy 		else
6142eda14cbcSMatt Macy 			VERIFY(nvlist_alloc(&spa->spa_l2cache.sav_config,
6143eda14cbcSMatt Macy 			    NV_UNIQUE_NAME, KM_SLEEP) == 0);
6144eda14cbcSMatt Macy 		VERIFY(nvlist_add_nvlist_array(spa->spa_l2cache.sav_config,
6145eda14cbcSMatt Macy 		    ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache) == 0);
6146eda14cbcSMatt Macy 		spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
6147eda14cbcSMatt Macy 		spa_load_l2cache(spa);
6148eda14cbcSMatt Macy 		spa_config_exit(spa, SCL_ALL, FTAG);
6149eda14cbcSMatt Macy 		spa->spa_l2cache.sav_sync = B_TRUE;
6150eda14cbcSMatt Macy 	}
6151eda14cbcSMatt Macy 
6152eda14cbcSMatt Macy 	/*
6153eda14cbcSMatt Macy 	 * Check for any removed devices.
6154eda14cbcSMatt Macy 	 */
6155eda14cbcSMatt Macy 	if (spa->spa_autoreplace) {
6156eda14cbcSMatt Macy 		spa_aux_check_removed(&spa->spa_spares);
6157eda14cbcSMatt Macy 		spa_aux_check_removed(&spa->spa_l2cache);
6158eda14cbcSMatt Macy 	}
6159eda14cbcSMatt Macy 
6160eda14cbcSMatt Macy 	if (spa_writeable(spa)) {
6161eda14cbcSMatt Macy 		/*
6162eda14cbcSMatt Macy 		 * Update the config cache to include the newly-imported pool.
6163eda14cbcSMatt Macy 		 */
6164eda14cbcSMatt Macy 		spa_config_update(spa, SPA_CONFIG_UPDATE_POOL);
6165eda14cbcSMatt Macy 	}
6166eda14cbcSMatt Macy 
6167eda14cbcSMatt Macy 	/*
6168eda14cbcSMatt Macy 	 * It's possible that the pool was expanded while it was exported.
6169eda14cbcSMatt Macy 	 * We kick off an async task to handle this for us.
6170eda14cbcSMatt Macy 	 */
6171eda14cbcSMatt Macy 	spa_async_request(spa, SPA_ASYNC_AUTOEXPAND);
6172eda14cbcSMatt Macy 
6173eda14cbcSMatt Macy 	spa_history_log_version(spa, "import", NULL);
6174eda14cbcSMatt Macy 
6175eda14cbcSMatt Macy 	spa_event_notify(spa, NULL, NULL, ESC_ZFS_POOL_IMPORT);
6176eda14cbcSMatt Macy 
6177eda14cbcSMatt Macy 	mutex_exit(&spa_namespace_lock);
6178eda14cbcSMatt Macy 
6179eda14cbcSMatt Macy 	zvol_create_minors_recursive(pool);
6180eda14cbcSMatt Macy 
6181eda14cbcSMatt Macy 	return (0);
6182eda14cbcSMatt Macy }
6183eda14cbcSMatt Macy 
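/*
 * Illustrative sketch (simplified, assuming the ZPOOL_LOAD_* keys and
 * ZPOOL_DO_REWIND flag from sys/fs/zfs.h): how a caller can request a
 * recovery import by embedding a load policy in 'config' before calling
 * spa_import().  The rewind txg value is a made-up example.
 *
 *	nvlist_t *policy = fnvlist_alloc();
 *	fnvlist_add_uint32(policy, ZPOOL_LOAD_REWIND_POLICY, ZPOOL_DO_REWIND);
 *	fnvlist_add_uint64(policy, ZPOOL_LOAD_REQUEST_TXG, rewind_txg);
 *	fnvlist_add_nvlist(config, ZPOOL_LOAD_POLICY, policy);
 *	fnvlist_free(policy);
 *
 * zpool_get_load_policy() above then reports ZPOOL_DO_REWIND in zlp_rewind
 * and the import proceeds in the SPA_LOAD_RECOVER state.
 */
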
6184eda14cbcSMatt Macy nvlist_t *
6185eda14cbcSMatt Macy spa_tryimport(nvlist_t *tryconfig)
6186eda14cbcSMatt Macy {
6187eda14cbcSMatt Macy 	nvlist_t *config = NULL;
6188eda14cbcSMatt Macy 	char *poolname, *cachefile;
6189eda14cbcSMatt Macy 	spa_t *spa;
6190eda14cbcSMatt Macy 	uint64_t state;
6191eda14cbcSMatt Macy 	int error;
6192eda14cbcSMatt Macy 	zpool_load_policy_t policy;
6193eda14cbcSMatt Macy 
6194eda14cbcSMatt Macy 	if (nvlist_lookup_string(tryconfig, ZPOOL_CONFIG_POOL_NAME, &poolname))
6195eda14cbcSMatt Macy 		return (NULL);
6196eda14cbcSMatt Macy 
6197eda14cbcSMatt Macy 	if (nvlist_lookup_uint64(tryconfig, ZPOOL_CONFIG_POOL_STATE, &state))
6198eda14cbcSMatt Macy 		return (NULL);
6199eda14cbcSMatt Macy 
6200eda14cbcSMatt Macy 	/*
6201eda14cbcSMatt Macy 	 * Create and initialize the spa structure.
6202eda14cbcSMatt Macy 	 */
6203eda14cbcSMatt Macy 	mutex_enter(&spa_namespace_lock);
6204eda14cbcSMatt Macy 	spa = spa_add(TRYIMPORT_NAME, tryconfig, NULL);
6205eda14cbcSMatt Macy 	spa_activate(spa, SPA_MODE_READ);
6206eda14cbcSMatt Macy 
6207eda14cbcSMatt Macy 	/*
6208eda14cbcSMatt Macy 	 * Rewind pool if a max txg was provided.
6209eda14cbcSMatt Macy 	 */
6210eda14cbcSMatt Macy 	zpool_get_load_policy(spa->spa_config, &policy);
6211eda14cbcSMatt Macy 	if (policy.zlp_txg != UINT64_MAX) {
6212eda14cbcSMatt Macy 		spa->spa_load_max_txg = policy.zlp_txg;
6213eda14cbcSMatt Macy 		spa->spa_extreme_rewind = B_TRUE;
6214eda14cbcSMatt Macy 		zfs_dbgmsg("spa_tryimport: importing %s, max_txg=%lld",
6215eda14cbcSMatt Macy 		    poolname, (longlong_t)policy.zlp_txg);
6216eda14cbcSMatt Macy 	} else {
6217eda14cbcSMatt Macy 		zfs_dbgmsg("spa_tryimport: importing %s", poolname);
6218eda14cbcSMatt Macy 	}
6219eda14cbcSMatt Macy 
6220eda14cbcSMatt Macy 	if (nvlist_lookup_string(tryconfig, ZPOOL_CONFIG_CACHEFILE, &cachefile)
6221eda14cbcSMatt Macy 	    == 0) {
6222eda14cbcSMatt Macy 		zfs_dbgmsg("spa_tryimport: using cachefile '%s'", cachefile);
6223eda14cbcSMatt Macy 		spa->spa_config_source = SPA_CONFIG_SRC_CACHEFILE;
6224eda14cbcSMatt Macy 	} else {
6225eda14cbcSMatt Macy 		spa->spa_config_source = SPA_CONFIG_SRC_SCAN;
6226eda14cbcSMatt Macy 	}
6227eda14cbcSMatt Macy 
6228eda14cbcSMatt Macy 	error = spa_load(spa, SPA_LOAD_TRYIMPORT, SPA_IMPORT_EXISTING);
6229eda14cbcSMatt Macy 
6230eda14cbcSMatt Macy 	/*
6231eda14cbcSMatt Macy 	 * If 'tryconfig' was at least parsable, return the current config.
6232eda14cbcSMatt Macy 	 */
6233eda14cbcSMatt Macy 	if (spa->spa_root_vdev != NULL) {
6234eda14cbcSMatt Macy 		config = spa_config_generate(spa, NULL, -1ULL, B_TRUE);
6235eda14cbcSMatt Macy 		VERIFY(nvlist_add_string(config, ZPOOL_CONFIG_POOL_NAME,
6236eda14cbcSMatt Macy 		    poolname) == 0);
6237eda14cbcSMatt Macy 		VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_POOL_STATE,
6238eda14cbcSMatt Macy 		    state) == 0);
6239eda14cbcSMatt Macy 		VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_TIMESTAMP,
6240eda14cbcSMatt Macy 		    spa->spa_uberblock.ub_timestamp) == 0);
6241eda14cbcSMatt Macy 		VERIFY(nvlist_add_nvlist(config, ZPOOL_CONFIG_LOAD_INFO,
6242eda14cbcSMatt Macy 		    spa->spa_load_info) == 0);
6243eda14cbcSMatt Macy 		VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_ERRATA,
6244eda14cbcSMatt Macy 		    spa->spa_errata) == 0);
6245eda14cbcSMatt Macy 
6246eda14cbcSMatt Macy 		/*
6247eda14cbcSMatt Macy 		 * If the bootfs property exists on this pool then we
6248eda14cbcSMatt Macy 		 * copy it out so that external consumers can tell which
6249eda14cbcSMatt Macy 		 * pools are bootable.
6250eda14cbcSMatt Macy 		 */
6251eda14cbcSMatt Macy 		if ((!error || error == EEXIST) && spa->spa_bootfs) {
6252eda14cbcSMatt Macy 			char *tmpname = kmem_alloc(MAXPATHLEN, KM_SLEEP);
6253eda14cbcSMatt Macy 
6254eda14cbcSMatt Macy 			/*
6255eda14cbcSMatt Macy 			 * We have to play games with the name since the
6256eda14cbcSMatt Macy 			 * pool was opened as TRYIMPORT_NAME.
6257eda14cbcSMatt Macy 			 */
6258eda14cbcSMatt Macy 			if (dsl_dsobj_to_dsname(spa_name(spa),
6259eda14cbcSMatt Macy 			    spa->spa_bootfs, tmpname) == 0) {
6260eda14cbcSMatt Macy 				char *cp;
6261eda14cbcSMatt Macy 				char *dsname;
6262eda14cbcSMatt Macy 
6263eda14cbcSMatt Macy 				dsname = kmem_alloc(MAXPATHLEN, KM_SLEEP);
6264eda14cbcSMatt Macy 
6265eda14cbcSMatt Macy 				cp = strchr(tmpname, '/');
6266eda14cbcSMatt Macy 				if (cp == NULL) {
6267eda14cbcSMatt Macy 					(void) strlcpy(dsname, tmpname,
6268eda14cbcSMatt Macy 					    MAXPATHLEN);
6269eda14cbcSMatt Macy 				} else {
6270eda14cbcSMatt Macy 					(void) snprintf(dsname, MAXPATHLEN,
6271eda14cbcSMatt Macy 					    "%s/%s", poolname, ++cp);
6272eda14cbcSMatt Macy 				}
6273eda14cbcSMatt Macy 				VERIFY(nvlist_add_string(config,
6274eda14cbcSMatt Macy 				    ZPOOL_CONFIG_BOOTFS, dsname) == 0);
6275eda14cbcSMatt Macy 				kmem_free(dsname, MAXPATHLEN);
6276eda14cbcSMatt Macy 			}
6277eda14cbcSMatt Macy 			kmem_free(tmpname, MAXPATHLEN);
6278eda14cbcSMatt Macy 		}
6279eda14cbcSMatt Macy 
6280eda14cbcSMatt Macy 		/*
6281eda14cbcSMatt Macy 		 * Add the list of hot spares and level 2 cache devices.
6282eda14cbcSMatt Macy 		 */
6283eda14cbcSMatt Macy 		spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
6284eda14cbcSMatt Macy 		spa_add_spares(spa, config);
6285eda14cbcSMatt Macy 		spa_add_l2cache(spa, config);
6286eda14cbcSMatt Macy 		spa_config_exit(spa, SCL_CONFIG, FTAG);
6287eda14cbcSMatt Macy 	}
6288eda14cbcSMatt Macy 
6289eda14cbcSMatt Macy 	spa_unload(spa);
6290eda14cbcSMatt Macy 	spa_deactivate(spa);
6291eda14cbcSMatt Macy 	spa_remove(spa);
6292eda14cbcSMatt Macy 	mutex_exit(&spa_namespace_lock);
6293eda14cbcSMatt Macy 
6294eda14cbcSMatt Macy 	return (config);
6295eda14cbcSMatt Macy }
6296eda14cbcSMatt Macy 
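/*
 * Illustrative sketch (hypothetical consumer): reading the config nvlist
 * returned by spa_tryimport().  Only keys added by the function above are
 * looked up here.
 *
 *	nvlist_t *config = spa_tryimport(tryconfig);
 *	if (config != NULL) {
 *		char *name = fnvlist_lookup_string(config,
 *		    ZPOOL_CONFIG_POOL_NAME);
 *		uint64_t state = fnvlist_lookup_uint64(config,
 *		    ZPOOL_CONFIG_POOL_STATE);
 *		nvlist_t *info = fnvlist_lookup_nvlist(config,
 *		    ZPOOL_CONFIG_LOAD_INFO);
 *		...
 *		nvlist_free(config);
 *	}
 */
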
6297eda14cbcSMatt Macy /*
6298eda14cbcSMatt Macy  * Pool export/destroy
6299eda14cbcSMatt Macy  *
6300eda14cbcSMatt Macy  * The act of destroying or exporting a pool is very simple.  We make sure there
6301eda14cbcSMatt Macy  * is no more pending I/O and that any references to the pool are gone.  Then, we
6302eda14cbcSMatt Macy  * update the pool state and sync all the labels to disk, removing the
6303eda14cbcSMatt Macy  * configuration from the cache afterwards. If the 'hardforce' flag is set, then
6304eda14cbcSMatt Macy  * we don't sync the labels or remove the configuration cache.
6305eda14cbcSMatt Macy  */
6306eda14cbcSMatt Macy static int
6307180f8225SMatt Macy spa_export_common(const char *pool, int new_state, nvlist_t **oldconfig,
6308eda14cbcSMatt Macy     boolean_t force, boolean_t hardforce)
6309eda14cbcSMatt Macy {
6310184c1b94SMartin Matuska 	int error;
6311eda14cbcSMatt Macy 	spa_t *spa;
6312eda14cbcSMatt Macy 
6313eda14cbcSMatt Macy 	if (oldconfig)
6314eda14cbcSMatt Macy 		*oldconfig = NULL;
6315eda14cbcSMatt Macy 
6316eda14cbcSMatt Macy 	if (!(spa_mode_global & SPA_MODE_WRITE))
6317eda14cbcSMatt Macy 		return (SET_ERROR(EROFS));
6318eda14cbcSMatt Macy 
6319eda14cbcSMatt Macy 	mutex_enter(&spa_namespace_lock);
6320eda14cbcSMatt Macy 	if ((spa = spa_lookup(pool)) == NULL) {
6321eda14cbcSMatt Macy 		mutex_exit(&spa_namespace_lock);
6322eda14cbcSMatt Macy 		return (SET_ERROR(ENOENT));
6323eda14cbcSMatt Macy 	}
6324eda14cbcSMatt Macy 
6325eda14cbcSMatt Macy 	if (spa->spa_is_exporting) {
6326eda14cbcSMatt Macy 		/* the pool is being exported by another thread */
6327eda14cbcSMatt Macy 		mutex_exit(&spa_namespace_lock);
6328eda14cbcSMatt Macy 		return (SET_ERROR(ZFS_ERR_EXPORT_IN_PROGRESS));
6329eda14cbcSMatt Macy 	}
6330eda14cbcSMatt Macy 	spa->spa_is_exporting = B_TRUE;
6331eda14cbcSMatt Macy 
6332eda14cbcSMatt Macy 	/*
6333eda14cbcSMatt Macy 	 * Put a hold on the pool, drop the namespace lock, stop async tasks,
6334eda14cbcSMatt Macy 	 * reacquire the namespace lock, and see if we can export.
6335eda14cbcSMatt Macy 	 */
6336eda14cbcSMatt Macy 	spa_open_ref(spa, FTAG);
6337eda14cbcSMatt Macy 	mutex_exit(&spa_namespace_lock);
6338eda14cbcSMatt Macy 	spa_async_suspend(spa);
6339eda14cbcSMatt Macy 	if (spa->spa_zvol_taskq) {
6340eda14cbcSMatt Macy 		zvol_remove_minors(spa, spa_name(spa), B_TRUE);
6341eda14cbcSMatt Macy 		taskq_wait(spa->spa_zvol_taskq);
6342eda14cbcSMatt Macy 	}
6343eda14cbcSMatt Macy 	mutex_enter(&spa_namespace_lock);
6344eda14cbcSMatt Macy 	spa_close(spa, FTAG);
6345eda14cbcSMatt Macy 
6346eda14cbcSMatt Macy 	if (spa->spa_state == POOL_STATE_UNINITIALIZED)
6347eda14cbcSMatt Macy 		goto export_spa;
6348eda14cbcSMatt Macy 	/*
6349eda14cbcSMatt Macy 	 * The pool will be in core if it's openable, in which case we can
6350eda14cbcSMatt Macy 	 * modify its state.  Objsets may be open only because they're dirty,
6351eda14cbcSMatt Macy 	 * so we have to force it to sync before checking spa_refcnt.
6352eda14cbcSMatt Macy 	 */
6353eda14cbcSMatt Macy 	if (spa->spa_sync_on) {
6354eda14cbcSMatt Macy 		txg_wait_synced(spa->spa_dsl_pool, 0);
6355eda14cbcSMatt Macy 		spa_evicting_os_wait(spa);
6356eda14cbcSMatt Macy 	}
6357eda14cbcSMatt Macy 
6358eda14cbcSMatt Macy 	/*
6359eda14cbcSMatt Macy 	 * A pool cannot be exported or destroyed if there are active
6360eda14cbcSMatt Macy 	 * references.  If we are resetting a pool, allow references by
6361eda14cbcSMatt Macy 	 * fault injection handlers.
6362eda14cbcSMatt Macy 	 */
6363184c1b94SMartin Matuska 	if (!spa_refcount_zero(spa) || (spa->spa_inject_ref != 0)) {
6364184c1b94SMartin Matuska 		error = SET_ERROR(EBUSY);
6365184c1b94SMartin Matuska 		goto fail;
6366eda14cbcSMatt Macy 	}
6367eda14cbcSMatt Macy 
6368eda14cbcSMatt Macy 	if (spa->spa_sync_on) {
6369eda14cbcSMatt Macy 		/*
6370eda14cbcSMatt Macy 		 * A pool cannot be exported if it has an active shared spare.
6371eda14cbcSMatt Macy 		 * This is to prevent other pools from stealing the active spare
6372eda14cbcSMatt Macy 		 * from an exported pool.  At the user's explicit request, such
6373eda14cbcSMatt Macy 		 * a pool can still be forcibly exported.
6374eda14cbcSMatt Macy 		 */
6375eda14cbcSMatt Macy 		if (!force && new_state == POOL_STATE_EXPORTED &&
6376eda14cbcSMatt Macy 		    spa_has_active_shared_spare(spa)) {
6377184c1b94SMartin Matuska 			error = SET_ERROR(EXDEV);
6378184c1b94SMartin Matuska 			goto fail;
6379eda14cbcSMatt Macy 		}
6380eda14cbcSMatt Macy 
6381eda14cbcSMatt Macy 		/*
6382eda14cbcSMatt Macy 		 * We're about to export or destroy this pool. Make sure
6383eda14cbcSMatt Macy 		 * we stop all initialization and trim activity here before
6384eda14cbcSMatt Macy 		 * we set the spa_final_txg. This will ensure that all
6385eda14cbcSMatt Macy 		 * dirty data resulting from the initialization is
6386eda14cbcSMatt Macy 		 * committed to disk before we unload the pool.
6387eda14cbcSMatt Macy 		 */
6388eda14cbcSMatt Macy 		if (spa->spa_root_vdev != NULL) {
6389eda14cbcSMatt Macy 			vdev_t *rvd = spa->spa_root_vdev;
6390eda14cbcSMatt Macy 			vdev_initialize_stop_all(rvd, VDEV_INITIALIZE_ACTIVE);
6391eda14cbcSMatt Macy 			vdev_trim_stop_all(rvd, VDEV_TRIM_ACTIVE);
6392eda14cbcSMatt Macy 			vdev_autotrim_stop_all(spa);
6393eda14cbcSMatt Macy 			vdev_rebuild_stop_all(spa);
6394eda14cbcSMatt Macy 		}
6395eda14cbcSMatt Macy 
6396eda14cbcSMatt Macy 		/*
6397eda14cbcSMatt Macy 		 * We want this to be reflected on every label,
6398eda14cbcSMatt Macy 		 * so mark them all dirty.  spa_unload() will do the
6399eda14cbcSMatt Macy 		 * final sync that pushes these changes out.
6400eda14cbcSMatt Macy 		 */
6401eda14cbcSMatt Macy 		if (new_state != POOL_STATE_UNINITIALIZED && !hardforce) {
6402eda14cbcSMatt Macy 			spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
6403eda14cbcSMatt Macy 			spa->spa_state = new_state;
6404eda14cbcSMatt Macy 			spa->spa_final_txg = spa_last_synced_txg(spa) +
6405eda14cbcSMatt Macy 			    TXG_DEFER_SIZE + 1;
6406eda14cbcSMatt Macy 			vdev_config_dirty(spa->spa_root_vdev);
6407eda14cbcSMatt Macy 			spa_config_exit(spa, SCL_ALL, FTAG);
6408eda14cbcSMatt Macy 		}
6409eda14cbcSMatt Macy 	}
6410eda14cbcSMatt Macy 
6411eda14cbcSMatt Macy export_spa:
6412eda14cbcSMatt Macy 	if (new_state == POOL_STATE_DESTROYED)
6413eda14cbcSMatt Macy 		spa_event_notify(spa, NULL, NULL, ESC_ZFS_POOL_DESTROY);
6414eda14cbcSMatt Macy 	else if (new_state == POOL_STATE_EXPORTED)
6415eda14cbcSMatt Macy 		spa_event_notify(spa, NULL, NULL, ESC_ZFS_POOL_EXPORT);
6416eda14cbcSMatt Macy 
6417eda14cbcSMatt Macy 	if (spa->spa_state != POOL_STATE_UNINITIALIZED) {
6418eda14cbcSMatt Macy 		spa_unload(spa);
6419eda14cbcSMatt Macy 		spa_deactivate(spa);
6420eda14cbcSMatt Macy 	}
6421eda14cbcSMatt Macy 
6422eda14cbcSMatt Macy 	if (oldconfig && spa->spa_config)
6423eda14cbcSMatt Macy 		VERIFY(nvlist_dup(spa->spa_config, oldconfig, 0) == 0);
6424eda14cbcSMatt Macy 
6425eda14cbcSMatt Macy 	if (new_state != POOL_STATE_UNINITIALIZED) {
6426eda14cbcSMatt Macy 		if (!hardforce)
6427eda14cbcSMatt Macy 			spa_write_cachefile(spa, B_TRUE, B_TRUE);
6428eda14cbcSMatt Macy 		spa_remove(spa);
6429eda14cbcSMatt Macy 	} else {
6430eda14cbcSMatt Macy 		/*
6431eda14cbcSMatt Macy 		 * If spa_remove() is not called for this spa_t and
6432eda14cbcSMatt Macy 		 * there is any possibility that it can be reused,
6433eda14cbcSMatt Macy 		 * we make sure to reset the exporting flag.
6434eda14cbcSMatt Macy 		 */
6435eda14cbcSMatt Macy 		spa->spa_is_exporting = B_FALSE;
6436eda14cbcSMatt Macy 	}
6437eda14cbcSMatt Macy 
6438eda14cbcSMatt Macy 	mutex_exit(&spa_namespace_lock);
6439eda14cbcSMatt Macy 	return (0);
6440184c1b94SMartin Matuska 
6441184c1b94SMartin Matuska fail:
6442184c1b94SMartin Matuska 	spa->spa_is_exporting = B_FALSE;
6443184c1b94SMartin Matuska 	spa_async_resume(spa);
6444184c1b94SMartin Matuska 	mutex_exit(&spa_namespace_lock);
6445184c1b94SMartin Matuska 	return (error);
6446eda14cbcSMatt Macy }
6447eda14cbcSMatt Macy 
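/*
 * Illustrative sketch (hypothetical caller): exporting a pool while keeping
 * its final configuration around, e.g. for a later re-import.  The pool
 * name is a made-up example.
 *
 *	nvlist_t *oldconfig = NULL;
 *	int error = spa_export("tank", &oldconfig, B_FALSE, B_FALSE);
 *	if (error == EBUSY || error == EXDEV) {
 *		(pool busy, or it has an active shared spare)
 *	} else if (error == 0 && oldconfig != NULL) {
 *		nvlist_free(oldconfig);
 *	}
 */
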
6448eda14cbcSMatt Macy /*
6449eda14cbcSMatt Macy  * Destroy a storage pool.
6450eda14cbcSMatt Macy  */
6451eda14cbcSMatt Macy int
6452180f8225SMatt Macy spa_destroy(const char *pool)
6453eda14cbcSMatt Macy {
6454eda14cbcSMatt Macy 	return (spa_export_common(pool, POOL_STATE_DESTROYED, NULL,
6455eda14cbcSMatt Macy 	    B_FALSE, B_FALSE));
6456eda14cbcSMatt Macy }
6457eda14cbcSMatt Macy 
6458eda14cbcSMatt Macy /*
6459eda14cbcSMatt Macy  * Export a storage pool.
6460eda14cbcSMatt Macy  */
6461eda14cbcSMatt Macy int
6462180f8225SMatt Macy spa_export(const char *pool, nvlist_t **oldconfig, boolean_t force,
6463eda14cbcSMatt Macy     boolean_t hardforce)
6464eda14cbcSMatt Macy {
6465eda14cbcSMatt Macy 	return (spa_export_common(pool, POOL_STATE_EXPORTED, oldconfig,
6466eda14cbcSMatt Macy 	    force, hardforce));
6467eda14cbcSMatt Macy }
6468eda14cbcSMatt Macy 
6469eda14cbcSMatt Macy /*
6470eda14cbcSMatt Macy  * Similar to spa_export(), this unloads the spa_t without actually removing it
6471eda14cbcSMatt Macy  * from the namespace in any way.
6472eda14cbcSMatt Macy  */
6473eda14cbcSMatt Macy int
6474180f8225SMatt Macy spa_reset(const char *pool)
6475eda14cbcSMatt Macy {
6476eda14cbcSMatt Macy 	return (spa_export_common(pool, POOL_STATE_UNINITIALIZED, NULL,
6477eda14cbcSMatt Macy 	    B_FALSE, B_FALSE));
6478eda14cbcSMatt Macy }
6479eda14cbcSMatt Macy 
6480eda14cbcSMatt Macy /*
6481eda14cbcSMatt Macy  * ==========================================================================
6482eda14cbcSMatt Macy  * Device manipulation
6483eda14cbcSMatt Macy  * ==========================================================================
6484eda14cbcSMatt Macy  */
6485eda14cbcSMatt Macy 
6486eda14cbcSMatt Macy /*
64877877fdebSMatt Macy  * This is called as a synctask to increment the draid feature flag
64887877fdebSMatt Macy  */
64897877fdebSMatt Macy static void
64907877fdebSMatt Macy spa_draid_feature_incr(void *arg, dmu_tx_t *tx)
64917877fdebSMatt Macy {
64927877fdebSMatt Macy 	spa_t *spa = dmu_tx_pool(tx)->dp_spa;
64937877fdebSMatt Macy 	int draid = (int)(uintptr_t)arg;
64947877fdebSMatt Macy 
64957877fdebSMatt Macy 	for (int c = 0; c < draid; c++)
64967877fdebSMatt Macy 		spa_feature_incr(spa, SPA_FEATURE_DRAID, tx);
64977877fdebSMatt Macy }
64987877fdebSMatt Macy 
64997877fdebSMatt Macy /*
6500eda14cbcSMatt Macy  * Add a device to a storage pool.
6501eda14cbcSMatt Macy  */
6502eda14cbcSMatt Macy int
6503eda14cbcSMatt Macy spa_vdev_add(spa_t *spa, nvlist_t *nvroot)
6504eda14cbcSMatt Macy {
65057877fdebSMatt Macy 	uint64_t txg, ndraid = 0;
6506eda14cbcSMatt Macy 	int error;
6507eda14cbcSMatt Macy 	vdev_t *rvd = spa->spa_root_vdev;
6508eda14cbcSMatt Macy 	vdev_t *vd, *tvd;
6509eda14cbcSMatt Macy 	nvlist_t **spares, **l2cache;
6510eda14cbcSMatt Macy 	uint_t nspares, nl2cache;
6511eda14cbcSMatt Macy 
6512eda14cbcSMatt Macy 	ASSERT(spa_writeable(spa));
6513eda14cbcSMatt Macy 
6514eda14cbcSMatt Macy 	txg = spa_vdev_enter(spa);
6515eda14cbcSMatt Macy 
6516eda14cbcSMatt Macy 	if ((error = spa_config_parse(spa, &vd, nvroot, NULL, 0,
6517eda14cbcSMatt Macy 	    VDEV_ALLOC_ADD)) != 0)
6518eda14cbcSMatt Macy 		return (spa_vdev_exit(spa, NULL, txg, error));
6519eda14cbcSMatt Macy 
6520eda14cbcSMatt Macy 	spa->spa_pending_vdev = vd;	/* spa_vdev_exit() will clear this */
6521eda14cbcSMatt Macy 
6522eda14cbcSMatt Macy 	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES, &spares,
6523eda14cbcSMatt Macy 	    &nspares) != 0)
6524eda14cbcSMatt Macy 		nspares = 0;
6525eda14cbcSMatt Macy 
6526eda14cbcSMatt Macy 	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE, &l2cache,
6527eda14cbcSMatt Macy 	    &nl2cache) != 0)
6528eda14cbcSMatt Macy 		nl2cache = 0;
6529eda14cbcSMatt Macy 
6530eda14cbcSMatt Macy 	if (vd->vdev_children == 0 && nspares == 0 && nl2cache == 0)
6531eda14cbcSMatt Macy 		return (spa_vdev_exit(spa, vd, txg, EINVAL));
6532eda14cbcSMatt Macy 
6533eda14cbcSMatt Macy 	if (vd->vdev_children != 0 &&
65347877fdebSMatt Macy 	    (error = vdev_create(vd, txg, B_FALSE)) != 0) {
6535eda14cbcSMatt Macy 		return (spa_vdev_exit(spa, vd, txg, error));
65367877fdebSMatt Macy 	}
65377877fdebSMatt Macy 
65387877fdebSMatt Macy 	/*
65397877fdebSMatt Macy 	 * The virtual dRAID spares must be added after the vdev tree is created
6540*16038816SMartin Matuska 	 * and the vdev guids are generated.  The guid of their associated
65417877fdebSMatt Macy 	 * dRAID is stored in the config and used when opening the spare.
65427877fdebSMatt Macy 	 */
65437877fdebSMatt Macy 	if ((error = vdev_draid_spare_create(nvroot, vd, &ndraid,
65447877fdebSMatt Macy 	    rvd->vdev_children)) == 0) {
65457877fdebSMatt Macy 		if (ndraid > 0 && nvlist_lookup_nvlist_array(nvroot,
65467877fdebSMatt Macy 		    ZPOOL_CONFIG_SPARES, &spares, &nspares) != 0)
65477877fdebSMatt Macy 			nspares = 0;
65487877fdebSMatt Macy 	} else {
65497877fdebSMatt Macy 		return (spa_vdev_exit(spa, vd, txg, error));
65507877fdebSMatt Macy 	}
6551eda14cbcSMatt Macy 
6552eda14cbcSMatt Macy 	/*
6553eda14cbcSMatt Macy 	 * We must validate the spares and l2cache devices after checking the
6554eda14cbcSMatt Macy 	 * children.  Otherwise, vdev_inuse() will blindly overwrite the spare.
6555eda14cbcSMatt Macy 	 */
6556eda14cbcSMatt Macy 	if ((error = spa_validate_aux(spa, nvroot, txg, VDEV_ALLOC_ADD)) != 0)
6557eda14cbcSMatt Macy 		return (spa_vdev_exit(spa, vd, txg, error));
6558eda14cbcSMatt Macy 
6559eda14cbcSMatt Macy 	/*
6560eda14cbcSMatt Macy 	 * If we are in the middle of a device removal, we can only add
6561eda14cbcSMatt Macy 	 * devices which match the existing devices in the pool.
6562eda14cbcSMatt Macy 	 * If we are in the middle of a removal, or have some indirect
65637877fdebSMatt Macy 	 * vdevs, we cannot add raidz or dRAID top levels.
6564eda14cbcSMatt Macy 	 */
6565eda14cbcSMatt Macy 	if (spa->spa_vdev_removal != NULL ||
6566eda14cbcSMatt Macy 	    spa->spa_removing_phys.sr_prev_indirect_vdev != -1) {
6567eda14cbcSMatt Macy 		for (int c = 0; c < vd->vdev_children; c++) {
6568eda14cbcSMatt Macy 			tvd = vd->vdev_child[c];
6569eda14cbcSMatt Macy 			if (spa->spa_vdev_removal != NULL &&
6570eda14cbcSMatt Macy 			    tvd->vdev_ashift != spa->spa_max_ashift) {
6571eda14cbcSMatt Macy 				return (spa_vdev_exit(spa, vd, txg, EINVAL));
6572eda14cbcSMatt Macy 			}
65737877fdebSMatt Macy 			/* Fail if top level vdev is raidz or a dRAID */
65747877fdebSMatt Macy 			if (vdev_get_nparity(tvd) != 0)
6575eda14cbcSMatt Macy 				return (spa_vdev_exit(spa, vd, txg, EINVAL));
65767877fdebSMatt Macy 
6577eda14cbcSMatt Macy 			/*
6578eda14cbcSMatt Macy 			 * Need the top level mirror to be
6579eda14cbcSMatt Macy 			 * a mirror of leaf vdevs only
6580eda14cbcSMatt Macy 			 */
6581eda14cbcSMatt Macy 			if (tvd->vdev_ops == &vdev_mirror_ops) {
6582eda14cbcSMatt Macy 				for (uint64_t cid = 0;
6583eda14cbcSMatt Macy 				    cid < tvd->vdev_children; cid++) {
6584eda14cbcSMatt Macy 					vdev_t *cvd = tvd->vdev_child[cid];
6585eda14cbcSMatt Macy 					if (!cvd->vdev_ops->vdev_op_leaf) {
6586eda14cbcSMatt Macy 						return (spa_vdev_exit(spa, vd,
6587eda14cbcSMatt Macy 						    txg, EINVAL));
6588eda14cbcSMatt Macy 					}
6589eda14cbcSMatt Macy 				}
6590eda14cbcSMatt Macy 			}
6591eda14cbcSMatt Macy 		}
6592eda14cbcSMatt Macy 	}
6593eda14cbcSMatt Macy 
6594eda14cbcSMatt Macy 	for (int c = 0; c < vd->vdev_children; c++) {
6595eda14cbcSMatt Macy 		tvd = vd->vdev_child[c];
6596eda14cbcSMatt Macy 		vdev_remove_child(vd, tvd);
6597eda14cbcSMatt Macy 		tvd->vdev_id = rvd->vdev_children;
6598eda14cbcSMatt Macy 		vdev_add_child(rvd, tvd);
6599eda14cbcSMatt Macy 		vdev_config_dirty(tvd);
6600eda14cbcSMatt Macy 	}
6601eda14cbcSMatt Macy 
6602eda14cbcSMatt Macy 	if (nspares != 0) {
6603eda14cbcSMatt Macy 		spa_set_aux_vdevs(&spa->spa_spares, spares, nspares,
6604eda14cbcSMatt Macy 		    ZPOOL_CONFIG_SPARES);
6605eda14cbcSMatt Macy 		spa_load_spares(spa);
6606eda14cbcSMatt Macy 		spa->spa_spares.sav_sync = B_TRUE;
6607eda14cbcSMatt Macy 	}
6608eda14cbcSMatt Macy 
6609eda14cbcSMatt Macy 	if (nl2cache != 0) {
6610eda14cbcSMatt Macy 		spa_set_aux_vdevs(&spa->spa_l2cache, l2cache, nl2cache,
6611eda14cbcSMatt Macy 		    ZPOOL_CONFIG_L2CACHE);
6612eda14cbcSMatt Macy 		spa_load_l2cache(spa);
6613eda14cbcSMatt Macy 		spa->spa_l2cache.sav_sync = B_TRUE;
6614eda14cbcSMatt Macy 	}
6615eda14cbcSMatt Macy 
6616eda14cbcSMatt Macy 	/*
66177877fdebSMatt Macy 	 * We can't increment a feature while holding spa_vdev so we
66187877fdebSMatt Macy 	 * have to do it in a synctask.
66197877fdebSMatt Macy 	 */
66207877fdebSMatt Macy 	if (ndraid != 0) {
66217877fdebSMatt Macy 		dmu_tx_t *tx;
66227877fdebSMatt Macy 
66237877fdebSMatt Macy 		tx = dmu_tx_create_assigned(spa->spa_dsl_pool, txg);
66247877fdebSMatt Macy 		dsl_sync_task_nowait(spa->spa_dsl_pool, spa_draid_feature_incr,
66257877fdebSMatt Macy 		    (void *)(uintptr_t)ndraid, tx);
66267877fdebSMatt Macy 		dmu_tx_commit(tx);
66277877fdebSMatt Macy 	}
66287877fdebSMatt Macy 
66297877fdebSMatt Macy 	/*
6630eda14cbcSMatt Macy 	 * We have to be careful when adding new vdevs to an existing pool.
6631eda14cbcSMatt Macy 	 * If other threads start allocating from these vdevs before we
6632eda14cbcSMatt Macy 	 * sync the config cache, and we lose power, then upon reboot we may
6633eda14cbcSMatt Macy 	 * fail to open the pool because there are DVAs that the config cache
6634eda14cbcSMatt Macy 	 * can't translate.  Therefore, we first add the vdevs without
6635eda14cbcSMatt Macy 	 * initializing metaslabs; sync the config cache (via spa_vdev_exit());
6636eda14cbcSMatt Macy 	 * and then let spa_config_update() initialize the new metaslabs.
6637eda14cbcSMatt Macy 	 *
6638eda14cbcSMatt Macy 	 * spa_load() checks for added-but-not-initialized vdevs, so that
6639eda14cbcSMatt Macy 	 * if we lose power at any point in this sequence, the remaining
6640eda14cbcSMatt Macy 	 * steps will be completed the next time we load the pool.
6641eda14cbcSMatt Macy 	 */
6642eda14cbcSMatt Macy 	(void) spa_vdev_exit(spa, vd, txg, 0);
6643eda14cbcSMatt Macy 
6644eda14cbcSMatt Macy 	mutex_enter(&spa_namespace_lock);
6645eda14cbcSMatt Macy 	spa_config_update(spa, SPA_CONFIG_UPDATE_POOL);
6646eda14cbcSMatt Macy 	spa_event_notify(spa, NULL, NULL, ESC_ZFS_VDEV_ADD);
6647eda14cbcSMatt Macy 	mutex_exit(&spa_namespace_lock);
6648eda14cbcSMatt Macy 
6649eda14cbcSMatt Macy 	return (0);
6650eda14cbcSMatt Macy }
6651eda14cbcSMatt Macy 
6652eda14cbcSMatt Macy /*
6653eda14cbcSMatt Macy  * Attach a device to a mirror.  The arguments are the path to any device
6654eda14cbcSMatt Macy  * in the mirror, and the nvroot for the new device.  If the path specifies
6655eda14cbcSMatt Macy  * a device that is not mirrored, we automatically insert the mirror vdev.
6656eda14cbcSMatt Macy  *
6657eda14cbcSMatt Macy  * If 'replacing' is specified, the new device is intended to replace the
6658eda14cbcSMatt Macy  * existing device; in this case the two devices are made into their own
6659eda14cbcSMatt Macy  * mirror using the 'replacing' vdev, which is functionally identical to
6660eda14cbcSMatt Macy  * the mirror vdev (it actually reuses all the same ops) but has a few
6661eda14cbcSMatt Macy  * extra rules: you can't attach to it after it's been created, and upon
6662eda14cbcSMatt Macy  * completion of resilvering, the first disk (the one being replaced)
6663eda14cbcSMatt Macy  * is automatically detached.
6664eda14cbcSMatt Macy  *
6665eda14cbcSMatt Macy  * If 'rebuild' is specified, then sequential reconstruction (a.k.a. rebuild)
6666eda14cbcSMatt Macy  * should be performed instead of traditional healing reconstruction.  From
6667eda14cbcSMatt Macy  * an administrator's perspective these are both resilver operations.
6668eda14cbcSMatt Macy  */
6669eda14cbcSMatt Macy int
6670eda14cbcSMatt Macy spa_vdev_attach(spa_t *spa, uint64_t guid, nvlist_t *nvroot, int replacing,
6671eda14cbcSMatt Macy     int rebuild)
6672eda14cbcSMatt Macy {
6673eda14cbcSMatt Macy 	uint64_t txg, dtl_max_txg;
6674eda14cbcSMatt Macy 	vdev_t *rvd = spa->spa_root_vdev;
6675eda14cbcSMatt Macy 	vdev_t *oldvd, *newvd, *newrootvd, *pvd, *tvd;
6676eda14cbcSMatt Macy 	vdev_ops_t *pvops;
6677eda14cbcSMatt Macy 	char *oldvdpath, *newvdpath;
6678eda14cbcSMatt Macy 	int newvd_isspare;
6679eda14cbcSMatt Macy 	int error;
6680eda14cbcSMatt Macy 
6681eda14cbcSMatt Macy 	ASSERT(spa_writeable(spa));
6682eda14cbcSMatt Macy 
6683eda14cbcSMatt Macy 	txg = spa_vdev_enter(spa);
6684eda14cbcSMatt Macy 
6685eda14cbcSMatt Macy 	oldvd = spa_lookup_by_guid(spa, guid, B_FALSE);
6686eda14cbcSMatt Macy 
6687eda14cbcSMatt Macy 	ASSERT(MUTEX_HELD(&spa_namespace_lock));
6688eda14cbcSMatt Macy 	if (spa_feature_is_active(spa, SPA_FEATURE_POOL_CHECKPOINT)) {
6689eda14cbcSMatt Macy 		error = (spa_has_checkpoint(spa)) ?
6690eda14cbcSMatt Macy 		    ZFS_ERR_CHECKPOINT_EXISTS : ZFS_ERR_DISCARDING_CHECKPOINT;
6691eda14cbcSMatt Macy 		return (spa_vdev_exit(spa, NULL, txg, error));
6692eda14cbcSMatt Macy 	}
6693eda14cbcSMatt Macy 
6694eda14cbcSMatt Macy 	if (rebuild) {
6695eda14cbcSMatt Macy 		if (!spa_feature_is_enabled(spa, SPA_FEATURE_DEVICE_REBUILD))
6696eda14cbcSMatt Macy 			return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
6697eda14cbcSMatt Macy 
6698eda14cbcSMatt Macy 		if (dsl_scan_resilvering(spa_get_dsl(spa)))
6699eda14cbcSMatt Macy 			return (spa_vdev_exit(spa, NULL, txg,
6700eda14cbcSMatt Macy 			    ZFS_ERR_RESILVER_IN_PROGRESS));
6701eda14cbcSMatt Macy 	} else {
6702eda14cbcSMatt Macy 		if (vdev_rebuild_active(rvd))
6703eda14cbcSMatt Macy 			return (spa_vdev_exit(spa, NULL, txg,
6704eda14cbcSMatt Macy 			    ZFS_ERR_REBUILD_IN_PROGRESS));
6705eda14cbcSMatt Macy 	}
6706eda14cbcSMatt Macy 
6707eda14cbcSMatt Macy 	if (spa->spa_vdev_removal != NULL)
6708eda14cbcSMatt Macy 		return (spa_vdev_exit(spa, NULL, txg, EBUSY));
6709eda14cbcSMatt Macy 
6710eda14cbcSMatt Macy 	if (oldvd == NULL)
6711eda14cbcSMatt Macy 		return (spa_vdev_exit(spa, NULL, txg, ENODEV));
6712eda14cbcSMatt Macy 
6713eda14cbcSMatt Macy 	if (!oldvd->vdev_ops->vdev_op_leaf)
6714eda14cbcSMatt Macy 		return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
6715eda14cbcSMatt Macy 
6716eda14cbcSMatt Macy 	pvd = oldvd->vdev_parent;
6717eda14cbcSMatt Macy 
6718eda14cbcSMatt Macy 	if ((error = spa_config_parse(spa, &newrootvd, nvroot, NULL, 0,
6719eda14cbcSMatt Macy 	    VDEV_ALLOC_ATTACH)) != 0)
6720eda14cbcSMatt Macy 		return (spa_vdev_exit(spa, NULL, txg, EINVAL));
6721eda14cbcSMatt Macy 
6722eda14cbcSMatt Macy 	if (newrootvd->vdev_children != 1)
6723eda14cbcSMatt Macy 		return (spa_vdev_exit(spa, newrootvd, txg, EINVAL));
6724eda14cbcSMatt Macy 
6725eda14cbcSMatt Macy 	newvd = newrootvd->vdev_child[0];
6726eda14cbcSMatt Macy 
6727eda14cbcSMatt Macy 	if (!newvd->vdev_ops->vdev_op_leaf)
6728eda14cbcSMatt Macy 		return (spa_vdev_exit(spa, newrootvd, txg, EINVAL));
6729eda14cbcSMatt Macy 
6730eda14cbcSMatt Macy 	if ((error = vdev_create(newrootvd, txg, replacing)) != 0)
6731eda14cbcSMatt Macy 		return (spa_vdev_exit(spa, newrootvd, txg, error));
6732eda14cbcSMatt Macy 
6733eda14cbcSMatt Macy 	/*
6734eda14cbcSMatt Macy 	 * Spares can't replace logs
6735eda14cbcSMatt Macy 	 */
6736eda14cbcSMatt Macy 	if (oldvd->vdev_top->vdev_islog && newvd->vdev_isspare)
6737eda14cbcSMatt Macy 		return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
6738eda14cbcSMatt Macy 
67397877fdebSMatt Macy 	/*
67407877fdebSMatt Macy 	 * A dRAID spare can only replace a child of its parent dRAID vdev.
67417877fdebSMatt Macy 	 */
67427877fdebSMatt Macy 	if (newvd->vdev_ops == &vdev_draid_spare_ops &&
67437877fdebSMatt Macy 	    oldvd->vdev_top != vdev_draid_spare_get_parent(newvd)) {
67447877fdebSMatt Macy 		return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
67457877fdebSMatt Macy 	}
67467877fdebSMatt Macy 
6747eda14cbcSMatt Macy 	if (rebuild) {
6748eda14cbcSMatt Macy 		/*
67497877fdebSMatt Macy 		 * For rebuilds, the top vdev must support reconstruction
6750eda14cbcSMatt Macy 		 * using only space maps.  This means the only allowable
67517877fdebSMatt Macy 		 * vdev types are the root vdev, a mirror, or dRAID.
6752eda14cbcSMatt Macy 		 */
67537877fdebSMatt Macy 		tvd = pvd;
67547877fdebSMatt Macy 		if (pvd->vdev_top != NULL)
67557877fdebSMatt Macy 			tvd = pvd->vdev_top;
67567877fdebSMatt Macy 
67577877fdebSMatt Macy 		if (tvd->vdev_ops != &vdev_mirror_ops &&
67587877fdebSMatt Macy 		    tvd->vdev_ops != &vdev_root_ops &&
67597877fdebSMatt Macy 		    tvd->vdev_ops != &vdev_draid_ops) {
6760eda14cbcSMatt Macy 			return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
6761eda14cbcSMatt Macy 		}
6762eda14cbcSMatt Macy 	}
6763eda14cbcSMatt Macy 
6764eda14cbcSMatt Macy 	if (!replacing) {
6765eda14cbcSMatt Macy 		/*
6766eda14cbcSMatt Macy 		 * For attach, the only allowable parent is a mirror or the root
6767eda14cbcSMatt Macy 		 * vdev.
6768eda14cbcSMatt Macy 		 */
6769eda14cbcSMatt Macy 		if (pvd->vdev_ops != &vdev_mirror_ops &&
6770eda14cbcSMatt Macy 		    pvd->vdev_ops != &vdev_root_ops)
6771eda14cbcSMatt Macy 			return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
6772eda14cbcSMatt Macy 
6773eda14cbcSMatt Macy 		pvops = &vdev_mirror_ops;
6774eda14cbcSMatt Macy 	} else {
6775eda14cbcSMatt Macy 		/*
6776eda14cbcSMatt Macy 		 * Active hot spares can only be replaced by inactive hot
6777eda14cbcSMatt Macy 		 * spares.
6778eda14cbcSMatt Macy 		 */
6779eda14cbcSMatt Macy 		if (pvd->vdev_ops == &vdev_spare_ops &&
6780eda14cbcSMatt Macy 		    oldvd->vdev_isspare &&
6781eda14cbcSMatt Macy 		    !spa_has_spare(spa, newvd->vdev_guid))
6782eda14cbcSMatt Macy 			return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
6783eda14cbcSMatt Macy 
6784eda14cbcSMatt Macy 		/*
6785eda14cbcSMatt Macy 		 * If the source is a hot spare, and the parent isn't already a
6786eda14cbcSMatt Macy 		 * spare, then we want to create a new hot spare.  Otherwise, we
6787eda14cbcSMatt Macy 		 * want to create a replacing vdev.  The user is not allowed to
6788eda14cbcSMatt Macy 		 * attach to a spared vdev child unless the 'isspare' state is
6789eda14cbcSMatt Macy 		 * the same (spare replaces spare, non-spare replaces
6790eda14cbcSMatt Macy 		 * non-spare).
6791eda14cbcSMatt Macy 		 */
6792eda14cbcSMatt Macy 		if (pvd->vdev_ops == &vdev_replacing_ops &&
6793eda14cbcSMatt Macy 		    spa_version(spa) < SPA_VERSION_MULTI_REPLACE) {
6794eda14cbcSMatt Macy 			return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
6795eda14cbcSMatt Macy 		} else if (pvd->vdev_ops == &vdev_spare_ops &&
6796eda14cbcSMatt Macy 		    newvd->vdev_isspare != oldvd->vdev_isspare) {
6797eda14cbcSMatt Macy 			return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
6798eda14cbcSMatt Macy 		}
6799eda14cbcSMatt Macy 
6800eda14cbcSMatt Macy 		if (newvd->vdev_isspare)
6801eda14cbcSMatt Macy 			pvops = &vdev_spare_ops;
6802eda14cbcSMatt Macy 		else
6803eda14cbcSMatt Macy 			pvops = &vdev_replacing_ops;
6804eda14cbcSMatt Macy 	}
6805eda14cbcSMatt Macy 
6806eda14cbcSMatt Macy 	/*
6807eda14cbcSMatt Macy 	 * Make sure the new device is big enough.
6808eda14cbcSMatt Macy 	 */
6809eda14cbcSMatt Macy 	if (newvd->vdev_asize < vdev_get_min_asize(oldvd))
6810eda14cbcSMatt Macy 		return (spa_vdev_exit(spa, newrootvd, txg, EOVERFLOW));
6811eda14cbcSMatt Macy 
6812eda14cbcSMatt Macy 	/*
6813eda14cbcSMatt Macy 	 * The new device cannot have a higher alignment requirement
6814eda14cbcSMatt Macy 	 * than the top-level vdev.
6815eda14cbcSMatt Macy 	 */
6816eda14cbcSMatt Macy 	if (newvd->vdev_ashift > oldvd->vdev_top->vdev_ashift)
6817eda14cbcSMatt Macy 		return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
6818eda14cbcSMatt Macy 
6819eda14cbcSMatt Macy 	/*
6820eda14cbcSMatt Macy 	 * If this is an in-place replacement, update oldvd's path and devid
6821eda14cbcSMatt Macy 	 * to make it distinguishable from newvd, and unopenable from now on.
6822eda14cbcSMatt Macy 	 */
6823eda14cbcSMatt Macy 	if (strcmp(oldvd->vdev_path, newvd->vdev_path) == 0) {
6824eda14cbcSMatt Macy 		spa_strfree(oldvd->vdev_path);
6825eda14cbcSMatt Macy 		oldvd->vdev_path = kmem_alloc(strlen(newvd->vdev_path) + 5,
6826eda14cbcSMatt Macy 		    KM_SLEEP);
6827eda14cbcSMatt Macy 		(void) snprintf(oldvd->vdev_path, strlen(newvd->vdev_path) + 5,
6828eda14cbcSMatt Macy 		    "%s/%s", newvd->vdev_path, "old");
6829eda14cbcSMatt Macy 		if (oldvd->vdev_devid != NULL) {
6830eda14cbcSMatt Macy 			spa_strfree(oldvd->vdev_devid);
6831eda14cbcSMatt Macy 			oldvd->vdev_devid = NULL;
6832eda14cbcSMatt Macy 		}
6833eda14cbcSMatt Macy 	}
6834eda14cbcSMatt Macy 
6835eda14cbcSMatt Macy 	/*
6836eda14cbcSMatt Macy 	 * If the parent is not a mirror, or if we're replacing, insert the new
6837eda14cbcSMatt Macy 	 * mirror/replacing/spare vdev above oldvd.
6838eda14cbcSMatt Macy 	 */
6839eda14cbcSMatt Macy 	if (pvd->vdev_ops != pvops)
6840eda14cbcSMatt Macy 		pvd = vdev_add_parent(oldvd, pvops);
6841eda14cbcSMatt Macy 
6842eda14cbcSMatt Macy 	ASSERT(pvd->vdev_top->vdev_parent == rvd);
6843eda14cbcSMatt Macy 	ASSERT(pvd->vdev_ops == pvops);
6844eda14cbcSMatt Macy 	ASSERT(oldvd->vdev_parent == pvd);
6845eda14cbcSMatt Macy 
6846eda14cbcSMatt Macy 	/*
6847eda14cbcSMatt Macy 	 * Extract the new device from its root and add it to pvd.
6848eda14cbcSMatt Macy 	 */
6849eda14cbcSMatt Macy 	vdev_remove_child(newrootvd, newvd);
6850eda14cbcSMatt Macy 	newvd->vdev_id = pvd->vdev_children;
6851eda14cbcSMatt Macy 	newvd->vdev_crtxg = oldvd->vdev_crtxg;
6852eda14cbcSMatt Macy 	vdev_add_child(pvd, newvd);
6853eda14cbcSMatt Macy 
6854eda14cbcSMatt Macy 	/*
6855eda14cbcSMatt Macy 	 * Reevaluate the parent vdev state.
6856eda14cbcSMatt Macy 	 */
6857eda14cbcSMatt Macy 	vdev_propagate_state(pvd);
6858eda14cbcSMatt Macy 
6859eda14cbcSMatt Macy 	tvd = newvd->vdev_top;
6860eda14cbcSMatt Macy 	ASSERT(pvd->vdev_top == tvd);
6861eda14cbcSMatt Macy 	ASSERT(tvd->vdev_parent == rvd);
6862eda14cbcSMatt Macy 
6863eda14cbcSMatt Macy 	vdev_config_dirty(tvd);
6864eda14cbcSMatt Macy 
6865eda14cbcSMatt Macy 	/*
6866eda14cbcSMatt Macy 	 * Set newvd's DTL to [TXG_INITIAL, dtl_max_txg) so that we account
6867eda14cbcSMatt Macy 	 * for any dmu_sync-ed blocks.  It will propagate upward when
6868eda14cbcSMatt Macy 	 * spa_vdev_exit() calls vdev_dtl_reassess().
6869eda14cbcSMatt Macy 	 */
6870eda14cbcSMatt Macy 	dtl_max_txg = txg + TXG_CONCURRENT_STATES;
6871eda14cbcSMatt Macy 
6872eda14cbcSMatt Macy 	vdev_dtl_dirty(newvd, DTL_MISSING,
6873eda14cbcSMatt Macy 	    TXG_INITIAL, dtl_max_txg - TXG_INITIAL);
6874eda14cbcSMatt Macy 
6875eda14cbcSMatt Macy 	if (newvd->vdev_isspare) {
6876eda14cbcSMatt Macy 		spa_spare_activate(newvd);
6877eda14cbcSMatt Macy 		spa_event_notify(spa, newvd, NULL, ESC_ZFS_VDEV_SPARE);
6878eda14cbcSMatt Macy 	}
6879eda14cbcSMatt Macy 
6880eda14cbcSMatt Macy 	oldvdpath = spa_strdup(oldvd->vdev_path);
6881eda14cbcSMatt Macy 	newvdpath = spa_strdup(newvd->vdev_path);
6882eda14cbcSMatt Macy 	newvd_isspare = newvd->vdev_isspare;
6883eda14cbcSMatt Macy 
6884eda14cbcSMatt Macy 	/*
6885eda14cbcSMatt Macy 	 * Mark newvd's DTL dirty in this txg.
6886eda14cbcSMatt Macy 	 */
6887eda14cbcSMatt Macy 	vdev_dirty(tvd, VDD_DTL, newvd, txg);
6888eda14cbcSMatt Macy 
6889eda14cbcSMatt Macy 	/*
6890eda14cbcSMatt Macy 	 * Schedule the resilver or rebuild to restart in the future. We do
6891eda14cbcSMatt Macy 	 * this to ensure that dmu_sync-ed blocks have been stitched into the
6892eda14cbcSMatt Macy 	 * respective datasets.
6893eda14cbcSMatt Macy 	 */
6894eda14cbcSMatt Macy 	if (rebuild) {
6895eda14cbcSMatt Macy 		newvd->vdev_rebuild_txg = txg;
6896eda14cbcSMatt Macy 
6897eda14cbcSMatt Macy 		vdev_rebuild(tvd);
6898eda14cbcSMatt Macy 	} else {
6899eda14cbcSMatt Macy 		newvd->vdev_resilver_txg = txg;
6900eda14cbcSMatt Macy 
6901eda14cbcSMatt Macy 		if (dsl_scan_resilvering(spa_get_dsl(spa)) &&
6902eda14cbcSMatt Macy 		    spa_feature_is_enabled(spa, SPA_FEATURE_RESILVER_DEFER)) {
6903eda14cbcSMatt Macy 			vdev_defer_resilver(newvd);
6904eda14cbcSMatt Macy 		} else {
6905eda14cbcSMatt Macy 			dsl_scan_restart_resilver(spa->spa_dsl_pool,
6906eda14cbcSMatt Macy 			    dtl_max_txg);
6907eda14cbcSMatt Macy 		}
6908eda14cbcSMatt Macy 	}
6909eda14cbcSMatt Macy 
6910eda14cbcSMatt Macy 	if (spa->spa_bootfs)
6911eda14cbcSMatt Macy 		spa_event_notify(spa, newvd, NULL, ESC_ZFS_BOOTFS_VDEV_ATTACH);
6912eda14cbcSMatt Macy 
6913eda14cbcSMatt Macy 	spa_event_notify(spa, newvd, NULL, ESC_ZFS_VDEV_ATTACH);
6914eda14cbcSMatt Macy 
6915eda14cbcSMatt Macy 	/*
6916eda14cbcSMatt Macy 	 * Commit the config
6917eda14cbcSMatt Macy 	 */
6918eda14cbcSMatt Macy 	(void) spa_vdev_exit(spa, newrootvd, dtl_max_txg, 0);
6919eda14cbcSMatt Macy 
6920eda14cbcSMatt Macy 	spa_history_log_internal(spa, "vdev attach", NULL,
6921eda14cbcSMatt Macy 	    "%s vdev=%s %s vdev=%s",
6922eda14cbcSMatt Macy 	    replacing && newvd_isspare ? "spare in" :
6923eda14cbcSMatt Macy 	    replacing ? "replace" : "attach", newvdpath,
6924eda14cbcSMatt Macy 	    replacing ? "for" : "to", oldvdpath);
6925eda14cbcSMatt Macy 
6926eda14cbcSMatt Macy 	spa_strfree(oldvdpath);
6927eda14cbcSMatt Macy 	spa_strfree(newvdpath);
6928eda14cbcSMatt Macy 
6929eda14cbcSMatt Macy 	return (0);
6930eda14cbcSMatt Macy }
6931eda14cbcSMatt Macy 
6932eda14cbcSMatt Macy /*
6933eda14cbcSMatt Macy  * Detach a device from a mirror or replacing vdev.
6934eda14cbcSMatt Macy  *
6935eda14cbcSMatt Macy  * If 'replace_done' is specified, only detach if the parent
6936eda14cbcSMatt Macy  * is a replacing vdev.
6937eda14cbcSMatt Macy  */
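/*
 * A minimal usage sketch, assuming the caller already holds the leaf vdev's
 * guid and, optionally, its parent's guid (pass pguid == 0 to skip the
 * parent/child check described below):
 *
 *	error = spa_vdev_detach(spa, guid, pguid, B_FALSE);  explicit detach
 *	error = spa_vdev_detach(spa, guid, pguid, B_TRUE);   only when completing a replacement
 */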
6938eda14cbcSMatt Macy int
6939eda14cbcSMatt Macy spa_vdev_detach(spa_t *spa, uint64_t guid, uint64_t pguid, int replace_done)
6940eda14cbcSMatt Macy {
6941eda14cbcSMatt Macy 	uint64_t txg;
6942eda14cbcSMatt Macy 	int error;
6943eda14cbcSMatt Macy 	vdev_t *rvd __maybe_unused = spa->spa_root_vdev;
6944eda14cbcSMatt Macy 	vdev_t *vd, *pvd, *cvd, *tvd;
6945eda14cbcSMatt Macy 	boolean_t unspare = B_FALSE;
6946eda14cbcSMatt Macy 	uint64_t unspare_guid = 0;
6947eda14cbcSMatt Macy 	char *vdpath;
6948eda14cbcSMatt Macy 
6949eda14cbcSMatt Macy 	ASSERT(spa_writeable(spa));
6950eda14cbcSMatt Macy 
6951eda14cbcSMatt Macy 	txg = spa_vdev_detach_enter(spa, guid);
6952eda14cbcSMatt Macy 
6953eda14cbcSMatt Macy 	vd = spa_lookup_by_guid(spa, guid, B_FALSE);
6954eda14cbcSMatt Macy 
6955eda14cbcSMatt Macy 	/*
6956eda14cbcSMatt Macy 	 * Besides being called directly from userland through the
6957eda14cbcSMatt Macy 	 * ioctl interface, spa_vdev_detach() can be potentially called
6958eda14cbcSMatt Macy 	 * at the end of spa_vdev_resilver_done().
6959eda14cbcSMatt Macy 	 *
6960eda14cbcSMatt Macy 	 * In the regular case, when we have a checkpoint this shouldn't
6961eda14cbcSMatt Macy 	 * happen as we never empty the DTLs of a vdev during the scrub
6962eda14cbcSMatt Macy 	 * [see comment in dsl_scan_done()]. Thus spa_vdev_resilver_done()
6963eda14cbcSMatt Macy 	 * should never get here when we have a checkpoint.
6964eda14cbcSMatt Macy 	 *
6965eda14cbcSMatt Macy 	 * That said, even in the case where we checkpoint the pool exactly
6966eda14cbcSMatt Macy 	 * as spa_vdev_resilver_done() calls this function, everything
6967eda14cbcSMatt Macy 	 * should be fine as the resilver will return right away.
6968eda14cbcSMatt Macy 	 */
6969eda14cbcSMatt Macy 	ASSERT(MUTEX_HELD(&spa_namespace_lock));
6970eda14cbcSMatt Macy 	if (spa_feature_is_active(spa, SPA_FEATURE_POOL_CHECKPOINT)) {
6971eda14cbcSMatt Macy 		error = (spa_has_checkpoint(spa)) ?
6972eda14cbcSMatt Macy 		    ZFS_ERR_CHECKPOINT_EXISTS : ZFS_ERR_DISCARDING_CHECKPOINT;
6973eda14cbcSMatt Macy 		return (spa_vdev_exit(spa, NULL, txg, error));
6974eda14cbcSMatt Macy 	}
6975eda14cbcSMatt Macy 
6976eda14cbcSMatt Macy 	if (vd == NULL)
6977eda14cbcSMatt Macy 		return (spa_vdev_exit(spa, NULL, txg, ENODEV));
6978eda14cbcSMatt Macy 
6979eda14cbcSMatt Macy 	if (!vd->vdev_ops->vdev_op_leaf)
6980eda14cbcSMatt Macy 		return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
6981eda14cbcSMatt Macy 
6982eda14cbcSMatt Macy 	pvd = vd->vdev_parent;
6983eda14cbcSMatt Macy 
6984eda14cbcSMatt Macy 	/*
6985eda14cbcSMatt Macy 	 * If the parent/child relationship is not as expected, don't do it.
6986eda14cbcSMatt Macy 	 * Consider M(A,R(B,C)) -- that is, a mirror of A with a replacing
6987eda14cbcSMatt Macy 	 * vdev that's replacing B with C.  The user's intent in replacing
6988eda14cbcSMatt Macy 	 * is to go from M(A,B) to M(A,C).  If the user decides to cancel
6989eda14cbcSMatt Macy 	 * the replace by detaching C, the expected behavior is to end up
6990eda14cbcSMatt Macy 	 * M(A,B).  But suppose that right after deciding to detach C,
6991eda14cbcSMatt Macy 	 * the replacement of B completes.  We would have M(A,C), and then
6992eda14cbcSMatt Macy 	 * ask to detach C, which would leave us with just A -- not what
6993eda14cbcSMatt Macy 	 * the user wanted.  To prevent this, we make sure that the
6994eda14cbcSMatt Macy 	 * parent/child relationship hasn't changed -- in this example,
6995eda14cbcSMatt Macy 	 * that C's parent is still the replacing vdev R.
6996eda14cbcSMatt Macy 	 */
6997eda14cbcSMatt Macy 	if (pvd->vdev_guid != pguid && pguid != 0)
6998eda14cbcSMatt Macy 		return (spa_vdev_exit(spa, NULL, txg, EBUSY));
6999eda14cbcSMatt Macy 
7000eda14cbcSMatt Macy 	/*
7001eda14cbcSMatt Macy 	 * With 'replace_done', the parent must be 'replacing' or 'spare'.
7002eda14cbcSMatt Macy 	 */
7003eda14cbcSMatt Macy 	if (replace_done && pvd->vdev_ops != &vdev_replacing_ops &&
7004eda14cbcSMatt Macy 	    pvd->vdev_ops != &vdev_spare_ops)
7005eda14cbcSMatt Macy 		return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
7006eda14cbcSMatt Macy 
7007eda14cbcSMatt Macy 	ASSERT(pvd->vdev_ops != &vdev_spare_ops ||
7008eda14cbcSMatt Macy 	    spa_version(spa) >= SPA_VERSION_SPARES);
7009eda14cbcSMatt Macy 
7010eda14cbcSMatt Macy 	/*
7011eda14cbcSMatt Macy 	 * Only mirror, replacing, and spare vdevs support detach.
7012eda14cbcSMatt Macy 	 */
7013eda14cbcSMatt Macy 	if (pvd->vdev_ops != &vdev_replacing_ops &&
7014eda14cbcSMatt Macy 	    pvd->vdev_ops != &vdev_mirror_ops &&
7015eda14cbcSMatt Macy 	    pvd->vdev_ops != &vdev_spare_ops)
7016eda14cbcSMatt Macy 		return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
7017eda14cbcSMatt Macy 
7018eda14cbcSMatt Macy 	/*
7019eda14cbcSMatt Macy 	 * If this device has the only valid copy of some data,
7020eda14cbcSMatt Macy 	 * we cannot safely detach it.
7021eda14cbcSMatt Macy 	 */
7022eda14cbcSMatt Macy 	if (vdev_dtl_required(vd))
7023eda14cbcSMatt Macy 		return (spa_vdev_exit(spa, NULL, txg, EBUSY));
7024eda14cbcSMatt Macy 
7025eda14cbcSMatt Macy 	ASSERT(pvd->vdev_children >= 2);
7026eda14cbcSMatt Macy 
7027eda14cbcSMatt Macy 	/*
7028eda14cbcSMatt Macy 	 * If we are detaching the second disk from a replacing vdev, then
7029eda14cbcSMatt Macy 	 * check to see if we changed the original vdev's path to have "/old"
7030eda14cbcSMatt Macy 	 * at the end in spa_vdev_attach().  If so, undo that change now.
7031eda14cbcSMatt Macy 	 */
7032eda14cbcSMatt Macy 	if (pvd->vdev_ops == &vdev_replacing_ops && vd->vdev_id > 0 &&
7033eda14cbcSMatt Macy 	    vd->vdev_path != NULL) {
7034eda14cbcSMatt Macy 		size_t len = strlen(vd->vdev_path);
7035eda14cbcSMatt Macy 
7036eda14cbcSMatt Macy 		for (int c = 0; c < pvd->vdev_children; c++) {
7037eda14cbcSMatt Macy 			cvd = pvd->vdev_child[c];
7038eda14cbcSMatt Macy 
7039eda14cbcSMatt Macy 			if (cvd == vd || cvd->vdev_path == NULL)
7040eda14cbcSMatt Macy 				continue;
7041eda14cbcSMatt Macy 
7042eda14cbcSMatt Macy 			if (strncmp(cvd->vdev_path, vd->vdev_path, len) == 0 &&
7043eda14cbcSMatt Macy 			    strcmp(cvd->vdev_path + len, "/old") == 0) {
7044eda14cbcSMatt Macy 				spa_strfree(cvd->vdev_path);
7045eda14cbcSMatt Macy 				cvd->vdev_path = spa_strdup(vd->vdev_path);
7046eda14cbcSMatt Macy 				break;
7047eda14cbcSMatt Macy 			}
7048eda14cbcSMatt Macy 		}
7049eda14cbcSMatt Macy 	}
7050eda14cbcSMatt Macy 
7051eda14cbcSMatt Macy 	/*
70527877fdebSMatt Macy 	 * If we are detaching the original disk from a normal spare, then it
70537877fdebSMatt Macy 	 * implies that the spare should become a real disk, and be removed
70547877fdebSMatt Macy 	 * from the active spare list for the pool.  dRAID spares on the
70557877fdebSMatt Macy 	 * other hand are coupled to the pool and thus should never be removed
70567877fdebSMatt Macy 	 * from the spares list.
7057eda14cbcSMatt Macy 	 */
70587877fdebSMatt Macy 	if (pvd->vdev_ops == &vdev_spare_ops && vd->vdev_id == 0) {
70597877fdebSMatt Macy 		vdev_t *last_cvd = pvd->vdev_child[pvd->vdev_children - 1];
70607877fdebSMatt Macy 
70617877fdebSMatt Macy 		if (last_cvd->vdev_isspare &&
70627877fdebSMatt Macy 		    last_cvd->vdev_ops != &vdev_draid_spare_ops) {
7063eda14cbcSMatt Macy 			unspare = B_TRUE;
70647877fdebSMatt Macy 		}
70657877fdebSMatt Macy 	}
7066eda14cbcSMatt Macy 
7067eda14cbcSMatt Macy 	/*
7068eda14cbcSMatt Macy 	 * Erase the disk labels so the disk can be used for other things.
7069eda14cbcSMatt Macy 	 * This must be done after all other error cases are handled,
7070eda14cbcSMatt Macy 	 * but before we disembowel vd (so we can still do I/O to it).
7071eda14cbcSMatt Macy 	 * But if we can't do it, don't treat the error as fatal --
7072eda14cbcSMatt Macy 	 * it may be that the unwritability of the disk is the reason
7073eda14cbcSMatt Macy 	 * it's being detached!
7074eda14cbcSMatt Macy 	 */
7075eda14cbcSMatt Macy 	error = vdev_label_init(vd, 0, VDEV_LABEL_REMOVE);
7076eda14cbcSMatt Macy 
7077eda14cbcSMatt Macy 	/*
7078eda14cbcSMatt Macy 	 * Remove vd from its parent and compact the parent's children.
7079eda14cbcSMatt Macy 	 */
7080eda14cbcSMatt Macy 	vdev_remove_child(pvd, vd);
7081eda14cbcSMatt Macy 	vdev_compact_children(pvd);
7082eda14cbcSMatt Macy 
7083eda14cbcSMatt Macy 	/*
7084eda14cbcSMatt Macy 	 * Remember one of the remaining children so we can get tvd below.
7085eda14cbcSMatt Macy 	 */
7086eda14cbcSMatt Macy 	cvd = pvd->vdev_child[pvd->vdev_children - 1];
7087eda14cbcSMatt Macy 
7088eda14cbcSMatt Macy 	/*
7089eda14cbcSMatt Macy 	 * If we need to remove the remaining child from the list of hot spares,
7090eda14cbcSMatt Macy 	 * do it now, marking the vdev as no longer a spare in the process.
7091eda14cbcSMatt Macy 	 * We must do this before vdev_remove_parent(), because that can
7092eda14cbcSMatt Macy 	 * change the GUID if it creates a new toplevel GUID.  For a similar
7093eda14cbcSMatt Macy 	 * reason, we must remove the spare now, in the same txg as the detach;
7094eda14cbcSMatt Macy 	 * otherwise someone could attach a new sibling, change the GUID, and
7095eda14cbcSMatt Macy 	 * the subsequent attempt to spa_vdev_remove(unspare_guid) would fail.
7096eda14cbcSMatt Macy 	 */
7097eda14cbcSMatt Macy 	if (unspare) {
7098eda14cbcSMatt Macy 		ASSERT(cvd->vdev_isspare);
7099eda14cbcSMatt Macy 		spa_spare_remove(cvd);
7100eda14cbcSMatt Macy 		unspare_guid = cvd->vdev_guid;
7101eda14cbcSMatt Macy 		(void) spa_vdev_remove(spa, unspare_guid, B_TRUE);
7102eda14cbcSMatt Macy 		cvd->vdev_unspare = B_TRUE;
7103eda14cbcSMatt Macy 	}
7104eda14cbcSMatt Macy 
7105eda14cbcSMatt Macy 	/*
7106eda14cbcSMatt Macy 	 * If the parent mirror/replacing vdev only has one child,
7107eda14cbcSMatt Macy 	 * the parent is no longer needed.  Remove it from the tree.
7108eda14cbcSMatt Macy 	 */
7109eda14cbcSMatt Macy 	if (pvd->vdev_children == 1) {
7110eda14cbcSMatt Macy 		if (pvd->vdev_ops == &vdev_spare_ops)
7111eda14cbcSMatt Macy 			cvd->vdev_unspare = B_FALSE;
7112eda14cbcSMatt Macy 		vdev_remove_parent(cvd);
7113eda14cbcSMatt Macy 	}
7114eda14cbcSMatt Macy 
7115eda14cbcSMatt Macy 	/*
7116eda14cbcSMatt Macy 	 * We don't set tvd until now because the parent we just removed
7117eda14cbcSMatt Macy 	 * may have been the previous top-level vdev.
7118eda14cbcSMatt Macy 	 */
7119eda14cbcSMatt Macy 	tvd = cvd->vdev_top;
7120eda14cbcSMatt Macy 	ASSERT(tvd->vdev_parent == rvd);
7121eda14cbcSMatt Macy 
7122eda14cbcSMatt Macy 	/*
7123eda14cbcSMatt Macy 	 * Reevaluate the parent vdev state.
7124eda14cbcSMatt Macy 	 */
7125eda14cbcSMatt Macy 	vdev_propagate_state(cvd);
7126eda14cbcSMatt Macy 
7127eda14cbcSMatt Macy 	/*
7128eda14cbcSMatt Macy 	 * If the 'autoexpand' property is set on the pool then automatically
7129eda14cbcSMatt Macy 	 * try to expand the size of the pool. For example if the device we
7130eda14cbcSMatt Macy 	 * just detached was smaller than the others, it may be possible to
7131eda14cbcSMatt Macy 	 * add metaslabs (i.e. grow the pool). We need to reopen the vdev
7132eda14cbcSMatt Macy 	 * first so that we can obtain the updated sizes of the leaf vdevs.
7133eda14cbcSMatt Macy 	 */
7134eda14cbcSMatt Macy 	if (spa->spa_autoexpand) {
7135eda14cbcSMatt Macy 		vdev_reopen(tvd);
7136eda14cbcSMatt Macy 		vdev_expand(tvd, txg);
7137eda14cbcSMatt Macy 	}
7138eda14cbcSMatt Macy 
7139eda14cbcSMatt Macy 	vdev_config_dirty(tvd);
7140eda14cbcSMatt Macy 
7141eda14cbcSMatt Macy 	/*
7142eda14cbcSMatt Macy 	 * Mark vd's DTL as dirty in this txg.  vdev_dtl_sync() will see that
7143eda14cbcSMatt Macy 	 * vd->vdev_detached is set and free vd's DTL object in syncing context.
7144eda14cbcSMatt Macy 	 * But first make sure we're not on any *other* txg's DTL list, to
7145eda14cbcSMatt Macy 	 * prevent vd from being accessed after it's freed.
7146eda14cbcSMatt Macy 	 */
7147eda14cbcSMatt Macy 	vdpath = spa_strdup(vd->vdev_path ? vd->vdev_path : "none");
7148eda14cbcSMatt Macy 	for (int t = 0; t < TXG_SIZE; t++)
7149eda14cbcSMatt Macy 		(void) txg_list_remove_this(&tvd->vdev_dtl_list, vd, t);
7150eda14cbcSMatt Macy 	vd->vdev_detached = B_TRUE;
7151eda14cbcSMatt Macy 	vdev_dirty(tvd, VDD_DTL, vd, txg);
7152eda14cbcSMatt Macy 
7153eda14cbcSMatt Macy 	spa_event_notify(spa, vd, NULL, ESC_ZFS_VDEV_REMOVE);
7154eda14cbcSMatt Macy 	spa_notify_waiters(spa);
7155eda14cbcSMatt Macy 
7156eda14cbcSMatt Macy 	/* hang on to the spa before we release the lock */
7157eda14cbcSMatt Macy 	spa_open_ref(spa, FTAG);
7158eda14cbcSMatt Macy 
7159eda14cbcSMatt Macy 	error = spa_vdev_exit(spa, vd, txg, 0);
7160eda14cbcSMatt Macy 
7161eda14cbcSMatt Macy 	spa_history_log_internal(spa, "detach", NULL,
7162eda14cbcSMatt Macy 	    "vdev=%s", vdpath);
7163eda14cbcSMatt Macy 	spa_strfree(vdpath);
7164eda14cbcSMatt Macy 
7165eda14cbcSMatt Macy 	/*
7166eda14cbcSMatt Macy 	 * If this was the removal of the original device in a hot spare vdev,
7167eda14cbcSMatt Macy 	 * then we want to go through and remove the device from the hot spare
7168eda14cbcSMatt Macy 	 * list of every other pool.
7169eda14cbcSMatt Macy 	 */
7170eda14cbcSMatt Macy 	if (unspare) {
7171eda14cbcSMatt Macy 		spa_t *altspa = NULL;
7172eda14cbcSMatt Macy 
7173eda14cbcSMatt Macy 		mutex_enter(&spa_namespace_lock);
7174eda14cbcSMatt Macy 		while ((altspa = spa_next(altspa)) != NULL) {
7175eda14cbcSMatt Macy 			if (altspa->spa_state != POOL_STATE_ACTIVE ||
7176eda14cbcSMatt Macy 			    altspa == spa)
7177eda14cbcSMatt Macy 				continue;
7178eda14cbcSMatt Macy 
7179eda14cbcSMatt Macy 			spa_open_ref(altspa, FTAG);
7180eda14cbcSMatt Macy 			mutex_exit(&spa_namespace_lock);
7181eda14cbcSMatt Macy 			(void) spa_vdev_remove(altspa, unspare_guid, B_TRUE);
7182eda14cbcSMatt Macy 			mutex_enter(&spa_namespace_lock);
7183eda14cbcSMatt Macy 			spa_close(altspa, FTAG);
7184eda14cbcSMatt Macy 		}
7185eda14cbcSMatt Macy 		mutex_exit(&spa_namespace_lock);
7186eda14cbcSMatt Macy 
7187eda14cbcSMatt Macy 		/* search the rest of the vdevs for spares to remove */
7188eda14cbcSMatt Macy 		spa_vdev_resilver_done(spa);
7189eda14cbcSMatt Macy 	}
7190eda14cbcSMatt Macy 
7191eda14cbcSMatt Macy 	/* all done with the spa; OK to release */
7192eda14cbcSMatt Macy 	mutex_enter(&spa_namespace_lock);
7193eda14cbcSMatt Macy 	spa_close(spa, FTAG);
7194eda14cbcSMatt Macy 	mutex_exit(&spa_namespace_lock);
7195eda14cbcSMatt Macy 
7196eda14cbcSMatt Macy 	return (error);
7197eda14cbcSMatt Macy }
7198eda14cbcSMatt Macy 
7199eda14cbcSMatt Macy static int
7200eda14cbcSMatt Macy spa_vdev_initialize_impl(spa_t *spa, uint64_t guid, uint64_t cmd_type,
7201eda14cbcSMatt Macy     list_t *vd_list)
7202eda14cbcSMatt Macy {
7203eda14cbcSMatt Macy 	ASSERT(MUTEX_HELD(&spa_namespace_lock));
7204eda14cbcSMatt Macy 
7205eda14cbcSMatt Macy 	spa_config_enter(spa, SCL_CONFIG | SCL_STATE, FTAG, RW_READER);
7206eda14cbcSMatt Macy 
7207eda14cbcSMatt Macy 	/* Look up vdev and ensure it's a leaf. */
7208eda14cbcSMatt Macy 	vdev_t *vd = spa_lookup_by_guid(spa, guid, B_FALSE);
7209eda14cbcSMatt Macy 	if (vd == NULL || vd->vdev_detached) {
7210eda14cbcSMatt Macy 		spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
7211eda14cbcSMatt Macy 		return (SET_ERROR(ENODEV));
7212eda14cbcSMatt Macy 	} else if (!vd->vdev_ops->vdev_op_leaf || !vdev_is_concrete(vd)) {
7213eda14cbcSMatt Macy 		spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
7214eda14cbcSMatt Macy 		return (SET_ERROR(EINVAL));
7215eda14cbcSMatt Macy 	} else if (!vdev_writeable(vd)) {
7216eda14cbcSMatt Macy 		spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
7217eda14cbcSMatt Macy 		return (SET_ERROR(EROFS));
7218eda14cbcSMatt Macy 	}
7219eda14cbcSMatt Macy 	mutex_enter(&vd->vdev_initialize_lock);
7220eda14cbcSMatt Macy 	spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
7221eda14cbcSMatt Macy 
7222eda14cbcSMatt Macy 	/*
7223eda14cbcSMatt Macy 	 * When we activate an initialize action, we check to see
7224eda14cbcSMatt Macy 	 * if the vdev_initialize_thread is NULL. We do this instead
7225eda14cbcSMatt Macy 	 * of using the vdev_initialize_state since there might be
7226eda14cbcSMatt Macy 	 * a previous initialization process which has completed but
7227eda14cbcSMatt Macy 	 * whose thread has not yet exited.
7228eda14cbcSMatt Macy 	 */
7229eda14cbcSMatt Macy 	if (cmd_type == POOL_INITIALIZE_START &&
7230eda14cbcSMatt Macy 	    (vd->vdev_initialize_thread != NULL ||
7231eda14cbcSMatt Macy 	    vd->vdev_top->vdev_removing)) {
7232eda14cbcSMatt Macy 		mutex_exit(&vd->vdev_initialize_lock);
7233eda14cbcSMatt Macy 		return (SET_ERROR(EBUSY));
7234eda14cbcSMatt Macy 	} else if (cmd_type == POOL_INITIALIZE_CANCEL &&
7235eda14cbcSMatt Macy 	    (vd->vdev_initialize_state != VDEV_INITIALIZE_ACTIVE &&
7236eda14cbcSMatt Macy 	    vd->vdev_initialize_state != VDEV_INITIALIZE_SUSPENDED)) {
7237eda14cbcSMatt Macy 		mutex_exit(&vd->vdev_initialize_lock);
7238eda14cbcSMatt Macy 		return (SET_ERROR(ESRCH));
7239eda14cbcSMatt Macy 	} else if (cmd_type == POOL_INITIALIZE_SUSPEND &&
7240eda14cbcSMatt Macy 	    vd->vdev_initialize_state != VDEV_INITIALIZE_ACTIVE) {
7241eda14cbcSMatt Macy 		mutex_exit(&vd->vdev_initialize_lock);
7242eda14cbcSMatt Macy 		return (SET_ERROR(ESRCH));
7243eda14cbcSMatt Macy 	}
7244eda14cbcSMatt Macy 
7245eda14cbcSMatt Macy 	switch (cmd_type) {
7246eda14cbcSMatt Macy 	case POOL_INITIALIZE_START:
7247eda14cbcSMatt Macy 		vdev_initialize(vd);
7248eda14cbcSMatt Macy 		break;
7249eda14cbcSMatt Macy 	case POOL_INITIALIZE_CANCEL:
7250eda14cbcSMatt Macy 		vdev_initialize_stop(vd, VDEV_INITIALIZE_CANCELED, vd_list);
7251eda14cbcSMatt Macy 		break;
7252eda14cbcSMatt Macy 	case POOL_INITIALIZE_SUSPEND:
7253eda14cbcSMatt Macy 		vdev_initialize_stop(vd, VDEV_INITIALIZE_SUSPENDED, vd_list);
7254eda14cbcSMatt Macy 		break;
7255eda14cbcSMatt Macy 	default:
7256eda14cbcSMatt Macy 		panic("invalid cmd_type %llu", (unsigned long long)cmd_type);
7257eda14cbcSMatt Macy 	}
7258eda14cbcSMatt Macy 	mutex_exit(&vd->vdev_initialize_lock);
7259eda14cbcSMatt Macy 
7260eda14cbcSMatt Macy 	return (0);
7261eda14cbcSMatt Macy }
7262eda14cbcSMatt Macy 
7263eda14cbcSMatt Macy int
7264eda14cbcSMatt Macy spa_vdev_initialize(spa_t *spa, nvlist_t *nv, uint64_t cmd_type,
7265eda14cbcSMatt Macy     nvlist_t *vdev_errlist)
7266eda14cbcSMatt Macy {
7267eda14cbcSMatt Macy 	int total_errors = 0;
7268eda14cbcSMatt Macy 	list_t vd_list;
7269eda14cbcSMatt Macy 
7270eda14cbcSMatt Macy 	list_create(&vd_list, sizeof (vdev_t),
7271eda14cbcSMatt Macy 	    offsetof(vdev_t, vdev_initialize_node));
7272eda14cbcSMatt Macy 
7273eda14cbcSMatt Macy 	/*
7274eda14cbcSMatt Macy 	 * We hold the namespace lock through the whole function
7275eda14cbcSMatt Macy 	 * to prevent any changes to the pool while we're starting or
7276eda14cbcSMatt Macy 	 * stopping initialization. The config and state locks are held so that
7277eda14cbcSMatt Macy 	 * we can properly assess the vdev state before we commit to
7278eda14cbcSMatt Macy 	 * the initializing operation.
7279eda14cbcSMatt Macy 	 */
7280eda14cbcSMatt Macy 	mutex_enter(&spa_namespace_lock);
7281eda14cbcSMatt Macy 
7282eda14cbcSMatt Macy 	for (nvpair_t *pair = nvlist_next_nvpair(nv, NULL);
7283eda14cbcSMatt Macy 	    pair != NULL; pair = nvlist_next_nvpair(nv, pair)) {
7284eda14cbcSMatt Macy 		uint64_t vdev_guid = fnvpair_value_uint64(pair);
7285eda14cbcSMatt Macy 
7286eda14cbcSMatt Macy 		int error = spa_vdev_initialize_impl(spa, vdev_guid, cmd_type,
7287eda14cbcSMatt Macy 		    &vd_list);
7288eda14cbcSMatt Macy 		if (error != 0) {
7289eda14cbcSMatt Macy 			char guid_as_str[MAXNAMELEN];
7290eda14cbcSMatt Macy 
7291eda14cbcSMatt Macy 			(void) snprintf(guid_as_str, sizeof (guid_as_str),
7292eda14cbcSMatt Macy 			    "%llu", (unsigned long long)vdev_guid);
7293eda14cbcSMatt Macy 			fnvlist_add_int64(vdev_errlist, guid_as_str, error);
7294eda14cbcSMatt Macy 			total_errors++;
7295eda14cbcSMatt Macy 		}
7296eda14cbcSMatt Macy 	}
7297eda14cbcSMatt Macy 
7298eda14cbcSMatt Macy 	/* Wait for all initialize threads to stop. */
7299eda14cbcSMatt Macy 	vdev_initialize_stop_wait(spa, &vd_list);
7300eda14cbcSMatt Macy 
7301eda14cbcSMatt Macy 	/* Sync out the initializing state */
7302eda14cbcSMatt Macy 	txg_wait_synced(spa->spa_dsl_pool, 0);
7303eda14cbcSMatt Macy 	mutex_exit(&spa_namespace_lock);
7304eda14cbcSMatt Macy 
7305eda14cbcSMatt Macy 	list_destroy(&vd_list);
7306eda14cbcSMatt Macy 
7307eda14cbcSMatt Macy 	return (total_errors);
7308eda14cbcSMatt Macy }
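
/*
 * A minimal usage sketch for spa_vdev_initialize(), assuming the caller has
 * collected the guids of the leaf vdevs to operate on.  The nv list carries
 * one uint64 guid per vdev (the nvpair names only need to be unique), and
 * per-vdev failures come back in vdev_errlist keyed by guid:
 *
 *	nvlist_t *nv = fnvlist_alloc();
 *	nvlist_t *vdev_errlist = fnvlist_alloc();
 *	fnvlist_add_uint64(nv, "vdev", vdev_guid);
 *	int errors = spa_vdev_initialize(spa, nv, POOL_INITIALIZE_START,
 *	    vdev_errlist);
 *	fnvlist_free(nv);
 *	fnvlist_free(vdev_errlist);
 */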
7309eda14cbcSMatt Macy 
7310eda14cbcSMatt Macy static int
7311eda14cbcSMatt Macy spa_vdev_trim_impl(spa_t *spa, uint64_t guid, uint64_t cmd_type,
7312eda14cbcSMatt Macy     uint64_t rate, boolean_t partial, boolean_t secure, list_t *vd_list)
7313eda14cbcSMatt Macy {
7314eda14cbcSMatt Macy 	ASSERT(MUTEX_HELD(&spa_namespace_lock));
7315eda14cbcSMatt Macy 
7316eda14cbcSMatt Macy 	spa_config_enter(spa, SCL_CONFIG | SCL_STATE, FTAG, RW_READER);
7317eda14cbcSMatt Macy 
7318eda14cbcSMatt Macy 	/* Look up vdev and ensure it's a leaf. */
7319eda14cbcSMatt Macy 	vdev_t *vd = spa_lookup_by_guid(spa, guid, B_FALSE);
7320eda14cbcSMatt Macy 	if (vd == NULL || vd->vdev_detached) {
7321eda14cbcSMatt Macy 		spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
7322eda14cbcSMatt Macy 		return (SET_ERROR(ENODEV));
7323eda14cbcSMatt Macy 	} else if (!vd->vdev_ops->vdev_op_leaf || !vdev_is_concrete(vd)) {
7324eda14cbcSMatt Macy 		spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
7325eda14cbcSMatt Macy 		return (SET_ERROR(EINVAL));
7326eda14cbcSMatt Macy 	} else if (!vdev_writeable(vd)) {
7327eda14cbcSMatt Macy 		spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
7328eda14cbcSMatt Macy 		return (SET_ERROR(EROFS));
7329eda14cbcSMatt Macy 	} else if (!vd->vdev_has_trim) {
7330eda14cbcSMatt Macy 		spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
7331eda14cbcSMatt Macy 		return (SET_ERROR(EOPNOTSUPP));
7332eda14cbcSMatt Macy 	} else if (secure && !vd->vdev_has_securetrim) {
7333eda14cbcSMatt Macy 		spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
7334eda14cbcSMatt Macy 		return (SET_ERROR(EOPNOTSUPP));
7335eda14cbcSMatt Macy 	}
7336eda14cbcSMatt Macy 	mutex_enter(&vd->vdev_trim_lock);
7337eda14cbcSMatt Macy 	spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
7338eda14cbcSMatt Macy 
7339eda14cbcSMatt Macy 	/*
7340eda14cbcSMatt Macy 	 * When we activate a TRIM action, we check to see if the
7341eda14cbcSMatt Macy 	 * vdev_trim_thread is NULL. We do this instead of using the
7342eda14cbcSMatt Macy 	 * vdev_trim_state since there might be a previous TRIM process
7343eda14cbcSMatt Macy 	 * which has completed but whose thread has not yet exited.
7344eda14cbcSMatt Macy 	 */
7345eda14cbcSMatt Macy 	if (cmd_type == POOL_TRIM_START &&
7346eda14cbcSMatt Macy 	    (vd->vdev_trim_thread != NULL || vd->vdev_top->vdev_removing)) {
7347eda14cbcSMatt Macy 		mutex_exit(&vd->vdev_trim_lock);
7348eda14cbcSMatt Macy 		return (SET_ERROR(EBUSY));
7349eda14cbcSMatt Macy 	} else if (cmd_type == POOL_TRIM_CANCEL &&
7350eda14cbcSMatt Macy 	    (vd->vdev_trim_state != VDEV_TRIM_ACTIVE &&
7351eda14cbcSMatt Macy 	    vd->vdev_trim_state != VDEV_TRIM_SUSPENDED)) {
7352eda14cbcSMatt Macy 		mutex_exit(&vd->vdev_trim_lock);
7353eda14cbcSMatt Macy 		return (SET_ERROR(ESRCH));
7354eda14cbcSMatt Macy 	} else if (cmd_type == POOL_TRIM_SUSPEND &&
7355eda14cbcSMatt Macy 	    vd->vdev_trim_state != VDEV_TRIM_ACTIVE) {
7356eda14cbcSMatt Macy 		mutex_exit(&vd->vdev_trim_lock);
7357eda14cbcSMatt Macy 		return (SET_ERROR(ESRCH));
7358eda14cbcSMatt Macy 	}
7359eda14cbcSMatt Macy 
7360eda14cbcSMatt Macy 	switch (cmd_type) {
7361eda14cbcSMatt Macy 	case POOL_TRIM_START:
7362eda14cbcSMatt Macy 		vdev_trim(vd, rate, partial, secure);
7363eda14cbcSMatt Macy 		break;
7364eda14cbcSMatt Macy 	case POOL_TRIM_CANCEL:
7365eda14cbcSMatt Macy 		vdev_trim_stop(vd, VDEV_TRIM_CANCELED, vd_list);
7366eda14cbcSMatt Macy 		break;
7367eda14cbcSMatt Macy 	case POOL_TRIM_SUSPEND:
7368eda14cbcSMatt Macy 		vdev_trim_stop(vd, VDEV_TRIM_SUSPENDED, vd_list);
7369eda14cbcSMatt Macy 		break;
7370eda14cbcSMatt Macy 	default:
7371eda14cbcSMatt Macy 		panic("invalid cmd_type %llu", (unsigned long long)cmd_type);
7372eda14cbcSMatt Macy 	}
7373eda14cbcSMatt Macy 	mutex_exit(&vd->vdev_trim_lock);
7374eda14cbcSMatt Macy 
7375eda14cbcSMatt Macy 	return (0);
7376eda14cbcSMatt Macy }
7377eda14cbcSMatt Macy 
7378eda14cbcSMatt Macy /*
7379eda14cbcSMatt Macy  * Initiates a manual TRIM for the requested vdevs. This kicks off individual
7380eda14cbcSMatt Macy  * TRIM threads for each child vdev.  These threads pass over all of the free
7381eda14cbcSMatt Macy  * space in the vdev's metaslabs and issue TRIM commands for that space.
7382eda14cbcSMatt Macy  */
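/*
 * A minimal usage sketch, analogous to spa_vdev_initialize() above: nv
 * carries one uint64 guid per leaf vdev and per-vdev failures come back in
 * vdev_errlist; rate, partial and secure are forwarded to vdev_trim() for
 * each selected leaf:
 *
 *	int errors = spa_vdev_trim(spa, nv, POOL_TRIM_START, rate,
 *	    B_FALSE, B_FALSE, vdev_errlist);
 */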
7383eda14cbcSMatt Macy int
7384eda14cbcSMatt Macy spa_vdev_trim(spa_t *spa, nvlist_t *nv, uint64_t cmd_type, uint64_t rate,
7385eda14cbcSMatt Macy     boolean_t partial, boolean_t secure, nvlist_t *vdev_errlist)
7386eda14cbcSMatt Macy {
7387eda14cbcSMatt Macy 	int total_errors = 0;
7388eda14cbcSMatt Macy 	list_t vd_list;
7389eda14cbcSMatt Macy 
7390eda14cbcSMatt Macy 	list_create(&vd_list, sizeof (vdev_t),
7391eda14cbcSMatt Macy 	    offsetof(vdev_t, vdev_trim_node));
7392eda14cbcSMatt Macy 
7393eda14cbcSMatt Macy 	/*
7394eda14cbcSMatt Macy 	 * We hold the namespace lock through the whole function
7395eda14cbcSMatt Macy 	 * to prevent any changes to the pool while we're starting or
7396eda14cbcSMatt Macy 	 * stopping TRIM. The config and state locks are held so that
7397eda14cbcSMatt Macy 	 * we can properly assess the vdev state before we commit to
7398eda14cbcSMatt Macy 	 * the TRIM operation.
7399eda14cbcSMatt Macy 	 */
7400eda14cbcSMatt Macy 	mutex_enter(&spa_namespace_lock);
7401eda14cbcSMatt Macy 
7402eda14cbcSMatt Macy 	for (nvpair_t *pair = nvlist_next_nvpair(nv, NULL);
7403eda14cbcSMatt Macy 	    pair != NULL; pair = nvlist_next_nvpair(nv, pair)) {
7404eda14cbcSMatt Macy 		uint64_t vdev_guid = fnvpair_value_uint64(pair);
7405eda14cbcSMatt Macy 
7406eda14cbcSMatt Macy 		int error = spa_vdev_trim_impl(spa, vdev_guid, cmd_type,
7407eda14cbcSMatt Macy 		    rate, partial, secure, &vd_list);
7408eda14cbcSMatt Macy 		if (error != 0) {
7409eda14cbcSMatt Macy 			char guid_as_str[MAXNAMELEN];
7410eda14cbcSMatt Macy 
7411eda14cbcSMatt Macy 			(void) snprintf(guid_as_str, sizeof (guid_as_str),
7412eda14cbcSMatt Macy 			    "%llu", (unsigned long long)vdev_guid);
7413eda14cbcSMatt Macy 			fnvlist_add_int64(vdev_errlist, guid_as_str, error);
7414eda14cbcSMatt Macy 			total_errors++;
7415eda14cbcSMatt Macy 		}
7416eda14cbcSMatt Macy 	}
7417eda14cbcSMatt Macy 
7418eda14cbcSMatt Macy 	/* Wait for all TRIM threads to stop. */
7419eda14cbcSMatt Macy 	vdev_trim_stop_wait(spa, &vd_list);
7420eda14cbcSMatt Macy 
7421eda14cbcSMatt Macy 	/* Sync out the TRIM state */
7422eda14cbcSMatt Macy 	txg_wait_synced(spa->spa_dsl_pool, 0);
7423eda14cbcSMatt Macy 	mutex_exit(&spa_namespace_lock);
7424eda14cbcSMatt Macy 
7425eda14cbcSMatt Macy 	list_destroy(&vd_list);
7426eda14cbcSMatt Macy 
7427eda14cbcSMatt Macy 	return (total_errors);
7428eda14cbcSMatt Macy }
7429eda14cbcSMatt Macy 
7430eda14cbcSMatt Macy /*
7431eda14cbcSMatt Macy  * Split a set of devices from their mirrors, and create a new pool from them.
7432eda14cbcSMatt Macy  */
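/*
 * A rough sketch of the inputs, based on the checks below: 'config' must
 * contain a ZPOOL_CONFIG_VDEV_TREE whose ZPOOL_CONFIG_CHILDREN array has one
 * entry per top-level (non-log, non-hole) vdev, each naming via
 * ZPOOL_CONFIG_GUID the healthy mirror child that should move to the new
 * pool; 'props' may carry properties for the new pool (e.g. altroot), and
 * 'exp' requests that the new pool be exported instead of left active.
 */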
7433eda14cbcSMatt Macy int
7434eda14cbcSMatt Macy spa_vdev_split_mirror(spa_t *spa, char *newname, nvlist_t *config,
7435eda14cbcSMatt Macy     nvlist_t *props, boolean_t exp)
7436eda14cbcSMatt Macy {
7437eda14cbcSMatt Macy 	int error = 0;
7438eda14cbcSMatt Macy 	uint64_t txg, *glist;
7439eda14cbcSMatt Macy 	spa_t *newspa;
7440eda14cbcSMatt Macy 	uint_t c, children, lastlog;
7441eda14cbcSMatt Macy 	nvlist_t **child, *nvl, *tmp;
7442eda14cbcSMatt Macy 	dmu_tx_t *tx;
7443eda14cbcSMatt Macy 	char *altroot = NULL;
7444eda14cbcSMatt Macy 	vdev_t *rvd, **vml = NULL;			/* vdev modify list */
7445eda14cbcSMatt Macy 	boolean_t activate_slog;
7446eda14cbcSMatt Macy 
7447eda14cbcSMatt Macy 	ASSERT(spa_writeable(spa));
7448eda14cbcSMatt Macy 
7449eda14cbcSMatt Macy 	txg = spa_vdev_enter(spa);
7450eda14cbcSMatt Macy 
7451eda14cbcSMatt Macy 	ASSERT(MUTEX_HELD(&spa_namespace_lock));
7452eda14cbcSMatt Macy 	if (spa_feature_is_active(spa, SPA_FEATURE_POOL_CHECKPOINT)) {
7453eda14cbcSMatt Macy 		error = (spa_has_checkpoint(spa)) ?
7454eda14cbcSMatt Macy 		    ZFS_ERR_CHECKPOINT_EXISTS : ZFS_ERR_DISCARDING_CHECKPOINT;
7455eda14cbcSMatt Macy 		return (spa_vdev_exit(spa, NULL, txg, error));
7456eda14cbcSMatt Macy 	}
7457eda14cbcSMatt Macy 
7458eda14cbcSMatt Macy 	/* clear the log and flush everything up to now */
7459eda14cbcSMatt Macy 	activate_slog = spa_passivate_log(spa);
7460eda14cbcSMatt Macy 	(void) spa_vdev_config_exit(spa, NULL, txg, 0, FTAG);
7461eda14cbcSMatt Macy 	error = spa_reset_logs(spa);
7462eda14cbcSMatt Macy 	txg = spa_vdev_config_enter(spa);
7463eda14cbcSMatt Macy 
7464eda14cbcSMatt Macy 	if (activate_slog)
7465eda14cbcSMatt Macy 		spa_activate_log(spa);
7466eda14cbcSMatt Macy 
7467eda14cbcSMatt Macy 	if (error != 0)
7468eda14cbcSMatt Macy 		return (spa_vdev_exit(spa, NULL, txg, error));
7469eda14cbcSMatt Macy 
7470eda14cbcSMatt Macy 	/* check new spa name before going any further */
7471eda14cbcSMatt Macy 	if (spa_lookup(newname) != NULL)
7472eda14cbcSMatt Macy 		return (spa_vdev_exit(spa, NULL, txg, EEXIST));
7473eda14cbcSMatt Macy 
7474eda14cbcSMatt Macy 	/*
7475eda14cbcSMatt Macy 	 * scan through all the children to ensure they're all mirrors
7476eda14cbcSMatt Macy 	 */
7477eda14cbcSMatt Macy 	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &nvl) != 0 ||
7478eda14cbcSMatt Macy 	    nvlist_lookup_nvlist_array(nvl, ZPOOL_CONFIG_CHILDREN, &child,
7479eda14cbcSMatt Macy 	    &children) != 0)
7480eda14cbcSMatt Macy 		return (spa_vdev_exit(spa, NULL, txg, EINVAL));
7481eda14cbcSMatt Macy 
7482eda14cbcSMatt Macy 	/* first, check to ensure we've got the right child count */
7483eda14cbcSMatt Macy 	rvd = spa->spa_root_vdev;
7484eda14cbcSMatt Macy 	lastlog = 0;
7485eda14cbcSMatt Macy 	for (c = 0; c < rvd->vdev_children; c++) {
7486eda14cbcSMatt Macy 		vdev_t *vd = rvd->vdev_child[c];
7487eda14cbcSMatt Macy 
7488eda14cbcSMatt Macy 		/* don't count the holes & logs as children */
7489eda14cbcSMatt Macy 		if (vd->vdev_islog || (vd->vdev_ops != &vdev_indirect_ops &&
7490eda14cbcSMatt Macy 		    !vdev_is_concrete(vd))) {
7491eda14cbcSMatt Macy 			if (lastlog == 0)
7492eda14cbcSMatt Macy 				lastlog = c;
7493eda14cbcSMatt Macy 			continue;
7494eda14cbcSMatt Macy 		}
7495eda14cbcSMatt Macy 
7496eda14cbcSMatt Macy 		lastlog = 0;
7497eda14cbcSMatt Macy 	}
7498eda14cbcSMatt Macy 	if (children != (lastlog != 0 ? lastlog : rvd->vdev_children))
7499eda14cbcSMatt Macy 		return (spa_vdev_exit(spa, NULL, txg, EINVAL));
7500eda14cbcSMatt Macy 
7501eda14cbcSMatt Macy 	/* next, ensure no spare or cache devices are part of the split */
7502eda14cbcSMatt Macy 	if (nvlist_lookup_nvlist(nvl, ZPOOL_CONFIG_SPARES, &tmp) == 0 ||
7503eda14cbcSMatt Macy 	    nvlist_lookup_nvlist(nvl, ZPOOL_CONFIG_L2CACHE, &tmp) == 0)
7504eda14cbcSMatt Macy 		return (spa_vdev_exit(spa, NULL, txg, EINVAL));
7505eda14cbcSMatt Macy 
7506eda14cbcSMatt Macy 	vml = kmem_zalloc(children * sizeof (vdev_t *), KM_SLEEP);
7507eda14cbcSMatt Macy 	glist = kmem_zalloc(children * sizeof (uint64_t), KM_SLEEP);
7508eda14cbcSMatt Macy 
7509eda14cbcSMatt Macy 	/* then, loop over each vdev and validate it */
7510eda14cbcSMatt Macy 	for (c = 0; c < children; c++) {
7511eda14cbcSMatt Macy 		uint64_t is_hole = 0;
7512eda14cbcSMatt Macy 
7513eda14cbcSMatt Macy 		(void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_HOLE,
7514eda14cbcSMatt Macy 		    &is_hole);
7515eda14cbcSMatt Macy 
7516eda14cbcSMatt Macy 		if (is_hole != 0) {
7517eda14cbcSMatt Macy 			if (spa->spa_root_vdev->vdev_child[c]->vdev_ishole ||
7518eda14cbcSMatt Macy 			    spa->spa_root_vdev->vdev_child[c]->vdev_islog) {
7519eda14cbcSMatt Macy 				continue;
7520eda14cbcSMatt Macy 			} else {
7521eda14cbcSMatt Macy 				error = SET_ERROR(EINVAL);
7522eda14cbcSMatt Macy 				break;
7523eda14cbcSMatt Macy 			}
7524eda14cbcSMatt Macy 		}
7525eda14cbcSMatt Macy 
7526eda14cbcSMatt Macy 		/* deal with indirect vdevs */
7527eda14cbcSMatt Macy 		if (spa->spa_root_vdev->vdev_child[c]->vdev_ops ==
7528eda14cbcSMatt Macy 		    &vdev_indirect_ops)
7529eda14cbcSMatt Macy 			continue;
7530eda14cbcSMatt Macy 
7531eda14cbcSMatt Macy 		/* which disk is going to be split? */
7532eda14cbcSMatt Macy 		if (nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_GUID,
7533eda14cbcSMatt Macy 		    &glist[c]) != 0) {
7534eda14cbcSMatt Macy 			error = SET_ERROR(EINVAL);
7535eda14cbcSMatt Macy 			break;
7536eda14cbcSMatt Macy 		}
7537eda14cbcSMatt Macy 
7538eda14cbcSMatt Macy 		/* look it up in the spa */
7539eda14cbcSMatt Macy 		vml[c] = spa_lookup_by_guid(spa, glist[c], B_FALSE);
7540eda14cbcSMatt Macy 		if (vml[c] == NULL) {
7541eda14cbcSMatt Macy 			error = SET_ERROR(ENODEV);
7542eda14cbcSMatt Macy 			break;
7543eda14cbcSMatt Macy 		}
7544eda14cbcSMatt Macy 
7545eda14cbcSMatt Macy 		/* make sure there's nothing stopping the split */
7546eda14cbcSMatt Macy 		if (vml[c]->vdev_parent->vdev_ops != &vdev_mirror_ops ||
7547eda14cbcSMatt Macy 		    vml[c]->vdev_islog ||
7548eda14cbcSMatt Macy 		    !vdev_is_concrete(vml[c]) ||
7549eda14cbcSMatt Macy 		    vml[c]->vdev_isspare ||
7550eda14cbcSMatt Macy 		    vml[c]->vdev_isl2cache ||
7551eda14cbcSMatt Macy 		    !vdev_writeable(vml[c]) ||
7552eda14cbcSMatt Macy 		    vml[c]->vdev_children != 0 ||
7553eda14cbcSMatt Macy 		    vml[c]->vdev_state != VDEV_STATE_HEALTHY ||
7554eda14cbcSMatt Macy 		    c != spa->spa_root_vdev->vdev_child[c]->vdev_id) {
7555eda14cbcSMatt Macy 			error = SET_ERROR(EINVAL);
7556eda14cbcSMatt Macy 			break;
7557eda14cbcSMatt Macy 		}
7558eda14cbcSMatt Macy 
7559eda14cbcSMatt Macy 		if (vdev_dtl_required(vml[c]) ||
7560eda14cbcSMatt Macy 		    vdev_resilver_needed(vml[c], NULL, NULL)) {
7561eda14cbcSMatt Macy 			error = SET_ERROR(EBUSY);
7562eda14cbcSMatt Macy 			break;
7563eda14cbcSMatt Macy 		}
7564eda14cbcSMatt Macy 
7565eda14cbcSMatt Macy 		/* we need certain info from the top level */
7566eda14cbcSMatt Macy 		VERIFY(nvlist_add_uint64(child[c], ZPOOL_CONFIG_METASLAB_ARRAY,
7567eda14cbcSMatt Macy 		    vml[c]->vdev_top->vdev_ms_array) == 0);
7568eda14cbcSMatt Macy 		VERIFY(nvlist_add_uint64(child[c], ZPOOL_CONFIG_METASLAB_SHIFT,
7569eda14cbcSMatt Macy 		    vml[c]->vdev_top->vdev_ms_shift) == 0);
7570eda14cbcSMatt Macy 		VERIFY(nvlist_add_uint64(child[c], ZPOOL_CONFIG_ASIZE,
7571eda14cbcSMatt Macy 		    vml[c]->vdev_top->vdev_asize) == 0);
7572eda14cbcSMatt Macy 		VERIFY(nvlist_add_uint64(child[c], ZPOOL_CONFIG_ASHIFT,
7573eda14cbcSMatt Macy 		    vml[c]->vdev_top->vdev_ashift) == 0);
7574eda14cbcSMatt Macy 
7575eda14cbcSMatt Macy 		/* transfer per-vdev ZAPs */
7576eda14cbcSMatt Macy 		ASSERT3U(vml[c]->vdev_leaf_zap, !=, 0);
7577eda14cbcSMatt Macy 		VERIFY0(nvlist_add_uint64(child[c],
7578eda14cbcSMatt Macy 		    ZPOOL_CONFIG_VDEV_LEAF_ZAP, vml[c]->vdev_leaf_zap));
7579eda14cbcSMatt Macy 
7580eda14cbcSMatt Macy 		ASSERT3U(vml[c]->vdev_top->vdev_top_zap, !=, 0);
7581eda14cbcSMatt Macy 		VERIFY0(nvlist_add_uint64(child[c],
7582eda14cbcSMatt Macy 		    ZPOOL_CONFIG_VDEV_TOP_ZAP,
7583eda14cbcSMatt Macy 		    vml[c]->vdev_parent->vdev_top_zap));
7584eda14cbcSMatt Macy 	}
7585eda14cbcSMatt Macy 
7586eda14cbcSMatt Macy 	if (error != 0) {
7587eda14cbcSMatt Macy 		kmem_free(vml, children * sizeof (vdev_t *));
7588eda14cbcSMatt Macy 		kmem_free(glist, children * sizeof (uint64_t));
7589eda14cbcSMatt Macy 		return (spa_vdev_exit(spa, NULL, txg, error));
7590eda14cbcSMatt Macy 	}
7591eda14cbcSMatt Macy 
7592eda14cbcSMatt Macy 	/* stop writers from using the disks */
7593eda14cbcSMatt Macy 	for (c = 0; c < children; c++) {
7594eda14cbcSMatt Macy 		if (vml[c] != NULL)
7595eda14cbcSMatt Macy 			vml[c]->vdev_offline = B_TRUE;
7596eda14cbcSMatt Macy 	}
7597eda14cbcSMatt Macy 	vdev_reopen(spa->spa_root_vdev);
7598eda14cbcSMatt Macy 
7599eda14cbcSMatt Macy 	/*
7600eda14cbcSMatt Macy 	 * Temporarily record the splitting vdevs in the spa config.  This
7601eda14cbcSMatt Macy 	 * will disappear once the config is regenerated.
7602eda14cbcSMatt Macy 	 */
7603eda14cbcSMatt Macy 	VERIFY(nvlist_alloc(&nvl, NV_UNIQUE_NAME, KM_SLEEP) == 0);
7604eda14cbcSMatt Macy 	VERIFY(nvlist_add_uint64_array(nvl, ZPOOL_CONFIG_SPLIT_LIST,
7605eda14cbcSMatt Macy 	    glist, children) == 0);
7606eda14cbcSMatt Macy 	kmem_free(glist, children * sizeof (uint64_t));
7607eda14cbcSMatt Macy 
7608eda14cbcSMatt Macy 	mutex_enter(&spa->spa_props_lock);
7609eda14cbcSMatt Macy 	VERIFY(nvlist_add_nvlist(spa->spa_config, ZPOOL_CONFIG_SPLIT,
7610eda14cbcSMatt Macy 	    nvl) == 0);
7611eda14cbcSMatt Macy 	mutex_exit(&spa->spa_props_lock);
7612eda14cbcSMatt Macy 	spa->spa_config_splitting = nvl;
7613eda14cbcSMatt Macy 	vdev_config_dirty(spa->spa_root_vdev);
7614eda14cbcSMatt Macy 
7615eda14cbcSMatt Macy 	/* configure and create the new pool */
7616eda14cbcSMatt Macy 	VERIFY(nvlist_add_string(config, ZPOOL_CONFIG_POOL_NAME, newname) == 0);
7617eda14cbcSMatt Macy 	VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_POOL_STATE,
7618eda14cbcSMatt Macy 	    exp ? POOL_STATE_EXPORTED : POOL_STATE_ACTIVE) == 0);
7619eda14cbcSMatt Macy 	VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_VERSION,
7620eda14cbcSMatt Macy 	    spa_version(spa)) == 0);
7621eda14cbcSMatt Macy 	VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_POOL_TXG,
7622eda14cbcSMatt Macy 	    spa->spa_config_txg) == 0);
7623eda14cbcSMatt Macy 	VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_POOL_GUID,
7624eda14cbcSMatt Macy 	    spa_generate_guid(NULL)) == 0);
7625eda14cbcSMatt Macy 	VERIFY0(nvlist_add_boolean(config, ZPOOL_CONFIG_HAS_PER_VDEV_ZAPS));
7626eda14cbcSMatt Macy 	(void) nvlist_lookup_string(props,
7627eda14cbcSMatt Macy 	    zpool_prop_to_name(ZPOOL_PROP_ALTROOT), &altroot);
7628eda14cbcSMatt Macy 
7629eda14cbcSMatt Macy 	/* add the new pool to the namespace */
7630eda14cbcSMatt Macy 	newspa = spa_add(newname, config, altroot);
7631eda14cbcSMatt Macy 	newspa->spa_avz_action = AVZ_ACTION_REBUILD;
7632eda14cbcSMatt Macy 	newspa->spa_config_txg = spa->spa_config_txg;
7633eda14cbcSMatt Macy 	spa_set_log_state(newspa, SPA_LOG_CLEAR);
7634eda14cbcSMatt Macy 
7635eda14cbcSMatt Macy 	/* release the spa config lock, retaining the namespace lock */
7636eda14cbcSMatt Macy 	spa_vdev_config_exit(spa, NULL, txg, 0, FTAG);
7637eda14cbcSMatt Macy 
7638eda14cbcSMatt Macy 	if (zio_injection_enabled)
7639eda14cbcSMatt Macy 		zio_handle_panic_injection(spa, FTAG, 1);
7640eda14cbcSMatt Macy 
7641eda14cbcSMatt Macy 	spa_activate(newspa, spa_mode_global);
7642eda14cbcSMatt Macy 	spa_async_suspend(newspa);
7643eda14cbcSMatt Macy 
7644eda14cbcSMatt Macy 	/*
7645eda14cbcSMatt Macy 	 * Temporarily stop the initializing and TRIM activity.  We set the
7646eda14cbcSMatt Macy 	 * state to ACTIVE so that we know to resume initializing or TRIM
7647eda14cbcSMatt Macy 	 * once the split has completed.
7648eda14cbcSMatt Macy 	 */
7649eda14cbcSMatt Macy 	list_t vd_initialize_list;
7650eda14cbcSMatt Macy 	list_create(&vd_initialize_list, sizeof (vdev_t),
7651eda14cbcSMatt Macy 	    offsetof(vdev_t, vdev_initialize_node));
7652eda14cbcSMatt Macy 
7653eda14cbcSMatt Macy 	list_t vd_trim_list;
7654eda14cbcSMatt Macy 	list_create(&vd_trim_list, sizeof (vdev_t),
7655eda14cbcSMatt Macy 	    offsetof(vdev_t, vdev_trim_node));
7656eda14cbcSMatt Macy 
7657eda14cbcSMatt Macy 	for (c = 0; c < children; c++) {
7658eda14cbcSMatt Macy 		if (vml[c] != NULL && vml[c]->vdev_ops != &vdev_indirect_ops) {
7659eda14cbcSMatt Macy 			mutex_enter(&vml[c]->vdev_initialize_lock);
7660eda14cbcSMatt Macy 			vdev_initialize_stop(vml[c],
7661eda14cbcSMatt Macy 			    VDEV_INITIALIZE_ACTIVE, &vd_initialize_list);
7662eda14cbcSMatt Macy 			mutex_exit(&vml[c]->vdev_initialize_lock);
7663eda14cbcSMatt Macy 
7664eda14cbcSMatt Macy 			mutex_enter(&vml[c]->vdev_trim_lock);
7665eda14cbcSMatt Macy 			vdev_trim_stop(vml[c], VDEV_TRIM_ACTIVE, &vd_trim_list);
7666eda14cbcSMatt Macy 			mutex_exit(&vml[c]->vdev_trim_lock);
7667eda14cbcSMatt Macy 		}
7668eda14cbcSMatt Macy 	}
7669eda14cbcSMatt Macy 
7670eda14cbcSMatt Macy 	vdev_initialize_stop_wait(spa, &vd_initialize_list);
7671eda14cbcSMatt Macy 	vdev_trim_stop_wait(spa, &vd_trim_list);
7672eda14cbcSMatt Macy 
7673eda14cbcSMatt Macy 	list_destroy(&vd_initialize_list);
7674eda14cbcSMatt Macy 	list_destroy(&vd_trim_list);
7675eda14cbcSMatt Macy 
7676eda14cbcSMatt Macy 	newspa->spa_config_source = SPA_CONFIG_SRC_SPLIT;
7677eda14cbcSMatt Macy 	newspa->spa_is_splitting = B_TRUE;
7678eda14cbcSMatt Macy 
7679eda14cbcSMatt Macy 	/* create the new pool from the disks of the original pool */
7680eda14cbcSMatt Macy 	error = spa_load(newspa, SPA_LOAD_IMPORT, SPA_IMPORT_ASSEMBLE);
7681eda14cbcSMatt Macy 	if (error)
7682eda14cbcSMatt Macy 		goto out;
7683eda14cbcSMatt Macy 
7684eda14cbcSMatt Macy 	/* if that worked, generate a real config for the new pool */
7685eda14cbcSMatt Macy 	if (newspa->spa_root_vdev != NULL) {
7686eda14cbcSMatt Macy 		VERIFY(nvlist_alloc(&newspa->spa_config_splitting,
7687eda14cbcSMatt Macy 		    NV_UNIQUE_NAME, KM_SLEEP) == 0);
7688eda14cbcSMatt Macy 		VERIFY(nvlist_add_uint64(newspa->spa_config_splitting,
7689eda14cbcSMatt Macy 		    ZPOOL_CONFIG_SPLIT_GUID, spa_guid(spa)) == 0);
7690eda14cbcSMatt Macy 		spa_config_set(newspa, spa_config_generate(newspa, NULL, -1ULL,
7691eda14cbcSMatt Macy 		    B_TRUE));
7692eda14cbcSMatt Macy 	}
7693eda14cbcSMatt Macy 
7694eda14cbcSMatt Macy 	/* set the props */
7695eda14cbcSMatt Macy 	if (props != NULL) {
7696eda14cbcSMatt Macy 		spa_configfile_set(newspa, props, B_FALSE);
7697eda14cbcSMatt Macy 		error = spa_prop_set(newspa, props);
7698eda14cbcSMatt Macy 		if (error)
7699eda14cbcSMatt Macy 			goto out;
7700eda14cbcSMatt Macy 	}
7701eda14cbcSMatt Macy 
7702eda14cbcSMatt Macy 	/* flush everything */
7703eda14cbcSMatt Macy 	txg = spa_vdev_config_enter(newspa);
7704eda14cbcSMatt Macy 	vdev_config_dirty(newspa->spa_root_vdev);
7705eda14cbcSMatt Macy 	(void) spa_vdev_config_exit(newspa, NULL, txg, 0, FTAG);
7706eda14cbcSMatt Macy 
7707eda14cbcSMatt Macy 	if (zio_injection_enabled)
7708eda14cbcSMatt Macy 		zio_handle_panic_injection(spa, FTAG, 2);
7709eda14cbcSMatt Macy 
7710eda14cbcSMatt Macy 	spa_async_resume(newspa);
7711eda14cbcSMatt Macy 
7712eda14cbcSMatt Macy 	/* finally, update the original pool's config */
7713eda14cbcSMatt Macy 	txg = spa_vdev_config_enter(spa);
7714eda14cbcSMatt Macy 	tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
7715eda14cbcSMatt Macy 	error = dmu_tx_assign(tx, TXG_WAIT);
7716eda14cbcSMatt Macy 	if (error != 0)
7717eda14cbcSMatt Macy 		dmu_tx_abort(tx);
7718eda14cbcSMatt Macy 	for (c = 0; c < children; c++) {
7719eda14cbcSMatt Macy 		if (vml[c] != NULL && vml[c]->vdev_ops != &vdev_indirect_ops) {
7720eda14cbcSMatt Macy 			vdev_t *tvd = vml[c]->vdev_top;
7721eda14cbcSMatt Macy 
7722eda14cbcSMatt Macy 			/*
7723eda14cbcSMatt Macy 			 * Need to be sure the detachable VDEV is not
7724eda14cbcSMatt Macy 			 * on any *other* txg's DTL list to prevent it
7725eda14cbcSMatt Macy 			 * from being accessed after it's freed.
7726eda14cbcSMatt Macy 			 */
7727eda14cbcSMatt Macy 			for (int t = 0; t < TXG_SIZE; t++) {
7728eda14cbcSMatt Macy 				(void) txg_list_remove_this(
7729eda14cbcSMatt Macy 				    &tvd->vdev_dtl_list, vml[c], t);
7730eda14cbcSMatt Macy 			}
7731eda14cbcSMatt Macy 
7732eda14cbcSMatt Macy 			vdev_split(vml[c]);
7733eda14cbcSMatt Macy 			if (error == 0)
7734eda14cbcSMatt Macy 				spa_history_log_internal(spa, "detach", tx,
7735eda14cbcSMatt Macy 				    "vdev=%s", vml[c]->vdev_path);
7736eda14cbcSMatt Macy 
7737eda14cbcSMatt Macy 			vdev_free(vml[c]);
7738eda14cbcSMatt Macy 		}
7739eda14cbcSMatt Macy 	}
7740eda14cbcSMatt Macy 	spa->spa_avz_action = AVZ_ACTION_REBUILD;
7741eda14cbcSMatt Macy 	vdev_config_dirty(spa->spa_root_vdev);
7742eda14cbcSMatt Macy 	spa->spa_config_splitting = NULL;
7743eda14cbcSMatt Macy 	nvlist_free(nvl);
7744eda14cbcSMatt Macy 	if (error == 0)
7745eda14cbcSMatt Macy 		dmu_tx_commit(tx);
7746eda14cbcSMatt Macy 	(void) spa_vdev_exit(spa, NULL, txg, 0);
7747eda14cbcSMatt Macy 
7748eda14cbcSMatt Macy 	if (zio_injection_enabled)
7749eda14cbcSMatt Macy 		zio_handle_panic_injection(spa, FTAG, 3);
7750eda14cbcSMatt Macy 
7751eda14cbcSMatt Macy 	/* split is complete; log a history record */
7752eda14cbcSMatt Macy 	spa_history_log_internal(newspa, "split", NULL,
7753eda14cbcSMatt Macy 	    "from pool %s", spa_name(spa));
7754eda14cbcSMatt Macy 
7755eda14cbcSMatt Macy 	newspa->spa_is_splitting = B_FALSE;
7756eda14cbcSMatt Macy 	kmem_free(vml, children * sizeof (vdev_t *));
7757eda14cbcSMatt Macy 
7758eda14cbcSMatt Macy 	/* if we're not going to mount the filesystems in userland, export */
7759eda14cbcSMatt Macy 	if (exp)
7760eda14cbcSMatt Macy 		error = spa_export_common(newname, POOL_STATE_EXPORTED, NULL,
7761eda14cbcSMatt Macy 		    B_FALSE, B_FALSE);
7762eda14cbcSMatt Macy 
7763eda14cbcSMatt Macy 	return (error);
7764eda14cbcSMatt Macy 
7765eda14cbcSMatt Macy out:
7766eda14cbcSMatt Macy 	spa_unload(newspa);
7767eda14cbcSMatt Macy 	spa_deactivate(newspa);
7768eda14cbcSMatt Macy 	spa_remove(newspa);
7769eda14cbcSMatt Macy 
7770eda14cbcSMatt Macy 	txg = spa_vdev_config_enter(spa);
7771eda14cbcSMatt Macy 
7772eda14cbcSMatt Macy 	/* re-online all offlined disks */
7773eda14cbcSMatt Macy 	for (c = 0; c < children; c++) {
7774eda14cbcSMatt Macy 		if (vml[c] != NULL)
7775eda14cbcSMatt Macy 			vml[c]->vdev_offline = B_FALSE;
7776eda14cbcSMatt Macy 	}
7777eda14cbcSMatt Macy 
7778eda14cbcSMatt Macy 	/* restart initializing or trimming disks as necessary */
7779eda14cbcSMatt Macy 	spa_async_request(spa, SPA_ASYNC_INITIALIZE_RESTART);
7780eda14cbcSMatt Macy 	spa_async_request(spa, SPA_ASYNC_TRIM_RESTART);
7781eda14cbcSMatt Macy 	spa_async_request(spa, SPA_ASYNC_AUTOTRIM_RESTART);
7782eda14cbcSMatt Macy 
7783eda14cbcSMatt Macy 	vdev_reopen(spa->spa_root_vdev);
7784eda14cbcSMatt Macy 
7785eda14cbcSMatt Macy 	nvlist_free(spa->spa_config_splitting);
7786eda14cbcSMatt Macy 	spa->spa_config_splitting = NULL;
7787eda14cbcSMatt Macy 	(void) spa_vdev_exit(spa, NULL, txg, error);
7788eda14cbcSMatt Macy 
7789eda14cbcSMatt Macy 	kmem_free(vml, children * sizeof (vdev_t *));
7790eda14cbcSMatt Macy 	return (error);
7791eda14cbcSMatt Macy }
7792eda14cbcSMatt Macy 
7793eda14cbcSMatt Macy /*
7794eda14cbcSMatt Macy  * Find any device that's done replacing, or a vdev marked 'unspare' that's
7795eda14cbcSMatt Macy  * currently spared, so we can detach it.
7796eda14cbcSMatt Macy  */
7797eda14cbcSMatt Macy static vdev_t *
7798eda14cbcSMatt Macy spa_vdev_resilver_done_hunt(vdev_t *vd)
7799eda14cbcSMatt Macy {
7800eda14cbcSMatt Macy 	vdev_t *newvd, *oldvd;
7801eda14cbcSMatt Macy 
7802eda14cbcSMatt Macy 	for (int c = 0; c < vd->vdev_children; c++) {
7803eda14cbcSMatt Macy 		oldvd = spa_vdev_resilver_done_hunt(vd->vdev_child[c]);
7804eda14cbcSMatt Macy 		if (oldvd != NULL)
7805eda14cbcSMatt Macy 			return (oldvd);
7806eda14cbcSMatt Macy 	}
7807eda14cbcSMatt Macy 
7808eda14cbcSMatt Macy 	/*
7809eda14cbcSMatt Macy 	 * Check for a completed replacement.  We always consider the first
7810eda14cbcSMatt Macy 	 * vdev in the list to be the oldest vdev, and the last one to be
7811eda14cbcSMatt Macy 	 * the newest (see spa_vdev_attach() for how that works).  In
7812eda14cbcSMatt Macy 	 * the case where the newest vdev is faulted, we will not automatically
7813eda14cbcSMatt Macy 	 * remove it after a resilver completes.  This is OK as it will require
7814eda14cbcSMatt Macy 	 * user intervention to determine which disk the admin wishes to keep.
7815eda14cbcSMatt Macy 	 */
7816eda14cbcSMatt Macy 	if (vd->vdev_ops == &vdev_replacing_ops) {
7817eda14cbcSMatt Macy 		ASSERT(vd->vdev_children > 1);
7818eda14cbcSMatt Macy 
7819eda14cbcSMatt Macy 		newvd = vd->vdev_child[vd->vdev_children - 1];
7820eda14cbcSMatt Macy 		oldvd = vd->vdev_child[0];
7821eda14cbcSMatt Macy 
7822eda14cbcSMatt Macy 		if (vdev_dtl_empty(newvd, DTL_MISSING) &&
7823eda14cbcSMatt Macy 		    vdev_dtl_empty(newvd, DTL_OUTAGE) &&
7824eda14cbcSMatt Macy 		    !vdev_dtl_required(oldvd))
7825eda14cbcSMatt Macy 			return (oldvd);
7826eda14cbcSMatt Macy 	}
7827eda14cbcSMatt Macy 
7828eda14cbcSMatt Macy 	/*
7829eda14cbcSMatt Macy 	 * Check for a completed resilver with the 'unspare' flag set.
7830eda14cbcSMatt Macy 	 * Also potentially update faulted state.
7831eda14cbcSMatt Macy 	 */
7832eda14cbcSMatt Macy 	if (vd->vdev_ops == &vdev_spare_ops) {
7833eda14cbcSMatt Macy 		vdev_t *first = vd->vdev_child[0];
7834eda14cbcSMatt Macy 		vdev_t *last = vd->vdev_child[vd->vdev_children - 1];
7835eda14cbcSMatt Macy 
7836eda14cbcSMatt Macy 		if (last->vdev_unspare) {
7837eda14cbcSMatt Macy 			oldvd = first;
7838eda14cbcSMatt Macy 			newvd = last;
7839eda14cbcSMatt Macy 		} else if (first->vdev_unspare) {
7840eda14cbcSMatt Macy 			oldvd = last;
7841eda14cbcSMatt Macy 			newvd = first;
7842eda14cbcSMatt Macy 		} else {
7843eda14cbcSMatt Macy 			oldvd = NULL;
7844eda14cbcSMatt Macy 		}
7845eda14cbcSMatt Macy 
7846eda14cbcSMatt Macy 		if (oldvd != NULL &&
7847eda14cbcSMatt Macy 		    vdev_dtl_empty(newvd, DTL_MISSING) &&
7848eda14cbcSMatt Macy 		    vdev_dtl_empty(newvd, DTL_OUTAGE) &&
7849eda14cbcSMatt Macy 		    !vdev_dtl_required(oldvd))
7850eda14cbcSMatt Macy 			return (oldvd);
7851eda14cbcSMatt Macy 
7852eda14cbcSMatt Macy 		vdev_propagate_state(vd);
7853eda14cbcSMatt Macy 
7854eda14cbcSMatt Macy 		/*
7855eda14cbcSMatt Macy 		 * If there are more than two spares attached to a disk,
7856eda14cbcSMatt Macy 		 * and those spares are not required, then we want to
7857eda14cbcSMatt Macy 		 * attempt to free them up now so that they can be used
7858eda14cbcSMatt Macy 		 * by other pools.  Once we're back down to a single
7859eda14cbcSMatt Macy 		 * disk+spare, we stop removing them.
7860eda14cbcSMatt Macy 		 */
7861eda14cbcSMatt Macy 		if (vd->vdev_children > 2) {
7862eda14cbcSMatt Macy 			newvd = vd->vdev_child[1];
7863eda14cbcSMatt Macy 
7864eda14cbcSMatt Macy 			if (newvd->vdev_isspare && last->vdev_isspare &&
7865eda14cbcSMatt Macy 			    vdev_dtl_empty(last, DTL_MISSING) &&
7866eda14cbcSMatt Macy 			    vdev_dtl_empty(last, DTL_OUTAGE) &&
7867eda14cbcSMatt Macy 			    !vdev_dtl_required(newvd))
7868eda14cbcSMatt Macy 				return (newvd);
7869eda14cbcSMatt Macy 		}
7870eda14cbcSMatt Macy 	}
7871eda14cbcSMatt Macy 
7872eda14cbcSMatt Macy 	return (NULL);
7873eda14cbcSMatt Macy }
7874eda14cbcSMatt Macy 
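/*
 * Walk the pool looking for vdevs returned by spa_vdev_resilver_done_hunt()
 * and detach each one (and, when a hot-spare replacement has just completed,
 * the original spare as well) until no more candidates remain.
 */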
7875eda14cbcSMatt Macy static void
7876eda14cbcSMatt Macy spa_vdev_resilver_done(spa_t *spa)
7877eda14cbcSMatt Macy {
7878eda14cbcSMatt Macy 	vdev_t *vd, *pvd, *ppvd;
7879eda14cbcSMatt Macy 	uint64_t guid, sguid, pguid, ppguid;
7880eda14cbcSMatt Macy 
7881eda14cbcSMatt Macy 	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
7882eda14cbcSMatt Macy 
7883eda14cbcSMatt Macy 	while ((vd = spa_vdev_resilver_done_hunt(spa->spa_root_vdev)) != NULL) {
7884eda14cbcSMatt Macy 		pvd = vd->vdev_parent;
7885eda14cbcSMatt Macy 		ppvd = pvd->vdev_parent;
7886eda14cbcSMatt Macy 		guid = vd->vdev_guid;
7887eda14cbcSMatt Macy 		pguid = pvd->vdev_guid;
7888eda14cbcSMatt Macy 		ppguid = ppvd->vdev_guid;
7889eda14cbcSMatt Macy 		sguid = 0;
7890eda14cbcSMatt Macy 		/*
7891eda14cbcSMatt Macy 		 * If we have just finished replacing a hot spared device, then
7892eda14cbcSMatt Macy 		 * we need to detach the parent's first child (the original hot
7893eda14cbcSMatt Macy 		 * spare) as well.
7894eda14cbcSMatt Macy 		 */
7895eda14cbcSMatt Macy 		if (ppvd->vdev_ops == &vdev_spare_ops && pvd->vdev_id == 0 &&
7896eda14cbcSMatt Macy 		    ppvd->vdev_children == 2) {
7897eda14cbcSMatt Macy 			ASSERT(pvd->vdev_ops == &vdev_replacing_ops);
7898eda14cbcSMatt Macy 			sguid = ppvd->vdev_child[1]->vdev_guid;
7899eda14cbcSMatt Macy 		}
7900eda14cbcSMatt Macy 		ASSERT(vd->vdev_resilver_txg == 0 || !vdev_dtl_required(vd));
7901eda14cbcSMatt Macy 
7902eda14cbcSMatt Macy 		spa_config_exit(spa, SCL_ALL, FTAG);
7903eda14cbcSMatt Macy 		if (spa_vdev_detach(spa, guid, pguid, B_TRUE) != 0)
7904eda14cbcSMatt Macy 			return;
7905eda14cbcSMatt Macy 		if (sguid && spa_vdev_detach(spa, sguid, ppguid, B_TRUE) != 0)
7906eda14cbcSMatt Macy 			return;
7907eda14cbcSMatt Macy 		spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
7908eda14cbcSMatt Macy 	}
7909eda14cbcSMatt Macy 
7910eda14cbcSMatt Macy 	spa_config_exit(spa, SCL_ALL, FTAG);
7911eda14cbcSMatt Macy 
7912eda14cbcSMatt Macy 	/*
7913eda14cbcSMatt Macy 	 * If a detach was not performed above, replace waiters will not have
7914eda14cbcSMatt Macy 	 * been notified, in which case we must do so now.
7915eda14cbcSMatt Macy 	 */
7916eda14cbcSMatt Macy 	spa_notify_waiters(spa);
7917eda14cbcSMatt Macy }
7918eda14cbcSMatt Macy 
7919eda14cbcSMatt Macy /*
7920eda14cbcSMatt Macy  * Update the stored path or FRU for this vdev.
7921eda14cbcSMatt Macy  */
7922eda14cbcSMatt Macy static int
7923eda14cbcSMatt Macy spa_vdev_set_common(spa_t *spa, uint64_t guid, const char *value,
7924eda14cbcSMatt Macy     boolean_t ispath)
7925eda14cbcSMatt Macy {
7926eda14cbcSMatt Macy 	vdev_t *vd;
7927eda14cbcSMatt Macy 	boolean_t sync = B_FALSE;
7928eda14cbcSMatt Macy 
7929eda14cbcSMatt Macy 	ASSERT(spa_writeable(spa));
7930eda14cbcSMatt Macy 
7931eda14cbcSMatt Macy 	spa_vdev_state_enter(spa, SCL_ALL);
7932eda14cbcSMatt Macy 
7933eda14cbcSMatt Macy 	if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL)
7934eda14cbcSMatt Macy 		return (spa_vdev_state_exit(spa, NULL, ENOENT));
7935eda14cbcSMatt Macy 
7936eda14cbcSMatt Macy 	if (!vd->vdev_ops->vdev_op_leaf)
7937eda14cbcSMatt Macy 		return (spa_vdev_state_exit(spa, NULL, ENOTSUP));
7938eda14cbcSMatt Macy 
7939eda14cbcSMatt Macy 	if (ispath) {
7940eda14cbcSMatt Macy 		if (strcmp(value, vd->vdev_path) != 0) {
7941eda14cbcSMatt Macy 			spa_strfree(vd->vdev_path);
7942eda14cbcSMatt Macy 			vd->vdev_path = spa_strdup(value);
7943eda14cbcSMatt Macy 			sync = B_TRUE;
7944eda14cbcSMatt Macy 		}
7945eda14cbcSMatt Macy 	} else {
7946eda14cbcSMatt Macy 		if (vd->vdev_fru == NULL) {
7947eda14cbcSMatt Macy 			vd->vdev_fru = spa_strdup(value);
7948eda14cbcSMatt Macy 			sync = B_TRUE;
7949eda14cbcSMatt Macy 		} else if (strcmp(value, vd->vdev_fru) != 0) {
7950eda14cbcSMatt Macy 			spa_strfree(vd->vdev_fru);
7951eda14cbcSMatt Macy 			vd->vdev_fru = spa_strdup(value);
7952eda14cbcSMatt Macy 			sync = B_TRUE;
7953eda14cbcSMatt Macy 		}
7954eda14cbcSMatt Macy 	}
7955eda14cbcSMatt Macy 
7956eda14cbcSMatt Macy 	return (spa_vdev_state_exit(spa, sync ? vd : NULL, 0));
7957eda14cbcSMatt Macy }
7958eda14cbcSMatt Macy 
7959eda14cbcSMatt Macy int
7960eda14cbcSMatt Macy spa_vdev_setpath(spa_t *spa, uint64_t guid, const char *newpath)
7961eda14cbcSMatt Macy {
7962eda14cbcSMatt Macy 	return (spa_vdev_set_common(spa, guid, newpath, B_TRUE));
7963eda14cbcSMatt Macy }
7964eda14cbcSMatt Macy 
7965eda14cbcSMatt Macy int
7966eda14cbcSMatt Macy spa_vdev_setfru(spa_t *spa, uint64_t guid, const char *newfru)
7967eda14cbcSMatt Macy {
7968eda14cbcSMatt Macy 	return (spa_vdev_set_common(spa, guid, newfru, B_FALSE));
7969eda14cbcSMatt Macy }
7970eda14cbcSMatt Macy 
7971eda14cbcSMatt Macy /*
7972eda14cbcSMatt Macy  * ==========================================================================
7973eda14cbcSMatt Macy  * SPA Scanning
7974eda14cbcSMatt Macy  * ==========================================================================
7975eda14cbcSMatt Macy  */
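/*
 * Pause or resume an active scrub.  Not permitted while a resilver is in
 * progress.
 */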
7976eda14cbcSMatt Macy int
7977eda14cbcSMatt Macy spa_scrub_pause_resume(spa_t *spa, pool_scrub_cmd_t cmd)
7978eda14cbcSMatt Macy {
7979eda14cbcSMatt Macy 	ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == 0);
7980eda14cbcSMatt Macy 
7981eda14cbcSMatt Macy 	if (dsl_scan_resilvering(spa->spa_dsl_pool))
7982eda14cbcSMatt Macy 		return (SET_ERROR(EBUSY));
7983eda14cbcSMatt Macy 
7984eda14cbcSMatt Macy 	return (dsl_scrub_set_pause_resume(spa->spa_dsl_pool, cmd));
7985eda14cbcSMatt Macy }
7986eda14cbcSMatt Macy 
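/*
 * Cancel any scan currently in progress.  A resilver cannot be cancelled.
 */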
7987eda14cbcSMatt Macy int
7988eda14cbcSMatt Macy spa_scan_stop(spa_t *spa)
7989eda14cbcSMatt Macy {
7990eda14cbcSMatt Macy 	ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == 0);
7991eda14cbcSMatt Macy 	if (dsl_scan_resilvering(spa->spa_dsl_pool))
7992eda14cbcSMatt Macy 		return (SET_ERROR(EBUSY));
7993eda14cbcSMatt Macy 	return (dsl_scan_cancel(spa->spa_dsl_pool));
7994eda14cbcSMatt Macy }
7995eda14cbcSMatt Macy 
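/*
 * Start a pool scan (scrub or resilver).  An explicit resilver request
 * requires the resilver_defer feature; if no writeable leaf vdev actually
 * needs resilvering, the request completes immediately via the async
 * resilver-done path.
 */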
7996eda14cbcSMatt Macy int
7997eda14cbcSMatt Macy spa_scan(spa_t *spa, pool_scan_func_t func)
7998eda14cbcSMatt Macy {
7999eda14cbcSMatt Macy 	ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == 0);
8000eda14cbcSMatt Macy 
8001eda14cbcSMatt Macy 	if (func >= POOL_SCAN_FUNCS || func == POOL_SCAN_NONE)
8002eda14cbcSMatt Macy 		return (SET_ERROR(ENOTSUP));
8003eda14cbcSMatt Macy 
8004eda14cbcSMatt Macy 	if (func == POOL_SCAN_RESILVER &&
8005eda14cbcSMatt Macy 	    !spa_feature_is_enabled(spa, SPA_FEATURE_RESILVER_DEFER))
8006eda14cbcSMatt Macy 		return (SET_ERROR(ENOTSUP));
8007eda14cbcSMatt Macy 
8008eda14cbcSMatt Macy 	/*
8009eda14cbcSMatt Macy 	 * If a resilver was requested, but there is no DTL on a
8010eda14cbcSMatt Macy 	 * writeable leaf device, we have nothing to do.
8011eda14cbcSMatt Macy 	 */
8012eda14cbcSMatt Macy 	if (func == POOL_SCAN_RESILVER &&
8013eda14cbcSMatt Macy 	    !vdev_resilver_needed(spa->spa_root_vdev, NULL, NULL)) {
8014eda14cbcSMatt Macy 		spa_async_request(spa, SPA_ASYNC_RESILVER_DONE);
8015eda14cbcSMatt Macy 		return (0);
8016eda14cbcSMatt Macy 	}
8017eda14cbcSMatt Macy 
8018eda14cbcSMatt Macy 	return (dsl_scan(spa->spa_dsl_pool, func));
8019eda14cbcSMatt Macy }
8020eda14cbcSMatt Macy 
8021eda14cbcSMatt Macy /*
8022eda14cbcSMatt Macy  * ==========================================================================
8023eda14cbcSMatt Macy  * SPA async task processing
8024eda14cbcSMatt Macy  * ==========================================================================
8025eda14cbcSMatt Macy  */
8026eda14cbcSMatt Macy 
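/*
 * Recursively handle vdevs flagged vdev_remove_wanted: mark them REMOVED,
 * clear their error counters without doing a full vdev_clear(), and notify
 * userspace that the device is gone.
 */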
8027eda14cbcSMatt Macy static void
8028eda14cbcSMatt Macy spa_async_remove(spa_t *spa, vdev_t *vd)
8029eda14cbcSMatt Macy {
8030eda14cbcSMatt Macy 	if (vd->vdev_remove_wanted) {
8031eda14cbcSMatt Macy 		vd->vdev_remove_wanted = B_FALSE;
8032eda14cbcSMatt Macy 		vd->vdev_delayed_close = B_FALSE;
8033eda14cbcSMatt Macy 		vdev_set_state(vd, B_FALSE, VDEV_STATE_REMOVED, VDEV_AUX_NONE);
8034eda14cbcSMatt Macy 
8035eda14cbcSMatt Macy 		/*
8036eda14cbcSMatt Macy 		 * We want to clear the stats, but we don't want to do a full
8037eda14cbcSMatt Macy 		 * vdev_clear() as that will cause us to throw away
8038eda14cbcSMatt Macy 		 * degraded/faulted state as well as attempt to reopen the
8039eda14cbcSMatt Macy 		 * device, all of which is a waste.
8040eda14cbcSMatt Macy 		 */
8041eda14cbcSMatt Macy 		vd->vdev_stat.vs_read_errors = 0;
8042eda14cbcSMatt Macy 		vd->vdev_stat.vs_write_errors = 0;
8043eda14cbcSMatt Macy 		vd->vdev_stat.vs_checksum_errors = 0;
8044eda14cbcSMatt Macy 
8045eda14cbcSMatt Macy 		vdev_state_dirty(vd->vdev_top);
80467877fdebSMatt Macy 
80477877fdebSMatt Macy 		/* Tell userspace that the vdev is gone. */
80487877fdebSMatt Macy 		zfs_post_remove(spa, vd);
8049eda14cbcSMatt Macy 	}
8050eda14cbcSMatt Macy 
8051eda14cbcSMatt Macy 	for (int c = 0; c < vd->vdev_children; c++)
8052eda14cbcSMatt Macy 		spa_async_remove(spa, vd->vdev_child[c]);
8053eda14cbcSMatt Macy }
8054eda14cbcSMatt Macy 
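/*
 * Recursively reopen any vdev flagged vdev_probe_wanted; vdev_open()
 * performs the actual probe.
 */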
8055eda14cbcSMatt Macy static void
8056eda14cbcSMatt Macy spa_async_probe(spa_t *spa, vdev_t *vd)
8057eda14cbcSMatt Macy {
8058eda14cbcSMatt Macy 	if (vd->vdev_probe_wanted) {
8059eda14cbcSMatt Macy 		vd->vdev_probe_wanted = B_FALSE;
8060eda14cbcSMatt Macy 		vdev_reopen(vd);	/* vdev_open() does the actual probe */
8061eda14cbcSMatt Macy 	}
8062eda14cbcSMatt Macy 
8063eda14cbcSMatt Macy 	for (int c = 0; c < vd->vdev_children; c++)
8064eda14cbcSMatt Macy 		spa_async_probe(spa, vd->vdev_child[c]);
8065eda14cbcSMatt Macy }
8066eda14cbcSMatt Macy 
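/*
 * When the autoexpand property is set, post an ESC_ZFS_VDEV_AUTOEXPAND
 * event for every leaf vdev that has a known physical path.
 */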
8067eda14cbcSMatt Macy static void
8068eda14cbcSMatt Macy spa_async_autoexpand(spa_t *spa, vdev_t *vd)
8069eda14cbcSMatt Macy {
8070eda14cbcSMatt Macy 	if (!spa->spa_autoexpand)
8071eda14cbcSMatt Macy 		return;
8072eda14cbcSMatt Macy 
8073eda14cbcSMatt Macy 	for (int c = 0; c < vd->vdev_children; c++) {
8074eda14cbcSMatt Macy 		vdev_t *cvd = vd->vdev_child[c];
8075eda14cbcSMatt Macy 		spa_async_autoexpand(spa, cvd);
8076eda14cbcSMatt Macy 	}
8077eda14cbcSMatt Macy 
8078eda14cbcSMatt Macy 	if (!vd->vdev_ops->vdev_op_leaf || vd->vdev_physpath == NULL)
8079eda14cbcSMatt Macy 		return;
8080eda14cbcSMatt Macy 
8081eda14cbcSMatt Macy 	spa_event_notify(vd->vdev_spa, vd, NULL, ESC_ZFS_VDEV_AUTOEXPAND);
8082eda14cbcSMatt Macy }
8083eda14cbcSMatt Macy 
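/*
 * Body of the asynchronous task thread: atomically take the pending task
 * mask, then service each requested task (config update, device removal,
 * autoexpand, probe, resilver-done handling, resilver kick-off,
 * initialize/TRIM/autotrim restarts, and L2ARC TRIM/rebuild) before
 * announcing completion and exiting.
 */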
8084eda14cbcSMatt Macy static void
8085eda14cbcSMatt Macy spa_async_thread(void *arg)
8086eda14cbcSMatt Macy {
8087eda14cbcSMatt Macy 	spa_t *spa = (spa_t *)arg;
8088eda14cbcSMatt Macy 	dsl_pool_t *dp = spa->spa_dsl_pool;
8089eda14cbcSMatt Macy 	int tasks;
8090eda14cbcSMatt Macy 
8091eda14cbcSMatt Macy 	ASSERT(spa->spa_sync_on);
8092eda14cbcSMatt Macy 
8093eda14cbcSMatt Macy 	mutex_enter(&spa->spa_async_lock);
8094eda14cbcSMatt Macy 	tasks = spa->spa_async_tasks;
8095eda14cbcSMatt Macy 	spa->spa_async_tasks = 0;
8096eda14cbcSMatt Macy 	mutex_exit(&spa->spa_async_lock);
8097eda14cbcSMatt Macy 
8098eda14cbcSMatt Macy 	/*
8099eda14cbcSMatt Macy 	 * See if the config needs to be updated.
8100eda14cbcSMatt Macy 	 */
8101eda14cbcSMatt Macy 	if (tasks & SPA_ASYNC_CONFIG_UPDATE) {
8102eda14cbcSMatt Macy 		uint64_t old_space, new_space;
8103eda14cbcSMatt Macy 
8104eda14cbcSMatt Macy 		mutex_enter(&spa_namespace_lock);
8105eda14cbcSMatt Macy 		old_space = metaslab_class_get_space(spa_normal_class(spa));
8106eda14cbcSMatt Macy 		old_space += metaslab_class_get_space(spa_special_class(spa));
8107eda14cbcSMatt Macy 		old_space += metaslab_class_get_space(spa_dedup_class(spa));
8108184c1b94SMartin Matuska 		old_space += metaslab_class_get_space(
8109184c1b94SMartin Matuska 		    spa_embedded_log_class(spa));
8110eda14cbcSMatt Macy 
8111eda14cbcSMatt Macy 		spa_config_update(spa, SPA_CONFIG_UPDATE_POOL);
8112eda14cbcSMatt Macy 
8113eda14cbcSMatt Macy 		new_space = metaslab_class_get_space(spa_normal_class(spa));
8114eda14cbcSMatt Macy 		new_space += metaslab_class_get_space(spa_special_class(spa));
8115eda14cbcSMatt Macy 		new_space += metaslab_class_get_space(spa_dedup_class(spa));
8116184c1b94SMartin Matuska 		new_space += metaslab_class_get_space(
8117184c1b94SMartin Matuska 		    spa_embedded_log_class(spa));
8118eda14cbcSMatt Macy 		mutex_exit(&spa_namespace_lock);
8119eda14cbcSMatt Macy 
8120eda14cbcSMatt Macy 		/*
8121eda14cbcSMatt Macy 		 * If the pool grew as a result of the config update,
8122eda14cbcSMatt Macy 		 * then log an internal history event.
8123eda14cbcSMatt Macy 		 */
8124eda14cbcSMatt Macy 		if (new_space != old_space) {
8125eda14cbcSMatt Macy 			spa_history_log_internal(spa, "vdev online", NULL,
8126eda14cbcSMatt Macy 			    "pool '%s' size: %llu(+%llu)",
8127eda14cbcSMatt Macy 			    spa_name(spa), (u_longlong_t)new_space,
8128eda14cbcSMatt Macy 			    (u_longlong_t)(new_space - old_space));
8129eda14cbcSMatt Macy 		}
8130eda14cbcSMatt Macy 	}
8131eda14cbcSMatt Macy 
8132eda14cbcSMatt Macy 	/*
8133eda14cbcSMatt Macy 	 * See if any devices need to be marked REMOVED.
8134eda14cbcSMatt Macy 	 */
8135eda14cbcSMatt Macy 	if (tasks & SPA_ASYNC_REMOVE) {
8136eda14cbcSMatt Macy 		spa_vdev_state_enter(spa, SCL_NONE);
8137eda14cbcSMatt Macy 		spa_async_remove(spa, spa->spa_root_vdev);
8138eda14cbcSMatt Macy 		for (int i = 0; i < spa->spa_l2cache.sav_count; i++)
8139eda14cbcSMatt Macy 			spa_async_remove(spa, spa->spa_l2cache.sav_vdevs[i]);
8140eda14cbcSMatt Macy 		for (int i = 0; i < spa->spa_spares.sav_count; i++)
8141eda14cbcSMatt Macy 			spa_async_remove(spa, spa->spa_spares.sav_vdevs[i]);
8142eda14cbcSMatt Macy 		(void) spa_vdev_state_exit(spa, NULL, 0);
8143eda14cbcSMatt Macy 	}
8144eda14cbcSMatt Macy 
8145eda14cbcSMatt Macy 	if ((tasks & SPA_ASYNC_AUTOEXPAND) && !spa_suspended(spa)) {
8146eda14cbcSMatt Macy 		spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
8147eda14cbcSMatt Macy 		spa_async_autoexpand(spa, spa->spa_root_vdev);
8148eda14cbcSMatt Macy 		spa_config_exit(spa, SCL_CONFIG, FTAG);
8149eda14cbcSMatt Macy 	}
8150eda14cbcSMatt Macy 
8151eda14cbcSMatt Macy 	/*
8152eda14cbcSMatt Macy 	 * See if any devices need to be probed.
8153eda14cbcSMatt Macy 	 */
8154eda14cbcSMatt Macy 	if (tasks & SPA_ASYNC_PROBE) {
8155eda14cbcSMatt Macy 		spa_vdev_state_enter(spa, SCL_NONE);
8156eda14cbcSMatt Macy 		spa_async_probe(spa, spa->spa_root_vdev);
8157eda14cbcSMatt Macy 		(void) spa_vdev_state_exit(spa, NULL, 0);
8158eda14cbcSMatt Macy 	}
8159eda14cbcSMatt Macy 
8160eda14cbcSMatt Macy 	/*
8161eda14cbcSMatt Macy 	 * If any devices are done replacing, detach them.
8162eda14cbcSMatt Macy 	 */
81637877fdebSMatt Macy 	if (tasks & SPA_ASYNC_RESILVER_DONE ||
81647877fdebSMatt Macy 	    tasks & SPA_ASYNC_REBUILD_DONE) {
8165eda14cbcSMatt Macy 		spa_vdev_resilver_done(spa);
8166eda14cbcSMatt Macy 	}
8167eda14cbcSMatt Macy 
8168eda14cbcSMatt Macy 	/*
8169eda14cbcSMatt Macy 	 * Kick off a resilver.
8170eda14cbcSMatt Macy 	 */
8171eda14cbcSMatt Macy 	if (tasks & SPA_ASYNC_RESILVER &&
8172eda14cbcSMatt Macy 	    !vdev_rebuild_active(spa->spa_root_vdev) &&
8173eda14cbcSMatt Macy 	    (!dsl_scan_resilvering(dp) ||
8174eda14cbcSMatt Macy 	    !spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_RESILVER_DEFER)))
8175eda14cbcSMatt Macy 		dsl_scan_restart_resilver(dp, 0);
8176eda14cbcSMatt Macy 
8177eda14cbcSMatt Macy 	if (tasks & SPA_ASYNC_INITIALIZE_RESTART) {
8178eda14cbcSMatt Macy 		mutex_enter(&spa_namespace_lock);
8179eda14cbcSMatt Macy 		spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
8180eda14cbcSMatt Macy 		vdev_initialize_restart(spa->spa_root_vdev);
8181eda14cbcSMatt Macy 		spa_config_exit(spa, SCL_CONFIG, FTAG);
8182eda14cbcSMatt Macy 		mutex_exit(&spa_namespace_lock);
8183eda14cbcSMatt Macy 	}
8184eda14cbcSMatt Macy 
8185eda14cbcSMatt Macy 	if (tasks & SPA_ASYNC_TRIM_RESTART) {
8186eda14cbcSMatt Macy 		mutex_enter(&spa_namespace_lock);
8187eda14cbcSMatt Macy 		spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
8188eda14cbcSMatt Macy 		vdev_trim_restart(spa->spa_root_vdev);
8189eda14cbcSMatt Macy 		spa_config_exit(spa, SCL_CONFIG, FTAG);
8190eda14cbcSMatt Macy 		mutex_exit(&spa_namespace_lock);
8191eda14cbcSMatt Macy 	}
8192eda14cbcSMatt Macy 
8193eda14cbcSMatt Macy 	if (tasks & SPA_ASYNC_AUTOTRIM_RESTART) {
8194eda14cbcSMatt Macy 		mutex_enter(&spa_namespace_lock);
8195eda14cbcSMatt Macy 		spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
8196eda14cbcSMatt Macy 		vdev_autotrim_restart(spa);
8197eda14cbcSMatt Macy 		spa_config_exit(spa, SCL_CONFIG, FTAG);
8198eda14cbcSMatt Macy 		mutex_exit(&spa_namespace_lock);
8199eda14cbcSMatt Macy 	}
8200eda14cbcSMatt Macy 
8201eda14cbcSMatt Macy 	/*
8202eda14cbcSMatt Macy 	 * Kick off L2 cache whole device TRIM.
8203eda14cbcSMatt Macy 	 */
8204eda14cbcSMatt Macy 	if (tasks & SPA_ASYNC_L2CACHE_TRIM) {
8205eda14cbcSMatt Macy 		mutex_enter(&spa_namespace_lock);
8206eda14cbcSMatt Macy 		spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
8207eda14cbcSMatt Macy 		vdev_trim_l2arc(spa);
8208eda14cbcSMatt Macy 		spa_config_exit(spa, SCL_CONFIG, FTAG);
8209eda14cbcSMatt Macy 		mutex_exit(&spa_namespace_lock);
8210eda14cbcSMatt Macy 	}
8211eda14cbcSMatt Macy 
8212eda14cbcSMatt Macy 	/*
8213eda14cbcSMatt Macy 	 * Kick off L2 cache rebuilding.
8214eda14cbcSMatt Macy 	 */
8215eda14cbcSMatt Macy 	if (tasks & SPA_ASYNC_L2CACHE_REBUILD) {
8216eda14cbcSMatt Macy 		mutex_enter(&spa_namespace_lock);
8217eda14cbcSMatt Macy 		spa_config_enter(spa, SCL_L2ARC, FTAG, RW_READER);
8218eda14cbcSMatt Macy 		l2arc_spa_rebuild_start(spa);
8219eda14cbcSMatt Macy 		spa_config_exit(spa, SCL_L2ARC, FTAG);
8220eda14cbcSMatt Macy 		mutex_exit(&spa_namespace_lock);
8221eda14cbcSMatt Macy 	}
8222eda14cbcSMatt Macy 
8223eda14cbcSMatt Macy 	/*
8224eda14cbcSMatt Macy 	 * Let the world know that we're done.
8225eda14cbcSMatt Macy 	 */
8226eda14cbcSMatt Macy 	mutex_enter(&spa->spa_async_lock);
8227eda14cbcSMatt Macy 	spa->spa_async_thread = NULL;
8228eda14cbcSMatt Macy 	cv_broadcast(&spa->spa_async_cv);
8229eda14cbcSMatt Macy 	mutex_exit(&spa->spa_async_lock);
8230eda14cbcSMatt Macy 	thread_exit();
8231eda14cbcSMatt Macy }
8232eda14cbcSMatt Macy 
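/*
 * Block new async task dispatch, wait for any in-flight async thread to
 * exit, then pause device removal and cancel the condense,
 * checkpoint-discard, and livelist zthr threads.
 */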
8233eda14cbcSMatt Macy void
8234eda14cbcSMatt Macy spa_async_suspend(spa_t *spa)
8235eda14cbcSMatt Macy {
8236eda14cbcSMatt Macy 	mutex_enter(&spa->spa_async_lock);
8237eda14cbcSMatt Macy 	spa->spa_async_suspended++;
8238eda14cbcSMatt Macy 	while (spa->spa_async_thread != NULL)
8239eda14cbcSMatt Macy 		cv_wait(&spa->spa_async_cv, &spa->spa_async_lock);
8240eda14cbcSMatt Macy 	mutex_exit(&spa->spa_async_lock);
8241eda14cbcSMatt Macy 
8242eda14cbcSMatt Macy 	spa_vdev_remove_suspend(spa);
8243eda14cbcSMatt Macy 
8244eda14cbcSMatt Macy 	zthr_t *condense_thread = spa->spa_condense_zthr;
8245eda14cbcSMatt Macy 	if (condense_thread != NULL)
8246eda14cbcSMatt Macy 		zthr_cancel(condense_thread);
8247eda14cbcSMatt Macy 
8248eda14cbcSMatt Macy 	zthr_t *discard_thread = spa->spa_checkpoint_discard_zthr;
8249eda14cbcSMatt Macy 	if (discard_thread != NULL)
8250eda14cbcSMatt Macy 		zthr_cancel(discard_thread);
8251eda14cbcSMatt Macy 
8252eda14cbcSMatt Macy 	zthr_t *ll_delete_thread = spa->spa_livelist_delete_zthr;
8253eda14cbcSMatt Macy 	if (ll_delete_thread != NULL)
8254eda14cbcSMatt Macy 		zthr_cancel(ll_delete_thread);
8255eda14cbcSMatt Macy 
8256eda14cbcSMatt Macy 	zthr_t *ll_condense_thread = spa->spa_livelist_condense_zthr;
8257eda14cbcSMatt Macy 	if (ll_condense_thread != NULL)
8258eda14cbcSMatt Macy 		zthr_cancel(ll_condense_thread);
8259eda14cbcSMatt Macy }
8260eda14cbcSMatt Macy 
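/*
 * Drop one async-suspend reference, restart device removal, and resume the
 * condense, checkpoint-discard, and livelist zthr threads.
 */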
8261eda14cbcSMatt Macy void
8262eda14cbcSMatt Macy spa_async_resume(spa_t *spa)
8263eda14cbcSMatt Macy {
8264eda14cbcSMatt Macy 	mutex_enter(&spa->spa_async_lock);
8265eda14cbcSMatt Macy 	ASSERT(spa->spa_async_suspended != 0);
8266eda14cbcSMatt Macy 	spa->spa_async_suspended--;
8267eda14cbcSMatt Macy 	mutex_exit(&spa->spa_async_lock);
8268eda14cbcSMatt Macy 	spa_restart_removal(spa);
8269eda14cbcSMatt Macy 
8270eda14cbcSMatt Macy 	zthr_t *condense_thread = spa->spa_condense_zthr;
8271eda14cbcSMatt Macy 	if (condense_thread != NULL)
8272eda14cbcSMatt Macy 		zthr_resume(condense_thread);
8273eda14cbcSMatt Macy 
8274eda14cbcSMatt Macy 	zthr_t *discard_thread = spa->spa_checkpoint_discard_zthr;
8275eda14cbcSMatt Macy 	if (discard_thread != NULL)
8276eda14cbcSMatt Macy 		zthr_resume(discard_thread);
8277eda14cbcSMatt Macy 
8278eda14cbcSMatt Macy 	zthr_t *ll_delete_thread = spa->spa_livelist_delete_zthr;
8279eda14cbcSMatt Macy 	if (ll_delete_thread != NULL)
8280eda14cbcSMatt Macy 		zthr_resume(ll_delete_thread);
8281eda14cbcSMatt Macy 
8282eda14cbcSMatt Macy 	zthr_t *ll_condense_thread = spa->spa_livelist_condense_zthr;
8283eda14cbcSMatt Macy 	if (ll_condense_thread != NULL)
8284eda14cbcSMatt Macy 		zthr_resume(ll_condense_thread);
8285eda14cbcSMatt Macy }
8286eda14cbcSMatt Macy 
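/*
 * Return B_TRUE if any async task other than a temporarily deferred config
 * update is pending.  A config update is deferred while we are within
 * zfs_ccw_retry_interval of the last cache-file write failure.
 */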
8287eda14cbcSMatt Macy static boolean_t
8288eda14cbcSMatt Macy spa_async_tasks_pending(spa_t *spa)
8289eda14cbcSMatt Macy {
8290eda14cbcSMatt Macy 	uint_t non_config_tasks;
8291eda14cbcSMatt Macy 	uint_t config_task;
8292eda14cbcSMatt Macy 	boolean_t config_task_suspended;
8293eda14cbcSMatt Macy 
8294eda14cbcSMatt Macy 	non_config_tasks = spa->spa_async_tasks & ~SPA_ASYNC_CONFIG_UPDATE;
8295eda14cbcSMatt Macy 	config_task = spa->spa_async_tasks & SPA_ASYNC_CONFIG_UPDATE;
8296eda14cbcSMatt Macy 	if (spa->spa_ccw_fail_time == 0) {
8297eda14cbcSMatt Macy 		config_task_suspended = B_FALSE;
8298eda14cbcSMatt Macy 	} else {
8299eda14cbcSMatt Macy 		config_task_suspended =
8300eda14cbcSMatt Macy 		    (gethrtime() - spa->spa_ccw_fail_time) <
8301eda14cbcSMatt Macy 		    ((hrtime_t)zfs_ccw_retry_interval * NANOSEC);
8302eda14cbcSMatt Macy 	}
8303eda14cbcSMatt Macy 
8304eda14cbcSMatt Macy 	return (non_config_tasks || (config_task && !config_task_suspended));
8305eda14cbcSMatt Macy }
8306eda14cbcSMatt Macy 
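/*
 * Create the async task thread if there is pending work, dispatch is not
 * suspended, and no async thread is already running.
 */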
8307eda14cbcSMatt Macy static void
8308eda14cbcSMatt Macy spa_async_dispatch(spa_t *spa)
8309eda14cbcSMatt Macy {
8310eda14cbcSMatt Macy 	mutex_enter(&spa->spa_async_lock);
8311eda14cbcSMatt Macy 	if (spa_async_tasks_pending(spa) &&
8312eda14cbcSMatt Macy 	    !spa->spa_async_suspended &&
8313eda14cbcSMatt Macy 	    spa->spa_async_thread == NULL)
8314eda14cbcSMatt Macy 		spa->spa_async_thread = thread_create(NULL, 0,
8315eda14cbcSMatt Macy 		    spa_async_thread, spa, 0, &p0, TS_RUN, maxclsyspri);
8316eda14cbcSMatt Macy 	mutex_exit(&spa->spa_async_lock);
8317eda14cbcSMatt Macy }
8318eda14cbcSMatt Macy 
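/*
 * Add a task to the pending async task mask; it will be picked up the next
 * time spa_async_dispatch() runs.
 */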
8319eda14cbcSMatt Macy void
8320eda14cbcSMatt Macy spa_async_request(spa_t *spa, int task)
8321eda14cbcSMatt Macy {
8322eda14cbcSMatt Macy 	zfs_dbgmsg("spa=%s async request task=%u", spa->spa_name, task);
8323eda14cbcSMatt Macy 	mutex_enter(&spa->spa_async_lock);
8324eda14cbcSMatt Macy 	spa->spa_async_tasks |= task;
8325eda14cbcSMatt Macy 	mutex_exit(&spa->spa_async_lock);
8326eda14cbcSMatt Macy }
8327eda14cbcSMatt Macy 
8328eda14cbcSMatt Macy int
8329eda14cbcSMatt Macy spa_async_tasks(spa_t *spa)
8330eda14cbcSMatt Macy {
8331eda14cbcSMatt Macy 	return (spa->spa_async_tasks);
8332eda14cbcSMatt Macy }
8333eda14cbcSMatt Macy 
8334eda14cbcSMatt Macy /*
8335eda14cbcSMatt Macy  * ==========================================================================
8336eda14cbcSMatt Macy  * SPA syncing routines
8337eda14cbcSMatt Macy  * ==========================================================================
8338eda14cbcSMatt Macy  */
8339eda14cbcSMatt Macy 
8340eda14cbcSMatt Macy 
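/*
 * bplist/bpobj iteration callbacks that append each block pointer to the
 * bpobj passed in 'arg', preserving whether the entry was an allocation or
 * a free.
 */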
8341eda14cbcSMatt Macy static int
8342eda14cbcSMatt Macy bpobj_enqueue_cb(void *arg, const blkptr_t *bp, boolean_t bp_freed,
8343eda14cbcSMatt Macy     dmu_tx_t *tx)
8344eda14cbcSMatt Macy {
8345eda14cbcSMatt Macy 	bpobj_t *bpo = arg;
8346eda14cbcSMatt Macy 	bpobj_enqueue(bpo, bp, bp_freed, tx);
8347eda14cbcSMatt Macy 	return (0);
8348eda14cbcSMatt Macy }
8349eda14cbcSMatt Macy 
8350eda14cbcSMatt Macy int
8351eda14cbcSMatt Macy bpobj_enqueue_alloc_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
8352eda14cbcSMatt Macy {
8353eda14cbcSMatt Macy 	return (bpobj_enqueue_cb(arg, bp, B_FALSE, tx));
8354eda14cbcSMatt Macy }
8355eda14cbcSMatt Macy 
8356eda14cbcSMatt Macy int
8357eda14cbcSMatt Macy bpobj_enqueue_free_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
8358eda14cbcSMatt Macy {
8359eda14cbcSMatt Macy 	return (bpobj_enqueue_cb(arg, bp, B_TRUE, tx));
8360eda14cbcSMatt Macy }
8361eda14cbcSMatt Macy 
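/*
 * Iteration callback that issues an asynchronous free for each block
 * pointer, parented to the root zio passed in 'arg'.
 */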
8362eda14cbcSMatt Macy static int
8363eda14cbcSMatt Macy spa_free_sync_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
8364eda14cbcSMatt Macy {
8365eda14cbcSMatt Macy 	zio_t *pio = arg;
8366eda14cbcSMatt Macy 
8367eda14cbcSMatt Macy 	zio_nowait(zio_free_sync(pio, pio->io_spa, dmu_tx_get_txg(tx), bp,
8368eda14cbcSMatt Macy 	    pio->io_flags));
8369eda14cbcSMatt Macy 	return (0);
8370eda14cbcSMatt Macy }
8371eda14cbcSMatt Macy 
8372eda14cbcSMatt Macy static int
8373eda14cbcSMatt Macy bpobj_spa_free_sync_cb(void *arg, const blkptr_t *bp, boolean_t bp_freed,
8374eda14cbcSMatt Macy     dmu_tx_t *tx)
8375eda14cbcSMatt Macy {
8376eda14cbcSMatt Macy 	ASSERT(!bp_freed);
8377eda14cbcSMatt Macy 	return (spa_free_sync_cb(arg, bp, tx));
8378eda14cbcSMatt Macy }
8379eda14cbcSMatt Macy 
8380eda14cbcSMatt Macy /*
8381eda14cbcSMatt Macy  * Note: this simple function is not inlined to make it easier to dtrace the
8382eda14cbcSMatt Macy  * amount of time spent syncing frees.
8383eda14cbcSMatt Macy  */
8384eda14cbcSMatt Macy static void
8385eda14cbcSMatt Macy spa_sync_frees(spa_t *spa, bplist_t *bpl, dmu_tx_t *tx)
8386eda14cbcSMatt Macy {
8387eda14cbcSMatt Macy 	zio_t *zio = zio_root(spa, NULL, NULL, 0);
8388eda14cbcSMatt Macy 	bplist_iterate(bpl, spa_free_sync_cb, zio, tx);
8389eda14cbcSMatt Macy 	VERIFY(zio_wait(zio) == 0);
8390eda14cbcSMatt Macy }
8391eda14cbcSMatt Macy 
8392eda14cbcSMatt Macy /*
8393eda14cbcSMatt Macy  * Note: this simple function is not inlined to make it easier to dtrace the
8394eda14cbcSMatt Macy  * amount of time spent syncing deferred frees.
8395eda14cbcSMatt Macy  */
8396eda14cbcSMatt Macy static void
8397eda14cbcSMatt Macy spa_sync_deferred_frees(spa_t *spa, dmu_tx_t *tx)
8398eda14cbcSMatt Macy {
8399eda14cbcSMatt Macy 	if (spa_sync_pass(spa) != 1)
8400eda14cbcSMatt Macy 		return;
8401eda14cbcSMatt Macy 
8402eda14cbcSMatt Macy 	/*
8403eda14cbcSMatt Macy 	 * Note:
8404eda14cbcSMatt Macy 	 * If the log space map feature is active, we stop deferring
8405eda14cbcSMatt Macy 	 * frees to the next TXG and therefore running this function
8406eda14cbcSMatt Macy 	 * would be considered a no-op as spa_deferred_bpobj should
8407eda14cbcSMatt Macy 	 * not have any entries.
8408eda14cbcSMatt Macy 	 *
8409eda14cbcSMatt Macy 	 * That said we run this function anyway (instead of returning
8410eda14cbcSMatt Macy 	 * immediately) for the edge-case scenario where we just
8411eda14cbcSMatt Macy 	 * activated the log space map feature in this TXG but we have
8412eda14cbcSMatt Macy 	 * deferred frees from the previous TXG.
8413eda14cbcSMatt Macy 	 */
8414eda14cbcSMatt Macy 	zio_t *zio = zio_root(spa, NULL, NULL, 0);
8415eda14cbcSMatt Macy 	VERIFY3U(bpobj_iterate(&spa->spa_deferred_bpobj,
8416eda14cbcSMatt Macy 	    bpobj_spa_free_sync_cb, zio, tx), ==, 0);
8417eda14cbcSMatt Macy 	VERIFY0(zio_wait(zio));
8418eda14cbcSMatt Macy }
8419eda14cbcSMatt Macy 
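/*
 * Pack an nvlist into a buffer rounded up to whole SPA_CONFIG_BLOCKSIZE
 * blocks, write it to the given MOS object, and record the packed size in
 * the object's bonus buffer.
 */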
8420eda14cbcSMatt Macy static void
8421eda14cbcSMatt Macy spa_sync_nvlist(spa_t *spa, uint64_t obj, nvlist_t *nv, dmu_tx_t *tx)
8422eda14cbcSMatt Macy {
8423eda14cbcSMatt Macy 	char *packed = NULL;
8424eda14cbcSMatt Macy 	size_t bufsize;
8425eda14cbcSMatt Macy 	size_t nvsize = 0;
8426eda14cbcSMatt Macy 	dmu_buf_t *db;
8427eda14cbcSMatt Macy 
8428eda14cbcSMatt Macy 	VERIFY(nvlist_size(nv, &nvsize, NV_ENCODE_XDR) == 0);
8429eda14cbcSMatt Macy 
8430eda14cbcSMatt Macy 	/*
8431eda14cbcSMatt Macy 	 * Write full (SPA_CONFIG_BLOCKSIZE) blocks of configuration
8432eda14cbcSMatt Macy 	 * information.  This avoids the dmu_buf_will_dirty() path and
8433eda14cbcSMatt Macy 	 * saves us a pre-read to get data we don't actually care about.
8434eda14cbcSMatt Macy 	 */
8435eda14cbcSMatt Macy 	bufsize = P2ROUNDUP((uint64_t)nvsize, SPA_CONFIG_BLOCKSIZE);
8436eda14cbcSMatt Macy 	packed = vmem_alloc(bufsize, KM_SLEEP);
8437eda14cbcSMatt Macy 
8438eda14cbcSMatt Macy 	VERIFY(nvlist_pack(nv, &packed, &nvsize, NV_ENCODE_XDR,
8439eda14cbcSMatt Macy 	    KM_SLEEP) == 0);
8440eda14cbcSMatt Macy 	bzero(packed + nvsize, bufsize - nvsize);
8441eda14cbcSMatt Macy 
8442eda14cbcSMatt Macy 	dmu_write(spa->spa_meta_objset, obj, 0, bufsize, packed, tx);
8443eda14cbcSMatt Macy 
8444eda14cbcSMatt Macy 	vmem_free(packed, bufsize);
8445eda14cbcSMatt Macy 
8446eda14cbcSMatt Macy 	VERIFY(0 == dmu_bonus_hold(spa->spa_meta_objset, obj, FTAG, &db));
8447eda14cbcSMatt Macy 	dmu_buf_will_dirty(db, tx);
8448eda14cbcSMatt Macy 	*(uint64_t *)db->db_data = nvsize;
8449eda14cbcSMatt Macy 	dmu_buf_rele(db, FTAG);
8450eda14cbcSMatt Macy }
8451eda14cbcSMatt Macy 
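/*
 * Sync the nvlist describing an auxiliary vdev set (hot spares or L2ARC
 * cache devices) to its packed-nvlist object in the MOS, allocating that
 * object on first use.
 */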
8452eda14cbcSMatt Macy static void
8453eda14cbcSMatt Macy spa_sync_aux_dev(spa_t *spa, spa_aux_vdev_t *sav, dmu_tx_t *tx,
8454eda14cbcSMatt Macy     const char *config, const char *entry)
8455eda14cbcSMatt Macy {
8456eda14cbcSMatt Macy 	nvlist_t *nvroot;
8457eda14cbcSMatt Macy 	nvlist_t **list;
8458eda14cbcSMatt Macy 	int i;
8459eda14cbcSMatt Macy 
8460eda14cbcSMatt Macy 	if (!sav->sav_sync)
8461eda14cbcSMatt Macy 		return;
8462eda14cbcSMatt Macy 
8463eda14cbcSMatt Macy 	/*
8464eda14cbcSMatt Macy 	 * Update the MOS nvlist describing the list of available devices.
8465eda14cbcSMatt Macy 	 * spa_validate_aux() will have already made sure this nvlist is
8466eda14cbcSMatt Macy 	 * valid and the vdevs are labeled appropriately.
8467eda14cbcSMatt Macy 	 */
8468eda14cbcSMatt Macy 	if (sav->sav_object == 0) {
8469eda14cbcSMatt Macy 		sav->sav_object = dmu_object_alloc(spa->spa_meta_objset,
8470eda14cbcSMatt Macy 		    DMU_OT_PACKED_NVLIST, 1 << 14, DMU_OT_PACKED_NVLIST_SIZE,
8471eda14cbcSMatt Macy 		    sizeof (uint64_t), tx);
8472eda14cbcSMatt Macy 		VERIFY(zap_update(spa->spa_meta_objset,
8473eda14cbcSMatt Macy 		    DMU_POOL_DIRECTORY_OBJECT, entry, sizeof (uint64_t), 1,
8474eda14cbcSMatt Macy 		    &sav->sav_object, tx) == 0);
8475eda14cbcSMatt Macy 	}
8476eda14cbcSMatt Macy 
8477eda14cbcSMatt Macy 	VERIFY(nvlist_alloc(&nvroot, NV_UNIQUE_NAME, KM_SLEEP) == 0);
8478eda14cbcSMatt Macy 	if (sav->sav_count == 0) {
8479eda14cbcSMatt Macy 		VERIFY(nvlist_add_nvlist_array(nvroot, config, NULL, 0) == 0);
8480eda14cbcSMatt Macy 	} else {
8481eda14cbcSMatt Macy 		list = kmem_alloc(sav->sav_count*sizeof (void *), KM_SLEEP);
8482eda14cbcSMatt Macy 		for (i = 0; i < sav->sav_count; i++)
8483eda14cbcSMatt Macy 			list[i] = vdev_config_generate(spa, sav->sav_vdevs[i],
8484eda14cbcSMatt Macy 			    B_FALSE, VDEV_CONFIG_L2CACHE);
8485eda14cbcSMatt Macy 		VERIFY(nvlist_add_nvlist_array(nvroot, config, list,
8486eda14cbcSMatt Macy 		    sav->sav_count) == 0);
8487eda14cbcSMatt Macy 		for (i = 0; i < sav->sav_count; i++)
8488eda14cbcSMatt Macy 			nvlist_free(list[i]);
8489eda14cbcSMatt Macy 		kmem_free(list, sav->sav_count * sizeof (void *));
8490eda14cbcSMatt Macy 	}
8491eda14cbcSMatt Macy 
8492eda14cbcSMatt Macy 	spa_sync_nvlist(spa, sav->sav_object, nvroot, tx);
8493eda14cbcSMatt Macy 	nvlist_free(nvroot);
8494eda14cbcSMatt Macy 
8495eda14cbcSMatt Macy 	sav->sav_sync = B_FALSE;
8496eda14cbcSMatt Macy }
8497eda14cbcSMatt Macy 
8498eda14cbcSMatt Macy /*
8499eda14cbcSMatt Macy  * Rebuild spa's all-vdev ZAP from the vdev ZAPs indicated in each vdev_t.
8500eda14cbcSMatt Macy  * The all-vdev ZAP must be empty.
8501eda14cbcSMatt Macy  */
8502eda14cbcSMatt Macy static void
8503eda14cbcSMatt Macy spa_avz_build(vdev_t *vd, uint64_t avz, dmu_tx_t *tx)
8504eda14cbcSMatt Macy {
8505eda14cbcSMatt Macy 	spa_t *spa = vd->vdev_spa;
8506eda14cbcSMatt Macy 
8507eda14cbcSMatt Macy 	if (vd->vdev_top_zap != 0) {
8508eda14cbcSMatt Macy 		VERIFY0(zap_add_int(spa->spa_meta_objset, avz,
8509eda14cbcSMatt Macy 		    vd->vdev_top_zap, tx));
8510eda14cbcSMatt Macy 	}
8511eda14cbcSMatt Macy 	if (vd->vdev_leaf_zap != 0) {
8512eda14cbcSMatt Macy 		VERIFY0(zap_add_int(spa->spa_meta_objset, avz,
8513eda14cbcSMatt Macy 		    vd->vdev_leaf_zap, tx));
8514eda14cbcSMatt Macy 	}
8515eda14cbcSMatt Macy 	for (uint64_t i = 0; i < vd->vdev_children; i++) {
8516eda14cbcSMatt Macy 		spa_avz_build(vd->vdev_child[i], avz, tx);
8517eda14cbcSMatt Macy 	}
8518eda14cbcSMatt Macy }
8519eda14cbcSMatt Macy 
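/*
 * Sync the in-core pool configuration out to its MOS object, first
 * rebuilding or destroying the all-vdev ZAP as dictated by spa_avz_action
 * and creating per-vdev ZAPs for any vdevs that lack them.
 */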
8520eda14cbcSMatt Macy static void
8521eda14cbcSMatt Macy spa_sync_config_object(spa_t *spa, dmu_tx_t *tx)
8522eda14cbcSMatt Macy {
8523eda14cbcSMatt Macy 	nvlist_t *config;
8524eda14cbcSMatt Macy 
8525eda14cbcSMatt Macy 	/*
8526eda14cbcSMatt Macy 	 * If the pool is being imported from a pre-per-vdev-ZAP version of ZFS,
8527eda14cbcSMatt Macy 	 * its config may not be dirty but we still need to build per-vdev ZAPs.
8528eda14cbcSMatt Macy 	 * Similarly, if the pool is being assembled (e.g. after a split), we
8529eda14cbcSMatt Macy 	 * need to rebuild the AVZ although the config may not be dirty.
8530eda14cbcSMatt Macy 	 */
8531eda14cbcSMatt Macy 	if (list_is_empty(&spa->spa_config_dirty_list) &&
8532eda14cbcSMatt Macy 	    spa->spa_avz_action == AVZ_ACTION_NONE)
8533eda14cbcSMatt Macy 		return;
8534eda14cbcSMatt Macy 
8535eda14cbcSMatt Macy 	spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
8536eda14cbcSMatt Macy 
8537eda14cbcSMatt Macy 	ASSERT(spa->spa_avz_action == AVZ_ACTION_NONE ||
8538eda14cbcSMatt Macy 	    spa->spa_avz_action == AVZ_ACTION_INITIALIZE ||
8539eda14cbcSMatt Macy 	    spa->spa_all_vdev_zaps != 0);
8540eda14cbcSMatt Macy 
8541eda14cbcSMatt Macy 	if (spa->spa_avz_action == AVZ_ACTION_REBUILD) {
8542eda14cbcSMatt Macy 		/* Make and build the new AVZ */
8543eda14cbcSMatt Macy 		uint64_t new_avz = zap_create(spa->spa_meta_objset,
8544eda14cbcSMatt Macy 		    DMU_OTN_ZAP_METADATA, DMU_OT_NONE, 0, tx);
8545eda14cbcSMatt Macy 		spa_avz_build(spa->spa_root_vdev, new_avz, tx);
8546eda14cbcSMatt Macy 
8547eda14cbcSMatt Macy 		/* Diff old AVZ with new one */
8548eda14cbcSMatt Macy 		zap_cursor_t zc;
8549eda14cbcSMatt Macy 		zap_attribute_t za;
8550eda14cbcSMatt Macy 
8551eda14cbcSMatt Macy 		for (zap_cursor_init(&zc, spa->spa_meta_objset,
8552eda14cbcSMatt Macy 		    spa->spa_all_vdev_zaps);
8553eda14cbcSMatt Macy 		    zap_cursor_retrieve(&zc, &za) == 0;
8554eda14cbcSMatt Macy 		    zap_cursor_advance(&zc)) {
8555eda14cbcSMatt Macy 			uint64_t vdzap = za.za_first_integer;
8556eda14cbcSMatt Macy 			if (zap_lookup_int(spa->spa_meta_objset, new_avz,
8557eda14cbcSMatt Macy 			    vdzap) == ENOENT) {
8558eda14cbcSMatt Macy 				/*
8559eda14cbcSMatt Macy 				 * ZAP is listed in old AVZ but not in new one;
8560eda14cbcSMatt Macy 				 * destroy it
8561eda14cbcSMatt Macy 				 */
8562eda14cbcSMatt Macy 				VERIFY0(zap_destroy(spa->spa_meta_objset, vdzap,
8563eda14cbcSMatt Macy 				    tx));
8564eda14cbcSMatt Macy 			}
8565eda14cbcSMatt Macy 		}
8566eda14cbcSMatt Macy 
8567eda14cbcSMatt Macy 		zap_cursor_fini(&zc);
8568eda14cbcSMatt Macy 
8569eda14cbcSMatt Macy 		/* Destroy the old AVZ */
8570eda14cbcSMatt Macy 		VERIFY0(zap_destroy(spa->spa_meta_objset,
8571eda14cbcSMatt Macy 		    spa->spa_all_vdev_zaps, tx));
8572eda14cbcSMatt Macy 
8573eda14cbcSMatt Macy 		/* Replace the old AVZ in the dir obj with the new one */
8574eda14cbcSMatt Macy 		VERIFY0(zap_update(spa->spa_meta_objset,
8575eda14cbcSMatt Macy 		    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_VDEV_ZAP_MAP,
8576eda14cbcSMatt Macy 		    sizeof (new_avz), 1, &new_avz, tx));
8577eda14cbcSMatt Macy 
8578eda14cbcSMatt Macy 		spa->spa_all_vdev_zaps = new_avz;
8579eda14cbcSMatt Macy 	} else if (spa->spa_avz_action == AVZ_ACTION_DESTROY) {
8580eda14cbcSMatt Macy 		zap_cursor_t zc;
8581eda14cbcSMatt Macy 		zap_attribute_t za;
8582eda14cbcSMatt Macy 
8583eda14cbcSMatt Macy 		/* Walk through the AVZ and destroy all listed ZAPs */
8584eda14cbcSMatt Macy 		for (zap_cursor_init(&zc, spa->spa_meta_objset,
8585eda14cbcSMatt Macy 		    spa->spa_all_vdev_zaps);
8586eda14cbcSMatt Macy 		    zap_cursor_retrieve(&zc, &za) == 0;
8587eda14cbcSMatt Macy 		    zap_cursor_advance(&zc)) {
8588eda14cbcSMatt Macy 			uint64_t zap = za.za_first_integer;
8589eda14cbcSMatt Macy 			VERIFY0(zap_destroy(spa->spa_meta_objset, zap, tx));
8590eda14cbcSMatt Macy 		}
8591eda14cbcSMatt Macy 
8592eda14cbcSMatt Macy 		zap_cursor_fini(&zc);
8593eda14cbcSMatt Macy 
8594eda14cbcSMatt Macy 		/* Destroy and unlink the AVZ itself */
8595eda14cbcSMatt Macy 		VERIFY0(zap_destroy(spa->spa_meta_objset,
8596eda14cbcSMatt Macy 		    spa->spa_all_vdev_zaps, tx));
8597eda14cbcSMatt Macy 		VERIFY0(zap_remove(spa->spa_meta_objset,
8598eda14cbcSMatt Macy 		    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_VDEV_ZAP_MAP, tx));
8599eda14cbcSMatt Macy 		spa->spa_all_vdev_zaps = 0;
8600eda14cbcSMatt Macy 	}
8601eda14cbcSMatt Macy 
8602eda14cbcSMatt Macy 	if (spa->spa_all_vdev_zaps == 0) {
8603eda14cbcSMatt Macy 		spa->spa_all_vdev_zaps = zap_create_link(spa->spa_meta_objset,
8604eda14cbcSMatt Macy 		    DMU_OTN_ZAP_METADATA, DMU_POOL_DIRECTORY_OBJECT,
8605eda14cbcSMatt Macy 		    DMU_POOL_VDEV_ZAP_MAP, tx);
8606eda14cbcSMatt Macy 	}
8607eda14cbcSMatt Macy 	spa->spa_avz_action = AVZ_ACTION_NONE;
8608eda14cbcSMatt Macy 
8609eda14cbcSMatt Macy 	/* Create ZAPs for vdevs that don't have them. */
8610eda14cbcSMatt Macy 	vdev_construct_zaps(spa->spa_root_vdev, tx);
8611eda14cbcSMatt Macy 
8612eda14cbcSMatt Macy 	config = spa_config_generate(spa, spa->spa_root_vdev,
8613eda14cbcSMatt Macy 	    dmu_tx_get_txg(tx), B_FALSE);
8614eda14cbcSMatt Macy 
8615eda14cbcSMatt Macy 	/*
8616eda14cbcSMatt Macy 	 * If we're upgrading the spa version then make sure that
8617eda14cbcSMatt Macy 	 * the config object gets updated with the correct version.
8618eda14cbcSMatt Macy 	 */
8619eda14cbcSMatt Macy 	if (spa->spa_ubsync.ub_version < spa->spa_uberblock.ub_version)
8620eda14cbcSMatt Macy 		fnvlist_add_uint64(config, ZPOOL_CONFIG_VERSION,
8621eda14cbcSMatt Macy 		    spa->spa_uberblock.ub_version);
8622eda14cbcSMatt Macy 
8623eda14cbcSMatt Macy 	spa_config_exit(spa, SCL_STATE, FTAG);
8624eda14cbcSMatt Macy 
8625eda14cbcSMatt Macy 	nvlist_free(spa->spa_config_syncing);
8626eda14cbcSMatt Macy 	spa->spa_config_syncing = config;
8627eda14cbcSMatt Macy 
8628eda14cbcSMatt Macy 	spa_sync_nvlist(spa, spa->spa_config_object, config, tx);
8629eda14cbcSMatt Macy }
8630eda14cbcSMatt Macy 
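/*
 * Sync task that records a newer pool version in the uberblock, dirties the
 * vdev configuration so the labels are rewritten, and logs the change.
 */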
8631eda14cbcSMatt Macy static void
8632eda14cbcSMatt Macy spa_sync_version(void *arg, dmu_tx_t *tx)
8633eda14cbcSMatt Macy {
8634eda14cbcSMatt Macy 	uint64_t *versionp = arg;
8635eda14cbcSMatt Macy 	uint64_t version = *versionp;
8636eda14cbcSMatt Macy 	spa_t *spa = dmu_tx_pool(tx)->dp_spa;
8637eda14cbcSMatt Macy 
8638eda14cbcSMatt Macy 	/*
8639eda14cbcSMatt Macy 	 * Setting the version is special cased when first creating the pool.
8640eda14cbcSMatt Macy 	 */
8641eda14cbcSMatt Macy 	ASSERT(tx->tx_txg != TXG_INITIAL);
8642eda14cbcSMatt Macy 
8643eda14cbcSMatt Macy 	ASSERT(SPA_VERSION_IS_SUPPORTED(version));
8644eda14cbcSMatt Macy 	ASSERT(version >= spa_version(spa));
8645eda14cbcSMatt Macy 
8646eda14cbcSMatt Macy 	spa->spa_uberblock.ub_version = version;
8647eda14cbcSMatt Macy 	vdev_config_dirty(spa->spa_root_vdev);
8648eda14cbcSMatt Macy 	spa_history_log_internal(spa, "set", tx, "version=%lld",
8649eda14cbcSMatt Macy 	    (longlong_t)version);
8650eda14cbcSMatt Macy }
8651eda14cbcSMatt Macy 
8652eda14cbcSMatt Macy /*
8653eda14cbcSMatt Macy  * Set zpool properties.
8654eda14cbcSMatt Macy  */
8655eda14cbcSMatt Macy static void
8656eda14cbcSMatt Macy spa_sync_props(void *arg, dmu_tx_t *tx)
8657eda14cbcSMatt Macy {
8658eda14cbcSMatt Macy 	nvlist_t *nvp = arg;
8659eda14cbcSMatt Macy 	spa_t *spa = dmu_tx_pool(tx)->dp_spa;
8660eda14cbcSMatt Macy 	objset_t *mos = spa->spa_meta_objset;
8661eda14cbcSMatt Macy 	nvpair_t *elem = NULL;
8662eda14cbcSMatt Macy 
8663eda14cbcSMatt Macy 	mutex_enter(&spa->spa_props_lock);
8664eda14cbcSMatt Macy 
8665eda14cbcSMatt Macy 	while ((elem = nvlist_next_nvpair(nvp, elem))) {
8666eda14cbcSMatt Macy 		uint64_t intval;
8667eda14cbcSMatt Macy 		char *strval, *fname;
8668eda14cbcSMatt Macy 		zpool_prop_t prop;
8669eda14cbcSMatt Macy 		const char *propname;
8670eda14cbcSMatt Macy 		zprop_type_t proptype;
8671eda14cbcSMatt Macy 		spa_feature_t fid;
8672eda14cbcSMatt Macy 
8673eda14cbcSMatt Macy 		switch (prop = zpool_name_to_prop(nvpair_name(elem))) {
8674eda14cbcSMatt Macy 		case ZPOOL_PROP_INVAL:
8675eda14cbcSMatt Macy 			/*
8676eda14cbcSMatt Macy 			 * We checked this earlier in spa_prop_validate().
8677eda14cbcSMatt Macy 			 */
8678eda14cbcSMatt Macy 			ASSERT(zpool_prop_feature(nvpair_name(elem)));
8679eda14cbcSMatt Macy 
8680eda14cbcSMatt Macy 			fname = strchr(nvpair_name(elem), '@') + 1;
8681eda14cbcSMatt Macy 			VERIFY0(zfeature_lookup_name(fname, &fid));
8682eda14cbcSMatt Macy 
8683eda14cbcSMatt Macy 			spa_feature_enable(spa, fid, tx);
8684eda14cbcSMatt Macy 			spa_history_log_internal(spa, "set", tx,
8685eda14cbcSMatt Macy 			    "%s=enabled", nvpair_name(elem));
8686eda14cbcSMatt Macy 			break;
8687eda14cbcSMatt Macy 
8688eda14cbcSMatt Macy 		case ZPOOL_PROP_VERSION:
8689eda14cbcSMatt Macy 			intval = fnvpair_value_uint64(elem);
8690eda14cbcSMatt Macy 			/*
8691eda14cbcSMatt Macy 			 * The version is synced separately before other
8692eda14cbcSMatt Macy 			 * properties and should be correct by now.
8693eda14cbcSMatt Macy 			 */
8694eda14cbcSMatt Macy 			ASSERT3U(spa_version(spa), >=, intval);
8695eda14cbcSMatt Macy 			break;
8696eda14cbcSMatt Macy 
8697eda14cbcSMatt Macy 		case ZPOOL_PROP_ALTROOT:
8698eda14cbcSMatt Macy 			/*
8699eda14cbcSMatt Macy 			 * 'altroot' is a non-persistent property. It should
8700eda14cbcSMatt Macy 			 * have been set temporarily at creation or import time.
8701eda14cbcSMatt Macy 			 */
8702eda14cbcSMatt Macy 			ASSERT(spa->spa_root != NULL);
8703eda14cbcSMatt Macy 			break;
8704eda14cbcSMatt Macy 
8705eda14cbcSMatt Macy 		case ZPOOL_PROP_READONLY:
8706eda14cbcSMatt Macy 		case ZPOOL_PROP_CACHEFILE:
8707eda14cbcSMatt Macy 			/*
8708eda14cbcSMatt Macy 			 * 'readonly' and 'cachefile' are also non-persistent
8709eda14cbcSMatt Macy 			 * properties.
8710eda14cbcSMatt Macy 			 */
8711eda14cbcSMatt Macy 			break;
8712eda14cbcSMatt Macy 		case ZPOOL_PROP_COMMENT:
8713eda14cbcSMatt Macy 			strval = fnvpair_value_string(elem);
8714eda14cbcSMatt Macy 			if (spa->spa_comment != NULL)
8715eda14cbcSMatt Macy 				spa_strfree(spa->spa_comment);
8716eda14cbcSMatt Macy 			spa->spa_comment = spa_strdup(strval);
8717eda14cbcSMatt Macy 			/*
8718eda14cbcSMatt Macy 			 * We need to dirty the configuration on all the vdevs
8719eda14cbcSMatt Macy 			 * so that their labels get updated.  It's unnecessary
8720eda14cbcSMatt Macy 			 * to do this for pool creation since the vdev's
8721eda14cbcSMatt Macy 			 * configuration has already been dirtied.
8722eda14cbcSMatt Macy 			 */
8723eda14cbcSMatt Macy 			if (tx->tx_txg != TXG_INITIAL)
8724eda14cbcSMatt Macy 				vdev_config_dirty(spa->spa_root_vdev);
8725eda14cbcSMatt Macy 			spa_history_log_internal(spa, "set", tx,
8726eda14cbcSMatt Macy 			    "%s=%s", nvpair_name(elem), strval);
8727eda14cbcSMatt Macy 			break;
8728ee36e25aSMartin Matuska 		case ZPOOL_PROP_COMPATIBILITY:
8729ee36e25aSMartin Matuska 			strval = fnvpair_value_string(elem);
8730ee36e25aSMartin Matuska 			if (spa->spa_compatibility != NULL)
8731ee36e25aSMartin Matuska 				spa_strfree(spa->spa_compatibility);
8732ee36e25aSMartin Matuska 			spa->spa_compatibility = spa_strdup(strval);
8733ee36e25aSMartin Matuska 			/*
8734ee36e25aSMartin Matuska 			 * Dirty the configuration on vdevs as above.
8735ee36e25aSMartin Matuska 			 */
8736ee36e25aSMartin Matuska 			if (tx->tx_txg != TXG_INITIAL)
8737ee36e25aSMartin Matuska 				vdev_config_dirty(spa->spa_root_vdev);
8738ee36e25aSMartin Matuska 			spa_history_log_internal(spa, "set", tx,
8739ee36e25aSMartin Matuska 			    "%s=%s", nvpair_name(elem), strval);
8740ee36e25aSMartin Matuska 			break;
8741ee36e25aSMartin Matuska 
8742eda14cbcSMatt Macy 		default:
8743eda14cbcSMatt Macy 			/*
8744eda14cbcSMatt Macy 			 * Set pool property values in the poolprops mos object.
8745eda14cbcSMatt Macy 			 */
8746eda14cbcSMatt Macy 			if (spa->spa_pool_props_object == 0) {
8747eda14cbcSMatt Macy 				spa->spa_pool_props_object =
8748eda14cbcSMatt Macy 				    zap_create_link(mos, DMU_OT_POOL_PROPS,
8749eda14cbcSMatt Macy 				    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_PROPS,
8750eda14cbcSMatt Macy 				    tx);
8751eda14cbcSMatt Macy 			}
8752eda14cbcSMatt Macy 
8753eda14cbcSMatt Macy 			/* normalize the property name */
8754eda14cbcSMatt Macy 			propname = zpool_prop_to_name(prop);
8755eda14cbcSMatt Macy 			proptype = zpool_prop_get_type(prop);
8756eda14cbcSMatt Macy 
8757eda14cbcSMatt Macy 			if (nvpair_type(elem) == DATA_TYPE_STRING) {
8758eda14cbcSMatt Macy 				ASSERT(proptype == PROP_TYPE_STRING);
8759eda14cbcSMatt Macy 				strval = fnvpair_value_string(elem);
8760eda14cbcSMatt Macy 				VERIFY0(zap_update(mos,
8761eda14cbcSMatt Macy 				    spa->spa_pool_props_object, propname,
8762eda14cbcSMatt Macy 				    1, strlen(strval) + 1, strval, tx));
8763eda14cbcSMatt Macy 				spa_history_log_internal(spa, "set", tx,
8764eda14cbcSMatt Macy 				    "%s=%s", nvpair_name(elem), strval);
8765eda14cbcSMatt Macy 			} else if (nvpair_type(elem) == DATA_TYPE_UINT64) {
8766eda14cbcSMatt Macy 				intval = fnvpair_value_uint64(elem);
8767eda14cbcSMatt Macy 
8768eda14cbcSMatt Macy 				if (proptype == PROP_TYPE_INDEX) {
8769eda14cbcSMatt Macy 					const char *unused;
8770eda14cbcSMatt Macy 					VERIFY0(zpool_prop_index_to_string(
8771eda14cbcSMatt Macy 					    prop, intval, &unused));
8772eda14cbcSMatt Macy 				}
8773eda14cbcSMatt Macy 				VERIFY0(zap_update(mos,
8774eda14cbcSMatt Macy 				    spa->spa_pool_props_object, propname,
8775eda14cbcSMatt Macy 				    8, 1, &intval, tx));
8776eda14cbcSMatt Macy 				spa_history_log_internal(spa, "set", tx,
8777eda14cbcSMatt Macy 				    "%s=%lld", nvpair_name(elem),
8778eda14cbcSMatt Macy 				    (longlong_t)intval);
8779eda14cbcSMatt Macy 			} else {
8780eda14cbcSMatt Macy 				ASSERT(0); /* not allowed */
8781eda14cbcSMatt Macy 			}
8782eda14cbcSMatt Macy 
8783eda14cbcSMatt Macy 			switch (prop) {
8784eda14cbcSMatt Macy 			case ZPOOL_PROP_DELEGATION:
8785eda14cbcSMatt Macy 				spa->spa_delegation = intval;
8786eda14cbcSMatt Macy 				break;
8787eda14cbcSMatt Macy 			case ZPOOL_PROP_BOOTFS:
8788eda14cbcSMatt Macy 				spa->spa_bootfs = intval;
8789eda14cbcSMatt Macy 				break;
8790eda14cbcSMatt Macy 			case ZPOOL_PROP_FAILUREMODE:
8791eda14cbcSMatt Macy 				spa->spa_failmode = intval;
8792eda14cbcSMatt Macy 				break;
8793eda14cbcSMatt Macy 			case ZPOOL_PROP_AUTOTRIM:
8794eda14cbcSMatt Macy 				spa->spa_autotrim = intval;
8795eda14cbcSMatt Macy 				spa_async_request(spa,
8796eda14cbcSMatt Macy 				    SPA_ASYNC_AUTOTRIM_RESTART);
8797eda14cbcSMatt Macy 				break;
8798eda14cbcSMatt Macy 			case ZPOOL_PROP_AUTOEXPAND:
8799eda14cbcSMatt Macy 				spa->spa_autoexpand = intval;
8800eda14cbcSMatt Macy 				if (tx->tx_txg != TXG_INITIAL)
8801eda14cbcSMatt Macy 					spa_async_request(spa,
8802eda14cbcSMatt Macy 					    SPA_ASYNC_AUTOEXPAND);
8803eda14cbcSMatt Macy 				break;
8804eda14cbcSMatt Macy 			case ZPOOL_PROP_MULTIHOST:
8805eda14cbcSMatt Macy 				spa->spa_multihost = intval;
8806eda14cbcSMatt Macy 				break;
8807eda14cbcSMatt Macy 			default:
8808eda14cbcSMatt Macy 				break;
8809eda14cbcSMatt Macy 			}
8810eda14cbcSMatt Macy 		}
8811eda14cbcSMatt Macy 
8812eda14cbcSMatt Macy 	}
8813eda14cbcSMatt Macy 
8814eda14cbcSMatt Macy 	mutex_exit(&spa->spa_props_lock);
8815eda14cbcSMatt Macy }
8816eda14cbcSMatt Macy 
8817eda14cbcSMatt Macy /*
8818eda14cbcSMatt Macy  * Perform one-time upgrade on-disk changes.  spa_version() does not
8819eda14cbcSMatt Macy  * reflect the new version this txg, so there must be no changes this
8820eda14cbcSMatt Macy  * txg to anything that the upgrade code depends on after it executes.
8821eda14cbcSMatt Macy  * Therefore this must be called after dsl_pool_sync() does the sync
8822eda14cbcSMatt Macy  * tasks.
8823eda14cbcSMatt Macy  */
8824eda14cbcSMatt Macy static void
8825eda14cbcSMatt Macy spa_sync_upgrades(spa_t *spa, dmu_tx_t *tx)
8826eda14cbcSMatt Macy {
8827eda14cbcSMatt Macy 	if (spa_sync_pass(spa) != 1)
8828eda14cbcSMatt Macy 		return;
8829eda14cbcSMatt Macy 
8830eda14cbcSMatt Macy 	dsl_pool_t *dp = spa->spa_dsl_pool;
8831eda14cbcSMatt Macy 	rrw_enter(&dp->dp_config_rwlock, RW_WRITER, FTAG);
8832eda14cbcSMatt Macy 
8833eda14cbcSMatt Macy 	if (spa->spa_ubsync.ub_version < SPA_VERSION_ORIGIN &&
8834eda14cbcSMatt Macy 	    spa->spa_uberblock.ub_version >= SPA_VERSION_ORIGIN) {
8835eda14cbcSMatt Macy 		dsl_pool_create_origin(dp, tx);
8836eda14cbcSMatt Macy 
8837eda14cbcSMatt Macy 		/* Keeping the origin open increases spa_minref */
8838eda14cbcSMatt Macy 		spa->spa_minref += 3;
8839eda14cbcSMatt Macy 	}
8840eda14cbcSMatt Macy 
8841eda14cbcSMatt Macy 	if (spa->spa_ubsync.ub_version < SPA_VERSION_NEXT_CLONES &&
8842eda14cbcSMatt Macy 	    spa->spa_uberblock.ub_version >= SPA_VERSION_NEXT_CLONES) {
8843eda14cbcSMatt Macy 		dsl_pool_upgrade_clones(dp, tx);
8844eda14cbcSMatt Macy 	}
8845eda14cbcSMatt Macy 
8846eda14cbcSMatt Macy 	if (spa->spa_ubsync.ub_version < SPA_VERSION_DIR_CLONES &&
8847eda14cbcSMatt Macy 	    spa->spa_uberblock.ub_version >= SPA_VERSION_DIR_CLONES) {
8848eda14cbcSMatt Macy 		dsl_pool_upgrade_dir_clones(dp, tx);
8849eda14cbcSMatt Macy 
8850eda14cbcSMatt Macy 		/* Keeping the freedir open increases spa_minref */
8851eda14cbcSMatt Macy 		spa->spa_minref += 3;
8852eda14cbcSMatt Macy 	}
8853eda14cbcSMatt Macy 
8854eda14cbcSMatt Macy 	if (spa->spa_ubsync.ub_version < SPA_VERSION_FEATURES &&
8855eda14cbcSMatt Macy 	    spa->spa_uberblock.ub_version >= SPA_VERSION_FEATURES) {
8856eda14cbcSMatt Macy 		spa_feature_create_zap_objects(spa, tx);
8857eda14cbcSMatt Macy 	}
8858eda14cbcSMatt Macy 
8859eda14cbcSMatt Macy 	/*
8860eda14cbcSMatt Macy 	 * LZ4_COMPRESS feature's behaviour was changed to activate_on_enable
8861eda14cbcSMatt Macy 	 * The LZ4_COMPRESS feature's behaviour was changed to
8862eda14cbcSMatt Macy 	 * activate_on_enable when the possibility to use lz4 compression
8863eda14cbcSMatt Macy 	 * for metadata was added.  Old pools that have this feature enabled
8864eda14cbcSMatt Macy 	 * must be upgraded to have this feature active.
8865eda14cbcSMatt Macy 	if (spa->spa_uberblock.ub_version >= SPA_VERSION_FEATURES) {
8866eda14cbcSMatt Macy 		boolean_t lz4_en = spa_feature_is_enabled(spa,
8867eda14cbcSMatt Macy 		    SPA_FEATURE_LZ4_COMPRESS);
8868eda14cbcSMatt Macy 		boolean_t lz4_ac = spa_feature_is_active(spa,
8869eda14cbcSMatt Macy 		    SPA_FEATURE_LZ4_COMPRESS);
8870eda14cbcSMatt Macy 
8871eda14cbcSMatt Macy 		if (lz4_en && !lz4_ac)
8872eda14cbcSMatt Macy 			spa_feature_incr(spa, SPA_FEATURE_LZ4_COMPRESS, tx);
8873eda14cbcSMatt Macy 	}
8874eda14cbcSMatt Macy 
8875eda14cbcSMatt Macy 	/*
8876eda14cbcSMatt Macy 	 * If we haven't written the salt, do so now.  Note that the
8877eda14cbcSMatt Macy 	 * feature may not be activated yet, but that's fine since
8878eda14cbcSMatt Macy 	 * the presence of this ZAP entry is backwards compatible.
8879eda14cbcSMatt Macy 	 */
8880eda14cbcSMatt Macy 	if (zap_contains(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
8881eda14cbcSMatt Macy 	    DMU_POOL_CHECKSUM_SALT) == ENOENT) {
8882eda14cbcSMatt Macy 		VERIFY0(zap_add(spa->spa_meta_objset,
8883eda14cbcSMatt Macy 		    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_CHECKSUM_SALT, 1,
8884eda14cbcSMatt Macy 		    sizeof (spa->spa_cksum_salt.zcs_bytes),
8885eda14cbcSMatt Macy 		    spa->spa_cksum_salt.zcs_bytes, tx));
8886eda14cbcSMatt Macy 	}
8887eda14cbcSMatt Macy 
8888eda14cbcSMatt Macy 	rrw_exit(&dp->dp_config_rwlock, FTAG);
8889eda14cbcSMatt Macy }
8890eda14cbcSMatt Macy 
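/*
 * Sanity-check the in-core indirect state of a top-level vdev before it is
 * synced: an indirect vdev must have a mapping and a births object, any
 * obsolete space map must be consistent with the mapping, and the in-core
 * obsolete segments tree must be empty at the start of the sync.
 */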
8891eda14cbcSMatt Macy static void
8892eda14cbcSMatt Macy vdev_indirect_state_sync_verify(vdev_t *vd)
8893eda14cbcSMatt Macy {
8894eda14cbcSMatt Macy 	vdev_indirect_mapping_t *vim __maybe_unused = vd->vdev_indirect_mapping;
8895eda14cbcSMatt Macy 	vdev_indirect_births_t *vib __maybe_unused = vd->vdev_indirect_births;
8896eda14cbcSMatt Macy 
8897eda14cbcSMatt Macy 	if (vd->vdev_ops == &vdev_indirect_ops) {
8898eda14cbcSMatt Macy 		ASSERT(vim != NULL);
8899eda14cbcSMatt Macy 		ASSERT(vib != NULL);
8900eda14cbcSMatt Macy 	}
8901eda14cbcSMatt Macy 
8902eda14cbcSMatt Macy 	uint64_t obsolete_sm_object = 0;
8903eda14cbcSMatt Macy 	ASSERT0(vdev_obsolete_sm_object(vd, &obsolete_sm_object));
8904eda14cbcSMatt Macy 	if (obsolete_sm_object != 0) {
8905eda14cbcSMatt Macy 		ASSERT(vd->vdev_obsolete_sm != NULL);
8906eda14cbcSMatt Macy 		ASSERT(vd->vdev_removing ||
8907eda14cbcSMatt Macy 		    vd->vdev_ops == &vdev_indirect_ops);
8908eda14cbcSMatt Macy 		ASSERT(vdev_indirect_mapping_num_entries(vim) > 0);
8909eda14cbcSMatt Macy 		ASSERT(vdev_indirect_mapping_bytes_mapped(vim) > 0);
8910eda14cbcSMatt Macy 		ASSERT3U(obsolete_sm_object, ==,
8911eda14cbcSMatt Macy 		    space_map_object(vd->vdev_obsolete_sm));
8912eda14cbcSMatt Macy 		ASSERT3U(vdev_indirect_mapping_bytes_mapped(vim), >=,
8913eda14cbcSMatt Macy 		    space_map_allocated(vd->vdev_obsolete_sm));
8914eda14cbcSMatt Macy 	}
8915eda14cbcSMatt Macy 	ASSERT(vd->vdev_obsolete_segments != NULL);
8916eda14cbcSMatt Macy 
8917eda14cbcSMatt Macy 	/*
8918eda14cbcSMatt Macy 	 * Since frees / remaps to an indirect vdev can only
8919eda14cbcSMatt Macy 	 * happen in syncing context, the obsolete segments
8920eda14cbcSMatt Macy 	 * tree must be empty when we start syncing.
8921eda14cbcSMatt Macy 	 */
8922eda14cbcSMatt Macy 	ASSERT0(range_tree_space(vd->vdev_obsolete_segments));
8923eda14cbcSMatt Macy }
8924eda14cbcSMatt Macy 
8925eda14cbcSMatt Macy /*
8926eda14cbcSMatt Macy  * Set each top-level vdev's max queue depth. Evaluate each top-level
8927eda14cbcSMatt Macy  * vdev's async write queue depth in case it changed. The max queue depth
8928eda14cbcSMatt Macy  * will not change in the middle of syncing out this txg.
8929eda14cbcSMatt Macy  */
8930eda14cbcSMatt Macy static void
8931eda14cbcSMatt Macy spa_sync_adjust_vdev_max_queue_depth(spa_t *spa)
8932eda14cbcSMatt Macy {
8933eda14cbcSMatt Macy 	ASSERT(spa_writeable(spa));
8934eda14cbcSMatt Macy 
8935eda14cbcSMatt Macy 	vdev_t *rvd = spa->spa_root_vdev;
8936eda14cbcSMatt Macy 	uint32_t max_queue_depth = zfs_vdev_async_write_max_active *
8937eda14cbcSMatt Macy 	    zfs_vdev_queue_depth_pct / 100;
8938eda14cbcSMatt Macy 	metaslab_class_t *normal = spa_normal_class(spa);
8939eda14cbcSMatt Macy 	metaslab_class_t *special = spa_special_class(spa);
8940eda14cbcSMatt Macy 	metaslab_class_t *dedup = spa_dedup_class(spa);
8941eda14cbcSMatt Macy 
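	/*
	 * Cap each eligible metaslab group's queue depth and accumulate one
	 * default queue depth's worth of allocation slots per top-level vdev;
	 * the per-allocator throttle limits below are sized from this total.
	 */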
8942eda14cbcSMatt Macy 	uint64_t slots_per_allocator = 0;
8943eda14cbcSMatt Macy 	for (int c = 0; c < rvd->vdev_children; c++) {
8944eda14cbcSMatt Macy 		vdev_t *tvd = rvd->vdev_child[c];
8945eda14cbcSMatt Macy 
8946eda14cbcSMatt Macy 		metaslab_group_t *mg = tvd->vdev_mg;
8947eda14cbcSMatt Macy 		if (mg == NULL || !metaslab_group_initialized(mg))
8948eda14cbcSMatt Macy 			continue;
8949eda14cbcSMatt Macy 
8950eda14cbcSMatt Macy 		metaslab_class_t *mc = mg->mg_class;
8951eda14cbcSMatt Macy 		if (mc != normal && mc != special && mc != dedup)
8952eda14cbcSMatt Macy 			continue;
8953eda14cbcSMatt Macy 
8954eda14cbcSMatt Macy 		/*
8955eda14cbcSMatt Macy 		 * It is safe to do a lock-free check here because only async
8956eda14cbcSMatt Macy 		 * allocations look at mg_max_alloc_queue_depth, and async
8957eda14cbcSMatt Macy 		 * allocations all happen from spa_sync().
8958eda14cbcSMatt Macy 		 */
8959eda14cbcSMatt Macy 		for (int i = 0; i < mg->mg_allocators; i++) {
8960eda14cbcSMatt Macy 			ASSERT0(zfs_refcount_count(
8961eda14cbcSMatt Macy 			    &(mg->mg_allocator[i].mga_alloc_queue_depth)));
8962eda14cbcSMatt Macy 		}
8963eda14cbcSMatt Macy 		mg->mg_max_alloc_queue_depth = max_queue_depth;
8964eda14cbcSMatt Macy 
8965eda14cbcSMatt Macy 		for (int i = 0; i < mg->mg_allocators; i++) {
8966eda14cbcSMatt Macy 			mg->mg_allocator[i].mga_cur_max_alloc_queue_depth =
8967eda14cbcSMatt Macy 			    zfs_vdev_def_queue_depth;
8968eda14cbcSMatt Macy 		}
8969eda14cbcSMatt Macy 		slots_per_allocator += zfs_vdev_def_queue_depth;
8970eda14cbcSMatt Macy 	}
8971eda14cbcSMatt Macy 
8972eda14cbcSMatt Macy 	for (int i = 0; i < spa->spa_alloc_count; i++) {
89737877fdebSMatt Macy 		ASSERT0(zfs_refcount_count(&normal->mc_allocator[i].
89747877fdebSMatt Macy 		    mca_alloc_slots));
89757877fdebSMatt Macy 		ASSERT0(zfs_refcount_count(&special->mc_allocator[i].
89767877fdebSMatt Macy 		    mca_alloc_slots));
89777877fdebSMatt Macy 		ASSERT0(zfs_refcount_count(&dedup->mc_allocator[i].
89787877fdebSMatt Macy 		    mca_alloc_slots));
89797877fdebSMatt Macy 		normal->mc_allocator[i].mca_alloc_max_slots =
89807877fdebSMatt Macy 		    slots_per_allocator;
89817877fdebSMatt Macy 		special->mc_allocator[i].mca_alloc_max_slots =
89827877fdebSMatt Macy 		    slots_per_allocator;
89837877fdebSMatt Macy 		dedup->mc_allocator[i].mca_alloc_max_slots =
89847877fdebSMatt Macy 		    slots_per_allocator;
8985eda14cbcSMatt Macy 	}
8986eda14cbcSMatt Macy 	normal->mc_alloc_throttle_enabled = zio_dva_throttle_enabled;
8987eda14cbcSMatt Macy 	special->mc_alloc_throttle_enabled = zio_dva_throttle_enabled;
8988eda14cbcSMatt Macy 	dedup->mc_alloc_throttle_enabled = zio_dva_throttle_enabled;
8989eda14cbcSMatt Macy }
8990eda14cbcSMatt Macy 
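/*
 * Verify the indirect state of every top-level vdev and, if any vdev's
 * indirect mapping is worth condensing, start condensing it.  At most one
 * condense is started per txg.
 */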
8991eda14cbcSMatt Macy static void
8992eda14cbcSMatt Macy spa_sync_condense_indirect(spa_t *spa, dmu_tx_t *tx)
8993eda14cbcSMatt Macy {
8994eda14cbcSMatt Macy 	ASSERT(spa_writeable(spa));
8995eda14cbcSMatt Macy 
8996eda14cbcSMatt Macy 	vdev_t *rvd = spa->spa_root_vdev;
8997eda14cbcSMatt Macy 	for (int c = 0; c < rvd->vdev_children; c++) {
8998eda14cbcSMatt Macy 		vdev_t *vd = rvd->vdev_child[c];
8999eda14cbcSMatt Macy 		vdev_indirect_state_sync_verify(vd);
9000eda14cbcSMatt Macy 
9001eda14cbcSMatt Macy 		if (vdev_indirect_should_condense(vd)) {
9002eda14cbcSMatt Macy 			spa_condense_indirect_start_sync(vd, tx);
9003eda14cbcSMatt Macy 			break;
9004eda14cbcSMatt Macy 		}
9005eda14cbcSMatt Macy 	}
9006eda14cbcSMatt Macy }
9007eda14cbcSMatt Macy 
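/*
 * Run sync passes until the MOS converges (is no longer dirty).  Pass 1
 * writes out the bulk of the dirty data; later passes pick up whatever was
 * dirtied while syncing.  If nothing at all changed in pass 1, bail out
 * early so the txg remains a no-op.
 */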
9008eda14cbcSMatt Macy static void
9009eda14cbcSMatt Macy spa_sync_iterate_to_convergence(spa_t *spa, dmu_tx_t *tx)
9010eda14cbcSMatt Macy {
9011eda14cbcSMatt Macy 	objset_t *mos = spa->spa_meta_objset;
9012eda14cbcSMatt Macy 	dsl_pool_t *dp = spa->spa_dsl_pool;
9013eda14cbcSMatt Macy 	uint64_t txg = tx->tx_txg;
9014eda14cbcSMatt Macy 	bplist_t *free_bpl = &spa->spa_free_bplist[txg & TXG_MASK];
9015eda14cbcSMatt Macy 
9016eda14cbcSMatt Macy 	do {
9017eda14cbcSMatt Macy 		int pass = ++spa->spa_sync_pass;
9018eda14cbcSMatt Macy 
9019eda14cbcSMatt Macy 		spa_sync_config_object(spa, tx);
9020eda14cbcSMatt Macy 		spa_sync_aux_dev(spa, &spa->spa_spares, tx,
9021eda14cbcSMatt Macy 		    ZPOOL_CONFIG_SPARES, DMU_POOL_SPARES);
9022eda14cbcSMatt Macy 		spa_sync_aux_dev(spa, &spa->spa_l2cache, tx,
9023eda14cbcSMatt Macy 		    ZPOOL_CONFIG_L2CACHE, DMU_POOL_L2CACHE);
9024eda14cbcSMatt Macy 		spa_errlog_sync(spa, txg);
9025eda14cbcSMatt Macy 		dsl_pool_sync(dp, txg);
9026eda14cbcSMatt Macy 
9027eda14cbcSMatt Macy 		if (pass < zfs_sync_pass_deferred_free ||
9028eda14cbcSMatt Macy 		    spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP)) {
9029eda14cbcSMatt Macy 			/*
9030eda14cbcSMatt Macy 			 * If the log space map feature is active we don't
9031eda14cbcSMatt Macy 			 * care about deferred frees and the deferred bpobj
9032eda14cbcSMatt Macy 			 * as the log space map should effectively have the
9033eda14cbcSMatt Macy 			 * same results (i.e. appending only to one object).
9034eda14cbcSMatt Macy 			 */
9035eda14cbcSMatt Macy 			spa_sync_frees(spa, free_bpl, tx);
9036eda14cbcSMatt Macy 		} else {
9037eda14cbcSMatt Macy 			/*
9038eda14cbcSMatt Macy 			 * We cannot defer frees during pass 1, because the
9039eda14cbcSMatt Macy 			 * deferred frees are synced later in that same pass.
9040eda14cbcSMatt Macy 			 */
9041eda14cbcSMatt Macy 			ASSERT3U(pass, >, 1);
9042eda14cbcSMatt Macy 			bplist_iterate(free_bpl, bpobj_enqueue_alloc_cb,
9043eda14cbcSMatt Macy 			    &spa->spa_deferred_bpobj, tx);
9044eda14cbcSMatt Macy 		}
9045eda14cbcSMatt Macy 
9046eda14cbcSMatt Macy 		ddt_sync(spa, txg);
9047eda14cbcSMatt Macy 		dsl_scan_sync(dp, tx);
9048eda14cbcSMatt Macy 		svr_sync(spa, tx);
9049eda14cbcSMatt Macy 		spa_sync_upgrades(spa, tx);
9050eda14cbcSMatt Macy 
9051eda14cbcSMatt Macy 		spa_flush_metaslabs(spa, tx);
9052eda14cbcSMatt Macy 
9053eda14cbcSMatt Macy 		vdev_t *vd = NULL;
9054eda14cbcSMatt Macy 		while ((vd = txg_list_remove(&spa->spa_vdev_txg_list, txg))
9055eda14cbcSMatt Macy 		    != NULL)
9056eda14cbcSMatt Macy 			vdev_sync(vd, txg);
9057eda14cbcSMatt Macy 
9058eda14cbcSMatt Macy 		/*
9059eda14cbcSMatt Macy 		 * Note: We need to check if the MOS is dirty because we could
9060eda14cbcSMatt Macy 		 * have marked the MOS dirty without updating the uberblock
9061eda14cbcSMatt Macy 		 * (e.g. if we have sync tasks but no dirty user data). We need
9062eda14cbcSMatt Macy 		 * to check the uberblock's rootbp because it is updated if we
9063eda14cbcSMatt Macy 		 * have synced out dirty data (though in this case the MOS will
9064eda14cbcSMatt Macy 		 * most likely also be dirty due to second order effects, we
9065eda14cbcSMatt Macy 		 * don't want to rely on that here).
9066eda14cbcSMatt Macy 		 */
9067eda14cbcSMatt Macy 		if (pass == 1 &&
9068eda14cbcSMatt Macy 		    spa->spa_uberblock.ub_rootbp.blk_birth < txg &&
9069eda14cbcSMatt Macy 		    !dmu_objset_is_dirty(mos, txg)) {
9070eda14cbcSMatt Macy 			/*
9071eda14cbcSMatt Macy 			 * Nothing changed on the first pass, therefore this
9072eda14cbcSMatt Macy 			 * TXG is a no-op. Avoid syncing deferred frees, so
9073eda14cbcSMatt Macy 			 * that we can keep this TXG as a no-op.
9074eda14cbcSMatt Macy 			 */
9075eda14cbcSMatt Macy 			ASSERT(txg_list_empty(&dp->dp_dirty_datasets, txg));
9076eda14cbcSMatt Macy 			ASSERT(txg_list_empty(&dp->dp_dirty_dirs, txg));
9077eda14cbcSMatt Macy 			ASSERT(txg_list_empty(&dp->dp_sync_tasks, txg));
9078eda14cbcSMatt Macy 			ASSERT(txg_list_empty(&dp->dp_early_sync_tasks, txg));
9079eda14cbcSMatt Macy 			break;
9080eda14cbcSMatt Macy 		}
9081eda14cbcSMatt Macy 
9082eda14cbcSMatt Macy 		spa_sync_deferred_frees(spa, tx);
9083eda14cbcSMatt Macy 	} while (dmu_objset_is_dirty(mos, txg));
9084eda14cbcSMatt Macy }
9085eda14cbcSMatt Macy 
9086eda14cbcSMatt Macy /*
9087eda14cbcSMatt Macy  * Rewrite the vdev configuration (which includes the uberblock) to
9088eda14cbcSMatt Macy  * commit the transaction group.
9089eda14cbcSMatt Macy  *
9090eda14cbcSMatt Macy  * If there are no dirty vdevs, we sync the uberblock to a few random
9091eda14cbcSMatt Macy  * top-level vdevs that are known to be visible in the config cache
9092eda14cbcSMatt Macy  * (see spa_vdev_add() for a complete description). If there *are* dirty
9093eda14cbcSMatt Macy  * vdevs, sync the uberblock to all vdevs.
9094eda14cbcSMatt Macy  */
9095eda14cbcSMatt Macy static void
9096eda14cbcSMatt Macy spa_sync_rewrite_vdev_config(spa_t *spa, dmu_tx_t *tx)
9097eda14cbcSMatt Macy {
9098eda14cbcSMatt Macy 	vdev_t *rvd = spa->spa_root_vdev;
9099eda14cbcSMatt Macy 	uint64_t txg = tx->tx_txg;
9100eda14cbcSMatt Macy 
9101eda14cbcSMatt Macy 	for (;;) {
9102eda14cbcSMatt Macy 		int error = 0;
9103eda14cbcSMatt Macy 
9104eda14cbcSMatt Macy 		/*
9105eda14cbcSMatt Macy 		 * We hold SCL_STATE to prevent vdev open/close/etc.
9106eda14cbcSMatt Macy 		 * while we're attempting to write the vdev labels.
9107eda14cbcSMatt Macy 		 */
9108eda14cbcSMatt Macy 		spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
9109eda14cbcSMatt Macy 
9110eda14cbcSMatt Macy 		if (list_is_empty(&spa->spa_config_dirty_list)) {
9111eda14cbcSMatt Macy 			vdev_t *svd[SPA_SYNC_MIN_VDEVS] = { NULL };
9112eda14cbcSMatt Macy 			int svdcount = 0;
9113eda14cbcSMatt Macy 			int children = rvd->vdev_children;
9114eda14cbcSMatt Macy 			int c0 = spa_get_random(children);
9115eda14cbcSMatt Macy 
9116eda14cbcSMatt Macy 			for (int c = 0; c < children; c++) {
9117eda14cbcSMatt Macy 				vdev_t *vd =
9118eda14cbcSMatt Macy 				    rvd->vdev_child[(c0 + c) % children];
9119eda14cbcSMatt Macy 
9120eda14cbcSMatt Macy 				/* Stop when revisiting the first vdev */
9121eda14cbcSMatt Macy 				if (c > 0 && svd[0] == vd)
9122eda14cbcSMatt Macy 					break;
9123eda14cbcSMatt Macy 
9124eda14cbcSMatt Macy 				if (vd->vdev_ms_array == 0 ||
9125eda14cbcSMatt Macy 				    vd->vdev_islog ||
9126eda14cbcSMatt Macy 				    !vdev_is_concrete(vd))
9127eda14cbcSMatt Macy 					continue;
9128eda14cbcSMatt Macy 
9129eda14cbcSMatt Macy 				svd[svdcount++] = vd;
9130eda14cbcSMatt Macy 				if (svdcount == SPA_SYNC_MIN_VDEVS)
9131eda14cbcSMatt Macy 					break;
9132eda14cbcSMatt Macy 			}
9133eda14cbcSMatt Macy 			error = vdev_config_sync(svd, svdcount, txg);
9134eda14cbcSMatt Macy 		} else {
9135eda14cbcSMatt Macy 			error = vdev_config_sync(rvd->vdev_child,
9136eda14cbcSMatt Macy 			    rvd->vdev_children, txg);
9137eda14cbcSMatt Macy 		}
9138eda14cbcSMatt Macy 
9139eda14cbcSMatt Macy 		if (error == 0)
9140eda14cbcSMatt Macy 			spa->spa_last_synced_guid = rvd->vdev_guid;
9141eda14cbcSMatt Macy 
9142eda14cbcSMatt Macy 		spa_config_exit(spa, SCL_STATE, FTAG);
9143eda14cbcSMatt Macy 
9144eda14cbcSMatt Macy 		if (error == 0)
9145eda14cbcSMatt Macy 			break;
9146eda14cbcSMatt Macy 		zio_suspend(spa, NULL, ZIO_SUSPEND_IOERR);
9147eda14cbcSMatt Macy 		zio_resume_wait(spa);
9148eda14cbcSMatt Macy 	}
9149eda14cbcSMatt Macy }
9150eda14cbcSMatt Macy 
9151eda14cbcSMatt Macy /*
9152eda14cbcSMatt Macy  * Sync the specified transaction group.  New blocks may be dirtied as
9153eda14cbcSMatt Macy  * part of the process, so we iterate until it converges.
9154eda14cbcSMatt Macy  */
9155eda14cbcSMatt Macy void
9156eda14cbcSMatt Macy spa_sync(spa_t *spa, uint64_t txg)
9157eda14cbcSMatt Macy {
9158eda14cbcSMatt Macy 	vdev_t *vd = NULL;
9159eda14cbcSMatt Macy 
9160eda14cbcSMatt Macy 	VERIFY(spa_writeable(spa));
9161eda14cbcSMatt Macy 
9162eda14cbcSMatt Macy 	/*
9163eda14cbcSMatt Macy 	 * Wait for i/os issued in open context that need to complete
9164eda14cbcSMatt Macy 	 * before this txg syncs.
9165eda14cbcSMatt Macy 	 */
9166eda14cbcSMatt Macy 	(void) zio_wait(spa->spa_txg_zio[txg & TXG_MASK]);
9167eda14cbcSMatt Macy 	spa->spa_txg_zio[txg & TXG_MASK] = zio_root(spa, NULL, NULL,
9168eda14cbcSMatt Macy 	    ZIO_FLAG_CANFAIL);
9169eda14cbcSMatt Macy 
9170eda14cbcSMatt Macy 	/*
9171eda14cbcSMatt Macy 	 * Lock out configuration changes.
9172eda14cbcSMatt Macy 	 */
9173eda14cbcSMatt Macy 	spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
9174eda14cbcSMatt Macy 
9175eda14cbcSMatt Macy 	spa->spa_syncing_txg = txg;
9176eda14cbcSMatt Macy 	spa->spa_sync_pass = 0;
9177eda14cbcSMatt Macy 
9178eda14cbcSMatt Macy 	for (int i = 0; i < spa->spa_alloc_count; i++) {
9179eda14cbcSMatt Macy 		mutex_enter(&spa->spa_alloc_locks[i]);
9180eda14cbcSMatt Macy 		VERIFY0(avl_numnodes(&spa->spa_alloc_trees[i]));
9181eda14cbcSMatt Macy 		mutex_exit(&spa->spa_alloc_locks[i]);
9182eda14cbcSMatt Macy 	}
9183eda14cbcSMatt Macy 
9184eda14cbcSMatt Macy 	/*
9185eda14cbcSMatt Macy 	 * If there are any pending vdev state changes, convert them
9186eda14cbcSMatt Macy 	 * into config changes that go out with this transaction group.
9187eda14cbcSMatt Macy 	 */
9188eda14cbcSMatt Macy 	spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
9189eda14cbcSMatt Macy 	while (list_head(&spa->spa_state_dirty_list) != NULL) {
9190eda14cbcSMatt Macy 		/*
9191eda14cbcSMatt Macy 		 * We need the write lock here because, for aux vdevs,
9192eda14cbcSMatt Macy 		 * calling vdev_config_dirty() modifies sav_config.
9193eda14cbcSMatt Macy 		 * This is ugly and will become unnecessary when we
9194eda14cbcSMatt Macy 		 * eliminate the aux vdev wart by integrating all vdevs
9195eda14cbcSMatt Macy 		 * into the root vdev tree.
9196eda14cbcSMatt Macy 		 */
9197eda14cbcSMatt Macy 		spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
9198eda14cbcSMatt Macy 		spa_config_enter(spa, SCL_CONFIG | SCL_STATE, FTAG, RW_WRITER);
9199eda14cbcSMatt Macy 		while ((vd = list_head(&spa->spa_state_dirty_list)) != NULL) {
9200eda14cbcSMatt Macy 			vdev_state_clean(vd);
9201eda14cbcSMatt Macy 			vdev_config_dirty(vd);
9202eda14cbcSMatt Macy 		}
9203eda14cbcSMatt Macy 		spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
9204eda14cbcSMatt Macy 		spa_config_enter(spa, SCL_CONFIG | SCL_STATE, FTAG, RW_READER);
9205eda14cbcSMatt Macy 	}
9206eda14cbcSMatt Macy 	spa_config_exit(spa, SCL_STATE, FTAG);
9207eda14cbcSMatt Macy 
9208eda14cbcSMatt Macy 	dsl_pool_t *dp = spa->spa_dsl_pool;
9209eda14cbcSMatt Macy 	dmu_tx_t *tx = dmu_tx_create_assigned(dp, txg);
9210eda14cbcSMatt Macy 
9211eda14cbcSMatt Macy 	spa->spa_sync_starttime = gethrtime();
9212eda14cbcSMatt Macy 	taskq_cancel_id(system_delay_taskq, spa->spa_deadman_tqid);
9213eda14cbcSMatt Macy 	spa->spa_deadman_tqid = taskq_dispatch_delay(system_delay_taskq,
9214eda14cbcSMatt Macy 	    spa_deadman, spa, TQ_SLEEP, ddi_get_lbolt() +
9215eda14cbcSMatt Macy 	    NSEC_TO_TICK(spa->spa_deadman_synctime));
9216eda14cbcSMatt Macy 
9217eda14cbcSMatt Macy 	/*
9218eda14cbcSMatt Macy 	 * If we are upgrading to SPA_VERSION_RAIDZ_DEFLATE this txg,
9219eda14cbcSMatt Macy 	 * set spa_deflate if we have no raid-z vdevs.
9220eda14cbcSMatt Macy 	 */
9221eda14cbcSMatt Macy 	if (spa->spa_ubsync.ub_version < SPA_VERSION_RAIDZ_DEFLATE &&
9222eda14cbcSMatt Macy 	    spa->spa_uberblock.ub_version >= SPA_VERSION_RAIDZ_DEFLATE) {
9223eda14cbcSMatt Macy 		vdev_t *rvd = spa->spa_root_vdev;
9224eda14cbcSMatt Macy 
9225eda14cbcSMatt Macy 		int i;
9226eda14cbcSMatt Macy 		for (i = 0; i < rvd->vdev_children; i++) {
9227eda14cbcSMatt Macy 			vd = rvd->vdev_child[i];
9228eda14cbcSMatt Macy 			if (vd->vdev_deflate_ratio != SPA_MINBLOCKSIZE)
9229eda14cbcSMatt Macy 				break;
9230eda14cbcSMatt Macy 		}
9231eda14cbcSMatt Macy 		if (i == rvd->vdev_children) {
9232eda14cbcSMatt Macy 			spa->spa_deflate = TRUE;
9233eda14cbcSMatt Macy 			VERIFY0(zap_add(spa->spa_meta_objset,
9234eda14cbcSMatt Macy 			    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_DEFLATE,
9235eda14cbcSMatt Macy 			    sizeof (uint64_t), 1, &spa->spa_deflate, tx));
9236eda14cbcSMatt Macy 		}
9237eda14cbcSMatt Macy 	}
9238eda14cbcSMatt Macy 
9239eda14cbcSMatt Macy 	spa_sync_adjust_vdev_max_queue_depth(spa);
9240eda14cbcSMatt Macy 
9241eda14cbcSMatt Macy 	spa_sync_condense_indirect(spa, tx);
9242eda14cbcSMatt Macy 
9243eda14cbcSMatt Macy 	spa_sync_iterate_to_convergence(spa, tx);
9244eda14cbcSMatt Macy 
9245eda14cbcSMatt Macy #ifdef ZFS_DEBUG
9246eda14cbcSMatt Macy 	if (!list_is_empty(&spa->spa_config_dirty_list)) {
9247eda14cbcSMatt Macy 		/*
9248eda14cbcSMatt Macy 		 * Make sure that the number of ZAPs for all the vdevs matches
9249eda14cbcSMatt Macy 		 * the number of ZAPs in the per-vdev ZAP list. This only gets
9250eda14cbcSMatt Macy 		 * called if the config is dirty; otherwise there may be
9251eda14cbcSMatt Macy 		 * outstanding AVZ operations that weren't completed in
9252eda14cbcSMatt Macy 		 * spa_sync_config_object.
9253eda14cbcSMatt Macy 		 */
9254eda14cbcSMatt Macy 		uint64_t all_vdev_zap_entry_count;
9255eda14cbcSMatt Macy 		ASSERT0(zap_count(spa->spa_meta_objset,
9256eda14cbcSMatt Macy 		    spa->spa_all_vdev_zaps, &all_vdev_zap_entry_count));
9257eda14cbcSMatt Macy 		ASSERT3U(vdev_count_verify_zaps(spa->spa_root_vdev), ==,
9258eda14cbcSMatt Macy 		    all_vdev_zap_entry_count);
9259eda14cbcSMatt Macy 	}
9260eda14cbcSMatt Macy #endif
9261eda14cbcSMatt Macy 
9262eda14cbcSMatt Macy 	if (spa->spa_vdev_removal != NULL) {
9263eda14cbcSMatt Macy 		ASSERT0(spa->spa_vdev_removal->svr_bytes_done[txg & TXG_MASK]);
9264eda14cbcSMatt Macy 	}
9265eda14cbcSMatt Macy 
9266eda14cbcSMatt Macy 	spa_sync_rewrite_vdev_config(spa, tx);
9267eda14cbcSMatt Macy 	dmu_tx_commit(tx);
9268eda14cbcSMatt Macy 
9269eda14cbcSMatt Macy 	taskq_cancel_id(system_delay_taskq, spa->spa_deadman_tqid);
9270eda14cbcSMatt Macy 	spa->spa_deadman_tqid = 0;
9271eda14cbcSMatt Macy 
9272eda14cbcSMatt Macy 	/*
9273eda14cbcSMatt Macy 	 * Clear the dirty config list.
9274eda14cbcSMatt Macy 	 */
9275eda14cbcSMatt Macy 	while ((vd = list_head(&spa->spa_config_dirty_list)) != NULL)
9276eda14cbcSMatt Macy 		vdev_config_clean(vd);
9277eda14cbcSMatt Macy 
9278eda14cbcSMatt Macy 	/*
9279eda14cbcSMatt Macy 	 * Now that the new config has synced transactionally,
9280eda14cbcSMatt Macy 	 * let it become visible to the config cache.
9281eda14cbcSMatt Macy 	 */
9282eda14cbcSMatt Macy 	if (spa->spa_config_syncing != NULL) {
9283eda14cbcSMatt Macy 		spa_config_set(spa, spa->spa_config_syncing);
9284eda14cbcSMatt Macy 		spa->spa_config_txg = txg;
9285eda14cbcSMatt Macy 		spa->spa_config_syncing = NULL;
9286eda14cbcSMatt Macy 	}
9287eda14cbcSMatt Macy 
9288eda14cbcSMatt Macy 	dsl_pool_sync_done(dp, txg);
9289eda14cbcSMatt Macy 
9290eda14cbcSMatt Macy 	for (int i = 0; i < spa->spa_alloc_count; i++) {
9291eda14cbcSMatt Macy 		mutex_enter(&spa->spa_alloc_locks[i]);
9292eda14cbcSMatt Macy 		VERIFY0(avl_numnodes(&spa->spa_alloc_trees[i]));
9293eda14cbcSMatt Macy 		mutex_exit(&spa->spa_alloc_locks[i]);
9294eda14cbcSMatt Macy 	}
9295eda14cbcSMatt Macy 
9296eda14cbcSMatt Macy 	/*
9297eda14cbcSMatt Macy 	 * Update usable space statistics.
9298eda14cbcSMatt Macy 	 */
9299eda14cbcSMatt Macy 	while ((vd = txg_list_remove(&spa->spa_vdev_txg_list, TXG_CLEAN(txg)))
9300eda14cbcSMatt Macy 	    != NULL)
9301eda14cbcSMatt Macy 		vdev_sync_done(vd, txg);
9302eda14cbcSMatt Macy 
9303eda14cbcSMatt Macy 	metaslab_class_evict_old(spa->spa_normal_class, txg);
9304eda14cbcSMatt Macy 	metaslab_class_evict_old(spa->spa_log_class, txg);
9305eda14cbcSMatt Macy 
9306eda14cbcSMatt Macy 	spa_sync_close_syncing_log_sm(spa);
9307eda14cbcSMatt Macy 
9308eda14cbcSMatt Macy 	spa_update_dspace(spa);
9309eda14cbcSMatt Macy 
9310eda14cbcSMatt Macy 	/*
9311eda14cbcSMatt Macy 	 * It had better be the case that we didn't dirty anything
9312eda14cbcSMatt Macy 	 * since vdev_config_sync().
9313eda14cbcSMatt Macy 	 */
9314eda14cbcSMatt Macy 	ASSERT(txg_list_empty(&dp->dp_dirty_datasets, txg));
9315eda14cbcSMatt Macy 	ASSERT(txg_list_empty(&dp->dp_dirty_dirs, txg));
9316eda14cbcSMatt Macy 	ASSERT(txg_list_empty(&spa->spa_vdev_txg_list, txg));
9317eda14cbcSMatt Macy 
9318eda14cbcSMatt Macy 	while (zfs_pause_spa_sync)
9319eda14cbcSMatt Macy 		delay(1);
9320eda14cbcSMatt Macy 
9321eda14cbcSMatt Macy 	spa->spa_sync_pass = 0;
9322eda14cbcSMatt Macy 
9323eda14cbcSMatt Macy 	/*
9324eda14cbcSMatt Macy 	 * Update the last synced uberblock here. We want to do this at
9325eda14cbcSMatt Macy 	 * the end of spa_sync() so that consumers of spa_last_synced_txg()
9326eda14cbcSMatt Macy 	 * will be guaranteed that all the processing associated with
9327eda14cbcSMatt Macy 	 * that txg has been completed.
9328eda14cbcSMatt Macy 	 */
9329eda14cbcSMatt Macy 	spa->spa_ubsync = spa->spa_uberblock;
9330eda14cbcSMatt Macy 	spa_config_exit(spa, SCL_CONFIG, FTAG);
9331eda14cbcSMatt Macy 
9332eda14cbcSMatt Macy 	spa_handle_ignored_writes(spa);
9333eda14cbcSMatt Macy 
9334eda14cbcSMatt Macy 	/*
9335eda14cbcSMatt Macy 	 * If any async tasks have been requested, kick them off.
9336eda14cbcSMatt Macy 	 */
9337eda14cbcSMatt Macy 	spa_async_dispatch(spa);
9338eda14cbcSMatt Macy }
9339eda14cbcSMatt Macy 
9340eda14cbcSMatt Macy /*
9341eda14cbcSMatt Macy  * Sync all pools.  We don't want to hold the namespace lock across these
9342eda14cbcSMatt Macy  * operations, so we take a reference on the spa_t and drop the lock during the
9343eda14cbcSMatt Macy  * sync.
9344eda14cbcSMatt Macy  */
9345eda14cbcSMatt Macy void
9346eda14cbcSMatt Macy spa_sync_allpools(void)
9347eda14cbcSMatt Macy {
9348eda14cbcSMatt Macy 	spa_t *spa = NULL;
9349eda14cbcSMatt Macy 	mutex_enter(&spa_namespace_lock);
9350eda14cbcSMatt Macy 	while ((spa = spa_next(spa)) != NULL) {
9351eda14cbcSMatt Macy 		if (spa_state(spa) != POOL_STATE_ACTIVE ||
9352eda14cbcSMatt Macy 		    !spa_writeable(spa) || spa_suspended(spa))
9353eda14cbcSMatt Macy 			continue;
9354eda14cbcSMatt Macy 		spa_open_ref(spa, FTAG);
9355eda14cbcSMatt Macy 		mutex_exit(&spa_namespace_lock);
9356eda14cbcSMatt Macy 		txg_wait_synced(spa_get_dsl(spa), 0);
9357eda14cbcSMatt Macy 		mutex_enter(&spa_namespace_lock);
9358eda14cbcSMatt Macy 		spa_close(spa, FTAG);
9359eda14cbcSMatt Macy 	}
9360eda14cbcSMatt Macy 	mutex_exit(&spa_namespace_lock);
9361eda14cbcSMatt Macy }
9362eda14cbcSMatt Macy 
9363eda14cbcSMatt Macy /*
9364eda14cbcSMatt Macy  * ==========================================================================
9365eda14cbcSMatt Macy  * Miscellaneous routines
9366eda14cbcSMatt Macy  * ==========================================================================
9367eda14cbcSMatt Macy  */
9368eda14cbcSMatt Macy 
9369eda14cbcSMatt Macy /*
9370eda14cbcSMatt Macy  * Remove all pools in the system.
9371eda14cbcSMatt Macy  */
9372eda14cbcSMatt Macy void
9373eda14cbcSMatt Macy spa_evict_all(void)
9374eda14cbcSMatt Macy {
9375eda14cbcSMatt Macy 	spa_t *spa;
9376eda14cbcSMatt Macy 
9377eda14cbcSMatt Macy 	/*
9378eda14cbcSMatt Macy 	 * Remove all cached state.  All pools should be closed now,
9379eda14cbcSMatt Macy 	 * so every spa in the AVL tree should be unreferenced.
9380eda14cbcSMatt Macy 	 */
9381eda14cbcSMatt Macy 	mutex_enter(&spa_namespace_lock);
9382eda14cbcSMatt Macy 	while ((spa = spa_next(NULL)) != NULL) {
9383eda14cbcSMatt Macy 		/*
9384eda14cbcSMatt Macy 		 * Stop async tasks.  The async thread may need to detach
9385eda14cbcSMatt Macy 		 * a device that's been replaced, which requires grabbing
9386eda14cbcSMatt Macy 		 * spa_namespace_lock, so we must drop it here.
9387eda14cbcSMatt Macy 		 */
9388eda14cbcSMatt Macy 		spa_open_ref(spa, FTAG);
9389eda14cbcSMatt Macy 		mutex_exit(&spa_namespace_lock);
9390eda14cbcSMatt Macy 		spa_async_suspend(spa);
9391eda14cbcSMatt Macy 		mutex_enter(&spa_namespace_lock);
9392eda14cbcSMatt Macy 		spa_close(spa, FTAG);
9393eda14cbcSMatt Macy 
9394eda14cbcSMatt Macy 		if (spa->spa_state != POOL_STATE_UNINITIALIZED) {
9395eda14cbcSMatt Macy 			spa_unload(spa);
9396eda14cbcSMatt Macy 			spa_deactivate(spa);
9397eda14cbcSMatt Macy 		}
9398eda14cbcSMatt Macy 		spa_remove(spa);
9399eda14cbcSMatt Macy 	}
9400eda14cbcSMatt Macy 	mutex_exit(&spa_namespace_lock);
9401eda14cbcSMatt Macy }
9402eda14cbcSMatt Macy 
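/*
 * Look up a vdev in the pool by guid.  If 'aux' is set, the L2ARC and spare
 * auxiliary vdevs are searched in addition to the main vdev tree.
 */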
9403eda14cbcSMatt Macy vdev_t *
9404eda14cbcSMatt Macy spa_lookup_by_guid(spa_t *spa, uint64_t guid, boolean_t aux)
9405eda14cbcSMatt Macy {
9406eda14cbcSMatt Macy 	vdev_t *vd;
9407eda14cbcSMatt Macy 	int i;
9408eda14cbcSMatt Macy 
9409eda14cbcSMatt Macy 	if ((vd = vdev_lookup_by_guid(spa->spa_root_vdev, guid)) != NULL)
9410eda14cbcSMatt Macy 		return (vd);
9411eda14cbcSMatt Macy 
9412eda14cbcSMatt Macy 	if (aux) {
9413eda14cbcSMatt Macy 		for (i = 0; i < spa->spa_l2cache.sav_count; i++) {
9414eda14cbcSMatt Macy 			vd = spa->spa_l2cache.sav_vdevs[i];
9415eda14cbcSMatt Macy 			if (vd->vdev_guid == guid)
9416eda14cbcSMatt Macy 				return (vd);
9417eda14cbcSMatt Macy 		}
9418eda14cbcSMatt Macy 
9419eda14cbcSMatt Macy 		for (i = 0; i < spa->spa_spares.sav_count; i++) {
9420eda14cbcSMatt Macy 			vd = spa->spa_spares.sav_vdevs[i];
9421eda14cbcSMatt Macy 			if (vd->vdev_guid == guid)
9422eda14cbcSMatt Macy 				return (vd);
9423eda14cbcSMatt Macy 		}
9424eda14cbcSMatt Macy 	}
9425eda14cbcSMatt Macy 
9426eda14cbcSMatt Macy 	return (NULL);
9427eda14cbcSMatt Macy }
9428eda14cbcSMatt Macy 
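/*
 * Raise the pool's on-disk format to 'version' and wait for the change to
 * be synced out.
 */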
9429eda14cbcSMatt Macy void
9430eda14cbcSMatt Macy spa_upgrade(spa_t *spa, uint64_t version)
9431eda14cbcSMatt Macy {
9432eda14cbcSMatt Macy 	ASSERT(spa_writeable(spa));
9433eda14cbcSMatt Macy 
9434eda14cbcSMatt Macy 	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
9435eda14cbcSMatt Macy 
9436eda14cbcSMatt Macy 	/*
9437eda14cbcSMatt Macy 	 * This should only be called for a non-faulted pool, and since a
9438eda14cbcSMatt Macy 	 * future version would result in an unopenable pool, this shouldn't be
9439eda14cbcSMatt Macy 	 * possible.
9440eda14cbcSMatt Macy 	 */
9441eda14cbcSMatt Macy 	ASSERT(SPA_VERSION_IS_SUPPORTED(spa->spa_uberblock.ub_version));
9442eda14cbcSMatt Macy 	ASSERT3U(version, >=, spa->spa_uberblock.ub_version);
9443eda14cbcSMatt Macy 
9444eda14cbcSMatt Macy 	spa->spa_uberblock.ub_version = version;
9445eda14cbcSMatt Macy 	vdev_config_dirty(spa->spa_root_vdev);
9446eda14cbcSMatt Macy 
9447eda14cbcSMatt Macy 	spa_config_exit(spa, SCL_ALL, FTAG);
9448eda14cbcSMatt Macy 
9449eda14cbcSMatt Macy 	txg_wait_synced(spa_get_dsl(spa), 0);
9450eda14cbcSMatt Macy }
9451eda14cbcSMatt Macy 
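/*
 * Check whether 'guid' refers to one of the pool's spares, either already
 * configured or still pending addition.
 */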
9452eda14cbcSMatt Macy boolean_t
9453eda14cbcSMatt Macy spa_has_spare(spa_t *spa, uint64_t guid)
9454eda14cbcSMatt Macy {
9455eda14cbcSMatt Macy 	int i;
9456eda14cbcSMatt Macy 	uint64_t spareguid;
9457eda14cbcSMatt Macy 	spa_aux_vdev_t *sav = &spa->spa_spares;
9458eda14cbcSMatt Macy 
9459eda14cbcSMatt Macy 	for (i = 0; i < sav->sav_count; i++)
9460eda14cbcSMatt Macy 		if (sav->sav_vdevs[i]->vdev_guid == guid)
9461eda14cbcSMatt Macy 			return (B_TRUE);
9462eda14cbcSMatt Macy 
9463eda14cbcSMatt Macy 	for (i = 0; i < sav->sav_npending; i++) {
9464eda14cbcSMatt Macy 		if (nvlist_lookup_uint64(sav->sav_pending[i], ZPOOL_CONFIG_GUID,
9465eda14cbcSMatt Macy 		    &spareguid) == 0 && spareguid == guid)
9466eda14cbcSMatt Macy 			return (B_TRUE);
9467eda14cbcSMatt Macy 	}
9468eda14cbcSMatt Macy 
9469eda14cbcSMatt Macy 	return (B_FALSE);
9470eda14cbcSMatt Macy }
9471eda14cbcSMatt Macy 
9472eda14cbcSMatt Macy /*
9473eda14cbcSMatt Macy  * Check if a pool has an active shared spare device.
9474eda14cbcSMatt Macy  * Note: the refcount of an active spare is 2, as a spare and as a replacement.
9475eda14cbcSMatt Macy  */
9476eda14cbcSMatt Macy static boolean_t
9477eda14cbcSMatt Macy spa_has_active_shared_spare(spa_t *spa)
9478eda14cbcSMatt Macy {
9479eda14cbcSMatt Macy 	int i, refcnt;
9480eda14cbcSMatt Macy 	uint64_t pool;
9481eda14cbcSMatt Macy 	spa_aux_vdev_t *sav = &spa->spa_spares;
9482eda14cbcSMatt Macy 
9483eda14cbcSMatt Macy 	for (i = 0; i < sav->sav_count; i++) {
9484eda14cbcSMatt Macy 		if (spa_spare_exists(sav->sav_vdevs[i]->vdev_guid, &pool,
9485eda14cbcSMatt Macy 		    &refcnt) && pool != 0ULL && pool == spa_guid(spa) &&
9486eda14cbcSMatt Macy 		    refcnt > 2)
9487eda14cbcSMatt Macy 			return (B_TRUE);
9488eda14cbcSMatt Macy 	}
9489eda14cbcSMatt Macy 
9490eda14cbcSMatt Macy 	return (B_FALSE);
9491eda14cbcSMatt Macy }
9492eda14cbcSMatt Macy 
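/*
 * Return the total number of metaslabs across all concrete top-level vdevs.
 */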
9493eda14cbcSMatt Macy uint64_t
9494eda14cbcSMatt Macy spa_total_metaslabs(spa_t *spa)
9495eda14cbcSMatt Macy {
9496eda14cbcSMatt Macy 	vdev_t *rvd = spa->spa_root_vdev;
9497eda14cbcSMatt Macy 
9498eda14cbcSMatt Macy 	uint64_t m = 0;
9499eda14cbcSMatt Macy 	for (uint64_t c = 0; c < rvd->vdev_children; c++) {
9500eda14cbcSMatt Macy 		vdev_t *vd = rvd->vdev_child[c];
9501eda14cbcSMatt Macy 		if (!vdev_is_concrete(vd))
9502eda14cbcSMatt Macy 			continue;
9503eda14cbcSMatt Macy 		m += vd->vdev_ms_count;
9504eda14cbcSMatt Macy 	}
9505eda14cbcSMatt Macy 	return (m);
9506eda14cbcSMatt Macy }
9507eda14cbcSMatt Macy 
9508eda14cbcSMatt Macy /*
9509eda14cbcSMatt Macy  * Notify any waiting threads that some activity has switched from being in-
9510eda14cbcSMatt Macy  * progress to not-in-progress so that the thread can wake up and determine
9511eda14cbcSMatt Macy  * whether it is finished waiting.
9512eda14cbcSMatt Macy  */
9513eda14cbcSMatt Macy void
9514eda14cbcSMatt Macy spa_notify_waiters(spa_t *spa)
9515eda14cbcSMatt Macy {
9516eda14cbcSMatt Macy 	/*
9517eda14cbcSMatt Macy 	 * Acquiring spa_activities_lock here prevents the cv_broadcast from
9518eda14cbcSMatt Macy 	 * happening between the waiting thread's check and cv_wait.
9519eda14cbcSMatt Macy 	 */
9520eda14cbcSMatt Macy 	mutex_enter(&spa->spa_activities_lock);
9521eda14cbcSMatt Macy 	cv_broadcast(&spa->spa_activities_cv);
9522eda14cbcSMatt Macy 	mutex_exit(&spa->spa_activities_lock);
9523eda14cbcSMatt Macy }
9524eda14cbcSMatt Macy 
9525eda14cbcSMatt Macy /*
9526eda14cbcSMatt Macy  * Notify any waiting threads that the pool is exporting, and then block until
9527eda14cbcSMatt Macy  * they are finished using the spa_t.
9528eda14cbcSMatt Macy  */
9529eda14cbcSMatt Macy void
9530eda14cbcSMatt Macy spa_wake_waiters(spa_t *spa)
9531eda14cbcSMatt Macy {
9532eda14cbcSMatt Macy 	mutex_enter(&spa->spa_activities_lock);
9533eda14cbcSMatt Macy 	spa->spa_waiters_cancel = B_TRUE;
9534eda14cbcSMatt Macy 	cv_broadcast(&spa->spa_activities_cv);
9535eda14cbcSMatt Macy 	while (spa->spa_waiters != 0)
9536eda14cbcSMatt Macy 		cv_wait(&spa->spa_waiters_cv, &spa->spa_activities_lock);
9537eda14cbcSMatt Macy 	spa->spa_waiters_cancel = B_FALSE;
9538eda14cbcSMatt Macy 	mutex_exit(&spa->spa_activities_lock);
9539eda14cbcSMatt Macy }
9540eda14cbcSMatt Macy 
9541eda14cbcSMatt Macy /* Whether the vdev or any of its descendants are being initialized/trimmed. */
9542eda14cbcSMatt Macy static boolean_t
9543eda14cbcSMatt Macy spa_vdev_activity_in_progress_impl(vdev_t *vd, zpool_wait_activity_t activity)
9544eda14cbcSMatt Macy {
9545eda14cbcSMatt Macy 	spa_t *spa = vd->vdev_spa;
9546eda14cbcSMatt Macy 
9547eda14cbcSMatt Macy 	ASSERT(spa_config_held(spa, SCL_CONFIG | SCL_STATE, RW_READER));
9548eda14cbcSMatt Macy 	ASSERT(MUTEX_HELD(&spa->spa_activities_lock));
9549eda14cbcSMatt Macy 	ASSERT(activity == ZPOOL_WAIT_INITIALIZE ||
9550eda14cbcSMatt Macy 	    activity == ZPOOL_WAIT_TRIM);
9551eda14cbcSMatt Macy 
9552eda14cbcSMatt Macy 	kmutex_t *lock = activity == ZPOOL_WAIT_INITIALIZE ?
9553eda14cbcSMatt Macy 	    &vd->vdev_initialize_lock : &vd->vdev_trim_lock;
9554eda14cbcSMatt Macy 
9555eda14cbcSMatt Macy 	mutex_exit(&spa->spa_activities_lock);
9556eda14cbcSMatt Macy 	mutex_enter(lock);
9557eda14cbcSMatt Macy 	mutex_enter(&spa->spa_activities_lock);
9558eda14cbcSMatt Macy 
9559eda14cbcSMatt Macy 	boolean_t in_progress = (activity == ZPOOL_WAIT_INITIALIZE) ?
9560eda14cbcSMatt Macy 	    (vd->vdev_initialize_state == VDEV_INITIALIZE_ACTIVE) :
9561eda14cbcSMatt Macy 	    (vd->vdev_trim_state == VDEV_TRIM_ACTIVE);
9562eda14cbcSMatt Macy 	mutex_exit(lock);
9563eda14cbcSMatt Macy 
9564eda14cbcSMatt Macy 	if (in_progress)
9565eda14cbcSMatt Macy 		return (B_TRUE);
9566eda14cbcSMatt Macy 
9567eda14cbcSMatt Macy 	for (int i = 0; i < vd->vdev_children; i++) {
9568eda14cbcSMatt Macy 		if (spa_vdev_activity_in_progress_impl(vd->vdev_child[i],
9569eda14cbcSMatt Macy 		    activity))
9570eda14cbcSMatt Macy 			return (B_TRUE);
9571eda14cbcSMatt Macy 	}
9572eda14cbcSMatt Macy 
9573eda14cbcSMatt Macy 	return (B_FALSE);
9574eda14cbcSMatt Macy }
9575eda14cbcSMatt Macy 
9576eda14cbcSMatt Macy /*
9577eda14cbcSMatt Macy  * If use_guid is true, this checks whether the vdev specified by guid is
9578eda14cbcSMatt Macy  * being initialized/trimmed. Otherwise, it checks whether any vdev in the pool
9579eda14cbcSMatt Macy  * is being initialized/trimmed. The caller must hold the config lock and
9580eda14cbcSMatt Macy  * spa_activities_lock.
9581eda14cbcSMatt Macy  */
9582eda14cbcSMatt Macy static int
9583eda14cbcSMatt Macy spa_vdev_activity_in_progress(spa_t *spa, boolean_t use_guid, uint64_t guid,
9584eda14cbcSMatt Macy     zpool_wait_activity_t activity, boolean_t *in_progress)
9585eda14cbcSMatt Macy {
9586eda14cbcSMatt Macy 	mutex_exit(&spa->spa_activities_lock);
9587eda14cbcSMatt Macy 	spa_config_enter(spa, SCL_CONFIG | SCL_STATE, FTAG, RW_READER);
9588eda14cbcSMatt Macy 	mutex_enter(&spa->spa_activities_lock);
9589eda14cbcSMatt Macy 
9590eda14cbcSMatt Macy 	vdev_t *vd;
9591eda14cbcSMatt Macy 	if (use_guid) {
9592eda14cbcSMatt Macy 		vd = spa_lookup_by_guid(spa, guid, B_FALSE);
9593eda14cbcSMatt Macy 		if (vd == NULL || !vd->vdev_ops->vdev_op_leaf) {
9594eda14cbcSMatt Macy 			spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
9595eda14cbcSMatt Macy 			return (EINVAL);
9596eda14cbcSMatt Macy 		}
9597eda14cbcSMatt Macy 	} else {
9598eda14cbcSMatt Macy 		vd = spa->spa_root_vdev;
9599eda14cbcSMatt Macy 	}
9600eda14cbcSMatt Macy 
9601eda14cbcSMatt Macy 	*in_progress = spa_vdev_activity_in_progress_impl(vd, activity);
9602eda14cbcSMatt Macy 
9603eda14cbcSMatt Macy 	spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
9604eda14cbcSMatt Macy 	return (0);
9605eda14cbcSMatt Macy }
9606eda14cbcSMatt Macy 
9607eda14cbcSMatt Macy /*
9608eda14cbcSMatt Macy  * Locking for waiting threads
9609eda14cbcSMatt Macy  * ---------------------------
9610eda14cbcSMatt Macy  *
9611eda14cbcSMatt Macy  * Waiting threads need a way to check whether a given activity is in progress,
9612eda14cbcSMatt Macy  * and then, if it is, wait for it to complete. Each activity will have some
9613eda14cbcSMatt Macy  * in-memory representation of the relevant on-disk state which can be used to
9614eda14cbcSMatt Macy  * determine whether or not the activity is in progress. The in-memory state and
9615eda14cbcSMatt Macy  * the locking used to protect it will be different for each activity, and may
9616eda14cbcSMatt Macy  * not be suitable for use with a cvar (e.g., some state is protected by the
9617eda14cbcSMatt Macy  * config lock). To allow waiting threads to wait without any races, another
9618eda14cbcSMatt Macy  * lock, spa_activities_lock, is used.
9619eda14cbcSMatt Macy  *
9620eda14cbcSMatt Macy  * When the state is checked, both the activity-specific lock (if there is one)
9621eda14cbcSMatt Macy  * and spa_activities_lock are held. In some cases, the activity-specific lock
9622eda14cbcSMatt Macy  * is acquired explicitly (e.g. the config lock). In others, the locking is
9623eda14cbcSMatt Macy  * internal to some check (e.g. bpobj_is_empty). After checking, the waiting
9624eda14cbcSMatt Macy  * thread releases the activity-specific lock and, if the activity is in
9625eda14cbcSMatt Macy  * progress, then cv_waits using spa_activities_lock.
9626eda14cbcSMatt Macy  *
9627eda14cbcSMatt Macy  * The waiting thread is woken when another thread, one completing some
9628eda14cbcSMatt Macy  * activity, updates the state of the activity and then calls
9629eda14cbcSMatt Macy  * spa_notify_waiters, which will cv_broadcast. This 'completing' thread only
9630eda14cbcSMatt Macy  * needs to hold its activity-specific lock when updating the state, and this
9631eda14cbcSMatt Macy  * lock can (but doesn't have to) be dropped before calling spa_notify_waiters.
9632eda14cbcSMatt Macy  *
9633eda14cbcSMatt Macy  * Because spa_notify_waiters acquires spa_activities_lock before broadcasting,
9634eda14cbcSMatt Macy  * and because it is held when the waiting thread checks the state of the
9635eda14cbcSMatt Macy  * activity, it can never be the case that the completing thread both updates
9636eda14cbcSMatt Macy  * the activity state and cv_broadcasts in between the waiting thread's check
9637eda14cbcSMatt Macy  * and cv_wait. Thus, a waiting thread can never miss a wakeup.
9638eda14cbcSMatt Macy  *
9639eda14cbcSMatt Macy  * In order to prevent deadlock, when the waiting thread does its check, in some
9640eda14cbcSMatt Macy  * cases it will temporarily drop spa_activities_lock in order to acquire the
9641eda14cbcSMatt Macy  * activity-specific lock. The order in which spa_activities_lock and the
9642eda14cbcSMatt Macy  * activity specific lock are acquired in the waiting thread is determined by
9643eda14cbcSMatt Macy  * the order in which they are acquired in the completing thread; if the
9644eda14cbcSMatt Macy  * completing thread calls spa_notify_waiters with the activity-specific lock
9645eda14cbcSMatt Macy  * held, then the waiting thread must also acquire the activity-specific lock
9646eda14cbcSMatt Macy  * first.
9647eda14cbcSMatt Macy  */
9648eda14cbcSMatt Macy 
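/*
 * Determine whether the given activity is currently in progress by examining
 * the relevant in-memory (and, for some activities, on-disk) state.  Called
 * with spa_activities_lock held; the lock may be dropped and reacquired while
 * the config lock is taken, per the ordering described above.
 */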
9649eda14cbcSMatt Macy static int
9650eda14cbcSMatt Macy spa_activity_in_progress(spa_t *spa, zpool_wait_activity_t activity,
9651eda14cbcSMatt Macy     boolean_t use_tag, uint64_t tag, boolean_t *in_progress)
9652eda14cbcSMatt Macy {
9653eda14cbcSMatt Macy 	int error = 0;
9654eda14cbcSMatt Macy 
9655eda14cbcSMatt Macy 	ASSERT(MUTEX_HELD(&spa->spa_activities_lock));
9656eda14cbcSMatt Macy 
9657eda14cbcSMatt Macy 	switch (activity) {
9658eda14cbcSMatt Macy 	case ZPOOL_WAIT_CKPT_DISCARD:
9659eda14cbcSMatt Macy 		*in_progress =
9660eda14cbcSMatt Macy 		    (spa_feature_is_active(spa, SPA_FEATURE_POOL_CHECKPOINT) &&
9661eda14cbcSMatt Macy 		    zap_contains(spa_meta_objset(spa),
9662eda14cbcSMatt Macy 		    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_ZPOOL_CHECKPOINT) ==
9663eda14cbcSMatt Macy 		    ENOENT);
9664eda14cbcSMatt Macy 		break;
9665eda14cbcSMatt Macy 	case ZPOOL_WAIT_FREE:
9666eda14cbcSMatt Macy 		*in_progress = ((spa_version(spa) >= SPA_VERSION_DEADLISTS &&
9667eda14cbcSMatt Macy 		    !bpobj_is_empty(&spa->spa_dsl_pool->dp_free_bpobj)) ||
9668eda14cbcSMatt Macy 		    spa_feature_is_active(spa, SPA_FEATURE_ASYNC_DESTROY) ||
9669eda14cbcSMatt Macy 		    spa_livelist_delete_check(spa));
9670eda14cbcSMatt Macy 		break;
9671eda14cbcSMatt Macy 	case ZPOOL_WAIT_INITIALIZE:
9672eda14cbcSMatt Macy 	case ZPOOL_WAIT_TRIM:
9673eda14cbcSMatt Macy 		error = spa_vdev_activity_in_progress(spa, use_tag, tag,
9674eda14cbcSMatt Macy 		    activity, in_progress);
9675eda14cbcSMatt Macy 		break;
9676eda14cbcSMatt Macy 	case ZPOOL_WAIT_REPLACE:
9677eda14cbcSMatt Macy 		mutex_exit(&spa->spa_activities_lock);
9678eda14cbcSMatt Macy 		spa_config_enter(spa, SCL_CONFIG | SCL_STATE, FTAG, RW_READER);
9679eda14cbcSMatt Macy 		mutex_enter(&spa->spa_activities_lock);
9680eda14cbcSMatt Macy 
9681eda14cbcSMatt Macy 		*in_progress = vdev_replace_in_progress(spa->spa_root_vdev);
9682eda14cbcSMatt Macy 		spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
9683eda14cbcSMatt Macy 		break;
9684eda14cbcSMatt Macy 	case ZPOOL_WAIT_REMOVE:
9685eda14cbcSMatt Macy 		*in_progress = (spa->spa_removing_phys.sr_state ==
9686eda14cbcSMatt Macy 		    DSS_SCANNING);
9687eda14cbcSMatt Macy 		break;
9688eda14cbcSMatt Macy 	case ZPOOL_WAIT_RESILVER:
9689eda14cbcSMatt Macy 		if ((*in_progress = vdev_rebuild_active(spa->spa_root_vdev)))
9690eda14cbcSMatt Macy 			break;
9691eda14cbcSMatt Macy 		/* fall through */
9692eda14cbcSMatt Macy 	case ZPOOL_WAIT_SCRUB:
9693eda14cbcSMatt Macy 	{
9694eda14cbcSMatt Macy 		boolean_t scanning, paused, is_scrub;
9695eda14cbcSMatt Macy 		dsl_scan_t *scn = spa->spa_dsl_pool->dp_scan;
9696eda14cbcSMatt Macy 
9697eda14cbcSMatt Macy 		is_scrub = (scn->scn_phys.scn_func == POOL_SCAN_SCRUB);
9698eda14cbcSMatt Macy 		scanning = (scn->scn_phys.scn_state == DSS_SCANNING);
9699eda14cbcSMatt Macy 		paused = dsl_scan_is_paused_scrub(scn);
9700eda14cbcSMatt Macy 		*in_progress = (scanning && !paused &&
9701eda14cbcSMatt Macy 		    is_scrub == (activity == ZPOOL_WAIT_SCRUB));
9702eda14cbcSMatt Macy 		break;
9703eda14cbcSMatt Macy 	}
9704eda14cbcSMatt Macy 	default:
9705eda14cbcSMatt Macy 		panic("unrecognized value for activity %d", activity);
9706eda14cbcSMatt Macy 	}
9707eda14cbcSMatt Macy 
9708eda14cbcSMatt Macy 	return (error);
9709eda14cbcSMatt Macy }
9710eda14cbcSMatt Macy 
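/*
 * Common implementation of spa_wait() and spa_wait_tag(): block
 * (interruptibly) until the given activity is no longer in progress, the
 * wait is cancelled by pool export, or a signal is received.
 */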
9711eda14cbcSMatt Macy static int
9712eda14cbcSMatt Macy spa_wait_common(const char *pool, zpool_wait_activity_t activity,
9713eda14cbcSMatt Macy     boolean_t use_tag, uint64_t tag, boolean_t *waited)
9714eda14cbcSMatt Macy {
9715eda14cbcSMatt Macy 	/*
9716eda14cbcSMatt Macy 	 * The tag is used to distinguish between instances of an activity.
9717eda14cbcSMatt Macy 	 * 'initialize' and 'trim' are the only activities that we use this for.
9718eda14cbcSMatt Macy 	 * The other activities can only have a single instance in progress in a
9719eda14cbcSMatt Macy 	 * pool at one time, making the tag unnecessary.
9720eda14cbcSMatt Macy 	 *
9721eda14cbcSMatt Macy 	 * There can be multiple devices being replaced at once, but since they
9722eda14cbcSMatt Macy 	 * all finish once resilvering finishes, we don't bother keeping track
9723eda14cbcSMatt Macy 	 * of them individually, we just wait for them all to finish.
9724eda14cbcSMatt Macy 	 */
9725eda14cbcSMatt Macy 	if (use_tag && activity != ZPOOL_WAIT_INITIALIZE &&
9726eda14cbcSMatt Macy 	    activity != ZPOOL_WAIT_TRIM)
9727eda14cbcSMatt Macy 		return (EINVAL);
9728eda14cbcSMatt Macy 
9729eda14cbcSMatt Macy 	if (activity < 0 || activity >= ZPOOL_WAIT_NUM_ACTIVITIES)
9730eda14cbcSMatt Macy 		return (EINVAL);
9731eda14cbcSMatt Macy 
9732eda14cbcSMatt Macy 	spa_t *spa;
9733eda14cbcSMatt Macy 	int error = spa_open(pool, &spa, FTAG);
9734eda14cbcSMatt Macy 	if (error != 0)
9735eda14cbcSMatt Macy 		return (error);
9736eda14cbcSMatt Macy 
9737eda14cbcSMatt Macy 	/*
9738eda14cbcSMatt Macy 	 * Increment the spa's waiter count so that we can call spa_close and
9739eda14cbcSMatt Macy 	 * still ensure that the spa_t doesn't get freed before this thread is
9740eda14cbcSMatt Macy 	 * finished with it when the pool is exported. We want to call spa_close
9741eda14cbcSMatt Macy 	 * before we start waiting because otherwise the additional ref would
9742eda14cbcSMatt Macy 	 * prevent the pool from being exported or destroyed throughout the
9743eda14cbcSMatt Macy 	 * potentially long wait.
9744eda14cbcSMatt Macy 	 */
9745eda14cbcSMatt Macy 	mutex_enter(&spa->spa_activities_lock);
9746eda14cbcSMatt Macy 	spa->spa_waiters++;
9747eda14cbcSMatt Macy 	spa_close(spa, FTAG);
9748eda14cbcSMatt Macy 
9749eda14cbcSMatt Macy 	*waited = B_FALSE;
9750eda14cbcSMatt Macy 	for (;;) {
9751eda14cbcSMatt Macy 		boolean_t in_progress;
9752eda14cbcSMatt Macy 		error = spa_activity_in_progress(spa, activity, use_tag, tag,
9753eda14cbcSMatt Macy 		    &in_progress);
9754eda14cbcSMatt Macy 
9755eda14cbcSMatt Macy 		if (error || !in_progress || spa->spa_waiters_cancel)
9756eda14cbcSMatt Macy 			break;
9757eda14cbcSMatt Macy 
9758eda14cbcSMatt Macy 		*waited = B_TRUE;
9759eda14cbcSMatt Macy 
9760eda14cbcSMatt Macy 		if (cv_wait_sig(&spa->spa_activities_cv,
9761eda14cbcSMatt Macy 		    &spa->spa_activities_lock) == 0) {
9762eda14cbcSMatt Macy 			error = EINTR;
9763eda14cbcSMatt Macy 			break;
9764eda14cbcSMatt Macy 		}
9765eda14cbcSMatt Macy 	}
9766eda14cbcSMatt Macy 
9767eda14cbcSMatt Macy 	spa->spa_waiters--;
9768eda14cbcSMatt Macy 	cv_signal(&spa->spa_waiters_cv);
9769eda14cbcSMatt Macy 	mutex_exit(&spa->spa_activities_lock);
9770eda14cbcSMatt Macy 
9771eda14cbcSMatt Macy 	return (error);
9772eda14cbcSMatt Macy }
9773eda14cbcSMatt Macy 
9774eda14cbcSMatt Macy /*
9775eda14cbcSMatt Macy  * Wait for a particular instance of the specified activity to complete, where
9776eda14cbcSMatt Macy  * the instance is identified by 'tag'.
9777eda14cbcSMatt Macy  */
9778eda14cbcSMatt Macy int
9779eda14cbcSMatt Macy spa_wait_tag(const char *pool, zpool_wait_activity_t activity, uint64_t tag,
9780eda14cbcSMatt Macy     boolean_t *waited)
9781eda14cbcSMatt Macy {
9782eda14cbcSMatt Macy 	return (spa_wait_common(pool, activity, B_TRUE, tag, waited));
9783eda14cbcSMatt Macy }
9784eda14cbcSMatt Macy 
9785eda14cbcSMatt Macy /*
9786eda14cbcSMatt Macy  * Wait for all instances of the specified activity to complete.
9787eda14cbcSMatt Macy  */
9788eda14cbcSMatt Macy int
9789eda14cbcSMatt Macy spa_wait(const char *pool, zpool_wait_activity_t activity, boolean_t *waited)
9790eda14cbcSMatt Macy {
9791eda14cbcSMatt Macy 
9792eda14cbcSMatt Macy 	return (spa_wait_common(pool, activity, B_FALSE, 0, waited));
9793eda14cbcSMatt Macy }
9794eda14cbcSMatt Macy 
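/*
 * Allocate a sysevent describing 'name' for the given pool and (optional)
 * vdev.  Returns NULL when the event cannot be created (e.g. in userland
 * builds).
 */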
9795eda14cbcSMatt Macy sysevent_t *
9796eda14cbcSMatt Macy spa_event_create(spa_t *spa, vdev_t *vd, nvlist_t *hist_nvl, const char *name)
9797eda14cbcSMatt Macy {
9798eda14cbcSMatt Macy 	sysevent_t *ev = NULL;
9799eda14cbcSMatt Macy #ifdef _KERNEL
9800eda14cbcSMatt Macy 	nvlist_t *resource;
9801eda14cbcSMatt Macy 
9802eda14cbcSMatt Macy 	resource = zfs_event_create(spa, vd, FM_SYSEVENT_CLASS, name, hist_nvl);
9803eda14cbcSMatt Macy 	if (resource) {
9804eda14cbcSMatt Macy 		ev = kmem_alloc(sizeof (sysevent_t), KM_SLEEP);
9805eda14cbcSMatt Macy 		ev->resource = resource;
9806eda14cbcSMatt Macy 	}
9807eda14cbcSMatt Macy #endif
9808eda14cbcSMatt Macy 	return (ev);
9809eda14cbcSMatt Macy }
9810eda14cbcSMatt Macy 
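/*
 * Post a previously created sysevent and free it.  No-op outside the kernel.
 */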
9811eda14cbcSMatt Macy void
9812eda14cbcSMatt Macy spa_event_post(sysevent_t *ev)
9813eda14cbcSMatt Macy {
9814eda14cbcSMatt Macy #ifdef _KERNEL
9815eda14cbcSMatt Macy 	if (ev) {
9816eda14cbcSMatt Macy 		zfs_zevent_post(ev->resource, NULL, zfs_zevent_post_cb);
9817eda14cbcSMatt Macy 		kmem_free(ev, sizeof (*ev));
9818eda14cbcSMatt Macy 	}
9819eda14cbcSMatt Macy #endif
9820eda14cbcSMatt Macy }
9821eda14cbcSMatt Macy 
9822eda14cbcSMatt Macy /*
9823eda14cbcSMatt Macy  * Post a zevent corresponding to the given sysevent.   The 'name' must be one
9824eda14cbcSMatt Macy  * of the event definitions in sys/sysevent/eventdefs.h.  The payload will be
9825eda14cbcSMatt Macy  * filled in from the spa and (optionally) the vdev.  This doesn't do anything
9826eda14cbcSMatt Macy  * in the userland libzpool, as we don't want consumers to misinterpret ztest
9827eda14cbcSMatt Macy  * or zdb as real changes.
9828eda14cbcSMatt Macy  */
9829eda14cbcSMatt Macy void
9830eda14cbcSMatt Macy spa_event_notify(spa_t *spa, vdev_t *vd, nvlist_t *hist_nvl, const char *name)
9831eda14cbcSMatt Macy {
9832eda14cbcSMatt Macy 	spa_event_post(spa_event_create(spa, vd, hist_nvl, name));
9833eda14cbcSMatt Macy }
9834eda14cbcSMatt Macy 
9835eda14cbcSMatt Macy /* state manipulation functions */
9836eda14cbcSMatt Macy EXPORT_SYMBOL(spa_open);
9837eda14cbcSMatt Macy EXPORT_SYMBOL(spa_open_rewind);
9838eda14cbcSMatt Macy EXPORT_SYMBOL(spa_get_stats);
9839eda14cbcSMatt Macy EXPORT_SYMBOL(spa_create);
9840eda14cbcSMatt Macy EXPORT_SYMBOL(spa_import);
9841eda14cbcSMatt Macy EXPORT_SYMBOL(spa_tryimport);
9842eda14cbcSMatt Macy EXPORT_SYMBOL(spa_destroy);
9843eda14cbcSMatt Macy EXPORT_SYMBOL(spa_export);
9844eda14cbcSMatt Macy EXPORT_SYMBOL(spa_reset);
9845eda14cbcSMatt Macy EXPORT_SYMBOL(spa_async_request);
9846eda14cbcSMatt Macy EXPORT_SYMBOL(spa_async_suspend);
9847eda14cbcSMatt Macy EXPORT_SYMBOL(spa_async_resume);
9848eda14cbcSMatt Macy EXPORT_SYMBOL(spa_inject_addref);
9849eda14cbcSMatt Macy EXPORT_SYMBOL(spa_inject_delref);
9850eda14cbcSMatt Macy EXPORT_SYMBOL(spa_scan_stat_init);
9851eda14cbcSMatt Macy EXPORT_SYMBOL(spa_scan_get_stats);
9852eda14cbcSMatt Macy 
9853eda14cbcSMatt Macy /* device manipulation */
9854eda14cbcSMatt Macy EXPORT_SYMBOL(spa_vdev_add);
9855eda14cbcSMatt Macy EXPORT_SYMBOL(spa_vdev_attach);
9856eda14cbcSMatt Macy EXPORT_SYMBOL(spa_vdev_detach);
9857eda14cbcSMatt Macy EXPORT_SYMBOL(spa_vdev_setpath);
9858eda14cbcSMatt Macy EXPORT_SYMBOL(spa_vdev_setfru);
9859eda14cbcSMatt Macy EXPORT_SYMBOL(spa_vdev_split_mirror);
9860eda14cbcSMatt Macy 
9861eda14cbcSMatt Macy /* spare state (which is global across all pools) */
9862eda14cbcSMatt Macy EXPORT_SYMBOL(spa_spare_add);
9863eda14cbcSMatt Macy EXPORT_SYMBOL(spa_spare_remove);
9864eda14cbcSMatt Macy EXPORT_SYMBOL(spa_spare_exists);
9865eda14cbcSMatt Macy EXPORT_SYMBOL(spa_spare_activate);
9866eda14cbcSMatt Macy 
9867eda14cbcSMatt Macy /* L2ARC state (which is global across all pools) */
9868eda14cbcSMatt Macy EXPORT_SYMBOL(spa_l2cache_add);
9869eda14cbcSMatt Macy EXPORT_SYMBOL(spa_l2cache_remove);
9870eda14cbcSMatt Macy EXPORT_SYMBOL(spa_l2cache_exists);
9871eda14cbcSMatt Macy EXPORT_SYMBOL(spa_l2cache_activate);
9872eda14cbcSMatt Macy EXPORT_SYMBOL(spa_l2cache_drop);
9873eda14cbcSMatt Macy 
9874eda14cbcSMatt Macy /* scanning */
9875eda14cbcSMatt Macy EXPORT_SYMBOL(spa_scan);
9876eda14cbcSMatt Macy EXPORT_SYMBOL(spa_scan_stop);
9877eda14cbcSMatt Macy 
9878eda14cbcSMatt Macy /* spa syncing */
9879eda14cbcSMatt Macy EXPORT_SYMBOL(spa_sync); /* only for DMU use */
9880eda14cbcSMatt Macy EXPORT_SYMBOL(spa_sync_allpools);
9881eda14cbcSMatt Macy 
9882eda14cbcSMatt Macy /* properties */
9883eda14cbcSMatt Macy EXPORT_SYMBOL(spa_prop_set);
9884eda14cbcSMatt Macy EXPORT_SYMBOL(spa_prop_get);
9885eda14cbcSMatt Macy EXPORT_SYMBOL(spa_prop_clear_bootfs);
9886eda14cbcSMatt Macy 
9887eda14cbcSMatt Macy /* asynchronous event notification */
9888eda14cbcSMatt Macy EXPORT_SYMBOL(spa_event_notify);
9889eda14cbcSMatt Macy 
9890eda14cbcSMatt Macy /* BEGIN CSTYLED */
9891eda14cbcSMatt Macy ZFS_MODULE_PARAM(zfs_spa, spa_, load_verify_shift, INT, ZMOD_RW,
9892*16038816SMartin Matuska 	"log2 fraction of arc that can be used by inflight I/Os when "
9893eda14cbcSMatt Macy 	"verifying pool during import");
9894eda14cbcSMatt Macy 
9895eda14cbcSMatt Macy ZFS_MODULE_PARAM(zfs_spa, spa_, load_verify_metadata, INT, ZMOD_RW,
9896eda14cbcSMatt Macy 	"Set to traverse metadata on pool import");
9897eda14cbcSMatt Macy 
9898eda14cbcSMatt Macy ZFS_MODULE_PARAM(zfs_spa, spa_, load_verify_data, INT, ZMOD_RW,
9899eda14cbcSMatt Macy 	"Set to traverse data on pool import");
9900eda14cbcSMatt Macy 
9901eda14cbcSMatt Macy ZFS_MODULE_PARAM(zfs_spa, spa_, load_print_vdev_tree, INT, ZMOD_RW,
9902eda14cbcSMatt Macy 	"Print vdev tree to zfs_dbgmsg during pool import");
9903eda14cbcSMatt Macy 
9904eda14cbcSMatt Macy ZFS_MODULE_PARAM(zfs_zio, zio_, taskq_batch_pct, UINT, ZMOD_RD,
9905eda14cbcSMatt Macy 	"Percentage of CPUs to run an IO worker thread");
9906eda14cbcSMatt Macy 
9907*16038816SMartin Matuska ZFS_MODULE_PARAM(zfs_zio, zio_, taskq_batch_tpq, UINT, ZMOD_RD,
9908*16038816SMartin Matuska 	"Number of threads per IO worker taskqueue");
9909*16038816SMartin Matuska 
9910eda14cbcSMatt Macy ZFS_MODULE_PARAM(zfs, zfs_, max_missing_tvds, ULONG, ZMOD_RW,
9911eda14cbcSMatt Macy 	"Allow importing pool with up to this number of missing top-level "
9912eda14cbcSMatt Macy 	"vdevs (in read-only mode)");
9913eda14cbcSMatt Macy 
9914eda14cbcSMatt Macy ZFS_MODULE_PARAM(zfs_livelist_condense, zfs_livelist_condense_, zthr_pause, INT, ZMOD_RW,
9915eda14cbcSMatt Macy 	"Set the livelist condense zthr to pause");
9916eda14cbcSMatt Macy 
9917eda14cbcSMatt Macy ZFS_MODULE_PARAM(zfs_livelist_condense, zfs_livelist_condense_, sync_pause, INT, ZMOD_RW,
9918eda14cbcSMatt Macy 	"Set the livelist condense synctask to pause");
9919eda14cbcSMatt Macy 
9920eda14cbcSMatt Macy ZFS_MODULE_PARAM(zfs_livelist_condense, zfs_livelist_condense_, sync_cancel, INT, ZMOD_RW,
9921eda14cbcSMatt Macy 	"Whether livelist condensing was canceled in the synctask");
9922eda14cbcSMatt Macy 
9923eda14cbcSMatt Macy ZFS_MODULE_PARAM(zfs_livelist_condense, zfs_livelist_condense_, zthr_cancel, INT, ZMOD_RW,
9924eda14cbcSMatt Macy 	"Whether livelist condensing was canceled in the zthr function");
9925eda14cbcSMatt Macy 
9926eda14cbcSMatt Macy ZFS_MODULE_PARAM(zfs_livelist_condense, zfs_livelist_condense_, new_alloc, INT, ZMOD_RW,
9927eda14cbcSMatt Macy 	"Whether extra ALLOC blkptrs were added to a livelist entry while it "
9928eda14cbcSMatt Macy 	"was being condensed");
9929eda14cbcSMatt Macy /* END CSTYLED */
9930