1 // SPDX-License-Identifier: CDDL-1.0
2 /*
3 * CDDL HEADER START
4 *
5 * The contents of this file are subject to the terms of the
6 * Common Development and Distribution License (the "License").
7 * You may not use this file except in compliance with the License.
8 *
9 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10 * or https://opensource.org/licenses/CDDL-1.0.
11 * See the License for the specific language governing permissions
12 * and limitations under the License.
13 *
14 * When distributing Covered Code, include this CDDL HEADER in each
15 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16 * If applicable, add the following below this CDDL HEADER, with the
17 * fields enclosed by brackets "[]" replaced with your own identifying
18 * information: Portions Copyright [yyyy] [name of copyright owner]
19 *
20 * CDDL HEADER END
21 */
22
23 /*
24 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
25 * Copyright (c) 2011, 2024 by Delphix. All rights reserved.
26 * Copyright (c) 2018, Nexenta Systems, Inc. All rights reserved.
27 * Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
28 * Copyright 2013 Saso Kiselkov. All rights reserved.
29 * Copyright (c) 2014 Integros [integros.com]
30 * Copyright 2016 Toomas Soome <tsoome@me.com>
31 * Copyright (c) 2016 Actifio, Inc. All rights reserved.
32 * Copyright 2018 Joyent, Inc.
33 * Copyright (c) 2017, 2019, Datto Inc. All rights reserved.
34 * Copyright 2017 Joyent, Inc.
35 * Copyright (c) 2017, Intel Corporation.
36 * Copyright (c) 2021, Colm Buckley <colm@tuatha.org>
37 * Copyright (c) 2023 Hewlett Packard Enterprise Development LP.
38 * Copyright (c) 2023, 2024, Klara Inc.
39 */
40
41 /*
42 * SPA: Storage Pool Allocator
43 *
44 * This file contains all the routines used when modifying on-disk SPA state.
45 * This includes opening, importing, destroying, exporting a pool, and syncing a
46 * pool.
47 */
48
49 #include <sys/zfs_context.h>
50 #include <sys/fm/fs/zfs.h>
51 #include <sys/spa_impl.h>
52 #include <sys/zio.h>
53 #include <sys/zio_checksum.h>
54 #include <sys/dmu.h>
55 #include <sys/dmu_tx.h>
56 #include <sys/zap.h>
57 #include <sys/zil.h>
58 #include <sys/brt.h>
59 #include <sys/ddt.h>
60 #include <sys/vdev_impl.h>
61 #include <sys/vdev_removal.h>
62 #include <sys/vdev_indirect_mapping.h>
63 #include <sys/vdev_indirect_births.h>
64 #include <sys/vdev_initialize.h>
65 #include <sys/vdev_rebuild.h>
66 #include <sys/vdev_trim.h>
67 #include <sys/vdev_disk.h>
68 #include <sys/vdev_raidz.h>
69 #include <sys/vdev_draid.h>
70 #include <sys/metaslab.h>
71 #include <sys/metaslab_impl.h>
72 #include <sys/mmp.h>
73 #include <sys/uberblock_impl.h>
74 #include <sys/txg.h>
75 #include <sys/avl.h>
76 #include <sys/bpobj.h>
77 #include <sys/dmu_traverse.h>
78 #include <sys/dmu_objset.h>
79 #include <sys/unique.h>
80 #include <sys/dsl_pool.h>
81 #include <sys/dsl_dataset.h>
82 #include <sys/dsl_dir.h>
83 #include <sys/dsl_prop.h>
84 #include <sys/dsl_synctask.h>
85 #include <sys/fs/zfs.h>
86 #include <sys/arc.h>
87 #include <sys/callb.h>
88 #include <sys/systeminfo.h>
89 #include <sys/zfs_ioctl.h>
90 #include <sys/dsl_scan.h>
91 #include <sys/zfeature.h>
92 #include <sys/dsl_destroy.h>
93 #include <sys/zvol.h>
94
95 #ifdef _KERNEL
96 #include <sys/fm/protocol.h>
97 #include <sys/fm/util.h>
98 #include <sys/callb.h>
99 #include <sys/zone.h>
100 #include <sys/vmsystm.h>
101 #endif /* _KERNEL */
102
103 #include "zfs_crrd.h"
104 #include "zfs_prop.h"
105 #include "zfs_comutil.h"
106 #include <cityhash.h>
107
108 /*
109 * spa_thread() existed on Illumos as a parent thread for the various worker
110 * threads that actually run the pool, as a way to both reference the entire
111 * pool work as a single object, and to share properties like scheduling
112 * options. It has not yet been adapted to Linux or FreeBSD. This define is
113 * used to mark related parts of the code to make things easier for the reader,
114 * and to compile this code out. It can be removed when someone implements it,
115 * moves it to some Illumos-specific place, or removes it entirely.
116 */
117 #undef HAVE_SPA_THREAD
118
119 /*
120 * The "System Duty Cycle" scheduling class is an Illumos feature to help
121 * prevent CPU-intensive kernel threads from affecting latency on interactive
122 * threads. It doesn't exist on Linux or FreeBSD, so the supporting code is
123 * gated behind a define. On Illumos SDC depends on spa_thread(), but
124 * spa_thread() also has other uses, so this is a separate define.
125 */
126 #undef HAVE_SYSDC
127
128 /*
129 * The interval, in seconds, at which failed configuration cache file writes
130 * should be retried.
131 */
132 int zfs_ccw_retry_interval = 300;
133
134 typedef enum zti_modes {
135 ZTI_MODE_FIXED, /* value is # of threads (min 1) */
136 ZTI_MODE_SCALE, /* Taskqs scale with CPUs. */
137 ZTI_MODE_SYNC, /* sync thread assigned */
138 ZTI_MODE_NULL, /* don't create a taskq */
139 ZTI_NMODES
140 } zti_modes_t;
141
142 #define ZTI_P(n, q) { ZTI_MODE_FIXED, (n), (q) }
143 #define ZTI_PCT(n) { ZTI_MODE_ONLINE_PERCENT, (n), 1 }
144 #define ZTI_SCALE(min) { ZTI_MODE_SCALE, (min), 1 }
145 #define ZTI_SYNC { ZTI_MODE_SYNC, 0, 1 }
146 #define ZTI_NULL { ZTI_MODE_NULL, 0, 0 }
147
148 #define ZTI_N(n) ZTI_P(n, 1)
149 #define ZTI_ONE ZTI_N(1)
150
151 typedef struct zio_taskq_info {
152 zti_modes_t zti_mode;
153 uint_t zti_value;
154 uint_t zti_count;
155 } zio_taskq_info_t;
156
157 static const char *const zio_taskq_types[ZIO_TASKQ_TYPES] = {
158 "iss", "iss_h", "int", "int_h"
159 };
160
161 /*
162 * This table defines the taskq settings for each ZFS I/O type. When
163 * initializing a pool, we use this table to create an appropriately sized
164 * taskq. Some operations are low volume and therefore have a small, static
165 * number of threads assigned to their taskqs using the ZTI_N(#) or ZTI_ONE
166 * macros. Other operations process a large amount of data; the ZTI_SCALE
167 * macro causes us to create a taskq oriented for throughput. Some operations
168 * are so high frequency and short-lived that the taskq itself can become a
169 * point of lock contention. The ZTI_P(#, #) macro indicates that we need an
170 * additional degree of parallelism specified by the number of threads per-
171 * taskq and the number of taskqs; when dispatching an event in this case, the
172 * particular taskq is chosen at random. ZTI_SCALE uses a number of taskqs
173 * that scales with the number of CPUs.
174 *
175 * The different taskq priorities are to handle the different contexts (issue
176 * and interrupt) and then to reserve threads for high priority I/Os that
177 * need to be handled with minimum delay. The Illumos taskq implementation
178 * of TQ_FRONT is unfair, so separate high-priority threads are used there.
179 */
180 static zio_taskq_info_t zio_taskqs[ZIO_TYPES][ZIO_TASKQ_TYPES] = {
181 /* ISSUE ISSUE_HIGH INTR INTR_HIGH */
182 { ZTI_ONE, ZTI_NULL, ZTI_ONE, ZTI_NULL }, /* NULL */
183 { ZTI_N(8), ZTI_NULL, ZTI_SCALE(0), ZTI_NULL }, /* READ */
184 #ifdef illumos
185 { ZTI_SYNC, ZTI_N(5), ZTI_SCALE(0), ZTI_N(5) }, /* WRITE */
186 #else
187 { ZTI_SYNC, ZTI_NULL, ZTI_SCALE(0), ZTI_NULL }, /* WRITE */
188 #endif
189 { ZTI_SCALE(32), ZTI_NULL, ZTI_ONE, ZTI_NULL }, /* FREE */
190 { ZTI_ONE, ZTI_NULL, ZTI_ONE, ZTI_NULL }, /* CLAIM */
191 { ZTI_ONE, ZTI_NULL, ZTI_ONE, ZTI_NULL }, /* FLUSH */
192 { ZTI_N(4), ZTI_NULL, ZTI_ONE, ZTI_NULL }, /* TRIM */
193 };
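/*
 * For example, reading the READ row above: { ZTI_N(8), ZTI_NULL,
 * ZTI_SCALE(0), ZTI_NULL } expands (since ZTI_N(8) == ZTI_P(8, 1)) to one
 * issue taskq with 8 fixed threads, no high-priority issue taskq, interrupt
 * taskqs that scale with the number of CPUs, and no high-priority interrupt
 * taskq.
 */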
194
195 static void spa_sync_version(void *arg, dmu_tx_t *tx);
196 static void spa_sync_props(void *arg, dmu_tx_t *tx);
197 static boolean_t spa_has_active_shared_spare(spa_t *spa);
198 static int spa_load_impl(spa_t *spa, spa_import_type_t type,
199 const char **ereport);
200 static void spa_vdev_resilver_done(spa_t *spa);
201
202 /*
203 * Percentage of all CPUs that can be used by the metaslab preload taskq.
204 */
205 static uint_t metaslab_preload_pct = 50;
206
207 static uint_t zio_taskq_batch_pct = 80; /* 1 thread per cpu in pset */
208 static uint_t zio_taskq_batch_tpq; /* threads per taskq */
209
210 #ifdef HAVE_SYSDC
211 static const boolean_t zio_taskq_sysdc = B_TRUE; /* use SDC scheduling class */
212 static const uint_t zio_taskq_basedc = 80; /* base duty cycle */
213 #endif
214
215 #ifdef HAVE_SPA_THREAD
216 static const boolean_t spa_create_process = B_TRUE; /* no process => no sysdc */
217 #endif
218
219 static uint_t zio_taskq_write_tpq = 16;
220
221 /*
222 * Report any spa_load_verify errors found, but do not fail spa_load.
223 * This is used by zdb to analyze non-idle pools.
224 */
225 boolean_t spa_load_verify_dryrun = B_FALSE;
226
227 /*
228 * Allow reading spacemaps in case of readonly import (spa_mode == SPA_MODE_READ).
229 * This is used by zdb for spacemaps verification.
230 */
231 boolean_t spa_mode_readable_spacemaps = B_FALSE;
232
233 /*
234 * This (illegal) pool name is used when temporarily importing a spa_t in order
235 * to get the vdev stats associated with the imported devices.
236 */
237 #define TRYIMPORT_NAME "$import"
238
239 /*
240 * For debugging purposes: print out vdev tree during pool import.
241 */
242 static int spa_load_print_vdev_tree = B_FALSE;
243
244 /*
245 * A non-zero value for zfs_max_missing_tvds means that we allow importing
246 * pools with missing top-level vdevs. This is strictly intended for advanced
247 * pool recovery cases since missing data is almost inevitable. Pools with
248 * missing devices can only be imported read-only for safety reasons, and their
249 * fail-mode will be automatically set to "continue".
250 *
251 * With 1 missing vdev we should be able to import the pool and mount all
252 * datasets. User data that was not modified after the missing device has been
253 * added should be recoverable. This means that snapshots created prior to the
254 * addition of that device should be completely intact.
255 *
256 * With 2 missing vdevs, some datasets may fail to mount since there are
257 * dataset statistics that are stored as regular metadata. Some data might be
258 * recoverable if those vdevs were added recently.
259 *
260 * With 3 or more missing vdevs, the pool is severely damaged and MOS entries
261 * may be missing entirely. Chances of data recovery are very low. Note that
262 * there are also risks of performing an inadvertent rewind as we might be
263 * missing all the vdevs with the latest uberblocks.
264 */
265 uint64_t zfs_max_missing_tvds = 0;
266
267 /*
268 * The parameters below are similar to zfs_max_missing_tvds but are only
269 * intended for a preliminary open of the pool with an untrusted config which
270 * might be incomplete or out-dated.
271 *
272 * We are more tolerant for pools opened from a cachefile since we could have
273 * an out-dated cachefile where a device removal was not registered.
274 * We could have set the limit arbitrarily high but in the case where devices
275 * are really missing we would want to return the proper error codes; we chose
276 * SPA_DVAS_PER_BP - 1 so that some copies of the MOS would still be available
277 * and we get a chance to retrieve the trusted config.
278 */
279 uint64_t zfs_max_missing_tvds_cachefile = SPA_DVAS_PER_BP - 1;
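/*
 * Note: SPA_DVAS_PER_BP is 3, so the default above allows up to two missing
 * top-level vdevs when opening from a cachefile.
 */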
280
281 /*
282 * In the case where config was assembled by scanning device paths (/dev/dsk
283 * by default) we are less tolerant since all the existing devices should have
284 * been detected and we want spa_load to return the right error codes.
285 */
286 uint64_t zfs_max_missing_tvds_scan = 0;
287
288 /*
289 * Debugging aid that pauses spa_sync() towards the end.
290 */
291 static const boolean_t zfs_pause_spa_sync = B_FALSE;
292
293 /*
294 * Variables to indicate the livelist condense zthr func should wait at certain
295 * points for the livelist to be removed - used to test condense/destroy races
296 */
297 static int zfs_livelist_condense_zthr_pause = 0;
298 static int zfs_livelist_condense_sync_pause = 0;
299
300 /*
301 * Variables to track whether or not condense cancellation has been
302 * triggered in testing.
303 */
304 static int zfs_livelist_condense_sync_cancel = 0;
305 static int zfs_livelist_condense_zthr_cancel = 0;
306
307 /*
308 * Variable to track whether or not extra ALLOC blkptrs were added to a
309 * livelist entry while it was being condensed (caused by the way we track
310 * remapped blkptrs in dbuf_remap_impl)
311 */
312 static int zfs_livelist_condense_new_alloc = 0;
313
314 /*
315 * Interval, in seconds, at which the current txg should be recorded in
316 * the txg database.
317 * The smallest available resolution is in minutes, which means an update
318 * occurs each time `spa_note_txg_time` elapses and the txg has changed. We
319 * provide a 256-slot ring buffer for minute-level resolution. The slot count
320 * is limited by the size of the structure and the maximum number of bytes we can write
321 * into ZAP. Setting `spa_note_txg_time` to 10 minutes results in approximately
322 * 144 records per day. Given the 256 slots, this provides roughly 1.5 days of
323 * high-resolution data.
324 *
325 * The user can decrease `spa_note_txg_time` to increase resolution within
326 * a day, at the cost of retaining fewer days of data. Alternatively, increasing
327 * the interval allows storing data over a longer period, but with lower
328 * frequency.
329 *
330 * This parameter does not affect the daily or monthly databases, as those only
331 * store one record per day and per month, respectively.
332 */
333 static uint_t spa_note_txg_time = 10 * 60;
334
335 /*
336 * How often to flush the txg database to disk (in seconds).
337 * We flush data every time we write to it, making it the most reliable option.
338 * Since this happens every 10 minutes, it shouldn't introduce any noticeable
339 * overhead for the system. In case of failure, we will always have an
340 * up-to-date version of the database.
341 *
342 * The user can adjust the flush interval to a lower value, but it probably
343 * doesn't make sense to flush more often than the database is updated.
344 * The user can also increase the interval if they're concerned about the
345 * performance of writing the entire database to disk.
346 */
347 static uint_t spa_flush_txg_time = 10 * 60;
348
349 /*
350 * ==========================================================================
351 * SPA properties routines
352 * ==========================================================================
353 */
354
355 /*
356 * Add a (source=src, propname=propval) list to an nvlist.
357 */
358 static void
359 spa_prop_add_list(nvlist_t *nvl, zpool_prop_t prop, const char *strval,
360 uint64_t intval, zprop_source_t src)
361 {
362 const char *propname = zpool_prop_to_name(prop);
363 nvlist_t *propval;
364
365 propval = fnvlist_alloc();
366 fnvlist_add_uint64(propval, ZPROP_SOURCE, src);
367
368 if (strval != NULL)
369 fnvlist_add_string(propval, ZPROP_VALUE, strval);
370 else
371 fnvlist_add_uint64(propval, ZPROP_VALUE, intval);
372
373 fnvlist_add_nvlist(nvl, propname, propval);
374 nvlist_free(propval);
375 }
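/*
 * The resulting entry is a nested nvlist keyed by the property name, e.g.
 * (sketch):
 *   "comment" -> { ZPROP_SOURCE = ZPROP_SRC_LOCAL, ZPROP_VALUE = <string> }
 * which is the shape userland (libzfs) unpacks when reporting pool
 * properties.
 */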
376
377 static int
378 spa_prop_add(spa_t *spa, const char *propname, nvlist_t *outnvl)
379 {
380 zpool_prop_t prop = zpool_name_to_prop(propname);
381 zprop_source_t src = ZPROP_SRC_NONE;
382 uint64_t intval;
383 int err;
384
385 /*
386 * NB: Not all property lookups via this API require the spa props
387 * lock; those that do must grab it explicitly here.
388 */
389 switch (prop) {
390 case ZPOOL_PROP_DEDUPCACHED:
391 err = ddt_get_pool_dedup_cached(spa, &intval);
392 if (err != 0)
393 return (SET_ERROR(err));
394 break;
395 default:
396 return (SET_ERROR(EINVAL));
397 }
398
399 spa_prop_add_list(outnvl, prop, NULL, intval, src);
400
401 return (0);
402 }
403
404 int
405 spa_prop_get_nvlist(spa_t *spa, char **props, unsigned int n_props,
406 nvlist_t *outnvl)
407 {
408 int err = 0;
409
410 if (props == NULL)
411 return (0);
412
413 for (unsigned int i = 0; i < n_props && err == 0; i++) {
414 err = spa_prop_add(spa, props[i], outnvl);
415 }
416
417 return (err);
418 }
419
420 /*
421 * Add metaslab class properties to an nvlist.
422 */
423 static void
424 spa_prop_add_metaslab_class(nvlist_t *nv, metaslab_class_t *mc,
425 zpool_mc_props_t mcp, uint64_t *sizep, uint64_t *allocp, uint64_t *usablep,
426 uint64_t *usedp)
427 {
428 uint64_t size = metaslab_class_get_space(mc);
429 uint64_t alloc = metaslab_class_get_alloc(mc);
430 uint64_t dsize = metaslab_class_get_dspace(mc);
431 uint64_t dalloc = metaslab_class_get_dalloc(mc);
432 uint64_t cap = (size == 0) ? 0 : (alloc * 100 / size);
433 const zprop_source_t src = ZPROP_SRC_NONE;
434
435 spa_prop_add_list(nv, mcp + ZPOOL_MC_PROP_SIZE, NULL, size, src);
436 spa_prop_add_list(nv, mcp + ZPOOL_MC_PROP_ALLOCATED, NULL, alloc, src);
437 spa_prop_add_list(nv, mcp + ZPOOL_MC_PROP_USABLE, NULL, dsize, src);
438 spa_prop_add_list(nv, mcp + ZPOOL_MC_PROP_USED, NULL, dalloc, src);
439 spa_prop_add_list(nv, mcp + ZPOOL_MC_PROP_FRAGMENTATION, NULL,
440 metaslab_class_fragmentation(mc), src);
441 spa_prop_add_list(nv, mcp + ZPOOL_MC_PROP_EXPANDSZ, NULL,
442 metaslab_class_expandable_space(mc), src);
443 spa_prop_add_list(nv, mcp + ZPOOL_MC_PROP_FREE, NULL, size - alloc,
444 src);
445 spa_prop_add_list(nv, mcp + ZPOOL_MC_PROP_AVAILABLE, NULL,
446 dsize - dalloc, src);
447 spa_prop_add_list(nv, mcp + ZPOOL_MC_PROP_CAPACITY, NULL, cap, src);
448 if (sizep != NULL)
449 *sizep += size;
450 if (allocp != NULL)
451 *allocp += alloc;
452 if (usablep != NULL)
453 *usablep += dsize;
454 if (usedp != NULL)
455 *usedp += dalloc;
456 }
457
458 /*
459 * Add a user property (source=src, propname=propval) to an nvlist.
460 */
461 static void
462 spa_prop_add_user(nvlist_t *nvl, const char *propname, char *strval,
463 zprop_source_t src)
464 {
465 nvlist_t *propval;
466
467 VERIFY0(nvlist_alloc(&propval, NV_UNIQUE_NAME, KM_SLEEP));
468 VERIFY0(nvlist_add_uint64(propval, ZPROP_SOURCE, src));
469 VERIFY0(nvlist_add_string(propval, ZPROP_VALUE, strval));
470 VERIFY0(nvlist_add_nvlist(nvl, propname, propval));
471 nvlist_free(propval);
472 }
473
474 /*
475 * Get property values from the spa configuration.
476 */
477 static void
478 spa_prop_get_config(spa_t *spa, nvlist_t *nv)
479 {
480 vdev_t *rvd = spa->spa_root_vdev;
481 dsl_pool_t *pool = spa->spa_dsl_pool;
482 uint64_t size, alloc, usable, used, cap, version;
483 const zprop_source_t src = ZPROP_SRC_NONE;
484 spa_config_dirent_t *dp;
485 metaslab_class_t *mc = spa_normal_class(spa);
486
487 ASSERT(MUTEX_HELD(&spa->spa_props_lock));
488
489 if (rvd != NULL) {
490 spa_prop_add_list(nv, ZPOOL_PROP_NAME, spa_name(spa), 0, src);
491
492 size = alloc = usable = used = 0;
493 spa_prop_add_metaslab_class(nv, mc, ZPOOL_MC_PROPS_NORMAL,
494 &size, &alloc, &usable, &used);
495 spa_prop_add_metaslab_class(nv, spa_special_class(spa),
496 ZPOOL_MC_PROPS_SPECIAL, &size, &alloc, &usable, &used);
497 spa_prop_add_metaslab_class(nv, spa_dedup_class(spa),
498 ZPOOL_MC_PROPS_DEDUP, &size, &alloc, &usable, &used);
499 spa_prop_add_metaslab_class(nv, spa_log_class(spa),
500 ZPOOL_MC_PROPS_LOG, NULL, NULL, NULL, NULL);
501 spa_prop_add_metaslab_class(nv, spa_embedded_log_class(spa),
502 ZPOOL_MC_PROPS_ELOG, &size, &alloc, &usable, &used);
503 spa_prop_add_metaslab_class(nv,
504 spa_special_embedded_log_class(spa), ZPOOL_MC_PROPS_SELOG,
505 &size, &alloc, &usable, &used);
506
507 spa_prop_add_list(nv, ZPOOL_PROP_SIZE, NULL, size, src);
508 spa_prop_add_list(nv, ZPOOL_PROP_ALLOCATED, NULL, alloc, src);
509 spa_prop_add_list(nv, ZPOOL_PROP_FREE, NULL,
510 size - alloc, src);
511 spa_prop_add_list(nv, ZPOOL_PROP_FRAGMENTATION, NULL,
512 metaslab_class_fragmentation(mc), src);
513 spa_prop_add_list(nv, ZPOOL_PROP_EXPANDSZ, NULL,
514 metaslab_class_expandable_space(mc), src);
515 cap = (size == 0) ? 0 : (alloc * 100 / size);
516 spa_prop_add_list(nv, ZPOOL_PROP_CAPACITY, NULL, cap, src);
517 spa_prop_add_list(nv, ZPOOL_PROP_AVAILABLE, NULL, usable - used,
518 src);
519 spa_prop_add_list(nv, ZPOOL_PROP_USABLE, NULL, usable, src);
520 spa_prop_add_list(nv, ZPOOL_PROP_USED, NULL, used, src);
521
522 spa_prop_add_list(nv, ZPOOL_PROP_CHECKPOINT, NULL,
523 spa->spa_checkpoint_info.sci_dspace, src);
524 spa_prop_add_list(nv, ZPOOL_PROP_READONLY, NULL,
525 (spa_mode(spa) == SPA_MODE_READ), src);
526
527 spa_prop_add_list(nv, ZPOOL_PROP_DEDUPRATIO, NULL,
528 ddt_get_pool_dedup_ratio(spa), src);
529 spa_prop_add_list(nv, ZPOOL_PROP_DEDUPUSED, NULL,
530 ddt_get_dedup_used(spa), src);
531 spa_prop_add_list(nv, ZPOOL_PROP_DEDUPSAVED, NULL,
532 ddt_get_dedup_saved(spa), src);
533 spa_prop_add_list(nv, ZPOOL_PROP_BCLONEUSED, NULL,
534 brt_get_used(spa), src);
535 spa_prop_add_list(nv, ZPOOL_PROP_BCLONESAVED, NULL,
536 brt_get_saved(spa), src);
537 spa_prop_add_list(nv, ZPOOL_PROP_BCLONERATIO, NULL,
538 brt_get_ratio(spa), src);
539
540 spa_prop_add_list(nv, ZPOOL_PROP_DEDUP_TABLE_SIZE, NULL,
541 ddt_get_ddt_dsize(spa), src);
542 spa_prop_add_list(nv, ZPOOL_PROP_HEALTH, NULL,
543 rvd->vdev_state, src);
544 spa_prop_add_list(nv, ZPOOL_PROP_LAST_SCRUBBED_TXG, NULL,
545 spa_get_last_scrubbed_txg(spa), src);
546
547 version = spa_version(spa);
548 if (version == zpool_prop_default_numeric(ZPOOL_PROP_VERSION)) {
549 spa_prop_add_list(nv, ZPOOL_PROP_VERSION, NULL,
550 version, ZPROP_SRC_DEFAULT);
551 } else {
552 spa_prop_add_list(nv, ZPOOL_PROP_VERSION, NULL,
553 version, ZPROP_SRC_LOCAL);
554 }
555 spa_prop_add_list(nv, ZPOOL_PROP_LOAD_GUID,
556 NULL, spa_load_guid(spa), src);
557 }
558
559 if (pool != NULL) {
560 /*
561 * The $FREE directory was introduced in SPA_VERSION_DEADLISTS; when
562 * opening pools created before this version, freedir will be NULL.
563 */
564 if (pool->dp_free_dir != NULL) {
565 spa_prop_add_list(nv, ZPOOL_PROP_FREEING, NULL,
566 dsl_dir_phys(pool->dp_free_dir)->dd_used_bytes,
567 src);
568 } else {
569 spa_prop_add_list(nv, ZPOOL_PROP_FREEING,
570 NULL, 0, src);
571 }
572
573 if (pool->dp_leak_dir != NULL) {
574 spa_prop_add_list(nv, ZPOOL_PROP_LEAKED, NULL,
575 dsl_dir_phys(pool->dp_leak_dir)->dd_used_bytes,
576 src);
577 } else {
578 spa_prop_add_list(nv, ZPOOL_PROP_LEAKED,
579 NULL, 0, src);
580 }
581 }
582
583 spa_prop_add_list(nv, ZPOOL_PROP_GUID, NULL, spa_guid(spa), src);
584
585 if (spa->spa_comment != NULL) {
586 spa_prop_add_list(nv, ZPOOL_PROP_COMMENT, spa->spa_comment,
587 0, ZPROP_SRC_LOCAL);
588 }
589
590 if (spa->spa_compatibility != NULL) {
591 spa_prop_add_list(nv, ZPOOL_PROP_COMPATIBILITY,
592 spa->spa_compatibility, 0, ZPROP_SRC_LOCAL);
593 }
594
595 if (spa->spa_root != NULL)
596 spa_prop_add_list(nv, ZPOOL_PROP_ALTROOT, spa->spa_root,
597 0, ZPROP_SRC_LOCAL);
598
599 if (spa_feature_is_enabled(spa, SPA_FEATURE_LARGE_BLOCKS)) {
600 spa_prop_add_list(nv, ZPOOL_PROP_MAXBLOCKSIZE, NULL,
601 MIN(zfs_max_recordsize, SPA_MAXBLOCKSIZE), ZPROP_SRC_NONE);
602 } else {
603 spa_prop_add_list(nv, ZPOOL_PROP_MAXBLOCKSIZE, NULL,
604 SPA_OLD_MAXBLOCKSIZE, ZPROP_SRC_NONE);
605 }
606
607 if (spa_feature_is_enabled(spa, SPA_FEATURE_LARGE_DNODE)) {
608 spa_prop_add_list(nv, ZPOOL_PROP_MAXDNODESIZE, NULL,
609 DNODE_MAX_SIZE, ZPROP_SRC_NONE);
610 } else {
611 spa_prop_add_list(nv, ZPOOL_PROP_MAXDNODESIZE, NULL,
612 DNODE_MIN_SIZE, ZPROP_SRC_NONE);
613 }
614
615 if ((dp = list_head(&spa->spa_config_list)) != NULL) {
616 if (dp->scd_path == NULL) {
617 spa_prop_add_list(nv, ZPOOL_PROP_CACHEFILE,
618 "none", 0, ZPROP_SRC_LOCAL);
619 } else if (strcmp(dp->scd_path, spa_config_path) != 0) {
620 spa_prop_add_list(nv, ZPOOL_PROP_CACHEFILE,
621 dp->scd_path, 0, ZPROP_SRC_LOCAL);
622 }
623 }
624 }
625
626 /*
627 * Get zpool property values.
628 */
629 int
630 spa_prop_get(spa_t *spa, nvlist_t *nv)
631 {
632 objset_t *mos = spa->spa_meta_objset;
633 zap_cursor_t zc;
634 zap_attribute_t *za;
635 dsl_pool_t *dp;
636 int err = 0;
637
638 dp = spa_get_dsl(spa);
639 dsl_pool_config_enter(dp, FTAG);
640 za = zap_attribute_alloc();
641 mutex_enter(&spa->spa_props_lock);
642
643 /*
644 * Get properties from the spa config.
645 */
646 spa_prop_get_config(spa, nv);
647
648 /* If no pool property object, no more prop to get. */
649 if (mos == NULL || spa->spa_pool_props_object == 0)
650 goto out;
651
652 /*
653 * Get properties from the MOS pool property object.
654 */
655 for (zap_cursor_init(&zc, mos, spa->spa_pool_props_object);
656 (err = zap_cursor_retrieve(&zc, za)) == 0;
657 zap_cursor_advance(&zc)) {
658 uint64_t intval = 0;
659 char *strval = NULL;
660 zprop_source_t src = ZPROP_SRC_DEFAULT;
661 zpool_prop_t prop;
662
663 if ((prop = zpool_name_to_prop(za->za_name)) ==
664 ZPOOL_PROP_INVAL && !zfs_prop_user(za->za_name))
665 continue;
666
667 switch (za->za_integer_length) {
668 case 8:
669 /* integer property */
670 if (za->za_first_integer !=
671 zpool_prop_default_numeric(prop))
672 src = ZPROP_SRC_LOCAL;
673
674 if (prop == ZPOOL_PROP_BOOTFS) {
675 dsl_dataset_t *ds = NULL;
676
677 err = dsl_dataset_hold_obj(dp,
678 za->za_first_integer, FTAG, &ds);
679 if (err != 0)
680 break;
681
682 strval = kmem_alloc(ZFS_MAX_DATASET_NAME_LEN,
683 KM_SLEEP);
684 dsl_dataset_name(ds, strval);
685 dsl_dataset_rele(ds, FTAG);
686 } else {
687 strval = NULL;
688 intval = za->za_first_integer;
689 }
690
691 spa_prop_add_list(nv, prop, strval, intval, src);
692
693 if (strval != NULL)
694 kmem_free(strval, ZFS_MAX_DATASET_NAME_LEN);
695
696 break;
697
698 case 1:
699 /* string property */
700 strval = kmem_alloc(za->za_num_integers, KM_SLEEP);
701 err = zap_lookup(mos, spa->spa_pool_props_object,
702 za->za_name, 1, za->za_num_integers, strval);
703 if (err) {
704 kmem_free(strval, za->za_num_integers);
705 break;
706 }
707 if (prop != ZPOOL_PROP_INVAL) {
708 spa_prop_add_list(nv, prop, strval, 0, src);
709 } else {
710 src = ZPROP_SRC_LOCAL;
711 spa_prop_add_user(nv, za->za_name, strval,
712 src);
713 }
714 kmem_free(strval, za->za_num_integers);
715 break;
716
717 default:
718 break;
719 }
720 }
721 zap_cursor_fini(&zc);
722 out:
723 mutex_exit(&spa->spa_props_lock);
724 dsl_pool_config_exit(dp, FTAG);
725 zap_attribute_free(za);
726
727 if (err && err != ENOENT)
728 return (err);
729
730 return (0);
731 }
732
733 /*
734 * Validate the given pool properties nvlist and modify the list
735 * for the property values to be set.
736 */
737 static int
738 spa_prop_validate(spa_t *spa, nvlist_t *props)
739 {
740 nvpair_t *elem;
741 int error = 0, reset_bootfs = 0;
742 uint64_t objnum = 0;
743 boolean_t has_feature = B_FALSE;
744
745 elem = NULL;
746 while ((elem = nvlist_next_nvpair(props, elem)) != NULL) {
747 uint64_t intval;
748 const char *strval, *slash, *check, *fname;
749 const char *propname = nvpair_name(elem);
750 zpool_prop_t prop = zpool_name_to_prop(propname);
751
752 switch (prop) {
753 case ZPOOL_PROP_INVAL:
754 /*
755 * Sanitize the input.
756 */
757 if (zfs_prop_user(propname)) {
758 if (strlen(propname) >= ZAP_MAXNAMELEN) {
759 error = SET_ERROR(ENAMETOOLONG);
760 break;
761 }
762
763 if (strlen(fnvpair_value_string(elem)) >=
764 ZAP_MAXVALUELEN) {
765 error = SET_ERROR(E2BIG);
766 break;
767 }
768 } else if (zpool_prop_feature(propname)) {
769 if (nvpair_type(elem) != DATA_TYPE_UINT64) {
770 error = SET_ERROR(EINVAL);
771 break;
772 }
773
774 if (nvpair_value_uint64(elem, &intval) != 0) {
775 error = SET_ERROR(EINVAL);
776 break;
777 }
778
779 if (intval != 0) {
780 error = SET_ERROR(EINVAL);
781 break;
782 }
783
784 fname = strchr(propname, '@') + 1;
785 if (zfeature_lookup_name(fname, NULL) != 0) {
786 error = SET_ERROR(EINVAL);
787 break;
788 }
789
790 has_feature = B_TRUE;
791 } else {
792 error = SET_ERROR(EINVAL);
793 break;
794 }
795 break;
796
797 case ZPOOL_PROP_VERSION:
798 error = nvpair_value_uint64(elem, &intval);
799 if (!error &&
800 (intval < spa_version(spa) ||
801 intval > SPA_VERSION_BEFORE_FEATURES ||
802 has_feature))
803 error = SET_ERROR(EINVAL);
804 break;
805
806 case ZPOOL_PROP_DEDUP_TABLE_QUOTA:
807 error = nvpair_value_uint64(elem, &intval);
808 break;
809
810 case ZPOOL_PROP_DELEGATION:
811 case ZPOOL_PROP_AUTOREPLACE:
812 case ZPOOL_PROP_LISTSNAPS:
813 case ZPOOL_PROP_AUTOEXPAND:
814 case ZPOOL_PROP_AUTOTRIM:
815 error = nvpair_value_uint64(elem, &intval);
816 if (!error && intval > 1)
817 error = SET_ERROR(EINVAL);
818 break;
819
820 case ZPOOL_PROP_MULTIHOST:
821 error = nvpair_value_uint64(elem, &intval);
822 if (!error && intval > 1)
823 error = SET_ERROR(EINVAL);
824
825 if (!error) {
826 uint32_t hostid = zone_get_hostid(NULL);
827 if (hostid)
828 spa->spa_hostid = hostid;
829 else
830 error = SET_ERROR(ENOTSUP);
831 }
832
833 break;
834
835 case ZPOOL_PROP_BOOTFS:
836 /*
837 * If the pool version is less than SPA_VERSION_BOOTFS,
838 * or the pool is still being created (version == 0),
839 * the bootfs property cannot be set.
840 */
841 if (spa_version(spa) < SPA_VERSION_BOOTFS) {
842 error = SET_ERROR(ENOTSUP);
843 break;
844 }
845
846 /*
847 * Make sure the vdev config is bootable
848 */
849 if (!vdev_is_bootable(spa->spa_root_vdev)) {
850 error = SET_ERROR(ENOTSUP);
851 break;
852 }
853
854 reset_bootfs = 1;
855
856 error = nvpair_value_string(elem, &strval);
857
858 if (!error) {
859 objset_t *os;
860
861 if (strval == NULL || strval[0] == '\0') {
862 objnum = zpool_prop_default_numeric(
863 ZPOOL_PROP_BOOTFS);
864 break;
865 }
866
867 error = dmu_objset_hold(strval, FTAG, &os);
868 if (error != 0)
869 break;
870
871 /* Must be ZPL. */
872 if (dmu_objset_type(os) != DMU_OST_ZFS) {
873 error = SET_ERROR(ENOTSUP);
874 } else {
875 objnum = dmu_objset_id(os);
876 }
877 dmu_objset_rele(os, FTAG);
878 }
879 break;
880
881 case ZPOOL_PROP_FAILUREMODE:
882 error = nvpair_value_uint64(elem, &intval);
883 if (!error && intval > ZIO_FAILURE_MODE_PANIC)
884 error = SET_ERROR(EINVAL);
885
886 /*
887 * This is a special case which only occurs when
888 * the pool has completely failed. This allows
889 * the user to change the in-core failmode property
890 * without syncing it out to disk (I/Os might
891 * currently be blocked). We do this by returning
892 * EIO to the caller (spa_prop_set) to trick it
893 * into thinking we encountered a property validation
894 * error.
895 */
896 if (!error && spa_suspended(spa)) {
897 spa->spa_failmode = intval;
898 error = SET_ERROR(EIO);
899 }
900 break;
901
902 case ZPOOL_PROP_CACHEFILE:
903 if ((error = nvpair_value_string(elem, &strval)) != 0)
904 break;
905
906 if (strval[0] == '\0')
907 break;
908
909 if (strcmp(strval, "none") == 0)
910 break;
911
912 if (strval[0] != '/') {
913 error = SET_ERROR(EINVAL);
914 break;
915 }
916
917 slash = strrchr(strval, '/');
918 ASSERT(slash != NULL);
919
920 if (slash[1] == '\0' || strcmp(slash, "/.") == 0 ||
921 strcmp(slash, "/..") == 0)
922 error = SET_ERROR(EINVAL);
923 break;
924
925 case ZPOOL_PROP_COMMENT:
926 if ((error = nvpair_value_string(elem, &strval)) != 0)
927 break;
928 for (check = strval; *check != '\0'; check++) {
929 if (!isprint(*check)) {
930 error = SET_ERROR(EINVAL);
931 break;
932 }
933 }
934 if (strlen(strval) > ZPROP_MAX_COMMENT)
935 error = SET_ERROR(E2BIG);
936 break;
937
938 default:
939 break;
940 }
941
942 if (error)
943 break;
944 }
945
946 (void) nvlist_remove_all(props,
947 zpool_prop_to_name(ZPOOL_PROP_DEDUPDITTO));
948
949 if (!error && reset_bootfs) {
950 error = nvlist_remove(props,
951 zpool_prop_to_name(ZPOOL_PROP_BOOTFS), DATA_TYPE_STRING);
952
953 if (!error) {
954 error = nvlist_add_uint64(props,
955 zpool_prop_to_name(ZPOOL_PROP_BOOTFS), objnum);
956 }
957 }
958
959 return (error);
960 }
961
962 void
963 spa_configfile_set(spa_t *spa, nvlist_t *nvp, boolean_t need_sync)
964 {
965 const char *cachefile;
966 spa_config_dirent_t *dp;
967
968 if (nvlist_lookup_string(nvp, zpool_prop_to_name(ZPOOL_PROP_CACHEFILE),
969 &cachefile) != 0)
970 return;
971
972 dp = kmem_alloc(sizeof (spa_config_dirent_t),
973 KM_SLEEP);
974
975 if (cachefile[0] == '\0')
976 dp->scd_path = spa_strdup(spa_config_path);
977 else if (strcmp(cachefile, "none") == 0)
978 dp->scd_path = NULL;
979 else
980 dp->scd_path = spa_strdup(cachefile);
981
982 list_insert_head(&spa->spa_config_list, dp);
983 if (need_sync)
984 spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE);
985 }
986
987 int
988 spa_prop_set(spa_t *spa, nvlist_t *nvp)
989 {
990 int error;
991 nvpair_t *elem = NULL;
992 boolean_t need_sync = B_FALSE;
993
994 if ((error = spa_prop_validate(spa, nvp)) != 0)
995 return (error);
996
997 while ((elem = nvlist_next_nvpair(nvp, elem)) != NULL) {
998 zpool_prop_t prop = zpool_name_to_prop(nvpair_name(elem));
999
1000 if (prop == ZPOOL_PROP_CACHEFILE ||
1001 prop == ZPOOL_PROP_ALTROOT ||
1002 prop == ZPOOL_PROP_READONLY)
1003 continue;
1004
1005 if (prop == ZPOOL_PROP_INVAL &&
1006 zfs_prop_user(nvpair_name(elem))) {
1007 need_sync = B_TRUE;
1008 break;
1009 }
1010
1011 if (prop == ZPOOL_PROP_VERSION || prop == ZPOOL_PROP_INVAL) {
1012 uint64_t ver = 0;
1013
1014 if (prop == ZPOOL_PROP_VERSION) {
1015 VERIFY0(nvpair_value_uint64(elem, &ver));
1016 } else {
1017 ASSERT(zpool_prop_feature(nvpair_name(elem)));
1018 ver = SPA_VERSION_FEATURES;
1019 need_sync = B_TRUE;
1020 }
1021
1022 /* Save time if the version is already set. */
1023 if (ver == spa_version(spa))
1024 continue;
1025
1026 /*
1027 * In addition to the pool directory object, we might
1028 * create the pool properties object, the features for
1029 * read object, the features for write object, or the
1030 * feature descriptions object.
1031 */
1032 error = dsl_sync_task(spa->spa_name, NULL,
1033 spa_sync_version, &ver,
1034 6, ZFS_SPACE_CHECK_RESERVED);
1035 if (error)
1036 return (error);
1037 continue;
1038 }
1039
1040 need_sync = B_TRUE;
1041 break;
1042 }
1043
1044 if (need_sync) {
1045 return (dsl_sync_task(spa->spa_name, NULL, spa_sync_props,
1046 nvp, 6, ZFS_SPACE_CHECK_RESERVED));
1047 }
1048
1049 return (0);
1050 }
1051
1052 /*
1053 * If the bootfs property value is dsobj, clear it.
1054 */
1055 void
1056 spa_prop_clear_bootfs(spa_t *spa, uint64_t dsobj, dmu_tx_t *tx)
1057 {
1058 if (spa->spa_bootfs == dsobj && spa->spa_pool_props_object != 0) {
1059 VERIFY(zap_remove(spa->spa_meta_objset,
1060 spa->spa_pool_props_object,
1061 zpool_prop_to_name(ZPOOL_PROP_BOOTFS), tx) == 0);
1062 spa->spa_bootfs = 0;
1063 }
1064 }
1065
1066 static int
1067 spa_change_guid_check(void *arg, dmu_tx_t *tx)
1068 {
1069 uint64_t *newguid __maybe_unused = arg;
1070 spa_t *spa = dmu_tx_pool(tx)->dp_spa;
1071 vdev_t *rvd = spa->spa_root_vdev;
1072 uint64_t vdev_state;
1073
1074 if (spa_feature_is_active(spa, SPA_FEATURE_POOL_CHECKPOINT)) {
1075 int error = (spa_has_checkpoint(spa)) ?
1076 ZFS_ERR_CHECKPOINT_EXISTS : ZFS_ERR_DISCARDING_CHECKPOINT;
1077 return (SET_ERROR(error));
1078 }
1079
1080 spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
1081 vdev_state = rvd->vdev_state;
1082 spa_config_exit(spa, SCL_STATE, FTAG);
1083
1084 if (vdev_state != VDEV_STATE_HEALTHY)
1085 return (SET_ERROR(ENXIO));
1086
1087 ASSERT3U(spa_guid(spa), !=, *newguid);
1088
1089 return (0);
1090 }
1091
1092 static void
1093 spa_change_guid_sync(void *arg, dmu_tx_t *tx)
1094 {
1095 uint64_t *newguid = arg;
1096 spa_t *spa = dmu_tx_pool(tx)->dp_spa;
1097 uint64_t oldguid;
1098 vdev_t *rvd = spa->spa_root_vdev;
1099
1100 oldguid = spa_guid(spa);
1101
1102 spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
1103 rvd->vdev_guid = *newguid;
1104 rvd->vdev_guid_sum += (*newguid - oldguid);
1105 vdev_config_dirty(rvd);
1106 spa_config_exit(spa, SCL_STATE, FTAG);
1107
1108 spa_history_log_internal(spa, "guid change", tx, "old=%llu new=%llu",
1109 (u_longlong_t)oldguid, (u_longlong_t)*newguid);
1110 }
1111
1112 /*
1113 * Change the GUID for the pool. This is done so that we can later
1114 * re-import a pool built from a clone of our own vdevs. We will modify
1115 * the root vdev's guid, our own pool guid, and then mark all of our
1116 * vdevs dirty. Note that we must make sure that all our vdevs are
1117 * online when we do this, or else any vdevs that weren't present
1118 * would be orphaned from our pool. We are also going to issue a
1119 * sysevent to update any watchers.
1120 *
1121 * The GUID of the pool will be changed to the value pointed to by guidp.
1122 * The GUID may not be set to the reserved value of 0. If guidp is NULL,
1123 * a new GUID will be generated.
1124 */
1125 int
1126 spa_change_guid(spa_t *spa, const uint64_t *guidp)
1127 {
1128 uint64_t guid;
1129 int error;
1130
1131 mutex_enter(&spa->spa_vdev_top_lock);
1132 spa_namespace_enter(FTAG);
1133
1134 if (guidp != NULL) {
1135 guid = *guidp;
1136 if (guid == 0) {
1137 error = SET_ERROR(EINVAL);
1138 goto out;
1139 }
1140
1141 if (spa_guid_exists(guid, 0)) {
1142 error = SET_ERROR(EEXIST);
1143 goto out;
1144 }
1145 } else {
1146 guid = spa_generate_guid(NULL);
1147 }
1148
1149 error = dsl_sync_task(spa->spa_name, spa_change_guid_check,
1150 spa_change_guid_sync, &guid, 5, ZFS_SPACE_CHECK_RESERVED);
1151
1152 if (error == 0) {
1153 /*
1154 * Clear the kobj flag from all the vdevs to allow
1155 * vdev_cache_process_kobj_evt() to post events to all the
1156 * vdevs since GUID is updated.
1157 */
1158 vdev_clear_kobj_evt(spa->spa_root_vdev);
1159 for (int i = 0; i < spa->spa_l2cache.sav_count; i++)
1160 vdev_clear_kobj_evt(spa->spa_l2cache.sav_vdevs[i]);
1161
1162 spa_write_cachefile(spa, B_FALSE, B_TRUE, B_TRUE);
1163 spa_event_notify(spa, NULL, NULL, ESC_ZFS_POOL_REGUID);
1164 }
1165
1166 out:
1167 spa_namespace_exit(FTAG);
1168 mutex_exit(&spa->spa_vdev_top_lock);
1169
1170 return (error);
1171 }
1172
1173 /*
1174 * ==========================================================================
1175 * SPA state manipulation (open/create/destroy/import/export)
1176 * ==========================================================================
1177 */
1178
1179 static int
1180 spa_error_entry_compare(const void *a, const void *b)
1181 {
1182 const spa_error_entry_t *sa = (const spa_error_entry_t *)a;
1183 const spa_error_entry_t *sb = (const spa_error_entry_t *)b;
1184 int ret;
1185
1186 ret = memcmp(&sa->se_bookmark, &sb->se_bookmark,
1187 sizeof (zbookmark_phys_t));
1188
1189 return (TREE_ISIGN(ret));
1190 }
1191
1192 /*
1193 * Utility function which retrieves copies of the current logs and
1194 * re-initializes them in the process.
1195 */
1196 void
1197 spa_get_errlists(spa_t *spa, avl_tree_t *last, avl_tree_t *scrub)
1198 {
1199 ASSERT(MUTEX_HELD(&spa->spa_errlist_lock));
1200
1201 memcpy(last, &spa->spa_errlist_last, sizeof (avl_tree_t));
1202 memcpy(scrub, &spa->spa_errlist_scrub, sizeof (avl_tree_t));
1203
1204 avl_create(&spa->spa_errlist_scrub,
1205 spa_error_entry_compare, sizeof (spa_error_entry_t),
1206 offsetof(spa_error_entry_t, se_avl));
1207 avl_create(&spa->spa_errlist_last,
1208 spa_error_entry_compare, sizeof (spa_error_entry_t),
1209 offsetof(spa_error_entry_t, se_avl));
1210 }
1211
1212 static void
1213 spa_taskqs_init(spa_t *spa, zio_type_t t, zio_taskq_type_t q)
1214 {
1215 const zio_taskq_info_t *ztip = &zio_taskqs[t][q];
1216 enum zti_modes mode = ztip->zti_mode;
1217 uint_t value = ztip->zti_value;
1218 uint_t count = ztip->zti_count;
1219 spa_taskqs_t *tqs = &spa->spa_zio_taskq[t][q];
1220 uint_t cpus, threads, flags = TASKQ_DYNAMIC;
1221
1222 switch (mode) {
1223 case ZTI_MODE_FIXED:
1224 ASSERT3U(value, >, 0);
1225 break;
1226
1227 case ZTI_MODE_SYNC:
1228
1229 /*
1230 * Create one wr_iss taskq for every 'zio_taskq_write_tpq' CPUs,
1231 * not to exceed the number of spa allocators, and align to it.
1232 */
1233 threads = MAX(1, boot_ncpus * zio_taskq_batch_pct / 100);
1234 count = MAX(1, threads / MAX(1, zio_taskq_write_tpq));
1235 count = MAX(count, (zio_taskq_batch_pct + 99) / 100);
1236 count = MIN(count, spa->spa_alloc_count);
1237 while (spa->spa_alloc_count % count != 0 &&
1238 spa->spa_alloc_count < count * 2)
1239 count--;
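/*
 * Worked example (assuming the default of 4 allocators): with 64 CPUs
 * and zio_taskq_batch_pct = 80, threads = 51 and count starts at
 * 51 / 16 = 3, then is reduced to 2 so that it divides the allocator
 * count evenly; each of the 2 taskqs is then sized below at
 * (80 + 1) / 2 = 40% of the online CPUs.
 */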
1240
1241 /*
1242 * zio_taskq_batch_pct is unbounded and may exceed 100%, but no
1243 * single taskq may have more threads than 100% of online cpus.
1244 */
1245 value = (zio_taskq_batch_pct + count / 2) / count;
1246 value = MIN(value, 100);
1247 flags |= TASKQ_THREADS_CPU_PCT;
1248 break;
1249
1250 case ZTI_MODE_SCALE:
1251 /*
1252 * We want more taskqs to reduce lock contention, but we want fewer for
1253 * less for better request ordering and CPU utilization.
1254 */
1255 threads = MAX(1, boot_ncpus * zio_taskq_batch_pct / 100);
1256 threads = MAX(threads, value);
1257 if (zio_taskq_batch_tpq > 0) {
1258 count = MAX(1, (threads + zio_taskq_batch_tpq / 2) /
1259 zio_taskq_batch_tpq);
1260 } else {
1261 /*
1262 * Prefer 6 threads per taskq, but no more taskqs
1263 * than threads in them on large systems. For 80%:
1264 *
1265 * taskq taskq total
1266 * cpus taskqs percent threads threads
1267 * ------- ------- ------- ------- -------
1268 * 1 1 80% 1 1
1269 * 2 1 80% 1 1
1270 * 4 1 80% 3 3
1271 * 8 2 40% 3 6
1272 * 16 3 27% 4 12
1273 * 32 5 16% 5 25
1274 * 64 7 11% 7 49
1275 * 128 10 8% 10 100
1276 * 256 14 6% 15 210
1277 */
1278 cpus = MIN(threads, boot_ncpus);
1279 count = 1 + threads / 6;
1280 while (count * count > cpus)
1281 count--;
1282 }
1283
1284 /*
1285 * Try to represent the number of threads per taskq as percent
1286 * of online CPUs to allow scaling with later online/offline.
1287 * Fall back to absolute numbers if can't.
1288 */
1289 value = (threads * 100 + boot_ncpus * count / 2) /
1290 (boot_ncpus * count);
1291 if (value < 5 || value > 100)
1292 value = MAX(1, (threads + count / 2) / count);
1293 else
1294 flags |= TASKQ_THREADS_CPU_PCT;
1295 break;
1296
1297 case ZTI_MODE_NULL:
1298 tqs->stqs_count = 0;
1299 tqs->stqs_taskq = NULL;
1300 return;
1301
1302 default:
1303 panic("unrecognized mode for %s_%s taskq (%u:%u) in "
1304 "spa_taskqs_init()",
1305 zio_type_name[t], zio_taskq_types[q], mode, value);
1306 break;
1307 }
1308
1309 ASSERT3U(count, >, 0);
1310 tqs->stqs_count = count;
1311 tqs->stqs_taskq = kmem_alloc(count * sizeof (taskq_t *), KM_SLEEP);
1312
1313 for (uint_t i = 0; i < count; i++) {
1314 taskq_t *tq;
1315 char name[32];
1316
1317 if (count > 1)
1318 (void) snprintf(name, sizeof (name), "%s_%s_%u",
1319 zio_type_name[t], zio_taskq_types[q], i);
1320 else
1321 (void) snprintf(name, sizeof (name), "%s_%s",
1322 zio_type_name[t], zio_taskq_types[q]);
1323
1324 #ifdef HAVE_SYSDC
1325 if (zio_taskq_sysdc && spa->spa_proc != &p0) {
1326 (void) zio_taskq_basedc;
1327 tq = taskq_create_sysdc(name, value, 50, INT_MAX,
1328 spa->spa_proc, zio_taskq_basedc, flags);
1329 } else {
1330 #endif
1331 /*
1332 * The write issue taskq can be extremely CPU
1333 * intensive. Run it at slightly less important
1334 * priority than the other taskqs.
1335 */
1336 const pri_t pri = (t == ZIO_TYPE_WRITE &&
1337 q == ZIO_TASKQ_ISSUE) ?
1338 wtqclsyspri : maxclsyspri;
1339 tq = taskq_create_proc(name, value, pri, 50,
1340 INT_MAX, spa->spa_proc, flags);
1341 #ifdef HAVE_SYSDC
1342 }
1343 #endif
1344
1345 tqs->stqs_taskq[i] = tq;
1346 }
1347 }
1348
1349 static void
1350 spa_taskqs_fini(spa_t *spa, zio_type_t t, zio_taskq_type_t q)
1351 {
1352 spa_taskqs_t *tqs = &spa->spa_zio_taskq[t][q];
1353
1354 if (tqs->stqs_taskq == NULL) {
1355 ASSERT0(tqs->stqs_count);
1356 return;
1357 }
1358
1359 for (uint_t i = 0; i < tqs->stqs_count; i++) {
1360 ASSERT3P(tqs->stqs_taskq[i], !=, NULL);
1361 taskq_destroy(tqs->stqs_taskq[i]);
1362 }
1363
1364 kmem_free(tqs->stqs_taskq, tqs->stqs_count * sizeof (taskq_t *));
1365 tqs->stqs_taskq = NULL;
1366 }
1367
1368 #ifdef _KERNEL
1369 /*
1370 * The READ and WRITE rows of zio_taskqs are configurable at module load time
1371 * by setting zio_taskq_read or zio_taskq_write.
1372 *
1373 * Example (the defaults for READ and WRITE)
1374 * zio_taskq_read='fixed,1,8 null scale null'
1375 * zio_taskq_write='sync null scale null'
1376 *
1377 * Each sets the entire row at a time.
1378 *
1379 * 'fixed' is parameterised: fixed,Q,T where Q is number of taskqs, T is number
1380 * of threads per taskq.
1381 *
1382 * 'null' can only be set on the high-priority queues (queue selection for
1383 * high-priority queues will fall back to the regular queue if the high-pri
1384 * is NULL).
1385 */
1386 static const char *const modes[ZTI_NMODES] = {
1387 "fixed", "scale", "sync", "null"
1388 };
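/*
 * For example, zio_taskq_read='fixed,1,8 null scale null' (the documented
 * default above) parses to { ZTI_P(8, 1), ZTI_NULL, ZTI_SCALE(0), ZTI_NULL }:
 * one issue taskq with 8 threads, no high-priority issue taskq, CPU-scaled
 * interrupt taskqs, and no high-priority interrupt taskq.
 */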
1389
1390 /* Parse the incoming config string. Modifies cfg */
1391 static int
1392 spa_taskq_param_set(zio_type_t t, char *cfg)
1393 {
1394 int err = 0;
1395
1396 zio_taskq_info_t row[ZIO_TASKQ_TYPES] = {{0}};
1397
1398 char *next = cfg, *tok, *c;
1399
1400 /*
1401 * Parse out each element from the string and fill `row`. The entire
1402 * row has to be set at once, so any errors are flagged by just
1403 * breaking out of this loop early.
1404 */
1405 uint_t q;
1406 for (q = 0; q < ZIO_TASKQ_TYPES; q++) {
1407 /* `next` is the start of the config */
1408 if (next == NULL)
1409 break;
1410
1411 /* Eat up leading space */
1412 while (isspace(*next))
1413 next++;
1414 if (*next == '\0')
1415 break;
1416
1417 /* Mode ends at space or end of string */
1418 tok = next;
1419 next = strchr(tok, ' ');
1420 if (next != NULL) *next++ = '\0';
1421
1422 /* Parameters start after a comma */
1423 c = strchr(tok, ',');
1424 if (c != NULL) *c++ = '\0';
1425
1426 /* Match mode string */
1427 uint_t mode;
1428 for (mode = 0; mode < ZTI_NMODES; mode++)
1429 if (strcmp(tok, modes[mode]) == 0)
1430 break;
1431 if (mode == ZTI_NMODES)
1432 break;
1433
1434 /* Invalid canary */
1435 row[q].zti_mode = ZTI_NMODES;
1436
1437 /* Per-mode setup */
1438 switch (mode) {
1439
1440 /*
1441 * FIXED is parameterised: number of queues, and number of
1442 * threads per queue.
1443 */
1444 case ZTI_MODE_FIXED: {
1445 /* No parameters? */
1446 if (c == NULL || *c == '\0')
1447 break;
1448
1449 /* Find next parameter */
1450 tok = c;
1451 c = strchr(tok, ',');
1452 if (c == NULL)
1453 break;
1454
1455 /* Take digits and convert */
1456 unsigned long long nq;
1457 if (!(isdigit(*tok)))
1458 break;
1459 err = ddi_strtoull(tok, &tok, 10, &nq);
1460 /* Must succeed and also end at the next param sep */
1461 if (err != 0 || tok != c)
1462 break;
1463
1464 /* Move past the comma */
1465 tok++;
1466 /* Need another number */
1467 if (!(isdigit(*tok)))
1468 break;
1469 /* Remember start to make sure we moved */
1470 c = tok;
1471
1472 /* Take digits */
1473 unsigned long long ntpq;
1474 err = ddi_strtoull(tok, &tok, 10, &ntpq);
1475 /* Must succeed, and moved forward */
1476 if (err != 0 || tok == c || *tok != '\0')
1477 break;
1478
1479 /*
1480 * sanity; zero queues/threads make no sense, and
1481 * 16K is almost certainly more than anyone will ever
1482 * need and avoids silly numbers like UINT32_MAX
1483 */
1484 if (nq == 0 || nq >= 16384 ||
1485 ntpq == 0 || ntpq >= 16384)
1486 break;
1487
1488 const zio_taskq_info_t zti = ZTI_P(ntpq, nq);
1489 row[q] = zti;
1490 break;
1491 }
1492
1493 /*
1494 * SCALE is optionally parameterised by minimum number of
1495 * threads.
1496 */
1497 case ZTI_MODE_SCALE: {
1498 unsigned long long mint = 0;
1499 if (c != NULL && *c != '\0') {
1500 /* Need a number */
1501 if (!(isdigit(*c)))
1502 break;
1503 tok = c;
1504
1505 /* Take digits */
1506 err = ddi_strtoull(tok, &tok, 10, &mint);
1507 /* Must succeed, and moved forward */
1508 if (err != 0 || tok == c || *tok != '\0')
1509 break;
1510
1511 /* Sanity check */
1512 if (mint >= 16384)
1513 break;
1514 }
1515
1516 const zio_taskq_info_t zti = ZTI_SCALE(mint);
1517 row[q] = zti;
1518 break;
1519 }
1520
1521 case ZTI_MODE_SYNC: {
1522 const zio_taskq_info_t zti = ZTI_SYNC;
1523 row[q] = zti;
1524 break;
1525 }
1526
1527 case ZTI_MODE_NULL: {
1528 /*
1529 * Can only null the high-priority queues; the general-
1530 * purpose ones have to exist.
1531 */
1532 if (q != ZIO_TASKQ_ISSUE_HIGH &&
1533 q != ZIO_TASKQ_INTERRUPT_HIGH)
1534 break;
1535
1536 const zio_taskq_info_t zti = ZTI_NULL;
1537 row[q] = zti;
1538 break;
1539 }
1540
1541 default:
1542 break;
1543 }
1544
1545 /* Ensure we set a mode */
1546 if (row[q].zti_mode == ZTI_NMODES)
1547 break;
1548 }
1549
1550 /* Didn't get a full row, fail */
1551 if (q < ZIO_TASKQ_TYPES)
1552 return (SET_ERROR(EINVAL));
1553
1554 /* Eat trailing space */
1555 if (next != NULL)
1556 while (isspace(*next))
1557 next++;
1558
1559 /* If there's anything left over then fail */
1560 if (next != NULL && *next != '\0')
1561 return (SET_ERROR(EINVAL));
1562
1563 /* Success! Copy it into the real config */
1564 for (q = 0; q < ZIO_TASKQ_TYPES; q++)
1565 zio_taskqs[t][q] = row[q];
1566
1567 return (0);
1568 }
1569
1570 static int
1571 spa_taskq_param_get(zio_type_t t, char *buf, boolean_t add_newline)
1572 {
1573 int pos = 0;
1574
1575 /* Build parameter string from live config */
1576 const char *sep = "";
1577 for (uint_t q = 0; q < ZIO_TASKQ_TYPES; q++) {
1578 const zio_taskq_info_t *zti = &zio_taskqs[t][q];
1579 if (zti->zti_mode == ZTI_MODE_FIXED)
1580 pos += sprintf(&buf[pos], "%s%s,%u,%u", sep,
1581 modes[zti->zti_mode], zti->zti_count,
1582 zti->zti_value);
1583 else if (zti->zti_mode == ZTI_MODE_SCALE && zti->zti_value > 0)
1584 pos += sprintf(&buf[pos], "%s%s,%u", sep,
1585 modes[zti->zti_mode], zti->zti_value);
1586 else
1587 pos += sprintf(&buf[pos], "%s%s", sep,
1588 modes[zti->zti_mode]);
1589 sep = " ";
1590 }
1591
1592 if (add_newline)
1593 buf[pos++] = '\n';
1594 buf[pos] = '\0';
1595
1596 return (pos);
1597 }
1598
1599 #ifdef __linux__
1600 static int
1601 spa_taskq_read_param_set(const char *val, zfs_kernel_param_t *kp)
1602 {
1603 char *cfg = kmem_strdup(val);
1604 int err = spa_taskq_param_set(ZIO_TYPE_READ, cfg);
1605 kmem_strfree(cfg);
1606 return (-err);
1607 }
1608
1609 static int
1610 spa_taskq_read_param_get(char *buf, zfs_kernel_param_t *kp)
1611 {
1612 return (spa_taskq_param_get(ZIO_TYPE_READ, buf, TRUE));
1613 }
1614
1615 static int
1616 spa_taskq_write_param_set(const char *val, zfs_kernel_param_t *kp)
1617 {
1618 char *cfg = kmem_strdup(val);
1619 int err = spa_taskq_param_set(ZIO_TYPE_WRITE, cfg);
1620 kmem_strfree(cfg);
1621 return (-err);
1622 }
1623
1624 static int
1625 spa_taskq_write_param_get(char *buf, zfs_kernel_param_t *kp)
1626 {
1627 return (spa_taskq_param_get(ZIO_TYPE_WRITE, buf, TRUE));
1628 }
1629
1630 static int
1631 spa_taskq_free_param_set(const char *val, zfs_kernel_param_t *kp)
1632 {
1633 char *cfg = kmem_strdup(val);
1634 int err = spa_taskq_param_set(ZIO_TYPE_FREE, cfg);
1635 kmem_strfree(cfg);
1636 return (-err);
1637 }
1638
1639 static int
1640 spa_taskq_free_param_get(char *buf, zfs_kernel_param_t *kp)
1641 {
1642 return (spa_taskq_param_get(ZIO_TYPE_FREE, buf, TRUE));
1643 }
1644 #else
1645 /*
1646 * On FreeBSD load-time parameters can be set up before malloc() is available,
1647 * so we have to do all the parsing work on the stack.
1648 */
1649 #define SPA_TASKQ_PARAM_MAX (128)
1650
1651 static int
1652 spa_taskq_read_param(ZFS_MODULE_PARAM_ARGS)
1653 {
1654 char buf[SPA_TASKQ_PARAM_MAX];
1655 int err;
1656
1657 (void) spa_taskq_param_get(ZIO_TYPE_READ, buf, FALSE);
1658 err = sysctl_handle_string(oidp, buf, sizeof (buf), req);
1659 if (err || req->newptr == NULL)
1660 return (err);
1661 return (spa_taskq_param_set(ZIO_TYPE_READ, buf));
1662 }
1663
1664 static int
1665 spa_taskq_write_param(ZFS_MODULE_PARAM_ARGS)
1666 {
1667 char buf[SPA_TASKQ_PARAM_MAX];
1668 int err;
1669
1670 (void) spa_taskq_param_get(ZIO_TYPE_WRITE, buf, FALSE);
1671 err = sysctl_handle_string(oidp, buf, sizeof (buf), req);
1672 if (err || req->newptr == NULL)
1673 return (err);
1674 return (spa_taskq_param_set(ZIO_TYPE_WRITE, buf));
1675 }
1676
1677 static int
1678 spa_taskq_free_param(ZFS_MODULE_PARAM_ARGS)
1679 {
1680 char buf[SPA_TASKQ_PARAM_MAX];
1681 int err;
1682
1683 (void) spa_taskq_param_get(ZIO_TYPE_FREE, buf, FALSE);
1684 err = sysctl_handle_string(oidp, buf, sizeof (buf), req);
1685 if (err || req->newptr == NULL)
1686 return (err);
1687 return (spa_taskq_param_set(ZIO_TYPE_FREE, buf));
1688 }
1689 #endif
1690 #endif /* _KERNEL */
1691
1692 /*
1693 * Dispatch a task to the appropriate taskq for the ZFS I/O type and priority.
1694 * Note that a type may have multiple discrete taskqs to avoid lock contention
1695 * on the taskq itself.
1696 */
1697 void
1698 spa_taskq_dispatch(spa_t *spa, zio_type_t t, zio_taskq_type_t q,
1699 task_func_t *func, zio_t *zio, boolean_t cutinline)
1700 {
1701 spa_taskqs_t *tqs = &spa->spa_zio_taskq[t][q];
1702 taskq_t *tq;
1703
1704 ASSERT3P(tqs->stqs_taskq, !=, NULL);
1705 ASSERT3U(tqs->stqs_count, !=, 0);
1706
1707 /*
1708 * NB: We are assuming that the zio can only be dispatched
1709 * to a single taskq at a time. It would be a grievous error
1710 * to dispatch the zio to another taskq at the same time.
1711 */
1712 ASSERT(zio);
1713 ASSERT(taskq_empty_ent(&zio->io_tqent));
1714
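/*
 * Taskq selection: a single taskq leaves no choice; write issue zios
 * that carry an allocator are spread by allocator so a given allocator
 * always maps to the same taskq; everything else is spread
 * pseudo-randomly using the current time.
 */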
1715 if (tqs->stqs_count == 1) {
1716 tq = tqs->stqs_taskq[0];
1717 } else if ((t == ZIO_TYPE_WRITE) && (q == ZIO_TASKQ_ISSUE) &&
1718 ZIO_HAS_ALLOCATOR(zio)) {
1719 tq = tqs->stqs_taskq[zio->io_allocator % tqs->stqs_count];
1720 } else {
1721 tq = tqs->stqs_taskq[((uint64_t)gethrtime()) % tqs->stqs_count];
1722 }
1723
1724 taskq_dispatch_ent(tq, func, zio, cutinline ? TQ_FRONT : 0,
1725 &zio->io_tqent);
1726 }
1727
1728 static void
1729 spa_create_zio_taskqs(spa_t *spa)
1730 {
1731 for (int t = 0; t < ZIO_TYPES; t++) {
1732 for (int q = 0; q < ZIO_TASKQ_TYPES; q++) {
1733 spa_taskqs_init(spa, t, q);
1734 }
1735 }
1736 }
1737
1738 #if defined(_KERNEL) && defined(HAVE_SPA_THREAD)
1739 static void
1740 spa_thread(void *arg)
1741 {
1742 psetid_t zio_taskq_psrset_bind = PS_NONE;
1743 callb_cpr_t cprinfo;
1744
1745 spa_t *spa = arg;
1746 user_t *pu = PTOU(curproc);
1747
1748 CALLB_CPR_INIT(&cprinfo, &spa->spa_proc_lock, callb_generic_cpr,
1749 spa->spa_name);
1750
1751 ASSERT(curproc != &p0);
1752 (void) snprintf(pu->u_psargs, sizeof (pu->u_psargs),
1753 "zpool-%s", spa->spa_name);
1754 (void) strlcpy(pu->u_comm, pu->u_psargs, sizeof (pu->u_comm));
1755
1756 /* bind this thread to the requested psrset */
1757 if (zio_taskq_psrset_bind != PS_NONE) {
1758 pool_lock();
1759 mutex_enter(&cpu_lock);
1760 mutex_enter(&pidlock);
1761 mutex_enter(&curproc->p_lock);
1762
1763 if (cpupart_bind_thread(curthread, zio_taskq_psrset_bind,
1764 0, NULL, NULL) == 0) {
1765 curthread->t_bind_pset = zio_taskq_psrset_bind;
1766 } else {
1767 cmn_err(CE_WARN,
1768 "Couldn't bind process for zfs pool \"%s\" to "
1769 "pset %d\n", spa->spa_name, zio_taskq_psrset_bind);
1770 }
1771
1772 mutex_exit(&curproc->p_lock);
1773 mutex_exit(&pidlock);
1774 mutex_exit(&cpu_lock);
1775 pool_unlock();
1776 }
1777
1778 #ifdef HAVE_SYSDC
1779 if (zio_taskq_sysdc) {
1780 sysdc_thread_enter(curthread, 100, 0);
1781 }
1782 #endif
1783
1784 spa->spa_proc = curproc;
1785 spa->spa_did = curthread->t_did;
1786
1787 spa_create_zio_taskqs(spa);
1788
1789 mutex_enter(&spa->spa_proc_lock);
1790 ASSERT(spa->spa_proc_state == SPA_PROC_CREATED);
1791
1792 spa->spa_proc_state = SPA_PROC_ACTIVE;
1793 cv_broadcast(&spa->spa_proc_cv);
1794
1795 CALLB_CPR_SAFE_BEGIN(&cprinfo);
1796 while (spa->spa_proc_state == SPA_PROC_ACTIVE)
1797 cv_wait(&spa->spa_proc_cv, &spa->spa_proc_lock);
1798 CALLB_CPR_SAFE_END(&cprinfo, &spa->spa_proc_lock);
1799
1800 ASSERT(spa->spa_proc_state == SPA_PROC_DEACTIVATE);
1801 spa->spa_proc_state = SPA_PROC_GONE;
1802 spa->spa_proc = &p0;
1803 cv_broadcast(&spa->spa_proc_cv);
1804 CALLB_CPR_EXIT(&cprinfo); /* drops spa_proc_lock */
1805
1806 mutex_enter(&curproc->p_lock);
1807 lwp_exit();
1808 }
1809 #endif
1810
1811 extern metaslab_ops_t *metaslab_allocator(spa_t *spa);
1812
1813 /*
1814 * Activate an uninitialized pool.
1815 */
1816 static void
1817 spa_activate(spa_t *spa, spa_mode_t mode)
1818 {
1819 metaslab_ops_t *msp = metaslab_allocator(spa);
1820 ASSERT(spa->spa_state == POOL_STATE_UNINITIALIZED);
1821
1822 spa->spa_state = POOL_STATE_ACTIVE;
1823 spa->spa_final_txg = UINT64_MAX;
1824 spa->spa_mode = mode;
1825 spa->spa_read_spacemaps = spa_mode_readable_spacemaps;
1826
1827 spa->spa_normal_class = metaslab_class_create(spa, "normal",
1828 msp, B_FALSE);
1829 spa->spa_log_class = metaslab_class_create(spa, "log", msp, B_TRUE);
1830 spa->spa_embedded_log_class = metaslab_class_create(spa,
1831 "embedded_log", msp, B_TRUE);
1832 spa->spa_special_class = metaslab_class_create(spa, "special",
1833 msp, B_FALSE);
1834 spa->spa_special_embedded_log_class = metaslab_class_create(spa,
1835 "special_embedded_log", msp, B_TRUE);
1836 spa->spa_dedup_class = metaslab_class_create(spa, "dedup",
1837 msp, B_FALSE);
1838
1839 /* Try to create a covering process */
1840 mutex_enter(&spa->spa_proc_lock);
1841 ASSERT(spa->spa_proc_state == SPA_PROC_NONE);
1842 ASSERT(spa->spa_proc == &p0);
1843 spa->spa_did = 0;
1844
1845 #ifdef HAVE_SPA_THREAD
1846 /* Only create a process if we're going to be around a while. */
1847 if (spa_create_process && strcmp(spa->spa_name, TRYIMPORT_NAME) != 0) {
1848 if (newproc(spa_thread, (caddr_t)spa, syscid, maxclsyspri,
1849 NULL, 0) == 0) {
1850 spa->spa_proc_state = SPA_PROC_CREATED;
1851 while (spa->spa_proc_state == SPA_PROC_CREATED) {
1852 cv_wait(&spa->spa_proc_cv,
1853 &spa->spa_proc_lock);
1854 }
1855 ASSERT(spa->spa_proc_state == SPA_PROC_ACTIVE);
1856 ASSERT(spa->spa_proc != &p0);
1857 ASSERT(spa->spa_did != 0);
1858 } else {
1859 #ifdef _KERNEL
1860 cmn_err(CE_WARN,
1861 "Couldn't create process for zfs pool \"%s\"\n",
1862 spa->spa_name);
1863 #endif
1864 }
1865 }
1866 #endif /* HAVE_SPA_THREAD */
1867 mutex_exit(&spa->spa_proc_lock);
1868
1869 /* If we didn't create a process, we need to create our taskqs. */
1870 if (spa->spa_proc == &p0) {
1871 spa_create_zio_taskqs(spa);
1872 }
1873
1874 for (size_t i = 0; i < TXG_SIZE; i++) {
1875 spa->spa_txg_zio[i] = zio_root(spa, NULL, NULL,
1876 ZIO_FLAG_CANFAIL);
1877 }
1878
1879 list_create(&spa->spa_config_dirty_list, sizeof (vdev_t),
1880 offsetof(vdev_t, vdev_config_dirty_node));
1881 list_create(&spa->spa_evicting_os_list, sizeof (objset_t),
1882 offsetof(objset_t, os_evicting_node));
1883 list_create(&spa->spa_state_dirty_list, sizeof (vdev_t),
1884 offsetof(vdev_t, vdev_state_dirty_node));
1885
1886 txg_list_create(&spa->spa_vdev_txg_list, spa,
1887 offsetof(struct vdev, vdev_txg_node));
1888
1889 avl_create(&spa->spa_errlist_scrub,
1890 spa_error_entry_compare, sizeof (spa_error_entry_t),
1891 offsetof(spa_error_entry_t, se_avl));
1892 avl_create(&spa->spa_errlist_last,
1893 spa_error_entry_compare, sizeof (spa_error_entry_t),
1894 offsetof(spa_error_entry_t, se_avl));
1895 avl_create(&spa->spa_errlist_healed,
1896 spa_error_entry_compare, sizeof (spa_error_entry_t),
1897 offsetof(spa_error_entry_t, se_avl));
1898
1899 spa_activate_os(spa);
1900
1901 spa_keystore_init(&spa->spa_keystore);
1902
1903 /*
1904 * This taskq is used to perform zvol-minor-related tasks
1905 * asynchronously. This has several advantages, including easy
1906 * resolution of various deadlocks.
1907 *
1908 * The taskq must be single threaded to ensure tasks are always
1909 * processed in the order in which they were dispatched.
1910 *
1911 * A taskq per pool allows one to keep the pools independent.
1912 * This way if one pool is suspended, it will not impact another.
1913 *
1914 * The preferred location to dispatch a zvol minor task is a sync
1915 * task. In this context, there is easy access to the spa_t and minimal
1916 * error handling is required because the sync task must succeed.
1917 */
1918 spa->spa_zvol_taskq = taskq_create("z_zvol", 1, defclsyspri,
1919 1, INT_MAX, 0);
1920
1921 /*
1922 * The taskq to preload metaslabs.
1923 */
1924 spa->spa_metaslab_taskq = taskq_create("z_metaslab",
1925 metaslab_preload_pct, maxclsyspri, 1, INT_MAX,
1926 TASKQ_DYNAMIC | TASKQ_THREADS_CPU_PCT);
1927
1928 /*
1929 * Taskq dedicated to prefetcher threads: this is used to prevent the
1930 * pool traverse code from monopolizing the global (and limited)
1931 * system_taskq by inappropriately scheduling long running tasks on it.
1932 */
1933 spa->spa_prefetch_taskq = taskq_create("z_prefetch", 100,
1934 defclsyspri, 1, INT_MAX, TASKQ_DYNAMIC | TASKQ_THREADS_CPU_PCT);
1935
1936 /*
1937 * The taskq to upgrade datasets in this pool. Currently used by
1938 * feature SPA_FEATURE_USEROBJ_ACCOUNTING/SPA_FEATURE_PROJECT_QUOTA.
1939 */
1940 spa->spa_upgrade_taskq = taskq_create("z_upgrade", 100,
1941 defclsyspri, 1, INT_MAX, TASKQ_DYNAMIC | TASKQ_THREADS_CPU_PCT);
1942 }
1943
1944 /*
1945 * Opposite of spa_activate().
1946 */
1947 static void
1948 spa_deactivate(spa_t *spa)
1949 {
1950 if (spa->spa_create_info != NULL) {
1951 nvlist_free(spa->spa_create_info);
1952 spa->spa_create_info = NULL;
1953 }
1954 ASSERT(spa->spa_sync_on == B_FALSE);
1955 ASSERT0P(spa->spa_dsl_pool);
1956 ASSERT0P(spa->spa_root_vdev);
1957 ASSERT0P(spa->spa_async_zio_root);
1958 ASSERT(spa->spa_state != POOL_STATE_UNINITIALIZED);
1959
1960 spa_evicting_os_wait(spa);
1961
1962 if (spa->spa_zvol_taskq) {
1963 taskq_destroy(spa->spa_zvol_taskq);
1964 spa->spa_zvol_taskq = NULL;
1965 }
1966
1967 if (spa->spa_metaslab_taskq) {
1968 taskq_destroy(spa->spa_metaslab_taskq);
1969 spa->spa_metaslab_taskq = NULL;
1970 }
1971
1972 if (spa->spa_prefetch_taskq) {
1973 taskq_destroy(spa->spa_prefetch_taskq);
1974 spa->spa_prefetch_taskq = NULL;
1975 }
1976
1977 if (spa->spa_upgrade_taskq) {
1978 taskq_destroy(spa->spa_upgrade_taskq);
1979 spa->spa_upgrade_taskq = NULL;
1980 }
1981
1982 txg_list_destroy(&spa->spa_vdev_txg_list);
1983
1984 list_destroy(&spa->spa_config_dirty_list);
1985 list_destroy(&spa->spa_evicting_os_list);
1986 list_destroy(&spa->spa_state_dirty_list);
1987
1988 taskq_cancel_id(system_delay_taskq, spa->spa_deadman_tqid, B_TRUE);
1989
1990 for (int t = 0; t < ZIO_TYPES; t++) {
1991 for (int q = 0; q < ZIO_TASKQ_TYPES; q++) {
1992 spa_taskqs_fini(spa, t, q);
1993 }
1994 }
1995
1996 for (size_t i = 0; i < TXG_SIZE; i++) {
1997 ASSERT3P(spa->spa_txg_zio[i], !=, NULL);
1998 VERIFY0(zio_wait(spa->spa_txg_zio[i]));
1999 spa->spa_txg_zio[i] = NULL;
2000 }
2001
2002 metaslab_class_destroy(spa->spa_normal_class);
2003 spa->spa_normal_class = NULL;
2004
2005 metaslab_class_destroy(spa->spa_log_class);
2006 spa->spa_log_class = NULL;
2007
2008 metaslab_class_destroy(spa->spa_embedded_log_class);
2009 spa->spa_embedded_log_class = NULL;
2010
2011 metaslab_class_destroy(spa->spa_special_class);
2012 spa->spa_special_class = NULL;
2013
2014 metaslab_class_destroy(spa->spa_special_embedded_log_class);
2015 spa->spa_special_embedded_log_class = NULL;
2016
2017 metaslab_class_destroy(spa->spa_dedup_class);
2018 spa->spa_dedup_class = NULL;
2019
2020 /*
2021 * If this was part of an import or the open otherwise failed, we may
2022 * still have errors left in the queues. Empty them just in case.
2023 */
2024 spa_errlog_drain(spa);
2025 avl_destroy(&spa->spa_errlist_scrub);
2026 avl_destroy(&spa->spa_errlist_last);
2027 avl_destroy(&spa->spa_errlist_healed);
2028
2029 spa_keystore_fini(&spa->spa_keystore);
2030
2031 spa->spa_state = POOL_STATE_UNINITIALIZED;
2032
2033 mutex_enter(&spa->spa_proc_lock);
2034 if (spa->spa_proc_state != SPA_PROC_NONE) {
2035 ASSERT(spa->spa_proc_state == SPA_PROC_ACTIVE);
2036 spa->spa_proc_state = SPA_PROC_DEACTIVATE;
2037 cv_broadcast(&spa->spa_proc_cv);
2038 while (spa->spa_proc_state == SPA_PROC_DEACTIVATE) {
2039 ASSERT(spa->spa_proc != &p0);
2040 cv_wait(&spa->spa_proc_cv, &spa->spa_proc_lock);
2041 }
2042 ASSERT(spa->spa_proc_state == SPA_PROC_GONE);
2043 spa->spa_proc_state = SPA_PROC_NONE;
2044 }
2045 ASSERT(spa->spa_proc == &p0);
2046 mutex_exit(&spa->spa_proc_lock);
2047
2048 /*
2049 * We want to make sure spa_thread() has actually exited the ZFS
2050 * module, so that the module can't be unloaded out from underneath
2051 * it.
2052 */
2053 if (spa->spa_did != 0) {
2054 thread_join(spa->spa_did);
2055 spa->spa_did = 0;
2056 }
2057
2058 spa_deactivate_os(spa);
2059
2060 }
2061
2062 /*
2063 * Verify a pool configuration, and construct the vdev tree appropriately. This
2064 * will create all the necessary vdevs in the appropriate layout, with each vdev
2065 * in the CLOSED state. This will prep the pool before open/creation/import.
2066 * All vdev validation is done by the vdev_alloc() routine.
2067 */
2068 int
2069 spa_config_parse(spa_t *spa, vdev_t **vdp, nvlist_t *nv, vdev_t *parent,
2070 uint_t id, int atype)
2071 {
2072 nvlist_t **child;
2073 uint_t children;
2074 int error;
2075
2076 if ((error = vdev_alloc(spa, vdp, nv, parent, id, atype)) != 0)
2077 return (error);
2078
2079 if ((*vdp)->vdev_ops->vdev_op_leaf)
2080 return (0);
2081
2082 error = nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
2083 &child, &children);
2084
2085 if (error == ENOENT)
2086 return (0);
2087
2088 if (error) {
2089 vdev_free(*vdp);
2090 *vdp = NULL;
2091 return (SET_ERROR(EINVAL));
2092 }
2093
2094 for (int c = 0; c < children; c++) {
2095 vdev_t *vd;
2096 if ((error = spa_config_parse(spa, &vd, child[c], *vdp, c,
2097 atype)) != 0) {
2098 vdev_free(*vdp);
2099 *vdp = NULL;
2100 return (error);
2101 }
2102 }
2103
2104 ASSERT(*vdp != NULL);
2105
2106 return (0);
2107 }
2108
2109 static boolean_t
2110 spa_should_flush_logs_on_unload(spa_t *spa)
2111 {
2112 if (!spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP))
2113 return (B_FALSE);
2114
2115 if (!spa_writeable(spa))
2116 return (B_FALSE);
2117
2118 if (!spa->spa_sync_on)
2119 return (B_FALSE);
2120
2121 if (spa_state(spa) != POOL_STATE_EXPORTED)
2122 return (B_FALSE);
2123
2124 if (zfs_keep_log_spacemaps_at_export)
2125 return (B_FALSE);
2126
2127 return (B_TRUE);
2128 }
2129
2130 /*
2131 * Opens a transaction that will set the flag that will instruct
2132 * spa_sync to attempt to flush all the metaslabs for that txg.
2133 */
2134 static void
2135 spa_unload_log_sm_flush_all(spa_t *spa)
2136 {
2137 dmu_tx_t *tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
2138 VERIFY0(dmu_tx_assign(tx, DMU_TX_WAIT | DMU_TX_SUSPEND));
2139
2140 ASSERT0(spa->spa_log_flushall_txg);
2141 spa->spa_log_flushall_txg = dmu_tx_get_txg(tx);
2142
2143 dmu_tx_commit(tx);
2144 txg_wait_synced(spa_get_dsl(spa), spa->spa_log_flushall_txg);
2145 }
2146
2147 static void
2148 spa_unload_log_sm_metadata(spa_t *spa)
2149 {
2150 void *cookie = NULL;
2151 spa_log_sm_t *sls;
2152 log_summary_entry_t *e;
2153
2154 while ((sls = avl_destroy_nodes(&spa->spa_sm_logs_by_txg,
2155 &cookie)) != NULL) {
2156 VERIFY0(sls->sls_mscount);
2157 kmem_free(sls, sizeof (spa_log_sm_t));
2158 }
2159
2160 while ((e = list_remove_head(&spa->spa_log_summary)) != NULL) {
2161 VERIFY0(e->lse_mscount);
2162 kmem_free(e, sizeof (log_summary_entry_t));
2163 }
2164
2165 spa->spa_unflushed_stats.sus_nblocks = 0;
2166 spa->spa_unflushed_stats.sus_memused = 0;
2167 spa->spa_unflushed_stats.sus_blocklimit = 0;
2168 }
2169
2170 static void
2171 spa_destroy_aux_threads(spa_t *spa)
2172 {
2173 if (spa->spa_condense_zthr != NULL) {
2174 zthr_destroy(spa->spa_condense_zthr);
2175 spa->spa_condense_zthr = NULL;
2176 }
2177 if (spa->spa_checkpoint_discard_zthr != NULL) {
2178 zthr_destroy(spa->spa_checkpoint_discard_zthr);
2179 spa->spa_checkpoint_discard_zthr = NULL;
2180 }
2181 if (spa->spa_livelist_delete_zthr != NULL) {
2182 zthr_destroy(spa->spa_livelist_delete_zthr);
2183 spa->spa_livelist_delete_zthr = NULL;
2184 }
2185 if (spa->spa_livelist_condense_zthr != NULL) {
2186 zthr_destroy(spa->spa_livelist_condense_zthr);
2187 spa->spa_livelist_condense_zthr = NULL;
2188 }
2189 if (spa->spa_raidz_expand_zthr != NULL) {
2190 zthr_destroy(spa->spa_raidz_expand_zthr);
2191 spa->spa_raidz_expand_zthr = NULL;
2192 }
2193 }
2194
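/*
 * Note the current (txg, wall-clock time) pair in the in-memory round-robin
 * database at most every spa_note_txg_time seconds (or when forced), and
 * persist the minute/day/month tables to the MOS pool directory at most
 * every spa_flush_txg_time seconds while there is dirty data for the txg.
 */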
2195 static void
2196 spa_sync_time_logger(spa_t *spa, uint64_t txg, boolean_t force)
2197 {
2198 uint64_t curtime, dirty;
2199 dmu_tx_t *tx;
2200 dsl_pool_t *dp = spa->spa_dsl_pool;
2201 uint64_t idx = txg & TXG_MASK;
2202
2203 if (!spa_writeable(spa)) {
2204 return;
2205 }
2206
2207 curtime = gethrestime_sec();
2208 if (txg > spa->spa_last_noted_txg &&
2209 (force ||
2210 curtime >= spa->spa_last_noted_txg_time + spa_note_txg_time)) {
2211 spa->spa_last_noted_txg_time = curtime;
2212 spa->spa_last_noted_txg = txg;
2213
2214 mutex_enter(&spa->spa_txg_log_time_lock);
2215 dbrrd_add(&spa->spa_txg_log_time, curtime, txg);
2216 mutex_exit(&spa->spa_txg_log_time_lock);
2217 }
2218
2219 if (!force &&
2220 curtime < spa->spa_last_flush_txg_time + spa_flush_txg_time) {
2221 return;
2222 }
2223 if (txg > spa_final_dirty_txg(spa)) {
2224 return;
2225 }
2226 spa->spa_last_flush_txg_time = curtime;
2227
2228 mutex_enter(&dp->dp_lock);
2229 dirty = dp->dp_dirty_pertxg[idx];
2230 mutex_exit(&dp->dp_lock);
2231 if (!force && dirty == 0) {
2232 return;
2233 }
2234
2235 spa->spa_last_flush_txg_time = curtime;
2236 tx = dmu_tx_create_assigned(spa_get_dsl(spa), txg);
2237
2238 VERIFY0(zap_update(spa_meta_objset(spa), DMU_POOL_DIRECTORY_OBJECT,
2239 DMU_POOL_TXG_LOG_TIME_MINUTES, RRD_ENTRY_SIZE, RRD_STRUCT_ELEM,
2240 &spa->spa_txg_log_time.dbr_minutes, tx));
2241 VERIFY0(zap_update(spa_meta_objset(spa), DMU_POOL_DIRECTORY_OBJECT,
2242 DMU_POOL_TXG_LOG_TIME_DAYS, RRD_ENTRY_SIZE, RRD_STRUCT_ELEM,
2243 &spa->spa_txg_log_time.dbr_days, tx));
2244 VERIFY0(zap_update(spa_meta_objset(spa), DMU_POOL_DIRECTORY_OBJECT,
2245 DMU_POOL_TXG_LOG_TIME_MONTHS, RRD_ENTRY_SIZE, RRD_STRUCT_ELEM,
2246 &spa->spa_txg_log_time.dbr_months, tx));
2247 dmu_tx_commit(tx);
2248 }
2249
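/*
 * Force one final txg-time record into the MOS before the pool is unloaded,
 * using a freshly assigned transaction.
 */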
2250 static void
2251 spa_unload_sync_time_logger(spa_t *spa)
2252 {
2253 uint64_t txg;
2254 dmu_tx_t *tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
2255 VERIFY0(dmu_tx_assign(tx, DMU_TX_WAIT));
2256
2257 txg = dmu_tx_get_txg(tx);
2258 spa_sync_time_logger(spa, txg, B_TRUE);
2259
2260 dmu_tx_commit(tx);
2261 }
2262
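/*
 * Load the persisted txg-time databases (minute/day/month resolution) from
 * the MOS pool directory; ENOENT simply means they have never been written
 * for this pool.
 */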
2263 static void
2264 spa_load_txg_log_time(spa_t *spa)
2265 {
2266 int error;
2267
2268 error = zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
2269 DMU_POOL_TXG_LOG_TIME_MINUTES, RRD_ENTRY_SIZE, RRD_STRUCT_ELEM,
2270 &spa->spa_txg_log_time.dbr_minutes);
2271 if (error != 0 && error != ENOENT) {
2272 spa_load_note(spa, "unable to load a txg time database with "
2273 "minute resolution [error=%d]", error);
2274 }
2275 error = zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
2276 DMU_POOL_TXG_LOG_TIME_DAYS, RRD_ENTRY_SIZE, RRD_STRUCT_ELEM,
2277 &spa->spa_txg_log_time.dbr_days);
2278 if (error != 0 && error != ENOENT) {
2279 spa_load_note(spa, "unable to load a txg time database with "
2280 "day resolution [error=%d]", error);
2281 }
2282 error = zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
2283 DMU_POOL_TXG_LOG_TIME_MONTHS, RRD_ENTRY_SIZE, RRD_STRUCT_ELEM,
2284 &spa->spa_txg_log_time.dbr_months);
2285 if (error != 0 && error != ENOENT) {
2286 spa_load_note(spa, "unable to load a txg time database with "
2287 "month resolution [error=%d]", error);
2288 }
2289 }
2290
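/*
 * Decide whether a final txg-time record should be written at unload: only
 * for writable, actively syncing pools that are being exported and have
 * noted at least one txg.
 */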
2291 static boolean_t
2292 spa_should_sync_time_logger_on_unload(spa_t *spa)
2293 {
2294
2295 if (!spa_writeable(spa))
2296 return (B_FALSE);
2297
2298 if (!spa->spa_sync_on)
2299 return (B_FALSE);
2300
2301 if (spa_state(spa) != POOL_STATE_EXPORTED)
2302 return (B_FALSE);
2303
2304 if (spa->spa_last_noted_txg == 0)
2305 return (B_FALSE);
2306
2307 return (B_TRUE);
2308 }
2309
2310
2311 /*
2312 * Opposite of spa_load().
2313 */
2314 static void
2315 spa_unload(spa_t *spa)
2316 {
2317 ASSERT(spa_namespace_held() ||
2318 spa->spa_export_thread == curthread);
2319 ASSERT(spa_state(spa) != POOL_STATE_UNINITIALIZED);
2320
2321 spa_import_progress_remove(spa_guid(spa));
2322 spa_load_note(spa, "UNLOADING");
2323
2324 spa_wake_waiters(spa);
2325
2326 /*
2327 * If we have set the spa_final_txg, we have already performed the
2328 	 * tasks below in spa_export_common(). We should not redo them here,
2329 	 * since we delay the final TXGs beyond what spa_final_txg is set to.
2330 */
2331 if (spa->spa_final_txg == UINT64_MAX) {
2332 if (spa_should_sync_time_logger_on_unload(spa))
2333 spa_unload_sync_time_logger(spa);
2334
2335 /*
2336 * If the log space map feature is enabled and the pool is
2337 * getting exported (but not destroyed), we want to spend some
2338 * time flushing as many metaslabs as we can in an attempt to
2339 * destroy log space maps and save import time.
2340 */
2341 if (spa_should_flush_logs_on_unload(spa))
2342 spa_unload_log_sm_flush_all(spa);
2343
2344 /*
2345 * Stop async tasks.
2346 */
2347 spa_async_suspend(spa);
2348
2349 if (spa->spa_root_vdev) {
2350 vdev_t *root_vdev = spa->spa_root_vdev;
2351 vdev_initialize_stop_all(root_vdev,
2352 VDEV_INITIALIZE_ACTIVE);
2353 vdev_trim_stop_all(root_vdev, VDEV_TRIM_ACTIVE);
2354 vdev_autotrim_stop_all(spa);
2355 vdev_rebuild_stop_all(spa);
2356 l2arc_spa_rebuild_stop(spa);
2357 }
2358
2359 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
2360 spa->spa_final_txg = spa_last_synced_txg(spa) +
2361 TXG_DEFER_SIZE + 1;
2362 spa_config_exit(spa, SCL_ALL, FTAG);
2363 }
2364
2365 /*
2366 * Stop syncing.
2367 */
2368 if (spa->spa_sync_on) {
2369 txg_sync_stop(spa->spa_dsl_pool);
2370 spa->spa_sync_on = B_FALSE;
2371 }
2372
2373 /*
2374 * This ensures that there is no async metaslab prefetching
2375 * while we attempt to unload the spa.
2376 */
2377 taskq_wait(spa->spa_metaslab_taskq);
2378
2379 if (spa->spa_mmp.mmp_thread)
2380 mmp_thread_stop(spa);
2381
2382 /*
2383 * Wait for any outstanding async I/O to complete.
2384 */
2385 if (spa->spa_async_zio_root != NULL) {
2386 for (int i = 0; i < max_ncpus; i++)
2387 (void) zio_wait(spa->spa_async_zio_root[i]);
2388 kmem_free(spa->spa_async_zio_root, max_ncpus * sizeof (void *));
2389 spa->spa_async_zio_root = NULL;
2390 }
2391
2392 if (spa->spa_vdev_removal != NULL) {
2393 spa_vdev_removal_destroy(spa->spa_vdev_removal);
2394 spa->spa_vdev_removal = NULL;
2395 }
2396
2397 spa_destroy_aux_threads(spa);
2398
2399 spa_condense_fini(spa);
2400
2401 bpobj_close(&spa->spa_deferred_bpobj);
2402
2403 spa_config_enter(spa, SCL_ALL, spa, RW_WRITER);
2404
2405 /*
2406 * Close all vdevs.
2407 */
2408 if (spa->spa_root_vdev)
2409 vdev_free(spa->spa_root_vdev);
2410 ASSERT0P(spa->spa_root_vdev);
2411
2412 /*
2413 * Close the dsl pool.
2414 */
2415 if (spa->spa_dsl_pool) {
2416 dsl_pool_close(spa->spa_dsl_pool);
2417 spa->spa_dsl_pool = NULL;
2418 spa->spa_meta_objset = NULL;
2419 }
2420
2421 ddt_unload(spa);
2422 brt_unload(spa);
2423 spa_unload_log_sm_metadata(spa);
2424
2425 /*
2426 * Drop and purge level 2 cache
2427 */
2428 spa_l2cache_drop(spa);
2429
2430 if (spa->spa_spares.sav_vdevs) {
2431 for (int i = 0; i < spa->spa_spares.sav_count; i++)
2432 vdev_free(spa->spa_spares.sav_vdevs[i]);
2433 kmem_free(spa->spa_spares.sav_vdevs,
2434 spa->spa_spares.sav_count * sizeof (void *));
2435 spa->spa_spares.sav_vdevs = NULL;
2436 }
2437 if (spa->spa_spares.sav_config) {
2438 nvlist_free(spa->spa_spares.sav_config);
2439 spa->spa_spares.sav_config = NULL;
2440 }
2441 spa->spa_spares.sav_count = 0;
2442
2443 if (spa->spa_l2cache.sav_vdevs) {
2444 for (int i = 0; i < spa->spa_l2cache.sav_count; i++) {
2445 vdev_clear_stats(spa->spa_l2cache.sav_vdevs[i]);
2446 vdev_free(spa->spa_l2cache.sav_vdevs[i]);
2447 }
2448 kmem_free(spa->spa_l2cache.sav_vdevs,
2449 spa->spa_l2cache.sav_count * sizeof (void *));
2450 spa->spa_l2cache.sav_vdevs = NULL;
2451 }
2452 if (spa->spa_l2cache.sav_config) {
2453 nvlist_free(spa->spa_l2cache.sav_config);
2454 spa->spa_l2cache.sav_config = NULL;
2455 }
2456 spa->spa_l2cache.sav_count = 0;
2457
2458 spa->spa_async_suspended = 0;
2459
2460 spa->spa_indirect_vdevs_loaded = B_FALSE;
2461
2462 if (spa->spa_comment != NULL) {
2463 spa_strfree(spa->spa_comment);
2464 spa->spa_comment = NULL;
2465 }
2466 if (spa->spa_compatibility != NULL) {
2467 spa_strfree(spa->spa_compatibility);
2468 spa->spa_compatibility = NULL;
2469 }
2470
2471 spa->spa_raidz_expand = NULL;
2472 spa->spa_checkpoint_txg = 0;
2473
2474 spa_config_exit(spa, SCL_ALL, spa);
2475 }
2476
2477 /*
2478 * Load (or re-load) the current list of vdevs describing the active spares for
2479 * this pool. When this is called, we have some form of basic information in
2480 * 'spa_spares.sav_config'. We parse this into vdevs, try to open them, and
2481 * then re-generate a more complete list including status information.
2482 */
2483 void
2484 spa_load_spares(spa_t *spa)
2485 {
2486 nvlist_t **spares;
2487 uint_t nspares;
2488 int i;
2489 vdev_t *vd, *tvd;
2490
2491 #ifndef _KERNEL
2492 /*
2493 * zdb opens both the current state of the pool and the
2494 * checkpointed state (if present), with a different spa_t.
2495 *
2496 * As spare vdevs are shared among open pools, we skip loading
2497 * them when we load the checkpointed state of the pool.
2498 */
2499 if (!spa_writeable(spa))
2500 return;
2501 #endif
2502
2503 ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
2504
2505 /*
2506 * First, close and free any existing spare vdevs.
2507 */
2508 if (spa->spa_spares.sav_vdevs) {
2509 for (i = 0; i < spa->spa_spares.sav_count; i++) {
2510 vd = spa->spa_spares.sav_vdevs[i];
2511
2512 /* Undo the call to spa_activate() below */
2513 if ((tvd = spa_lookup_by_guid(spa, vd->vdev_guid,
2514 B_FALSE)) != NULL && tvd->vdev_isspare)
2515 spa_spare_remove(tvd);
2516 vdev_close(vd);
2517 vdev_free(vd);
2518 }
2519
2520 kmem_free(spa->spa_spares.sav_vdevs,
2521 spa->spa_spares.sav_count * sizeof (void *));
2522 }
2523
2524 if (spa->spa_spares.sav_config == NULL)
2525 nspares = 0;
2526 else
2527 VERIFY0(nvlist_lookup_nvlist_array(spa->spa_spares.sav_config,
2528 ZPOOL_CONFIG_SPARES, &spares, &nspares));
2529
2530 spa->spa_spares.sav_count = (int)nspares;
2531 spa->spa_spares.sav_vdevs = NULL;
2532
2533 if (nspares == 0)
2534 return;
2535
2536 /*
2537 * Construct the array of vdevs, opening them to get status in the
2538 	 * process. For each spare, there are potentially two different vdev_t
2539 * structures associated with it: one in the list of spares (used only
2540 * for basic validation purposes) and one in the active vdev
2541 * configuration (if it's spared in). During this phase we open and
2542 * validate each vdev on the spare list. If the vdev also exists in the
2543 * active configuration, then we also mark this vdev as an active spare.
2544 */
2545 spa->spa_spares.sav_vdevs = kmem_zalloc(nspares * sizeof (void *),
2546 KM_SLEEP);
2547 for (i = 0; i < spa->spa_spares.sav_count; i++) {
2548 VERIFY0(spa_config_parse(spa, &vd, spares[i], NULL, 0,
2549 VDEV_ALLOC_SPARE));
2550 ASSERT(vd != NULL);
2551
2552 spa->spa_spares.sav_vdevs[i] = vd;
2553
2554 if ((tvd = spa_lookup_by_guid(spa, vd->vdev_guid,
2555 B_FALSE)) != NULL) {
2556 if (!tvd->vdev_isspare)
2557 spa_spare_add(tvd);
2558
2559 /*
2560 * We only mark the spare active if we were successfully
2561 * able to load the vdev. Otherwise, importing a pool
2562 * with a bad active spare would result in strange
2563 			 * behavior, because multiple pools would think the spare
2564 * is actively in use.
2565 *
2566 * There is a vulnerability here to an equally bizarre
2567 * circumstance, where a dead active spare is later
2568 * brought back to life (onlined or otherwise). Given
2569 * the rarity of this scenario, and the extra complexity
2570 * it adds, we ignore the possibility.
2571 */
2572 if (!vdev_is_dead(tvd))
2573 spa_spare_activate(tvd);
2574 }
2575
2576 vd->vdev_top = vd;
2577 vd->vdev_aux = &spa->spa_spares;
2578
2579 if (vdev_open(vd) != 0)
2580 continue;
2581
2582 if (vdev_validate_aux(vd) == 0)
2583 spa_spare_add(vd);
2584 }
2585
2586 /*
2587 * Recompute the stashed list of spares, with status information
2588 * this time.
2589 */
2590 fnvlist_remove(spa->spa_spares.sav_config, ZPOOL_CONFIG_SPARES);
2591
2592 spares = kmem_alloc(spa->spa_spares.sav_count * sizeof (void *),
2593 KM_SLEEP);
2594 for (i = 0; i < spa->spa_spares.sav_count; i++)
2595 spares[i] = vdev_config_generate(spa,
2596 spa->spa_spares.sav_vdevs[i], B_TRUE, VDEV_CONFIG_SPARE);
2597 fnvlist_add_nvlist_array(spa->spa_spares.sav_config,
2598 ZPOOL_CONFIG_SPARES, (const nvlist_t * const *)spares,
2599 spa->spa_spares.sav_count);
2600 for (i = 0; i < spa->spa_spares.sav_count; i++)
2601 nvlist_free(spares[i]);
2602 kmem_free(spares, spa->spa_spares.sav_count * sizeof (void *));
2603 }
2604
2605 /*
2606 * Load (or re-load) the current list of vdevs describing the active l2cache for
2607 * this pool. When this is called, we have some form of basic information in
2608 * 'spa_l2cache.sav_config'. We parse this into vdevs, try to open them, and
2609 * then re-generate a more complete list including status information.
2610 * Devices which are already active have their details maintained, and are
2611 * not re-opened.
2612 */
2613 void
2614 spa_load_l2cache(spa_t *spa)
2615 {
2616 nvlist_t **l2cache = NULL;
2617 uint_t nl2cache;
2618 int i, j, oldnvdevs;
2619 uint64_t guid;
2620 vdev_t *vd, **oldvdevs, **newvdevs;
2621 spa_aux_vdev_t *sav = &spa->spa_l2cache;
2622
2623 #ifndef _KERNEL
2624 /*
2625 * zdb opens both the current state of the pool and the
2626 * checkpointed state (if present), with a different spa_t.
2627 *
2628 * As L2 caches are part of the ARC which is shared among open
2629 * pools, we skip loading them when we load the checkpointed
2630 * state of the pool.
2631 */
2632 if (!spa_writeable(spa))
2633 return;
2634 #endif
2635
2636 ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
2637
2638 oldvdevs = sav->sav_vdevs;
2639 oldnvdevs = sav->sav_count;
2640 sav->sav_vdevs = NULL;
2641 sav->sav_count = 0;
2642
2643 if (sav->sav_config == NULL) {
2644 nl2cache = 0;
2645 newvdevs = NULL;
2646 goto out;
2647 }
2648
2649 VERIFY0(nvlist_lookup_nvlist_array(sav->sav_config,
2650 ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache));
2651 newvdevs = kmem_alloc(nl2cache * sizeof (void *), KM_SLEEP);
2652
2653 /*
2654 * Process new nvlist of vdevs.
2655 */
2656 for (i = 0; i < nl2cache; i++) {
2657 guid = fnvlist_lookup_uint64(l2cache[i], ZPOOL_CONFIG_GUID);
2658
2659 newvdevs[i] = NULL;
2660 for (j = 0; j < oldnvdevs; j++) {
2661 vd = oldvdevs[j];
2662 if (vd != NULL && guid == vd->vdev_guid) {
2663 /*
2664 * Retain previous vdev for add/remove ops.
2665 */
2666 newvdevs[i] = vd;
2667 oldvdevs[j] = NULL;
2668 break;
2669 }
2670 }
2671
2672 if (newvdevs[i] == NULL) {
2673 /*
2674 * Create new vdev
2675 */
2676 VERIFY0(spa_config_parse(spa, &vd, l2cache[i], NULL, 0,
2677 VDEV_ALLOC_L2CACHE));
2678 ASSERT(vd != NULL);
2679 newvdevs[i] = vd;
2680
2681 /*
2682 * Commit this vdev as an l2cache device,
2683 * even if it fails to open.
2684 */
2685 spa_l2cache_add(vd);
2686
2687 vd->vdev_top = vd;
2688 vd->vdev_aux = sav;
2689
2690 spa_l2cache_activate(vd);
2691
2692 if (vdev_open(vd) != 0)
2693 continue;
2694
2695 (void) vdev_validate_aux(vd);
2696
2697 if (!vdev_is_dead(vd))
2698 l2arc_add_vdev(spa, vd);
2699
2700 /*
2701 			 * Upon cache device addition to a pool or pool
2702 			 * creation with a cache device, or if the header
2703 			 * of the device is invalid, we issue an async
2704 			 * TRIM command for the whole device, which will
2705 			 * execute if l2arc_trim_ahead > 0.
2706 */
2707 spa_async_request(spa, SPA_ASYNC_L2CACHE_TRIM);
2708 }
2709 }
2710
2711 sav->sav_vdevs = newvdevs;
2712 sav->sav_count = (int)nl2cache;
2713
2714 /*
2715 * Recompute the stashed list of l2cache devices, with status
2716 * information this time.
2717 */
2718 fnvlist_remove(sav->sav_config, ZPOOL_CONFIG_L2CACHE);
2719
2720 if (sav->sav_count > 0)
2721 l2cache = kmem_alloc(sav->sav_count * sizeof (void *),
2722 KM_SLEEP);
2723 for (i = 0; i < sav->sav_count; i++)
2724 l2cache[i] = vdev_config_generate(spa,
2725 sav->sav_vdevs[i], B_TRUE, VDEV_CONFIG_L2CACHE);
2726 fnvlist_add_nvlist_array(sav->sav_config, ZPOOL_CONFIG_L2CACHE,
2727 (const nvlist_t * const *)l2cache, sav->sav_count);
2728
2729 out:
2730 /*
2731 * Purge vdevs that were dropped
2732 */
2733 if (oldvdevs) {
2734 for (i = 0; i < oldnvdevs; i++) {
2735 uint64_t pool;
2736
2737 vd = oldvdevs[i];
2738 if (vd != NULL) {
2739 ASSERT(vd->vdev_isl2cache);
2740
2741 if (spa_l2cache_exists(vd->vdev_guid, &pool) &&
2742 pool != 0ULL && l2arc_vdev_present(vd))
2743 l2arc_remove_vdev(vd);
2744 vdev_clear_stats(vd);
2745 vdev_free(vd);
2746 }
2747 }
2748
2749 kmem_free(oldvdevs, oldnvdevs * sizeof (void *));
2750 }
2751
2752 for (i = 0; i < sav->sav_count; i++)
2753 nvlist_free(l2cache[i]);
2754 if (sav->sav_count)
2755 kmem_free(l2cache, sav->sav_count * sizeof (void *));
2756 }
2757
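/*
 * Read a packed nvlist from the MOS: the object's bonus buffer holds the
 * packed size, the object data holds the packed nvlist itself.
 */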
2758 static int
2759 load_nvlist(spa_t *spa, uint64_t obj, nvlist_t **value)
2760 {
2761 dmu_buf_t *db;
2762 char *packed = NULL;
2763 size_t nvsize = 0;
2764 int error;
2765 *value = NULL;
2766
2767 error = dmu_bonus_hold(spa->spa_meta_objset, obj, FTAG, &db);
2768 if (error)
2769 return (error);
2770
2771 nvsize = *(uint64_t *)db->db_data;
2772 dmu_buf_rele(db, FTAG);
2773
2774 packed = vmem_alloc(nvsize, KM_SLEEP);
2775 error = dmu_read(spa->spa_meta_objset, obj, 0, nvsize, packed,
2776 DMU_READ_PREFETCH);
2777 if (error == 0)
2778 error = nvlist_unpack(packed, nvsize, value, 0);
2779 vmem_free(packed, nvsize);
2780
2781 return (error);
2782 }
2783
2784 /*
2785 * Concrete top-level vdevs that are not missing and are not logs. At every
2786 * spa_sync we write new uberblocks to at least SPA_SYNC_MIN_VDEVS core tvds.
2787 */
2788 static uint64_t
2789 spa_healthy_core_tvds(spa_t *spa)
2790 {
2791 vdev_t *rvd = spa->spa_root_vdev;
2792 uint64_t tvds = 0;
2793
2794 for (uint64_t i = 0; i < rvd->vdev_children; i++) {
2795 vdev_t *vd = rvd->vdev_child[i];
2796 if (vd->vdev_islog)
2797 continue;
2798 if (vdev_is_concrete(vd) && !vdev_is_dead(vd))
2799 tvds++;
2800 }
2801
2802 return (tvds);
2803 }
2804
2805 /*
2806 * Checks to see if the given vdev could not be opened, in which case we post a
2807 * sysevent to notify the autoreplace code that the device has been removed.
2808 */
2809 static void
2810 spa_check_removed(vdev_t *vd)
2811 {
2812 for (uint64_t c = 0; c < vd->vdev_children; c++)
2813 spa_check_removed(vd->vdev_child[c]);
2814
2815 if (vd->vdev_ops->vdev_op_leaf && vdev_is_dead(vd) &&
2816 vdev_is_concrete(vd)) {
2817 zfs_post_autoreplace(vd->vdev_spa, vd);
2818 spa_event_notify(vd->vdev_spa, vd, NULL, ESC_ZFS_VDEV_CHECK);
2819 }
2820 }
2821
2822 static int
2823 spa_check_for_missing_logs(spa_t *spa)
2824 {
2825 vdev_t *rvd = spa->spa_root_vdev;
2826
2827 /*
2828 * If we're doing a normal import, then build up any additional
2829 * diagnostic information about missing log devices.
2830 * We'll pass this up to the user for further processing.
2831 */
2832 if (!(spa->spa_import_flags & ZFS_IMPORT_MISSING_LOG)) {
2833 nvlist_t **child, *nv;
2834 uint64_t idx = 0;
2835
2836 child = kmem_alloc(rvd->vdev_children * sizeof (nvlist_t *),
2837 KM_SLEEP);
2838 nv = fnvlist_alloc();
2839
2840 for (uint64_t c = 0; c < rvd->vdev_children; c++) {
2841 vdev_t *tvd = rvd->vdev_child[c];
2842
2843 /*
2844 * We consider a device as missing only if it failed
2845 			 * to open (i.e. offline or faulted devices are not
2846 			 * considered missing).
2847 */
2848 if (tvd->vdev_islog &&
2849 tvd->vdev_state == VDEV_STATE_CANT_OPEN) {
2850 child[idx++] = vdev_config_generate(spa, tvd,
2851 B_FALSE, VDEV_CONFIG_MISSING);
2852 }
2853 }
2854
2855 if (idx > 0) {
2856 fnvlist_add_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
2857 (const nvlist_t * const *)child, idx);
2858 fnvlist_add_nvlist(spa->spa_load_info,
2859 ZPOOL_CONFIG_MISSING_DEVICES, nv);
2860
2861 for (uint64_t i = 0; i < idx; i++)
2862 nvlist_free(child[i]);
2863 }
2864 nvlist_free(nv);
2865 kmem_free(child, rvd->vdev_children * sizeof (char **));
2866
2867 if (idx > 0) {
2868 spa_load_failed(spa, "some log devices are missing");
2869 vdev_dbgmsg_print_tree(rvd, 2);
2870 return (SET_ERROR(ENXIO));
2871 }
2872 } else {
2873 for (uint64_t c = 0; c < rvd->vdev_children; c++) {
2874 vdev_t *tvd = rvd->vdev_child[c];
2875
2876 if (tvd->vdev_islog &&
2877 tvd->vdev_state == VDEV_STATE_CANT_OPEN) {
2878 spa_set_log_state(spa, SPA_LOG_CLEAR);
2879 spa_load_note(spa, "some log devices are "
2880 "missing, ZIL is dropped.");
2881 vdev_dbgmsg_print_tree(rvd, 2);
2882 break;
2883 }
2884 }
2885 }
2886
2887 return (0);
2888 }
2889
2890 /*
2891 * Check for missing log devices
2892 */
2893 static boolean_t
2894 spa_check_logs(spa_t *spa)
2895 {
2896 boolean_t rv = B_FALSE;
2897 dsl_pool_t *dp = spa_get_dsl(spa);
2898
2899 switch (spa->spa_log_state) {
2900 default:
2901 break;
2902 case SPA_LOG_MISSING:
2903 /* need to recheck in case slog has been restored */
2904 case SPA_LOG_UNKNOWN:
2905 rv = (dmu_objset_find_dp(dp, dp->dp_root_dir_obj,
2906 zil_check_log_chain, NULL, DS_FIND_CHILDREN) != 0);
2907 if (rv)
2908 spa_set_log_state(spa, SPA_LOG_MISSING);
2909 break;
2910 }
2911 return (rv);
2912 }
2913
2914 /*
2915 * Passivate any log vdevs (note, does not apply to embedded log metaslabs).
2916 */
2917 static boolean_t
2918 spa_passivate_log(spa_t *spa)
2919 {
2920 vdev_t *rvd = spa->spa_root_vdev;
2921 boolean_t slog_found = B_FALSE;
2922
2923 ASSERT(spa_config_held(spa, SCL_ALLOC, RW_WRITER));
2924
2925 for (int c = 0; c < rvd->vdev_children; c++) {
2926 vdev_t *tvd = rvd->vdev_child[c];
2927
2928 if (tvd->vdev_islog) {
2929 ASSERT0P(tvd->vdev_log_mg);
2930 metaslab_group_passivate(tvd->vdev_mg);
2931 slog_found = B_TRUE;
2932 }
2933 }
2934
2935 return (slog_found);
2936 }
2937
2938 /*
2939 * Activate any log vdevs (note, does not apply to embedded log metaslabs).
2940 */
2941 static void
2942 spa_activate_log(spa_t *spa)
2943 {
2944 vdev_t *rvd = spa->spa_root_vdev;
2945
2946 ASSERT(spa_config_held(spa, SCL_ALLOC, RW_WRITER));
2947
2948 for (int c = 0; c < rvd->vdev_children; c++) {
2949 vdev_t *tvd = rvd->vdev_child[c];
2950
2951 if (tvd->vdev_islog) {
2952 ASSERT0P(tvd->vdev_log_mg);
2953 metaslab_group_activate(tvd->vdev_mg);
2954 }
2955 }
2956 }
2957
2958 int
2959 spa_reset_logs(spa_t *spa)
2960 {
2961 int error;
2962
2963 error = dmu_objset_find(spa_name(spa), zil_reset,
2964 NULL, DS_FIND_CHILDREN);
2965 if (error == 0) {
2966 /*
2967 * We successfully offlined the log device, sync out the
2968 * current txg so that the "stubby" block can be removed
2969 * by zil_sync().
2970 */
2971 txg_wait_synced(spa->spa_dsl_pool, 0);
2972 }
2973 return (error);
2974 }
2975
2976 static void
2977 spa_aux_check_removed(spa_aux_vdev_t *sav)
2978 {
2979 for (int i = 0; i < sav->sav_count; i++)
2980 spa_check_removed(sav->sav_vdevs[i]);
2981 }
2982
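/*
 * Record the highest block birth txg seen among claimed blocks in
 * spa_claim_max_txg.
 */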
2983 void
2984 spa_claim_notify(zio_t *zio)
2985 {
2986 spa_t *spa = zio->io_spa;
2987
2988 if (zio->io_error)
2989 return;
2990
2991 mutex_enter(&spa->spa_props_lock); /* any mutex will do */
2992 if (spa->spa_claim_max_txg < BP_GET_BIRTH(zio->io_bp))
2993 spa->spa_claim_max_txg = BP_GET_BIRTH(zio->io_bp);
2994 mutex_exit(&spa->spa_props_lock);
2995 }
2996
2997 typedef struct spa_load_error {
2998 boolean_t sle_verify_data;
2999 uint64_t sle_meta_count;
3000 uint64_t sle_data_count;
3001 } spa_load_error_t;
3002
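/*
 * Completion callback for spa_load_verify() reads: classify any error as a
 * metadata or data error and release the inflight-bytes throttle.
 */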
3003 static void
3004 spa_load_verify_done(zio_t *zio)
3005 {
3006 blkptr_t *bp = zio->io_bp;
3007 spa_load_error_t *sle = zio->io_private;
3008 dmu_object_type_t type = BP_GET_TYPE(bp);
3009 int error = zio->io_error;
3010 spa_t *spa = zio->io_spa;
3011
3012 abd_free(zio->io_abd);
3013 if (error) {
3014 if ((BP_GET_LEVEL(bp) != 0 || DMU_OT_IS_METADATA(type)) &&
3015 type != DMU_OT_INTENT_LOG)
3016 atomic_inc_64(&sle->sle_meta_count);
3017 else
3018 atomic_inc_64(&sle->sle_data_count);
3019 }
3020
3021 mutex_enter(&spa->spa_scrub_lock);
3022 spa->spa_load_verify_bytes -= BP_GET_PSIZE(bp);
3023 cv_broadcast(&spa->spa_scrub_io_cv);
3024 mutex_exit(&spa->spa_scrub_lock);
3025 }
3026
3027 /*
3028 * Maximum number of inflight bytes is the log2 fraction of the arc size.
3029 * By default, we set it to 1/16th of the arc.
3030 */
3031 static uint_t spa_load_verify_shift = 4;
3032 static int spa_load_verify_metadata = B_TRUE;
3033 static int spa_load_verify_data = B_TRUE;
3034
3035 static int
3036 spa_load_verify_cb(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
3037 const zbookmark_phys_t *zb, const dnode_phys_t *dnp, void *arg)
3038 {
3039 zio_t *rio = arg;
3040 spa_load_error_t *sle = rio->io_private;
3041
3042 (void) zilog, (void) dnp;
3043
3044 /*
3045 * Note: normally this routine will not be called if
3046 * spa_load_verify_metadata is not set. However, it may be useful
3047 * to manually set the flag after the traversal has begun.
3048 */
3049 if (!spa_load_verify_metadata)
3050 return (0);
3051
3052 /*
3053 * Sanity check the block pointer in order to detect obvious damage
3054 * before using the contents in subsequent checks or in zio_read().
3055 * When damaged consider it to be a metadata error since we cannot
3056 * trust the BP_GET_TYPE and BP_GET_LEVEL values.
3057 */
3058 if (zfs_blkptr_verify(spa, bp, BLK_CONFIG_NEEDED, BLK_VERIFY_LOG)) {
3059 atomic_inc_64(&sle->sle_meta_count);
3060 return (0);
3061 }
3062
3063 if (zb->zb_level == ZB_DNODE_LEVEL || BP_IS_HOLE(bp) ||
3064 BP_IS_EMBEDDED(bp) || BP_IS_REDACTED(bp))
3065 return (0);
3066
3067 if (!BP_IS_METADATA(bp) &&
3068 (!spa_load_verify_data || !sle->sle_verify_data))
3069 return (0);
3070
3071 uint64_t maxinflight_bytes =
3072 arc_target_bytes() >> spa_load_verify_shift;
3073 size_t size = BP_GET_PSIZE(bp);
3074
3075 mutex_enter(&spa->spa_scrub_lock);
3076 while (spa->spa_load_verify_bytes >= maxinflight_bytes)
3077 cv_wait(&spa->spa_scrub_io_cv, &spa->spa_scrub_lock);
3078 spa->spa_load_verify_bytes += size;
3079 mutex_exit(&spa->spa_scrub_lock);
3080
3081 zio_nowait(zio_read(rio, spa, bp, abd_alloc_for_io(size, B_FALSE), size,
3082 spa_load_verify_done, rio->io_private, ZIO_PRIORITY_SCRUB,
3083 ZIO_FLAG_SPECULATIVE | ZIO_FLAG_CANFAIL |
3084 ZIO_FLAG_SCRUB | ZIO_FLAG_RAW, zb));
3085 return (0);
3086 }
3087
3088 static int
3089 verify_dataset_name_len(dsl_pool_t *dp, dsl_dataset_t *ds, void *arg)
3090 {
3091 (void) dp, (void) arg;
3092
3093 if (dsl_dataset_namelen(ds) >= ZFS_MAX_DATASET_NAME_LEN)
3094 return (SET_ERROR(ENAMETOOLONG));
3095
3096 return (0);
3097 }
3098
3099 static int
3100 spa_load_verify(spa_t *spa)
3101 {
3102 zio_t *rio;
3103 spa_load_error_t sle = { 0 };
3104 zpool_load_policy_t policy;
3105 boolean_t verify_ok = B_FALSE;
3106 int error = 0;
3107
3108 zpool_get_load_policy(spa->spa_config, &policy);
3109
3110 if (policy.zlp_rewind & ZPOOL_NEVER_REWIND ||
3111 policy.zlp_maxmeta == UINT64_MAX)
3112 return (0);
3113
3114 dsl_pool_config_enter(spa->spa_dsl_pool, FTAG);
3115 error = dmu_objset_find_dp(spa->spa_dsl_pool,
3116 spa->spa_dsl_pool->dp_root_dir_obj, verify_dataset_name_len, NULL,
3117 DS_FIND_CHILDREN);
3118 dsl_pool_config_exit(spa->spa_dsl_pool, FTAG);
3119 if (error != 0)
3120 return (error);
3121
3122 /*
3123 * Verify data only if we are rewinding or error limit was set.
3124 	 * Otherwise nothing except dbgmsg cares about it, so don't waste the time.
3125 */
3126 sle.sle_verify_data = (policy.zlp_rewind & ZPOOL_REWIND_MASK) ||
3127 (policy.zlp_maxdata < UINT64_MAX);
3128
3129 rio = zio_root(spa, NULL, &sle,
3130 ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE);
3131
3132 if (spa_load_verify_metadata) {
3133 if (spa->spa_extreme_rewind) {
3134 spa_load_note(spa, "performing a complete scan of the "
3135 "pool since extreme rewind is on. This may take "
3136 "a very long time.\n (spa_load_verify_data=%u, "
3137 "spa_load_verify_metadata=%u)",
3138 spa_load_verify_data, spa_load_verify_metadata);
3139 }
3140
3141 error = traverse_pool(spa, spa->spa_verify_min_txg,
3142 TRAVERSE_PRE | TRAVERSE_PREFETCH_METADATA |
3143 TRAVERSE_NO_DECRYPT, spa_load_verify_cb, rio);
3144 }
3145
3146 (void) zio_wait(rio);
3147 ASSERT0(spa->spa_load_verify_bytes);
3148
3149 spa->spa_load_meta_errors = sle.sle_meta_count;
3150 spa->spa_load_data_errors = sle.sle_data_count;
3151
3152 if (sle.sle_meta_count != 0 || sle.sle_data_count != 0) {
3153 spa_load_note(spa, "spa_load_verify found %llu metadata errors "
3154 "and %llu data errors", (u_longlong_t)sle.sle_meta_count,
3155 (u_longlong_t)sle.sle_data_count);
3156 }
3157
3158 if (spa_load_verify_dryrun ||
3159 (!error && sle.sle_meta_count <= policy.zlp_maxmeta &&
3160 sle.sle_data_count <= policy.zlp_maxdata)) {
3161 int64_t loss = 0;
3162
3163 verify_ok = B_TRUE;
3164 spa->spa_load_txg = spa->spa_uberblock.ub_txg;
3165 spa->spa_load_txg_ts = spa->spa_uberblock.ub_timestamp;
3166
3167 loss = spa->spa_last_ubsync_txg_ts - spa->spa_load_txg_ts;
3168 fnvlist_add_uint64(spa->spa_load_info, ZPOOL_CONFIG_LOAD_TIME,
3169 spa->spa_load_txg_ts);
3170 fnvlist_add_int64(spa->spa_load_info, ZPOOL_CONFIG_REWIND_TIME,
3171 loss);
3172 fnvlist_add_uint64(spa->spa_load_info,
3173 ZPOOL_CONFIG_LOAD_META_ERRORS, sle.sle_meta_count);
3174 fnvlist_add_uint64(spa->spa_load_info,
3175 ZPOOL_CONFIG_LOAD_DATA_ERRORS, sle.sle_data_count);
3176 } else {
3177 spa->spa_load_max_txg = spa->spa_uberblock.ub_txg;
3178 }
3179
3180 if (spa_load_verify_dryrun)
3181 return (0);
3182
3183 if (error) {
3184 if (error != ENXIO && error != EIO)
3185 error = SET_ERROR(EIO);
3186 return (error);
3187 }
3188
3189 return (verify_ok ? 0 : EIO);
3190 }
3191
3192 /*
3193 * Find a value in the pool props object.
3194 */
3195 static void
3196 spa_prop_find(spa_t *spa, zpool_prop_t prop, uint64_t *val)
3197 {
3198 (void) zap_lookup(spa->spa_meta_objset, spa->spa_pool_props_object,
3199 zpool_prop_to_name(prop), sizeof (uint64_t), 1, val);
3200 }
3201
3202 /*
3203 * Find a value in the pool directory object.
3204 */
3205 static int
3206 spa_dir_prop(spa_t *spa, const char *name, uint64_t *val, boolean_t log_enoent)
3207 {
3208 int error = zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
3209 name, sizeof (uint64_t), 1, val);
3210
3211 if (error != 0 && (error != ENOENT || log_enoent)) {
3212 spa_load_failed(spa, "couldn't get '%s' value in MOS directory "
3213 "[error=%d]", name, error);
3214 }
3215
3216 return (error);
3217 }
3218
3219 static int
3220 spa_vdev_err(vdev_t *vdev, vdev_aux_t aux, int err)
3221 {
3222 vdev_set_state(vdev, B_TRUE, VDEV_STATE_CANT_OPEN, aux);
3223 return (SET_ERROR(err));
3224 }
3225
3226 boolean_t
3227 spa_livelist_delete_check(spa_t *spa)
3228 {
3229 return (spa->spa_livelists_to_delete != 0);
3230 }
3231
3232 static boolean_t
3233 spa_livelist_delete_cb_check(void *arg, zthr_t *z)
3234 {
3235 (void) z;
3236 spa_t *spa = arg;
3237 return (spa_livelist_delete_check(spa));
3238 }
3239
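/*
 * Free a single block pointer from a livelist being deleted and update the
 * $FREE dsl dir's space accounting accordingly.
 */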
3240 static int
3241 delete_blkptr_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
3242 {
3243 spa_t *spa = arg;
3244 zio_free(spa, tx->tx_txg, bp);
3245 dsl_dir_diduse_space(tx->tx_pool->dp_free_dir, DD_USED_HEAD,
3246 -bp_get_dsize_sync(spa, bp),
3247 -BP_GET_PSIZE(bp), -BP_GET_UCSIZE(bp), tx);
3248 return (0);
3249 }
3250
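/*
 * Fetch the first entry of the ZAP of livelists awaiting deletion; its value
 * is the object number of the next livelist to delete.
 */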
3251 static int
3252 dsl_get_next_livelist_obj(objset_t *os, uint64_t zap_obj, uint64_t *llp)
3253 {
3254 int err;
3255 zap_cursor_t zc;
3256 zap_attribute_t *za = zap_attribute_alloc();
3257 zap_cursor_init(&zc, os, zap_obj);
3258 err = zap_cursor_retrieve(&zc, za);
3259 zap_cursor_fini(&zc);
3260 if (err == 0)
3261 *llp = za->za_first_integer;
3262 zap_attribute_free(za);
3263 return (err);
3264 }
3265
3266 /*
3267 * Components of livelist deletion that must be performed in syncing
3268 * context: freeing block pointers and updating the pool-wide data
3269 * structures to indicate how much work is left to do
3270 */
3271 typedef struct sublist_delete_arg {
3272 spa_t *spa;
3273 dsl_deadlist_t *ll;
3274 uint64_t key;
3275 bplist_t *to_free;
3276 } sublist_delete_arg_t;
3277
3278 static void
3279 sublist_delete_sync(void *arg, dmu_tx_t *tx)
3280 {
3281 sublist_delete_arg_t *sda = arg;
3282 spa_t *spa = sda->spa;
3283 dsl_deadlist_t *ll = sda->ll;
3284 uint64_t key = sda->key;
3285 bplist_t *to_free = sda->to_free;
3286
3287 bplist_iterate(to_free, delete_blkptr_cb, spa, tx);
3288 dsl_deadlist_remove_entry(ll, key, tx);
3289 }
3290
3291 typedef struct livelist_delete_arg {
3292 spa_t *spa;
3293 uint64_t ll_obj;
3294 uint64_t zap_obj;
3295 } livelist_delete_arg_t;
3296
3297 static void
3298 livelist_delete_sync(void *arg, dmu_tx_t *tx)
3299 {
3300 livelist_delete_arg_t *lda = arg;
3301 spa_t *spa = lda->spa;
3302 uint64_t ll_obj = lda->ll_obj;
3303 uint64_t zap_obj = lda->zap_obj;
3304 objset_t *mos = spa->spa_meta_objset;
3305 uint64_t count;
3306
3307 /* free the livelist and decrement the feature count */
3308 VERIFY0(zap_remove_int(mos, zap_obj, ll_obj, tx));
3309 dsl_deadlist_free(mos, ll_obj, tx);
3310 spa_feature_decr(spa, SPA_FEATURE_LIVELIST, tx);
3311 VERIFY0(zap_count(mos, zap_obj, &count));
3312 if (count == 0) {
3313 /* no more livelists to delete */
3314 VERIFY0(zap_remove(mos, DMU_POOL_DIRECTORY_OBJECT,
3315 DMU_POOL_DELETED_CLONES, tx));
3316 VERIFY0(zap_destroy(mos, zap_obj, tx));
3317 spa->spa_livelists_to_delete = 0;
3318 spa_notify_waiters(spa);
3319 }
3320 }
3321
3322 /*
3323 * Load in the value for the livelist to be removed and open it. Then,
3324 * load its first sublist and determine which block pointers should actually
3325 * be freed. Then, call a synctask which performs the actual frees and updates
3326 * the pool-wide livelist data.
3327 */
3328 static void
3329 spa_livelist_delete_cb(void *arg, zthr_t *z)
3330 {
3331 spa_t *spa = arg;
3332 uint64_t ll_obj = 0, count;
3333 objset_t *mos = spa->spa_meta_objset;
3334 uint64_t zap_obj = spa->spa_livelists_to_delete;
3335 /*
3336 * Determine the next livelist to delete. This function should only
3337 * be called if there is at least one deleted clone.
3338 */
3339 VERIFY0(dsl_get_next_livelist_obj(mos, zap_obj, &ll_obj));
3340 VERIFY0(zap_count(mos, ll_obj, &count));
3341 if (count > 0) {
3342 dsl_deadlist_t *ll;
3343 dsl_deadlist_entry_t *dle;
3344 bplist_t to_free;
3345 ll = kmem_zalloc(sizeof (dsl_deadlist_t), KM_SLEEP);
3346 VERIFY0(dsl_deadlist_open(ll, mos, ll_obj));
3347 dle = dsl_deadlist_first(ll);
3348 ASSERT3P(dle, !=, NULL);
3349 bplist_create(&to_free);
3350 int err = dsl_process_sub_livelist(&dle->dle_bpobj, &to_free,
3351 z, NULL);
3352 if (err == 0) {
3353 sublist_delete_arg_t sync_arg = {
3354 .spa = spa,
3355 .ll = ll,
3356 .key = dle->dle_mintxg,
3357 .to_free = &to_free
3358 };
3359 zfs_dbgmsg("deleting sublist (id %llu) from"
3360 " livelist %llu, %lld remaining",
3361 (u_longlong_t)dle->dle_bpobj.bpo_object,
3362 (u_longlong_t)ll_obj, (longlong_t)count - 1);
3363 VERIFY0(dsl_sync_task(spa_name(spa), NULL,
3364 sublist_delete_sync, &sync_arg, 0,
3365 ZFS_SPACE_CHECK_DESTROY));
3366 } else {
3367 VERIFY3U(err, ==, EINTR);
3368 }
3369 bplist_clear(&to_free);
3370 bplist_destroy(&to_free);
3371 dsl_deadlist_close(ll);
3372 kmem_free(ll, sizeof (dsl_deadlist_t));
3373 } else {
3374 livelist_delete_arg_t sync_arg = {
3375 .spa = spa,
3376 .ll_obj = ll_obj,
3377 .zap_obj = zap_obj
3378 };
3379 zfs_dbgmsg("deletion of livelist %llu completed",
3380 (u_longlong_t)ll_obj);
3381 VERIFY0(dsl_sync_task(spa_name(spa), NULL, livelist_delete_sync,
3382 &sync_arg, 0, ZFS_SPACE_CHECK_DESTROY));
3383 }
3384 }
3385
3386 static void
3387 spa_start_livelist_destroy_thread(spa_t *spa)
3388 {
3389 ASSERT0P(spa->spa_livelist_delete_zthr);
3390 spa->spa_livelist_delete_zthr =
3391 zthr_create("z_livelist_destroy",
3392 spa_livelist_delete_cb_check, spa_livelist_delete_cb, spa,
3393 minclsyspri);
3394 }
3395
3396 typedef struct livelist_new_arg {
3397 bplist_t *allocs;
3398 bplist_t *frees;
3399 } livelist_new_arg_t;
3400
3401 static int
3402 livelist_track_new_cb(void *arg, const blkptr_t *bp, boolean_t bp_freed,
3403 dmu_tx_t *tx)
3404 {
3405 ASSERT0P(tx);
3406 livelist_new_arg_t *lna = arg;
3407 if (bp_freed) {
3408 bplist_append(lna->frees, bp);
3409 } else {
3410 bplist_append(lna->allocs, bp);
3411 zfs_livelist_condense_new_alloc++;
3412 }
3413 return (0);
3414 }
3415
3416 typedef struct livelist_condense_arg {
3417 spa_t *spa;
3418 bplist_t to_keep;
3419 uint64_t first_size;
3420 uint64_t next_size;
3421 } livelist_condense_arg_t;
3422
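/*
 * Syncing-context half of livelist condensing: pick up any block pointers
 * added to the two sublists since the open-context pass, then replace the
 * two entries with the condensed result.
 */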
3423 static void
3424 spa_livelist_condense_sync(void *arg, dmu_tx_t *tx)
3425 {
3426 livelist_condense_arg_t *lca = arg;
3427 spa_t *spa = lca->spa;
3428 bplist_t new_frees;
3429 dsl_dataset_t *ds = spa->spa_to_condense.ds;
3430
3431 /* Have we been cancelled? */
3432 if (spa->spa_to_condense.cancelled) {
3433 zfs_livelist_condense_sync_cancel++;
3434 goto out;
3435 }
3436
3437 dsl_deadlist_entry_t *first = spa->spa_to_condense.first;
3438 dsl_deadlist_entry_t *next = spa->spa_to_condense.next;
3439 dsl_deadlist_t *ll = &ds->ds_dir->dd_livelist;
3440
3441 /*
3442 * It's possible that the livelist was changed while the zthr was
3443 * running. Therefore, we need to check for new blkptrs in the two
3444 * entries being condensed and continue to track them in the livelist.
3445 * Because of the way we handle remapped blkptrs (see dbuf_remap_impl),
3446 * it's possible that the newly added blkptrs are FREEs or ALLOCs so
3447 * we need to sort them into two different bplists.
3448 */
3449 uint64_t first_obj = first->dle_bpobj.bpo_object;
3450 uint64_t next_obj = next->dle_bpobj.bpo_object;
3451 uint64_t cur_first_size = first->dle_bpobj.bpo_phys->bpo_num_blkptrs;
3452 uint64_t cur_next_size = next->dle_bpobj.bpo_phys->bpo_num_blkptrs;
3453
3454 bplist_create(&new_frees);
3455 livelist_new_arg_t new_bps = {
3456 .allocs = &lca->to_keep,
3457 .frees = &new_frees,
3458 };
3459
3460 if (cur_first_size > lca->first_size) {
3461 VERIFY0(livelist_bpobj_iterate_from_nofree(&first->dle_bpobj,
3462 livelist_track_new_cb, &new_bps, lca->first_size));
3463 }
3464 if (cur_next_size > lca->next_size) {
3465 VERIFY0(livelist_bpobj_iterate_from_nofree(&next->dle_bpobj,
3466 livelist_track_new_cb, &new_bps, lca->next_size));
3467 }
3468
3469 dsl_deadlist_clear_entry(first, ll, tx);
3470 ASSERT(bpobj_is_empty(&first->dle_bpobj));
3471 dsl_deadlist_remove_entry(ll, next->dle_mintxg, tx);
3472
3473 bplist_iterate(&lca->to_keep, dsl_deadlist_insert_alloc_cb, ll, tx);
3474 bplist_iterate(&new_frees, dsl_deadlist_insert_free_cb, ll, tx);
3475 bplist_destroy(&new_frees);
3476
3477 char dsname[ZFS_MAX_DATASET_NAME_LEN];
3478 dsl_dataset_name(ds, dsname);
3479 zfs_dbgmsg("txg %llu condensing livelist of %s (id %llu), bpobj %llu "
3480 "(%llu blkptrs) and bpobj %llu (%llu blkptrs) -> bpobj %llu "
3481 "(%llu blkptrs)", (u_longlong_t)tx->tx_txg, dsname,
3482 (u_longlong_t)ds->ds_object, (u_longlong_t)first_obj,
3483 (u_longlong_t)cur_first_size, (u_longlong_t)next_obj,
3484 (u_longlong_t)cur_next_size,
3485 (u_longlong_t)first->dle_bpobj.bpo_object,
3486 (u_longlong_t)first->dle_bpobj.bpo_phys->bpo_num_blkptrs);
3487 out:
3488 dmu_buf_rele(ds->ds_dbuf, spa);
3489 spa->spa_to_condense.ds = NULL;
3490 bplist_clear(&lca->to_keep);
3491 bplist_destroy(&lca->to_keep);
3492 kmem_free(lca, sizeof (livelist_condense_arg_t));
3493 spa->spa_to_condense.syncing = B_FALSE;
3494 }
3495
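/*
 * Open-context half of livelist condensing: merge the two chosen sublists
 * (cancelling out matching FREEs and ALLOCs) into to_keep, then dispatch
 * spa_livelist_condense_sync() to apply the result.
 */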
3496 static void
3497 spa_livelist_condense_cb(void *arg, zthr_t *t)
3498 {
3499 while (zfs_livelist_condense_zthr_pause &&
3500 !(zthr_has_waiters(t) || zthr_iscancelled(t)))
3501 delay(1);
3502
3503 spa_t *spa = arg;
3504 dsl_deadlist_entry_t *first = spa->spa_to_condense.first;
3505 dsl_deadlist_entry_t *next = spa->spa_to_condense.next;
3506 uint64_t first_size, next_size;
3507
3508 livelist_condense_arg_t *lca =
3509 kmem_alloc(sizeof (livelist_condense_arg_t), KM_SLEEP);
3510 bplist_create(&lca->to_keep);
3511
3512 /*
3513 * Process the livelists (matching FREEs and ALLOCs) in open context
3514 * so we have minimal work in syncing context to condense.
3515 *
3516 * We save bpobj sizes (first_size and next_size) to use later in
3517 * syncing context to determine if entries were added to these sublists
3518 * while in open context. This is possible because the clone is still
3519 * active and open for normal writes and we want to make sure the new,
3520 * unprocessed blockpointers are inserted into the livelist normally.
3521 *
3522 	 * Note that dsl_process_sub_livelist() both stores the size (number of
3523 	 * block pointers) and iterates over them while the bpobj's lock is
3524 	 * held, so the sizes returned to us are consistent with what was
3525 	 * actually processed.
3526 */
3527 int err = dsl_process_sub_livelist(&first->dle_bpobj, &lca->to_keep, t,
3528 &first_size);
3529 if (err == 0)
3530 err = dsl_process_sub_livelist(&next->dle_bpobj, &lca->to_keep,
3531 t, &next_size);
3532
3533 if (err == 0) {
3534 while (zfs_livelist_condense_sync_pause &&
3535 !(zthr_has_waiters(t) || zthr_iscancelled(t)))
3536 delay(1);
3537
3538 dmu_tx_t *tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
3539 dmu_tx_mark_netfree(tx);
3540 dmu_tx_hold_space(tx, 1);
3541 err = dmu_tx_assign(tx, DMU_TX_NOWAIT | DMU_TX_NOTHROTTLE);
3542 if (err == 0) {
3543 /*
3544 * Prevent the condense zthr restarting before
3545 * the synctask completes.
3546 */
3547 spa->spa_to_condense.syncing = B_TRUE;
3548 lca->spa = spa;
3549 lca->first_size = first_size;
3550 lca->next_size = next_size;
3551 dsl_sync_task_nowait(spa_get_dsl(spa),
3552 spa_livelist_condense_sync, lca, tx);
3553 dmu_tx_commit(tx);
3554 return;
3555 }
3556 }
3557 /*
3558 	 * Condensing cannot continue: either it was externally stopped or
3559 * we were unable to assign to a tx because the pool has run out of
3560 * space. In the second case, we'll just end up trying to condense
3561 * again in a later txg.
3562 */
3563 ASSERT(err != 0);
3564 bplist_clear(&lca->to_keep);
3565 bplist_destroy(&lca->to_keep);
3566 kmem_free(lca, sizeof (livelist_condense_arg_t));
3567 dmu_buf_rele(spa->spa_to_condense.ds->ds_dbuf, spa);
3568 spa->spa_to_condense.ds = NULL;
3569 if (err == EINTR)
3570 zfs_livelist_condense_zthr_cancel++;
3571 }
3572
3573 /*
3574 * Check that there is something to condense but that a condense is not
3575 * already in progress and that condensing has not been cancelled.
3576 */
3577 static boolean_t
3578 spa_livelist_condense_cb_check(void *arg, zthr_t *z)
3579 {
3580 (void) z;
3581 spa_t *spa = arg;
3582 if ((spa->spa_to_condense.ds != NULL) &&
3583 (spa->spa_to_condense.syncing == B_FALSE) &&
3584 (spa->spa_to_condense.cancelled == B_FALSE)) {
3585 return (B_TRUE);
3586 }
3587 return (B_FALSE);
3588 }
3589
3590 static void
3591 spa_start_livelist_condensing_thread(spa_t *spa)
3592 {
3593 spa->spa_to_condense.ds = NULL;
3594 spa->spa_to_condense.first = NULL;
3595 spa->spa_to_condense.next = NULL;
3596 spa->spa_to_condense.syncing = B_FALSE;
3597 spa->spa_to_condense.cancelled = B_FALSE;
3598
3599 ASSERT0P(spa->spa_livelist_condense_zthr);
3600 spa->spa_livelist_condense_zthr =
3601 zthr_create("z_livelist_condense",
3602 spa_livelist_condense_cb_check,
3603 spa_livelist_condense_cb, spa, minclsyspri);
3604 }
3605
3606 static void
3607 spa_spawn_aux_threads(spa_t *spa)
3608 {
3609 ASSERT(spa_writeable(spa));
3610
3611 spa_start_raidz_expansion_thread(spa);
3612 spa_start_indirect_condensing_thread(spa);
3613 spa_start_livelist_destroy_thread(spa);
3614 spa_start_livelist_condensing_thread(spa);
3615
3616 ASSERT0P(spa->spa_checkpoint_discard_zthr);
3617 spa->spa_checkpoint_discard_zthr =
3618 zthr_create("z_checkpoint_discard",
3619 spa_checkpoint_discard_thread_check,
3620 spa_checkpoint_discard_thread, spa, minclsyspri);
3621 }
3622
3623 /*
3624 * Fix up config after a partly-completed split. This is done with the
3625 * ZPOOL_CONFIG_SPLIT nvlist. Both the splitting pool and the split-off
3626 * pool have that entry in their config, but only the splitting one contains
3627 * a list of all the guids of the vdevs that are being split off.
3628 *
3629 * This function determines what to do with that list: either rejoin
3630 * all the disks to the pool, or complete the splitting process. To attempt
3631 * the rejoin, each disk that is offlined is marked online again, and
3632 * we do a reopen() call. If the vdev label for every disk that was
3633 * marked online indicates it was successfully split off (VDEV_AUX_SPLIT_POOL)
3634 * then we call vdev_split() on each disk, and complete the split.
3635 *
3636 * Otherwise we leave the config alone, with all the vdevs in place in
3637 * the original pool.
3638 */
3639 static void
3640 spa_try_repair(spa_t *spa, nvlist_t *config)
3641 {
3642 uint_t extracted;
3643 uint64_t *glist;
3644 uint_t i, gcount;
3645 nvlist_t *nvl;
3646 vdev_t **vd;
3647 boolean_t attempt_reopen;
3648
3649 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_SPLIT, &nvl) != 0)
3650 return;
3651
3652 /* check that the config is complete */
3653 if (nvlist_lookup_uint64_array(nvl, ZPOOL_CONFIG_SPLIT_LIST,
3654 &glist, &gcount) != 0)
3655 return;
3656
3657 vd = kmem_zalloc(gcount * sizeof (vdev_t *), KM_SLEEP);
3658
3659 /* attempt to online all the vdevs & validate */
3660 attempt_reopen = B_TRUE;
3661 for (i = 0; i < gcount; i++) {
3662 if (glist[i] == 0) /* vdev is hole */
3663 continue;
3664
3665 vd[i] = spa_lookup_by_guid(spa, glist[i], B_FALSE);
3666 if (vd[i] == NULL) {
3667 /*
3668 * Don't bother attempting to reopen the disks;
3669 * just do the split.
3670 */
3671 attempt_reopen = B_FALSE;
3672 } else {
3673 /* attempt to re-online it */
3674 vd[i]->vdev_offline = B_FALSE;
3675 }
3676 }
3677
3678 if (attempt_reopen) {
3679 vdev_reopen(spa->spa_root_vdev);
3680
3681 /* check each device to see what state it's in */
3682 for (extracted = 0, i = 0; i < gcount; i++) {
3683 if (vd[i] != NULL &&
3684 vd[i]->vdev_stat.vs_aux != VDEV_AUX_SPLIT_POOL)
3685 break;
3686 ++extracted;
3687 }
3688 }
3689
3690 /*
3691 * If every disk has been moved to the new pool, or if we never
3692 * even attempted to look at them, then we split them off for
3693 * good.
3694 */
3695 if (!attempt_reopen || gcount == extracted) {
3696 for (i = 0; i < gcount; i++)
3697 if (vd[i] != NULL)
3698 vdev_split(vd[i]);
3699 vdev_reopen(spa->spa_root_vdev);
3700 }
3701
3702 kmem_free(vd, gcount * sizeof (vdev_t *));
3703 }
3704
3705 static int
3706 spa_load(spa_t *spa, spa_load_state_t state, spa_import_type_t type)
3707 {
3708 const char *ereport = FM_EREPORT_ZFS_POOL;
3709 int error;
3710
3711 spa->spa_load_state = state;
3712 (void) spa_import_progress_set_state(spa_guid(spa),
3713 spa_load_state(spa));
3714 spa_import_progress_set_notes(spa, "spa_load()");
3715
3716 gethrestime(&spa->spa_loaded_ts);
3717 error = spa_load_impl(spa, type, &ereport);
3718
3719 /*
3720 * Don't count references from objsets that are already closed
3721 * and are making their way through the eviction process.
3722 */
3723 spa_evicting_os_wait(spa);
3724 spa->spa_minref = zfs_refcount_count(&spa->spa_refcount);
3725 if (error) {
3726 if (error != EEXIST) {
3727 spa->spa_loaded_ts.tv_sec = 0;
3728 spa->spa_loaded_ts.tv_nsec = 0;
3729 }
3730 if (error != EBADF) {
3731 (void) zfs_ereport_post(ereport, spa,
3732 NULL, NULL, NULL, 0);
3733 }
3734 }
3735 spa->spa_load_state = error ? SPA_LOAD_ERROR : SPA_LOAD_NONE;
3736 spa->spa_ena = 0;
3737
3738 (void) spa_import_progress_set_state(spa_guid(spa),
3739 spa_load_state(spa));
3740
3741 return (error);
3742 }
3743
3744 #ifdef ZFS_DEBUG
3745 /*
3746 * Count the number of per-vdev ZAPs associated with all of the vdevs in the
3747 * vdev tree rooted in the given vd, and ensure that each ZAP is present in the
3748 * spa's per-vdev ZAP list.
3749 */
3750 static uint64_t
3751 vdev_count_verify_zaps(vdev_t *vd)
3752 {
3753 spa_t *spa = vd->vdev_spa;
3754 uint64_t total = 0;
3755
3756 if (spa_feature_is_active(vd->vdev_spa, SPA_FEATURE_AVZ_V2) &&
3757 vd->vdev_root_zap != 0) {
3758 total++;
3759 ASSERT0(zap_lookup_int(spa->spa_meta_objset,
3760 spa->spa_all_vdev_zaps, vd->vdev_root_zap));
3761 }
3762 if (vd->vdev_top_zap != 0) {
3763 total++;
3764 ASSERT0(zap_lookup_int(spa->spa_meta_objset,
3765 spa->spa_all_vdev_zaps, vd->vdev_top_zap));
3766 }
3767 if (vd->vdev_leaf_zap != 0) {
3768 total++;
3769 ASSERT0(zap_lookup_int(spa->spa_meta_objset,
3770 spa->spa_all_vdev_zaps, vd->vdev_leaf_zap));
3771 }
3772
3773 for (uint64_t i = 0; i < vd->vdev_children; i++) {
3774 total += vdev_count_verify_zaps(vd->vdev_child[i]);
3775 }
3776
3777 return (total);
3778 }
3779 #else
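/*
 * In non-debug builds this collapses to the constant 0; the
 * "(void) sizeof (vd)" references vd without evaluating it, which
 * avoids unused-variable warnings.
 */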
3780 #define vdev_count_verify_zaps(vd) ((void) sizeof (vd), 0)
3781 #endif
3782
3783 /*
3784 * Check the load_info results from the previous tryimport.
3785 *
3786 * error results:
3787 * 0 - Pool remains in an idle state
3788 * EREMOTEIO - Pool was known to be active on the other host
3789 * ENOENT - The config does not contain complete tryimport info
3790 */
3791 static int
3792 spa_activity_verify_config(spa_t *spa, uberblock_t *ub)
3793 {
3794 uint64_t tryconfig_mmp_state = MMP_STATE_ACTIVE;
3795 uint64_t tryconfig_txg = 0;
3796 uint64_t tryconfig_timestamp = 0;
3797 uint16_t tryconfig_mmp_seq = 0;
3798 nvlist_t *nvinfo, *config = spa->spa_config;
3799 int error;
3800
3801 /* Simply a non-zero value to indicate the verify was done. */
3802 spa->spa_mmp.mmp_import_ns = 1000;
3803
3804 error = nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nvinfo);
3805 if (error)
3806 return (SET_ERROR(ENOENT));
3807
3808 /*
3809 * If ZPOOL_CONFIG_MMP_STATE is present an activity check was performed
3810 * during the earlier tryimport. If the state recorded there isn't
3811 * MMP_STATE_INACTIVE the pool is known to be active on another host.
3812 */
3813 error = nvlist_lookup_uint64(nvinfo, ZPOOL_CONFIG_MMP_STATE,
3814 &tryconfig_mmp_state);
3815 if (error)
3816 return (SET_ERROR(ENOENT));
3817
3818 if (tryconfig_mmp_state != MMP_STATE_INACTIVE) {
3819 spa_load_failed(spa, "mmp: pool is active on remote host, "
3820 "state=%llu", (u_longlong_t)tryconfig_mmp_state);
3821 return (SET_ERROR(EREMOTEIO));
3822 }
3823
3824 /*
3825 * If ZPOOL_CONFIG_MMP_TXG is present an activity check was performed
3826 * during the earlier tryimport. If the txg recorded there is 0 then
3827 * the pool is known to be active on another host.
3828 */
3829 error = nvlist_lookup_uint64(nvinfo, ZPOOL_CONFIG_MMP_TXG,
3830 &tryconfig_txg);
3831 if (error)
3832 return (SET_ERROR(ENOENT));
3833
3834 if (tryconfig_txg == 0) {
3835 spa_load_failed(spa, "mmp: pool is active on remote host, "
3836 "tryconfig_txg=%llu", (u_longlong_t)tryconfig_txg);
3837 return (SET_ERROR(EREMOTEIO));
3838 }
3839
3840 error = nvlist_lookup_uint64(config, ZPOOL_CONFIG_TIMESTAMP,
3841 &tryconfig_timestamp);
3842 if (error)
3843 return (SET_ERROR(ENOENT));
3844
3845 error = nvlist_lookup_uint16(nvinfo, ZPOOL_CONFIG_MMP_SEQ,
3846 &tryconfig_mmp_seq);
3847 if (error)
3848 return (SET_ERROR(ENOENT));
3849
3850 if (tryconfig_timestamp == ub->ub_timestamp &&
3851 tryconfig_txg == ub->ub_txg &&
3852 MMP_SEQ_VALID(ub) && tryconfig_mmp_seq == MMP_SEQ(ub)) {
3853 zfs_dbgmsg("mmp: verified pool mmp tryimport config, "
3854 "spa=%s", spa_load_name(spa));
3855 return (0);
3856 }
3857
3858 spa_load_failed(spa, "mmp: pool is active on remote host, "
3859 "tc_timestamp=%llu ub_timestamp=%llu "
3860 "tc_txg=%llu ub_txg=%llu tc_seq=%llu ub_seq=%llu",
3861 (u_longlong_t)tryconfig_timestamp, (u_longlong_t)ub->ub_timestamp,
3862 (u_longlong_t)tryconfig_txg, (u_longlong_t)ub->ub_txg,
3863 (u_longlong_t)tryconfig_mmp_seq, (u_longlong_t)MMP_SEQ(ub));
3864
3865 return (SET_ERROR(EREMOTEIO));
3866 }
3867
3868 /*
3869 * Determine whether the activity check is required.
3870 */
3871 static boolean_t
3872 spa_activity_check_required(spa_t *spa, uberblock_t *ub, nvlist_t *label)
3873 {
3874 nvlist_t *config = spa->spa_config;
3875 uint64_t state = POOL_STATE_ACTIVE;
3876 uint64_t hostid = 0;
3877
3878 /*
3879 * Disable the MMP activity check - This is used by zdb which
3880 * is always read-only and intended to be used on potentially
3881 * active pools.
3882 */
3883 if (spa->spa_import_flags & ZFS_IMPORT_SKIP_MMP) {
3884 zfs_dbgmsg("mmp: skipping check ZFS_IMPORT_SKIP_MMP is set, "
3885 "spa=%s", spa_load_name(spa));
3886 return (B_FALSE);
3887 }
3888
3889 /*
3890 * Skip the activity check when the MMP feature is disabled.
3891 * - MMP_MAGIC not set - Legacy pool predates the MMP feature, or
3892 * - MMP_MAGIC set && mmp_delay == 0 - MMP feature is disabled.
3893 */
3894 if ((ub->ub_mmp_magic != MMP_MAGIC) ||
3895 (ub->ub_mmp_magic == MMP_MAGIC && ub->ub_mmp_delay == 0)) {
3896 zfs_dbgmsg("mmp: skipping check: feature is disabled, "
3897 "spa=%s", spa_load_name(spa));
3898 return (B_FALSE);
3899 }
3900
3901 /*
3902 * Allow the activity check to be skipped when importing a cleanly
3903 * exported pool on the same host which last imported it. Since the
3904 * hostid from configuration may be stale use the one read from the
3905 * label. Imports from other hostids must perform the activity check.
3906 */
3907 if (label != NULL) {
3908 if (nvlist_exists(label, ZPOOL_CONFIG_HOSTID))
3909 hostid = fnvlist_lookup_uint64(label,
3910 ZPOOL_CONFIG_HOSTID);
3911
3912 if (nvlist_exists(config, ZPOOL_CONFIG_POOL_STATE))
3913 state = fnvlist_lookup_uint64(config,
3914 ZPOOL_CONFIG_POOL_STATE);
3915
3916 if (spa_get_hostid(spa) && hostid == spa_get_hostid(spa) &&
3917 state == POOL_STATE_EXPORTED) {
3918 zfs_dbgmsg("mmp: skipping check: hostid matches "
3919 "and pool is exported, spa=%s, hostid=%llx",
3920 spa_load_name(spa), (u_longlong_t)hostid);
3921 return (B_FALSE);
3922 }
3923
3924 if (state == POOL_STATE_DESTROYED) {
3925 zfs_dbgmsg("mmp: skipping check: intentionally "
3926 "destroyed pool, spa=%s", spa_load_name(spa));
3927 return (B_FALSE);
3928 }
3929 }
3930
3931 return (B_TRUE);
3932 }
3933
3934 /*
3935 * Nanoseconds the activity check must watch for changes on-disk.
3936 */
3937 static uint64_t
3938 spa_activity_check_duration(spa_t *spa, uberblock_t *ub)
3939 {
3940 uint64_t import_intervals = MAX(zfs_multihost_import_intervals, 1);
3941 uint64_t multihost_interval = MSEC2NSEC(
3942 MMP_INTERVAL_OK(zfs_multihost_interval));
3943 uint64_t import_delay = MAX(NANOSEC, import_intervals *
3944 multihost_interval);
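	/*
	 * With the default tunables (typically a 1 s multihost interval and
	 * 20 import intervals) this floor works out to roughly 20 seconds;
	 * the exact defaults may vary between releases.
	 */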
3945
3946 /*
3947 * Local tunables determine a minimum duration except for the case
3948 * where we know when the remote host will suspend the pool if MMP
3949 * writes do not land.
3950 *
3951 * See Big Theory comment at the top of mmp.c for the reasoning behind
3952 * these cases and times.
3953 */
3954
3955 ASSERT(MMP_IMPORT_SAFETY_FACTOR >= 100);
3956
3957 if (MMP_INTERVAL_VALID(ub) && MMP_FAIL_INT_VALID(ub) &&
3958 MMP_FAIL_INT(ub) > 0) {
3959
3960 /* MMP on remote host will suspend pool after failed writes */
3961 import_delay = MMP_FAIL_INT(ub) * MSEC2NSEC(MMP_INTERVAL(ub)) *
3962 MMP_IMPORT_SAFETY_FACTOR / 100;
3963
3964 zfs_dbgmsg("mmp: settings spa=%s fail_intvals>0 "
3965 "import_delay=%llu mmp_fails=%llu mmp_interval=%llu "
3966 "import_intervals=%llu", spa_load_name(spa),
3967 (u_longlong_t)import_delay,
3968 (u_longlong_t)MMP_FAIL_INT(ub),
3969 (u_longlong_t)MMP_INTERVAL(ub),
3970 (u_longlong_t)import_intervals);
3971
3972 } else if (MMP_INTERVAL_VALID(ub) && MMP_FAIL_INT_VALID(ub) &&
3973 MMP_FAIL_INT(ub) == 0) {
3974
3975 /* MMP on remote host will never suspend pool */
3976 import_delay = MAX(import_delay, (MSEC2NSEC(MMP_INTERVAL(ub)) +
3977 ub->ub_mmp_delay) * import_intervals);
3978
3979 zfs_dbgmsg("mmp: settings spa=%s fail_intvals=0 "
3980 "import_delay=%llu mmp_interval=%llu ub_mmp_delay=%llu "
3981 "import_intervals=%llu", spa_load_name(spa),
3982 (u_longlong_t)import_delay,
3983 (u_longlong_t)MMP_INTERVAL(ub),
3984 (u_longlong_t)ub->ub_mmp_delay,
3985 (u_longlong_t)import_intervals);
3986
3987 } else if (MMP_VALID(ub)) {
3988 /*
3989 * zfs-0.7 compatibility case
3990 */
3991
3992 import_delay = MAX(import_delay, (multihost_interval +
3993 ub->ub_mmp_delay) * import_intervals);
3994
3995 zfs_dbgmsg("mmp: settings spa=%s import_delay=%llu "
3996 "ub_mmp_delay=%llu import_intervals=%llu leaves=%u",
3997 spa_load_name(spa), (u_longlong_t)import_delay,
3998 (u_longlong_t)ub->ub_mmp_delay,
3999 (u_longlong_t)import_intervals,
4000 vdev_count_leaves(spa));
4001 } else {
4002 /* Using local tunings is the only reasonable option */
4003 zfs_dbgmsg("mmp: pool last imported on non-MMP aware "
4004 "host using settings spa=%s import_delay=%llu "
4005 "multihost_interval=%llu import_intervals=%llu",
4006 spa_load_name(spa), (u_longlong_t)import_delay,
4007 (u_longlong_t)multihost_interval,
4008 (u_longlong_t)import_intervals);
4009 }
4010
4011 return (import_delay);
4012 }
4013
4014 /*
4015 * Store the observed pool status in spa->spa_load_info nvlist. If the
4016 * remote hostname or hostid are available from configuration read from
4017 * disk, store them as well. Additionally, provide some diagnostic info
4018 * for which activity checks were run and their duration. This allows
4019 * 'zpool import' to generate a more useful message.
4020 *
4021 * Mandatory observed pool status
4022 * - ZPOOL_CONFIG_MMP_STATE - observed pool status (active/inactive)
4023 * - ZPOOL_CONFIG_MMP_TXG - observed pool txg number
4024 * - ZPOOL_CONFIG_MMP_SEQ - observed pool sequence id
4025 *
4026 * Optional information for detailed reporting
4027 * - ZPOOL_CONFIG_MMP_HOSTNAME - hostname from the active pool
4028 * - ZPOOL_CONFIG_MMP_HOSTID - hostid from the active pool
4029 * - ZPOOL_CONFIG_MMP_RESULT - set to result of activity check
4030 * - ZPOOL_CONFIG_MMP_TRYIMPORT_NS - tryimport duration in nanosec
4031 * - ZPOOL_CONFIG_MMP_IMPORT_NS - import duration in nanosec
4032 * - ZPOOL_CONFIG_MMP_CLAIM_NS - claim duration in nanosec
4033 *
4034 * ZPOOL_CONFIG_MMP_RESULT can be set to:
4035 * - ENXIO - system hostid not set
4036 * - ESRCH - activity check skipped
4037 * - EREMOTEIO - activity check detected active pool
4038 * - EINTR - activity check interrupted
4039 * - 0 - activity check detected no activity
4040 */
4041 static void
4042 spa_activity_set_load_info(spa_t *spa, nvlist_t *label, mmp_state_t state,
4043 uint64_t txg, uint16_t seq, int error)
4044 {
4045 mmp_thread_t *mmp = &spa->spa_mmp;
4046 const char *hostname = NULL;
4047 uint64_t hostid = 0;
4048
4049 /* Always report a zero txg and seq id for active pools. */
4050 if (state == MMP_STATE_ACTIVE) {
4051 ASSERT0(txg);
4052 ASSERT0(seq);
4053 }
4054
4055 if (label) {
4056 if (nvlist_exists(label, ZPOOL_CONFIG_HOSTNAME)) {
4057 hostname = fnvlist_lookup_string(label,
4058 ZPOOL_CONFIG_HOSTNAME);
4059 fnvlist_add_string(spa->spa_load_info,
4060 ZPOOL_CONFIG_MMP_HOSTNAME, hostname);
4061 }
4062
4063 if (nvlist_exists(label, ZPOOL_CONFIG_HOSTID)) {
4064 hostid = fnvlist_lookup_uint64(label,
4065 ZPOOL_CONFIG_HOSTID);
4066 fnvlist_add_uint64(spa->spa_load_info,
4067 ZPOOL_CONFIG_MMP_HOSTID, hostid);
4068 }
4069 }
4070
4071 fnvlist_add_uint64(spa->spa_load_info, ZPOOL_CONFIG_MMP_STATE, state);
4072 fnvlist_add_uint64(spa->spa_load_info, ZPOOL_CONFIG_MMP_TXG, txg);
4073 fnvlist_add_uint16(spa->spa_load_info, ZPOOL_CONFIG_MMP_SEQ, seq);
4074 fnvlist_add_uint32(spa->spa_load_info, ZPOOL_CONFIG_MMP_RESULT, error);
4075
4076 if (mmp->mmp_tryimport_ns > 0) {
4077 fnvlist_add_uint64(spa->spa_load_info,
4078 ZPOOL_CONFIG_MMP_TRYIMPORT_NS, mmp->mmp_tryimport_ns);
4079 }
4080
4081 if (mmp->mmp_import_ns > 0) {
4082 fnvlist_add_uint64(spa->spa_load_info,
4083 ZPOOL_CONFIG_MMP_IMPORT_NS, mmp->mmp_import_ns);
4084 }
4085
4086 if (mmp->mmp_claim_ns > 0) {
4087 fnvlist_add_uint64(spa->spa_load_info,
4088 ZPOOL_CONFIG_MMP_CLAIM_NS, mmp->mmp_claim_ns);
4089 }
4090
4091 zfs_dbgmsg("mmp: set spa_load_info, spa=%s hostname=%s hostid=%llx "
4092 "state=%d txg=%llu seq=%llu tryimport_ns=%lld import_ns=%lld "
4093 "claim_ns=%lld", spa_load_name(spa),
4094 hostname != NULL ? hostname : "none", (u_longlong_t)hostid,
4095 (int)state, (u_longlong_t)txg, (u_longlong_t)seq,
4096 (longlong_t)mmp->mmp_tryimport_ns, (longlong_t)mmp->mmp_import_ns,
4097 (longlong_t)mmp->mmp_claim_ns);
4098 }
4099
4100 static int
4101 spa_ld_activity_result(spa_t *spa, int error, const char *state)
4102 {
4103 switch (error) {
4104 case ENXIO:
4105 cmn_err(CE_WARN, "pool '%s' system hostid not set, "
4106 "aborted import during %s", spa_load_name(spa), state);
4107 /* Userspace expects EREMOTEIO for no system hostid */
4108 error = EREMOTEIO;
4109 break;
4110 case EREMOTEIO:
4111 cmn_err(CE_WARN, "pool '%s' activity detected, aborted "
4112 "import during %s", spa_load_name(spa), state);
4113 break;
4114 case EINTR:
4115 cmn_err(CE_WARN, "pool '%s' activity check, interrupted "
4116 "import during %s", spa_load_name(spa), state);
4117 break;
4118 case 0:
4119 cmn_err(CE_NOTE, "pool '%s' activity check completed "
4120 "successfully", spa_load_name(spa));
4121 break;
4122 }
4123
4124 return (error);
4125 }
4126
4127
4128 /*
4129 * Remote host activity check. Performed during tryimport when the pool
4130 * has passed the basic sanity checks and is open read-only.
4131 *
4132 * error results:
4133 * 0 - no activity detected
4134 * EREMOTEIO - remote activity detected
4135 * EINTR - user canceled the operation
4136 */
4137 static int
4138 spa_activity_check_tryimport(spa_t *spa, uberblock_t *spa_ub,
4139 boolean_t importing)
4140 {
4141 kcondvar_t cv;
4142 kmutex_t mtx;
4143 int error = 0;
4144
4145 cv_init(&cv, NULL, CV_DEFAULT, NULL);
4146 mutex_init(&mtx, NULL, MUTEX_DEFAULT, NULL);
4147 mutex_enter(&mtx);
4148
4149 uint64_t import_delay = spa_activity_check_duration(spa, spa_ub);
4150 hrtime_t start_time = gethrtime();
4151
4152 /* Add a small random factor in case of simultaneous imports (0-25%) */
4153 import_delay += import_delay * random_in_range(250) / 1000;
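	/*
	 * random_in_range(250) returns a value in [0, 250), so the slack
	 * added above is between 0% and just under 25% of import_delay.
	 */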
4154 hrtime_t import_expire = gethrtime() + import_delay;
4155
4156 if (importing) {
4157 /* Console message includes tryimport and claim time */
4158 hrtime_t extra_delay = MMP_IMPORT_VERIFY_ITERS *
4159 MSEC2NSEC(MMP_INTERVAL_VALID(spa_ub) ?
4160 MMP_INTERVAL(spa_ub) : MMP_MIN_INTERVAL);
4161 cmn_err(CE_NOTE, "pool '%s' activity check required, "
4162 "%llu seconds remaining", spa_load_name(spa),
4163 (u_longlong_t)MAX(NSEC2SEC(import_delay + extra_delay), 1));
4164 spa_import_progress_set_notes(spa, "Checking MMP activity, "
4165 "waiting %llu ms", (u_longlong_t)NSEC2MSEC(import_delay));
4166 }
4167
4168 hrtime_t now;
4169 nvlist_t *mmp_label = NULL;
4170
4171 while ((now = gethrtime()) < import_expire) {
4172 vdev_t *rvd = spa->spa_root_vdev;
4173 uberblock_t mmp_ub;
4174
4175 if (importing) {
4176 (void) spa_import_progress_set_mmp_check(spa_guid(spa),
4177 NSEC2SEC(import_expire - gethrtime()));
4178 }
4179
4180 vdev_uberblock_load(rvd, &mmp_ub, &mmp_label);
4181
4182 if (vdev_uberblock_compare(spa_ub, &mmp_ub)) {
4183 spa_load_failed(spa, "mmp: activity detected during "
4184 "tryimport, spa_ub_txg=%llu mmp_ub_txg=%llu "
4185 "spa_ub_seq=%llu mmp_ub_seq=%llu "
4186 "spa_ub_timestamp=%llu mmp_ub_timestamp=%llu "
4187 "spa_ub_config=%#llx mmp_ub_config=%#llx",
4188 (u_longlong_t)spa_ub->ub_txg,
4189 (u_longlong_t)mmp_ub.ub_txg,
4190 (u_longlong_t)(MMP_SEQ_VALID(spa_ub) ?
4191 MMP_SEQ(spa_ub) : 0),
4192 (u_longlong_t)(MMP_SEQ_VALID(&mmp_ub) ?
4193 MMP_SEQ(&mmp_ub) : 0),
4194 (u_longlong_t)spa_ub->ub_timestamp,
4195 (u_longlong_t)mmp_ub.ub_timestamp,
4196 (u_longlong_t)spa_ub->ub_mmp_config,
4197 (u_longlong_t)mmp_ub.ub_mmp_config);
4198 error = SET_ERROR(EREMOTEIO);
4199 break;
4200 }
4201
4202 if (mmp_label) {
4203 nvlist_free(mmp_label);
4204 mmp_label = NULL;
4205 }
4206
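		/*
		 * cv_timedwait_sig() returns -1 on timeout.  Nothing ever
		 * signals this cv, so any other return value means the wait
		 * was interrupted by a signal and the check is abandoned.
		 */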
4207 error = cv_timedwait_sig(&cv, &mtx, ddi_get_lbolt() + hz);
4208 if (error != -1) {
4209 error = SET_ERROR(EINTR);
4210 break;
4211 }
4212 error = 0;
4213 }
4214
4215 mutex_exit(&mtx);
4216 mutex_destroy(&mtx);
4217 cv_destroy(&cv);
4218
4219 if (mmp_label)
4220 nvlist_free(mmp_label);
4221
4222 if (spa->spa_load_state == SPA_LOAD_IMPORT ||
4223 spa->spa_load_state == SPA_LOAD_OPEN) {
4224 spa->spa_mmp.mmp_import_ns = gethrtime() - start_time;
4225 } else {
4226 spa->spa_mmp.mmp_tryimport_ns = gethrtime() - start_time;
4227 }
4228
4229 return (error);
4230 }
4231
4232 /*
4233 * Remote host activity check. Performed during import when the pool has
4234 * passed most sanity checks and has been reopened read/write.
4235 *
4236 * error results:
4237 * 0 - no activity detected
4238 * EREMOTEIO - remote activity detected
4239 * EINTR - user canceled the operation
4240 */
4241 static int
4242 spa_activity_check_claim(spa_t *spa)
4243 {
4244 vdev_t *rvd = spa->spa_root_vdev;
4245 nvlist_t *mmp_label;
4246 uberblock_t spa_ub;
4247 kcondvar_t cv;
4248 kmutex_t mtx;
4249 int error = 0;
4250
4251 cv_init(&cv, NULL, CV_DEFAULT, NULL);
4252 mutex_init(&mtx, NULL, MUTEX_DEFAULT, NULL);
4253 mutex_enter(&mtx);
4254
4255 hrtime_t start_time = gethrtime();
4256
4257 /*
4258 * Load the best uberblock and verify it matches the uberblock already
4259 * identified and stored as spa->spa_uberblock to verify the pool has
4260 * not changed.
4261 */
4262 vdev_uberblock_load(rvd, &spa_ub, &mmp_label);
4263
4264 if (memcmp(&spa->spa_uberblock, &spa_ub, sizeof (uberblock_t))) {
4265 spa_load_failed(spa, "mmp: uberblock changed on disk");
4266 error = SET_ERROR(EREMOTEIO);
4267 goto out;
4268 }
4269
4270 if (!MMP_VALID(&spa_ub) || !MMP_INTERVAL_VALID(&spa_ub) ||
4271 !MMP_SEQ_VALID(&spa_ub) || !MMP_FAIL_INT_VALID(&spa_ub)) {
4272 spa_load_failed(spa, "mmp: is not enabled in spa uberblock");
4273 error = SET_ERROR(EREMOTEIO);
4274 goto out;
4275 }
4276
4277 nvlist_free(mmp_label);
4278 mmp_label = NULL;
4279
4280 uint64_t spa_ub_interval = MMP_INTERVAL(&spa_ub);
4281 uint16_t spa_ub_seq = MMP_SEQ(&spa_ub);
4282
4283 /*
4284 * In the highly unlikely event the sequence numbers have been
4285 * exhausted, reset the sequence to zero. As long as the MMP
4286 * uberblock is updated on all of the vdevs the activity will
4287 * still be detected.
4288 */
4289 if (MMP_SEQ_MAX == spa_ub_seq)
4290 spa_ub_seq = 0;
4291
4292 spa_import_progress_set_notes(spa,
4293 "Establishing MMP claim, waiting %llu ms",
4294 (u_longlong_t)(MMP_IMPORT_VERIFY_ITERS * spa_ub_interval));
4295
4296 /*
4297 * Repeatedly sync out an MMP uberblock with a randomly selected
4298 * sequence number, then read it back after the MMP interval. This
4299 * random value acts as a claim token and is visible on other hosts.
4300 * If the same random value is read back we can be certain no other
4301 * host is attempting to import the pool.
4302 */
4303 for (int i = MMP_IMPORT_VERIFY_ITERS; i > 0; i--) {
4304 uberblock_t set_ub, mmp_ub;
4305 uint16_t mmp_seq;
4306
4307 (void) spa_import_progress_set_mmp_check(spa_guid(spa),
4308 NSEC2SEC(i * MSEC2NSEC(spa_ub_interval)));
4309
4310 set_ub = spa_ub;
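		/*
		 * random_in_range(n) returns a value in [0, n), so the claim
		 * sequence chosen below always lies in
		 * [spa_ub_seq + 1, MMP_SEQ_MAX], i.e. strictly greater than
		 * the sequence currently on disk.
		 */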
4311 mmp_seq = spa_ub_seq + 1 +
4312 random_in_range(MMP_SEQ_MAX - spa_ub_seq);
4313 MMP_SEQ_CLEAR(&set_ub);
4314 set_ub.ub_mmp_config |= MMP_SEQ_SET(mmp_seq);
4315
4316 error = mmp_claim_uberblock(spa, rvd, &set_ub);
4317 if (error) {
4318 spa_load_failed(spa, "mmp: uberblock claim "
4319 "failed, error=%d", error);
4320 error = SET_ERROR(EREMOTEIO);
4321 break;
4322 }
4323
4324 error = cv_timedwait_sig(&cv, &mtx, ddi_get_lbolt() +
4325 MSEC_TO_TICK(spa_ub_interval));
4326 if (error != -1) {
4327 error = SET_ERROR(EINTR);
4328 break;
4329 }
4330
4331 vdev_uberblock_load(rvd, &mmp_ub, &mmp_label);
4332
4333 if (vdev_uberblock_compare(&set_ub, &mmp_ub)) {
4334 spa_load_failed(spa, "mmp: activity detected during "
4335 "claim, set_ub_txg=%llu mmp_ub_txg=%llu "
4336 "set_ub_seq=%llu mmp_ub_seq=%llu "
4337 "set_ub_timestamp=%llu mmp_ub_timestamp=%llu "
4338 "set_ub_config=%#llx mmp_ub_config=%#llx",
4339 (u_longlong_t)set_ub.ub_txg,
4340 (u_longlong_t)mmp_ub.ub_txg,
4341 (u_longlong_t)(MMP_SEQ_VALID(&set_ub) ?
4342 MMP_SEQ(&set_ub) : 0),
4343 (u_longlong_t)(MMP_SEQ_VALID(&mmp_ub) ?
4344 MMP_SEQ(&mmp_ub) : 0),
4345 (u_longlong_t)set_ub.ub_timestamp,
4346 (u_longlong_t)mmp_ub.ub_timestamp,
4347 (u_longlong_t)set_ub.ub_mmp_config,
4348 (u_longlong_t)mmp_ub.ub_mmp_config);
4349 error = SET_ERROR(EREMOTEIO);
4350 break;
4351 }
4352
4353 if (mmp_label) {
4354 nvlist_free(mmp_label);
4355 mmp_label = NULL;
4356 }
4357
4358 error = 0;
4359 }
4360 out:
4361 spa->spa_mmp.mmp_claim_ns = gethrtime() - start_time;
4362 (void) spa_import_progress_set_mmp_check(spa_guid(spa), 0);
4363
4364 if (error == EREMOTEIO) {
4365 spa_activity_set_load_info(spa, mmp_label,
4366 MMP_STATE_ACTIVE, 0, 0, EREMOTEIO);
4367 } else {
4368 spa_activity_set_load_info(spa, mmp_label,
4369 MMP_STATE_INACTIVE, spa_ub.ub_txg, MMP_SEQ(&spa_ub), 0);
4370 }
4371
4372 /*
4373 * Restore the original sequence; this allows us to retry the
4374 * import procedure if a subsequent step fails during import.
4375 * Failure to restore it reduces the available sequence ids for
4376 * the next import but shouldn't be considered fatal.
4377 */
4378 int restore_error = mmp_claim_uberblock(spa, rvd, &spa_ub);
4379 if (restore_error) {
4380 zfs_dbgmsg("mmp: uberblock restore failed, spa=%s error=%d",
4381 spa_load_name(spa), restore_error);
4382 }
4383
4384 if (mmp_label)
4385 nvlist_free(mmp_label);
4386
4387 mutex_exit(&mtx);
4388 mutex_destroy(&mtx);
4389 cv_destroy(&cv);
4390
4391 return (error);
4392 }
4393
4394 static int
4395 spa_ld_activity_check(spa_t *spa, uberblock_t *ub, nvlist_t *label)
4396 {
4397 vdev_t *rvd = spa->spa_root_vdev;
4398 int error;
4399
4400 if (ub->ub_mmp_magic == MMP_MAGIC && ub->ub_mmp_delay &&
4401 spa_get_hostid(spa) == 0) {
4402 spa_activity_set_load_info(spa, label, MMP_STATE_NO_HOSTID,
4403 ub->ub_txg, MMP_SEQ_VALID(ub) ? MMP_SEQ(ub) : 0, ENXIO);
4404 zfs_dbgmsg("mmp: system hostid not set, ub_mmp_magic=%llx "
4405 "ub_mmp_delay=%llu hostid=%llx",
4406 (u_longlong_t)ub->ub_mmp_magic,
4407 (u_longlong_t)ub->ub_mmp_delay,
4408 (u_longlong_t)spa_get_hostid(spa));
4409 return (spa_vdev_err(rvd, VDEV_AUX_ACTIVE, ENXIO));
4410 }
4411
4412 switch (spa->spa_load_state) {
4413 case SPA_LOAD_TRYIMPORT:
4414 tryimport:
4415 error = spa_activity_check_tryimport(spa, ub, B_TRUE);
4416 if (error == EREMOTEIO) {
4417 spa_activity_set_load_info(spa, label,
4418 MMP_STATE_ACTIVE, 0, 0, EREMOTEIO);
4419 return (spa_vdev_err(rvd, VDEV_AUX_ACTIVE, EREMOTEIO));
4420 } else if (error) {
4421 ASSERT3S(error, ==, EINTR);
4422 spa_activity_set_load_info(spa, label,
4423 MMP_STATE_ACTIVE, 0, 0, EINTR);
4424 return (error);
4425 }
4426
4427 spa_activity_set_load_info(spa, label, MMP_STATE_INACTIVE,
4428 ub->ub_txg, MMP_SEQ_VALID(ub) ? MMP_SEQ(ub) : 0, 0);
4429
4430 break;
4431
4432 case SPA_LOAD_IMPORT:
4433 case SPA_LOAD_OPEN:
4434 error = spa_activity_verify_config(spa, ub);
4435 if (error == EREMOTEIO) {
4436 spa_activity_set_load_info(spa, label,
4437 MMP_STATE_ACTIVE, 0, 0, EREMOTEIO);
4438 return (spa_vdev_err(rvd, VDEV_AUX_ACTIVE, EREMOTEIO));
4439 } else if (error) {
4440 ASSERT3S(error, ==, ENOENT);
4441 goto tryimport;
4442 }
4443
4444 /* Load info set in spa_activity_check_claim() */
4445
4446 break;
4447
4448 case SPA_LOAD_RECOVER:
4449 zfs_dbgmsg("mmp: skipping mmp check for rewind, spa=%s",
4450 spa_load_name(spa));
4451 break;
4452
4453 default:
4454 spa_activity_set_load_info(spa, label, MMP_STATE_ACTIVE,
4455 0, 0, EREMOTEIO);
4456 zfs_dbgmsg("mmp: unreachable, spa=%s spa_load_state=%d",
4457 spa_load_name(spa), spa->spa_load_state);
4458 return (spa_vdev_err(rvd, VDEV_AUX_ACTIVE, EREMOTEIO));
4459 }
4460
4461 return (0);
4462 }
4463
4464 /*
4465 * Called from zfs_ioc_clear for a pool that was suspended
4466 * after failing mmp write checks.
4467 */
4468 boolean_t
4469 spa_mmp_remote_host_activity(spa_t *spa)
4470 {
4471 ASSERT(spa_multihost(spa) && spa_suspended(spa));
4472
4473 nvlist_t *best_label;
4474 uberblock_t best_ub;
4475
4476 /*
4477 * Locate the best uberblock on disk
4478 */
4479 vdev_uberblock_load(spa->spa_root_vdev, &best_ub, &best_label);
4480 if (best_label) {
4481 /*
4482 * confirm that the best hostid matches our hostid
4483 */
4484 if (nvlist_exists(best_label, ZPOOL_CONFIG_HOSTID) &&
4485 spa_get_hostid(spa) !=
4486 fnvlist_lookup_uint64(best_label, ZPOOL_CONFIG_HOSTID)) {
4487 nvlist_free(best_label);
4488 return (B_TRUE);
4489 }
4490 nvlist_free(best_label);
4491 } else {
4492 return (B_TRUE);
4493 }
4494
4495 if (!MMP_VALID(&best_ub) ||
4496 !MMP_FAIL_INT_VALID(&best_ub) ||
4497 MMP_FAIL_INT(&best_ub) == 0) {
4498 return (B_TRUE);
4499 }
4500
4501 if (best_ub.ub_txg != spa->spa_uberblock.ub_txg ||
4502 best_ub.ub_timestamp != spa->spa_uberblock.ub_timestamp) {
4503 zfs_dbgmsg("mmp: txg mismatch detected during pool clear, "
4504 "spa=%s txg=%llu ub_txg=%llu timestamp=%llu "
4505 "ub_timestamp=%llu", spa_name(spa),
4506 (u_longlong_t)spa->spa_uberblock.ub_txg,
4507 (u_longlong_t)best_ub.ub_txg,
4508 (u_longlong_t)spa->spa_uberblock.ub_timestamp,
4509 (u_longlong_t)best_ub.ub_timestamp);
4510 return (B_TRUE);
4511 }
4512
4513 /*
4514 * Perform an activity check looking for any remote writer
4515 */
4516 return (spa_activity_check_tryimport(spa, &best_ub, B_FALSE) != 0);
4517 }
4518
4519 static int
4520 spa_verify_host(spa_t *spa, nvlist_t *mos_config)
4521 {
4522 uint64_t hostid;
4523 const char *hostname;
4524 uint64_t myhostid = 0;
4525
4526 if (!spa_is_root(spa) && nvlist_lookup_uint64(mos_config,
4527 ZPOOL_CONFIG_HOSTID, &hostid) == 0) {
4528 hostname = fnvlist_lookup_string(mos_config,
4529 ZPOOL_CONFIG_HOSTNAME);
4530
4531 myhostid = zone_get_hostid(NULL);
4532
4533 if (hostid != 0 && myhostid != 0 && hostid != myhostid) {
4534 cmn_err(CE_WARN, "pool '%s' could not be "
4535 "loaded as it was last accessed by "
4536 "another system (host: %s hostid: 0x%llx). "
4537 "See: https://openzfs.github.io/openzfs-docs/msg/"
4538 "ZFS-8000-EY",
4539 spa_name(spa), hostname, (u_longlong_t)hostid);
4540 spa_load_failed(spa, "hostid verification failed: pool "
4541 "last accessed by host: %s (hostid: 0x%llx)",
4542 hostname, (u_longlong_t)hostid);
4543 return (SET_ERROR(EBADF));
4544 }
4545 }
4546
4547 return (0);
4548 }
4549
4550 static int
4551 spa_ld_parse_config(spa_t *spa, spa_import_type_t type)
4552 {
4553 int error = 0;
4554 nvlist_t *nvtree, *nvl, *config = spa->spa_config;
4555 int parse;
4556 vdev_t *rvd;
4557 uint64_t pool_guid;
4558 const char *comment;
4559 const char *compatibility;
4560
4561 /*
4562 * Versioning wasn't explicitly added to the label until later, so if
4563 * it's not present treat it as the initial version.
4564 */
4565 if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
4566 &spa->spa_ubsync.ub_version) != 0)
4567 spa->spa_ubsync.ub_version = SPA_VERSION_INITIAL;
4568
4569 if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID, &pool_guid)) {
4570 spa_load_failed(spa, "invalid config provided: '%s' missing",
4571 ZPOOL_CONFIG_POOL_GUID);
4572 return (SET_ERROR(EINVAL));
4573 }
4574
4575 /*
4576 * If we are doing an import, ensure that the pool is not already
4577 * imported by checking if its pool guid already exists in the
4578 * spa namespace.
4579 *
4580 * The only case that we allow an already imported pool to be
4581 * imported again, is when the pool is checkpointed and we want to
4582 * look at its checkpointed state from userland tools like zdb.
4583 */
4584 #ifdef _KERNEL
4585 if ((spa->spa_load_state == SPA_LOAD_IMPORT ||
4586 spa->spa_load_state == SPA_LOAD_TRYIMPORT) &&
4587 spa_guid_exists(pool_guid, 0)) {
4588 #else
4589 if ((spa->spa_load_state == SPA_LOAD_IMPORT ||
4590 spa->spa_load_state == SPA_LOAD_TRYIMPORT) &&
4591 spa_guid_exists(pool_guid, 0) &&
4592 !spa_importing_readonly_checkpoint(spa)) {
4593 #endif
4594 spa_load_failed(spa, "a pool with guid %llu is already open",
4595 (u_longlong_t)pool_guid);
4596 return (SET_ERROR(EEXIST));
4597 }
4598
4599 spa->spa_config_guid = pool_guid;
4600
4601 nvlist_free(spa->spa_load_info);
4602 spa->spa_load_info = fnvlist_alloc();
4603
4604 ASSERT0P(spa->spa_comment);
4605 if (nvlist_lookup_string(config, ZPOOL_CONFIG_COMMENT, &comment) == 0)
4606 spa->spa_comment = spa_strdup(comment);
4607
4608 ASSERT0P(spa->spa_compatibility);
4609 if (nvlist_lookup_string(config, ZPOOL_CONFIG_COMPATIBILITY,
4610 &compatibility) == 0)
4611 spa->spa_compatibility = spa_strdup(compatibility);
4612
4613 (void) nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_TXG,
4614 &spa->spa_config_txg);
4615
4616 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_SPLIT, &nvl) == 0)
4617 spa->spa_config_splitting = fnvlist_dup(nvl);
4618
4619 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &nvtree)) {
4620 spa_load_failed(spa, "invalid config provided: '%s' missing",
4621 ZPOOL_CONFIG_VDEV_TREE);
4622 return (SET_ERROR(EINVAL));
4623 }
4624
4625 /*
4626 * Create "The Godfather" zio to hold all async IOs
4627 */
4628 spa->spa_async_zio_root = kmem_alloc(max_ncpus * sizeof (void *),
4629 KM_SLEEP);
4630 for (int i = 0; i < max_ncpus; i++) {
4631 spa->spa_async_zio_root[i] = zio_root(spa, NULL, NULL,
4632 ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE |
4633 ZIO_FLAG_GODFATHER);
4634 }
4635
4636 /*
4637 * Parse the configuration into a vdev tree. We explicitly set the
4638 * value that will be returned by spa_version() since parsing the
4639 * configuration requires knowing the version number.
4640 */
4641 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
4642 parse = (type == SPA_IMPORT_EXISTING ?
4643 VDEV_ALLOC_LOAD : VDEV_ALLOC_SPLIT);
4644 error = spa_config_parse(spa, &rvd, nvtree, NULL, 0, parse);
4645 spa_config_exit(spa, SCL_ALL, FTAG);
4646
4647 if (error != 0) {
4648 spa_load_failed(spa, "unable to parse config [error=%d]",
4649 error);
4650 return (error);
4651 }
4652
4653 ASSERT(spa->spa_root_vdev == rvd);
4654 ASSERT3U(spa->spa_min_ashift, >=, SPA_MINBLOCKSHIFT);
4655 ASSERT3U(spa->spa_max_ashift, <=, SPA_MAXBLOCKSHIFT);
4656
4657 if (type != SPA_IMPORT_ASSEMBLE) {
4658 ASSERT(spa_guid(spa) == pool_guid);
4659 }
4660
4661 return (0);
4662 }
4663
4664 /*
4665 * Recursively open all vdevs in the vdev tree. This function is called twice:
4666 * first with the untrusted config, then with the trusted config.
4667 */
4668 static int
4669 spa_ld_open_vdevs(spa_t *spa)
4670 {
4671 int error = 0;
4672
4673 /*
4674 * spa_missing_tvds_allowed defines how many top-level vdevs can be
4675 * missing/unopenable for the root vdev to be still considered openable.
4676 */
4677 if (spa->spa_trust_config) {
4678 spa->spa_missing_tvds_allowed = zfs_max_missing_tvds;
4679 } else if (spa->spa_config_source == SPA_CONFIG_SRC_CACHEFILE) {
4680 spa->spa_missing_tvds_allowed = zfs_max_missing_tvds_cachefile;
4681 } else if (spa->spa_config_source == SPA_CONFIG_SRC_SCAN) {
4682 spa->spa_missing_tvds_allowed = zfs_max_missing_tvds_scan;
4683 } else {
4684 spa->spa_missing_tvds_allowed = 0;
4685 }
4686
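	/*
	 * Regardless of the config source, zfs_max_missing_tvds acts as a
	 * global floor on how many missing top-level vdevs are tolerated.
	 */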
4687 spa->spa_missing_tvds_allowed =
4688 MAX(zfs_max_missing_tvds, spa->spa_missing_tvds_allowed);
4689
4690 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
4691 error = vdev_open(spa->spa_root_vdev);
4692 spa_config_exit(spa, SCL_ALL, FTAG);
4693
4694 if (spa->spa_missing_tvds != 0) {
4695 spa_load_note(spa, "vdev tree has %lld missing top-level "
4696 "vdevs.", (u_longlong_t)spa->spa_missing_tvds);
4697 if (spa->spa_trust_config && (spa->spa_mode & SPA_MODE_WRITE)) {
4698 /*
4699 * Although theoretically we could allow users to open
4700 * incomplete pools in RW mode, we'd need to add a lot
4701 * of extra logic (e.g. adjust pool space to account
4702 * for missing vdevs).
4703 * This limitation also prevents users from accidentally
4704 * opening the pool in RW mode during data recovery and
4705 * damaging it further.
4706 */
4707 spa_load_note(spa, "pools with missing top-level "
4708 "vdevs can only be opened in read-only mode.");
4709 error = SET_ERROR(ENXIO);
4710 } else {
4711 spa_load_note(spa, "current settings allow for maximum "
4712 "%lld missing top-level vdevs at this stage.",
4713 (u_longlong_t)spa->spa_missing_tvds_allowed);
4714 }
4715 }
4716 if (error != 0) {
4717 spa_load_failed(spa, "unable to open vdev tree [error=%d]",
4718 error);
4719 }
4720 if (spa->spa_missing_tvds != 0 || error != 0)
4721 vdev_dbgmsg_print_tree(spa->spa_root_vdev, 2);
4722
4723 return (error);
4724 }
4725
4726 /*
4727 * We need to validate the vdev labels against the configuration that
4728 * we have in hand. This function is called twice: first with an untrusted
4729 * config, then with a trusted config. The validation is more strict when the
4730 * config is trusted.
4731 */
4732 static int
4733 spa_ld_validate_vdevs(spa_t *spa)
4734 {
4735 int error = 0;
4736 vdev_t *rvd = spa->spa_root_vdev;
4737
4738 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
4739 error = vdev_validate(rvd);
4740 spa_config_exit(spa, SCL_ALL, FTAG);
4741
4742 if (error != 0) {
4743 spa_load_failed(spa, "vdev_validate failed [error=%d]", error);
4744 return (error);
4745 }
4746
4747 if (rvd->vdev_state <= VDEV_STATE_CANT_OPEN) {
4748 spa_load_failed(spa, "cannot open vdev tree after invalidating "
4749 "some vdevs");
4750 vdev_dbgmsg_print_tree(rvd, 2);
4751 return (SET_ERROR(ENXIO));
4752 }
4753
4754 return (0);
4755 }
4756
4757 static void
4758 spa_ld_select_uberblock_done(spa_t *spa, uberblock_t *ub)
4759 {
4760 spa->spa_state = POOL_STATE_ACTIVE;
4761 spa->spa_ubsync = spa->spa_uberblock;
4762 spa->spa_verify_min_txg = spa->spa_extreme_rewind ?
4763 TXG_INITIAL - 1 : spa_last_synced_txg(spa) - TXG_DEFER_SIZE - 1;
4764 spa->spa_first_txg = spa->spa_last_ubsync_txg ?
4765 spa->spa_last_ubsync_txg : spa_last_synced_txg(spa) + 1;
4766 spa->spa_claim_max_txg = spa->spa_first_txg;
4767 spa->spa_prev_software_version = ub->ub_software_version;
4768 }
4769
4770 static int
4771 spa_ld_select_uberblock(spa_t *spa, spa_import_type_t type)
4772 {
4773 vdev_t *rvd = spa->spa_root_vdev;
4774 nvlist_t *label;
4775 uberblock_t *ub = &spa->spa_uberblock;
4776
4777 /*
4778 * If we are opening the checkpointed state of the pool by
4779 * rewinding to it, at this point we will have written the
4780 * checkpointed uberblock to the vdev labels, so searching
4781 * the labels will find the right uberblock. However, if
4782 * we are opening the checkpointed state read-only, we have
4783 * not modified the labels. Therefore, we must ignore the
4784 * labels and continue using the spa_uberblock that was set
4785 * by spa_ld_checkpoint_rewind.
4786 *
4787 * Note that it would be fine to ignore the labels when
4788 * rewinding (opening writeable) as well. However, if we
4789 * crash just after writing the labels, we will end up
4790 * searching the labels. Doing so in the common case means
4791 * that this code path gets exercised normally, rather than
4792 * just in the edge case.
4793 */
4794 if (ub->ub_checkpoint_txg != 0 &&
4795 spa_importing_readonly_checkpoint(spa)) {
4796 spa_ld_select_uberblock_done(spa, ub);
4797 return (0);
4798 }
4799
4800 /*
4801 * Find the best uberblock.
4802 */
4803 vdev_uberblock_load(rvd, ub, &label);
4804
4805 /*
4806 * If we weren't able to find a single valid uberblock, return failure.
4807 */
4808 if (ub->ub_txg == 0) {
4809 nvlist_free(label);
4810 spa_load_failed(spa, "no valid uberblock found");
4811 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, ENXIO));
4812 }
4813
4814 if (spa->spa_load_max_txg != UINT64_MAX) {
4815 (void) spa_import_progress_set_max_txg(spa_guid(spa),
4816 (u_longlong_t)spa->spa_load_max_txg);
4817 }
4818 spa_load_note(spa, "using uberblock with txg=%llu",
4819 (u_longlong_t)ub->ub_txg);
4820 if (ub->ub_raidz_reflow_info != 0) {
4821 spa_load_note(spa, "uberblock raidz_reflow_info: "
4822 "state=%u offset=%llu",
4823 (int)RRSS_GET_STATE(ub),
4824 (u_longlong_t)RRSS_GET_OFFSET(ub));
4825 }
4826
4827 /*
4828 * For pools which have the multihost property on, determine if the
4829 * pool is truly inactive and can be safely imported. Prevent
4830 * hosts which don't have a hostid set from importing the pool.
4831 */
4832 spa->spa_activity_check = spa_activity_check_required(spa, ub, label);
4833 if (spa->spa_activity_check) {
4834 int error = spa_ld_activity_check(spa, ub, label);
4835 if (error) {
4836 spa_load_state_t state = spa->spa_load_state;
4837 error = spa_ld_activity_result(spa, error,
4838 state == SPA_LOAD_TRYIMPORT ? "tryimport" :
4839 state == SPA_LOAD_IMPORT ? "import" : "open");
4840 nvlist_free(label);
4841 return (error);
4842 }
4843 } else {
4844 fnvlist_add_uint32(spa->spa_load_info,
4845 ZPOOL_CONFIG_MMP_RESULT, ESRCH);
4846 }
4847
4848 /*
4849 * If the pool has an unsupported version we can't open it.
4850 */
4851 if (!SPA_VERSION_IS_SUPPORTED(ub->ub_version)) {
4852 nvlist_free(label);
4853 spa_load_failed(spa, "version %llu is not supported",
4854 (u_longlong_t)ub->ub_version);
4855 return (spa_vdev_err(rvd, VDEV_AUX_VERSION_NEWER, ENOTSUP));
4856 }
4857
4858 if (ub->ub_version >= SPA_VERSION_FEATURES) {
4859 nvlist_t *features;
4860
4861 /*
4862 * If we weren't able to find what's necessary for reading the
4863 * MOS in the label, return failure.
4864 */
4865 if (label == NULL) {
4866 spa_load_failed(spa, "label config unavailable");
4867 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA,
4868 ENXIO));
4869 }
4870
4871 if (nvlist_lookup_nvlist(label, ZPOOL_CONFIG_FEATURES_FOR_READ,
4872 &features) != 0) {
4873 nvlist_free(label);
4874 spa_load_failed(spa, "invalid label: '%s' missing",
4875 ZPOOL_CONFIG_FEATURES_FOR_READ);
4876 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA,
4877 ENXIO));
4878 }
4879
4880 /*
4881 * Update our in-core representation with the definitive values
4882 * from the label.
4883 */
4884 nvlist_free(spa->spa_label_features);
4885 spa->spa_label_features = fnvlist_dup(features);
4886 }
4887
4888 nvlist_free(label);
4889
4890 /*
4891 * Look through entries in the label nvlist's features_for_read. If
4892 * there is a feature listed there which we don't understand then we
4893 * cannot open a pool.
4894 */
4895 if (ub->ub_version >= SPA_VERSION_FEATURES) {
4896 nvlist_t *unsup_feat;
4897
4898 unsup_feat = fnvlist_alloc();
4899
4900 for (nvpair_t *nvp = nvlist_next_nvpair(spa->spa_label_features,
4901 NULL); nvp != NULL;
4902 nvp = nvlist_next_nvpair(spa->spa_label_features, nvp)) {
4903 if (!zfeature_is_supported(nvpair_name(nvp))) {
4904 fnvlist_add_string(unsup_feat,
4905 nvpair_name(nvp), "");
4906 }
4907 }
4908
4909 if (!nvlist_empty(unsup_feat)) {
4910 fnvlist_add_nvlist(spa->spa_load_info,
4911 ZPOOL_CONFIG_UNSUP_FEAT, unsup_feat);
4912 nvlist_free(unsup_feat);
4913 spa_load_failed(spa, "some features are unsupported");
4914 return (spa_vdev_err(rvd, VDEV_AUX_UNSUP_FEAT,
4915 ENOTSUP));
4916 }
4917
4918 nvlist_free(unsup_feat);
4919 }
4920
4921 if (type != SPA_IMPORT_ASSEMBLE && spa->spa_config_splitting) {
4922 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
4923 spa_try_repair(spa, spa->spa_config);
4924 spa_config_exit(spa, SCL_ALL, FTAG);
4925 nvlist_free(spa->spa_config_splitting);
4926 spa->spa_config_splitting = NULL;
4927 }
4928
4929 /*
4930 * Initialize internal SPA structures.
4931 */
4932 spa_ld_select_uberblock_done(spa, ub);
4933
4934 return (0);
4935 }
4936
4937 static int
4938 spa_ld_open_rootbp(spa_t *spa)
4939 {
4940 int error = 0;
4941 vdev_t *rvd = spa->spa_root_vdev;
4942
4943 error = dsl_pool_init(spa, spa->spa_first_txg, &spa->spa_dsl_pool);
4944 if (error != 0) {
4945 spa_load_failed(spa, "unable to open rootbp in dsl_pool_init "
4946 "[error=%d]", error);
4947 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
4948 }
4949 spa->spa_meta_objset = spa->spa_dsl_pool->dp_meta_objset;
4950
4951 return (0);
4952 }
4953
4954 static int
4955 spa_ld_trusted_config(spa_t *spa, spa_import_type_t type,
4956 boolean_t reloading)
4957 {
4958 vdev_t *mrvd, *rvd = spa->spa_root_vdev;
4959 nvlist_t *nv, *mos_config, *policy;
4960 int error = 0, copy_error;
4961 uint64_t healthy_tvds, healthy_tvds_mos;
4962 uint64_t mos_config_txg;
4963
4964 if (spa_dir_prop(spa, DMU_POOL_CONFIG, &spa->spa_config_object, B_TRUE)
4965 != 0)
4966 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
4967
4968 /*
4969 * If we're assembling a pool from a split, the config provided is
4970 * already trusted so there is nothing to do.
4971 */
4972 if (type == SPA_IMPORT_ASSEMBLE)
4973 return (0);
4974
4975 healthy_tvds = spa_healthy_core_tvds(spa);
4976
4977 if (load_nvlist(spa, spa->spa_config_object, &mos_config)
4978 != 0) {
4979 spa_load_failed(spa, "unable to retrieve MOS config");
4980 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
4981 }
4982
4983 /*
4984 * If we are doing an open, the pool owner wasn't verified yet, so do
4985 * the verification here.
4986 */
4987 if (spa->spa_load_state == SPA_LOAD_OPEN) {
4988 error = spa_verify_host(spa, mos_config);
4989 if (error != 0) {
4990 nvlist_free(mos_config);
4991 return (error);
4992 }
4993 }
4994
4995 nv = fnvlist_lookup_nvlist(mos_config, ZPOOL_CONFIG_VDEV_TREE);
4996
4997 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
4998
4999 /*
5000 * Build a new vdev tree from the trusted config
5001 */
5002 error = spa_config_parse(spa, &mrvd, nv, NULL, 0, VDEV_ALLOC_LOAD);
5003 if (error != 0) {
5004 nvlist_free(mos_config);
5005 spa_config_exit(spa, SCL_ALL, FTAG);
5006 spa_load_failed(spa, "spa_config_parse failed [error=%d]",
5007 error);
5008 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, error));
5009 }
5010
5011 /*
5012 * Vdev paths in the MOS may be obsolete. If the untrusted config was
5013 * obtained by scanning /dev/dsk, then it will have the right vdev
5014 * paths. We update the trusted MOS config with this information.
5015 * We first try to copy the paths with vdev_copy_path_strict, which
5016 * succeeds only when both configs have exactly the same vdev tree.
5017 * If that fails, we fall back to a more flexible method that has a
5018 * best effort policy.
5019 */
5020 copy_error = vdev_copy_path_strict(rvd, mrvd);
5021 if (copy_error != 0 || spa_load_print_vdev_tree) {
5022 spa_load_note(spa, "provided vdev tree:");
5023 vdev_dbgmsg_print_tree(rvd, 2);
5024 spa_load_note(spa, "MOS vdev tree:");
5025 vdev_dbgmsg_print_tree(mrvd, 2);
5026 }
5027 if (copy_error != 0) {
5028 spa_load_note(spa, "vdev_copy_path_strict failed, falling "
5029 "back to vdev_copy_path_relaxed");
5030 vdev_copy_path_relaxed(rvd, mrvd);
5031 }
5032
5033 vdev_close(rvd);
5034 vdev_free(rvd);
5035 spa->spa_root_vdev = mrvd;
5036 rvd = mrvd;
5037 spa_config_exit(spa, SCL_ALL, FTAG);
5038
5039 /*
5040 * If 'zpool import' used a cached config, then the on-disk hostid and
5041 * hostname may be different to the cached config in ways that should
5042 * prevent import. Userspace can't discover this without a scan, but
5043 * we know, so we add these values to LOAD_INFO so the caller can know
5044 * the difference.
5045 *
5046 * Note that we have to do this before the config is regenerated,
5047 * because the new config will have the hostid and hostname for this
5048 * host, in readiness for import.
5049 */
5050 if (nvlist_exists(mos_config, ZPOOL_CONFIG_HOSTID))
5051 fnvlist_add_uint64(spa->spa_load_info, ZPOOL_CONFIG_HOSTID,
5052 fnvlist_lookup_uint64(mos_config, ZPOOL_CONFIG_HOSTID));
5053 if (nvlist_exists(mos_config, ZPOOL_CONFIG_HOSTNAME))
5054 fnvlist_add_string(spa->spa_load_info, ZPOOL_CONFIG_HOSTNAME,
5055 fnvlist_lookup_string(mos_config, ZPOOL_CONFIG_HOSTNAME));
5056
5057 /*
5058 * We will use spa_config if we decide to reload the spa or if spa_load
5059 * fails and we rewind. We must thus regenerate the config using the
5060 * MOS information with the updated paths. ZPOOL_LOAD_POLICY is used to
5061 * pass settings on how to load the pool and is not stored in the MOS.
5062 * We copy it over to our new, trusted config.
5063 */
5064 mos_config_txg = fnvlist_lookup_uint64(mos_config,
5065 ZPOOL_CONFIG_POOL_TXG);
5066 nvlist_free(mos_config);
5067 mos_config = spa_config_generate(spa, NULL, mos_config_txg, B_FALSE);
5068 if (nvlist_lookup_nvlist(spa->spa_config, ZPOOL_LOAD_POLICY,
5069 &policy) == 0)
5070 fnvlist_add_nvlist(mos_config, ZPOOL_LOAD_POLICY, policy);
5071 spa_config_set(spa, mos_config);
5072 spa->spa_config_source = SPA_CONFIG_SRC_MOS;
5073
5074 /*
5075 * Now that we got the config from the MOS, we should be more strict
5076 * in checking blkptrs and can make assumptions about the consistency
5077 * of the vdev tree. spa_trust_config must be set to true before opening
5078 * vdevs in order for them to be writeable.
5079 */
5080 spa->spa_trust_config = B_TRUE;
5081
5082 /*
5083 * Open and validate the new vdev tree
5084 */
5085 error = spa_ld_open_vdevs(spa);
5086 if (error != 0)
5087 return (error);
5088
5089 error = spa_ld_validate_vdevs(spa);
5090 if (error != 0)
5091 return (error);
5092
5093 if (copy_error != 0 || spa_load_print_vdev_tree) {
5094 spa_load_note(spa, "final vdev tree:");
5095 vdev_dbgmsg_print_tree(rvd, 2);
5096 }
5097
5098 if (spa->spa_load_state != SPA_LOAD_TRYIMPORT &&
5099 !spa->spa_extreme_rewind && zfs_max_missing_tvds == 0) {
5100 /*
5101 * Sanity check to make sure that we are indeed loading the
5102 * latest uberblock. If we missed SPA_SYNC_MIN_VDEVS tvds
5103 * in the config provided and they happened to be the only ones
5104 * to have the latest uberblock, we could involuntarily perform
5105 * an extreme rewind.
5106 */
5107 healthy_tvds_mos = spa_healthy_core_tvds(spa);
5108 if (healthy_tvds_mos - healthy_tvds >=
5109 SPA_SYNC_MIN_VDEVS) {
5110 spa_load_note(spa, "config provided misses too many "
5111 "top-level vdevs compared to MOS (%lld vs %lld). ",
5112 (u_longlong_t)healthy_tvds,
5113 (u_longlong_t)healthy_tvds_mos);
5114 spa_load_note(spa, "vdev tree:");
5115 vdev_dbgmsg_print_tree(rvd, 2);
5116 if (reloading) {
5117 spa_load_failed(spa, "config was already "
5118 "provided from MOS. Aborting.");
5119 return (spa_vdev_err(rvd,
5120 VDEV_AUX_CORRUPT_DATA, EIO));
5121 }
5122 spa_load_note(spa, "spa must be reloaded using MOS "
5123 "config");
5124 return (SET_ERROR(EAGAIN));
5125 }
5126 }
5127
5128 /*
5129 * Final sanity check for multihost pools that no other host is
5130 * accessing the pool. All of the read-only checks have passed at
5131 * this point; perform targeted updates to the mmp uberblocks to
5132 * safely force a visible change.
5133 */
5134 if (spa->spa_load_state != SPA_LOAD_TRYIMPORT &&
5135 !spa->spa_extreme_rewind && spa->spa_activity_check) {
5136
5137 error = spa_activity_check_claim(spa);
5138 error = spa_ld_activity_result(spa, error, "claim");
5139
5140 if (error == EREMOTEIO)
5141 return (spa_vdev_err(rvd, VDEV_AUX_ACTIVE, EREMOTEIO));
5142 else if (error)
5143 return (error);
5144 }
5145
5146 error = spa_check_for_missing_logs(spa);
5147 if (error != 0)
5148 return (spa_vdev_err(rvd, VDEV_AUX_BAD_GUID_SUM, ENXIO));
5149
5150 if (rvd->vdev_guid_sum != spa->spa_uberblock.ub_guid_sum) {
5151 spa_load_failed(spa, "uberblock guid sum doesn't match MOS "
5152 "guid sum (%llu != %llu)",
5153 (u_longlong_t)spa->spa_uberblock.ub_guid_sum,
5154 (u_longlong_t)rvd->vdev_guid_sum);
5155 return (spa_vdev_err(rvd, VDEV_AUX_BAD_GUID_SUM,
5156 ENXIO));
5157 }
5158
5159 return (0);
5160 }
5161
5162 static int
5163 spa_ld_open_indirect_vdev_metadata(spa_t *spa)
5164 {
5165 int error = 0;
5166 vdev_t *rvd = spa->spa_root_vdev;
5167
5168 /*
5169 * Everything that we read before spa_remove_init() must be stored
5170 * on concrete vdevs. Therefore we do this as early as possible.
5171 */
5172 error = spa_remove_init(spa);
5173 if (error != 0) {
5174 spa_load_failed(spa, "spa_remove_init failed [error=%d]",
5175 error);
5176 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
5177 }
5178
5179 /*
5180 * Retrieve information needed to condense indirect vdev mappings.
5181 */
5182 error = spa_condense_init(spa);
5183 if (error != 0) {
5184 spa_load_failed(spa, "spa_condense_init failed [error=%d]",
5185 error);
5186 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, error));
5187 }
5188
5189 return (0);
5190 }
5191
5192 static int
5193 spa_ld_check_features(spa_t *spa, boolean_t *missing_feat_writep)
5194 {
5195 int error = 0;
5196 vdev_t *rvd = spa->spa_root_vdev;
5197
5198 if (spa_version(spa) >= SPA_VERSION_FEATURES) {
5199 boolean_t missing_feat_read = B_FALSE;
5200 nvlist_t *unsup_feat, *enabled_feat;
5201
5202 if (spa_dir_prop(spa, DMU_POOL_FEATURES_FOR_READ,
5203 &spa->spa_feat_for_read_obj, B_TRUE) != 0) {
5204 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
5205 }
5206
5207 if (spa_dir_prop(spa, DMU_POOL_FEATURES_FOR_WRITE,
5208 &spa->spa_feat_for_write_obj, B_TRUE) != 0) {
5209 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
5210 }
5211
5212 if (spa_dir_prop(spa, DMU_POOL_FEATURE_DESCRIPTIONS,
5213 &spa->spa_feat_desc_obj, B_TRUE) != 0) {
5214 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
5215 }
5216
5217 enabled_feat = fnvlist_alloc();
5218 unsup_feat = fnvlist_alloc();
5219
5220 if (!spa_features_check(spa, B_FALSE,
5221 unsup_feat, enabled_feat))
5222 missing_feat_read = B_TRUE;
5223
5224 if (spa_writeable(spa) ||
5225 spa->spa_load_state == SPA_LOAD_TRYIMPORT) {
5226 if (!spa_features_check(spa, B_TRUE,
5227 unsup_feat, enabled_feat)) {
5228 *missing_feat_writep = B_TRUE;
5229 }
5230 }
5231
5232 fnvlist_add_nvlist(spa->spa_load_info,
5233 ZPOOL_CONFIG_ENABLED_FEAT, enabled_feat);
5234
5235 if (!nvlist_empty(unsup_feat)) {
5236 fnvlist_add_nvlist(spa->spa_load_info,
5237 ZPOOL_CONFIG_UNSUP_FEAT, unsup_feat);
5238 }
5239
5240 fnvlist_free(enabled_feat);
5241 fnvlist_free(unsup_feat);
5242
5243 if (!missing_feat_read) {
5244 fnvlist_add_boolean(spa->spa_load_info,
5245 ZPOOL_CONFIG_CAN_RDONLY);
5246 }
5247
5248 /*
5249 * If the state is SPA_LOAD_TRYIMPORT, our objective is
5250 * twofold: to determine whether the pool is available for
5251 * import in read-write mode and (if it is not) whether the
5252 * pool is available for import in read-only mode. If the pool
5253 * is available for import in read-write mode, it is displayed
5254 * as available in userland; if it is not available for import
5255 * in read-only mode, it is displayed as unavailable in
5256 * userland. If the pool is available for import in read-only
5257 * mode but not read-write mode, it is displayed as unavailable
5258 * in userland with a special note that the pool is actually
5259 * available for open in read-only mode.
5260 *
5261 * As a result, if the state is SPA_LOAD_TRYIMPORT and we are
5262 * missing a feature for write, we must first determine whether
5263 * the pool can be opened read-only before returning to
5264 * userland in order to know whether to display the
5265 * abovementioned note.
5266 */
5267 if (missing_feat_read || (*missing_feat_writep &&
5268 spa_writeable(spa))) {
5269 spa_load_failed(spa, "pool uses unsupported features");
5270 return (spa_vdev_err(rvd, VDEV_AUX_UNSUP_FEAT,
5271 ENOTSUP));
5272 }
5273
5274 /*
5275 * Load refcounts for ZFS features from disk into an in-memory
5276 * cache during SPA initialization.
5277 */
5278 for (spa_feature_t i = 0; i < SPA_FEATURES; i++) {
5279 uint64_t refcount;
5280
5281 error = feature_get_refcount_from_disk(spa,
5282 &spa_feature_table[i], &refcount);
5283 if (error == 0) {
5284 spa->spa_feat_refcount_cache[i] = refcount;
5285 } else if (error == ENOTSUP) {
5286 spa->spa_feat_refcount_cache[i] =
5287 SPA_FEATURE_DISABLED;
5288 } else {
5289 spa_load_failed(spa, "error getting refcount "
5290 "for feature %s [error=%d]",
5291 spa_feature_table[i].fi_guid, error);
5292 return (spa_vdev_err(rvd,
5293 VDEV_AUX_CORRUPT_DATA, EIO));
5294 }
5295 }
5296 }
5297
5298 if (spa_feature_is_active(spa, SPA_FEATURE_ENABLED_TXG)) {
5299 if (spa_dir_prop(spa, DMU_POOL_FEATURE_ENABLED_TXG,
5300 &spa->spa_feat_enabled_txg_obj, B_TRUE) != 0)
5301 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
5302 }
5303
5304 /*
5305 * Encryption was added before bookmark_v2, even though bookmark_v2
5306 * is now a dependency. If this pool has encryption enabled without
5307 * bookmark_v2, trigger an errata message.
5308 */
5309 if (spa_feature_is_enabled(spa, SPA_FEATURE_ENCRYPTION) &&
5310 !spa_feature_is_enabled(spa, SPA_FEATURE_BOOKMARK_V2)) {
5311 spa->spa_errata = ZPOOL_ERRATA_ZOL_8308_ENCRYPTION;
5312 }
5313
5314 return (0);
5315 }
5316
5317 static int
5318 spa_ld_load_special_directories(spa_t *spa)
5319 {
5320 int error = 0;
5321 vdev_t *rvd = spa->spa_root_vdev;
5322
5323 spa->spa_is_initializing = B_TRUE;
5324 error = dsl_pool_open(spa->spa_dsl_pool);
5325 spa->spa_is_initializing = B_FALSE;
5326 if (error != 0) {
5327 spa_load_failed(spa, "dsl_pool_open failed [error=%d]", error);
5328 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
5329 }
5330
5331 return (0);
5332 }
5333
5334 static int
5335 spa_ld_get_props(spa_t *spa)
5336 {
5337 int error = 0;
5338 uint64_t obj;
5339 vdev_t *rvd = spa->spa_root_vdev;
5340
5341 /* Grab the checksum salt from the MOS. */
5342 error = zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
5343 DMU_POOL_CHECKSUM_SALT, 1,
5344 sizeof (spa->spa_cksum_salt.zcs_bytes),
5345 spa->spa_cksum_salt.zcs_bytes);
5346 if (error == ENOENT) {
5347 /* Generate a new salt for subsequent use */
5348 (void) random_get_pseudo_bytes(spa->spa_cksum_salt.zcs_bytes,
5349 sizeof (spa->spa_cksum_salt.zcs_bytes));
5350 } else if (error != 0) {
5351 spa_load_failed(spa, "unable to retrieve checksum salt from "
5352 "MOS [error=%d]", error);
5353 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
5354 }
5355
5356 if (spa_dir_prop(spa, DMU_POOL_SYNC_BPOBJ, &obj, B_TRUE) != 0)
5357 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
5358 error = bpobj_open(&spa->spa_deferred_bpobj, spa->spa_meta_objset, obj);
5359 if (error != 0) {
5360 spa_load_failed(spa, "error opening deferred-frees bpobj "
5361 "[error=%d]", error);
5362 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
5363 }
5364
5365 /*
5366 * Load the bit that tells us to use the new accounting function
5367 * (raid-z deflation). If we have an older pool, this will not
5368 * be present.
5369 */
5370 error = spa_dir_prop(spa, DMU_POOL_DEFLATE, &spa->spa_deflate, B_FALSE);
5371 if (error != 0 && error != ENOENT)
5372 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
5373
5374 error = spa_dir_prop(spa, DMU_POOL_CREATION_VERSION,
5375 &spa->spa_creation_version, B_FALSE);
5376 if (error != 0 && error != ENOENT)
5377 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
5378
5379 /* Load the txg time log. */
5380 spa_load_txg_log_time(spa);
5381
5382 /*
5383 * Load the persistent error log. If we have an older pool, this will
5384 * not be present.
5385 */
5386 error = spa_dir_prop(spa, DMU_POOL_ERRLOG_LAST, &spa->spa_errlog_last,
5387 B_FALSE);
5388 if (error != 0 && error != ENOENT)
5389 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
5390
5391 error = spa_dir_prop(spa, DMU_POOL_ERRLOG_SCRUB,
5392 &spa->spa_errlog_scrub, B_FALSE);
5393 if (error != 0 && error != ENOENT)
5394 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
5395
5396 /* Load the last scrubbed txg. */
5397 error = spa_dir_prop(spa, DMU_POOL_LAST_SCRUBBED_TXG,
5398 &spa->spa_scrubbed_last_txg, B_FALSE);
5399 if (error != 0 && error != ENOENT)
5400 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
5401
5402 /*
5403 * Load the livelist deletion field. If a livelist is queued for
5404 * deletion, indicate that in the spa.
5405 */
5406 error = spa_dir_prop(spa, DMU_POOL_DELETED_CLONES,
5407 &spa->spa_livelists_to_delete, B_FALSE);
5408 if (error != 0 && error != ENOENT)
5409 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
5410
5411 /*
5412 * Load the history object. If we have an older pool, this
5413 * will not be present.
5414 */
5415 error = spa_dir_prop(spa, DMU_POOL_HISTORY, &spa->spa_history, B_FALSE);
5416 if (error != 0 && error != ENOENT)
5417 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
5418
5419 /*
5420 * Load the per-vdev ZAP map. If we have an older pool, this will not
5421 * be present; in this case, defer its creation to a later time to
5422 * avoid dirtying the MOS this early / out of sync context. See
5423 * spa_sync_config_object.
5424 */
5425
5426 /* The sentinel is only available in the MOS config. */
5427 nvlist_t *mos_config;
5428 if (load_nvlist(spa, spa->spa_config_object, &mos_config) != 0) {
5429 spa_load_failed(spa, "unable to retrieve MOS config");
5430 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
5431 }
5432
5433 error = spa_dir_prop(spa, DMU_POOL_VDEV_ZAP_MAP,
5434 &spa->spa_all_vdev_zaps, B_FALSE);
5435
5436 if (error == ENOENT) {
5437 VERIFY(!nvlist_exists(mos_config,
5438 ZPOOL_CONFIG_HAS_PER_VDEV_ZAPS));
5439 spa->spa_avz_action = AVZ_ACTION_INITIALIZE;
5440 ASSERT0(vdev_count_verify_zaps(spa->spa_root_vdev));
5441 } else if (error != 0) {
5442 nvlist_free(mos_config);
5443 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
5444 } else if (!nvlist_exists(mos_config, ZPOOL_CONFIG_HAS_PER_VDEV_ZAPS)) {
5445 /*
5446 * An older version of ZFS overwrote the sentinel value, so
5447 * we have orphaned per-vdev ZAPs in the MOS. Defer their
5448 * destruction to later; see spa_sync_config_object.
5449 */
5450 spa->spa_avz_action = AVZ_ACTION_DESTROY;
5451 /*
5452 * We're assuming that no vdevs have had their ZAPs created
5453 * before this. Better be sure of it.
5454 */
5455 ASSERT0(vdev_count_verify_zaps(spa->spa_root_vdev));
5456 }
5457 nvlist_free(mos_config);
5458
5459 spa->spa_delegation = zpool_prop_default_numeric(ZPOOL_PROP_DELEGATION);
5460
5461 error = spa_dir_prop(spa, DMU_POOL_PROPS, &spa->spa_pool_props_object,
5462 B_FALSE);
5463 if (error && error != ENOENT)
5464 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
5465
5466 if (error == 0) {
5467 uint64_t autoreplace = 0;
5468
5469 spa_prop_find(spa, ZPOOL_PROP_BOOTFS, &spa->spa_bootfs);
5470 spa_prop_find(spa, ZPOOL_PROP_AUTOREPLACE, &autoreplace);
5471 spa_prop_find(spa, ZPOOL_PROP_DELEGATION, &spa->spa_delegation);
5472 spa_prop_find(spa, ZPOOL_PROP_FAILUREMODE, &spa->spa_failmode);
5473 spa_prop_find(spa, ZPOOL_PROP_AUTOEXPAND, &spa->spa_autoexpand);
5474 spa_prop_find(spa, ZPOOL_PROP_DEDUP_TABLE_QUOTA,
5475 &spa->spa_dedup_table_quota);
5476 spa_prop_find(spa, ZPOOL_PROP_MULTIHOST, &spa->spa_multihost);
5477 spa_prop_find(spa, ZPOOL_PROP_AUTOTRIM, &spa->spa_autotrim);
5478 spa->spa_autoreplace = (autoreplace != 0);
5479 }
5480
5481 /*
5482 * If we are importing a pool with missing top-level vdevs,
5483 * we enforce that the pool doesn't panic or get suspended on
5484 * error since the likelihood of missing data is extremely high.
5485 */
5486 if (spa->spa_missing_tvds > 0 &&
5487 spa->spa_failmode != ZIO_FAILURE_MODE_CONTINUE &&
5488 spa->spa_load_state != SPA_LOAD_TRYIMPORT) {
5489 spa_load_note(spa, "forcing failmode to 'continue' "
5490 "as some top level vdevs are missing");
5491 spa->spa_failmode = ZIO_FAILURE_MODE_CONTINUE;
5492 }
5493
5494 return (0);
5495 }
5496
5497 static int
5498 spa_ld_open_aux_vdevs(spa_t *spa, spa_import_type_t type)
5499 {
5500 int error = 0;
5501 vdev_t *rvd = spa->spa_root_vdev;
5502
5503 /*
5504 * If we're assembling the pool from the split-off vdevs of
5505 * an existing pool, we don't want to attach the spares & cache
5506 * devices.
5507 */
5508
5509 /*
5510 * Load any hot spares for this pool.
5511 */
5512 error = spa_dir_prop(spa, DMU_POOL_SPARES, &spa->spa_spares.sav_object,
5513 B_FALSE);
5514 if (error != 0 && error != ENOENT)
5515 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
5516 if (error == 0 && type != SPA_IMPORT_ASSEMBLE) {
5517 ASSERT(spa_version(spa) >= SPA_VERSION_SPARES);
5518 if (load_nvlist(spa, spa->spa_spares.sav_object,
5519 &spa->spa_spares.sav_config) != 0) {
5520 spa_load_failed(spa, "error loading spares nvlist");
5521 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
5522 }
5523
5524 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
5525 spa_load_spares(spa);
5526 spa_config_exit(spa, SCL_ALL, FTAG);
5527 } else if (error == 0) {
5528 spa->spa_spares.sav_sync = B_TRUE;
5529 }
5530
5531 /*
5532 * Load any level 2 ARC devices for this pool.
5533 */
5534 error = spa_dir_prop(spa, DMU_POOL_L2CACHE,
5535 &spa->spa_l2cache.sav_object, B_FALSE);
5536 if (error != 0 && error != ENOENT)
5537 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
5538 if (error == 0 && type != SPA_IMPORT_ASSEMBLE) {
5539 ASSERT(spa_version(spa) >= SPA_VERSION_L2CACHE);
5540 if (load_nvlist(spa, spa->spa_l2cache.sav_object,
5541 &spa->spa_l2cache.sav_config) != 0) {
5542 spa_load_failed(spa, "error loading l2cache nvlist");
5543 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
5544 }
5545
5546 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
5547 spa_load_l2cache(spa);
5548 spa_config_exit(spa, SCL_ALL, FTAG);
5549 } else if (error == 0) {
5550 spa->spa_l2cache.sav_sync = B_TRUE;
5551 }
5552
5553 return (0);
5554 }
5555
5556 static int
5557 spa_ld_load_vdev_metadata(spa_t *spa)
5558 {
5559 int error = 0;
5560 vdev_t *rvd = spa->spa_root_vdev;
5561
5562 /*
5563 * If the 'multihost' property is set, then never allow a pool to
5564 * be imported when the system hostid is zero. The exception to
5565 * this rule is zdb which is always allowed to access pools.
5566 */
5567 if (spa_multihost(spa) && spa_get_hostid(spa) == 0 &&
5568 (spa->spa_import_flags & ZFS_IMPORT_SKIP_MMP) == 0) {
5569 fnvlist_add_uint64(spa->spa_load_info,
5570 ZPOOL_CONFIG_MMP_STATE, MMP_STATE_NO_HOSTID);
5571 return (spa_vdev_err(rvd, VDEV_AUX_ACTIVE, EREMOTEIO));
5572 }
5573
5574 /*
5575 * If the 'autoreplace' property is set, then post a resource notifying
5576 * the ZFS DE that it should not issue any faults for unopenable
5577 * devices. We also iterate over the vdevs, and post a sysevent for any
5578 * unopenable vdevs so that the normal autoreplace handler can take
5579 * over.
5580 */
5581 if (spa->spa_autoreplace && spa->spa_load_state != SPA_LOAD_TRYIMPORT) {
5582 spa_check_removed(spa->spa_root_vdev);
5583 /*
5584 * For the import case, this is done in spa_import(), because
5585 * at this point we're using the spare definitions from
5586 * the MOS config, not necessarily from the userland config.
5587 */
5588 if (spa->spa_load_state != SPA_LOAD_IMPORT) {
5589 spa_aux_check_removed(&spa->spa_spares);
5590 spa_aux_check_removed(&spa->spa_l2cache);
5591 }
5592 }
5593
5594 /*
5595 * Load the vdev metadata such as metaslabs, DTLs, spacemap object, etc.
5596 */
5597 error = vdev_load(rvd);
5598 if (error != 0) {
5599 spa_load_failed(spa, "vdev_load failed [error=%d]", error);
5600 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, error));
5601 }
5602
5603 error = spa_ld_log_spacemaps(spa);
5604 if (error != 0) {
5605 spa_load_failed(spa, "spa_ld_log_spacemaps failed [error=%d]",
5606 error);
5607 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, error));
5608 }
5609
5610 /*
5611 * Propagate the leaf DTLs we just loaded all the way up the vdev tree.
5612 */
5613 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
5614 vdev_dtl_reassess(rvd, 0, 0, B_FALSE, B_FALSE);
5615 spa_config_exit(spa, SCL_ALL, FTAG);
5616
5617 return (0);
5618 }
5619
5620 static int
5621 spa_ld_load_dedup_tables(spa_t *spa)
5622 {
5623 int error = 0;
5624 vdev_t *rvd = spa->spa_root_vdev;
5625
5626 error = ddt_load(spa);
5627 if (error != 0) {
5628 spa_load_failed(spa, "ddt_load failed [error=%d]", error);
5629 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
5630 }
5631
5632 return (0);
5633 }
5634
5635 static int
5636 spa_ld_load_brt(spa_t *spa)
5637 {
5638 int error = 0;
5639 vdev_t *rvd = spa->spa_root_vdev;
5640
5641 error = brt_load(spa);
5642 if (error != 0) {
5643 spa_load_failed(spa, "brt_load failed [error=%d]", error);
5644 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
5645 }
5646
5647 return (0);
5648 }
5649
5650 static int
5651 spa_ld_verify_logs(spa_t *spa, spa_import_type_t type, const char **ereport)
5652 {
5653 vdev_t *rvd = spa->spa_root_vdev;
5654
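/*
 * Only verify the logs if the pool will actually be written to.  A
 * failed log check is tolerated (the logs are simply dropped) when
 * top-level vdevs are already known to be missing; otherwise it is
 * fatal.
 */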
5655 if (type != SPA_IMPORT_ASSEMBLE && spa_writeable(spa)) {
5656 boolean_t missing = spa_check_logs(spa);
5657 if (missing) {
5658 if (spa->spa_missing_tvds != 0) {
5659 spa_load_note(spa, "spa_check_logs failed "
5660 "so dropping the logs");
5661 } else {
5662 *ereport = FM_EREPORT_ZFS_LOG_REPLAY;
5663 spa_load_failed(spa, "spa_check_logs failed");
5664 return (spa_vdev_err(rvd, VDEV_AUX_BAD_LOG,
5665 ENXIO));
5666 }
5667 }
5668 }
5669
5670 return (0);
5671 }
5672
5673 static int
5674 spa_ld_verify_pool_data(spa_t *spa)
5675 {
5676 int error = 0;
5677 vdev_t *rvd = spa->spa_root_vdev;
5678
5679 /*
5680 * We've successfully opened the pool, verify that we're ready
5681 * to start pushing transactions.
5682 */
5683 if (spa->spa_load_state != SPA_LOAD_TRYIMPORT) {
5684 error = spa_load_verify(spa);
5685 if (error != 0) {
5686 spa_load_failed(spa, "spa_load_verify failed "
5687 "[error=%d]", error);
5688 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA,
5689 error));
5690 }
5691 }
5692
5693 return (0);
5694 }
5695
5696 static void
5697 spa_ld_claim_log_blocks(spa_t *spa)
5698 {
5699 dmu_tx_t *tx;
5700 dsl_pool_t *dp = spa_get_dsl(spa);
5701
5702 /*
5703 * Claim log blocks that haven't been committed yet.
5704 * This must all happen in a single txg.
5705 * Note: spa_claim_max_txg is updated by spa_claim_notify(),
5706 * invoked from zil_claim_log_block()'s i/o done callback.
5707 * Price of rollback is that we abandon the log.
5708 */
5709 spa->spa_claiming = B_TRUE;
5710
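/* Walk every dataset in the pool and claim its intent-log blocks. */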
5711 tx = dmu_tx_create_assigned(dp, spa_first_txg(spa));
5712 (void) dmu_objset_find_dp(dp, dp->dp_root_dir_obj,
5713 zil_claim, tx, DS_FIND_CHILDREN);
5714 dmu_tx_commit(tx);
5715
5716 spa->spa_claiming = B_FALSE;
5717
5718 spa_set_log_state(spa, SPA_LOG_GOOD);
5719 }
5720
5721 static void
5722 spa_ld_check_for_config_update(spa_t *spa, uint64_t config_cache_txg,
5723 boolean_t update_config_cache)
5724 {
5725 vdev_t *rvd = spa->spa_root_vdev;
5726 int need_update = B_FALSE;
5727
5728 /*
5729 * If the config cache is stale, or we have uninitialized
5730 * metaslabs (see spa_vdev_add()), then update the config.
5731 *
5732 * If this is a verbatim import, trust the current
5733 * in-core spa_config and update the disk labels.
5734 */
5735 if (update_config_cache || config_cache_txg != spa->spa_config_txg ||
5736 spa->spa_load_state == SPA_LOAD_IMPORT ||
5737 spa->spa_load_state == SPA_LOAD_RECOVER ||
5738 (spa->spa_import_flags & ZFS_IMPORT_VERBATIM))
5739 need_update = B_TRUE;
5740
5741 for (int c = 0; c < rvd->vdev_children; c++)
5742 if (rvd->vdev_child[c]->vdev_ms_array == 0)
5743 need_update = B_TRUE;
5744
5745 /*
5746 * Update the config cache asynchronously in case we're the
5747 * root pool, in which case the config cache isn't writable yet.
5748 */
5749 if (need_update)
5750 spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE);
5751 }
5752
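/*
 * Tear down and reactivate the in-core pool state so that the load can
 * be retried from scratch (e.g. with the trusted config or the
 * checkpointed uberblock).
 */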
5753 static void
5754 spa_ld_prepare_for_reload(spa_t *spa)
5755 {
5756 spa_mode_t mode = spa->spa_mode;
5757 int async_suspended = spa->spa_async_suspended;
5758
5759 spa_unload(spa);
5760 spa_deactivate(spa);
5761 spa_activate(spa, mode);
5762
5763 /*
5764 * We save the value of spa_async_suspended as it gets reset to 0 by
5765 * spa_unload(). We want to restore it back to the original value before
5766 * returning as we might be calling spa_async_resume() later.
5767 */
5768 spa->spa_async_suspended = async_suspended;
5769 }
5770
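/*
 * If the pool has a checkpoint, read the checkpointed uberblock from the
 * MOS and record its txg and timestamp in the spa.  Returns 0 when no
 * checkpoint exists.
 */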
5771 static int
5772 spa_ld_read_checkpoint_txg(spa_t *spa)
5773 {
5774 uberblock_t checkpoint;
5775 int error = 0;
5776
5777 ASSERT0(spa->spa_checkpoint_txg);
5778 ASSERT(spa_namespace_held() ||
5779 spa->spa_load_thread == curthread);
5780
5781 error = zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
5782 DMU_POOL_ZPOOL_CHECKPOINT, sizeof (uint64_t),
5783 sizeof (uberblock_t) / sizeof (uint64_t), &checkpoint);
5784
5785 if (error == ENOENT)
5786 return (0);
5787
5788 if (error != 0)
5789 return (error);
5790
5791 ASSERT3U(checkpoint.ub_txg, !=, 0);
5792 ASSERT3U(checkpoint.ub_checkpoint_txg, !=, 0);
5793 ASSERT3U(checkpoint.ub_timestamp, !=, 0);
5794 spa->spa_checkpoint_txg = checkpoint.ub_txg;
5795 spa->spa_checkpoint_info.sci_timestamp = checkpoint.ub_timestamp;
5796
5797 return (0);
5798 }
5799
5800 static int
5801 spa_ld_mos_init(spa_t *spa, spa_import_type_t type)
5802 {
5803 int error = 0;
5804
5805 ASSERT(spa_namespace_held());
5806 ASSERT(spa->spa_config_source != SPA_CONFIG_SRC_NONE);
5807
5808 /*
5809 * Never trust the config that is provided unless we are assembling
5810 * a pool following a split.
5811 * This means don't trust blkptrs and the vdev tree in general. This
5812 * also effectively puts the spa in read-only mode since
5813 * spa_writeable() checks for spa_trust_config to be true.
5814 * We will later load a trusted config from the MOS.
5815 */
5816 if (type != SPA_IMPORT_ASSEMBLE)
5817 spa->spa_trust_config = B_FALSE;
5818
5819 /*
5820 * Parse the config provided to create a vdev tree.
5821 */
5822 error = spa_ld_parse_config(spa, type);
5823 if (error != 0)
5824 return (error);
5825
5826 spa_import_progress_add(spa);
5827
5828 /*
5829 * Now that we have the vdev tree, try to open each vdev. This involves
5830 * opening the underlying physical device, retrieving its geometry and
5831 * probing the vdev with a dummy I/O. The state of each vdev will be set
5832 * based on the success of those operations. After this we'll be ready
5833 * to read from the vdevs.
5834 */
5835 error = spa_ld_open_vdevs(spa);
5836 if (error != 0)
5837 return (error);
5838
5839 /*
5840 * Read the label of each vdev and make sure that the GUIDs stored
5841 * there match the GUIDs in the config provided.
5842 * If we're assembling a new pool that's been split off from an
5843 * existing pool, the labels haven't yet been updated so we skip
5844 * validation for now.
5845 */
5846 if (type != SPA_IMPORT_ASSEMBLE) {
5847 error = spa_ld_validate_vdevs(spa);
5848 if (error != 0)
5849 return (error);
5850 }
5851
5852 /*
5853 * Read all vdev labels to find the best uberblock (i.e. latest,
5854 * unless spa_load_max_txg is set) and store it in spa_uberblock. We
5855 * get the list of features required to read blkptrs in the MOS from
5856 * the vdev label with the best uberblock and verify that our version
5857 * of zfs supports them all.
5858 */
5859 error = spa_ld_select_uberblock(spa, type);
5860 if (error != 0)
5861 return (error);
5862
5863 /*
5864 * Pass that uberblock to the dsl_pool layer which will open the root
5865 * blkptr. This blkptr points to the latest version of the MOS and will
5866 * allow us to read its contents.
5867 */
5868 error = spa_ld_open_rootbp(spa);
5869 if (error != 0)
5870 return (error);
5871
5872 return (0);
5873 }
5874
5875 static int
5876 spa_ld_checkpoint_rewind(spa_t *spa)
5877 {
5878 uberblock_t checkpoint;
5879 int error = 0;
5880
5881 ASSERT(spa_namespace_held());
5882 ASSERT(spa->spa_import_flags & ZFS_IMPORT_CHECKPOINT);
5883
5884 error = zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
5885 DMU_POOL_ZPOOL_CHECKPOINT, sizeof (uint64_t),
5886 sizeof (uberblock_t) / sizeof (uint64_t), &checkpoint);
5887
5888 if (error != 0) {
5889 spa_load_failed(spa, "unable to retrieve checkpointed "
5890 "uberblock from the MOS config [error=%d]", error);
5891
5892 if (error == ENOENT)
5893 error = ZFS_ERR_NO_CHECKPOINT;
5894
5895 return (error);
5896 }
5897
5898 ASSERT3U(checkpoint.ub_txg, <, spa->spa_uberblock.ub_txg);
5899 ASSERT3U(checkpoint.ub_txg, ==, checkpoint.ub_checkpoint_txg);
5900
5901 /*
5902 * We need to update the txg and timestamp of the checkpointed
5903 * uberblock to be higher than the latest one. This ensures that
5904 * the checkpointed uberblock is selected if we were to close and
5905 * reopen the pool right after we've written it in the vdev labels.
5906 * (also see block comment in vdev_uberblock_compare)
5907 */
5908 checkpoint.ub_txg = spa->spa_uberblock.ub_txg + 1;
5909 checkpoint.ub_timestamp = gethrestime_sec();
5910
5911 /*
5912 * Set current uberblock to be the checkpointed uberblock.
5913 */
5914 spa->spa_uberblock = checkpoint;
5915
5916 /*
5917 * If we are doing a normal rewind, then the pool is open for
5918 * writing and we sync the "updated" checkpointed uberblock to
5919 * disk. Once this is done, we've basically rewound the whole
5920 * pool and there is no way back.
5921 *
5922 * There are cases when we don't want to attempt to sync the
5923 * checkpointed uberblock to disk because we are opening a
5924 * pool as read-only. Specifically, verifying the checkpointed
5925 * state with zdb, and importing the checkpointed state to get
5926 * a "preview" of its content.
5927 */
5928 if (spa_writeable(spa)) {
5929 vdev_t *rvd = spa->spa_root_vdev;
5930
5931 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
5932 vdev_t *svd[SPA_SYNC_MIN_VDEVS] = { NULL };
5933 int svdcount = 0;
5934 int children = rvd->vdev_children;
5935 int c0 = random_in_range(children);
5936
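/*
 * Starting from a random child, pick up to SPA_SYNC_MIN_VDEVS
 * concrete, non-log top-level vdevs whose labels will receive the
 * rewound uberblock via vdev_config_sync() below.
 */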
5937 for (int c = 0; c < children; c++) {
5938 vdev_t *vd = rvd->vdev_child[(c0 + c) % children];
5939
5940 /* Stop when revisiting the first vdev */
5941 if (c > 0 && svd[0] == vd)
5942 break;
5943
5944 if (vd->vdev_ms_array == 0 || vd->vdev_islog ||
5945 !vdev_is_concrete(vd))
5946 continue;
5947
5948 svd[svdcount++] = vd;
5949 if (svdcount == SPA_SYNC_MIN_VDEVS)
5950 break;
5951 }
5952 error = vdev_config_sync(svd, svdcount, spa->spa_first_txg);
5953 if (error == 0)
5954 spa->spa_last_synced_guid = rvd->vdev_guid;
5955 spa_config_exit(spa, SCL_ALL, FTAG);
5956
5957 if (error != 0) {
5958 spa_load_failed(spa, "failed to write checkpointed "
5959 "uberblock to the vdev labels [error=%d]", error);
5960 return (error);
5961 }
5962 }
5963
5964 return (0);
5965 }
5966
5967 static int
5968 spa_ld_mos_with_trusted_config(spa_t *spa, spa_import_type_t type,
5969 boolean_t *update_config_cache)
5970 {
5971 int error;
5972
5973 /*
5974 * Parse the config for pool, open and validate vdevs,
5975 * select an uberblock, and use that uberblock to open
5976 * the MOS.
5977 */
5978 error = spa_ld_mos_init(spa, type);
5979 if (error != 0)
5980 return (error);
5981
5982 /*
5983 * Retrieve the trusted config stored in the MOS and use it to create
5984 * a new, exact version of the vdev tree, then reopen all vdevs.
5985 */
5986 error = spa_ld_trusted_config(spa, type, B_FALSE);
5987 if (error == EAGAIN) {
5988 if (update_config_cache != NULL)
5989 *update_config_cache = B_TRUE;
5990
5991 /*
5992 * Redo the loading process with the trusted config if it is
5993 * too different from the untrusted config.
5994 */
5995 spa_ld_prepare_for_reload(spa);
5996 spa_load_note(spa, "RELOADING");
5997 error = spa_ld_mos_init(spa, type);
5998 if (error != 0)
5999 return (error);
6000
6001 error = spa_ld_trusted_config(spa, type, B_TRUE);
6002 if (error != 0)
6003 return (error);
6004
6005 } else if (error != 0) {
6006 return (error);
6007 }
6008
6009 return (0);
6010 }
6011
6012 /*
6013 * Load an existing storage pool, using the config provided. This config
6014 * describes which vdevs are part of the pool and is later validated against
6015 * partial configs present in each vdev's label and an entire copy of the
6016 * config stored in the MOS.
6017 */
6018 static int
6019 spa_load_impl(spa_t *spa, spa_import_type_t type, const char **ereport)
6020 {
6021 int error = 0;
6022 boolean_t missing_feat_write = B_FALSE;
6023 boolean_t checkpoint_rewind =
6024 (spa->spa_import_flags & ZFS_IMPORT_CHECKPOINT);
6025 boolean_t update_config_cache = B_FALSE;
6026 hrtime_t load_start = gethrtime();
6027
6028 ASSERT(spa_namespace_held());
6029 ASSERT(spa->spa_config_source != SPA_CONFIG_SRC_NONE);
6030
6031 spa_load_note(spa, "LOADING");
6032
6033 error = spa_ld_mos_with_trusted_config(spa, type, &update_config_cache);
6034 if (error != 0)
6035 return (error);
6036
6037 /*
6038 * If we are rewinding to the checkpoint then we need to repeat
6039 * everything we've done so far in this function but this time
6040 * selecting the checkpointed uberblock and using that to open
6041 * the MOS.
6042 */
6043 if (checkpoint_rewind) {
6044 /*
6045 * If we are rewinding to the checkpoint, update the config cache
6046 * anyway.
6047 */
6048 update_config_cache = B_TRUE;
6049
6050 /*
6051 * Extract the checkpointed uberblock from the current MOS
6052 * and use this as the pool's uberblock from now on. If the
6053 * pool is imported as writeable we also write the checkpoint
6054 * uberblock to the labels, making the rewind permanent.
6055 */
6056 error = spa_ld_checkpoint_rewind(spa);
6057 if (error != 0)
6058 return (error);
6059
6060 /*
6061 * Redo the loading process again with the
6062 * checkpointed uberblock.
6063 */
6064 spa_ld_prepare_for_reload(spa);
6065 spa_load_note(spa, "LOADING checkpointed uberblock");
6066 error = spa_ld_mos_with_trusted_config(spa, type, NULL);
6067 if (error != 0)
6068 return (error);
6069 }
6070
6071 /*
6072 * Drop the namespace lock for the rest of the function.
6073 */
6074 spa->spa_load_thread = curthread;
6075 spa_namespace_exit(FTAG);
6076
6077 /*
6078 * Retrieve the checkpoint txg if the pool has a checkpoint.
6079 */
6080 spa_import_progress_set_notes(spa, "Loading checkpoint txg");
6081 error = spa_ld_read_checkpoint_txg(spa);
6082 if (error != 0)
6083 goto fail;
6084
6085 /*
6086 * Retrieve the mapping of indirect vdevs. Those vdevs were removed
6087 * from the pool and their contents were re-mapped to other vdevs. Note
6088 * that everything that we read before this step must have been
6089 * rewritten on concrete vdevs after the last device removal was
6090 * initiated. Otherwise we could be reading from indirect vdevs before
6091 * we have loaded their mappings.
6092 */
6093 spa_import_progress_set_notes(spa, "Loading indirect vdev metadata");
6094 error = spa_ld_open_indirect_vdev_metadata(spa);
6095 if (error != 0)
6096 goto fail;
6097
6098 /*
6099 * Retrieve the full list of active features from the MOS and check if
6100 * they are all supported.
6101 */
6102 spa_import_progress_set_notes(spa, "Checking feature flags");
6103 error = spa_ld_check_features(spa, &missing_feat_write);
6104 if (error != 0)
6105 goto fail;
6106
6107 /*
6108 * Load several special directories from the MOS needed by the dsl_pool
6109 * layer.
6110 */
6111 spa_import_progress_set_notes(spa, "Loading special MOS directories");
6112 error = spa_ld_load_special_directories(spa);
6113 if (error != 0)
6114 goto fail;
6115
6116 /*
6117 * Retrieve pool properties from the MOS.
6118 */
6119 spa_import_progress_set_notes(spa, "Loading properties");
6120 error = spa_ld_get_props(spa);
6121 if (error != 0)
6122 goto fail;
6123
6124 /*
6125 * Retrieve the list of auxiliary devices - cache devices and spares -
6126 * and open them.
6127 */
6128 spa_import_progress_set_notes(spa, "Loading AUX vdevs");
6129 error = spa_ld_open_aux_vdevs(spa, type);
6130 if (error != 0)
6131 goto fail;
6132
6133 /*
6134 * Load the metadata for all vdevs. Also check if unopenable devices
6135 * should be autoreplaced.
6136 */
6137 spa_import_progress_set_notes(spa, "Loading vdev metadata");
6138 error = spa_ld_load_vdev_metadata(spa);
6139 if (error != 0)
6140 goto fail;
6141
6142 spa_import_progress_set_notes(spa, "Loading dedup tables");
6143 error = spa_ld_load_dedup_tables(spa);
6144 if (error != 0)
6145 goto fail;
6146
6147 spa_import_progress_set_notes(spa, "Loading BRT");
6148 error = spa_ld_load_brt(spa);
6149 if (error != 0)
6150 goto fail;
6151
6152 /*
6153 * Verify the logs now to make sure we don't have any unexpected errors
6154 * when we claim log blocks later.
6155 */
6156 spa_import_progress_set_notes(spa, "Verifying Log Devices");
6157 error = spa_ld_verify_logs(spa, type, ereport);
6158 if (error != 0)
6159 goto fail;
6160
6161 if (missing_feat_write) {
6162 ASSERT(spa->spa_load_state == SPA_LOAD_TRYIMPORT);
6163
6164 /*
6165 * At this point, we know that we can open the pool in
6166 * read-only mode but not read-write mode. We now have enough
6167 * information and can return to userland.
6168 */
6169 error = spa_vdev_err(spa->spa_root_vdev, VDEV_AUX_UNSUP_FEAT,
6170 ENOTSUP);
6171 goto fail;
6172 }
6173
6174 /*
6175 * Traverse the last txgs to make sure the pool was left off in a safe
6176 * state. When performing an extreme rewind, we verify the whole pool,
6177 * which can take a very long time.
6178 */
6179 spa_import_progress_set_notes(spa, "Verifying pool data");
6180 error = spa_ld_verify_pool_data(spa);
6181 if (error != 0)
6182 goto fail;
6183
6184 /*
6185 * Calculate the deflated space for the pool. This must be done before
6186 * we write anything to the pool because we'd need to update the space
6187 * accounting using the deflated sizes.
6188 */
6189 spa_import_progress_set_notes(spa, "Calculating deflated space");
6190 spa_update_dspace(spa);
6191
6192 /*
6193 * We have now retrieved all the information we needed to open the
6194 * pool. If we are importing the pool in read-write mode, a few
6195 * additional steps must be performed to finish the import.
6196 */
6197 if (spa_writeable(spa) && (spa->spa_load_state == SPA_LOAD_RECOVER ||
6198 spa->spa_load_max_txg == UINT64_MAX)) {
6199 uint64_t config_cache_txg = spa->spa_config_txg;
6200
6201 spa_import_progress_set_notes(spa, "Starting import");
6202
6203 ASSERT(spa->spa_load_state != SPA_LOAD_TRYIMPORT);
6204
6205 /*
6206 * Before we do any zio_write's, complete the raidz expansion
6207 * scratch space copying, if necessary.
6208 */
6209 if (RRSS_GET_STATE(&spa->spa_uberblock) == RRSS_SCRATCH_VALID)
6210 vdev_raidz_reflow_copy_scratch(spa);
6211
6212 /*
6213 * In case of a checkpoint rewind, log the original txg
6214 * of the checkpointed uberblock.
6215 */
6216 if (checkpoint_rewind) {
6217 spa_history_log_internal(spa, "checkpoint rewind",
6218 NULL, "rewound state to txg=%llu",
6219 (u_longlong_t)spa->spa_uberblock.ub_checkpoint_txg);
6220 }
6221
6222 spa_import_progress_set_notes(spa, "Claiming ZIL blocks");
6223 /*
6224 * Traverse the ZIL and claim all blocks.
6225 */
6226 spa_ld_claim_log_blocks(spa);
6227
6228 /*
6229 * Kick-off the syncing thread.
6230 */
6231 spa->spa_sync_on = B_TRUE;
6232 txg_sync_start(spa->spa_dsl_pool);
6233 mmp_thread_start(spa);
6234
6235 /*
6236 * Wait for all claims to sync. We sync up to the highest
6237 * claimed log block birth time so that claimed log blocks
6238 * don't appear to be from the future. spa_claim_max_txg
6239 * will have been set for us by ZIL traversal operations
6240 * performed above.
6241 */
6242 spa_import_progress_set_notes(spa, "Syncing ZIL claims");
6243 txg_wait_synced(spa->spa_dsl_pool, spa->spa_claim_max_txg);
6244
6245 /*
6246 * Check if we need to request an update of the config. On the
6247 * next sync, we would update the config stored in vdev labels
6248 * and the cachefile (by default /etc/zfs/zpool.cache).
6249 */
6250 spa_import_progress_set_notes(spa, "Updating configs");
6251 spa_ld_check_for_config_update(spa, config_cache_txg,
6252 update_config_cache);
6253
6254 /*
6255 * Check if a rebuild was in progress and if so resume it.
6256 * Then check all DTLs to see if anything needs resilvering.
6257 * The resilver will be deferred if a rebuild was started.
6258 */
6259 spa_import_progress_set_notes(spa, "Starting resilvers");
6260 if (vdev_rebuild_active(spa->spa_root_vdev)) {
6261 vdev_rebuild_restart(spa);
6262 } else if (!dsl_scan_resilvering(spa->spa_dsl_pool) &&
6263 vdev_resilver_needed(spa->spa_root_vdev, NULL, NULL)) {
6264 spa_async_request(spa, SPA_ASYNC_RESILVER);
6265 }
6266
6267 /*
6268 * Log the fact that we booted up (so that we can detect if
6269 * we rebooted in the middle of an operation).
6270 */
6271 spa_history_log_version(spa, "open", NULL);
6272
6273 spa_import_progress_set_notes(spa,
6274 "Restarting device removals");
6275 spa_restart_removal(spa);
6276 spa_spawn_aux_threads(spa);
6277
6278 /*
6279 * Delete any inconsistent datasets.
6280 *
6281 * Note:
6282 * Since we may be issuing deletes for clones here,
6283 * we make sure to do so after we've spawned all the
6284 * auxiliary threads above (of which the livelist
6285 * deletion zthr is part).
6286 */
6287 spa_import_progress_set_notes(spa,
6288 "Cleaning up inconsistent objsets");
6289 (void) dmu_objset_find(spa_name(spa),
6290 dsl_destroy_inconsistent, NULL, DS_FIND_CHILDREN);
6291
6292 /*
6293 * Clean up any stale temporary dataset userrefs.
6294 */
6295 spa_import_progress_set_notes(spa,
6296 "Cleaning up temporary userrefs");
6297 dsl_pool_clean_tmp_userrefs(spa->spa_dsl_pool);
6298
6299 spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
6300 spa_import_progress_set_notes(spa, "Restarting initialize");
6301 vdev_initialize_restart(spa->spa_root_vdev);
6302 spa_import_progress_set_notes(spa, "Restarting TRIM");
6303 vdev_trim_restart(spa->spa_root_vdev);
6304 vdev_autotrim_restart(spa);
6305 spa_config_exit(spa, SCL_CONFIG, FTAG);
6306 spa_import_progress_set_notes(spa, "Finished importing");
6307 }
6308 zio_handle_import_delay(spa, gethrtime() - load_start);
6309
6310 spa_import_progress_remove(spa_guid(spa));
6311 spa_async_request(spa, SPA_ASYNC_L2CACHE_REBUILD);
6312
6313 spa_load_note(spa, "LOADED");
6314 fail:
6315 spa_namespace_enter(FTAG);
6316 spa->spa_load_thread = NULL;
6317 spa_namespace_broadcast();
6318
6319 return (error);
6320
6321 }
6322
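/*
 * Unload and reactivate the pool, then retry spa_load() with
 * spa_load_max_txg lowered to just below the previously selected
 * uberblock, forcing an earlier uberblock to be used.
 */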
6323 static int
6324 spa_load_retry(spa_t *spa, spa_load_state_t state)
6325 {
6326 spa_mode_t mode = spa->spa_mode;
6327
6328 spa_unload(spa);
6329 spa_deactivate(spa);
6330
6331 spa->spa_load_max_txg = spa->spa_uberblock.ub_txg - 1;
6332
6333 spa_activate(spa, mode);
6334 spa_async_suspend(spa);
6335
6336 spa_load_note(spa, "spa_load_retry: rewind, max txg: %llu",
6337 (u_longlong_t)spa->spa_load_max_txg);
6338
6339 return (spa_load(spa, state, SPA_IMPORT_EXISTING));
6340 }
6341
6342 /*
6343 * If spa_load() fails, this function will try loading prior txgs. If
6344 * 'state' is SPA_LOAD_RECOVER and one of these loads succeeds the pool
6345 * will be rewound to that txg. If 'state' is not SPA_LOAD_RECOVER this
6346 * function will not rewind the pool and will return the same error as
6347 * spa_load().
6348 */
6349 static int
6350 spa_load_best(spa_t *spa, spa_load_state_t state, uint64_t max_request,
6351 int rewind_flags)
6352 {
6353 nvlist_t *loadinfo = NULL;
6354 nvlist_t *config = NULL;
6355 int load_error, rewind_error;
6356 uint64_t safe_rewind_txg;
6357 uint64_t min_txg;
6358
6359 if (spa->spa_load_txg && state == SPA_LOAD_RECOVER) {
6360 spa->spa_load_max_txg = spa->spa_load_txg;
6361 spa_set_log_state(spa, SPA_LOG_CLEAR);
6362 } else {
6363 spa->spa_load_max_txg = max_request;
6364 if (max_request != UINT64_MAX)
6365 spa->spa_extreme_rewind = B_TRUE;
6366 }
6367
6368 load_error = rewind_error = spa_load(spa, state, SPA_IMPORT_EXISTING);
6369 if (load_error == 0)
6370 return (0);
6371
6372 /* Do not attempt to load uberblocks from previous txgs when: */
6373 switch (load_error) {
6374 case ZFS_ERR_NO_CHECKPOINT:
6375 /* Attempting checkpoint-rewind on a pool with no checkpoint */
6376 ASSERT(spa->spa_import_flags & ZFS_IMPORT_CHECKPOINT);
6377 zfs_fallthrough;
6378 case EREMOTEIO:
6379 /* MMP determines the pool is active on another host */
6380 zfs_fallthrough;
6381 case EBADF:
6382 /* The config cache is out of sync (vdevs or hostid) */
6383 zfs_fallthrough;
6384 case EINTR:
6385 /* The user interactively interrupted the import */
6386 spa_import_progress_remove(spa_guid(spa));
6387 return (load_error);
6388 }
6389
6390 if (spa->spa_root_vdev != NULL)
6391 config = spa_config_generate(spa, NULL, -1ULL, B_TRUE);
6392
6393 spa->spa_last_ubsync_txg = spa->spa_uberblock.ub_txg;
6394 spa->spa_last_ubsync_txg_ts = spa->spa_uberblock.ub_timestamp;
6395
6396 if (rewind_flags & ZPOOL_NEVER_REWIND) {
6397 nvlist_free(config);
6398 spa_import_progress_remove(spa_guid(spa));
6399 return (load_error);
6400 }
6401
6402 if (state == SPA_LOAD_RECOVER) {
6403 /* Price of rolling back is discarding txgs, including log */
6404 spa_set_log_state(spa, SPA_LOG_CLEAR);
6405 } else {
6406 /*
6407 * If we aren't rolling back save the load info from our first
6408 * import attempt so that we can restore it after attempting
6409 * to rewind.
6410 */
6411 loadinfo = spa->spa_load_info;
6412 spa->spa_load_info = fnvlist_alloc();
6413 }
6414
6415 spa->spa_load_max_txg = spa->spa_last_ubsync_txg;
6416 safe_rewind_txg = spa->spa_last_ubsync_txg - TXG_DEFER_SIZE;
6417 min_txg = (rewind_flags & ZPOOL_EXTREME_REWIND) ?
6418 TXG_INITIAL : safe_rewind_txg;
6419
6420 /*
6421 * Continue as long as we're finding errors, we're still within
6422 * the acceptable rewind range, and we're still finding uberblocks
6423 */
6424 while (rewind_error && spa->spa_uberblock.ub_txg >= min_txg &&
6425 spa->spa_uberblock.ub_txg <= spa->spa_load_max_txg) {
6426 if (spa->spa_load_max_txg < safe_rewind_txg)
6427 spa->spa_extreme_rewind = B_TRUE;
6428 rewind_error = spa_load_retry(spa, state);
6429 }
6430
6431 spa->spa_extreme_rewind = B_FALSE;
6432 spa->spa_load_max_txg = UINT64_MAX;
6433
6434 if (config && (rewind_error || state != SPA_LOAD_RECOVER))
6435 spa_config_set(spa, config);
6436 else
6437 nvlist_free(config);
6438
6439 if (state == SPA_LOAD_RECOVER) {
6440 ASSERT0P(loadinfo);
6441 spa_import_progress_remove(spa_guid(spa));
6442 return (rewind_error);
6443 } else {
6444 /* Store the rewind info as part of the initial load info */
6445 fnvlist_add_nvlist(loadinfo, ZPOOL_CONFIG_REWIND_INFO,
6446 spa->spa_load_info);
6447
6448 /* Restore the initial load info */
6449 fnvlist_free(spa->spa_load_info);
6450 spa->spa_load_info = loadinfo;
6451
6452 spa_import_progress_remove(spa_guid(spa));
6453 return (load_error);
6454 }
6455 }
6456
6457 /*
6458 * Pool Open/Import
6459 *
6460 * The import case is identical to an open except that the configuration is sent
6461 * down from userland, instead of grabbed from the configuration cache. For the
6462 * case of an open, the pool configuration will exist in the
6463 * POOL_STATE_UNINITIALIZED state.
6464 *
6465 * The stats information (gen/count/ustats) is used to gather vdev statistics at
6466 * the same time we open the pool, without having to keep around the spa_t in some
6467 * ambiguous state.
6468 */
6469 static int
6470 spa_open_common(const char *pool, spa_t **spapp, const void *tag,
6471 nvlist_t *nvpolicy, nvlist_t **config)
6472 {
6473 spa_t *spa;
6474 spa_load_state_t state = SPA_LOAD_OPEN;
6475 int error;
6476 int locked = B_FALSE;
6477 int firstopen = B_FALSE;
6478
6479 *spapp = NULL;
6480
6481 /*
6482 * As disgusting as this is, we need to support recursive calls to this
6483 * function because dsl_dir_open() is called during spa_load(), and ends
6484 * up calling spa_open() again. The real fix is to figure out how to
6485 * avoid dsl_dir_open() calling this in the first place.
6486 */
6487 if (!spa_namespace_held()) {
6488 spa_namespace_enter(FTAG);
6489 locked = B_TRUE;
6490 }
6491
6492 if ((spa = spa_lookup(pool)) == NULL) {
6493 if (locked)
6494 spa_namespace_exit(FTAG);
6495 return (SET_ERROR(ENOENT));
6496 }
6497
6498 if (spa->spa_state == POOL_STATE_UNINITIALIZED) {
6499 zpool_load_policy_t policy;
6500
6501 firstopen = B_TRUE;
6502
6503 zpool_get_load_policy(nvpolicy ? nvpolicy : spa->spa_config,
6504 &policy);
6505 if (policy.zlp_rewind & ZPOOL_DO_REWIND)
6506 state = SPA_LOAD_RECOVER;
6507
6508 spa_activate(spa, spa_mode_global);
6509
6510 if (state != SPA_LOAD_RECOVER)
6511 spa->spa_last_ubsync_txg = spa->spa_load_txg = 0;
6512 spa->spa_config_source = SPA_CONFIG_SRC_CACHEFILE;
6513
6514 zfs_dbgmsg("spa_open_common: opening %s", pool);
6515 error = spa_load_best(spa, state, policy.zlp_txg,
6516 policy.zlp_rewind);
6517
6518 if (error == EBADF) {
6519 /*
6520 * If vdev_validate() returns failure (indicated by
6521 * EBADF), then one of the vdevs indicates that the
6522 * pool has been exported or destroyed. If
6523 * this is the case, the config cache is out of sync and
6524 * we should remove the pool from the namespace.
6525 */
6526 spa_unload(spa);
6527 spa_deactivate(spa);
6528 spa_write_cachefile(spa, B_TRUE, B_TRUE, B_FALSE);
6529 spa_remove(spa);
6530 if (locked)
6531 spa_namespace_exit(FTAG);
6532 return (SET_ERROR(ENOENT));
6533 }
6534
6535 if (error) {
6536 /*
6537 * We can't open the pool, but we still have useful
6538 * information: the state of each vdev after the
6539 * attempted vdev_open(). Return this to the user.
6540 */
6541 if (config != NULL && spa->spa_config) {
6542 *config = fnvlist_dup(spa->spa_config);
6543 fnvlist_add_nvlist(*config,
6544 ZPOOL_CONFIG_LOAD_INFO,
6545 spa->spa_load_info);
6546 }
6547 spa_unload(spa);
6548 spa_deactivate(spa);
6549 spa->spa_last_open_failed = error;
6550 if (locked)
6551 spa_namespace_exit(FTAG);
6552 *spapp = NULL;
6553 return (error);
6554 }
6555 }
6556
6557 spa_open_ref(spa, tag);
6558
6559 if (config != NULL)
6560 *config = spa_config_generate(spa, NULL, -1ULL, B_TRUE);
6561
6562 /*
6563 * If we've recovered the pool, pass back any information we
6564 * gathered while doing the load.
6565 */
6566 if (state == SPA_LOAD_RECOVER && config != NULL) {
6567 fnvlist_add_nvlist(*config, ZPOOL_CONFIG_LOAD_INFO,
6568 spa->spa_load_info);
6569 }
6570
6571 if (locked) {
6572 spa->spa_last_open_failed = 0;
6573 spa->spa_last_ubsync_txg = 0;
6574 spa->spa_load_txg = 0;
6575 spa_namespace_exit(FTAG);
6576 }
6577
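/* On the first open of this pool, create device nodes for its volumes. */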
6578 if (firstopen)
6579 zvol_create_minors(spa_name(spa));
6580
6581 *spapp = spa;
6582
6583 return (0);
6584 }
6585
6586 int
6587 spa_open_rewind(const char *name, spa_t **spapp, const void *tag,
6588 nvlist_t *policy, nvlist_t **config)
6589 {
6590 return (spa_open_common(name, spapp, tag, policy, config));
6591 }
6592
6593 int
6594 spa_open(const char *name, spa_t **spapp, const void *tag)
6595 {
6596 return (spa_open_common(name, spapp, tag, NULL, NULL));
6597 }
6598
6599 /*
6600 * Look up the given spa_t, incrementing the inject count in the process,
6601 * preventing it from being exported or destroyed.
6602 */
6603 spa_t *
6604 spa_inject_addref(char *name)
6605 {
6606 spa_t *spa;
6607
6608 spa_namespace_enter(FTAG);
6609 if ((spa = spa_lookup(name)) == NULL) {
6610 spa_namespace_exit(FTAG);
6611 return (NULL);
6612 }
6613 spa->spa_inject_ref++;
6614 spa_namespace_exit(FTAG);
6615
6616 return (spa);
6617 }
6618
6619 void
6620 spa_inject_delref(spa_t *spa)
6621 {
6622 spa_namespace_enter(FTAG);
6623 spa->spa_inject_ref--;
6624 spa_namespace_exit(FTAG);
6625 }
6626
6627 /*
6628 * Add spare device information to the nvlist.
6629 */
6630 static void
6631 spa_add_spares(spa_t *spa, nvlist_t *config)
6632 {
6633 nvlist_t **spares;
6634 uint_t i, nspares;
6635 nvlist_t *nvroot;
6636 uint64_t guid;
6637 vdev_stat_t *vs;
6638 uint_t vsc;
6639 uint64_t pool;
6640
6641 ASSERT(spa_config_held(spa, SCL_CONFIG, RW_READER));
6642
6643 if (spa->spa_spares.sav_count == 0)
6644 return;
6645
6646 nvroot = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE);
6647 VERIFY0(nvlist_lookup_nvlist_array(spa->spa_spares.sav_config,
6648 ZPOOL_CONFIG_SPARES, &spares, &nspares));
6649 if (nspares != 0) {
6650 fnvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
6651 (const nvlist_t * const *)spares, nspares);
6652 VERIFY0(nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
6653 &spares, &nspares));
6654
6655 /*
6656 * Go through and find any spares which have since been
6657 * repurposed as an active spare. If this is the case, update
6658 * their status appropriately.
6659 */
6660 for (i = 0; i < nspares; i++) {
6661 guid = fnvlist_lookup_uint64(spares[i],
6662 ZPOOL_CONFIG_GUID);
6663 VERIFY0(nvlist_lookup_uint64_array(spares[i],
6664 ZPOOL_CONFIG_VDEV_STATS, (uint64_t **)&vs, &vsc));
6665 if (spa_spare_exists(guid, &pool, NULL) &&
6666 pool != 0ULL) {
6667 vs->vs_state = VDEV_STATE_CANT_OPEN;
6668 vs->vs_aux = VDEV_AUX_SPARED;
6669 } else {
6670 vs->vs_state =
6671 spa->spa_spares.sav_vdevs[i]->vdev_state;
6672 }
6673 }
6674 }
6675 }
6676
6677 /*
6678 * Add l2cache device information to the nvlist, including vdev stats.
6679 */
6680 static void
6681 spa_add_l2cache(spa_t *spa, nvlist_t *config)
6682 {
6683 nvlist_t **l2cache;
6684 uint_t i, j, nl2cache;
6685 nvlist_t *nvroot;
6686 uint64_t guid;
6687 vdev_t *vd;
6688 vdev_stat_t *vs;
6689 uint_t vsc;
6690
6691 ASSERT(spa_config_held(spa, SCL_CONFIG, RW_READER));
6692
6693 if (spa->spa_l2cache.sav_count == 0)
6694 return;
6695
6696 nvroot = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE);
6697 VERIFY0(nvlist_lookup_nvlist_array(spa->spa_l2cache.sav_config,
6698 ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache));
6699 if (nl2cache != 0) {
6700 fnvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
6701 (const nvlist_t * const *)l2cache, nl2cache);
6702 VERIFY0(nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
6703 &l2cache, &nl2cache));
6704
6705 /*
6706 * Update level 2 cache device stats.
6707 */
6708
6709 for (i = 0; i < nl2cache; i++) {
6710 guid = fnvlist_lookup_uint64(l2cache[i],
6711 ZPOOL_CONFIG_GUID);
6712
6713 vd = NULL;
6714 for (j = 0; j < spa->spa_l2cache.sav_count; j++) {
6715 if (guid ==
6716 spa->spa_l2cache.sav_vdevs[j]->vdev_guid) {
6717 vd = spa->spa_l2cache.sav_vdevs[j];
6718 break;
6719 }
6720 }
6721 ASSERT(vd != NULL);
6722
6723 VERIFY0(nvlist_lookup_uint64_array(l2cache[i],
6724 ZPOOL_CONFIG_VDEV_STATS, (uint64_t **)&vs, &vsc));
6725 vdev_get_stats(vd, vs);
6726 vdev_config_generate_stats(vd, l2cache[i]);
6727
6728 }
6729 }
6730 }
6731
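/*
 * Read the for-read and for-write feature ZAPs from the MOS and add each
 * feature's reference count to the supplied nvlist.
 */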
6732 static void
6733 spa_feature_stats_from_disk(spa_t *spa, nvlist_t *features)
6734 {
6735 zap_cursor_t zc;
6736 zap_attribute_t *za = zap_attribute_alloc();
6737
6738 if (spa->spa_feat_for_read_obj != 0) {
6739 for (zap_cursor_init(&zc, spa->spa_meta_objset,
6740 spa->spa_feat_for_read_obj);
6741 zap_cursor_retrieve(&zc, za) == 0;
6742 zap_cursor_advance(&zc)) {
6743 ASSERT(za->za_integer_length == sizeof (uint64_t) &&
6744 za->za_num_integers == 1);
6745 VERIFY0(nvlist_add_uint64(features, za->za_name,
6746 za->za_first_integer));
6747 }
6748 zap_cursor_fini(&zc);
6749 }
6750
6751 if (spa->spa_feat_for_write_obj != 0) {
6752 for (zap_cursor_init(&zc, spa->spa_meta_objset,
6753 spa->spa_feat_for_write_obj);
6754 zap_cursor_retrieve(&zc, za) == 0;
6755 zap_cursor_advance(&zc)) {
6756 ASSERT(za->za_integer_length == sizeof (uint64_t) &&
6757 za->za_num_integers == 1);
6758 VERIFY0(nvlist_add_uint64(features, za->za_name,
6759 za->za_first_integer));
6760 }
6761 zap_cursor_fini(&zc);
6762 }
6763 zap_attribute_free(za);
6764 }
6765
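/*
 * Populate the features nvlist from the in-core refcount cache, avoiding
 * any on-disk access.
 */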
6766 static void
6767 spa_feature_stats_from_cache(spa_t *spa, nvlist_t *features)
6768 {
6769 int i;
6770
6771 for (i = 0; i < SPA_FEATURES; i++) {
6772 zfeature_info_t feature = spa_feature_table[i];
6773 uint64_t refcount;
6774
6775 if (feature_get_refcount(spa, &feature, &refcount) != 0)
6776 continue;
6777
6778 VERIFY0(nvlist_add_uint64(features, feature.fi_guid, refcount));
6779 }
6780 }
6781
6782 /*
6783 * Store a list of pool features and their reference counts in the
6784 * config.
6785 *
6786 * The first time this is called on a spa, allocate a new nvlist, fetch
6787 * the pool features and reference counts from disk, then save the list
6788 * in the spa. In subsequent calls on the same spa use the saved nvlist
6789 * and refresh its values from the cached reference counts. This
6790 * ensures we don't block here on I/O on a suspended pool so 'zpool
6791 * clear' can resume the pool.
6792 */
6793 static void
6794 spa_add_feature_stats(spa_t *spa, nvlist_t *config)
6795 {
6796 nvlist_t *features;
6797
6798 ASSERT(spa_config_held(spa, SCL_CONFIG, RW_READER));
6799
6800 mutex_enter(&spa->spa_feat_stats_lock);
6801 features = spa->spa_feat_stats;
6802
6803 if (features != NULL) {
6804 spa_feature_stats_from_cache(spa, features);
6805 } else {
6806 VERIFY0(nvlist_alloc(&features, NV_UNIQUE_NAME, KM_SLEEP));
6807 spa->spa_feat_stats = features;
6808 spa_feature_stats_from_disk(spa, features);
6809 }
6810
6811 VERIFY0(nvlist_add_nvlist(config, ZPOOL_CONFIG_FEATURE_STATS,
6812 features));
6813
6814 mutex_exit(&spa->spa_feat_stats_lock);
6815 }
6816
6817 int
6818 spa_get_stats(const char *name, nvlist_t **config,
6819 char *altroot, size_t buflen)
6820 {
6821 int error;
6822 spa_t *spa;
6823
6824 *config = NULL;
6825 error = spa_open_common(name, &spa, FTAG, NULL, config);
6826
6827 if (spa != NULL) {
6828 /*
6829 * This still leaves a window of inconsistency where the spares
6830 * or l2cache devices could change and the config would be
6831 * self-inconsistent.
6832 */
6833 spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
6834
6835 if (*config != NULL) {
6836 uint64_t loadtimes[2];
6837
6838 loadtimes[0] = spa->spa_loaded_ts.tv_sec;
6839 loadtimes[1] = spa->spa_loaded_ts.tv_nsec;
6840 fnvlist_add_uint64_array(*config,
6841 ZPOOL_CONFIG_LOADED_TIME, loadtimes, 2);
6842
6843 fnvlist_add_uint64(*config,
6844 ZPOOL_CONFIG_ERRCOUNT,
6845 spa_approx_errlog_size(spa));
6846
6847 if (spa_suspended(spa)) {
6848 fnvlist_add_uint64(*config,
6849 ZPOOL_CONFIG_SUSPENDED,
6850 spa->spa_failmode);
6851 fnvlist_add_uint64(*config,
6852 ZPOOL_CONFIG_SUSPENDED_REASON,
6853 spa->spa_suspended);
6854 }
6855
6856 spa_add_spares(spa, *config);
6857 spa_add_l2cache(spa, *config);
6858 spa_add_feature_stats(spa, *config);
6859 }
6860 }
6861
6862 /*
6863 * We want to get the alternate root even for faulted pools, so we cheat
6864 * and call spa_lookup() directly.
6865 */
6866 if (altroot) {
6867 if (spa == NULL) {
6868 spa_namespace_enter(FTAG);
6869 spa = spa_lookup(name);
6870 if (spa)
6871 spa_altroot(spa, altroot, buflen);
6872 else
6873 altroot[0] = '\0';
6874 spa = NULL;
6875 spa_namespace_exit(FTAG);
6876 } else {
6877 spa_altroot(spa, altroot, buflen);
6878 }
6879 }
6880
6881 if (spa != NULL) {
6882 spa_config_exit(spa, SCL_CONFIG, FTAG);
6883 spa_close(spa, FTAG);
6884 }
6885
6886 return (error);
6887 }
6888
6889 /*
6890 * Validate that the auxiliary device array is well formed. We must have an
6891 * array of nvlists, each of which describes a valid leaf vdev. If this is an
6892 * import (mode is VDEV_ALLOC_SPARE), then we allow corrupted spares to be
6893 * specified, as long as they are well-formed.
6894 */
6895 static int
6896 spa_validate_aux_devs(spa_t *spa, nvlist_t *nvroot, uint64_t crtxg, int mode,
6897 spa_aux_vdev_t *sav, const char *config, uint64_t version,
6898 vdev_labeltype_t label)
6899 {
6900 nvlist_t **dev;
6901 uint_t i, ndev;
6902 vdev_t *vd;
6903 int error;
6904
6905 ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
6906
6907 /*
6908 * It's acceptable to have no devs specified.
6909 */
6910 if (nvlist_lookup_nvlist_array(nvroot, config, &dev, &ndev) != 0)
6911 return (0);
6912
6913 if (ndev == 0)
6914 return (SET_ERROR(EINVAL));
6915
6916 /*
6917 * Make sure the pool is formatted with a version that supports this
6918 * device type.
6919 */
6920 if (spa_version(spa) < version)
6921 return (SET_ERROR(ENOTSUP));
6922
6923 /*
6924 * Set the pending device list so we correctly handle device in-use
6925 * checking.
6926 */
6927 sav->sav_pending = dev;
6928 sav->sav_npending = ndev;
6929
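/*
 * Parse, open, and label each device to confirm it is a usable leaf
 * vdev; the temporary vdev_t is freed once validation is complete.
 */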
6930 for (i = 0; i < ndev; i++) {
6931 if ((error = spa_config_parse(spa, &vd, dev[i], NULL, 0,
6932 mode)) != 0)
6933 goto out;
6934
6935 if (!vd->vdev_ops->vdev_op_leaf) {
6936 vdev_free(vd);
6937 error = SET_ERROR(EINVAL);
6938 goto out;
6939 }
6940
6941 vd->vdev_top = vd;
6942
6943 if ((error = vdev_open(vd)) == 0 &&
6944 (error = vdev_label_init(vd, crtxg, label)) == 0) {
6945 fnvlist_add_uint64(dev[i], ZPOOL_CONFIG_GUID,
6946 vd->vdev_guid);
6947 }
6948
6949 vdev_free(vd);
6950
6951 if (error &&
6952 (mode != VDEV_ALLOC_SPARE && mode != VDEV_ALLOC_L2CACHE))
6953 goto out;
6954 else
6955 error = 0;
6956 }
6957
6958 out:
6959 sav->sav_pending = NULL;
6960 sav->sav_npending = 0;
6961 return (error);
6962 }
6963
6964 static int
6965 spa_validate_aux(spa_t *spa, nvlist_t *nvroot, uint64_t crtxg, int mode)
6966 {
6967 int error;
6968
6969 ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
6970
6971 if ((error = spa_validate_aux_devs(spa, nvroot, crtxg, mode,
6972 &spa->spa_spares, ZPOOL_CONFIG_SPARES, SPA_VERSION_SPARES,
6973 VDEV_LABEL_SPARE)) != 0) {
6974 return (error);
6975 }
6976
6977 return (spa_validate_aux_devs(spa, nvroot, crtxg, mode,
6978 &spa->spa_l2cache, ZPOOL_CONFIG_L2CACHE, SPA_VERSION_L2CACHE,
6979 VDEV_LABEL_L2CACHE));
6980 }
6981
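/*
 * Merge the given list of aux devices (spares or l2cache) into the
 * existing sav_config nvlist, or create a new sav_config if none exists.
 */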
6982 static void
6983 spa_set_aux_vdevs(spa_aux_vdev_t *sav, nvlist_t **devs, int ndevs,
6984 const char *config)
6985 {
6986 int i;
6987
6988 if (sav->sav_config != NULL) {
6989 nvlist_t **olddevs;
6990 uint_t oldndevs;
6991 nvlist_t **newdevs;
6992
6993 /*
6994 * Generate new dev list by concatenating with the
6995 * current dev list.
6996 */
6997 VERIFY0(nvlist_lookup_nvlist_array(sav->sav_config, config,
6998 &olddevs, &oldndevs));
6999
7000 newdevs = kmem_alloc(sizeof (void *) *
7001 (ndevs + oldndevs), KM_SLEEP);
7002 for (i = 0; i < oldndevs; i++)
7003 newdevs[i] = fnvlist_dup(olddevs[i]);
7004 for (i = 0; i < ndevs; i++)
7005 newdevs[i + oldndevs] = fnvlist_dup(devs[i]);
7006
7007 fnvlist_remove(sav->sav_config, config);
7008
7009 fnvlist_add_nvlist_array(sav->sav_config, config,
7010 (const nvlist_t * const *)newdevs, ndevs + oldndevs);
7011 for (i = 0; i < oldndevs + ndevs; i++)
7012 nvlist_free(newdevs[i]);
7013 kmem_free(newdevs, (oldndevs + ndevs) * sizeof (void *));
7014 } else {
7015 /*
7016 * Generate a new dev list.
7017 */
7018 sav->sav_config = fnvlist_alloc();
7019 fnvlist_add_nvlist_array(sav->sav_config, config,
7020 (const nvlist_t * const *)devs, ndevs);
7021 }
7022 }
7023
7024 /*
7025 * Stop and drop level 2 ARC devices
7026 */
7027 void
7028 spa_l2cache_drop(spa_t *spa)
7029 {
7030 vdev_t *vd;
7031 int i;
7032 spa_aux_vdev_t *sav = &spa->spa_l2cache;
7033
7034 for (i = 0; i < sav->sav_count; i++) {
7035 uint64_t pool;
7036
7037 vd = sav->sav_vdevs[i];
7038 ASSERT(vd != NULL);
7039
7040 if (spa_l2cache_exists(vd->vdev_guid, &pool) &&
7041 pool != 0ULL && l2arc_vdev_present(vd))
7042 l2arc_remove_vdev(vd);
7043 }
7044 }
7045
7046 /*
7047 * Verify encryption parameters for spa creation. If we are encrypting, we must
7048 * have the encryption feature flag enabled.
7049 */
7050 static int
7051 spa_create_check_encryption_params(dsl_crypto_params_t *dcp,
7052 boolean_t has_encryption)
7053 {
7054 if (dcp->cp_crypt != ZIO_CRYPT_OFF &&
7055 dcp->cp_crypt != ZIO_CRYPT_INHERIT &&
7056 !has_encryption)
7057 return (SET_ERROR(ENOTSUP));
7058
7059 return (dmu_objset_create_crypt_check(NULL, dcp, NULL));
7060 }
7061
7062 /*
7063 * Pool Creation
7064 */
7065 int
7066 spa_create(const char *pool, nvlist_t *nvroot, nvlist_t *props,
7067 nvlist_t *zplprops, dsl_crypto_params_t *dcp, nvlist_t **errinfo)
7068 {
7069 spa_t *spa;
7070 const char *altroot = NULL;
7071 vdev_t *rvd;
7072 dsl_pool_t *dp;
7073 dmu_tx_t *tx;
7074 int error = 0;
7075 uint64_t txg = TXG_INITIAL;
7076 nvlist_t **spares, **l2cache;
7077 uint_t nspares, nl2cache;
7078 uint64_t version, obj, ndraid = 0, draid_nfgroup = 0;
7079 boolean_t has_features;
7080 boolean_t has_encryption;
7081 boolean_t has_allocclass;
7082 boolean_t has_draid;
7083 boolean_t has_draid_fdomains;
7084 spa_feature_t feat;
7085 const char *feat_name;
7086 const char *poolname;
7087 nvlist_t *nvl;
7088
7089 if (props == NULL ||
7090 nvlist_lookup_string(props,
7091 zpool_prop_to_name(ZPOOL_PROP_TNAME), &poolname) != 0)
7092 poolname = (char *)pool;
7093
7094 /*
7095 * If this pool already exists, return failure.
7096 */
7097 spa_namespace_enter(FTAG);
7098 if (spa_lookup(poolname) != NULL) {
7099 spa_namespace_exit(FTAG);
7100 return (SET_ERROR(EEXIST));
7101 }
7102
7103 /*
7104 * Allocate a new spa_t structure.
7105 */
7106 nvl = fnvlist_alloc();
7107 fnvlist_add_string(nvl, ZPOOL_CONFIG_POOL_NAME, pool);
7108 (void) nvlist_lookup_string(props,
7109 zpool_prop_to_name(ZPOOL_PROP_ALTROOT), &altroot);
7110 spa = spa_add(poolname, nvl, altroot);
7111 fnvlist_free(nvl);
7112 spa_activate(spa, spa_mode_global);
7113
7114 if (props && (error = spa_prop_validate(spa, props))) {
7115 spa_deactivate(spa);
7116 spa_remove(spa);
7117 spa_namespace_exit(FTAG);
7118 return (error);
7119 }
7120
7121 /*
7122 * Temporary pool names should never be written to disk.
7123 */
7124 if (poolname != pool)
7125 spa->spa_import_flags |= ZFS_IMPORT_TEMP_NAME;
7126
7127 has_features = B_FALSE;
7128 has_encryption = B_FALSE;
7129 has_allocclass = B_FALSE;
7130 has_draid = B_FALSE;
7131 has_draid_fdomains = B_FALSE;
7132 for (nvpair_t *elem = nvlist_next_nvpair(props, NULL);
7133 elem != NULL; elem = nvlist_next_nvpair(props, elem)) {
7134 if (zpool_prop_feature(nvpair_name(elem))) {
7135 has_features = B_TRUE;
7136
7137 feat_name = strchr(nvpair_name(elem), '@') + 1;
7138 VERIFY0(zfeature_lookup_name(feat_name, &feat));
7139 if (feat == SPA_FEATURE_ENCRYPTION)
7140 has_encryption = B_TRUE;
7141 if (feat == SPA_FEATURE_ALLOCATION_CLASSES)
7142 has_allocclass = B_TRUE;
7143 if (feat == SPA_FEATURE_DRAID)
7144 has_draid = B_TRUE;
7145 if (feat == SPA_FEATURE_DRAID_FAIL_DOMAINS)
7146 has_draid_fdomains = B_TRUE;
7147 }
7148 }
7149
7150 /* verify encryption params, if they were provided */
7151 if (dcp != NULL) {
7152 error = spa_create_check_encryption_params(dcp, has_encryption);
7153 if (error != 0) {
7154 spa_deactivate(spa);
7155 spa_remove(spa);
7156 spa_namespace_exit(FTAG);
7157 return (error);
7158 }
7159 }
7160 if (!has_allocclass && zfs_special_devs(nvroot, NULL)) {
7161 spa_deactivate(spa);
7162 spa_remove(spa);
7163 spa_namespace_exit(FTAG);
7164 return (ENOTSUP);
7165 }
7166
7167 if (has_features || nvlist_lookup_uint64(props,
7168 zpool_prop_to_name(ZPOOL_PROP_VERSION), &version) != 0) {
7169 version = SPA_VERSION;
7170 }
7171 ASSERT(SPA_VERSION_IS_SUPPORTED(version));
7172
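	/*
	 * Seed the in-core state for the brand new pool: the uberblock
	 * starts one txg behind spa_first_txg so the creation sync
	 * supersedes it, and the removal state records that no device
	 * removal has ever run.
	 */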
7173 spa->spa_first_txg = txg;
7174 spa->spa_uberblock.ub_txg = txg - 1;
7175 spa->spa_uberblock.ub_version = version;
7176 spa->spa_ubsync = spa->spa_uberblock;
7177 spa->spa_load_state = SPA_LOAD_CREATE;
7178 spa->spa_removing_phys.sr_state = DSS_NONE;
7179 spa->spa_removing_phys.sr_removing_vdev = -1;
7180 spa->spa_removing_phys.sr_prev_indirect_vdev = -1;
7181 spa->spa_indirect_vdevs_loaded = B_TRUE;
7182 spa->spa_deflate = (version >= SPA_VERSION_RAIDZ_DEFLATE);
7183
7184 /*
7185 * Create "The Godfather" zio to hold all async IOs
7186 */
7187 spa->spa_async_zio_root = kmem_alloc(max_ncpus * sizeof (void *),
7188 KM_SLEEP);
7189 for (int i = 0; i < max_ncpus; i++) {
7190 spa->spa_async_zio_root[i] = zio_root(spa, NULL, NULL,
7191 ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE |
7192 ZIO_FLAG_GODFATHER);
7193 }
7194
7195 /*
7196 * Create the root vdev.
7197 */
7198 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
7199
7200 error = spa_config_parse(spa, &rvd, nvroot, NULL, 0, VDEV_ALLOC_ADD);
7201
7202 ASSERT(error != 0 || rvd != NULL);
7203 ASSERT(error != 0 || spa->spa_root_vdev == rvd);
7204
7205 if (error == 0 && !zfs_allocatable_devs(nvroot))
7206 error = SET_ERROR(EINVAL);
7207
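	/*
	 * The chained conditions below rely on short-circuit evaluation:
	 * each step runs only if the previous one succeeded, and requesting
	 * dRAID vdevs or dRAID fail domains without the corresponding
	 * feature enabled sets error to ENOTSUP, which the trailing
	 * error == 0 check turns into an early exit.
	 */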
7208 if (error == 0 &&
7209 (error = vdev_create(rvd, txg, B_FALSE)) == 0 &&
7210 (error = vdev_draid_spare_create(nvroot, rvd, &ndraid,
7211 &draid_nfgroup, 0)) == 0 &&
7212 (ndraid == 0 || has_draid || (error = SET_ERROR(ENOTSUP))) &&
7213 (draid_nfgroup == 0 || has_draid_fdomains ||
7214 (error = SET_ERROR(ENOTSUP))) && error == 0 &&
7215 (error = spa_validate_aux(spa, nvroot, txg, VDEV_ALLOC_ADD)) == 0) {
7216 /*
7217 * Instantiate the metaslab groups (this will dirty the vdevs);
7218 * we can no longer error exit past this point.
7219 */
7220 for (int c = 0; error == 0 && c < rvd->vdev_children; c++) {
7221 vdev_t *vd = rvd->vdev_child[c];
7222
7223 vdev_metaslab_set_size(vd);
7224 vdev_expand(vd, txg);
7225 }
7226 }
7227
7228 spa_config_exit(spa, SCL_ALL, FTAG);
7229
7230 if (error != 0) {
7231 if (errinfo != NULL) {
7232 *errinfo = spa->spa_create_info;
7233 spa->spa_create_info = NULL;
7234 }
7235 spa_unload(spa);
7236 spa_deactivate(spa);
7237 spa_remove(spa);
7238 spa_namespace_exit(FTAG);
7239 return (error);
7240 }
7241
7242 /*
7243 * Get the list of spares, if specified.
7244 */
7245 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
7246 &spares, &nspares) == 0) {
7247 spa->spa_spares.sav_config = fnvlist_alloc();
7248 fnvlist_add_nvlist_array(spa->spa_spares.sav_config,
7249 ZPOOL_CONFIG_SPARES, (const nvlist_t * const *)spares,
7250 nspares);
7251 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
7252 spa_load_spares(spa);
7253 spa_config_exit(spa, SCL_ALL, FTAG);
7254 spa->spa_spares.sav_sync = B_TRUE;
7255 }
7256
7257 /*
7258 * Get the list of level 2 cache devices, if specified.
7259 */
7260 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
7261 &l2cache, &nl2cache) == 0) {
7262 VERIFY0(nvlist_alloc(&spa->spa_l2cache.sav_config,
7263 NV_UNIQUE_NAME, KM_SLEEP));
7264 fnvlist_add_nvlist_array(spa->spa_l2cache.sav_config,
7265 ZPOOL_CONFIG_L2CACHE, (const nvlist_t * const *)l2cache,
7266 nl2cache);
7267 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
7268 spa_load_l2cache(spa);
7269 spa_config_exit(spa, SCL_ALL, FTAG);
7270 spa->spa_l2cache.sav_sync = B_TRUE;
7271 }
7272
7273 spa->spa_is_initializing = B_TRUE;
7274 spa->spa_dsl_pool = dp = dsl_pool_create(spa, zplprops, dcp, txg);
7275 spa->spa_is_initializing = B_FALSE;
7276
7277 /*
7278 * Create DDTs (dedup tables).
7279 */
7280 ddt_create(spa);
7281 /*
7282 * Create BRT table and BRT table object.
7283 */
7284 brt_create(spa);
7285
7286 spa_update_dspace(spa);
7287
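	/*
	 * Assign a transaction in the creation txg; everything created
	 * below (history object, config object, deferred-free bpobj,
	 * feature counts and initial properties) is written as part of
	 * the pool's first sync.
	 */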
7288 tx = dmu_tx_create_assigned(dp, txg);
7289
7290 /*
7291 * Create the pool's history object.
7292 */
7293 if (version >= SPA_VERSION_ZPOOL_HISTORY && !spa->spa_history)
7294 spa_history_create_obj(spa, tx);
7295
7296 spa_event_notify(spa, NULL, NULL, ESC_ZFS_POOL_CREATE);
7297 spa_history_log_version(spa, "create", tx);
7298
7299 /*
7300 * Create the pool config object.
7301 */
7302 spa->spa_config_object = dmu_object_alloc(spa->spa_meta_objset,
7303 DMU_OT_PACKED_NVLIST, SPA_CONFIG_BLOCKSIZE,
7304 DMU_OT_PACKED_NVLIST_SIZE, sizeof (uint64_t), tx);
7305
7306 if (zap_add(spa->spa_meta_objset,
7307 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_CONFIG,
7308 sizeof (uint64_t), 1, &spa->spa_config_object, tx) != 0) {
7309 cmn_err(CE_PANIC, "failed to add pool config");
7310 }
7311
7312 if (zap_add(spa->spa_meta_objset,
7313 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_CREATION_VERSION,
7314 sizeof (uint64_t), 1, &version, tx) != 0) {
7315 cmn_err(CE_PANIC, "failed to add pool version");
7316 }
7317
7318 /* Newly created pools with the right version are always deflated. */
7319 if (version >= SPA_VERSION_RAIDZ_DEFLATE) {
7320 if (zap_add(spa->spa_meta_objset,
7321 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_DEFLATE,
7322 sizeof (uint64_t), 1, &spa->spa_deflate, tx) != 0) {
7323 cmn_err(CE_PANIC, "failed to add deflate");
7324 }
7325 }
7326
7327 /*
7328 * Create the deferred-free bpobj. Turn off compression
7329 * because sync-to-convergence takes longer if the blocksize
7330 * keeps changing.
7331 */
7332 obj = bpobj_alloc(spa->spa_meta_objset, 1 << 14, tx);
7333 dmu_object_set_compress(spa->spa_meta_objset, obj,
7334 ZIO_COMPRESS_OFF, tx);
7335 if (zap_add(spa->spa_meta_objset,
7336 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_SYNC_BPOBJ,
7337 sizeof (uint64_t), 1, &obj, tx) != 0) {
7338 cmn_err(CE_PANIC, "failed to add bpobj");
7339 }
7340 VERIFY3U(0, ==, bpobj_open(&spa->spa_deferred_bpobj,
7341 spa->spa_meta_objset, obj));
7342
7343 /*
7344 * Generate some random noise for salted checksums to operate on.
7345 */
7346 (void) random_get_pseudo_bytes(spa->spa_cksum_salt.zcs_bytes,
7347 sizeof (spa->spa_cksum_salt.zcs_bytes));
7348
7349 /*
7350 * Set pool properties.
7351 */
7352 spa->spa_bootfs = zpool_prop_default_numeric(ZPOOL_PROP_BOOTFS);
7353 spa->spa_delegation = zpool_prop_default_numeric(ZPOOL_PROP_DELEGATION);
7354 spa->spa_failmode = zpool_prop_default_numeric(ZPOOL_PROP_FAILUREMODE);
7355 spa->spa_autoexpand = zpool_prop_default_numeric(ZPOOL_PROP_AUTOEXPAND);
7356 spa->spa_multihost = zpool_prop_default_numeric(ZPOOL_PROP_MULTIHOST);
7357 spa->spa_autotrim = zpool_prop_default_numeric(ZPOOL_PROP_AUTOTRIM);
7358 spa->spa_dedup_table_quota =
7359 zpool_prop_default_numeric(ZPOOL_PROP_DEDUP_TABLE_QUOTA);
7360
7361 if (props != NULL) {
7362 spa_configfile_set(spa, props, B_FALSE);
7363 spa_sync_props(props, tx);
7364 }
7365
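	/*
	 * Activate the dRAID feature flags, incrementing the refcounts
	 * once for each dRAID vdev and fail-domain group counted above.
	 */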
7366 for (int i = 0; i < ndraid; i++)
7367 spa_feature_incr(spa, SPA_FEATURE_DRAID, tx);
7368
7369 for (int i = 0; i < draid_nfgroup; i++)
7370 spa_feature_incr(spa, SPA_FEATURE_DRAID_FAIL_DOMAINS, tx);
7371
7372 dmu_tx_commit(tx);
7373
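	/*
	 * Start the sync and MMP threads, then wait for the creation txg
	 * to be committed to disk.
	 */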
7374 spa->spa_sync_on = B_TRUE;
7375 txg_sync_start(dp);
7376 mmp_thread_start(spa);
7377 txg_wait_synced(dp, txg);
7378
7379 spa_spawn_aux_threads(spa);
7380
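	/*
	 * Record the new pool in the cachefile so it is picked up
	 * automatically on subsequent imports.
	 */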
7381 spa_write_cachefile(spa, B_FALSE, B_TRUE, B_TRUE);
7382
7383 /*
7384 * Don't count references from objsets that are already closed
7385 * and are making their way through the eviction process.
7386 */
7387 spa_evicting_os_wait(spa);
7388 spa->spa_minref = zfs_refcount_count(&spa->spa_refcount);
7389 spa->spa_load_state = SPA_LOAD_NONE;
7390
7391 spa_import_os(spa);
7392
7393 spa_namespace_exit(FTAG);
7394
7395 return (0);
7396 }
7397
7398 /*
7399 * Import a non-root pool into the system.
7400 */
7401 int
7402 spa_import(char *pool, nvlist_t *config, nvlist_t *props, uint64_t flags)
7403 {
7404 spa_t *spa;
7405 const char *altroot = NULL;
7406 spa_load_state_t state = SPA_LOAD_IMPORT;
7407 zpool_load_policy_t policy;
7408 spa_mode_t mode = spa_mode_global;
7409 uint64_t readonly = B_FALSE;
7410 int error;
7411 nvlist_t *nvroot;
7412 nvlist_t **spares, **l2cache;
7413 uint_t nspares, nl2cache;
7414
7415 /*
7416 * If a pool with this name exists, return failure.
7417 */
7418 spa_namespace_enter(FTAG);
7419 if (spa_lookup(pool) != NULL) {
7420 spa_namespace_exit(FTAG);
7421 return (SET_ERROR(EEXIST));
7422 }
7423
7424 /*
7425 * Create and initialize the spa structure.
7426 */
7427 (void) nvlist_lookup_string(props,
7428 zpool_prop_to_name(ZPOOL_PROP_ALTROOT), &altroot);
7429 (void) nvlist_lookup_uint64(props,
7430 zpool_prop_to_name(ZPOOL_PROP_READONLY), &readonly);
7431 if (readonly)
7432 mode = SPA_MODE_READ;
7433 spa = spa_add(pool, config, altroot);
7434 spa->spa_import_flags = flags;
7435
7436 /*
7437 * Verbatim import - Take a pool and insert it into the namespace
7438 * as if it had been loaded at boot.
7439 */
7440 if (spa->spa_import_flags & ZFS_IMPORT_VERBATIM) {
7441 if (props != NULL)
7442 spa_configfile_set(spa, props, B_FALSE);
7443
7444 spa_write_cachefile(spa, B_FALSE, B_TRUE, B_FALSE);
7445 spa_event_notify(spa, NULL, NULL, ESC_ZFS_POOL_IMPORT);
7446 zfs_dbgmsg("spa_import: verbatim import of %s", pool);
7447 spa_namespace_exit(FTAG);
7448 return (0);
7449 }
7450
7451 spa_activate(spa, mode);
7452
7453 /*
7454 * Don't start async tasks until we know everything is healthy.
7455 */
7456 spa_async_suspend(spa);
7457
7458 zpool_get_load_policy(config, &policy);
7459 if (policy.zlp_rewind & ZPOOL_DO_REWIND)
7460 state = SPA_LOAD_RECOVER;
7461
7462 spa->spa_config_source = SPA_CONFIG_SRC_TRYIMPORT;
7463
7464 if (state != SPA_LOAD_RECOVER) {
7465 spa->spa_last_ubsync_txg = spa->spa_load_txg = 0;
7466 zfs_dbgmsg("spa_import: importing %s", pool);
7467 } else {
7468 zfs_dbgmsg("spa_import: importing %s, max_txg=%lld "
7469 "(RECOVERY MODE)", pool, (longlong_t)policy.zlp_txg);
7470 }
7471 error = spa_load_best(spa, state, policy.zlp_txg, policy.zlp_rewind);
7472
7473 /*
7474 * Propagate anything learned while loading the pool and pass it
7475 * back to caller (i.e. rewind info, missing devices, etc).
7476 */
7477 fnvlist_add_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, spa->spa_load_info);
7478
7479 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
7480 /*
7481 * Toss any existing sparelist, as it is no longer valid and
7482 * conflicts with spa_has_spare().
7483 */
7484 if (spa->spa_spares.sav_config) {
7485 nvlist_free(spa->spa_spares.sav_config);
7486 spa->spa_spares.sav_config = NULL;
7487 spa_load_spares(spa);
7488 }
7489 if (spa->spa_l2cache.sav_config) {
7490 nvlist_free(spa->spa_l2cache.sav_config);
7491 spa->spa_l2cache.sav_config = NULL;
7492 spa_load_l2cache(spa);
7493 }
7494
7495 nvroot = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE);
7496 spa_config_exit(spa, SCL_ALL, FTAG);
7497
7498 if (props != NULL)
7499 spa_configfile_set(spa, props, B_FALSE);
7500
7501 if (error != 0 || (props && spa_writeable(spa) &&
7502 (error = spa_prop_set(spa, props)))) {
7503 spa_unload(spa);
7504 spa_deactivate(spa);
7505 spa_remove(spa);
7506 spa_namespace_exit(FTAG);
7507 return (error);
7508 }
7509
7510 spa_async_resume(spa);
7511
7512 /*
7513 * Override any spares and level 2 cache devices as specified by
7514 * the user, as these may have correct device names/devids, etc.
7515 */
7516 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
7517 &spares, &nspares) == 0) {
7518 if (spa->spa_spares.sav_config)
7519 fnvlist_remove(spa->spa_spares.sav_config,
7520 ZPOOL_CONFIG_SPARES);
7521 else
7522 spa->spa_spares.sav_config = fnvlist_alloc();
7523 fnvlist_add_nvlist_array(spa->spa_spares.sav_config,
7524 ZPOOL_CONFIG_SPARES, (const nvlist_t * const *)spares,
7525 nspares);
7526 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
7527 spa_load_spares(spa);
7528 spa_config_exit(spa, SCL_ALL, FTAG);
7529 spa->spa_spares.sav_sync = B_TRUE;
7530 spa->spa_spares.sav_label_sync = B_TRUE;
7531 }
7532 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
7533 &l2cache, &nl2cache) == 0) {
7534 if (spa->spa_l2cache.sav_config)
7535 fnvlist_remove(spa->spa_l2cache.sav_config,
7536 ZPOOL_CONFIG_L2CACHE);
7537 else
7538 spa->spa_l2cache.sav_config = fnvlist_alloc();
7539 fnvlist_add_nvlist_array(spa->spa_l2cache.sav_config,
7540 ZPOOL_CONFIG_L2CACHE, (const nvlist_t * const *)l2cache,
7541 nl2cache);
7542 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
7543 spa_load_l2cache(spa);
7544 spa_config_exit(spa, SCL_ALL, FTAG);
7545 spa->spa_l2cache.sav_sync = B_TRUE;
7546 spa->spa_l2cache.sav_label_sync = B_TRUE;
7547 }
7548
7549 /*
7550 * Check for any removed devices.
7551 */
7552 if (spa->spa_autoreplace) {
7553 spa_aux_check_removed(&spa->spa_spares);
7554 spa_aux_check_removed(&spa->spa_l2cache);
7555 }
7556
7557 if (spa_writeable(spa)) {
7558 /*
7559 * Update the config cache to include the newly-imported pool.
7560 */
7561 spa_config_update(spa, SPA_CONFIG_UPDATE_POOL);
7562 }
7563
7564 /*
7565 * It's possible that the pool was expanded while it was exported.
7566 * We kick off an async task to handle this for us.
7567 */
7568 spa_async_request(spa, SPA_ASYNC_AUTOEXPAND);
7569
7570 spa_history_log_version(spa, "import", NULL);
7571
7572 spa_event_notify(spa, NULL, NULL, ESC_ZFS_POOL_IMPORT);
7573
7574 spa_namespace_exit(FTAG);
7575
7576 zvol_create_minors(pool);
7577
7578 spa_import_os(spa);
7579
7580 return (0);
7581 }
7582
7583 nvlist_t *
7584 spa_tryimport(nvlist_t *tryconfig)
7585 {
7586 nvlist_t *config = NULL;
7587 const char *poolname, *cachefile;
7588 spa_t *spa;
7589 uint64_t state;
7590 int error;
7591 zpool_load_policy_t policy;
7592
7593 if (nvlist_lookup_string(tryconfig, ZPOOL_CONFIG_POOL_NAME, &poolname))
7594 return (NULL);
7595
7596 if (nvlist_lookup_uint64(tryconfig, ZPOOL_CONFIG_POOL_STATE, &state))
7597 return (NULL);
7598
7599 /*
7600 * Create and initialize the spa structure.
7601 */
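	/*
	 * The temporary name combines TRYIMPORT_NAME with the current
	 * thread pointer so that concurrent tryimports do not collide in
	 * the pool namespace.
	 */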
7602 char *name = kmem_alloc(MAXPATHLEN, KM_SLEEP);
7603 (void) snprintf(name, MAXPATHLEN, "%s-%llx-%s",
7604 TRYIMPORT_NAME, (u_longlong_t)(uintptr_t)curthread, poolname);
7605
7606 spa_namespace_enter(FTAG);
7607 spa = spa_add(name, tryconfig, NULL);
7608 spa_activate(spa, SPA_MODE_READ);
7609 kmem_free(name, MAXPATHLEN);
7610
7611 spa->spa_load_name = spa_strdup(poolname);
7612
7613 /*
7614 * Rewind pool if a max txg was provided.
7615 */
7616 zpool_get_load_policy(spa->spa_config, &policy);
7617 if (policy.zlp_txg != UINT64_MAX) {
7618 spa->spa_load_max_txg = policy.zlp_txg;
7619 spa->spa_extreme_rewind = B_TRUE;
7620 zfs_dbgmsg("spa_tryimport: importing %s, max_txg=%lld",
7621 spa_load_name(spa), (longlong_t)policy.zlp_txg);
7622 } else {
7623 zfs_dbgmsg("spa_tryimport: importing %s", spa_load_name(spa));
7624 }
7625
7626 if (nvlist_lookup_string(tryconfig, ZPOOL_CONFIG_CACHEFILE, &cachefile)
7627 == 0) {
7628 zfs_dbgmsg("spa_tryimport: using cachefile '%s'", cachefile);
7629 spa->spa_config_source = SPA_CONFIG_SRC_CACHEFILE;
7630 } else {
7631 spa->spa_config_source = SPA_CONFIG_SRC_SCAN;
7632 }
7633
7634 /*
7635 * spa_import() relies on a pool config fetched by spa_tryimport()
7636 * for spare/cache devices. Import flags are not passed to
7637 * spa_tryimport(), so a missing log device would otherwise make it
7638 * return early without ever retrieving the cache and spare devices.
7639 * Passing ZFS_IMPORT_MISSING_LOG to spa_tryimport() makes it fetch
7640 * the correct configuration regardless of the missing log device.
7641 */
7642 spa->spa_import_flags |= ZFS_IMPORT_MISSING_LOG;
7643
7644 error = spa_load(spa, SPA_LOAD_TRYIMPORT, SPA_IMPORT_EXISTING);
7645
7646 /*
7647 * If 'tryconfig' was at least parsable, return the current config.
7648 */
7649 if (spa->spa_root_vdev != NULL) {
7650 config = spa_config_generate(spa, NULL, -1ULL, B_TRUE);
7651 fnvlist_add_string(config, ZPOOL_CONFIG_POOL_NAME,
7652 spa_load_name(spa));
7653 fnvlist_add_uint64(config, ZPOOL_CONFIG_POOL_STATE, state);
7654 fnvlist_add_uint64(config, ZPOOL_CONFIG_TIMESTAMP,
7655 spa->spa_uberblock.ub_timestamp);
7656 fnvlist_add_nvlist(config, ZPOOL_CONFIG_LOAD_INFO,
7657 spa->spa_load_info);
7658 fnvlist_add_uint64(config, ZPOOL_CONFIG_ERRATA,
7659 spa->spa_errata);
7660
7661 /*
7662 * If the bootfs property exists on this pool then we
7663 * copy it out so that external consumers can tell which
7664 * pools are bootable.
7665 */
7666 if ((!error || error == EEXIST) && spa->spa_bootfs) {
7667 char *tmpname = kmem_alloc(MAXPATHLEN, KM_SLEEP);
7668
7669 /*
7670 * We have to play games with the name since the
7671 * pool was opened as TRYIMPORT_NAME.
7672 */
7673 if (dsl_dsobj_to_dsname(spa_name(spa),
7674 spa->spa_bootfs, tmpname) == 0) {
7675 char *cp;
7676 char *dsname;
7677
7678 dsname = kmem_alloc(MAXPATHLEN, KM_SLEEP);
7679
7680 cp = strchr(tmpname, '/');
7681 if (cp == NULL) {
7682 (void) strlcpy(dsname, tmpname,
7683 MAXPATHLEN);
7684 } else {
7685 (void) snprintf(dsname, MAXPATHLEN,
7686 "%s/%s", spa_load_name(spa), ++cp);
7687 }
7688 fnvlist_add_string(config, ZPOOL_CONFIG_BOOTFS,
7689 dsname);
7690 kmem_free(dsname, MAXPATHLEN);
7691 }
7692 kmem_free(tmpname, MAXPATHLEN);
7693 }
7694
7695 /*
7696 * Add the list of hot spares and level 2 cache devices.
7697 */
7698 spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
7699 spa_add_spares(spa, config);
7700 spa_add_l2cache(spa, config);
7701 spa_config_exit(spa, SCL_CONFIG, FTAG);
7702 }
7703
7704 spa_unload(spa);
7705 spa_deactivate(spa);
7706 spa_remove(spa);
7707 spa_namespace_exit(FTAG);
7708
7709 return (config);
7710 }
7711
7712 /*
7713 * Pool export/destroy
7714 *
7715 * The act of destroying or exporting a pool is very simple. We make sure there
7716 * is no more pending I/O and any references to the pool are gone. Then, we
7717 * update the pool state and sync all the labels to disk, removing the
7718 * configuration from the cache afterwards. If the 'hardforce' flag is set, then
7719 * we don't sync the labels or remove the configuration cache.
7720 */
7721 static int
7722 spa_export_common(const char *pool, int new_state, nvlist_t **oldconfig,
7723 boolean_t force, boolean_t hardforce)
7724 {
7725 int error = 0;
7726 spa_t *spa;
7727 hrtime_t export_start = gethrtime();
7728
7729 if (oldconfig)
7730 *oldconfig = NULL;
7731
7732 if (!(spa_mode_global & SPA_MODE_WRITE))
7733 return (SET_ERROR(EROFS));
7734
7735 spa_namespace_enter(FTAG);
7736 if ((spa = spa_lookup(pool)) == NULL) {
7737 spa_namespace_exit(FTAG);
7738 return (SET_ERROR(ENOENT));
7739 }
7740
7741 if (spa->spa_is_exporting) {
7742 /* the pool is being exported by another thread */
7743 spa_namespace_exit(FTAG);
7744 return (SET_ERROR(ZFS_ERR_EXPORT_IN_PROGRESS));
7745 }
7746 spa->spa_is_exporting = B_TRUE;
7747
7748 /*
7749 * Put a hold on the pool, drop the namespace lock, stop async tasks
7750 * and see if we can export.
7751 */
7752 spa_open_ref(spa, FTAG);
7753 spa_namespace_exit(FTAG);
7754 spa_async_suspend(spa);
7755 if (spa->spa_zvol_taskq) {
7756 zvol_remove_minors(spa, spa_name(spa), B_TRUE);
7757 taskq_wait(spa->spa_zvol_taskq);
7758 }
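	/*
	 * Reacquire the namespace lock, advertise ourselves as the
	 * exporting thread so concurrent lookups can wait for us, and
	 * drop the hold taken above.
	 */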
7759 spa_namespace_enter(FTAG);
7760 spa->spa_export_thread = curthread;
7761 spa_close(spa, FTAG);
7762
7763 if (spa->spa_state == POOL_STATE_UNINITIALIZED) {
7764 spa_namespace_exit(FTAG);
7765 goto export_spa;
7766 }
7767
7768 /*
7769 * The pool will be in core if it's openable, in which case we can
7770 * modify its state. Objsets may be open only because they're dirty,
7771 * so we have to force it to sync before checking spa_refcnt.
7772 */
7773 if (spa->spa_sync_on) {
7774 txg_wait_synced(spa->spa_dsl_pool, 0);
7775 spa_evicting_os_wait(spa);
7776 }
7777
7778 /*
7779 * A pool cannot be exported or destroyed if there are active
7780 * references. If we are resetting a pool, allow references by
7781 * fault injection handlers.
7782 */
7783 if (!spa_refcount_zero(spa) || (spa->spa_inject_ref != 0)) {
7784 error = SET_ERROR(EBUSY);
7785 goto fail;
7786 }
7787
7788 spa_namespace_exit(FTAG);
7789 /*
7790 * At this point we no longer hold the spa_namespace_lock and
7791 * there are no references on the spa. Future spa_lookup()s will
7792 * notice spa->spa_export_thread and wait until we signal
7793 * that we are finished.
7794 */
7795
7796 if (spa->spa_sync_on) {
7797 vdev_t *rvd = spa->spa_root_vdev;
7798 /*
7799 * A pool cannot be exported if it has an active shared spare.
7800 * This is to prevent other pools from stealing the active spare
7801 * from an exported pool. If the user insists, such a pool can
7802 * still be forcibly exported.
7803 */
7804 if (!force && new_state == POOL_STATE_EXPORTED &&
7805 spa_has_active_shared_spare(spa)) {
7806 error = SET_ERROR(EXDEV);
7807 spa_namespace_enter(FTAG);
7808 goto fail;
7809 }
7810
7811 /*
7812 * We're about to export or destroy this pool. Make sure
7813 * we stop all initialization and trim activity here before
7814 * we set the spa_final_txg. This will ensure that all
7815 * dirty data resulting from the initialization is
7816 * committed to disk before we unload the pool.
7817 */
7818 vdev_initialize_stop_all(rvd, VDEV_INITIALIZE_ACTIVE);
7819 vdev_trim_stop_all(rvd, VDEV_TRIM_ACTIVE);
7820 vdev_autotrim_stop_all(spa);
7821 vdev_rebuild_stop_all(spa);
7822 l2arc_spa_rebuild_stop(spa);
7823
7824 /*
7825 * We want this to be reflected on every label,
7826 * so mark them all dirty. spa_unload() will do the
7827 * final sync that pushes these changes out.
7828 */
7829 if (new_state != POOL_STATE_UNINITIALIZED && !hardforce) {
7830 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
7831 spa->spa_state = new_state;
7832 vdev_config_dirty(rvd);
7833 spa_config_exit(spa, SCL_ALL, FTAG);
7834 }
7835
7836 if (spa_should_sync_time_logger_on_unload(spa))
7837 spa_unload_sync_time_logger(spa);
7838
7839 /*
7840 * If the log space map feature is enabled and the pool is
7841 * getting exported (but not destroyed), we want to spend some
7842 * time flushing as many metaslabs as we can in an attempt to
7843 * destroy log space maps and save import time. This has to be
7844 * done before we set the spa_final_txg, otherwise
7845 * spa_sync() -> spa_flush_metaslabs() may dirty the final TXGs.
7846 * spa_should_flush_logs_on_unload() should be called after
7847 * spa_state has been set to the new_state.
7848 */
7849 if (spa_should_flush_logs_on_unload(spa))
7850 spa_unload_log_sm_flush_all(spa);
7851
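		/*
		 * Cap the last txg that may sync further changes. The
		 * extra TXG_DEFER_SIZE txgs leave room for frees deferred
		 * by the final syncs before the pool is unloaded.
		 */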
7852 if (new_state != POOL_STATE_UNINITIALIZED && !hardforce) {
7853 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
7854 spa->spa_final_txg = spa_last_synced_txg(spa) +
7855 TXG_DEFER_SIZE + 1;
7856 spa_config_exit(spa, SCL_ALL, FTAG);
7857 }
7858 }
7859
7860 export_spa:
7861 spa_export_os(spa);
7862
7863 if (new_state == POOL_STATE_DESTROYED)
7864 spa_event_notify(spa, NULL, NULL, ESC_ZFS_POOL_DESTROY);
7865 else if (new_state == POOL_STATE_EXPORTED)
7866 spa_event_notify(spa, NULL, NULL, ESC_ZFS_POOL_EXPORT);
7867
7868 if (spa->spa_state != POOL_STATE_UNINITIALIZED) {
7869 spa_unload(spa);
7870 spa_deactivate(spa);
7871 }
7872
7873 if (oldconfig && spa->spa_config)
7874 *oldconfig = fnvlist_dup(spa->spa_config);
7875
7876 if (new_state == POOL_STATE_EXPORTED)
7877 zio_handle_export_delay(spa, gethrtime() - export_start);
7878
7879 /*
7880 * Take the namespace lock for the actual spa_t removal
7881 */
7882 spa_namespace_enter(FTAG);
7883 if (new_state != POOL_STATE_UNINITIALIZED) {
7884 if (!hardforce)
7885 spa_write_cachefile(spa, B_TRUE, B_TRUE, B_FALSE);
7886 spa_remove(spa);
7887 } else {
7888 /*
7889 * If spa_remove() is not called for this spa_t and
7890 * there is any possibility that it can be reused,
7891 * we make sure to reset the exporting flag.
7892 */
7893 spa->spa_is_exporting = B_FALSE;
7894 spa->spa_export_thread = NULL;
7895 }
7896
7897 /*
7898 * Wake up any waiters in spa_lookup()
7899 */
7900 spa_namespace_broadcast();
7901 spa_namespace_exit(FTAG);
7902 return (0);
7903
7904 fail:
7905 spa->spa_is_exporting = B_FALSE;
7906 spa->spa_export_thread = NULL;
7907
7908 spa_async_resume(spa);
7909 /*
7910 * Wake up any waiters in spa_lookup()
7911 */
7912 spa_namespace_broadcast();
7913 spa_namespace_exit(FTAG);
7914 return (error);
7915 }
7916
7917 /*
7918 * Destroy a storage pool.
7919 */
7920 int
7921 spa_destroy(const char *pool)
7922 {
7923 return (spa_export_common(pool, POOL_STATE_DESTROYED, NULL,
7924 B_FALSE, B_FALSE));
7925 }
7926
7927 /*
7928 * Export a storage pool.
7929 */
7930 int
7931 spa_export(const char *pool, nvlist_t **oldconfig, boolean_t force,
7932 boolean_t hardforce)
7933 {
7934 return (spa_export_common(pool, POOL_STATE_EXPORTED, oldconfig,
7935 force, hardforce));
7936 }
7937
7938 /*
7939 * Similar to spa_export(), this unloads the spa_t without actually removing it
7940 * from the namespace in any way.
7941 */
7942 int
7943 spa_reset(const char *pool)
7944 {
7945 return (spa_export_common(pool, POOL_STATE_UNINITIALIZED, NULL,
7946 B_FALSE, B_FALSE));
7947 }
7948
7949 /*
7950 * ==========================================================================
7951 * Device manipulation
7952 * ==========================================================================
7953 */
7954
7955 /*
7956 * This is called as a synctask to increment the draid feature flag
7957 */
7958 static void
7959 spa_draid_feature_incr(void *arg, dmu_tx_t *tx)
7960 {
7961 spa_t *spa = dmu_tx_pool(tx)->dp_spa;
7962 int draid = (int)(uintptr_t)arg;
7963
7964 for (int c = 0; c < draid; c++)
7965 spa_feature_incr(spa, SPA_FEATURE_DRAID, tx);
7966 }
7967
7968 /*
7969 * This is called as a synctask to increment the draid_fail_domains feature flag
7970 */
7971 static void
7972 spa_draid_fdomains_feature_incr(void *arg, dmu_tx_t *tx)
7973 {
7974 spa_t *spa = dmu_tx_pool(tx)->dp_spa;
7975 int nfgrp = (int)(uintptr_t)arg;
7976
7977 for (int c = 0; c < nfgrp; c++)
7978 spa_feature_incr(spa, SPA_FEATURE_DRAID_FAIL_DOMAINS, tx);
7979 }
7980
7981 /*
7982 * Add a device to a storage pool.
7983 */
7984 int
7985 spa_vdev_add(spa_t *spa, nvlist_t *nvroot, boolean_t check_ashift)
7986 {
7987 uint64_t txg, ndraid = 0, draid_nfgroup = 0;
7988 int error;
7989 vdev_t *rvd = spa->spa_root_vdev;
7990 vdev_t *vd, *tvd;
7991 nvlist_t **spares, **l2cache;
7992 uint_t nspares, nl2cache;
7993
7994 ASSERT(spa_writeable(spa));
7995
7996 txg = spa_vdev_enter(spa);
7997
7998 if ((error = spa_config_parse(spa, &vd, nvroot, NULL, 0,
7999 VDEV_ALLOC_ADD)) != 0)
8000 return (spa_vdev_exit(spa, NULL, txg, error));
8001
8002 spa->spa_pending_vdev = vd; /* spa_vdev_exit() will clear this */
8003
8004 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES, &spares,
8005 &nspares) != 0)
8006 nspares = 0;
8007
8008 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE, &l2cache,
8009 &nl2cache) != 0)
8010 nl2cache = 0;
8011
8012 if (vd->vdev_children == 0 && nspares == 0 && nl2cache == 0)
8013 return (spa_vdev_exit(spa, vd, txg, EINVAL));
8014
8015 if (vd->vdev_children != 0 &&
8016 (error = vdev_create(vd, txg, B_FALSE)) != 0) {
8017 return (spa_vdev_exit(spa, vd, txg, error));
8018 }
8019
8020 /*
8021 * The virtual dRAID spares must be added after the vdev tree is created
8022 * and the vdev guids are generated. The guid of their associated
8023 * dRAID is stored in the config and used when opening the spare.
8024 */
8025 if ((error = vdev_draid_spare_create(nvroot, vd, &ndraid,
8026 &draid_nfgroup, rvd->vdev_children)) == 0) {
8027
8028 if (ndraid > 0 && nvlist_lookup_nvlist_array(nvroot,
8029 ZPOOL_CONFIG_SPARES, &spares, &nspares) != 0)
8030 nspares = 0;
8031
8032 if (draid_nfgroup > 0 && !spa_feature_is_enabled(spa,
8033 SPA_FEATURE_DRAID_FAIL_DOMAINS))
8034 return (spa_vdev_exit(spa, vd, txg, ENOTSUP));
8035 } else {
8036 return (spa_vdev_exit(spa, vd, txg, error));
8037 }
8038
8039 /*
8040 * We must validate the spares and l2cache devices after checking the
8041 * children. Otherwise, vdev_inuse() will blindly overwrite the spare.
8042 */
8043 if ((error = spa_validate_aux(spa, nvroot, txg, VDEV_ALLOC_ADD)) != 0)
8044 return (spa_vdev_exit(spa, vd, txg, error));
8045
8046 /*
8047 * If we are in the middle of a device removal, we can only add
8048 * devices which match the existing devices in the pool.
8049 * If we are in the middle of a removal, or have some indirect
8050 * vdevs, we cannot add raidz or dRAID top levels.
8051 */
8052 if (spa->spa_vdev_removal != NULL ||
8053 spa->spa_removing_phys.sr_prev_indirect_vdev != -1) {
8054 for (int c = 0; c < vd->vdev_children; c++) {
8055 tvd = vd->vdev_child[c];
8056 if (spa->spa_vdev_removal != NULL &&
8057 tvd->vdev_ashift != spa->spa_max_ashift) {
8058 return (spa_vdev_exit(spa, vd, txg, EINVAL));
8059 }
8060 /* Fail if top level vdev is raidz or a dRAID */
8061 if (vdev_get_nparity(tvd) != 0)
8062 return (spa_vdev_exit(spa, vd, txg, EINVAL));
8063
8064 /*
8065 * Need the top level mirror to be
8066 * a mirror of leaf vdevs only
8067 */
8068 if (tvd->vdev_ops == &vdev_mirror_ops) {
8069 for (uint64_t cid = 0;
8070 cid < tvd->vdev_children; cid++) {
8071 vdev_t *cvd = tvd->vdev_child[cid];
8072 if (!cvd->vdev_ops->vdev_op_leaf) {
8073 return (spa_vdev_exit(spa, vd,
8074 txg, EINVAL));
8075 }
8076 }
8077 }
8078 }
8079 }
8080
8081 if (check_ashift && spa->spa_max_ashift == spa->spa_min_ashift) {
8082 for (int c = 0; c < vd->vdev_children; c++) {
8083 tvd = vd->vdev_child[c];
8084 if (tvd->vdev_ashift != spa->spa_max_ashift) {
8085 return (spa_vdev_exit(spa, vd, txg,
8086 ZFS_ERR_ASHIFT_MISMATCH));
8087 }
8088 }
8089 }
8090
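	/*
	 * Graft the new top-level vdevs from the temporary parse tree onto
	 * the root vdev and mark their configs dirty so the addition is
	 * persisted.
	 */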
8091 for (int c = 0; c < vd->vdev_children; c++) {
8092 tvd = vd->vdev_child[c];
8093 vdev_remove_child(vd, tvd);
8094 tvd->vdev_id = rvd->vdev_children;
8095 vdev_add_child(rvd, tvd);
8096 vdev_config_dirty(tvd);
8097 }
8098
8099 if (nspares != 0) {
8100 spa_set_aux_vdevs(&spa->spa_spares, spares, nspares,
8101 ZPOOL_CONFIG_SPARES);
8102 spa_load_spares(spa);
8103 spa->spa_spares.sav_sync = B_TRUE;
8104 }
8105
8106 if (nl2cache != 0) {
8107 spa_set_aux_vdevs(&spa->spa_l2cache, l2cache, nl2cache,
8108 ZPOOL_CONFIG_L2CACHE);
8109 spa_load_l2cache(spa);
8110 spa->spa_l2cache.sav_sync = B_TRUE;
8111 }
8112
8113 /*
8114 * We can't increment a feature while holding spa_vdev so we
8115 * have to do it in a synctask.
8116 */
8117 if (ndraid != 0) {
8118 dmu_tx_t *tx;
8119
8120 tx = dmu_tx_create_assigned(spa->spa_dsl_pool, txg);
8121
8122 dsl_sync_task_nowait(spa->spa_dsl_pool, spa_draid_feature_incr,
8123 (void *)(uintptr_t)ndraid, tx);
8124
8125 if (draid_nfgroup > 0)
8126 dsl_sync_task_nowait(spa->spa_dsl_pool,
8127 spa_draid_fdomains_feature_incr,
8128 (void *)(uintptr_t)draid_nfgroup, tx);
8129
8130 dmu_tx_commit(tx);
8131 }
8132
8133 /*
8134 * We have to be careful when adding new vdevs to an existing pool.
8135 * If other threads start allocating from these vdevs before we
8136 * sync the config cache, and we lose power, then upon reboot we may
8137 * fail to open the pool because there are DVAs that the config cache
8138 * can't translate. Therefore, we first add the vdevs without
8139 * initializing metaslabs; sync the config cache (via spa_vdev_exit());
8140 * and then let spa_config_update() initialize the new metaslabs.
8141 *
8142 * spa_load() checks for added-but-not-initialized vdevs, so that
8143 * if we lose power at any point in this sequence, the remaining
8144 * steps will be completed the next time we load the pool.
8145 */
8146 (void) spa_vdev_exit(spa, vd, txg, 0);
8147
8148 spa_namespace_enter(FTAG);
8149 spa_config_update(spa, SPA_CONFIG_UPDATE_POOL);
8150 spa_event_notify(spa, NULL, NULL, ESC_ZFS_VDEV_ADD);
8151 spa_namespace_exit(FTAG);
8152
8153 return (0);
8154 }
8155
8156 /*
8157 * Given a vdev to be replaced and its parent, check for a possible
8158 * "double spare" condition if a vdev is to be replaced by a spare. When this
8159 * happens, you can get two spares assigned to one failed vdev.
8160 *
8161 * To trigger a double spare condition:
8162 *
8163 * 1. disk1 fails
8164 * 2. 1st spare is kicked in for disk1 and it resilvers
8165 * 3. Someone replaces disk1 with a new blank disk
8166 * 4. New blank disk starts resilvering
8167 * 5. While resilvering, new blank disk has IO errors and faults
8168 * 6. 2nd spare is kicked in for new blank disk
8169 * 7. At this point two spares are kicked in for the original disk1.
8170 *
8171 * It looks like this:
8172 *
8173 * NAME STATE READ WRITE CKSUM
8174 * tank2 DEGRADED 0 0 0
8175 * draid2:6d:10c:2s-0 DEGRADED 0 0 0
8176 * scsi-0QEMU_QEMU_HARDDISK_d1 ONLINE 0 0 0
8177 * scsi-0QEMU_QEMU_HARDDISK_d2 ONLINE 0 0 0
8178 * scsi-0QEMU_QEMU_HARDDISK_d3 ONLINE 0 0 0
8179 * scsi-0QEMU_QEMU_HARDDISK_d4 ONLINE 0 0 0
8180 * scsi-0QEMU_QEMU_HARDDISK_d5 ONLINE 0 0 0
8181 * scsi-0QEMU_QEMU_HARDDISK_d6 ONLINE 0 0 0
8182 * scsi-0QEMU_QEMU_HARDDISK_d7 ONLINE 0 0 0
8183 * scsi-0QEMU_QEMU_HARDDISK_d8 ONLINE 0 0 0
8184 * scsi-0QEMU_QEMU_HARDDISK_d9 ONLINE 0 0 0
8185 * spare-9 DEGRADED 0 0 0
8186 * replacing-0 DEGRADED 0 93 0
8187 * scsi-0QEMU_QEMU_HARDDISK_d10-part1/old UNAVAIL 0 0 0
8188 * spare-1 DEGRADED 0 0 0
8189 * scsi-0QEMU_QEMU_HARDDISK_d10 REMOVED 0 0 0
8190 * draid2-0-0 ONLINE 0 0 0
8191 * draid2-0-1 ONLINE 0 0 0
8192 * spares
8193 * draid2-0-0 INUSE currently in use
8194 * draid2-0-1 INUSE currently in use
8195 *
8196 * ARGS:
8197 *
8198 * newvd: New spare disk
8199 * pvd: Parent vdev_t the spare should attach to
8200 *
8201 * This function returns B_TRUE if adding the new vdev would create a double
8202 * spare condition, B_FALSE otherwise.
8203 */
8204 static boolean_t
8205 spa_vdev_new_spare_would_cause_double_spares(vdev_t *newvd, vdev_t *pvd)
8206 {
8207 vdev_t *ppvd;
8208
8209 ppvd = pvd->vdev_parent;
8210 if (ppvd == NULL)
8211 return (B_FALSE);
8212
8213 /*
8214 * To determine if this configuration would cause a double spare, we
8215 * look at the vdev_ops of the parent vdev, and of the parent's parent
8216 * vdev. We also look at vdev_isspare on the new disk. A double spare
8217 * condition looks like this:
8218 *
8219 * 1. parent of parent's op is a spare or draid spare
8220 * 2. parent's op is replacing
8221 * 3. new disk is a spare
8222 */
8223 if ((ppvd->vdev_ops == &vdev_spare_ops) ||
8224 (ppvd->vdev_ops == &vdev_draid_spare_ops))
8225 if (pvd->vdev_ops == &vdev_replacing_ops)
8226 if (newvd->vdev_isspare)
8227 return (B_TRUE);
8228
8229 return (B_FALSE);
8230 }
8231
8232 /*
8233 * Attach a device to a vdev specified by its guid. The vdev type can be
8234 * a mirror, a raidz, or a leaf device that is also a top-level (e.g. a
8235 * single device). When the vdev is a single device, a mirror vdev will be
8236 * automatically inserted.
8237 *
8238 * If 'replacing' is specified, the new device is intended to replace the
8239 * existing device; in this case the two devices are made into their own
8240 * mirror using the 'replacing' vdev, which is functionally identical to
8241 * the mirror vdev (it actually reuses all the same ops) but has a few
8242 * extra rules: you can't attach to it after it's been created, and upon
8243 * completion of resilvering, the first disk (the one being replaced)
8244 * is automatically detached.
8245 *
8246 * If 'rebuild' is specified, then sequential reconstruction (a.k.a. rebuild)
8247 * should be performed instead of traditional healing reconstruction. From
8248 * an administrator's perspective these are both resilver operations.
8249 */
8250 int
8251 spa_vdev_attach(spa_t *spa, uint64_t guid, nvlist_t *nvroot, int replacing,
8252 int rebuild)
8253 {
8254 uint64_t txg, dtl_max_txg;
8255 vdev_t *rvd = spa->spa_root_vdev;
8256 vdev_t *oldvd, *newvd, *newrootvd, *pvd, *tvd;
8257 vdev_ops_t *pvops;
8258 char *oldvdpath, *newvdpath;
8259 int newvd_isspare = B_FALSE;
8260 int error;
8261
8262 ASSERT(spa_writeable(spa));
8263
8264 txg = spa_vdev_enter(spa);
8265
8266 oldvd = spa_lookup_by_guid(spa, guid, B_FALSE);
8267
8268 ASSERT(spa_namespace_held());
8269 if (spa_feature_is_active(spa, SPA_FEATURE_POOL_CHECKPOINT)) {
8270 error = (spa_has_checkpoint(spa)) ?
8271 ZFS_ERR_CHECKPOINT_EXISTS : ZFS_ERR_DISCARDING_CHECKPOINT;
8272 return (spa_vdev_exit(spa, NULL, txg, error));
8273 }
8274
8275 if (rebuild) {
8276 if (!spa_feature_is_enabled(spa, SPA_FEATURE_DEVICE_REBUILD))
8277 return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
8278
8279 if (dsl_scan_resilvering(spa_get_dsl(spa)) ||
8280 dsl_scan_resilver_scheduled(spa_get_dsl(spa))) {
8281 return (spa_vdev_exit(spa, NULL, txg,
8282 ZFS_ERR_RESILVER_IN_PROGRESS));
8283 }
8284 } else {
8285 if (vdev_rebuild_active(rvd))
8286 return (spa_vdev_exit(spa, NULL, txg,
8287 ZFS_ERR_REBUILD_IN_PROGRESS));
8288 }
8289
8290 if (spa->spa_vdev_removal != NULL) {
8291 return (spa_vdev_exit(spa, NULL, txg,
8292 ZFS_ERR_DEVRM_IN_PROGRESS));
8293 }
8294
8295 if (oldvd == NULL)
8296 return (spa_vdev_exit(spa, NULL, txg, ENODEV));
8297
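	/*
	 * Attaching directly to a raidz top-level vdev means RAIDZ
	 * expansion: the new device becomes an additional column instead
	 * of one half of a mirror or replacing vdev.
	 */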
8298 boolean_t raidz = oldvd->vdev_ops == &vdev_raidz_ops;
8299
8300 if (raidz) {
8301 if (!spa_feature_is_enabled(spa, SPA_FEATURE_RAIDZ_EXPANSION))
8302 return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
8303
8304 /*
8305 * Can't expand a raidz while a prior expansion is in progress.
8306 */
8307 if (spa->spa_raidz_expand != NULL) {
8308 return (spa_vdev_exit(spa, NULL, txg,
8309 ZFS_ERR_RAIDZ_EXPAND_IN_PROGRESS));
8310 }
8311 } else if (!oldvd->vdev_ops->vdev_op_leaf) {
8312 return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
8313 }
8314
8315 if (raidz)
8316 pvd = oldvd;
8317 else
8318 pvd = oldvd->vdev_parent;
8319
8320 if (spa_config_parse(spa, &newrootvd, nvroot, NULL, 0,
8321 VDEV_ALLOC_ATTACH) != 0)
8322 return (spa_vdev_exit(spa, NULL, txg, EINVAL));
8323
8324 if (newrootvd->vdev_children != 1)
8325 return (spa_vdev_exit(spa, newrootvd, txg, EINVAL));
8326
8327 newvd = newrootvd->vdev_child[0];
8328
8329 if (!newvd->vdev_ops->vdev_op_leaf)
8330 return (spa_vdev_exit(spa, newrootvd, txg, EINVAL));
8331
8332 if ((error = vdev_create(newrootvd, txg, replacing)) != 0)
8333 return (spa_vdev_exit(spa, newrootvd, txg, error));
8334
8335 /*
8336 * log, dedup and special vdevs should not be replaced by spares.
8337 */
8338 if ((oldvd->vdev_top->vdev_alloc_bias != VDEV_BIAS_NONE ||
8339 oldvd->vdev_top->vdev_islog) && newvd->vdev_isspare) {
8340 return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
8341 }
8342
8343 /*
8344 * A dRAID spare can only replace a child of its parent dRAID vdev.
8345 */
8346 if (newvd->vdev_ops == &vdev_draid_spare_ops &&
8347 oldvd->vdev_top != vdev_draid_spare_get_parent(newvd)) {
8348 return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
8349 }
8350
8351 if (rebuild) {
8352 /*
8353 * For rebuilds, the top vdev must support reconstruction
8354 * using only space maps. This means the only allowable
8355 * vdev types are the root vdev, a mirror, or dRAID.
8356 */
8357 tvd = pvd;
8358 if (pvd->vdev_top != NULL)
8359 tvd = pvd->vdev_top;
8360
8361 if (tvd->vdev_ops != &vdev_mirror_ops &&
8362 tvd->vdev_ops != &vdev_root_ops &&
8363 tvd->vdev_ops != &vdev_draid_ops) {
8364 return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
8365 }
8366 }
8367
8368 if (!replacing) {
8369 /*
8370 * For attach, the only allowable parent is a mirror or
8371 * the root vdev. A raidz vdev can be attached to, but
8372 * you cannot attach to a raidz child.
8373 */
8374 if (pvd->vdev_ops != &vdev_mirror_ops &&
8375 pvd->vdev_ops != &vdev_root_ops &&
8376 !raidz)
8377 return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
8378
8379 pvops = &vdev_mirror_ops;
8380 } else {
8381 /*
8382 * Active hot spares can only be replaced by inactive hot
8383 * spares.
8384 */
8385 if (pvd->vdev_ops == &vdev_spare_ops &&
8386 oldvd->vdev_isspare &&
8387 !spa_has_spare(spa, newvd->vdev_guid))
8388 return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
8389
8390 /*
8391 * If the source is a hot spare, and the parent isn't already a
8392 * spare, then we want to create a new hot spare. Otherwise, we
8393 * want to create a replacing vdev. The user is not allowed to
8394 * attach to a spared vdev child unless the 'isspare' state is
8395 * the same (spare replaces spare, non-spare replaces
8396 * non-spare).
8397 */
8398 if (pvd->vdev_ops == &vdev_replacing_ops &&
8399 spa_version(spa) < SPA_VERSION_MULTI_REPLACE) {
8400 return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
8401 } else if (pvd->vdev_ops == &vdev_spare_ops &&
8402 newvd->vdev_isspare != oldvd->vdev_isspare) {
8403 return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
8404 }
8405
8406 if (spa_vdev_new_spare_would_cause_double_spares(newvd, pvd)) {
8407 vdev_dbgmsg(newvd,
8408 "disk would create double spares, ignore.");
8409 return (spa_vdev_exit(spa, newrootvd, txg, EEXIST));
8410 }
8411
8412 if (newvd->vdev_isspare)
8413 pvops = &vdev_spare_ops;
8414 else
8415 pvops = &vdev_replacing_ops;
8416 }
8417
8418 /*
8419 * Make sure the new device is big enough.
8420 */
8421 vdev_t *min_vdev = raidz ? oldvd->vdev_child[0] : oldvd;
8422 if (newvd->vdev_asize < vdev_get_min_asize(min_vdev))
8423 return (spa_vdev_exit(spa, newrootvd, txg, EOVERFLOW));
8424
8425 /*
8426 * The new device cannot have a higher alignment requirement
8427 * than the top-level vdev.
8428 */
8429 if (newvd->vdev_ashift > oldvd->vdev_top->vdev_ashift) {
8430 return (spa_vdev_exit(spa, newrootvd, txg,
8431 ZFS_ERR_ASHIFT_MISMATCH));
8432 }
8433
8434 /*
8435 * RAIDZ-expansion-specific checks.
8436 */
8437 if (raidz) {
8438 if (vdev_raidz_attach_check(newvd) != 0)
8439 return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
8440
8441 /*
8442 * Fail early if a child is not healthy or being replaced
8443 */
8444 for (int i = 0; i < oldvd->vdev_children; i++) {
8445 if (vdev_is_dead(oldvd->vdev_child[i]) ||
8446 !oldvd->vdev_child[i]->vdev_ops->vdev_op_leaf) {
8447 return (spa_vdev_exit(spa, newrootvd, txg,
8448 ENXIO));
8449 }
8450 /* Also fail if reserved boot area is in-use */
8451 if (vdev_check_boot_reserve(spa, oldvd->vdev_child[i])
8452 != 0) {
8453 return (spa_vdev_exit(spa, newrootvd, txg,
8454 EADDRINUSE));
8455 }
8456 }
8457 }
8458
8459 if (raidz) {
8460 /*
8461 * Note: oldvdpath is freed by spa_strfree(), but the string
8462 * returned by kmem_asprintf() must be freed by kmem_strfree(),
8463 * so we have to copy it into a spa_strdup()-ed string.
8464 */
8465 char *tmp = kmem_asprintf("raidz%u-%u",
8466 (uint_t)vdev_get_nparity(oldvd), (uint_t)oldvd->vdev_id);
8467 oldvdpath = spa_strdup(tmp);
8468 kmem_strfree(tmp);
8469 } else {
8470 oldvdpath = spa_strdup(oldvd->vdev_path);
8471 }
8472 newvdpath = spa_strdup(newvd->vdev_path);
8473
8474 /*
8475 * If this is an in-place replacement, update oldvd's path and devid
8476 * to make it distinguishable from newvd, and unopenable from now on.
8477 */
8478 if (strcmp(oldvdpath, newvdpath) == 0) {
8479 spa_strfree(oldvd->vdev_path);
8480 oldvd->vdev_path = kmem_alloc(strlen(newvdpath) + 5,
8481 KM_SLEEP);
8482 (void) sprintf(oldvd->vdev_path, "%s/old",
8483 newvdpath);
8484 if (oldvd->vdev_devid != NULL) {
8485 spa_strfree(oldvd->vdev_devid);
8486 oldvd->vdev_devid = NULL;
8487 }
8488 spa_strfree(oldvdpath);
8489 oldvdpath = spa_strdup(oldvd->vdev_path);
8490 }
8491
8492 /*
8493 * If the parent is not a mirror, or if we're replacing, insert the new
8494 * mirror/replacing/spare vdev above oldvd.
8495 */
8496 if (!raidz && pvd->vdev_ops != pvops) {
8497 pvd = vdev_add_parent(oldvd, pvops);
8498 ASSERT(pvd->vdev_ops == pvops);
8499 ASSERT(oldvd->vdev_parent == pvd);
8500 }
8501
8502 ASSERT(pvd->vdev_top->vdev_parent == rvd);
8503
8504 /*
8505 * Extract the new device from its root and add it to pvd.
8506 */
8507 vdev_remove_child(newrootvd, newvd);
8508 newvd->vdev_id = pvd->vdev_children;
8509 newvd->vdev_crtxg = oldvd->vdev_crtxg;
8510 vdev_add_child(pvd, newvd);
8511
8512 /*
8513 * Reevaluate the parent vdev state.
8514 */
8515 vdev_propagate_state(pvd);
8516
8517 tvd = newvd->vdev_top;
8518 ASSERT(pvd->vdev_top == tvd);
8519 ASSERT(tvd->vdev_parent == rvd);
8520
8521 vdev_config_dirty(tvd);
8522
8523 /*
8524 * Set newvd's DTL to [TXG_INITIAL, dtl_max_txg) so that we account
8525 * for any dmu_sync-ed blocks. It will propagate upward when
8526 * spa_vdev_exit() calls vdev_dtl_reassess().
8527 */
8528 dtl_max_txg = txg + TXG_CONCURRENT_STATES;
8529
8530 if (raidz) {
8531 /*
8532 * Wait for the youngest allocations and frees to sync,
8533 * and then wait for the deferral of those frees to finish.
8534 */
8535 spa_vdev_config_exit(spa, NULL,
8536 txg + TXG_CONCURRENT_STATES + TXG_DEFER_SIZE, 0, FTAG);
8537
8538 vdev_initialize_stop_all(tvd, VDEV_INITIALIZE_ACTIVE);
8539 vdev_trim_stop_all(tvd, VDEV_TRIM_ACTIVE);
8540 vdev_autotrim_stop_wait(tvd);
8541
8542 dtl_max_txg = spa_vdev_config_enter(spa);
8543
8544 tvd->vdev_rz_expanding = B_TRUE;
8545
8546 vdev_dirty_leaves(tvd, VDD_DTL, dtl_max_txg);
8547 vdev_config_dirty(tvd);
8548
8549 dmu_tx_t *tx = dmu_tx_create_assigned(spa->spa_dsl_pool,
8550 dtl_max_txg);
8551 dsl_sync_task_nowait(spa->spa_dsl_pool, vdev_raidz_attach_sync,
8552 newvd, tx);
8553 dmu_tx_commit(tx);
8554 } else {
8555 vdev_dtl_dirty(newvd, DTL_MISSING, TXG_INITIAL,
8556 dtl_max_txg - TXG_INITIAL);
8557
8558 if (newvd->vdev_isspare) {
8559 spa_spare_activate(newvd);
8560 spa_event_notify(spa, newvd, NULL, ESC_ZFS_VDEV_SPARE);
8561 }
8562
8563 newvd_isspare = newvd->vdev_isspare;
8564
8565 /*
8566 * Mark newvd's DTL dirty in this txg.
8567 */
8568 vdev_dirty(tvd, VDD_DTL, newvd, txg);
8569
8570 /*
8571 * Schedule the resilver or rebuild to restart in the future.
8572 * We do this to ensure that dmu_sync-ed blocks have been
8573 * stitched into the respective datasets.
8574 */
8575 if (rebuild) {
8576 newvd->vdev_rebuild_txg = txg;
8577
8578 vdev_rebuild(tvd, txg);
8579 } else {
8580 newvd->vdev_resilver_txg = txg;
8581
8582 if (dsl_scan_resilvering(spa_get_dsl(spa)) &&
8583 spa_feature_is_enabled(spa,
8584 SPA_FEATURE_RESILVER_DEFER)) {
8585 vdev_defer_resilver(newvd);
8586 } else {
8587 dsl_scan_restart_resilver(spa->spa_dsl_pool,
8588 dtl_max_txg);
8589 }
8590 }
8591 }
8592
8593 if (spa->spa_bootfs)
8594 spa_event_notify(spa, newvd, NULL, ESC_ZFS_BOOTFS_VDEV_ATTACH);
8595
8596 spa_event_notify(spa, newvd, NULL, ESC_ZFS_VDEV_ATTACH);
8597
8598 /*
8599 * Commit the config
8600 */
8601 (void) spa_vdev_exit(spa, newrootvd, dtl_max_txg, 0);
8602
8603 spa_history_log_internal(spa, "vdev attach", NULL,
8604 "%s vdev=%s %s vdev=%s",
8605 replacing && newvd_isspare ? "spare in" :
8606 replacing ? "replace" : "attach", newvdpath,
8607 replacing ? "for" : "to", oldvdpath);
8608
8609 spa_strfree(oldvdpath);
8610 spa_strfree(newvdpath);
8611
8612 return (0);
8613 }
8614
8615 /*
8616 * Detach a device from a mirror or replacing vdev.
8617 *
8618 * If 'replace_done' is specified, only detach if the parent
8619 * is a replacing or a spare vdev.
8620 */
8621 int
8622 spa_vdev_detach(spa_t *spa, uint64_t guid, uint64_t pguid, int replace_done)
8623 {
8624 uint64_t txg;
8625 int error;
8626 vdev_t *rvd __maybe_unused = spa->spa_root_vdev;
8627 vdev_t *vd, *pvd, *cvd, *tvd;
8628 boolean_t unspare = B_FALSE;
8629 uint64_t unspare_guid = 0;
8630 char *vdpath;
8631
8632 ASSERT(spa_writeable(spa));
8633
8634 txg = spa_vdev_detach_enter(spa, guid);
8635
8636 vd = spa_lookup_by_guid(spa, guid, B_FALSE);
8637
8638 /*
8639 * Besides being called directly from userland through the
8640 * ioctl interface, spa_vdev_detach() can potentially be called
8641 * at the end of spa_vdev_resilver_done().
8642 *
8643 * In the regular case, when we have a checkpoint this shouldn't
8644 * happen as we never empty the DTLs of a vdev during the scrub
8645 * [see comment in dsl_scan_done()]. Thus spa_vdev_resilver_done()
8646 * should never get here when we have a checkpoint.
8647 *
8648 * That said, even in the case where we checkpoint the pool exactly
8649 * as spa_vdev_resilver_done() calls this function, everything
8650 * should be fine as the resilver will return right away.
8651 */
8652 ASSERT(spa_namespace_held());
8653 if (spa_feature_is_active(spa, SPA_FEATURE_POOL_CHECKPOINT)) {
8654 error = (spa_has_checkpoint(spa)) ?
8655 ZFS_ERR_CHECKPOINT_EXISTS : ZFS_ERR_DISCARDING_CHECKPOINT;
8656 return (spa_vdev_exit(spa, NULL, txg, error));
8657 }
8658
8659 if (vd == NULL)
8660 return (spa_vdev_exit(spa, NULL, txg, ENODEV));
8661
8662 if (!vd->vdev_ops->vdev_op_leaf)
8663 return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
8664
8665 pvd = vd->vdev_parent;
8666
8667 /*
8668 * If the parent/child relationship is not as expected, don't do it.
8669 * Consider M(A,R(B,C)) -- that is, a mirror of A with a replacing
8670 * vdev that's replacing B with C. The user's intent in replacing
8671 * is to go from M(A,B) to M(A,C). If the user decides to cancel
8672 * the replace by detaching C, the expected behavior is to end up
8673 * M(A,B). But suppose that right after deciding to detach C,
8674 * the replacement of B completes. We would have M(A,C), and then
8675 * ask to detach C, which would leave us with just A -- not what
8676 * the user wanted. To prevent this, we make sure that the
8677 * parent/child relationship hasn't changed -- in this example,
8678 * that C's parent is still the replacing vdev R.
8679 */
8680 if (pvd->vdev_guid != pguid && pguid != 0)
8681 return (spa_vdev_exit(spa, NULL, txg, EBUSY));
8682
8683 /*
8684 * Only 'replacing' or 'spare' vdevs can be replaced.
8685 */
8686 if (replace_done && pvd->vdev_ops != &vdev_replacing_ops &&
8687 pvd->vdev_ops != &vdev_spare_ops)
8688 return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
8689
8690 ASSERT(pvd->vdev_ops != &vdev_spare_ops ||
8691 spa_version(spa) >= SPA_VERSION_SPARES);
8692
8693 /*
8694 * Only mirror, replacing, and spare vdevs support detach.
8695 */
8696 if (pvd->vdev_ops != &vdev_replacing_ops &&
8697 pvd->vdev_ops != &vdev_mirror_ops &&
8698 pvd->vdev_ops != &vdev_spare_ops)
8699 return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
8700
8701 /*
8702 * If this device has the only valid copy of some data,
8703 * we cannot safely detach it.
8704 */
8705 if (vdev_dtl_required(vd))
8706 return (spa_vdev_exit(spa, NULL, txg, EBUSY));
8707
8708 ASSERT(pvd->vdev_children >= 2);
8709
8710 /*
8711 * If we are detaching the second disk from a replacing vdev, then
8712 * check to see if we changed the original vdev's path to have "/old"
8713 * at the end in spa_vdev_attach(). If so, undo that change now.
8714 */
8715 if (pvd->vdev_ops == &vdev_replacing_ops && vd->vdev_id > 0 &&
8716 vd->vdev_path != NULL) {
8717 size_t len = strlen(vd->vdev_path);
8718
8719 for (int c = 0; c < pvd->vdev_children; c++) {
8720 cvd = pvd->vdev_child[c];
8721
8722 if (cvd == vd || cvd->vdev_path == NULL)
8723 continue;
8724
8725 if (strncmp(cvd->vdev_path, vd->vdev_path, len) == 0 &&
8726 strcmp(cvd->vdev_path + len, "/old") == 0) {
8727 spa_strfree(cvd->vdev_path);
8728 cvd->vdev_path = spa_strdup(vd->vdev_path);
8729 break;
8730 }
8731 }
8732 }
8733
8734 /*
8735 * If we are detaching the original disk from a normal spare, then it
8736 * implies that the spare should become a real disk, and be removed
8737 * from the active spare list for the pool. dRAID spares on the
8738 * other hand are coupled to the pool and thus should never be removed
8739 * from the spares list.
8740 */
8741 if (pvd->vdev_ops == &vdev_spare_ops && vd->vdev_id == 0) {
8742 vdev_t *last_cvd = pvd->vdev_child[pvd->vdev_children - 1];
8743
8744 if (last_cvd->vdev_isspare &&
8745 last_cvd->vdev_ops != &vdev_draid_spare_ops) {
8746 unspare = B_TRUE;
8747 }
8748 }
8749
8750 /*
8751 * Erase the disk labels so the disk can be used for other things.
8752 * This must be done after all other error cases are handled,
8753 * but before we disembowel vd (so we can still do I/O to it).
8754 * But if we can't do it, don't treat the error as fatal --
8755 * it may be that the unwritability of the disk is the reason
8756 * it's being detached!
8757 */
8758 (void) vdev_label_init(vd, 0, VDEV_LABEL_REMOVE);
8759
8760 /*
8761 * Remove vd from its parent and compact the parent's children.
8762 */
8763 vdev_remove_child(pvd, vd);
8764 vdev_compact_children(pvd);
8765
8766 /*
8767 * Remember one of the remaining children so we can get tvd below.
8768 */
8769 cvd = pvd->vdev_child[pvd->vdev_children - 1];
8770
8771 /*
8772 * If we need to remove the remaining child from the list of hot spares,
8773 * do it now, marking the vdev as no longer a spare in the process.
8774 * We must do this before vdev_remove_parent(), because that can
8775 * change the GUID if it creates a new toplevel GUID. For a similar
8776 * reason, we must remove the spare now, in the same txg as the detach;
8777 * otherwise someone could attach a new sibling, change the GUID, and
8778 * the subsequent attempt to spa_vdev_remove(unspare_guid) would fail.
8779 */
8780 if (unspare) {
8781 ASSERT(cvd->vdev_isspare);
8782 spa_spare_remove(cvd);
8783 unspare_guid = cvd->vdev_guid;
8784 (void) spa_vdev_remove(spa, unspare_guid, B_TRUE);
8785 cvd->vdev_unspare = B_TRUE;
8786 }
8787
8788 /*
8789 * If the parent mirror/replacing vdev only has one child,
8790 * the parent is no longer needed. Remove it from the tree.
8791 */
8792 if (pvd->vdev_children == 1) {
8793 if (pvd->vdev_ops == &vdev_spare_ops)
8794 cvd->vdev_unspare = B_FALSE;
8795 vdev_remove_parent(cvd);
8796 }
8797
8798 /*
8799 * We don't set tvd until now because the parent we just removed
8800 * may have been the previous top-level vdev.
8801 */
8802 tvd = cvd->vdev_top;
8803 ASSERT(tvd->vdev_parent == rvd);
8804
8805 /*
8806 * Reevaluate the parent vdev state.
8807 */
8808 vdev_propagate_state(cvd);
8809
8810 /*
8811 * If the 'autoexpand' property is set on the pool then automatically
8812 * try to expand the size of the pool. For example if the device we
8813 * just detached was smaller than the others, it may be possible to
8814 * add metaslabs (i.e. grow the pool). We need to reopen the vdev
8815 * first so that we can obtain the updated sizes of the leaf vdevs.
8816 */
8817 if (spa->spa_autoexpand) {
8818 vdev_reopen(tvd);
8819 vdev_expand(tvd, txg);
8820 }
8821
8822 vdev_config_dirty(tvd);
8823
8824 /*
8825 * Mark vd's DTL as dirty in this txg. vdev_dtl_sync() will see that
8826 * vd->vdev_detached is set and free vd's DTL object in syncing context.
8827 * But first make sure we're not on any *other* txg's DTL list, to
8828 * prevent vd from being accessed after it's freed.
8829 */
8830 vdpath = spa_strdup(vd->vdev_path ? vd->vdev_path : "none");
8831 for (int t = 0; t < TXG_SIZE; t++)
8832 (void) txg_list_remove_this(&tvd->vdev_dtl_list, vd, t);
8833 vd->vdev_detached = B_TRUE;
8834 vdev_dirty(tvd, VDD_DTL, vd, txg);
8835
8836 spa_event_notify(spa, vd, NULL, ESC_ZFS_VDEV_REMOVE);
8837 spa_notify_waiters(spa);
8838
8839 /* hang on to the spa before we release the lock */
8840 spa_open_ref(spa, FTAG);
8841
8842 error = spa_vdev_exit(spa, vd, txg, 0);
8843
8844 spa_history_log_internal(spa, "detach", NULL,
8845 "vdev=%s", vdpath);
8846 spa_strfree(vdpath);
8847
8848 /*
8849 * If this was the removal of the original device in a hot spare vdev,
8850 * then we want to go through and remove the device from the hot spare
8851 * list of every other pool.
8852 */
8853 if (unspare) {
8854 spa_t *altspa = NULL;
8855
8856 spa_namespace_enter(FTAG);
8857 while ((altspa = spa_next(altspa)) != NULL) {
8858 if (altspa->spa_state != POOL_STATE_ACTIVE ||
8859 altspa == spa)
8860 continue;
8861
8862 spa_open_ref(altspa, FTAG);
8863 spa_namespace_exit(FTAG);
8864 (void) spa_vdev_remove(altspa, unspare_guid, B_TRUE);
8865 spa_namespace_enter(FTAG);
8866 spa_close(altspa, FTAG);
8867 }
8868 spa_namespace_exit(FTAG);
8869
8870 /* search the rest of the vdevs for spares to remove */
8871 spa_vdev_resilver_done(spa);
8872 }
8873
8874 /* all done with the spa; OK to release */
8875 spa_namespace_enter(FTAG);
8876 spa_close(spa, FTAG);
8877 spa_namespace_exit(FTAG);
8878
8879 return (error);
8880 }
8881
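/*
 * Apply a single initialize command (start, cancel, suspend, or uninit) to
 * the leaf vdev identified by guid. Vdevs whose initializing is stopped are
 * appended to vd_list so the caller can wait for their threads to exit.
 */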
8882 static int
8883 spa_vdev_initialize_impl(spa_t *spa, uint64_t guid, uint64_t cmd_type,
8884 list_t *vd_list)
8885 {
8886 ASSERT(spa_namespace_held());
8887
8888 spa_config_enter(spa, SCL_CONFIG | SCL_STATE, FTAG, RW_READER);
8889
8890 /* Look up vdev and ensure it's a leaf. */
8891 vdev_t *vd = spa_lookup_by_guid(spa, guid, B_FALSE);
8892 if (vd == NULL || vd->vdev_detached) {
8893 spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
8894 return (SET_ERROR(ENODEV));
8895 } else if (!vd->vdev_ops->vdev_op_leaf || !vdev_is_concrete(vd)) {
8896 spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
8897 return (SET_ERROR(EINVAL));
8898 } else if (!vdev_writeable(vd)) {
8899 spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
8900 return (SET_ERROR(EROFS));
8901 }
8902 mutex_enter(&vd->vdev_initialize_lock);
8903 spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
8904
8905 /*
8906 * When we activate an initialize action we check to see
8907 * if the vdev_initialize_thread is NULL. We do this instead
8908 * of using the vdev_initialize_state since there might be
8909 * a previous initialization process which has completed but
8910 * whose thread has not yet exited.
8911 */
8912 if (cmd_type == POOL_INITIALIZE_START &&
8913 (vd->vdev_initialize_thread != NULL ||
8914 vd->vdev_top->vdev_removing || vd->vdev_top->vdev_rz_expanding)) {
8915 mutex_exit(&vd->vdev_initialize_lock);
8916 return (SET_ERROR(EBUSY));
8917 } else if (cmd_type == POOL_INITIALIZE_CANCEL &&
8918 (vd->vdev_initialize_state != VDEV_INITIALIZE_ACTIVE &&
8919 vd->vdev_initialize_state != VDEV_INITIALIZE_SUSPENDED)) {
8920 mutex_exit(&vd->vdev_initialize_lock);
8921 return (SET_ERROR(ESRCH));
8922 } else if (cmd_type == POOL_INITIALIZE_SUSPEND &&
8923 vd->vdev_initialize_state != VDEV_INITIALIZE_ACTIVE) {
8924 mutex_exit(&vd->vdev_initialize_lock);
8925 return (SET_ERROR(ESRCH));
8926 } else if (cmd_type == POOL_INITIALIZE_UNINIT &&
8927 vd->vdev_initialize_thread != NULL) {
8928 mutex_exit(&vd->vdev_initialize_lock);
8929 return (SET_ERROR(EBUSY));
8930 }
8931
8932 switch (cmd_type) {
8933 case POOL_INITIALIZE_START:
8934 vdev_initialize(vd);
8935 break;
8936 case POOL_INITIALIZE_CANCEL:
8937 vdev_initialize_stop(vd, VDEV_INITIALIZE_CANCELED, vd_list);
8938 break;
8939 case POOL_INITIALIZE_SUSPEND:
8940 vdev_initialize_stop(vd, VDEV_INITIALIZE_SUSPENDED, vd_list);
8941 break;
8942 case POOL_INITIALIZE_UNINIT:
8943 vdev_uninitialize(vd);
8944 break;
8945 default:
8946 panic("invalid cmd_type %llu", (unsigned long long)cmd_type);
8947 }
8948 mutex_exit(&vd->vdev_initialize_lock);
8949
8950 return (0);
8951 }
8952
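/*
 * Start, cancel, suspend, or uninitialize manual initialization for the
 * vdev GUIDs listed in nv. Per-vdev failures are recorded in vdev_errlist
 * and the total number of errors is returned.
 */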
8953 int
8954 spa_vdev_initialize(spa_t *spa, nvlist_t *nv, uint64_t cmd_type,
8955 nvlist_t *vdev_errlist)
8956 {
8957 int total_errors = 0;
8958 list_t vd_list;
8959
8960 list_create(&vd_list, sizeof (vdev_t),
8961 offsetof(vdev_t, vdev_initialize_node));
8962
8963 /*
8964 * We hold the namespace lock through the whole function
8965 * to prevent any changes to the pool while we're starting or
8966 * stopping initialization. The config and state locks are held so that
8967 * we can properly assess the vdev state before we commit to
8968 * the initializing operation.
8969 */
8970 spa_namespace_enter(FTAG);
8971
8972 for (nvpair_t *pair = nvlist_next_nvpair(nv, NULL);
8973 pair != NULL; pair = nvlist_next_nvpair(nv, pair)) {
8974 uint64_t vdev_guid = fnvpair_value_uint64(pair);
8975
8976 int error = spa_vdev_initialize_impl(spa, vdev_guid, cmd_type,
8977 &vd_list);
8978 if (error != 0) {
8979 char guid_as_str[MAXNAMELEN];
8980
8981 (void) snprintf(guid_as_str, sizeof (guid_as_str),
8982 "%llu", (unsigned long long)vdev_guid);
8983 fnvlist_add_int64(vdev_errlist, guid_as_str, error);
8984 total_errors++;
8985 }
8986 }
8987
8988 /* Wait for all initialize threads to stop. */
8989 vdev_initialize_stop_wait(spa, &vd_list);
8990
8991 /* Sync out the initializing state */
8992 txg_wait_synced(spa->spa_dsl_pool, 0);
8993 spa_namespace_exit(FTAG);
8994
8995 list_destroy(&vd_list);
8996
8997 return (total_errors);
8998 }
8999
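/*
 * Apply a single TRIM command (start, cancel, or suspend) to the leaf vdev
 * identified by guid. Vdevs whose TRIM is stopped are appended to vd_list
 * so the caller can wait for their threads to exit.
 */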
9000 static int
9001 spa_vdev_trim_impl(spa_t *spa, uint64_t guid, uint64_t cmd_type,
9002 uint64_t rate, boolean_t partial, boolean_t secure, list_t *vd_list)
9003 {
9004 ASSERT(spa_namespace_held());
9005
9006 spa_config_enter(spa, SCL_CONFIG | SCL_STATE, FTAG, RW_READER);
9007
9008 /* Look up vdev and ensure it's a leaf. */
9009 vdev_t *vd = spa_lookup_by_guid(spa, guid, B_FALSE);
9010 if (vd == NULL || vd->vdev_detached) {
9011 spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
9012 return (SET_ERROR(ENODEV));
9013 } else if (!vd->vdev_ops->vdev_op_leaf || !vdev_is_concrete(vd)) {
9014 spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
9015 return (SET_ERROR(EINVAL));
9016 } else if (!vdev_writeable(vd)) {
9017 spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
9018 return (SET_ERROR(EROFS));
9019 } else if (!vd->vdev_has_trim) {
9020 spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
9021 return (SET_ERROR(EOPNOTSUPP));
9022 } else if (secure && !vd->vdev_has_securetrim) {
9023 spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
9024 return (SET_ERROR(EOPNOTSUPP));
9025 }
9026 mutex_enter(&vd->vdev_trim_lock);
9027 spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
9028
9029 /*
9030 * When we activate a TRIM action we check to see if the
9031 * vdev_trim_thread is NULL. We do this instead of using the
9032 * vdev_trim_state since there might be a previous TRIM process
9033 * which has completed but whose thread has not yet exited.
9034 */
9035 if (cmd_type == POOL_TRIM_START &&
9036 (vd->vdev_trim_thread != NULL || vd->vdev_top->vdev_removing ||
9037 vd->vdev_top->vdev_rz_expanding)) {
9038 mutex_exit(&vd->vdev_trim_lock);
9039 return (SET_ERROR(EBUSY));
9040 } else if (cmd_type == POOL_TRIM_CANCEL &&
9041 (vd->vdev_trim_state != VDEV_TRIM_ACTIVE &&
9042 vd->vdev_trim_state != VDEV_TRIM_SUSPENDED)) {
9043 mutex_exit(&vd->vdev_trim_lock);
9044 return (SET_ERROR(ESRCH));
9045 } else if (cmd_type == POOL_TRIM_SUSPEND &&
9046 vd->vdev_trim_state != VDEV_TRIM_ACTIVE) {
9047 mutex_exit(&vd->vdev_trim_lock);
9048 return (SET_ERROR(ESRCH));
9049 }
9050
9051 switch (cmd_type) {
9052 case POOL_TRIM_START:
9053 vdev_trim(vd, rate, partial, secure);
9054 break;
9055 case POOL_TRIM_CANCEL:
9056 vdev_trim_stop(vd, VDEV_TRIM_CANCELED, vd_list);
9057 break;
9058 case POOL_TRIM_SUSPEND:
9059 vdev_trim_stop(vd, VDEV_TRIM_SUSPENDED, vd_list);
9060 break;
9061 default:
9062 panic("invalid cmd_type %llu", (unsigned long long)cmd_type);
9063 }
9064 mutex_exit(&vd->vdev_trim_lock);
9065
9066 return (0);
9067 }
9068
9069 /*
9070 * Initiates a manual TRIM for the requested vdevs. This kicks off individual
9071 * TRIM threads for each child vdev. These threads pass over all of the free
9072 * space in the vdev's metaslabs and issue TRIM commands for that space.
9073 */
9074 int
9075 spa_vdev_trim(spa_t *spa, nvlist_t *nv, uint64_t cmd_type, uint64_t rate,
9076 boolean_t partial, boolean_t secure, nvlist_t *vdev_errlist)
9077 {
9078 int total_errors = 0;
9079 list_t vd_list;
9080
9081 list_create(&vd_list, sizeof (vdev_t),
9082 offsetof(vdev_t, vdev_trim_node));
9083
9084 /*
9085 * We hold the namespace lock through the whole function
9086 * to prevent any changes to the pool while we're starting or
9087 * stopping TRIM. The config and state locks are held so that
9088 * we can properly assess the vdev state before we commit to
9089 * the TRIM operation.
9090 */
9091 spa_namespace_enter(FTAG);
9092
9093 for (nvpair_t *pair = nvlist_next_nvpair(nv, NULL);
9094 pair != NULL; pair = nvlist_next_nvpair(nv, pair)) {
9095 uint64_t vdev_guid = fnvpair_value_uint64(pair);
9096
9097 int error = spa_vdev_trim_impl(spa, vdev_guid, cmd_type,
9098 rate, partial, secure, &vd_list);
9099 if (error != 0) {
9100 char guid_as_str[MAXNAMELEN];
9101
9102 (void) snprintf(guid_as_str, sizeof (guid_as_str),
9103 "%llu", (unsigned long long)vdev_guid);
9104 fnvlist_add_int64(vdev_errlist, guid_as_str, error);
9105 total_errors++;
9106 }
9107 }
9108
9109 /* Wait for all TRIM threads to stop. */
9110 vdev_trim_stop_wait(spa, &vd_list);
9111
9112 /* Sync out the TRIM state */
9113 txg_wait_synced(spa->spa_dsl_pool, 0);
9114 spa_namespace_exit(FTAG);
9115
9116 list_destroy(&vd_list);
9117
9118 return (total_errors);
9119 }
9120
9121 /*
9122 * Split a set of devices from their mirrors, and create a new pool from them.
9123 */
9124 int
9125 spa_vdev_split_mirror(spa_t *spa, const char *newname, nvlist_t *config,
9126 nvlist_t *props, boolean_t exp)
9127 {
9128 int error = 0;
9129 uint64_t txg, *glist;
9130 spa_t *newspa;
9131 uint_t c, children, lastlog;
9132 nvlist_t **child, *nvl, *tmp;
9133 dmu_tx_t *tx;
9134 const char *altroot = NULL;
9135 vdev_t *rvd, **vml = NULL; /* vdev modify list */
9136 boolean_t activate_slog;
9137
9138 ASSERT(spa_writeable(spa));
9139
9140 txg = spa_vdev_enter(spa);
9141
9142 ASSERT(spa_namespace_held());
9143 if (spa_feature_is_active(spa, SPA_FEATURE_POOL_CHECKPOINT)) {
9144 error = (spa_has_checkpoint(spa)) ?
9145 ZFS_ERR_CHECKPOINT_EXISTS : ZFS_ERR_DISCARDING_CHECKPOINT;
9146 return (spa_vdev_exit(spa, NULL, txg, error));
9147 }
9148
9149 /* clear the log and flush everything up to now */
9150 activate_slog = spa_passivate_log(spa);
9151 (void) spa_vdev_config_exit(spa, NULL, txg, 0, FTAG);
9152 error = spa_reset_logs(spa);
9153 txg = spa_vdev_config_enter(spa);
9154
9155 if (activate_slog)
9156 spa_activate_log(spa);
9157
9158 if (error != 0)
9159 return (spa_vdev_exit(spa, NULL, txg, error));
9160
9161 /* check new spa name before going any further */
9162 if (spa_lookup(newname) != NULL)
9163 return (spa_vdev_exit(spa, NULL, txg, EEXIST));
9164
9165 /*
9166 * scan through all the children to ensure they're all mirrors
9167 */
9168 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &nvl) != 0 ||
9169 nvlist_lookup_nvlist_array(nvl, ZPOOL_CONFIG_CHILDREN, &child,
9170 &children) != 0)
9171 return (spa_vdev_exit(spa, NULL, txg, EINVAL));
9172
9173 /* first, check to ensure we've got the right child count */
9174 rvd = spa->spa_root_vdev;
9175 lastlog = 0;
9176 for (c = 0; c < rvd->vdev_children; c++) {
9177 vdev_t *vd = rvd->vdev_child[c];
9178
9179 /* don't count the holes & logs as children */
9180 if (vd->vdev_islog || (vd->vdev_ops != &vdev_indirect_ops &&
9181 !vdev_is_concrete(vd))) {
9182 if (lastlog == 0)
9183 lastlog = c;
9184 continue;
9185 }
9186
9187 lastlog = 0;
9188 }
9189 if (children != (lastlog != 0 ? lastlog : rvd->vdev_children))
9190 return (spa_vdev_exit(spa, NULL, txg, EINVAL));
9191
9192 /* next, ensure no spare or cache devices are part of the split */
9193 if (nvlist_lookup_nvlist(nvl, ZPOOL_CONFIG_SPARES, &tmp) == 0 ||
9194 nvlist_lookup_nvlist(nvl, ZPOOL_CONFIG_L2CACHE, &tmp) == 0)
9195 return (spa_vdev_exit(spa, NULL, txg, EINVAL));
9196
9197 vml = kmem_zalloc(children * sizeof (vdev_t *), KM_SLEEP);
9198 glist = kmem_zalloc(children * sizeof (uint64_t), KM_SLEEP);
9199
9200 /* then, loop over each vdev and validate it */
9201 for (c = 0; c < children; c++) {
9202 uint64_t is_hole = 0;
9203
9204 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_HOLE,
9205 &is_hole);
9206
9207 if (is_hole != 0) {
9208 if (spa->spa_root_vdev->vdev_child[c]->vdev_ishole ||
9209 spa->spa_root_vdev->vdev_child[c]->vdev_islog) {
9210 continue;
9211 } else {
9212 error = SET_ERROR(EINVAL);
9213 break;
9214 }
9215 }
9216
9217 /* deal with indirect vdevs */
9218 if (spa->spa_root_vdev->vdev_child[c]->vdev_ops ==
9219 &vdev_indirect_ops)
9220 continue;
9221
9222 /* which disk is going to be split? */
9223 if (nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_GUID,
9224 &glist[c]) != 0) {
9225 error = SET_ERROR(EINVAL);
9226 break;
9227 }
9228
9229 /* look it up in the spa */
9230 vml[c] = spa_lookup_by_guid(spa, glist[c], B_FALSE);
9231 if (vml[c] == NULL) {
9232 error = SET_ERROR(ENODEV);
9233 break;
9234 }
9235
9236 /* make sure there's nothing stopping the split */
9237 if (vml[c]->vdev_parent->vdev_ops != &vdev_mirror_ops ||
9238 vml[c]->vdev_islog ||
9239 !vdev_is_concrete(vml[c]) ||
9240 vml[c]->vdev_isspare ||
9241 vml[c]->vdev_isl2cache ||
9242 !vdev_writeable(vml[c]) ||
9243 vml[c]->vdev_children != 0 ||
9244 vml[c]->vdev_state != VDEV_STATE_HEALTHY ||
9245 c != spa->spa_root_vdev->vdev_child[c]->vdev_id) {
9246 error = SET_ERROR(EINVAL);
9247 break;
9248 }
9249
9250 if (vdev_dtl_required(vml[c]) ||
9251 vdev_resilver_needed(vml[c], NULL, NULL)) {
9252 error = SET_ERROR(EBUSY);
9253 break;
9254 }
9255
9256 /* we need certain info from the top level */
9257 fnvlist_add_uint64(child[c], ZPOOL_CONFIG_METASLAB_ARRAY,
9258 vml[c]->vdev_top->vdev_ms_array);
9259 fnvlist_add_uint64(child[c], ZPOOL_CONFIG_METASLAB_SHIFT,
9260 vml[c]->vdev_top->vdev_ms_shift);
9261 fnvlist_add_uint64(child[c], ZPOOL_CONFIG_ASIZE,
9262 vml[c]->vdev_top->vdev_asize);
9263 fnvlist_add_uint64(child[c], ZPOOL_CONFIG_ASHIFT,
9264 vml[c]->vdev_top->vdev_ashift);
9265
9266 /* transfer per-vdev ZAPs */
9267 ASSERT3U(vml[c]->vdev_leaf_zap, !=, 0);
9268 VERIFY0(nvlist_add_uint64(child[c],
9269 ZPOOL_CONFIG_VDEV_LEAF_ZAP, vml[c]->vdev_leaf_zap));
9270
9271 ASSERT3U(vml[c]->vdev_top->vdev_top_zap, !=, 0);
9272 VERIFY0(nvlist_add_uint64(child[c],
9273 ZPOOL_CONFIG_VDEV_TOP_ZAP,
9274 vml[c]->vdev_parent->vdev_top_zap));
9275 }
9276
9277 if (error != 0) {
9278 kmem_free(vml, children * sizeof (vdev_t *));
9279 kmem_free(glist, children * sizeof (uint64_t));
9280 return (spa_vdev_exit(spa, NULL, txg, error));
9281 }
9282
9283 /* stop writers from using the disks */
9284 for (c = 0; c < children; c++) {
9285 if (vml[c] != NULL)
9286 vml[c]->vdev_offline = B_TRUE;
9287 }
9288 vdev_reopen(spa->spa_root_vdev);
9289
9290 /*
9291 * Temporarily record the splitting vdevs in the spa config. This
9292 * will disappear once the config is regenerated.
9293 */
9294 nvl = fnvlist_alloc();
9295 fnvlist_add_uint64_array(nvl, ZPOOL_CONFIG_SPLIT_LIST, glist, children);
9296 kmem_free(glist, children * sizeof (uint64_t));
9297
9298 mutex_enter(&spa->spa_props_lock);
9299 fnvlist_add_nvlist(spa->spa_config, ZPOOL_CONFIG_SPLIT, nvl);
9300 mutex_exit(&spa->spa_props_lock);
9301 spa->spa_config_splitting = nvl;
9302 vdev_config_dirty(spa->spa_root_vdev);
9303
9304 /* configure and create the new pool */
9305 fnvlist_add_string(config, ZPOOL_CONFIG_POOL_NAME, newname);
9306 fnvlist_add_uint64(config, ZPOOL_CONFIG_POOL_STATE,
9307 exp ? POOL_STATE_EXPORTED : POOL_STATE_ACTIVE);
9308 fnvlist_add_uint64(config, ZPOOL_CONFIG_VERSION, spa_version(spa));
9309 fnvlist_add_uint64(config, ZPOOL_CONFIG_POOL_TXG, spa->spa_config_txg);
9310 fnvlist_add_uint64(config, ZPOOL_CONFIG_POOL_GUID,
9311 spa_generate_guid(NULL));
9312 VERIFY0(nvlist_add_boolean(config, ZPOOL_CONFIG_HAS_PER_VDEV_ZAPS));
9313 (void) nvlist_lookup_string(props,
9314 zpool_prop_to_name(ZPOOL_PROP_ALTROOT), &altroot);
9315
9316 /* add the new pool to the namespace */
9317 newspa = spa_add(newname, config, altroot);
9318 newspa->spa_avz_action = AVZ_ACTION_REBUILD;
9319 newspa->spa_config_txg = spa->spa_config_txg;
9320 spa_set_log_state(newspa, SPA_LOG_CLEAR);
9321
9322 /* release the spa config lock, retaining the namespace lock */
9323 spa_vdev_config_exit(spa, NULL, txg, 0, FTAG);
9324
9325 if (zio_injection_enabled)
9326 zio_handle_panic_injection(spa, FTAG, 1);
9327
9328 spa_activate(newspa, spa_mode_global);
9329 spa_async_suspend(newspa);
9330
9331 /*
9332 * Temporarily stop the initializing and TRIM activity. We set the
9333 * state to ACTIVE so that we know to resume initializing or TRIM
9334 * once the split has completed.
9335 */
9336 list_t vd_initialize_list;
9337 list_create(&vd_initialize_list, sizeof (vdev_t),
9338 offsetof(vdev_t, vdev_initialize_node));
9339
9340 list_t vd_trim_list;
9341 list_create(&vd_trim_list, sizeof (vdev_t),
9342 offsetof(vdev_t, vdev_trim_node));
9343
9344 for (c = 0; c < children; c++) {
9345 if (vml[c] != NULL && vml[c]->vdev_ops != &vdev_indirect_ops) {
9346 mutex_enter(&vml[c]->vdev_initialize_lock);
9347 vdev_initialize_stop(vml[c],
9348 VDEV_INITIALIZE_ACTIVE, &vd_initialize_list);
9349 mutex_exit(&vml[c]->vdev_initialize_lock);
9350
9351 mutex_enter(&vml[c]->vdev_trim_lock);
9352 vdev_trim_stop(vml[c], VDEV_TRIM_ACTIVE, &vd_trim_list);
9353 mutex_exit(&vml[c]->vdev_trim_lock);
9354 }
9355 }
9356
9357 vdev_initialize_stop_wait(spa, &vd_initialize_list);
9358 vdev_trim_stop_wait(spa, &vd_trim_list);
9359
9360 list_destroy(&vd_initialize_list);
9361 list_destroy(&vd_trim_list);
9362
9363 newspa->spa_config_source = SPA_CONFIG_SRC_SPLIT;
9364 newspa->spa_is_splitting = B_TRUE;
9365
9366 /* create the new pool from the disks of the original pool */
9367 error = spa_load(newspa, SPA_LOAD_IMPORT, SPA_IMPORT_ASSEMBLE);
9368 if (error)
9369 goto out;
9370
9371 /* if that worked, generate a real config for the new pool */
9372 if (newspa->spa_root_vdev != NULL) {
9373 newspa->spa_config_splitting = fnvlist_alloc();
9374 fnvlist_add_uint64(newspa->spa_config_splitting,
9375 ZPOOL_CONFIG_SPLIT_GUID, spa_guid(spa));
9376 spa_config_set(newspa, spa_config_generate(newspa, NULL, -1ULL,
9377 B_TRUE));
9378 }
9379
9380 /* set the props */
9381 if (props != NULL) {
9382 spa_configfile_set(newspa, props, B_FALSE);
9383 error = spa_prop_set(newspa, props);
9384 if (error)
9385 goto out;
9386 }
9387
9388 /* flush everything */
9389 txg = spa_vdev_config_enter(newspa);
9390 vdev_config_dirty(newspa->spa_root_vdev);
9391 (void) spa_vdev_config_exit(newspa, NULL, txg, 0, FTAG);
9392
9393 if (zio_injection_enabled)
9394 zio_handle_panic_injection(spa, FTAG, 2);
9395
9396 spa_async_resume(newspa);
9397
9398 /* finally, update the original pool's config */
9399 txg = spa_vdev_config_enter(spa);
9400 tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
9401 error = dmu_tx_assign(tx, DMU_TX_WAIT);
9402 if (error != 0)
9403 dmu_tx_abort(tx);
9404 for (c = 0; c < children; c++) {
9405 if (vml[c] != NULL && vml[c]->vdev_ops != &vdev_indirect_ops) {
9406 vdev_t *tvd = vml[c]->vdev_top;
9407
9408 /*
9409 * Need to be sure the detachable VDEV is not
9410 * on any *other* txg's DTL list to prevent it
9411 * from being accessed after it's freed.
9412 */
9413 for (int t = 0; t < TXG_SIZE; t++) {
9414 (void) txg_list_remove_this(
9415 &tvd->vdev_dtl_list, vml[c], t);
9416 }
9417
9418 vdev_split(vml[c]);
9419 if (error == 0)
9420 spa_history_log_internal(spa, "detach", tx,
9421 "vdev=%s", vml[c]->vdev_path);
9422
9423 vdev_free(vml[c]);
9424 }
9425 }
9426 spa->spa_avz_action = AVZ_ACTION_REBUILD;
9427 vdev_config_dirty(spa->spa_root_vdev);
9428 spa->spa_config_splitting = NULL;
9429 nvlist_free(nvl);
9430 if (error == 0)
9431 dmu_tx_commit(tx);
9432 (void) spa_vdev_exit(spa, NULL, txg, 0);
9433
9434 if (zio_injection_enabled)
9435 zio_handle_panic_injection(spa, FTAG, 3);
9436
9437 /* split is complete; log a history record */
9438 spa_history_log_internal(newspa, "split", NULL,
9439 "from pool %s", spa_name(spa));
9440
9441 newspa->spa_is_splitting = B_FALSE;
9442 kmem_free(vml, children * sizeof (vdev_t *));
9443
9444 /* if we're not going to mount the filesystems in userland, export */
9445 if (exp)
9446 error = spa_export_common(newname, POOL_STATE_EXPORTED, NULL,
9447 B_FALSE, B_FALSE);
9448
9449 return (error);
9450
9451 out:
9452 spa_unload(newspa);
9453 spa_deactivate(newspa);
9454 spa_remove(newspa);
9455
9456 txg = spa_vdev_config_enter(spa);
9457
9458 /* re-online all offlined disks */
9459 for (c = 0; c < children; c++) {
9460 if (vml[c] != NULL)
9461 vml[c]->vdev_offline = B_FALSE;
9462 }
9463
9464 /* restart initializing or trimming disks as necessary */
9465 spa_async_request(spa, SPA_ASYNC_INITIALIZE_RESTART);
9466 spa_async_request(spa, SPA_ASYNC_TRIM_RESTART);
9467 spa_async_request(spa, SPA_ASYNC_AUTOTRIM_RESTART);
9468
9469 vdev_reopen(spa->spa_root_vdev);
9470
9471 nvlist_free(spa->spa_config_splitting);
9472 spa->spa_config_splitting = NULL;
9473 (void) spa_vdev_exit(spa, NULL, txg, error);
9474
9475 kmem_free(vml, children * sizeof (vdev_t *));
9476 return (error);
9477 }
9478
9479 /*
9480 * Find any device that's done replacing, or a vdev marked 'unspare' that's
9481 * currently spared, so we can detach it.
9482 */
9483 static vdev_t *
9484 spa_vdev_resilver_done_hunt(vdev_t *vd)
9485 {
9486 vdev_t *newvd, *oldvd;
9487
9488 for (int c = 0; c < vd->vdev_children; c++) {
9489 oldvd = spa_vdev_resilver_done_hunt(vd->vdev_child[c]);
9490 if (oldvd != NULL)
9491 return (oldvd);
9492 }
9493
9494 /*
9495 * Check for a completed replacement. We always consider the first
9496 * vdev in the list to be the oldest vdev, and the last one to be
9497 * the newest (see spa_vdev_attach() for how that works). In
9498 * the case where the newest vdev is faulted, we will not automatically
9499 * remove it after a resilver completes. This is OK as it will require
9500 * user intervention to determine which disk the admin wishes to keep.
9501 */
9502 if (vd->vdev_ops == &vdev_replacing_ops) {
9503 ASSERT(vd->vdev_children > 1);
9504
9505 newvd = vd->vdev_child[vd->vdev_children - 1];
9506 oldvd = vd->vdev_child[0];
9507
9508 if (vdev_dtl_empty(newvd, DTL_MISSING) &&
9509 vdev_dtl_empty(newvd, DTL_OUTAGE) &&
9510 !vdev_dtl_required(oldvd))
9511 return (oldvd);
9512 }
9513
9514 /*
9515 * Check for a completed resilver with the 'unspare' flag set.
9516 * Also potentially update faulted state.
9517 */
9518 if (vd->vdev_ops == &vdev_spare_ops) {
9519 vdev_t *first = vd->vdev_child[0];
9520 vdev_t *last = vd->vdev_child[vd->vdev_children - 1];
9521
9522 if (last->vdev_unspare) {
9523 oldvd = first;
9524 newvd = last;
9525 } else if (first->vdev_unspare) {
9526 oldvd = last;
9527 newvd = first;
9528 } else {
9529 oldvd = NULL;
9530 }
9531
9532 if (oldvd != NULL &&
9533 vdev_dtl_empty(newvd, DTL_MISSING) &&
9534 vdev_dtl_empty(newvd, DTL_OUTAGE) &&
9535 !vdev_dtl_required(oldvd))
9536 return (oldvd);
9537
9538 vdev_propagate_state(vd);
9539
9540 /*
9541 * If there are more than two spares attached to a disk,
9542 * and those spares are not required, then we want to
9543 * attempt to free them up now so that they can be used
9544 * by other pools. Once we're back down to a single
9545 * disk+spare, we stop removing them.
9546 */
9547 if (vd->vdev_children > 2) {
9548 newvd = vd->vdev_child[1];
9549
9550 if (newvd->vdev_isspare && last->vdev_isspare &&
9551 vdev_dtl_empty(last, DTL_MISSING) &&
9552 vdev_dtl_empty(last, DTL_OUTAGE) &&
9553 !vdev_dtl_required(newvd))
9554 return (newvd);
9555 }
9556 }
9557
9558 return (NULL);
9559 }
9560
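/*
 * Detach any devices found by spa_vdev_resilver_done_hunt(): vdevs which
 * have finished replacing, and hot spares which are no longer required.
 */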
9561 static void
9562 spa_vdev_resilver_done(spa_t *spa)
9563 {
9564 vdev_t *vd, *pvd, *ppvd;
9565 uint64_t guid, sguid, pguid, ppguid;
9566
9567 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
9568
9569 while ((vd = spa_vdev_resilver_done_hunt(spa->spa_root_vdev)) != NULL) {
9570 pvd = vd->vdev_parent;
9571 ppvd = pvd->vdev_parent;
9572 guid = vd->vdev_guid;
9573 pguid = pvd->vdev_guid;
9574 ppguid = ppvd->vdev_guid;
9575 sguid = 0;
9576 /*
9577 * If we have just finished replacing a hot spared device, then
9578 * we need to detach the parent's first child (the original hot
9579 * spare) as well.
9580 */
9581 if (ppvd->vdev_ops == &vdev_spare_ops && pvd->vdev_id == 0 &&
9582 ppvd->vdev_children == 2) {
9583 ASSERT(pvd->vdev_ops == &vdev_replacing_ops);
9584 sguid = ppvd->vdev_child[1]->vdev_guid;
9585 }
9586 ASSERT(vd->vdev_resilver_txg == 0 || !vdev_dtl_required(vd));
9587
9588 spa_config_exit(spa, SCL_ALL, FTAG);
9589 if (spa_vdev_detach(spa, guid, pguid, B_TRUE) != 0)
9590 return;
9591 if (sguid && spa_vdev_detach(spa, sguid, ppguid, B_TRUE) != 0)
9592 return;
9593 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
9594 }
9595
9596 spa_config_exit(spa, SCL_ALL, FTAG);
9597
9598 /*
9599 * If a detach was not performed above, replace waiters will not have
9600 * been notified. In that case we must do so now.
9601 */
9602 spa_notify_waiters(spa);
9603 }
9604
9605 /*
9606 * Update the stored path or FRU for this vdev.
9607 */
9608 static int
9609 spa_vdev_set_common(spa_t *spa, uint64_t guid, const char *value,
9610 boolean_t ispath)
9611 {
9612 vdev_t *vd;
9613 boolean_t sync = B_FALSE;
9614
9615 ASSERT(spa_writeable(spa));
9616
9617 spa_vdev_state_enter(spa, SCL_ALL);
9618
9619 if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL)
9620 return (spa_vdev_state_exit(spa, NULL, ENOENT));
9621
9622 if (!vd->vdev_ops->vdev_op_leaf)
9623 return (spa_vdev_state_exit(spa, NULL, ENOTSUP));
9624
9625 if (ispath) {
9626 if (strcmp(value, vd->vdev_path) != 0) {
9627 spa_strfree(vd->vdev_path);
9628 vd->vdev_path = spa_strdup(value);
9629 sync = B_TRUE;
9630 }
9631 } else {
9632 if (vd->vdev_fru == NULL) {
9633 vd->vdev_fru = spa_strdup(value);
9634 sync = B_TRUE;
9635 } else if (strcmp(value, vd->vdev_fru) != 0) {
9636 spa_strfree(vd->vdev_fru);
9637 vd->vdev_fru = spa_strdup(value);
9638 sync = B_TRUE;
9639 }
9640 }
9641
9642 return (spa_vdev_state_exit(spa, sync ? vd : NULL, 0));
9643 }
9644
9645 int
9646 spa_vdev_setpath(spa_t *spa, uint64_t guid, const char *newpath)
9647 {
9648 return (spa_vdev_set_common(spa, guid, newpath, B_TRUE));
9649 }
9650
9651 int
9652 spa_vdev_setfru(spa_t *spa, uint64_t guid, const char *newfru)
9653 {
9654 return (spa_vdev_set_common(spa, guid, newfru, B_FALSE));
9655 }
9656
9657 /*
9658 * ==========================================================================
9659 * SPA Scanning
9660 * ==========================================================================
9661 */
9662 int
9663 spa_scrub_pause_resume(spa_t *spa, pool_scrub_cmd_t cmd)
9664 {
9665 ASSERT0(spa_config_held(spa, SCL_ALL, RW_WRITER));
9666
9667 if (dsl_scan_resilvering(spa->spa_dsl_pool))
9668 return (SET_ERROR(EBUSY));
9669
9670 return (dsl_scrub_set_pause_resume(spa->spa_dsl_pool, cmd));
9671 }
9672
9673 int
9674 spa_scan_stop(spa_t *spa)
9675 {
9676 ASSERT0(spa_config_held(spa, SCL_ALL, RW_WRITER));
9677 if (dsl_scan_resilvering(spa->spa_dsl_pool))
9678 return (SET_ERROR(EBUSY));
9679
9680 return (dsl_scan_cancel(spa->spa_dsl_pool));
9681 }
9682
9683 int
9684 spa_scan(spa_t *spa, pool_scan_func_t func)
9685 {
9686 return (spa_scan_range(spa, func, 0, 0));
9687 }
9688
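/*
 * Start a scan (scrub, error scrub, or resilver), optionally limited to the
 * [txgstart, txgend] range; a txg range is only supported for scrubs.
 */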
9689 int
9690 spa_scan_range(spa_t *spa, pool_scan_func_t func, uint64_t txgstart,
9691 uint64_t txgend)
9692 {
9693 ASSERT0(spa_config_held(spa, SCL_ALL, RW_WRITER));
9694
9695 if (func >= POOL_SCAN_FUNCS || func == POOL_SCAN_NONE)
9696 return (SET_ERROR(ENOTSUP));
9697
9698 if (func == POOL_SCAN_RESILVER &&
9699 !spa_feature_is_enabled(spa, SPA_FEATURE_RESILVER_DEFER))
9700 return (SET_ERROR(ENOTSUP));
9701
9702 if (func != POOL_SCAN_SCRUB && (txgstart != 0 || txgend != 0))
9703 return (SET_ERROR(ENOTSUP));
9704
9705 /*
9706 * If a resilver was requested, but there is no DTL on a
9707 * writeable leaf device, we have nothing to do.
9708 */
9709 if (func == POOL_SCAN_RESILVER &&
9710 !vdev_resilver_needed(spa->spa_root_vdev, NULL, NULL)) {
9711 spa_async_request(spa, SPA_ASYNC_RESILVER_DONE);
9712 return (0);
9713 }
9714
9715 if (func == POOL_SCAN_ERRORSCRUB &&
9716 !spa_feature_is_enabled(spa, SPA_FEATURE_HEAD_ERRLOG))
9717 return (SET_ERROR(ENOTSUP));
9718
9719 return (dsl_scan(spa->spa_dsl_pool, func, txgstart, txgend));
9720 }
9721
9722 /*
9723 * ==========================================================================
9724 * SPA async task processing
9725 * ==========================================================================
9726 */
9727
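/*
 * Recursively handle vdevs with vdev_remove_wanted set: mark them REMOVED,
 * clear their error counters, and post a removal event to userspace.
 */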
9728 static void
9729 spa_async_remove(spa_t *spa, vdev_t *vd, boolean_t by_kernel)
9730 {
9731 if (vd->vdev_remove_wanted) {
9732 vd->vdev_remove_wanted = B_FALSE;
9733 vd->vdev_delayed_close = B_FALSE;
9734 vdev_set_state(vd, B_FALSE, VDEV_STATE_REMOVED, VDEV_AUX_NONE);
9735
9736 /*
9737 * We want to clear the stats, but we don't want to do a full
9738 * vdev_clear() as that will cause us to throw away
9739 * degraded/faulted state as well as attempt to reopen the
9740 * device, all of which is a waste.
9741 */
9742 vd->vdev_stat.vs_read_errors = 0;
9743 vd->vdev_stat.vs_write_errors = 0;
9744 vd->vdev_stat.vs_checksum_errors = 0;
9745
9746 vdev_state_dirty(vd->vdev_top);
9747
9748 /* Tell userspace that the vdev is gone. */
9749 zfs_post_remove(spa, vd, by_kernel);
9750 }
9751
9752 for (int c = 0; c < vd->vdev_children; c++)
9753 spa_async_remove(spa, vd->vdev_child[c], by_kernel);
9754 }
9755
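/*
 * Recursively fault vdevs with vdev_fault_wanted set. A vdev holding the
 * only valid copy of some data is merely degraded, and *suspend is set so
 * the caller can suspend the pool.
 */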
9756 static void
9757 spa_async_fault_vdev(vdev_t *vd, boolean_t *suspend)
9758 {
9759 if (vd->vdev_fault_wanted) {
9760 vdev_state_t newstate = VDEV_STATE_FAULTED;
9761 vd->vdev_fault_wanted = B_FALSE;
9762
9763 /*
9764 * If this device has the only valid copy of the data, then
9765 * back off and simply mark the vdev as degraded instead.
9766 */
9767 if (!vd->vdev_top->vdev_islog && vd->vdev_aux == NULL &&
9768 vdev_dtl_required(vd)) {
9769 newstate = VDEV_STATE_DEGRADED;
9770 /* A required disk is missing so suspend the pool */
9771 *suspend = B_TRUE;
9772 }
9773 vdev_set_state(vd, B_TRUE, newstate, VDEV_AUX_ERR_EXCEEDED);
9774 }
9775 for (int c = 0; c < vd->vdev_children; c++)
9776 spa_async_fault_vdev(vd->vdev_child[c], suspend);
9777 }
9778
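/*
 * Post an autoexpand event for each leaf vdev with a known physical path so
 * that userspace can attempt to expand the underlying device.
 */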
9779 static void
9780 spa_async_autoexpand(spa_t *spa, vdev_t *vd)
9781 {
9782 if (!spa->spa_autoexpand)
9783 return;
9784
9785 for (int c = 0; c < vd->vdev_children; c++) {
9786 vdev_t *cvd = vd->vdev_child[c];
9787 spa_async_autoexpand(spa, cvd);
9788 }
9789
9790 if (!vd->vdev_ops->vdev_op_leaf || vd->vdev_physpath == NULL)
9791 return;
9792
9793 spa_event_notify(vd->vdev_spa, vd, NULL, ESC_ZFS_VDEV_AUTOEXPAND);
9794 }
9795
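/*
 * Worker thread which processes the asynchronous tasks requested via
 * spa_async_request(): config updates, device removal and fault handling,
 * autoexpand, resilver/initialize/TRIM restarts, and L2ARC maintenance.
 */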
9796 static __attribute__((noreturn)) void
9797 spa_async_thread(void *arg)
9798 {
9799 spa_t *spa = (spa_t *)arg;
9800 dsl_pool_t *dp = spa->spa_dsl_pool;
9801 int tasks;
9802
9803 ASSERT(spa->spa_sync_on);
9804
9805 mutex_enter(&spa->spa_async_lock);
9806 tasks = spa->spa_async_tasks;
9807 spa->spa_async_tasks = 0;
9808 mutex_exit(&spa->spa_async_lock);
9809
9810 /*
9811 * See if the config needs to be updated.
9812 */
9813 if (tasks & SPA_ASYNC_CONFIG_UPDATE) {
9814 uint64_t old_space, new_space;
9815
9816 spa_namespace_enter(FTAG);
9817 old_space = metaslab_class_get_space(spa_normal_class(spa));
9818 old_space += metaslab_class_get_space(spa_special_class(spa));
9819 old_space += metaslab_class_get_space(spa_dedup_class(spa));
9820 old_space += metaslab_class_get_space(
9821 spa_embedded_log_class(spa));
9822 old_space += metaslab_class_get_space(
9823 spa_special_embedded_log_class(spa));
9824
9825 spa_config_update(spa, SPA_CONFIG_UPDATE_POOL);
9826
9827 new_space = metaslab_class_get_space(spa_normal_class(spa));
9828 new_space += metaslab_class_get_space(spa_special_class(spa));
9829 new_space += metaslab_class_get_space(spa_dedup_class(spa));
9830 new_space += metaslab_class_get_space(
9831 spa_embedded_log_class(spa));
9832 new_space += metaslab_class_get_space(
9833 spa_special_embedded_log_class(spa));
9834 spa_namespace_exit(FTAG);
9835
9836 /*
9837 * If the pool grew as a result of the config update,
9838 * then log an internal history event.
9839 */
9840 if (new_space != old_space) {
9841 spa_history_log_internal(spa, "vdev online", NULL,
9842 "pool '%s' size: %llu(+%llu)",
9843 spa_name(spa), (u_longlong_t)new_space,
9844 (u_longlong_t)(new_space - old_space));
9845 }
9846 }
9847
9848 /*
9849 * See if any devices need to be marked REMOVED.
9850 */
9851 if (tasks & (SPA_ASYNC_REMOVE | SPA_ASYNC_REMOVE_BY_USER)) {
9852 boolean_t by_kernel = B_TRUE;
9853 if (tasks & SPA_ASYNC_REMOVE_BY_USER)
9854 by_kernel = B_FALSE;
9855 spa_vdev_state_enter(spa, SCL_NONE);
9856 spa_async_remove(spa, spa->spa_root_vdev, by_kernel);
9857 for (int i = 0; i < spa->spa_l2cache.sav_count; i++)
9858 spa_async_remove(spa, spa->spa_l2cache.sav_vdevs[i],
9859 by_kernel);
9860 for (int i = 0; i < spa->spa_spares.sav_count; i++)
9861 spa_async_remove(spa, spa->spa_spares.sav_vdevs[i],
9862 by_kernel);
9863 (void) spa_vdev_state_exit(spa, NULL, 0);
9864 }
9865
9866 if ((tasks & SPA_ASYNC_AUTOEXPAND) && !spa_suspended(spa)) {
9867 spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
9868 spa_async_autoexpand(spa, spa->spa_root_vdev);
9869 spa_config_exit(spa, SCL_CONFIG, FTAG);
9870 }
9871
9872 /*
9873 * See if any devices need to be marked faulted.
9874 */
9875 if (tasks & SPA_ASYNC_FAULT_VDEV) {
9876 spa_vdev_state_enter(spa, SCL_NONE);
9877 boolean_t suspend = B_FALSE;
9878 spa_async_fault_vdev(spa->spa_root_vdev, &suspend);
9879 (void) spa_vdev_state_exit(spa, NULL, 0);
9880 if (suspend)
9881 zio_suspend(spa, NULL, ZIO_SUSPEND_IOERR);
9882 }
9883
9884 /*
9885 * If any devices are done replacing, detach them.
9886 */
9887 if (tasks & SPA_ASYNC_RESILVER_DONE ||
9888 tasks & SPA_ASYNC_REBUILD_DONE ||
9889 tasks & SPA_ASYNC_DETACH_SPARE) {
9890 spa_vdev_resilver_done(spa);
9891 }
9892
9893 /*
9894 * Kick off a resilver.
9895 */
9896 if (tasks & SPA_ASYNC_RESILVER &&
9897 !vdev_rebuild_active(spa->spa_root_vdev) &&
9898 (!dsl_scan_resilvering(dp) ||
9899 !spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_RESILVER_DEFER)))
9900 dsl_scan_restart_resilver(dp, 0);
9901
9902 if (tasks & SPA_ASYNC_INITIALIZE_RESTART) {
9903 spa_namespace_enter(FTAG);
9904 spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
9905 vdev_initialize_restart(spa->spa_root_vdev);
9906 spa_config_exit(spa, SCL_CONFIG, FTAG);
9907 spa_namespace_exit(FTAG);
9908 }
9909
9910 if (tasks & SPA_ASYNC_TRIM_RESTART) {
9911 spa_namespace_enter(FTAG);
9912 spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
9913 vdev_trim_restart(spa->spa_root_vdev);
9914 spa_config_exit(spa, SCL_CONFIG, FTAG);
9915 spa_namespace_exit(FTAG);
9916 }
9917
9918 if (tasks & SPA_ASYNC_AUTOTRIM_RESTART) {
9919 spa_namespace_enter(FTAG);
9920 spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
9921 vdev_autotrim_restart(spa);
9922 spa_config_exit(spa, SCL_CONFIG, FTAG);
9923 spa_namespace_exit(FTAG);
9924 }
9925
9926 /*
9927 * Kick off L2 cache whole device TRIM.
9928 */
9929 if (tasks & SPA_ASYNC_L2CACHE_TRIM) {
9930 spa_namespace_enter(FTAG);
9931 spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
9932 vdev_trim_l2arc(spa);
9933 spa_config_exit(spa, SCL_CONFIG, FTAG);
9934 spa_namespace_exit(FTAG);
9935 }
9936
9937 /*
9938 * Kick off L2 cache rebuilding.
9939 */
9940 if (tasks & SPA_ASYNC_L2CACHE_REBUILD) {
9941 spa_namespace_enter(FTAG);
9942 spa_config_enter(spa, SCL_L2ARC, FTAG, RW_READER);
9943 l2arc_spa_rebuild_start(spa);
9944 spa_config_exit(spa, SCL_L2ARC, FTAG);
9945 spa_namespace_exit(FTAG);
9946 }
9947
9948 /*
9949 * Let the world know that we're done.
9950 */
9951 mutex_enter(&spa->spa_async_lock);
9952 spa->spa_async_thread = NULL;
9953 cv_broadcast(&spa->spa_async_cv);
9954 mutex_exit(&spa->spa_async_lock);
9955 thread_exit();
9956 }
9957
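/*
 * Block new async task dispatch, wait for any running async thread to
 * finish, suspend device removal, and cancel the pool's background zthrs.
 */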
9958 void
9959 spa_async_suspend(spa_t *spa)
9960 {
9961 mutex_enter(&spa->spa_async_lock);
9962 spa->spa_async_suspended++;
9963 while (spa->spa_async_thread != NULL)
9964 cv_wait(&spa->spa_async_cv, &spa->spa_async_lock);
9965 mutex_exit(&spa->spa_async_lock);
9966
9967 spa_vdev_remove_suspend(spa);
9968
9969 zthr_t *condense_thread = spa->spa_condense_zthr;
9970 if (condense_thread != NULL)
9971 zthr_cancel(condense_thread);
9972
9973 zthr_t *raidz_expand_thread = spa->spa_raidz_expand_zthr;
9974 if (raidz_expand_thread != NULL)
9975 zthr_cancel(raidz_expand_thread);
9976
9977 zthr_t *discard_thread = spa->spa_checkpoint_discard_zthr;
9978 if (discard_thread != NULL)
9979 zthr_cancel(discard_thread);
9980
9981 zthr_t *ll_delete_thread = spa->spa_livelist_delete_zthr;
9982 if (ll_delete_thread != NULL)
9983 zthr_cancel(ll_delete_thread);
9984
9985 zthr_t *ll_condense_thread = spa->spa_livelist_condense_zthr;
9986 if (ll_condense_thread != NULL)
9987 zthr_cancel(ll_condense_thread);
9988 }
9989
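/*
 * Re-enable async task dispatch and resume device removal and the zthrs
 * cancelled by spa_async_suspend().
 */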
9990 void
9991 spa_async_resume(spa_t *spa)
9992 {
9993 mutex_enter(&spa->spa_async_lock);
9994 ASSERT(spa->spa_async_suspended != 0);
9995 spa->spa_async_suspended--;
9996 mutex_exit(&spa->spa_async_lock);
9997 spa_restart_removal(spa);
9998
9999 zthr_t *condense_thread = spa->spa_condense_zthr;
10000 if (condense_thread != NULL)
10001 zthr_resume(condense_thread);
10002
10003 zthr_t *raidz_expand_thread = spa->spa_raidz_expand_zthr;
10004 if (raidz_expand_thread != NULL)
10005 zthr_resume(raidz_expand_thread);
10006
10007 zthr_t *discard_thread = spa->spa_checkpoint_discard_zthr;
10008 if (discard_thread != NULL)
10009 zthr_resume(discard_thread);
10010
10011 zthr_t *ll_delete_thread = spa->spa_livelist_delete_zthr;
10012 if (ll_delete_thread != NULL)
10013 zthr_resume(ll_delete_thread);
10014
10015 zthr_t *ll_condense_thread = spa->spa_livelist_condense_zthr;
10016 if (ll_condense_thread != NULL)
10017 zthr_resume(ll_condense_thread);
10018 }
10019
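/*
 * Return whether any async tasks are ready to run. A pending config update
 * only counts once the cache-file write retry interval has elapsed since
 * the last failed attempt.
 */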
10020 static boolean_t
10021 spa_async_tasks_pending(spa_t *spa)
10022 {
10023 uint_t non_config_tasks;
10024 uint_t config_task;
10025 boolean_t config_task_suspended;
10026
10027 non_config_tasks = spa->spa_async_tasks & ~SPA_ASYNC_CONFIG_UPDATE;
10028 config_task = spa->spa_async_tasks & SPA_ASYNC_CONFIG_UPDATE;
10029 if (spa->spa_ccw_fail_time == 0) {
10030 config_task_suspended = B_FALSE;
10031 } else {
10032 config_task_suspended =
10033 (gethrtime() - spa->spa_ccw_fail_time) <
10034 ((hrtime_t)zfs_ccw_retry_interval * NANOSEC);
10035 }
10036
10037 return (non_config_tasks || (config_task && !config_task_suspended));
10038 }
10039
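/*
 * Create the async thread if there is pending work and async processing is
 * neither suspended nor already running.
 */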
10040 static void
10041 spa_async_dispatch(spa_t *spa)
10042 {
10043 mutex_enter(&spa->spa_async_lock);
10044 if (spa_async_tasks_pending(spa) &&
10045 !spa->spa_async_suspended &&
10046 spa->spa_async_thread == NULL)
10047 spa->spa_async_thread = thread_create(NULL, 0,
10048 spa_async_thread, spa, 0, &p0, TS_RUN, maxclsyspri);
10049 mutex_exit(&spa->spa_async_lock);
10050 }
10051
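/*
 * Record the requested async task(s); they are serviced later by
 * spa_async_thread().
 */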
10052 void
10053 spa_async_request(spa_t *spa, int task)
10054 {
10055 zfs_dbgmsg("spa=%s async request task=%u", spa_load_name(spa), task);
10056 mutex_enter(&spa->spa_async_lock);
10057 spa->spa_async_tasks |= task;
10058 mutex_exit(&spa->spa_async_lock);
10059 }
10060
10061 int
10062 spa_async_tasks(spa_t *spa)
10063 {
10064 return (spa->spa_async_tasks);
10065 }
10066
10067 /*
10068 * ==========================================================================
10069 * SPA syncing routines
10070 * ==========================================================================
10071 */
10072
10073
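/*
 * Callbacks which enqueue the given block pointer onto the bpobj passed in
 * as the callback argument.
 */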
10074 static int
10075 bpobj_enqueue_cb(void *arg, const blkptr_t *bp, boolean_t bp_freed,
10076 dmu_tx_t *tx)
10077 {
10078 bpobj_t *bpo = arg;
10079 bpobj_enqueue(bpo, bp, bp_freed, tx);
10080 return (0);
10081 }
10082
10083 int
10084 bpobj_enqueue_alloc_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
10085 {
10086 return (bpobj_enqueue_cb(arg, bp, B_FALSE, tx));
10087 }
10088
10089 int
10090 bpobj_enqueue_free_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
10091 {
10092 return (bpobj_enqueue_cb(arg, bp, B_TRUE, tx));
10093 }
10094
10095 static int
10096 spa_free_sync_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
10097 {
10098 zio_t *pio = arg;
10099
10100 zio_nowait(zio_free_sync(pio, pio->io_spa, dmu_tx_get_txg(tx), bp,
10101 pio->io_flags));
10102 return (0);
10103 }
10104
10105 static int
10106 bpobj_spa_free_sync_cb(void *arg, const blkptr_t *bp, boolean_t bp_freed,
10107 dmu_tx_t *tx)
10108 {
10109 ASSERT(!bp_freed);
10110 return (spa_free_sync_cb(arg, bp, tx));
10111 }
10112
10113 /*
10114 * Note: this simple function is not inlined to make it easier to dtrace the
10115 * amount of time spent syncing frees.
10116 */
10117 static void
10118 spa_sync_frees(spa_t *spa, bplist_t *bpl, dmu_tx_t *tx)
10119 {
10120 zio_t *zio = zio_root(spa, NULL, NULL, 0);
10121 bplist_iterate(bpl, spa_free_sync_cb, zio, tx);
10122 VERIFY0(zio_wait(zio));
10123 }
10124
10125 /*
10126 * Note: this simple function is not inlined to make it easier to dtrace the
10127 * amount of time spent syncing deferred frees.
10128 */
10129 static void
10130 spa_sync_deferred_frees(spa_t *spa, dmu_tx_t *tx)
10131 {
10132 if (spa_sync_pass(spa) != 1)
10133 return;
10134
10135 /*
10136 * Note:
10137 * If the log space map feature is active, we stop deferring
10138 * frees to the next TXG and therefore running this function
10139 * would be considered a no-op as spa_deferred_bpobj should
10140 * not have any entries.
10141 *
10142 * That said we run this function anyway (instead of returning
10143 * immediately) for the edge-case scenario where we just
10144 * activated the log space map feature in this TXG but we have
10145 * deferred frees from the previous TXG.
10146 */
10147 zio_t *zio = zio_root(spa, NULL, NULL, 0);
10148 VERIFY3U(bpobj_iterate(&spa->spa_deferred_bpobj,
10149 bpobj_spa_free_sync_cb, zio, tx), ==, 0);
10150 VERIFY0(zio_wait(zio));
10151 }
10152
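/*
 * Pack an nvlist with XDR encoding, write it to the packed-nvlist object
 * obj in the MOS, and record the packed size in the object's bonus buffer.
 */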
10153 static void
10154 spa_sync_nvlist(spa_t *spa, uint64_t obj, nvlist_t *nv, dmu_tx_t *tx)
10155 {
10156 char *packed = NULL;
10157 size_t bufsize;
10158 size_t nvsize = 0;
10159 dmu_buf_t *db;
10160
10161 VERIFY0(nvlist_size(nv, &nvsize, NV_ENCODE_XDR));
10162
10163 /*
10164 * Write full (SPA_CONFIG_BLOCKSIZE) blocks of configuration
10165 * information. This avoids the dmu_buf_will_dirty() path and
10166 * saves us a pre-read to get data we don't actually care about.
10167 */
10168 bufsize = P2ROUNDUP((uint64_t)nvsize, SPA_CONFIG_BLOCKSIZE);
10169 packed = vmem_alloc(bufsize, KM_SLEEP);
10170
10171 VERIFY0(nvlist_pack(nv, &packed, &nvsize, NV_ENCODE_XDR,
10172 KM_SLEEP));
10173 memset(packed + nvsize, 0, bufsize - nvsize);
10174
10175 dmu_write(spa->spa_meta_objset, obj, 0, bufsize, packed, tx,
10176 DMU_READ_NO_PREFETCH);
10177
10178 vmem_free(packed, bufsize);
10179
10180 VERIFY0(dmu_bonus_hold(spa->spa_meta_objset, obj, FTAG, &db));
10181 dmu_buf_will_dirty(db, tx);
10182 *(uint64_t *)db->db_data = nvsize;
10183 dmu_buf_rele(db, FTAG);
10184 }
10185
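/*
 * Sync the nvlist describing an auxiliary vdev group (hot spares or L2ARC
 * devices) to its MOS object, creating that object if necessary.
 */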
10186 static void
10187 spa_sync_aux_dev(spa_t *spa, spa_aux_vdev_t *sav, dmu_tx_t *tx,
10188 const char *config, const char *entry)
10189 {
10190 nvlist_t *nvroot;
10191 nvlist_t **list;
10192 int i;
10193
10194 if (!sav->sav_sync)
10195 return;
10196
10197 /*
10198 * Update the MOS nvlist describing the list of available devices.
10199 * spa_validate_aux() will have already made sure this nvlist is
10200 * valid and the vdevs are labeled appropriately.
10201 */
10202 if (sav->sav_object == 0) {
10203 sav->sav_object = dmu_object_alloc(spa->spa_meta_objset,
10204 DMU_OT_PACKED_NVLIST, 1 << 14, DMU_OT_PACKED_NVLIST_SIZE,
10205 sizeof (uint64_t), tx);
10206 VERIFY0(zap_update(spa->spa_meta_objset,
10207 DMU_POOL_DIRECTORY_OBJECT, entry, sizeof (uint64_t), 1,
10208 &sav->sav_object, tx));
10209 }
10210
10211 nvroot = fnvlist_alloc();
10212 if (sav->sav_count == 0) {
10213 fnvlist_add_nvlist_array(nvroot, config,
10214 (const nvlist_t * const *)NULL, 0);
10215 } else {
10216 list = kmem_alloc(sav->sav_count*sizeof (void *), KM_SLEEP);
10217 for (i = 0; i < sav->sav_count; i++)
10218 list[i] = vdev_config_generate(spa, sav->sav_vdevs[i],
10219 B_FALSE, VDEV_CONFIG_L2CACHE);
10220 fnvlist_add_nvlist_array(nvroot, config,
10221 (const nvlist_t * const *)list, sav->sav_count);
10222 for (i = 0; i < sav->sav_count; i++)
10223 nvlist_free(list[i]);
10224 kmem_free(list, sav->sav_count * sizeof (void *));
10225 }
10226
10227 spa_sync_nvlist(spa, sav->sav_object, nvroot, tx);
10228 nvlist_free(nvroot);
10229
10230 sav->sav_sync = B_FALSE;
10231 }
10232
10233 /*
10234 * Rebuild spa's all-vdev ZAP from the vdev ZAPs indicated in each vdev_t.
10235 * The all-vdev ZAP must be empty.
10236 */
10237 static void
10238 spa_avz_build(vdev_t *vd, uint64_t avz, dmu_tx_t *tx)
10239 {
10240 spa_t *spa = vd->vdev_spa;
10241
10242 if (vd->vdev_root_zap != 0 &&
10243 spa_feature_is_active(spa, SPA_FEATURE_AVZ_V2)) {
10244 VERIFY0(zap_add_int(spa->spa_meta_objset, avz,
10245 vd->vdev_root_zap, tx));
10246 }
10247 if (vd->vdev_top_zap != 0) {
10248 VERIFY0(zap_add_int(spa->spa_meta_objset, avz,
10249 vd->vdev_top_zap, tx));
10250 }
10251 if (vd->vdev_leaf_zap != 0) {
10252 VERIFY0(zap_add_int(spa->spa_meta_objset, avz,
10253 vd->vdev_leaf_zap, tx));
10254 }
10255 for (uint64_t i = 0; i < vd->vdev_children; i++) {
10256 spa_avz_build(vd->vdev_child[i], avz, tx);
10257 }
10258 }
10259
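/*
 * Sync the pool configuration to the MOS config object, first handling any
 * pending rebuild or destruction of the all-vdev ZAP and creating per-vdev
 * ZAPs as needed.
 */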
10260 static void
10261 spa_sync_config_object(spa_t *spa, dmu_tx_t *tx)
10262 {
10263 nvlist_t *config;
10264
10265 /*
10266 * If the pool is being imported from a pre-per-vdev-ZAP version of ZFS,
10267 * its config may not be dirty but we still need to build per-vdev ZAPs.
10268 * Similarly, if the pool is being assembled (e.g. after a split), we
10269 * need to rebuild the AVZ although the config may not be dirty.
10270 */
10271 if (list_is_empty(&spa->spa_config_dirty_list) &&
10272 spa->spa_avz_action == AVZ_ACTION_NONE)
10273 return;
10274
10275 spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
10276
10277 ASSERT(spa->spa_avz_action == AVZ_ACTION_NONE ||
10278 spa->spa_avz_action == AVZ_ACTION_INITIALIZE ||
10279 spa->spa_all_vdev_zaps != 0);
10280
10281 if (spa->spa_avz_action == AVZ_ACTION_REBUILD) {
10282 /* Make and build the new AVZ */
10283 uint64_t new_avz = zap_create(spa->spa_meta_objset,
10284 DMU_OTN_ZAP_METADATA, DMU_OT_NONE, 0, tx);
10285 spa_avz_build(spa->spa_root_vdev, new_avz, tx);
10286
10287 /* Diff old AVZ with new one */
10288 zap_cursor_t zc;
10289 zap_attribute_t *za = zap_attribute_alloc();
10290
10291 for (zap_cursor_init(&zc, spa->spa_meta_objset,
10292 spa->spa_all_vdev_zaps);
10293 zap_cursor_retrieve(&zc, za) == 0;
10294 zap_cursor_advance(&zc)) {
10295 uint64_t vdzap = za->za_first_integer;
10296 if (zap_lookup_int(spa->spa_meta_objset, new_avz,
10297 vdzap) == ENOENT) {
10298 /*
10299 * ZAP is listed in old AVZ but not in new one;
10300 * destroy it
10301 */
10302 VERIFY0(zap_destroy(spa->spa_meta_objset, vdzap,
10303 tx));
10304 }
10305 }
10306
10307 zap_cursor_fini(&zc);
10308 zap_attribute_free(za);
10309
10310 /* Destroy the old AVZ */
10311 VERIFY0(zap_destroy(spa->spa_meta_objset,
10312 spa->spa_all_vdev_zaps, tx));
10313
10314 /* Replace the old AVZ in the dir obj with the new one */
10315 VERIFY0(zap_update(spa->spa_meta_objset,
10316 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_VDEV_ZAP_MAP,
10317 sizeof (new_avz), 1, &new_avz, tx));
10318
10319 spa->spa_all_vdev_zaps = new_avz;
10320 } else if (spa->spa_avz_action == AVZ_ACTION_DESTROY) {
10321 zap_cursor_t zc;
10322 zap_attribute_t *za = zap_attribute_alloc();
10323
10324 /* Walk through the AVZ and destroy all listed ZAPs */
10325 for (zap_cursor_init(&zc, spa->spa_meta_objset,
10326 spa->spa_all_vdev_zaps);
10327 zap_cursor_retrieve(&zc, za) == 0;
10328 zap_cursor_advance(&zc)) {
10329 uint64_t zap = za->za_first_integer;
10330 VERIFY0(zap_destroy(spa->spa_meta_objset, zap, tx));
10331 }
10332
10333 zap_cursor_fini(&zc);
10334 zap_attribute_free(za);
10335
10336 /* Destroy and unlink the AVZ itself */
10337 VERIFY0(zap_destroy(spa->spa_meta_objset,
10338 spa->spa_all_vdev_zaps, tx));
10339 VERIFY0(zap_remove(spa->spa_meta_objset,
10340 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_VDEV_ZAP_MAP, tx));
10341 spa->spa_all_vdev_zaps = 0;
10342 }
10343
10344 if (spa->spa_all_vdev_zaps == 0) {
10345 spa->spa_all_vdev_zaps = zap_create_link(spa->spa_meta_objset,
10346 DMU_OTN_ZAP_METADATA, DMU_POOL_DIRECTORY_OBJECT,
10347 DMU_POOL_VDEV_ZAP_MAP, tx);
10348 }
10349 spa->spa_avz_action = AVZ_ACTION_NONE;
10350
10351 /* Create ZAPs for vdevs that don't have them. */
10352 vdev_construct_zaps(spa->spa_root_vdev, tx);
10353
10354 config = spa_config_generate(spa, spa->spa_root_vdev,
10355 dmu_tx_get_txg(tx), B_FALSE);
10356
10357 /*
10358 * If we're upgrading the spa version then make sure that
10359 * the config object gets updated with the correct version.
10360 */
10361 if (spa->spa_ubsync.ub_version < spa->spa_uberblock.ub_version)
10362 fnvlist_add_uint64(config, ZPOOL_CONFIG_VERSION,
10363 spa->spa_uberblock.ub_version);
10364
10365 spa_config_exit(spa, SCL_STATE, FTAG);
10366
10367 nvlist_free(spa->spa_config_syncing);
10368 spa->spa_config_syncing = config;
10369
10370 spa_sync_nvlist(spa, spa->spa_config_object, config, tx);
10371 }
10372
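/*
 * Sync task which updates the pool's on-disk SPA version and dirties the
 * config so that the new version is written to the vdev labels.
 */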
10373 static void
10374 spa_sync_version(void *arg, dmu_tx_t *tx)
10375 {
10376 uint64_t *versionp = arg;
10377 uint64_t version = *versionp;
10378 spa_t *spa = dmu_tx_pool(tx)->dp_spa;
10379
10380 /*
10381 * Setting the version is special cased when first creating the pool.
10382 */
10383 ASSERT(tx->tx_txg != TXG_INITIAL);
10384
10385 ASSERT(SPA_VERSION_IS_SUPPORTED(version));
10386 ASSERT(version >= spa_version(spa));
10387
10388 spa->spa_uberblock.ub_version = version;
10389 vdev_config_dirty(spa->spa_root_vdev);
10390 spa_history_log_internal(spa, "set", tx, "version=%lld",
10391 (longlong_t)version);
10392 }
10393
10394 /*
10395 * Set zpool properties.
10396 */
10397 static void
10398 spa_sync_props(void *arg, dmu_tx_t *tx)
10399 {
10400 nvlist_t *nvp = arg;
10401 spa_t *spa = dmu_tx_pool(tx)->dp_spa;
10402 objset_t *mos = spa->spa_meta_objset;
10403 nvpair_t *elem = NULL;
10404
10405 mutex_enter(&spa->spa_props_lock);
10406
10407 while ((elem = nvlist_next_nvpair(nvp, elem))) {
10408 uint64_t intval;
10409 const char *strval, *fname;
10410 zpool_prop_t prop;
10411 const char *propname;
10412 const char *elemname = nvpair_name(elem);
10413 zprop_type_t proptype;
10414 spa_feature_t fid;
10415
10416 switch (prop = zpool_name_to_prop(elemname)) {
10417 case ZPOOL_PROP_VERSION:
10418 intval = fnvpair_value_uint64(elem);
10419 /*
10420 * The version is synced separately before other
10421 * properties and should be correct by now.
10422 */
10423 ASSERT3U(spa_version(spa), >=, intval);
10424 break;
10425
10426 case ZPOOL_PROP_ALTROOT:
10427 /*
10428 * 'altroot' is a non-persistent property. It should
10429 * have been set temporarily at creation or import time.
10430 */
10431 ASSERT(spa->spa_root != NULL);
10432 break;
10433
10434 case ZPOOL_PROP_READONLY:
10435 case ZPOOL_PROP_CACHEFILE:
10436 /*
10437 * 'readonly' and 'cachefile' are also non-persistent
10438 * properties.
10439 */
10440 break;
10441 case ZPOOL_PROP_COMMENT:
10442 strval = fnvpair_value_string(elem);
10443 if (spa->spa_comment != NULL)
10444 spa_strfree(spa->spa_comment);
10445 spa->spa_comment = spa_strdup(strval);
10446 /*
10447 * We need to dirty the configuration on all the vdevs
10448 * so that their labels get updated. We also need to
10449 * update the cache file to keep it in sync with the
10450 * MOS version. It's unnecessary to do this for pool
10451 * creation since the vdev's configuration has already
10452 * been dirtied.
10453 */
10454 if (tx->tx_txg != TXG_INITIAL) {
10455 vdev_config_dirty(spa->spa_root_vdev);
10456 spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE);
10457 }
10458 spa_history_log_internal(spa, "set", tx,
10459 "%s=%s", elemname, strval);
10460 break;
10461 case ZPOOL_PROP_COMPATIBILITY:
10462 strval = fnvpair_value_string(elem);
10463 if (spa->spa_compatibility != NULL)
10464 spa_strfree(spa->spa_compatibility);
10465 spa->spa_compatibility = spa_strdup(strval);
10466 /*
10467 * Dirty the configuration on vdevs as above.
10468 */
10469 if (tx->tx_txg != TXG_INITIAL) {
10470 vdev_config_dirty(spa->spa_root_vdev);
10471 spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE);
10472 }
10473
10474 spa_history_log_internal(spa, "set", tx,
10475 "%s=%s", nvpair_name(elem), strval);
10476 break;
10477
10478 case ZPOOL_PROP_INVAL:
10479 if (zpool_prop_feature(elemname)) {
10480 fname = strchr(elemname, '@') + 1;
10481 VERIFY0(zfeature_lookup_name(fname, &fid));
10482
10483 spa_feature_enable(spa, fid, tx);
10484 spa_history_log_internal(spa, "set", tx,
10485 "%s=enabled", elemname);
10486 break;
10487 } else if (!zfs_prop_user(elemname)) {
10488 ASSERT(zpool_prop_feature(elemname));
10489 break;
10490 }
10491 zfs_fallthrough;
10492 default:
10493 /*
10494 * Set pool property values in the poolprops mos object.
10495 */
10496 if (spa->spa_pool_props_object == 0) {
10497 spa->spa_pool_props_object =
10498 zap_create_link(mos, DMU_OT_POOL_PROPS,
10499 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_PROPS,
10500 tx);
10501 }
10502
10503 /* normalize the property name */
10504 if (prop == ZPOOL_PROP_INVAL) {
10505 propname = elemname;
10506 proptype = PROP_TYPE_STRING;
10507 } else {
10508 propname = zpool_prop_to_name(prop);
10509 proptype = zpool_prop_get_type(prop);
10510 }
10511
10512 if (nvpair_type(elem) == DATA_TYPE_STRING) {
10513 ASSERT(proptype == PROP_TYPE_STRING);
10514 strval = fnvpair_value_string(elem);
10515 if (strlen(strval) == 0) {
10516 /* remove the property if value == "" */
10517 (void) zap_remove(mos,
10518 spa->spa_pool_props_object,
10519 propname, tx);
10520 } else {
10521 VERIFY0(zap_update(mos,
10522 spa->spa_pool_props_object,
10523 propname, 1, strlen(strval) + 1,
10524 strval, tx));
10525 }
10526 spa_history_log_internal(spa, "set", tx,
10527 "%s=%s", elemname, strval);
10528 } else if (nvpair_type(elem) == DATA_TYPE_UINT64) {
10529 intval = fnvpair_value_uint64(elem);
10530
10531 if (proptype == PROP_TYPE_INDEX) {
10532 const char *unused;
10533 VERIFY0(zpool_prop_index_to_string(
10534 prop, intval, &unused));
10535 }
10536 VERIFY0(zap_update(mos,
10537 spa->spa_pool_props_object, propname,
10538 8, 1, &intval, tx));
10539 spa_history_log_internal(spa, "set", tx,
10540 "%s=%lld", elemname,
10541 (longlong_t)intval);
10542
10543 switch (prop) {
10544 case ZPOOL_PROP_DELEGATION:
10545 spa->spa_delegation = intval;
10546 break;
10547 case ZPOOL_PROP_BOOTFS:
10548 spa->spa_bootfs = intval;
10549 break;
10550 case ZPOOL_PROP_FAILUREMODE:
10551 spa->spa_failmode = intval;
10552 break;
10553 case ZPOOL_PROP_AUTOTRIM:
10554 spa->spa_autotrim = intval;
10555 spa_async_request(spa,
10556 SPA_ASYNC_AUTOTRIM_RESTART);
10557 break;
10558 case ZPOOL_PROP_AUTOEXPAND:
10559 spa->spa_autoexpand = intval;
10560 if (tx->tx_txg != TXG_INITIAL)
10561 spa_async_request(spa,
10562 SPA_ASYNC_AUTOEXPAND);
10563 break;
10564 case ZPOOL_PROP_MULTIHOST:
10565 spa->spa_multihost = intval;
10566 break;
10567 case ZPOOL_PROP_DEDUP_TABLE_QUOTA:
10568 spa->spa_dedup_table_quota = intval;
10569 break;
10570 default:
10571 break;
10572 }
10573 } else {
10574 ASSERT(0); /* not allowed */
10575 }
10576 }
10577
10578 }
10579
10580 mutex_exit(&spa->spa_props_lock);
10581 }
10582
10583 /*
10584 * Perform one-time upgrade on-disk changes. spa_version() does not
10585 * reflect the new version this txg, so there must be no changes this
10586 * txg to anything that the upgrade code depends on after it executes.
10587 * Therefore this must be called after dsl_pool_sync() does the sync
10588 * tasks.
10589 */
10590 static void
10591 spa_sync_upgrades(spa_t *spa, dmu_tx_t *tx)
10592 {
10593 if (spa_sync_pass(spa) != 1)
10594 return;
10595
10596 dsl_pool_t *dp = spa->spa_dsl_pool;
10597 rrw_enter(&dp->dp_config_rwlock, RW_WRITER, FTAG);
10598
10599 if (spa->spa_ubsync.ub_version < SPA_VERSION_ORIGIN &&
10600 spa->spa_uberblock.ub_version >= SPA_VERSION_ORIGIN) {
10601 dsl_pool_create_origin(dp, tx);
10602
10603 /* Keeping the origin open increases spa_minref */
10604 spa->spa_minref += 3;
10605 }
10606
10607 if (spa->spa_ubsync.ub_version < SPA_VERSION_NEXT_CLONES &&
10608 spa->spa_uberblock.ub_version >= SPA_VERSION_NEXT_CLONES) {
10609 dsl_pool_upgrade_clones(dp, tx);
10610 }
10611
10612 if (spa->spa_ubsync.ub_version < SPA_VERSION_DIR_CLONES &&
10613 spa->spa_uberblock.ub_version >= SPA_VERSION_DIR_CLONES) {
10614 dsl_pool_upgrade_dir_clones(dp, tx);
10615
10616 /* Keeping the freedir open increases spa_minref */
10617 spa->spa_minref += 3;
10618 }
10619
10620 if (spa->spa_ubsync.ub_version < SPA_VERSION_FEATURES &&
10621 spa->spa_uberblock.ub_version >= SPA_VERSION_FEATURES) {
10622 spa_feature_create_zap_objects(spa, tx);
10623 }
10624
10625 /*
10626 * The LZ4_COMPRESS feature's behaviour was changed to activate_on_enable
10627 * when the possibility to use lz4 compression for metadata was added.
10628 * Old pools that have this feature enabled must be upgraded to have
10629 * this feature active.
10630 */
10631 if (spa->spa_uberblock.ub_version >= SPA_VERSION_FEATURES) {
10632 boolean_t lz4_en = spa_feature_is_enabled(spa,
10633 SPA_FEATURE_LZ4_COMPRESS);
10634 boolean_t lz4_ac = spa_feature_is_active(spa,
10635 SPA_FEATURE_LZ4_COMPRESS);
10636
10637 if (lz4_en && !lz4_ac)
10638 spa_feature_incr(spa, SPA_FEATURE_LZ4_COMPRESS, tx);
10639 }
10640
10641 /*
10642 * If we haven't written the salt, do so now. Note that the
10643 * feature may not be activated yet, but that's fine since
10644 * the presence of this ZAP entry is backwards compatible.
10645 */
10646 if (zap_contains(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
10647 DMU_POOL_CHECKSUM_SALT) == ENOENT) {
10648 VERIFY0(zap_add(spa->spa_meta_objset,
10649 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_CHECKSUM_SALT, 1,
10650 sizeof (spa->spa_cksum_salt.zcs_bytes),
10651 spa->spa_cksum_salt.zcs_bytes, tx));
10652 }
10653
10654 rrw_exit(&dp->dp_config_rwlock, FTAG);
10655 }
10656
10657 static void
10658 vdev_indirect_state_sync_verify(vdev_t *vd)
10659 {
10660 vdev_indirect_mapping_t *vim __maybe_unused = vd->vdev_indirect_mapping;
10661 vdev_indirect_births_t *vib __maybe_unused = vd->vdev_indirect_births;
10662
10663 if (vd->vdev_ops == &vdev_indirect_ops) {
10664 ASSERT(vim != NULL);
10665 ASSERT(vib != NULL);
10666 }
10667
10668 uint64_t obsolete_sm_object = 0;
10669 ASSERT0(vdev_obsolete_sm_object(vd, &obsolete_sm_object));
10670 if (obsolete_sm_object != 0) {
10671 ASSERT(vd->vdev_obsolete_sm != NULL);
10672 ASSERT(vd->vdev_removing ||
10673 vd->vdev_ops == &vdev_indirect_ops);
10674 ASSERT(vdev_indirect_mapping_num_entries(vim) > 0);
10675 ASSERT(vdev_indirect_mapping_bytes_mapped(vim) > 0);
10676 ASSERT3U(obsolete_sm_object, ==,
10677 space_map_object(vd->vdev_obsolete_sm));
10678 ASSERT3U(vdev_indirect_mapping_bytes_mapped(vim), >=,
10679 space_map_allocated(vd->vdev_obsolete_sm));
10680 }
10681 ASSERT(vd->vdev_obsolete_segments != NULL);
10682
10683 /*
10684 * Since frees / remaps to an indirect vdev can only
10685 * happen in syncing context, the obsolete segments
10686 * tree must be empty when we start syncing.
10687 */
10688 ASSERT0(zfs_range_tree_space(vd->vdev_obsolete_segments));
10689 }
10690
10691 /*
10692 * Set the top-level vdev's max queue depth. Evaluate each top-level's
10693 * async write queue depth in case it changed. The max queue depth will
10694 * not change in the middle of syncing out this txg.
10695 */
10696 static void
10697 spa_sync_adjust_vdev_max_queue_depth(spa_t *spa)
10698 {
10699 ASSERT(spa_writeable(spa));
10700
10701 metaslab_class_balance(spa_normal_class(spa), B_TRUE);
10702 metaslab_class_balance(spa_special_class(spa), B_TRUE);
10703 metaslab_class_balance(spa_dedup_class(spa), B_TRUE);
10704 }
10705
10706 static void
10707 spa_sync_condense_indirect(spa_t *spa, dmu_tx_t *tx)
10708 {
10709 ASSERT(spa_writeable(spa));
10710
10711 vdev_t *rvd = spa->spa_root_vdev;
10712 for (int c = 0; c < rvd->vdev_children; c++) {
10713 vdev_t *vd = rvd->vdev_child[c];
10714 vdev_indirect_state_sync_verify(vd);
10715
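		/* Start at most one indirect condense per txg. */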
10716 if (vdev_indirect_should_condense(vd)) {
10717 spa_condense_indirect_start_sync(vd, tx);
10718 break;
10719 }
10720 }
10721 }
10722
10723 static void
10724 spa_sync_iterate_to_convergence(spa_t *spa, dmu_tx_t *tx)
10725 {
10726 objset_t *mos = spa->spa_meta_objset;
10727 dsl_pool_t *dp = spa->spa_dsl_pool;
10728 uint64_t txg = tx->tx_txg;
10729 bplist_t *free_bpl = &spa->spa_free_bplist[txg & TXG_MASK];
10730
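	/*
	 * Keep making sync passes until a pass no longer dirties the MOS;
	 * each pass may generate additional dirty data (e.g. from frees and
	 * space map updates) that must be written out.
	 */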
10731 do {
10732 int pass = ++spa->spa_sync_pass;
10733
10734 spa_sync_config_object(spa, tx);
10735 spa_sync_aux_dev(spa, &spa->spa_spares, tx,
10736 ZPOOL_CONFIG_SPARES, DMU_POOL_SPARES);
10737 spa_sync_aux_dev(spa, &spa->spa_l2cache, tx,
10738 ZPOOL_CONFIG_L2CACHE, DMU_POOL_L2CACHE);
10739 spa_errlog_sync(spa, txg);
10740 dsl_pool_sync(dp, txg);
10741
10742 if (pass < zfs_sync_pass_deferred_free ||
10743 spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP)) {
10744 /*
10745 * If the log space map feature is active we don't
10746 * care about deferred frees and the deferred bpobj
10747 * as the log space map should effectively have the
10748 * same results (i.e. appending only to one object).
10749 */
10750 spa_sync_frees(spa, free_bpl, tx);
10751 } else {
			/*
			 * We cannot defer frees in pass 1, because the
			 * deferred frees are synced later in pass 1.
			 */
10756 ASSERT3U(pass, >, 1);
10757 bplist_iterate(free_bpl, bpobj_enqueue_alloc_cb,
10758 &spa->spa_deferred_bpobj, tx);
10759 }
10760
10761 brt_sync(spa, txg);
10762 ddt_sync(spa, txg);
10763 dsl_scan_sync(dp, tx);
10764 dsl_errorscrub_sync(dp, tx);
10765 svr_sync(spa, tx);
10766 spa_sync_upgrades(spa, tx);
10767
10768 spa_flush_metaslabs(spa, tx);
10769
10770 vdev_t *vd = NULL;
10771 while ((vd = txg_list_remove(&spa->spa_vdev_txg_list, txg))
10772 != NULL)
10773 vdev_sync(vd, txg);
10774
10775 if (pass == 1) {
10776 /*
10777 * dsl_pool_sync() -> dp_sync_tasks may have dirtied
10778 * the config. If that happens, this txg should not
10779 * be a no-op. So we must sync the config to the MOS
10780 * before checking for no-op.
10781 *
10782 * Note that when the config is dirty, it will
10783 * be written to the MOS (i.e. the MOS will be
10784 * dirtied) every time we call spa_sync_config_object()
10785 * in this txg. Therefore we can't call this after
10786 * dsl_pool_sync() every pass, because it would
10787 * prevent us from converging, since we'd dirty
10788 * the MOS every pass.
10789 *
10790 * Sync tasks can only be processed in pass 1, so
10791 * there's no need to do this in later passes.
10792 */
10793 spa_sync_config_object(spa, tx);
10794 }
10795
10796 /*
10797 * Note: We need to check if the MOS is dirty because we could
10798 * have marked the MOS dirty without updating the uberblock
10799 * (e.g. if we have sync tasks but no dirty user data). We need
10800 * to check the uberblock's rootbp because it is updated if we
10801 * have synced out dirty data (though in this case the MOS will
10802 * most likely also be dirty due to second order effects, we
10803 * don't want to rely on that here).
10804 */
10805 if (pass == 1 &&
10806 BP_GET_LOGICAL_BIRTH(&spa->spa_uberblock.ub_rootbp) < txg &&
10807 !dmu_objset_is_dirty(mos, txg)) {
10808 /*
10809 * Nothing changed on the first pass, therefore this
10810 * TXG is a no-op. Avoid syncing deferred frees, so
10811 * that we can keep this TXG as a no-op.
10812 */
10813 ASSERT(txg_list_empty(&dp->dp_dirty_datasets, txg));
10814 ASSERT(txg_list_empty(&dp->dp_dirty_dirs, txg));
10815 ASSERT(txg_list_empty(&dp->dp_sync_tasks, txg));
10816 ASSERT(txg_list_empty(&dp->dp_early_sync_tasks, txg));
10817 break;
10818 }
10819
10820 spa_sync_deferred_frees(spa, tx);
10821 } while (dmu_objset_is_dirty(mos, txg));
10822 }
10823
10824 /*
10825 * Rewrite the vdev configuration (which includes the uberblock) to
10826 * commit the transaction group.
10827 *
10828 * If there are no dirty vdevs, we sync the uberblock to a few random
10829 * top-level vdevs that are known to be visible in the config cache
10830 * (see spa_vdev_add() for a complete description). If there *are* dirty
10831 * vdevs, sync the uberblock to all vdevs.
10832 */
10833 static void
10834 spa_sync_rewrite_vdev_config(spa_t *spa, dmu_tx_t *tx)
10835 {
10836 vdev_t *rvd = spa->spa_root_vdev;
10837 uint64_t txg = tx->tx_txg;
10838
10839 for (;;) {
10840 int error = 0;
10841
10842 /*
10843 * We hold SCL_STATE to prevent vdev open/close/etc.
10844 * while we're attempting to write the vdev labels.
10845 */
10846 spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
10847
10848 if (list_is_empty(&spa->spa_config_dirty_list)) {
10849 vdev_t *svd[SPA_SYNC_MIN_VDEVS] = { NULL };
10850 int svdcount = 0;
10851 int children = rvd->vdev_children;
10852 int c0 = random_in_range(children);
10853
10854 for (int c = 0; c < children; c++) {
10855 vdev_t *vd =
10856 rvd->vdev_child[(c0 + c) % children];
10857
10858 /* Stop when revisiting the first vdev */
10859 if (c > 0 && svd[0] == vd)
10860 break;
10861
10862 if (vd->vdev_ms_array == 0 ||
10863 vd->vdev_islog ||
10864 !vdev_is_concrete(vd))
10865 continue;
10866
10867 svd[svdcount++] = vd;
10868 if (svdcount == SPA_SYNC_MIN_VDEVS)
10869 break;
10870 }
10871 error = vdev_config_sync(svd, svdcount, txg);
10872 } else {
10873 error = vdev_config_sync(rvd->vdev_child,
10874 rvd->vdev_children, txg);
10875 }
10876
10877 if (error == 0)
10878 spa->spa_last_synced_guid = rvd->vdev_guid;
10879
10880 spa_config_exit(spa, SCL_STATE, FTAG);
10881
10882 if (error == 0)
10883 break;
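		/*
		 * The config sync failed; suspend the pool until I/O is
		 * resumed (e.g. after the underlying errors are cleared),
		 * then retry.
		 */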
10884 zio_suspend(spa, NULL, ZIO_SUSPEND_IOERR);
10885 zio_resume_wait(spa);
10886 }
10887 }
10888
10889 /*
10890 * Sync the specified transaction group. New blocks may be dirtied as
10891 * part of the process, so we iterate until it converges.
10892 */
10893 void
10894 spa_sync(spa_t *spa, uint64_t txg)
10895 {
10896 vdev_t *vd = NULL;
10897
10898 VERIFY(spa_writeable(spa));
10899
10900 /*
10901 * Wait for i/os issued in open context that need to complete
10902 * before this txg syncs.
10903 */
10904 (void) zio_wait(spa->spa_txg_zio[txg & TXG_MASK]);
10905 spa->spa_txg_zio[txg & TXG_MASK] = zio_root(spa, NULL, NULL,
10906 ZIO_FLAG_CANFAIL);
10907
	/*
	 * Now that there can be no more cloning in this transaction group,
	 * and frees have not yet been issued, we can process pending BRT
	 * updates.
	 */
10912 */
10913 brt_pending_apply(spa, txg);
10914
10915 spa_sync_time_logger(spa, txg, B_FALSE);
10916
10917 /*
10918 * Lock out configuration changes.
10919 */
10920 spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
10921
10922 spa->spa_syncing_txg = txg;
10923 spa->spa_sync_pass = 0;
10924
10925 /*
10926 * If there are any pending vdev state changes, convert them
10927 * into config changes that go out with this transaction group.
10928 */
10929 spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
10930 while ((vd = list_head(&spa->spa_state_dirty_list)) != NULL) {
10931 /* Avoid holding the write lock unless actually necessary */
10932 if (vd->vdev_aux == NULL) {
10933 vdev_state_clean(vd);
10934 vdev_config_dirty(vd);
10935 continue;
10936 }
10937 /*
10938 * We need the write lock here because, for aux vdevs,
10939 * calling vdev_config_dirty() modifies sav_config.
10940 * This is ugly and will become unnecessary when we
10941 * eliminate the aux vdev wart by integrating all vdevs
10942 * into the root vdev tree.
10943 */
10944 spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
10945 spa_config_enter(spa, SCL_CONFIG | SCL_STATE, FTAG, RW_WRITER);
10946 while ((vd = list_head(&spa->spa_state_dirty_list)) != NULL) {
10947 vdev_state_clean(vd);
10948 vdev_config_dirty(vd);
10949 }
10950 spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
10951 spa_config_enter(spa, SCL_CONFIG | SCL_STATE, FTAG, RW_READER);
10952 }
10953 spa_config_exit(spa, SCL_STATE, FTAG);
10954
10955 dsl_pool_t *dp = spa->spa_dsl_pool;
10956 dmu_tx_t *tx = dmu_tx_create_assigned(dp, txg);
10957
	spa->spa_sync_starttime = gethrtime();
10959
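	/*
	 * Re-arm the sync deadman: spa_deadman() fires if this sync has not
	 * completed within spa_deadman_synctime.
	 */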
10960 taskq_cancel_id(system_delay_taskq, spa->spa_deadman_tqid, B_TRUE);
10961 spa->spa_deadman_tqid = taskq_dispatch_delay(system_delay_taskq,
10962 spa_deadman, spa, TQ_SLEEP, ddi_get_lbolt() +
10963 NSEC_TO_TICK(spa->spa_deadman_synctime));
10964
10965 /*
10966 * If we are upgrading to SPA_VERSION_RAIDZ_DEFLATE this txg,
10967 * set spa_deflate if we have no raid-z vdevs.
10968 */
10969 if (spa->spa_ubsync.ub_version < SPA_VERSION_RAIDZ_DEFLATE &&
10970 spa->spa_uberblock.ub_version >= SPA_VERSION_RAIDZ_DEFLATE) {
10971 vdev_t *rvd = spa->spa_root_vdev;
10972
10973 int i;
10974 for (i = 0; i < rvd->vdev_children; i++) {
10975 vd = rvd->vdev_child[i];
10976 if (vd->vdev_deflate_ratio != SPA_MINBLOCKSIZE)
10977 break;
10978 }
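		/*
		 * Every top-level vdev has the default deflate ratio
		 * (i.e. no raid-z), so enable deflated space accounting
		 * and record it in the MOS.
		 */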
10979 if (i == rvd->vdev_children) {
10980 spa->spa_deflate = TRUE;
10981 VERIFY0(zap_add(spa->spa_meta_objset,
10982 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_DEFLATE,
10983 sizeof (uint64_t), 1, &spa->spa_deflate, tx));
10984 }
10985 }
10986
10987 spa_sync_adjust_vdev_max_queue_depth(spa);
10988
10989 spa_sync_condense_indirect(spa, tx);
10990
10991 spa_sync_iterate_to_convergence(spa, tx);
10992
10993 #ifdef ZFS_DEBUG
10994 if (!list_is_empty(&spa->spa_config_dirty_list)) {
10995 /*
10996 * Make sure that the number of ZAPs for all the vdevs matches
10997 * the number of ZAPs in the per-vdev ZAP list. This only gets
10998 * called if the config is dirty; otherwise there may be
10999 * outstanding AVZ operations that weren't completed in
11000 * spa_sync_config_object.
11001 */
11002 uint64_t all_vdev_zap_entry_count;
11003 ASSERT0(zap_count(spa->spa_meta_objset,
11004 spa->spa_all_vdev_zaps, &all_vdev_zap_entry_count));
11005 ASSERT3U(vdev_count_verify_zaps(spa->spa_root_vdev), ==,
11006 all_vdev_zap_entry_count);
11007 }
11008 #endif
11009
11010 if (spa->spa_vdev_removal != NULL) {
11011 ASSERT0(spa->spa_vdev_removal->svr_bytes_done[txg & TXG_MASK]);
11012 }
11013
11014 spa_sync_rewrite_vdev_config(spa, tx);
11015 dmu_tx_commit(tx);
11016
11017 taskq_cancel_id(system_delay_taskq, spa->spa_deadman_tqid, B_TRUE);
11018 spa->spa_deadman_tqid = 0;
11019
11020 /*
11021 * Clear the dirty config list.
11022 */
11023 while ((vd = list_head(&spa->spa_config_dirty_list)) != NULL)
11024 vdev_config_clean(vd);
11025
11026 /*
11027 * Now that the new config has synced transactionally,
11028 * let it become visible to the config cache.
11029 */
11030 if (spa->spa_config_syncing != NULL) {
11031 spa_config_set(spa, spa->spa_config_syncing);
11032 spa->spa_config_txg = txg;
11033 spa->spa_config_syncing = NULL;
11034 }
11035
11036 dsl_pool_sync_done(dp, txg);
11037
11038 /*
11039 * Update usable space statistics.
11040 */
11041 while ((vd = txg_list_remove(&spa->spa_vdev_txg_list, TXG_CLEAN(txg)))
11042 != NULL)
11043 vdev_sync_done(vd, txg);
11044
11045 metaslab_class_evict_old(spa->spa_normal_class, txg);
11046 metaslab_class_evict_old(spa->spa_log_class, txg);
	/* The embedded log class has only one metaslab per vdev; skip it. */
11048 metaslab_class_evict_old(spa->spa_special_class, txg);
11049 metaslab_class_evict_old(spa->spa_dedup_class, txg);
11050
11051 spa_sync_close_syncing_log_sm(spa);
11052
11053 spa_update_dspace(spa);
11054
11055 if (spa_get_autotrim(spa) == SPA_AUTOTRIM_ON)
11056 vdev_autotrim_kick(spa);
11057
11058 /*
11059 * It had better be the case that we didn't dirty anything
11060 * since vdev_config_sync().
11061 */
11062 ASSERT(txg_list_empty(&dp->dp_dirty_datasets, txg));
11063 ASSERT(txg_list_empty(&dp->dp_dirty_dirs, txg));
11064 ASSERT(txg_list_empty(&spa->spa_vdev_txg_list, txg));
11065
11066 while (zfs_pause_spa_sync)
11067 delay(1);
11068
11069 spa->spa_sync_pass = 0;
11070
11071 /*
11072 * Update the last synced uberblock here. We want to do this at
11073 * the end of spa_sync() so that consumers of spa_last_synced_txg()
11074 * will be guaranteed that all the processing associated with
11075 * that txg has been completed.
11076 */
11077 spa->spa_ubsync = spa->spa_uberblock;
11078 spa_config_exit(spa, SCL_CONFIG, FTAG);
11079
11080 spa_handle_ignored_writes(spa);
11081
11082 /*
11083 * If any async tasks have been requested, kick them off.
11084 */
11085 spa_async_dispatch(spa);
11086 }
11087
11088 /*
11089 * Sync all pools. We don't want to hold the namespace lock across these
11090 * operations, so we take a reference on the spa_t and drop the lock during the
11091 * sync.
11092 */
11093 void
11094 spa_sync_allpools(void)
11095 {
11096 spa_t *spa = NULL;
11097 spa_namespace_enter(FTAG);
11098 while ((spa = spa_next(spa)) != NULL) {
11099 if (spa_state(spa) != POOL_STATE_ACTIVE ||
11100 !spa_writeable(spa) || spa_suspended(spa))
11101 continue;
11102 spa_open_ref(spa, FTAG);
11103 spa_namespace_exit(FTAG);
11104 txg_wait_synced(spa_get_dsl(spa), 0);
11105 spa_namespace_enter(FTAG);
11106 spa_close(spa, FTAG);
11107 }
11108 spa_namespace_exit(FTAG);
11109 }
11110
11111 taskq_t *
11112 spa_sync_tq_create(spa_t *spa, const char *name)
11113 {
11114 kthread_t **kthreads;
11115
11116 ASSERT0P(spa->spa_sync_tq);
11117 ASSERT3S(spa->spa_alloc_count, <=, boot_ncpus);
11118
11119 /*
11120 * - do not allow more allocators than cpus.
11121 * - there may be more cpus than allocators.
11122 * - do not allow more sync taskq threads than allocators or cpus.
11123 */
11124 int nthreads = spa->spa_alloc_count;
11125 spa->spa_syncthreads = kmem_zalloc(sizeof (spa_syncthread_info_t) *
11126 nthreads, KM_SLEEP);
11127
11128 spa->spa_sync_tq = taskq_create_synced(name, nthreads, minclsyspri,
11129 nthreads, INT_MAX, TASKQ_PREPOPULATE, &kthreads);
11130 VERIFY(spa->spa_sync_tq != NULL);
11131 VERIFY(kthreads != NULL);
11132
11133 spa_syncthread_info_t *ti = spa->spa_syncthreads;
11134 for (int i = 0; i < nthreads; i++, ti++) {
11135 ti->sti_thread = kthreads[i];
11136 ti->sti_allocator = i;
11137 }
11138
11139 kmem_free(kthreads, sizeof (*kthreads) * nthreads);
11140 return (spa->spa_sync_tq);
11141 }
11142
11143 void
11144 spa_sync_tq_destroy(spa_t *spa)
11145 {
11146 ASSERT(spa->spa_sync_tq != NULL);
11147
11148 taskq_wait(spa->spa_sync_tq);
11149 taskq_destroy(spa->spa_sync_tq);
11150 kmem_free(spa->spa_syncthreads,
11151 sizeof (spa_syncthread_info_t) * spa->spa_alloc_count);
11152 spa->spa_sync_tq = NULL;
11153 }
11154
11155 uint_t
11156 spa_acq_allocator(spa_t *spa)
11157 {
11158 int i;
11159
11160 if (spa->spa_alloc_count == 1)
11161 return (0);
11162
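	/*
	 * Rotate through the allocators and claim the first one that is not
	 * already in use by another sync thread.
	 */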
11163 mutex_enter(&spa->spa_allocs_use->sau_lock);
11164 uint_t r = spa->spa_allocs_use->sau_rotor;
11165 do {
11166 if (++r == spa->spa_alloc_count)
11167 r = 0;
11168 } while (spa->spa_allocs_use->sau_inuse[r]);
11169 spa->spa_allocs_use->sau_inuse[r] = B_TRUE;
11170 spa->spa_allocs_use->sau_rotor = r;
11171 mutex_exit(&spa->spa_allocs_use->sau_lock);
11172
11173 spa_syncthread_info_t *ti = spa->spa_syncthreads;
11174 for (i = 0; i < spa->spa_alloc_count; i++, ti++) {
11175 if (ti->sti_thread == curthread) {
11176 ti->sti_allocator = r;
11177 break;
11178 }
11179 }
11180 ASSERT3S(i, <, spa->spa_alloc_count);
11181 return (r);
11182 }
11183
11184 void
11185 spa_rel_allocator(spa_t *spa, uint_t allocator)
11186 {
11187 if (spa->spa_alloc_count > 1)
11188 spa->spa_allocs_use->sau_inuse[allocator] = B_FALSE;
11189 }
11190
11191 void
11192 spa_select_allocator(zio_t *zio)
11193 {
11194 zbookmark_phys_t *bm = &zio->io_bookmark;
11195 spa_t *spa = zio->io_spa;
11196
11197 ASSERT(zio->io_type == ZIO_TYPE_WRITE);
11198
11199 /*
11200 * A gang block (for example) may have inherited its parent's
11201 * allocator, in which case there is nothing further to do here.
11202 */
11203 if (ZIO_HAS_ALLOCATOR(zio))
11204 return;
11205
11206 ASSERT(spa != NULL);
11207 ASSERT(bm != NULL);
11208
11209 /*
11210 * First try to use an allocator assigned to the syncthread, and set
11211 * the corresponding write issue taskq for the allocator.
11212 * Note, we must have an open pool to do this.
11213 */
11214 if (spa->spa_sync_tq != NULL) {
11215 spa_syncthread_info_t *ti = spa->spa_syncthreads;
11216 for (int i = 0; i < spa->spa_alloc_count; i++, ti++) {
11217 if (ti->sti_thread == curthread) {
11218 zio->io_allocator = ti->sti_allocator;
11219 return;
11220 }
11221 }
11222 }
11223
11224 /*
11225 * We want to try to use as many allocators as possible to help improve
11226 * performance, but we also want logically adjacent IOs to be physically
11227 * adjacent to improve sequential read performance. We chunk each object
11228 * into 2^20 block regions, and then hash based on the objset, object,
11229 * level, and region to accomplish both of these goals.
11230 */
11231 uint64_t hv = cityhash4(bm->zb_objset, bm->zb_object, bm->zb_level,
11232 bm->zb_blkid >> 20);
11233
11234 zio->io_allocator = (uint_t)hv % spa->spa_alloc_count;
11235 }
11236
11237 /*
11238 * ==========================================================================
11239 * Miscellaneous routines
11240 * ==========================================================================
11241 */
11242
11243 /*
11244 * Remove all pools in the system.
11245 */
11246 void
11247 spa_evict_all(void)
11248 {
11249 spa_t *spa;
11250
11251 /*
11252 * Remove all cached state. All pools should be closed now,
11253 * so every spa in the AVL tree should be unreferenced.
11254 */
11255 spa_namespace_enter(FTAG);
11256 while ((spa = spa_next(NULL)) != NULL) {
11257 /*
11258 * Stop async tasks. The async thread may need to detach
11259 * a device that's been replaced, which requires grabbing
11260 * spa_namespace_lock, so we must drop it here.
11261 */
11262 spa_open_ref(spa, FTAG);
11263 spa_namespace_exit(FTAG);
11264 spa_async_suspend(spa);
11265 spa_namespace_enter(FTAG);
11266 spa_close(spa, FTAG);
11267
11268 if (spa->spa_state != POOL_STATE_UNINITIALIZED) {
11269 spa_unload(spa);
11270 spa_deactivate(spa);
11271 }
11272 spa_remove(spa);
11273 }
11274 spa_namespace_exit(FTAG);
11275 }
11276
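/*
 * Look up a vdev by guid in the root vdev tree; if 'aux' is set, also search
 * the l2cache and spare aux vdev lists.
 */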
11277 vdev_t *
11278 spa_lookup_by_guid(spa_t *spa, uint64_t guid, boolean_t aux)
11279 {
11280 vdev_t *vd;
11281 int i;
11282
11283 if ((vd = vdev_lookup_by_guid(spa->spa_root_vdev, guid)) != NULL)
11284 return (vd);
11285
11286 if (aux) {
11287 for (i = 0; i < spa->spa_l2cache.sav_count; i++) {
11288 vd = spa->spa_l2cache.sav_vdevs[i];
11289 if (vd->vdev_guid == guid)
11290 return (vd);
11291 }
11292
11293 for (i = 0; i < spa->spa_spares.sav_count; i++) {
11294 vd = spa->spa_spares.sav_vdevs[i];
11295 if (vd->vdev_guid == guid)
11296 return (vd);
11297 }
11298 }
11299
11300 return (NULL);
11301 }
11302
11303 void
11304 spa_upgrade(spa_t *spa, uint64_t version)
11305 {
11306 ASSERT(spa_writeable(spa));
11307
11308 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
11309
11310 /*
11311 * This should only be called for a non-faulted pool, and since a
11312 * future version would result in an unopenable pool, this shouldn't be
11313 * possible.
11314 */
11315 ASSERT(SPA_VERSION_IS_SUPPORTED(spa->spa_uberblock.ub_version));
11316 ASSERT3U(version, >=, spa->spa_uberblock.ub_version);
11317
11318 spa->spa_uberblock.ub_version = version;
11319 vdev_config_dirty(spa->spa_root_vdev);
11320
11321 spa_config_exit(spa, SCL_ALL, FTAG);
11322
11323 txg_wait_synced(spa_get_dsl(spa), 0);
11324 }
11325
11326 static boolean_t
11327 spa_has_aux_vdev(spa_t *spa, uint64_t guid, spa_aux_vdev_t *sav)
11328 {
11329 (void) spa;
11330 int i;
11331 uint64_t vdev_guid;
11332
11333 for (i = 0; i < sav->sav_count; i++)
11334 if (sav->sav_vdevs[i]->vdev_guid == guid)
11335 return (B_TRUE);
11336
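	/* Also check devices that are still pending addition to this list. */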
11337 for (i = 0; i < sav->sav_npending; i++) {
11338 if (nvlist_lookup_uint64(sav->sav_pending[i], ZPOOL_CONFIG_GUID,
11339 &vdev_guid) == 0 && vdev_guid == guid)
11340 return (B_TRUE);
11341 }
11342
11343 return (B_FALSE);
11344 }
11345
11346 boolean_t
11347 spa_has_l2cache(spa_t *spa, uint64_t guid)
11348 {
11349 return (spa_has_aux_vdev(spa, guid, &spa->spa_l2cache));
11350 }
11351
11352 boolean_t
11353 spa_has_spare(spa_t *spa, uint64_t guid)
11354 {
11355 return (spa_has_aux_vdev(spa, guid, &spa->spa_spares));
11356 }
11357
/*
 * Check if a pool has an active shared spare device.
 * Note: the reference count of an active spare is 2: once as a spare and
 * once as a replacement.
 */
11362 static boolean_t
11363 spa_has_active_shared_spare(spa_t *spa)
11364 {
11365 int i, refcnt;
11366 uint64_t pool;
11367 spa_aux_vdev_t *sav = &spa->spa_spares;
11368
11369 for (i = 0; i < sav->sav_count; i++) {
11370 if (spa_spare_exists(sav->sav_vdevs[i]->vdev_guid, &pool,
11371 &refcnt) && pool != 0ULL && pool == spa_guid(spa) &&
11372 refcnt > 2)
11373 return (B_TRUE);
11374 }
11375
11376 return (B_FALSE);
11377 }
11378
11379 uint64_t
11380 spa_total_metaslabs(spa_t *spa)
11381 {
11382 vdev_t *rvd = spa->spa_root_vdev;
11383
11384 uint64_t m = 0;
11385 for (uint64_t c = 0; c < rvd->vdev_children; c++) {
11386 vdev_t *vd = rvd->vdev_child[c];
11387 if (!vdev_is_concrete(vd))
11388 continue;
11389 m += vd->vdev_ms_count;
11390 }
11391 return (m);
11392 }
11393
11394 /*
11395 * Notify any waiting threads that some activity has switched from being in-
11396 * progress to not-in-progress so that the thread can wake up and determine
11397 * whether it is finished waiting.
11398 */
11399 void
11400 spa_notify_waiters(spa_t *spa)
11401 {
11402 /*
11403 * Acquiring spa_activities_lock here prevents the cv_broadcast from
11404 * happening between the waiting thread's check and cv_wait.
11405 */
11406 mutex_enter(&spa->spa_activities_lock);
11407 cv_broadcast(&spa->spa_activities_cv);
11408 mutex_exit(&spa->spa_activities_lock);
11409 }
11410
11411 /*
11412 * Notify any waiting threads that the pool is exporting, and then block until
11413 * they are finished using the spa_t.
11414 */
11415 void
11416 spa_wake_waiters(spa_t *spa)
11417 {
11418 mutex_enter(&spa->spa_activities_lock);
11419 spa->spa_waiters_cancel = B_TRUE;
11420 cv_broadcast(&spa->spa_activities_cv);
11421 while (spa->spa_waiters != 0)
11422 cv_wait(&spa->spa_waiters_cv, &spa->spa_activities_lock);
11423 spa->spa_waiters_cancel = B_FALSE;
11424 mutex_exit(&spa->spa_activities_lock);
11425 }
11426
11427 /* Whether the vdev or any of its descendants are being initialized/trimmed. */
11428 static boolean_t
11429 spa_vdev_activity_in_progress_impl(vdev_t *vd, zpool_wait_activity_t activity)
11430 {
11431 spa_t *spa = vd->vdev_spa;
11432
11433 ASSERT(spa_config_held(spa, SCL_CONFIG | SCL_STATE, RW_READER));
11434 ASSERT(MUTEX_HELD(&spa->spa_activities_lock));
11435 ASSERT(activity == ZPOOL_WAIT_INITIALIZE ||
11436 activity == ZPOOL_WAIT_TRIM);
11437
11438 kmutex_t *lock = activity == ZPOOL_WAIT_INITIALIZE ?
11439 &vd->vdev_initialize_lock : &vd->vdev_trim_lock;
11440
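	/*
	 * Temporarily drop spa_activities_lock while taking the per-vdev
	 * lock; see the 'Locking for waiting threads' comment below for the
	 * lock ordering rationale.
	 */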
11441 mutex_exit(&spa->spa_activities_lock);
11442 mutex_enter(lock);
11443 mutex_enter(&spa->spa_activities_lock);
11444
11445 boolean_t in_progress = (activity == ZPOOL_WAIT_INITIALIZE) ?
11446 (vd->vdev_initialize_state == VDEV_INITIALIZE_ACTIVE) :
11447 (vd->vdev_trim_state == VDEV_TRIM_ACTIVE);
11448 mutex_exit(lock);
11449
11450 if (in_progress)
11451 return (B_TRUE);
11452
11453 for (int i = 0; i < vd->vdev_children; i++) {
11454 if (spa_vdev_activity_in_progress_impl(vd->vdev_child[i],
11455 activity))
11456 return (B_TRUE);
11457 }
11458
11459 return (B_FALSE);
11460 }
11461
11462 /*
11463 * If use_guid is true, this checks whether the vdev specified by guid is
11464 * being initialized/trimmed. Otherwise, it checks whether any vdev in the pool
11465 * is being initialized/trimmed. The caller must hold the config lock and
11466 * spa_activities_lock.
11467 */
11468 static int
11469 spa_vdev_activity_in_progress(spa_t *spa, boolean_t use_guid, uint64_t guid,
11470 zpool_wait_activity_t activity, boolean_t *in_progress)
11471 {
11472 mutex_exit(&spa->spa_activities_lock);
11473 spa_config_enter(spa, SCL_CONFIG | SCL_STATE, FTAG, RW_READER);
11474 mutex_enter(&spa->spa_activities_lock);
11475
11476 vdev_t *vd;
11477 if (use_guid) {
11478 vd = spa_lookup_by_guid(spa, guid, B_FALSE);
11479 if (vd == NULL || !vd->vdev_ops->vdev_op_leaf) {
11480 spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
11481 return (EINVAL);
11482 }
11483 } else {
11484 vd = spa->spa_root_vdev;
11485 }
11486
11487 *in_progress = spa_vdev_activity_in_progress_impl(vd, activity);
11488
11489 spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
11490 return (0);
11491 }
11492
11493 /*
11494 * Locking for waiting threads
11495 * ---------------------------
11496 *
11497 * Waiting threads need a way to check whether a given activity is in progress,
11498 * and then, if it is, wait for it to complete. Each activity will have some
11499 * in-memory representation of the relevant on-disk state which can be used to
11500 * determine whether or not the activity is in progress. The in-memory state and
11501 * the locking used to protect it will be different for each activity, and may
11502 * not be suitable for use with a cvar (e.g., some state is protected by the
11503 * config lock). To allow waiting threads to wait without any races, another
11504 * lock, spa_activities_lock, is used.
11505 *
11506 * When the state is checked, both the activity-specific lock (if there is one)
11507 * and spa_activities_lock are held. In some cases, the activity-specific lock
11508 * is acquired explicitly (e.g. the config lock). In others, the locking is
11509 * internal to some check (e.g. bpobj_is_empty). After checking, the waiting
11510 * thread releases the activity-specific lock and, if the activity is in
11511 * progress, then cv_waits using spa_activities_lock.
11512 *
11513 * The waiting thread is woken when another thread, one completing some
11514 * activity, updates the state of the activity and then calls
11515 * spa_notify_waiters, which will cv_broadcast. This 'completing' thread only
11516 * needs to hold its activity-specific lock when updating the state, and this
11517 * lock can (but doesn't have to) be dropped before calling spa_notify_waiters.
11518 *
11519 * Because spa_notify_waiters acquires spa_activities_lock before broadcasting,
11520 * and because it is held when the waiting thread checks the state of the
11521 * activity, it can never be the case that the completing thread both updates
11522 * the activity state and cv_broadcasts in between the waiting thread's check
11523 * and cv_wait. Thus, a waiting thread can never miss a wakeup.
11524 *
11525 * In order to prevent deadlock, when the waiting thread does its check, in some
11526 * cases it will temporarily drop spa_activities_lock in order to acquire the
11527 * activity-specific lock. The order in which spa_activities_lock and the
11528 * activity specific lock are acquired in the waiting thread is determined by
11529 * the order in which they are acquired in the completing thread; if the
11530 * completing thread calls spa_notify_waiters with the activity-specific lock
11531 * held, then the waiting thread must also acquire the activity-specific lock
11532 * first.
11533 */
11534
11535 static int
11536 spa_activity_in_progress(spa_t *spa, zpool_wait_activity_t activity,
11537 boolean_t use_tag, uint64_t tag, boolean_t *in_progress)
11538 {
11539 int error = 0;
11540
11541 ASSERT(MUTEX_HELD(&spa->spa_activities_lock));
11542
11543 switch (activity) {
11544 case ZPOOL_WAIT_CKPT_DISCARD:
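		/*
		 * A checkpoint discard is in progress when the feature is
		 * still active but the checkpoint entry has already been
		 * removed from the MOS directory.
		 */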
11545 *in_progress =
11546 (spa_feature_is_active(spa, SPA_FEATURE_POOL_CHECKPOINT) &&
11547 zap_contains(spa_meta_objset(spa),
11548 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_ZPOOL_CHECKPOINT) ==
11549 ENOENT);
11550 break;
11551 case ZPOOL_WAIT_FREE:
11552 *in_progress = ((spa_version(spa) >= SPA_VERSION_DEADLISTS &&
11553 !bpobj_is_empty(&spa->spa_dsl_pool->dp_free_bpobj)) ||
11554 spa_feature_is_active(spa, SPA_FEATURE_ASYNC_DESTROY) ||
11555 spa_livelist_delete_check(spa));
11556 break;
11557 case ZPOOL_WAIT_INITIALIZE:
11558 case ZPOOL_WAIT_TRIM:
11559 error = spa_vdev_activity_in_progress(spa, use_tag, tag,
11560 activity, in_progress);
11561 break;
11562 case ZPOOL_WAIT_REPLACE:
11563 mutex_exit(&spa->spa_activities_lock);
11564 spa_config_enter(spa, SCL_CONFIG | SCL_STATE, FTAG, RW_READER);
11565 mutex_enter(&spa->spa_activities_lock);
11566
11567 *in_progress = vdev_replace_in_progress(spa->spa_root_vdev);
11568 spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
11569 break;
11570 case ZPOOL_WAIT_REMOVE:
11571 *in_progress = (spa->spa_removing_phys.sr_state ==
11572 DSS_SCANNING);
11573 break;
11574 case ZPOOL_WAIT_RESILVER:
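		/*
		 * A sequential rebuild also counts as a resilver; if none is
		 * active, fall through and check the scan state shared with
		 * scrubs.
		 */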
11575 *in_progress = vdev_rebuild_active(spa->spa_root_vdev);
11576 if (*in_progress)
11577 break;
11578 zfs_fallthrough;
11579 case ZPOOL_WAIT_SCRUB:
11580 {
11581 boolean_t scanning, paused, is_scrub;
11582 dsl_scan_t *scn = spa->spa_dsl_pool->dp_scan;
11583
11584 is_scrub = (scn->scn_phys.scn_func == POOL_SCAN_SCRUB);
11585 scanning = (scn->scn_phys.scn_state == DSS_SCANNING);
11586 paused = dsl_scan_is_paused_scrub(scn);
11587 *in_progress = (scanning && !paused &&
11588 is_scrub == (activity == ZPOOL_WAIT_SCRUB));
11589 break;
11590 }
11591 case ZPOOL_WAIT_RAIDZ_EXPAND:
11592 {
11593 vdev_raidz_expand_t *vre = spa->spa_raidz_expand;
11594 *in_progress = (vre != NULL && vre->vre_state == DSS_SCANNING);
11595 break;
11596 }
11597 default:
11598 panic("unrecognized value for activity %d", activity);
11599 }
11600
11601 return (error);
11602 }
11603
11604 static int
11605 spa_wait_common(const char *pool, zpool_wait_activity_t activity,
11606 boolean_t use_tag, uint64_t tag, boolean_t *waited)
11607 {
11608 /*
11609 * The tag is used to distinguish between instances of an activity.
11610 * 'initialize' and 'trim' are the only activities that we use this for.
11611 * The other activities can only have a single instance in progress in a
11612 * pool at one time, making the tag unnecessary.
11613 *
11614 * There can be multiple devices being replaced at once, but since they
11615 * all finish once resilvering finishes, we don't bother keeping track
11616 * of them individually, we just wait for them all to finish.
11617 */
11618 if (use_tag && activity != ZPOOL_WAIT_INITIALIZE &&
11619 activity != ZPOOL_WAIT_TRIM)
11620 return (EINVAL);
11621
11622 if (activity < 0 || activity >= ZPOOL_WAIT_NUM_ACTIVITIES)
11623 return (EINVAL);
11624
11625 spa_t *spa;
11626 int error = spa_open(pool, &spa, FTAG);
11627 if (error != 0)
11628 return (error);
11629
11630 /*
11631 * Increment the spa's waiter count so that we can call spa_close and
11632 * still ensure that the spa_t doesn't get freed before this thread is
11633 * finished with it when the pool is exported. We want to call spa_close
11634 * before we start waiting because otherwise the additional ref would
11635 * prevent the pool from being exported or destroyed throughout the
11636 * potentially long wait.
11637 */
11638 mutex_enter(&spa->spa_activities_lock);
11639 spa->spa_waiters++;
11640 spa_close(spa, FTAG);
11641
11642 *waited = B_FALSE;
11643 for (;;) {
11644 boolean_t in_progress;
11645 error = spa_activity_in_progress(spa, activity, use_tag, tag,
11646 &in_progress);
11647
11648 if (error || !in_progress || spa->spa_waiters_cancel)
11649 break;
11650
11651 *waited = B_TRUE;
11652
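		/*
		 * Sleep until spa_notify_waiters() broadcasts; bail out with
		 * EINTR if the wait is interrupted by a signal.
		 */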
11653 if (cv_wait_sig(&spa->spa_activities_cv,
11654 &spa->spa_activities_lock) == 0) {
11655 error = EINTR;
11656 break;
11657 }
11658 }
11659
11660 spa->spa_waiters--;
11661 cv_signal(&spa->spa_waiters_cv);
11662 mutex_exit(&spa->spa_activities_lock);
11663
11664 return (error);
11665 }
11666
11667 /*
11668 * Wait for a particular instance of the specified activity to complete, where
11669 * the instance is identified by 'tag'
11670 */
11671 int
11672 spa_wait_tag(const char *pool, zpool_wait_activity_t activity, uint64_t tag,
11673 boolean_t *waited)
11674 {
11675 return (spa_wait_common(pool, activity, B_TRUE, tag, waited));
11676 }
11677
/*
 * Wait for all instances of the specified activity to complete.
 */
11681 int
11682 spa_wait(const char *pool, zpool_wait_activity_t activity, boolean_t *waited)
{
	return (spa_wait_common(pool, activity, B_FALSE, 0, waited));
11686 }
11687
11688 sysevent_t *
11689 spa_event_create(spa_t *spa, vdev_t *vd, nvlist_t *hist_nvl, const char *name)
11690 {
11691 sysevent_t *ev = NULL;
11692 #ifdef _KERNEL
11693 nvlist_t *resource;
11694
11695 resource = zfs_event_create(spa, vd, FM_SYSEVENT_CLASS, name, hist_nvl);
11696 if (resource) {
11697 ev = kmem_alloc(sizeof (sysevent_t), KM_SLEEP);
11698 ev->resource = resource;
11699 }
11700 #else
11701 (void) spa, (void) vd, (void) hist_nvl, (void) name;
11702 #endif
11703 return (ev);
11704 }
11705
11706 void
11707 spa_event_post(sysevent_t *ev)
11708 {
11709 #ifdef _KERNEL
11710 if (ev) {
11711 zfs_zevent_post(ev->resource, NULL, zfs_zevent_post_cb);
11712 kmem_free(ev, sizeof (*ev));
11713 }
11714 #else
11715 (void) ev;
11716 #endif
11717 }
11718
11719 /*
11720 * Post a zevent corresponding to the given sysevent. The 'name' must be one
11721 * of the event definitions in sys/sysevent/eventdefs.h. The payload will be
11722 * filled in from the spa and (optionally) the vdev. This doesn't do anything
11723 * in the userland libzpool, as we don't want consumers to misinterpret ztest
11724 * or zdb as real changes.
11725 */
11726 void
11727 spa_event_notify(spa_t *spa, vdev_t *vd, nvlist_t *hist_nvl, const char *name)
11728 {
11729 spa_event_post(spa_event_create(spa, vd, hist_nvl, name));
11730 }
11731
11732 /* state manipulation functions */
11733 EXPORT_SYMBOL(spa_open);
11734 EXPORT_SYMBOL(spa_open_rewind);
11735 EXPORT_SYMBOL(spa_get_stats);
11736 EXPORT_SYMBOL(spa_create);
11737 EXPORT_SYMBOL(spa_import);
11738 EXPORT_SYMBOL(spa_tryimport);
11739 EXPORT_SYMBOL(spa_destroy);
11740 EXPORT_SYMBOL(spa_export);
11741 EXPORT_SYMBOL(spa_reset);
11742 EXPORT_SYMBOL(spa_async_request);
11743 EXPORT_SYMBOL(spa_async_suspend);
11744 EXPORT_SYMBOL(spa_async_resume);
11745 EXPORT_SYMBOL(spa_inject_addref);
11746 EXPORT_SYMBOL(spa_inject_delref);
11747 EXPORT_SYMBOL(spa_scan_stat_init);
11748 EXPORT_SYMBOL(spa_scan_get_stats);
11749
11750 /* device manipulation */
11751 EXPORT_SYMBOL(spa_vdev_add);
11752 EXPORT_SYMBOL(spa_vdev_attach);
11753 EXPORT_SYMBOL(spa_vdev_detach);
11754 EXPORT_SYMBOL(spa_vdev_setpath);
11755 EXPORT_SYMBOL(spa_vdev_setfru);
11756 EXPORT_SYMBOL(spa_vdev_split_mirror);
11757
/* spare state (which is global across all pools) */
11759 EXPORT_SYMBOL(spa_spare_add);
11760 EXPORT_SYMBOL(spa_spare_remove);
11761 EXPORT_SYMBOL(spa_spare_exists);
11762 EXPORT_SYMBOL(spa_spare_activate);
11763
/* L2ARC state (which is global across all pools) */
11765 EXPORT_SYMBOL(spa_l2cache_add);
11766 EXPORT_SYMBOL(spa_l2cache_remove);
11767 EXPORT_SYMBOL(spa_l2cache_exists);
11768 EXPORT_SYMBOL(spa_l2cache_activate);
11769 EXPORT_SYMBOL(spa_l2cache_drop);
11770
11771 /* scanning */
11772 EXPORT_SYMBOL(spa_scan);
11773 EXPORT_SYMBOL(spa_scan_range);
11774 EXPORT_SYMBOL(spa_scan_stop);
11775
11776 /* spa syncing */
11777 EXPORT_SYMBOL(spa_sync); /* only for DMU use */
11778 EXPORT_SYMBOL(spa_sync_allpools);
11779
11780 /* properties */
11781 EXPORT_SYMBOL(spa_prop_set);
11782 EXPORT_SYMBOL(spa_prop_get);
11783 EXPORT_SYMBOL(spa_prop_clear_bootfs);
11784
11785 /* asynchronous event notification */
11786 EXPORT_SYMBOL(spa_event_notify);
11787
11788 ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, preload_pct, UINT, ZMOD_RW,
11789 "Percentage of CPUs to run a metaslab preload taskq");
11790
11791 ZFS_MODULE_PARAM(zfs_spa, spa_, load_verify_shift, UINT, ZMOD_RW,
11792 "log2 fraction of arc that can be used by inflight I/Os when "
11793 "verifying pool during import");
11794
11795 ZFS_MODULE_PARAM(zfs_spa, spa_, load_verify_metadata, INT, ZMOD_RW,
11796 "Set to traverse metadata on pool import");
11797
11798 ZFS_MODULE_PARAM(zfs_spa, spa_, load_verify_data, INT, ZMOD_RW,
11799 "Set to traverse data on pool import");
11800
11801 ZFS_MODULE_PARAM(zfs_spa, spa_, load_print_vdev_tree, INT, ZMOD_RW,
11802 "Print vdev tree to zfs_dbgmsg during pool import");
11803
11804 ZFS_MODULE_PARAM(zfs_zio, zio_, taskq_batch_pct, UINT, ZMOD_RW,
11805 "Percentage of CPUs to run an IO worker thread");
11806
11807 ZFS_MODULE_PARAM(zfs_zio, zio_, taskq_batch_tpq, UINT, ZMOD_RW,
11808 "Number of threads per IO worker taskqueue");
11809
11810 ZFS_MODULE_PARAM(zfs, zfs_, max_missing_tvds, U64, ZMOD_RW,
11811 "Allow importing pool with up to this number of missing top-level "
11812 "vdevs (in read-only mode)");
11813
11814 ZFS_MODULE_PARAM(zfs_livelist_condense, zfs_livelist_condense_, zthr_pause, INT,
11815 ZMOD_RW, "Set the livelist condense zthr to pause");
11816
11817 ZFS_MODULE_PARAM(zfs_livelist_condense, zfs_livelist_condense_, sync_pause, INT,
11818 ZMOD_RW, "Set the livelist condense synctask to pause");
11819
11820 ZFS_MODULE_PARAM(zfs_livelist_condense, zfs_livelist_condense_, sync_cancel,
11821 INT, ZMOD_RW,
11822 "Whether livelist condensing was canceled in the synctask");
11823
11824 ZFS_MODULE_PARAM(zfs_livelist_condense, zfs_livelist_condense_, zthr_cancel,
11825 INT, ZMOD_RW,
11826 "Whether livelist condensing was canceled in the zthr function");
11827
11828 ZFS_MODULE_PARAM(zfs_livelist_condense, zfs_livelist_condense_, new_alloc, INT,
11829 ZMOD_RW,
11830 "Whether extra ALLOC blkptrs were added to a livelist entry while it "
11831 "was being condensed");
11832
11833 ZFS_MODULE_PARAM(zfs_spa, spa_, note_txg_time, UINT, ZMOD_RW,
11834 "How frequently TXG timestamps are stored internally (in seconds)");
11835
11836 ZFS_MODULE_PARAM(zfs_spa, spa_, flush_txg_time, UINT, ZMOD_RW,
11837 "How frequently the TXG timestamps database should be flushed "
11838 "to disk (in seconds)");
11839
11840 #ifdef _KERNEL
11841 ZFS_MODULE_VIRTUAL_PARAM_CALL(zfs_zio, zio_, taskq_read,
11842 spa_taskq_read_param_set, spa_taskq_read_param_get, ZMOD_RW,
11843 "Configure IO queues for read IO");
11844 ZFS_MODULE_VIRTUAL_PARAM_CALL(zfs_zio, zio_, taskq_write,
11845 spa_taskq_write_param_set, spa_taskq_write_param_get, ZMOD_RW,
11846 "Configure IO queues for write IO");
11847 ZFS_MODULE_VIRTUAL_PARAM_CALL(zfs_zio, zio_, taskq_free,
11848 spa_taskq_free_param_set, spa_taskq_free_param_get, ZMOD_RW,
11849 "Configure IO queues for free IO");
11850 #endif
11851
11852 ZFS_MODULE_PARAM(zfs_zio, zio_, taskq_write_tpq, UINT, ZMOD_RW,
11853 "Number of CPUs per write issue taskq");
11854