/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or https://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2024 by Delphix. All rights reserved.
 * Copyright (c) 2018, Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
 * Copyright 2013 Saso Kiselkov. All rights reserved.
 * Copyright (c) 2014 Integros [integros.com]
 * Copyright 2016 Toomas Soome <tsoome@me.com>
 * Copyright (c) 2016 Actifio, Inc. All rights reserved.
 * Copyright 2018 Joyent, Inc.
 * Copyright (c) 2017, 2019, Datto Inc. All rights reserved.
 * Copyright 2017 Joyent, Inc.
 * Copyright (c) 2017, Intel Corporation.
 * Copyright (c) 2021, Colm Buckley <colm@tuatha.org>
 * Copyright (c) 2023 Hewlett Packard Enterprise Development LP.
 * Copyright (c) 2023, 2024, Klara Inc.
 */

/*
 * SPA: Storage Pool Allocator
 *
 * This file contains all the routines used when modifying on-disk SPA state.
 * This includes opening, importing, destroying, exporting a pool, and syncing a
 * pool.
46 */ 47 48 #include <sys/zfs_context.h> 49 #include <sys/fm/fs/zfs.h> 50 #include <sys/spa_impl.h> 51 #include <sys/zio.h> 52 #include <sys/zio_checksum.h> 53 #include <sys/dmu.h> 54 #include <sys/dmu_tx.h> 55 #include <sys/zap.h> 56 #include <sys/zil.h> 57 #include <sys/brt.h> 58 #include <sys/ddt.h> 59 #include <sys/vdev_impl.h> 60 #include <sys/vdev_removal.h> 61 #include <sys/vdev_indirect_mapping.h> 62 #include <sys/vdev_indirect_births.h> 63 #include <sys/vdev_initialize.h> 64 #include <sys/vdev_rebuild.h> 65 #include <sys/vdev_trim.h> 66 #include <sys/vdev_disk.h> 67 #include <sys/vdev_raidz.h> 68 #include <sys/vdev_draid.h> 69 #include <sys/metaslab.h> 70 #include <sys/metaslab_impl.h> 71 #include <sys/mmp.h> 72 #include <sys/uberblock_impl.h> 73 #include <sys/txg.h> 74 #include <sys/avl.h> 75 #include <sys/bpobj.h> 76 #include <sys/dmu_traverse.h> 77 #include <sys/dmu_objset.h> 78 #include <sys/unique.h> 79 #include <sys/dsl_pool.h> 80 #include <sys/dsl_dataset.h> 81 #include <sys/dsl_dir.h> 82 #include <sys/dsl_prop.h> 83 #include <sys/dsl_synctask.h> 84 #include <sys/fs/zfs.h> 85 #include <sys/arc.h> 86 #include <sys/callb.h> 87 #include <sys/systeminfo.h> 88 #include <sys/zfs_ioctl.h> 89 #include <sys/dsl_scan.h> 90 #include <sys/zfeature.h> 91 #include <sys/dsl_destroy.h> 92 #include <sys/zvol.h> 93 94 #ifdef _KERNEL 95 #include <sys/fm/protocol.h> 96 #include <sys/fm/util.h> 97 #include <sys/callb.h> 98 #include <sys/zone.h> 99 #include <sys/vmsystm.h> 100 #endif /* _KERNEL */ 101 102 #include "zfs_prop.h" 103 #include "zfs_comutil.h" 104 #include <cityhash.h> 105 106 /* 107 * spa_thread() existed on Illumos as a parent thread for the various worker 108 * threads that actually run the pool, as a way to both reference the entire 109 * pool work as a single object, and to share properties like scheduling 110 * options. It has not yet been adapted to Linux or FreeBSD. This define is 111 * used to mark related parts of the code to make things easier for the reader, 112 * and to compile this code out. It can be removed when someone implements it, 113 * moves it to some Illumos-specific place, or removes it entirely. 114 */ 115 #undef HAVE_SPA_THREAD 116 117 /* 118 * The "System Duty Cycle" scheduling class is an Illumos feature to help 119 * prevent CPU-intensive kernel threads from affecting latency on interactive 120 * threads. It doesn't exist on Linux or FreeBSD, so the supporting code is 121 * gated behind a define. On Illumos SDC depends on spa_thread(), but 122 * spa_thread() also has other uses, so this is a separate define. 123 */ 124 #undef HAVE_SYSDC 125 126 /* 127 * The interval, in seconds, at which failed configuration cache file writes 128 * should be retried. 129 */ 130 int zfs_ccw_retry_interval = 300; 131 132 typedef enum zti_modes { 133 ZTI_MODE_FIXED, /* value is # of threads (min 1) */ 134 ZTI_MODE_SCALE, /* Taskqs scale with CPUs. 
*/ 135 ZTI_MODE_SYNC, /* sync thread assigned */ 136 ZTI_MODE_NULL, /* don't create a taskq */ 137 ZTI_NMODES 138 } zti_modes_t; 139 140 #define ZTI_P(n, q) { ZTI_MODE_FIXED, (n), (q) } 141 #define ZTI_PCT(n) { ZTI_MODE_ONLINE_PERCENT, (n), 1 } 142 #define ZTI_SCALE { ZTI_MODE_SCALE, 0, 1 } 143 #define ZTI_SYNC { ZTI_MODE_SYNC, 0, 1 } 144 #define ZTI_NULL { ZTI_MODE_NULL, 0, 0 } 145 146 #define ZTI_N(n) ZTI_P(n, 1) 147 #define ZTI_ONE ZTI_N(1) 148 149 typedef struct zio_taskq_info { 150 zti_modes_t zti_mode; 151 uint_t zti_value; 152 uint_t zti_count; 153 } zio_taskq_info_t; 154 155 static const char *const zio_taskq_types[ZIO_TASKQ_TYPES] = { 156 "iss", "iss_h", "int", "int_h" 157 }; 158 159 /* 160 * This table defines the taskq settings for each ZFS I/O type. When 161 * initializing a pool, we use this table to create an appropriately sized 162 * taskq. Some operations are low volume and therefore have a small, static 163 * number of threads assigned to their taskqs using the ZTI_N(#) or ZTI_ONE 164 * macros. Other operations process a large amount of data; the ZTI_SCALE 165 * macro causes us to create a taskq oriented for throughput. Some operations 166 * are so high frequency and short-lived that the taskq itself can become a 167 * point of lock contention. The ZTI_P(#, #) macro indicates that we need an 168 * additional degree of parallelism specified by the number of threads per- 169 * taskq and the number of taskqs; when dispatching an event in this case, the 170 * particular taskq is chosen at random. ZTI_SCALE uses a number of taskqs 171 * that scales with the number of CPUs. 172 * 173 * The different taskq priorities are to handle the different contexts (issue 174 * and interrupt) and then to reserve threads for high priority I/Os that 175 * need to be handled with minimum delay. Illumos taskq has unfair TQ_FRONT 176 * implementation, so separate high priority threads are used there. 177 */ 178 static zio_taskq_info_t zio_taskqs[ZIO_TYPES][ZIO_TASKQ_TYPES] = { 179 /* ISSUE ISSUE_HIGH INTR INTR_HIGH */ 180 { ZTI_ONE, ZTI_NULL, ZTI_ONE, ZTI_NULL }, /* NULL */ 181 { ZTI_N(8), ZTI_NULL, ZTI_SCALE, ZTI_NULL }, /* READ */ 182 #ifdef illumos 183 { ZTI_SYNC, ZTI_N(5), ZTI_SCALE, ZTI_N(5) }, /* WRITE */ 184 #else 185 { ZTI_SYNC, ZTI_NULL, ZTI_SCALE, ZTI_NULL }, /* WRITE */ 186 #endif 187 { ZTI_SCALE, ZTI_NULL, ZTI_ONE, ZTI_NULL }, /* FREE */ 188 { ZTI_ONE, ZTI_NULL, ZTI_ONE, ZTI_NULL }, /* CLAIM */ 189 { ZTI_ONE, ZTI_NULL, ZTI_ONE, ZTI_NULL }, /* FLUSH */ 190 { ZTI_N(4), ZTI_NULL, ZTI_ONE, ZTI_NULL }, /* TRIM */ 191 }; 192 193 static void spa_sync_version(void *arg, dmu_tx_t *tx); 194 static void spa_sync_props(void *arg, dmu_tx_t *tx); 195 static boolean_t spa_has_active_shared_spare(spa_t *spa); 196 static int spa_load_impl(spa_t *spa, spa_import_type_t type, 197 const char **ereport); 198 static void spa_vdev_resilver_done(spa_t *spa); 199 200 /* 201 * Percentage of all CPUs that can be used by the metaslab preload taskq. 
202 */ 203 static uint_t metaslab_preload_pct = 50; 204 205 static uint_t zio_taskq_batch_pct = 80; /* 1 thread per cpu in pset */ 206 static uint_t zio_taskq_batch_tpq; /* threads per taskq */ 207 208 #ifdef HAVE_SYSDC 209 static const boolean_t zio_taskq_sysdc = B_TRUE; /* use SDC scheduling class */ 210 static const uint_t zio_taskq_basedc = 80; /* base duty cycle */ 211 #endif 212 213 #ifdef HAVE_SPA_THREAD 214 static const boolean_t spa_create_process = B_TRUE; /* no process => no sysdc */ 215 #endif 216 217 static uint_t zio_taskq_write_tpq = 16; 218 219 /* 220 * Report any spa_load_verify errors found, but do not fail spa_load. 221 * This is used by zdb to analyze non-idle pools. 222 */ 223 boolean_t spa_load_verify_dryrun = B_FALSE; 224 225 /* 226 * Allow read spacemaps in case of readonly import (spa_mode == SPA_MODE_READ). 227 * This is used by zdb for spacemaps verification. 228 */ 229 boolean_t spa_mode_readable_spacemaps = B_FALSE; 230 231 /* 232 * This (illegal) pool name is used when temporarily importing a spa_t in order 233 * to get the vdev stats associated with the imported devices. 234 */ 235 #define TRYIMPORT_NAME "$import" 236 237 /* 238 * For debugging purposes: print out vdev tree during pool import. 239 */ 240 static int spa_load_print_vdev_tree = B_FALSE; 241 242 /* 243 * A non-zero value for zfs_max_missing_tvds means that we allow importing 244 * pools with missing top-level vdevs. This is strictly intended for advanced 245 * pool recovery cases since missing data is almost inevitable. Pools with 246 * missing devices can only be imported read-only for safety reasons, and their 247 * fail-mode will be automatically set to "continue". 248 * 249 * With 1 missing vdev we should be able to import the pool and mount all 250 * datasets. User data that was not modified after the missing device has been 251 * added should be recoverable. This means that snapshots created prior to the 252 * addition of that device should be completely intact. 253 * 254 * With 2 missing vdevs, some datasets may fail to mount since there are 255 * dataset statistics that are stored as regular metadata. Some data might be 256 * recoverable if those vdevs were added recently. 257 * 258 * With 3 or more missing vdevs, the pool is severely damaged and MOS entries 259 * may be missing entirely. Chances of data recovery are very low. Note that 260 * there are also risks of performing an inadvertent rewind as we might be 261 * missing all the vdevs with the latest uberblocks. 262 */ 263 uint64_t zfs_max_missing_tvds = 0; 264 265 /* 266 * The parameters below are similar to zfs_max_missing_tvds but are only 267 * intended for a preliminary open of the pool with an untrusted config which 268 * might be incomplete or out-dated. 269 * 270 * We are more tolerant for pools opened from a cachefile since we could have 271 * an out-dated cachefile where a device removal was not registered. 272 * We could have set the limit arbitrarily high but in the case where devices 273 * are really missing we would want to return the proper error codes; we chose 274 * SPA_DVAS_PER_BP - 1 so that some copies of the MOS would still be available 275 * and we get a chance to retrieve the trusted config. 276 */ 277 uint64_t zfs_max_missing_tvds_cachefile = SPA_DVAS_PER_BP - 1; 278 279 /* 280 * In the case where config was assembled by scanning device paths (/dev/dsks 281 * by default) we are less tolerant since all the existing devices should have 282 * been detected and we want spa_load to return the right error codes. 
283 */ 284 uint64_t zfs_max_missing_tvds_scan = 0; 285 286 /* 287 * Debugging aid that pauses spa_sync() towards the end. 288 */ 289 static const boolean_t zfs_pause_spa_sync = B_FALSE; 290 291 /* 292 * Variables to indicate the livelist condense zthr func should wait at certain 293 * points for the livelist to be removed - used to test condense/destroy races 294 */ 295 static int zfs_livelist_condense_zthr_pause = 0; 296 static int zfs_livelist_condense_sync_pause = 0; 297 298 /* 299 * Variables to track whether or not condense cancellation has been 300 * triggered in testing. 301 */ 302 static int zfs_livelist_condense_sync_cancel = 0; 303 static int zfs_livelist_condense_zthr_cancel = 0; 304 305 /* 306 * Variable to track whether or not extra ALLOC blkptrs were added to a 307 * livelist entry while it was being condensed (caused by the way we track 308 * remapped blkptrs in dbuf_remap_impl) 309 */ 310 static int zfs_livelist_condense_new_alloc = 0; 311 312 /* 313 * ========================================================================== 314 * SPA properties routines 315 * ========================================================================== 316 */ 317 318 /* 319 * Add a (source=src, propname=propval) list to an nvlist. 320 */ 321 static void 322 spa_prop_add_list(nvlist_t *nvl, zpool_prop_t prop, const char *strval, 323 uint64_t intval, zprop_source_t src) 324 { 325 const char *propname = zpool_prop_to_name(prop); 326 nvlist_t *propval; 327 328 propval = fnvlist_alloc(); 329 fnvlist_add_uint64(propval, ZPROP_SOURCE, src); 330 331 if (strval != NULL) 332 fnvlist_add_string(propval, ZPROP_VALUE, strval); 333 else 334 fnvlist_add_uint64(propval, ZPROP_VALUE, intval); 335 336 fnvlist_add_nvlist(nvl, propname, propval); 337 nvlist_free(propval); 338 } 339 340 static int 341 spa_prop_add(spa_t *spa, const char *propname, nvlist_t *outnvl) 342 { 343 zpool_prop_t prop = zpool_name_to_prop(propname); 344 zprop_source_t src = ZPROP_SRC_NONE; 345 uint64_t intval; 346 int err; 347 348 /* 349 * NB: Not all properties lookups via this API require 350 * the spa props lock, so they must explicitly grab it here. 351 */ 352 switch (prop) { 353 case ZPOOL_PROP_DEDUPCACHED: 354 err = ddt_get_pool_dedup_cached(spa, &intval); 355 if (err != 0) 356 return (SET_ERROR(err)); 357 break; 358 default: 359 return (SET_ERROR(EINVAL)); 360 } 361 362 spa_prop_add_list(outnvl, prop, NULL, intval, src); 363 364 return (0); 365 } 366 367 int 368 spa_prop_get_nvlist(spa_t *spa, char **props, unsigned int n_props, 369 nvlist_t *outnvl) 370 { 371 int err = 0; 372 373 if (props == NULL) 374 return (0); 375 376 for (unsigned int i = 0; i < n_props && err == 0; i++) { 377 err = spa_prop_add(spa, props[i], outnvl); 378 } 379 380 return (err); 381 } 382 383 /* 384 * Add a user property (source=src, propname=propval) to an nvlist. 385 */ 386 static void 387 spa_prop_add_user(nvlist_t *nvl, const char *propname, char *strval, 388 zprop_source_t src) 389 { 390 nvlist_t *propval; 391 392 VERIFY(nvlist_alloc(&propval, NV_UNIQUE_NAME, KM_SLEEP) == 0); 393 VERIFY(nvlist_add_uint64(propval, ZPROP_SOURCE, src) == 0); 394 VERIFY(nvlist_add_string(propval, ZPROP_VALUE, strval) == 0); 395 VERIFY(nvlist_add_nvlist(nvl, propname, propval) == 0); 396 nvlist_free(propval); 397 } 398 399 /* 400 * Get property values from the spa configuration. 
401 */ 402 static void 403 spa_prop_get_config(spa_t *spa, nvlist_t *nv) 404 { 405 vdev_t *rvd = spa->spa_root_vdev; 406 dsl_pool_t *pool = spa->spa_dsl_pool; 407 uint64_t size, alloc, cap, version; 408 const zprop_source_t src = ZPROP_SRC_NONE; 409 spa_config_dirent_t *dp; 410 metaslab_class_t *mc = spa_normal_class(spa); 411 412 ASSERT(MUTEX_HELD(&spa->spa_props_lock)); 413 414 if (rvd != NULL) { 415 alloc = metaslab_class_get_alloc(mc); 416 alloc += metaslab_class_get_alloc(spa_special_class(spa)); 417 alloc += metaslab_class_get_alloc(spa_dedup_class(spa)); 418 alloc += metaslab_class_get_alloc(spa_embedded_log_class(spa)); 419 420 size = metaslab_class_get_space(mc); 421 size += metaslab_class_get_space(spa_special_class(spa)); 422 size += metaslab_class_get_space(spa_dedup_class(spa)); 423 size += metaslab_class_get_space(spa_embedded_log_class(spa)); 424 425 spa_prop_add_list(nv, ZPOOL_PROP_NAME, spa_name(spa), 0, src); 426 spa_prop_add_list(nv, ZPOOL_PROP_SIZE, NULL, size, src); 427 spa_prop_add_list(nv, ZPOOL_PROP_ALLOCATED, NULL, alloc, src); 428 spa_prop_add_list(nv, ZPOOL_PROP_FREE, NULL, 429 size - alloc, src); 430 spa_prop_add_list(nv, ZPOOL_PROP_CHECKPOINT, NULL, 431 spa->spa_checkpoint_info.sci_dspace, src); 432 433 spa_prop_add_list(nv, ZPOOL_PROP_FRAGMENTATION, NULL, 434 metaslab_class_fragmentation(mc), src); 435 spa_prop_add_list(nv, ZPOOL_PROP_EXPANDSZ, NULL, 436 metaslab_class_expandable_space(mc), src); 437 spa_prop_add_list(nv, ZPOOL_PROP_READONLY, NULL, 438 (spa_mode(spa) == SPA_MODE_READ), src); 439 440 cap = (size == 0) ? 0 : (alloc * 100 / size); 441 spa_prop_add_list(nv, ZPOOL_PROP_CAPACITY, NULL, cap, src); 442 443 spa_prop_add_list(nv, ZPOOL_PROP_DEDUPRATIO, NULL, 444 ddt_get_pool_dedup_ratio(spa), src); 445 spa_prop_add_list(nv, ZPOOL_PROP_BCLONEUSED, NULL, 446 brt_get_used(spa), src); 447 spa_prop_add_list(nv, ZPOOL_PROP_BCLONESAVED, NULL, 448 brt_get_saved(spa), src); 449 spa_prop_add_list(nv, ZPOOL_PROP_BCLONERATIO, NULL, 450 brt_get_ratio(spa), src); 451 452 spa_prop_add_list(nv, ZPOOL_PROP_DEDUP_TABLE_SIZE, NULL, 453 ddt_get_ddt_dsize(spa), src); 454 spa_prop_add_list(nv, ZPOOL_PROP_HEALTH, NULL, 455 rvd->vdev_state, src); 456 spa_prop_add_list(nv, ZPOOL_PROP_LAST_SCRUBBED_TXG, NULL, 457 spa_get_last_scrubbed_txg(spa), src); 458 459 version = spa_version(spa); 460 if (version == zpool_prop_default_numeric(ZPOOL_PROP_VERSION)) { 461 spa_prop_add_list(nv, ZPOOL_PROP_VERSION, NULL, 462 version, ZPROP_SRC_DEFAULT); 463 } else { 464 spa_prop_add_list(nv, ZPOOL_PROP_VERSION, NULL, 465 version, ZPROP_SRC_LOCAL); 466 } 467 spa_prop_add_list(nv, ZPOOL_PROP_LOAD_GUID, 468 NULL, spa_load_guid(spa), src); 469 } 470 471 if (pool != NULL) { 472 /* 473 * The $FREE directory was introduced in SPA_VERSION_DEADLISTS, 474 * when opening pools before this version freedir will be NULL. 
475 */ 476 if (pool->dp_free_dir != NULL) { 477 spa_prop_add_list(nv, ZPOOL_PROP_FREEING, NULL, 478 dsl_dir_phys(pool->dp_free_dir)->dd_used_bytes, 479 src); 480 } else { 481 spa_prop_add_list(nv, ZPOOL_PROP_FREEING, 482 NULL, 0, src); 483 } 484 485 if (pool->dp_leak_dir != NULL) { 486 spa_prop_add_list(nv, ZPOOL_PROP_LEAKED, NULL, 487 dsl_dir_phys(pool->dp_leak_dir)->dd_used_bytes, 488 src); 489 } else { 490 spa_prop_add_list(nv, ZPOOL_PROP_LEAKED, 491 NULL, 0, src); 492 } 493 } 494 495 spa_prop_add_list(nv, ZPOOL_PROP_GUID, NULL, spa_guid(spa), src); 496 497 if (spa->spa_comment != NULL) { 498 spa_prop_add_list(nv, ZPOOL_PROP_COMMENT, spa->spa_comment, 499 0, ZPROP_SRC_LOCAL); 500 } 501 502 if (spa->spa_compatibility != NULL) { 503 spa_prop_add_list(nv, ZPOOL_PROP_COMPATIBILITY, 504 spa->spa_compatibility, 0, ZPROP_SRC_LOCAL); 505 } 506 507 if (spa->spa_root != NULL) 508 spa_prop_add_list(nv, ZPOOL_PROP_ALTROOT, spa->spa_root, 509 0, ZPROP_SRC_LOCAL); 510 511 if (spa_feature_is_enabled(spa, SPA_FEATURE_LARGE_BLOCKS)) { 512 spa_prop_add_list(nv, ZPOOL_PROP_MAXBLOCKSIZE, NULL, 513 MIN(zfs_max_recordsize, SPA_MAXBLOCKSIZE), ZPROP_SRC_NONE); 514 } else { 515 spa_prop_add_list(nv, ZPOOL_PROP_MAXBLOCKSIZE, NULL, 516 SPA_OLD_MAXBLOCKSIZE, ZPROP_SRC_NONE); 517 } 518 519 if (spa_feature_is_enabled(spa, SPA_FEATURE_LARGE_DNODE)) { 520 spa_prop_add_list(nv, ZPOOL_PROP_MAXDNODESIZE, NULL, 521 DNODE_MAX_SIZE, ZPROP_SRC_NONE); 522 } else { 523 spa_prop_add_list(nv, ZPOOL_PROP_MAXDNODESIZE, NULL, 524 DNODE_MIN_SIZE, ZPROP_SRC_NONE); 525 } 526 527 if ((dp = list_head(&spa->spa_config_list)) != NULL) { 528 if (dp->scd_path == NULL) { 529 spa_prop_add_list(nv, ZPOOL_PROP_CACHEFILE, 530 "none", 0, ZPROP_SRC_LOCAL); 531 } else if (strcmp(dp->scd_path, spa_config_path) != 0) { 532 spa_prop_add_list(nv, ZPOOL_PROP_CACHEFILE, 533 dp->scd_path, 0, ZPROP_SRC_LOCAL); 534 } 535 } 536 } 537 538 /* 539 * Get zpool property values. 540 */ 541 int 542 spa_prop_get(spa_t *spa, nvlist_t *nv) 543 { 544 objset_t *mos = spa->spa_meta_objset; 545 zap_cursor_t zc; 546 zap_attribute_t *za; 547 dsl_pool_t *dp; 548 int err = 0; 549 550 dp = spa_get_dsl(spa); 551 dsl_pool_config_enter(dp, FTAG); 552 za = zap_attribute_alloc(); 553 mutex_enter(&spa->spa_props_lock); 554 555 /* 556 * Get properties from the spa config. 557 */ 558 spa_prop_get_config(spa, nv); 559 560 /* If no pool property object, no more prop to get. */ 561 if (mos == NULL || spa->spa_pool_props_object == 0) 562 goto out; 563 564 /* 565 * Get properties from the MOS pool property object. 
566 */ 567 for (zap_cursor_init(&zc, mos, spa->spa_pool_props_object); 568 (err = zap_cursor_retrieve(&zc, za)) == 0; 569 zap_cursor_advance(&zc)) { 570 uint64_t intval = 0; 571 char *strval = NULL; 572 zprop_source_t src = ZPROP_SRC_DEFAULT; 573 zpool_prop_t prop; 574 575 if ((prop = zpool_name_to_prop(za->za_name)) == 576 ZPOOL_PROP_INVAL && !zfs_prop_user(za->za_name)) 577 continue; 578 579 switch (za->za_integer_length) { 580 case 8: 581 /* integer property */ 582 if (za->za_first_integer != 583 zpool_prop_default_numeric(prop)) 584 src = ZPROP_SRC_LOCAL; 585 586 if (prop == ZPOOL_PROP_BOOTFS) { 587 dsl_dataset_t *ds = NULL; 588 589 err = dsl_dataset_hold_obj(dp, 590 za->za_first_integer, FTAG, &ds); 591 if (err != 0) 592 break; 593 594 strval = kmem_alloc(ZFS_MAX_DATASET_NAME_LEN, 595 KM_SLEEP); 596 dsl_dataset_name(ds, strval); 597 dsl_dataset_rele(ds, FTAG); 598 } else { 599 strval = NULL; 600 intval = za->za_first_integer; 601 } 602 603 spa_prop_add_list(nv, prop, strval, intval, src); 604 605 if (strval != NULL) 606 kmem_free(strval, ZFS_MAX_DATASET_NAME_LEN); 607 608 break; 609 610 case 1: 611 /* string property */ 612 strval = kmem_alloc(za->za_num_integers, KM_SLEEP); 613 err = zap_lookup(mos, spa->spa_pool_props_object, 614 za->za_name, 1, za->za_num_integers, strval); 615 if (err) { 616 kmem_free(strval, za->za_num_integers); 617 break; 618 } 619 if (prop != ZPOOL_PROP_INVAL) { 620 spa_prop_add_list(nv, prop, strval, 0, src); 621 } else { 622 src = ZPROP_SRC_LOCAL; 623 spa_prop_add_user(nv, za->za_name, strval, 624 src); 625 } 626 kmem_free(strval, za->za_num_integers); 627 break; 628 629 default: 630 break; 631 } 632 } 633 zap_cursor_fini(&zc); 634 out: 635 mutex_exit(&spa->spa_props_lock); 636 dsl_pool_config_exit(dp, FTAG); 637 zap_attribute_free(za); 638 639 if (err && err != ENOENT) 640 return (err); 641 642 return (0); 643 } 644 645 /* 646 * Validate the given pool properties nvlist and modify the list 647 * for the property values to be set. 648 */ 649 static int 650 spa_prop_validate(spa_t *spa, nvlist_t *props) 651 { 652 nvpair_t *elem; 653 int error = 0, reset_bootfs = 0; 654 uint64_t objnum = 0; 655 boolean_t has_feature = B_FALSE; 656 657 elem = NULL; 658 while ((elem = nvlist_next_nvpair(props, elem)) != NULL) { 659 uint64_t intval; 660 const char *strval, *slash, *check, *fname; 661 const char *propname = nvpair_name(elem); 662 zpool_prop_t prop = zpool_name_to_prop(propname); 663 664 switch (prop) { 665 case ZPOOL_PROP_INVAL: 666 /* 667 * Sanitize the input. 
668 */ 669 if (zfs_prop_user(propname)) { 670 if (strlen(propname) >= ZAP_MAXNAMELEN) { 671 error = SET_ERROR(ENAMETOOLONG); 672 break; 673 } 674 675 if (strlen(fnvpair_value_string(elem)) >= 676 ZAP_MAXVALUELEN) { 677 error = SET_ERROR(E2BIG); 678 break; 679 } 680 } else if (zpool_prop_feature(propname)) { 681 if (nvpair_type(elem) != DATA_TYPE_UINT64) { 682 error = SET_ERROR(EINVAL); 683 break; 684 } 685 686 if (nvpair_value_uint64(elem, &intval) != 0) { 687 error = SET_ERROR(EINVAL); 688 break; 689 } 690 691 if (intval != 0) { 692 error = SET_ERROR(EINVAL); 693 break; 694 } 695 696 fname = strchr(propname, '@') + 1; 697 if (zfeature_lookup_name(fname, NULL) != 0) { 698 error = SET_ERROR(EINVAL); 699 break; 700 } 701 702 has_feature = B_TRUE; 703 } else { 704 error = SET_ERROR(EINVAL); 705 break; 706 } 707 break; 708 709 case ZPOOL_PROP_VERSION: 710 error = nvpair_value_uint64(elem, &intval); 711 if (!error && 712 (intval < spa_version(spa) || 713 intval > SPA_VERSION_BEFORE_FEATURES || 714 has_feature)) 715 error = SET_ERROR(EINVAL); 716 break; 717 718 case ZPOOL_PROP_DEDUP_TABLE_QUOTA: 719 error = nvpair_value_uint64(elem, &intval); 720 break; 721 722 case ZPOOL_PROP_DELEGATION: 723 case ZPOOL_PROP_AUTOREPLACE: 724 case ZPOOL_PROP_LISTSNAPS: 725 case ZPOOL_PROP_AUTOEXPAND: 726 case ZPOOL_PROP_AUTOTRIM: 727 error = nvpair_value_uint64(elem, &intval); 728 if (!error && intval > 1) 729 error = SET_ERROR(EINVAL); 730 break; 731 732 case ZPOOL_PROP_MULTIHOST: 733 error = nvpair_value_uint64(elem, &intval); 734 if (!error && intval > 1) 735 error = SET_ERROR(EINVAL); 736 737 if (!error) { 738 uint32_t hostid = zone_get_hostid(NULL); 739 if (hostid) 740 spa->spa_hostid = hostid; 741 else 742 error = SET_ERROR(ENOTSUP); 743 } 744 745 break; 746 747 case ZPOOL_PROP_BOOTFS: 748 /* 749 * If the pool version is less than SPA_VERSION_BOOTFS, 750 * or the pool is still being created (version == 0), 751 * the bootfs property cannot be set. 752 */ 753 if (spa_version(spa) < SPA_VERSION_BOOTFS) { 754 error = SET_ERROR(ENOTSUP); 755 break; 756 } 757 758 /* 759 * Make sure the vdev config is bootable 760 */ 761 if (!vdev_is_bootable(spa->spa_root_vdev)) { 762 error = SET_ERROR(ENOTSUP); 763 break; 764 } 765 766 reset_bootfs = 1; 767 768 error = nvpair_value_string(elem, &strval); 769 770 if (!error) { 771 objset_t *os; 772 773 if (strval == NULL || strval[0] == '\0') { 774 objnum = zpool_prop_default_numeric( 775 ZPOOL_PROP_BOOTFS); 776 break; 777 } 778 779 error = dmu_objset_hold(strval, FTAG, &os); 780 if (error != 0) 781 break; 782 783 /* Must be ZPL. */ 784 if (dmu_objset_type(os) != DMU_OST_ZFS) { 785 error = SET_ERROR(ENOTSUP); 786 } else { 787 objnum = dmu_objset_id(os); 788 } 789 dmu_objset_rele(os, FTAG); 790 } 791 break; 792 793 case ZPOOL_PROP_FAILUREMODE: 794 error = nvpair_value_uint64(elem, &intval); 795 if (!error && intval > ZIO_FAILURE_MODE_PANIC) 796 error = SET_ERROR(EINVAL); 797 798 /* 799 * This is a special case which only occurs when 800 * the pool has completely failed. This allows 801 * the user to change the in-core failmode property 802 * without syncing it out to disk (I/Os might 803 * currently be blocked). We do this by returning 804 * EIO to the caller (spa_prop_set) to trick it 805 * into thinking we encountered a property validation 806 * error. 
807 */ 808 if (!error && spa_suspended(spa)) { 809 spa->spa_failmode = intval; 810 error = SET_ERROR(EIO); 811 } 812 break; 813 814 case ZPOOL_PROP_CACHEFILE: 815 if ((error = nvpair_value_string(elem, &strval)) != 0) 816 break; 817 818 if (strval[0] == '\0') 819 break; 820 821 if (strcmp(strval, "none") == 0) 822 break; 823 824 if (strval[0] != '/') { 825 error = SET_ERROR(EINVAL); 826 break; 827 } 828 829 slash = strrchr(strval, '/'); 830 ASSERT(slash != NULL); 831 832 if (slash[1] == '\0' || strcmp(slash, "/.") == 0 || 833 strcmp(slash, "/..") == 0) 834 error = SET_ERROR(EINVAL); 835 break; 836 837 case ZPOOL_PROP_COMMENT: 838 if ((error = nvpair_value_string(elem, &strval)) != 0) 839 break; 840 for (check = strval; *check != '\0'; check++) { 841 if (!isprint(*check)) { 842 error = SET_ERROR(EINVAL); 843 break; 844 } 845 } 846 if (strlen(strval) > ZPROP_MAX_COMMENT) 847 error = SET_ERROR(E2BIG); 848 break; 849 850 default: 851 break; 852 } 853 854 if (error) 855 break; 856 } 857 858 (void) nvlist_remove_all(props, 859 zpool_prop_to_name(ZPOOL_PROP_DEDUPDITTO)); 860 861 if (!error && reset_bootfs) { 862 error = nvlist_remove(props, 863 zpool_prop_to_name(ZPOOL_PROP_BOOTFS), DATA_TYPE_STRING); 864 865 if (!error) { 866 error = nvlist_add_uint64(props, 867 zpool_prop_to_name(ZPOOL_PROP_BOOTFS), objnum); 868 } 869 } 870 871 return (error); 872 } 873 874 void 875 spa_configfile_set(spa_t *spa, nvlist_t *nvp, boolean_t need_sync) 876 { 877 const char *cachefile; 878 spa_config_dirent_t *dp; 879 880 if (nvlist_lookup_string(nvp, zpool_prop_to_name(ZPOOL_PROP_CACHEFILE), 881 &cachefile) != 0) 882 return; 883 884 dp = kmem_alloc(sizeof (spa_config_dirent_t), 885 KM_SLEEP); 886 887 if (cachefile[0] == '\0') 888 dp->scd_path = spa_strdup(spa_config_path); 889 else if (strcmp(cachefile, "none") == 0) 890 dp->scd_path = NULL; 891 else 892 dp->scd_path = spa_strdup(cachefile); 893 894 list_insert_head(&spa->spa_config_list, dp); 895 if (need_sync) 896 spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE); 897 } 898 899 int 900 spa_prop_set(spa_t *spa, nvlist_t *nvp) 901 { 902 int error; 903 nvpair_t *elem = NULL; 904 boolean_t need_sync = B_FALSE; 905 906 if ((error = spa_prop_validate(spa, nvp)) != 0) 907 return (error); 908 909 while ((elem = nvlist_next_nvpair(nvp, elem)) != NULL) { 910 zpool_prop_t prop = zpool_name_to_prop(nvpair_name(elem)); 911 912 if (prop == ZPOOL_PROP_CACHEFILE || 913 prop == ZPOOL_PROP_ALTROOT || 914 prop == ZPOOL_PROP_READONLY) 915 continue; 916 917 if (prop == ZPOOL_PROP_INVAL && 918 zfs_prop_user(nvpair_name(elem))) { 919 need_sync = B_TRUE; 920 break; 921 } 922 923 if (prop == ZPOOL_PROP_VERSION || prop == ZPOOL_PROP_INVAL) { 924 uint64_t ver = 0; 925 926 if (prop == ZPOOL_PROP_VERSION) { 927 VERIFY(nvpair_value_uint64(elem, &ver) == 0); 928 } else { 929 ASSERT(zpool_prop_feature(nvpair_name(elem))); 930 ver = SPA_VERSION_FEATURES; 931 need_sync = B_TRUE; 932 } 933 934 /* Save time if the version is already set. */ 935 if (ver == spa_version(spa)) 936 continue; 937 938 /* 939 * In addition to the pool directory object, we might 940 * create the pool properties object, the features for 941 * read object, the features for write object, or the 942 * feature descriptions object. 
 */
			error = dsl_sync_task(spa->spa_name, NULL,
			    spa_sync_version, &ver,
			    6, ZFS_SPACE_CHECK_RESERVED);
			if (error)
				return (error);
			continue;
		}

		need_sync = B_TRUE;
		break;
	}

	if (need_sync) {
		return (dsl_sync_task(spa->spa_name, NULL, spa_sync_props,
		    nvp, 6, ZFS_SPACE_CHECK_RESERVED));
	}

	return (0);
}

/*
 * If the bootfs property value is dsobj, clear it.
 */
void
spa_prop_clear_bootfs(spa_t *spa, uint64_t dsobj, dmu_tx_t *tx)
{
	if (spa->spa_bootfs == dsobj && spa->spa_pool_props_object != 0) {
		VERIFY(zap_remove(spa->spa_meta_objset,
		    spa->spa_pool_props_object,
		    zpool_prop_to_name(ZPOOL_PROP_BOOTFS), tx) == 0);
		spa->spa_bootfs = 0;
	}
}

static int
spa_change_guid_check(void *arg, dmu_tx_t *tx)
{
	uint64_t *newguid __maybe_unused = arg;
	spa_t *spa = dmu_tx_pool(tx)->dp_spa;
	vdev_t *rvd = spa->spa_root_vdev;
	uint64_t vdev_state;

	if (spa_feature_is_active(spa, SPA_FEATURE_POOL_CHECKPOINT)) {
		int error = (spa_has_checkpoint(spa)) ?
		    ZFS_ERR_CHECKPOINT_EXISTS : ZFS_ERR_DISCARDING_CHECKPOINT;
		return (SET_ERROR(error));
	}

	spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
	vdev_state = rvd->vdev_state;
	spa_config_exit(spa, SCL_STATE, FTAG);

	if (vdev_state != VDEV_STATE_HEALTHY)
		return (SET_ERROR(ENXIO));

	ASSERT3U(spa_guid(spa), !=, *newguid);

	return (0);
}

static void
spa_change_guid_sync(void *arg, dmu_tx_t *tx)
{
	uint64_t *newguid = arg;
	spa_t *spa = dmu_tx_pool(tx)->dp_spa;
	uint64_t oldguid;
	vdev_t *rvd = spa->spa_root_vdev;

	oldguid = spa_guid(spa);

	spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
	rvd->vdev_guid = *newguid;
	rvd->vdev_guid_sum += (*newguid - oldguid);
	vdev_config_dirty(rvd);
	spa_config_exit(spa, SCL_STATE, FTAG);

	spa_history_log_internal(spa, "guid change", tx, "old=%llu new=%llu",
	    (u_longlong_t)oldguid, (u_longlong_t)*newguid);
}

/*
 * Change the GUID for the pool. This is done so that we can later
 * re-import a pool built from a clone of our own vdevs. We will modify
 * the root vdev's guid, our own pool guid, and then mark all of our
 * vdevs dirty. Note that we must make sure that all our vdevs are
 * online when we do this, or else any vdevs that weren't present
 * would be orphaned from our pool. We are also going to issue a
 * sysevent to update any watchers.
 *
 * The GUID of the pool will be changed to the value pointed to by guidp.
 * The GUID may not be set to the reserved value of 0.
 * A new GUID will be generated if guidp is NULL.
1036 */ 1037 int 1038 spa_change_guid(spa_t *spa, const uint64_t *guidp) 1039 { 1040 uint64_t guid; 1041 int error; 1042 1043 mutex_enter(&spa->spa_vdev_top_lock); 1044 mutex_enter(&spa_namespace_lock); 1045 1046 if (guidp != NULL) { 1047 guid = *guidp; 1048 if (guid == 0) { 1049 error = SET_ERROR(EINVAL); 1050 goto out; 1051 } 1052 1053 if (spa_guid_exists(guid, 0)) { 1054 error = SET_ERROR(EEXIST); 1055 goto out; 1056 } 1057 } else { 1058 guid = spa_generate_guid(NULL); 1059 } 1060 1061 error = dsl_sync_task(spa->spa_name, spa_change_guid_check, 1062 spa_change_guid_sync, &guid, 5, ZFS_SPACE_CHECK_RESERVED); 1063 1064 if (error == 0) { 1065 /* 1066 * Clear the kobj flag from all the vdevs to allow 1067 * vdev_cache_process_kobj_evt() to post events to all the 1068 * vdevs since GUID is updated. 1069 */ 1070 vdev_clear_kobj_evt(spa->spa_root_vdev); 1071 for (int i = 0; i < spa->spa_l2cache.sav_count; i++) 1072 vdev_clear_kobj_evt(spa->spa_l2cache.sav_vdevs[i]); 1073 1074 spa_write_cachefile(spa, B_FALSE, B_TRUE, B_TRUE); 1075 spa_event_notify(spa, NULL, NULL, ESC_ZFS_POOL_REGUID); 1076 } 1077 1078 out: 1079 mutex_exit(&spa_namespace_lock); 1080 mutex_exit(&spa->spa_vdev_top_lock); 1081 1082 return (error); 1083 } 1084 1085 /* 1086 * ========================================================================== 1087 * SPA state manipulation (open/create/destroy/import/export) 1088 * ========================================================================== 1089 */ 1090 1091 static int 1092 spa_error_entry_compare(const void *a, const void *b) 1093 { 1094 const spa_error_entry_t *sa = (const spa_error_entry_t *)a; 1095 const spa_error_entry_t *sb = (const spa_error_entry_t *)b; 1096 int ret; 1097 1098 ret = memcmp(&sa->se_bookmark, &sb->se_bookmark, 1099 sizeof (zbookmark_phys_t)); 1100 1101 return (TREE_ISIGN(ret)); 1102 } 1103 1104 /* 1105 * Utility function which retrieves copies of the current logs and 1106 * re-initializes them in the process. 1107 */ 1108 void 1109 spa_get_errlists(spa_t *spa, avl_tree_t *last, avl_tree_t *scrub) 1110 { 1111 ASSERT(MUTEX_HELD(&spa->spa_errlist_lock)); 1112 1113 memcpy(last, &spa->spa_errlist_last, sizeof (avl_tree_t)); 1114 memcpy(scrub, &spa->spa_errlist_scrub, sizeof (avl_tree_t)); 1115 1116 avl_create(&spa->spa_errlist_scrub, 1117 spa_error_entry_compare, sizeof (spa_error_entry_t), 1118 offsetof(spa_error_entry_t, se_avl)); 1119 avl_create(&spa->spa_errlist_last, 1120 spa_error_entry_compare, sizeof (spa_error_entry_t), 1121 offsetof(spa_error_entry_t, se_avl)); 1122 } 1123 1124 static void 1125 spa_taskqs_init(spa_t *spa, zio_type_t t, zio_taskq_type_t q) 1126 { 1127 const zio_taskq_info_t *ztip = &zio_taskqs[t][q]; 1128 enum zti_modes mode = ztip->zti_mode; 1129 uint_t value = ztip->zti_value; 1130 uint_t count = ztip->zti_count; 1131 spa_taskqs_t *tqs = &spa->spa_zio_taskq[t][q]; 1132 uint_t cpus, flags = TASKQ_DYNAMIC; 1133 1134 switch (mode) { 1135 case ZTI_MODE_FIXED: 1136 ASSERT3U(value, >, 0); 1137 break; 1138 1139 case ZTI_MODE_SYNC: 1140 1141 /* 1142 * Create one wr_iss taskq for every 'zio_taskq_write_tpq' CPUs, 1143 * not to exceed the number of spa allocators, and align to it. 
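		 *
		 * As a rough worked example of the arithmetic below (a sketch
		 * only, assuming 64 online CPUs, 4 allocators, and the
		 * defaults zio_taskq_batch_pct=80 and zio_taskq_write_tpq=16;
		 * actual defaults may vary by platform and version):
		 *
		 *	cpus  = MAX(1, 64 * 80 / 100)       = 51
		 *	count = MAX(1, 51 / 16)             = 3
		 *	count = MIN(3, 4 allocators)        = 3, then lowered
		 *	        to 2 so the allocator count (4) is a multiple
		 *	        of the taskq count
		 *	value = MIN((80 + 2 / 2) / 2, 100)  = 40% of CPUs per
		 *	        taskq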
1144 */ 1145 cpus = MAX(1, boot_ncpus * zio_taskq_batch_pct / 100); 1146 count = MAX(1, cpus / MAX(1, zio_taskq_write_tpq)); 1147 count = MAX(count, (zio_taskq_batch_pct + 99) / 100); 1148 count = MIN(count, spa->spa_alloc_count); 1149 while (spa->spa_alloc_count % count != 0 && 1150 spa->spa_alloc_count < count * 2) 1151 count--; 1152 1153 /* 1154 * zio_taskq_batch_pct is unbounded and may exceed 100%, but no 1155 * single taskq may have more threads than 100% of online cpus. 1156 */ 1157 value = (zio_taskq_batch_pct + count / 2) / count; 1158 value = MIN(value, 100); 1159 flags |= TASKQ_THREADS_CPU_PCT; 1160 break; 1161 1162 case ZTI_MODE_SCALE: 1163 flags |= TASKQ_THREADS_CPU_PCT; 1164 /* 1165 * We want more taskqs to reduce lock contention, but we want 1166 * less for better request ordering and CPU utilization. 1167 */ 1168 cpus = MAX(1, boot_ncpus * zio_taskq_batch_pct / 100); 1169 if (zio_taskq_batch_tpq > 0) { 1170 count = MAX(1, (cpus + zio_taskq_batch_tpq / 2) / 1171 zio_taskq_batch_tpq); 1172 } else { 1173 /* 1174 * Prefer 6 threads per taskq, but no more taskqs 1175 * than threads in them on large systems. For 80%: 1176 * 1177 * taskq taskq total 1178 * cpus taskqs percent threads threads 1179 * ------- ------- ------- ------- ------- 1180 * 1 1 80% 1 1 1181 * 2 1 80% 1 1 1182 * 4 1 80% 3 3 1183 * 8 2 40% 3 6 1184 * 16 3 27% 4 12 1185 * 32 5 16% 5 25 1186 * 64 7 11% 7 49 1187 * 128 10 8% 10 100 1188 * 256 14 6% 15 210 1189 */ 1190 count = 1 + cpus / 6; 1191 while (count * count > cpus) 1192 count--; 1193 } 1194 /* Limit each taskq within 100% to not trigger assertion. */ 1195 count = MAX(count, (zio_taskq_batch_pct + 99) / 100); 1196 value = (zio_taskq_batch_pct + count / 2) / count; 1197 break; 1198 1199 case ZTI_MODE_NULL: 1200 tqs->stqs_count = 0; 1201 tqs->stqs_taskq = NULL; 1202 return; 1203 1204 default: 1205 panic("unrecognized mode for %s_%s taskq (%u:%u) in " 1206 "spa_taskqs_init()", 1207 zio_type_name[t], zio_taskq_types[q], mode, value); 1208 break; 1209 } 1210 1211 ASSERT3U(count, >, 0); 1212 tqs->stqs_count = count; 1213 tqs->stqs_taskq = kmem_alloc(count * sizeof (taskq_t *), KM_SLEEP); 1214 1215 for (uint_t i = 0; i < count; i++) { 1216 taskq_t *tq; 1217 char name[32]; 1218 1219 if (count > 1) 1220 (void) snprintf(name, sizeof (name), "%s_%s_%u", 1221 zio_type_name[t], zio_taskq_types[q], i); 1222 else 1223 (void) snprintf(name, sizeof (name), "%s_%s", 1224 zio_type_name[t], zio_taskq_types[q]); 1225 1226 #ifdef HAVE_SYSDC 1227 if (zio_taskq_sysdc && spa->spa_proc != &p0) { 1228 (void) zio_taskq_basedc; 1229 tq = taskq_create_sysdc(name, value, 50, INT_MAX, 1230 spa->spa_proc, zio_taskq_basedc, flags); 1231 } else { 1232 #endif 1233 pri_t pri = maxclsyspri; 1234 /* 1235 * The write issue taskq can be extremely CPU 1236 * intensive. Run it at slightly less important 1237 * priority than the other taskqs. 1238 * 1239 * Under Linux and FreeBSD this means incrementing 1240 * the priority value as opposed to platforms like 1241 * illumos where it should be decremented. 1242 * 1243 * On FreeBSD, if priorities divided by four (RQ_PPQ) 1244 * are equal then a difference between them is 1245 * insignificant. 
1246 */ 1247 if (t == ZIO_TYPE_WRITE && q == ZIO_TASKQ_ISSUE) { 1248 #if defined(__linux__) 1249 pri++; 1250 #elif defined(__FreeBSD__) 1251 pri += 4; 1252 #else 1253 #error "unknown OS" 1254 #endif 1255 } 1256 tq = taskq_create_proc(name, value, pri, 50, 1257 INT_MAX, spa->spa_proc, flags); 1258 #ifdef HAVE_SYSDC 1259 } 1260 #endif 1261 1262 tqs->stqs_taskq[i] = tq; 1263 } 1264 } 1265 1266 static void 1267 spa_taskqs_fini(spa_t *spa, zio_type_t t, zio_taskq_type_t q) 1268 { 1269 spa_taskqs_t *tqs = &spa->spa_zio_taskq[t][q]; 1270 1271 if (tqs->stqs_taskq == NULL) { 1272 ASSERT3U(tqs->stqs_count, ==, 0); 1273 return; 1274 } 1275 1276 for (uint_t i = 0; i < tqs->stqs_count; i++) { 1277 ASSERT3P(tqs->stqs_taskq[i], !=, NULL); 1278 taskq_destroy(tqs->stqs_taskq[i]); 1279 } 1280 1281 kmem_free(tqs->stqs_taskq, tqs->stqs_count * sizeof (taskq_t *)); 1282 tqs->stqs_taskq = NULL; 1283 } 1284 1285 #ifdef _KERNEL 1286 /* 1287 * The READ and WRITE rows of zio_taskqs are configurable at module load time 1288 * by setting zio_taskq_read or zio_taskq_write. 1289 * 1290 * Example (the defaults for READ and WRITE) 1291 * zio_taskq_read='fixed,1,8 null scale null' 1292 * zio_taskq_write='sync null scale null' 1293 * 1294 * Each sets the entire row at a time. 1295 * 1296 * 'fixed' is parameterised: fixed,Q,T where Q is number of taskqs, T is number 1297 * of threads per taskq. 1298 * 1299 * 'null' can only be set on the high-priority queues (queue selection for 1300 * high-priority queues will fall back to the regular queue if the high-pri 1301 * is NULL. 1302 */ 1303 static const char *const modes[ZTI_NMODES] = { 1304 "fixed", "scale", "sync", "null" 1305 }; 1306 1307 /* Parse the incoming config string. Modifies cfg */ 1308 static int 1309 spa_taskq_param_set(zio_type_t t, char *cfg) 1310 { 1311 int err = 0; 1312 1313 zio_taskq_info_t row[ZIO_TASKQ_TYPES] = {{0}}; 1314 1315 char *next = cfg, *tok, *c; 1316 1317 /* 1318 * Parse out each element from the string and fill `row`. The entire 1319 * row has to be set at once, so any errors are flagged by just 1320 * breaking out of this loop early. 1321 */ 1322 uint_t q; 1323 for (q = 0; q < ZIO_TASKQ_TYPES; q++) { 1324 /* `next` is the start of the config */ 1325 if (next == NULL) 1326 break; 1327 1328 /* Eat up leading space */ 1329 while (isspace(*next)) 1330 next++; 1331 if (*next == '\0') 1332 break; 1333 1334 /* Mode ends at space or end of string */ 1335 tok = next; 1336 next = strchr(tok, ' '); 1337 if (next != NULL) *next++ = '\0'; 1338 1339 /* Parameters start after a comma */ 1340 c = strchr(tok, ','); 1341 if (c != NULL) *c++ = '\0'; 1342 1343 /* Match mode string */ 1344 uint_t mode; 1345 for (mode = 0; mode < ZTI_NMODES; mode++) 1346 if (strcmp(tok, modes[mode]) == 0) 1347 break; 1348 if (mode == ZTI_NMODES) 1349 break; 1350 1351 /* Invalid canary */ 1352 row[q].zti_mode = ZTI_NMODES; 1353 1354 /* Per-mode setup */ 1355 switch (mode) { 1356 1357 /* 1358 * FIXED is parameterised: number of queues, and number of 1359 * threads per queue. 1360 */ 1361 case ZTI_MODE_FIXED: { 1362 /* No parameters? 
*/ 1363 if (c == NULL || *c == '\0') 1364 break; 1365 1366 /* Find next parameter */ 1367 tok = c; 1368 c = strchr(tok, ','); 1369 if (c == NULL) 1370 break; 1371 1372 /* Take digits and convert */ 1373 unsigned long long nq; 1374 if (!(isdigit(*tok))) 1375 break; 1376 err = ddi_strtoull(tok, &tok, 10, &nq); 1377 /* Must succeed and also end at the next param sep */ 1378 if (err != 0 || tok != c) 1379 break; 1380 1381 /* Move past the comma */ 1382 tok++; 1383 /* Need another number */ 1384 if (!(isdigit(*tok))) 1385 break; 1386 /* Remember start to make sure we moved */ 1387 c = tok; 1388 1389 /* Take digits */ 1390 unsigned long long ntpq; 1391 err = ddi_strtoull(tok, &tok, 10, &ntpq); 1392 /* Must succeed, and moved forward */ 1393 if (err != 0 || tok == c || *tok != '\0') 1394 break; 1395 1396 /* 1397 * sanity; zero queues/threads make no sense, and 1398 * 16K is almost certainly more than anyone will ever 1399 * need and avoids silly numbers like UINT32_MAX 1400 */ 1401 if (nq == 0 || nq >= 16384 || 1402 ntpq == 0 || ntpq >= 16384) 1403 break; 1404 1405 const zio_taskq_info_t zti = ZTI_P(ntpq, nq); 1406 row[q] = zti; 1407 break; 1408 } 1409 1410 case ZTI_MODE_SCALE: { 1411 const zio_taskq_info_t zti = ZTI_SCALE; 1412 row[q] = zti; 1413 break; 1414 } 1415 1416 case ZTI_MODE_SYNC: { 1417 const zio_taskq_info_t zti = ZTI_SYNC; 1418 row[q] = zti; 1419 break; 1420 } 1421 1422 case ZTI_MODE_NULL: { 1423 /* 1424 * Can only null the high-priority queues; the general- 1425 * purpose ones have to exist. 1426 */ 1427 if (q != ZIO_TASKQ_ISSUE_HIGH && 1428 q != ZIO_TASKQ_INTERRUPT_HIGH) 1429 break; 1430 1431 const zio_taskq_info_t zti = ZTI_NULL; 1432 row[q] = zti; 1433 break; 1434 } 1435 1436 default: 1437 break; 1438 } 1439 1440 /* Ensure we set a mode */ 1441 if (row[q].zti_mode == ZTI_NMODES) 1442 break; 1443 } 1444 1445 /* Didn't get a full row, fail */ 1446 if (q < ZIO_TASKQ_TYPES) 1447 return (SET_ERROR(EINVAL)); 1448 1449 /* Eat trailing space */ 1450 if (next != NULL) 1451 while (isspace(*next)) 1452 next++; 1453 1454 /* If there's anything left over then fail */ 1455 if (next != NULL && *next != '\0') 1456 return (SET_ERROR(EINVAL)); 1457 1458 /* Success! 
Copy it into the real config */
	for (q = 0; q < ZIO_TASKQ_TYPES; q++)
		zio_taskqs[t][q] = row[q];

	return (0);
}

static int
spa_taskq_param_get(zio_type_t t, char *buf, boolean_t add_newline)
{
	int pos = 0;

	/* Build parameter string from live config */
	const char *sep = "";
	for (uint_t q = 0; q < ZIO_TASKQ_TYPES; q++) {
		const zio_taskq_info_t *zti = &zio_taskqs[t][q];
		if (zti->zti_mode == ZTI_MODE_FIXED)
			pos += sprintf(&buf[pos], "%s%s,%u,%u", sep,
			    modes[zti->zti_mode], zti->zti_count,
			    zti->zti_value);
		else
			pos += sprintf(&buf[pos], "%s%s", sep,
			    modes[zti->zti_mode]);
		sep = " ";
	}

	if (add_newline)
		buf[pos++] = '\n';
	buf[pos] = '\0';

	return (pos);
}

#ifdef __linux__
static int
spa_taskq_read_param_set(const char *val, zfs_kernel_param_t *kp)
{
	char *cfg = kmem_strdup(val);
	int err = spa_taskq_param_set(ZIO_TYPE_READ, cfg);
	kmem_free(cfg, strlen(val)+1);
	return (-err);
}
static int
spa_taskq_read_param_get(char *buf, zfs_kernel_param_t *kp)
{
	return (spa_taskq_param_get(ZIO_TYPE_READ, buf, TRUE));
}

static int
spa_taskq_write_param_set(const char *val, zfs_kernel_param_t *kp)
{
	char *cfg = kmem_strdup(val);
	int err = spa_taskq_param_set(ZIO_TYPE_WRITE, cfg);
	kmem_free(cfg, strlen(val)+1);
	return (-err);
}
static int
spa_taskq_write_param_get(char *buf, zfs_kernel_param_t *kp)
{
	return (spa_taskq_param_get(ZIO_TYPE_WRITE, buf, TRUE));
}
#else
/*
 * On FreeBSD, load-time parameters can be set up before malloc() is available,
 * so we have to do all the parsing work on the stack.
 */
#define	SPA_TASKQ_PARAM_MAX	(128)

static int
spa_taskq_read_param(ZFS_MODULE_PARAM_ARGS)
{
	char buf[SPA_TASKQ_PARAM_MAX];
	int err;

	(void) spa_taskq_param_get(ZIO_TYPE_READ, buf, FALSE);
	err = sysctl_handle_string(oidp, buf, sizeof (buf), req);
	if (err || req->newptr == NULL)
		return (err);
	return (spa_taskq_param_set(ZIO_TYPE_READ, buf));
}

static int
spa_taskq_write_param(ZFS_MODULE_PARAM_ARGS)
{
	char buf[SPA_TASKQ_PARAM_MAX];
	int err;

	(void) spa_taskq_param_get(ZIO_TYPE_WRITE, buf, FALSE);
	err = sysctl_handle_string(oidp, buf, sizeof (buf), req);
	if (err || req->newptr == NULL)
		return (err);
	return (spa_taskq_param_set(ZIO_TYPE_WRITE, buf));
}
#endif
#endif /* _KERNEL */

/*
 * Dispatch a task to the appropriate taskq for the ZFS I/O type and priority.
 * Note that a type may have multiple discrete taskqs to avoid lock contention
 * on the taskq itself.
 */
void
spa_taskq_dispatch(spa_t *spa, zio_type_t t, zio_taskq_type_t q,
    task_func_t *func, zio_t *zio, boolean_t cutinline)
{
	spa_taskqs_t *tqs = &spa->spa_zio_taskq[t][q];
	taskq_t *tq;

	ASSERT3P(tqs->stqs_taskq, !=, NULL);
	ASSERT3U(tqs->stqs_count, !=, 0);

	/*
	 * NB: We are assuming that the zio can only be dispatched
	 * to a single taskq at a time. It would be a grievous error
	 * to dispatch the zio to another taskq at the same time.
1573 */ 1574 ASSERT(zio); 1575 ASSERT(taskq_empty_ent(&zio->io_tqent)); 1576 1577 if (tqs->stqs_count == 1) { 1578 tq = tqs->stqs_taskq[0]; 1579 } else if ((t == ZIO_TYPE_WRITE) && (q == ZIO_TASKQ_ISSUE) && 1580 ZIO_HAS_ALLOCATOR(zio)) { 1581 tq = tqs->stqs_taskq[zio->io_allocator % tqs->stqs_count]; 1582 } else { 1583 tq = tqs->stqs_taskq[((uint64_t)gethrtime()) % tqs->stqs_count]; 1584 } 1585 1586 taskq_dispatch_ent(tq, func, zio, cutinline ? TQ_FRONT : 0, 1587 &zio->io_tqent); 1588 } 1589 1590 static void 1591 spa_create_zio_taskqs(spa_t *spa) 1592 { 1593 for (int t = 0; t < ZIO_TYPES; t++) { 1594 for (int q = 0; q < ZIO_TASKQ_TYPES; q++) { 1595 spa_taskqs_init(spa, t, q); 1596 } 1597 } 1598 } 1599 1600 #if defined(_KERNEL) && defined(HAVE_SPA_THREAD) 1601 static void 1602 spa_thread(void *arg) 1603 { 1604 psetid_t zio_taskq_psrset_bind = PS_NONE; 1605 callb_cpr_t cprinfo; 1606 1607 spa_t *spa = arg; 1608 user_t *pu = PTOU(curproc); 1609 1610 CALLB_CPR_INIT(&cprinfo, &spa->spa_proc_lock, callb_generic_cpr, 1611 spa->spa_name); 1612 1613 ASSERT(curproc != &p0); 1614 (void) snprintf(pu->u_psargs, sizeof (pu->u_psargs), 1615 "zpool-%s", spa->spa_name); 1616 (void) strlcpy(pu->u_comm, pu->u_psargs, sizeof (pu->u_comm)); 1617 1618 /* bind this thread to the requested psrset */ 1619 if (zio_taskq_psrset_bind != PS_NONE) { 1620 pool_lock(); 1621 mutex_enter(&cpu_lock); 1622 mutex_enter(&pidlock); 1623 mutex_enter(&curproc->p_lock); 1624 1625 if (cpupart_bind_thread(curthread, zio_taskq_psrset_bind, 1626 0, NULL, NULL) == 0) { 1627 curthread->t_bind_pset = zio_taskq_psrset_bind; 1628 } else { 1629 cmn_err(CE_WARN, 1630 "Couldn't bind process for zfs pool \"%s\" to " 1631 "pset %d\n", spa->spa_name, zio_taskq_psrset_bind); 1632 } 1633 1634 mutex_exit(&curproc->p_lock); 1635 mutex_exit(&pidlock); 1636 mutex_exit(&cpu_lock); 1637 pool_unlock(); 1638 } 1639 1640 #ifdef HAVE_SYSDC 1641 if (zio_taskq_sysdc) { 1642 sysdc_thread_enter(curthread, 100, 0); 1643 } 1644 #endif 1645 1646 spa->spa_proc = curproc; 1647 spa->spa_did = curthread->t_did; 1648 1649 spa_create_zio_taskqs(spa); 1650 1651 mutex_enter(&spa->spa_proc_lock); 1652 ASSERT(spa->spa_proc_state == SPA_PROC_CREATED); 1653 1654 spa->spa_proc_state = SPA_PROC_ACTIVE; 1655 cv_broadcast(&spa->spa_proc_cv); 1656 1657 CALLB_CPR_SAFE_BEGIN(&cprinfo); 1658 while (spa->spa_proc_state == SPA_PROC_ACTIVE) 1659 cv_wait(&spa->spa_proc_cv, &spa->spa_proc_lock); 1660 CALLB_CPR_SAFE_END(&cprinfo, &spa->spa_proc_lock); 1661 1662 ASSERT(spa->spa_proc_state == SPA_PROC_DEACTIVATE); 1663 spa->spa_proc_state = SPA_PROC_GONE; 1664 spa->spa_proc = &p0; 1665 cv_broadcast(&spa->spa_proc_cv); 1666 CALLB_CPR_EXIT(&cprinfo); /* drops spa_proc_lock */ 1667 1668 mutex_enter(&curproc->p_lock); 1669 lwp_exit(); 1670 } 1671 #endif 1672 1673 extern metaslab_ops_t *metaslab_allocator(spa_t *spa); 1674 1675 /* 1676 * Activate an uninitialized pool. 
1677 */ 1678 static void 1679 spa_activate(spa_t *spa, spa_mode_t mode) 1680 { 1681 metaslab_ops_t *msp = metaslab_allocator(spa); 1682 ASSERT(spa->spa_state == POOL_STATE_UNINITIALIZED); 1683 1684 spa->spa_state = POOL_STATE_ACTIVE; 1685 spa->spa_mode = mode; 1686 spa->spa_read_spacemaps = spa_mode_readable_spacemaps; 1687 1688 spa->spa_normal_class = metaslab_class_create(spa, msp); 1689 spa->spa_log_class = metaslab_class_create(spa, msp); 1690 spa->spa_embedded_log_class = metaslab_class_create(spa, msp); 1691 spa->spa_special_class = metaslab_class_create(spa, msp); 1692 spa->spa_dedup_class = metaslab_class_create(spa, msp); 1693 1694 /* Try to create a covering process */ 1695 mutex_enter(&spa->spa_proc_lock); 1696 ASSERT(spa->spa_proc_state == SPA_PROC_NONE); 1697 ASSERT(spa->spa_proc == &p0); 1698 spa->spa_did = 0; 1699 1700 #ifdef HAVE_SPA_THREAD 1701 /* Only create a process if we're going to be around a while. */ 1702 if (spa_create_process && strcmp(spa->spa_name, TRYIMPORT_NAME) != 0) { 1703 if (newproc(spa_thread, (caddr_t)spa, syscid, maxclsyspri, 1704 NULL, 0) == 0) { 1705 spa->spa_proc_state = SPA_PROC_CREATED; 1706 while (spa->spa_proc_state == SPA_PROC_CREATED) { 1707 cv_wait(&spa->spa_proc_cv, 1708 &spa->spa_proc_lock); 1709 } 1710 ASSERT(spa->spa_proc_state == SPA_PROC_ACTIVE); 1711 ASSERT(spa->spa_proc != &p0); 1712 ASSERT(spa->spa_did != 0); 1713 } else { 1714 #ifdef _KERNEL 1715 cmn_err(CE_WARN, 1716 "Couldn't create process for zfs pool \"%s\"\n", 1717 spa->spa_name); 1718 #endif 1719 } 1720 } 1721 #endif /* HAVE_SPA_THREAD */ 1722 mutex_exit(&spa->spa_proc_lock); 1723 1724 /* If we didn't create a process, we need to create our taskqs. */ 1725 if (spa->spa_proc == &p0) { 1726 spa_create_zio_taskqs(spa); 1727 } 1728 1729 for (size_t i = 0; i < TXG_SIZE; i++) { 1730 spa->spa_txg_zio[i] = zio_root(spa, NULL, NULL, 1731 ZIO_FLAG_CANFAIL); 1732 } 1733 1734 list_create(&spa->spa_config_dirty_list, sizeof (vdev_t), 1735 offsetof(vdev_t, vdev_config_dirty_node)); 1736 list_create(&spa->spa_evicting_os_list, sizeof (objset_t), 1737 offsetof(objset_t, os_evicting_node)); 1738 list_create(&spa->spa_state_dirty_list, sizeof (vdev_t), 1739 offsetof(vdev_t, vdev_state_dirty_node)); 1740 1741 txg_list_create(&spa->spa_vdev_txg_list, spa, 1742 offsetof(struct vdev, vdev_txg_node)); 1743 1744 avl_create(&spa->spa_errlist_scrub, 1745 spa_error_entry_compare, sizeof (spa_error_entry_t), 1746 offsetof(spa_error_entry_t, se_avl)); 1747 avl_create(&spa->spa_errlist_last, 1748 spa_error_entry_compare, sizeof (spa_error_entry_t), 1749 offsetof(spa_error_entry_t, se_avl)); 1750 avl_create(&spa->spa_errlist_healed, 1751 spa_error_entry_compare, sizeof (spa_error_entry_t), 1752 offsetof(spa_error_entry_t, se_avl)); 1753 1754 spa_activate_os(spa); 1755 1756 spa_keystore_init(&spa->spa_keystore); 1757 1758 /* 1759 * This taskq is used to perform zvol-minor-related tasks 1760 * asynchronously. This has several advantages, including easy 1761 * resolution of various deadlocks. 1762 * 1763 * The taskq must be single threaded to ensure tasks are always 1764 * processed in the order in which they were dispatched. 1765 * 1766 * A taskq per pool allows one to keep the pools independent. 1767 * This way if one pool is suspended, it will not impact another. 1768 * 1769 * The preferred location to dispatch a zvol minor task is a sync 1770 * task. In this context, there is easy access to the spa_t and minimal 1771 * error handling is required because the sync task must succeed. 
1772 */ 1773 spa->spa_zvol_taskq = taskq_create("z_zvol", 1, defclsyspri, 1774 1, INT_MAX, 0); 1775 1776 /* 1777 * The taskq to preload metaslabs. 1778 */ 1779 spa->spa_metaslab_taskq = taskq_create("z_metaslab", 1780 metaslab_preload_pct, maxclsyspri, 1, INT_MAX, 1781 TASKQ_DYNAMIC | TASKQ_THREADS_CPU_PCT); 1782 1783 /* 1784 * Taskq dedicated to prefetcher threads: this is used to prevent the 1785 * pool traverse code from monopolizing the global (and limited) 1786 * system_taskq by inappropriately scheduling long running tasks on it. 1787 */ 1788 spa->spa_prefetch_taskq = taskq_create("z_prefetch", 100, 1789 defclsyspri, 1, INT_MAX, TASKQ_DYNAMIC | TASKQ_THREADS_CPU_PCT); 1790 1791 /* 1792 * The taskq to upgrade datasets in this pool. Currently used by 1793 * feature SPA_FEATURE_USEROBJ_ACCOUNTING/SPA_FEATURE_PROJECT_QUOTA. 1794 */ 1795 spa->spa_upgrade_taskq = taskq_create("z_upgrade", 100, 1796 defclsyspri, 1, INT_MAX, TASKQ_DYNAMIC | TASKQ_THREADS_CPU_PCT); 1797 } 1798 1799 /* 1800 * Opposite of spa_activate(). 1801 */ 1802 static void 1803 spa_deactivate(spa_t *spa) 1804 { 1805 ASSERT(spa->spa_sync_on == B_FALSE); 1806 ASSERT(spa->spa_dsl_pool == NULL); 1807 ASSERT(spa->spa_root_vdev == NULL); 1808 ASSERT(spa->spa_async_zio_root == NULL); 1809 ASSERT(spa->spa_state != POOL_STATE_UNINITIALIZED); 1810 1811 spa_evicting_os_wait(spa); 1812 1813 if (spa->spa_zvol_taskq) { 1814 taskq_destroy(spa->spa_zvol_taskq); 1815 spa->spa_zvol_taskq = NULL; 1816 } 1817 1818 if (spa->spa_metaslab_taskq) { 1819 taskq_destroy(spa->spa_metaslab_taskq); 1820 spa->spa_metaslab_taskq = NULL; 1821 } 1822 1823 if (spa->spa_prefetch_taskq) { 1824 taskq_destroy(spa->spa_prefetch_taskq); 1825 spa->spa_prefetch_taskq = NULL; 1826 } 1827 1828 if (spa->spa_upgrade_taskq) { 1829 taskq_destroy(spa->spa_upgrade_taskq); 1830 spa->spa_upgrade_taskq = NULL; 1831 } 1832 1833 txg_list_destroy(&spa->spa_vdev_txg_list); 1834 1835 list_destroy(&spa->spa_config_dirty_list); 1836 list_destroy(&spa->spa_evicting_os_list); 1837 list_destroy(&spa->spa_state_dirty_list); 1838 1839 taskq_cancel_id(system_delay_taskq, spa->spa_deadman_tqid); 1840 1841 for (int t = 0; t < ZIO_TYPES; t++) { 1842 for (int q = 0; q < ZIO_TASKQ_TYPES; q++) { 1843 spa_taskqs_fini(spa, t, q); 1844 } 1845 } 1846 1847 for (size_t i = 0; i < TXG_SIZE; i++) { 1848 ASSERT3P(spa->spa_txg_zio[i], !=, NULL); 1849 VERIFY0(zio_wait(spa->spa_txg_zio[i])); 1850 spa->spa_txg_zio[i] = NULL; 1851 } 1852 1853 metaslab_class_destroy(spa->spa_normal_class); 1854 spa->spa_normal_class = NULL; 1855 1856 metaslab_class_destroy(spa->spa_log_class); 1857 spa->spa_log_class = NULL; 1858 1859 metaslab_class_destroy(spa->spa_embedded_log_class); 1860 spa->spa_embedded_log_class = NULL; 1861 1862 metaslab_class_destroy(spa->spa_special_class); 1863 spa->spa_special_class = NULL; 1864 1865 metaslab_class_destroy(spa->spa_dedup_class); 1866 spa->spa_dedup_class = NULL; 1867 1868 /* 1869 * If this was part of an import or the open otherwise failed, we may 1870 * still have errors left in the queues. Empty them just in case. 
1871 */ 1872 spa_errlog_drain(spa); 1873 avl_destroy(&spa->spa_errlist_scrub); 1874 avl_destroy(&spa->spa_errlist_last); 1875 avl_destroy(&spa->spa_errlist_healed); 1876 1877 spa_keystore_fini(&spa->spa_keystore); 1878 1879 spa->spa_state = POOL_STATE_UNINITIALIZED; 1880 1881 mutex_enter(&spa->spa_proc_lock); 1882 if (spa->spa_proc_state != SPA_PROC_NONE) { 1883 ASSERT(spa->spa_proc_state == SPA_PROC_ACTIVE); 1884 spa->spa_proc_state = SPA_PROC_DEACTIVATE; 1885 cv_broadcast(&spa->spa_proc_cv); 1886 while (spa->spa_proc_state == SPA_PROC_DEACTIVATE) { 1887 ASSERT(spa->spa_proc != &p0); 1888 cv_wait(&spa->spa_proc_cv, &spa->spa_proc_lock); 1889 } 1890 ASSERT(spa->spa_proc_state == SPA_PROC_GONE); 1891 spa->spa_proc_state = SPA_PROC_NONE; 1892 } 1893 ASSERT(spa->spa_proc == &p0); 1894 mutex_exit(&spa->spa_proc_lock); 1895 1896 /* 1897 * We want to make sure spa_thread() has actually exited the ZFS 1898 * module, so that the module can't be unloaded out from underneath 1899 * it. 1900 */ 1901 if (spa->spa_did != 0) { 1902 thread_join(spa->spa_did); 1903 spa->spa_did = 0; 1904 } 1905 1906 spa_deactivate_os(spa); 1907 1908 } 1909 1910 /* 1911 * Verify a pool configuration, and construct the vdev tree appropriately. This 1912 * will create all the necessary vdevs in the appropriate layout, with each vdev 1913 * in the CLOSED state. This will prep the pool before open/creation/import. 1914 * All vdev validation is done by the vdev_alloc() routine. 1915 */ 1916 int 1917 spa_config_parse(spa_t *spa, vdev_t **vdp, nvlist_t *nv, vdev_t *parent, 1918 uint_t id, int atype) 1919 { 1920 nvlist_t **child; 1921 uint_t children; 1922 int error; 1923 1924 if ((error = vdev_alloc(spa, vdp, nv, parent, id, atype)) != 0) 1925 return (error); 1926 1927 if ((*vdp)->vdev_ops->vdev_op_leaf) 1928 return (0); 1929 1930 error = nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN, 1931 &child, &children); 1932 1933 if (error == ENOENT) 1934 return (0); 1935 1936 if (error) { 1937 vdev_free(*vdp); 1938 *vdp = NULL; 1939 return (SET_ERROR(EINVAL)); 1940 } 1941 1942 for (int c = 0; c < children; c++) { 1943 vdev_t *vd; 1944 if ((error = spa_config_parse(spa, &vd, child[c], *vdp, c, 1945 atype)) != 0) { 1946 vdev_free(*vdp); 1947 *vdp = NULL; 1948 return (error); 1949 } 1950 } 1951 1952 ASSERT(*vdp != NULL); 1953 1954 return (0); 1955 } 1956 1957 static boolean_t 1958 spa_should_flush_logs_on_unload(spa_t *spa) 1959 { 1960 if (!spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP)) 1961 return (B_FALSE); 1962 1963 if (!spa_writeable(spa)) 1964 return (B_FALSE); 1965 1966 if (!spa->spa_sync_on) 1967 return (B_FALSE); 1968 1969 if (spa_state(spa) != POOL_STATE_EXPORTED) 1970 return (B_FALSE); 1971 1972 if (zfs_keep_log_spacemaps_at_export) 1973 return (B_FALSE); 1974 1975 return (B_TRUE); 1976 } 1977 1978 /* 1979 * Opens a transaction that will set the flag that will instruct 1980 * spa_sync to attempt to flush all the metaslabs for that txg. 
1981 */ 1982 static void 1983 spa_unload_log_sm_flush_all(spa_t *spa) 1984 { 1985 dmu_tx_t *tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir); 1986 VERIFY0(dmu_tx_assign(tx, TXG_WAIT)); 1987 1988 ASSERT3U(spa->spa_log_flushall_txg, ==, 0); 1989 spa->spa_log_flushall_txg = dmu_tx_get_txg(tx); 1990 1991 dmu_tx_commit(tx); 1992 txg_wait_synced(spa_get_dsl(spa), spa->spa_log_flushall_txg); 1993 } 1994 1995 static void 1996 spa_unload_log_sm_metadata(spa_t *spa) 1997 { 1998 void *cookie = NULL; 1999 spa_log_sm_t *sls; 2000 log_summary_entry_t *e; 2001 2002 while ((sls = avl_destroy_nodes(&spa->spa_sm_logs_by_txg, 2003 &cookie)) != NULL) { 2004 VERIFY0(sls->sls_mscount); 2005 kmem_free(sls, sizeof (spa_log_sm_t)); 2006 } 2007 2008 while ((e = list_remove_head(&spa->spa_log_summary)) != NULL) { 2009 VERIFY0(e->lse_mscount); 2010 kmem_free(e, sizeof (log_summary_entry_t)); 2011 } 2012 2013 spa->spa_unflushed_stats.sus_nblocks = 0; 2014 spa->spa_unflushed_stats.sus_memused = 0; 2015 spa->spa_unflushed_stats.sus_blocklimit = 0; 2016 } 2017 2018 static void 2019 spa_destroy_aux_threads(spa_t *spa) 2020 { 2021 if (spa->spa_condense_zthr != NULL) { 2022 zthr_destroy(spa->spa_condense_zthr); 2023 spa->spa_condense_zthr = NULL; 2024 } 2025 if (spa->spa_checkpoint_discard_zthr != NULL) { 2026 zthr_destroy(spa->spa_checkpoint_discard_zthr); 2027 spa->spa_checkpoint_discard_zthr = NULL; 2028 } 2029 if (spa->spa_livelist_delete_zthr != NULL) { 2030 zthr_destroy(spa->spa_livelist_delete_zthr); 2031 spa->spa_livelist_delete_zthr = NULL; 2032 } 2033 if (spa->spa_livelist_condense_zthr != NULL) { 2034 zthr_destroy(spa->spa_livelist_condense_zthr); 2035 spa->spa_livelist_condense_zthr = NULL; 2036 } 2037 if (spa->spa_raidz_expand_zthr != NULL) { 2038 zthr_destroy(spa->spa_raidz_expand_zthr); 2039 spa->spa_raidz_expand_zthr = NULL; 2040 } 2041 } 2042 2043 /* 2044 * Opposite of spa_load(). 2045 */ 2046 static void 2047 spa_unload(spa_t *spa) 2048 { 2049 ASSERT(MUTEX_HELD(&spa_namespace_lock) || 2050 spa->spa_export_thread == curthread); 2051 ASSERT(spa_state(spa) != POOL_STATE_UNINITIALIZED); 2052 2053 spa_import_progress_remove(spa_guid(spa)); 2054 spa_load_note(spa, "UNLOADING"); 2055 2056 spa_wake_waiters(spa); 2057 2058 /* 2059 * If we have set the spa_final_txg, we have already performed the 2060 * tasks below in spa_export_common(). We should not redo it here since 2061 * we delay the final TXGs beyond what spa_final_txg is set at. 2062 */ 2063 if (spa->spa_final_txg == UINT64_MAX) { 2064 /* 2065 * If the log space map feature is enabled and the pool is 2066 * getting exported (but not destroyed), we want to spend some 2067 * time flushing as many metaslabs as we can in an attempt to 2068 * destroy log space maps and save import time. 2069 */ 2070 if (spa_should_flush_logs_on_unload(spa)) 2071 spa_unload_log_sm_flush_all(spa); 2072 2073 /* 2074 * Stop async tasks. 2075 */ 2076 spa_async_suspend(spa); 2077 2078 if (spa->spa_root_vdev) { 2079 vdev_t *root_vdev = spa->spa_root_vdev; 2080 vdev_initialize_stop_all(root_vdev, 2081 VDEV_INITIALIZE_ACTIVE); 2082 vdev_trim_stop_all(root_vdev, VDEV_TRIM_ACTIVE); 2083 vdev_autotrim_stop_all(spa); 2084 vdev_rebuild_stop_all(spa); 2085 l2arc_spa_rebuild_stop(spa); 2086 } 2087 } 2088 2089 /* 2090 * Stop syncing. 2091 */ 2092 if (spa->spa_sync_on) { 2093 txg_sync_stop(spa->spa_dsl_pool); 2094 spa->spa_sync_on = B_FALSE; 2095 } 2096 2097 /* 2098 * This ensures that there is no async metaslab prefetching 2099 * while we attempt to unload the spa. 
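 * taskq_wait() blocks until every preload task already dispatched to the
 * taskq has completed.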
2100 */ 2101 taskq_wait(spa->spa_metaslab_taskq); 2102 2103 if (spa->spa_mmp.mmp_thread) 2104 mmp_thread_stop(spa); 2105 2106 /* 2107 * Wait for any outstanding async I/O to complete. 2108 */ 2109 if (spa->spa_async_zio_root != NULL) { 2110 for (int i = 0; i < max_ncpus; i++) 2111 (void) zio_wait(spa->spa_async_zio_root[i]); 2112 kmem_free(spa->spa_async_zio_root, max_ncpus * sizeof (void *)); 2113 spa->spa_async_zio_root = NULL; 2114 } 2115 2116 if (spa->spa_vdev_removal != NULL) { 2117 spa_vdev_removal_destroy(spa->spa_vdev_removal); 2118 spa->spa_vdev_removal = NULL; 2119 } 2120 2121 spa_destroy_aux_threads(spa); 2122 2123 spa_condense_fini(spa); 2124 2125 bpobj_close(&spa->spa_deferred_bpobj); 2126 2127 spa_config_enter(spa, SCL_ALL, spa, RW_WRITER); 2128 2129 /* 2130 * Close all vdevs. 2131 */ 2132 if (spa->spa_root_vdev) 2133 vdev_free(spa->spa_root_vdev); 2134 ASSERT(spa->spa_root_vdev == NULL); 2135 2136 /* 2137 * Close the dsl pool. 2138 */ 2139 if (spa->spa_dsl_pool) { 2140 dsl_pool_close(spa->spa_dsl_pool); 2141 spa->spa_dsl_pool = NULL; 2142 spa->spa_meta_objset = NULL; 2143 } 2144 2145 ddt_unload(spa); 2146 brt_unload(spa); 2147 spa_unload_log_sm_metadata(spa); 2148 2149 /* 2150 * Drop and purge level 2 cache 2151 */ 2152 spa_l2cache_drop(spa); 2153 2154 if (spa->spa_spares.sav_vdevs) { 2155 for (int i = 0; i < spa->spa_spares.sav_count; i++) 2156 vdev_free(spa->spa_spares.sav_vdevs[i]); 2157 kmem_free(spa->spa_spares.sav_vdevs, 2158 spa->spa_spares.sav_count * sizeof (void *)); 2159 spa->spa_spares.sav_vdevs = NULL; 2160 } 2161 if (spa->spa_spares.sav_config) { 2162 nvlist_free(spa->spa_spares.sav_config); 2163 spa->spa_spares.sav_config = NULL; 2164 } 2165 spa->spa_spares.sav_count = 0; 2166 2167 if (spa->spa_l2cache.sav_vdevs) { 2168 for (int i = 0; i < spa->spa_l2cache.sav_count; i++) { 2169 vdev_clear_stats(spa->spa_l2cache.sav_vdevs[i]); 2170 vdev_free(spa->spa_l2cache.sav_vdevs[i]); 2171 } 2172 kmem_free(spa->spa_l2cache.sav_vdevs, 2173 spa->spa_l2cache.sav_count * sizeof (void *)); 2174 spa->spa_l2cache.sav_vdevs = NULL; 2175 } 2176 if (spa->spa_l2cache.sav_config) { 2177 nvlist_free(spa->spa_l2cache.sav_config); 2178 spa->spa_l2cache.sav_config = NULL; 2179 } 2180 spa->spa_l2cache.sav_count = 0; 2181 2182 spa->spa_async_suspended = 0; 2183 2184 spa->spa_indirect_vdevs_loaded = B_FALSE; 2185 2186 if (spa->spa_comment != NULL) { 2187 spa_strfree(spa->spa_comment); 2188 spa->spa_comment = NULL; 2189 } 2190 if (spa->spa_compatibility != NULL) { 2191 spa_strfree(spa->spa_compatibility); 2192 spa->spa_compatibility = NULL; 2193 } 2194 2195 spa->spa_raidz_expand = NULL; 2196 2197 spa_config_exit(spa, SCL_ALL, spa); 2198 } 2199 2200 /* 2201 * Load (or re-load) the current list of vdevs describing the active spares for 2202 * this pool. When this is called, we have some form of basic information in 2203 * 'spa_spares.sav_config'. We parse this into vdevs, try to open them, and 2204 * then re-generate a more complete list including status information. 2205 */ 2206 void 2207 spa_load_spares(spa_t *spa) 2208 { 2209 nvlist_t **spares; 2210 uint_t nspares; 2211 int i; 2212 vdev_t *vd, *tvd; 2213 2214 #ifndef _KERNEL 2215 /* 2216 * zdb opens both the current state of the pool and the 2217 * checkpointed state (if present), with a different spa_t. 2218 * 2219 * As spare vdevs are shared among open pools, we skip loading 2220 * them when we load the checkpointed state of the pool. 
2221 */ 2222 if (!spa_writeable(spa)) 2223 return; 2224 #endif 2225 2226 ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL); 2227 2228 /* 2229 * First, close and free any existing spare vdevs. 2230 */ 2231 if (spa->spa_spares.sav_vdevs) { 2232 for (i = 0; i < spa->spa_spares.sav_count; i++) { 2233 vd = spa->spa_spares.sav_vdevs[i]; 2234 2235 /* Undo the call to spa_activate() below */ 2236 if ((tvd = spa_lookup_by_guid(spa, vd->vdev_guid, 2237 B_FALSE)) != NULL && tvd->vdev_isspare) 2238 spa_spare_remove(tvd); 2239 vdev_close(vd); 2240 vdev_free(vd); 2241 } 2242 2243 kmem_free(spa->spa_spares.sav_vdevs, 2244 spa->spa_spares.sav_count * sizeof (void *)); 2245 } 2246 2247 if (spa->spa_spares.sav_config == NULL) 2248 nspares = 0; 2249 else 2250 VERIFY0(nvlist_lookup_nvlist_array(spa->spa_spares.sav_config, 2251 ZPOOL_CONFIG_SPARES, &spares, &nspares)); 2252 2253 spa->spa_spares.sav_count = (int)nspares; 2254 spa->spa_spares.sav_vdevs = NULL; 2255 2256 if (nspares == 0) 2257 return; 2258 2259 /* 2260 * Construct the array of vdevs, opening them to get status in the 2261 * process. For each spare, there is potentially two different vdev_t 2262 * structures associated with it: one in the list of spares (used only 2263 * for basic validation purposes) and one in the active vdev 2264 * configuration (if it's spared in). During this phase we open and 2265 * validate each vdev on the spare list. If the vdev also exists in the 2266 * active configuration, then we also mark this vdev as an active spare. 2267 */ 2268 spa->spa_spares.sav_vdevs = kmem_zalloc(nspares * sizeof (void *), 2269 KM_SLEEP); 2270 for (i = 0; i < spa->spa_spares.sav_count; i++) { 2271 VERIFY(spa_config_parse(spa, &vd, spares[i], NULL, 0, 2272 VDEV_ALLOC_SPARE) == 0); 2273 ASSERT(vd != NULL); 2274 2275 spa->spa_spares.sav_vdevs[i] = vd; 2276 2277 if ((tvd = spa_lookup_by_guid(spa, vd->vdev_guid, 2278 B_FALSE)) != NULL) { 2279 if (!tvd->vdev_isspare) 2280 spa_spare_add(tvd); 2281 2282 /* 2283 * We only mark the spare active if we were successfully 2284 * able to load the vdev. Otherwise, importing a pool 2285 * with a bad active spare would result in strange 2286 * behavior, because multiple pool would think the spare 2287 * is actively in use. 2288 * 2289 * There is a vulnerability here to an equally bizarre 2290 * circumstance, where a dead active spare is later 2291 * brought back to life (onlined or otherwise). Given 2292 * the rarity of this scenario, and the extra complexity 2293 * it adds, we ignore the possibility. 2294 */ 2295 if (!vdev_is_dead(tvd)) 2296 spa_spare_activate(tvd); 2297 } 2298 2299 vd->vdev_top = vd; 2300 vd->vdev_aux = &spa->spa_spares; 2301 2302 if (vdev_open(vd) != 0) 2303 continue; 2304 2305 if (vdev_validate_aux(vd) == 0) 2306 spa_spare_add(vd); 2307 } 2308 2309 /* 2310 * Recompute the stashed list of spares, with status information 2311 * this time. 
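 * The nvlist array is rebuilt from the now-open spare vdevs via
 * vdev_config_generate().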
2312 */ 2313 fnvlist_remove(spa->spa_spares.sav_config, ZPOOL_CONFIG_SPARES); 2314 2315 spares = kmem_alloc(spa->spa_spares.sav_count * sizeof (void *), 2316 KM_SLEEP); 2317 for (i = 0; i < spa->spa_spares.sav_count; i++) 2318 spares[i] = vdev_config_generate(spa, 2319 spa->spa_spares.sav_vdevs[i], B_TRUE, VDEV_CONFIG_SPARE); 2320 fnvlist_add_nvlist_array(spa->spa_spares.sav_config, 2321 ZPOOL_CONFIG_SPARES, (const nvlist_t * const *)spares, 2322 spa->spa_spares.sav_count); 2323 for (i = 0; i < spa->spa_spares.sav_count; i++) 2324 nvlist_free(spares[i]); 2325 kmem_free(spares, spa->spa_spares.sav_count * sizeof (void *)); 2326 } 2327 2328 /* 2329 * Load (or re-load) the current list of vdevs describing the active l2cache for 2330 * this pool. When this is called, we have some form of basic information in 2331 * 'spa_l2cache.sav_config'. We parse this into vdevs, try to open them, and 2332 * then re-generate a more complete list including status information. 2333 * Devices which are already active have their details maintained, and are 2334 * not re-opened. 2335 */ 2336 void 2337 spa_load_l2cache(spa_t *spa) 2338 { 2339 nvlist_t **l2cache = NULL; 2340 uint_t nl2cache; 2341 int i, j, oldnvdevs; 2342 uint64_t guid; 2343 vdev_t *vd, **oldvdevs, **newvdevs; 2344 spa_aux_vdev_t *sav = &spa->spa_l2cache; 2345 2346 #ifndef _KERNEL 2347 /* 2348 * zdb opens both the current state of the pool and the 2349 * checkpointed state (if present), with a different spa_t. 2350 * 2351 * As L2 caches are part of the ARC which is shared among open 2352 * pools, we skip loading them when we load the checkpointed 2353 * state of the pool. 2354 */ 2355 if (!spa_writeable(spa)) 2356 return; 2357 #endif 2358 2359 ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL); 2360 2361 oldvdevs = sav->sav_vdevs; 2362 oldnvdevs = sav->sav_count; 2363 sav->sav_vdevs = NULL; 2364 sav->sav_count = 0; 2365 2366 if (sav->sav_config == NULL) { 2367 nl2cache = 0; 2368 newvdevs = NULL; 2369 goto out; 2370 } 2371 2372 VERIFY0(nvlist_lookup_nvlist_array(sav->sav_config, 2373 ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache)); 2374 newvdevs = kmem_alloc(nl2cache * sizeof (void *), KM_SLEEP); 2375 2376 /* 2377 * Process new nvlist of vdevs. 2378 */ 2379 for (i = 0; i < nl2cache; i++) { 2380 guid = fnvlist_lookup_uint64(l2cache[i], ZPOOL_CONFIG_GUID); 2381 2382 newvdevs[i] = NULL; 2383 for (j = 0; j < oldnvdevs; j++) { 2384 vd = oldvdevs[j]; 2385 if (vd != NULL && guid == vd->vdev_guid) { 2386 /* 2387 * Retain previous vdev for add/remove ops. 2388 */ 2389 newvdevs[i] = vd; 2390 oldvdevs[j] = NULL; 2391 break; 2392 } 2393 } 2394 2395 if (newvdevs[i] == NULL) { 2396 /* 2397 * Create new vdev 2398 */ 2399 VERIFY(spa_config_parse(spa, &vd, l2cache[i], NULL, 0, 2400 VDEV_ALLOC_L2CACHE) == 0); 2401 ASSERT(vd != NULL); 2402 newvdevs[i] = vd; 2403 2404 /* 2405 * Commit this vdev as an l2cache device, 2406 * even if it fails to open. 2407 */ 2408 spa_l2cache_add(vd); 2409 2410 vd->vdev_top = vd; 2411 vd->vdev_aux = sav; 2412 2413 spa_l2cache_activate(vd); 2414 2415 if (vdev_open(vd) != 0) 2416 continue; 2417 2418 (void) vdev_validate_aux(vd); 2419 2420 if (!vdev_is_dead(vd)) 2421 l2arc_add_vdev(spa, vd); 2422 2423 /* 2424 * Upon cache device addition to a pool or pool 2425 * creation with a cache device or if the header 2426 * of the device is invalid we issue an async 2427 * TRIM command for the whole device which will 2428 * execute if l2arc_trim_ahead > 0. 
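 * The request is serviced asynchronously by the spa async task thread.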
2429 */ 2430 spa_async_request(spa, SPA_ASYNC_L2CACHE_TRIM); 2431 } 2432 } 2433 2434 sav->sav_vdevs = newvdevs; 2435 sav->sav_count = (int)nl2cache; 2436 2437 /* 2438 * Recompute the stashed list of l2cache devices, with status 2439 * information this time. 2440 */ 2441 fnvlist_remove(sav->sav_config, ZPOOL_CONFIG_L2CACHE); 2442 2443 if (sav->sav_count > 0) 2444 l2cache = kmem_alloc(sav->sav_count * sizeof (void *), 2445 KM_SLEEP); 2446 for (i = 0; i < sav->sav_count; i++) 2447 l2cache[i] = vdev_config_generate(spa, 2448 sav->sav_vdevs[i], B_TRUE, VDEV_CONFIG_L2CACHE); 2449 fnvlist_add_nvlist_array(sav->sav_config, ZPOOL_CONFIG_L2CACHE, 2450 (const nvlist_t * const *)l2cache, sav->sav_count); 2451 2452 out: 2453 /* 2454 * Purge vdevs that were dropped 2455 */ 2456 if (oldvdevs) { 2457 for (i = 0; i < oldnvdevs; i++) { 2458 uint64_t pool; 2459 2460 vd = oldvdevs[i]; 2461 if (vd != NULL) { 2462 ASSERT(vd->vdev_isl2cache); 2463 2464 if (spa_l2cache_exists(vd->vdev_guid, &pool) && 2465 pool != 0ULL && l2arc_vdev_present(vd)) 2466 l2arc_remove_vdev(vd); 2467 vdev_clear_stats(vd); 2468 vdev_free(vd); 2469 } 2470 } 2471 2472 kmem_free(oldvdevs, oldnvdevs * sizeof (void *)); 2473 } 2474 2475 for (i = 0; i < sav->sav_count; i++) 2476 nvlist_free(l2cache[i]); 2477 if (sav->sav_count) 2478 kmem_free(l2cache, sav->sav_count * sizeof (void *)); 2479 } 2480 2481 static int 2482 load_nvlist(spa_t *spa, uint64_t obj, nvlist_t **value) 2483 { 2484 dmu_buf_t *db; 2485 char *packed = NULL; 2486 size_t nvsize = 0; 2487 int error; 2488 *value = NULL; 2489 2490 error = dmu_bonus_hold(spa->spa_meta_objset, obj, FTAG, &db); 2491 if (error) 2492 return (error); 2493 2494 nvsize = *(uint64_t *)db->db_data; 2495 dmu_buf_rele(db, FTAG); 2496 2497 packed = vmem_alloc(nvsize, KM_SLEEP); 2498 error = dmu_read(spa->spa_meta_objset, obj, 0, nvsize, packed, 2499 DMU_READ_PREFETCH); 2500 if (error == 0) 2501 error = nvlist_unpack(packed, nvsize, value, 0); 2502 vmem_free(packed, nvsize); 2503 2504 return (error); 2505 } 2506 2507 /* 2508 * Concrete top-level vdevs that are not missing and are not logs. At every 2509 * spa_sync we write new uberblocks to at least SPA_SYNC_MIN_VDEVS core tvds. 2510 */ 2511 static uint64_t 2512 spa_healthy_core_tvds(spa_t *spa) 2513 { 2514 vdev_t *rvd = spa->spa_root_vdev; 2515 uint64_t tvds = 0; 2516 2517 for (uint64_t i = 0; i < rvd->vdev_children; i++) { 2518 vdev_t *vd = rvd->vdev_child[i]; 2519 if (vd->vdev_islog) 2520 continue; 2521 if (vdev_is_concrete(vd) && !vdev_is_dead(vd)) 2522 tvds++; 2523 } 2524 2525 return (tvds); 2526 } 2527 2528 /* 2529 * Checks to see if the given vdev could not be opened, in which case we post a 2530 * sysevent to notify the autoreplace code that the device has been removed. 2531 */ 2532 static void 2533 spa_check_removed(vdev_t *vd) 2534 { 2535 for (uint64_t c = 0; c < vd->vdev_children; c++) 2536 spa_check_removed(vd->vdev_child[c]); 2537 2538 if (vd->vdev_ops->vdev_op_leaf && vdev_is_dead(vd) && 2539 vdev_is_concrete(vd)) { 2540 zfs_post_autoreplace(vd->vdev_spa, vd); 2541 spa_event_notify(vd->vdev_spa, vd, NULL, ESC_ZFS_VDEV_CHECK); 2542 } 2543 } 2544 2545 static int 2546 spa_check_for_missing_logs(spa_t *spa) 2547 { 2548 vdev_t *rvd = spa->spa_root_vdev; 2549 2550 /* 2551 * If we're doing a normal import, then build up any additional 2552 * diagnostic information about missing log devices. 2553 * We'll pass this up to the user for further processing. 
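 * If ZFS_IMPORT_MISSING_LOG was specified instead, we do not fail the
 * import: the else branch below notes the missing devices, sets the log
 * state to SPA_LOG_CLEAR, and drops the ZIL.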
2554 */ 2555 if (!(spa->spa_import_flags & ZFS_IMPORT_MISSING_LOG)) { 2556 nvlist_t **child, *nv; 2557 uint64_t idx = 0; 2558 2559 child = kmem_alloc(rvd->vdev_children * sizeof (nvlist_t *), 2560 KM_SLEEP); 2561 nv = fnvlist_alloc(); 2562 2563 for (uint64_t c = 0; c < rvd->vdev_children; c++) { 2564 vdev_t *tvd = rvd->vdev_child[c]; 2565 2566 /* 2567 * We consider a device as missing only if it failed 2568 * to open (i.e. offline or faulted is not considered 2569 * as missing). 2570 */ 2571 if (tvd->vdev_islog && 2572 tvd->vdev_state == VDEV_STATE_CANT_OPEN) { 2573 child[idx++] = vdev_config_generate(spa, tvd, 2574 B_FALSE, VDEV_CONFIG_MISSING); 2575 } 2576 } 2577 2578 if (idx > 0) { 2579 fnvlist_add_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN, 2580 (const nvlist_t * const *)child, idx); 2581 fnvlist_add_nvlist(spa->spa_load_info, 2582 ZPOOL_CONFIG_MISSING_DEVICES, nv); 2583 2584 for (uint64_t i = 0; i < idx; i++) 2585 nvlist_free(child[i]); 2586 } 2587 nvlist_free(nv); 2588 kmem_free(child, rvd->vdev_children * sizeof (char **)); 2589 2590 if (idx > 0) { 2591 spa_load_failed(spa, "some log devices are missing"); 2592 vdev_dbgmsg_print_tree(rvd, 2); 2593 return (SET_ERROR(ENXIO)); 2594 } 2595 } else { 2596 for (uint64_t c = 0; c < rvd->vdev_children; c++) { 2597 vdev_t *tvd = rvd->vdev_child[c]; 2598 2599 if (tvd->vdev_islog && 2600 tvd->vdev_state == VDEV_STATE_CANT_OPEN) { 2601 spa_set_log_state(spa, SPA_LOG_CLEAR); 2602 spa_load_note(spa, "some log devices are " 2603 "missing, ZIL is dropped."); 2604 vdev_dbgmsg_print_tree(rvd, 2); 2605 break; 2606 } 2607 } 2608 } 2609 2610 return (0); 2611 } 2612 2613 /* 2614 * Check for missing log devices 2615 */ 2616 static boolean_t 2617 spa_check_logs(spa_t *spa) 2618 { 2619 boolean_t rv = B_FALSE; 2620 dsl_pool_t *dp = spa_get_dsl(spa); 2621 2622 switch (spa->spa_log_state) { 2623 default: 2624 break; 2625 case SPA_LOG_MISSING: 2626 /* need to recheck in case slog has been restored */ 2627 case SPA_LOG_UNKNOWN: 2628 rv = (dmu_objset_find_dp(dp, dp->dp_root_dir_obj, 2629 zil_check_log_chain, NULL, DS_FIND_CHILDREN) != 0); 2630 if (rv) 2631 spa_set_log_state(spa, SPA_LOG_MISSING); 2632 break; 2633 } 2634 return (rv); 2635 } 2636 2637 /* 2638 * Passivate any log vdevs (note, does not apply to embedded log metaslabs). 2639 */ 2640 static boolean_t 2641 spa_passivate_log(spa_t *spa) 2642 { 2643 vdev_t *rvd = spa->spa_root_vdev; 2644 boolean_t slog_found = B_FALSE; 2645 2646 ASSERT(spa_config_held(spa, SCL_ALLOC, RW_WRITER)); 2647 2648 for (int c = 0; c < rvd->vdev_children; c++) { 2649 vdev_t *tvd = rvd->vdev_child[c]; 2650 2651 if (tvd->vdev_islog) { 2652 ASSERT3P(tvd->vdev_log_mg, ==, NULL); 2653 metaslab_group_passivate(tvd->vdev_mg); 2654 slog_found = B_TRUE; 2655 } 2656 } 2657 2658 return (slog_found); 2659 } 2660 2661 /* 2662 * Activate any log vdevs (note, does not apply to embedded log metaslabs). 
2663 */ 2664 static void 2665 spa_activate_log(spa_t *spa) 2666 { 2667 vdev_t *rvd = spa->spa_root_vdev; 2668 2669 ASSERT(spa_config_held(spa, SCL_ALLOC, RW_WRITER)); 2670 2671 for (int c = 0; c < rvd->vdev_children; c++) { 2672 vdev_t *tvd = rvd->vdev_child[c]; 2673 2674 if (tvd->vdev_islog) { 2675 ASSERT3P(tvd->vdev_log_mg, ==, NULL); 2676 metaslab_group_activate(tvd->vdev_mg); 2677 } 2678 } 2679 } 2680 2681 int 2682 spa_reset_logs(spa_t *spa) 2683 { 2684 int error; 2685 2686 error = dmu_objset_find(spa_name(spa), zil_reset, 2687 NULL, DS_FIND_CHILDREN); 2688 if (error == 0) { 2689 /* 2690 * We successfully offlined the log device, sync out the 2691 * current txg so that the "stubby" block can be removed 2692 * by zil_sync(). 2693 */ 2694 txg_wait_synced(spa->spa_dsl_pool, 0); 2695 } 2696 return (error); 2697 } 2698 2699 static void 2700 spa_aux_check_removed(spa_aux_vdev_t *sav) 2701 { 2702 for (int i = 0; i < sav->sav_count; i++) 2703 spa_check_removed(sav->sav_vdevs[i]); 2704 } 2705 2706 void 2707 spa_claim_notify(zio_t *zio) 2708 { 2709 spa_t *spa = zio->io_spa; 2710 2711 if (zio->io_error) 2712 return; 2713 2714 mutex_enter(&spa->spa_props_lock); /* any mutex will do */ 2715 if (spa->spa_claim_max_txg < BP_GET_LOGICAL_BIRTH(zio->io_bp)) 2716 spa->spa_claim_max_txg = BP_GET_LOGICAL_BIRTH(zio->io_bp); 2717 mutex_exit(&spa->spa_props_lock); 2718 } 2719 2720 typedef struct spa_load_error { 2721 boolean_t sle_verify_data; 2722 uint64_t sle_meta_count; 2723 uint64_t sle_data_count; 2724 } spa_load_error_t; 2725 2726 static void 2727 spa_load_verify_done(zio_t *zio) 2728 { 2729 blkptr_t *bp = zio->io_bp; 2730 spa_load_error_t *sle = zio->io_private; 2731 dmu_object_type_t type = BP_GET_TYPE(bp); 2732 int error = zio->io_error; 2733 spa_t *spa = zio->io_spa; 2734 2735 abd_free(zio->io_abd); 2736 if (error) { 2737 if ((BP_GET_LEVEL(bp) != 0 || DMU_OT_IS_METADATA(type)) && 2738 type != DMU_OT_INTENT_LOG) 2739 atomic_inc_64(&sle->sle_meta_count); 2740 else 2741 atomic_inc_64(&sle->sle_data_count); 2742 } 2743 2744 mutex_enter(&spa->spa_scrub_lock); 2745 spa->spa_load_verify_bytes -= BP_GET_PSIZE(bp); 2746 cv_broadcast(&spa->spa_scrub_io_cv); 2747 mutex_exit(&spa->spa_scrub_lock); 2748 } 2749 2750 /* 2751 * Maximum number of inflight bytes is the log2 fraction of the arc size. 2752 * By default, we set it to 1/16th of the arc. 2753 */ 2754 static uint_t spa_load_verify_shift = 4; 2755 static int spa_load_verify_metadata = B_TRUE; 2756 static int spa_load_verify_data = B_TRUE; 2757 2758 static int 2759 spa_load_verify_cb(spa_t *spa, zilog_t *zilog, const blkptr_t *bp, 2760 const zbookmark_phys_t *zb, const dnode_phys_t *dnp, void *arg) 2761 { 2762 zio_t *rio = arg; 2763 spa_load_error_t *sle = rio->io_private; 2764 2765 (void) zilog, (void) dnp; 2766 2767 /* 2768 * Note: normally this routine will not be called if 2769 * spa_load_verify_metadata is not set. However, it may be useful 2770 * to manually set the flag after the traversal has begun. 2771 */ 2772 if (!spa_load_verify_metadata) 2773 return (0); 2774 2775 /* 2776 * Sanity check the block pointer in order to detect obvious damage 2777 * before using the contents in subsequent checks or in zio_read(). 2778 * When damaged consider it to be a metadata error since we cannot 2779 * trust the BP_GET_TYPE and BP_GET_LEVEL values. 
2780 */ 2781 if (!zfs_blkptr_verify(spa, bp, BLK_CONFIG_NEEDED, BLK_VERIFY_LOG)) { 2782 atomic_inc_64(&sle->sle_meta_count); 2783 return (0); 2784 } 2785 2786 if (zb->zb_level == ZB_DNODE_LEVEL || BP_IS_HOLE(bp) || 2787 BP_IS_EMBEDDED(bp) || BP_IS_REDACTED(bp)) 2788 return (0); 2789 2790 if (!BP_IS_METADATA(bp) && 2791 (!spa_load_verify_data || !sle->sle_verify_data)) 2792 return (0); 2793 2794 uint64_t maxinflight_bytes = 2795 arc_target_bytes() >> spa_load_verify_shift; 2796 size_t size = BP_GET_PSIZE(bp); 2797 2798 mutex_enter(&spa->spa_scrub_lock); 2799 while (spa->spa_load_verify_bytes >= maxinflight_bytes) 2800 cv_wait(&spa->spa_scrub_io_cv, &spa->spa_scrub_lock); 2801 spa->spa_load_verify_bytes += size; 2802 mutex_exit(&spa->spa_scrub_lock); 2803 2804 zio_nowait(zio_read(rio, spa, bp, abd_alloc_for_io(size, B_FALSE), size, 2805 spa_load_verify_done, rio->io_private, ZIO_PRIORITY_SCRUB, 2806 ZIO_FLAG_SPECULATIVE | ZIO_FLAG_CANFAIL | 2807 ZIO_FLAG_SCRUB | ZIO_FLAG_RAW, zb)); 2808 return (0); 2809 } 2810 2811 static int 2812 verify_dataset_name_len(dsl_pool_t *dp, dsl_dataset_t *ds, void *arg) 2813 { 2814 (void) dp, (void) arg; 2815 2816 if (dsl_dataset_namelen(ds) >= ZFS_MAX_DATASET_NAME_LEN) 2817 return (SET_ERROR(ENAMETOOLONG)); 2818 2819 return (0); 2820 } 2821 2822 static int 2823 spa_load_verify(spa_t *spa) 2824 { 2825 zio_t *rio; 2826 spa_load_error_t sle = { 0 }; 2827 zpool_load_policy_t policy; 2828 boolean_t verify_ok = B_FALSE; 2829 int error = 0; 2830 2831 zpool_get_load_policy(spa->spa_config, &policy); 2832 2833 if (policy.zlp_rewind & ZPOOL_NEVER_REWIND || 2834 policy.zlp_maxmeta == UINT64_MAX) 2835 return (0); 2836 2837 dsl_pool_config_enter(spa->spa_dsl_pool, FTAG); 2838 error = dmu_objset_find_dp(spa->spa_dsl_pool, 2839 spa->spa_dsl_pool->dp_root_dir_obj, verify_dataset_name_len, NULL, 2840 DS_FIND_CHILDREN); 2841 dsl_pool_config_exit(spa->spa_dsl_pool, FTAG); 2842 if (error != 0) 2843 return (error); 2844 2845 /* 2846 * Verify data only if we are rewinding or error limit was set. 2847 * Otherwise nothing except dbgmsg care about it to waste time. 2848 */ 2849 sle.sle_verify_data = (policy.zlp_rewind & ZPOOL_REWIND_MASK) || 2850 (policy.zlp_maxdata < UINT64_MAX); 2851 2852 rio = zio_root(spa, NULL, &sle, 2853 ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE); 2854 2855 if (spa_load_verify_metadata) { 2856 if (spa->spa_extreme_rewind) { 2857 spa_load_note(spa, "performing a complete scan of the " 2858 "pool since extreme rewind is on. 
This may take " 2859 "a very long time.\n (spa_load_verify_data=%u, " 2860 "spa_load_verify_metadata=%u)", 2861 spa_load_verify_data, spa_load_verify_metadata); 2862 } 2863 2864 error = traverse_pool(spa, spa->spa_verify_min_txg, 2865 TRAVERSE_PRE | TRAVERSE_PREFETCH_METADATA | 2866 TRAVERSE_NO_DECRYPT, spa_load_verify_cb, rio); 2867 } 2868 2869 (void) zio_wait(rio); 2870 ASSERT0(spa->spa_load_verify_bytes); 2871 2872 spa->spa_load_meta_errors = sle.sle_meta_count; 2873 spa->spa_load_data_errors = sle.sle_data_count; 2874 2875 if (sle.sle_meta_count != 0 || sle.sle_data_count != 0) { 2876 spa_load_note(spa, "spa_load_verify found %llu metadata errors " 2877 "and %llu data errors", (u_longlong_t)sle.sle_meta_count, 2878 (u_longlong_t)sle.sle_data_count); 2879 } 2880 2881 if (spa_load_verify_dryrun || 2882 (!error && sle.sle_meta_count <= policy.zlp_maxmeta && 2883 sle.sle_data_count <= policy.zlp_maxdata)) { 2884 int64_t loss = 0; 2885 2886 verify_ok = B_TRUE; 2887 spa->spa_load_txg = spa->spa_uberblock.ub_txg; 2888 spa->spa_load_txg_ts = spa->spa_uberblock.ub_timestamp; 2889 2890 loss = spa->spa_last_ubsync_txg_ts - spa->spa_load_txg_ts; 2891 fnvlist_add_uint64(spa->spa_load_info, ZPOOL_CONFIG_LOAD_TIME, 2892 spa->spa_load_txg_ts); 2893 fnvlist_add_int64(spa->spa_load_info, ZPOOL_CONFIG_REWIND_TIME, 2894 loss); 2895 fnvlist_add_uint64(spa->spa_load_info, 2896 ZPOOL_CONFIG_LOAD_META_ERRORS, sle.sle_meta_count); 2897 fnvlist_add_uint64(spa->spa_load_info, 2898 ZPOOL_CONFIG_LOAD_DATA_ERRORS, sle.sle_data_count); 2899 } else { 2900 spa->spa_load_max_txg = spa->spa_uberblock.ub_txg; 2901 } 2902 2903 if (spa_load_verify_dryrun) 2904 return (0); 2905 2906 if (error) { 2907 if (error != ENXIO && error != EIO) 2908 error = SET_ERROR(EIO); 2909 return (error); 2910 } 2911 2912 return (verify_ok ? 0 : EIO); 2913 } 2914 2915 /* 2916 * Find a value in the pool props object. 2917 */ 2918 static void 2919 spa_prop_find(spa_t *spa, zpool_prop_t prop, uint64_t *val) 2920 { 2921 (void) zap_lookup(spa->spa_meta_objset, spa->spa_pool_props_object, 2922 zpool_prop_to_name(prop), sizeof (uint64_t), 1, val); 2923 } 2924 2925 /* 2926 * Find a value in the pool directory object. 
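 * Returns the zap_lookup() error; a failure is logged unless the error is
 * ENOENT and log_enoent is B_FALSE.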
2927 */ 2928 static int 2929 spa_dir_prop(spa_t *spa, const char *name, uint64_t *val, boolean_t log_enoent) 2930 { 2931 int error = zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT, 2932 name, sizeof (uint64_t), 1, val); 2933 2934 if (error != 0 && (error != ENOENT || log_enoent)) { 2935 spa_load_failed(spa, "couldn't get '%s' value in MOS directory " 2936 "[error=%d]", name, error); 2937 } 2938 2939 return (error); 2940 } 2941 2942 static int 2943 spa_vdev_err(vdev_t *vdev, vdev_aux_t aux, int err) 2944 { 2945 vdev_set_state(vdev, B_TRUE, VDEV_STATE_CANT_OPEN, aux); 2946 return (SET_ERROR(err)); 2947 } 2948 2949 boolean_t 2950 spa_livelist_delete_check(spa_t *spa) 2951 { 2952 return (spa->spa_livelists_to_delete != 0); 2953 } 2954 2955 static boolean_t 2956 spa_livelist_delete_cb_check(void *arg, zthr_t *z) 2957 { 2958 (void) z; 2959 spa_t *spa = arg; 2960 return (spa_livelist_delete_check(spa)); 2961 } 2962 2963 static int 2964 delete_blkptr_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx) 2965 { 2966 spa_t *spa = arg; 2967 zio_free(spa, tx->tx_txg, bp); 2968 dsl_dir_diduse_space(tx->tx_pool->dp_free_dir, DD_USED_HEAD, 2969 -bp_get_dsize_sync(spa, bp), 2970 -BP_GET_PSIZE(bp), -BP_GET_UCSIZE(bp), tx); 2971 return (0); 2972 } 2973 2974 static int 2975 dsl_get_next_livelist_obj(objset_t *os, uint64_t zap_obj, uint64_t *llp) 2976 { 2977 int err; 2978 zap_cursor_t zc; 2979 zap_attribute_t *za = zap_attribute_alloc(); 2980 zap_cursor_init(&zc, os, zap_obj); 2981 err = zap_cursor_retrieve(&zc, za); 2982 zap_cursor_fini(&zc); 2983 if (err == 0) 2984 *llp = za->za_first_integer; 2985 zap_attribute_free(za); 2986 return (err); 2987 } 2988 2989 /* 2990 * Components of livelist deletion that must be performed in syncing 2991 * context: freeing block pointers and updating the pool-wide data 2992 * structures to indicate how much work is left to do 2993 */ 2994 typedef struct sublist_delete_arg { 2995 spa_t *spa; 2996 dsl_deadlist_t *ll; 2997 uint64_t key; 2998 bplist_t *to_free; 2999 } sublist_delete_arg_t; 3000 3001 static void 3002 sublist_delete_sync(void *arg, dmu_tx_t *tx) 3003 { 3004 sublist_delete_arg_t *sda = arg; 3005 spa_t *spa = sda->spa; 3006 dsl_deadlist_t *ll = sda->ll; 3007 uint64_t key = sda->key; 3008 bplist_t *to_free = sda->to_free; 3009 3010 bplist_iterate(to_free, delete_blkptr_cb, spa, tx); 3011 dsl_deadlist_remove_entry(ll, key, tx); 3012 } 3013 3014 typedef struct livelist_delete_arg { 3015 spa_t *spa; 3016 uint64_t ll_obj; 3017 uint64_t zap_obj; 3018 } livelist_delete_arg_t; 3019 3020 static void 3021 livelist_delete_sync(void *arg, dmu_tx_t *tx) 3022 { 3023 livelist_delete_arg_t *lda = arg; 3024 spa_t *spa = lda->spa; 3025 uint64_t ll_obj = lda->ll_obj; 3026 uint64_t zap_obj = lda->zap_obj; 3027 objset_t *mos = spa->spa_meta_objset; 3028 uint64_t count; 3029 3030 /* free the livelist and decrement the feature count */ 3031 VERIFY0(zap_remove_int(mos, zap_obj, ll_obj, tx)); 3032 dsl_deadlist_free(mos, ll_obj, tx); 3033 spa_feature_decr(spa, SPA_FEATURE_LIVELIST, tx); 3034 VERIFY0(zap_count(mos, zap_obj, &count)); 3035 if (count == 0) { 3036 /* no more livelists to delete */ 3037 VERIFY0(zap_remove(mos, DMU_POOL_DIRECTORY_OBJECT, 3038 DMU_POOL_DELETED_CLONES, tx)); 3039 VERIFY0(zap_destroy(mos, zap_obj, tx)); 3040 spa->spa_livelists_to_delete = 0; 3041 spa_notify_waiters(spa); 3042 } 3043 } 3044 3045 /* 3046 * Load in the value for the livelist to be removed and open it. 
Then, 3047 * load its first sublist and determine which block pointers should actually 3048 * be freed. Then, call a synctask which performs the actual frees and updates 3049 * the pool-wide livelist data. 3050 */ 3051 static void 3052 spa_livelist_delete_cb(void *arg, zthr_t *z) 3053 { 3054 spa_t *spa = arg; 3055 uint64_t ll_obj = 0, count; 3056 objset_t *mos = spa->spa_meta_objset; 3057 uint64_t zap_obj = spa->spa_livelists_to_delete; 3058 /* 3059 * Determine the next livelist to delete. This function should only 3060 * be called if there is at least one deleted clone. 3061 */ 3062 VERIFY0(dsl_get_next_livelist_obj(mos, zap_obj, &ll_obj)); 3063 VERIFY0(zap_count(mos, ll_obj, &count)); 3064 if (count > 0) { 3065 dsl_deadlist_t *ll; 3066 dsl_deadlist_entry_t *dle; 3067 bplist_t to_free; 3068 ll = kmem_zalloc(sizeof (dsl_deadlist_t), KM_SLEEP); 3069 VERIFY0(dsl_deadlist_open(ll, mos, ll_obj)); 3070 dle = dsl_deadlist_first(ll); 3071 ASSERT3P(dle, !=, NULL); 3072 bplist_create(&to_free); 3073 int err = dsl_process_sub_livelist(&dle->dle_bpobj, &to_free, 3074 z, NULL); 3075 if (err == 0) { 3076 sublist_delete_arg_t sync_arg = { 3077 .spa = spa, 3078 .ll = ll, 3079 .key = dle->dle_mintxg, 3080 .to_free = &to_free 3081 }; 3082 zfs_dbgmsg("deleting sublist (id %llu) from" 3083 " livelist %llu, %lld remaining", 3084 (u_longlong_t)dle->dle_bpobj.bpo_object, 3085 (u_longlong_t)ll_obj, (longlong_t)count - 1); 3086 VERIFY0(dsl_sync_task(spa_name(spa), NULL, 3087 sublist_delete_sync, &sync_arg, 0, 3088 ZFS_SPACE_CHECK_DESTROY)); 3089 } else { 3090 VERIFY3U(err, ==, EINTR); 3091 } 3092 bplist_clear(&to_free); 3093 bplist_destroy(&to_free); 3094 dsl_deadlist_close(ll); 3095 kmem_free(ll, sizeof (dsl_deadlist_t)); 3096 } else { 3097 livelist_delete_arg_t sync_arg = { 3098 .spa = spa, 3099 .ll_obj = ll_obj, 3100 .zap_obj = zap_obj 3101 }; 3102 zfs_dbgmsg("deletion of livelist %llu completed", 3103 (u_longlong_t)ll_obj); 3104 VERIFY0(dsl_sync_task(spa_name(spa), NULL, livelist_delete_sync, 3105 &sync_arg, 0, ZFS_SPACE_CHECK_DESTROY)); 3106 } 3107 } 3108 3109 static void 3110 spa_start_livelist_destroy_thread(spa_t *spa) 3111 { 3112 ASSERT3P(spa->spa_livelist_delete_zthr, ==, NULL); 3113 spa->spa_livelist_delete_zthr = 3114 zthr_create("z_livelist_destroy", 3115 spa_livelist_delete_cb_check, spa_livelist_delete_cb, spa, 3116 minclsyspri); 3117 } 3118 3119 typedef struct livelist_new_arg { 3120 bplist_t *allocs; 3121 bplist_t *frees; 3122 } livelist_new_arg_t; 3123 3124 static int 3125 livelist_track_new_cb(void *arg, const blkptr_t *bp, boolean_t bp_freed, 3126 dmu_tx_t *tx) 3127 { 3128 ASSERT(tx == NULL); 3129 livelist_new_arg_t *lna = arg; 3130 if (bp_freed) { 3131 bplist_append(lna->frees, bp); 3132 } else { 3133 bplist_append(lna->allocs, bp); 3134 zfs_livelist_condense_new_alloc++; 3135 } 3136 return (0); 3137 } 3138 3139 typedef struct livelist_condense_arg { 3140 spa_t *spa; 3141 bplist_t to_keep; 3142 uint64_t first_size; 3143 uint64_t next_size; 3144 } livelist_condense_arg_t; 3145 3146 static void 3147 spa_livelist_condense_sync(void *arg, dmu_tx_t *tx) 3148 { 3149 livelist_condense_arg_t *lca = arg; 3150 spa_t *spa = lca->spa; 3151 bplist_t new_frees; 3152 dsl_dataset_t *ds = spa->spa_to_condense.ds; 3153 3154 /* Have we been cancelled? 
*/ 3155 if (spa->spa_to_condense.cancelled) { 3156 zfs_livelist_condense_sync_cancel++; 3157 goto out; 3158 } 3159 3160 dsl_deadlist_entry_t *first = spa->spa_to_condense.first; 3161 dsl_deadlist_entry_t *next = spa->spa_to_condense.next; 3162 dsl_deadlist_t *ll = &ds->ds_dir->dd_livelist; 3163 3164 /* 3165 * It's possible that the livelist was changed while the zthr was 3166 * running. Therefore, we need to check for new blkptrs in the two 3167 * entries being condensed and continue to track them in the livelist. 3168 * Because of the way we handle remapped blkptrs (see dbuf_remap_impl), 3169 * it's possible that the newly added blkptrs are FREEs or ALLOCs so 3170 * we need to sort them into two different bplists. 3171 */ 3172 uint64_t first_obj = first->dle_bpobj.bpo_object; 3173 uint64_t next_obj = next->dle_bpobj.bpo_object; 3174 uint64_t cur_first_size = first->dle_bpobj.bpo_phys->bpo_num_blkptrs; 3175 uint64_t cur_next_size = next->dle_bpobj.bpo_phys->bpo_num_blkptrs; 3176 3177 bplist_create(&new_frees); 3178 livelist_new_arg_t new_bps = { 3179 .allocs = &lca->to_keep, 3180 .frees = &new_frees, 3181 }; 3182 3183 if (cur_first_size > lca->first_size) { 3184 VERIFY0(livelist_bpobj_iterate_from_nofree(&first->dle_bpobj, 3185 livelist_track_new_cb, &new_bps, lca->first_size)); 3186 } 3187 if (cur_next_size > lca->next_size) { 3188 VERIFY0(livelist_bpobj_iterate_from_nofree(&next->dle_bpobj, 3189 livelist_track_new_cb, &new_bps, lca->next_size)); 3190 } 3191 3192 dsl_deadlist_clear_entry(first, ll, tx); 3193 ASSERT(bpobj_is_empty(&first->dle_bpobj)); 3194 dsl_deadlist_remove_entry(ll, next->dle_mintxg, tx); 3195 3196 bplist_iterate(&lca->to_keep, dsl_deadlist_insert_alloc_cb, ll, tx); 3197 bplist_iterate(&new_frees, dsl_deadlist_insert_free_cb, ll, tx); 3198 bplist_destroy(&new_frees); 3199 3200 char dsname[ZFS_MAX_DATASET_NAME_LEN]; 3201 dsl_dataset_name(ds, dsname); 3202 zfs_dbgmsg("txg %llu condensing livelist of %s (id %llu), bpobj %llu " 3203 "(%llu blkptrs) and bpobj %llu (%llu blkptrs) -> bpobj %llu " 3204 "(%llu blkptrs)", (u_longlong_t)tx->tx_txg, dsname, 3205 (u_longlong_t)ds->ds_object, (u_longlong_t)first_obj, 3206 (u_longlong_t)cur_first_size, (u_longlong_t)next_obj, 3207 (u_longlong_t)cur_next_size, 3208 (u_longlong_t)first->dle_bpobj.bpo_object, 3209 (u_longlong_t)first->dle_bpobj.bpo_phys->bpo_num_blkptrs); 3210 out: 3211 dmu_buf_rele(ds->ds_dbuf, spa); 3212 spa->spa_to_condense.ds = NULL; 3213 bplist_clear(&lca->to_keep); 3214 bplist_destroy(&lca->to_keep); 3215 kmem_free(lca, sizeof (livelist_condense_arg_t)); 3216 spa->spa_to_condense.syncing = B_FALSE; 3217 } 3218 3219 static void 3220 spa_livelist_condense_cb(void *arg, zthr_t *t) 3221 { 3222 while (zfs_livelist_condense_zthr_pause && 3223 !(zthr_has_waiters(t) || zthr_iscancelled(t))) 3224 delay(1); 3225 3226 spa_t *spa = arg; 3227 dsl_deadlist_entry_t *first = spa->spa_to_condense.first; 3228 dsl_deadlist_entry_t *next = spa->spa_to_condense.next; 3229 uint64_t first_size, next_size; 3230 3231 livelist_condense_arg_t *lca = 3232 kmem_alloc(sizeof (livelist_condense_arg_t), KM_SLEEP); 3233 bplist_create(&lca->to_keep); 3234 3235 /* 3236 * Process the livelists (matching FREEs and ALLOCs) in open context 3237 * so we have minimal work in syncing context to condense. 3238 * 3239 * We save bpobj sizes (first_size and next_size) to use later in 3240 * syncing context to determine if entries were added to these sublists 3241 * while in open context. 
This is possible because the clone is still 3242 * active and open for normal writes and we want to make sure the new, 3243 * unprocessed blockpointers are inserted into the livelist normally. 3244 * 3245 * Note that dsl_process_sub_livelist() both stores the size (number of 3246 * blockpointers) and iterates over them while the bpobj's lock is held, so 3247 * the sizes returned to us are consistent with what was actually 3248 * processed. 3249 */ 3250 int err = dsl_process_sub_livelist(&first->dle_bpobj, &lca->to_keep, t, 3251 &first_size); 3252 if (err == 0) 3253 err = dsl_process_sub_livelist(&next->dle_bpobj, &lca->to_keep, 3254 t, &next_size); 3255 3256 if (err == 0) { 3257 while (zfs_livelist_condense_sync_pause && 3258 !(zthr_has_waiters(t) || zthr_iscancelled(t))) 3259 delay(1); 3260 3261 dmu_tx_t *tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir); 3262 dmu_tx_mark_netfree(tx); 3263 dmu_tx_hold_space(tx, 1); 3264 err = dmu_tx_assign(tx, TXG_NOWAIT | TXG_NOTHROTTLE); 3265 if (err == 0) { 3266 /* 3267 * Prevent the condense zthr from restarting before 3268 * the synctask completes. 3269 */ 3270 spa->spa_to_condense.syncing = B_TRUE; 3271 lca->spa = spa; 3272 lca->first_size = first_size; 3273 lca->next_size = next_size; 3274 dsl_sync_task_nowait(spa_get_dsl(spa), 3275 spa_livelist_condense_sync, lca, tx); 3276 dmu_tx_commit(tx); 3277 return; 3278 } 3279 } 3280 /* 3281 * Condensing cannot continue: either it was externally stopped or 3282 * we were unable to assign to a tx because the pool has run out of 3283 * space. In the second case, we'll just end up trying to condense 3284 * again in a later txg. 3285 */ 3286 ASSERT(err != 0); 3287 bplist_clear(&lca->to_keep); 3288 bplist_destroy(&lca->to_keep); 3289 kmem_free(lca, sizeof (livelist_condense_arg_t)); 3290 dmu_buf_rele(spa->spa_to_condense.ds->ds_dbuf, spa); 3291 spa->spa_to_condense.ds = NULL; 3292 if (err == EINTR) 3293 zfs_livelist_condense_zthr_cancel++; 3294 } 3295 3296 /* 3297 * Check that there is something to condense but that a condense is not 3298 * already in progress and that condensing has not been cancelled.
3299 */ 3300 static boolean_t 3301 spa_livelist_condense_cb_check(void *arg, zthr_t *z) 3302 { 3303 (void) z; 3304 spa_t *spa = arg; 3305 if ((spa->spa_to_condense.ds != NULL) && 3306 (spa->spa_to_condense.syncing == B_FALSE) && 3307 (spa->spa_to_condense.cancelled == B_FALSE)) { 3308 return (B_TRUE); 3309 } 3310 return (B_FALSE); 3311 } 3312 3313 static void 3314 spa_start_livelist_condensing_thread(spa_t *spa) 3315 { 3316 spa->spa_to_condense.ds = NULL; 3317 spa->spa_to_condense.first = NULL; 3318 spa->spa_to_condense.next = NULL; 3319 spa->spa_to_condense.syncing = B_FALSE; 3320 spa->spa_to_condense.cancelled = B_FALSE; 3321 3322 ASSERT3P(spa->spa_livelist_condense_zthr, ==, NULL); 3323 spa->spa_livelist_condense_zthr = 3324 zthr_create("z_livelist_condense", 3325 spa_livelist_condense_cb_check, 3326 spa_livelist_condense_cb, spa, minclsyspri); 3327 } 3328 3329 static void 3330 spa_spawn_aux_threads(spa_t *spa) 3331 { 3332 ASSERT(spa_writeable(spa)); 3333 3334 spa_start_raidz_expansion_thread(spa); 3335 spa_start_indirect_condensing_thread(spa); 3336 spa_start_livelist_destroy_thread(spa); 3337 spa_start_livelist_condensing_thread(spa); 3338 3339 ASSERT3P(spa->spa_checkpoint_discard_zthr, ==, NULL); 3340 spa->spa_checkpoint_discard_zthr = 3341 zthr_create("z_checkpoint_discard", 3342 spa_checkpoint_discard_thread_check, 3343 spa_checkpoint_discard_thread, spa, minclsyspri); 3344 } 3345 3346 /* 3347 * Fix up config after a partly-completed split. This is done with the 3348 * ZPOOL_CONFIG_SPLIT nvlist. Both the splitting pool and the split-off 3349 * pool have that entry in their config, but only the splitting one contains 3350 * a list of all the guids of the vdevs that are being split off. 3351 * 3352 * This function determines what to do with that list: either rejoin 3353 * all the disks to the pool, or complete the splitting process. To attempt 3354 * the rejoin, each disk that is offlined is marked online again, and 3355 * we do a reopen() call. If the vdev label for every disk that was 3356 * marked online indicates it was successfully split off (VDEV_AUX_SPLIT_POOL) 3357 * then we call vdev_split() on each disk, and complete the split. 3358 * 3359 * Otherwise we leave the config alone, with all the vdevs in place in 3360 * the original pool. 3361 */ 3362 static void 3363 spa_try_repair(spa_t *spa, nvlist_t *config) 3364 { 3365 uint_t extracted; 3366 uint64_t *glist; 3367 uint_t i, gcount; 3368 nvlist_t *nvl; 3369 vdev_t **vd; 3370 boolean_t attempt_reopen; 3371 3372 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_SPLIT, &nvl) != 0) 3373 return; 3374 3375 /* check that the config is complete */ 3376 if (nvlist_lookup_uint64_array(nvl, ZPOOL_CONFIG_SPLIT_LIST, 3377 &glist, &gcount) != 0) 3378 return; 3379 3380 vd = kmem_zalloc(gcount * sizeof (vdev_t *), KM_SLEEP); 3381 3382 /* attempt to online all the vdevs & validate */ 3383 attempt_reopen = B_TRUE; 3384 for (i = 0; i < gcount; i++) { 3385 if (glist[i] == 0) /* vdev is hole */ 3386 continue; 3387 3388 vd[i] = spa_lookup_by_guid(spa, glist[i], B_FALSE); 3389 if (vd[i] == NULL) { 3390 /* 3391 * Don't bother attempting to reopen the disks; 3392 * just do the split. 
3393 */ 3394 attempt_reopen = B_FALSE; 3395 } else { 3396 /* attempt to re-online it */ 3397 vd[i]->vdev_offline = B_FALSE; 3398 } 3399 } 3400 3401 if (attempt_reopen) { 3402 vdev_reopen(spa->spa_root_vdev); 3403 3404 /* check each device to see what state it's in */ 3405 for (extracted = 0, i = 0; i < gcount; i++) { 3406 if (vd[i] != NULL && 3407 vd[i]->vdev_stat.vs_aux != VDEV_AUX_SPLIT_POOL) 3408 break; 3409 ++extracted; 3410 } 3411 } 3412 3413 /* 3414 * If every disk has been moved to the new pool, or if we never 3415 * even attempted to look at them, then we split them off for 3416 * good. 3417 */ 3418 if (!attempt_reopen || gcount == extracted) { 3419 for (i = 0; i < gcount; i++) 3420 if (vd[i] != NULL) 3421 vdev_split(vd[i]); 3422 vdev_reopen(spa->spa_root_vdev); 3423 } 3424 3425 kmem_free(vd, gcount * sizeof (vdev_t *)); 3426 } 3427 3428 static int 3429 spa_load(spa_t *spa, spa_load_state_t state, spa_import_type_t type) 3430 { 3431 const char *ereport = FM_EREPORT_ZFS_POOL; 3432 int error; 3433 3434 spa->spa_load_state = state; 3435 (void) spa_import_progress_set_state(spa_guid(spa), 3436 spa_load_state(spa)); 3437 spa_import_progress_set_notes(spa, "spa_load()"); 3438 3439 gethrestime(&spa->spa_loaded_ts); 3440 error = spa_load_impl(spa, type, &ereport); 3441 3442 /* 3443 * Don't count references from objsets that are already closed 3444 * and are making their way through the eviction process. 3445 */ 3446 spa_evicting_os_wait(spa); 3447 spa->spa_minref = zfs_refcount_count(&spa->spa_refcount); 3448 if (error) { 3449 if (error != EEXIST) { 3450 spa->spa_loaded_ts.tv_sec = 0; 3451 spa->spa_loaded_ts.tv_nsec = 0; 3452 } 3453 if (error != EBADF) { 3454 (void) zfs_ereport_post(ereport, spa, 3455 NULL, NULL, NULL, 0); 3456 } 3457 } 3458 spa->spa_load_state = error ? SPA_LOAD_ERROR : SPA_LOAD_NONE; 3459 spa->spa_ena = 0; 3460 3461 (void) spa_import_progress_set_state(spa_guid(spa), 3462 spa_load_state(spa)); 3463 3464 return (error); 3465 } 3466 3467 #ifdef ZFS_DEBUG 3468 /* 3469 * Count the number of per-vdev ZAPs associated with all of the vdevs in the 3470 * vdev tree rooted in the given vd, and ensure that each ZAP is present in the 3471 * spa's per-vdev ZAP list. 3472 */ 3473 static uint64_t 3474 vdev_count_verify_zaps(vdev_t *vd) 3475 { 3476 spa_t *spa = vd->vdev_spa; 3477 uint64_t total = 0; 3478 3479 if (spa_feature_is_active(vd->vdev_spa, SPA_FEATURE_AVZ_V2) && 3480 vd->vdev_root_zap != 0) { 3481 total++; 3482 ASSERT0(zap_lookup_int(spa->spa_meta_objset, 3483 spa->spa_all_vdev_zaps, vd->vdev_root_zap)); 3484 } 3485 if (vd->vdev_top_zap != 0) { 3486 total++; 3487 ASSERT0(zap_lookup_int(spa->spa_meta_objset, 3488 spa->spa_all_vdev_zaps, vd->vdev_top_zap)); 3489 } 3490 if (vd->vdev_leaf_zap != 0) { 3491 total++; 3492 ASSERT0(zap_lookup_int(spa->spa_meta_objset, 3493 spa->spa_all_vdev_zaps, vd->vdev_leaf_zap)); 3494 } 3495 3496 for (uint64_t i = 0; i < vd->vdev_children; i++) { 3497 total += vdev_count_verify_zaps(vd->vdev_child[i]); 3498 } 3499 3500 return (total); 3501 } 3502 #else 3503 #define vdev_count_verify_zaps(vd) ((void) sizeof (vd), 0) 3504 #endif 3505 3506 /* 3507 * Determine whether the activity check is required. 
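 * The check is skipped when ZFS_IMPORT_SKIP_MMP is set (zdb), when MMP is
 * not in use, when an earlier tryimport already performed the check and the
 * uberblock has not changed, when the pool is being imported by the host
 * that last imported it, or when the pool was cleanly exported.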
3508 */ 3509 static boolean_t 3510 spa_activity_check_required(spa_t *spa, uberblock_t *ub, nvlist_t *label, 3511 nvlist_t *config) 3512 { 3513 uint64_t state = 0; 3514 uint64_t hostid = 0; 3515 uint64_t tryconfig_txg = 0; 3516 uint64_t tryconfig_timestamp = 0; 3517 uint16_t tryconfig_mmp_seq = 0; 3518 nvlist_t *nvinfo; 3519 3520 if (nvlist_exists(config, ZPOOL_CONFIG_LOAD_INFO)) { 3521 nvinfo = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO); 3522 (void) nvlist_lookup_uint64(nvinfo, ZPOOL_CONFIG_MMP_TXG, 3523 &tryconfig_txg); 3524 (void) nvlist_lookup_uint64(config, ZPOOL_CONFIG_TIMESTAMP, 3525 &tryconfig_timestamp); 3526 (void) nvlist_lookup_uint16(nvinfo, ZPOOL_CONFIG_MMP_SEQ, 3527 &tryconfig_mmp_seq); 3528 } 3529 3530 (void) nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE, &state); 3531 3532 /* 3533 * Disable the MMP activity check - This is used by zdb which 3534 * is intended to be used on potentially active pools. 3535 */ 3536 if (spa->spa_import_flags & ZFS_IMPORT_SKIP_MMP) 3537 return (B_FALSE); 3538 3539 /* 3540 * Skip the activity check when the MMP feature is disabled. 3541 */ 3542 if (ub->ub_mmp_magic == MMP_MAGIC && ub->ub_mmp_delay == 0) 3543 return (B_FALSE); 3544 3545 /* 3546 * If the tryconfig_ values are nonzero, they are the results of an 3547 * earlier tryimport. If they all match the uberblock we just found, 3548 * then the pool has not changed and we return false so we do not test 3549 * a second time. 3550 */ 3551 if (tryconfig_txg && tryconfig_txg == ub->ub_txg && 3552 tryconfig_timestamp && tryconfig_timestamp == ub->ub_timestamp && 3553 tryconfig_mmp_seq && tryconfig_mmp_seq == 3554 (MMP_SEQ_VALID(ub) ? MMP_SEQ(ub) : 0)) 3555 return (B_FALSE); 3556 3557 /* 3558 * Allow the activity check to be skipped when importing the pool 3559 * on the same host which last imported it. Since the hostid from 3560 * configuration may be stale use the one read from the label. 3561 */ 3562 if (nvlist_exists(label, ZPOOL_CONFIG_HOSTID)) 3563 hostid = fnvlist_lookup_uint64(label, ZPOOL_CONFIG_HOSTID); 3564 3565 if (hostid == spa_get_hostid(spa)) 3566 return (B_FALSE); 3567 3568 /* 3569 * Skip the activity test when the pool was cleanly exported. 3570 */ 3571 if (state != POOL_STATE_ACTIVE) 3572 return (B_FALSE); 3573 3574 return (B_TRUE); 3575 } 3576 3577 /* 3578 * Nanoseconds the activity check must watch for changes on-disk. 3579 */ 3580 static uint64_t 3581 spa_activity_check_duration(spa_t *spa, uberblock_t *ub) 3582 { 3583 uint64_t import_intervals = MAX(zfs_multihost_import_intervals, 1); 3584 uint64_t multihost_interval = MSEC2NSEC( 3585 MMP_INTERVAL_OK(zfs_multihost_interval)); 3586 uint64_t import_delay = MAX(NANOSEC, import_intervals * 3587 multihost_interval); 3588 3589 /* 3590 * Local tunables determine a minimum duration except for the case 3591 * where we know when the remote host will suspend the pool if MMP 3592 * writes do not land. 3593 * 3594 * See Big Theory comment at the top of mmp.c for the reasoning behind 3595 * these cases and times. 
3596 */ 3597 3598 ASSERT(MMP_IMPORT_SAFETY_FACTOR >= 100); 3599 3600 if (MMP_INTERVAL_VALID(ub) && MMP_FAIL_INT_VALID(ub) && 3601 MMP_FAIL_INT(ub) > 0) { 3602 3603 /* MMP on remote host will suspend pool after failed writes */ 3604 import_delay = MMP_FAIL_INT(ub) * MSEC2NSEC(MMP_INTERVAL(ub)) * 3605 MMP_IMPORT_SAFETY_FACTOR / 100; 3606 3607 zfs_dbgmsg("fail_intvals>0 import_delay=%llu ub_mmp " 3608 "mmp_fails=%llu ub_mmp mmp_interval=%llu " 3609 "import_intervals=%llu", (u_longlong_t)import_delay, 3610 (u_longlong_t)MMP_FAIL_INT(ub), 3611 (u_longlong_t)MMP_INTERVAL(ub), 3612 (u_longlong_t)import_intervals); 3613 3614 } else if (MMP_INTERVAL_VALID(ub) && MMP_FAIL_INT_VALID(ub) && 3615 MMP_FAIL_INT(ub) == 0) { 3616 3617 /* MMP on remote host will never suspend pool */ 3618 import_delay = MAX(import_delay, (MSEC2NSEC(MMP_INTERVAL(ub)) + 3619 ub->ub_mmp_delay) * import_intervals); 3620 3621 zfs_dbgmsg("fail_intvals=0 import_delay=%llu ub_mmp " 3622 "mmp_interval=%llu ub_mmp_delay=%llu " 3623 "import_intervals=%llu", (u_longlong_t)import_delay, 3624 (u_longlong_t)MMP_INTERVAL(ub), 3625 (u_longlong_t)ub->ub_mmp_delay, 3626 (u_longlong_t)import_intervals); 3627 3628 } else if (MMP_VALID(ub)) { 3629 /* 3630 * zfs-0.7 compatibility case 3631 */ 3632 3633 import_delay = MAX(import_delay, (multihost_interval + 3634 ub->ub_mmp_delay) * import_intervals); 3635 3636 zfs_dbgmsg("import_delay=%llu ub_mmp_delay=%llu " 3637 "import_intervals=%llu leaves=%u", 3638 (u_longlong_t)import_delay, 3639 (u_longlong_t)ub->ub_mmp_delay, 3640 (u_longlong_t)import_intervals, 3641 vdev_count_leaves(spa)); 3642 } else { 3643 /* Using local tunings is the only reasonable option */ 3644 zfs_dbgmsg("pool last imported on non-MMP aware " 3645 "host using import_delay=%llu multihost_interval=%llu " 3646 "import_intervals=%llu", (u_longlong_t)import_delay, 3647 (u_longlong_t)multihost_interval, 3648 (u_longlong_t)import_intervals); 3649 } 3650 3651 return (import_delay); 3652 } 3653 3654 /* 3655 * Remote host activity check. 3656 * 3657 * error results: 3658 * 0 - no activity detected 3659 * EREMOTEIO - remote activity detected 3660 * EINTR - user canceled the operation 3661 */ 3662 static int 3663 spa_activity_check(spa_t *spa, uberblock_t *ub, nvlist_t *config, 3664 boolean_t importing) 3665 { 3666 uint64_t txg = ub->ub_txg; 3667 uint64_t timestamp = ub->ub_timestamp; 3668 uint64_t mmp_config = ub->ub_mmp_config; 3669 uint16_t mmp_seq = MMP_SEQ_VALID(ub) ? MMP_SEQ(ub) : 0; 3670 uint64_t import_delay; 3671 hrtime_t import_expire, now; 3672 nvlist_t *mmp_label = NULL; 3673 vdev_t *rvd = spa->spa_root_vdev; 3674 kcondvar_t cv; 3675 kmutex_t mtx; 3676 int error = 0; 3677 3678 cv_init(&cv, NULL, CV_DEFAULT, NULL); 3679 mutex_init(&mtx, NULL, MUTEX_DEFAULT, NULL); 3680 mutex_enter(&mtx); 3681 3682 /* 3683 * If ZPOOL_CONFIG_MMP_TXG is present an activity check was performed 3684 * during the earlier tryimport. If the txg recorded there is 0 then 3685 * the pool is known to be active on another host. 3686 * 3687 * Otherwise, the pool might be in use on another host. Check for 3688 * changes in the uberblocks on disk if necessary. 
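 * The loop below re-reads the best uberblock for the duration computed by
 * spa_activity_check_duration(); any change in txg, timestamp, or MMP
 * sequence means another host is actively writing to the pool.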
3689 */ 3690 if (nvlist_exists(config, ZPOOL_CONFIG_LOAD_INFO)) { 3691 nvlist_t *nvinfo = fnvlist_lookup_nvlist(config, 3692 ZPOOL_CONFIG_LOAD_INFO); 3693 3694 if (nvlist_exists(nvinfo, ZPOOL_CONFIG_MMP_TXG) && 3695 fnvlist_lookup_uint64(nvinfo, ZPOOL_CONFIG_MMP_TXG) == 0) { 3696 vdev_uberblock_load(rvd, ub, &mmp_label); 3697 error = SET_ERROR(EREMOTEIO); 3698 goto out; 3699 } 3700 } 3701 3702 import_delay = spa_activity_check_duration(spa, ub); 3703 3704 /* Add a small random factor in case of simultaneous imports (0-25%) */ 3705 import_delay += import_delay * random_in_range(250) / 1000; 3706 3707 import_expire = gethrtime() + import_delay; 3708 3709 if (importing) { 3710 spa_import_progress_set_notes(spa, "Checking MMP activity, " 3711 "waiting %llu ms", (u_longlong_t)NSEC2MSEC(import_delay)); 3712 } 3713 3714 int iterations = 0; 3715 while ((now = gethrtime()) < import_expire) { 3716 if (importing && iterations++ % 30 == 0) { 3717 spa_import_progress_set_notes(spa, "Checking MMP " 3718 "activity, %llu ms remaining", 3719 (u_longlong_t)NSEC2MSEC(import_expire - now)); 3720 } 3721 3722 if (importing) { 3723 (void) spa_import_progress_set_mmp_check(spa_guid(spa), 3724 NSEC2SEC(import_expire - gethrtime())); 3725 } 3726 3727 vdev_uberblock_load(rvd, ub, &mmp_label); 3728 3729 if (txg != ub->ub_txg || timestamp != ub->ub_timestamp || 3730 mmp_seq != (MMP_SEQ_VALID(ub) ? MMP_SEQ(ub) : 0)) { 3731 zfs_dbgmsg("multihost activity detected " 3732 "txg %llu ub_txg %llu " 3733 "timestamp %llu ub_timestamp %llu " 3734 "mmp_config %#llx ub_mmp_config %#llx", 3735 (u_longlong_t)txg, (u_longlong_t)ub->ub_txg, 3736 (u_longlong_t)timestamp, 3737 (u_longlong_t)ub->ub_timestamp, 3738 (u_longlong_t)mmp_config, 3739 (u_longlong_t)ub->ub_mmp_config); 3740 3741 error = SET_ERROR(EREMOTEIO); 3742 break; 3743 } 3744 3745 if (mmp_label) { 3746 nvlist_free(mmp_label); 3747 mmp_label = NULL; 3748 } 3749 3750 error = cv_timedwait_sig(&cv, &mtx, ddi_get_lbolt() + hz); 3751 if (error != -1) { 3752 error = SET_ERROR(EINTR); 3753 break; 3754 } 3755 error = 0; 3756 } 3757 3758 out: 3759 mutex_exit(&mtx); 3760 mutex_destroy(&mtx); 3761 cv_destroy(&cv); 3762 3763 /* 3764 * If the pool is determined to be active store the status in the 3765 * spa->spa_load_info nvlist. If the remote hostname or hostid are 3766 * available from configuration read from disk store them as well. 3767 * This allows 'zpool import' to generate a more useful message. 
3768 * 3769 * ZPOOL_CONFIG_MMP_STATE - observed pool status (mandatory) 3770 * ZPOOL_CONFIG_MMP_HOSTNAME - hostname from the active pool 3771 * ZPOOL_CONFIG_MMP_HOSTID - hostid from the active pool 3772 */ 3773 if (error == EREMOTEIO) { 3774 const char *hostname = "<unknown>"; 3775 uint64_t hostid = 0; 3776 3777 if (mmp_label) { 3778 if (nvlist_exists(mmp_label, ZPOOL_CONFIG_HOSTNAME)) { 3779 hostname = fnvlist_lookup_string(mmp_label, 3780 ZPOOL_CONFIG_HOSTNAME); 3781 fnvlist_add_string(spa->spa_load_info, 3782 ZPOOL_CONFIG_MMP_HOSTNAME, hostname); 3783 } 3784 3785 if (nvlist_exists(mmp_label, ZPOOL_CONFIG_HOSTID)) { 3786 hostid = fnvlist_lookup_uint64(mmp_label, 3787 ZPOOL_CONFIG_HOSTID); 3788 fnvlist_add_uint64(spa->spa_load_info, 3789 ZPOOL_CONFIG_MMP_HOSTID, hostid); 3790 } 3791 } 3792 3793 fnvlist_add_uint64(spa->spa_load_info, 3794 ZPOOL_CONFIG_MMP_STATE, MMP_STATE_ACTIVE); 3795 fnvlist_add_uint64(spa->spa_load_info, 3796 ZPOOL_CONFIG_MMP_TXG, 0); 3797 3798 error = spa_vdev_err(rvd, VDEV_AUX_ACTIVE, EREMOTEIO); 3799 } 3800 3801 if (mmp_label) 3802 nvlist_free(mmp_label); 3803 3804 return (error); 3805 } 3806 3807 /* 3808 * Called from zfs_ioc_clear for a pool that was suspended 3809 * after failing mmp write checks. 3810 */ 3811 boolean_t 3812 spa_mmp_remote_host_activity(spa_t *spa) 3813 { 3814 ASSERT(spa_multihost(spa) && spa_suspended(spa)); 3815 3816 nvlist_t *best_label; 3817 uberblock_t best_ub; 3818 3819 /* 3820 * Locate the best uberblock on disk 3821 */ 3822 vdev_uberblock_load(spa->spa_root_vdev, &best_ub, &best_label); 3823 if (best_label) { 3824 /* 3825 * confirm that the best hostid matches our hostid 3826 */ 3827 if (nvlist_exists(best_label, ZPOOL_CONFIG_HOSTID) && 3828 spa_get_hostid(spa) != 3829 fnvlist_lookup_uint64(best_label, ZPOOL_CONFIG_HOSTID)) { 3830 nvlist_free(best_label); 3831 return (B_TRUE); 3832 } 3833 nvlist_free(best_label); 3834 } else { 3835 return (B_TRUE); 3836 } 3837 3838 if (!MMP_VALID(&best_ub) || 3839 !MMP_FAIL_INT_VALID(&best_ub) || 3840 MMP_FAIL_INT(&best_ub) == 0) { 3841 return (B_TRUE); 3842 } 3843 3844 if (best_ub.ub_txg != spa->spa_uberblock.ub_txg || 3845 best_ub.ub_timestamp != spa->spa_uberblock.ub_timestamp) { 3846 zfs_dbgmsg("txg mismatch detected during pool clear " 3847 "txg %llu ub_txg %llu timestamp %llu ub_timestamp %llu", 3848 (u_longlong_t)spa->spa_uberblock.ub_txg, 3849 (u_longlong_t)best_ub.ub_txg, 3850 (u_longlong_t)spa->spa_uberblock.ub_timestamp, 3851 (u_longlong_t)best_ub.ub_timestamp); 3852 return (B_TRUE); 3853 } 3854 3855 /* 3856 * Perform an activity check looking for any remote writer 3857 */ 3858 return (spa_activity_check(spa, &spa->spa_uberblock, spa->spa_config, 3859 B_FALSE) != 0); 3860 } 3861 3862 static int 3863 spa_verify_host(spa_t *spa, nvlist_t *mos_config) 3864 { 3865 uint64_t hostid; 3866 const char *hostname; 3867 uint64_t myhostid = 0; 3868 3869 if (!spa_is_root(spa) && nvlist_lookup_uint64(mos_config, 3870 ZPOOL_CONFIG_HOSTID, &hostid) == 0) { 3871 hostname = fnvlist_lookup_string(mos_config, 3872 ZPOOL_CONFIG_HOSTNAME); 3873 3874 myhostid = zone_get_hostid(NULL); 3875 3876 if (hostid != 0 && myhostid != 0 && hostid != myhostid) { 3877 cmn_err(CE_WARN, "pool '%s' could not be " 3878 "loaded as it was last accessed by " 3879 "another system (host: %s hostid: 0x%llx). 
" 3880 "See: https://openzfs.github.io/openzfs-docs/msg/" 3881 "ZFS-8000-EY", 3882 spa_name(spa), hostname, (u_longlong_t)hostid); 3883 spa_load_failed(spa, "hostid verification failed: pool " 3884 "last accessed by host: %s (hostid: 0x%llx)", 3885 hostname, (u_longlong_t)hostid); 3886 return (SET_ERROR(EBADF)); 3887 } 3888 } 3889 3890 return (0); 3891 } 3892 3893 static int 3894 spa_ld_parse_config(spa_t *spa, spa_import_type_t type) 3895 { 3896 int error = 0; 3897 nvlist_t *nvtree, *nvl, *config = spa->spa_config; 3898 int parse; 3899 vdev_t *rvd; 3900 uint64_t pool_guid; 3901 const char *comment; 3902 const char *compatibility; 3903 3904 /* 3905 * Versioning wasn't explicitly added to the label until later, so if 3906 * it's not present treat it as the initial version. 3907 */ 3908 if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION, 3909 &spa->spa_ubsync.ub_version) != 0) 3910 spa->spa_ubsync.ub_version = SPA_VERSION_INITIAL; 3911 3912 if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID, &pool_guid)) { 3913 spa_load_failed(spa, "invalid config provided: '%s' missing", 3914 ZPOOL_CONFIG_POOL_GUID); 3915 return (SET_ERROR(EINVAL)); 3916 } 3917 3918 /* 3919 * If we are doing an import, ensure that the pool is not already 3920 * imported by checking if its pool guid already exists in the 3921 * spa namespace. 3922 * 3923 * The only case that we allow an already imported pool to be 3924 * imported again, is when the pool is checkpointed and we want to 3925 * look at its checkpointed state from userland tools like zdb. 3926 */ 3927 #ifdef _KERNEL 3928 if ((spa->spa_load_state == SPA_LOAD_IMPORT || 3929 spa->spa_load_state == SPA_LOAD_TRYIMPORT) && 3930 spa_guid_exists(pool_guid, 0)) { 3931 #else 3932 if ((spa->spa_load_state == SPA_LOAD_IMPORT || 3933 spa->spa_load_state == SPA_LOAD_TRYIMPORT) && 3934 spa_guid_exists(pool_guid, 0) && 3935 !spa_importing_readonly_checkpoint(spa)) { 3936 #endif 3937 spa_load_failed(spa, "a pool with guid %llu is already open", 3938 (u_longlong_t)pool_guid); 3939 return (SET_ERROR(EEXIST)); 3940 } 3941 3942 spa->spa_config_guid = pool_guid; 3943 3944 nvlist_free(spa->spa_load_info); 3945 spa->spa_load_info = fnvlist_alloc(); 3946 3947 ASSERT(spa->spa_comment == NULL); 3948 if (nvlist_lookup_string(config, ZPOOL_CONFIG_COMMENT, &comment) == 0) 3949 spa->spa_comment = spa_strdup(comment); 3950 3951 ASSERT(spa->spa_compatibility == NULL); 3952 if (nvlist_lookup_string(config, ZPOOL_CONFIG_COMPATIBILITY, 3953 &compatibility) == 0) 3954 spa->spa_compatibility = spa_strdup(compatibility); 3955 3956 (void) nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_TXG, 3957 &spa->spa_config_txg); 3958 3959 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_SPLIT, &nvl) == 0) 3960 spa->spa_config_splitting = fnvlist_dup(nvl); 3961 3962 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &nvtree)) { 3963 spa_load_failed(spa, "invalid config provided: '%s' missing", 3964 ZPOOL_CONFIG_VDEV_TREE); 3965 return (SET_ERROR(EINVAL)); 3966 } 3967 3968 /* 3969 * Create "The Godfather" zio to hold all async IOs 3970 */ 3971 spa->spa_async_zio_root = kmem_alloc(max_ncpus * sizeof (void *), 3972 KM_SLEEP); 3973 for (int i = 0; i < max_ncpus; i++) { 3974 spa->spa_async_zio_root[i] = zio_root(spa, NULL, NULL, 3975 ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE | 3976 ZIO_FLAG_GODFATHER); 3977 } 3978 3979 /* 3980 * Parse the configuration into a vdev tree. 
We explicitly set the 3981 * value that will be returned by spa_version() since parsing the 3982 * configuration requires knowing the version number. 3983 */ 3984 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 3985 parse = (type == SPA_IMPORT_EXISTING ? 3986 VDEV_ALLOC_LOAD : VDEV_ALLOC_SPLIT); 3987 error = spa_config_parse(spa, &rvd, nvtree, NULL, 0, parse); 3988 spa_config_exit(spa, SCL_ALL, FTAG); 3989 3990 if (error != 0) { 3991 spa_load_failed(spa, "unable to parse config [error=%d]", 3992 error); 3993 return (error); 3994 } 3995 3996 ASSERT(spa->spa_root_vdev == rvd); 3997 ASSERT3U(spa->spa_min_ashift, >=, SPA_MINBLOCKSHIFT); 3998 ASSERT3U(spa->spa_max_ashift, <=, SPA_MAXBLOCKSHIFT); 3999 4000 if (type != SPA_IMPORT_ASSEMBLE) { 4001 ASSERT(spa_guid(spa) == pool_guid); 4002 } 4003 4004 return (0); 4005 } 4006 4007 /* 4008 * Recursively open all vdevs in the vdev tree. This function is called twice: 4009 * first with the untrusted config, then with the trusted config. 4010 */ 4011 static int 4012 spa_ld_open_vdevs(spa_t *spa) 4013 { 4014 int error = 0; 4015 4016 /* 4017 * spa_missing_tvds_allowed defines how many top-level vdevs can be 4018 * missing/unopenable for the root vdev to be still considered openable. 4019 */ 4020 if (spa->spa_trust_config) { 4021 spa->spa_missing_tvds_allowed = zfs_max_missing_tvds; 4022 } else if (spa->spa_config_source == SPA_CONFIG_SRC_CACHEFILE) { 4023 spa->spa_missing_tvds_allowed = zfs_max_missing_tvds_cachefile; 4024 } else if (spa->spa_config_source == SPA_CONFIG_SRC_SCAN) { 4025 spa->spa_missing_tvds_allowed = zfs_max_missing_tvds_scan; 4026 } else { 4027 spa->spa_missing_tvds_allowed = 0; 4028 } 4029 4030 spa->spa_missing_tvds_allowed = 4031 MAX(zfs_max_missing_tvds, spa->spa_missing_tvds_allowed); 4032 4033 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 4034 error = vdev_open(spa->spa_root_vdev); 4035 spa_config_exit(spa, SCL_ALL, FTAG); 4036 4037 if (spa->spa_missing_tvds != 0) { 4038 spa_load_note(spa, "vdev tree has %lld missing top-level " 4039 "vdevs.", (u_longlong_t)spa->spa_missing_tvds); 4040 if (spa->spa_trust_config && (spa->spa_mode & SPA_MODE_WRITE)) { 4041 /* 4042 * Although theoretically we could allow users to open 4043 * incomplete pools in RW mode, we'd need to add a lot 4044 * of extra logic (e.g. adjust pool space to account 4045 * for missing vdevs). 4046 * This limitation also prevents users from accidentally 4047 * opening the pool in RW mode during data recovery and 4048 * damaging it further. 4049 */ 4050 spa_load_note(spa, "pools with missing top-level " 4051 "vdevs can only be opened in read-only mode."); 4052 error = SET_ERROR(ENXIO); 4053 } else { 4054 spa_load_note(spa, "current settings allow for maximum " 4055 "%lld missing top-level vdevs at this stage.", 4056 (u_longlong_t)spa->spa_missing_tvds_allowed); 4057 } 4058 } 4059 if (error != 0) { 4060 spa_load_failed(spa, "unable to open vdev tree [error=%d]", 4061 error); 4062 } 4063 if (spa->spa_missing_tvds != 0 || error != 0) 4064 vdev_dbgmsg_print_tree(spa->spa_root_vdev, 2); 4065 4066 return (error); 4067 } 4068 4069 /* 4070 * We need to validate the vdev labels against the configuration that 4071 * we have in hand. This function is called twice: first with an untrusted 4072 * config, then with a trusted config. The validation is more strict when the 4073 * config is trusted. 
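 *
 * Roughly speaking, vdev_validate() reads each leaf vdev's label and
 * checks that it really belongs to this pool (matching pool GUID and,
 * once the config is trusted, matching vdev GUIDs and pool state).
 * A vdev that fails these checks is marked unopenable rather than
 * failing the whole load, unless that leaves the root vdev itself
 * unopenable (see the check below).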
4074 */ 4075 static int 4076 spa_ld_validate_vdevs(spa_t *spa) 4077 { 4078 int error = 0; 4079 vdev_t *rvd = spa->spa_root_vdev; 4080 4081 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 4082 error = vdev_validate(rvd); 4083 spa_config_exit(spa, SCL_ALL, FTAG); 4084 4085 if (error != 0) { 4086 spa_load_failed(spa, "vdev_validate failed [error=%d]", error); 4087 return (error); 4088 } 4089 4090 if (rvd->vdev_state <= VDEV_STATE_CANT_OPEN) { 4091 spa_load_failed(spa, "cannot open vdev tree after invalidating " 4092 "some vdevs"); 4093 vdev_dbgmsg_print_tree(rvd, 2); 4094 return (SET_ERROR(ENXIO)); 4095 } 4096 4097 return (0); 4098 } 4099 4100 static void 4101 spa_ld_select_uberblock_done(spa_t *spa, uberblock_t *ub) 4102 { 4103 spa->spa_state = POOL_STATE_ACTIVE; 4104 spa->spa_ubsync = spa->spa_uberblock; 4105 spa->spa_verify_min_txg = spa->spa_extreme_rewind ? 4106 TXG_INITIAL - 1 : spa_last_synced_txg(spa) - TXG_DEFER_SIZE - 1; 4107 spa->spa_first_txg = spa->spa_last_ubsync_txg ? 4108 spa->spa_last_ubsync_txg : spa_last_synced_txg(spa) + 1; 4109 spa->spa_claim_max_txg = spa->spa_first_txg; 4110 spa->spa_prev_software_version = ub->ub_software_version; 4111 } 4112 4113 static int 4114 spa_ld_select_uberblock(spa_t *spa, spa_import_type_t type) 4115 { 4116 vdev_t *rvd = spa->spa_root_vdev; 4117 nvlist_t *label; 4118 uberblock_t *ub = &spa->spa_uberblock; 4119 boolean_t activity_check = B_FALSE; 4120 4121 /* 4122 * If we are opening the checkpointed state of the pool by 4123 * rewinding to it, at this point we will have written the 4124 * checkpointed uberblock to the vdev labels, so searching 4125 * the labels will find the right uberblock. However, if 4126 * we are opening the checkpointed state read-only, we have 4127 * not modified the labels. Therefore, we must ignore the 4128 * labels and continue using the spa_uberblock that was set 4129 * by spa_ld_checkpoint_rewind. 4130 * 4131 * Note that it would be fine to ignore the labels when 4132 * rewinding (opening writeable) as well. However, if we 4133 * crash just after writing the labels, we will end up 4134 * searching the labels. Doing so in the common case means 4135 * that this code path gets exercised normally, rather than 4136 * just in the edge case. 4137 */ 4138 if (ub->ub_checkpoint_txg != 0 && 4139 spa_importing_readonly_checkpoint(spa)) { 4140 spa_ld_select_uberblock_done(spa, ub); 4141 return (0); 4142 } 4143 4144 /* 4145 * Find the best uberblock. 4146 */ 4147 vdev_uberblock_load(rvd, ub, &label); 4148 4149 /* 4150 * If we weren't able to find a single valid uberblock, return failure. 4151 */ 4152 if (ub->ub_txg == 0) { 4153 nvlist_free(label); 4154 spa_load_failed(spa, "no valid uberblock found"); 4155 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, ENXIO)); 4156 } 4157 4158 if (spa->spa_load_max_txg != UINT64_MAX) { 4159 (void) spa_import_progress_set_max_txg(spa_guid(spa), 4160 (u_longlong_t)spa->spa_load_max_txg); 4161 } 4162 spa_load_note(spa, "using uberblock with txg=%llu", 4163 (u_longlong_t)ub->ub_txg); 4164 if (ub->ub_raidz_reflow_info != 0) { 4165 spa_load_note(spa, "uberblock raidz_reflow_info: " 4166 "state=%u offset=%llu", 4167 (int)RRSS_GET_STATE(ub), 4168 (u_longlong_t)RRSS_GET_OFFSET(ub)); 4169 } 4170 4171 4172 /* 4173 * For pools which have the multihost property on determine if the 4174 * pool is truly inactive and can be safely imported. Prevent 4175 * hosts which don't have a hostid set from importing the pool. 
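 *
 * As a rough sketch of what follows: if an activity check is required
 * and this host has no hostid, the import is refused immediately with
 * EREMOTEIO. Otherwise spa_activity_check() watches the best uberblock
 * for changes over an interval derived from the MMP settings, roughly
 * zfs_multihost_interval * zfs_multihost_import_intervals plus a 0-25%
 * random factor (about 20-25 seconds with the default tunables), and
 * only if nothing changes is the pool considered inactive and safe to
 * import.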
4176 */ 4177 activity_check = spa_activity_check_required(spa, ub, label, 4178 spa->spa_config); 4179 if (activity_check) { 4180 if (ub->ub_mmp_magic == MMP_MAGIC && ub->ub_mmp_delay && 4181 spa_get_hostid(spa) == 0) { 4182 nvlist_free(label); 4183 fnvlist_add_uint64(spa->spa_load_info, 4184 ZPOOL_CONFIG_MMP_STATE, MMP_STATE_NO_HOSTID); 4185 return (spa_vdev_err(rvd, VDEV_AUX_ACTIVE, EREMOTEIO)); 4186 } 4187 4188 int error = 4189 spa_activity_check(spa, ub, spa->spa_config, B_TRUE); 4190 if (error) { 4191 nvlist_free(label); 4192 return (error); 4193 } 4194 4195 fnvlist_add_uint64(spa->spa_load_info, 4196 ZPOOL_CONFIG_MMP_STATE, MMP_STATE_INACTIVE); 4197 fnvlist_add_uint64(spa->spa_load_info, 4198 ZPOOL_CONFIG_MMP_TXG, ub->ub_txg); 4199 fnvlist_add_uint16(spa->spa_load_info, 4200 ZPOOL_CONFIG_MMP_SEQ, 4201 (MMP_SEQ_VALID(ub) ? MMP_SEQ(ub) : 0)); 4202 } 4203 4204 /* 4205 * If the pool has an unsupported version we can't open it. 4206 */ 4207 if (!SPA_VERSION_IS_SUPPORTED(ub->ub_version)) { 4208 nvlist_free(label); 4209 spa_load_failed(spa, "version %llu is not supported", 4210 (u_longlong_t)ub->ub_version); 4211 return (spa_vdev_err(rvd, VDEV_AUX_VERSION_NEWER, ENOTSUP)); 4212 } 4213 4214 if (ub->ub_version >= SPA_VERSION_FEATURES) { 4215 nvlist_t *features; 4216 4217 /* 4218 * If we weren't able to find what's necessary for reading the 4219 * MOS in the label, return failure. 4220 */ 4221 if (label == NULL) { 4222 spa_load_failed(spa, "label config unavailable"); 4223 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, 4224 ENXIO)); 4225 } 4226 4227 if (nvlist_lookup_nvlist(label, ZPOOL_CONFIG_FEATURES_FOR_READ, 4228 &features) != 0) { 4229 nvlist_free(label); 4230 spa_load_failed(spa, "invalid label: '%s' missing", 4231 ZPOOL_CONFIG_FEATURES_FOR_READ); 4232 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, 4233 ENXIO)); 4234 } 4235 4236 /* 4237 * Update our in-core representation with the definitive values 4238 * from the label. 4239 */ 4240 nvlist_free(spa->spa_label_features); 4241 spa->spa_label_features = fnvlist_dup(features); 4242 } 4243 4244 nvlist_free(label); 4245 4246 /* 4247 * Look through entries in the label nvlist's features_for_read. If 4248 * there is a feature listed there which we don't understand then we 4249 * cannot open a pool. 4250 */ 4251 if (ub->ub_version >= SPA_VERSION_FEATURES) { 4252 nvlist_t *unsup_feat; 4253 4254 unsup_feat = fnvlist_alloc(); 4255 4256 for (nvpair_t *nvp = nvlist_next_nvpair(spa->spa_label_features, 4257 NULL); nvp != NULL; 4258 nvp = nvlist_next_nvpair(spa->spa_label_features, nvp)) { 4259 if (!zfeature_is_supported(nvpair_name(nvp))) { 4260 fnvlist_add_string(unsup_feat, 4261 nvpair_name(nvp), ""); 4262 } 4263 } 4264 4265 if (!nvlist_empty(unsup_feat)) { 4266 fnvlist_add_nvlist(spa->spa_load_info, 4267 ZPOOL_CONFIG_UNSUP_FEAT, unsup_feat); 4268 nvlist_free(unsup_feat); 4269 spa_load_failed(spa, "some features are unsupported"); 4270 return (spa_vdev_err(rvd, VDEV_AUX_UNSUP_FEAT, 4271 ENOTSUP)); 4272 } 4273 4274 nvlist_free(unsup_feat); 4275 } 4276 4277 if (type != SPA_IMPORT_ASSEMBLE && spa->spa_config_splitting) { 4278 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 4279 spa_try_repair(spa, spa->spa_config); 4280 spa_config_exit(spa, SCL_ALL, FTAG); 4281 nvlist_free(spa->spa_config_splitting); 4282 spa->spa_config_splitting = NULL; 4283 } 4284 4285 /* 4286 * Initialize internal SPA structures. 
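 *
 * Among other things, spa_ld_select_uberblock_done() records the chosen
 * uberblock as our sync point: spa_ubsync, spa_first_txg (normally one
 * past the last synced txg, unless we are rewinding) and the initial
 * spa_claim_max_txg used later when ZIL blocks are claimed.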
4287 */ 4288 spa_ld_select_uberblock_done(spa, ub); 4289 4290 return (0); 4291 } 4292 4293 static int 4294 spa_ld_open_rootbp(spa_t *spa) 4295 { 4296 int error = 0; 4297 vdev_t *rvd = spa->spa_root_vdev; 4298 4299 error = dsl_pool_init(spa, spa->spa_first_txg, &spa->spa_dsl_pool); 4300 if (error != 0) { 4301 spa_load_failed(spa, "unable to open rootbp in dsl_pool_init " 4302 "[error=%d]", error); 4303 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 4304 } 4305 spa->spa_meta_objset = spa->spa_dsl_pool->dp_meta_objset; 4306 4307 return (0); 4308 } 4309 4310 static int 4311 spa_ld_trusted_config(spa_t *spa, spa_import_type_t type, 4312 boolean_t reloading) 4313 { 4314 vdev_t *mrvd, *rvd = spa->spa_root_vdev; 4315 nvlist_t *nv, *mos_config, *policy; 4316 int error = 0, copy_error; 4317 uint64_t healthy_tvds, healthy_tvds_mos; 4318 uint64_t mos_config_txg; 4319 4320 if (spa_dir_prop(spa, DMU_POOL_CONFIG, &spa->spa_config_object, B_TRUE) 4321 != 0) 4322 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 4323 4324 /* 4325 * If we're assembling a pool from a split, the config provided is 4326 * already trusted so there is nothing to do. 4327 */ 4328 if (type == SPA_IMPORT_ASSEMBLE) 4329 return (0); 4330 4331 healthy_tvds = spa_healthy_core_tvds(spa); 4332 4333 if (load_nvlist(spa, spa->spa_config_object, &mos_config) 4334 != 0) { 4335 spa_load_failed(spa, "unable to retrieve MOS config"); 4336 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 4337 } 4338 4339 /* 4340 * If we are doing an open, pool owner wasn't verified yet, thus do 4341 * the verification here. 4342 */ 4343 if (spa->spa_load_state == SPA_LOAD_OPEN) { 4344 error = spa_verify_host(spa, mos_config); 4345 if (error != 0) { 4346 nvlist_free(mos_config); 4347 return (error); 4348 } 4349 } 4350 4351 nv = fnvlist_lookup_nvlist(mos_config, ZPOOL_CONFIG_VDEV_TREE); 4352 4353 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 4354 4355 /* 4356 * Build a new vdev tree from the trusted config 4357 */ 4358 error = spa_config_parse(spa, &mrvd, nv, NULL, 0, VDEV_ALLOC_LOAD); 4359 if (error != 0) { 4360 nvlist_free(mos_config); 4361 spa_config_exit(spa, SCL_ALL, FTAG); 4362 spa_load_failed(spa, "spa_config_parse failed [error=%d]", 4363 error); 4364 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, error)); 4365 } 4366 4367 /* 4368 * Vdev paths in the MOS may be obsolete. If the untrusted config was 4369 * obtained by scanning /dev/dsk, then it will have the right vdev 4370 * paths. We update the trusted MOS config with this information. 4371 * We first try to copy the paths with vdev_copy_path_strict, which 4372 * succeeds only when both configs have exactly the same vdev tree. 4373 * If that fails, we fall back to a more flexible method that has a 4374 * best effort policy. 
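 *
 * For example, if the pool was last imported using /dev/sdb1-style
 * names but was found this time under /dev/disk/by-id, the MOS config
 * still carries the stale /dev/sdX paths; copying the scanned paths
 * over them here means the regenerated config (and the cachefile
 * written from it) refers to the devices by the names they were
 * actually opened with.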
4375 */ 4376 copy_error = vdev_copy_path_strict(rvd, mrvd); 4377 if (copy_error != 0 || spa_load_print_vdev_tree) { 4378 spa_load_note(spa, "provided vdev tree:"); 4379 vdev_dbgmsg_print_tree(rvd, 2); 4380 spa_load_note(spa, "MOS vdev tree:"); 4381 vdev_dbgmsg_print_tree(mrvd, 2); 4382 } 4383 if (copy_error != 0) { 4384 spa_load_note(spa, "vdev_copy_path_strict failed, falling " 4385 "back to vdev_copy_path_relaxed"); 4386 vdev_copy_path_relaxed(rvd, mrvd); 4387 } 4388 4389 vdev_close(rvd); 4390 vdev_free(rvd); 4391 spa->spa_root_vdev = mrvd; 4392 rvd = mrvd; 4393 spa_config_exit(spa, SCL_ALL, FTAG); 4394 4395 /* 4396 * If 'zpool import' used a cached config, then the on-disk hostid and 4397 * hostname may be different to the cached config in ways that should 4398 * prevent import. Userspace can't discover this without a scan, but 4399 * we know, so we add these values to LOAD_INFO so the caller can know 4400 * the difference. 4401 * 4402 * Note that we have to do this before the config is regenerated, 4403 * because the new config will have the hostid and hostname for this 4404 * host, in readiness for import. 4405 */ 4406 if (nvlist_exists(mos_config, ZPOOL_CONFIG_HOSTID)) 4407 fnvlist_add_uint64(spa->spa_load_info, ZPOOL_CONFIG_HOSTID, 4408 fnvlist_lookup_uint64(mos_config, ZPOOL_CONFIG_HOSTID)); 4409 if (nvlist_exists(mos_config, ZPOOL_CONFIG_HOSTNAME)) 4410 fnvlist_add_string(spa->spa_load_info, ZPOOL_CONFIG_HOSTNAME, 4411 fnvlist_lookup_string(mos_config, ZPOOL_CONFIG_HOSTNAME)); 4412 4413 /* 4414 * We will use spa_config if we decide to reload the spa or if spa_load 4415 * fails and we rewind. We must thus regenerate the config using the 4416 * MOS information with the updated paths. ZPOOL_LOAD_POLICY is used to 4417 * pass settings on how to load the pool and is not stored in the MOS. 4418 * We copy it over to our new, trusted config. 4419 */ 4420 mos_config_txg = fnvlist_lookup_uint64(mos_config, 4421 ZPOOL_CONFIG_POOL_TXG); 4422 nvlist_free(mos_config); 4423 mos_config = spa_config_generate(spa, NULL, mos_config_txg, B_FALSE); 4424 if (nvlist_lookup_nvlist(spa->spa_config, ZPOOL_LOAD_POLICY, 4425 &policy) == 0) 4426 fnvlist_add_nvlist(mos_config, ZPOOL_LOAD_POLICY, policy); 4427 spa_config_set(spa, mos_config); 4428 spa->spa_config_source = SPA_CONFIG_SRC_MOS; 4429 4430 /* 4431 * Now that we got the config from the MOS, we should be more strict 4432 * in checking blkptrs and can make assumptions about the consistency 4433 * of the vdev tree. spa_trust_config must be set to true before opening 4434 * vdevs in order for them to be writeable. 4435 */ 4436 spa->spa_trust_config = B_TRUE; 4437 4438 /* 4439 * Open and validate the new vdev tree 4440 */ 4441 error = spa_ld_open_vdevs(spa); 4442 if (error != 0) 4443 return (error); 4444 4445 error = spa_ld_validate_vdevs(spa); 4446 if (error != 0) 4447 return (error); 4448 4449 if (copy_error != 0 || spa_load_print_vdev_tree) { 4450 spa_load_note(spa, "final vdev tree:"); 4451 vdev_dbgmsg_print_tree(rvd, 2); 4452 } 4453 4454 if (spa->spa_load_state != SPA_LOAD_TRYIMPORT && 4455 !spa->spa_extreme_rewind && zfs_max_missing_tvds == 0) { 4456 /* 4457 * Sanity check to make sure that we are indeed loading the 4458 * latest uberblock. If we missed SPA_SYNC_MIN_VDEVS tvds 4459 * in the config provided and they happened to be the only ones 4460 * to have the latest uberblock, we could involuntarily perform 4461 * an extreme rewind. 
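 *
 * As an illustration: if the MOS config knows of five healthy
 * top-level vdevs but the config we were given only reached two of
 * them, the three we missed could be exactly the vdevs holding the
 * newest uberblocks, so we insist on reloading with the MOS config
 * (EAGAIN below) rather than silently importing an older txg.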
4462 */
4463 healthy_tvds_mos = spa_healthy_core_tvds(spa);
4464 if (healthy_tvds_mos - healthy_tvds >=
4465 SPA_SYNC_MIN_VDEVS) {
4466 spa_load_note(spa, "config provided misses too many "
4467 "top-level vdevs compared to MOS (%lld vs %lld). ",
4468 (u_longlong_t)healthy_tvds,
4469 (u_longlong_t)healthy_tvds_mos);
4470 spa_load_note(spa, "vdev tree:");
4471 vdev_dbgmsg_print_tree(rvd, 2);
4472 if (reloading) {
4473 spa_load_failed(spa, "config was already "
4474 "provided from MOS. Aborting.");
4475 return (spa_vdev_err(rvd,
4476 VDEV_AUX_CORRUPT_DATA, EIO));
4477 }
4478 spa_load_note(spa, "spa must be reloaded using MOS "
4479 "config");
4480 return (SET_ERROR(EAGAIN));
4481 }
4482 }
4483
4484 error = spa_check_for_missing_logs(spa);
4485 if (error != 0)
4486 return (spa_vdev_err(rvd, VDEV_AUX_BAD_GUID_SUM, ENXIO));
4487
4488 if (rvd->vdev_guid_sum != spa->spa_uberblock.ub_guid_sum) {
4489 spa_load_failed(spa, "uberblock guid sum doesn't match MOS "
4490 "guid sum (%llu != %llu)",
4491 (u_longlong_t)spa->spa_uberblock.ub_guid_sum,
4492 (u_longlong_t)rvd->vdev_guid_sum);
4493 return (spa_vdev_err(rvd, VDEV_AUX_BAD_GUID_SUM,
4494 ENXIO));
4495 }
4496
4497 return (0);
4498 }
4499
4500 static int
4501 spa_ld_open_indirect_vdev_metadata(spa_t *spa)
4502 {
4503 int error = 0;
4504 vdev_t *rvd = spa->spa_root_vdev;
4505
4506 /*
4507 * Everything that we read before spa_remove_init() must be stored
4508 * on concrete vdevs. Therefore we do this as early as possible.
4509 */
4510 error = spa_remove_init(spa);
4511 if (error != 0) {
4512 spa_load_failed(spa, "spa_remove_init failed [error=%d]",
4513 error);
4514 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
4515 }
4516
4517 /*
4518 * Retrieve information needed to condense indirect vdev mappings.
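 *
 * A condense that was interrupted by an export or a crash is resumed
 * later by the condense zthr once the pool is writeable; at this point
 * we only read enough state to know whether one was in progress and
 * where it left off.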
4519 */ 4520 error = spa_condense_init(spa); 4521 if (error != 0) { 4522 spa_load_failed(spa, "spa_condense_init failed [error=%d]", 4523 error); 4524 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, error)); 4525 } 4526 4527 return (0); 4528 } 4529 4530 static int 4531 spa_ld_check_features(spa_t *spa, boolean_t *missing_feat_writep) 4532 { 4533 int error = 0; 4534 vdev_t *rvd = spa->spa_root_vdev; 4535 4536 if (spa_version(spa) >= SPA_VERSION_FEATURES) { 4537 boolean_t missing_feat_read = B_FALSE; 4538 nvlist_t *unsup_feat, *enabled_feat; 4539 4540 if (spa_dir_prop(spa, DMU_POOL_FEATURES_FOR_READ, 4541 &spa->spa_feat_for_read_obj, B_TRUE) != 0) { 4542 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 4543 } 4544 4545 if (spa_dir_prop(spa, DMU_POOL_FEATURES_FOR_WRITE, 4546 &spa->spa_feat_for_write_obj, B_TRUE) != 0) { 4547 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 4548 } 4549 4550 if (spa_dir_prop(spa, DMU_POOL_FEATURE_DESCRIPTIONS, 4551 &spa->spa_feat_desc_obj, B_TRUE) != 0) { 4552 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 4553 } 4554 4555 enabled_feat = fnvlist_alloc(); 4556 unsup_feat = fnvlist_alloc(); 4557 4558 if (!spa_features_check(spa, B_FALSE, 4559 unsup_feat, enabled_feat)) 4560 missing_feat_read = B_TRUE; 4561 4562 if (spa_writeable(spa) || 4563 spa->spa_load_state == SPA_LOAD_TRYIMPORT) { 4564 if (!spa_features_check(spa, B_TRUE, 4565 unsup_feat, enabled_feat)) { 4566 *missing_feat_writep = B_TRUE; 4567 } 4568 } 4569 4570 fnvlist_add_nvlist(spa->spa_load_info, 4571 ZPOOL_CONFIG_ENABLED_FEAT, enabled_feat); 4572 4573 if (!nvlist_empty(unsup_feat)) { 4574 fnvlist_add_nvlist(spa->spa_load_info, 4575 ZPOOL_CONFIG_UNSUP_FEAT, unsup_feat); 4576 } 4577 4578 fnvlist_free(enabled_feat); 4579 fnvlist_free(unsup_feat); 4580 4581 if (!missing_feat_read) { 4582 fnvlist_add_boolean(spa->spa_load_info, 4583 ZPOOL_CONFIG_CAN_RDONLY); 4584 } 4585 4586 /* 4587 * If the state is SPA_LOAD_TRYIMPORT, our objective is 4588 * twofold: to determine whether the pool is available for 4589 * import in read-write mode and (if it is not) whether the 4590 * pool is available for import in read-only mode. If the pool 4591 * is available for import in read-write mode, it is displayed 4592 * as available in userland; if it is not available for import 4593 * in read-only mode, it is displayed as unavailable in 4594 * userland. If the pool is available for import in read-only 4595 * mode but not read-write mode, it is displayed as unavailable 4596 * in userland with a special note that the pool is actually 4597 * available for open in read-only mode. 4598 * 4599 * As a result, if the state is SPA_LOAD_TRYIMPORT and we are 4600 * missing a feature for write, we must first determine whether 4601 * the pool can be opened read-only before returning to 4602 * userland in order to know whether to display the 4603 * abovementioned note. 4604 */ 4605 if (missing_feat_read || (*missing_feat_writep && 4606 spa_writeable(spa))) { 4607 spa_load_failed(spa, "pool uses unsupported features"); 4608 return (spa_vdev_err(rvd, VDEV_AUX_UNSUP_FEAT, 4609 ENOTSUP)); 4610 } 4611 4612 /* 4613 * Load refcounts for ZFS features from disk into an in-memory 4614 * cache during SPA initialization. 
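 *
 * After this loop, queries such as spa_feature_is_active() can be
 * answered from spa_feat_refcount_cache[] without a ZAP lookup, and
 * features with no refcount on disk are cached as SPA_FEATURE_DISABLED.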
4615 */ 4616 for (spa_feature_t i = 0; i < SPA_FEATURES; i++) { 4617 uint64_t refcount; 4618 4619 error = feature_get_refcount_from_disk(spa, 4620 &spa_feature_table[i], &refcount); 4621 if (error == 0) { 4622 spa->spa_feat_refcount_cache[i] = refcount; 4623 } else if (error == ENOTSUP) { 4624 spa->spa_feat_refcount_cache[i] = 4625 SPA_FEATURE_DISABLED; 4626 } else { 4627 spa_load_failed(spa, "error getting refcount " 4628 "for feature %s [error=%d]", 4629 spa_feature_table[i].fi_guid, error); 4630 return (spa_vdev_err(rvd, 4631 VDEV_AUX_CORRUPT_DATA, EIO)); 4632 } 4633 } 4634 } 4635 4636 if (spa_feature_is_active(spa, SPA_FEATURE_ENABLED_TXG)) { 4637 if (spa_dir_prop(spa, DMU_POOL_FEATURE_ENABLED_TXG, 4638 &spa->spa_feat_enabled_txg_obj, B_TRUE) != 0) 4639 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 4640 } 4641 4642 /* 4643 * Encryption was added before bookmark_v2, even though bookmark_v2 4644 * is now a dependency. If this pool has encryption enabled without 4645 * bookmark_v2, trigger an errata message. 4646 */ 4647 if (spa_feature_is_enabled(spa, SPA_FEATURE_ENCRYPTION) && 4648 !spa_feature_is_enabled(spa, SPA_FEATURE_BOOKMARK_V2)) { 4649 spa->spa_errata = ZPOOL_ERRATA_ZOL_8308_ENCRYPTION; 4650 } 4651 4652 return (0); 4653 } 4654 4655 static int 4656 spa_ld_load_special_directories(spa_t *spa) 4657 { 4658 int error = 0; 4659 vdev_t *rvd = spa->spa_root_vdev; 4660 4661 spa->spa_is_initializing = B_TRUE; 4662 error = dsl_pool_open(spa->spa_dsl_pool); 4663 spa->spa_is_initializing = B_FALSE; 4664 if (error != 0) { 4665 spa_load_failed(spa, "dsl_pool_open failed [error=%d]", error); 4666 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 4667 } 4668 4669 return (0); 4670 } 4671 4672 static int 4673 spa_ld_get_props(spa_t *spa) 4674 { 4675 int error = 0; 4676 uint64_t obj; 4677 vdev_t *rvd = spa->spa_root_vdev; 4678 4679 /* Grab the checksum salt from the MOS. */ 4680 error = zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT, 4681 DMU_POOL_CHECKSUM_SALT, 1, 4682 sizeof (spa->spa_cksum_salt.zcs_bytes), 4683 spa->spa_cksum_salt.zcs_bytes); 4684 if (error == ENOENT) { 4685 /* Generate a new salt for subsequent use */ 4686 (void) random_get_pseudo_bytes(spa->spa_cksum_salt.zcs_bytes, 4687 sizeof (spa->spa_cksum_salt.zcs_bytes)); 4688 } else if (error != 0) { 4689 spa_load_failed(spa, "unable to retrieve checksum salt from " 4690 "MOS [error=%d]", error); 4691 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 4692 } 4693 4694 if (spa_dir_prop(spa, DMU_POOL_SYNC_BPOBJ, &obj, B_TRUE) != 0) 4695 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 4696 error = bpobj_open(&spa->spa_deferred_bpobj, spa->spa_meta_objset, obj); 4697 if (error != 0) { 4698 spa_load_failed(spa, "error opening deferred-frees bpobj " 4699 "[error=%d]", error); 4700 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 4701 } 4702 4703 /* 4704 * Load the bit that tells us to use the new accounting function 4705 * (raid-z deflation). If we have an older pool, this will not 4706 * be present. 4707 */ 4708 error = spa_dir_prop(spa, DMU_POOL_DEFLATE, &spa->spa_deflate, B_FALSE); 4709 if (error != 0 && error != ENOENT) 4710 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 4711 4712 error = spa_dir_prop(spa, DMU_POOL_CREATION_VERSION, 4713 &spa->spa_creation_version, B_FALSE); 4714 if (error != 0 && error != ENOENT) 4715 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 4716 4717 /* 4718 * Load the persistent error log. If we have an older pool, this will 4719 * not be present. 
4720 */ 4721 error = spa_dir_prop(spa, DMU_POOL_ERRLOG_LAST, &spa->spa_errlog_last, 4722 B_FALSE); 4723 if (error != 0 && error != ENOENT) 4724 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 4725 4726 error = spa_dir_prop(spa, DMU_POOL_ERRLOG_SCRUB, 4727 &spa->spa_errlog_scrub, B_FALSE); 4728 if (error != 0 && error != ENOENT) 4729 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 4730 4731 /* Load the last scrubbed txg. */ 4732 error = spa_dir_prop(spa, DMU_POOL_LAST_SCRUBBED_TXG, 4733 &spa->spa_scrubbed_last_txg, B_FALSE); 4734 if (error != 0 && error != ENOENT) 4735 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 4736 4737 /* 4738 * Load the livelist deletion field. If a livelist is queued for 4739 * deletion, indicate that in the spa 4740 */ 4741 error = spa_dir_prop(spa, DMU_POOL_DELETED_CLONES, 4742 &spa->spa_livelists_to_delete, B_FALSE); 4743 if (error != 0 && error != ENOENT) 4744 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 4745 4746 /* 4747 * Load the history object. If we have an older pool, this 4748 * will not be present. 4749 */ 4750 error = spa_dir_prop(spa, DMU_POOL_HISTORY, &spa->spa_history, B_FALSE); 4751 if (error != 0 && error != ENOENT) 4752 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 4753 4754 /* 4755 * Load the per-vdev ZAP map. If we have an older pool, this will not 4756 * be present; in this case, defer its creation to a later time to 4757 * avoid dirtying the MOS this early / out of sync context. See 4758 * spa_sync_config_object. 4759 */ 4760 4761 /* The sentinel is only available in the MOS config. */ 4762 nvlist_t *mos_config; 4763 if (load_nvlist(spa, spa->spa_config_object, &mos_config) != 0) { 4764 spa_load_failed(spa, "unable to retrieve MOS config"); 4765 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 4766 } 4767 4768 error = spa_dir_prop(spa, DMU_POOL_VDEV_ZAP_MAP, 4769 &spa->spa_all_vdev_zaps, B_FALSE); 4770 4771 if (error == ENOENT) { 4772 VERIFY(!nvlist_exists(mos_config, 4773 ZPOOL_CONFIG_HAS_PER_VDEV_ZAPS)); 4774 spa->spa_avz_action = AVZ_ACTION_INITIALIZE; 4775 ASSERT0(vdev_count_verify_zaps(spa->spa_root_vdev)); 4776 } else if (error != 0) { 4777 nvlist_free(mos_config); 4778 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 4779 } else if (!nvlist_exists(mos_config, ZPOOL_CONFIG_HAS_PER_VDEV_ZAPS)) { 4780 /* 4781 * An older version of ZFS overwrote the sentinel value, so 4782 * we have orphaned per-vdev ZAPs in the MOS. Defer their 4783 * destruction to later; see spa_sync_config_object. 4784 */ 4785 spa->spa_avz_action = AVZ_ACTION_DESTROY; 4786 /* 4787 * We're assuming that no vdevs have had their ZAPs created 4788 * before this. Better be sure of it. 
4789 */ 4790 ASSERT0(vdev_count_verify_zaps(spa->spa_root_vdev)); 4791 } 4792 nvlist_free(mos_config); 4793 4794 spa->spa_delegation = zpool_prop_default_numeric(ZPOOL_PROP_DELEGATION); 4795 4796 error = spa_dir_prop(spa, DMU_POOL_PROPS, &spa->spa_pool_props_object, 4797 B_FALSE); 4798 if (error && error != ENOENT) 4799 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 4800 4801 if (error == 0) { 4802 uint64_t autoreplace = 0; 4803 4804 spa_prop_find(spa, ZPOOL_PROP_BOOTFS, &spa->spa_bootfs); 4805 spa_prop_find(spa, ZPOOL_PROP_AUTOREPLACE, &autoreplace); 4806 spa_prop_find(spa, ZPOOL_PROP_DELEGATION, &spa->spa_delegation); 4807 spa_prop_find(spa, ZPOOL_PROP_FAILUREMODE, &spa->spa_failmode); 4808 spa_prop_find(spa, ZPOOL_PROP_AUTOEXPAND, &spa->spa_autoexpand); 4809 spa_prop_find(spa, ZPOOL_PROP_DEDUP_TABLE_QUOTA, 4810 &spa->spa_dedup_table_quota); 4811 spa_prop_find(spa, ZPOOL_PROP_MULTIHOST, &spa->spa_multihost); 4812 spa_prop_find(spa, ZPOOL_PROP_AUTOTRIM, &spa->spa_autotrim); 4813 spa->spa_autoreplace = (autoreplace != 0); 4814 } 4815 4816 /* 4817 * If we are importing a pool with missing top-level vdevs, 4818 * we enforce that the pool doesn't panic or get suspended on 4819 * error since the likelihood of missing data is extremely high. 4820 */ 4821 if (spa->spa_missing_tvds > 0 && 4822 spa->spa_failmode != ZIO_FAILURE_MODE_CONTINUE && 4823 spa->spa_load_state != SPA_LOAD_TRYIMPORT) { 4824 spa_load_note(spa, "forcing failmode to 'continue' " 4825 "as some top level vdevs are missing"); 4826 spa->spa_failmode = ZIO_FAILURE_MODE_CONTINUE; 4827 } 4828 4829 return (0); 4830 } 4831 4832 static int 4833 spa_ld_open_aux_vdevs(spa_t *spa, spa_import_type_t type) 4834 { 4835 int error = 0; 4836 vdev_t *rvd = spa->spa_root_vdev; 4837 4838 /* 4839 * If we're assembling the pool from the split-off vdevs of 4840 * an existing pool, we don't want to attach the spares & cache 4841 * devices. 4842 */ 4843 4844 /* 4845 * Load any hot spares for this pool. 4846 */ 4847 error = spa_dir_prop(spa, DMU_POOL_SPARES, &spa->spa_spares.sav_object, 4848 B_FALSE); 4849 if (error != 0 && error != ENOENT) 4850 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 4851 if (error == 0 && type != SPA_IMPORT_ASSEMBLE) { 4852 ASSERT(spa_version(spa) >= SPA_VERSION_SPARES); 4853 if (load_nvlist(spa, spa->spa_spares.sav_object, 4854 &spa->spa_spares.sav_config) != 0) { 4855 spa_load_failed(spa, "error loading spares nvlist"); 4856 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 4857 } 4858 4859 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 4860 spa_load_spares(spa); 4861 spa_config_exit(spa, SCL_ALL, FTAG); 4862 } else if (error == 0) { 4863 spa->spa_spares.sav_sync = B_TRUE; 4864 } 4865 4866 /* 4867 * Load any level 2 ARC devices for this pool. 
4868 */ 4869 error = spa_dir_prop(spa, DMU_POOL_L2CACHE, 4870 &spa->spa_l2cache.sav_object, B_FALSE); 4871 if (error != 0 && error != ENOENT) 4872 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 4873 if (error == 0 && type != SPA_IMPORT_ASSEMBLE) { 4874 ASSERT(spa_version(spa) >= SPA_VERSION_L2CACHE); 4875 if (load_nvlist(spa, spa->spa_l2cache.sav_object, 4876 &spa->spa_l2cache.sav_config) != 0) { 4877 spa_load_failed(spa, "error loading l2cache nvlist"); 4878 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 4879 } 4880 4881 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 4882 spa_load_l2cache(spa); 4883 spa_config_exit(spa, SCL_ALL, FTAG); 4884 } else if (error == 0) { 4885 spa->spa_l2cache.sav_sync = B_TRUE; 4886 } 4887 4888 return (0); 4889 } 4890 4891 static int 4892 spa_ld_load_vdev_metadata(spa_t *spa) 4893 { 4894 int error = 0; 4895 vdev_t *rvd = spa->spa_root_vdev; 4896 4897 /* 4898 * If the 'multihost' property is set, then never allow a pool to 4899 * be imported when the system hostid is zero. The exception to 4900 * this rule is zdb which is always allowed to access pools. 4901 */ 4902 if (spa_multihost(spa) && spa_get_hostid(spa) == 0 && 4903 (spa->spa_import_flags & ZFS_IMPORT_SKIP_MMP) == 0) { 4904 fnvlist_add_uint64(spa->spa_load_info, 4905 ZPOOL_CONFIG_MMP_STATE, MMP_STATE_NO_HOSTID); 4906 return (spa_vdev_err(rvd, VDEV_AUX_ACTIVE, EREMOTEIO)); 4907 } 4908 4909 /* 4910 * If the 'autoreplace' property is set, then post a resource notifying 4911 * the ZFS DE that it should not issue any faults for unopenable 4912 * devices. We also iterate over the vdevs, and post a sysevent for any 4913 * unopenable vdevs so that the normal autoreplace handler can take 4914 * over. 4915 */ 4916 if (spa->spa_autoreplace && spa->spa_load_state != SPA_LOAD_TRYIMPORT) { 4917 spa_check_removed(spa->spa_root_vdev); 4918 /* 4919 * For the import case, this is done in spa_import(), because 4920 * at this point we're using the spare definitions from 4921 * the MOS config, not necessarily from the userland config. 4922 */ 4923 if (spa->spa_load_state != SPA_LOAD_IMPORT) { 4924 spa_aux_check_removed(&spa->spa_spares); 4925 spa_aux_check_removed(&spa->spa_l2cache); 4926 } 4927 } 4928 4929 /* 4930 * Load the vdev metadata such as metaslabs, DTLs, spacemap object, etc. 4931 */ 4932 error = vdev_load(rvd); 4933 if (error != 0) { 4934 spa_load_failed(spa, "vdev_load failed [error=%d]", error); 4935 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, error)); 4936 } 4937 4938 error = spa_ld_log_spacemaps(spa); 4939 if (error != 0) { 4940 spa_load_failed(spa, "spa_ld_log_spacemaps failed [error=%d]", 4941 error); 4942 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, error)); 4943 } 4944 4945 /* 4946 * Propagate the leaf DTLs we just loaded all the way up the vdev tree. 
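 *
 * Roughly, vdev_dtl_reassess() recomputes each interior vdev's missing
 * txg ranges from its children; for example a mirror is only considered
 * to be missing a txg if every one of its children is missing it. These
 * aggregated DTLs are what later drive the resilver decisions made
 * near the end of the load.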
4947 */ 4948 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 4949 vdev_dtl_reassess(rvd, 0, 0, B_FALSE, B_FALSE); 4950 spa_config_exit(spa, SCL_ALL, FTAG); 4951 4952 return (0); 4953 } 4954 4955 static int 4956 spa_ld_load_dedup_tables(spa_t *spa) 4957 { 4958 int error = 0; 4959 vdev_t *rvd = spa->spa_root_vdev; 4960 4961 error = ddt_load(spa); 4962 if (error != 0) { 4963 spa_load_failed(spa, "ddt_load failed [error=%d]", error); 4964 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 4965 } 4966 4967 return (0); 4968 } 4969 4970 static int 4971 spa_ld_load_brt(spa_t *spa) 4972 { 4973 int error = 0; 4974 vdev_t *rvd = spa->spa_root_vdev; 4975 4976 error = brt_load(spa); 4977 if (error != 0) { 4978 spa_load_failed(spa, "brt_load failed [error=%d]", error); 4979 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 4980 } 4981 4982 return (0); 4983 } 4984 4985 static int 4986 spa_ld_verify_logs(spa_t *spa, spa_import_type_t type, const char **ereport) 4987 { 4988 vdev_t *rvd = spa->spa_root_vdev; 4989 4990 if (type != SPA_IMPORT_ASSEMBLE && spa_writeable(spa)) { 4991 boolean_t missing = spa_check_logs(spa); 4992 if (missing) { 4993 if (spa->spa_missing_tvds != 0) { 4994 spa_load_note(spa, "spa_check_logs failed " 4995 "so dropping the logs"); 4996 } else { 4997 *ereport = FM_EREPORT_ZFS_LOG_REPLAY; 4998 spa_load_failed(spa, "spa_check_logs failed"); 4999 return (spa_vdev_err(rvd, VDEV_AUX_BAD_LOG, 5000 ENXIO)); 5001 } 5002 } 5003 } 5004 5005 return (0); 5006 } 5007 5008 static int 5009 spa_ld_verify_pool_data(spa_t *spa) 5010 { 5011 int error = 0; 5012 vdev_t *rvd = spa->spa_root_vdev; 5013 5014 /* 5015 * We've successfully opened the pool, verify that we're ready 5016 * to start pushing transactions. 5017 */ 5018 if (spa->spa_load_state != SPA_LOAD_TRYIMPORT) { 5019 error = spa_load_verify(spa); 5020 if (error != 0) { 5021 spa_load_failed(spa, "spa_load_verify failed " 5022 "[error=%d]", error); 5023 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, 5024 error)); 5025 } 5026 } 5027 5028 return (0); 5029 } 5030 5031 static void 5032 spa_ld_claim_log_blocks(spa_t *spa) 5033 { 5034 dmu_tx_t *tx; 5035 dsl_pool_t *dp = spa_get_dsl(spa); 5036 5037 /* 5038 * Claim log blocks that haven't been committed yet. 5039 * This must all happen in a single txg. 5040 * Note: spa_claim_max_txg is updated by spa_claim_notify(), 5041 * invoked from zil_claim_log_block()'s i/o done callback. 5042 * Price of rollback is that we abandon the log. 5043 */ 5044 spa->spa_claiming = B_TRUE; 5045 5046 tx = dmu_tx_create_assigned(dp, spa_first_txg(spa)); 5047 (void) dmu_objset_find_dp(dp, dp->dp_root_dir_obj, 5048 zil_claim, tx, DS_FIND_CHILDREN); 5049 dmu_tx_commit(tx); 5050 5051 spa->spa_claiming = B_FALSE; 5052 5053 spa_set_log_state(spa, SPA_LOG_GOOD); 5054 } 5055 5056 static void 5057 spa_ld_check_for_config_update(spa_t *spa, uint64_t config_cache_txg, 5058 boolean_t update_config_cache) 5059 { 5060 vdev_t *rvd = spa->spa_root_vdev; 5061 int need_update = B_FALSE; 5062 5063 /* 5064 * If the config cache is stale, or we have uninitialized 5065 * metaslabs (see spa_vdev_add()), then update the config. 5066 * 5067 * If this is a verbatim import, trust the current 5068 * in-core spa_config and update the disk labels. 
5069 */ 5070 if (update_config_cache || config_cache_txg != spa->spa_config_txg || 5071 spa->spa_load_state == SPA_LOAD_IMPORT || 5072 spa->spa_load_state == SPA_LOAD_RECOVER || 5073 (spa->spa_import_flags & ZFS_IMPORT_VERBATIM)) 5074 need_update = B_TRUE; 5075 5076 for (int c = 0; c < rvd->vdev_children; c++) 5077 if (rvd->vdev_child[c]->vdev_ms_array == 0) 5078 need_update = B_TRUE; 5079 5080 /* 5081 * Update the config cache asynchronously in case we're the 5082 * root pool, in which case the config cache isn't writable yet. 5083 */ 5084 if (need_update) 5085 spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE); 5086 } 5087 5088 static void 5089 spa_ld_prepare_for_reload(spa_t *spa) 5090 { 5091 spa_mode_t mode = spa->spa_mode; 5092 int async_suspended = spa->spa_async_suspended; 5093 5094 spa_unload(spa); 5095 spa_deactivate(spa); 5096 spa_activate(spa, mode); 5097 5098 /* 5099 * We save the value of spa_async_suspended as it gets reset to 0 by 5100 * spa_unload(). We want to restore it back to the original value before 5101 * returning as we might be calling spa_async_resume() later. 5102 */ 5103 spa->spa_async_suspended = async_suspended; 5104 } 5105 5106 static int 5107 spa_ld_read_checkpoint_txg(spa_t *spa) 5108 { 5109 uberblock_t checkpoint; 5110 int error = 0; 5111 5112 ASSERT0(spa->spa_checkpoint_txg); 5113 ASSERT(MUTEX_HELD(&spa_namespace_lock) || 5114 spa->spa_load_thread == curthread); 5115 5116 error = zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT, 5117 DMU_POOL_ZPOOL_CHECKPOINT, sizeof (uint64_t), 5118 sizeof (uberblock_t) / sizeof (uint64_t), &checkpoint); 5119 5120 if (error == ENOENT) 5121 return (0); 5122 5123 if (error != 0) 5124 return (error); 5125 5126 ASSERT3U(checkpoint.ub_txg, !=, 0); 5127 ASSERT3U(checkpoint.ub_checkpoint_txg, !=, 0); 5128 ASSERT3U(checkpoint.ub_timestamp, !=, 0); 5129 spa->spa_checkpoint_txg = checkpoint.ub_txg; 5130 spa->spa_checkpoint_info.sci_timestamp = checkpoint.ub_timestamp; 5131 5132 return (0); 5133 } 5134 5135 static int 5136 spa_ld_mos_init(spa_t *spa, spa_import_type_t type) 5137 { 5138 int error = 0; 5139 5140 ASSERT(MUTEX_HELD(&spa_namespace_lock)); 5141 ASSERT(spa->spa_config_source != SPA_CONFIG_SRC_NONE); 5142 5143 /* 5144 * Never trust the config that is provided unless we are assembling 5145 * a pool following a split. 5146 * This means don't trust blkptrs and the vdev tree in general. This 5147 * also effectively puts the spa in read-only mode since 5148 * spa_writeable() checks for spa_trust_config to be true. 5149 * We will later load a trusted config from the MOS. 5150 */ 5151 if (type != SPA_IMPORT_ASSEMBLE) 5152 spa->spa_trust_config = B_FALSE; 5153 5154 /* 5155 * Parse the config provided to create a vdev tree. 5156 */ 5157 error = spa_ld_parse_config(spa, type); 5158 if (error != 0) 5159 return (error); 5160 5161 spa_import_progress_add(spa); 5162 5163 /* 5164 * Now that we have the vdev tree, try to open each vdev. This involves 5165 * opening the underlying physical device, retrieving its geometry and 5166 * probing the vdev with a dummy I/O. The state of each vdev will be set 5167 * based on the success of those operations. After this we'll be ready 5168 * to read from the vdevs. 5169 */ 5170 error = spa_ld_open_vdevs(spa); 5171 if (error != 0) 5172 return (error); 5173 5174 /* 5175 * Read the label of each vdev and make sure that the GUIDs stored 5176 * there match the GUIDs in the config provided. 
5177 * If we're assembling a new pool that's been split off from an 5178 * existing pool, the labels haven't yet been updated so we skip 5179 * validation for now. 5180 */ 5181 if (type != SPA_IMPORT_ASSEMBLE) { 5182 error = spa_ld_validate_vdevs(spa); 5183 if (error != 0) 5184 return (error); 5185 } 5186 5187 /* 5188 * Read all vdev labels to find the best uberblock (i.e. latest, 5189 * unless spa_load_max_txg is set) and store it in spa_uberblock. We 5190 * get the list of features required to read blkptrs in the MOS from 5191 * the vdev label with the best uberblock and verify that our version 5192 * of zfs supports them all. 5193 */ 5194 error = spa_ld_select_uberblock(spa, type); 5195 if (error != 0) 5196 return (error); 5197 5198 /* 5199 * Pass that uberblock to the dsl_pool layer which will open the root 5200 * blkptr. This blkptr points to the latest version of the MOS and will 5201 * allow us to read its contents. 5202 */ 5203 error = spa_ld_open_rootbp(spa); 5204 if (error != 0) 5205 return (error); 5206 5207 return (0); 5208 } 5209 5210 static int 5211 spa_ld_checkpoint_rewind(spa_t *spa) 5212 { 5213 uberblock_t checkpoint; 5214 int error = 0; 5215 5216 ASSERT(MUTEX_HELD(&spa_namespace_lock)); 5217 ASSERT(spa->spa_import_flags & ZFS_IMPORT_CHECKPOINT); 5218 5219 error = zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT, 5220 DMU_POOL_ZPOOL_CHECKPOINT, sizeof (uint64_t), 5221 sizeof (uberblock_t) / sizeof (uint64_t), &checkpoint); 5222 5223 if (error != 0) { 5224 spa_load_failed(spa, "unable to retrieve checkpointed " 5225 "uberblock from the MOS config [error=%d]", error); 5226 5227 if (error == ENOENT) 5228 error = ZFS_ERR_NO_CHECKPOINT; 5229 5230 return (error); 5231 } 5232 5233 ASSERT3U(checkpoint.ub_txg, <, spa->spa_uberblock.ub_txg); 5234 ASSERT3U(checkpoint.ub_txg, ==, checkpoint.ub_checkpoint_txg); 5235 5236 /* 5237 * We need to update the txg and timestamp of the checkpointed 5238 * uberblock to be higher than the latest one. This ensures that 5239 * the checkpointed uberblock is selected if we were to close and 5240 * reopen the pool right after we've written it in the vdev labels. 5241 * (also see block comment in vdev_uberblock_compare) 5242 */ 5243 checkpoint.ub_txg = spa->spa_uberblock.ub_txg + 1; 5244 checkpoint.ub_timestamp = gethrestime_sec(); 5245 5246 /* 5247 * Set current uberblock to be the checkpointed uberblock. 5248 */ 5249 spa->spa_uberblock = checkpoint; 5250 5251 /* 5252 * If we are doing a normal rewind, then the pool is open for 5253 * writing and we sync the "updated" checkpointed uberblock to 5254 * disk. Once this is done, we've basically rewound the whole 5255 * pool and there is no way back. 5256 * 5257 * There are cases when we don't want to attempt and sync the 5258 * checkpointed uberblock to disk because we are opening a 5259 * pool as read-only. Specifically, verifying the checkpointed 5260 * state with zdb, and importing the checkpointed state to get 5261 * a "preview" of its content. 
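 *
 * In those read-only cases the rewound uberblock exists only in
 * memory; the on-disk labels keep pointing at the newer,
 * post-checkpoint state, so merely inspecting the checkpoint does not
 * permanently rewind anything.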
5262 */ 5263 if (spa_writeable(spa)) { 5264 vdev_t *rvd = spa->spa_root_vdev; 5265 5266 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 5267 vdev_t *svd[SPA_SYNC_MIN_VDEVS] = { NULL }; 5268 int svdcount = 0; 5269 int children = rvd->vdev_children; 5270 int c0 = random_in_range(children); 5271 5272 for (int c = 0; c < children; c++) { 5273 vdev_t *vd = rvd->vdev_child[(c0 + c) % children]; 5274 5275 /* Stop when revisiting the first vdev */ 5276 if (c > 0 && svd[0] == vd) 5277 break; 5278 5279 if (vd->vdev_ms_array == 0 || vd->vdev_islog || 5280 !vdev_is_concrete(vd)) 5281 continue; 5282 5283 svd[svdcount++] = vd; 5284 if (svdcount == SPA_SYNC_MIN_VDEVS) 5285 break; 5286 } 5287 error = vdev_config_sync(svd, svdcount, spa->spa_first_txg); 5288 if (error == 0) 5289 spa->spa_last_synced_guid = rvd->vdev_guid; 5290 spa_config_exit(spa, SCL_ALL, FTAG); 5291 5292 if (error != 0) { 5293 spa_load_failed(spa, "failed to write checkpointed " 5294 "uberblock to the vdev labels [error=%d]", error); 5295 return (error); 5296 } 5297 } 5298 5299 return (0); 5300 } 5301 5302 static int 5303 spa_ld_mos_with_trusted_config(spa_t *spa, spa_import_type_t type, 5304 boolean_t *update_config_cache) 5305 { 5306 int error; 5307 5308 /* 5309 * Parse the config for pool, open and validate vdevs, 5310 * select an uberblock, and use that uberblock to open 5311 * the MOS. 5312 */ 5313 error = spa_ld_mos_init(spa, type); 5314 if (error != 0) 5315 return (error); 5316 5317 /* 5318 * Retrieve the trusted config stored in the MOS and use it to create 5319 * a new, exact version of the vdev tree, then reopen all vdevs. 5320 */ 5321 error = spa_ld_trusted_config(spa, type, B_FALSE); 5322 if (error == EAGAIN) { 5323 if (update_config_cache != NULL) 5324 *update_config_cache = B_TRUE; 5325 5326 /* 5327 * Redo the loading process with the trusted config if it is 5328 * too different from the untrusted config. 5329 */ 5330 spa_ld_prepare_for_reload(spa); 5331 spa_load_note(spa, "RELOADING"); 5332 error = spa_ld_mos_init(spa, type); 5333 if (error != 0) 5334 return (error); 5335 5336 error = spa_ld_trusted_config(spa, type, B_TRUE); 5337 if (error != 0) 5338 return (error); 5339 5340 } else if (error != 0) { 5341 return (error); 5342 } 5343 5344 return (0); 5345 } 5346 5347 /* 5348 * Load an existing storage pool, using the config provided. This config 5349 * describes which vdevs are part of the pool and is later validated against 5350 * partial configs present in each vdev's label and an entire copy of the 5351 * config stored in the MOS. 5352 */ 5353 static int 5354 spa_load_impl(spa_t *spa, spa_import_type_t type, const char **ereport) 5355 { 5356 int error = 0; 5357 boolean_t missing_feat_write = B_FALSE; 5358 boolean_t checkpoint_rewind = 5359 (spa->spa_import_flags & ZFS_IMPORT_CHECKPOINT); 5360 boolean_t update_config_cache = B_FALSE; 5361 hrtime_t load_start = gethrtime(); 5362 5363 ASSERT(MUTEX_HELD(&spa_namespace_lock)); 5364 ASSERT(spa->spa_config_source != SPA_CONFIG_SRC_NONE); 5365 5366 spa_load_note(spa, "LOADING"); 5367 5368 error = spa_ld_mos_with_trusted_config(spa, type, &update_config_cache); 5369 if (error != 0) 5370 return (error); 5371 5372 /* 5373 * If we are rewinding to the checkpoint then we need to repeat 5374 * everything we've done so far in this function but this time 5375 * selecting the checkpointed uberblock and using that to open 5376 * the MOS. 5377 */ 5378 if (checkpoint_rewind) { 5379 /* 5380 * If we are rewinding to the checkpoint update config cache 5381 * anyway. 
5382 */ 5383 update_config_cache = B_TRUE; 5384 5385 /* 5386 * Extract the checkpointed uberblock from the current MOS 5387 * and use this as the pool's uberblock from now on. If the 5388 * pool is imported as writeable we also write the checkpoint 5389 * uberblock to the labels, making the rewind permanent. 5390 */ 5391 error = spa_ld_checkpoint_rewind(spa); 5392 if (error != 0) 5393 return (error); 5394 5395 /* 5396 * Redo the loading process again with the 5397 * checkpointed uberblock. 5398 */ 5399 spa_ld_prepare_for_reload(spa); 5400 spa_load_note(spa, "LOADING checkpointed uberblock"); 5401 error = spa_ld_mos_with_trusted_config(spa, type, NULL); 5402 if (error != 0) 5403 return (error); 5404 } 5405 5406 /* 5407 * Drop the namespace lock for the rest of the function. 5408 */ 5409 spa->spa_load_thread = curthread; 5410 mutex_exit(&spa_namespace_lock); 5411 5412 /* 5413 * Retrieve the checkpoint txg if the pool has a checkpoint. 5414 */ 5415 spa_import_progress_set_notes(spa, "Loading checkpoint txg"); 5416 error = spa_ld_read_checkpoint_txg(spa); 5417 if (error != 0) 5418 goto fail; 5419 5420 /* 5421 * Retrieve the mapping of indirect vdevs. Those vdevs were removed 5422 * from the pool and their contents were re-mapped to other vdevs. Note 5423 * that everything that we read before this step must have been 5424 * rewritten on concrete vdevs after the last device removal was 5425 * initiated. Otherwise we could be reading from indirect vdevs before 5426 * we have loaded their mappings. 5427 */ 5428 spa_import_progress_set_notes(spa, "Loading indirect vdev metadata"); 5429 error = spa_ld_open_indirect_vdev_metadata(spa); 5430 if (error != 0) 5431 goto fail; 5432 5433 /* 5434 * Retrieve the full list of active features from the MOS and check if 5435 * they are all supported. 5436 */ 5437 spa_import_progress_set_notes(spa, "Checking feature flags"); 5438 error = spa_ld_check_features(spa, &missing_feat_write); 5439 if (error != 0) 5440 goto fail; 5441 5442 /* 5443 * Load several special directories from the MOS needed by the dsl_pool 5444 * layer. 5445 */ 5446 spa_import_progress_set_notes(spa, "Loading special MOS directories"); 5447 error = spa_ld_load_special_directories(spa); 5448 if (error != 0) 5449 goto fail; 5450 5451 /* 5452 * Retrieve pool properties from the MOS. 5453 */ 5454 spa_import_progress_set_notes(spa, "Loading properties"); 5455 error = spa_ld_get_props(spa); 5456 if (error != 0) 5457 goto fail; 5458 5459 /* 5460 * Retrieve the list of auxiliary devices - cache devices and spares - 5461 * and open them. 5462 */ 5463 spa_import_progress_set_notes(spa, "Loading AUX vdevs"); 5464 error = spa_ld_open_aux_vdevs(spa, type); 5465 if (error != 0) 5466 goto fail; 5467 5468 /* 5469 * Load the metadata for all vdevs. Also check if unopenable devices 5470 * should be autoreplaced. 5471 */ 5472 spa_import_progress_set_notes(spa, "Loading vdev metadata"); 5473 error = spa_ld_load_vdev_metadata(spa); 5474 if (error != 0) 5475 goto fail; 5476 5477 spa_import_progress_set_notes(spa, "Loading dedup tables"); 5478 error = spa_ld_load_dedup_tables(spa); 5479 if (error != 0) 5480 goto fail; 5481 5482 spa_import_progress_set_notes(spa, "Loading BRT"); 5483 error = spa_ld_load_brt(spa); 5484 if (error != 0) 5485 goto fail; 5486 5487 /* 5488 * Verify the logs now to make sure we don't have any unexpected errors 5489 * when we claim log blocks later. 
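 *
 * spa_check_logs() (called from spa_ld_verify_logs() below) walks each
 * dataset's ZIL chain; a failure here, with no missing top-level vdevs
 * to excuse it, aborts the load with a "bad log" ereport rather than
 * letting the problem surface halfway through claiming, once the pool
 * is already being written to.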
5490 */ 5491 spa_import_progress_set_notes(spa, "Verifying Log Devices"); 5492 error = spa_ld_verify_logs(spa, type, ereport); 5493 if (error != 0) 5494 goto fail; 5495 5496 if (missing_feat_write) { 5497 ASSERT(spa->spa_load_state == SPA_LOAD_TRYIMPORT); 5498 5499 /* 5500 * At this point, we know that we can open the pool in 5501 * read-only mode but not read-write mode. We now have enough 5502 * information and can return to userland. 5503 */ 5504 error = spa_vdev_err(spa->spa_root_vdev, VDEV_AUX_UNSUP_FEAT, 5505 ENOTSUP); 5506 goto fail; 5507 } 5508 5509 /* 5510 * Traverse the last txgs to make sure the pool was left off in a safe 5511 * state. When performing an extreme rewind, we verify the whole pool, 5512 * which can take a very long time. 5513 */ 5514 spa_import_progress_set_notes(spa, "Verifying pool data"); 5515 error = spa_ld_verify_pool_data(spa); 5516 if (error != 0) 5517 goto fail; 5518 5519 /* 5520 * Calculate the deflated space for the pool. This must be done before 5521 * we write anything to the pool because we'd need to update the space 5522 * accounting using the deflated sizes. 5523 */ 5524 spa_import_progress_set_notes(spa, "Calculating deflated space"); 5525 spa_update_dspace(spa); 5526 5527 /* 5528 * We have now retrieved all the information we needed to open the 5529 * pool. If we are importing the pool in read-write mode, a few 5530 * additional steps must be performed to finish the import. 5531 */ 5532 spa_import_progress_set_notes(spa, "Starting import"); 5533 if (spa_writeable(spa) && (spa->spa_load_state == SPA_LOAD_RECOVER || 5534 spa->spa_load_max_txg == UINT64_MAX)) { 5535 uint64_t config_cache_txg = spa->spa_config_txg; 5536 5537 ASSERT(spa->spa_load_state != SPA_LOAD_TRYIMPORT); 5538 5539 /* 5540 * Before we do any zio_write's, complete the raidz expansion 5541 * scratch space copying, if necessary. 5542 */ 5543 if (RRSS_GET_STATE(&spa->spa_uberblock) == RRSS_SCRATCH_VALID) 5544 vdev_raidz_reflow_copy_scratch(spa); 5545 5546 /* 5547 * In case of a checkpoint rewind, log the original txg 5548 * of the checkpointed uberblock. 5549 */ 5550 if (checkpoint_rewind) { 5551 spa_history_log_internal(spa, "checkpoint rewind", 5552 NULL, "rewound state to txg=%llu", 5553 (u_longlong_t)spa->spa_uberblock.ub_checkpoint_txg); 5554 } 5555 5556 spa_import_progress_set_notes(spa, "Claiming ZIL blocks"); 5557 /* 5558 * Traverse the ZIL and claim all blocks. 5559 */ 5560 spa_ld_claim_log_blocks(spa); 5561 5562 /* 5563 * Kick-off the syncing thread. 5564 */ 5565 spa->spa_sync_on = B_TRUE; 5566 txg_sync_start(spa->spa_dsl_pool); 5567 mmp_thread_start(spa); 5568 5569 /* 5570 * Wait for all claims to sync. We sync up to the highest 5571 * claimed log block birth time so that claimed log blocks 5572 * don't appear to be from the future. spa_claim_max_txg 5573 * will have been set for us by ZIL traversal operations 5574 * performed above. 5575 */ 5576 spa_import_progress_set_notes(spa, "Syncing ZIL claims"); 5577 txg_wait_synced(spa->spa_dsl_pool, spa->spa_claim_max_txg); 5578 5579 /* 5580 * Check if we need to request an update of the config. On the 5581 * next sync, we would update the config stored in vdev labels 5582 * and the cachefile (by default /etc/zfs/zpool.cache). 5583 */ 5584 spa_import_progress_set_notes(spa, "Updating configs"); 5585 spa_ld_check_for_config_update(spa, config_cache_txg, 5586 update_config_cache); 5587 5588 /* 5589 * Check if a rebuild was in progress and if so resume it. 5590 * Then check all DTLs to see if anything needs resilvering. 
5591 * The resilver will be deferred if a rebuild was started. 5592 */ 5593 spa_import_progress_set_notes(spa, "Starting resilvers"); 5594 if (vdev_rebuild_active(spa->spa_root_vdev)) { 5595 vdev_rebuild_restart(spa); 5596 } else if (!dsl_scan_resilvering(spa->spa_dsl_pool) && 5597 vdev_resilver_needed(spa->spa_root_vdev, NULL, NULL)) { 5598 spa_async_request(spa, SPA_ASYNC_RESILVER); 5599 } 5600 5601 /* 5602 * Log the fact that we booted up (so that we can detect if 5603 * we rebooted in the middle of an operation). 5604 */ 5605 spa_history_log_version(spa, "open", NULL); 5606 5607 spa_import_progress_set_notes(spa, 5608 "Restarting device removals"); 5609 spa_restart_removal(spa); 5610 spa_spawn_aux_threads(spa); 5611 5612 /* 5613 * Delete any inconsistent datasets. 5614 * 5615 * Note: 5616 * Since we may be issuing deletes for clones here, 5617 * we make sure to do so after we've spawned all the 5618 * auxiliary threads above (from which the livelist 5619 * deletion zthr is part of). 5620 */ 5621 spa_import_progress_set_notes(spa, 5622 "Cleaning up inconsistent objsets"); 5623 (void) dmu_objset_find(spa_name(spa), 5624 dsl_destroy_inconsistent, NULL, DS_FIND_CHILDREN); 5625 5626 /* 5627 * Clean up any stale temporary dataset userrefs. 5628 */ 5629 spa_import_progress_set_notes(spa, 5630 "Cleaning up temporary userrefs"); 5631 dsl_pool_clean_tmp_userrefs(spa->spa_dsl_pool); 5632 5633 spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER); 5634 spa_import_progress_set_notes(spa, "Restarting initialize"); 5635 vdev_initialize_restart(spa->spa_root_vdev); 5636 spa_import_progress_set_notes(spa, "Restarting TRIM"); 5637 vdev_trim_restart(spa->spa_root_vdev); 5638 vdev_autotrim_restart(spa); 5639 spa_config_exit(spa, SCL_CONFIG, FTAG); 5640 spa_import_progress_set_notes(spa, "Finished importing"); 5641 } 5642 zio_handle_import_delay(spa, gethrtime() - load_start); 5643 5644 spa_import_progress_remove(spa_guid(spa)); 5645 spa_async_request(spa, SPA_ASYNC_L2CACHE_REBUILD); 5646 5647 spa_load_note(spa, "LOADED"); 5648 fail: 5649 mutex_enter(&spa_namespace_lock); 5650 spa->spa_load_thread = NULL; 5651 cv_broadcast(&spa_namespace_cv); 5652 5653 return (error); 5654 5655 } 5656 5657 static int 5658 spa_load_retry(spa_t *spa, spa_load_state_t state) 5659 { 5660 spa_mode_t mode = spa->spa_mode; 5661 5662 spa_unload(spa); 5663 spa_deactivate(spa); 5664 5665 spa->spa_load_max_txg = spa->spa_uberblock.ub_txg - 1; 5666 5667 spa_activate(spa, mode); 5668 spa_async_suspend(spa); 5669 5670 spa_load_note(spa, "spa_load_retry: rewind, max txg: %llu", 5671 (u_longlong_t)spa->spa_load_max_txg); 5672 5673 return (spa_load(spa, state, SPA_IMPORT_EXISTING)); 5674 } 5675 5676 /* 5677 * If spa_load() fails this function will try loading prior txg's. If 5678 * 'state' is SPA_LOAD_RECOVER and one of these loads succeeds the pool 5679 * will be rewound to that txg. If 'state' is not SPA_LOAD_RECOVER this 5680 * function will not rewind the pool and will return the same error as 5681 * spa_load(). 
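 *
 * As a rough illustration: if the newest uberblock is at txg 1000, a normal
 * recovery rewind only retries uberblocks no older than txg
 * (1000 - TXG_DEFER_SIZE); with ZPOOL_EXTREME_REWIND the search may walk
 * all the way back to TXG_INITIAL, which is why extreme rewind can verify
 * the whole pool and take much longer.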
5682 */ 5683 static int 5684 spa_load_best(spa_t *spa, spa_load_state_t state, uint64_t max_request, 5685 int rewind_flags) 5686 { 5687 nvlist_t *loadinfo = NULL; 5688 nvlist_t *config = NULL; 5689 int load_error, rewind_error; 5690 uint64_t safe_rewind_txg; 5691 uint64_t min_txg; 5692 5693 if (spa->spa_load_txg && state == SPA_LOAD_RECOVER) { 5694 spa->spa_load_max_txg = spa->spa_load_txg; 5695 spa_set_log_state(spa, SPA_LOG_CLEAR); 5696 } else { 5697 spa->spa_load_max_txg = max_request; 5698 if (max_request != UINT64_MAX) 5699 spa->spa_extreme_rewind = B_TRUE; 5700 } 5701 5702 load_error = rewind_error = spa_load(spa, state, SPA_IMPORT_EXISTING); 5703 if (load_error == 0) 5704 return (0); 5705 if (load_error == ZFS_ERR_NO_CHECKPOINT) { 5706 /* 5707 * When attempting checkpoint-rewind on a pool with no 5708 * checkpoint, we should not attempt to load uberblocks 5709 * from previous txgs when spa_load fails. 5710 */ 5711 ASSERT(spa->spa_import_flags & ZFS_IMPORT_CHECKPOINT); 5712 spa_import_progress_remove(spa_guid(spa)); 5713 return (load_error); 5714 } 5715 5716 if (spa->spa_root_vdev != NULL) 5717 config = spa_config_generate(spa, NULL, -1ULL, B_TRUE); 5718 5719 spa->spa_last_ubsync_txg = spa->spa_uberblock.ub_txg; 5720 spa->spa_last_ubsync_txg_ts = spa->spa_uberblock.ub_timestamp; 5721 5722 if (rewind_flags & ZPOOL_NEVER_REWIND) { 5723 nvlist_free(config); 5724 spa_import_progress_remove(spa_guid(spa)); 5725 return (load_error); 5726 } 5727 5728 if (state == SPA_LOAD_RECOVER) { 5729 /* Price of rolling back is discarding txgs, including log */ 5730 spa_set_log_state(spa, SPA_LOG_CLEAR); 5731 } else { 5732 /* 5733 * If we aren't rolling back save the load info from our first 5734 * import attempt so that we can restore it after attempting 5735 * to rewind. 5736 */ 5737 loadinfo = spa->spa_load_info; 5738 spa->spa_load_info = fnvlist_alloc(); 5739 } 5740 5741 spa->spa_load_max_txg = spa->spa_last_ubsync_txg; 5742 safe_rewind_txg = spa->spa_last_ubsync_txg - TXG_DEFER_SIZE; 5743 min_txg = (rewind_flags & ZPOOL_EXTREME_REWIND) ? 5744 TXG_INITIAL : safe_rewind_txg; 5745 5746 /* 5747 * Continue as long as we're finding errors, we're still within 5748 * the acceptable rewind range, and we're still finding uberblocks 5749 */ 5750 while (rewind_error && spa->spa_uberblock.ub_txg >= min_txg && 5751 spa->spa_uberblock.ub_txg <= spa->spa_load_max_txg) { 5752 if (spa->spa_load_max_txg < safe_rewind_txg) 5753 spa->spa_extreme_rewind = B_TRUE; 5754 rewind_error = spa_load_retry(spa, state); 5755 } 5756 5757 spa->spa_extreme_rewind = B_FALSE; 5758 spa->spa_load_max_txg = UINT64_MAX; 5759 5760 if (config && (rewind_error || state != SPA_LOAD_RECOVER)) 5761 spa_config_set(spa, config); 5762 else 5763 nvlist_free(config); 5764 5765 if (state == SPA_LOAD_RECOVER) { 5766 ASSERT3P(loadinfo, ==, NULL); 5767 spa_import_progress_remove(spa_guid(spa)); 5768 return (rewind_error); 5769 } else { 5770 /* Store the rewind info as part of the initial load info */ 5771 fnvlist_add_nvlist(loadinfo, ZPOOL_CONFIG_REWIND_INFO, 5772 spa->spa_load_info); 5773 5774 /* Restore the initial load info */ 5775 fnvlist_free(spa->spa_load_info); 5776 spa->spa_load_info = loadinfo; 5777 5778 spa_import_progress_remove(spa_guid(spa)); 5779 return (load_error); 5780 } 5781 } 5782 5783 /* 5784 * Pool Open/Import 5785 * 5786 * The import case is identical to an open except that the configuration is sent 5787 * down from userland, instead of grabbed from the configuration cache. 
For the 5788 * case of an open, the pool configuration will exist in the 5789 * POOL_STATE_UNINITIALIZED state. 5790 * 5791 * The stats information (gen/count/ustats) is used to gather vdev statistics at 5792 * the same time open the pool, without having to keep around the spa_t in some 5793 * ambiguous state. 5794 */ 5795 static int 5796 spa_open_common(const char *pool, spa_t **spapp, const void *tag, 5797 nvlist_t *nvpolicy, nvlist_t **config) 5798 { 5799 spa_t *spa; 5800 spa_load_state_t state = SPA_LOAD_OPEN; 5801 int error; 5802 int locked = B_FALSE; 5803 int firstopen = B_FALSE; 5804 5805 *spapp = NULL; 5806 5807 /* 5808 * As disgusting as this is, we need to support recursive calls to this 5809 * function because dsl_dir_open() is called during spa_load(), and ends 5810 * up calling spa_open() again. The real fix is to figure out how to 5811 * avoid dsl_dir_open() calling this in the first place. 5812 */ 5813 if (MUTEX_NOT_HELD(&spa_namespace_lock)) { 5814 mutex_enter(&spa_namespace_lock); 5815 locked = B_TRUE; 5816 } 5817 5818 if ((spa = spa_lookup(pool)) == NULL) { 5819 if (locked) 5820 mutex_exit(&spa_namespace_lock); 5821 return (SET_ERROR(ENOENT)); 5822 } 5823 5824 if (spa->spa_state == POOL_STATE_UNINITIALIZED) { 5825 zpool_load_policy_t policy; 5826 5827 firstopen = B_TRUE; 5828 5829 zpool_get_load_policy(nvpolicy ? nvpolicy : spa->spa_config, 5830 &policy); 5831 if (policy.zlp_rewind & ZPOOL_DO_REWIND) 5832 state = SPA_LOAD_RECOVER; 5833 5834 spa_activate(spa, spa_mode_global); 5835 5836 if (state != SPA_LOAD_RECOVER) 5837 spa->spa_last_ubsync_txg = spa->spa_load_txg = 0; 5838 spa->spa_config_source = SPA_CONFIG_SRC_CACHEFILE; 5839 5840 zfs_dbgmsg("spa_open_common: opening %s", pool); 5841 error = spa_load_best(spa, state, policy.zlp_txg, 5842 policy.zlp_rewind); 5843 5844 if (error == EBADF) { 5845 /* 5846 * If vdev_validate() returns failure (indicated by 5847 * EBADF), it indicates that one of the vdevs indicates 5848 * that the pool has been exported or destroyed. If 5849 * this is the case, the config cache is out of sync and 5850 * we should remove the pool from the namespace. 5851 */ 5852 spa_unload(spa); 5853 spa_deactivate(spa); 5854 spa_write_cachefile(spa, B_TRUE, B_TRUE, B_FALSE); 5855 spa_remove(spa); 5856 if (locked) 5857 mutex_exit(&spa_namespace_lock); 5858 return (SET_ERROR(ENOENT)); 5859 } 5860 5861 if (error) { 5862 /* 5863 * We can't open the pool, but we still have useful 5864 * information: the state of each vdev after the 5865 * attempted vdev_open(). Return this to the user. 5866 */ 5867 if (config != NULL && spa->spa_config) { 5868 *config = fnvlist_dup(spa->spa_config); 5869 fnvlist_add_nvlist(*config, 5870 ZPOOL_CONFIG_LOAD_INFO, 5871 spa->spa_load_info); 5872 } 5873 spa_unload(spa); 5874 spa_deactivate(spa); 5875 spa->spa_last_open_failed = error; 5876 if (locked) 5877 mutex_exit(&spa_namespace_lock); 5878 *spapp = NULL; 5879 return (error); 5880 } 5881 } 5882 5883 spa_open_ref(spa, tag); 5884 5885 if (config != NULL) 5886 *config = spa_config_generate(spa, NULL, -1ULL, B_TRUE); 5887 5888 /* 5889 * If we've recovered the pool, pass back any information we 5890 * gathered while doing the load. 
5891 */ 5892 if (state == SPA_LOAD_RECOVER && config != NULL) { 5893 fnvlist_add_nvlist(*config, ZPOOL_CONFIG_LOAD_INFO, 5894 spa->spa_load_info); 5895 } 5896 5897 if (locked) { 5898 spa->spa_last_open_failed = 0; 5899 spa->spa_last_ubsync_txg = 0; 5900 spa->spa_load_txg = 0; 5901 mutex_exit(&spa_namespace_lock); 5902 } 5903 5904 if (firstopen) 5905 zvol_create_minors_recursive(spa_name(spa)); 5906 5907 *spapp = spa; 5908 5909 return (0); 5910 } 5911 5912 int 5913 spa_open_rewind(const char *name, spa_t **spapp, const void *tag, 5914 nvlist_t *policy, nvlist_t **config) 5915 { 5916 return (spa_open_common(name, spapp, tag, policy, config)); 5917 } 5918 5919 int 5920 spa_open(const char *name, spa_t **spapp, const void *tag) 5921 { 5922 return (spa_open_common(name, spapp, tag, NULL, NULL)); 5923 } 5924 5925 /* 5926 * Lookup the given spa_t, incrementing the inject count in the process, 5927 * preventing it from being exported or destroyed. 5928 */ 5929 spa_t * 5930 spa_inject_addref(char *name) 5931 { 5932 spa_t *spa; 5933 5934 mutex_enter(&spa_namespace_lock); 5935 if ((spa = spa_lookup(name)) == NULL) { 5936 mutex_exit(&spa_namespace_lock); 5937 return (NULL); 5938 } 5939 spa->spa_inject_ref++; 5940 mutex_exit(&spa_namespace_lock); 5941 5942 return (spa); 5943 } 5944 5945 void 5946 spa_inject_delref(spa_t *spa) 5947 { 5948 mutex_enter(&spa_namespace_lock); 5949 spa->spa_inject_ref--; 5950 mutex_exit(&spa_namespace_lock); 5951 } 5952 5953 /* 5954 * Add spares device information to the nvlist. 5955 */ 5956 static void 5957 spa_add_spares(spa_t *spa, nvlist_t *config) 5958 { 5959 nvlist_t **spares; 5960 uint_t i, nspares; 5961 nvlist_t *nvroot; 5962 uint64_t guid; 5963 vdev_stat_t *vs; 5964 uint_t vsc; 5965 uint64_t pool; 5966 5967 ASSERT(spa_config_held(spa, SCL_CONFIG, RW_READER)); 5968 5969 if (spa->spa_spares.sav_count == 0) 5970 return; 5971 5972 nvroot = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE); 5973 VERIFY0(nvlist_lookup_nvlist_array(spa->spa_spares.sav_config, 5974 ZPOOL_CONFIG_SPARES, &spares, &nspares)); 5975 if (nspares != 0) { 5976 fnvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES, 5977 (const nvlist_t * const *)spares, nspares); 5978 VERIFY0(nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES, 5979 &spares, &nspares)); 5980 5981 /* 5982 * Go through and find any spares which have since been 5983 * repurposed as an active spare. If this is the case, update 5984 * their status appropriately. 5985 */ 5986 for (i = 0; i < nspares; i++) { 5987 guid = fnvlist_lookup_uint64(spares[i], 5988 ZPOOL_CONFIG_GUID); 5989 VERIFY0(nvlist_lookup_uint64_array(spares[i], 5990 ZPOOL_CONFIG_VDEV_STATS, (uint64_t **)&vs, &vsc)); 5991 if (spa_spare_exists(guid, &pool, NULL) && 5992 pool != 0ULL) { 5993 vs->vs_state = VDEV_STATE_CANT_OPEN; 5994 vs->vs_aux = VDEV_AUX_SPARED; 5995 } else { 5996 vs->vs_state = 5997 spa->spa_spares.sav_vdevs[i]->vdev_state; 5998 } 5999 } 6000 } 6001 } 6002 6003 /* 6004 * Add l2cache device information to the nvlist, including vdev stats. 
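 * Unlike spa_add_spares(), which only patches up the state of in-use
 * spares, this refreshes the full stats from the in-core vdevs via
 * vdev_get_stats() and vdev_config_generate_stats().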
6005 */ 6006 static void 6007 spa_add_l2cache(spa_t *spa, nvlist_t *config) 6008 { 6009 nvlist_t **l2cache; 6010 uint_t i, j, nl2cache; 6011 nvlist_t *nvroot; 6012 uint64_t guid; 6013 vdev_t *vd; 6014 vdev_stat_t *vs; 6015 uint_t vsc; 6016 6017 ASSERT(spa_config_held(spa, SCL_CONFIG, RW_READER)); 6018 6019 if (spa->spa_l2cache.sav_count == 0) 6020 return; 6021 6022 nvroot = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE); 6023 VERIFY0(nvlist_lookup_nvlist_array(spa->spa_l2cache.sav_config, 6024 ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache)); 6025 if (nl2cache != 0) { 6026 fnvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE, 6027 (const nvlist_t * const *)l2cache, nl2cache); 6028 VERIFY0(nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE, 6029 &l2cache, &nl2cache)); 6030 6031 /* 6032 * Update level 2 cache device stats. 6033 */ 6034 6035 for (i = 0; i < nl2cache; i++) { 6036 guid = fnvlist_lookup_uint64(l2cache[i], 6037 ZPOOL_CONFIG_GUID); 6038 6039 vd = NULL; 6040 for (j = 0; j < spa->spa_l2cache.sav_count; j++) { 6041 if (guid == 6042 spa->spa_l2cache.sav_vdevs[j]->vdev_guid) { 6043 vd = spa->spa_l2cache.sav_vdevs[j]; 6044 break; 6045 } 6046 } 6047 ASSERT(vd != NULL); 6048 6049 VERIFY0(nvlist_lookup_uint64_array(l2cache[i], 6050 ZPOOL_CONFIG_VDEV_STATS, (uint64_t **)&vs, &vsc)); 6051 vdev_get_stats(vd, vs); 6052 vdev_config_generate_stats(vd, l2cache[i]); 6053 6054 } 6055 } 6056 } 6057 6058 static void 6059 spa_feature_stats_from_disk(spa_t *spa, nvlist_t *features) 6060 { 6061 zap_cursor_t zc; 6062 zap_attribute_t *za = zap_attribute_alloc(); 6063 6064 if (spa->spa_feat_for_read_obj != 0) { 6065 for (zap_cursor_init(&zc, spa->spa_meta_objset, 6066 spa->spa_feat_for_read_obj); 6067 zap_cursor_retrieve(&zc, za) == 0; 6068 zap_cursor_advance(&zc)) { 6069 ASSERT(za->za_integer_length == sizeof (uint64_t) && 6070 za->za_num_integers == 1); 6071 VERIFY0(nvlist_add_uint64(features, za->za_name, 6072 za->za_first_integer)); 6073 } 6074 zap_cursor_fini(&zc); 6075 } 6076 6077 if (spa->spa_feat_for_write_obj != 0) { 6078 for (zap_cursor_init(&zc, spa->spa_meta_objset, 6079 spa->spa_feat_for_write_obj); 6080 zap_cursor_retrieve(&zc, za) == 0; 6081 zap_cursor_advance(&zc)) { 6082 ASSERT(za->za_integer_length == sizeof (uint64_t) && 6083 za->za_num_integers == 1); 6084 VERIFY0(nvlist_add_uint64(features, za->za_name, 6085 za->za_first_integer)); 6086 } 6087 zap_cursor_fini(&zc); 6088 } 6089 zap_attribute_free(za); 6090 } 6091 6092 static void 6093 spa_feature_stats_from_cache(spa_t *spa, nvlist_t *features) 6094 { 6095 int i; 6096 6097 for (i = 0; i < SPA_FEATURES; i++) { 6098 zfeature_info_t feature = spa_feature_table[i]; 6099 uint64_t refcount; 6100 6101 if (feature_get_refcount(spa, &feature, &refcount) != 0) 6102 continue; 6103 6104 VERIFY0(nvlist_add_uint64(features, feature.fi_guid, refcount)); 6105 } 6106 } 6107 6108 /* 6109 * Store a list of pool features and their reference counts in the 6110 * config. 6111 * 6112 * The first time this is called on a spa, allocate a new nvlist, fetch 6113 * the pool features and reference counts from disk, then save the list 6114 * in the spa. In subsequent calls on the same spa use the saved nvlist 6115 * and refresh its values from the cached reference counts. This 6116 * ensures we don't block here on I/O on a suspended pool so 'zpool 6117 * clear' can resume the pool. 
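 * Access to the cached nvlist is serialized by spa_feat_stats_lock.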
6118 */ 6119 static void 6120 spa_add_feature_stats(spa_t *spa, nvlist_t *config) 6121 { 6122 nvlist_t *features; 6123 6124 ASSERT(spa_config_held(spa, SCL_CONFIG, RW_READER)); 6125 6126 mutex_enter(&spa->spa_feat_stats_lock); 6127 features = spa->spa_feat_stats; 6128 6129 if (features != NULL) { 6130 spa_feature_stats_from_cache(spa, features); 6131 } else { 6132 VERIFY0(nvlist_alloc(&features, NV_UNIQUE_NAME, KM_SLEEP)); 6133 spa->spa_feat_stats = features; 6134 spa_feature_stats_from_disk(spa, features); 6135 } 6136 6137 VERIFY0(nvlist_add_nvlist(config, ZPOOL_CONFIG_FEATURE_STATS, 6138 features)); 6139 6140 mutex_exit(&spa->spa_feat_stats_lock); 6141 } 6142 6143 int 6144 spa_get_stats(const char *name, nvlist_t **config, 6145 char *altroot, size_t buflen) 6146 { 6147 int error; 6148 spa_t *spa; 6149 6150 *config = NULL; 6151 error = spa_open_common(name, &spa, FTAG, NULL, config); 6152 6153 if (spa != NULL) { 6154 /* 6155 * This still leaves a window of inconsistency where the spares 6156 * or l2cache devices could change and the config would be 6157 * self-inconsistent. 6158 */ 6159 spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER); 6160 6161 if (*config != NULL) { 6162 uint64_t loadtimes[2]; 6163 6164 loadtimes[0] = spa->spa_loaded_ts.tv_sec; 6165 loadtimes[1] = spa->spa_loaded_ts.tv_nsec; 6166 fnvlist_add_uint64_array(*config, 6167 ZPOOL_CONFIG_LOADED_TIME, loadtimes, 2); 6168 6169 fnvlist_add_uint64(*config, 6170 ZPOOL_CONFIG_ERRCOUNT, 6171 spa_approx_errlog_size(spa)); 6172 6173 if (spa_suspended(spa)) { 6174 fnvlist_add_uint64(*config, 6175 ZPOOL_CONFIG_SUSPENDED, 6176 spa->spa_failmode); 6177 fnvlist_add_uint64(*config, 6178 ZPOOL_CONFIG_SUSPENDED_REASON, 6179 spa->spa_suspended); 6180 } 6181 6182 spa_add_spares(spa, *config); 6183 spa_add_l2cache(spa, *config); 6184 spa_add_feature_stats(spa, *config); 6185 } 6186 } 6187 6188 /* 6189 * We want to get the alternate root even for faulted pools, so we cheat 6190 * and call spa_lookup() directly. 6191 */ 6192 if (altroot) { 6193 if (spa == NULL) { 6194 mutex_enter(&spa_namespace_lock); 6195 spa = spa_lookup(name); 6196 if (spa) 6197 spa_altroot(spa, altroot, buflen); 6198 else 6199 altroot[0] = '\0'; 6200 spa = NULL; 6201 mutex_exit(&spa_namespace_lock); 6202 } else { 6203 spa_altroot(spa, altroot, buflen); 6204 } 6205 } 6206 6207 if (spa != NULL) { 6208 spa_config_exit(spa, SCL_CONFIG, FTAG); 6209 spa_close(spa, FTAG); 6210 } 6211 6212 return (error); 6213 } 6214 6215 /* 6216 * Validate that the auxiliary device array is well formed. We must have an 6217 * array of nvlists, each which describes a valid leaf vdev. If this is an 6218 * import (mode is VDEV_ALLOC_SPARE), then we allow corrupted spares to be 6219 * specified, as long as they are well-formed. 6220 */ 6221 static int 6222 spa_validate_aux_devs(spa_t *spa, nvlist_t *nvroot, uint64_t crtxg, int mode, 6223 spa_aux_vdev_t *sav, const char *config, uint64_t version, 6224 vdev_labeltype_t label) 6225 { 6226 nvlist_t **dev; 6227 uint_t i, ndev; 6228 vdev_t *vd; 6229 int error; 6230 6231 ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL); 6232 6233 /* 6234 * It's acceptable to have no devs specified. 6235 */ 6236 if (nvlist_lookup_nvlist_array(nvroot, config, &dev, &ndev) != 0) 6237 return (0); 6238 6239 if (ndev == 0) 6240 return (SET_ERROR(EINVAL)); 6241 6242 /* 6243 * Make sure the pool is formatted with a version that supports this 6244 * device type. 
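 * (For example, spares require at least SPA_VERSION_SPARES and cache
 * devices require SPA_VERSION_L2CACHE; spa_validate_aux() passes the
 * appropriate minimum in 'version'.)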
6245 */ 6246 if (spa_version(spa) < version) 6247 return (SET_ERROR(ENOTSUP)); 6248 6249 /* 6250 * Set the pending device list so we correctly handle device in-use 6251 * checking. 6252 */ 6253 sav->sav_pending = dev; 6254 sav->sav_npending = ndev; 6255 6256 for (i = 0; i < ndev; i++) { 6257 if ((error = spa_config_parse(spa, &vd, dev[i], NULL, 0, 6258 mode)) != 0) 6259 goto out; 6260 6261 if (!vd->vdev_ops->vdev_op_leaf) { 6262 vdev_free(vd); 6263 error = SET_ERROR(EINVAL); 6264 goto out; 6265 } 6266 6267 vd->vdev_top = vd; 6268 6269 if ((error = vdev_open(vd)) == 0 && 6270 (error = vdev_label_init(vd, crtxg, label)) == 0) { 6271 fnvlist_add_uint64(dev[i], ZPOOL_CONFIG_GUID, 6272 vd->vdev_guid); 6273 } 6274 6275 vdev_free(vd); 6276 6277 if (error && 6278 (mode != VDEV_ALLOC_SPARE && mode != VDEV_ALLOC_L2CACHE)) 6279 goto out; 6280 else 6281 error = 0; 6282 } 6283 6284 out: 6285 sav->sav_pending = NULL; 6286 sav->sav_npending = 0; 6287 return (error); 6288 } 6289 6290 static int 6291 spa_validate_aux(spa_t *spa, nvlist_t *nvroot, uint64_t crtxg, int mode) 6292 { 6293 int error; 6294 6295 ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL); 6296 6297 if ((error = spa_validate_aux_devs(spa, nvroot, crtxg, mode, 6298 &spa->spa_spares, ZPOOL_CONFIG_SPARES, SPA_VERSION_SPARES, 6299 VDEV_LABEL_SPARE)) != 0) { 6300 return (error); 6301 } 6302 6303 return (spa_validate_aux_devs(spa, nvroot, crtxg, mode, 6304 &spa->spa_l2cache, ZPOOL_CONFIG_L2CACHE, SPA_VERSION_L2CACHE, 6305 VDEV_LABEL_L2CACHE)); 6306 } 6307 6308 static void 6309 spa_set_aux_vdevs(spa_aux_vdev_t *sav, nvlist_t **devs, int ndevs, 6310 const char *config) 6311 { 6312 int i; 6313 6314 if (sav->sav_config != NULL) { 6315 nvlist_t **olddevs; 6316 uint_t oldndevs; 6317 nvlist_t **newdevs; 6318 6319 /* 6320 * Generate new dev list by concatenating with the 6321 * current dev list. 6322 */ 6323 VERIFY0(nvlist_lookup_nvlist_array(sav->sav_config, config, 6324 &olddevs, &oldndevs)); 6325 6326 newdevs = kmem_alloc(sizeof (void *) * 6327 (ndevs + oldndevs), KM_SLEEP); 6328 for (i = 0; i < oldndevs; i++) 6329 newdevs[i] = fnvlist_dup(olddevs[i]); 6330 for (i = 0; i < ndevs; i++) 6331 newdevs[i + oldndevs] = fnvlist_dup(devs[i]); 6332 6333 fnvlist_remove(sav->sav_config, config); 6334 6335 fnvlist_add_nvlist_array(sav->sav_config, config, 6336 (const nvlist_t * const *)newdevs, ndevs + oldndevs); 6337 for (i = 0; i < oldndevs + ndevs; i++) 6338 nvlist_free(newdevs[i]); 6339 kmem_free(newdevs, (oldndevs + ndevs) * sizeof (void *)); 6340 } else { 6341 /* 6342 * Generate a new dev list. 6343 */ 6344 sav->sav_config = fnvlist_alloc(); 6345 fnvlist_add_nvlist_array(sav->sav_config, config, 6346 (const nvlist_t * const *)devs, ndevs); 6347 } 6348 } 6349 6350 /* 6351 * Stop and drop level 2 ARC devices 6352 */ 6353 void 6354 spa_l2cache_drop(spa_t *spa) 6355 { 6356 vdev_t *vd; 6357 int i; 6358 spa_aux_vdev_t *sav = &spa->spa_l2cache; 6359 6360 for (i = 0; i < sav->sav_count; i++) { 6361 uint64_t pool; 6362 6363 vd = sav->sav_vdevs[i]; 6364 ASSERT(vd != NULL); 6365 6366 if (spa_l2cache_exists(vd->vdev_guid, &pool) && 6367 pool != 0ULL && l2arc_vdev_present(vd)) 6368 l2arc_remove_vdev(vd); 6369 } 6370 } 6371 6372 /* 6373 * Verify encryption parameters for spa creation. If we are encrypting, we must 6374 * have the encryption feature flag enabled. 
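 * Requesting a cipher (anything other than "off" or "inherit") without the
 * feature enabled fails up front with ENOTSUP, rather than partway through
 * pool creation.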
6375 */ 6376 static int 6377 spa_create_check_encryption_params(dsl_crypto_params_t *dcp, 6378 boolean_t has_encryption) 6379 { 6380 if (dcp->cp_crypt != ZIO_CRYPT_OFF && 6381 dcp->cp_crypt != ZIO_CRYPT_INHERIT && 6382 !has_encryption) 6383 return (SET_ERROR(ENOTSUP)); 6384 6385 return (dmu_objset_create_crypt_check(NULL, dcp, NULL)); 6386 } 6387 6388 /* 6389 * Pool Creation 6390 */ 6391 int 6392 spa_create(const char *pool, nvlist_t *nvroot, nvlist_t *props, 6393 nvlist_t *zplprops, dsl_crypto_params_t *dcp) 6394 { 6395 spa_t *spa; 6396 const char *altroot = NULL; 6397 vdev_t *rvd; 6398 dsl_pool_t *dp; 6399 dmu_tx_t *tx; 6400 int error = 0; 6401 uint64_t txg = TXG_INITIAL; 6402 nvlist_t **spares, **l2cache; 6403 uint_t nspares, nl2cache; 6404 uint64_t version, obj, ndraid = 0; 6405 boolean_t has_features; 6406 boolean_t has_encryption; 6407 boolean_t has_allocclass; 6408 spa_feature_t feat; 6409 const char *feat_name; 6410 const char *poolname; 6411 nvlist_t *nvl; 6412 6413 if (props == NULL || 6414 nvlist_lookup_string(props, 6415 zpool_prop_to_name(ZPOOL_PROP_TNAME), &poolname) != 0) 6416 poolname = (char *)pool; 6417 6418 /* 6419 * If this pool already exists, return failure. 6420 */ 6421 mutex_enter(&spa_namespace_lock); 6422 if (spa_lookup(poolname) != NULL) { 6423 mutex_exit(&spa_namespace_lock); 6424 return (SET_ERROR(EEXIST)); 6425 } 6426 6427 /* 6428 * Allocate a new spa_t structure. 6429 */ 6430 nvl = fnvlist_alloc(); 6431 fnvlist_add_string(nvl, ZPOOL_CONFIG_POOL_NAME, pool); 6432 (void) nvlist_lookup_string(props, 6433 zpool_prop_to_name(ZPOOL_PROP_ALTROOT), &altroot); 6434 spa = spa_add(poolname, nvl, altroot); 6435 fnvlist_free(nvl); 6436 spa_activate(spa, spa_mode_global); 6437 6438 if (props && (error = spa_prop_validate(spa, props))) { 6439 spa_deactivate(spa); 6440 spa_remove(spa); 6441 mutex_exit(&spa_namespace_lock); 6442 return (error); 6443 } 6444 6445 /* 6446 * Temporary pool names should never be written to disk. 
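 * The spa_t is entered into the namespace under the temporary name, while
 * the config nvlist built above keeps the permanent name, so only the
 * permanent name can make it into labels or the cachefile.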
6447 */ 6448 if (poolname != pool) 6449 spa->spa_import_flags |= ZFS_IMPORT_TEMP_NAME; 6450 6451 has_features = B_FALSE; 6452 has_encryption = B_FALSE; 6453 has_allocclass = B_FALSE; 6454 for (nvpair_t *elem = nvlist_next_nvpair(props, NULL); 6455 elem != NULL; elem = nvlist_next_nvpair(props, elem)) { 6456 if (zpool_prop_feature(nvpair_name(elem))) { 6457 has_features = B_TRUE; 6458 6459 feat_name = strchr(nvpair_name(elem), '@') + 1; 6460 VERIFY0(zfeature_lookup_name(feat_name, &feat)); 6461 if (feat == SPA_FEATURE_ENCRYPTION) 6462 has_encryption = B_TRUE; 6463 if (feat == SPA_FEATURE_ALLOCATION_CLASSES) 6464 has_allocclass = B_TRUE; 6465 } 6466 } 6467 6468 /* verify encryption params, if they were provided */ 6469 if (dcp != NULL) { 6470 error = spa_create_check_encryption_params(dcp, has_encryption); 6471 if (error != 0) { 6472 spa_deactivate(spa); 6473 spa_remove(spa); 6474 mutex_exit(&spa_namespace_lock); 6475 return (error); 6476 } 6477 } 6478 if (!has_allocclass && zfs_special_devs(nvroot, NULL)) { 6479 spa_deactivate(spa); 6480 spa_remove(spa); 6481 mutex_exit(&spa_namespace_lock); 6482 return (ENOTSUP); 6483 } 6484 6485 if (has_features || nvlist_lookup_uint64(props, 6486 zpool_prop_to_name(ZPOOL_PROP_VERSION), &version) != 0) { 6487 version = SPA_VERSION; 6488 } 6489 ASSERT(SPA_VERSION_IS_SUPPORTED(version)); 6490 6491 spa->spa_first_txg = txg; 6492 spa->spa_uberblock.ub_txg = txg - 1; 6493 spa->spa_uberblock.ub_version = version; 6494 spa->spa_ubsync = spa->spa_uberblock; 6495 spa->spa_load_state = SPA_LOAD_CREATE; 6496 spa->spa_removing_phys.sr_state = DSS_NONE; 6497 spa->spa_removing_phys.sr_removing_vdev = -1; 6498 spa->spa_removing_phys.sr_prev_indirect_vdev = -1; 6499 spa->spa_indirect_vdevs_loaded = B_TRUE; 6500 6501 /* 6502 * Create "The Godfather" zio to hold all async IOs 6503 */ 6504 spa->spa_async_zio_root = kmem_alloc(max_ncpus * sizeof (void *), 6505 KM_SLEEP); 6506 for (int i = 0; i < max_ncpus; i++) { 6507 spa->spa_async_zio_root[i] = zio_root(spa, NULL, NULL, 6508 ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE | 6509 ZIO_FLAG_GODFATHER); 6510 } 6511 6512 /* 6513 * Create the root vdev. 6514 */ 6515 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 6516 6517 error = spa_config_parse(spa, &rvd, nvroot, NULL, 0, VDEV_ALLOC_ADD); 6518 6519 ASSERT(error != 0 || rvd != NULL); 6520 ASSERT(error != 0 || spa->spa_root_vdev == rvd); 6521 6522 if (error == 0 && !zfs_allocatable_devs(nvroot)) 6523 error = SET_ERROR(EINVAL); 6524 6525 if (error == 0 && 6526 (error = vdev_create(rvd, txg, B_FALSE)) == 0 && 6527 (error = vdev_draid_spare_create(nvroot, rvd, &ndraid, 0)) == 0 && 6528 (error = spa_validate_aux(spa, nvroot, txg, VDEV_ALLOC_ADD)) == 0) { 6529 /* 6530 * instantiate the metaslab groups (this will dirty the vdevs) 6531 * we can no longer error exit past this point 6532 */ 6533 for (int c = 0; error == 0 && c < rvd->vdev_children; c++) { 6534 vdev_t *vd = rvd->vdev_child[c]; 6535 6536 vdev_metaslab_set_size(vd); 6537 vdev_expand(vd, txg); 6538 } 6539 } 6540 6541 spa_config_exit(spa, SCL_ALL, FTAG); 6542 6543 if (error != 0) { 6544 spa_unload(spa); 6545 spa_deactivate(spa); 6546 spa_remove(spa); 6547 mutex_exit(&spa_namespace_lock); 6548 return (error); 6549 } 6550 6551 /* 6552 * Get the list of spares, if specified. 
6553 */ 6554 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES, 6555 &spares, &nspares) == 0) { 6556 spa->spa_spares.sav_config = fnvlist_alloc(); 6557 fnvlist_add_nvlist_array(spa->spa_spares.sav_config, 6558 ZPOOL_CONFIG_SPARES, (const nvlist_t * const *)spares, 6559 nspares); 6560 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 6561 spa_load_spares(spa); 6562 spa_config_exit(spa, SCL_ALL, FTAG); 6563 spa->spa_spares.sav_sync = B_TRUE; 6564 } 6565 6566 /* 6567 * Get the list of level 2 cache devices, if specified. 6568 */ 6569 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE, 6570 &l2cache, &nl2cache) == 0) { 6571 VERIFY0(nvlist_alloc(&spa->spa_l2cache.sav_config, 6572 NV_UNIQUE_NAME, KM_SLEEP)); 6573 fnvlist_add_nvlist_array(spa->spa_l2cache.sav_config, 6574 ZPOOL_CONFIG_L2CACHE, (const nvlist_t * const *)l2cache, 6575 nl2cache); 6576 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 6577 spa_load_l2cache(spa); 6578 spa_config_exit(spa, SCL_ALL, FTAG); 6579 spa->spa_l2cache.sav_sync = B_TRUE; 6580 } 6581 6582 spa->spa_is_initializing = B_TRUE; 6583 spa->spa_dsl_pool = dp = dsl_pool_create(spa, zplprops, dcp, txg); 6584 spa->spa_is_initializing = B_FALSE; 6585 6586 /* 6587 * Create DDTs (dedup tables). 6588 */ 6589 ddt_create(spa); 6590 /* 6591 * Create BRT table and BRT table object. 6592 */ 6593 brt_create(spa); 6594 6595 spa_update_dspace(spa); 6596 6597 tx = dmu_tx_create_assigned(dp, txg); 6598 6599 /* 6600 * Create the pool's history object. 6601 */ 6602 if (version >= SPA_VERSION_ZPOOL_HISTORY && !spa->spa_history) 6603 spa_history_create_obj(spa, tx); 6604 6605 spa_event_notify(spa, NULL, NULL, ESC_ZFS_POOL_CREATE); 6606 spa_history_log_version(spa, "create", tx); 6607 6608 /* 6609 * Create the pool config object. 6610 */ 6611 spa->spa_config_object = dmu_object_alloc(spa->spa_meta_objset, 6612 DMU_OT_PACKED_NVLIST, SPA_CONFIG_BLOCKSIZE, 6613 DMU_OT_PACKED_NVLIST_SIZE, sizeof (uint64_t), tx); 6614 6615 if (zap_add(spa->spa_meta_objset, 6616 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_CONFIG, 6617 sizeof (uint64_t), 1, &spa->spa_config_object, tx) != 0) { 6618 cmn_err(CE_PANIC, "failed to add pool config"); 6619 } 6620 6621 if (zap_add(spa->spa_meta_objset, 6622 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_CREATION_VERSION, 6623 sizeof (uint64_t), 1, &version, tx) != 0) { 6624 cmn_err(CE_PANIC, "failed to add pool version"); 6625 } 6626 6627 /* Newly created pools with the right version are always deflated. */ 6628 if (version >= SPA_VERSION_RAIDZ_DEFLATE) { 6629 spa->spa_deflate = TRUE; 6630 if (zap_add(spa->spa_meta_objset, 6631 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_DEFLATE, 6632 sizeof (uint64_t), 1, &spa->spa_deflate, tx) != 0) { 6633 cmn_err(CE_PANIC, "failed to add deflate"); 6634 } 6635 } 6636 6637 /* 6638 * Create the deferred-free bpobj. Turn off compression 6639 * because sync-to-convergence takes longer if the blocksize 6640 * keeps changing. 6641 */ 6642 obj = bpobj_alloc(spa->spa_meta_objset, 1 << 14, tx); 6643 dmu_object_set_compress(spa->spa_meta_objset, obj, 6644 ZIO_COMPRESS_OFF, tx); 6645 if (zap_add(spa->spa_meta_objset, 6646 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_SYNC_BPOBJ, 6647 sizeof (uint64_t), 1, &obj, tx) != 0) { 6648 cmn_err(CE_PANIC, "failed to add bpobj"); 6649 } 6650 VERIFY3U(0, ==, bpobj_open(&spa->spa_deferred_bpobj, 6651 spa->spa_meta_objset, obj)); 6652 6653 /* 6654 * Generate some random noise for salted checksums to operate on. 
6655 */ 6656 (void) random_get_pseudo_bytes(spa->spa_cksum_salt.zcs_bytes, 6657 sizeof (spa->spa_cksum_salt.zcs_bytes)); 6658 6659 /* 6660 * Set pool properties. 6661 */ 6662 spa->spa_bootfs = zpool_prop_default_numeric(ZPOOL_PROP_BOOTFS); 6663 spa->spa_delegation = zpool_prop_default_numeric(ZPOOL_PROP_DELEGATION); 6664 spa->spa_failmode = zpool_prop_default_numeric(ZPOOL_PROP_FAILUREMODE); 6665 spa->spa_autoexpand = zpool_prop_default_numeric(ZPOOL_PROP_AUTOEXPAND); 6666 spa->spa_multihost = zpool_prop_default_numeric(ZPOOL_PROP_MULTIHOST); 6667 spa->spa_autotrim = zpool_prop_default_numeric(ZPOOL_PROP_AUTOTRIM); 6668 spa->spa_dedup_table_quota = 6669 zpool_prop_default_numeric(ZPOOL_PROP_DEDUP_TABLE_QUOTA); 6670 6671 if (props != NULL) { 6672 spa_configfile_set(spa, props, B_FALSE); 6673 spa_sync_props(props, tx); 6674 } 6675 6676 for (int i = 0; i < ndraid; i++) 6677 spa_feature_incr(spa, SPA_FEATURE_DRAID, tx); 6678 6679 dmu_tx_commit(tx); 6680 6681 spa->spa_sync_on = B_TRUE; 6682 txg_sync_start(dp); 6683 mmp_thread_start(spa); 6684 txg_wait_synced(dp, txg); 6685 6686 spa_spawn_aux_threads(spa); 6687 6688 spa_write_cachefile(spa, B_FALSE, B_TRUE, B_TRUE); 6689 6690 /* 6691 * Don't count references from objsets that are already closed 6692 * and are making their way through the eviction process. 6693 */ 6694 spa_evicting_os_wait(spa); 6695 spa->spa_minref = zfs_refcount_count(&spa->spa_refcount); 6696 spa->spa_load_state = SPA_LOAD_NONE; 6697 6698 spa_import_os(spa); 6699 6700 mutex_exit(&spa_namespace_lock); 6701 6702 return (0); 6703 } 6704 6705 /* 6706 * Import a non-root pool into the system. 6707 */ 6708 int 6709 spa_import(char *pool, nvlist_t *config, nvlist_t *props, uint64_t flags) 6710 { 6711 spa_t *spa; 6712 const char *altroot = NULL; 6713 spa_load_state_t state = SPA_LOAD_IMPORT; 6714 zpool_load_policy_t policy; 6715 spa_mode_t mode = spa_mode_global; 6716 uint64_t readonly = B_FALSE; 6717 int error; 6718 nvlist_t *nvroot; 6719 nvlist_t **spares, **l2cache; 6720 uint_t nspares, nl2cache; 6721 6722 /* 6723 * If a pool with this name exists, return failure. 6724 */ 6725 mutex_enter(&spa_namespace_lock); 6726 if (spa_lookup(pool) != NULL) { 6727 mutex_exit(&spa_namespace_lock); 6728 return (SET_ERROR(EEXIST)); 6729 } 6730 6731 /* 6732 * Create and initialize the spa structure. 6733 */ 6734 (void) nvlist_lookup_string(props, 6735 zpool_prop_to_name(ZPOOL_PROP_ALTROOT), &altroot); 6736 (void) nvlist_lookup_uint64(props, 6737 zpool_prop_to_name(ZPOOL_PROP_READONLY), &readonly); 6738 if (readonly) 6739 mode = SPA_MODE_READ; 6740 spa = spa_add(pool, config, altroot); 6741 spa->spa_import_flags = flags; 6742 6743 /* 6744 * Verbatim import - Take a pool and insert it into the namespace 6745 * as if it had been loaded at boot. 6746 */ 6747 if (spa->spa_import_flags & ZFS_IMPORT_VERBATIM) { 6748 if (props != NULL) 6749 spa_configfile_set(spa, props, B_FALSE); 6750 6751 spa_write_cachefile(spa, B_FALSE, B_TRUE, B_FALSE); 6752 spa_event_notify(spa, NULL, NULL, ESC_ZFS_POOL_IMPORT); 6753 zfs_dbgmsg("spa_import: verbatim import of %s", pool); 6754 mutex_exit(&spa_namespace_lock); 6755 return (0); 6756 } 6757 6758 spa_activate(spa, mode); 6759 6760 /* 6761 * Don't start async tasks until we know everything is healthy. 
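 * (They are resumed below via spa_async_resume(), once spa_load_best()
 * has succeeded and any pool properties have been applied.)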
6762 */ 6763 spa_async_suspend(spa); 6764 6765 zpool_get_load_policy(config, &policy); 6766 if (policy.zlp_rewind & ZPOOL_DO_REWIND) 6767 state = SPA_LOAD_RECOVER; 6768 6769 spa->spa_config_source = SPA_CONFIG_SRC_TRYIMPORT; 6770 6771 if (state != SPA_LOAD_RECOVER) { 6772 spa->spa_last_ubsync_txg = spa->spa_load_txg = 0; 6773 zfs_dbgmsg("spa_import: importing %s", pool); 6774 } else { 6775 zfs_dbgmsg("spa_import: importing %s, max_txg=%lld " 6776 "(RECOVERY MODE)", pool, (longlong_t)policy.zlp_txg); 6777 } 6778 error = spa_load_best(spa, state, policy.zlp_txg, policy.zlp_rewind); 6779 6780 /* 6781 * Propagate anything learned while loading the pool and pass it 6782 * back to caller (i.e. rewind info, missing devices, etc). 6783 */ 6784 fnvlist_add_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, spa->spa_load_info); 6785 6786 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 6787 /* 6788 * Toss any existing sparelist, as it doesn't have any validity 6789 * anymore, and conflicts with spa_has_spare(). 6790 */ 6791 if (spa->spa_spares.sav_config) { 6792 nvlist_free(spa->spa_spares.sav_config); 6793 spa->spa_spares.sav_config = NULL; 6794 spa_load_spares(spa); 6795 } 6796 if (spa->spa_l2cache.sav_config) { 6797 nvlist_free(spa->spa_l2cache.sav_config); 6798 spa->spa_l2cache.sav_config = NULL; 6799 spa_load_l2cache(spa); 6800 } 6801 6802 nvroot = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE); 6803 spa_config_exit(spa, SCL_ALL, FTAG); 6804 6805 if (props != NULL) 6806 spa_configfile_set(spa, props, B_FALSE); 6807 6808 if (error != 0 || (props && spa_writeable(spa) && 6809 (error = spa_prop_set(spa, props)))) { 6810 spa_unload(spa); 6811 spa_deactivate(spa); 6812 spa_remove(spa); 6813 mutex_exit(&spa_namespace_lock); 6814 return (error); 6815 } 6816 6817 spa_async_resume(spa); 6818 6819 /* 6820 * Override any spares and level 2 cache devices as specified by 6821 * the user, as these may have correct device names/devids, etc. 6822 */ 6823 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES, 6824 &spares, &nspares) == 0) { 6825 if (spa->spa_spares.sav_config) 6826 fnvlist_remove(spa->spa_spares.sav_config, 6827 ZPOOL_CONFIG_SPARES); 6828 else 6829 spa->spa_spares.sav_config = fnvlist_alloc(); 6830 fnvlist_add_nvlist_array(spa->spa_spares.sav_config, 6831 ZPOOL_CONFIG_SPARES, (const nvlist_t * const *)spares, 6832 nspares); 6833 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 6834 spa_load_spares(spa); 6835 spa_config_exit(spa, SCL_ALL, FTAG); 6836 spa->spa_spares.sav_sync = B_TRUE; 6837 spa->spa_spares.sav_label_sync = B_TRUE; 6838 } 6839 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE, 6840 &l2cache, &nl2cache) == 0) { 6841 if (spa->spa_l2cache.sav_config) 6842 fnvlist_remove(spa->spa_l2cache.sav_config, 6843 ZPOOL_CONFIG_L2CACHE); 6844 else 6845 spa->spa_l2cache.sav_config = fnvlist_alloc(); 6846 fnvlist_add_nvlist_array(spa->spa_l2cache.sav_config, 6847 ZPOOL_CONFIG_L2CACHE, (const nvlist_t * const *)l2cache, 6848 nl2cache); 6849 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 6850 spa_load_l2cache(spa); 6851 spa_config_exit(spa, SCL_ALL, FTAG); 6852 spa->spa_l2cache.sav_sync = B_TRUE; 6853 spa->spa_l2cache.sav_label_sync = B_TRUE; 6854 } 6855 6856 /* 6857 * Check for any removed devices. 6858 */ 6859 if (spa->spa_autoreplace) { 6860 spa_aux_check_removed(&spa->spa_spares); 6861 spa_aux_check_removed(&spa->spa_l2cache); 6862 } 6863 6864 if (spa_writeable(spa)) { 6865 /* 6866 * Update the config cache to include the newly-imported pool. 
6867 */ 6868 spa_config_update(spa, SPA_CONFIG_UPDATE_POOL); 6869 } 6870 6871 /* 6872 * It's possible that the pool was expanded while it was exported. 6873 * We kick off an async task to handle this for us. 6874 */ 6875 spa_async_request(spa, SPA_ASYNC_AUTOEXPAND); 6876 6877 spa_history_log_version(spa, "import", NULL); 6878 6879 spa_event_notify(spa, NULL, NULL, ESC_ZFS_POOL_IMPORT); 6880 6881 mutex_exit(&spa_namespace_lock); 6882 6883 zvol_create_minors_recursive(pool); 6884 6885 spa_import_os(spa); 6886 6887 return (0); 6888 } 6889 6890 nvlist_t * 6891 spa_tryimport(nvlist_t *tryconfig) 6892 { 6893 nvlist_t *config = NULL; 6894 const char *poolname, *cachefile; 6895 spa_t *spa; 6896 uint64_t state; 6897 int error; 6898 zpool_load_policy_t policy; 6899 6900 if (nvlist_lookup_string(tryconfig, ZPOOL_CONFIG_POOL_NAME, &poolname)) 6901 return (NULL); 6902 6903 if (nvlist_lookup_uint64(tryconfig, ZPOOL_CONFIG_POOL_STATE, &state)) 6904 return (NULL); 6905 6906 /* 6907 * Create and initialize the spa structure. 6908 */ 6909 char *name = kmem_alloc(MAXPATHLEN, KM_SLEEP); 6910 (void) snprintf(name, MAXPATHLEN, "%s-%llx-%s", 6911 TRYIMPORT_NAME, (u_longlong_t)(uintptr_t)curthread, poolname); 6912 6913 mutex_enter(&spa_namespace_lock); 6914 spa = spa_add(name, tryconfig, NULL); 6915 spa_activate(spa, SPA_MODE_READ); 6916 kmem_free(name, MAXPATHLEN); 6917 6918 /* 6919 * Rewind pool if a max txg was provided. 6920 */ 6921 zpool_get_load_policy(spa->spa_config, &policy); 6922 if (policy.zlp_txg != UINT64_MAX) { 6923 spa->spa_load_max_txg = policy.zlp_txg; 6924 spa->spa_extreme_rewind = B_TRUE; 6925 zfs_dbgmsg("spa_tryimport: importing %s, max_txg=%lld", 6926 poolname, (longlong_t)policy.zlp_txg); 6927 } else { 6928 zfs_dbgmsg("spa_tryimport: importing %s", poolname); 6929 } 6930 6931 if (nvlist_lookup_string(tryconfig, ZPOOL_CONFIG_CACHEFILE, &cachefile) 6932 == 0) { 6933 zfs_dbgmsg("spa_tryimport: using cachefile '%s'", cachefile); 6934 spa->spa_config_source = SPA_CONFIG_SRC_CACHEFILE; 6935 } else { 6936 spa->spa_config_source = SPA_CONFIG_SRC_SCAN; 6937 } 6938 6939 /* 6940 * spa_import() relies on a pool config fetched by spa_try_import() 6941 * for spare/cache devices. Import flags are not passed to 6942 * spa_tryimport(), which makes it return early due to a missing log 6943 * device and missing retrieving the cache device and spare eventually. 6944 * Passing ZFS_IMPORT_MISSING_LOG to spa_tryimport() makes it fetch 6945 * the correct configuration regardless of the missing log device. 6946 */ 6947 spa->spa_import_flags |= ZFS_IMPORT_MISSING_LOG; 6948 6949 error = spa_load(spa, SPA_LOAD_TRYIMPORT, SPA_IMPORT_EXISTING); 6950 6951 /* 6952 * If 'tryconfig' was at least parsable, return the current config. 6953 */ 6954 if (spa->spa_root_vdev != NULL) { 6955 config = spa_config_generate(spa, NULL, -1ULL, B_TRUE); 6956 fnvlist_add_string(config, ZPOOL_CONFIG_POOL_NAME, poolname); 6957 fnvlist_add_uint64(config, ZPOOL_CONFIG_POOL_STATE, state); 6958 fnvlist_add_uint64(config, ZPOOL_CONFIG_TIMESTAMP, 6959 spa->spa_uberblock.ub_timestamp); 6960 fnvlist_add_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, 6961 spa->spa_load_info); 6962 fnvlist_add_uint64(config, ZPOOL_CONFIG_ERRATA, 6963 spa->spa_errata); 6964 6965 /* 6966 * If the bootfs property exists on this pool then we 6967 * copy it out so that external consumers can tell which 6968 * pools are bootable. 
6969 */ 6970 if ((!error || error == EEXIST) && spa->spa_bootfs) { 6971 char *tmpname = kmem_alloc(MAXPATHLEN, KM_SLEEP); 6972 6973 /* 6974 * We have to play games with the name since the 6975 * pool was opened as TRYIMPORT_NAME. 6976 */ 6977 if (dsl_dsobj_to_dsname(spa_name(spa), 6978 spa->spa_bootfs, tmpname) == 0) { 6979 char *cp; 6980 char *dsname; 6981 6982 dsname = kmem_alloc(MAXPATHLEN, KM_SLEEP); 6983 6984 cp = strchr(tmpname, '/'); 6985 if (cp == NULL) { 6986 (void) strlcpy(dsname, tmpname, 6987 MAXPATHLEN); 6988 } else { 6989 (void) snprintf(dsname, MAXPATHLEN, 6990 "%s/%s", poolname, ++cp); 6991 } 6992 fnvlist_add_string(config, ZPOOL_CONFIG_BOOTFS, 6993 dsname); 6994 kmem_free(dsname, MAXPATHLEN); 6995 } 6996 kmem_free(tmpname, MAXPATHLEN); 6997 } 6998 6999 /* 7000 * Add the list of hot spares and level 2 cache devices. 7001 */ 7002 spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER); 7003 spa_add_spares(spa, config); 7004 spa_add_l2cache(spa, config); 7005 spa_config_exit(spa, SCL_CONFIG, FTAG); 7006 } 7007 7008 spa_unload(spa); 7009 spa_deactivate(spa); 7010 spa_remove(spa); 7011 mutex_exit(&spa_namespace_lock); 7012 7013 return (config); 7014 } 7015 7016 /* 7017 * Pool export/destroy 7018 * 7019 * The act of destroying or exporting a pool is very simple. We make sure there 7020 * is no more pending I/O and any references to the pool are gone. Then, we 7021 * update the pool state and sync all the labels to disk, removing the 7022 * configuration from the cache afterwards. If the 'hardforce' flag is set, then 7023 * we don't sync the labels or remove the configuration cache. 7024 */ 7025 static int 7026 spa_export_common(const char *pool, int new_state, nvlist_t **oldconfig, 7027 boolean_t force, boolean_t hardforce) 7028 { 7029 int error = 0; 7030 spa_t *spa; 7031 hrtime_t export_start = gethrtime(); 7032 7033 if (oldconfig) 7034 *oldconfig = NULL; 7035 7036 if (!(spa_mode_global & SPA_MODE_WRITE)) 7037 return (SET_ERROR(EROFS)); 7038 7039 mutex_enter(&spa_namespace_lock); 7040 if ((spa = spa_lookup(pool)) == NULL) { 7041 mutex_exit(&spa_namespace_lock); 7042 return (SET_ERROR(ENOENT)); 7043 } 7044 7045 if (spa->spa_is_exporting) { 7046 /* the pool is being exported by another thread */ 7047 mutex_exit(&spa_namespace_lock); 7048 return (SET_ERROR(ZFS_ERR_EXPORT_IN_PROGRESS)); 7049 } 7050 spa->spa_is_exporting = B_TRUE; 7051 7052 /* 7053 * Put a hold on the pool, drop the namespace lock, stop async tasks 7054 * and see if we can export. 7055 */ 7056 spa_open_ref(spa, FTAG); 7057 mutex_exit(&spa_namespace_lock); 7058 spa_async_suspend(spa); 7059 if (spa->spa_zvol_taskq) { 7060 zvol_remove_minors(spa, spa_name(spa), B_TRUE); 7061 taskq_wait(spa->spa_zvol_taskq); 7062 } 7063 mutex_enter(&spa_namespace_lock); 7064 spa->spa_export_thread = curthread; 7065 spa_close(spa, FTAG); 7066 7067 if (spa->spa_state == POOL_STATE_UNINITIALIZED) { 7068 mutex_exit(&spa_namespace_lock); 7069 goto export_spa; 7070 } 7071 7072 /* 7073 * The pool will be in core if it's openable, in which case we can 7074 * modify its state. Objsets may be open only because they're dirty, 7075 * so we have to force it to sync before checking spa_refcnt. 7076 */ 7077 if (spa->spa_sync_on) { 7078 txg_wait_synced(spa->spa_dsl_pool, 0); 7079 spa_evicting_os_wait(spa); 7080 } 7081 7082 /* 7083 * A pool cannot be exported or destroyed if there are active 7084 * references. If we are resetting a pool, allow references by 7085 * fault injection handlers. 
7086 */ 7087 if (!spa_refcount_zero(spa) || (spa->spa_inject_ref != 0)) { 7088 error = SET_ERROR(EBUSY); 7089 goto fail; 7090 } 7091 7092 mutex_exit(&spa_namespace_lock); 7093 /* 7094 * At this point we no longer hold the spa_namespace_lock and 7095 * there were no references on the spa. Future spa_lookups will 7096 * notice the spa->spa_export_thread and wait until we signal 7097 * that we are finished. 7098 */ 7099 7100 if (spa->spa_sync_on) { 7101 vdev_t *rvd = spa->spa_root_vdev; 7102 /* 7103 * A pool cannot be exported if it has an active shared spare. 7104 * This is to prevent other pools from stealing the active spare 7105 * from an exported pool. If the user really wants to, such a 7106 * pool can still be forcibly exported. 7107 */ 7108 if (!force && new_state == POOL_STATE_EXPORTED && 7109 spa_has_active_shared_spare(spa)) { 7110 error = SET_ERROR(EXDEV); 7111 mutex_enter(&spa_namespace_lock); 7112 goto fail; 7113 } 7114 7115 /* 7116 * We're about to export or destroy this pool. Make sure 7117 * we stop all initialization and trim activity here before 7118 * we set the spa_final_txg. This will ensure that all 7119 * dirty data resulting from the initialization is 7120 * committed to disk before we unload the pool. 7121 */ 7122 vdev_initialize_stop_all(rvd, VDEV_INITIALIZE_ACTIVE); 7123 vdev_trim_stop_all(rvd, VDEV_TRIM_ACTIVE); 7124 vdev_autotrim_stop_all(spa); 7125 vdev_rebuild_stop_all(spa); 7126 l2arc_spa_rebuild_stop(spa); 7127 7128 /* 7129 * We want this to be reflected on every label, 7130 * so mark them all dirty. spa_unload() will do the 7131 * final sync that pushes these changes out. 7132 */ 7133 if (new_state != POOL_STATE_UNINITIALIZED && !hardforce) { 7134 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 7135 spa->spa_state = new_state; 7136 vdev_config_dirty(rvd); 7137 spa_config_exit(spa, SCL_ALL, FTAG); 7138 } 7139 7140 /* 7141 * If the log space map feature is enabled and the pool is 7142 * getting exported (but not destroyed), we want to spend some 7143 * time flushing as many metaslabs as we can in an attempt to 7144 * destroy log space maps and save import time. This has to be 7145 * done before we set the spa_final_txg, otherwise 7146 * spa_sync() -> spa_flush_metaslabs() may dirty the final TXGs. 7147 * spa_should_flush_logs_on_unload() should be called after 7148 * spa_state has been set to the new_state.
7149 */ 7150 if (spa_should_flush_logs_on_unload(spa)) 7151 spa_unload_log_sm_flush_all(spa); 7152 7153 if (new_state != POOL_STATE_UNINITIALIZED && !hardforce) { 7154 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 7155 spa->spa_final_txg = spa_last_synced_txg(spa) + 7156 TXG_DEFER_SIZE + 1; 7157 spa_config_exit(spa, SCL_ALL, FTAG); 7158 } 7159 } 7160 7161 export_spa: 7162 spa_export_os(spa); 7163 7164 if (new_state == POOL_STATE_DESTROYED) 7165 spa_event_notify(spa, NULL, NULL, ESC_ZFS_POOL_DESTROY); 7166 else if (new_state == POOL_STATE_EXPORTED) 7167 spa_event_notify(spa, NULL, NULL, ESC_ZFS_POOL_EXPORT); 7168 7169 if (spa->spa_state != POOL_STATE_UNINITIALIZED) { 7170 spa_unload(spa); 7171 spa_deactivate(spa); 7172 } 7173 7174 if (oldconfig && spa->spa_config) 7175 *oldconfig = fnvlist_dup(spa->spa_config); 7176 7177 if (new_state == POOL_STATE_EXPORTED) 7178 zio_handle_export_delay(spa, gethrtime() - export_start); 7179 7180 /* 7181 * Take the namespace lock for the actual spa_t removal 7182 */ 7183 mutex_enter(&spa_namespace_lock); 7184 if (new_state != POOL_STATE_UNINITIALIZED) { 7185 if (!hardforce) 7186 spa_write_cachefile(spa, B_TRUE, B_TRUE, B_FALSE); 7187 spa_remove(spa); 7188 } else { 7189 /* 7190 * If spa_remove() is not called for this spa_t and 7191 * there is any possibility that it can be reused, 7192 * we make sure to reset the exporting flag. 7193 */ 7194 spa->spa_is_exporting = B_FALSE; 7195 spa->spa_export_thread = NULL; 7196 } 7197 7198 /* 7199 * Wake up any waiters in spa_lookup() 7200 */ 7201 cv_broadcast(&spa_namespace_cv); 7202 mutex_exit(&spa_namespace_lock); 7203 return (0); 7204 7205 fail: 7206 spa->spa_is_exporting = B_FALSE; 7207 spa->spa_export_thread = NULL; 7208 7209 spa_async_resume(spa); 7210 /* 7211 * Wake up any waiters in spa_lookup() 7212 */ 7213 cv_broadcast(&spa_namespace_cv); 7214 mutex_exit(&spa_namespace_lock); 7215 return (error); 7216 } 7217 7218 /* 7219 * Destroy a storage pool. 7220 */ 7221 int 7222 spa_destroy(const char *pool) 7223 { 7224 return (spa_export_common(pool, POOL_STATE_DESTROYED, NULL, 7225 B_FALSE, B_FALSE)); 7226 } 7227 7228 /* 7229 * Export a storage pool. 7230 */ 7231 int 7232 spa_export(const char *pool, nvlist_t **oldconfig, boolean_t force, 7233 boolean_t hardforce) 7234 { 7235 return (spa_export_common(pool, POOL_STATE_EXPORTED, oldconfig, 7236 force, hardforce)); 7237 } 7238 7239 /* 7240 * Similar to spa_export(), this unloads the spa_t without actually removing it 7241 * from the namespace in any way. 7242 */ 7243 int 7244 spa_reset(const char *pool) 7245 { 7246 return (spa_export_common(pool, POOL_STATE_UNINITIALIZED, NULL, 7247 B_FALSE, B_FALSE)); 7248 } 7249 7250 /* 7251 * ========================================================================== 7252 * Device manipulation 7253 * ========================================================================== 7254 */ 7255 7256 /* 7257 * This is called as a synctask to increment the draid feature flag 7258 */ 7259 static void 7260 spa_draid_feature_incr(void *arg, dmu_tx_t *tx) 7261 { 7262 spa_t *spa = dmu_tx_pool(tx)->dp_spa; 7263 int draid = (int)(uintptr_t)arg; 7264 7265 for (int c = 0; c < draid; c++) 7266 spa_feature_incr(spa, SPA_FEATURE_DRAID, tx); 7267 } 7268 7269 /* 7270 * Add a device to a storage pool. 
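 *
 * A minimal sketch of a caller (hypothetical; error handling elided, and
 * 'nvroot' is assumed to already describe the new top-level vdevs, as the
 * 'zpool add' ioctl handler would build it):
 *
 *	spa_t *spa;
 *	int error = spa_open(name, &spa, FTAG);
 *	if (error == 0) {
 *		error = spa_vdev_add(spa, nvroot, B_FALSE);
 *		spa_close(spa, FTAG);
 *	}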
7271 */ 7272 int 7273 spa_vdev_add(spa_t *spa, nvlist_t *nvroot, boolean_t check_ashift) 7274 { 7275 uint64_t txg, ndraid = 0; 7276 int error; 7277 vdev_t *rvd = spa->spa_root_vdev; 7278 vdev_t *vd, *tvd; 7279 nvlist_t **spares, **l2cache; 7280 uint_t nspares, nl2cache; 7281 7282 ASSERT(spa_writeable(spa)); 7283 7284 txg = spa_vdev_enter(spa); 7285 7286 if ((error = spa_config_parse(spa, &vd, nvroot, NULL, 0, 7287 VDEV_ALLOC_ADD)) != 0) 7288 return (spa_vdev_exit(spa, NULL, txg, error)); 7289 7290 spa->spa_pending_vdev = vd; /* spa_vdev_exit() will clear this */ 7291 7292 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES, &spares, 7293 &nspares) != 0) 7294 nspares = 0; 7295 7296 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE, &l2cache, 7297 &nl2cache) != 0) 7298 nl2cache = 0; 7299 7300 if (vd->vdev_children == 0 && nspares == 0 && nl2cache == 0) 7301 return (spa_vdev_exit(spa, vd, txg, EINVAL)); 7302 7303 if (vd->vdev_children != 0 && 7304 (error = vdev_create(vd, txg, B_FALSE)) != 0) { 7305 return (spa_vdev_exit(spa, vd, txg, error)); 7306 } 7307 7308 /* 7309 * The virtual dRAID spares must be added after vdev tree is created 7310 * and the vdev guids are generated. The guid of their associated 7311 * dRAID is stored in the config and used when opening the spare. 7312 */ 7313 if ((error = vdev_draid_spare_create(nvroot, vd, &ndraid, 7314 rvd->vdev_children)) == 0) { 7315 if (ndraid > 0 && nvlist_lookup_nvlist_array(nvroot, 7316 ZPOOL_CONFIG_SPARES, &spares, &nspares) != 0) 7317 nspares = 0; 7318 } else { 7319 return (spa_vdev_exit(spa, vd, txg, error)); 7320 } 7321 7322 /* 7323 * We must validate the spares and l2cache devices after checking the 7324 * children. Otherwise, vdev_inuse() will blindly overwrite the spare. 7325 */ 7326 if ((error = spa_validate_aux(spa, nvroot, txg, VDEV_ALLOC_ADD)) != 0) 7327 return (spa_vdev_exit(spa, vd, txg, error)); 7328 7329 /* 7330 * If we are in the middle of a device removal, we can only add 7331 * devices which match the existing devices in the pool. 7332 * If we are in the middle of a removal, or have some indirect 7333 * vdevs, we can not add raidz or dRAID top levels. 
7334 */ 7335 if (spa->spa_vdev_removal != NULL || 7336 spa->spa_removing_phys.sr_prev_indirect_vdev != -1) { 7337 for (int c = 0; c < vd->vdev_children; c++) { 7338 tvd = vd->vdev_child[c]; 7339 if (spa->spa_vdev_removal != NULL && 7340 tvd->vdev_ashift != spa->spa_max_ashift) { 7341 return (spa_vdev_exit(spa, vd, txg, EINVAL)); 7342 } 7343 /* Fail if top level vdev is raidz or a dRAID */ 7344 if (vdev_get_nparity(tvd) != 0) 7345 return (spa_vdev_exit(spa, vd, txg, EINVAL)); 7346 7347 /* 7348 * Need the top level mirror to be 7349 * a mirror of leaf vdevs only 7350 */ 7351 if (tvd->vdev_ops == &vdev_mirror_ops) { 7352 for (uint64_t cid = 0; 7353 cid < tvd->vdev_children; cid++) { 7354 vdev_t *cvd = tvd->vdev_child[cid]; 7355 if (!cvd->vdev_ops->vdev_op_leaf) { 7356 return (spa_vdev_exit(spa, vd, 7357 txg, EINVAL)); 7358 } 7359 } 7360 } 7361 } 7362 } 7363 7364 if (check_ashift && spa->spa_max_ashift == spa->spa_min_ashift) { 7365 for (int c = 0; c < vd->vdev_children; c++) { 7366 tvd = vd->vdev_child[c]; 7367 if (tvd->vdev_ashift != spa->spa_max_ashift) { 7368 return (spa_vdev_exit(spa, vd, txg, 7369 ZFS_ERR_ASHIFT_MISMATCH)); 7370 } 7371 } 7372 } 7373 7374 for (int c = 0; c < vd->vdev_children; c++) { 7375 tvd = vd->vdev_child[c]; 7376 vdev_remove_child(vd, tvd); 7377 tvd->vdev_id = rvd->vdev_children; 7378 vdev_add_child(rvd, tvd); 7379 vdev_config_dirty(tvd); 7380 } 7381 7382 if (nspares != 0) { 7383 spa_set_aux_vdevs(&spa->spa_spares, spares, nspares, 7384 ZPOOL_CONFIG_SPARES); 7385 spa_load_spares(spa); 7386 spa->spa_spares.sav_sync = B_TRUE; 7387 } 7388 7389 if (nl2cache != 0) { 7390 spa_set_aux_vdevs(&spa->spa_l2cache, l2cache, nl2cache, 7391 ZPOOL_CONFIG_L2CACHE); 7392 spa_load_l2cache(spa); 7393 spa->spa_l2cache.sav_sync = B_TRUE; 7394 } 7395 7396 /* 7397 * We can't increment a feature while holding spa_vdev so we 7398 * have to do it in a synctask. 7399 */ 7400 if (ndraid != 0) { 7401 dmu_tx_t *tx; 7402 7403 tx = dmu_tx_create_assigned(spa->spa_dsl_pool, txg); 7404 dsl_sync_task_nowait(spa->spa_dsl_pool, spa_draid_feature_incr, 7405 (void *)(uintptr_t)ndraid, tx); 7406 dmu_tx_commit(tx); 7407 } 7408 7409 /* 7410 * We have to be careful when adding new vdevs to an existing pool. 7411 * If other threads start allocating from these vdevs before we 7412 * sync the config cache, and we lose power, then upon reboot we may 7413 * fail to open the pool because there are DVAs that the config cache 7414 * can't translate. Therefore, we first add the vdevs without 7415 * initializing metaslabs; sync the config cache (via spa_vdev_exit()); 7416 * and then let spa_config_update() initialize the new metaslabs. 7417 * 7418 * spa_load() checks for added-but-not-initialized vdevs, so that 7419 * if we lose power at any point in this sequence, the remaining 7420 * steps will be completed the next time we load the pool. 7421 */ 7422 (void) spa_vdev_exit(spa, vd, txg, 0); 7423 7424 mutex_enter(&spa_namespace_lock); 7425 spa_config_update(spa, SPA_CONFIG_UPDATE_POOL); 7426 spa_event_notify(spa, NULL, NULL, ESC_ZFS_VDEV_ADD); 7427 mutex_exit(&spa_namespace_lock); 7428 7429 return (0); 7430 } 7431 7432 /* 7433 * Attach a device to a vdev specified by its guid. The vdev type can be 7434 * a mirror, a raidz, or a leaf device that is also a top-level (e.g. a 7435 * single device). When the vdev is a single device, a mirror vdev will be 7436 * automatically inserted. 
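 * For example, attaching a new leaf to a single-device top-level turns it
 * into a two-way mirror, and attaching to an existing mirror simply widens
 * it by one child.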
7437 * 7438 * If 'replacing' is specified, the new device is intended to replace the 7439 * existing device; in this case the two devices are made into their own 7440 * mirror using the 'replacing' vdev, which is functionally identical to 7441 * the mirror vdev (it actually reuses all the same ops) but has a few 7442 * extra rules: you can't attach to it after it's been created, and upon 7443 * completion of resilvering, the first disk (the one being replaced) 7444 * is automatically detached. 7445 * 7446 * If 'rebuild' is specified, then sequential reconstruction (a.ka. rebuild) 7447 * should be performed instead of traditional healing reconstruction. From 7448 * an administrators perspective these are both resilver operations. 7449 */ 7450 int 7451 spa_vdev_attach(spa_t *spa, uint64_t guid, nvlist_t *nvroot, int replacing, 7452 int rebuild) 7453 { 7454 uint64_t txg, dtl_max_txg; 7455 vdev_t *rvd = spa->spa_root_vdev; 7456 vdev_t *oldvd, *newvd, *newrootvd, *pvd, *tvd; 7457 vdev_ops_t *pvops; 7458 char *oldvdpath, *newvdpath; 7459 int newvd_isspare = B_FALSE; 7460 int error; 7461 7462 ASSERT(spa_writeable(spa)); 7463 7464 txg = spa_vdev_enter(spa); 7465 7466 oldvd = spa_lookup_by_guid(spa, guid, B_FALSE); 7467 7468 ASSERT(MUTEX_HELD(&spa_namespace_lock)); 7469 if (spa_feature_is_active(spa, SPA_FEATURE_POOL_CHECKPOINT)) { 7470 error = (spa_has_checkpoint(spa)) ? 7471 ZFS_ERR_CHECKPOINT_EXISTS : ZFS_ERR_DISCARDING_CHECKPOINT; 7472 return (spa_vdev_exit(spa, NULL, txg, error)); 7473 } 7474 7475 if (rebuild) { 7476 if (!spa_feature_is_enabled(spa, SPA_FEATURE_DEVICE_REBUILD)) 7477 return (spa_vdev_exit(spa, NULL, txg, ENOTSUP)); 7478 7479 if (dsl_scan_resilvering(spa_get_dsl(spa)) || 7480 dsl_scan_resilver_scheduled(spa_get_dsl(spa))) { 7481 return (spa_vdev_exit(spa, NULL, txg, 7482 ZFS_ERR_RESILVER_IN_PROGRESS)); 7483 } 7484 } else { 7485 if (vdev_rebuild_active(rvd)) 7486 return (spa_vdev_exit(spa, NULL, txg, 7487 ZFS_ERR_REBUILD_IN_PROGRESS)); 7488 } 7489 7490 if (spa->spa_vdev_removal != NULL) { 7491 return (spa_vdev_exit(spa, NULL, txg, 7492 ZFS_ERR_DEVRM_IN_PROGRESS)); 7493 } 7494 7495 if (oldvd == NULL) 7496 return (spa_vdev_exit(spa, NULL, txg, ENODEV)); 7497 7498 boolean_t raidz = oldvd->vdev_ops == &vdev_raidz_ops; 7499 7500 if (raidz) { 7501 if (!spa_feature_is_enabled(spa, SPA_FEATURE_RAIDZ_EXPANSION)) 7502 return (spa_vdev_exit(spa, NULL, txg, ENOTSUP)); 7503 7504 /* 7505 * Can't expand a raidz while prior expand is in progress. 7506 */ 7507 if (spa->spa_raidz_expand != NULL) { 7508 return (spa_vdev_exit(spa, NULL, txg, 7509 ZFS_ERR_RAIDZ_EXPAND_IN_PROGRESS)); 7510 } 7511 } else if (!oldvd->vdev_ops->vdev_op_leaf) { 7512 return (spa_vdev_exit(spa, NULL, txg, ENOTSUP)); 7513 } 7514 7515 if (raidz) 7516 pvd = oldvd; 7517 else 7518 pvd = oldvd->vdev_parent; 7519 7520 if (spa_config_parse(spa, &newrootvd, nvroot, NULL, 0, 7521 VDEV_ALLOC_ATTACH) != 0) 7522 return (spa_vdev_exit(spa, NULL, txg, EINVAL)); 7523 7524 if (newrootvd->vdev_children != 1) 7525 return (spa_vdev_exit(spa, newrootvd, txg, EINVAL)); 7526 7527 newvd = newrootvd->vdev_child[0]; 7528 7529 if (!newvd->vdev_ops->vdev_op_leaf) 7530 return (spa_vdev_exit(spa, newrootvd, txg, EINVAL)); 7531 7532 if ((error = vdev_create(newrootvd, txg, replacing)) != 0) 7533 return (spa_vdev_exit(spa, newrootvd, txg, error)); 7534 7535 /* 7536 * log, dedup and special vdevs should not be replaced by spares. 
7537 */ 7538 if ((oldvd->vdev_top->vdev_alloc_bias != VDEV_BIAS_NONE || 7539 oldvd->vdev_top->vdev_islog) && newvd->vdev_isspare) { 7540 return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP)); 7541 } 7542 7543 /* 7544 * A dRAID spare can only replace a child of its parent dRAID vdev. 7545 */ 7546 if (newvd->vdev_ops == &vdev_draid_spare_ops && 7547 oldvd->vdev_top != vdev_draid_spare_get_parent(newvd)) { 7548 return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP)); 7549 } 7550 7551 if (rebuild) { 7552 /* 7553 * For rebuilds, the top vdev must support reconstruction 7554 * using only space maps. This means the only allowable 7555 * vdevs types are the root vdev, a mirror, or dRAID. 7556 */ 7557 tvd = pvd; 7558 if (pvd->vdev_top != NULL) 7559 tvd = pvd->vdev_top; 7560 7561 if (tvd->vdev_ops != &vdev_mirror_ops && 7562 tvd->vdev_ops != &vdev_root_ops && 7563 tvd->vdev_ops != &vdev_draid_ops) { 7564 return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP)); 7565 } 7566 } 7567 7568 if (!replacing) { 7569 /* 7570 * For attach, the only allowable parent is a mirror or 7571 * the root vdev. A raidz vdev can be attached to, but 7572 * you cannot attach to a raidz child. 7573 */ 7574 if (pvd->vdev_ops != &vdev_mirror_ops && 7575 pvd->vdev_ops != &vdev_root_ops && 7576 !raidz) 7577 return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP)); 7578 7579 pvops = &vdev_mirror_ops; 7580 } else { 7581 /* 7582 * Active hot spares can only be replaced by inactive hot 7583 * spares. 7584 */ 7585 if (pvd->vdev_ops == &vdev_spare_ops && 7586 oldvd->vdev_isspare && 7587 !spa_has_spare(spa, newvd->vdev_guid)) 7588 return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP)); 7589 7590 /* 7591 * If the source is a hot spare, and the parent isn't already a 7592 * spare, then we want to create a new hot spare. Otherwise, we 7593 * want to create a replacing vdev. The user is not allowed to 7594 * attach to a spared vdev child unless the 'isspare' state is 7595 * the same (spare replaces spare, non-spare replaces 7596 * non-spare). 7597 */ 7598 if (pvd->vdev_ops == &vdev_replacing_ops && 7599 spa_version(spa) < SPA_VERSION_MULTI_REPLACE) { 7600 return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP)); 7601 } else if (pvd->vdev_ops == &vdev_spare_ops && 7602 newvd->vdev_isspare != oldvd->vdev_isspare) { 7603 return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP)); 7604 } 7605 7606 if (newvd->vdev_isspare) 7607 pvops = &vdev_spare_ops; 7608 else 7609 pvops = &vdev_replacing_ops; 7610 } 7611 7612 /* 7613 * Make sure the new device is big enough. 7614 */ 7615 vdev_t *min_vdev = raidz ? oldvd->vdev_child[0] : oldvd; 7616 if (newvd->vdev_asize < vdev_get_min_asize(min_vdev)) 7617 return (spa_vdev_exit(spa, newrootvd, txg, EOVERFLOW)); 7618 7619 /* 7620 * The new device cannot have a higher alignment requirement 7621 * than the top-level vdev. 7622 */ 7623 if (newvd->vdev_ashift > oldvd->vdev_top->vdev_ashift) { 7624 return (spa_vdev_exit(spa, newrootvd, txg, 7625 ZFS_ERR_ASHIFT_MISMATCH)); 7626 } 7627 7628 /* 7629 * RAIDZ-expansion-specific checks. 
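 * The block below rejects the attach if vdev_raidz_attach_check() fails
 * for the new device, if any existing child is dead or is not a leaf,
 * or if the reserved boot area of any child is still in use.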
7630 */ 7631 if (raidz) { 7632 if (vdev_raidz_attach_check(newvd) != 0) 7633 return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP)); 7634 7635 /* 7636 * Fail early if a child is not healthy or being replaced 7637 */ 7638 for (int i = 0; i < oldvd->vdev_children; i++) { 7639 if (vdev_is_dead(oldvd->vdev_child[i]) || 7640 !oldvd->vdev_child[i]->vdev_ops->vdev_op_leaf) { 7641 return (spa_vdev_exit(spa, newrootvd, txg, 7642 ENXIO)); 7643 } 7644 /* Also fail if reserved boot area is in-use */ 7645 if (vdev_check_boot_reserve(spa, oldvd->vdev_child[i]) 7646 != 0) { 7647 return (spa_vdev_exit(spa, newrootvd, txg, 7648 EADDRINUSE)); 7649 } 7650 } 7651 } 7652 7653 if (raidz) { 7654 /* 7655 * Note: oldvdpath is freed by spa_strfree(), but 7656 * kmem_asprintf() is freed by kmem_strfree(), so we have to 7657 * move it to a spa_strdup-ed string. 7658 */ 7659 char *tmp = kmem_asprintf("raidz%u-%u", 7660 (uint_t)vdev_get_nparity(oldvd), (uint_t)oldvd->vdev_id); 7661 oldvdpath = spa_strdup(tmp); 7662 kmem_strfree(tmp); 7663 } else { 7664 oldvdpath = spa_strdup(oldvd->vdev_path); 7665 } 7666 newvdpath = spa_strdup(newvd->vdev_path); 7667 7668 /* 7669 * If this is an in-place replacement, update oldvd's path and devid 7670 * to make it distinguishable from newvd, and unopenable from now on. 7671 */ 7672 if (strcmp(oldvdpath, newvdpath) == 0) { 7673 spa_strfree(oldvd->vdev_path); 7674 oldvd->vdev_path = kmem_alloc(strlen(newvdpath) + 5, 7675 KM_SLEEP); 7676 (void) sprintf(oldvd->vdev_path, "%s/old", 7677 newvdpath); 7678 if (oldvd->vdev_devid != NULL) { 7679 spa_strfree(oldvd->vdev_devid); 7680 oldvd->vdev_devid = NULL; 7681 } 7682 spa_strfree(oldvdpath); 7683 oldvdpath = spa_strdup(oldvd->vdev_path); 7684 } 7685 7686 /* 7687 * If the parent is not a mirror, or if we're replacing, insert the new 7688 * mirror/replacing/spare vdev above oldvd. 7689 */ 7690 if (!raidz && pvd->vdev_ops != pvops) { 7691 pvd = vdev_add_parent(oldvd, pvops); 7692 ASSERT(pvd->vdev_ops == pvops); 7693 ASSERT(oldvd->vdev_parent == pvd); 7694 } 7695 7696 ASSERT(pvd->vdev_top->vdev_parent == rvd); 7697 7698 /* 7699 * Extract the new device from its root and add it to pvd. 7700 */ 7701 vdev_remove_child(newrootvd, newvd); 7702 newvd->vdev_id = pvd->vdev_children; 7703 newvd->vdev_crtxg = oldvd->vdev_crtxg; 7704 vdev_add_child(pvd, newvd); 7705 7706 /* 7707 * Reevaluate the parent vdev state. 7708 */ 7709 vdev_propagate_state(pvd); 7710 7711 tvd = newvd->vdev_top; 7712 ASSERT(pvd->vdev_top == tvd); 7713 ASSERT(tvd->vdev_parent == rvd); 7714 7715 vdev_config_dirty(tvd); 7716 7717 /* 7718 * Set newvd's DTL to [TXG_INITIAL, dtl_max_txg) so that we account 7719 * for any dmu_sync-ed blocks. It will propagate upward when 7720 * spa_vdev_exit() calls vdev_dtl_reassess(). 7721 */ 7722 dtl_max_txg = txg + TXG_CONCURRENT_STATES; 7723 7724 if (raidz) { 7725 /* 7726 * Wait for the youngest allocations and frees to sync, 7727 * and then wait for the deferral of those frees to finish. 
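 * (Waiting through txg + TXG_CONCURRENT_STATES covers every txg that is
 * currently open, quiescing or syncing; the extra TXG_DEFER_SIZE txgs
 * cover the frees those txgs deferred.)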
7728 */ 7729 spa_vdev_config_exit(spa, NULL, 7730 txg + TXG_CONCURRENT_STATES + TXG_DEFER_SIZE, 0, FTAG); 7731 7732 vdev_initialize_stop_all(tvd, VDEV_INITIALIZE_ACTIVE); 7733 vdev_trim_stop_all(tvd, VDEV_TRIM_ACTIVE); 7734 vdev_autotrim_stop_wait(tvd); 7735 7736 dtl_max_txg = spa_vdev_config_enter(spa); 7737 7738 tvd->vdev_rz_expanding = B_TRUE; 7739 7740 vdev_dirty_leaves(tvd, VDD_DTL, dtl_max_txg); 7741 vdev_config_dirty(tvd); 7742 7743 dmu_tx_t *tx = dmu_tx_create_assigned(spa->spa_dsl_pool, 7744 dtl_max_txg); 7745 dsl_sync_task_nowait(spa->spa_dsl_pool, vdev_raidz_attach_sync, 7746 newvd, tx); 7747 dmu_tx_commit(tx); 7748 } else { 7749 vdev_dtl_dirty(newvd, DTL_MISSING, TXG_INITIAL, 7750 dtl_max_txg - TXG_INITIAL); 7751 7752 if (newvd->vdev_isspare) { 7753 spa_spare_activate(newvd); 7754 spa_event_notify(spa, newvd, NULL, ESC_ZFS_VDEV_SPARE); 7755 } 7756 7757 newvd_isspare = newvd->vdev_isspare; 7758 7759 /* 7760 * Mark newvd's DTL dirty in this txg. 7761 */ 7762 vdev_dirty(tvd, VDD_DTL, newvd, txg); 7763 7764 /* 7765 * Schedule the resilver or rebuild to restart in the future. 7766 * We do this to ensure that dmu_sync-ed blocks have been 7767 * stitched into the respective datasets. 7768 */ 7769 if (rebuild) { 7770 newvd->vdev_rebuild_txg = txg; 7771 7772 vdev_rebuild(tvd); 7773 } else { 7774 newvd->vdev_resilver_txg = txg; 7775 7776 if (dsl_scan_resilvering(spa_get_dsl(spa)) && 7777 spa_feature_is_enabled(spa, 7778 SPA_FEATURE_RESILVER_DEFER)) { 7779 vdev_defer_resilver(newvd); 7780 } else { 7781 dsl_scan_restart_resilver(spa->spa_dsl_pool, 7782 dtl_max_txg); 7783 } 7784 } 7785 } 7786 7787 if (spa->spa_bootfs) 7788 spa_event_notify(spa, newvd, NULL, ESC_ZFS_BOOTFS_VDEV_ATTACH); 7789 7790 spa_event_notify(spa, newvd, NULL, ESC_ZFS_VDEV_ATTACH); 7791 7792 /* 7793 * Commit the config 7794 */ 7795 (void) spa_vdev_exit(spa, newrootvd, dtl_max_txg, 0); 7796 7797 spa_history_log_internal(spa, "vdev attach", NULL, 7798 "%s vdev=%s %s vdev=%s", 7799 replacing && newvd_isspare ? "spare in" : 7800 replacing ? "replace" : "attach", newvdpath, 7801 replacing ? "for" : "to", oldvdpath); 7802 7803 spa_strfree(oldvdpath); 7804 spa_strfree(newvdpath); 7805 7806 return (0); 7807 } 7808 7809 /* 7810 * Detach a device from a mirror or replacing vdev. 7811 * 7812 * If 'replace_done' is specified, only detach if the parent 7813 * is a replacing or a spare vdev. 7814 */ 7815 int 7816 spa_vdev_detach(spa_t *spa, uint64_t guid, uint64_t pguid, int replace_done) 7817 { 7818 uint64_t txg; 7819 int error; 7820 vdev_t *rvd __maybe_unused = spa->spa_root_vdev; 7821 vdev_t *vd, *pvd, *cvd, *tvd; 7822 boolean_t unspare = B_FALSE; 7823 uint64_t unspare_guid = 0; 7824 char *vdpath; 7825 7826 ASSERT(spa_writeable(spa)); 7827 7828 txg = spa_vdev_detach_enter(spa, guid); 7829 7830 vd = spa_lookup_by_guid(spa, guid, B_FALSE); 7831 7832 /* 7833 * Besides being called directly from the userland through the 7834 * ioctl interface, spa_vdev_detach() can be potentially called 7835 * at the end of spa_vdev_resilver_done(). 7836 * 7837 * In the regular case, when we have a checkpoint this shouldn't 7838 * happen as we never empty the DTLs of a vdev during the scrub 7839 * [see comment in dsl_scan_done()]. Thus spa_vdev_resilvering_done() 7840 * should never get here when we have a checkpoint. 7841 * 7842 * That said, even in a case when we checkpoint the pool exactly 7843 * as spa_vdev_resilver_done() calls this function everything 7844 * should be fine as the resilver will return right away. 
7845 */ 7846 ASSERT(MUTEX_HELD(&spa_namespace_lock)); 7847 if (spa_feature_is_active(spa, SPA_FEATURE_POOL_CHECKPOINT)) { 7848 error = (spa_has_checkpoint(spa)) ? 7849 ZFS_ERR_CHECKPOINT_EXISTS : ZFS_ERR_DISCARDING_CHECKPOINT; 7850 return (spa_vdev_exit(spa, NULL, txg, error)); 7851 } 7852 7853 if (vd == NULL) 7854 return (spa_vdev_exit(spa, NULL, txg, ENODEV)); 7855 7856 if (!vd->vdev_ops->vdev_op_leaf) 7857 return (spa_vdev_exit(spa, NULL, txg, ENOTSUP)); 7858 7859 pvd = vd->vdev_parent; 7860 7861 /* 7862 * If the parent/child relationship is not as expected, don't do it. 7863 * Consider M(A,R(B,C)) -- that is, a mirror of A with a replacing 7864 * vdev that's replacing B with C. The user's intent in replacing 7865 * is to go from M(A,B) to M(A,C). If the user decides to cancel 7866 * the replace by detaching C, the expected behavior is to end up 7867 * M(A,B). But suppose that right after deciding to detach C, 7868 * the replacement of B completes. We would have M(A,C), and then 7869 * ask to detach C, which would leave us with just A -- not what 7870 * the user wanted. To prevent this, we make sure that the 7871 * parent/child relationship hasn't changed -- in this example, 7872 * that C's parent is still the replacing vdev R. 7873 */ 7874 if (pvd->vdev_guid != pguid && pguid != 0) 7875 return (spa_vdev_exit(spa, NULL, txg, EBUSY)); 7876 7877 /* 7878 * Only 'replacing' or 'spare' vdevs can be replaced. 7879 */ 7880 if (replace_done && pvd->vdev_ops != &vdev_replacing_ops && 7881 pvd->vdev_ops != &vdev_spare_ops) 7882 return (spa_vdev_exit(spa, NULL, txg, ENOTSUP)); 7883 7884 ASSERT(pvd->vdev_ops != &vdev_spare_ops || 7885 spa_version(spa) >= SPA_VERSION_SPARES); 7886 7887 /* 7888 * Only mirror, replacing, and spare vdevs support detach. 7889 */ 7890 if (pvd->vdev_ops != &vdev_replacing_ops && 7891 pvd->vdev_ops != &vdev_mirror_ops && 7892 pvd->vdev_ops != &vdev_spare_ops) 7893 return (spa_vdev_exit(spa, NULL, txg, ENOTSUP)); 7894 7895 /* 7896 * If this device has the only valid copy of some data, 7897 * we cannot safely detach it. 7898 */ 7899 if (vdev_dtl_required(vd)) 7900 return (spa_vdev_exit(spa, NULL, txg, EBUSY)); 7901 7902 ASSERT(pvd->vdev_children >= 2); 7903 7904 /* 7905 * If we are detaching the second disk from a replacing vdev, then 7906 * check to see if we changed the original vdev's path to have "/old" 7907 * at the end in spa_vdev_attach(). If so, undo that change now. 7908 */ 7909 if (pvd->vdev_ops == &vdev_replacing_ops && vd->vdev_id > 0 && 7910 vd->vdev_path != NULL) { 7911 size_t len = strlen(vd->vdev_path); 7912 7913 for (int c = 0; c < pvd->vdev_children; c++) { 7914 cvd = pvd->vdev_child[c]; 7915 7916 if (cvd == vd || cvd->vdev_path == NULL) 7917 continue; 7918 7919 if (strncmp(cvd->vdev_path, vd->vdev_path, len) == 0 && 7920 strcmp(cvd->vdev_path + len, "/old") == 0) { 7921 spa_strfree(cvd->vdev_path); 7922 cvd->vdev_path = spa_strdup(vd->vdev_path); 7923 break; 7924 } 7925 } 7926 } 7927 7928 /* 7929 * If we are detaching the original disk from a normal spare, then it 7930 * implies that the spare should become a real disk, and be removed 7931 * from the active spare list for the pool. dRAID spares on the 7932 * other hand are coupled to the pool and thus should never be removed 7933 * from the spares list. 
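 * The test below therefore sets 'unspare' only when the parent is a
 * spare vdev, the disk being detached is child 0 (the original), and
 * the last child is an ordinary (non-dRAID) hot spare.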
7934 */ 7935 if (pvd->vdev_ops == &vdev_spare_ops && vd->vdev_id == 0) { 7936 vdev_t *last_cvd = pvd->vdev_child[pvd->vdev_children - 1]; 7937 7938 if (last_cvd->vdev_isspare && 7939 last_cvd->vdev_ops != &vdev_draid_spare_ops) { 7940 unspare = B_TRUE; 7941 } 7942 } 7943 7944 /* 7945 * Erase the disk labels so the disk can be used for other things. 7946 * This must be done after all other error cases are handled, 7947 * but before we disembowel vd (so we can still do I/O to it). 7948 * But if we can't do it, don't treat the error as fatal -- 7949 * it may be that the unwritability of the disk is the reason 7950 * it's being detached! 7951 */ 7952 (void) vdev_label_init(vd, 0, VDEV_LABEL_REMOVE); 7953 7954 /* 7955 * Remove vd from its parent and compact the parent's children. 7956 */ 7957 vdev_remove_child(pvd, vd); 7958 vdev_compact_children(pvd); 7959 7960 /* 7961 * Remember one of the remaining children so we can get tvd below. 7962 */ 7963 cvd = pvd->vdev_child[pvd->vdev_children - 1]; 7964 7965 /* 7966 * If we need to remove the remaining child from the list of hot spares, 7967 * do it now, marking the vdev as no longer a spare in the process. 7968 * We must do this before vdev_remove_parent(), because that can 7969 * change the GUID if it creates a new toplevel GUID. For a similar 7970 * reason, we must remove the spare now, in the same txg as the detach; 7971 * otherwise someone could attach a new sibling, change the GUID, and 7972 * the subsequent attempt to spa_vdev_remove(unspare_guid) would fail. 7973 */ 7974 if (unspare) { 7975 ASSERT(cvd->vdev_isspare); 7976 spa_spare_remove(cvd); 7977 unspare_guid = cvd->vdev_guid; 7978 (void) spa_vdev_remove(spa, unspare_guid, B_TRUE); 7979 cvd->vdev_unspare = B_TRUE; 7980 } 7981 7982 /* 7983 * If the parent mirror/replacing vdev only has one child, 7984 * the parent is no longer needed. Remove it from the tree. 7985 */ 7986 if (pvd->vdev_children == 1) { 7987 if (pvd->vdev_ops == &vdev_spare_ops) 7988 cvd->vdev_unspare = B_FALSE; 7989 vdev_remove_parent(cvd); 7990 } 7991 7992 /* 7993 * We don't set tvd until now because the parent we just removed 7994 * may have been the previous top-level vdev. 7995 */ 7996 tvd = cvd->vdev_top; 7997 ASSERT(tvd->vdev_parent == rvd); 7998 7999 /* 8000 * Reevaluate the parent vdev state. 8001 */ 8002 vdev_propagate_state(cvd); 8003 8004 /* 8005 * If the 'autoexpand' property is set on the pool then automatically 8006 * try to expand the size of the pool. For example if the device we 8007 * just detached was smaller than the others, it may be possible to 8008 * add metaslabs (i.e. grow the pool). We need to reopen the vdev 8009 * first so that we can obtain the updated sizes of the leaf vdevs. 8010 */ 8011 if (spa->spa_autoexpand) { 8012 vdev_reopen(tvd); 8013 vdev_expand(tvd, txg); 8014 } 8015 8016 vdev_config_dirty(tvd); 8017 8018 /* 8019 * Mark vd's DTL as dirty in this txg. vdev_dtl_sync() will see that 8020 * vd->vdev_detached is set and free vd's DTL object in syncing context. 8021 * But first make sure we're not on any *other* txg's DTL list, to 8022 * prevent vd from being accessed after it's freed. 8023 */ 8024 vdpath = spa_strdup(vd->vdev_path ? 
vd->vdev_path : "none"); 8025 for (int t = 0; t < TXG_SIZE; t++) 8026 (void) txg_list_remove_this(&tvd->vdev_dtl_list, vd, t); 8027 vd->vdev_detached = B_TRUE; 8028 vdev_dirty(tvd, VDD_DTL, vd, txg); 8029 8030 spa_event_notify(spa, vd, NULL, ESC_ZFS_VDEV_REMOVE); 8031 spa_notify_waiters(spa); 8032 8033 /* hang on to the spa before we release the lock */ 8034 spa_open_ref(spa, FTAG); 8035 8036 error = spa_vdev_exit(spa, vd, txg, 0); 8037 8038 spa_history_log_internal(spa, "detach", NULL, 8039 "vdev=%s", vdpath); 8040 spa_strfree(vdpath); 8041 8042 /* 8043 * If this was the removal of the original device in a hot spare vdev, 8044 * then we want to go through and remove the device from the hot spare 8045 * list of every other pool. 8046 */ 8047 if (unspare) { 8048 spa_t *altspa = NULL; 8049 8050 mutex_enter(&spa_namespace_lock); 8051 while ((altspa = spa_next(altspa)) != NULL) { 8052 if (altspa->spa_state != POOL_STATE_ACTIVE || 8053 altspa == spa) 8054 continue; 8055 8056 spa_open_ref(altspa, FTAG); 8057 mutex_exit(&spa_namespace_lock); 8058 (void) spa_vdev_remove(altspa, unspare_guid, B_TRUE); 8059 mutex_enter(&spa_namespace_lock); 8060 spa_close(altspa, FTAG); 8061 } 8062 mutex_exit(&spa_namespace_lock); 8063 8064 /* search the rest of the vdevs for spares to remove */ 8065 spa_vdev_resilver_done(spa); 8066 } 8067 8068 /* all done with the spa; OK to release */ 8069 mutex_enter(&spa_namespace_lock); 8070 spa_close(spa, FTAG); 8071 mutex_exit(&spa_namespace_lock); 8072 8073 return (error); 8074 } 8075 8076 static int 8077 spa_vdev_initialize_impl(spa_t *spa, uint64_t guid, uint64_t cmd_type, 8078 list_t *vd_list) 8079 { 8080 ASSERT(MUTEX_HELD(&spa_namespace_lock)); 8081 8082 spa_config_enter(spa, SCL_CONFIG | SCL_STATE, FTAG, RW_READER); 8083 8084 /* Look up vdev and ensure it's a leaf. */ 8085 vdev_t *vd = spa_lookup_by_guid(spa, guid, B_FALSE); 8086 if (vd == NULL || vd->vdev_detached) { 8087 spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG); 8088 return (SET_ERROR(ENODEV)); 8089 } else if (!vd->vdev_ops->vdev_op_leaf || !vdev_is_concrete(vd)) { 8090 spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG); 8091 return (SET_ERROR(EINVAL)); 8092 } else if (!vdev_writeable(vd)) { 8093 spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG); 8094 return (SET_ERROR(EROFS)); 8095 } 8096 mutex_enter(&vd->vdev_initialize_lock); 8097 spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG); 8098 8099 /* 8100 * When we activate an initialize action we check to see 8101 * if the vdev_initialize_thread is NULL. We do this instead 8102 * of using the vdev_initialize_state since there might be 8103 * a previous initialization process which has completed but 8104 * the thread is not exited. 
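 * Beyond that, START is also refused while the top-level vdev is being
 * removed or RAIDZ-expanded, CANCEL and SUSPEND require an active (or,
 * for CANCEL, suspended) initialization, and UNINIT is refused while an
 * initialize thread is still running.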
8105 */ 8106 if (cmd_type == POOL_INITIALIZE_START && 8107 (vd->vdev_initialize_thread != NULL || 8108 vd->vdev_top->vdev_removing || vd->vdev_top->vdev_rz_expanding)) { 8109 mutex_exit(&vd->vdev_initialize_lock); 8110 return (SET_ERROR(EBUSY)); 8111 } else if (cmd_type == POOL_INITIALIZE_CANCEL && 8112 (vd->vdev_initialize_state != VDEV_INITIALIZE_ACTIVE && 8113 vd->vdev_initialize_state != VDEV_INITIALIZE_SUSPENDED)) { 8114 mutex_exit(&vd->vdev_initialize_lock); 8115 return (SET_ERROR(ESRCH)); 8116 } else if (cmd_type == POOL_INITIALIZE_SUSPEND && 8117 vd->vdev_initialize_state != VDEV_INITIALIZE_ACTIVE) { 8118 mutex_exit(&vd->vdev_initialize_lock); 8119 return (SET_ERROR(ESRCH)); 8120 } else if (cmd_type == POOL_INITIALIZE_UNINIT && 8121 vd->vdev_initialize_thread != NULL) { 8122 mutex_exit(&vd->vdev_initialize_lock); 8123 return (SET_ERROR(EBUSY)); 8124 } 8125 8126 switch (cmd_type) { 8127 case POOL_INITIALIZE_START: 8128 vdev_initialize(vd); 8129 break; 8130 case POOL_INITIALIZE_CANCEL: 8131 vdev_initialize_stop(vd, VDEV_INITIALIZE_CANCELED, vd_list); 8132 break; 8133 case POOL_INITIALIZE_SUSPEND: 8134 vdev_initialize_stop(vd, VDEV_INITIALIZE_SUSPENDED, vd_list); 8135 break; 8136 case POOL_INITIALIZE_UNINIT: 8137 vdev_uninitialize(vd); 8138 break; 8139 default: 8140 panic("invalid cmd_type %llu", (unsigned long long)cmd_type); 8141 } 8142 mutex_exit(&vd->vdev_initialize_lock); 8143 8144 return (0); 8145 } 8146 8147 int 8148 spa_vdev_initialize(spa_t *spa, nvlist_t *nv, uint64_t cmd_type, 8149 nvlist_t *vdev_errlist) 8150 { 8151 int total_errors = 0; 8152 list_t vd_list; 8153 8154 list_create(&vd_list, sizeof (vdev_t), 8155 offsetof(vdev_t, vdev_initialize_node)); 8156 8157 /* 8158 * We hold the namespace lock through the whole function 8159 * to prevent any changes to the pool while we're starting or 8160 * stopping initialization. The config and state locks are held so that 8161 * we can properly assess the vdev state before we commit to 8162 * the initializing operation. 8163 */ 8164 mutex_enter(&spa_namespace_lock); 8165 8166 for (nvpair_t *pair = nvlist_next_nvpair(nv, NULL); 8167 pair != NULL; pair = nvlist_next_nvpair(nv, pair)) { 8168 uint64_t vdev_guid = fnvpair_value_uint64(pair); 8169 8170 int error = spa_vdev_initialize_impl(spa, vdev_guid, cmd_type, 8171 &vd_list); 8172 if (error != 0) { 8173 char guid_as_str[MAXNAMELEN]; 8174 8175 (void) snprintf(guid_as_str, sizeof (guid_as_str), 8176 "%llu", (unsigned long long)vdev_guid); 8177 fnvlist_add_int64(vdev_errlist, guid_as_str, error); 8178 total_errors++; 8179 } 8180 } 8181 8182 /* Wait for all initialize threads to stop. */ 8183 vdev_initialize_stop_wait(spa, &vd_list); 8184 8185 /* Sync out the initializing state */ 8186 txg_wait_synced(spa->spa_dsl_pool, 0); 8187 mutex_exit(&spa_namespace_lock); 8188 8189 list_destroy(&vd_list); 8190 8191 return (total_errors); 8192 } 8193 8194 static int 8195 spa_vdev_trim_impl(spa_t *spa, uint64_t guid, uint64_t cmd_type, 8196 uint64_t rate, boolean_t partial, boolean_t secure, list_t *vd_list) 8197 { 8198 ASSERT(MUTEX_HELD(&spa_namespace_lock)); 8199 8200 spa_config_enter(spa, SCL_CONFIG | SCL_STATE, FTAG, RW_READER); 8201 8202 /* Look up vdev and ensure it's a leaf. 
*/ 8203 vdev_t *vd = spa_lookup_by_guid(spa, guid, B_FALSE); 8204 if (vd == NULL || vd->vdev_detached) { 8205 spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG); 8206 return (SET_ERROR(ENODEV)); 8207 } else if (!vd->vdev_ops->vdev_op_leaf || !vdev_is_concrete(vd)) { 8208 spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG); 8209 return (SET_ERROR(EINVAL)); 8210 } else if (!vdev_writeable(vd)) { 8211 spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG); 8212 return (SET_ERROR(EROFS)); 8213 } else if (!vd->vdev_has_trim) { 8214 spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG); 8215 return (SET_ERROR(EOPNOTSUPP)); 8216 } else if (secure && !vd->vdev_has_securetrim) { 8217 spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG); 8218 return (SET_ERROR(EOPNOTSUPP)); 8219 } 8220 mutex_enter(&vd->vdev_trim_lock); 8221 spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG); 8222 8223 /* 8224 * When we activate a TRIM action we check to see if the 8225 * vdev_trim_thread is NULL. We do this instead of using the 8226 * vdev_trim_state since there might be a previous TRIM process 8227 * which has completed but the thread is not exited. 8228 */ 8229 if (cmd_type == POOL_TRIM_START && 8230 (vd->vdev_trim_thread != NULL || vd->vdev_top->vdev_removing || 8231 vd->vdev_top->vdev_rz_expanding)) { 8232 mutex_exit(&vd->vdev_trim_lock); 8233 return (SET_ERROR(EBUSY)); 8234 } else if (cmd_type == POOL_TRIM_CANCEL && 8235 (vd->vdev_trim_state != VDEV_TRIM_ACTIVE && 8236 vd->vdev_trim_state != VDEV_TRIM_SUSPENDED)) { 8237 mutex_exit(&vd->vdev_trim_lock); 8238 return (SET_ERROR(ESRCH)); 8239 } else if (cmd_type == POOL_TRIM_SUSPEND && 8240 vd->vdev_trim_state != VDEV_TRIM_ACTIVE) { 8241 mutex_exit(&vd->vdev_trim_lock); 8242 return (SET_ERROR(ESRCH)); 8243 } 8244 8245 switch (cmd_type) { 8246 case POOL_TRIM_START: 8247 vdev_trim(vd, rate, partial, secure); 8248 break; 8249 case POOL_TRIM_CANCEL: 8250 vdev_trim_stop(vd, VDEV_TRIM_CANCELED, vd_list); 8251 break; 8252 case POOL_TRIM_SUSPEND: 8253 vdev_trim_stop(vd, VDEV_TRIM_SUSPENDED, vd_list); 8254 break; 8255 default: 8256 panic("invalid cmd_type %llu", (unsigned long long)cmd_type); 8257 } 8258 mutex_exit(&vd->vdev_trim_lock); 8259 8260 return (0); 8261 } 8262 8263 /* 8264 * Initiates a manual TRIM for the requested vdevs. This kicks off individual 8265 * TRIM threads for each child vdev. These threads pass over all of the free 8266 * space in the vdev's metaslabs and issues TRIM commands for that space. 8267 */ 8268 int 8269 spa_vdev_trim(spa_t *spa, nvlist_t *nv, uint64_t cmd_type, uint64_t rate, 8270 boolean_t partial, boolean_t secure, nvlist_t *vdev_errlist) 8271 { 8272 int total_errors = 0; 8273 list_t vd_list; 8274 8275 list_create(&vd_list, sizeof (vdev_t), 8276 offsetof(vdev_t, vdev_trim_node)); 8277 8278 /* 8279 * We hold the namespace lock through the whole function 8280 * to prevent any changes to the pool while we're starting or 8281 * stopping TRIM. The config and state locks are held so that 8282 * we can properly assess the vdev state before we commit to 8283 * the TRIM operation. 
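 * Per-vdev failures are reported in 'vdev_errlist' keyed by the vdev
 * guid, and the total number of failed vdevs is returned.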
8284 */ 8285 mutex_enter(&spa_namespace_lock); 8286 8287 for (nvpair_t *pair = nvlist_next_nvpair(nv, NULL); 8288 pair != NULL; pair = nvlist_next_nvpair(nv, pair)) { 8289 uint64_t vdev_guid = fnvpair_value_uint64(pair); 8290 8291 int error = spa_vdev_trim_impl(spa, vdev_guid, cmd_type, 8292 rate, partial, secure, &vd_list); 8293 if (error != 0) { 8294 char guid_as_str[MAXNAMELEN]; 8295 8296 (void) snprintf(guid_as_str, sizeof (guid_as_str), 8297 "%llu", (unsigned long long)vdev_guid); 8298 fnvlist_add_int64(vdev_errlist, guid_as_str, error); 8299 total_errors++; 8300 } 8301 } 8302 8303 /* Wait for all TRIM threads to stop. */ 8304 vdev_trim_stop_wait(spa, &vd_list); 8305 8306 /* Sync out the TRIM state */ 8307 txg_wait_synced(spa->spa_dsl_pool, 0); 8308 mutex_exit(&spa_namespace_lock); 8309 8310 list_destroy(&vd_list); 8311 8312 return (total_errors); 8313 } 8314 8315 /* 8316 * Split a set of devices from their mirrors, and create a new pool from them. 8317 */ 8318 int 8319 spa_vdev_split_mirror(spa_t *spa, const char *newname, nvlist_t *config, 8320 nvlist_t *props, boolean_t exp) 8321 { 8322 int error = 0; 8323 uint64_t txg, *glist; 8324 spa_t *newspa; 8325 uint_t c, children, lastlog; 8326 nvlist_t **child, *nvl, *tmp; 8327 dmu_tx_t *tx; 8328 const char *altroot = NULL; 8329 vdev_t *rvd, **vml = NULL; /* vdev modify list */ 8330 boolean_t activate_slog; 8331 8332 ASSERT(spa_writeable(spa)); 8333 8334 txg = spa_vdev_enter(spa); 8335 8336 ASSERT(MUTEX_HELD(&spa_namespace_lock)); 8337 if (spa_feature_is_active(spa, SPA_FEATURE_POOL_CHECKPOINT)) { 8338 error = (spa_has_checkpoint(spa)) ? 8339 ZFS_ERR_CHECKPOINT_EXISTS : ZFS_ERR_DISCARDING_CHECKPOINT; 8340 return (spa_vdev_exit(spa, NULL, txg, error)); 8341 } 8342 8343 /* clear the log and flush everything up to now */ 8344 activate_slog = spa_passivate_log(spa); 8345 (void) spa_vdev_config_exit(spa, NULL, txg, 0, FTAG); 8346 error = spa_reset_logs(spa); 8347 txg = spa_vdev_config_enter(spa); 8348 8349 if (activate_slog) 8350 spa_activate_log(spa); 8351 8352 if (error != 0) 8353 return (spa_vdev_exit(spa, NULL, txg, error)); 8354 8355 /* check new spa name before going any further */ 8356 if (spa_lookup(newname) != NULL) 8357 return (spa_vdev_exit(spa, NULL, txg, EEXIST)); 8358 8359 /* 8360 * scan through all the children to ensure they're all mirrors 8361 */ 8362 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &nvl) != 0 || 8363 nvlist_lookup_nvlist_array(nvl, ZPOOL_CONFIG_CHILDREN, &child, 8364 &children) != 0) 8365 return (spa_vdev_exit(spa, NULL, txg, EINVAL)); 8366 8367 /* first, check to ensure we've got the right child count */ 8368 rvd = spa->spa_root_vdev; 8369 lastlog = 0; 8370 for (c = 0; c < rvd->vdev_children; c++) { 8371 vdev_t *vd = rvd->vdev_child[c]; 8372 8373 /* don't count the holes & logs as children */ 8374 if (vd->vdev_islog || (vd->vdev_ops != &vdev_indirect_ops && 8375 !vdev_is_concrete(vd))) { 8376 if (lastlog == 0) 8377 lastlog = c; 8378 continue; 8379 } 8380 8381 lastlog = 0; 8382 } 8383 if (children != (lastlog != 0 ? 
lastlog : rvd->vdev_children)) 8384 return (spa_vdev_exit(spa, NULL, txg, EINVAL)); 8385 8386 /* next, ensure no spare or cache devices are part of the split */ 8387 if (nvlist_lookup_nvlist(nvl, ZPOOL_CONFIG_SPARES, &tmp) == 0 || 8388 nvlist_lookup_nvlist(nvl, ZPOOL_CONFIG_L2CACHE, &tmp) == 0) 8389 return (spa_vdev_exit(spa, NULL, txg, EINVAL)); 8390 8391 vml = kmem_zalloc(children * sizeof (vdev_t *), KM_SLEEP); 8392 glist = kmem_zalloc(children * sizeof (uint64_t), KM_SLEEP); 8393 8394 /* then, loop over each vdev and validate it */ 8395 for (c = 0; c < children; c++) { 8396 uint64_t is_hole = 0; 8397 8398 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_HOLE, 8399 &is_hole); 8400 8401 if (is_hole != 0) { 8402 if (spa->spa_root_vdev->vdev_child[c]->vdev_ishole || 8403 spa->spa_root_vdev->vdev_child[c]->vdev_islog) { 8404 continue; 8405 } else { 8406 error = SET_ERROR(EINVAL); 8407 break; 8408 } 8409 } 8410 8411 /* deal with indirect vdevs */ 8412 if (spa->spa_root_vdev->vdev_child[c]->vdev_ops == 8413 &vdev_indirect_ops) 8414 continue; 8415 8416 /* which disk is going to be split? */ 8417 if (nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_GUID, 8418 &glist[c]) != 0) { 8419 error = SET_ERROR(EINVAL); 8420 break; 8421 } 8422 8423 /* look it up in the spa */ 8424 vml[c] = spa_lookup_by_guid(spa, glist[c], B_FALSE); 8425 if (vml[c] == NULL) { 8426 error = SET_ERROR(ENODEV); 8427 break; 8428 } 8429 8430 /* make sure there's nothing stopping the split */ 8431 if (vml[c]->vdev_parent->vdev_ops != &vdev_mirror_ops || 8432 vml[c]->vdev_islog || 8433 !vdev_is_concrete(vml[c]) || 8434 vml[c]->vdev_isspare || 8435 vml[c]->vdev_isl2cache || 8436 !vdev_writeable(vml[c]) || 8437 vml[c]->vdev_children != 0 || 8438 vml[c]->vdev_state != VDEV_STATE_HEALTHY || 8439 c != spa->spa_root_vdev->vdev_child[c]->vdev_id) { 8440 error = SET_ERROR(EINVAL); 8441 break; 8442 } 8443 8444 if (vdev_dtl_required(vml[c]) || 8445 vdev_resilver_needed(vml[c], NULL, NULL)) { 8446 error = SET_ERROR(EBUSY); 8447 break; 8448 } 8449 8450 /* we need certain info from the top level */ 8451 fnvlist_add_uint64(child[c], ZPOOL_CONFIG_METASLAB_ARRAY, 8452 vml[c]->vdev_top->vdev_ms_array); 8453 fnvlist_add_uint64(child[c], ZPOOL_CONFIG_METASLAB_SHIFT, 8454 vml[c]->vdev_top->vdev_ms_shift); 8455 fnvlist_add_uint64(child[c], ZPOOL_CONFIG_ASIZE, 8456 vml[c]->vdev_top->vdev_asize); 8457 fnvlist_add_uint64(child[c], ZPOOL_CONFIG_ASHIFT, 8458 vml[c]->vdev_top->vdev_ashift); 8459 8460 /* transfer per-vdev ZAPs */ 8461 ASSERT3U(vml[c]->vdev_leaf_zap, !=, 0); 8462 VERIFY0(nvlist_add_uint64(child[c], 8463 ZPOOL_CONFIG_VDEV_LEAF_ZAP, vml[c]->vdev_leaf_zap)); 8464 8465 ASSERT3U(vml[c]->vdev_top->vdev_top_zap, !=, 0); 8466 VERIFY0(nvlist_add_uint64(child[c], 8467 ZPOOL_CONFIG_VDEV_TOP_ZAP, 8468 vml[c]->vdev_parent->vdev_top_zap)); 8469 } 8470 8471 if (error != 0) { 8472 kmem_free(vml, children * sizeof (vdev_t *)); 8473 kmem_free(glist, children * sizeof (uint64_t)); 8474 return (spa_vdev_exit(spa, NULL, txg, error)); 8475 } 8476 8477 /* stop writers from using the disks */ 8478 for (c = 0; c < children; c++) { 8479 if (vml[c] != NULL) 8480 vml[c]->vdev_offline = B_TRUE; 8481 } 8482 vdev_reopen(spa->spa_root_vdev); 8483 8484 /* 8485 * Temporarily record the splitting vdevs in the spa config. This 8486 * will disappear once the config is regenerated. 
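 * The guids of the splitting children are stored under
 * ZPOOL_CONFIG_SPLIT_LIST inside a ZPOOL_CONFIG_SPLIT nvlist, and the
 * same nvlist is kept in spa_config_splitting until the split finishes.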
8487 */ 8488 nvl = fnvlist_alloc(); 8489 fnvlist_add_uint64_array(nvl, ZPOOL_CONFIG_SPLIT_LIST, glist, children); 8490 kmem_free(glist, children * sizeof (uint64_t)); 8491 8492 mutex_enter(&spa->spa_props_lock); 8493 fnvlist_add_nvlist(spa->spa_config, ZPOOL_CONFIG_SPLIT, nvl); 8494 mutex_exit(&spa->spa_props_lock); 8495 spa->spa_config_splitting = nvl; 8496 vdev_config_dirty(spa->spa_root_vdev); 8497 8498 /* configure and create the new pool */ 8499 fnvlist_add_string(config, ZPOOL_CONFIG_POOL_NAME, newname); 8500 fnvlist_add_uint64(config, ZPOOL_CONFIG_POOL_STATE, 8501 exp ? POOL_STATE_EXPORTED : POOL_STATE_ACTIVE); 8502 fnvlist_add_uint64(config, ZPOOL_CONFIG_VERSION, spa_version(spa)); 8503 fnvlist_add_uint64(config, ZPOOL_CONFIG_POOL_TXG, spa->spa_config_txg); 8504 fnvlist_add_uint64(config, ZPOOL_CONFIG_POOL_GUID, 8505 spa_generate_guid(NULL)); 8506 VERIFY0(nvlist_add_boolean(config, ZPOOL_CONFIG_HAS_PER_VDEV_ZAPS)); 8507 (void) nvlist_lookup_string(props, 8508 zpool_prop_to_name(ZPOOL_PROP_ALTROOT), &altroot); 8509 8510 /* add the new pool to the namespace */ 8511 newspa = spa_add(newname, config, altroot); 8512 newspa->spa_avz_action = AVZ_ACTION_REBUILD; 8513 newspa->spa_config_txg = spa->spa_config_txg; 8514 spa_set_log_state(newspa, SPA_LOG_CLEAR); 8515 8516 /* release the spa config lock, retaining the namespace lock */ 8517 spa_vdev_config_exit(spa, NULL, txg, 0, FTAG); 8518 8519 if (zio_injection_enabled) 8520 zio_handle_panic_injection(spa, FTAG, 1); 8521 8522 spa_activate(newspa, spa_mode_global); 8523 spa_async_suspend(newspa); 8524 8525 /* 8526 * Temporarily stop the initializing and TRIM activity. We set the 8527 * state to ACTIVE so that we know to resume initializing or TRIM 8528 * once the split has completed. 8529 */ 8530 list_t vd_initialize_list; 8531 list_create(&vd_initialize_list, sizeof (vdev_t), 8532 offsetof(vdev_t, vdev_initialize_node)); 8533 8534 list_t vd_trim_list; 8535 list_create(&vd_trim_list, sizeof (vdev_t), 8536 offsetof(vdev_t, vdev_trim_node)); 8537 8538 for (c = 0; c < children; c++) { 8539 if (vml[c] != NULL && vml[c]->vdev_ops != &vdev_indirect_ops) { 8540 mutex_enter(&vml[c]->vdev_initialize_lock); 8541 vdev_initialize_stop(vml[c], 8542 VDEV_INITIALIZE_ACTIVE, &vd_initialize_list); 8543 mutex_exit(&vml[c]->vdev_initialize_lock); 8544 8545 mutex_enter(&vml[c]->vdev_trim_lock); 8546 vdev_trim_stop(vml[c], VDEV_TRIM_ACTIVE, &vd_trim_list); 8547 mutex_exit(&vml[c]->vdev_trim_lock); 8548 } 8549 } 8550 8551 vdev_initialize_stop_wait(spa, &vd_initialize_list); 8552 vdev_trim_stop_wait(spa, &vd_trim_list); 8553 8554 list_destroy(&vd_initialize_list); 8555 list_destroy(&vd_trim_list); 8556 8557 newspa->spa_config_source = SPA_CONFIG_SRC_SPLIT; 8558 newspa->spa_is_splitting = B_TRUE; 8559 8560 /* create the new pool from the disks of the original pool */ 8561 error = spa_load(newspa, SPA_LOAD_IMPORT, SPA_IMPORT_ASSEMBLE); 8562 if (error) 8563 goto out; 8564 8565 /* if that worked, generate a real config for the new pool */ 8566 if (newspa->spa_root_vdev != NULL) { 8567 newspa->spa_config_splitting = fnvlist_alloc(); 8568 fnvlist_add_uint64(newspa->spa_config_splitting, 8569 ZPOOL_CONFIG_SPLIT_GUID, spa_guid(spa)); 8570 spa_config_set(newspa, spa_config_generate(newspa, NULL, -1ULL, 8571 B_TRUE)); 8572 } 8573 8574 /* set the props */ 8575 if (props != NULL) { 8576 spa_configfile_set(newspa, props, B_FALSE); 8577 error = spa_prop_set(newspa, props); 8578 if (error) 8579 goto out; 8580 } 8581 8582 /* flush everything */ 8583 txg = 
spa_vdev_config_enter(newspa); 8584 vdev_config_dirty(newspa->spa_root_vdev); 8585 (void) spa_vdev_config_exit(newspa, NULL, txg, 0, FTAG); 8586 8587 if (zio_injection_enabled) 8588 zio_handle_panic_injection(spa, FTAG, 2); 8589 8590 spa_async_resume(newspa); 8591 8592 /* finally, update the original pool's config */ 8593 txg = spa_vdev_config_enter(spa); 8594 tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir); 8595 error = dmu_tx_assign(tx, TXG_WAIT); 8596 if (error != 0) 8597 dmu_tx_abort(tx); 8598 for (c = 0; c < children; c++) { 8599 if (vml[c] != NULL && vml[c]->vdev_ops != &vdev_indirect_ops) { 8600 vdev_t *tvd = vml[c]->vdev_top; 8601 8602 /* 8603 * Need to be sure the detachable VDEV is not 8604 * on any *other* txg's DTL list to prevent it 8605 * from being accessed after it's freed. 8606 */ 8607 for (int t = 0; t < TXG_SIZE; t++) { 8608 (void) txg_list_remove_this( 8609 &tvd->vdev_dtl_list, vml[c], t); 8610 } 8611 8612 vdev_split(vml[c]); 8613 if (error == 0) 8614 spa_history_log_internal(spa, "detach", tx, 8615 "vdev=%s", vml[c]->vdev_path); 8616 8617 vdev_free(vml[c]); 8618 } 8619 } 8620 spa->spa_avz_action = AVZ_ACTION_REBUILD; 8621 vdev_config_dirty(spa->spa_root_vdev); 8622 spa->spa_config_splitting = NULL; 8623 nvlist_free(nvl); 8624 if (error == 0) 8625 dmu_tx_commit(tx); 8626 (void) spa_vdev_exit(spa, NULL, txg, 0); 8627 8628 if (zio_injection_enabled) 8629 zio_handle_panic_injection(spa, FTAG, 3); 8630 8631 /* split is complete; log a history record */ 8632 spa_history_log_internal(newspa, "split", NULL, 8633 "from pool %s", spa_name(spa)); 8634 8635 newspa->spa_is_splitting = B_FALSE; 8636 kmem_free(vml, children * sizeof (vdev_t *)); 8637 8638 /* if we're not going to mount the filesystems in userland, export */ 8639 if (exp) 8640 error = spa_export_common(newname, POOL_STATE_EXPORTED, NULL, 8641 B_FALSE, B_FALSE); 8642 8643 return (error); 8644 8645 out: 8646 spa_unload(newspa); 8647 spa_deactivate(newspa); 8648 spa_remove(newspa); 8649 8650 txg = spa_vdev_config_enter(spa); 8651 8652 /* re-online all offlined disks */ 8653 for (c = 0; c < children; c++) { 8654 if (vml[c] != NULL) 8655 vml[c]->vdev_offline = B_FALSE; 8656 } 8657 8658 /* restart initializing or trimming disks as necessary */ 8659 spa_async_request(spa, SPA_ASYNC_INITIALIZE_RESTART); 8660 spa_async_request(spa, SPA_ASYNC_TRIM_RESTART); 8661 spa_async_request(spa, SPA_ASYNC_AUTOTRIM_RESTART); 8662 8663 vdev_reopen(spa->spa_root_vdev); 8664 8665 nvlist_free(spa->spa_config_splitting); 8666 spa->spa_config_splitting = NULL; 8667 (void) spa_vdev_exit(spa, NULL, txg, error); 8668 8669 kmem_free(vml, children * sizeof (vdev_t *)); 8670 return (error); 8671 } 8672 8673 /* 8674 * Find any device that's done replacing, or a vdev marked 'unspare' that's 8675 * currently spared, so we can detach it. 8676 */ 8677 static vdev_t * 8678 spa_vdev_resilver_done_hunt(vdev_t *vd) 8679 { 8680 vdev_t *newvd, *oldvd; 8681 8682 for (int c = 0; c < vd->vdev_children; c++) { 8683 oldvd = spa_vdev_resilver_done_hunt(vd->vdev_child[c]); 8684 if (oldvd != NULL) 8685 return (oldvd); 8686 } 8687 8688 /* 8689 * Check for a completed replacement. We always consider the first 8690 * vdev in the list to be the oldest vdev, and the last one to be 8691 * the newest (see spa_vdev_attach() for how that works). In 8692 * the case where the newest vdev is faulted, we will not automatically 8693 * remove it after a resilver completes. This is OK as it will require 8694 * user intervention to determine which disk the admin wishes to keep. 
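 * Concretely, for a replacing vdev the original (child 0) is returned
 * once the newest child has empty DTL_MISSING and DTL_OUTAGE maps and
 * the original is no longer required to hold any data.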
8695 */ 8696 if (vd->vdev_ops == &vdev_replacing_ops) { 8697 ASSERT(vd->vdev_children > 1); 8698 8699 newvd = vd->vdev_child[vd->vdev_children - 1]; 8700 oldvd = vd->vdev_child[0]; 8701 8702 if (vdev_dtl_empty(newvd, DTL_MISSING) && 8703 vdev_dtl_empty(newvd, DTL_OUTAGE) && 8704 !vdev_dtl_required(oldvd)) 8705 return (oldvd); 8706 } 8707 8708 /* 8709 * Check for a completed resilver with the 'unspare' flag set. 8710 * Also potentially update faulted state. 8711 */ 8712 if (vd->vdev_ops == &vdev_spare_ops) { 8713 vdev_t *first = vd->vdev_child[0]; 8714 vdev_t *last = vd->vdev_child[vd->vdev_children - 1]; 8715 8716 if (last->vdev_unspare) { 8717 oldvd = first; 8718 newvd = last; 8719 } else if (first->vdev_unspare) { 8720 oldvd = last; 8721 newvd = first; 8722 } else { 8723 oldvd = NULL; 8724 } 8725 8726 if (oldvd != NULL && 8727 vdev_dtl_empty(newvd, DTL_MISSING) && 8728 vdev_dtl_empty(newvd, DTL_OUTAGE) && 8729 !vdev_dtl_required(oldvd)) 8730 return (oldvd); 8731 8732 vdev_propagate_state(vd); 8733 8734 /* 8735 * If there are more than two spares attached to a disk, 8736 * and those spares are not required, then we want to 8737 * attempt to free them up now so that they can be used 8738 * by other pools. Once we're back down to a single 8739 * disk+spare, we stop removing them. 8740 */ 8741 if (vd->vdev_children > 2) { 8742 newvd = vd->vdev_child[1]; 8743 8744 if (newvd->vdev_isspare && last->vdev_isspare && 8745 vdev_dtl_empty(last, DTL_MISSING) && 8746 vdev_dtl_empty(last, DTL_OUTAGE) && 8747 !vdev_dtl_required(newvd)) 8748 return (newvd); 8749 } 8750 } 8751 8752 return (NULL); 8753 } 8754 8755 static void 8756 spa_vdev_resilver_done(spa_t *spa) 8757 { 8758 vdev_t *vd, *pvd, *ppvd; 8759 uint64_t guid, sguid, pguid, ppguid; 8760 8761 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 8762 8763 while ((vd = spa_vdev_resilver_done_hunt(spa->spa_root_vdev)) != NULL) { 8764 pvd = vd->vdev_parent; 8765 ppvd = pvd->vdev_parent; 8766 guid = vd->vdev_guid; 8767 pguid = pvd->vdev_guid; 8768 ppguid = ppvd->vdev_guid; 8769 sguid = 0; 8770 /* 8771 * If we have just finished replacing a hot spared device, then 8772 * we need to detach the parent's first child (the original hot 8773 * spare) as well. 8774 */ 8775 if (ppvd->vdev_ops == &vdev_spare_ops && pvd->vdev_id == 0 && 8776 ppvd->vdev_children == 2) { 8777 ASSERT(pvd->vdev_ops == &vdev_replacing_ops); 8778 sguid = ppvd->vdev_child[1]->vdev_guid; 8779 } 8780 ASSERT(vd->vdev_resilver_txg == 0 || !vdev_dtl_required(vd)); 8781 8782 spa_config_exit(spa, SCL_ALL, FTAG); 8783 if (spa_vdev_detach(spa, guid, pguid, B_TRUE) != 0) 8784 return; 8785 if (sguid && spa_vdev_detach(spa, sguid, ppguid, B_TRUE) != 0) 8786 return; 8787 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 8788 } 8789 8790 spa_config_exit(spa, SCL_ALL, FTAG); 8791 8792 /* 8793 * If a detach was not performed above replace waiters will not have 8794 * been notified. In which case we must do so now. 8795 */ 8796 spa_notify_waiters(spa); 8797 } 8798 8799 /* 8800 * Update the stored path or FRU for this vdev. 
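 * 'value' replaces vdev_path when 'ispath' is set and vdev_fru
 * otherwise; the change is only synced out when the stored string
 * actually differs from 'value'.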
8801 */ 8802 static int 8803 spa_vdev_set_common(spa_t *spa, uint64_t guid, const char *value, 8804 boolean_t ispath) 8805 { 8806 vdev_t *vd; 8807 boolean_t sync = B_FALSE; 8808 8809 ASSERT(spa_writeable(spa)); 8810 8811 spa_vdev_state_enter(spa, SCL_ALL); 8812 8813 if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL) 8814 return (spa_vdev_state_exit(spa, NULL, ENOENT)); 8815 8816 if (!vd->vdev_ops->vdev_op_leaf) 8817 return (spa_vdev_state_exit(spa, NULL, ENOTSUP)); 8818 8819 if (ispath) { 8820 if (strcmp(value, vd->vdev_path) != 0) { 8821 spa_strfree(vd->vdev_path); 8822 vd->vdev_path = spa_strdup(value); 8823 sync = B_TRUE; 8824 } 8825 } else { 8826 if (vd->vdev_fru == NULL) { 8827 vd->vdev_fru = spa_strdup(value); 8828 sync = B_TRUE; 8829 } else if (strcmp(value, vd->vdev_fru) != 0) { 8830 spa_strfree(vd->vdev_fru); 8831 vd->vdev_fru = spa_strdup(value); 8832 sync = B_TRUE; 8833 } 8834 } 8835 8836 return (spa_vdev_state_exit(spa, sync ? vd : NULL, 0)); 8837 } 8838 8839 int 8840 spa_vdev_setpath(spa_t *spa, uint64_t guid, const char *newpath) 8841 { 8842 return (spa_vdev_set_common(spa, guid, newpath, B_TRUE)); 8843 } 8844 8845 int 8846 spa_vdev_setfru(spa_t *spa, uint64_t guid, const char *newfru) 8847 { 8848 return (spa_vdev_set_common(spa, guid, newfru, B_FALSE)); 8849 } 8850 8851 /* 8852 * ========================================================================== 8853 * SPA Scanning 8854 * ========================================================================== 8855 */ 8856 int 8857 spa_scrub_pause_resume(spa_t *spa, pool_scrub_cmd_t cmd) 8858 { 8859 ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == 0); 8860 8861 if (dsl_scan_resilvering(spa->spa_dsl_pool)) 8862 return (SET_ERROR(EBUSY)); 8863 8864 return (dsl_scrub_set_pause_resume(spa->spa_dsl_pool, cmd)); 8865 } 8866 8867 int 8868 spa_scan_stop(spa_t *spa) 8869 { 8870 ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == 0); 8871 if (dsl_scan_resilvering(spa->spa_dsl_pool)) 8872 return (SET_ERROR(EBUSY)); 8873 8874 return (dsl_scan_cancel(spa->spa_dsl_pool)); 8875 } 8876 8877 int 8878 spa_scan(spa_t *spa, pool_scan_func_t func) 8879 { 8880 return (spa_scan_range(spa, func, 0, 0)); 8881 } 8882 8883 int 8884 spa_scan_range(spa_t *spa, pool_scan_func_t func, uint64_t txgstart, 8885 uint64_t txgend) 8886 { 8887 ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == 0); 8888 8889 if (func >= POOL_SCAN_FUNCS || func == POOL_SCAN_NONE) 8890 return (SET_ERROR(ENOTSUP)); 8891 8892 if (func == POOL_SCAN_RESILVER && 8893 !spa_feature_is_enabled(spa, SPA_FEATURE_RESILVER_DEFER)) 8894 return (SET_ERROR(ENOTSUP)); 8895 8896 if (func != POOL_SCAN_SCRUB && (txgstart != 0 || txgend != 0)) 8897 return (SET_ERROR(ENOTSUP)); 8898 8899 /* 8900 * If a resilver was requested, but there is no DTL on a 8901 * writeable leaf device, we have nothing to do. 
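 * We still post SPA_ASYNC_RESILVER_DONE so that resilver-done
 * processing (e.g. detaching fully resilvered spares) runs and any
 * waiters are notified.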
8902 */ 8903 if (func == POOL_SCAN_RESILVER && 8904 !vdev_resilver_needed(spa->spa_root_vdev, NULL, NULL)) { 8905 spa_async_request(spa, SPA_ASYNC_RESILVER_DONE); 8906 return (0); 8907 } 8908 8909 if (func == POOL_SCAN_ERRORSCRUB && 8910 !spa_feature_is_enabled(spa, SPA_FEATURE_HEAD_ERRLOG)) 8911 return (SET_ERROR(ENOTSUP)); 8912 8913 return (dsl_scan(spa->spa_dsl_pool, func, txgstart, txgend)); 8914 } 8915 8916 /* 8917 * ========================================================================== 8918 * SPA async task processing 8919 * ========================================================================== 8920 */ 8921 8922 static void 8923 spa_async_remove(spa_t *spa, vdev_t *vd) 8924 { 8925 if (vd->vdev_remove_wanted) { 8926 vd->vdev_remove_wanted = B_FALSE; 8927 vd->vdev_delayed_close = B_FALSE; 8928 vdev_set_state(vd, B_FALSE, VDEV_STATE_REMOVED, VDEV_AUX_NONE); 8929 8930 /* 8931 * We want to clear the stats, but we don't want to do a full 8932 * vdev_clear() as that will cause us to throw away 8933 * degraded/faulted state as well as attempt to reopen the 8934 * device, all of which is a waste. 8935 */ 8936 vd->vdev_stat.vs_read_errors = 0; 8937 vd->vdev_stat.vs_write_errors = 0; 8938 vd->vdev_stat.vs_checksum_errors = 0; 8939 8940 vdev_state_dirty(vd->vdev_top); 8941 8942 /* Tell userspace that the vdev is gone. */ 8943 zfs_post_remove(spa, vd); 8944 } 8945 8946 for (int c = 0; c < vd->vdev_children; c++) 8947 spa_async_remove(spa, vd->vdev_child[c]); 8948 } 8949 8950 static void 8951 spa_async_fault_vdev(spa_t *spa, vdev_t *vd) 8952 { 8953 if (vd->vdev_fault_wanted) { 8954 vd->vdev_fault_wanted = B_FALSE; 8955 vdev_set_state(vd, B_TRUE, VDEV_STATE_FAULTED, 8956 VDEV_AUX_ERR_EXCEEDED); 8957 } 8958 8959 for (int c = 0; c < vd->vdev_children; c++) 8960 spa_async_fault_vdev(spa, vd->vdev_child[c]); 8961 } 8962 8963 static void 8964 spa_async_autoexpand(spa_t *spa, vdev_t *vd) 8965 { 8966 if (!spa->spa_autoexpand) 8967 return; 8968 8969 for (int c = 0; c < vd->vdev_children; c++) { 8970 vdev_t *cvd = vd->vdev_child[c]; 8971 spa_async_autoexpand(spa, cvd); 8972 } 8973 8974 if (!vd->vdev_ops->vdev_op_leaf || vd->vdev_physpath == NULL) 8975 return; 8976 8977 spa_event_notify(vd->vdev_spa, vd, NULL, ESC_ZFS_VDEV_AUTOEXPAND); 8978 } 8979 8980 static __attribute__((noreturn)) void 8981 spa_async_thread(void *arg) 8982 { 8983 spa_t *spa = (spa_t *)arg; 8984 dsl_pool_t *dp = spa->spa_dsl_pool; 8985 int tasks; 8986 8987 ASSERT(spa->spa_sync_on); 8988 8989 mutex_enter(&spa->spa_async_lock); 8990 tasks = spa->spa_async_tasks; 8991 spa->spa_async_tasks = 0; 8992 mutex_exit(&spa->spa_async_lock); 8993 8994 /* 8995 * See if the config needs to be updated. 
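 * We snapshot the allocatable space of the normal, special, dedup and
 * embedded log classes before and after the update so that any growth
 * can be logged below.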
8996 */ 8997 if (tasks & SPA_ASYNC_CONFIG_UPDATE) { 8998 uint64_t old_space, new_space; 8999 9000 mutex_enter(&spa_namespace_lock); 9001 old_space = metaslab_class_get_space(spa_normal_class(spa)); 9002 old_space += metaslab_class_get_space(spa_special_class(spa)); 9003 old_space += metaslab_class_get_space(spa_dedup_class(spa)); 9004 old_space += metaslab_class_get_space( 9005 spa_embedded_log_class(spa)); 9006 9007 spa_config_update(spa, SPA_CONFIG_UPDATE_POOL); 9008 9009 new_space = metaslab_class_get_space(spa_normal_class(spa)); 9010 new_space += metaslab_class_get_space(spa_special_class(spa)); 9011 new_space += metaslab_class_get_space(spa_dedup_class(spa)); 9012 new_space += metaslab_class_get_space( 9013 spa_embedded_log_class(spa)); 9014 mutex_exit(&spa_namespace_lock); 9015 9016 /* 9017 * If the pool grew as a result of the config update, 9018 * then log an internal history event. 9019 */ 9020 if (new_space != old_space) { 9021 spa_history_log_internal(spa, "vdev online", NULL, 9022 "pool '%s' size: %llu(+%llu)", 9023 spa_name(spa), (u_longlong_t)new_space, 9024 (u_longlong_t)(new_space - old_space)); 9025 } 9026 } 9027 9028 /* 9029 * See if any devices need to be marked REMOVED. 9030 */ 9031 if (tasks & SPA_ASYNC_REMOVE) { 9032 spa_vdev_state_enter(spa, SCL_NONE); 9033 spa_async_remove(spa, spa->spa_root_vdev); 9034 for (int i = 0; i < spa->spa_l2cache.sav_count; i++) 9035 spa_async_remove(spa, spa->spa_l2cache.sav_vdevs[i]); 9036 for (int i = 0; i < spa->spa_spares.sav_count; i++) 9037 spa_async_remove(spa, spa->spa_spares.sav_vdevs[i]); 9038 (void) spa_vdev_state_exit(spa, NULL, 0); 9039 } 9040 9041 if ((tasks & SPA_ASYNC_AUTOEXPAND) && !spa_suspended(spa)) { 9042 spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER); 9043 spa_async_autoexpand(spa, spa->spa_root_vdev); 9044 spa_config_exit(spa, SCL_CONFIG, FTAG); 9045 } 9046 9047 /* 9048 * See if any devices need to be marked faulted. 9049 */ 9050 if (tasks & SPA_ASYNC_FAULT_VDEV) { 9051 spa_vdev_state_enter(spa, SCL_NONE); 9052 spa_async_fault_vdev(spa, spa->spa_root_vdev); 9053 (void) spa_vdev_state_exit(spa, NULL, 0); 9054 } 9055 9056 /* 9057 * If any devices are done replacing, detach them. 9058 */ 9059 if (tasks & SPA_ASYNC_RESILVER_DONE || 9060 tasks & SPA_ASYNC_REBUILD_DONE || 9061 tasks & SPA_ASYNC_DETACH_SPARE) { 9062 spa_vdev_resilver_done(spa); 9063 } 9064 9065 /* 9066 * Kick off a resilver. 9067 */ 9068 if (tasks & SPA_ASYNC_RESILVER && 9069 !vdev_rebuild_active(spa->spa_root_vdev) && 9070 (!dsl_scan_resilvering(dp) || 9071 !spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_RESILVER_DEFER))) 9072 dsl_scan_restart_resilver(dp, 0); 9073 9074 if (tasks & SPA_ASYNC_INITIALIZE_RESTART) { 9075 mutex_enter(&spa_namespace_lock); 9076 spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER); 9077 vdev_initialize_restart(spa->spa_root_vdev); 9078 spa_config_exit(spa, SCL_CONFIG, FTAG); 9079 mutex_exit(&spa_namespace_lock); 9080 } 9081 9082 if (tasks & SPA_ASYNC_TRIM_RESTART) { 9083 mutex_enter(&spa_namespace_lock); 9084 spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER); 9085 vdev_trim_restart(spa->spa_root_vdev); 9086 spa_config_exit(spa, SCL_CONFIG, FTAG); 9087 mutex_exit(&spa_namespace_lock); 9088 } 9089 9090 if (tasks & SPA_ASYNC_AUTOTRIM_RESTART) { 9091 mutex_enter(&spa_namespace_lock); 9092 spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER); 9093 vdev_autotrim_restart(spa); 9094 spa_config_exit(spa, SCL_CONFIG, FTAG); 9095 mutex_exit(&spa_namespace_lock); 9096 } 9097 9098 /* 9099 * Kick off L2 cache whole device TRIM. 
9100 */ 9101 if (tasks & SPA_ASYNC_L2CACHE_TRIM) { 9102 mutex_enter(&spa_namespace_lock); 9103 spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER); 9104 vdev_trim_l2arc(spa); 9105 spa_config_exit(spa, SCL_CONFIG, FTAG); 9106 mutex_exit(&spa_namespace_lock); 9107 } 9108 9109 /* 9110 * Kick off L2 cache rebuilding. 9111 */ 9112 if (tasks & SPA_ASYNC_L2CACHE_REBUILD) { 9113 mutex_enter(&spa_namespace_lock); 9114 spa_config_enter(spa, SCL_L2ARC, FTAG, RW_READER); 9115 l2arc_spa_rebuild_start(spa); 9116 spa_config_exit(spa, SCL_L2ARC, FTAG); 9117 mutex_exit(&spa_namespace_lock); 9118 } 9119 9120 /* 9121 * Let the world know that we're done. 9122 */ 9123 mutex_enter(&spa->spa_async_lock); 9124 spa->spa_async_thread = NULL; 9125 cv_broadcast(&spa->spa_async_cv); 9126 mutex_exit(&spa->spa_async_lock); 9127 thread_exit(); 9128 } 9129 9130 void 9131 spa_async_suspend(spa_t *spa) 9132 { 9133 mutex_enter(&spa->spa_async_lock); 9134 spa->spa_async_suspended++; 9135 while (spa->spa_async_thread != NULL) 9136 cv_wait(&spa->spa_async_cv, &spa->spa_async_lock); 9137 mutex_exit(&spa->spa_async_lock); 9138 9139 spa_vdev_remove_suspend(spa); 9140 9141 zthr_t *condense_thread = spa->spa_condense_zthr; 9142 if (condense_thread != NULL) 9143 zthr_cancel(condense_thread); 9144 9145 zthr_t *raidz_expand_thread = spa->spa_raidz_expand_zthr; 9146 if (raidz_expand_thread != NULL) 9147 zthr_cancel(raidz_expand_thread); 9148 9149 zthr_t *discard_thread = spa->spa_checkpoint_discard_zthr; 9150 if (discard_thread != NULL) 9151 zthr_cancel(discard_thread); 9152 9153 zthr_t *ll_delete_thread = spa->spa_livelist_delete_zthr; 9154 if (ll_delete_thread != NULL) 9155 zthr_cancel(ll_delete_thread); 9156 9157 zthr_t *ll_condense_thread = spa->spa_livelist_condense_zthr; 9158 if (ll_condense_thread != NULL) 9159 zthr_cancel(ll_condense_thread); 9160 } 9161 9162 void 9163 spa_async_resume(spa_t *spa) 9164 { 9165 mutex_enter(&spa->spa_async_lock); 9166 ASSERT(spa->spa_async_suspended != 0); 9167 spa->spa_async_suspended--; 9168 mutex_exit(&spa->spa_async_lock); 9169 spa_restart_removal(spa); 9170 9171 zthr_t *condense_thread = spa->spa_condense_zthr; 9172 if (condense_thread != NULL) 9173 zthr_resume(condense_thread); 9174 9175 zthr_t *raidz_expand_thread = spa->spa_raidz_expand_zthr; 9176 if (raidz_expand_thread != NULL) 9177 zthr_resume(raidz_expand_thread); 9178 9179 zthr_t *discard_thread = spa->spa_checkpoint_discard_zthr; 9180 if (discard_thread != NULL) 9181 zthr_resume(discard_thread); 9182 9183 zthr_t *ll_delete_thread = spa->spa_livelist_delete_zthr; 9184 if (ll_delete_thread != NULL) 9185 zthr_resume(ll_delete_thread); 9186 9187 zthr_t *ll_condense_thread = spa->spa_livelist_condense_zthr; 9188 if (ll_condense_thread != NULL) 9189 zthr_resume(ll_condense_thread); 9190 } 9191 9192 static boolean_t 9193 spa_async_tasks_pending(spa_t *spa) 9194 { 9195 uint_t non_config_tasks; 9196 uint_t config_task; 9197 boolean_t config_task_suspended; 9198 9199 non_config_tasks = spa->spa_async_tasks & ~SPA_ASYNC_CONFIG_UPDATE; 9200 config_task = spa->spa_async_tasks & SPA_ASYNC_CONFIG_UPDATE; 9201 if (spa->spa_ccw_fail_time == 0) { 9202 config_task_suspended = B_FALSE; 9203 } else { 9204 config_task_suspended = 9205 (gethrtime() - spa->spa_ccw_fail_time) < 9206 ((hrtime_t)zfs_ccw_retry_interval * NANOSEC); 9207 } 9208 9209 return (non_config_tasks || (config_task && !config_task_suspended)); 9210 } 9211 9212 static void 9213 spa_async_dispatch(spa_t *spa) 9214 { 9215 mutex_enter(&spa->spa_async_lock); 9216 if 
(spa_async_tasks_pending(spa) && 9217 !spa->spa_async_suspended && 9218 spa->spa_async_thread == NULL) 9219 spa->spa_async_thread = thread_create(NULL, 0, 9220 spa_async_thread, spa, 0, &p0, TS_RUN, maxclsyspri); 9221 mutex_exit(&spa->spa_async_lock); 9222 } 9223 9224 void 9225 spa_async_request(spa_t *spa, int task) 9226 { 9227 zfs_dbgmsg("spa=%s async request task=%u", spa->spa_name, task); 9228 mutex_enter(&spa->spa_async_lock); 9229 spa->spa_async_tasks |= task; 9230 mutex_exit(&spa->spa_async_lock); 9231 } 9232 9233 int 9234 spa_async_tasks(spa_t *spa) 9235 { 9236 return (spa->spa_async_tasks); 9237 } 9238 9239 /* 9240 * ========================================================================== 9241 * SPA syncing routines 9242 * ========================================================================== 9243 */ 9244 9245 9246 static int 9247 bpobj_enqueue_cb(void *arg, const blkptr_t *bp, boolean_t bp_freed, 9248 dmu_tx_t *tx) 9249 { 9250 bpobj_t *bpo = arg; 9251 bpobj_enqueue(bpo, bp, bp_freed, tx); 9252 return (0); 9253 } 9254 9255 int 9256 bpobj_enqueue_alloc_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx) 9257 { 9258 return (bpobj_enqueue_cb(arg, bp, B_FALSE, tx)); 9259 } 9260 9261 int 9262 bpobj_enqueue_free_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx) 9263 { 9264 return (bpobj_enqueue_cb(arg, bp, B_TRUE, tx)); 9265 } 9266 9267 static int 9268 spa_free_sync_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx) 9269 { 9270 zio_t *pio = arg; 9271 9272 zio_nowait(zio_free_sync(pio, pio->io_spa, dmu_tx_get_txg(tx), bp, 9273 pio->io_flags)); 9274 return (0); 9275 } 9276 9277 static int 9278 bpobj_spa_free_sync_cb(void *arg, const blkptr_t *bp, boolean_t bp_freed, 9279 dmu_tx_t *tx) 9280 { 9281 ASSERT(!bp_freed); 9282 return (spa_free_sync_cb(arg, bp, tx)); 9283 } 9284 9285 /* 9286 * Note: this simple function is not inlined to make it easier to dtrace the 9287 * amount of time spent syncing frees. 9288 */ 9289 static void 9290 spa_sync_frees(spa_t *spa, bplist_t *bpl, dmu_tx_t *tx) 9291 { 9292 zio_t *zio = zio_root(spa, NULL, NULL, 0); 9293 bplist_iterate(bpl, spa_free_sync_cb, zio, tx); 9294 VERIFY(zio_wait(zio) == 0); 9295 } 9296 9297 /* 9298 * Note: this simple function is not inlined to make it easier to dtrace the 9299 * amount of time spent syncing deferred frees. 9300 */ 9301 static void 9302 spa_sync_deferred_frees(spa_t *spa, dmu_tx_t *tx) 9303 { 9304 if (spa_sync_pass(spa) != 1) 9305 return; 9306 9307 /* 9308 * Note: 9309 * If the log space map feature is active, we stop deferring 9310 * frees to the next TXG and therefore running this function 9311 * would be considered a no-op as spa_deferred_bpobj should 9312 * not have any entries. 9313 * 9314 * That said we run this function anyway (instead of returning 9315 * immediately) for the edge-case scenario where we just 9316 * activated the log space map feature in this TXG but we have 9317 * deferred frees from the previous TXG. 9318 */ 9319 zio_t *zio = zio_root(spa, NULL, NULL, 0); 9320 VERIFY3U(bpobj_iterate(&spa->spa_deferred_bpobj, 9321 bpobj_spa_free_sync_cb, zio, tx), ==, 0); 9322 VERIFY0(zio_wait(zio)); 9323 } 9324 9325 static void 9326 spa_sync_nvlist(spa_t *spa, uint64_t obj, nvlist_t *nv, dmu_tx_t *tx) 9327 { 9328 char *packed = NULL; 9329 size_t bufsize; 9330 size_t nvsize = 0; 9331 dmu_buf_t *db; 9332 9333 VERIFY(nvlist_size(nv, &nvsize, NV_ENCODE_XDR) == 0); 9334 9335 /* 9336 * Write full (SPA_CONFIG_BLOCKSIZE) blocks of configuration 9337 * information. 
This avoids the dmu_buf_will_dirty() path and 9338 * saves us a pre-read to get data we don't actually care about. 9339 */ 9340 bufsize = P2ROUNDUP((uint64_t)nvsize, SPA_CONFIG_BLOCKSIZE); 9341 packed = vmem_alloc(bufsize, KM_SLEEP); 9342 9343 VERIFY(nvlist_pack(nv, &packed, &nvsize, NV_ENCODE_XDR, 9344 KM_SLEEP) == 0); 9345 memset(packed + nvsize, 0, bufsize - nvsize); 9346 9347 dmu_write(spa->spa_meta_objset, obj, 0, bufsize, packed, tx); 9348 9349 vmem_free(packed, bufsize); 9350 9351 VERIFY(0 == dmu_bonus_hold(spa->spa_meta_objset, obj, FTAG, &db)); 9352 dmu_buf_will_dirty(db, tx); 9353 *(uint64_t *)db->db_data = nvsize; 9354 dmu_buf_rele(db, FTAG); 9355 } 9356 9357 static void 9358 spa_sync_aux_dev(spa_t *spa, spa_aux_vdev_t *sav, dmu_tx_t *tx, 9359 const char *config, const char *entry) 9360 { 9361 nvlist_t *nvroot; 9362 nvlist_t **list; 9363 int i; 9364 9365 if (!sav->sav_sync) 9366 return; 9367 9368 /* 9369 * Update the MOS nvlist describing the list of available devices. 9370 * spa_validate_aux() will have already made sure this nvlist is 9371 * valid and the vdevs are labeled appropriately. 9372 */ 9373 if (sav->sav_object == 0) { 9374 sav->sav_object = dmu_object_alloc(spa->spa_meta_objset, 9375 DMU_OT_PACKED_NVLIST, 1 << 14, DMU_OT_PACKED_NVLIST_SIZE, 9376 sizeof (uint64_t), tx); 9377 VERIFY(zap_update(spa->spa_meta_objset, 9378 DMU_POOL_DIRECTORY_OBJECT, entry, sizeof (uint64_t), 1, 9379 &sav->sav_object, tx) == 0); 9380 } 9381 9382 nvroot = fnvlist_alloc(); 9383 if (sav->sav_count == 0) { 9384 fnvlist_add_nvlist_array(nvroot, config, 9385 (const nvlist_t * const *)NULL, 0); 9386 } else { 9387 list = kmem_alloc(sav->sav_count*sizeof (void *), KM_SLEEP); 9388 for (i = 0; i < sav->sav_count; i++) 9389 list[i] = vdev_config_generate(spa, sav->sav_vdevs[i], 9390 B_FALSE, VDEV_CONFIG_L2CACHE); 9391 fnvlist_add_nvlist_array(nvroot, config, 9392 (const nvlist_t * const *)list, sav->sav_count); 9393 for (i = 0; i < sav->sav_count; i++) 9394 nvlist_free(list[i]); 9395 kmem_free(list, sav->sav_count * sizeof (void *)); 9396 } 9397 9398 spa_sync_nvlist(spa, sav->sav_object, nvroot, tx); 9399 nvlist_free(nvroot); 9400 9401 sav->sav_sync = B_FALSE; 9402 } 9403 9404 /* 9405 * Rebuild spa's all-vdev ZAP from the vdev ZAPs indicated in each vdev_t. 9406 * The all-vdev ZAP must be empty. 9407 */ 9408 static void 9409 spa_avz_build(vdev_t *vd, uint64_t avz, dmu_tx_t *tx) 9410 { 9411 spa_t *spa = vd->vdev_spa; 9412 9413 if (vd->vdev_root_zap != 0 && 9414 spa_feature_is_active(spa, SPA_FEATURE_AVZ_V2)) { 9415 VERIFY0(zap_add_int(spa->spa_meta_objset, avz, 9416 vd->vdev_root_zap, tx)); 9417 } 9418 if (vd->vdev_top_zap != 0) { 9419 VERIFY0(zap_add_int(spa->spa_meta_objset, avz, 9420 vd->vdev_top_zap, tx)); 9421 } 9422 if (vd->vdev_leaf_zap != 0) { 9423 VERIFY0(zap_add_int(spa->spa_meta_objset, avz, 9424 vd->vdev_leaf_zap, tx)); 9425 } 9426 for (uint64_t i = 0; i < vd->vdev_children; i++) { 9427 spa_avz_build(vd->vdev_child[i], avz, tx); 9428 } 9429 } 9430 9431 static void 9432 spa_sync_config_object(spa_t *spa, dmu_tx_t *tx) 9433 { 9434 nvlist_t *config; 9435 9436 /* 9437 * If the pool is being imported from a pre-per-vdev-ZAP version of ZFS, 9438 * its config may not be dirty but we still need to build per-vdev ZAPs. 9439 * Similarly, if the pool is being assembled (e.g. after a split), we 9440 * need to rebuild the AVZ although the config may not be dirty. 
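 * Which case applies is recorded in spa_avz_action and is handled below
 * before the new config is written out.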
9441 */ 9442 if (list_is_empty(&spa->spa_config_dirty_list) && 9443 spa->spa_avz_action == AVZ_ACTION_NONE) 9444 return; 9445 9446 spa_config_enter(spa, SCL_STATE, FTAG, RW_READER); 9447 9448 ASSERT(spa->spa_avz_action == AVZ_ACTION_NONE || 9449 spa->spa_avz_action == AVZ_ACTION_INITIALIZE || 9450 spa->spa_all_vdev_zaps != 0); 9451 9452 if (spa->spa_avz_action == AVZ_ACTION_REBUILD) { 9453 /* Make and build the new AVZ */ 9454 uint64_t new_avz = zap_create(spa->spa_meta_objset, 9455 DMU_OTN_ZAP_METADATA, DMU_OT_NONE, 0, tx); 9456 spa_avz_build(spa->spa_root_vdev, new_avz, tx); 9457 9458 /* Diff old AVZ with new one */ 9459 zap_cursor_t zc; 9460 zap_attribute_t *za = zap_attribute_alloc(); 9461 9462 for (zap_cursor_init(&zc, spa->spa_meta_objset, 9463 spa->spa_all_vdev_zaps); 9464 zap_cursor_retrieve(&zc, za) == 0; 9465 zap_cursor_advance(&zc)) { 9466 uint64_t vdzap = za->za_first_integer; 9467 if (zap_lookup_int(spa->spa_meta_objset, new_avz, 9468 vdzap) == ENOENT) { 9469 /* 9470 * ZAP is listed in old AVZ but not in new one; 9471 * destroy it 9472 */ 9473 VERIFY0(zap_destroy(spa->spa_meta_objset, vdzap, 9474 tx)); 9475 } 9476 } 9477 9478 zap_cursor_fini(&zc); 9479 zap_attribute_free(za); 9480 9481 /* Destroy the old AVZ */ 9482 VERIFY0(zap_destroy(spa->spa_meta_objset, 9483 spa->spa_all_vdev_zaps, tx)); 9484 9485 /* Replace the old AVZ in the dir obj with the new one */ 9486 VERIFY0(zap_update(spa->spa_meta_objset, 9487 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_VDEV_ZAP_MAP, 9488 sizeof (new_avz), 1, &new_avz, tx)); 9489 9490 spa->spa_all_vdev_zaps = new_avz; 9491 } else if (spa->spa_avz_action == AVZ_ACTION_DESTROY) { 9492 zap_cursor_t zc; 9493 zap_attribute_t *za = zap_attribute_alloc(); 9494 9495 /* Walk through the AVZ and destroy all listed ZAPs */ 9496 for (zap_cursor_init(&zc, spa->spa_meta_objset, 9497 spa->spa_all_vdev_zaps); 9498 zap_cursor_retrieve(&zc, za) == 0; 9499 zap_cursor_advance(&zc)) { 9500 uint64_t zap = za->za_first_integer; 9501 VERIFY0(zap_destroy(spa->spa_meta_objset, zap, tx)); 9502 } 9503 9504 zap_cursor_fini(&zc); 9505 zap_attribute_free(za); 9506 9507 /* Destroy and unlink the AVZ itself */ 9508 VERIFY0(zap_destroy(spa->spa_meta_objset, 9509 spa->spa_all_vdev_zaps, tx)); 9510 VERIFY0(zap_remove(spa->spa_meta_objset, 9511 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_VDEV_ZAP_MAP, tx)); 9512 spa->spa_all_vdev_zaps = 0; 9513 } 9514 9515 if (spa->spa_all_vdev_zaps == 0) { 9516 spa->spa_all_vdev_zaps = zap_create_link(spa->spa_meta_objset, 9517 DMU_OTN_ZAP_METADATA, DMU_POOL_DIRECTORY_OBJECT, 9518 DMU_POOL_VDEV_ZAP_MAP, tx); 9519 } 9520 spa->spa_avz_action = AVZ_ACTION_NONE; 9521 9522 /* Create ZAPs for vdevs that don't have them. */ 9523 vdev_construct_zaps(spa->spa_root_vdev, tx); 9524 9525 config = spa_config_generate(spa, spa->spa_root_vdev, 9526 dmu_tx_get_txg(tx), B_FALSE); 9527 9528 /* 9529 * If we're upgrading the spa version then make sure that 9530 * the config object gets updated with the correct version. 
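 * (The in-core uberblock already carries the new version; spa_ubsync
 * still reflects the last synced txg.)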
9531 */ 9532 if (spa->spa_ubsync.ub_version < spa->spa_uberblock.ub_version) 9533 fnvlist_add_uint64(config, ZPOOL_CONFIG_VERSION, 9534 spa->spa_uberblock.ub_version); 9535 9536 spa_config_exit(spa, SCL_STATE, FTAG); 9537 9538 nvlist_free(spa->spa_config_syncing); 9539 spa->spa_config_syncing = config; 9540 9541 spa_sync_nvlist(spa, spa->spa_config_object, config, tx); 9542 } 9543 9544 static void 9545 spa_sync_version(void *arg, dmu_tx_t *tx) 9546 { 9547 uint64_t *versionp = arg; 9548 uint64_t version = *versionp; 9549 spa_t *spa = dmu_tx_pool(tx)->dp_spa; 9550 9551 /* 9552 * Setting the version is special cased when first creating the pool. 9553 */ 9554 ASSERT(tx->tx_txg != TXG_INITIAL); 9555 9556 ASSERT(SPA_VERSION_IS_SUPPORTED(version)); 9557 ASSERT(version >= spa_version(spa)); 9558 9559 spa->spa_uberblock.ub_version = version; 9560 vdev_config_dirty(spa->spa_root_vdev); 9561 spa_history_log_internal(spa, "set", tx, "version=%lld", 9562 (longlong_t)version); 9563 } 9564 9565 /* 9566 * Set zpool properties. 9567 */ 9568 static void 9569 spa_sync_props(void *arg, dmu_tx_t *tx) 9570 { 9571 nvlist_t *nvp = arg; 9572 spa_t *spa = dmu_tx_pool(tx)->dp_spa; 9573 objset_t *mos = spa->spa_meta_objset; 9574 nvpair_t *elem = NULL; 9575 9576 mutex_enter(&spa->spa_props_lock); 9577 9578 while ((elem = nvlist_next_nvpair(nvp, elem))) { 9579 uint64_t intval; 9580 const char *strval, *fname; 9581 zpool_prop_t prop; 9582 const char *propname; 9583 const char *elemname = nvpair_name(elem); 9584 zprop_type_t proptype; 9585 spa_feature_t fid; 9586 9587 switch (prop = zpool_name_to_prop(elemname)) { 9588 case ZPOOL_PROP_VERSION: 9589 intval = fnvpair_value_uint64(elem); 9590 /* 9591 * The version is synced separately before other 9592 * properties and should be correct by now. 9593 */ 9594 ASSERT3U(spa_version(spa), >=, intval); 9595 break; 9596 9597 case ZPOOL_PROP_ALTROOT: 9598 /* 9599 * 'altroot' is a non-persistent property. It should 9600 * have been set temporarily at creation or import time. 9601 */ 9602 ASSERT(spa->spa_root != NULL); 9603 break; 9604 9605 case ZPOOL_PROP_READONLY: 9606 case ZPOOL_PROP_CACHEFILE: 9607 /* 9608 * 'readonly' and 'cachefile' are also non-persistent 9609 * properties. 9610 */ 9611 break; 9612 case ZPOOL_PROP_COMMENT: 9613 strval = fnvpair_value_string(elem); 9614 if (spa->spa_comment != NULL) 9615 spa_strfree(spa->spa_comment); 9616 spa->spa_comment = spa_strdup(strval); 9617 /* 9618 * We need to dirty the configuration on all the vdevs 9619 * so that their labels get updated. We also need to 9620 * update the cache file to keep it in sync with the 9621 * MOS version. It's unnecessary to do this for pool 9622 * creation since the vdev's configuration has already 9623 * been dirtied. 9624 */ 9625 if (tx->tx_txg != TXG_INITIAL) { 9626 vdev_config_dirty(spa->spa_root_vdev); 9627 spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE); 9628 } 9629 spa_history_log_internal(spa, "set", tx, 9630 "%s=%s", elemname, strval); 9631 break; 9632 case ZPOOL_PROP_COMPATIBILITY: 9633 strval = fnvpair_value_string(elem); 9634 if (spa->spa_compatibility != NULL) 9635 spa_strfree(spa->spa_compatibility); 9636 spa->spa_compatibility = spa_strdup(strval); 9637 /* 9638 * Dirty the configuration on vdevs as above. 
9639 */ 9640 if (tx->tx_txg != TXG_INITIAL) { 9641 vdev_config_dirty(spa->spa_root_vdev); 9642 spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE); 9643 } 9644 9645 spa_history_log_internal(spa, "set", tx, 9646 "%s=%s", nvpair_name(elem), strval); 9647 break; 9648 9649 case ZPOOL_PROP_INVAL: 9650 if (zpool_prop_feature(elemname)) { 9651 fname = strchr(elemname, '@') + 1; 9652 VERIFY0(zfeature_lookup_name(fname, &fid)); 9653 9654 spa_feature_enable(spa, fid, tx); 9655 spa_history_log_internal(spa, "set", tx, 9656 "%s=enabled", elemname); 9657 break; 9658 } else if (!zfs_prop_user(elemname)) { 9659 ASSERT(zpool_prop_feature(elemname)); 9660 break; 9661 } 9662 zfs_fallthrough; 9663 default: 9664 /* 9665 * Set pool property values in the poolprops mos object. 9666 */ 9667 if (spa->spa_pool_props_object == 0) { 9668 spa->spa_pool_props_object = 9669 zap_create_link(mos, DMU_OT_POOL_PROPS, 9670 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_PROPS, 9671 tx); 9672 } 9673 9674 /* normalize the property name */ 9675 if (prop == ZPOOL_PROP_INVAL) { 9676 propname = elemname; 9677 proptype = PROP_TYPE_STRING; 9678 } else { 9679 propname = zpool_prop_to_name(prop); 9680 proptype = zpool_prop_get_type(prop); 9681 } 9682 9683 if (nvpair_type(elem) == DATA_TYPE_STRING) { 9684 ASSERT(proptype == PROP_TYPE_STRING); 9685 strval = fnvpair_value_string(elem); 9686 VERIFY0(zap_update(mos, 9687 spa->spa_pool_props_object, propname, 9688 1, strlen(strval) + 1, strval, tx)); 9689 spa_history_log_internal(spa, "set", tx, 9690 "%s=%s", elemname, strval); 9691 } else if (nvpair_type(elem) == DATA_TYPE_UINT64) { 9692 intval = fnvpair_value_uint64(elem); 9693 9694 if (proptype == PROP_TYPE_INDEX) { 9695 const char *unused; 9696 VERIFY0(zpool_prop_index_to_string( 9697 prop, intval, &unused)); 9698 } 9699 VERIFY0(zap_update(mos, 9700 spa->spa_pool_props_object, propname, 9701 8, 1, &intval, tx)); 9702 spa_history_log_internal(spa, "set", tx, 9703 "%s=%lld", elemname, 9704 (longlong_t)intval); 9705 9706 switch (prop) { 9707 case ZPOOL_PROP_DELEGATION: 9708 spa->spa_delegation = intval; 9709 break; 9710 case ZPOOL_PROP_BOOTFS: 9711 spa->spa_bootfs = intval; 9712 break; 9713 case ZPOOL_PROP_FAILUREMODE: 9714 spa->spa_failmode = intval; 9715 break; 9716 case ZPOOL_PROP_AUTOTRIM: 9717 spa->spa_autotrim = intval; 9718 spa_async_request(spa, 9719 SPA_ASYNC_AUTOTRIM_RESTART); 9720 break; 9721 case ZPOOL_PROP_AUTOEXPAND: 9722 spa->spa_autoexpand = intval; 9723 if (tx->tx_txg != TXG_INITIAL) 9724 spa_async_request(spa, 9725 SPA_ASYNC_AUTOEXPAND); 9726 break; 9727 case ZPOOL_PROP_MULTIHOST: 9728 spa->spa_multihost = intval; 9729 break; 9730 case ZPOOL_PROP_DEDUP_TABLE_QUOTA: 9731 spa->spa_dedup_table_quota = intval; 9732 break; 9733 default: 9734 break; 9735 } 9736 } else { 9737 ASSERT(0); /* not allowed */ 9738 } 9739 } 9740 9741 } 9742 9743 mutex_exit(&spa->spa_props_lock); 9744 } 9745 9746 /* 9747 * Perform one-time upgrade on-disk changes. spa_version() does not 9748 * reflect the new version this txg, so there must be no changes this 9749 * txg to anything that the upgrade code depends on after it executes. 9750 * Therefore this must be called after dsl_pool_sync() does the sync 9751 * tasks. 
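 * It is also only run in sync pass 1; note the early return below.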
9752 */ 9753 static void 9754 spa_sync_upgrades(spa_t *spa, dmu_tx_t *tx) 9755 { 9756 if (spa_sync_pass(spa) != 1) 9757 return; 9758 9759 dsl_pool_t *dp = spa->spa_dsl_pool; 9760 rrw_enter(&dp->dp_config_rwlock, RW_WRITER, FTAG); 9761 9762 if (spa->spa_ubsync.ub_version < SPA_VERSION_ORIGIN && 9763 spa->spa_uberblock.ub_version >= SPA_VERSION_ORIGIN) { 9764 dsl_pool_create_origin(dp, tx); 9765 9766 /* Keeping the origin open increases spa_minref */ 9767 spa->spa_minref += 3; 9768 } 9769 9770 if (spa->spa_ubsync.ub_version < SPA_VERSION_NEXT_CLONES && 9771 spa->spa_uberblock.ub_version >= SPA_VERSION_NEXT_CLONES) { 9772 dsl_pool_upgrade_clones(dp, tx); 9773 } 9774 9775 if (spa->spa_ubsync.ub_version < SPA_VERSION_DIR_CLONES && 9776 spa->spa_uberblock.ub_version >= SPA_VERSION_DIR_CLONES) { 9777 dsl_pool_upgrade_dir_clones(dp, tx); 9778 9779 /* Keeping the freedir open increases spa_minref */ 9780 spa->spa_minref += 3; 9781 } 9782 9783 if (spa->spa_ubsync.ub_version < SPA_VERSION_FEATURES && 9784 spa->spa_uberblock.ub_version >= SPA_VERSION_FEATURES) { 9785 spa_feature_create_zap_objects(spa, tx); 9786 } 9787 9788 /* 9789 * The LZ4_COMPRESS feature's behaviour was changed to activate_on_enable 9790 * when the ability to use lz4 compression for metadata was added. 9791 * Old pools that have this feature enabled must be upgraded to have 9792 * this feature active. 9793 */ 9794 if (spa->spa_uberblock.ub_version >= SPA_VERSION_FEATURES) { 9795 boolean_t lz4_en = spa_feature_is_enabled(spa, 9796 SPA_FEATURE_LZ4_COMPRESS); 9797 boolean_t lz4_ac = spa_feature_is_active(spa, 9798 SPA_FEATURE_LZ4_COMPRESS); 9799 9800 if (lz4_en && !lz4_ac) 9801 spa_feature_incr(spa, SPA_FEATURE_LZ4_COMPRESS, tx); 9802 } 9803 9804 /* 9805 * If we haven't written the salt, do so now. Note that the 9806 * feature may not be activated yet, but that's fine since 9807 * the presence of this ZAP entry is backwards compatible. 9808 */ 9809 if (zap_contains(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT, 9810 DMU_POOL_CHECKSUM_SALT) == ENOENT) { 9811 VERIFY0(zap_add(spa->spa_meta_objset, 9812 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_CHECKSUM_SALT, 1, 9813 sizeof (spa->spa_cksum_salt.zcs_bytes), 9814 spa->spa_cksum_salt.zcs_bytes, tx)); 9815 } 9816 9817 rrw_exit(&dp->dp_config_rwlock, FTAG); 9818 } 9819 9820 static void 9821 vdev_indirect_state_sync_verify(vdev_t *vd) 9822 { 9823 vdev_indirect_mapping_t *vim __maybe_unused = vd->vdev_indirect_mapping; 9824 vdev_indirect_births_t *vib __maybe_unused = vd->vdev_indirect_births; 9825 9826 if (vd->vdev_ops == &vdev_indirect_ops) { 9827 ASSERT(vim != NULL); 9828 ASSERT(vib != NULL); 9829 } 9830 9831 uint64_t obsolete_sm_object = 0; 9832 ASSERT0(vdev_obsolete_sm_object(vd, &obsolete_sm_object)); 9833 if (obsolete_sm_object != 0) { 9834 ASSERT(vd->vdev_obsolete_sm != NULL); 9835 ASSERT(vd->vdev_removing || 9836 vd->vdev_ops == &vdev_indirect_ops); 9837 ASSERT(vdev_indirect_mapping_num_entries(vim) > 0); 9838 ASSERT(vdev_indirect_mapping_bytes_mapped(vim) > 0); 9839 ASSERT3U(obsolete_sm_object, ==, 9840 space_map_object(vd->vdev_obsolete_sm)); 9841 ASSERT3U(vdev_indirect_mapping_bytes_mapped(vim), >=, 9842 space_map_allocated(vd->vdev_obsolete_sm)); 9843 } 9844 ASSERT(vd->vdev_obsolete_segments != NULL); 9845 9846 /* 9847 * Since frees / remaps to an indirect vdev can only 9848 * happen in syncing context, the obsolete segments 9849 * tree must be empty when we start syncing.
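 * These checks are ASSERT-based, so they compile away in non-debug builds.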
9850 */ 9851 ASSERT0(range_tree_space(vd->vdev_obsolete_segments)); 9852 } 9853 9854 /* 9855 * Set the top-level vdev's max queue depth. Evaluate each top-level's 9856 * async write queue depth in case it changed. The max queue depth will 9857 * not change in the middle of syncing out this txg. 9858 */ 9859 static void 9860 spa_sync_adjust_vdev_max_queue_depth(spa_t *spa) 9861 { 9862 ASSERT(spa_writeable(spa)); 9863 9864 vdev_t *rvd = spa->spa_root_vdev; 9865 uint32_t max_queue_depth = zfs_vdev_async_write_max_active * 9866 zfs_vdev_queue_depth_pct / 100; 9867 metaslab_class_t *normal = spa_normal_class(spa); 9868 metaslab_class_t *special = spa_special_class(spa); 9869 metaslab_class_t *dedup = spa_dedup_class(spa); 9870 9871 uint64_t slots_per_allocator = 0; 9872 for (int c = 0; c < rvd->vdev_children; c++) { 9873 vdev_t *tvd = rvd->vdev_child[c]; 9874 9875 metaslab_group_t *mg = tvd->vdev_mg; 9876 if (mg == NULL || !metaslab_group_initialized(mg)) 9877 continue; 9878 9879 metaslab_class_t *mc = mg->mg_class; 9880 if (mc != normal && mc != special && mc != dedup) 9881 continue; 9882 9883 /* 9884 * It is safe to do a lock-free check here because only async 9885 * allocations look at mg_max_alloc_queue_depth, and async 9886 * allocations all happen from spa_sync(). 9887 */ 9888 for (int i = 0; i < mg->mg_allocators; i++) { 9889 ASSERT0(zfs_refcount_count( 9890 &(mg->mg_allocator[i].mga_alloc_queue_depth))); 9891 } 9892 mg->mg_max_alloc_queue_depth = max_queue_depth; 9893 9894 for (int i = 0; i < mg->mg_allocators; i++) { 9895 mg->mg_allocator[i].mga_cur_max_alloc_queue_depth = 9896 zfs_vdev_def_queue_depth; 9897 } 9898 slots_per_allocator += zfs_vdev_def_queue_depth; 9899 } 9900 9901 for (int i = 0; i < spa->spa_alloc_count; i++) { 9902 ASSERT0(zfs_refcount_count(&normal->mc_allocator[i]. 9903 mca_alloc_slots)); 9904 ASSERT0(zfs_refcount_count(&special->mc_allocator[i]. 9905 mca_alloc_slots)); 9906 ASSERT0(zfs_refcount_count(&dedup->mc_allocator[i]. 
9907 mca_alloc_slots)); 9908 normal->mc_allocator[i].mca_alloc_max_slots = 9909 slots_per_allocator; 9910 special->mc_allocator[i].mca_alloc_max_slots = 9911 slots_per_allocator; 9912 dedup->mc_allocator[i].mca_alloc_max_slots = 9913 slots_per_allocator; 9914 } 9915 normal->mc_alloc_throttle_enabled = zio_dva_throttle_enabled; 9916 special->mc_alloc_throttle_enabled = zio_dva_throttle_enabled; 9917 dedup->mc_alloc_throttle_enabled = zio_dva_throttle_enabled; 9918 } 9919 9920 static void 9921 spa_sync_condense_indirect(spa_t *spa, dmu_tx_t *tx) 9922 { 9923 ASSERT(spa_writeable(spa)); 9924 9925 vdev_t *rvd = spa->spa_root_vdev; 9926 for (int c = 0; c < rvd->vdev_children; c++) { 9927 vdev_t *vd = rvd->vdev_child[c]; 9928 vdev_indirect_state_sync_verify(vd); 9929 9930 if (vdev_indirect_should_condense(vd)) { 9931 spa_condense_indirect_start_sync(vd, tx); 9932 break; 9933 } 9934 } 9935 } 9936 9937 static void 9938 spa_sync_iterate_to_convergence(spa_t *spa, dmu_tx_t *tx) 9939 { 9940 objset_t *mos = spa->spa_meta_objset; 9941 dsl_pool_t *dp = spa->spa_dsl_pool; 9942 uint64_t txg = tx->tx_txg; 9943 bplist_t *free_bpl = &spa->spa_free_bplist[txg & TXG_MASK]; 9944 9945 do { 9946 int pass = ++spa->spa_sync_pass; 9947 9948 spa_sync_config_object(spa, tx); 9949 spa_sync_aux_dev(spa, &spa->spa_spares, tx, 9950 ZPOOL_CONFIG_SPARES, DMU_POOL_SPARES); 9951 spa_sync_aux_dev(spa, &spa->spa_l2cache, tx, 9952 ZPOOL_CONFIG_L2CACHE, DMU_POOL_L2CACHE); 9953 spa_errlog_sync(spa, txg); 9954 dsl_pool_sync(dp, txg); 9955 9956 if (pass < zfs_sync_pass_deferred_free || 9957 spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP)) { 9958 /* 9959 * If the log space map feature is active we don't 9960 * care about deferred frees and the deferred bpobj 9961 * as the log space map should effectively have the 9962 * same results (i.e. appending only to one object). 9963 */ 9964 spa_sync_frees(spa, free_bpl, tx); 9965 } else { 9966 /* 9967 * We can not defer frees in pass 1, because 9968 * we sync the deferred frees later in pass 1. 9969 */ 9970 ASSERT3U(pass, >, 1); 9971 bplist_iterate(free_bpl, bpobj_enqueue_alloc_cb, 9972 &spa->spa_deferred_bpobj, tx); 9973 } 9974 9975 brt_sync(spa, txg); 9976 ddt_sync(spa, txg); 9977 dsl_scan_sync(dp, tx); 9978 dsl_errorscrub_sync(dp, tx); 9979 svr_sync(spa, tx); 9980 spa_sync_upgrades(spa, tx); 9981 9982 spa_flush_metaslabs(spa, tx); 9983 9984 vdev_t *vd = NULL; 9985 while ((vd = txg_list_remove(&spa->spa_vdev_txg_list, txg)) 9986 != NULL) 9987 vdev_sync(vd, txg); 9988 9989 if (pass == 1) { 9990 /* 9991 * dsl_pool_sync() -> dp_sync_tasks may have dirtied 9992 * the config. If that happens, this txg should not 9993 * be a no-op. So we must sync the config to the MOS 9994 * before checking for no-op. 9995 * 9996 * Note that when the config is dirty, it will 9997 * be written to the MOS (i.e. the MOS will be 9998 * dirtied) every time we call spa_sync_config_object() 9999 * in this txg. Therefore we can't call this after 10000 * dsl_pool_sync() every pass, because it would 10001 * prevent us from converging, since we'd dirty 10002 * the MOS every pass. 10003 * 10004 * Sync tasks can only be processed in pass 1, so 10005 * there's no need to do this in later passes. 10006 */ 10007 spa_sync_config_object(spa, tx); 10008 } 10009 10010 /* 10011 * Note: We need to check if the MOS is dirty because we could 10012 * have marked the MOS dirty without updating the uberblock 10013 * (e.g. if we have sync tasks but no dirty user data). 
We need 10014 * to check the uberblock's rootbp because it is updated if we 10015 * have synced out dirty data (though in this case the MOS will 10016 * most likely also be dirty due to second order effects, we 10017 * don't want to rely on that here). 10018 */ 10019 if (pass == 1 && 10020 BP_GET_LOGICAL_BIRTH(&spa->spa_uberblock.ub_rootbp) < txg && 10021 !dmu_objset_is_dirty(mos, txg)) { 10022 /* 10023 * Nothing changed on the first pass, therefore this 10024 * TXG is a no-op. Avoid syncing deferred frees, so 10025 * that we can keep this TXG as a no-op. 10026 */ 10027 ASSERT(txg_list_empty(&dp->dp_dirty_datasets, txg)); 10028 ASSERT(txg_list_empty(&dp->dp_dirty_dirs, txg)); 10029 ASSERT(txg_list_empty(&dp->dp_sync_tasks, txg)); 10030 ASSERT(txg_list_empty(&dp->dp_early_sync_tasks, txg)); 10031 break; 10032 } 10033 10034 spa_sync_deferred_frees(spa, tx); 10035 } while (dmu_objset_is_dirty(mos, txg)); 10036 } 10037 10038 /* 10039 * Rewrite the vdev configuration (which includes the uberblock) to 10040 * commit the transaction group. 10041 * 10042 * If there are no dirty vdevs, we sync the uberblock to a few random 10043 * top-level vdevs that are known to be visible in the config cache 10044 * (see spa_vdev_add() for a complete description). If there *are* dirty 10045 * vdevs, sync the uberblock to all vdevs. 10046 */ 10047 static void 10048 spa_sync_rewrite_vdev_config(spa_t *spa, dmu_tx_t *tx) 10049 { 10050 vdev_t *rvd = spa->spa_root_vdev; 10051 uint64_t txg = tx->tx_txg; 10052 10053 for (;;) { 10054 int error = 0; 10055 10056 /* 10057 * We hold SCL_STATE to prevent vdev open/close/etc. 10058 * while we're attempting to write the vdev labels. 10059 */ 10060 spa_config_enter(spa, SCL_STATE, FTAG, RW_READER); 10061 10062 if (list_is_empty(&spa->spa_config_dirty_list)) { 10063 vdev_t *svd[SPA_SYNC_MIN_VDEVS] = { NULL }; 10064 int svdcount = 0; 10065 int children = rvd->vdev_children; 10066 int c0 = random_in_range(children); 10067 10068 for (int c = 0; c < children; c++) { 10069 vdev_t *vd = 10070 rvd->vdev_child[(c0 + c) % children]; 10071 10072 /* Stop when revisiting the first vdev */ 10073 if (c > 0 && svd[0] == vd) 10074 break; 10075 10076 if (vd->vdev_ms_array == 0 || 10077 vd->vdev_islog || 10078 !vdev_is_concrete(vd)) 10079 continue; 10080 10081 svd[svdcount++] = vd; 10082 if (svdcount == SPA_SYNC_MIN_VDEVS) 10083 break; 10084 } 10085 error = vdev_config_sync(svd, svdcount, txg); 10086 } else { 10087 error = vdev_config_sync(rvd->vdev_child, 10088 rvd->vdev_children, txg); 10089 } 10090 10091 if (error == 0) 10092 spa->spa_last_synced_guid = rvd->vdev_guid; 10093 10094 spa_config_exit(spa, SCL_STATE, FTAG); 10095 10096 if (error == 0) 10097 break; 10098 zio_suspend(spa, NULL, ZIO_SUSPEND_IOERR); 10099 zio_resume_wait(spa); 10100 } 10101 } 10102 10103 /* 10104 * Sync the specified transaction group. New blocks may be dirtied as 10105 * part of the process, so we iterate until it converges. 10106 */ 10107 void 10108 spa_sync(spa_t *spa, uint64_t txg) 10109 { 10110 vdev_t *vd = NULL; 10111 10112 VERIFY(spa_writeable(spa)); 10113 10114 /* 10115 * Wait for i/os issued in open context that need to complete 10116 * before this txg syncs. 10117 */ 10118 (void) zio_wait(spa->spa_txg_zio[txg & TXG_MASK]); 10119 spa->spa_txg_zio[txg & TXG_MASK] = zio_root(spa, NULL, NULL, 10120 ZIO_FLAG_CANFAIL); 10121 10122 /* 10123 * Now that there can be no more cloning in this transaction group, 10124 * but we are still before issuing frees, we can process pending BRT 10125 * updates. 
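 * (The BRT is the block reference table used to track cloned blocks.)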
10126 */ 10127 brt_pending_apply(spa, txg); 10128 10129 /* 10130 * Lock out configuration changes. 10131 */ 10132 spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER); 10133 10134 spa->spa_syncing_txg = txg; 10135 spa->spa_sync_pass = 0; 10136 10137 for (int i = 0; i < spa->spa_alloc_count; i++) { 10138 mutex_enter(&spa->spa_allocs[i].spaa_lock); 10139 VERIFY0(avl_numnodes(&spa->spa_allocs[i].spaa_tree)); 10140 mutex_exit(&spa->spa_allocs[i].spaa_lock); 10141 } 10142 10143 /* 10144 * If there are any pending vdev state changes, convert them 10145 * into config changes that go out with this transaction group. 10146 */ 10147 spa_config_enter(spa, SCL_STATE, FTAG, RW_READER); 10148 while ((vd = list_head(&spa->spa_state_dirty_list)) != NULL) { 10149 /* Avoid holding the write lock unless actually necessary */ 10150 if (vd->vdev_aux == NULL) { 10151 vdev_state_clean(vd); 10152 vdev_config_dirty(vd); 10153 continue; 10154 } 10155 /* 10156 * We need the write lock here because, for aux vdevs, 10157 * calling vdev_config_dirty() modifies sav_config. 10158 * This is ugly and will become unnecessary when we 10159 * eliminate the aux vdev wart by integrating all vdevs 10160 * into the root vdev tree. 10161 */ 10162 spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG); 10163 spa_config_enter(spa, SCL_CONFIG | SCL_STATE, FTAG, RW_WRITER); 10164 while ((vd = list_head(&spa->spa_state_dirty_list)) != NULL) { 10165 vdev_state_clean(vd); 10166 vdev_config_dirty(vd); 10167 } 10168 spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG); 10169 spa_config_enter(spa, SCL_CONFIG | SCL_STATE, FTAG, RW_READER); 10170 } 10171 spa_config_exit(spa, SCL_STATE, FTAG); 10172 10173 dsl_pool_t *dp = spa->spa_dsl_pool; 10174 dmu_tx_t *tx = dmu_tx_create_assigned(dp, txg); 10175 10176 spa->spa_sync_starttime = gethrtime(); 10177 taskq_cancel_id(system_delay_taskq, spa->spa_deadman_tqid); 10178 spa->spa_deadman_tqid = taskq_dispatch_delay(system_delay_taskq, 10179 spa_deadman, spa, TQ_SLEEP, ddi_get_lbolt() + 10180 NSEC_TO_TICK(spa->spa_deadman_synctime)); 10181 10182 /* 10183 * If we are upgrading to SPA_VERSION_RAIDZ_DEFLATE this txg, 10184 * set spa_deflate if we have no raid-z vdevs. 10185 */ 10186 if (spa->spa_ubsync.ub_version < SPA_VERSION_RAIDZ_DEFLATE && 10187 spa->spa_uberblock.ub_version >= SPA_VERSION_RAIDZ_DEFLATE) { 10188 vdev_t *rvd = spa->spa_root_vdev; 10189 10190 int i; 10191 for (i = 0; i < rvd->vdev_children; i++) { 10192 vd = rvd->vdev_child[i]; 10193 if (vd->vdev_deflate_ratio != SPA_MINBLOCKSIZE) 10194 break; 10195 } 10196 if (i == rvd->vdev_children) { 10197 spa->spa_deflate = TRUE; 10198 VERIFY0(zap_add(spa->spa_meta_objset, 10199 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_DEFLATE, 10200 sizeof (uint64_t), 1, &spa->spa_deflate, tx)); 10201 } 10202 } 10203 10204 spa_sync_adjust_vdev_max_queue_depth(spa); 10205 10206 spa_sync_condense_indirect(spa, tx); 10207 10208 spa_sync_iterate_to_convergence(spa, tx); 10209 10210 #ifdef ZFS_DEBUG 10211 if (!list_is_empty(&spa->spa_config_dirty_list)) { 10212 /* 10213 * Make sure that the number of ZAPs for all the vdevs matches 10214 * the number of ZAPs in the per-vdev ZAP list. This only gets 10215 * called if the config is dirty; otherwise there may be 10216 * outstanding AVZ operations that weren't completed in 10217 * spa_sync_config_object. 
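 * vdev_count_verify_zaps() returns the number of ZAPs referenced by the
 * vdev tree itself.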
10218 */ 10219 uint64_t all_vdev_zap_entry_count; 10220 ASSERT0(zap_count(spa->spa_meta_objset, 10221 spa->spa_all_vdev_zaps, &all_vdev_zap_entry_count)); 10222 ASSERT3U(vdev_count_verify_zaps(spa->spa_root_vdev), ==, 10223 all_vdev_zap_entry_count); 10224 } 10225 #endif 10226 10227 if (spa->spa_vdev_removal != NULL) { 10228 ASSERT0(spa->spa_vdev_removal->svr_bytes_done[txg & TXG_MASK]); 10229 } 10230 10231 spa_sync_rewrite_vdev_config(spa, tx); 10232 dmu_tx_commit(tx); 10233 10234 taskq_cancel_id(system_delay_taskq, spa->spa_deadman_tqid); 10235 spa->spa_deadman_tqid = 0; 10236 10237 /* 10238 * Clear the dirty config list. 10239 */ 10240 while ((vd = list_head(&spa->spa_config_dirty_list)) != NULL) 10241 vdev_config_clean(vd); 10242 10243 /* 10244 * Now that the new config has synced transactionally, 10245 * let it become visible to the config cache. 10246 */ 10247 if (spa->spa_config_syncing != NULL) { 10248 spa_config_set(spa, spa->spa_config_syncing); 10249 spa->spa_config_txg = txg; 10250 spa->spa_config_syncing = NULL; 10251 } 10252 10253 dsl_pool_sync_done(dp, txg); 10254 10255 for (int i = 0; i < spa->spa_alloc_count; i++) { 10256 mutex_enter(&spa->spa_allocs[i].spaa_lock); 10257 VERIFY0(avl_numnodes(&spa->spa_allocs[i].spaa_tree)); 10258 mutex_exit(&spa->spa_allocs[i].spaa_lock); 10259 } 10260 10261 /* 10262 * Update usable space statistics. 10263 */ 10264 while ((vd = txg_list_remove(&spa->spa_vdev_txg_list, TXG_CLEAN(txg))) 10265 != NULL) 10266 vdev_sync_done(vd, txg); 10267 10268 metaslab_class_evict_old(spa->spa_normal_class, txg); 10269 metaslab_class_evict_old(spa->spa_log_class, txg); 10270 /* spa_embedded_log_class has only one metaslab per vdev. */ 10271 metaslab_class_evict_old(spa->spa_special_class, txg); 10272 metaslab_class_evict_old(spa->spa_dedup_class, txg); 10273 10274 spa_sync_close_syncing_log_sm(spa); 10275 10276 spa_update_dspace(spa); 10277 10278 if (spa_get_autotrim(spa) == SPA_AUTOTRIM_ON) 10279 vdev_autotrim_kick(spa); 10280 10281 /* 10282 * It had better be the case that we didn't dirty anything 10283 * since vdev_config_sync(). 10284 */ 10285 ASSERT(txg_list_empty(&dp->dp_dirty_datasets, txg)); 10286 ASSERT(txg_list_empty(&dp->dp_dirty_dirs, txg)); 10287 ASSERT(txg_list_empty(&spa->spa_vdev_txg_list, txg)); 10288 10289 while (zfs_pause_spa_sync) 10290 delay(1); 10291 10292 spa->spa_sync_pass = 0; 10293 10294 /* 10295 * Update the last synced uberblock here. We want to do this at 10296 * the end of spa_sync() so that consumers of spa_last_synced_txg() 10297 * will be guaranteed that all the processing associated with 10298 * that txg has been completed. 10299 */ 10300 spa->spa_ubsync = spa->spa_uberblock; 10301 spa_config_exit(spa, SCL_CONFIG, FTAG); 10302 10303 spa_handle_ignored_writes(spa); 10304 10305 /* 10306 * If any async tasks have been requested, kick them off. 10307 */ 10308 spa_async_dispatch(spa); 10309 } 10310 10311 /* 10312 * Sync all pools. We don't want to hold the namespace lock across these 10313 * operations, so we take a reference on the spa_t and drop the lock during the 10314 * sync. 
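 * The reference keeps the spa_t from being freed while the namespace
 * lock is dropped.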
10315 */ 10316 void 10317 spa_sync_allpools(void) 10318 { 10319 spa_t *spa = NULL; 10320 mutex_enter(&spa_namespace_lock); 10321 while ((spa = spa_next(spa)) != NULL) { 10322 if (spa_state(spa) != POOL_STATE_ACTIVE || 10323 !spa_writeable(spa) || spa_suspended(spa)) 10324 continue; 10325 spa_open_ref(spa, FTAG); 10326 mutex_exit(&spa_namespace_lock); 10327 txg_wait_synced(spa_get_dsl(spa), 0); 10328 mutex_enter(&spa_namespace_lock); 10329 spa_close(spa, FTAG); 10330 } 10331 mutex_exit(&spa_namespace_lock); 10332 } 10333 10334 taskq_t * 10335 spa_sync_tq_create(spa_t *spa, const char *name) 10336 { 10337 kthread_t **kthreads; 10338 10339 ASSERT(spa->spa_sync_tq == NULL); 10340 ASSERT3S(spa->spa_alloc_count, <=, boot_ncpus); 10341 10342 /* 10343 * - do not allow more allocators than cpus. 10344 * - there may be more cpus than allocators. 10345 * - do not allow more sync taskq threads than allocators or cpus. 10346 */ 10347 int nthreads = spa->spa_alloc_count; 10348 spa->spa_syncthreads = kmem_zalloc(sizeof (spa_syncthread_info_t) * 10349 nthreads, KM_SLEEP); 10350 10351 spa->spa_sync_tq = taskq_create_synced(name, nthreads, minclsyspri, 10352 nthreads, INT_MAX, TASKQ_PREPOPULATE, &kthreads); 10353 VERIFY(spa->spa_sync_tq != NULL); 10354 VERIFY(kthreads != NULL); 10355 10356 spa_syncthread_info_t *ti = spa->spa_syncthreads; 10357 for (int i = 0; i < nthreads; i++, ti++) { 10358 ti->sti_thread = kthreads[i]; 10359 ti->sti_allocator = i; 10360 } 10361 10362 kmem_free(kthreads, sizeof (*kthreads) * nthreads); 10363 return (spa->spa_sync_tq); 10364 } 10365 10366 void 10367 spa_sync_tq_destroy(spa_t *spa) 10368 { 10369 ASSERT(spa->spa_sync_tq != NULL); 10370 10371 taskq_wait(spa->spa_sync_tq); 10372 taskq_destroy(spa->spa_sync_tq); 10373 kmem_free(spa->spa_syncthreads, 10374 sizeof (spa_syncthread_info_t) * spa->spa_alloc_count); 10375 spa->spa_sync_tq = NULL; 10376 } 10377 10378 uint_t 10379 spa_acq_allocator(spa_t *spa) 10380 { 10381 int i; 10382 10383 if (spa->spa_alloc_count == 1) 10384 return (0); 10385 10386 mutex_enter(&spa->spa_allocs_use->sau_lock); 10387 uint_t r = spa->spa_allocs_use->sau_rotor; 10388 do { 10389 if (++r == spa->spa_alloc_count) 10390 r = 0; 10391 } while (spa->spa_allocs_use->sau_inuse[r]); 10392 spa->spa_allocs_use->sau_inuse[r] = B_TRUE; 10393 spa->spa_allocs_use->sau_rotor = r; 10394 mutex_exit(&spa->spa_allocs_use->sau_lock); 10395 10396 spa_syncthread_info_t *ti = spa->spa_syncthreads; 10397 for (i = 0; i < spa->spa_alloc_count; i++, ti++) { 10398 if (ti->sti_thread == curthread) { 10399 ti->sti_allocator = r; 10400 break; 10401 } 10402 } 10403 ASSERT3S(i, <, spa->spa_alloc_count); 10404 return (r); 10405 } 10406 10407 void 10408 spa_rel_allocator(spa_t *spa, uint_t allocator) 10409 { 10410 if (spa->spa_alloc_count > 1) 10411 spa->spa_allocs_use->sau_inuse[allocator] = B_FALSE; 10412 } 10413 10414 void 10415 spa_select_allocator(zio_t *zio) 10416 { 10417 zbookmark_phys_t *bm = &zio->io_bookmark; 10418 spa_t *spa = zio->io_spa; 10419 10420 ASSERT(zio->io_type == ZIO_TYPE_WRITE); 10421 10422 /* 10423 * A gang block (for example) may have inherited its parent's 10424 * allocator, in which case there is nothing further to do here. 10425 */ 10426 if (ZIO_HAS_ALLOCATOR(zio)) 10427 return; 10428 10429 ASSERT(spa != NULL); 10430 ASSERT(bm != NULL); 10431 10432 /* 10433 * First try to use an allocator assigned to the syncthread, and set 10434 * the corresponding write issue taskq for the allocator. 10435 * Note, we must have an open pool to do this. 
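 * If no syncthread matches, fall back to hashing the bookmark (below) so
 * that the allocator choice is stable for a given block.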
10436 */ 10437 if (spa->spa_sync_tq != NULL) { 10438 spa_syncthread_info_t *ti = spa->spa_syncthreads; 10439 for (int i = 0; i < spa->spa_alloc_count; i++, ti++) { 10440 if (ti->sti_thread == curthread) { 10441 zio->io_allocator = ti->sti_allocator; 10442 return; 10443 } 10444 } 10445 } 10446 10447 /* 10448 * We want to try to use as many allocators as possible to help improve 10449 * performance, but we also want logically adjacent IOs to be physically 10450 * adjacent to improve sequential read performance. We chunk each object 10451 * into 2^20 block regions, and then hash based on the objset, object, 10452 * level, and region to accomplish both of these goals. 10453 */ 10454 uint64_t hv = cityhash4(bm->zb_objset, bm->zb_object, bm->zb_level, 10455 bm->zb_blkid >> 20); 10456 10457 zio->io_allocator = (uint_t)hv % spa->spa_alloc_count; 10458 } 10459 10460 /* 10461 * ========================================================================== 10462 * Miscellaneous routines 10463 * ========================================================================== 10464 */ 10465 10466 /* 10467 * Remove all pools in the system. 10468 */ 10469 void 10470 spa_evict_all(void) 10471 { 10472 spa_t *spa; 10473 10474 /* 10475 * Remove all cached state. All pools should be closed now, 10476 * so every spa in the AVL tree should be unreferenced. 10477 */ 10478 mutex_enter(&spa_namespace_lock); 10479 while ((spa = spa_next(NULL)) != NULL) { 10480 /* 10481 * Stop async tasks. The async thread may need to detach 10482 * a device that's been replaced, which requires grabbing 10483 * spa_namespace_lock, so we must drop it here. 10484 */ 10485 spa_open_ref(spa, FTAG); 10486 mutex_exit(&spa_namespace_lock); 10487 spa_async_suspend(spa); 10488 mutex_enter(&spa_namespace_lock); 10489 spa_close(spa, FTAG); 10490 10491 if (spa->spa_state != POOL_STATE_UNINITIALIZED) { 10492 spa_unload(spa); 10493 spa_deactivate(spa); 10494 } 10495 spa_remove(spa); 10496 } 10497 mutex_exit(&spa_namespace_lock); 10498 } 10499 10500 vdev_t * 10501 spa_lookup_by_guid(spa_t *spa, uint64_t guid, boolean_t aux) 10502 { 10503 vdev_t *vd; 10504 int i; 10505 10506 if ((vd = vdev_lookup_by_guid(spa->spa_root_vdev, guid)) != NULL) 10507 return (vd); 10508 10509 if (aux) { 10510 for (i = 0; i < spa->spa_l2cache.sav_count; i++) { 10511 vd = spa->spa_l2cache.sav_vdevs[i]; 10512 if (vd->vdev_guid == guid) 10513 return (vd); 10514 } 10515 10516 for (i = 0; i < spa->spa_spares.sav_count; i++) { 10517 vd = spa->spa_spares.sav_vdevs[i]; 10518 if (vd->vdev_guid == guid) 10519 return (vd); 10520 } 10521 } 10522 10523 return (NULL); 10524 } 10525 10526 void 10527 spa_upgrade(spa_t *spa, uint64_t version) 10528 { 10529 ASSERT(spa_writeable(spa)); 10530 10531 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 10532 10533 /* 10534 * This should only be called for a non-faulted pool, and since a 10535 * future version would result in an unopenable pool, this shouldn't be 10536 * possible. 
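 * The ASSERTs below check that the current version is supported and that
 * the version is never moved backwards.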
10537 */ 10538 ASSERT(SPA_VERSION_IS_SUPPORTED(spa->spa_uberblock.ub_version)); 10539 ASSERT3U(version, >=, spa->spa_uberblock.ub_version); 10540 10541 spa->spa_uberblock.ub_version = version; 10542 vdev_config_dirty(spa->spa_root_vdev); 10543 10544 spa_config_exit(spa, SCL_ALL, FTAG); 10545 10546 txg_wait_synced(spa_get_dsl(spa), 0); 10547 } 10548 10549 static boolean_t 10550 spa_has_aux_vdev(spa_t *spa, uint64_t guid, spa_aux_vdev_t *sav) 10551 { 10552 (void) spa; 10553 int i; 10554 uint64_t vdev_guid; 10555 10556 for (i = 0; i < sav->sav_count; i++) 10557 if (sav->sav_vdevs[i]->vdev_guid == guid) 10558 return (B_TRUE); 10559 10560 for (i = 0; i < sav->sav_npending; i++) { 10561 if (nvlist_lookup_uint64(sav->sav_pending[i], ZPOOL_CONFIG_GUID, 10562 &vdev_guid) == 0 && vdev_guid == guid) 10563 return (B_TRUE); 10564 } 10565 10566 return (B_FALSE); 10567 } 10568 10569 boolean_t 10570 spa_has_l2cache(spa_t *spa, uint64_t guid) 10571 { 10572 return (spa_has_aux_vdev(spa, guid, &spa->spa_l2cache)); 10573 } 10574 10575 boolean_t 10576 spa_has_spare(spa_t *spa, uint64_t guid) 10577 { 10578 return (spa_has_aux_vdev(spa, guid, &spa->spa_spares)); 10579 } 10580 10581 /* 10582 * Check if a pool has an active shared spare device. 10583 * Note: the reference count of an active spare is 2, once as a spare and once as a replacement. 10584 */ 10585 static boolean_t 10586 spa_has_active_shared_spare(spa_t *spa) 10587 { 10588 int i, refcnt; 10589 uint64_t pool; 10590 spa_aux_vdev_t *sav = &spa->spa_spares; 10591 10592 for (i = 0; i < sav->sav_count; i++) { 10593 if (spa_spare_exists(sav->sav_vdevs[i]->vdev_guid, &pool, 10594 &refcnt) && pool != 0ULL && pool == spa_guid(spa) && 10595 refcnt > 2) 10596 return (B_TRUE); 10597 } 10598 10599 return (B_FALSE); 10600 } 10601 10602 uint64_t 10603 spa_total_metaslabs(spa_t *spa) 10604 { 10605 vdev_t *rvd = spa->spa_root_vdev; 10606 10607 uint64_t m = 0; 10608 for (uint64_t c = 0; c < rvd->vdev_children; c++) { 10609 vdev_t *vd = rvd->vdev_child[c]; 10610 if (!vdev_is_concrete(vd)) 10611 continue; 10612 m += vd->vdev_ms_count; 10613 } 10614 return (m); 10615 } 10616 10617 /* 10618 * Notify any waiting threads that some activity has switched from being in- 10619 * progress to not-in-progress so that the thread can wake up and determine 10620 * whether it is finished waiting. 10621 */ 10622 void 10623 spa_notify_waiters(spa_t *spa) 10624 { 10625 /* 10626 * Acquiring spa_activities_lock here prevents the cv_broadcast from 10627 * happening between the waiting thread's check and cv_wait. 10628 */ 10629 mutex_enter(&spa->spa_activities_lock); 10630 cv_broadcast(&spa->spa_activities_cv); 10631 mutex_exit(&spa->spa_activities_lock); 10632 } 10633 10634 /* 10635 * Notify any waiting threads that the pool is exporting, and then block until 10636 * they are finished using the spa_t. 10637 */ 10638 void 10639 spa_wake_waiters(spa_t *spa) 10640 { 10641 mutex_enter(&spa->spa_activities_lock); 10642 spa->spa_waiters_cancel = B_TRUE; 10643 cv_broadcast(&spa->spa_activities_cv); 10644 while (spa->spa_waiters != 0) 10645 cv_wait(&spa->spa_waiters_cv, &spa->spa_activities_lock); 10646 spa->spa_waiters_cancel = B_FALSE; 10647 mutex_exit(&spa->spa_activities_lock); 10648 } 10649 10650 /* Whether the vdev or any of its descendants are being initialized/trimmed.
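 * (Recursive helper for spa_vdev_activity_in_progress() below.)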
*/ 10651 static boolean_t 10652 spa_vdev_activity_in_progress_impl(vdev_t *vd, zpool_wait_activity_t activity) 10653 { 10654 spa_t *spa = vd->vdev_spa; 10655 10656 ASSERT(spa_config_held(spa, SCL_CONFIG | SCL_STATE, RW_READER)); 10657 ASSERT(MUTEX_HELD(&spa->spa_activities_lock)); 10658 ASSERT(activity == ZPOOL_WAIT_INITIALIZE || 10659 activity == ZPOOL_WAIT_TRIM); 10660 10661 kmutex_t *lock = activity == ZPOOL_WAIT_INITIALIZE ? 10662 &vd->vdev_initialize_lock : &vd->vdev_trim_lock; 10663 10664 mutex_exit(&spa->spa_activities_lock); 10665 mutex_enter(lock); 10666 mutex_enter(&spa->spa_activities_lock); 10667 10668 boolean_t in_progress = (activity == ZPOOL_WAIT_INITIALIZE) ? 10669 (vd->vdev_initialize_state == VDEV_INITIALIZE_ACTIVE) : 10670 (vd->vdev_trim_state == VDEV_TRIM_ACTIVE); 10671 mutex_exit(lock); 10672 10673 if (in_progress) 10674 return (B_TRUE); 10675 10676 for (int i = 0; i < vd->vdev_children; i++) { 10677 if (spa_vdev_activity_in_progress_impl(vd->vdev_child[i], 10678 activity)) 10679 return (B_TRUE); 10680 } 10681 10682 return (B_FALSE); 10683 } 10684 10685 /* 10686 * If use_guid is true, this checks whether the vdev specified by guid is 10687 * being initialized/trimmed. Otherwise, it checks whether any vdev in the pool 10688 * is being initialized/trimmed. The caller must hold the config lock and 10689 * spa_activities_lock. 10690 */ 10691 static int 10692 spa_vdev_activity_in_progress(spa_t *spa, boolean_t use_guid, uint64_t guid, 10693 zpool_wait_activity_t activity, boolean_t *in_progress) 10694 { 10695 mutex_exit(&spa->spa_activities_lock); 10696 spa_config_enter(spa, SCL_CONFIG | SCL_STATE, FTAG, RW_READER); 10697 mutex_enter(&spa->spa_activities_lock); 10698 10699 vdev_t *vd; 10700 if (use_guid) { 10701 vd = spa_lookup_by_guid(spa, guid, B_FALSE); 10702 if (vd == NULL || !vd->vdev_ops->vdev_op_leaf) { 10703 spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG); 10704 return (EINVAL); 10705 } 10706 } else { 10707 vd = spa->spa_root_vdev; 10708 } 10709 10710 *in_progress = spa_vdev_activity_in_progress_impl(vd, activity); 10711 10712 spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG); 10713 return (0); 10714 } 10715 10716 /* 10717 * Locking for waiting threads 10718 * --------------------------- 10719 * 10720 * Waiting threads need a way to check whether a given activity is in progress, 10721 * and then, if it is, wait for it to complete. Each activity will have some 10722 * in-memory representation of the relevant on-disk state which can be used to 10723 * determine whether or not the activity is in progress. The in-memory state and 10724 * the locking used to protect it will be different for each activity, and may 10725 * not be suitable for use with a cvar (e.g., some state is protected by the 10726 * config lock). To allow waiting threads to wait without any races, another 10727 * lock, spa_activities_lock, is used. 10728 * 10729 * When the state is checked, both the activity-specific lock (if there is one) 10730 * and spa_activities_lock are held. In some cases, the activity-specific lock 10731 * is acquired explicitly (e.g. the config lock). In others, the locking is 10732 * internal to some check (e.g. bpobj_is_empty). After checking, the waiting 10733 * thread releases the activity-specific lock and, if the activity is in 10734 * progress, then cv_waits using spa_activities_lock. 
10735 * 10736 * The waiting thread is woken when another thread, one completing some 10737 * activity, updates the state of the activity and then calls 10738 * spa_notify_waiters, which will cv_broadcast. This 'completing' thread only 10739 * needs to hold its activity-specific lock when updating the state, and this 10740 * lock can (but doesn't have to) be dropped before calling spa_notify_waiters. 10741 * 10742 * Because spa_notify_waiters acquires spa_activities_lock before broadcasting, 10743 * and because it is held when the waiting thread checks the state of the 10744 * activity, it can never be the case that the completing thread both updates 10745 * the activity state and cv_broadcasts in between the waiting thread's check 10746 * and cv_wait. Thus, a waiting thread can never miss a wakeup. 10747 * 10748 * In order to prevent deadlock, when the waiting thread does its check, in some 10749 * cases it will temporarily drop spa_activities_lock in order to acquire the 10750 * activity-specific lock. The order in which spa_activities_lock and the 10751 * activity specific lock are acquired in the waiting thread is determined by 10752 * the order in which they are acquired in the completing thread; if the 10753 * completing thread calls spa_notify_waiters with the activity-specific lock 10754 * held, then the waiting thread must also acquire the activity-specific lock 10755 * first. 10756 */ 10757 10758 static int 10759 spa_activity_in_progress(spa_t *spa, zpool_wait_activity_t activity, 10760 boolean_t use_tag, uint64_t tag, boolean_t *in_progress) 10761 { 10762 int error = 0; 10763 10764 ASSERT(MUTEX_HELD(&spa->spa_activities_lock)); 10765 10766 switch (activity) { 10767 case ZPOOL_WAIT_CKPT_DISCARD: 10768 *in_progress = 10769 (spa_feature_is_active(spa, SPA_FEATURE_POOL_CHECKPOINT) && 10770 zap_contains(spa_meta_objset(spa), 10771 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_ZPOOL_CHECKPOINT) == 10772 ENOENT); 10773 break; 10774 case ZPOOL_WAIT_FREE: 10775 *in_progress = ((spa_version(spa) >= SPA_VERSION_DEADLISTS && 10776 !bpobj_is_empty(&spa->spa_dsl_pool->dp_free_bpobj)) || 10777 spa_feature_is_active(spa, SPA_FEATURE_ASYNC_DESTROY) || 10778 spa_livelist_delete_check(spa)); 10779 break; 10780 case ZPOOL_WAIT_INITIALIZE: 10781 case ZPOOL_WAIT_TRIM: 10782 error = spa_vdev_activity_in_progress(spa, use_tag, tag, 10783 activity, in_progress); 10784 break; 10785 case ZPOOL_WAIT_REPLACE: 10786 mutex_exit(&spa->spa_activities_lock); 10787 spa_config_enter(spa, SCL_CONFIG | SCL_STATE, FTAG, RW_READER); 10788 mutex_enter(&spa->spa_activities_lock); 10789 10790 *in_progress = vdev_replace_in_progress(spa->spa_root_vdev); 10791 spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG); 10792 break; 10793 case ZPOOL_WAIT_REMOVE: 10794 *in_progress = (spa->spa_removing_phys.sr_state == 10795 DSS_SCANNING); 10796 break; 10797 case ZPOOL_WAIT_RESILVER: 10798 *in_progress = vdev_rebuild_active(spa->spa_root_vdev); 10799 if (*in_progress) 10800 break; 10801 zfs_fallthrough; 10802 case ZPOOL_WAIT_SCRUB: 10803 { 10804 boolean_t scanning, paused, is_scrub; 10805 dsl_scan_t *scn = spa->spa_dsl_pool->dp_scan; 10806 10807 is_scrub = (scn->scn_phys.scn_func == POOL_SCAN_SCRUB); 10808 scanning = (scn->scn_phys.scn_state == DSS_SCANNING); 10809 paused = dsl_scan_is_paused_scrub(scn); 10810 *in_progress = (scanning && !paused && 10811 is_scrub == (activity == ZPOOL_WAIT_SCRUB)); 10812 break; 10813 } 10814 case ZPOOL_WAIT_RAIDZ_EXPAND: 10815 { 10816 vdev_raidz_expand_t *vre = spa->spa_raidz_expand; 10817 *in_progress = (vre 
!= NULL && vre->vre_state == DSS_SCANNING); 10818 break; 10819 } 10820 default: 10821 panic("unrecognized value for activity %d", activity); 10822 } 10823 10824 return (error); 10825 } 10826 10827 static int 10828 spa_wait_common(const char *pool, zpool_wait_activity_t activity, 10829 boolean_t use_tag, uint64_t tag, boolean_t *waited) 10830 { 10831 /* 10832 * The tag is used to distinguish between instances of an activity. 10833 * 'initialize' and 'trim' are the only activities that we use this for. 10834 * The other activities can only have a single instance in progress in a 10835 * pool at one time, making the tag unnecessary. 10836 * 10837 * There can be multiple devices being replaced at once, but since they 10838 * all finish once resilvering finishes, we don't bother keeping track 10839 * of them individually, we just wait for them all to finish. 10840 */ 10841 if (use_tag && activity != ZPOOL_WAIT_INITIALIZE && 10842 activity != ZPOOL_WAIT_TRIM) 10843 return (EINVAL); 10844 10845 if (activity < 0 || activity >= ZPOOL_WAIT_NUM_ACTIVITIES) 10846 return (EINVAL); 10847 10848 spa_t *spa; 10849 int error = spa_open(pool, &spa, FTAG); 10850 if (error != 0) 10851 return (error); 10852 10853 /* 10854 * Increment the spa's waiter count so that we can call spa_close and 10855 * still ensure that the spa_t doesn't get freed before this thread is 10856 * finished with it when the pool is exported. We want to call spa_close 10857 * before we start waiting because otherwise the additional ref would 10858 * prevent the pool from being exported or destroyed throughout the 10859 * potentially long wait. 10860 */ 10861 mutex_enter(&spa->spa_activities_lock); 10862 spa->spa_waiters++; 10863 spa_close(spa, FTAG); 10864 10865 *waited = B_FALSE; 10866 for (;;) { 10867 boolean_t in_progress; 10868 error = spa_activity_in_progress(spa, activity, use_tag, tag, 10869 &in_progress); 10870 10871 if (error || !in_progress || spa->spa_waiters_cancel) 10872 break; 10873 10874 *waited = B_TRUE; 10875 10876 if (cv_wait_sig(&spa->spa_activities_cv, 10877 &spa->spa_activities_lock) == 0) { 10878 error = EINTR; 10879 break; 10880 } 10881 } 10882 10883 spa->spa_waiters--; 10884 cv_signal(&spa->spa_waiters_cv); 10885 mutex_exit(&spa->spa_activities_lock); 10886 10887 return (error); 10888 } 10889 10890 /* 10891 * Wait for a particular instance of the specified activity to complete, where 10892 * the instance is identified by 'tag'. 10893 */ 10894 int 10895 spa_wait_tag(const char *pool, zpool_wait_activity_t activity, uint64_t tag, 10896 boolean_t *waited) 10897 { 10898 return (spa_wait_common(pool, activity, B_TRUE, tag, waited)); 10899 } 10900 10901 /* 10902 * Wait for all instances of the specified activity to complete. 10903 */ 10904 int 10905 spa_wait(const char *pool, zpool_wait_activity_t activity, boolean_t *waited) 10906 { 10907 10908 return (spa_wait_common(pool, activity, B_FALSE, 0, waited)); 10909 } 10910 10911 sysevent_t * 10912 spa_event_create(spa_t *spa, vdev_t *vd, nvlist_t *hist_nvl, const char *name) 10913 { 10914 sysevent_t *ev = NULL; 10915 #ifdef _KERNEL 10916 nvlist_t *resource; 10917 10918 resource = zfs_event_create(spa, vd, FM_SYSEVENT_CLASS, name, hist_nvl); 10919 if (resource) { 10920 ev = kmem_alloc(sizeof (sysevent_t), KM_SLEEP); 10921 ev->resource = resource; 10922 } 10923 #else 10924 (void) spa, (void) vd, (void) hist_nvl, (void) name; 10925 #endif 10926 return (ev); 10927 } 10928 10929 void 10930 spa_event_post(sysevent_t *ev) 10931 { 10932 #ifdef _KERNEL 10933 if (ev) { 10934
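		/* Hand the event to the zevent subsystem for userspace consumers (e.g. zed). */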
zfs_zevent_post(ev->resource, NULL, zfs_zevent_post_cb); 10935 kmem_free(ev, sizeof (*ev)); 10936 } 10937 #else 10938 (void) ev; 10939 #endif 10940 } 10941 10942 /* 10943 * Post a zevent corresponding to the given sysevent. The 'name' must be one 10944 * of the event definitions in sys/sysevent/eventdefs.h. The payload will be 10945 * filled in from the spa and (optionally) the vdev. This doesn't do anything 10946 * in the userland libzpool, as we don't want consumers to misinterpret ztest 10947 * or zdb as real changes. 10948 */ 10949 void 10950 spa_event_notify(spa_t *spa, vdev_t *vd, nvlist_t *hist_nvl, const char *name) 10951 { 10952 spa_event_post(spa_event_create(spa, vd, hist_nvl, name)); 10953 } 10954 10955 /* state manipulation functions */ 10956 EXPORT_SYMBOL(spa_open); 10957 EXPORT_SYMBOL(spa_open_rewind); 10958 EXPORT_SYMBOL(spa_get_stats); 10959 EXPORT_SYMBOL(spa_create); 10960 EXPORT_SYMBOL(spa_import); 10961 EXPORT_SYMBOL(spa_tryimport); 10962 EXPORT_SYMBOL(spa_destroy); 10963 EXPORT_SYMBOL(spa_export); 10964 EXPORT_SYMBOL(spa_reset); 10965 EXPORT_SYMBOL(spa_async_request); 10966 EXPORT_SYMBOL(spa_async_suspend); 10967 EXPORT_SYMBOL(spa_async_resume); 10968 EXPORT_SYMBOL(spa_inject_addref); 10969 EXPORT_SYMBOL(spa_inject_delref); 10970 EXPORT_SYMBOL(spa_scan_stat_init); 10971 EXPORT_SYMBOL(spa_scan_get_stats); 10972 10973 /* device manipulation */ 10974 EXPORT_SYMBOL(spa_vdev_add); 10975 EXPORT_SYMBOL(spa_vdev_attach); 10976 EXPORT_SYMBOL(spa_vdev_detach); 10977 EXPORT_SYMBOL(spa_vdev_setpath); 10978 EXPORT_SYMBOL(spa_vdev_setfru); 10979 EXPORT_SYMBOL(spa_vdev_split_mirror); 10980 10981 /* spare state (which is global across all pools) */ 10982 EXPORT_SYMBOL(spa_spare_add); 10983 EXPORT_SYMBOL(spa_spare_remove); 10984 EXPORT_SYMBOL(spa_spare_exists); 10985 EXPORT_SYMBOL(spa_spare_activate); 10986 10987 /* L2ARC state (which is global across all pools) */ 10988 EXPORT_SYMBOL(spa_l2cache_add); 10989 EXPORT_SYMBOL(spa_l2cache_remove); 10990 EXPORT_SYMBOL(spa_l2cache_exists); 10991 EXPORT_SYMBOL(spa_l2cache_activate); 10992 EXPORT_SYMBOL(spa_l2cache_drop); 10993 10994 /* scanning */ 10995 EXPORT_SYMBOL(spa_scan); 10996 EXPORT_SYMBOL(spa_scan_range); 10997 EXPORT_SYMBOL(spa_scan_stop); 10998 10999 /* spa syncing */ 11000 EXPORT_SYMBOL(spa_sync); /* only for DMU use */ 11001 EXPORT_SYMBOL(spa_sync_allpools); 11002 11003 /* properties */ 11004 EXPORT_SYMBOL(spa_prop_set); 11005 EXPORT_SYMBOL(spa_prop_get); 11006 EXPORT_SYMBOL(spa_prop_clear_bootfs); 11007 11008 /* asynchronous event notification */ 11009 EXPORT_SYMBOL(spa_event_notify); 11010 11011 ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, preload_pct, UINT, ZMOD_RW, 11012 "Percentage of CPUs to run a metaslab preload taskq"); 11013 11014 ZFS_MODULE_PARAM(zfs_spa, spa_, load_verify_shift, UINT, ZMOD_RW, 11015 "log2 fraction of arc that can be used by inflight I/Os when " 11016 "verifying pool during import"); 11017 11018 ZFS_MODULE_PARAM(zfs_spa, spa_, load_verify_metadata, INT, ZMOD_RW, 11019 "Set to traverse metadata on pool import"); 11020 11021 ZFS_MODULE_PARAM(zfs_spa, spa_, load_verify_data, INT, ZMOD_RW, 11022 "Set to traverse data on pool import"); 11023 11024 ZFS_MODULE_PARAM(zfs_spa, spa_, load_print_vdev_tree, INT, ZMOD_RW, 11025 "Print vdev tree to zfs_dbgmsg during pool import"); 11026 11027 ZFS_MODULE_PARAM(zfs_zio, zio_, taskq_batch_pct, UINT, ZMOD_RW, 11028 "Percentage of CPUs to run an IO worker thread"); 11029 11030 ZFS_MODULE_PARAM(zfs_zio, zio_, taskq_batch_tpq, UINT, ZMOD_RW, 11031 "Number of threads per IO worker
taskqueue"); 11032 11033 ZFS_MODULE_PARAM(zfs, zfs_, max_missing_tvds, U64, ZMOD_RW, 11034 "Allow importing pool with up to this number of missing top-level " 11035 "vdevs (in read-only mode)"); 11036 11037 ZFS_MODULE_PARAM(zfs_livelist_condense, zfs_livelist_condense_, zthr_pause, INT, 11038 ZMOD_RW, "Set the livelist condense zthr to pause"); 11039 11040 ZFS_MODULE_PARAM(zfs_livelist_condense, zfs_livelist_condense_, sync_pause, INT, 11041 ZMOD_RW, "Set the livelist condense synctask to pause"); 11042 11043 ZFS_MODULE_PARAM(zfs_livelist_condense, zfs_livelist_condense_, sync_cancel, 11044 INT, ZMOD_RW, 11045 "Whether livelist condensing was canceled in the synctask"); 11046 11047 ZFS_MODULE_PARAM(zfs_livelist_condense, zfs_livelist_condense_, zthr_cancel, 11048 INT, ZMOD_RW, 11049 "Whether livelist condensing was canceled in the zthr function"); 11050 11051 ZFS_MODULE_PARAM(zfs_livelist_condense, zfs_livelist_condense_, new_alloc, INT, 11052 ZMOD_RW, 11053 "Whether extra ALLOC blkptrs were added to a livelist entry while it " 11054 "was being condensed"); 11055 11056 #ifdef _KERNEL 11057 ZFS_MODULE_VIRTUAL_PARAM_CALL(zfs_zio, zio_, taskq_read, 11058 spa_taskq_read_param_set, spa_taskq_read_param_get, ZMOD_RW, 11059 "Configure IO queues for read IO"); 11060 ZFS_MODULE_VIRTUAL_PARAM_CALL(zfs_zio, zio_, taskq_write, 11061 spa_taskq_write_param_set, spa_taskq_write_param_get, ZMOD_RW, 11062 "Configure IO queues for write IO"); 11063 #endif 11064 11065 ZFS_MODULE_PARAM(zfs_zio, zio_, taskq_write_tpq, UINT, ZMOD_RW, 11066 "Number of CPUs per write issue taskq"); 11067