1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or https://opensource.org/licenses/CDDL-1.0. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 22 /* 23 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved. 24 * Copyright (c) 2011, 2024 by Delphix. All rights reserved. 25 * Copyright (c) 2018, Nexenta Systems, Inc. All rights reserved. 26 * Copyright (c) 2014 Spectra Logic Corporation, All rights reserved. 27 * Copyright 2013 Saso Kiselkov. All rights reserved. 28 * Copyright (c) 2014 Integros [integros.com] 29 * Copyright 2016 Toomas Soome <tsoome@me.com> 30 * Copyright (c) 2016 Actifio, Inc. All rights reserved. 31 * Copyright 2018 Joyent, Inc. 32 * Copyright (c) 2017, 2019, Datto Inc. All rights reserved. 33 * Copyright 2017 Joyent, Inc. 34 * Copyright (c) 2017, Intel Corporation. 35 * Copyright (c) 2021, Colm Buckley <colm@tuatha.org> 36 * Copyright (c) 2023 Hewlett Packard Enterprise Development LP. 37 * Copyright (c) 2023, 2024, Klara Inc. 38 */ 39 40 /* 41 * SPA: Storage Pool Allocator 42 * 43 * This file contains all the routines used when modifying on-disk SPA state. 44 * This includes opening, importing, destroying, exporting a pool, and syncing a 45 * pool. 
46 */ 47 48 #include <sys/zfs_context.h> 49 #include <sys/fm/fs/zfs.h> 50 #include <sys/spa_impl.h> 51 #include <sys/zio.h> 52 #include <sys/zio_checksum.h> 53 #include <sys/dmu.h> 54 #include <sys/dmu_tx.h> 55 #include <sys/zap.h> 56 #include <sys/zil.h> 57 #include <sys/brt.h> 58 #include <sys/ddt.h> 59 #include <sys/vdev_impl.h> 60 #include <sys/vdev_removal.h> 61 #include <sys/vdev_indirect_mapping.h> 62 #include <sys/vdev_indirect_births.h> 63 #include <sys/vdev_initialize.h> 64 #include <sys/vdev_rebuild.h> 65 #include <sys/vdev_trim.h> 66 #include <sys/vdev_disk.h> 67 #include <sys/vdev_raidz.h> 68 #include <sys/vdev_draid.h> 69 #include <sys/metaslab.h> 70 #include <sys/metaslab_impl.h> 71 #include <sys/mmp.h> 72 #include <sys/uberblock_impl.h> 73 #include <sys/txg.h> 74 #include <sys/avl.h> 75 #include <sys/bpobj.h> 76 #include <sys/dmu_traverse.h> 77 #include <sys/dmu_objset.h> 78 #include <sys/unique.h> 79 #include <sys/dsl_pool.h> 80 #include <sys/dsl_dataset.h> 81 #include <sys/dsl_dir.h> 82 #include <sys/dsl_prop.h> 83 #include <sys/dsl_synctask.h> 84 #include <sys/fs/zfs.h> 85 #include <sys/arc.h> 86 #include <sys/callb.h> 87 #include <sys/systeminfo.h> 88 #include <sys/zfs_ioctl.h> 89 #include <sys/dsl_scan.h> 90 #include <sys/zfeature.h> 91 #include <sys/dsl_destroy.h> 92 #include <sys/zvol.h> 93 94 #ifdef _KERNEL 95 #include <sys/fm/protocol.h> 96 #include <sys/fm/util.h> 97 #include <sys/callb.h> 98 #include <sys/zone.h> 99 #include <sys/vmsystm.h> 100 #endif /* _KERNEL */ 101 102 #include "zfs_prop.h" 103 #include "zfs_comutil.h" 104 #include <cityhash.h> 105 106 /* 107 * spa_thread() existed on Illumos as a parent thread for the various worker 108 * threads that actually run the pool, as a way to both reference the entire 109 * pool work as a single object, and to share properties like scheduling 110 * options. It has not yet been adapted to Linux or FreeBSD. This define is 111 * used to mark related parts of the code to make things easier for the reader, 112 * and to compile this code out. It can be removed when someone implements it, 113 * moves it to some Illumos-specific place, or removes it entirely. 114 */ 115 #undef HAVE_SPA_THREAD 116 117 /* 118 * The "System Duty Cycle" scheduling class is an Illumos feature to help 119 * prevent CPU-intensive kernel threads from affecting latency on interactive 120 * threads. It doesn't exist on Linux or FreeBSD, so the supporting code is 121 * gated behind a define. On Illumos SDC depends on spa_thread(), but 122 * spa_thread() also has other uses, so this is a separate define. 123 */ 124 #undef HAVE_SYSDC 125 126 /* 127 * The interval, in seconds, at which failed configuration cache file writes 128 * should be retried. 129 */ 130 int zfs_ccw_retry_interval = 300; 131 132 typedef enum zti_modes { 133 ZTI_MODE_FIXED, /* value is # of threads (min 1) */ 134 ZTI_MODE_SCALE, /* Taskqs scale with CPUs. 
*/ 135 ZTI_MODE_SYNC, /* sync thread assigned */ 136 ZTI_MODE_NULL, /* don't create a taskq */ 137 ZTI_NMODES 138 } zti_modes_t; 139 140 #define ZTI_P(n, q) { ZTI_MODE_FIXED, (n), (q) } 141 #define ZTI_PCT(n) { ZTI_MODE_ONLINE_PERCENT, (n), 1 } 142 #define ZTI_SCALE { ZTI_MODE_SCALE, 0, 1 } 143 #define ZTI_SYNC { ZTI_MODE_SYNC, 0, 1 } 144 #define ZTI_NULL { ZTI_MODE_NULL, 0, 0 } 145 146 #define ZTI_N(n) ZTI_P(n, 1) 147 #define ZTI_ONE ZTI_N(1) 148 149 typedef struct zio_taskq_info { 150 zti_modes_t zti_mode; 151 uint_t zti_value; 152 uint_t zti_count; 153 } zio_taskq_info_t; 154 155 static const char *const zio_taskq_types[ZIO_TASKQ_TYPES] = { 156 "iss", "iss_h", "int", "int_h" 157 }; 158 159 /* 160 * This table defines the taskq settings for each ZFS I/O type. When 161 * initializing a pool, we use this table to create an appropriately sized 162 * taskq. Some operations are low volume and therefore have a small, static 163 * number of threads assigned to their taskqs using the ZTI_N(#) or ZTI_ONE 164 * macros. Other operations process a large amount of data; the ZTI_SCALE 165 * macro causes us to create a taskq oriented for throughput. Some operations 166 * are so high frequency and short-lived that the taskq itself can become a 167 * point of lock contention. The ZTI_P(#, #) macro indicates that we need an 168 * additional degree of parallelism specified by the number of threads per- 169 * taskq and the number of taskqs; when dispatching an event in this case, the 170 * particular taskq is chosen at random. ZTI_SCALE uses a number of taskqs 171 * that scales with the number of CPUs. 172 * 173 * The different taskq priorities are to handle the different contexts (issue 174 * and interrupt) and then to reserve threads for high priority I/Os that 175 * need to be handled with minimum delay. Illumos taskq has unfair TQ_FRONT 176 * implementation, so separate high priority threads are used there. 177 */ 178 static zio_taskq_info_t zio_taskqs[ZIO_TYPES][ZIO_TASKQ_TYPES] = { 179 /* ISSUE ISSUE_HIGH INTR INTR_HIGH */ 180 { ZTI_ONE, ZTI_NULL, ZTI_ONE, ZTI_NULL }, /* NULL */ 181 { ZTI_N(8), ZTI_NULL, ZTI_SCALE, ZTI_NULL }, /* READ */ 182 #ifdef illumos 183 { ZTI_SYNC, ZTI_N(5), ZTI_SCALE, ZTI_N(5) }, /* WRITE */ 184 #else 185 { ZTI_SYNC, ZTI_NULL, ZTI_SCALE, ZTI_NULL }, /* WRITE */ 186 #endif 187 { ZTI_SCALE, ZTI_NULL, ZTI_ONE, ZTI_NULL }, /* FREE */ 188 { ZTI_ONE, ZTI_NULL, ZTI_ONE, ZTI_NULL }, /* CLAIM */ 189 { ZTI_ONE, ZTI_NULL, ZTI_ONE, ZTI_NULL }, /* FLUSH */ 190 { ZTI_N(4), ZTI_NULL, ZTI_ONE, ZTI_NULL }, /* TRIM */ 191 }; 192 193 static void spa_sync_version(void *arg, dmu_tx_t *tx); 194 static void spa_sync_props(void *arg, dmu_tx_t *tx); 195 static boolean_t spa_has_active_shared_spare(spa_t *spa); 196 static int spa_load_impl(spa_t *spa, spa_import_type_t type, 197 const char **ereport); 198 static void spa_vdev_resilver_done(spa_t *spa); 199 200 /* 201 * Percentage of all CPUs that can be used by the metaslab preload taskq. 
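 * For example (illustrative only): spa_activate() below passes this value
 * to taskq_create() with TASKQ_THREADS_CPU_PCT, so the default of 50
 * caps the "z_metaslab" taskq at roughly half the online CPUs -- about
 * 8 threads on a 16-CPU machine.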
202 */ 203 static uint_t metaslab_preload_pct = 50; 204 205 static uint_t zio_taskq_batch_pct = 80; /* 1 thread per cpu in pset */ 206 static uint_t zio_taskq_batch_tpq; /* threads per taskq */ 207 208 #ifdef HAVE_SYSDC 209 static const boolean_t zio_taskq_sysdc = B_TRUE; /* use SDC scheduling class */ 210 static const uint_t zio_taskq_basedc = 80; /* base duty cycle */ 211 #endif 212 213 #ifdef HAVE_SPA_THREAD 214 static const boolean_t spa_create_process = B_TRUE; /* no process => no sysdc */ 215 #endif 216 217 static uint_t zio_taskq_write_tpq = 16; 218 219 /* 220 * Report any spa_load_verify errors found, but do not fail spa_load. 221 * This is used by zdb to analyze non-idle pools. 222 */ 223 boolean_t spa_load_verify_dryrun = B_FALSE; 224 225 /* 226 * Allow read spacemaps in case of readonly import (spa_mode == SPA_MODE_READ). 227 * This is used by zdb for spacemaps verification. 228 */ 229 boolean_t spa_mode_readable_spacemaps = B_FALSE; 230 231 /* 232 * This (illegal) pool name is used when temporarily importing a spa_t in order 233 * to get the vdev stats associated with the imported devices. 234 */ 235 #define TRYIMPORT_NAME "$import" 236 237 /* 238 * For debugging purposes: print out vdev tree during pool import. 239 */ 240 static int spa_load_print_vdev_tree = B_FALSE; 241 242 /* 243 * A non-zero value for zfs_max_missing_tvds means that we allow importing 244 * pools with missing top-level vdevs. This is strictly intended for advanced 245 * pool recovery cases since missing data is almost inevitable. Pools with 246 * missing devices can only be imported read-only for safety reasons, and their 247 * fail-mode will be automatically set to "continue". 248 * 249 * With 1 missing vdev we should be able to import the pool and mount all 250 * datasets. User data that was not modified after the missing device has been 251 * added should be recoverable. This means that snapshots created prior to the 252 * addition of that device should be completely intact. 253 * 254 * With 2 missing vdevs, some datasets may fail to mount since there are 255 * dataset statistics that are stored as regular metadata. Some data might be 256 * recoverable if those vdevs were added recently. 257 * 258 * With 3 or more missing vdevs, the pool is severely damaged and MOS entries 259 * may be missing entirely. Chances of data recovery are very low. Note that 260 * there are also risks of performing an inadvertent rewind as we might be 261 * missing all the vdevs with the latest uberblocks. 262 */ 263 uint64_t zfs_max_missing_tvds = 0; 264 265 /* 266 * The parameters below are similar to zfs_max_missing_tvds but are only 267 * intended for a preliminary open of the pool with an untrusted config which 268 * might be incomplete or out-dated. 269 * 270 * We are more tolerant for pools opened from a cachefile since we could have 271 * an out-dated cachefile where a device removal was not registered. 272 * We could have set the limit arbitrarily high but in the case where devices 273 * are really missing we would want to return the proper error codes; we chose 274 * SPA_DVAS_PER_BP - 1 so that some copies of the MOS would still be available 275 * and we get a chance to retrieve the trusted config. 276 */ 277 uint64_t zfs_max_missing_tvds_cachefile = SPA_DVAS_PER_BP - 1; 278 279 /* 280 * In the case where config was assembled by scanning device paths (/dev/dsks 281 * by default) we are less tolerant since all the existing devices should have 282 * been detected and we want spa_load to return the right error codes. 
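 * As a concrete reading of these defaults: SPA_DVAS_PER_BP is 3, so the
 * cachefile limit above works out to 2 missing top-level vdevs, while the
 * scan limit below stays at 0 (no missing top-level vdevs tolerated).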
283 */ 284 uint64_t zfs_max_missing_tvds_scan = 0; 285 286 /* 287 * Debugging aid that pauses spa_sync() towards the end. 288 */ 289 static const boolean_t zfs_pause_spa_sync = B_FALSE; 290 291 /* 292 * Variables to indicate the livelist condense zthr func should wait at certain 293 * points for the livelist to be removed - used to test condense/destroy races 294 */ 295 static int zfs_livelist_condense_zthr_pause = 0; 296 static int zfs_livelist_condense_sync_pause = 0; 297 298 /* 299 * Variables to track whether or not condense cancellation has been 300 * triggered in testing. 301 */ 302 static int zfs_livelist_condense_sync_cancel = 0; 303 static int zfs_livelist_condense_zthr_cancel = 0; 304 305 /* 306 * Variable to track whether or not extra ALLOC blkptrs were added to a 307 * livelist entry while it was being condensed (caused by the way we track 308 * remapped blkptrs in dbuf_remap_impl) 309 */ 310 static int zfs_livelist_condense_new_alloc = 0; 311 312 /* 313 * ========================================================================== 314 * SPA properties routines 315 * ========================================================================== 316 */ 317 318 /* 319 * Add a (source=src, propname=propval) list to an nvlist. 320 */ 321 static void 322 spa_prop_add_list(nvlist_t *nvl, zpool_prop_t prop, const char *strval, 323 uint64_t intval, zprop_source_t src) 324 { 325 const char *propname = zpool_prop_to_name(prop); 326 nvlist_t *propval; 327 328 propval = fnvlist_alloc(); 329 fnvlist_add_uint64(propval, ZPROP_SOURCE, src); 330 331 if (strval != NULL) 332 fnvlist_add_string(propval, ZPROP_VALUE, strval); 333 else 334 fnvlist_add_uint64(propval, ZPROP_VALUE, intval); 335 336 fnvlist_add_nvlist(nvl, propname, propval); 337 nvlist_free(propval); 338 } 339 340 static int 341 spa_prop_add(spa_t *spa, const char *propname, nvlist_t *outnvl) 342 { 343 zpool_prop_t prop = zpool_name_to_prop(propname); 344 zprop_source_t src = ZPROP_SRC_NONE; 345 uint64_t intval; 346 int err; 347 348 /* 349 * NB: Not all properties lookups via this API require 350 * the spa props lock, so they must explicitly grab it here. 351 */ 352 switch (prop) { 353 case ZPOOL_PROP_DEDUPCACHED: 354 err = ddt_get_pool_dedup_cached(spa, &intval); 355 if (err != 0) 356 return (SET_ERROR(err)); 357 break; 358 default: 359 return (SET_ERROR(EINVAL)); 360 } 361 362 spa_prop_add_list(outnvl, prop, NULL, intval, src); 363 364 return (0); 365 } 366 367 int 368 spa_prop_get_nvlist(spa_t *spa, char **props, unsigned int n_props, 369 nvlist_t *outnvl) 370 { 371 int err = 0; 372 373 if (props == NULL) 374 return (0); 375 376 for (unsigned int i = 0; i < n_props && err == 0; i++) { 377 err = spa_prop_add(spa, props[i], outnvl); 378 } 379 380 return (err); 381 } 382 383 /* 384 * Add a user property (source=src, propname=propval) to an nvlist. 385 */ 386 static void 387 spa_prop_add_user(nvlist_t *nvl, const char *propname, char *strval, 388 zprop_source_t src) 389 { 390 nvlist_t *propval; 391 392 VERIFY(nvlist_alloc(&propval, NV_UNIQUE_NAME, KM_SLEEP) == 0); 393 VERIFY(nvlist_add_uint64(propval, ZPROP_SOURCE, src) == 0); 394 VERIFY(nvlist_add_string(propval, ZPROP_VALUE, strval) == 0); 395 VERIFY(nvlist_add_nvlist(nvl, propname, propval) == 0); 396 nvlist_free(propval); 397 } 398 399 /* 400 * Get property values from the spa configuration. 
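 * Every property added here goes through spa_prop_add_list() above, so the
 * resulting nvlist ends up with one nested nvlist per property, roughly:
 *
 *	<propname> -> { ZPROP_SOURCE = <src>, ZPROP_VALUE = <strval or intval> }
 *
 * (sketch only; see spa_prop_add_list() for the exact layout).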
401 */ 402 static void 403 spa_prop_get_config(spa_t *spa, nvlist_t *nv) 404 { 405 vdev_t *rvd = spa->spa_root_vdev; 406 dsl_pool_t *pool = spa->spa_dsl_pool; 407 uint64_t size, alloc, cap, version; 408 const zprop_source_t src = ZPROP_SRC_NONE; 409 spa_config_dirent_t *dp; 410 metaslab_class_t *mc = spa_normal_class(spa); 411 412 ASSERT(MUTEX_HELD(&spa->spa_props_lock)); 413 414 if (rvd != NULL) { 415 alloc = metaslab_class_get_alloc(mc); 416 alloc += metaslab_class_get_alloc(spa_special_class(spa)); 417 alloc += metaslab_class_get_alloc(spa_dedup_class(spa)); 418 alloc += metaslab_class_get_alloc(spa_embedded_log_class(spa)); 419 420 size = metaslab_class_get_space(mc); 421 size += metaslab_class_get_space(spa_special_class(spa)); 422 size += metaslab_class_get_space(spa_dedup_class(spa)); 423 size += metaslab_class_get_space(spa_embedded_log_class(spa)); 424 425 spa_prop_add_list(nv, ZPOOL_PROP_NAME, spa_name(spa), 0, src); 426 spa_prop_add_list(nv, ZPOOL_PROP_SIZE, NULL, size, src); 427 spa_prop_add_list(nv, ZPOOL_PROP_ALLOCATED, NULL, alloc, src); 428 spa_prop_add_list(nv, ZPOOL_PROP_FREE, NULL, 429 size - alloc, src); 430 spa_prop_add_list(nv, ZPOOL_PROP_CHECKPOINT, NULL, 431 spa->spa_checkpoint_info.sci_dspace, src); 432 433 spa_prop_add_list(nv, ZPOOL_PROP_FRAGMENTATION, NULL, 434 metaslab_class_fragmentation(mc), src); 435 spa_prop_add_list(nv, ZPOOL_PROP_EXPANDSZ, NULL, 436 metaslab_class_expandable_space(mc), src); 437 spa_prop_add_list(nv, ZPOOL_PROP_READONLY, NULL, 438 (spa_mode(spa) == SPA_MODE_READ), src); 439 440 cap = (size == 0) ? 0 : (alloc * 100 / size); 441 spa_prop_add_list(nv, ZPOOL_PROP_CAPACITY, NULL, cap, src); 442 443 spa_prop_add_list(nv, ZPOOL_PROP_DEDUPRATIO, NULL, 444 ddt_get_pool_dedup_ratio(spa), src); 445 spa_prop_add_list(nv, ZPOOL_PROP_BCLONEUSED, NULL, 446 brt_get_used(spa), src); 447 spa_prop_add_list(nv, ZPOOL_PROP_BCLONESAVED, NULL, 448 brt_get_saved(spa), src); 449 spa_prop_add_list(nv, ZPOOL_PROP_BCLONERATIO, NULL, 450 brt_get_ratio(spa), src); 451 452 spa_prop_add_list(nv, ZPOOL_PROP_DEDUP_TABLE_SIZE, NULL, 453 ddt_get_ddt_dsize(spa), src); 454 455 spa_prop_add_list(nv, ZPOOL_PROP_HEALTH, NULL, 456 rvd->vdev_state, src); 457 458 version = spa_version(spa); 459 if (version == zpool_prop_default_numeric(ZPOOL_PROP_VERSION)) { 460 spa_prop_add_list(nv, ZPOOL_PROP_VERSION, NULL, 461 version, ZPROP_SRC_DEFAULT); 462 } else { 463 spa_prop_add_list(nv, ZPOOL_PROP_VERSION, NULL, 464 version, ZPROP_SRC_LOCAL); 465 } 466 spa_prop_add_list(nv, ZPOOL_PROP_LOAD_GUID, 467 NULL, spa_load_guid(spa), src); 468 } 469 470 if (pool != NULL) { 471 /* 472 * The $FREE directory was introduced in SPA_VERSION_DEADLISTS, 473 * when opening pools before this version freedir will be NULL. 
474 */ 475 if (pool->dp_free_dir != NULL) { 476 spa_prop_add_list(nv, ZPOOL_PROP_FREEING, NULL, 477 dsl_dir_phys(pool->dp_free_dir)->dd_used_bytes, 478 src); 479 } else { 480 spa_prop_add_list(nv, ZPOOL_PROP_FREEING, 481 NULL, 0, src); 482 } 483 484 if (pool->dp_leak_dir != NULL) { 485 spa_prop_add_list(nv, ZPOOL_PROP_LEAKED, NULL, 486 dsl_dir_phys(pool->dp_leak_dir)->dd_used_bytes, 487 src); 488 } else { 489 spa_prop_add_list(nv, ZPOOL_PROP_LEAKED, 490 NULL, 0, src); 491 } 492 } 493 494 spa_prop_add_list(nv, ZPOOL_PROP_GUID, NULL, spa_guid(spa), src); 495 496 if (spa->spa_comment != NULL) { 497 spa_prop_add_list(nv, ZPOOL_PROP_COMMENT, spa->spa_comment, 498 0, ZPROP_SRC_LOCAL); 499 } 500 501 if (spa->spa_compatibility != NULL) { 502 spa_prop_add_list(nv, ZPOOL_PROP_COMPATIBILITY, 503 spa->spa_compatibility, 0, ZPROP_SRC_LOCAL); 504 } 505 506 if (spa->spa_root != NULL) 507 spa_prop_add_list(nv, ZPOOL_PROP_ALTROOT, spa->spa_root, 508 0, ZPROP_SRC_LOCAL); 509 510 if (spa_feature_is_enabled(spa, SPA_FEATURE_LARGE_BLOCKS)) { 511 spa_prop_add_list(nv, ZPOOL_PROP_MAXBLOCKSIZE, NULL, 512 MIN(zfs_max_recordsize, SPA_MAXBLOCKSIZE), ZPROP_SRC_NONE); 513 } else { 514 spa_prop_add_list(nv, ZPOOL_PROP_MAXBLOCKSIZE, NULL, 515 SPA_OLD_MAXBLOCKSIZE, ZPROP_SRC_NONE); 516 } 517 518 if (spa_feature_is_enabled(spa, SPA_FEATURE_LARGE_DNODE)) { 519 spa_prop_add_list(nv, ZPOOL_PROP_MAXDNODESIZE, NULL, 520 DNODE_MAX_SIZE, ZPROP_SRC_NONE); 521 } else { 522 spa_prop_add_list(nv, ZPOOL_PROP_MAXDNODESIZE, NULL, 523 DNODE_MIN_SIZE, ZPROP_SRC_NONE); 524 } 525 526 if ((dp = list_head(&spa->spa_config_list)) != NULL) { 527 if (dp->scd_path == NULL) { 528 spa_prop_add_list(nv, ZPOOL_PROP_CACHEFILE, 529 "none", 0, ZPROP_SRC_LOCAL); 530 } else if (strcmp(dp->scd_path, spa_config_path) != 0) { 531 spa_prop_add_list(nv, ZPOOL_PROP_CACHEFILE, 532 dp->scd_path, 0, ZPROP_SRC_LOCAL); 533 } 534 } 535 } 536 537 /* 538 * Get zpool property values. 539 */ 540 int 541 spa_prop_get(spa_t *spa, nvlist_t *nv) 542 { 543 objset_t *mos = spa->spa_meta_objset; 544 zap_cursor_t zc; 545 zap_attribute_t *za; 546 dsl_pool_t *dp; 547 int err = 0; 548 549 dp = spa_get_dsl(spa); 550 dsl_pool_config_enter(dp, FTAG); 551 za = zap_attribute_alloc(); 552 mutex_enter(&spa->spa_props_lock); 553 554 /* 555 * Get properties from the spa config. 556 */ 557 spa_prop_get_config(spa, nv); 558 559 /* If no pool property object, no more prop to get. */ 560 if (mos == NULL || spa->spa_pool_props_object == 0) 561 goto out; 562 563 /* 564 * Get properties from the MOS pool property object. 
565 */ 566 for (zap_cursor_init(&zc, mos, spa->spa_pool_props_object); 567 (err = zap_cursor_retrieve(&zc, za)) == 0; 568 zap_cursor_advance(&zc)) { 569 uint64_t intval = 0; 570 char *strval = NULL; 571 zprop_source_t src = ZPROP_SRC_DEFAULT; 572 zpool_prop_t prop; 573 574 if ((prop = zpool_name_to_prop(za->za_name)) == 575 ZPOOL_PROP_INVAL && !zfs_prop_user(za->za_name)) 576 continue; 577 578 switch (za->za_integer_length) { 579 case 8: 580 /* integer property */ 581 if (za->za_first_integer != 582 zpool_prop_default_numeric(prop)) 583 src = ZPROP_SRC_LOCAL; 584 585 if (prop == ZPOOL_PROP_BOOTFS) { 586 dsl_dataset_t *ds = NULL; 587 588 err = dsl_dataset_hold_obj(dp, 589 za->za_first_integer, FTAG, &ds); 590 if (err != 0) 591 break; 592 593 strval = kmem_alloc(ZFS_MAX_DATASET_NAME_LEN, 594 KM_SLEEP); 595 dsl_dataset_name(ds, strval); 596 dsl_dataset_rele(ds, FTAG); 597 } else { 598 strval = NULL; 599 intval = za->za_first_integer; 600 } 601 602 spa_prop_add_list(nv, prop, strval, intval, src); 603 604 if (strval != NULL) 605 kmem_free(strval, ZFS_MAX_DATASET_NAME_LEN); 606 607 break; 608 609 case 1: 610 /* string property */ 611 strval = kmem_alloc(za->za_num_integers, KM_SLEEP); 612 err = zap_lookup(mos, spa->spa_pool_props_object, 613 za->za_name, 1, za->za_num_integers, strval); 614 if (err) { 615 kmem_free(strval, za->za_num_integers); 616 break; 617 } 618 if (prop != ZPOOL_PROP_INVAL) { 619 spa_prop_add_list(nv, prop, strval, 0, src); 620 } else { 621 src = ZPROP_SRC_LOCAL; 622 spa_prop_add_user(nv, za->za_name, strval, 623 src); 624 } 625 kmem_free(strval, za->za_num_integers); 626 break; 627 628 default: 629 break; 630 } 631 } 632 zap_cursor_fini(&zc); 633 out: 634 mutex_exit(&spa->spa_props_lock); 635 dsl_pool_config_exit(dp, FTAG); 636 zap_attribute_free(za); 637 638 if (err && err != ENOENT) 639 return (err); 640 641 return (0); 642 } 643 644 /* 645 * Validate the given pool properties nvlist and modify the list 646 * for the property values to be set. 647 */ 648 static int 649 spa_prop_validate(spa_t *spa, nvlist_t *props) 650 { 651 nvpair_t *elem; 652 int error = 0, reset_bootfs = 0; 653 uint64_t objnum = 0; 654 boolean_t has_feature = B_FALSE; 655 656 elem = NULL; 657 while ((elem = nvlist_next_nvpair(props, elem)) != NULL) { 658 uint64_t intval; 659 const char *strval, *slash, *check, *fname; 660 const char *propname = nvpair_name(elem); 661 zpool_prop_t prop = zpool_name_to_prop(propname); 662 663 switch (prop) { 664 case ZPOOL_PROP_INVAL: 665 /* 666 * Sanitize the input. 
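		 * Two kinds of names can land here: user properties, which
		 * are only checked below for name/value length, and feature
		 * properties of the form "feature@<name>", which must be a
		 * uint64 with value 0 (the only accepted setting here).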
667 */ 668 if (zfs_prop_user(propname)) { 669 if (strlen(propname) >= ZAP_MAXNAMELEN) { 670 error = SET_ERROR(ENAMETOOLONG); 671 break; 672 } 673 674 if (strlen(fnvpair_value_string(elem)) >= 675 ZAP_MAXVALUELEN) { 676 error = SET_ERROR(E2BIG); 677 break; 678 } 679 } else if (zpool_prop_feature(propname)) { 680 if (nvpair_type(elem) != DATA_TYPE_UINT64) { 681 error = SET_ERROR(EINVAL); 682 break; 683 } 684 685 if (nvpair_value_uint64(elem, &intval) != 0) { 686 error = SET_ERROR(EINVAL); 687 break; 688 } 689 690 if (intval != 0) { 691 error = SET_ERROR(EINVAL); 692 break; 693 } 694 695 fname = strchr(propname, '@') + 1; 696 if (zfeature_lookup_name(fname, NULL) != 0) { 697 error = SET_ERROR(EINVAL); 698 break; 699 } 700 701 has_feature = B_TRUE; 702 } else { 703 error = SET_ERROR(EINVAL); 704 break; 705 } 706 break; 707 708 case ZPOOL_PROP_VERSION: 709 error = nvpair_value_uint64(elem, &intval); 710 if (!error && 711 (intval < spa_version(spa) || 712 intval > SPA_VERSION_BEFORE_FEATURES || 713 has_feature)) 714 error = SET_ERROR(EINVAL); 715 break; 716 717 case ZPOOL_PROP_DEDUP_TABLE_QUOTA: 718 error = nvpair_value_uint64(elem, &intval); 719 break; 720 721 case ZPOOL_PROP_DELEGATION: 722 case ZPOOL_PROP_AUTOREPLACE: 723 case ZPOOL_PROP_LISTSNAPS: 724 case ZPOOL_PROP_AUTOEXPAND: 725 case ZPOOL_PROP_AUTOTRIM: 726 error = nvpair_value_uint64(elem, &intval); 727 if (!error && intval > 1) 728 error = SET_ERROR(EINVAL); 729 break; 730 731 case ZPOOL_PROP_MULTIHOST: 732 error = nvpair_value_uint64(elem, &intval); 733 if (!error && intval > 1) 734 error = SET_ERROR(EINVAL); 735 736 if (!error) { 737 uint32_t hostid = zone_get_hostid(NULL); 738 if (hostid) 739 spa->spa_hostid = hostid; 740 else 741 error = SET_ERROR(ENOTSUP); 742 } 743 744 break; 745 746 case ZPOOL_PROP_BOOTFS: 747 /* 748 * If the pool version is less than SPA_VERSION_BOOTFS, 749 * or the pool is still being created (version == 0), 750 * the bootfs property cannot be set. 751 */ 752 if (spa_version(spa) < SPA_VERSION_BOOTFS) { 753 error = SET_ERROR(ENOTSUP); 754 break; 755 } 756 757 /* 758 * Make sure the vdev config is bootable 759 */ 760 if (!vdev_is_bootable(spa->spa_root_vdev)) { 761 error = SET_ERROR(ENOTSUP); 762 break; 763 } 764 765 reset_bootfs = 1; 766 767 error = nvpair_value_string(elem, &strval); 768 769 if (!error) { 770 objset_t *os; 771 772 if (strval == NULL || strval[0] == '\0') { 773 objnum = zpool_prop_default_numeric( 774 ZPOOL_PROP_BOOTFS); 775 break; 776 } 777 778 error = dmu_objset_hold(strval, FTAG, &os); 779 if (error != 0) 780 break; 781 782 /* Must be ZPL. */ 783 if (dmu_objset_type(os) != DMU_OST_ZFS) { 784 error = SET_ERROR(ENOTSUP); 785 } else { 786 objnum = dmu_objset_id(os); 787 } 788 dmu_objset_rele(os, FTAG); 789 } 790 break; 791 792 case ZPOOL_PROP_FAILUREMODE: 793 error = nvpair_value_uint64(elem, &intval); 794 if (!error && intval > ZIO_FAILURE_MODE_PANIC) 795 error = SET_ERROR(EINVAL); 796 797 /* 798 * This is a special case which only occurs when 799 * the pool has completely failed. This allows 800 * the user to change the in-core failmode property 801 * without syncing it out to disk (I/Os might 802 * currently be blocked). We do this by returning 803 * EIO to the caller (spa_prop_set) to trick it 804 * into thinking we encountered a property validation 805 * error. 
806 */ 807 if (!error && spa_suspended(spa)) { 808 spa->spa_failmode = intval; 809 error = SET_ERROR(EIO); 810 } 811 break; 812 813 case ZPOOL_PROP_CACHEFILE: 814 if ((error = nvpair_value_string(elem, &strval)) != 0) 815 break; 816 817 if (strval[0] == '\0') 818 break; 819 820 if (strcmp(strval, "none") == 0) 821 break; 822 823 if (strval[0] != '/') { 824 error = SET_ERROR(EINVAL); 825 break; 826 } 827 828 slash = strrchr(strval, '/'); 829 ASSERT(slash != NULL); 830 831 if (slash[1] == '\0' || strcmp(slash, "/.") == 0 || 832 strcmp(slash, "/..") == 0) 833 error = SET_ERROR(EINVAL); 834 break; 835 836 case ZPOOL_PROP_COMMENT: 837 if ((error = nvpair_value_string(elem, &strval)) != 0) 838 break; 839 for (check = strval; *check != '\0'; check++) { 840 if (!isprint(*check)) { 841 error = SET_ERROR(EINVAL); 842 break; 843 } 844 } 845 if (strlen(strval) > ZPROP_MAX_COMMENT) 846 error = SET_ERROR(E2BIG); 847 break; 848 849 default: 850 break; 851 } 852 853 if (error) 854 break; 855 } 856 857 (void) nvlist_remove_all(props, 858 zpool_prop_to_name(ZPOOL_PROP_DEDUPDITTO)); 859 860 if (!error && reset_bootfs) { 861 error = nvlist_remove(props, 862 zpool_prop_to_name(ZPOOL_PROP_BOOTFS), DATA_TYPE_STRING); 863 864 if (!error) { 865 error = nvlist_add_uint64(props, 866 zpool_prop_to_name(ZPOOL_PROP_BOOTFS), objnum); 867 } 868 } 869 870 return (error); 871 } 872 873 void 874 spa_configfile_set(spa_t *spa, nvlist_t *nvp, boolean_t need_sync) 875 { 876 const char *cachefile; 877 spa_config_dirent_t *dp; 878 879 if (nvlist_lookup_string(nvp, zpool_prop_to_name(ZPOOL_PROP_CACHEFILE), 880 &cachefile) != 0) 881 return; 882 883 dp = kmem_alloc(sizeof (spa_config_dirent_t), 884 KM_SLEEP); 885 886 if (cachefile[0] == '\0') 887 dp->scd_path = spa_strdup(spa_config_path); 888 else if (strcmp(cachefile, "none") == 0) 889 dp->scd_path = NULL; 890 else 891 dp->scd_path = spa_strdup(cachefile); 892 893 list_insert_head(&spa->spa_config_list, dp); 894 if (need_sync) 895 spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE); 896 } 897 898 int 899 spa_prop_set(spa_t *spa, nvlist_t *nvp) 900 { 901 int error; 902 nvpair_t *elem = NULL; 903 boolean_t need_sync = B_FALSE; 904 905 if ((error = spa_prop_validate(spa, nvp)) != 0) 906 return (error); 907 908 while ((elem = nvlist_next_nvpair(nvp, elem)) != NULL) { 909 zpool_prop_t prop = zpool_name_to_prop(nvpair_name(elem)); 910 911 if (prop == ZPOOL_PROP_CACHEFILE || 912 prop == ZPOOL_PROP_ALTROOT || 913 prop == ZPOOL_PROP_READONLY) 914 continue; 915 916 if (prop == ZPOOL_PROP_INVAL && 917 zfs_prop_user(nvpair_name(elem))) { 918 need_sync = B_TRUE; 919 break; 920 } 921 922 if (prop == ZPOOL_PROP_VERSION || prop == ZPOOL_PROP_INVAL) { 923 uint64_t ver = 0; 924 925 if (prop == ZPOOL_PROP_VERSION) { 926 VERIFY(nvpair_value_uint64(elem, &ver) == 0); 927 } else { 928 ASSERT(zpool_prop_feature(nvpair_name(elem))); 929 ver = SPA_VERSION_FEATURES; 930 need_sync = B_TRUE; 931 } 932 933 /* Save time if the version is already set. */ 934 if (ver == spa_version(spa)) 935 continue; 936 937 /* 938 * In addition to the pool directory object, we might 939 * create the pool properties object, the features for 940 * read object, the features for write object, or the 941 * feature descriptions object. 
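			 * Hence the 6 passed to dsl_sync_task() below as the
			 * blocks-modified estimate.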
 */
			error = dsl_sync_task(spa->spa_name, NULL,
			    spa_sync_version, &ver,
			    6, ZFS_SPACE_CHECK_RESERVED);
			if (error)
				return (error);
			continue;
		}

		need_sync = B_TRUE;
		break;
	}

	if (need_sync) {
		return (dsl_sync_task(spa->spa_name, NULL, spa_sync_props,
		    nvp, 6, ZFS_SPACE_CHECK_RESERVED));
	}

	return (0);
}

/*
 * If the bootfs property value is dsobj, clear it.
 */
void
spa_prop_clear_bootfs(spa_t *spa, uint64_t dsobj, dmu_tx_t *tx)
{
	if (spa->spa_bootfs == dsobj && spa->spa_pool_props_object != 0) {
		VERIFY(zap_remove(spa->spa_meta_objset,
		    spa->spa_pool_props_object,
		    zpool_prop_to_name(ZPOOL_PROP_BOOTFS), tx) == 0);
		spa->spa_bootfs = 0;
	}
}

static int
spa_change_guid_check(void *arg, dmu_tx_t *tx)
{
	uint64_t *newguid __maybe_unused = arg;
	spa_t *spa = dmu_tx_pool(tx)->dp_spa;
	vdev_t *rvd = spa->spa_root_vdev;
	uint64_t vdev_state;

	if (spa_feature_is_active(spa, SPA_FEATURE_POOL_CHECKPOINT)) {
		int error = (spa_has_checkpoint(spa)) ?
		    ZFS_ERR_CHECKPOINT_EXISTS : ZFS_ERR_DISCARDING_CHECKPOINT;
		return (SET_ERROR(error));
	}

	spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
	vdev_state = rvd->vdev_state;
	spa_config_exit(spa, SCL_STATE, FTAG);

	if (vdev_state != VDEV_STATE_HEALTHY)
		return (SET_ERROR(ENXIO));

	ASSERT3U(spa_guid(spa), !=, *newguid);

	return (0);
}

static void
spa_change_guid_sync(void *arg, dmu_tx_t *tx)
{
	uint64_t *newguid = arg;
	spa_t *spa = dmu_tx_pool(tx)->dp_spa;
	uint64_t oldguid;
	vdev_t *rvd = spa->spa_root_vdev;

	oldguid = spa_guid(spa);

	spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
	rvd->vdev_guid = *newguid;
	rvd->vdev_guid_sum += (*newguid - oldguid);
	vdev_config_dirty(rvd);
	spa_config_exit(spa, SCL_STATE, FTAG);

	spa_history_log_internal(spa, "guid change", tx, "old=%llu new=%llu",
	    (u_longlong_t)oldguid, (u_longlong_t)*newguid);
}

/*
 * Change the GUID for the pool. This is done so that we can later
 * re-import a pool built from a clone of our own vdevs. We will modify
 * the root vdev's guid, our own pool guid, and then mark all of our
 * vdevs dirty. Note that we must make sure that all our vdevs are
 * online when we do this, or else any vdevs that weren't present
 * would be orphaned from our pool. We are also going to issue a
 * sysevent to update any watchers.
 *
 * The GUID of the pool will be changed to the value pointed to by guidp.
 * The GUID may not be set to the reserved value of 0.
 * The new GUID will be generated if guidp is NULL.
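 * (This is the machinery behind "zpool reguid"; when the caller does not
 * request a specific GUID, guidp is NULL and spa_generate_guid() picks a
 * fresh one.)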
1035 */ 1036 int 1037 spa_change_guid(spa_t *spa, const uint64_t *guidp) 1038 { 1039 uint64_t guid; 1040 int error; 1041 1042 mutex_enter(&spa->spa_vdev_top_lock); 1043 mutex_enter(&spa_namespace_lock); 1044 1045 if (guidp != NULL) { 1046 guid = *guidp; 1047 if (guid == 0) { 1048 error = SET_ERROR(EINVAL); 1049 goto out; 1050 } 1051 1052 if (spa_guid_exists(guid, 0)) { 1053 error = SET_ERROR(EEXIST); 1054 goto out; 1055 } 1056 } else { 1057 guid = spa_generate_guid(NULL); 1058 } 1059 1060 error = dsl_sync_task(spa->spa_name, spa_change_guid_check, 1061 spa_change_guid_sync, &guid, 5, ZFS_SPACE_CHECK_RESERVED); 1062 1063 if (error == 0) { 1064 /* 1065 * Clear the kobj flag from all the vdevs to allow 1066 * vdev_cache_process_kobj_evt() to post events to all the 1067 * vdevs since GUID is updated. 1068 */ 1069 vdev_clear_kobj_evt(spa->spa_root_vdev); 1070 for (int i = 0; i < spa->spa_l2cache.sav_count; i++) 1071 vdev_clear_kobj_evt(spa->spa_l2cache.sav_vdevs[i]); 1072 1073 spa_write_cachefile(spa, B_FALSE, B_TRUE, B_TRUE); 1074 spa_event_notify(spa, NULL, NULL, ESC_ZFS_POOL_REGUID); 1075 } 1076 1077 out: 1078 mutex_exit(&spa_namespace_lock); 1079 mutex_exit(&spa->spa_vdev_top_lock); 1080 1081 return (error); 1082 } 1083 1084 /* 1085 * ========================================================================== 1086 * SPA state manipulation (open/create/destroy/import/export) 1087 * ========================================================================== 1088 */ 1089 1090 static int 1091 spa_error_entry_compare(const void *a, const void *b) 1092 { 1093 const spa_error_entry_t *sa = (const spa_error_entry_t *)a; 1094 const spa_error_entry_t *sb = (const spa_error_entry_t *)b; 1095 int ret; 1096 1097 ret = memcmp(&sa->se_bookmark, &sb->se_bookmark, 1098 sizeof (zbookmark_phys_t)); 1099 1100 return (TREE_ISIGN(ret)); 1101 } 1102 1103 /* 1104 * Utility function which retrieves copies of the current logs and 1105 * re-initializes them in the process. 1106 */ 1107 void 1108 spa_get_errlists(spa_t *spa, avl_tree_t *last, avl_tree_t *scrub) 1109 { 1110 ASSERT(MUTEX_HELD(&spa->spa_errlist_lock)); 1111 1112 memcpy(last, &spa->spa_errlist_last, sizeof (avl_tree_t)); 1113 memcpy(scrub, &spa->spa_errlist_scrub, sizeof (avl_tree_t)); 1114 1115 avl_create(&spa->spa_errlist_scrub, 1116 spa_error_entry_compare, sizeof (spa_error_entry_t), 1117 offsetof(spa_error_entry_t, se_avl)); 1118 avl_create(&spa->spa_errlist_last, 1119 spa_error_entry_compare, sizeof (spa_error_entry_t), 1120 offsetof(spa_error_entry_t, se_avl)); 1121 } 1122 1123 static void 1124 spa_taskqs_init(spa_t *spa, zio_type_t t, zio_taskq_type_t q) 1125 { 1126 const zio_taskq_info_t *ztip = &zio_taskqs[t][q]; 1127 enum zti_modes mode = ztip->zti_mode; 1128 uint_t value = ztip->zti_value; 1129 uint_t count = ztip->zti_count; 1130 spa_taskqs_t *tqs = &spa->spa_zio_taskq[t][q]; 1131 uint_t cpus, flags = TASKQ_DYNAMIC; 1132 1133 switch (mode) { 1134 case ZTI_MODE_FIXED: 1135 ASSERT3U(value, >, 0); 1136 break; 1137 1138 case ZTI_MODE_SYNC: 1139 1140 /* 1141 * Create one wr_iss taskq for every 'zio_taskq_write_tpq' CPUs, 1142 * not to exceed the number of spa allocators, and align to it. 
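		 * A worked example of the computation below (assuming the
		 * default of 4 allocators and zio_taskq_batch_pct = 80) on a
		 * 64-CPU system: cpus = 51, 51 / 16 -> 3 taskqs, capped at
		 * the 4 allocators, then reduced to 2 so the allocator count
		 * divides evenly; each taskq then gets value = (80 + 1) / 2
		 * = 40, i.e. up to 40% of the CPUs in threads.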
1143 */ 1144 cpus = MAX(1, boot_ncpus * zio_taskq_batch_pct / 100); 1145 count = MAX(1, cpus / MAX(1, zio_taskq_write_tpq)); 1146 count = MAX(count, (zio_taskq_batch_pct + 99) / 100); 1147 count = MIN(count, spa->spa_alloc_count); 1148 while (spa->spa_alloc_count % count != 0 && 1149 spa->spa_alloc_count < count * 2) 1150 count--; 1151 1152 /* 1153 * zio_taskq_batch_pct is unbounded and may exceed 100%, but no 1154 * single taskq may have more threads than 100% of online cpus. 1155 */ 1156 value = (zio_taskq_batch_pct + count / 2) / count; 1157 value = MIN(value, 100); 1158 flags |= TASKQ_THREADS_CPU_PCT; 1159 break; 1160 1161 case ZTI_MODE_SCALE: 1162 flags |= TASKQ_THREADS_CPU_PCT; 1163 /* 1164 * We want more taskqs to reduce lock contention, but we want 1165 * less for better request ordering and CPU utilization. 1166 */ 1167 cpus = MAX(1, boot_ncpus * zio_taskq_batch_pct / 100); 1168 if (zio_taskq_batch_tpq > 0) { 1169 count = MAX(1, (cpus + zio_taskq_batch_tpq / 2) / 1170 zio_taskq_batch_tpq); 1171 } else { 1172 /* 1173 * Prefer 6 threads per taskq, but no more taskqs 1174 * than threads in them on large systems. For 80%: 1175 * 1176 * taskq taskq total 1177 * cpus taskqs percent threads threads 1178 * ------- ------- ------- ------- ------- 1179 * 1 1 80% 1 1 1180 * 2 1 80% 1 1 1181 * 4 1 80% 3 3 1182 * 8 2 40% 3 6 1183 * 16 3 27% 4 12 1184 * 32 5 16% 5 25 1185 * 64 7 11% 7 49 1186 * 128 10 8% 10 100 1187 * 256 14 6% 15 210 1188 */ 1189 count = 1 + cpus / 6; 1190 while (count * count > cpus) 1191 count--; 1192 } 1193 /* Limit each taskq within 100% to not trigger assertion. */ 1194 count = MAX(count, (zio_taskq_batch_pct + 99) / 100); 1195 value = (zio_taskq_batch_pct + count / 2) / count; 1196 break; 1197 1198 case ZTI_MODE_NULL: 1199 tqs->stqs_count = 0; 1200 tqs->stqs_taskq = NULL; 1201 return; 1202 1203 default: 1204 panic("unrecognized mode for %s_%s taskq (%u:%u) in " 1205 "spa_taskqs_init()", 1206 zio_type_name[t], zio_taskq_types[q], mode, value); 1207 break; 1208 } 1209 1210 ASSERT3U(count, >, 0); 1211 tqs->stqs_count = count; 1212 tqs->stqs_taskq = kmem_alloc(count * sizeof (taskq_t *), KM_SLEEP); 1213 1214 for (uint_t i = 0; i < count; i++) { 1215 taskq_t *tq; 1216 char name[32]; 1217 1218 if (count > 1) 1219 (void) snprintf(name, sizeof (name), "%s_%s_%u", 1220 zio_type_name[t], zio_taskq_types[q], i); 1221 else 1222 (void) snprintf(name, sizeof (name), "%s_%s", 1223 zio_type_name[t], zio_taskq_types[q]); 1224 1225 #ifdef HAVE_SYSDC 1226 if (zio_taskq_sysdc && spa->spa_proc != &p0) { 1227 (void) zio_taskq_basedc; 1228 tq = taskq_create_sysdc(name, value, 50, INT_MAX, 1229 spa->spa_proc, zio_taskq_basedc, flags); 1230 } else { 1231 #endif 1232 pri_t pri = maxclsyspri; 1233 /* 1234 * The write issue taskq can be extremely CPU 1235 * intensive. Run it at slightly less important 1236 * priority than the other taskqs. 1237 * 1238 * Under Linux and FreeBSD this means incrementing 1239 * the priority value as opposed to platforms like 1240 * illumos where it should be decremented. 1241 * 1242 * On FreeBSD, if priorities divided by four (RQ_PPQ) 1243 * are equal then a difference between them is 1244 * insignificant. 
1245 */ 1246 if (t == ZIO_TYPE_WRITE && q == ZIO_TASKQ_ISSUE) { 1247 #if defined(__linux__) 1248 pri++; 1249 #elif defined(__FreeBSD__) 1250 pri += 4; 1251 #else 1252 #error "unknown OS" 1253 #endif 1254 } 1255 tq = taskq_create_proc(name, value, pri, 50, 1256 INT_MAX, spa->spa_proc, flags); 1257 #ifdef HAVE_SYSDC 1258 } 1259 #endif 1260 1261 tqs->stqs_taskq[i] = tq; 1262 } 1263 } 1264 1265 static void 1266 spa_taskqs_fini(spa_t *spa, zio_type_t t, zio_taskq_type_t q) 1267 { 1268 spa_taskqs_t *tqs = &spa->spa_zio_taskq[t][q]; 1269 1270 if (tqs->stqs_taskq == NULL) { 1271 ASSERT3U(tqs->stqs_count, ==, 0); 1272 return; 1273 } 1274 1275 for (uint_t i = 0; i < tqs->stqs_count; i++) { 1276 ASSERT3P(tqs->stqs_taskq[i], !=, NULL); 1277 taskq_destroy(tqs->stqs_taskq[i]); 1278 } 1279 1280 kmem_free(tqs->stqs_taskq, tqs->stqs_count * sizeof (taskq_t *)); 1281 tqs->stqs_taskq = NULL; 1282 } 1283 1284 #ifdef _KERNEL 1285 /* 1286 * The READ and WRITE rows of zio_taskqs are configurable at module load time 1287 * by setting zio_taskq_read or zio_taskq_write. 1288 * 1289 * Example (the defaults for READ and WRITE) 1290 * zio_taskq_read='fixed,1,8 null scale null' 1291 * zio_taskq_write='sync null scale null' 1292 * 1293 * Each sets the entire row at a time. 1294 * 1295 * 'fixed' is parameterised: fixed,Q,T where Q is number of taskqs, T is number 1296 * of threads per taskq. 1297 * 1298 * 'null' can only be set on the high-priority queues (queue selection for 1299 * high-priority queues will fall back to the regular queue if the high-pri 1300 * is NULL. 1301 */ 1302 static const char *const modes[ZTI_NMODES] = { 1303 "fixed", "scale", "sync", "null" 1304 }; 1305 1306 /* Parse the incoming config string. Modifies cfg */ 1307 static int 1308 spa_taskq_param_set(zio_type_t t, char *cfg) 1309 { 1310 int err = 0; 1311 1312 zio_taskq_info_t row[ZIO_TASKQ_TYPES] = {{0}}; 1313 1314 char *next = cfg, *tok, *c; 1315 1316 /* 1317 * Parse out each element from the string and fill `row`. The entire 1318 * row has to be set at once, so any errors are flagged by just 1319 * breaking out of this loop early. 1320 */ 1321 uint_t q; 1322 for (q = 0; q < ZIO_TASKQ_TYPES; q++) { 1323 /* `next` is the start of the config */ 1324 if (next == NULL) 1325 break; 1326 1327 /* Eat up leading space */ 1328 while (isspace(*next)) 1329 next++; 1330 if (*next == '\0') 1331 break; 1332 1333 /* Mode ends at space or end of string */ 1334 tok = next; 1335 next = strchr(tok, ' '); 1336 if (next != NULL) *next++ = '\0'; 1337 1338 /* Parameters start after a comma */ 1339 c = strchr(tok, ','); 1340 if (c != NULL) *c++ = '\0'; 1341 1342 /* Match mode string */ 1343 uint_t mode; 1344 for (mode = 0; mode < ZTI_NMODES; mode++) 1345 if (strcmp(tok, modes[mode]) == 0) 1346 break; 1347 if (mode == ZTI_NMODES) 1348 break; 1349 1350 /* Invalid canary */ 1351 row[q].zti_mode = ZTI_NMODES; 1352 1353 /* Per-mode setup */ 1354 switch (mode) { 1355 1356 /* 1357 * FIXED is parameterised: number of queues, and number of 1358 * threads per queue. 1359 */ 1360 case ZTI_MODE_FIXED: { 1361 /* No parameters? 
*/ 1362 if (c == NULL || *c == '\0') 1363 break; 1364 1365 /* Find next parameter */ 1366 tok = c; 1367 c = strchr(tok, ','); 1368 if (c == NULL) 1369 break; 1370 1371 /* Take digits and convert */ 1372 unsigned long long nq; 1373 if (!(isdigit(*tok))) 1374 break; 1375 err = ddi_strtoull(tok, &tok, 10, &nq); 1376 /* Must succeed and also end at the next param sep */ 1377 if (err != 0 || tok != c) 1378 break; 1379 1380 /* Move past the comma */ 1381 tok++; 1382 /* Need another number */ 1383 if (!(isdigit(*tok))) 1384 break; 1385 /* Remember start to make sure we moved */ 1386 c = tok; 1387 1388 /* Take digits */ 1389 unsigned long long ntpq; 1390 err = ddi_strtoull(tok, &tok, 10, &ntpq); 1391 /* Must succeed, and moved forward */ 1392 if (err != 0 || tok == c || *tok != '\0') 1393 break; 1394 1395 /* 1396 * sanity; zero queues/threads make no sense, and 1397 * 16K is almost certainly more than anyone will ever 1398 * need and avoids silly numbers like UINT32_MAX 1399 */ 1400 if (nq == 0 || nq >= 16384 || 1401 ntpq == 0 || ntpq >= 16384) 1402 break; 1403 1404 const zio_taskq_info_t zti = ZTI_P(ntpq, nq); 1405 row[q] = zti; 1406 break; 1407 } 1408 1409 case ZTI_MODE_SCALE: { 1410 const zio_taskq_info_t zti = ZTI_SCALE; 1411 row[q] = zti; 1412 break; 1413 } 1414 1415 case ZTI_MODE_SYNC: { 1416 const zio_taskq_info_t zti = ZTI_SYNC; 1417 row[q] = zti; 1418 break; 1419 } 1420 1421 case ZTI_MODE_NULL: { 1422 /* 1423 * Can only null the high-priority queues; the general- 1424 * purpose ones have to exist. 1425 */ 1426 if (q != ZIO_TASKQ_ISSUE_HIGH && 1427 q != ZIO_TASKQ_INTERRUPT_HIGH) 1428 break; 1429 1430 const zio_taskq_info_t zti = ZTI_NULL; 1431 row[q] = zti; 1432 break; 1433 } 1434 1435 default: 1436 break; 1437 } 1438 1439 /* Ensure we set a mode */ 1440 if (row[q].zti_mode == ZTI_NMODES) 1441 break; 1442 } 1443 1444 /* Didn't get a full row, fail */ 1445 if (q < ZIO_TASKQ_TYPES) 1446 return (SET_ERROR(EINVAL)); 1447 1448 /* Eat trailing space */ 1449 if (next != NULL) 1450 while (isspace(*next)) 1451 next++; 1452 1453 /* If there's anything left over then fail */ 1454 if (next != NULL && *next != '\0') 1455 return (SET_ERROR(EINVAL)); 1456 1457 /* Success! 
Copy it into the real config */
	for (q = 0; q < ZIO_TASKQ_TYPES; q++)
		zio_taskqs[t][q] = row[q];

	return (0);
}

static int
spa_taskq_param_get(zio_type_t t, char *buf, boolean_t add_newline)
{
	int pos = 0;

	/* Build parameter string from live config */
	const char *sep = "";
	for (uint_t q = 0; q < ZIO_TASKQ_TYPES; q++) {
		const zio_taskq_info_t *zti = &zio_taskqs[t][q];
		if (zti->zti_mode == ZTI_MODE_FIXED)
			pos += sprintf(&buf[pos], "%s%s,%u,%u", sep,
			    modes[zti->zti_mode], zti->zti_count,
			    zti->zti_value);
		else
			pos += sprintf(&buf[pos], "%s%s", sep,
			    modes[zti->zti_mode]);
		sep = " ";
	}

	if (add_newline)
		buf[pos++] = '\n';
	buf[pos] = '\0';

	return (pos);
}

#ifdef __linux__
static int
spa_taskq_read_param_set(const char *val, zfs_kernel_param_t *kp)
{
	char *cfg = kmem_strdup(val);
	int err = spa_taskq_param_set(ZIO_TYPE_READ, cfg);
	kmem_free(cfg, strlen(val)+1);
	return (-err);
}
static int
spa_taskq_read_param_get(char *buf, zfs_kernel_param_t *kp)
{
	return (spa_taskq_param_get(ZIO_TYPE_READ, buf, TRUE));
}

static int
spa_taskq_write_param_set(const char *val, zfs_kernel_param_t *kp)
{
	char *cfg = kmem_strdup(val);
	int err = spa_taskq_param_set(ZIO_TYPE_WRITE, cfg);
	kmem_free(cfg, strlen(val)+1);
	return (-err);
}
static int
spa_taskq_write_param_get(char *buf, zfs_kernel_param_t *kp)
{
	return (spa_taskq_param_get(ZIO_TYPE_WRITE, buf, TRUE));
}
#else
/*
 * On FreeBSD load-time parameters can be set up before malloc() is available,
 * so we have to do all the parsing work on the stack.
 */
#define SPA_TASKQ_PARAM_MAX (128)

static int
spa_taskq_read_param(ZFS_MODULE_PARAM_ARGS)
{
	char buf[SPA_TASKQ_PARAM_MAX];
	int err;

	(void) spa_taskq_param_get(ZIO_TYPE_READ, buf, FALSE);
	err = sysctl_handle_string(oidp, buf, sizeof (buf), req);
	if (err || req->newptr == NULL)
		return (err);
	return (spa_taskq_param_set(ZIO_TYPE_READ, buf));
}

static int
spa_taskq_write_param(ZFS_MODULE_PARAM_ARGS)
{
	char buf[SPA_TASKQ_PARAM_MAX];
	int err;

	(void) spa_taskq_param_get(ZIO_TYPE_WRITE, buf, FALSE);
	err = sysctl_handle_string(oidp, buf, sizeof (buf), req);
	if (err || req->newptr == NULL)
		return (err);
	return (spa_taskq_param_set(ZIO_TYPE_WRITE, buf));
}
#endif
#endif /* _KERNEL */

/*
 * Dispatch a task to the appropriate taskq for the ZFS I/O type and priority.
 * Note that a type may have multiple discrete taskqs to avoid lock contention
 * on the taskq itself.
 */
void
spa_taskq_dispatch(spa_t *spa, zio_type_t t, zio_taskq_type_t q,
    task_func_t *func, zio_t *zio, boolean_t cutinline)
{
	spa_taskqs_t *tqs = &spa->spa_zio_taskq[t][q];
	taskq_t *tq;

	ASSERT3P(tqs->stqs_taskq, !=, NULL);
	ASSERT3U(tqs->stqs_count, !=, 0);

	/*
	 * NB: We are assuming that the zio can only be dispatched
	 * to a single taskq at a time. It would be a grievous error
	 * to dispatch the zio to another taskq at the same time.
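	 * (The taskq_empty_ent() assertion below checks exactly that: the
	 * zio's embedded io_tqent must not already be queued on any taskq.)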
1572 */ 1573 ASSERT(zio); 1574 ASSERT(taskq_empty_ent(&zio->io_tqent)); 1575 1576 if (tqs->stqs_count == 1) { 1577 tq = tqs->stqs_taskq[0]; 1578 } else if ((t == ZIO_TYPE_WRITE) && (q == ZIO_TASKQ_ISSUE) && 1579 ZIO_HAS_ALLOCATOR(zio)) { 1580 tq = tqs->stqs_taskq[zio->io_allocator % tqs->stqs_count]; 1581 } else { 1582 tq = tqs->stqs_taskq[((uint64_t)gethrtime()) % tqs->stqs_count]; 1583 } 1584 1585 taskq_dispatch_ent(tq, func, zio, cutinline ? TQ_FRONT : 0, 1586 &zio->io_tqent); 1587 } 1588 1589 static void 1590 spa_create_zio_taskqs(spa_t *spa) 1591 { 1592 for (int t = 0; t < ZIO_TYPES; t++) { 1593 for (int q = 0; q < ZIO_TASKQ_TYPES; q++) { 1594 spa_taskqs_init(spa, t, q); 1595 } 1596 } 1597 } 1598 1599 #if defined(_KERNEL) && defined(HAVE_SPA_THREAD) 1600 static void 1601 spa_thread(void *arg) 1602 { 1603 psetid_t zio_taskq_psrset_bind = PS_NONE; 1604 callb_cpr_t cprinfo; 1605 1606 spa_t *spa = arg; 1607 user_t *pu = PTOU(curproc); 1608 1609 CALLB_CPR_INIT(&cprinfo, &spa->spa_proc_lock, callb_generic_cpr, 1610 spa->spa_name); 1611 1612 ASSERT(curproc != &p0); 1613 (void) snprintf(pu->u_psargs, sizeof (pu->u_psargs), 1614 "zpool-%s", spa->spa_name); 1615 (void) strlcpy(pu->u_comm, pu->u_psargs, sizeof (pu->u_comm)); 1616 1617 /* bind this thread to the requested psrset */ 1618 if (zio_taskq_psrset_bind != PS_NONE) { 1619 pool_lock(); 1620 mutex_enter(&cpu_lock); 1621 mutex_enter(&pidlock); 1622 mutex_enter(&curproc->p_lock); 1623 1624 if (cpupart_bind_thread(curthread, zio_taskq_psrset_bind, 1625 0, NULL, NULL) == 0) { 1626 curthread->t_bind_pset = zio_taskq_psrset_bind; 1627 } else { 1628 cmn_err(CE_WARN, 1629 "Couldn't bind process for zfs pool \"%s\" to " 1630 "pset %d\n", spa->spa_name, zio_taskq_psrset_bind); 1631 } 1632 1633 mutex_exit(&curproc->p_lock); 1634 mutex_exit(&pidlock); 1635 mutex_exit(&cpu_lock); 1636 pool_unlock(); 1637 } 1638 1639 #ifdef HAVE_SYSDC 1640 if (zio_taskq_sysdc) { 1641 sysdc_thread_enter(curthread, 100, 0); 1642 } 1643 #endif 1644 1645 spa->spa_proc = curproc; 1646 spa->spa_did = curthread->t_did; 1647 1648 spa_create_zio_taskqs(spa); 1649 1650 mutex_enter(&spa->spa_proc_lock); 1651 ASSERT(spa->spa_proc_state == SPA_PROC_CREATED); 1652 1653 spa->spa_proc_state = SPA_PROC_ACTIVE; 1654 cv_broadcast(&spa->spa_proc_cv); 1655 1656 CALLB_CPR_SAFE_BEGIN(&cprinfo); 1657 while (spa->spa_proc_state == SPA_PROC_ACTIVE) 1658 cv_wait(&spa->spa_proc_cv, &spa->spa_proc_lock); 1659 CALLB_CPR_SAFE_END(&cprinfo, &spa->spa_proc_lock); 1660 1661 ASSERT(spa->spa_proc_state == SPA_PROC_DEACTIVATE); 1662 spa->spa_proc_state = SPA_PROC_GONE; 1663 spa->spa_proc = &p0; 1664 cv_broadcast(&spa->spa_proc_cv); 1665 CALLB_CPR_EXIT(&cprinfo); /* drops spa_proc_lock */ 1666 1667 mutex_enter(&curproc->p_lock); 1668 lwp_exit(); 1669 } 1670 #endif 1671 1672 extern metaslab_ops_t *metaslab_allocator(spa_t *spa); 1673 1674 /* 1675 * Activate an uninitialized pool. 
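 * This sets up the metaslab classes, an optional covering process
 * (HAVE_SPA_THREAD), the per-pool zio taskqs, the dirty lists and error
 * trees, and the auxiliary taskqs; spa_deactivate() undoes all of it.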
1676 */ 1677 static void 1678 spa_activate(spa_t *spa, spa_mode_t mode) 1679 { 1680 metaslab_ops_t *msp = metaslab_allocator(spa); 1681 ASSERT(spa->spa_state == POOL_STATE_UNINITIALIZED); 1682 1683 spa->spa_state = POOL_STATE_ACTIVE; 1684 spa->spa_mode = mode; 1685 spa->spa_read_spacemaps = spa_mode_readable_spacemaps; 1686 1687 spa->spa_normal_class = metaslab_class_create(spa, msp); 1688 spa->spa_log_class = metaslab_class_create(spa, msp); 1689 spa->spa_embedded_log_class = metaslab_class_create(spa, msp); 1690 spa->spa_special_class = metaslab_class_create(spa, msp); 1691 spa->spa_dedup_class = metaslab_class_create(spa, msp); 1692 1693 /* Try to create a covering process */ 1694 mutex_enter(&spa->spa_proc_lock); 1695 ASSERT(spa->spa_proc_state == SPA_PROC_NONE); 1696 ASSERT(spa->spa_proc == &p0); 1697 spa->spa_did = 0; 1698 1699 #ifdef HAVE_SPA_THREAD 1700 /* Only create a process if we're going to be around a while. */ 1701 if (spa_create_process && strcmp(spa->spa_name, TRYIMPORT_NAME) != 0) { 1702 if (newproc(spa_thread, (caddr_t)spa, syscid, maxclsyspri, 1703 NULL, 0) == 0) { 1704 spa->spa_proc_state = SPA_PROC_CREATED; 1705 while (spa->spa_proc_state == SPA_PROC_CREATED) { 1706 cv_wait(&spa->spa_proc_cv, 1707 &spa->spa_proc_lock); 1708 } 1709 ASSERT(spa->spa_proc_state == SPA_PROC_ACTIVE); 1710 ASSERT(spa->spa_proc != &p0); 1711 ASSERT(spa->spa_did != 0); 1712 } else { 1713 #ifdef _KERNEL 1714 cmn_err(CE_WARN, 1715 "Couldn't create process for zfs pool \"%s\"\n", 1716 spa->spa_name); 1717 #endif 1718 } 1719 } 1720 #endif /* HAVE_SPA_THREAD */ 1721 mutex_exit(&spa->spa_proc_lock); 1722 1723 /* If we didn't create a process, we need to create our taskqs. */ 1724 if (spa->spa_proc == &p0) { 1725 spa_create_zio_taskqs(spa); 1726 } 1727 1728 for (size_t i = 0; i < TXG_SIZE; i++) { 1729 spa->spa_txg_zio[i] = zio_root(spa, NULL, NULL, 1730 ZIO_FLAG_CANFAIL); 1731 } 1732 1733 list_create(&spa->spa_config_dirty_list, sizeof (vdev_t), 1734 offsetof(vdev_t, vdev_config_dirty_node)); 1735 list_create(&spa->spa_evicting_os_list, sizeof (objset_t), 1736 offsetof(objset_t, os_evicting_node)); 1737 list_create(&spa->spa_state_dirty_list, sizeof (vdev_t), 1738 offsetof(vdev_t, vdev_state_dirty_node)); 1739 1740 txg_list_create(&spa->spa_vdev_txg_list, spa, 1741 offsetof(struct vdev, vdev_txg_node)); 1742 1743 avl_create(&spa->spa_errlist_scrub, 1744 spa_error_entry_compare, sizeof (spa_error_entry_t), 1745 offsetof(spa_error_entry_t, se_avl)); 1746 avl_create(&spa->spa_errlist_last, 1747 spa_error_entry_compare, sizeof (spa_error_entry_t), 1748 offsetof(spa_error_entry_t, se_avl)); 1749 avl_create(&spa->spa_errlist_healed, 1750 spa_error_entry_compare, sizeof (spa_error_entry_t), 1751 offsetof(spa_error_entry_t, se_avl)); 1752 1753 spa_activate_os(spa); 1754 1755 spa_keystore_init(&spa->spa_keystore); 1756 1757 /* 1758 * This taskq is used to perform zvol-minor-related tasks 1759 * asynchronously. This has several advantages, including easy 1760 * resolution of various deadlocks. 1761 * 1762 * The taskq must be single threaded to ensure tasks are always 1763 * processed in the order in which they were dispatched. 1764 * 1765 * A taskq per pool allows one to keep the pools independent. 1766 * This way if one pool is suspended, it will not impact another. 1767 * 1768 * The preferred location to dispatch a zvol minor task is a sync 1769 * task. In this context, there is easy access to the spa_t and minimal 1770 * error handling is required because the sync task must succeed. 
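	 * (Hence the taskq_create() call below asks for exactly one thread.)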
1771 */ 1772 spa->spa_zvol_taskq = taskq_create("z_zvol", 1, defclsyspri, 1773 1, INT_MAX, 0); 1774 1775 /* 1776 * The taskq to preload metaslabs. 1777 */ 1778 spa->spa_metaslab_taskq = taskq_create("z_metaslab", 1779 metaslab_preload_pct, maxclsyspri, 1, INT_MAX, 1780 TASKQ_DYNAMIC | TASKQ_THREADS_CPU_PCT); 1781 1782 /* 1783 * Taskq dedicated to prefetcher threads: this is used to prevent the 1784 * pool traverse code from monopolizing the global (and limited) 1785 * system_taskq by inappropriately scheduling long running tasks on it. 1786 */ 1787 spa->spa_prefetch_taskq = taskq_create("z_prefetch", 100, 1788 defclsyspri, 1, INT_MAX, TASKQ_DYNAMIC | TASKQ_THREADS_CPU_PCT); 1789 1790 /* 1791 * The taskq to upgrade datasets in this pool. Currently used by 1792 * feature SPA_FEATURE_USEROBJ_ACCOUNTING/SPA_FEATURE_PROJECT_QUOTA. 1793 */ 1794 spa->spa_upgrade_taskq = taskq_create("z_upgrade", 100, 1795 defclsyspri, 1, INT_MAX, TASKQ_DYNAMIC | TASKQ_THREADS_CPU_PCT); 1796 } 1797 1798 /* 1799 * Opposite of spa_activate(). 1800 */ 1801 static void 1802 spa_deactivate(spa_t *spa) 1803 { 1804 ASSERT(spa->spa_sync_on == B_FALSE); 1805 ASSERT(spa->spa_dsl_pool == NULL); 1806 ASSERT(spa->spa_root_vdev == NULL); 1807 ASSERT(spa->spa_async_zio_root == NULL); 1808 ASSERT(spa->spa_state != POOL_STATE_UNINITIALIZED); 1809 1810 spa_evicting_os_wait(spa); 1811 1812 if (spa->spa_zvol_taskq) { 1813 taskq_destroy(spa->spa_zvol_taskq); 1814 spa->spa_zvol_taskq = NULL; 1815 } 1816 1817 if (spa->spa_metaslab_taskq) { 1818 taskq_destroy(spa->spa_metaslab_taskq); 1819 spa->spa_metaslab_taskq = NULL; 1820 } 1821 1822 if (spa->spa_prefetch_taskq) { 1823 taskq_destroy(spa->spa_prefetch_taskq); 1824 spa->spa_prefetch_taskq = NULL; 1825 } 1826 1827 if (spa->spa_upgrade_taskq) { 1828 taskq_destroy(spa->spa_upgrade_taskq); 1829 spa->spa_upgrade_taskq = NULL; 1830 } 1831 1832 txg_list_destroy(&spa->spa_vdev_txg_list); 1833 1834 list_destroy(&spa->spa_config_dirty_list); 1835 list_destroy(&spa->spa_evicting_os_list); 1836 list_destroy(&spa->spa_state_dirty_list); 1837 1838 taskq_cancel_id(system_delay_taskq, spa->spa_deadman_tqid); 1839 1840 for (int t = 0; t < ZIO_TYPES; t++) { 1841 for (int q = 0; q < ZIO_TASKQ_TYPES; q++) { 1842 spa_taskqs_fini(spa, t, q); 1843 } 1844 } 1845 1846 for (size_t i = 0; i < TXG_SIZE; i++) { 1847 ASSERT3P(spa->spa_txg_zio[i], !=, NULL); 1848 VERIFY0(zio_wait(spa->spa_txg_zio[i])); 1849 spa->spa_txg_zio[i] = NULL; 1850 } 1851 1852 metaslab_class_destroy(spa->spa_normal_class); 1853 spa->spa_normal_class = NULL; 1854 1855 metaslab_class_destroy(spa->spa_log_class); 1856 spa->spa_log_class = NULL; 1857 1858 metaslab_class_destroy(spa->spa_embedded_log_class); 1859 spa->spa_embedded_log_class = NULL; 1860 1861 metaslab_class_destroy(spa->spa_special_class); 1862 spa->spa_special_class = NULL; 1863 1864 metaslab_class_destroy(spa->spa_dedup_class); 1865 spa->spa_dedup_class = NULL; 1866 1867 /* 1868 * If this was part of an import or the open otherwise failed, we may 1869 * still have errors left in the queues. Empty them just in case. 
1870 */ 1871 spa_errlog_drain(spa); 1872 avl_destroy(&spa->spa_errlist_scrub); 1873 avl_destroy(&spa->spa_errlist_last); 1874 avl_destroy(&spa->spa_errlist_healed); 1875 1876 spa_keystore_fini(&spa->spa_keystore); 1877 1878 spa->spa_state = POOL_STATE_UNINITIALIZED; 1879 1880 mutex_enter(&spa->spa_proc_lock); 1881 if (spa->spa_proc_state != SPA_PROC_NONE) { 1882 ASSERT(spa->spa_proc_state == SPA_PROC_ACTIVE); 1883 spa->spa_proc_state = SPA_PROC_DEACTIVATE; 1884 cv_broadcast(&spa->spa_proc_cv); 1885 while (spa->spa_proc_state == SPA_PROC_DEACTIVATE) { 1886 ASSERT(spa->spa_proc != &p0); 1887 cv_wait(&spa->spa_proc_cv, &spa->spa_proc_lock); 1888 } 1889 ASSERT(spa->spa_proc_state == SPA_PROC_GONE); 1890 spa->spa_proc_state = SPA_PROC_NONE; 1891 } 1892 ASSERT(spa->spa_proc == &p0); 1893 mutex_exit(&spa->spa_proc_lock); 1894 1895 /* 1896 * We want to make sure spa_thread() has actually exited the ZFS 1897 * module, so that the module can't be unloaded out from underneath 1898 * it. 1899 */ 1900 if (spa->spa_did != 0) { 1901 thread_join(spa->spa_did); 1902 spa->spa_did = 0; 1903 } 1904 1905 spa_deactivate_os(spa); 1906 1907 } 1908 1909 /* 1910 * Verify a pool configuration, and construct the vdev tree appropriately. This 1911 * will create all the necessary vdevs in the appropriate layout, with each vdev 1912 * in the CLOSED state. This will prep the pool before open/creation/import. 1913 * All vdev validation is done by the vdev_alloc() routine. 1914 */ 1915 int 1916 spa_config_parse(spa_t *spa, vdev_t **vdp, nvlist_t *nv, vdev_t *parent, 1917 uint_t id, int atype) 1918 { 1919 nvlist_t **child; 1920 uint_t children; 1921 int error; 1922 1923 if ((error = vdev_alloc(spa, vdp, nv, parent, id, atype)) != 0) 1924 return (error); 1925 1926 if ((*vdp)->vdev_ops->vdev_op_leaf) 1927 return (0); 1928 1929 error = nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN, 1930 &child, &children); 1931 1932 if (error == ENOENT) 1933 return (0); 1934 1935 if (error) { 1936 vdev_free(*vdp); 1937 *vdp = NULL; 1938 return (SET_ERROR(EINVAL)); 1939 } 1940 1941 for (int c = 0; c < children; c++) { 1942 vdev_t *vd; 1943 if ((error = spa_config_parse(spa, &vd, child[c], *vdp, c, 1944 atype)) != 0) { 1945 vdev_free(*vdp); 1946 *vdp = NULL; 1947 return (error); 1948 } 1949 } 1950 1951 ASSERT(*vdp != NULL); 1952 1953 return (0); 1954 } 1955 1956 static boolean_t 1957 spa_should_flush_logs_on_unload(spa_t *spa) 1958 { 1959 if (!spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP)) 1960 return (B_FALSE); 1961 1962 if (!spa_writeable(spa)) 1963 return (B_FALSE); 1964 1965 if (!spa->spa_sync_on) 1966 return (B_FALSE); 1967 1968 if (spa_state(spa) != POOL_STATE_EXPORTED) 1969 return (B_FALSE); 1970 1971 if (zfs_keep_log_spacemaps_at_export) 1972 return (B_FALSE); 1973 1974 return (B_TRUE); 1975 } 1976 1977 /* 1978 * Opens a transaction that will set the flag that will instruct 1979 * spa_sync to attempt to flush all the metaslabs for that txg. 
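 * The transaction carries no changes of its own; it only pins a txg, which
 * is recorded in spa_log_flushall_txg and then waited on via
 * txg_wait_synced().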
1980 */ 1981 static void 1982 spa_unload_log_sm_flush_all(spa_t *spa) 1983 { 1984 dmu_tx_t *tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir); 1985 VERIFY0(dmu_tx_assign(tx, TXG_WAIT)); 1986 1987 ASSERT3U(spa->spa_log_flushall_txg, ==, 0); 1988 spa->spa_log_flushall_txg = dmu_tx_get_txg(tx); 1989 1990 dmu_tx_commit(tx); 1991 txg_wait_synced(spa_get_dsl(spa), spa->spa_log_flushall_txg); 1992 } 1993 1994 static void 1995 spa_unload_log_sm_metadata(spa_t *spa) 1996 { 1997 void *cookie = NULL; 1998 spa_log_sm_t *sls; 1999 log_summary_entry_t *e; 2000 2001 while ((sls = avl_destroy_nodes(&spa->spa_sm_logs_by_txg, 2002 &cookie)) != NULL) { 2003 VERIFY0(sls->sls_mscount); 2004 kmem_free(sls, sizeof (spa_log_sm_t)); 2005 } 2006 2007 while ((e = list_remove_head(&spa->spa_log_summary)) != NULL) { 2008 VERIFY0(e->lse_mscount); 2009 kmem_free(e, sizeof (log_summary_entry_t)); 2010 } 2011 2012 spa->spa_unflushed_stats.sus_nblocks = 0; 2013 spa->spa_unflushed_stats.sus_memused = 0; 2014 spa->spa_unflushed_stats.sus_blocklimit = 0; 2015 } 2016 2017 static void 2018 spa_destroy_aux_threads(spa_t *spa) 2019 { 2020 if (spa->spa_condense_zthr != NULL) { 2021 zthr_destroy(spa->spa_condense_zthr); 2022 spa->spa_condense_zthr = NULL; 2023 } 2024 if (spa->spa_checkpoint_discard_zthr != NULL) { 2025 zthr_destroy(spa->spa_checkpoint_discard_zthr); 2026 spa->spa_checkpoint_discard_zthr = NULL; 2027 } 2028 if (spa->spa_livelist_delete_zthr != NULL) { 2029 zthr_destroy(spa->spa_livelist_delete_zthr); 2030 spa->spa_livelist_delete_zthr = NULL; 2031 } 2032 if (spa->spa_livelist_condense_zthr != NULL) { 2033 zthr_destroy(spa->spa_livelist_condense_zthr); 2034 spa->spa_livelist_condense_zthr = NULL; 2035 } 2036 if (spa->spa_raidz_expand_zthr != NULL) { 2037 zthr_destroy(spa->spa_raidz_expand_zthr); 2038 spa->spa_raidz_expand_zthr = NULL; 2039 } 2040 } 2041 2042 /* 2043 * Opposite of spa_load(). 2044 */ 2045 static void 2046 spa_unload(spa_t *spa) 2047 { 2048 ASSERT(MUTEX_HELD(&spa_namespace_lock) || 2049 spa->spa_export_thread == curthread); 2050 ASSERT(spa_state(spa) != POOL_STATE_UNINITIALIZED); 2051 2052 spa_import_progress_remove(spa_guid(spa)); 2053 spa_load_note(spa, "UNLOADING"); 2054 2055 spa_wake_waiters(spa); 2056 2057 /* 2058 * If we have set the spa_final_txg, we have already performed the 2059 * tasks below in spa_export_common(). We should not redo it here since 2060 * we delay the final TXGs beyond what spa_final_txg is set at. 2061 */ 2062 if (spa->spa_final_txg == UINT64_MAX) { 2063 /* 2064 * If the log space map feature is enabled and the pool is 2065 * getting exported (but not destroyed), we want to spend some 2066 * time flushing as many metaslabs as we can in an attempt to 2067 * destroy log space maps and save import time. 2068 */ 2069 if (spa_should_flush_logs_on_unload(spa)) 2070 spa_unload_log_sm_flush_all(spa); 2071 2072 /* 2073 * Stop async tasks. 2074 */ 2075 spa_async_suspend(spa); 2076 2077 if (spa->spa_root_vdev) { 2078 vdev_t *root_vdev = spa->spa_root_vdev; 2079 vdev_initialize_stop_all(root_vdev, 2080 VDEV_INITIALIZE_ACTIVE); 2081 vdev_trim_stop_all(root_vdev, VDEV_TRIM_ACTIVE); 2082 vdev_autotrim_stop_all(spa); 2083 vdev_rebuild_stop_all(spa); 2084 } 2085 } 2086 2087 /* 2088 * Stop syncing. 2089 */ 2090 if (spa->spa_sync_on) { 2091 txg_sync_stop(spa->spa_dsl_pool); 2092 spa->spa_sync_on = B_FALSE; 2093 } 2094 2095 /* 2096 * This ensures that there is no async metaslab prefetching 2097 * while we attempt to unload the spa. 
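 * taskq_wait() returns only after every preload task already dispatched to spa_metaslab_taskq has completed.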
2098 */ 2099 taskq_wait(spa->spa_metaslab_taskq); 2100 2101 if (spa->spa_mmp.mmp_thread) 2102 mmp_thread_stop(spa); 2103 2104 /* 2105 * Wait for any outstanding async I/O to complete. 2106 */ 2107 if (spa->spa_async_zio_root != NULL) { 2108 for (int i = 0; i < max_ncpus; i++) 2109 (void) zio_wait(spa->spa_async_zio_root[i]); 2110 kmem_free(spa->spa_async_zio_root, max_ncpus * sizeof (void *)); 2111 spa->spa_async_zio_root = NULL; 2112 } 2113 2114 if (spa->spa_vdev_removal != NULL) { 2115 spa_vdev_removal_destroy(spa->spa_vdev_removal); 2116 spa->spa_vdev_removal = NULL; 2117 } 2118 2119 spa_destroy_aux_threads(spa); 2120 2121 spa_condense_fini(spa); 2122 2123 bpobj_close(&spa->spa_deferred_bpobj); 2124 2125 spa_config_enter(spa, SCL_ALL, spa, RW_WRITER); 2126 2127 /* 2128 * Close all vdevs. 2129 */ 2130 if (spa->spa_root_vdev) 2131 vdev_free(spa->spa_root_vdev); 2132 ASSERT(spa->spa_root_vdev == NULL); 2133 2134 /* 2135 * Close the dsl pool. 2136 */ 2137 if (spa->spa_dsl_pool) { 2138 dsl_pool_close(spa->spa_dsl_pool); 2139 spa->spa_dsl_pool = NULL; 2140 spa->spa_meta_objset = NULL; 2141 } 2142 2143 ddt_unload(spa); 2144 brt_unload(spa); 2145 spa_unload_log_sm_metadata(spa); 2146 2147 /* 2148 * Drop and purge level 2 cache 2149 */ 2150 spa_l2cache_drop(spa); 2151 2152 if (spa->spa_spares.sav_vdevs) { 2153 for (int i = 0; i < spa->spa_spares.sav_count; i++) 2154 vdev_free(spa->spa_spares.sav_vdevs[i]); 2155 kmem_free(spa->spa_spares.sav_vdevs, 2156 spa->spa_spares.sav_count * sizeof (void *)); 2157 spa->spa_spares.sav_vdevs = NULL; 2158 } 2159 if (spa->spa_spares.sav_config) { 2160 nvlist_free(spa->spa_spares.sav_config); 2161 spa->spa_spares.sav_config = NULL; 2162 } 2163 spa->spa_spares.sav_count = 0; 2164 2165 if (spa->spa_l2cache.sav_vdevs) { 2166 for (int i = 0; i < spa->spa_l2cache.sav_count; i++) { 2167 vdev_clear_stats(spa->spa_l2cache.sav_vdevs[i]); 2168 vdev_free(spa->spa_l2cache.sav_vdevs[i]); 2169 } 2170 kmem_free(spa->spa_l2cache.sav_vdevs, 2171 spa->spa_l2cache.sav_count * sizeof (void *)); 2172 spa->spa_l2cache.sav_vdevs = NULL; 2173 } 2174 if (spa->spa_l2cache.sav_config) { 2175 nvlist_free(spa->spa_l2cache.sav_config); 2176 spa->spa_l2cache.sav_config = NULL; 2177 } 2178 spa->spa_l2cache.sav_count = 0; 2179 2180 spa->spa_async_suspended = 0; 2181 2182 spa->spa_indirect_vdevs_loaded = B_FALSE; 2183 2184 if (spa->spa_comment != NULL) { 2185 spa_strfree(spa->spa_comment); 2186 spa->spa_comment = NULL; 2187 } 2188 if (spa->spa_compatibility != NULL) { 2189 spa_strfree(spa->spa_compatibility); 2190 spa->spa_compatibility = NULL; 2191 } 2192 2193 spa->spa_raidz_expand = NULL; 2194 2195 spa_config_exit(spa, SCL_ALL, spa); 2196 } 2197 2198 /* 2199 * Load (or re-load) the current list of vdevs describing the active spares for 2200 * this pool. When this is called, we have some form of basic information in 2201 * 'spa_spares.sav_config'. We parse this into vdevs, try to open them, and 2202 * then re-generate a more complete list including status information. 2203 */ 2204 void 2205 spa_load_spares(spa_t *spa) 2206 { 2207 nvlist_t **spares; 2208 uint_t nspares; 2209 int i; 2210 vdev_t *vd, *tvd; 2211 2212 #ifndef _KERNEL 2213 /* 2214 * zdb opens both the current state of the pool and the 2215 * checkpointed state (if present), with a different spa_t. 2216 * 2217 * As spare vdevs are shared among open pools, we skip loading 2218 * them when we load the checkpointed state of the pool. 
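 * Under zdb both spa_t's are opened read-only, so the spa_writeable() check below is what skips this work for them.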
2219 */ 2220 if (!spa_writeable(spa)) 2221 return; 2222 #endif 2223 2224 ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL); 2225 2226 /* 2227 * First, close and free any existing spare vdevs. 2228 */ 2229 if (spa->spa_spares.sav_vdevs) { 2230 for (i = 0; i < spa->spa_spares.sav_count; i++) { 2231 vd = spa->spa_spares.sav_vdevs[i]; 2232 2233 /* Undo the call to spa_activate() below */ 2234 if ((tvd = spa_lookup_by_guid(spa, vd->vdev_guid, 2235 B_FALSE)) != NULL && tvd->vdev_isspare) 2236 spa_spare_remove(tvd); 2237 vdev_close(vd); 2238 vdev_free(vd); 2239 } 2240 2241 kmem_free(spa->spa_spares.sav_vdevs, 2242 spa->spa_spares.sav_count * sizeof (void *)); 2243 } 2244 2245 if (spa->spa_spares.sav_config == NULL) 2246 nspares = 0; 2247 else 2248 VERIFY0(nvlist_lookup_nvlist_array(spa->spa_spares.sav_config, 2249 ZPOOL_CONFIG_SPARES, &spares, &nspares)); 2250 2251 spa->spa_spares.sav_count = (int)nspares; 2252 spa->spa_spares.sav_vdevs = NULL; 2253 2254 if (nspares == 0) 2255 return; 2256 2257 /* 2258 * Construct the array of vdevs, opening them to get status in the 2259 * process. For each spare, there are potentially two different vdev_t 2260 * structures associated with it: one in the list of spares (used only 2261 * for basic validation purposes) and one in the active vdev 2262 * configuration (if it's spared in). During this phase we open and 2263 * validate each vdev on the spare list. If the vdev also exists in the 2264 * active configuration, then we also mark this vdev as an active spare. 2265 */ 2266 spa->spa_spares.sav_vdevs = kmem_zalloc(nspares * sizeof (void *), 2267 KM_SLEEP); 2268 for (i = 0; i < spa->spa_spares.sav_count; i++) { 2269 VERIFY(spa_config_parse(spa, &vd, spares[i], NULL, 0, 2270 VDEV_ALLOC_SPARE) == 0); 2271 ASSERT(vd != NULL); 2272 2273 spa->spa_spares.sav_vdevs[i] = vd; 2274 2275 if ((tvd = spa_lookup_by_guid(spa, vd->vdev_guid, 2276 B_FALSE)) != NULL) { 2277 if (!tvd->vdev_isspare) 2278 spa_spare_add(tvd); 2279 2280 /* 2281 * We only mark the spare active if we were successfully 2282 * able to load the vdev. Otherwise, importing a pool 2283 * with a bad active spare would result in strange 2284 * behavior, because multiple pools would think the spare 2285 * is actively in use. 2286 * 2287 * There is a vulnerability here to an equally bizarre 2288 * circumstance, where a dead active spare is later 2289 * brought back to life (onlined or otherwise). Given 2290 * the rarity of this scenario, and the extra complexity 2291 * it adds, we ignore the possibility. 2292 */ 2293 if (!vdev_is_dead(tvd)) 2294 spa_spare_activate(tvd); 2295 } 2296 2297 vd->vdev_top = vd; 2298 vd->vdev_aux = &spa->spa_spares; 2299 2300 if (vdev_open(vd) != 0) 2301 continue; 2302 2303 if (vdev_validate_aux(vd) == 0) 2304 spa_spare_add(vd); 2305 } 2306 2307 /* 2308 * Recompute the stashed list of spares, with status information 2309 * this time.
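 * Each entry is regenerated with vdev_config_generate(..., VDEV_CONFIG_SPARE) so the stashed config reflects the open and validate results from above.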
2310 */ 2311 fnvlist_remove(spa->spa_spares.sav_config, ZPOOL_CONFIG_SPARES); 2312 2313 spares = kmem_alloc(spa->spa_spares.sav_count * sizeof (void *), 2314 KM_SLEEP); 2315 for (i = 0; i < spa->spa_spares.sav_count; i++) 2316 spares[i] = vdev_config_generate(spa, 2317 spa->spa_spares.sav_vdevs[i], B_TRUE, VDEV_CONFIG_SPARE); 2318 fnvlist_add_nvlist_array(spa->spa_spares.sav_config, 2319 ZPOOL_CONFIG_SPARES, (const nvlist_t * const *)spares, 2320 spa->spa_spares.sav_count); 2321 for (i = 0; i < spa->spa_spares.sav_count; i++) 2322 nvlist_free(spares[i]); 2323 kmem_free(spares, spa->spa_spares.sav_count * sizeof (void *)); 2324 } 2325 2326 /* 2327 * Load (or re-load) the current list of vdevs describing the active l2cache for 2328 * this pool. When this is called, we have some form of basic information in 2329 * 'spa_l2cache.sav_config'. We parse this into vdevs, try to open them, and 2330 * then re-generate a more complete list including status information. 2331 * Devices which are already active have their details maintained, and are 2332 * not re-opened. 2333 */ 2334 void 2335 spa_load_l2cache(spa_t *spa) 2336 { 2337 nvlist_t **l2cache = NULL; 2338 uint_t nl2cache; 2339 int i, j, oldnvdevs; 2340 uint64_t guid; 2341 vdev_t *vd, **oldvdevs, **newvdevs; 2342 spa_aux_vdev_t *sav = &spa->spa_l2cache; 2343 2344 #ifndef _KERNEL 2345 /* 2346 * zdb opens both the current state of the pool and the 2347 * checkpointed state (if present), with a different spa_t. 2348 * 2349 * As L2 caches are part of the ARC which is shared among open 2350 * pools, we skip loading them when we load the checkpointed 2351 * state of the pool. 2352 */ 2353 if (!spa_writeable(spa)) 2354 return; 2355 #endif 2356 2357 ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL); 2358 2359 oldvdevs = sav->sav_vdevs; 2360 oldnvdevs = sav->sav_count; 2361 sav->sav_vdevs = NULL; 2362 sav->sav_count = 0; 2363 2364 if (sav->sav_config == NULL) { 2365 nl2cache = 0; 2366 newvdevs = NULL; 2367 goto out; 2368 } 2369 2370 VERIFY0(nvlist_lookup_nvlist_array(sav->sav_config, 2371 ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache)); 2372 newvdevs = kmem_alloc(nl2cache * sizeof (void *), KM_SLEEP); 2373 2374 /* 2375 * Process new nvlist of vdevs. 2376 */ 2377 for (i = 0; i < nl2cache; i++) { 2378 guid = fnvlist_lookup_uint64(l2cache[i], ZPOOL_CONFIG_GUID); 2379 2380 newvdevs[i] = NULL; 2381 for (j = 0; j < oldnvdevs; j++) { 2382 vd = oldvdevs[j]; 2383 if (vd != NULL && guid == vd->vdev_guid) { 2384 /* 2385 * Retain previous vdev for add/remove ops. 2386 */ 2387 newvdevs[i] = vd; 2388 oldvdevs[j] = NULL; 2389 break; 2390 } 2391 } 2392 2393 if (newvdevs[i] == NULL) { 2394 /* 2395 * Create new vdev 2396 */ 2397 VERIFY(spa_config_parse(spa, &vd, l2cache[i], NULL, 0, 2398 VDEV_ALLOC_L2CACHE) == 0); 2399 ASSERT(vd != NULL); 2400 newvdevs[i] = vd; 2401 2402 /* 2403 * Commit this vdev as an l2cache device, 2404 * even if it fails to open. 2405 */ 2406 spa_l2cache_add(vd); 2407 2408 vd->vdev_top = vd; 2409 vd->vdev_aux = sav; 2410 2411 spa_l2cache_activate(vd); 2412 2413 if (vdev_open(vd) != 0) 2414 continue; 2415 2416 (void) vdev_validate_aux(vd); 2417 2418 if (!vdev_is_dead(vd)) 2419 l2arc_add_vdev(spa, vd); 2420 2421 /* 2422 * Upon cache device addition to a pool or pool 2423 * creation with a cache device or if the header 2424 * of the device is invalid we issue an async 2425 * TRIM command for the whole device which will 2426 * execute if l2arc_trim_ahead > 0. 
2427 */ 2428 spa_async_request(spa, SPA_ASYNC_L2CACHE_TRIM); 2429 } 2430 } 2431 2432 sav->sav_vdevs = newvdevs; 2433 sav->sav_count = (int)nl2cache; 2434 2435 /* 2436 * Recompute the stashed list of l2cache devices, with status 2437 * information this time. 2438 */ 2439 fnvlist_remove(sav->sav_config, ZPOOL_CONFIG_L2CACHE); 2440 2441 if (sav->sav_count > 0) 2442 l2cache = kmem_alloc(sav->sav_count * sizeof (void *), 2443 KM_SLEEP); 2444 for (i = 0; i < sav->sav_count; i++) 2445 l2cache[i] = vdev_config_generate(spa, 2446 sav->sav_vdevs[i], B_TRUE, VDEV_CONFIG_L2CACHE); 2447 fnvlist_add_nvlist_array(sav->sav_config, ZPOOL_CONFIG_L2CACHE, 2448 (const nvlist_t * const *)l2cache, sav->sav_count); 2449 2450 out: 2451 /* 2452 * Purge vdevs that were dropped 2453 */ 2454 if (oldvdevs) { 2455 for (i = 0; i < oldnvdevs; i++) { 2456 uint64_t pool; 2457 2458 vd = oldvdevs[i]; 2459 if (vd != NULL) { 2460 ASSERT(vd->vdev_isl2cache); 2461 2462 if (spa_l2cache_exists(vd->vdev_guid, &pool) && 2463 pool != 0ULL && l2arc_vdev_present(vd)) 2464 l2arc_remove_vdev(vd); 2465 vdev_clear_stats(vd); 2466 vdev_free(vd); 2467 } 2468 } 2469 2470 kmem_free(oldvdevs, oldnvdevs * sizeof (void *)); 2471 } 2472 2473 for (i = 0; i < sav->sav_count; i++) 2474 nvlist_free(l2cache[i]); 2475 if (sav->sav_count) 2476 kmem_free(l2cache, sav->sav_count * sizeof (void *)); 2477 } 2478 2479 static int 2480 load_nvlist(spa_t *spa, uint64_t obj, nvlist_t **value) 2481 { 2482 dmu_buf_t *db; 2483 char *packed = NULL; 2484 size_t nvsize = 0; 2485 int error; 2486 *value = NULL; 2487 2488 error = dmu_bonus_hold(spa->spa_meta_objset, obj, FTAG, &db); 2489 if (error) 2490 return (error); 2491 2492 nvsize = *(uint64_t *)db->db_data; 2493 dmu_buf_rele(db, FTAG); 2494 2495 packed = vmem_alloc(nvsize, KM_SLEEP); 2496 error = dmu_read(spa->spa_meta_objset, obj, 0, nvsize, packed, 2497 DMU_READ_PREFETCH); 2498 if (error == 0) 2499 error = nvlist_unpack(packed, nvsize, value, 0); 2500 vmem_free(packed, nvsize); 2501 2502 return (error); 2503 } 2504 2505 /* 2506 * Concrete top-level vdevs that are not missing and are not logs. At every 2507 * spa_sync we write new uberblocks to at least SPA_SYNC_MIN_VDEVS core tvds. 2508 */ 2509 static uint64_t 2510 spa_healthy_core_tvds(spa_t *spa) 2511 { 2512 vdev_t *rvd = spa->spa_root_vdev; 2513 uint64_t tvds = 0; 2514 2515 for (uint64_t i = 0; i < rvd->vdev_children; i++) { 2516 vdev_t *vd = rvd->vdev_child[i]; 2517 if (vd->vdev_islog) 2518 continue; 2519 if (vdev_is_concrete(vd) && !vdev_is_dead(vd)) 2520 tvds++; 2521 } 2522 2523 return (tvds); 2524 } 2525 2526 /* 2527 * Checks to see if the given vdev could not be opened, in which case we post a 2528 * sysevent to notify the autoreplace code that the device has been removed. 2529 */ 2530 static void 2531 spa_check_removed(vdev_t *vd) 2532 { 2533 for (uint64_t c = 0; c < vd->vdev_children; c++) 2534 spa_check_removed(vd->vdev_child[c]); 2535 2536 if (vd->vdev_ops->vdev_op_leaf && vdev_is_dead(vd) && 2537 vdev_is_concrete(vd)) { 2538 zfs_post_autoreplace(vd->vdev_spa, vd); 2539 spa_event_notify(vd->vdev_spa, vd, NULL, ESC_ZFS_VDEV_CHECK); 2540 } 2541 } 2542 2543 static int 2544 spa_check_for_missing_logs(spa_t *spa) 2545 { 2546 vdev_t *rvd = spa->spa_root_vdev; 2547 2548 /* 2549 * If we're doing a normal import, then build up any additional 2550 * diagnostic information about missing log devices. 2551 * We'll pass this up to the user for further processing. 
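 * The resulting list is attached to spa_load_info as ZPOOL_CONFIG_MISSING_DEVICES and the load fails with ENXIO.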
2552 */ 2553 if (!(spa->spa_import_flags & ZFS_IMPORT_MISSING_LOG)) { 2554 nvlist_t **child, *nv; 2555 uint64_t idx = 0; 2556 2557 child = kmem_alloc(rvd->vdev_children * sizeof (nvlist_t *), 2558 KM_SLEEP); 2559 nv = fnvlist_alloc(); 2560 2561 for (uint64_t c = 0; c < rvd->vdev_children; c++) { 2562 vdev_t *tvd = rvd->vdev_child[c]; 2563 2564 /* 2565 * We consider a device as missing only if it failed 2566 * to open (i.e. offline or faulted is not considered 2567 * as missing). 2568 */ 2569 if (tvd->vdev_islog && 2570 tvd->vdev_state == VDEV_STATE_CANT_OPEN) { 2571 child[idx++] = vdev_config_generate(spa, tvd, 2572 B_FALSE, VDEV_CONFIG_MISSING); 2573 } 2574 } 2575 2576 if (idx > 0) { 2577 fnvlist_add_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN, 2578 (const nvlist_t * const *)child, idx); 2579 fnvlist_add_nvlist(spa->spa_load_info, 2580 ZPOOL_CONFIG_MISSING_DEVICES, nv); 2581 2582 for (uint64_t i = 0; i < idx; i++) 2583 nvlist_free(child[i]); 2584 } 2585 nvlist_free(nv); 2586 kmem_free(child, rvd->vdev_children * sizeof (char **)); 2587 2588 if (idx > 0) { 2589 spa_load_failed(spa, "some log devices are missing"); 2590 vdev_dbgmsg_print_tree(rvd, 2); 2591 return (SET_ERROR(ENXIO)); 2592 } 2593 } else { 2594 for (uint64_t c = 0; c < rvd->vdev_children; c++) { 2595 vdev_t *tvd = rvd->vdev_child[c]; 2596 2597 if (tvd->vdev_islog && 2598 tvd->vdev_state == VDEV_STATE_CANT_OPEN) { 2599 spa_set_log_state(spa, SPA_LOG_CLEAR); 2600 spa_load_note(spa, "some log devices are " 2601 "missing, ZIL is dropped."); 2602 vdev_dbgmsg_print_tree(rvd, 2); 2603 break; 2604 } 2605 } 2606 } 2607 2608 return (0); 2609 } 2610 2611 /* 2612 * Check for missing log devices 2613 */ 2614 static boolean_t 2615 spa_check_logs(spa_t *spa) 2616 { 2617 boolean_t rv = B_FALSE; 2618 dsl_pool_t *dp = spa_get_dsl(spa); 2619 2620 switch (spa->spa_log_state) { 2621 default: 2622 break; 2623 case SPA_LOG_MISSING: 2624 /* need to recheck in case slog has been restored */ 2625 case SPA_LOG_UNKNOWN: 2626 rv = (dmu_objset_find_dp(dp, dp->dp_root_dir_obj, 2627 zil_check_log_chain, NULL, DS_FIND_CHILDREN) != 0); 2628 if (rv) 2629 spa_set_log_state(spa, SPA_LOG_MISSING); 2630 break; 2631 } 2632 return (rv); 2633 } 2634 2635 /* 2636 * Passivate any log vdevs (note, does not apply to embedded log metaslabs). 2637 */ 2638 static boolean_t 2639 spa_passivate_log(spa_t *spa) 2640 { 2641 vdev_t *rvd = spa->spa_root_vdev; 2642 boolean_t slog_found = B_FALSE; 2643 2644 ASSERT(spa_config_held(spa, SCL_ALLOC, RW_WRITER)); 2645 2646 for (int c = 0; c < rvd->vdev_children; c++) { 2647 vdev_t *tvd = rvd->vdev_child[c]; 2648 2649 if (tvd->vdev_islog) { 2650 ASSERT3P(tvd->vdev_log_mg, ==, NULL); 2651 metaslab_group_passivate(tvd->vdev_mg); 2652 slog_found = B_TRUE; 2653 } 2654 } 2655 2656 return (slog_found); 2657 } 2658 2659 /* 2660 * Activate any log vdevs (note, does not apply to embedded log metaslabs). 
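 * This undoes spa_passivate_log(): the metaslab group of each dedicated log vdev is reactivated so it can accept allocations again.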
2661 */ 2662 static void 2663 spa_activate_log(spa_t *spa) 2664 { 2665 vdev_t *rvd = spa->spa_root_vdev; 2666 2667 ASSERT(spa_config_held(spa, SCL_ALLOC, RW_WRITER)); 2668 2669 for (int c = 0; c < rvd->vdev_children; c++) { 2670 vdev_t *tvd = rvd->vdev_child[c]; 2671 2672 if (tvd->vdev_islog) { 2673 ASSERT3P(tvd->vdev_log_mg, ==, NULL); 2674 metaslab_group_activate(tvd->vdev_mg); 2675 } 2676 } 2677 } 2678 2679 int 2680 spa_reset_logs(spa_t *spa) 2681 { 2682 int error; 2683 2684 error = dmu_objset_find(spa_name(spa), zil_reset, 2685 NULL, DS_FIND_CHILDREN); 2686 if (error == 0) { 2687 /* 2688 * We successfully offlined the log device, sync out the 2689 * current txg so that the "stubby" block can be removed 2690 * by zil_sync(). 2691 */ 2692 txg_wait_synced(spa->spa_dsl_pool, 0); 2693 } 2694 return (error); 2695 } 2696 2697 static void 2698 spa_aux_check_removed(spa_aux_vdev_t *sav) 2699 { 2700 for (int i = 0; i < sav->sav_count; i++) 2701 spa_check_removed(sav->sav_vdevs[i]); 2702 } 2703 2704 void 2705 spa_claim_notify(zio_t *zio) 2706 { 2707 spa_t *spa = zio->io_spa; 2708 2709 if (zio->io_error) 2710 return; 2711 2712 mutex_enter(&spa->spa_props_lock); /* any mutex will do */ 2713 if (spa->spa_claim_max_txg < BP_GET_LOGICAL_BIRTH(zio->io_bp)) 2714 spa->spa_claim_max_txg = BP_GET_LOGICAL_BIRTH(zio->io_bp); 2715 mutex_exit(&spa->spa_props_lock); 2716 } 2717 2718 typedef struct spa_load_error { 2719 boolean_t sle_verify_data; 2720 uint64_t sle_meta_count; 2721 uint64_t sle_data_count; 2722 } spa_load_error_t; 2723 2724 static void 2725 spa_load_verify_done(zio_t *zio) 2726 { 2727 blkptr_t *bp = zio->io_bp; 2728 spa_load_error_t *sle = zio->io_private; 2729 dmu_object_type_t type = BP_GET_TYPE(bp); 2730 int error = zio->io_error; 2731 spa_t *spa = zio->io_spa; 2732 2733 abd_free(zio->io_abd); 2734 if (error) { 2735 if ((BP_GET_LEVEL(bp) != 0 || DMU_OT_IS_METADATA(type)) && 2736 type != DMU_OT_INTENT_LOG) 2737 atomic_inc_64(&sle->sle_meta_count); 2738 else 2739 atomic_inc_64(&sle->sle_data_count); 2740 } 2741 2742 mutex_enter(&spa->spa_scrub_lock); 2743 spa->spa_load_verify_bytes -= BP_GET_PSIZE(bp); 2744 cv_broadcast(&spa->spa_scrub_io_cv); 2745 mutex_exit(&spa->spa_scrub_lock); 2746 } 2747 2748 /* 2749 * Maximum number of inflight bytes is the log2 fraction of the arc size. 2750 * By default, we set it to 1/16th of the arc. 2751 */ 2752 static uint_t spa_load_verify_shift = 4; 2753 static int spa_load_verify_metadata = B_TRUE; 2754 static int spa_load_verify_data = B_TRUE; 2755 2756 static int 2757 spa_load_verify_cb(spa_t *spa, zilog_t *zilog, const blkptr_t *bp, 2758 const zbookmark_phys_t *zb, const dnode_phys_t *dnp, void *arg) 2759 { 2760 zio_t *rio = arg; 2761 spa_load_error_t *sle = rio->io_private; 2762 2763 (void) zilog, (void) dnp; 2764 2765 /* 2766 * Note: normally this routine will not be called if 2767 * spa_load_verify_metadata is not set. However, it may be useful 2768 * to manually set the flag after the traversal has begun. 2769 */ 2770 if (!spa_load_verify_metadata) 2771 return (0); 2772 2773 /* 2774 * Sanity check the block pointer in order to detect obvious damage 2775 * before using the contents in subsequent checks or in zio_read(). 2776 * When damaged consider it to be a metadata error since we cannot 2777 * trust the BP_GET_TYPE and BP_GET_LEVEL values. 
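 * A block pointer that fails verification is counted in sle_meta_count and skipped rather than handed to zio_read().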
2778 */ 2779 if (!zfs_blkptr_verify(spa, bp, BLK_CONFIG_NEEDED, BLK_VERIFY_LOG)) { 2780 atomic_inc_64(&sle->sle_meta_count); 2781 return (0); 2782 } 2783 2784 if (zb->zb_level == ZB_DNODE_LEVEL || BP_IS_HOLE(bp) || 2785 BP_IS_EMBEDDED(bp) || BP_IS_REDACTED(bp)) 2786 return (0); 2787 2788 if (!BP_IS_METADATA(bp) && 2789 (!spa_load_verify_data || !sle->sle_verify_data)) 2790 return (0); 2791 2792 uint64_t maxinflight_bytes = 2793 arc_target_bytes() >> spa_load_verify_shift; 2794 size_t size = BP_GET_PSIZE(bp); 2795 2796 mutex_enter(&spa->spa_scrub_lock); 2797 while (spa->spa_load_verify_bytes >= maxinflight_bytes) 2798 cv_wait(&spa->spa_scrub_io_cv, &spa->spa_scrub_lock); 2799 spa->spa_load_verify_bytes += size; 2800 mutex_exit(&spa->spa_scrub_lock); 2801 2802 zio_nowait(zio_read(rio, spa, bp, abd_alloc_for_io(size, B_FALSE), size, 2803 spa_load_verify_done, rio->io_private, ZIO_PRIORITY_SCRUB, 2804 ZIO_FLAG_SPECULATIVE | ZIO_FLAG_CANFAIL | 2805 ZIO_FLAG_SCRUB | ZIO_FLAG_RAW, zb)); 2806 return (0); 2807 } 2808 2809 static int 2810 verify_dataset_name_len(dsl_pool_t *dp, dsl_dataset_t *ds, void *arg) 2811 { 2812 (void) dp, (void) arg; 2813 2814 if (dsl_dataset_namelen(ds) >= ZFS_MAX_DATASET_NAME_LEN) 2815 return (SET_ERROR(ENAMETOOLONG)); 2816 2817 return (0); 2818 } 2819 2820 static int 2821 spa_load_verify(spa_t *spa) 2822 { 2823 zio_t *rio; 2824 spa_load_error_t sle = { 0 }; 2825 zpool_load_policy_t policy; 2826 boolean_t verify_ok = B_FALSE; 2827 int error = 0; 2828 2829 zpool_get_load_policy(spa->spa_config, &policy); 2830 2831 if (policy.zlp_rewind & ZPOOL_NEVER_REWIND || 2832 policy.zlp_maxmeta == UINT64_MAX) 2833 return (0); 2834 2835 dsl_pool_config_enter(spa->spa_dsl_pool, FTAG); 2836 error = dmu_objset_find_dp(spa->spa_dsl_pool, 2837 spa->spa_dsl_pool->dp_root_dir_obj, verify_dataset_name_len, NULL, 2838 DS_FIND_CHILDREN); 2839 dsl_pool_config_exit(spa->spa_dsl_pool, FTAG); 2840 if (error != 0) 2841 return (error); 2842 2843 /* 2844 * Verify data only if we are rewinding or an error limit was set. 2845 * Otherwise nothing except dbgmsg cares about it, so don't waste the time. 2846 */ 2847 sle.sle_verify_data = (policy.zlp_rewind & ZPOOL_REWIND_MASK) || 2848 (policy.zlp_maxdata < UINT64_MAX); 2849 2850 rio = zio_root(spa, NULL, &sle, 2851 ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE); 2852 2853 if (spa_load_verify_metadata) { 2854 if (spa->spa_extreme_rewind) { 2855 spa_load_note(spa, "performing a complete scan of the " 2856 "pool since extreme rewind is on. 
This may take " 2857 "a very long time.\n (spa_load_verify_data=%u, " 2858 "spa_load_verify_metadata=%u)", 2859 spa_load_verify_data, spa_load_verify_metadata); 2860 } 2861 2862 error = traverse_pool(spa, spa->spa_verify_min_txg, 2863 TRAVERSE_PRE | TRAVERSE_PREFETCH_METADATA | 2864 TRAVERSE_NO_DECRYPT, spa_load_verify_cb, rio); 2865 } 2866 2867 (void) zio_wait(rio); 2868 ASSERT0(spa->spa_load_verify_bytes); 2869 2870 spa->spa_load_meta_errors = sle.sle_meta_count; 2871 spa->spa_load_data_errors = sle.sle_data_count; 2872 2873 if (sle.sle_meta_count != 0 || sle.sle_data_count != 0) { 2874 spa_load_note(spa, "spa_load_verify found %llu metadata errors " 2875 "and %llu data errors", (u_longlong_t)sle.sle_meta_count, 2876 (u_longlong_t)sle.sle_data_count); 2877 } 2878 2879 if (spa_load_verify_dryrun || 2880 (!error && sle.sle_meta_count <= policy.zlp_maxmeta && 2881 sle.sle_data_count <= policy.zlp_maxdata)) { 2882 int64_t loss = 0; 2883 2884 verify_ok = B_TRUE; 2885 spa->spa_load_txg = spa->spa_uberblock.ub_txg; 2886 spa->spa_load_txg_ts = spa->spa_uberblock.ub_timestamp; 2887 2888 loss = spa->spa_last_ubsync_txg_ts - spa->spa_load_txg_ts; 2889 fnvlist_add_uint64(spa->spa_load_info, ZPOOL_CONFIG_LOAD_TIME, 2890 spa->spa_load_txg_ts); 2891 fnvlist_add_int64(spa->spa_load_info, ZPOOL_CONFIG_REWIND_TIME, 2892 loss); 2893 fnvlist_add_uint64(spa->spa_load_info, 2894 ZPOOL_CONFIG_LOAD_META_ERRORS, sle.sle_meta_count); 2895 fnvlist_add_uint64(spa->spa_load_info, 2896 ZPOOL_CONFIG_LOAD_DATA_ERRORS, sle.sle_data_count); 2897 } else { 2898 spa->spa_load_max_txg = spa->spa_uberblock.ub_txg; 2899 } 2900 2901 if (spa_load_verify_dryrun) 2902 return (0); 2903 2904 if (error) { 2905 if (error != ENXIO && error != EIO) 2906 error = SET_ERROR(EIO); 2907 return (error); 2908 } 2909 2910 return (verify_ok ? 0 : EIO); 2911 } 2912 2913 /* 2914 * Find a value in the pool props object. 2915 */ 2916 static void 2917 spa_prop_find(spa_t *spa, zpool_prop_t prop, uint64_t *val) 2918 { 2919 (void) zap_lookup(spa->spa_meta_objset, spa->spa_pool_props_object, 2920 zpool_prop_to_name(prop), sizeof (uint64_t), 1, val); 2921 } 2922 2923 /* 2924 * Find a value in the pool directory object. 
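 * These are ZAP entries under DMU_POOL_DIRECTORY_OBJECT in the MOS; a missing entry returns ENOENT, which is logged only when log_enoent is set.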
2925 */ 2926 static int 2927 spa_dir_prop(spa_t *spa, const char *name, uint64_t *val, boolean_t log_enoent) 2928 { 2929 int error = zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT, 2930 name, sizeof (uint64_t), 1, val); 2931 2932 if (error != 0 && (error != ENOENT || log_enoent)) { 2933 spa_load_failed(spa, "couldn't get '%s' value in MOS directory " 2934 "[error=%d]", name, error); 2935 } 2936 2937 return (error); 2938 } 2939 2940 static int 2941 spa_vdev_err(vdev_t *vdev, vdev_aux_t aux, int err) 2942 { 2943 vdev_set_state(vdev, B_TRUE, VDEV_STATE_CANT_OPEN, aux); 2944 return (SET_ERROR(err)); 2945 } 2946 2947 boolean_t 2948 spa_livelist_delete_check(spa_t *spa) 2949 { 2950 return (spa->spa_livelists_to_delete != 0); 2951 } 2952 2953 static boolean_t 2954 spa_livelist_delete_cb_check(void *arg, zthr_t *z) 2955 { 2956 (void) z; 2957 spa_t *spa = arg; 2958 return (spa_livelist_delete_check(spa)); 2959 } 2960 2961 static int 2962 delete_blkptr_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx) 2963 { 2964 spa_t *spa = arg; 2965 zio_free(spa, tx->tx_txg, bp); 2966 dsl_dir_diduse_space(tx->tx_pool->dp_free_dir, DD_USED_HEAD, 2967 -bp_get_dsize_sync(spa, bp), 2968 -BP_GET_PSIZE(bp), -BP_GET_UCSIZE(bp), tx); 2969 return (0); 2970 } 2971 2972 static int 2973 dsl_get_next_livelist_obj(objset_t *os, uint64_t zap_obj, uint64_t *llp) 2974 { 2975 int err; 2976 zap_cursor_t zc; 2977 zap_attribute_t *za = zap_attribute_alloc(); 2978 zap_cursor_init(&zc, os, zap_obj); 2979 err = zap_cursor_retrieve(&zc, za); 2980 zap_cursor_fini(&zc); 2981 if (err == 0) 2982 *llp = za->za_first_integer; 2983 zap_attribute_free(za); 2984 return (err); 2985 } 2986 2987 /* 2988 * Components of livelist deletion that must be performed in syncing 2989 * context: freeing block pointers and updating the pool-wide data 2990 * structures to indicate how much work is left to do 2991 */ 2992 typedef struct sublist_delete_arg { 2993 spa_t *spa; 2994 dsl_deadlist_t *ll; 2995 uint64_t key; 2996 bplist_t *to_free; 2997 } sublist_delete_arg_t; 2998 2999 static void 3000 sublist_delete_sync(void *arg, dmu_tx_t *tx) 3001 { 3002 sublist_delete_arg_t *sda = arg; 3003 spa_t *spa = sda->spa; 3004 dsl_deadlist_t *ll = sda->ll; 3005 uint64_t key = sda->key; 3006 bplist_t *to_free = sda->to_free; 3007 3008 bplist_iterate(to_free, delete_blkptr_cb, spa, tx); 3009 dsl_deadlist_remove_entry(ll, key, tx); 3010 } 3011 3012 typedef struct livelist_delete_arg { 3013 spa_t *spa; 3014 uint64_t ll_obj; 3015 uint64_t zap_obj; 3016 } livelist_delete_arg_t; 3017 3018 static void 3019 livelist_delete_sync(void *arg, dmu_tx_t *tx) 3020 { 3021 livelist_delete_arg_t *lda = arg; 3022 spa_t *spa = lda->spa; 3023 uint64_t ll_obj = lda->ll_obj; 3024 uint64_t zap_obj = lda->zap_obj; 3025 objset_t *mos = spa->spa_meta_objset; 3026 uint64_t count; 3027 3028 /* free the livelist and decrement the feature count */ 3029 VERIFY0(zap_remove_int(mos, zap_obj, ll_obj, tx)); 3030 dsl_deadlist_free(mos, ll_obj, tx); 3031 spa_feature_decr(spa, SPA_FEATURE_LIVELIST, tx); 3032 VERIFY0(zap_count(mos, zap_obj, &count)); 3033 if (count == 0) { 3034 /* no more livelists to delete */ 3035 VERIFY0(zap_remove(mos, DMU_POOL_DIRECTORY_OBJECT, 3036 DMU_POOL_DELETED_CLONES, tx)); 3037 VERIFY0(zap_destroy(mos, zap_obj, tx)); 3038 spa->spa_livelists_to_delete = 0; 3039 spa_notify_waiters(spa); 3040 } 3041 } 3042 3043 /* 3044 * Load in the value for the livelist to be removed and open it. 
Then, 3045 * load its first sublist and determine which block pointers should actually 3046 * be freed. Then, call a synctask which performs the actual frees and updates 3047 * the pool-wide livelist data. 3048 */ 3049 static void 3050 spa_livelist_delete_cb(void *arg, zthr_t *z) 3051 { 3052 spa_t *spa = arg; 3053 uint64_t ll_obj = 0, count; 3054 objset_t *mos = spa->spa_meta_objset; 3055 uint64_t zap_obj = spa->spa_livelists_to_delete; 3056 /* 3057 * Determine the next livelist to delete. This function should only 3058 * be called if there is at least one deleted clone. 3059 */ 3060 VERIFY0(dsl_get_next_livelist_obj(mos, zap_obj, &ll_obj)); 3061 VERIFY0(zap_count(mos, ll_obj, &count)); 3062 if (count > 0) { 3063 dsl_deadlist_t *ll; 3064 dsl_deadlist_entry_t *dle; 3065 bplist_t to_free; 3066 ll = kmem_zalloc(sizeof (dsl_deadlist_t), KM_SLEEP); 3067 dsl_deadlist_open(ll, mos, ll_obj); 3068 dle = dsl_deadlist_first(ll); 3069 ASSERT3P(dle, !=, NULL); 3070 bplist_create(&to_free); 3071 int err = dsl_process_sub_livelist(&dle->dle_bpobj, &to_free, 3072 z, NULL); 3073 if (err == 0) { 3074 sublist_delete_arg_t sync_arg = { 3075 .spa = spa, 3076 .ll = ll, 3077 .key = dle->dle_mintxg, 3078 .to_free = &to_free 3079 }; 3080 zfs_dbgmsg("deleting sublist (id %llu) from" 3081 " livelist %llu, %lld remaining", 3082 (u_longlong_t)dle->dle_bpobj.bpo_object, 3083 (u_longlong_t)ll_obj, (longlong_t)count - 1); 3084 VERIFY0(dsl_sync_task(spa_name(spa), NULL, 3085 sublist_delete_sync, &sync_arg, 0, 3086 ZFS_SPACE_CHECK_DESTROY)); 3087 } else { 3088 VERIFY3U(err, ==, EINTR); 3089 } 3090 bplist_clear(&to_free); 3091 bplist_destroy(&to_free); 3092 dsl_deadlist_close(ll); 3093 kmem_free(ll, sizeof (dsl_deadlist_t)); 3094 } else { 3095 livelist_delete_arg_t sync_arg = { 3096 .spa = spa, 3097 .ll_obj = ll_obj, 3098 .zap_obj = zap_obj 3099 }; 3100 zfs_dbgmsg("deletion of livelist %llu completed", 3101 (u_longlong_t)ll_obj); 3102 VERIFY0(dsl_sync_task(spa_name(spa), NULL, livelist_delete_sync, 3103 &sync_arg, 0, ZFS_SPACE_CHECK_DESTROY)); 3104 } 3105 } 3106 3107 static void 3108 spa_start_livelist_destroy_thread(spa_t *spa) 3109 { 3110 ASSERT3P(spa->spa_livelist_delete_zthr, ==, NULL); 3111 spa->spa_livelist_delete_zthr = 3112 zthr_create("z_livelist_destroy", 3113 spa_livelist_delete_cb_check, spa_livelist_delete_cb, spa, 3114 minclsyspri); 3115 } 3116 3117 typedef struct livelist_new_arg { 3118 bplist_t *allocs; 3119 bplist_t *frees; 3120 } livelist_new_arg_t; 3121 3122 static int 3123 livelist_track_new_cb(void *arg, const blkptr_t *bp, boolean_t bp_freed, 3124 dmu_tx_t *tx) 3125 { 3126 ASSERT(tx == NULL); 3127 livelist_new_arg_t *lna = arg; 3128 if (bp_freed) { 3129 bplist_append(lna->frees, bp); 3130 } else { 3131 bplist_append(lna->allocs, bp); 3132 zfs_livelist_condense_new_alloc++; 3133 } 3134 return (0); 3135 } 3136 3137 typedef struct livelist_condense_arg { 3138 spa_t *spa; 3139 bplist_t to_keep; 3140 uint64_t first_size; 3141 uint64_t next_size; 3142 } livelist_condense_arg_t; 3143 3144 static void 3145 spa_livelist_condense_sync(void *arg, dmu_tx_t *tx) 3146 { 3147 livelist_condense_arg_t *lca = arg; 3148 spa_t *spa = lca->spa; 3149 bplist_t new_frees; 3150 dsl_dataset_t *ds = spa->spa_to_condense.ds; 3151 3152 /* Have we been cancelled? 
*/ 3153 if (spa->spa_to_condense.cancelled) { 3154 zfs_livelist_condense_sync_cancel++; 3155 goto out; 3156 } 3157 3158 dsl_deadlist_entry_t *first = spa->spa_to_condense.first; 3159 dsl_deadlist_entry_t *next = spa->spa_to_condense.next; 3160 dsl_deadlist_t *ll = &ds->ds_dir->dd_livelist; 3161 3162 /* 3163 * It's possible that the livelist was changed while the zthr was 3164 * running. Therefore, we need to check for new blkptrs in the two 3165 * entries being condensed and continue to track them in the livelist. 3166 * Because of the way we handle remapped blkptrs (see dbuf_remap_impl), 3167 * it's possible that the newly added blkptrs are FREEs or ALLOCs so 3168 * we need to sort them into two different bplists. 3169 */ 3170 uint64_t first_obj = first->dle_bpobj.bpo_object; 3171 uint64_t next_obj = next->dle_bpobj.bpo_object; 3172 uint64_t cur_first_size = first->dle_bpobj.bpo_phys->bpo_num_blkptrs; 3173 uint64_t cur_next_size = next->dle_bpobj.bpo_phys->bpo_num_blkptrs; 3174 3175 bplist_create(&new_frees); 3176 livelist_new_arg_t new_bps = { 3177 .allocs = &lca->to_keep, 3178 .frees = &new_frees, 3179 }; 3180 3181 if (cur_first_size > lca->first_size) { 3182 VERIFY0(livelist_bpobj_iterate_from_nofree(&first->dle_bpobj, 3183 livelist_track_new_cb, &new_bps, lca->first_size)); 3184 } 3185 if (cur_next_size > lca->next_size) { 3186 VERIFY0(livelist_bpobj_iterate_from_nofree(&next->dle_bpobj, 3187 livelist_track_new_cb, &new_bps, lca->next_size)); 3188 } 3189 3190 dsl_deadlist_clear_entry(first, ll, tx); 3191 ASSERT(bpobj_is_empty(&first->dle_bpobj)); 3192 dsl_deadlist_remove_entry(ll, next->dle_mintxg, tx); 3193 3194 bplist_iterate(&lca->to_keep, dsl_deadlist_insert_alloc_cb, ll, tx); 3195 bplist_iterate(&new_frees, dsl_deadlist_insert_free_cb, ll, tx); 3196 bplist_destroy(&new_frees); 3197 3198 char dsname[ZFS_MAX_DATASET_NAME_LEN]; 3199 dsl_dataset_name(ds, dsname); 3200 zfs_dbgmsg("txg %llu condensing livelist of %s (id %llu), bpobj %llu " 3201 "(%llu blkptrs) and bpobj %llu (%llu blkptrs) -> bpobj %llu " 3202 "(%llu blkptrs)", (u_longlong_t)tx->tx_txg, dsname, 3203 (u_longlong_t)ds->ds_object, (u_longlong_t)first_obj, 3204 (u_longlong_t)cur_first_size, (u_longlong_t)next_obj, 3205 (u_longlong_t)cur_next_size, 3206 (u_longlong_t)first->dle_bpobj.bpo_object, 3207 (u_longlong_t)first->dle_bpobj.bpo_phys->bpo_num_blkptrs); 3208 out: 3209 dmu_buf_rele(ds->ds_dbuf, spa); 3210 spa->spa_to_condense.ds = NULL; 3211 bplist_clear(&lca->to_keep); 3212 bplist_destroy(&lca->to_keep); 3213 kmem_free(lca, sizeof (livelist_condense_arg_t)); 3214 spa->spa_to_condense.syncing = B_FALSE; 3215 } 3216 3217 static void 3218 spa_livelist_condense_cb(void *arg, zthr_t *t) 3219 { 3220 while (zfs_livelist_condense_zthr_pause && 3221 !(zthr_has_waiters(t) || zthr_iscancelled(t))) 3222 delay(1); 3223 3224 spa_t *spa = arg; 3225 dsl_deadlist_entry_t *first = spa->spa_to_condense.first; 3226 dsl_deadlist_entry_t *next = spa->spa_to_condense.next; 3227 uint64_t first_size, next_size; 3228 3229 livelist_condense_arg_t *lca = 3230 kmem_alloc(sizeof (livelist_condense_arg_t), KM_SLEEP); 3231 bplist_create(&lca->to_keep); 3232 3233 /* 3234 * Process the livelists (matching FREEs and ALLOCs) in open context 3235 * so we have minimal work in syncing context to condense. 3236 * 3237 * We save bpobj sizes (first_size and next_size) to use later in 3238 * syncing context to determine if entries were added to these sublists 3239 * while in open context. 
This is possible because the clone is still 3240 * active and open for normal writes and we want to make sure the new, 3241 * unprocessed blockpointers are inserted into the livelist normally. 3242 * 3243 * Note that dsl_process_sub_livelist() both stores the size (number of 3244 * blockpointers) and iterates over them while the bpobj's lock is held, so 3245 * the sizes returned to us are consistent with what was actually 3246 * processed. 3247 */ 3248 int err = dsl_process_sub_livelist(&first->dle_bpobj, &lca->to_keep, t, 3249 &first_size); 3250 if (err == 0) 3251 err = dsl_process_sub_livelist(&next->dle_bpobj, &lca->to_keep, 3252 t, &next_size); 3253 3254 if (err == 0) { 3255 while (zfs_livelist_condense_sync_pause && 3256 !(zthr_has_waiters(t) || zthr_iscancelled(t))) 3257 delay(1); 3258 3259 dmu_tx_t *tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir); 3260 dmu_tx_mark_netfree(tx); 3261 dmu_tx_hold_space(tx, 1); 3262 err = dmu_tx_assign(tx, TXG_NOWAIT | TXG_NOTHROTTLE); 3263 if (err == 0) { 3264 /* 3265 * Prevent the condense zthr from restarting before 3266 * the synctask completes. 3267 */ 3268 spa->spa_to_condense.syncing = B_TRUE; 3269 lca->spa = spa; 3270 lca->first_size = first_size; 3271 lca->next_size = next_size; 3272 dsl_sync_task_nowait(spa_get_dsl(spa), 3273 spa_livelist_condense_sync, lca, tx); 3274 dmu_tx_commit(tx); 3275 return; 3276 } 3277 } 3278 /* 3279 * Condensing cannot continue: either it was externally stopped or 3280 * we were unable to assign to a tx because the pool has run out of 3281 * space. In the second case, we'll just end up trying to condense 3282 * again in a later txg. 3283 */ 3284 ASSERT(err != 0); 3285 bplist_clear(&lca->to_keep); 3286 bplist_destroy(&lca->to_keep); 3287 kmem_free(lca, sizeof (livelist_condense_arg_t)); 3288 dmu_buf_rele(spa->spa_to_condense.ds->ds_dbuf, spa); 3289 spa->spa_to_condense.ds = NULL; 3290 if (err == EINTR) 3291 zfs_livelist_condense_zthr_cancel++; 3292 } 3293 3294 /* 3295 * Check that there is something to condense but that a condense is not 3296 * already in progress and that condensing has not been cancelled.
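 * This is the zthr check callback paired with spa_livelist_condense_cb().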
3297 */ 3298 static boolean_t 3299 spa_livelist_condense_cb_check(void *arg, zthr_t *z) 3300 { 3301 (void) z; 3302 spa_t *spa = arg; 3303 if ((spa->spa_to_condense.ds != NULL) && 3304 (spa->spa_to_condense.syncing == B_FALSE) && 3305 (spa->spa_to_condense.cancelled == B_FALSE)) { 3306 return (B_TRUE); 3307 } 3308 return (B_FALSE); 3309 } 3310 3311 static void 3312 spa_start_livelist_condensing_thread(spa_t *spa) 3313 { 3314 spa->spa_to_condense.ds = NULL; 3315 spa->spa_to_condense.first = NULL; 3316 spa->spa_to_condense.next = NULL; 3317 spa->spa_to_condense.syncing = B_FALSE; 3318 spa->spa_to_condense.cancelled = B_FALSE; 3319 3320 ASSERT3P(spa->spa_livelist_condense_zthr, ==, NULL); 3321 spa->spa_livelist_condense_zthr = 3322 zthr_create("z_livelist_condense", 3323 spa_livelist_condense_cb_check, 3324 spa_livelist_condense_cb, spa, minclsyspri); 3325 } 3326 3327 static void 3328 spa_spawn_aux_threads(spa_t *spa) 3329 { 3330 ASSERT(spa_writeable(spa)); 3331 3332 spa_start_raidz_expansion_thread(spa); 3333 spa_start_indirect_condensing_thread(spa); 3334 spa_start_livelist_destroy_thread(spa); 3335 spa_start_livelist_condensing_thread(spa); 3336 3337 ASSERT3P(spa->spa_checkpoint_discard_zthr, ==, NULL); 3338 spa->spa_checkpoint_discard_zthr = 3339 zthr_create("z_checkpoint_discard", 3340 spa_checkpoint_discard_thread_check, 3341 spa_checkpoint_discard_thread, spa, minclsyspri); 3342 } 3343 3344 /* 3345 * Fix up config after a partly-completed split. This is done with the 3346 * ZPOOL_CONFIG_SPLIT nvlist. Both the splitting pool and the split-off 3347 * pool have that entry in their config, but only the splitting one contains 3348 * a list of all the guids of the vdevs that are being split off. 3349 * 3350 * This function determines what to do with that list: either rejoin 3351 * all the disks to the pool, or complete the splitting process. To attempt 3352 * the rejoin, each disk that is offlined is marked online again, and 3353 * we do a reopen() call. If the vdev label for every disk that was 3354 * marked online indicates it was successfully split off (VDEV_AUX_SPLIT_POOL) 3355 * then we call vdev_split() on each disk, and complete the split. 3356 * 3357 * Otherwise we leave the config alone, with all the vdevs in place in 3358 * the original pool. 3359 */ 3360 static void 3361 spa_try_repair(spa_t *spa, nvlist_t *config) 3362 { 3363 uint_t extracted; 3364 uint64_t *glist; 3365 uint_t i, gcount; 3366 nvlist_t *nvl; 3367 vdev_t **vd; 3368 boolean_t attempt_reopen; 3369 3370 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_SPLIT, &nvl) != 0) 3371 return; 3372 3373 /* check that the config is complete */ 3374 if (nvlist_lookup_uint64_array(nvl, ZPOOL_CONFIG_SPLIT_LIST, 3375 &glist, &gcount) != 0) 3376 return; 3377 3378 vd = kmem_zalloc(gcount * sizeof (vdev_t *), KM_SLEEP); 3379 3380 /* attempt to online all the vdevs & validate */ 3381 attempt_reopen = B_TRUE; 3382 for (i = 0; i < gcount; i++) { 3383 if (glist[i] == 0) /* vdev is hole */ 3384 continue; 3385 3386 vd[i] = spa_lookup_by_guid(spa, glist[i], B_FALSE); 3387 if (vd[i] == NULL) { 3388 /* 3389 * Don't bother attempting to reopen the disks; 3390 * just do the split. 
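 * (The guid no longer resolves to a vdev in this pool, so there is nothing to re-online.)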
3391 */ 3392 attempt_reopen = B_FALSE; 3393 } else { 3394 /* attempt to re-online it */ 3395 vd[i]->vdev_offline = B_FALSE; 3396 } 3397 } 3398 3399 if (attempt_reopen) { 3400 vdev_reopen(spa->spa_root_vdev); 3401 3402 /* check each device to see what state it's in */ 3403 for (extracted = 0, i = 0; i < gcount; i++) { 3404 if (vd[i] != NULL && 3405 vd[i]->vdev_stat.vs_aux != VDEV_AUX_SPLIT_POOL) 3406 break; 3407 ++extracted; 3408 } 3409 } 3410 3411 /* 3412 * If every disk has been moved to the new pool, or if we never 3413 * even attempted to look at them, then we split them off for 3414 * good. 3415 */ 3416 if (!attempt_reopen || gcount == extracted) { 3417 for (i = 0; i < gcount; i++) 3418 if (vd[i] != NULL) 3419 vdev_split(vd[i]); 3420 vdev_reopen(spa->spa_root_vdev); 3421 } 3422 3423 kmem_free(vd, gcount * sizeof (vdev_t *)); 3424 } 3425 3426 static int 3427 spa_load(spa_t *spa, spa_load_state_t state, spa_import_type_t type) 3428 { 3429 const char *ereport = FM_EREPORT_ZFS_POOL; 3430 int error; 3431 3432 spa->spa_load_state = state; 3433 (void) spa_import_progress_set_state(spa_guid(spa), 3434 spa_load_state(spa)); 3435 spa_import_progress_set_notes(spa, "spa_load()"); 3436 3437 gethrestime(&spa->spa_loaded_ts); 3438 error = spa_load_impl(spa, type, &ereport); 3439 3440 /* 3441 * Don't count references from objsets that are already closed 3442 * and are making their way through the eviction process. 3443 */ 3444 spa_evicting_os_wait(spa); 3445 spa->spa_minref = zfs_refcount_count(&spa->spa_refcount); 3446 if (error) { 3447 if (error != EEXIST) { 3448 spa->spa_loaded_ts.tv_sec = 0; 3449 spa->spa_loaded_ts.tv_nsec = 0; 3450 } 3451 if (error != EBADF) { 3452 (void) zfs_ereport_post(ereport, spa, 3453 NULL, NULL, NULL, 0); 3454 } 3455 } 3456 spa->spa_load_state = error ? SPA_LOAD_ERROR : SPA_LOAD_NONE; 3457 spa->spa_ena = 0; 3458 3459 (void) spa_import_progress_set_state(spa_guid(spa), 3460 spa_load_state(spa)); 3461 3462 return (error); 3463 } 3464 3465 #ifdef ZFS_DEBUG 3466 /* 3467 * Count the number of per-vdev ZAPs associated with all of the vdevs in the 3468 * vdev tree rooted in the given vd, and ensure that each ZAP is present in the 3469 * spa's per-vdev ZAP list. 3470 */ 3471 static uint64_t 3472 vdev_count_verify_zaps(vdev_t *vd) 3473 { 3474 spa_t *spa = vd->vdev_spa; 3475 uint64_t total = 0; 3476 3477 if (spa_feature_is_active(vd->vdev_spa, SPA_FEATURE_AVZ_V2) && 3478 vd->vdev_root_zap != 0) { 3479 total++; 3480 ASSERT0(zap_lookup_int(spa->spa_meta_objset, 3481 spa->spa_all_vdev_zaps, vd->vdev_root_zap)); 3482 } 3483 if (vd->vdev_top_zap != 0) { 3484 total++; 3485 ASSERT0(zap_lookup_int(spa->spa_meta_objset, 3486 spa->spa_all_vdev_zaps, vd->vdev_top_zap)); 3487 } 3488 if (vd->vdev_leaf_zap != 0) { 3489 total++; 3490 ASSERT0(zap_lookup_int(spa->spa_meta_objset, 3491 spa->spa_all_vdev_zaps, vd->vdev_leaf_zap)); 3492 } 3493 3494 for (uint64_t i = 0; i < vd->vdev_children; i++) { 3495 total += vdev_count_verify_zaps(vd->vdev_child[i]); 3496 } 3497 3498 return (total); 3499 } 3500 #else 3501 #define vdev_count_verify_zaps(vd) ((void) sizeof (vd), 0) 3502 #endif 3503 3504 /* 3505 * Determine whether the activity check is required. 
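 * The check is skipped for ZFS_IMPORT_SKIP_MMP imports (e.g. zdb), when MMP is not enabled on the pool, when an earlier tryimport already observed this same uberblock, when the pool was last imported by this host, or when it was cleanly exported.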
3506 */ 3507 static boolean_t 3508 spa_activity_check_required(spa_t *spa, uberblock_t *ub, nvlist_t *label, 3509 nvlist_t *config) 3510 { 3511 uint64_t state = 0; 3512 uint64_t hostid = 0; 3513 uint64_t tryconfig_txg = 0; 3514 uint64_t tryconfig_timestamp = 0; 3515 uint16_t tryconfig_mmp_seq = 0; 3516 nvlist_t *nvinfo; 3517 3518 if (nvlist_exists(config, ZPOOL_CONFIG_LOAD_INFO)) { 3519 nvinfo = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO); 3520 (void) nvlist_lookup_uint64(nvinfo, ZPOOL_CONFIG_MMP_TXG, 3521 &tryconfig_txg); 3522 (void) nvlist_lookup_uint64(config, ZPOOL_CONFIG_TIMESTAMP, 3523 &tryconfig_timestamp); 3524 (void) nvlist_lookup_uint16(nvinfo, ZPOOL_CONFIG_MMP_SEQ, 3525 &tryconfig_mmp_seq); 3526 } 3527 3528 (void) nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE, &state); 3529 3530 /* 3531 * Disable the MMP activity check - This is used by zdb which 3532 * is intended to be used on potentially active pools. 3533 */ 3534 if (spa->spa_import_flags & ZFS_IMPORT_SKIP_MMP) 3535 return (B_FALSE); 3536 3537 /* 3538 * Skip the activity check when the MMP feature is disabled. 3539 */ 3540 if (ub->ub_mmp_magic == MMP_MAGIC && ub->ub_mmp_delay == 0) 3541 return (B_FALSE); 3542 3543 /* 3544 * If the tryconfig_ values are nonzero, they are the results of an 3545 * earlier tryimport. If they all match the uberblock we just found, 3546 * then the pool has not changed and we return false so we do not test 3547 * a second time. 3548 */ 3549 if (tryconfig_txg && tryconfig_txg == ub->ub_txg && 3550 tryconfig_timestamp && tryconfig_timestamp == ub->ub_timestamp && 3551 tryconfig_mmp_seq && tryconfig_mmp_seq == 3552 (MMP_SEQ_VALID(ub) ? MMP_SEQ(ub) : 0)) 3553 return (B_FALSE); 3554 3555 /* 3556 * Allow the activity check to be skipped when importing the pool 3557 * on the same host which last imported it. Since the hostid from 3558 * configuration may be stale use the one read from the label. 3559 */ 3560 if (nvlist_exists(label, ZPOOL_CONFIG_HOSTID)) 3561 hostid = fnvlist_lookup_uint64(label, ZPOOL_CONFIG_HOSTID); 3562 3563 if (hostid == spa_get_hostid(spa)) 3564 return (B_FALSE); 3565 3566 /* 3567 * Skip the activity test when the pool was cleanly exported. 3568 */ 3569 if (state != POOL_STATE_ACTIVE) 3570 return (B_FALSE); 3571 3572 return (B_TRUE); 3573 } 3574 3575 /* 3576 * Nanoseconds the activity check must watch for changes on-disk. 3577 */ 3578 static uint64_t 3579 spa_activity_check_duration(spa_t *spa, uberblock_t *ub) 3580 { 3581 uint64_t import_intervals = MAX(zfs_multihost_import_intervals, 1); 3582 uint64_t multihost_interval = MSEC2NSEC( 3583 MMP_INTERVAL_OK(zfs_multihost_interval)); 3584 uint64_t import_delay = MAX(NANOSEC, import_intervals * 3585 multihost_interval); 3586 3587 /* 3588 * Local tunables determine a minimum duration except for the case 3589 * where we know when the remote host will suspend the pool if MMP 3590 * writes do not land. 3591 * 3592 * See Big Theory comment at the top of mmp.c for the reasoning behind 3593 * these cases and times. 
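 * In short: if the remote host advertises a fail interval, wait long enough for it to suspend the pool; if it advertises fail_intervals == 0 (it will never suspend) or only provides mmp_delay (zfs-0.7), scale the wait by import_intervals; otherwise fall back to the local tunables computed above.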
3594 */ 3595 3596 ASSERT(MMP_IMPORT_SAFETY_FACTOR >= 100); 3597 3598 if (MMP_INTERVAL_VALID(ub) && MMP_FAIL_INT_VALID(ub) && 3599 MMP_FAIL_INT(ub) > 0) { 3600 3601 /* MMP on remote host will suspend pool after failed writes */ 3602 import_delay = MMP_FAIL_INT(ub) * MSEC2NSEC(MMP_INTERVAL(ub)) * 3603 MMP_IMPORT_SAFETY_FACTOR / 100; 3604 3605 zfs_dbgmsg("fail_intvals>0 import_delay=%llu ub_mmp " 3606 "mmp_fails=%llu ub_mmp mmp_interval=%llu " 3607 "import_intervals=%llu", (u_longlong_t)import_delay, 3608 (u_longlong_t)MMP_FAIL_INT(ub), 3609 (u_longlong_t)MMP_INTERVAL(ub), 3610 (u_longlong_t)import_intervals); 3611 3612 } else if (MMP_INTERVAL_VALID(ub) && MMP_FAIL_INT_VALID(ub) && 3613 MMP_FAIL_INT(ub) == 0) { 3614 3615 /* MMP on remote host will never suspend pool */ 3616 import_delay = MAX(import_delay, (MSEC2NSEC(MMP_INTERVAL(ub)) + 3617 ub->ub_mmp_delay) * import_intervals); 3618 3619 zfs_dbgmsg("fail_intvals=0 import_delay=%llu ub_mmp " 3620 "mmp_interval=%llu ub_mmp_delay=%llu " 3621 "import_intervals=%llu", (u_longlong_t)import_delay, 3622 (u_longlong_t)MMP_INTERVAL(ub), 3623 (u_longlong_t)ub->ub_mmp_delay, 3624 (u_longlong_t)import_intervals); 3625 3626 } else if (MMP_VALID(ub)) { 3627 /* 3628 * zfs-0.7 compatibility case 3629 */ 3630 3631 import_delay = MAX(import_delay, (multihost_interval + 3632 ub->ub_mmp_delay) * import_intervals); 3633 3634 zfs_dbgmsg("import_delay=%llu ub_mmp_delay=%llu " 3635 "import_intervals=%llu leaves=%u", 3636 (u_longlong_t)import_delay, 3637 (u_longlong_t)ub->ub_mmp_delay, 3638 (u_longlong_t)import_intervals, 3639 vdev_count_leaves(spa)); 3640 } else { 3641 /* Using local tunings is the only reasonable option */ 3642 zfs_dbgmsg("pool last imported on non-MMP aware " 3643 "host using import_delay=%llu multihost_interval=%llu " 3644 "import_intervals=%llu", (u_longlong_t)import_delay, 3645 (u_longlong_t)multihost_interval, 3646 (u_longlong_t)import_intervals); 3647 } 3648 3649 return (import_delay); 3650 } 3651 3652 /* 3653 * Remote host activity check. 3654 * 3655 * error results: 3656 * 0 - no activity detected 3657 * EREMOTEIO - remote activity detected 3658 * EINTR - user canceled the operation 3659 */ 3660 static int 3661 spa_activity_check(spa_t *spa, uberblock_t *ub, nvlist_t *config, 3662 boolean_t importing) 3663 { 3664 uint64_t txg = ub->ub_txg; 3665 uint64_t timestamp = ub->ub_timestamp; 3666 uint64_t mmp_config = ub->ub_mmp_config; 3667 uint16_t mmp_seq = MMP_SEQ_VALID(ub) ? MMP_SEQ(ub) : 0; 3668 uint64_t import_delay; 3669 hrtime_t import_expire, now; 3670 nvlist_t *mmp_label = NULL; 3671 vdev_t *rvd = spa->spa_root_vdev; 3672 kcondvar_t cv; 3673 kmutex_t mtx; 3674 int error = 0; 3675 3676 cv_init(&cv, NULL, CV_DEFAULT, NULL); 3677 mutex_init(&mtx, NULL, MUTEX_DEFAULT, NULL); 3678 mutex_enter(&mtx); 3679 3680 /* 3681 * If ZPOOL_CONFIG_MMP_TXG is present an activity check was performed 3682 * during the earlier tryimport. If the txg recorded there is 0 then 3683 * the pool is known to be active on another host. 3684 * 3685 * Otherwise, the pool might be in use on another host. Check for 3686 * changes in the uberblocks on disk if necessary. 
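 * The loop below re-reads the best uberblock roughly once per second until import_expire, and returns EREMOTEIO if its txg, timestamp, or MMP sequence changes in the meantime.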
3687 */ 3688 if (nvlist_exists(config, ZPOOL_CONFIG_LOAD_INFO)) { 3689 nvlist_t *nvinfo = fnvlist_lookup_nvlist(config, 3690 ZPOOL_CONFIG_LOAD_INFO); 3691 3692 if (nvlist_exists(nvinfo, ZPOOL_CONFIG_MMP_TXG) && 3693 fnvlist_lookup_uint64(nvinfo, ZPOOL_CONFIG_MMP_TXG) == 0) { 3694 vdev_uberblock_load(rvd, ub, &mmp_label); 3695 error = SET_ERROR(EREMOTEIO); 3696 goto out; 3697 } 3698 } 3699 3700 import_delay = spa_activity_check_duration(spa, ub); 3701 3702 /* Add a small random factor in case of simultaneous imports (0-25%) */ 3703 import_delay += import_delay * random_in_range(250) / 1000; 3704 3705 import_expire = gethrtime() + import_delay; 3706 3707 if (importing) { 3708 spa_import_progress_set_notes(spa, "Checking MMP activity, " 3709 "waiting %llu ms", (u_longlong_t)NSEC2MSEC(import_delay)); 3710 } 3711 3712 int iterations = 0; 3713 while ((now = gethrtime()) < import_expire) { 3714 if (importing && iterations++ % 30 == 0) { 3715 spa_import_progress_set_notes(spa, "Checking MMP " 3716 "activity, %llu ms remaining", 3717 (u_longlong_t)NSEC2MSEC(import_expire - now)); 3718 } 3719 3720 if (importing) { 3721 (void) spa_import_progress_set_mmp_check(spa_guid(spa), 3722 NSEC2SEC(import_expire - gethrtime())); 3723 } 3724 3725 vdev_uberblock_load(rvd, ub, &mmp_label); 3726 3727 if (txg != ub->ub_txg || timestamp != ub->ub_timestamp || 3728 mmp_seq != (MMP_SEQ_VALID(ub) ? MMP_SEQ(ub) : 0)) { 3729 zfs_dbgmsg("multihost activity detected " 3730 "txg %llu ub_txg %llu " 3731 "timestamp %llu ub_timestamp %llu " 3732 "mmp_config %#llx ub_mmp_config %#llx", 3733 (u_longlong_t)txg, (u_longlong_t)ub->ub_txg, 3734 (u_longlong_t)timestamp, 3735 (u_longlong_t)ub->ub_timestamp, 3736 (u_longlong_t)mmp_config, 3737 (u_longlong_t)ub->ub_mmp_config); 3738 3739 error = SET_ERROR(EREMOTEIO); 3740 break; 3741 } 3742 3743 if (mmp_label) { 3744 nvlist_free(mmp_label); 3745 mmp_label = NULL; 3746 } 3747 3748 error = cv_timedwait_sig(&cv, &mtx, ddi_get_lbolt() + hz); 3749 if (error != -1) { 3750 error = SET_ERROR(EINTR); 3751 break; 3752 } 3753 error = 0; 3754 } 3755 3756 out: 3757 mutex_exit(&mtx); 3758 mutex_destroy(&mtx); 3759 cv_destroy(&cv); 3760 3761 /* 3762 * If the pool is determined to be active store the status in the 3763 * spa->spa_load_info nvlist. If the remote hostname or hostid are 3764 * available from configuration read from disk store them as well. 3765 * This allows 'zpool import' to generate a more useful message. 
3766 * 3767 * ZPOOL_CONFIG_MMP_STATE - observed pool status (mandatory) 3768 * ZPOOL_CONFIG_MMP_HOSTNAME - hostname from the active pool 3769 * ZPOOL_CONFIG_MMP_HOSTID - hostid from the active pool 3770 */ 3771 if (error == EREMOTEIO) { 3772 const char *hostname = "<unknown>"; 3773 uint64_t hostid = 0; 3774 3775 if (mmp_label) { 3776 if (nvlist_exists(mmp_label, ZPOOL_CONFIG_HOSTNAME)) { 3777 hostname = fnvlist_lookup_string(mmp_label, 3778 ZPOOL_CONFIG_HOSTNAME); 3779 fnvlist_add_string(spa->spa_load_info, 3780 ZPOOL_CONFIG_MMP_HOSTNAME, hostname); 3781 } 3782 3783 if (nvlist_exists(mmp_label, ZPOOL_CONFIG_HOSTID)) { 3784 hostid = fnvlist_lookup_uint64(mmp_label, 3785 ZPOOL_CONFIG_HOSTID); 3786 fnvlist_add_uint64(spa->spa_load_info, 3787 ZPOOL_CONFIG_MMP_HOSTID, hostid); 3788 } 3789 } 3790 3791 fnvlist_add_uint64(spa->spa_load_info, 3792 ZPOOL_CONFIG_MMP_STATE, MMP_STATE_ACTIVE); 3793 fnvlist_add_uint64(spa->spa_load_info, 3794 ZPOOL_CONFIG_MMP_TXG, 0); 3795 3796 error = spa_vdev_err(rvd, VDEV_AUX_ACTIVE, EREMOTEIO); 3797 } 3798 3799 if (mmp_label) 3800 nvlist_free(mmp_label); 3801 3802 return (error); 3803 } 3804 3805 /* 3806 * Called from zfs_ioc_clear for a pool that was suspended 3807 * after failing mmp write checks. 3808 */ 3809 boolean_t 3810 spa_mmp_remote_host_activity(spa_t *spa) 3811 { 3812 ASSERT(spa_multihost(spa) && spa_suspended(spa)); 3813 3814 nvlist_t *best_label; 3815 uberblock_t best_ub; 3816 3817 /* 3818 * Locate the best uberblock on disk 3819 */ 3820 vdev_uberblock_load(spa->spa_root_vdev, &best_ub, &best_label); 3821 if (best_label) { 3822 /* 3823 * confirm that the best hostid matches our hostid 3824 */ 3825 if (nvlist_exists(best_label, ZPOOL_CONFIG_HOSTID) && 3826 spa_get_hostid(spa) != 3827 fnvlist_lookup_uint64(best_label, ZPOOL_CONFIG_HOSTID)) { 3828 nvlist_free(best_label); 3829 return (B_TRUE); 3830 } 3831 nvlist_free(best_label); 3832 } else { 3833 return (B_TRUE); 3834 } 3835 3836 if (!MMP_VALID(&best_ub) || 3837 !MMP_FAIL_INT_VALID(&best_ub) || 3838 MMP_FAIL_INT(&best_ub) == 0) { 3839 return (B_TRUE); 3840 } 3841 3842 if (best_ub.ub_txg != spa->spa_uberblock.ub_txg || 3843 best_ub.ub_timestamp != spa->spa_uberblock.ub_timestamp) { 3844 zfs_dbgmsg("txg mismatch detected during pool clear " 3845 "txg %llu ub_txg %llu timestamp %llu ub_timestamp %llu", 3846 (u_longlong_t)spa->spa_uberblock.ub_txg, 3847 (u_longlong_t)best_ub.ub_txg, 3848 (u_longlong_t)spa->spa_uberblock.ub_timestamp, 3849 (u_longlong_t)best_ub.ub_timestamp); 3850 return (B_TRUE); 3851 } 3852 3853 /* 3854 * Perform an activity check looking for any remote writer 3855 */ 3856 return (spa_activity_check(spa, &spa->spa_uberblock, spa->spa_config, 3857 B_FALSE) != 0); 3858 } 3859 3860 static int 3861 spa_verify_host(spa_t *spa, nvlist_t *mos_config) 3862 { 3863 uint64_t hostid; 3864 const char *hostname; 3865 uint64_t myhostid = 0; 3866 3867 if (!spa_is_root(spa) && nvlist_lookup_uint64(mos_config, 3868 ZPOOL_CONFIG_HOSTID, &hostid) == 0) { 3869 hostname = fnvlist_lookup_string(mos_config, 3870 ZPOOL_CONFIG_HOSTNAME); 3871 3872 myhostid = zone_get_hostid(NULL); 3873 3874 if (hostid != 0 && myhostid != 0 && hostid != myhostid) { 3875 cmn_err(CE_WARN, "pool '%s' could not be " 3876 "loaded as it was last accessed by " 3877 "another system (host: %s hostid: 0x%llx). 
" 3878 "See: https://openzfs.github.io/openzfs-docs/msg/" 3879 "ZFS-8000-EY", 3880 spa_name(spa), hostname, (u_longlong_t)hostid); 3881 spa_load_failed(spa, "hostid verification failed: pool " 3882 "last accessed by host: %s (hostid: 0x%llx)", 3883 hostname, (u_longlong_t)hostid); 3884 return (SET_ERROR(EBADF)); 3885 } 3886 } 3887 3888 return (0); 3889 } 3890 3891 static int 3892 spa_ld_parse_config(spa_t *spa, spa_import_type_t type) 3893 { 3894 int error = 0; 3895 nvlist_t *nvtree, *nvl, *config = spa->spa_config; 3896 int parse; 3897 vdev_t *rvd; 3898 uint64_t pool_guid; 3899 const char *comment; 3900 const char *compatibility; 3901 3902 /* 3903 * Versioning wasn't explicitly added to the label until later, so if 3904 * it's not present treat it as the initial version. 3905 */ 3906 if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION, 3907 &spa->spa_ubsync.ub_version) != 0) 3908 spa->spa_ubsync.ub_version = SPA_VERSION_INITIAL; 3909 3910 if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID, &pool_guid)) { 3911 spa_load_failed(spa, "invalid config provided: '%s' missing", 3912 ZPOOL_CONFIG_POOL_GUID); 3913 return (SET_ERROR(EINVAL)); 3914 } 3915 3916 /* 3917 * If we are doing an import, ensure that the pool is not already 3918 * imported by checking if its pool guid already exists in the 3919 * spa namespace. 3920 * 3921 * The only case that we allow an already imported pool to be 3922 * imported again, is when the pool is checkpointed and we want to 3923 * look at its checkpointed state from userland tools like zdb. 3924 */ 3925 #ifdef _KERNEL 3926 if ((spa->spa_load_state == SPA_LOAD_IMPORT || 3927 spa->spa_load_state == SPA_LOAD_TRYIMPORT) && 3928 spa_guid_exists(pool_guid, 0)) { 3929 #else 3930 if ((spa->spa_load_state == SPA_LOAD_IMPORT || 3931 spa->spa_load_state == SPA_LOAD_TRYIMPORT) && 3932 spa_guid_exists(pool_guid, 0) && 3933 !spa_importing_readonly_checkpoint(spa)) { 3934 #endif 3935 spa_load_failed(spa, "a pool with guid %llu is already open", 3936 (u_longlong_t)pool_guid); 3937 return (SET_ERROR(EEXIST)); 3938 } 3939 3940 spa->spa_config_guid = pool_guid; 3941 3942 nvlist_free(spa->spa_load_info); 3943 spa->spa_load_info = fnvlist_alloc(); 3944 3945 ASSERT(spa->spa_comment == NULL); 3946 if (nvlist_lookup_string(config, ZPOOL_CONFIG_COMMENT, &comment) == 0) 3947 spa->spa_comment = spa_strdup(comment); 3948 3949 ASSERT(spa->spa_compatibility == NULL); 3950 if (nvlist_lookup_string(config, ZPOOL_CONFIG_COMPATIBILITY, 3951 &compatibility) == 0) 3952 spa->spa_compatibility = spa_strdup(compatibility); 3953 3954 (void) nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_TXG, 3955 &spa->spa_config_txg); 3956 3957 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_SPLIT, &nvl) == 0) 3958 spa->spa_config_splitting = fnvlist_dup(nvl); 3959 3960 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &nvtree)) { 3961 spa_load_failed(spa, "invalid config provided: '%s' missing", 3962 ZPOOL_CONFIG_VDEV_TREE); 3963 return (SET_ERROR(EINVAL)); 3964 } 3965 3966 /* 3967 * Create "The Godfather" zio to hold all async IOs 3968 */ 3969 spa->spa_async_zio_root = kmem_alloc(max_ncpus * sizeof (void *), 3970 KM_SLEEP); 3971 for (int i = 0; i < max_ncpus; i++) { 3972 spa->spa_async_zio_root[i] = zio_root(spa, NULL, NULL, 3973 ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE | 3974 ZIO_FLAG_GODFATHER); 3975 } 3976 3977 /* 3978 * Parse the configuration into a vdev tree. 
We explicitly set the 3979 * value that will be returned by spa_version() since parsing the 3980 * configuration requires knowing the version number. 3981 */ 3982 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 3983 parse = (type == SPA_IMPORT_EXISTING ? 3984 VDEV_ALLOC_LOAD : VDEV_ALLOC_SPLIT); 3985 error = spa_config_parse(spa, &rvd, nvtree, NULL, 0, parse); 3986 spa_config_exit(spa, SCL_ALL, FTAG); 3987 3988 if (error != 0) { 3989 spa_load_failed(spa, "unable to parse config [error=%d]", 3990 error); 3991 return (error); 3992 } 3993 3994 ASSERT(spa->spa_root_vdev == rvd); 3995 ASSERT3U(spa->spa_min_ashift, >=, SPA_MINBLOCKSHIFT); 3996 ASSERT3U(spa->spa_max_ashift, <=, SPA_MAXBLOCKSHIFT); 3997 3998 if (type != SPA_IMPORT_ASSEMBLE) { 3999 ASSERT(spa_guid(spa) == pool_guid); 4000 } 4001 4002 return (0); 4003 } 4004 4005 /* 4006 * Recursively open all vdevs in the vdev tree. This function is called twice: 4007 * first with the untrusted config, then with the trusted config. 4008 */ 4009 static int 4010 spa_ld_open_vdevs(spa_t *spa) 4011 { 4012 int error = 0; 4013 4014 /* 4015 * spa_missing_tvds_allowed defines how many top-level vdevs can be 4016 * missing/unopenable for the root vdev to be still considered openable. 4017 */ 4018 if (spa->spa_trust_config) { 4019 spa->spa_missing_tvds_allowed = zfs_max_missing_tvds; 4020 } else if (spa->spa_config_source == SPA_CONFIG_SRC_CACHEFILE) { 4021 spa->spa_missing_tvds_allowed = zfs_max_missing_tvds_cachefile; 4022 } else if (spa->spa_config_source == SPA_CONFIG_SRC_SCAN) { 4023 spa->spa_missing_tvds_allowed = zfs_max_missing_tvds_scan; 4024 } else { 4025 spa->spa_missing_tvds_allowed = 0; 4026 } 4027 4028 spa->spa_missing_tvds_allowed = 4029 MAX(zfs_max_missing_tvds, spa->spa_missing_tvds_allowed); 4030 4031 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 4032 error = vdev_open(spa->spa_root_vdev); 4033 spa_config_exit(spa, SCL_ALL, FTAG); 4034 4035 if (spa->spa_missing_tvds != 0) { 4036 spa_load_note(spa, "vdev tree has %lld missing top-level " 4037 "vdevs.", (u_longlong_t)spa->spa_missing_tvds); 4038 if (spa->spa_trust_config && (spa->spa_mode & SPA_MODE_WRITE)) { 4039 /* 4040 * Although theoretically we could allow users to open 4041 * incomplete pools in RW mode, we'd need to add a lot 4042 * of extra logic (e.g. adjust pool space to account 4043 * for missing vdevs). 4044 * This limitation also prevents users from accidentally 4045 * opening the pool in RW mode during data recovery and 4046 * damaging it further. 4047 */ 4048 spa_load_note(spa, "pools with missing top-level " 4049 "vdevs can only be opened in read-only mode."); 4050 error = SET_ERROR(ENXIO); 4051 } else { 4052 spa_load_note(spa, "current settings allow for maximum " 4053 "%lld missing top-level vdevs at this stage.", 4054 (u_longlong_t)spa->spa_missing_tvds_allowed); 4055 } 4056 } 4057 if (error != 0) { 4058 spa_load_failed(spa, "unable to open vdev tree [error=%d]", 4059 error); 4060 } 4061 if (spa->spa_missing_tvds != 0 || error != 0) 4062 vdev_dbgmsg_print_tree(spa->spa_root_vdev, 2); 4063 4064 return (error); 4065 } 4066 4067 /* 4068 * We need to validate the vdev labels against the configuration that 4069 * we have in hand. This function is called twice: first with an untrusted 4070 * config, then with a trusted config. The validation is more strict when the 4071 * config is trusted. 
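 *
 * A rough sketch of how the two passes are driven (simplified from
 * spa_ld_mos_init() and spa_ld_trusted_config() later in this file,
 * error handling omitted):
 *
 *	spa_ld_parse_config(spa, type);		untrusted config
 *	spa_ld_open_vdevs(spa);
 *	spa_ld_validate_vdevs(spa);		lenient pass
 *	...
 *	spa_ld_trusted_config(spa, type, B_FALSE);
 *		-> rebuilds the vdev tree from the MOS config, sets
 *		   spa_trust_config, then runs spa_ld_open_vdevs() and
 *		   spa_ld_validate_vdevs() again with the strict checks.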
4072 */ 4073 static int 4074 spa_ld_validate_vdevs(spa_t *spa) 4075 { 4076 int error = 0; 4077 vdev_t *rvd = spa->spa_root_vdev; 4078 4079 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 4080 error = vdev_validate(rvd); 4081 spa_config_exit(spa, SCL_ALL, FTAG); 4082 4083 if (error != 0) { 4084 spa_load_failed(spa, "vdev_validate failed [error=%d]", error); 4085 return (error); 4086 } 4087 4088 if (rvd->vdev_state <= VDEV_STATE_CANT_OPEN) { 4089 spa_load_failed(spa, "cannot open vdev tree after invalidating " 4090 "some vdevs"); 4091 vdev_dbgmsg_print_tree(rvd, 2); 4092 return (SET_ERROR(ENXIO)); 4093 } 4094 4095 return (0); 4096 } 4097 4098 static void 4099 spa_ld_select_uberblock_done(spa_t *spa, uberblock_t *ub) 4100 { 4101 spa->spa_state = POOL_STATE_ACTIVE; 4102 spa->spa_ubsync = spa->spa_uberblock; 4103 spa->spa_verify_min_txg = spa->spa_extreme_rewind ? 4104 TXG_INITIAL - 1 : spa_last_synced_txg(spa) - TXG_DEFER_SIZE - 1; 4105 spa->spa_first_txg = spa->spa_last_ubsync_txg ? 4106 spa->spa_last_ubsync_txg : spa_last_synced_txg(spa) + 1; 4107 spa->spa_claim_max_txg = spa->spa_first_txg; 4108 spa->spa_prev_software_version = ub->ub_software_version; 4109 } 4110 4111 static int 4112 spa_ld_select_uberblock(spa_t *spa, spa_import_type_t type) 4113 { 4114 vdev_t *rvd = spa->spa_root_vdev; 4115 nvlist_t *label; 4116 uberblock_t *ub = &spa->spa_uberblock; 4117 boolean_t activity_check = B_FALSE; 4118 4119 /* 4120 * If we are opening the checkpointed state of the pool by 4121 * rewinding to it, at this point we will have written the 4122 * checkpointed uberblock to the vdev labels, so searching 4123 * the labels will find the right uberblock. However, if 4124 * we are opening the checkpointed state read-only, we have 4125 * not modified the labels. Therefore, we must ignore the 4126 * labels and continue using the spa_uberblock that was set 4127 * by spa_ld_checkpoint_rewind. 4128 * 4129 * Note that it would be fine to ignore the labels when 4130 * rewinding (opening writeable) as well. However, if we 4131 * crash just after writing the labels, we will end up 4132 * searching the labels. Doing so in the common case means 4133 * that this code path gets exercised normally, rather than 4134 * just in the edge case. 4135 */ 4136 if (ub->ub_checkpoint_txg != 0 && 4137 spa_importing_readonly_checkpoint(spa)) { 4138 spa_ld_select_uberblock_done(spa, ub); 4139 return (0); 4140 } 4141 4142 /* 4143 * Find the best uberblock. 4144 */ 4145 vdev_uberblock_load(rvd, ub, &label); 4146 4147 /* 4148 * If we weren't able to find a single valid uberblock, return failure. 4149 */ 4150 if (ub->ub_txg == 0) { 4151 nvlist_free(label); 4152 spa_load_failed(spa, "no valid uberblock found"); 4153 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, ENXIO)); 4154 } 4155 4156 if (spa->spa_load_max_txg != UINT64_MAX) { 4157 (void) spa_import_progress_set_max_txg(spa_guid(spa), 4158 (u_longlong_t)spa->spa_load_max_txg); 4159 } 4160 spa_load_note(spa, "using uberblock with txg=%llu", 4161 (u_longlong_t)ub->ub_txg); 4162 if (ub->ub_raidz_reflow_info != 0) { 4163 spa_load_note(spa, "uberblock raidz_reflow_info: " 4164 "state=%u offset=%llu", 4165 (int)RRSS_GET_STATE(ub), 4166 (u_longlong_t)RRSS_GET_OFFSET(ub)); 4167 } 4168 4169 4170 /* 4171 * For pools which have the multihost property on determine if the 4172 * pool is truly inactive and can be safely imported. Prevent 4173 * hosts which don't have a hostid set from importing the pool. 
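 *
 * Loosely, the activity check (spa_activity_check(), above) is:
 *
 *	vdev_uberblock_load(rvd, ub, &label);	note txg, timestamp
 *						and mmp_seq
 *	... wait spa_activity_check_duration() plus a 0-25% random
 *	    fudge factor, re-reading the uberblock periodically ...
 *	if (txg, timestamp or mmp_seq changed)
 *		-> another host is writing; fail with EREMOTEIO and
 *		   MMP_STATE_ACTIVE in spa_load_info
 *	else
 *		-> the caller records MMP_STATE_INACTIVE and the
 *		   import proceeds.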
4174 */ 4175 activity_check = spa_activity_check_required(spa, ub, label, 4176 spa->spa_config); 4177 if (activity_check) { 4178 if (ub->ub_mmp_magic == MMP_MAGIC && ub->ub_mmp_delay && 4179 spa_get_hostid(spa) == 0) { 4180 nvlist_free(label); 4181 fnvlist_add_uint64(spa->spa_load_info, 4182 ZPOOL_CONFIG_MMP_STATE, MMP_STATE_NO_HOSTID); 4183 return (spa_vdev_err(rvd, VDEV_AUX_ACTIVE, EREMOTEIO)); 4184 } 4185 4186 int error = 4187 spa_activity_check(spa, ub, spa->spa_config, B_TRUE); 4188 if (error) { 4189 nvlist_free(label); 4190 return (error); 4191 } 4192 4193 fnvlist_add_uint64(spa->spa_load_info, 4194 ZPOOL_CONFIG_MMP_STATE, MMP_STATE_INACTIVE); 4195 fnvlist_add_uint64(spa->spa_load_info, 4196 ZPOOL_CONFIG_MMP_TXG, ub->ub_txg); 4197 fnvlist_add_uint16(spa->spa_load_info, 4198 ZPOOL_CONFIG_MMP_SEQ, 4199 (MMP_SEQ_VALID(ub) ? MMP_SEQ(ub) : 0)); 4200 } 4201 4202 /* 4203 * If the pool has an unsupported version we can't open it. 4204 */ 4205 if (!SPA_VERSION_IS_SUPPORTED(ub->ub_version)) { 4206 nvlist_free(label); 4207 spa_load_failed(spa, "version %llu is not supported", 4208 (u_longlong_t)ub->ub_version); 4209 return (spa_vdev_err(rvd, VDEV_AUX_VERSION_NEWER, ENOTSUP)); 4210 } 4211 4212 if (ub->ub_version >= SPA_VERSION_FEATURES) { 4213 nvlist_t *features; 4214 4215 /* 4216 * If we weren't able to find what's necessary for reading the 4217 * MOS in the label, return failure. 4218 */ 4219 if (label == NULL) { 4220 spa_load_failed(spa, "label config unavailable"); 4221 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, 4222 ENXIO)); 4223 } 4224 4225 if (nvlist_lookup_nvlist(label, ZPOOL_CONFIG_FEATURES_FOR_READ, 4226 &features) != 0) { 4227 nvlist_free(label); 4228 spa_load_failed(spa, "invalid label: '%s' missing", 4229 ZPOOL_CONFIG_FEATURES_FOR_READ); 4230 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, 4231 ENXIO)); 4232 } 4233 4234 /* 4235 * Update our in-core representation with the definitive values 4236 * from the label. 4237 */ 4238 nvlist_free(spa->spa_label_features); 4239 spa->spa_label_features = fnvlist_dup(features); 4240 } 4241 4242 nvlist_free(label); 4243 4244 /* 4245 * Look through entries in the label nvlist's features_for_read. If 4246 * there is a feature listed there which we don't understand then we 4247 * cannot open a pool. 4248 */ 4249 if (ub->ub_version >= SPA_VERSION_FEATURES) { 4250 nvlist_t *unsup_feat; 4251 4252 unsup_feat = fnvlist_alloc(); 4253 4254 for (nvpair_t *nvp = nvlist_next_nvpair(spa->spa_label_features, 4255 NULL); nvp != NULL; 4256 nvp = nvlist_next_nvpair(spa->spa_label_features, nvp)) { 4257 if (!zfeature_is_supported(nvpair_name(nvp))) { 4258 fnvlist_add_string(unsup_feat, 4259 nvpair_name(nvp), ""); 4260 } 4261 } 4262 4263 if (!nvlist_empty(unsup_feat)) { 4264 fnvlist_add_nvlist(spa->spa_load_info, 4265 ZPOOL_CONFIG_UNSUP_FEAT, unsup_feat); 4266 nvlist_free(unsup_feat); 4267 spa_load_failed(spa, "some features are unsupported"); 4268 return (spa_vdev_err(rvd, VDEV_AUX_UNSUP_FEAT, 4269 ENOTSUP)); 4270 } 4271 4272 nvlist_free(unsup_feat); 4273 } 4274 4275 if (type != SPA_IMPORT_ASSEMBLE && spa->spa_config_splitting) { 4276 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 4277 spa_try_repair(spa, spa->spa_config); 4278 spa_config_exit(spa, SCL_ALL, FTAG); 4279 nvlist_free(spa->spa_config_splitting); 4280 spa->spa_config_splitting = NULL; 4281 } 4282 4283 /* 4284 * Initialize internal SPA structures. 
4285 */ 4286 spa_ld_select_uberblock_done(spa, ub); 4287 4288 return (0); 4289 } 4290 4291 static int 4292 spa_ld_open_rootbp(spa_t *spa) 4293 { 4294 int error = 0; 4295 vdev_t *rvd = spa->spa_root_vdev; 4296 4297 error = dsl_pool_init(spa, spa->spa_first_txg, &spa->spa_dsl_pool); 4298 if (error != 0) { 4299 spa_load_failed(spa, "unable to open rootbp in dsl_pool_init " 4300 "[error=%d]", error); 4301 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 4302 } 4303 spa->spa_meta_objset = spa->spa_dsl_pool->dp_meta_objset; 4304 4305 return (0); 4306 } 4307 4308 static int 4309 spa_ld_trusted_config(spa_t *spa, spa_import_type_t type, 4310 boolean_t reloading) 4311 { 4312 vdev_t *mrvd, *rvd = spa->spa_root_vdev; 4313 nvlist_t *nv, *mos_config, *policy; 4314 int error = 0, copy_error; 4315 uint64_t healthy_tvds, healthy_tvds_mos; 4316 uint64_t mos_config_txg; 4317 4318 if (spa_dir_prop(spa, DMU_POOL_CONFIG, &spa->spa_config_object, B_TRUE) 4319 != 0) 4320 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 4321 4322 /* 4323 * If we're assembling a pool from a split, the config provided is 4324 * already trusted so there is nothing to do. 4325 */ 4326 if (type == SPA_IMPORT_ASSEMBLE) 4327 return (0); 4328 4329 healthy_tvds = spa_healthy_core_tvds(spa); 4330 4331 if (load_nvlist(spa, spa->spa_config_object, &mos_config) 4332 != 0) { 4333 spa_load_failed(spa, "unable to retrieve MOS config"); 4334 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 4335 } 4336 4337 /* 4338 * If we are doing an open, pool owner wasn't verified yet, thus do 4339 * the verification here. 4340 */ 4341 if (spa->spa_load_state == SPA_LOAD_OPEN) { 4342 error = spa_verify_host(spa, mos_config); 4343 if (error != 0) { 4344 nvlist_free(mos_config); 4345 return (error); 4346 } 4347 } 4348 4349 nv = fnvlist_lookup_nvlist(mos_config, ZPOOL_CONFIG_VDEV_TREE); 4350 4351 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 4352 4353 /* 4354 * Build a new vdev tree from the trusted config 4355 */ 4356 error = spa_config_parse(spa, &mrvd, nv, NULL, 0, VDEV_ALLOC_LOAD); 4357 if (error != 0) { 4358 nvlist_free(mos_config); 4359 spa_config_exit(spa, SCL_ALL, FTAG); 4360 spa_load_failed(spa, "spa_config_parse failed [error=%d]", 4361 error); 4362 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, error)); 4363 } 4364 4365 /* 4366 * Vdev paths in the MOS may be obsolete. If the untrusted config was 4367 * obtained by scanning /dev/dsk, then it will have the right vdev 4368 * paths. We update the trusted MOS config with this information. 4369 * We first try to copy the paths with vdev_copy_path_strict, which 4370 * succeeds only when both configs have exactly the same vdev tree. 4371 * If that fails, we fall back to a more flexible method that has a 4372 * best effort policy. 
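 *
 * Hypothetical example: the MOS config might still record a vdev path
 * of /dev/sdb1 while the scanned config found the same vdev at
 * /dev/disk/by-id/ata-EXAMPLE-part1 (both names made up). When the two
 * trees have exactly the same shape, vdev_copy_path_strict() carries
 * the newer path across one-for-one; if the shapes differ (say a
 * device was removed or replaced since), vdev_copy_path_relaxed()
 * instead makes a best-effort pass, copying paths only for the vdevs
 * it can still match up.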
4373 */ 4374 copy_error = vdev_copy_path_strict(rvd, mrvd); 4375 if (copy_error != 0 || spa_load_print_vdev_tree) { 4376 spa_load_note(spa, "provided vdev tree:"); 4377 vdev_dbgmsg_print_tree(rvd, 2); 4378 spa_load_note(spa, "MOS vdev tree:"); 4379 vdev_dbgmsg_print_tree(mrvd, 2); 4380 } 4381 if (copy_error != 0) { 4382 spa_load_note(spa, "vdev_copy_path_strict failed, falling " 4383 "back to vdev_copy_path_relaxed"); 4384 vdev_copy_path_relaxed(rvd, mrvd); 4385 } 4386 4387 vdev_close(rvd); 4388 vdev_free(rvd); 4389 spa->spa_root_vdev = mrvd; 4390 rvd = mrvd; 4391 spa_config_exit(spa, SCL_ALL, FTAG); 4392 4393 /* 4394 * If 'zpool import' used a cached config, then the on-disk hostid and 4395 * hostname may be different to the cached config in ways that should 4396 * prevent import. Userspace can't discover this without a scan, but 4397 * we know, so we add these values to LOAD_INFO so the caller can know 4398 * the difference. 4399 * 4400 * Note that we have to do this before the config is regenerated, 4401 * because the new config will have the hostid and hostname for this 4402 * host, in readiness for import. 4403 */ 4404 if (nvlist_exists(mos_config, ZPOOL_CONFIG_HOSTID)) 4405 fnvlist_add_uint64(spa->spa_load_info, ZPOOL_CONFIG_HOSTID, 4406 fnvlist_lookup_uint64(mos_config, ZPOOL_CONFIG_HOSTID)); 4407 if (nvlist_exists(mos_config, ZPOOL_CONFIG_HOSTNAME)) 4408 fnvlist_add_string(spa->spa_load_info, ZPOOL_CONFIG_HOSTNAME, 4409 fnvlist_lookup_string(mos_config, ZPOOL_CONFIG_HOSTNAME)); 4410 4411 /* 4412 * We will use spa_config if we decide to reload the spa or if spa_load 4413 * fails and we rewind. We must thus regenerate the config using the 4414 * MOS information with the updated paths. ZPOOL_LOAD_POLICY is used to 4415 * pass settings on how to load the pool and is not stored in the MOS. 4416 * We copy it over to our new, trusted config. 4417 */ 4418 mos_config_txg = fnvlist_lookup_uint64(mos_config, 4419 ZPOOL_CONFIG_POOL_TXG); 4420 nvlist_free(mos_config); 4421 mos_config = spa_config_generate(spa, NULL, mos_config_txg, B_FALSE); 4422 if (nvlist_lookup_nvlist(spa->spa_config, ZPOOL_LOAD_POLICY, 4423 &policy) == 0) 4424 fnvlist_add_nvlist(mos_config, ZPOOL_LOAD_POLICY, policy); 4425 spa_config_set(spa, mos_config); 4426 spa->spa_config_source = SPA_CONFIG_SRC_MOS; 4427 4428 /* 4429 * Now that we got the config from the MOS, we should be more strict 4430 * in checking blkptrs and can make assumptions about the consistency 4431 * of the vdev tree. spa_trust_config must be set to true before opening 4432 * vdevs in order for them to be writeable. 4433 */ 4434 spa->spa_trust_config = B_TRUE; 4435 4436 /* 4437 * Open and validate the new vdev tree 4438 */ 4439 error = spa_ld_open_vdevs(spa); 4440 if (error != 0) 4441 return (error); 4442 4443 error = spa_ld_validate_vdevs(spa); 4444 if (error != 0) 4445 return (error); 4446 4447 if (copy_error != 0 || spa_load_print_vdev_tree) { 4448 spa_load_note(spa, "final vdev tree:"); 4449 vdev_dbgmsg_print_tree(rvd, 2); 4450 } 4451 4452 if (spa->spa_load_state != SPA_LOAD_TRYIMPORT && 4453 !spa->spa_extreme_rewind && zfs_max_missing_tvds == 0) { 4454 /* 4455 * Sanity check to make sure that we are indeed loading the 4456 * latest uberblock. If we missed SPA_SYNC_MIN_VDEVS tvds 4457 * in the config provided and they happened to be the only ones 4458 * to have the latest uberblock, we could involuntarily perform 4459 * an extreme rewind. 
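 *
 * Worked example (hypothetical numbers): suppose the provided config
 * opened healthy_tvds = 5 top-level vdevs but the MOS config shows
 * healthy_tvds_mos = 8. If that difference reaches SPA_SYNC_MIN_VDEVS,
 * the uberblock we selected may predate the true latest one, so rather
 * than silently rewinding we either ask for a reload with the MOS
 * config (EAGAIN) or, if this already is the reload, fail the import.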
4460 */ 4461 healthy_tvds_mos = spa_healthy_core_tvds(spa); 4462 if (healthy_tvds_mos - healthy_tvds >= 4463 SPA_SYNC_MIN_VDEVS) { 4464 spa_load_note(spa, "config provided misses too many " 4465 "top-level vdevs compared to MOS (%lld vs %lld). ", 4466 (u_longlong_t)healthy_tvds, 4467 (u_longlong_t)healthy_tvds_mos); 4468 spa_load_note(spa, "vdev tree:"); 4469 vdev_dbgmsg_print_tree(rvd, 2); 4470 if (reloading) { 4471 spa_load_failed(spa, "config was already " 4472 "provided from MOS. Aborting."); 4473 return (spa_vdev_err(rvd, 4474 VDEV_AUX_CORRUPT_DATA, EIO)); 4475 } 4476 spa_load_note(spa, "spa must be reloaded using MOS " 4477 "config"); 4478 return (SET_ERROR(EAGAIN)); 4479 } 4480 } 4481 4482 error = spa_check_for_missing_logs(spa); 4483 if (error != 0) 4484 return (spa_vdev_err(rvd, VDEV_AUX_BAD_GUID_SUM, ENXIO)); 4485 4486 if (rvd->vdev_guid_sum != spa->spa_uberblock.ub_guid_sum) { 4487 spa_load_failed(spa, "uberblock guid sum doesn't match MOS " 4488 "guid sum (%llu != %llu)", 4489 (u_longlong_t)spa->spa_uberblock.ub_guid_sum, 4490 (u_longlong_t)rvd->vdev_guid_sum); 4491 return (spa_vdev_err(rvd, VDEV_AUX_BAD_GUID_SUM, 4492 ENXIO)); 4493 } 4494 4495 return (0); 4496 } 4497 4498 static int 4499 spa_ld_open_indirect_vdev_metadata(spa_t *spa) 4500 { 4501 int error = 0; 4502 vdev_t *rvd = spa->spa_root_vdev; 4503 4504 /* 4505 * Everything that we read before spa_remove_init() must be stored 4506 * on concreted vdevs. Therefore we do this as early as possible. 4507 */ 4508 error = spa_remove_init(spa); 4509 if (error != 0) { 4510 spa_load_failed(spa, "spa_remove_init failed [error=%d]", 4511 error); 4512 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 4513 } 4514 4515 /* 4516 * Retrieve information needed to condense indirect vdev mappings. 
4517 */ 4518 error = spa_condense_init(spa); 4519 if (error != 0) { 4520 spa_load_failed(spa, "spa_condense_init failed [error=%d]", 4521 error); 4522 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, error)); 4523 } 4524 4525 return (0); 4526 } 4527 4528 static int 4529 spa_ld_check_features(spa_t *spa, boolean_t *missing_feat_writep) 4530 { 4531 int error = 0; 4532 vdev_t *rvd = spa->spa_root_vdev; 4533 4534 if (spa_version(spa) >= SPA_VERSION_FEATURES) { 4535 boolean_t missing_feat_read = B_FALSE; 4536 nvlist_t *unsup_feat, *enabled_feat; 4537 4538 if (spa_dir_prop(spa, DMU_POOL_FEATURES_FOR_READ, 4539 &spa->spa_feat_for_read_obj, B_TRUE) != 0) { 4540 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 4541 } 4542 4543 if (spa_dir_prop(spa, DMU_POOL_FEATURES_FOR_WRITE, 4544 &spa->spa_feat_for_write_obj, B_TRUE) != 0) { 4545 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 4546 } 4547 4548 if (spa_dir_prop(spa, DMU_POOL_FEATURE_DESCRIPTIONS, 4549 &spa->spa_feat_desc_obj, B_TRUE) != 0) { 4550 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 4551 } 4552 4553 enabled_feat = fnvlist_alloc(); 4554 unsup_feat = fnvlist_alloc(); 4555 4556 if (!spa_features_check(spa, B_FALSE, 4557 unsup_feat, enabled_feat)) 4558 missing_feat_read = B_TRUE; 4559 4560 if (spa_writeable(spa) || 4561 spa->spa_load_state == SPA_LOAD_TRYIMPORT) { 4562 if (!spa_features_check(spa, B_TRUE, 4563 unsup_feat, enabled_feat)) { 4564 *missing_feat_writep = B_TRUE; 4565 } 4566 } 4567 4568 fnvlist_add_nvlist(spa->spa_load_info, 4569 ZPOOL_CONFIG_ENABLED_FEAT, enabled_feat); 4570 4571 if (!nvlist_empty(unsup_feat)) { 4572 fnvlist_add_nvlist(spa->spa_load_info, 4573 ZPOOL_CONFIG_UNSUP_FEAT, unsup_feat); 4574 } 4575 4576 fnvlist_free(enabled_feat); 4577 fnvlist_free(unsup_feat); 4578 4579 if (!missing_feat_read) { 4580 fnvlist_add_boolean(spa->spa_load_info, 4581 ZPOOL_CONFIG_CAN_RDONLY); 4582 } 4583 4584 /* 4585 * If the state is SPA_LOAD_TRYIMPORT, our objective is 4586 * twofold: to determine whether the pool is available for 4587 * import in read-write mode and (if it is not) whether the 4588 * pool is available for import in read-only mode. If the pool 4589 * is available for import in read-write mode, it is displayed 4590 * as available in userland; if it is not available for import 4591 * in read-only mode, it is displayed as unavailable in 4592 * userland. If the pool is available for import in read-only 4593 * mode but not read-write mode, it is displayed as unavailable 4594 * in userland with a special note that the pool is actually 4595 * available for open in read-only mode. 4596 * 4597 * As a result, if the state is SPA_LOAD_TRYIMPORT and we are 4598 * missing a feature for write, we must first determine whether 4599 * the pool can be opened read-only before returning to 4600 * userland in order to know whether to display the 4601 * abovementioned note. 4602 */ 4603 if (missing_feat_read || (*missing_feat_writep && 4604 spa_writeable(spa))) { 4605 spa_load_failed(spa, "pool uses unsupported features"); 4606 return (spa_vdev_err(rvd, VDEV_AUX_UNSUP_FEAT, 4607 ENOTSUP)); 4608 } 4609 4610 /* 4611 * Load refcounts for ZFS features from disk into an in-memory 4612 * cache during SPA initialization. 
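 *
 * The loop below fills spa_feat_refcount_cache[] roughly as follows:
 *
 *	refcount found on disk	-> cache it (nonzero means the feature
 *				   is in active use on this pool)
 *	ENOTSUP			-> SPA_FEATURE_DISABLED (the pool has no
 *				   refcount object for this feature)
 *	any other error		-> treat the MOS as corrupt and fail
 *				   the load with EIO.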
4613 */ 4614 for (spa_feature_t i = 0; i < SPA_FEATURES; i++) { 4615 uint64_t refcount; 4616 4617 error = feature_get_refcount_from_disk(spa, 4618 &spa_feature_table[i], &refcount); 4619 if (error == 0) { 4620 spa->spa_feat_refcount_cache[i] = refcount; 4621 } else if (error == ENOTSUP) { 4622 spa->spa_feat_refcount_cache[i] = 4623 SPA_FEATURE_DISABLED; 4624 } else { 4625 spa_load_failed(spa, "error getting refcount " 4626 "for feature %s [error=%d]", 4627 spa_feature_table[i].fi_guid, error); 4628 return (spa_vdev_err(rvd, 4629 VDEV_AUX_CORRUPT_DATA, EIO)); 4630 } 4631 } 4632 } 4633 4634 if (spa_feature_is_active(spa, SPA_FEATURE_ENABLED_TXG)) { 4635 if (spa_dir_prop(spa, DMU_POOL_FEATURE_ENABLED_TXG, 4636 &spa->spa_feat_enabled_txg_obj, B_TRUE) != 0) 4637 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 4638 } 4639 4640 /* 4641 * Encryption was added before bookmark_v2, even though bookmark_v2 4642 * is now a dependency. If this pool has encryption enabled without 4643 * bookmark_v2, trigger an errata message. 4644 */ 4645 if (spa_feature_is_enabled(spa, SPA_FEATURE_ENCRYPTION) && 4646 !spa_feature_is_enabled(spa, SPA_FEATURE_BOOKMARK_V2)) { 4647 spa->spa_errata = ZPOOL_ERRATA_ZOL_8308_ENCRYPTION; 4648 } 4649 4650 return (0); 4651 } 4652 4653 static int 4654 spa_ld_load_special_directories(spa_t *spa) 4655 { 4656 int error = 0; 4657 vdev_t *rvd = spa->spa_root_vdev; 4658 4659 spa->spa_is_initializing = B_TRUE; 4660 error = dsl_pool_open(spa->spa_dsl_pool); 4661 spa->spa_is_initializing = B_FALSE; 4662 if (error != 0) { 4663 spa_load_failed(spa, "dsl_pool_open failed [error=%d]", error); 4664 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 4665 } 4666 4667 return (0); 4668 } 4669 4670 static int 4671 spa_ld_get_props(spa_t *spa) 4672 { 4673 int error = 0; 4674 uint64_t obj; 4675 vdev_t *rvd = spa->spa_root_vdev; 4676 4677 /* Grab the checksum salt from the MOS. */ 4678 error = zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT, 4679 DMU_POOL_CHECKSUM_SALT, 1, 4680 sizeof (spa->spa_cksum_salt.zcs_bytes), 4681 spa->spa_cksum_salt.zcs_bytes); 4682 if (error == ENOENT) { 4683 /* Generate a new salt for subsequent use */ 4684 (void) random_get_pseudo_bytes(spa->spa_cksum_salt.zcs_bytes, 4685 sizeof (spa->spa_cksum_salt.zcs_bytes)); 4686 } else if (error != 0) { 4687 spa_load_failed(spa, "unable to retrieve checksum salt from " 4688 "MOS [error=%d]", error); 4689 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 4690 } 4691 4692 if (spa_dir_prop(spa, DMU_POOL_SYNC_BPOBJ, &obj, B_TRUE) != 0) 4693 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 4694 error = bpobj_open(&spa->spa_deferred_bpobj, spa->spa_meta_objset, obj); 4695 if (error != 0) { 4696 spa_load_failed(spa, "error opening deferred-frees bpobj " 4697 "[error=%d]", error); 4698 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 4699 } 4700 4701 /* 4702 * Load the bit that tells us to use the new accounting function 4703 * (raid-z deflation). If we have an older pool, this will not 4704 * be present. 4705 */ 4706 error = spa_dir_prop(spa, DMU_POOL_DEFLATE, &spa->spa_deflate, B_FALSE); 4707 if (error != 0 && error != ENOENT) 4708 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 4709 4710 error = spa_dir_prop(spa, DMU_POOL_CREATION_VERSION, 4711 &spa->spa_creation_version, B_FALSE); 4712 if (error != 0 && error != ENOENT) 4713 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 4714 4715 /* 4716 * Load the persistent error log. If we have an older pool, this will 4717 * not be present. 
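 *
 * Like most optional MOS directory entries loaded in this function,
 * the lookups below tolerate ENOENT (older pools simply lack the
 * entry) but treat any other error as corruption. The recurring
 * pattern is:
 *
 *	error = spa_dir_prop(spa, <DMU_POOL_* key>, &<field>, B_FALSE);
 *	if (error != 0 && error != ENOENT)
 *		return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));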
4718 */ 4719 error = spa_dir_prop(spa, DMU_POOL_ERRLOG_LAST, &spa->spa_errlog_last, 4720 B_FALSE); 4721 if (error != 0 && error != ENOENT) 4722 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 4723 4724 error = spa_dir_prop(spa, DMU_POOL_ERRLOG_SCRUB, 4725 &spa->spa_errlog_scrub, B_FALSE); 4726 if (error != 0 && error != ENOENT) 4727 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 4728 4729 /* 4730 * Load the livelist deletion field. If a livelist is queued for 4731 * deletion, indicate that in the spa 4732 */ 4733 error = spa_dir_prop(spa, DMU_POOL_DELETED_CLONES, 4734 &spa->spa_livelists_to_delete, B_FALSE); 4735 if (error != 0 && error != ENOENT) 4736 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 4737 4738 /* 4739 * Load the history object. If we have an older pool, this 4740 * will not be present. 4741 */ 4742 error = spa_dir_prop(spa, DMU_POOL_HISTORY, &spa->spa_history, B_FALSE); 4743 if (error != 0 && error != ENOENT) 4744 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 4745 4746 /* 4747 * Load the per-vdev ZAP map. If we have an older pool, this will not 4748 * be present; in this case, defer its creation to a later time to 4749 * avoid dirtying the MOS this early / out of sync context. See 4750 * spa_sync_config_object. 4751 */ 4752 4753 /* The sentinel is only available in the MOS config. */ 4754 nvlist_t *mos_config; 4755 if (load_nvlist(spa, spa->spa_config_object, &mos_config) != 0) { 4756 spa_load_failed(spa, "unable to retrieve MOS config"); 4757 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 4758 } 4759 4760 error = spa_dir_prop(spa, DMU_POOL_VDEV_ZAP_MAP, 4761 &spa->spa_all_vdev_zaps, B_FALSE); 4762 4763 if (error == ENOENT) { 4764 VERIFY(!nvlist_exists(mos_config, 4765 ZPOOL_CONFIG_HAS_PER_VDEV_ZAPS)); 4766 spa->spa_avz_action = AVZ_ACTION_INITIALIZE; 4767 ASSERT0(vdev_count_verify_zaps(spa->spa_root_vdev)); 4768 } else if (error != 0) { 4769 nvlist_free(mos_config); 4770 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 4771 } else if (!nvlist_exists(mos_config, ZPOOL_CONFIG_HAS_PER_VDEV_ZAPS)) { 4772 /* 4773 * An older version of ZFS overwrote the sentinel value, so 4774 * we have orphaned per-vdev ZAPs in the MOS. Defer their 4775 * destruction to later; see spa_sync_config_object. 4776 */ 4777 spa->spa_avz_action = AVZ_ACTION_DESTROY; 4778 /* 4779 * We're assuming that no vdevs have had their ZAPs created 4780 * before this. Better be sure of it. 
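 *
 * To summarize the three outcomes of this block:
 *
 *	DMU_POOL_VDEV_ZAP_MAP absent (ENOENT)
 *		-> AVZ_ACTION_INITIALIZE: create the map later, in sync
 *		   context (see spa_sync_config_object)
 *	map present, ZPOOL_CONFIG_HAS_PER_VDEV_ZAPS sentinel missing
 *		-> AVZ_ACTION_DESTROY: the per-vdev ZAPs are orphaned;
 *		   the map is destroyed and rebuilt later
 *	map and sentinel both present
 *		-> the normal case, nothing deferred.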
4781 */ 4782 ASSERT0(vdev_count_verify_zaps(spa->spa_root_vdev)); 4783 } 4784 nvlist_free(mos_config); 4785 4786 spa->spa_delegation = zpool_prop_default_numeric(ZPOOL_PROP_DELEGATION); 4787 4788 error = spa_dir_prop(spa, DMU_POOL_PROPS, &spa->spa_pool_props_object, 4789 B_FALSE); 4790 if (error && error != ENOENT) 4791 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 4792 4793 if (error == 0) { 4794 uint64_t autoreplace = 0; 4795 4796 spa_prop_find(spa, ZPOOL_PROP_BOOTFS, &spa->spa_bootfs); 4797 spa_prop_find(spa, ZPOOL_PROP_AUTOREPLACE, &autoreplace); 4798 spa_prop_find(spa, ZPOOL_PROP_DELEGATION, &spa->spa_delegation); 4799 spa_prop_find(spa, ZPOOL_PROP_FAILUREMODE, &spa->spa_failmode); 4800 spa_prop_find(spa, ZPOOL_PROP_AUTOEXPAND, &spa->spa_autoexpand); 4801 spa_prop_find(spa, ZPOOL_PROP_DEDUP_TABLE_QUOTA, 4802 &spa->spa_dedup_table_quota); 4803 spa_prop_find(spa, ZPOOL_PROP_MULTIHOST, &spa->spa_multihost); 4804 spa_prop_find(spa, ZPOOL_PROP_AUTOTRIM, &spa->spa_autotrim); 4805 spa->spa_autoreplace = (autoreplace != 0); 4806 } 4807 4808 /* 4809 * If we are importing a pool with missing top-level vdevs, 4810 * we enforce that the pool doesn't panic or get suspended on 4811 * error since the likelihood of missing data is extremely high. 4812 */ 4813 if (spa->spa_missing_tvds > 0 && 4814 spa->spa_failmode != ZIO_FAILURE_MODE_CONTINUE && 4815 spa->spa_load_state != SPA_LOAD_TRYIMPORT) { 4816 spa_load_note(spa, "forcing failmode to 'continue' " 4817 "as some top level vdevs are missing"); 4818 spa->spa_failmode = ZIO_FAILURE_MODE_CONTINUE; 4819 } 4820 4821 return (0); 4822 } 4823 4824 static int 4825 spa_ld_open_aux_vdevs(spa_t *spa, spa_import_type_t type) 4826 { 4827 int error = 0; 4828 vdev_t *rvd = spa->spa_root_vdev; 4829 4830 /* 4831 * If we're assembling the pool from the split-off vdevs of 4832 * an existing pool, we don't want to attach the spares & cache 4833 * devices. 4834 */ 4835 4836 /* 4837 * Load any hot spares for this pool. 4838 */ 4839 error = spa_dir_prop(spa, DMU_POOL_SPARES, &spa->spa_spares.sav_object, 4840 B_FALSE); 4841 if (error != 0 && error != ENOENT) 4842 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 4843 if (error == 0 && type != SPA_IMPORT_ASSEMBLE) { 4844 ASSERT(spa_version(spa) >= SPA_VERSION_SPARES); 4845 if (load_nvlist(spa, spa->spa_spares.sav_object, 4846 &spa->spa_spares.sav_config) != 0) { 4847 spa_load_failed(spa, "error loading spares nvlist"); 4848 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 4849 } 4850 4851 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 4852 spa_load_spares(spa); 4853 spa_config_exit(spa, SCL_ALL, FTAG); 4854 } else if (error == 0) { 4855 spa->spa_spares.sav_sync = B_TRUE; 4856 } 4857 4858 /* 4859 * Load any level 2 ARC devices for this pool. 
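 *
 * This mirrors the spares handling above; sketched:
 *
 *	spa_dir_prop(spa, DMU_POOL_L2CACHE, &sav_object, B_FALSE)
 *	load_nvlist(spa, sav_object, &sav_config)
 *	spa_load_l2cache(spa)		under SCL_ALL as writer
 *
 * with the SPA_IMPORT_ASSEMBLE case only marking sav_sync so the
 * devices are written out on a later sync instead of being attached
 * here.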
4860 */ 4861 error = spa_dir_prop(spa, DMU_POOL_L2CACHE, 4862 &spa->spa_l2cache.sav_object, B_FALSE); 4863 if (error != 0 && error != ENOENT) 4864 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 4865 if (error == 0 && type != SPA_IMPORT_ASSEMBLE) { 4866 ASSERT(spa_version(spa) >= SPA_VERSION_L2CACHE); 4867 if (load_nvlist(spa, spa->spa_l2cache.sav_object, 4868 &spa->spa_l2cache.sav_config) != 0) { 4869 spa_load_failed(spa, "error loading l2cache nvlist"); 4870 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 4871 } 4872 4873 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 4874 spa_load_l2cache(spa); 4875 spa_config_exit(spa, SCL_ALL, FTAG); 4876 } else if (error == 0) { 4877 spa->spa_l2cache.sav_sync = B_TRUE; 4878 } 4879 4880 return (0); 4881 } 4882 4883 static int 4884 spa_ld_load_vdev_metadata(spa_t *spa) 4885 { 4886 int error = 0; 4887 vdev_t *rvd = spa->spa_root_vdev; 4888 4889 /* 4890 * If the 'multihost' property is set, then never allow a pool to 4891 * be imported when the system hostid is zero. The exception to 4892 * this rule is zdb which is always allowed to access pools. 4893 */ 4894 if (spa_multihost(spa) && spa_get_hostid(spa) == 0 && 4895 (spa->spa_import_flags & ZFS_IMPORT_SKIP_MMP) == 0) { 4896 fnvlist_add_uint64(spa->spa_load_info, 4897 ZPOOL_CONFIG_MMP_STATE, MMP_STATE_NO_HOSTID); 4898 return (spa_vdev_err(rvd, VDEV_AUX_ACTIVE, EREMOTEIO)); 4899 } 4900 4901 /* 4902 * If the 'autoreplace' property is set, then post a resource notifying 4903 * the ZFS DE that it should not issue any faults for unopenable 4904 * devices. We also iterate over the vdevs, and post a sysevent for any 4905 * unopenable vdevs so that the normal autoreplace handler can take 4906 * over. 4907 */ 4908 if (spa->spa_autoreplace && spa->spa_load_state != SPA_LOAD_TRYIMPORT) { 4909 spa_check_removed(spa->spa_root_vdev); 4910 /* 4911 * For the import case, this is done in spa_import(), because 4912 * at this point we're using the spare definitions from 4913 * the MOS config, not necessarily from the userland config. 4914 */ 4915 if (spa->spa_load_state != SPA_LOAD_IMPORT) { 4916 spa_aux_check_removed(&spa->spa_spares); 4917 spa_aux_check_removed(&spa->spa_l2cache); 4918 } 4919 } 4920 4921 /* 4922 * Load the vdev metadata such as metaslabs, DTLs, spacemap object, etc. 4923 */ 4924 error = vdev_load(rvd); 4925 if (error != 0) { 4926 spa_load_failed(spa, "vdev_load failed [error=%d]", error); 4927 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, error)); 4928 } 4929 4930 error = spa_ld_log_spacemaps(spa); 4931 if (error != 0) { 4932 spa_load_failed(spa, "spa_ld_log_spacemaps failed [error=%d]", 4933 error); 4934 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, error)); 4935 } 4936 4937 /* 4938 * Propagate the leaf DTLs we just loaded all the way up the vdev tree. 
4939 */ 4940 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 4941 vdev_dtl_reassess(rvd, 0, 0, B_FALSE, B_FALSE); 4942 spa_config_exit(spa, SCL_ALL, FTAG); 4943 4944 return (0); 4945 } 4946 4947 static int 4948 spa_ld_load_dedup_tables(spa_t *spa) 4949 { 4950 int error = 0; 4951 vdev_t *rvd = spa->spa_root_vdev; 4952 4953 error = ddt_load(spa); 4954 if (error != 0) { 4955 spa_load_failed(spa, "ddt_load failed [error=%d]", error); 4956 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 4957 } 4958 4959 return (0); 4960 } 4961 4962 static int 4963 spa_ld_load_brt(spa_t *spa) 4964 { 4965 int error = 0; 4966 vdev_t *rvd = spa->spa_root_vdev; 4967 4968 error = brt_load(spa); 4969 if (error != 0) { 4970 spa_load_failed(spa, "brt_load failed [error=%d]", error); 4971 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 4972 } 4973 4974 return (0); 4975 } 4976 4977 static int 4978 spa_ld_verify_logs(spa_t *spa, spa_import_type_t type, const char **ereport) 4979 { 4980 vdev_t *rvd = spa->spa_root_vdev; 4981 4982 if (type != SPA_IMPORT_ASSEMBLE && spa_writeable(spa)) { 4983 boolean_t missing = spa_check_logs(spa); 4984 if (missing) { 4985 if (spa->spa_missing_tvds != 0) { 4986 spa_load_note(spa, "spa_check_logs failed " 4987 "so dropping the logs"); 4988 } else { 4989 *ereport = FM_EREPORT_ZFS_LOG_REPLAY; 4990 spa_load_failed(spa, "spa_check_logs failed"); 4991 return (spa_vdev_err(rvd, VDEV_AUX_BAD_LOG, 4992 ENXIO)); 4993 } 4994 } 4995 } 4996 4997 return (0); 4998 } 4999 5000 static int 5001 spa_ld_verify_pool_data(spa_t *spa) 5002 { 5003 int error = 0; 5004 vdev_t *rvd = spa->spa_root_vdev; 5005 5006 /* 5007 * We've successfully opened the pool, verify that we're ready 5008 * to start pushing transactions. 5009 */ 5010 if (spa->spa_load_state != SPA_LOAD_TRYIMPORT) { 5011 error = spa_load_verify(spa); 5012 if (error != 0) { 5013 spa_load_failed(spa, "spa_load_verify failed " 5014 "[error=%d]", error); 5015 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, 5016 error)); 5017 } 5018 } 5019 5020 return (0); 5021 } 5022 5023 static void 5024 spa_ld_claim_log_blocks(spa_t *spa) 5025 { 5026 dmu_tx_t *tx; 5027 dsl_pool_t *dp = spa_get_dsl(spa); 5028 5029 /* 5030 * Claim log blocks that haven't been committed yet. 5031 * This must all happen in a single txg. 5032 * Note: spa_claim_max_txg is updated by spa_claim_notify(), 5033 * invoked from zil_claim_log_block()'s i/o done callback. 5034 * Price of rollback is that we abandon the log. 5035 */ 5036 spa->spa_claiming = B_TRUE; 5037 5038 tx = dmu_tx_create_assigned(dp, spa_first_txg(spa)); 5039 (void) dmu_objset_find_dp(dp, dp->dp_root_dir_obj, 5040 zil_claim, tx, DS_FIND_CHILDREN); 5041 dmu_tx_commit(tx); 5042 5043 spa->spa_claiming = B_FALSE; 5044 5045 spa_set_log_state(spa, SPA_LOG_GOOD); 5046 } 5047 5048 static void 5049 spa_ld_check_for_config_update(spa_t *spa, uint64_t config_cache_txg, 5050 boolean_t update_config_cache) 5051 { 5052 vdev_t *rvd = spa->spa_root_vdev; 5053 int need_update = B_FALSE; 5054 5055 /* 5056 * If the config cache is stale, or we have uninitialized 5057 * metaslabs (see spa_vdev_add()), then update the config. 5058 * 5059 * If this is a verbatim import, trust the current 5060 * in-core spa_config and update the disk labels. 
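 *
 * Concretely (hypothetical example): a pool whose cachefile-recorded
 * txg no longer matches spa_config_txg, or one containing a freshly
 * added top-level vdev whose vdev_ms_array is still 0, sets
 * need_update below and requests SPA_ASYNC_CONFIG_UPDATE rather than
 * rewriting the cache synchronously, since for the root pool the
 * cache may not be writable yet.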
5061 */ 5062 if (update_config_cache || config_cache_txg != spa->spa_config_txg || 5063 spa->spa_load_state == SPA_LOAD_IMPORT || 5064 spa->spa_load_state == SPA_LOAD_RECOVER || 5065 (spa->spa_import_flags & ZFS_IMPORT_VERBATIM)) 5066 need_update = B_TRUE; 5067 5068 for (int c = 0; c < rvd->vdev_children; c++) 5069 if (rvd->vdev_child[c]->vdev_ms_array == 0) 5070 need_update = B_TRUE; 5071 5072 /* 5073 * Update the config cache asynchronously in case we're the 5074 * root pool, in which case the config cache isn't writable yet. 5075 */ 5076 if (need_update) 5077 spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE); 5078 } 5079 5080 static void 5081 spa_ld_prepare_for_reload(spa_t *spa) 5082 { 5083 spa_mode_t mode = spa->spa_mode; 5084 int async_suspended = spa->spa_async_suspended; 5085 5086 spa_unload(spa); 5087 spa_deactivate(spa); 5088 spa_activate(spa, mode); 5089 5090 /* 5091 * We save the value of spa_async_suspended as it gets reset to 0 by 5092 * spa_unload(). We want to restore it back to the original value before 5093 * returning as we might be calling spa_async_resume() later. 5094 */ 5095 spa->spa_async_suspended = async_suspended; 5096 } 5097 5098 static int 5099 spa_ld_read_checkpoint_txg(spa_t *spa) 5100 { 5101 uberblock_t checkpoint; 5102 int error = 0; 5103 5104 ASSERT0(spa->spa_checkpoint_txg); 5105 ASSERT(MUTEX_HELD(&spa_namespace_lock) || 5106 spa->spa_load_thread == curthread); 5107 5108 error = zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT, 5109 DMU_POOL_ZPOOL_CHECKPOINT, sizeof (uint64_t), 5110 sizeof (uberblock_t) / sizeof (uint64_t), &checkpoint); 5111 5112 if (error == ENOENT) 5113 return (0); 5114 5115 if (error != 0) 5116 return (error); 5117 5118 ASSERT3U(checkpoint.ub_txg, !=, 0); 5119 ASSERT3U(checkpoint.ub_checkpoint_txg, !=, 0); 5120 ASSERT3U(checkpoint.ub_timestamp, !=, 0); 5121 spa->spa_checkpoint_txg = checkpoint.ub_txg; 5122 spa->spa_checkpoint_info.sci_timestamp = checkpoint.ub_timestamp; 5123 5124 return (0); 5125 } 5126 5127 static int 5128 spa_ld_mos_init(spa_t *spa, spa_import_type_t type) 5129 { 5130 int error = 0; 5131 5132 ASSERT(MUTEX_HELD(&spa_namespace_lock)); 5133 ASSERT(spa->spa_config_source != SPA_CONFIG_SRC_NONE); 5134 5135 /* 5136 * Never trust the config that is provided unless we are assembling 5137 * a pool following a split. 5138 * This means don't trust blkptrs and the vdev tree in general. This 5139 * also effectively puts the spa in read-only mode since 5140 * spa_writeable() checks for spa_trust_config to be true. 5141 * We will later load a trusted config from the MOS. 5142 */ 5143 if (type != SPA_IMPORT_ASSEMBLE) 5144 spa->spa_trust_config = B_FALSE; 5145 5146 /* 5147 * Parse the config provided to create a vdev tree. 5148 */ 5149 error = spa_ld_parse_config(spa, type); 5150 if (error != 0) 5151 return (error); 5152 5153 spa_import_progress_add(spa); 5154 5155 /* 5156 * Now that we have the vdev tree, try to open each vdev. This involves 5157 * opening the underlying physical device, retrieving its geometry and 5158 * probing the vdev with a dummy I/O. The state of each vdev will be set 5159 * based on the success of those operations. After this we'll be ready 5160 * to read from the vdevs. 5161 */ 5162 error = spa_ld_open_vdevs(spa); 5163 if (error != 0) 5164 return (error); 5165 5166 /* 5167 * Read the label of each vdev and make sure that the GUIDs stored 5168 * there match the GUIDs in the config provided. 
5169 * If we're assembling a new pool that's been split off from an 5170 * existing pool, the labels haven't yet been updated so we skip 5171 * validation for now. 5172 */ 5173 if (type != SPA_IMPORT_ASSEMBLE) { 5174 error = spa_ld_validate_vdevs(spa); 5175 if (error != 0) 5176 return (error); 5177 } 5178 5179 /* 5180 * Read all vdev labels to find the best uberblock (i.e. latest, 5181 * unless spa_load_max_txg is set) and store it in spa_uberblock. We 5182 * get the list of features required to read blkptrs in the MOS from 5183 * the vdev label with the best uberblock and verify that our version 5184 * of zfs supports them all. 5185 */ 5186 error = spa_ld_select_uberblock(spa, type); 5187 if (error != 0) 5188 return (error); 5189 5190 /* 5191 * Pass that uberblock to the dsl_pool layer which will open the root 5192 * blkptr. This blkptr points to the latest version of the MOS and will 5193 * allow us to read its contents. 5194 */ 5195 error = spa_ld_open_rootbp(spa); 5196 if (error != 0) 5197 return (error); 5198 5199 return (0); 5200 } 5201 5202 static int 5203 spa_ld_checkpoint_rewind(spa_t *spa) 5204 { 5205 uberblock_t checkpoint; 5206 int error = 0; 5207 5208 ASSERT(MUTEX_HELD(&spa_namespace_lock)); 5209 ASSERT(spa->spa_import_flags & ZFS_IMPORT_CHECKPOINT); 5210 5211 error = zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT, 5212 DMU_POOL_ZPOOL_CHECKPOINT, sizeof (uint64_t), 5213 sizeof (uberblock_t) / sizeof (uint64_t), &checkpoint); 5214 5215 if (error != 0) { 5216 spa_load_failed(spa, "unable to retrieve checkpointed " 5217 "uberblock from the MOS config [error=%d]", error); 5218 5219 if (error == ENOENT) 5220 error = ZFS_ERR_NO_CHECKPOINT; 5221 5222 return (error); 5223 } 5224 5225 ASSERT3U(checkpoint.ub_txg, <, spa->spa_uberblock.ub_txg); 5226 ASSERT3U(checkpoint.ub_txg, ==, checkpoint.ub_checkpoint_txg); 5227 5228 /* 5229 * We need to update the txg and timestamp of the checkpointed 5230 * uberblock to be higher than the latest one. This ensures that 5231 * the checkpointed uberblock is selected if we were to close and 5232 * reopen the pool right after we've written it in the vdev labels. 5233 * (also see block comment in vdev_uberblock_compare) 5234 */ 5235 checkpoint.ub_txg = spa->spa_uberblock.ub_txg + 1; 5236 checkpoint.ub_timestamp = gethrestime_sec(); 5237 5238 /* 5239 * Set current uberblock to be the checkpointed uberblock. 5240 */ 5241 spa->spa_uberblock = checkpoint; 5242 5243 /* 5244 * If we are doing a normal rewind, then the pool is open for 5245 * writing and we sync the "updated" checkpointed uberblock to 5246 * disk. Once this is done, we've basically rewound the whole 5247 * pool and there is no way back. 5248 * 5249 * There are cases when we don't want to attempt and sync the 5250 * checkpointed uberblock to disk because we are opening a 5251 * pool as read-only. Specifically, verifying the checkpointed 5252 * state with zdb, and importing the checkpointed state to get 5253 * a "preview" of its content. 
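 *
 * The txg/timestamp bump above means that, for example, a pool whose
 * latest uberblock is txg 1000 and whose checkpoint was taken at txg
 * 900 will, after a writable rewind, carry the checkpointed uberblock
 * as txg 1001 with a current timestamp, so it wins the uberblock
 * election on the next open (numbers hypothetical).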
5254 */ 5255 if (spa_writeable(spa)) { 5256 vdev_t *rvd = spa->spa_root_vdev; 5257 5258 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 5259 vdev_t *svd[SPA_SYNC_MIN_VDEVS] = { NULL }; 5260 int svdcount = 0; 5261 int children = rvd->vdev_children; 5262 int c0 = random_in_range(children); 5263 5264 for (int c = 0; c < children; c++) { 5265 vdev_t *vd = rvd->vdev_child[(c0 + c) % children]; 5266 5267 /* Stop when revisiting the first vdev */ 5268 if (c > 0 && svd[0] == vd) 5269 break; 5270 5271 if (vd->vdev_ms_array == 0 || vd->vdev_islog || 5272 !vdev_is_concrete(vd)) 5273 continue; 5274 5275 svd[svdcount++] = vd; 5276 if (svdcount == SPA_SYNC_MIN_VDEVS) 5277 break; 5278 } 5279 error = vdev_config_sync(svd, svdcount, spa->spa_first_txg); 5280 if (error == 0) 5281 spa->spa_last_synced_guid = rvd->vdev_guid; 5282 spa_config_exit(spa, SCL_ALL, FTAG); 5283 5284 if (error != 0) { 5285 spa_load_failed(spa, "failed to write checkpointed " 5286 "uberblock to the vdev labels [error=%d]", error); 5287 return (error); 5288 } 5289 } 5290 5291 return (0); 5292 } 5293 5294 static int 5295 spa_ld_mos_with_trusted_config(spa_t *spa, spa_import_type_t type, 5296 boolean_t *update_config_cache) 5297 { 5298 int error; 5299 5300 /* 5301 * Parse the config for pool, open and validate vdevs, 5302 * select an uberblock, and use that uberblock to open 5303 * the MOS. 5304 */ 5305 error = spa_ld_mos_init(spa, type); 5306 if (error != 0) 5307 return (error); 5308 5309 /* 5310 * Retrieve the trusted config stored in the MOS and use it to create 5311 * a new, exact version of the vdev tree, then reopen all vdevs. 5312 */ 5313 error = spa_ld_trusted_config(spa, type, B_FALSE); 5314 if (error == EAGAIN) { 5315 if (update_config_cache != NULL) 5316 *update_config_cache = B_TRUE; 5317 5318 /* 5319 * Redo the loading process with the trusted config if it is 5320 * too different from the untrusted config. 5321 */ 5322 spa_ld_prepare_for_reload(spa); 5323 spa_load_note(spa, "RELOADING"); 5324 error = spa_ld_mos_init(spa, type); 5325 if (error != 0) 5326 return (error); 5327 5328 error = spa_ld_trusted_config(spa, type, B_TRUE); 5329 if (error != 0) 5330 return (error); 5331 5332 } else if (error != 0) { 5333 return (error); 5334 } 5335 5336 return (0); 5337 } 5338 5339 /* 5340 * Load an existing storage pool, using the config provided. This config 5341 * describes which vdevs are part of the pool and is later validated against 5342 * partial configs present in each vdev's label and an entire copy of the 5343 * config stored in the MOS. 5344 */ 5345 static int 5346 spa_load_impl(spa_t *spa, spa_import_type_t type, const char **ereport) 5347 { 5348 int error = 0; 5349 boolean_t missing_feat_write = B_FALSE; 5350 boolean_t checkpoint_rewind = 5351 (spa->spa_import_flags & ZFS_IMPORT_CHECKPOINT); 5352 boolean_t update_config_cache = B_FALSE; 5353 hrtime_t load_start = gethrtime(); 5354 5355 ASSERT(MUTEX_HELD(&spa_namespace_lock)); 5356 ASSERT(spa->spa_config_source != SPA_CONFIG_SRC_NONE); 5357 5358 spa_load_note(spa, "LOADING"); 5359 5360 error = spa_ld_mos_with_trusted_config(spa, type, &update_config_cache); 5361 if (error != 0) 5362 return (error); 5363 5364 /* 5365 * If we are rewinding to the checkpoint then we need to repeat 5366 * everything we've done so far in this function but this time 5367 * selecting the checkpointed uberblock and using that to open 5368 * the MOS. 5369 */ 5370 if (checkpoint_rewind) { 5371 /* 5372 * If we are rewinding to the checkpoint update config cache 5373 * anyway. 
5374 */ 5375 update_config_cache = B_TRUE; 5376 5377 /* 5378 * Extract the checkpointed uberblock from the current MOS 5379 * and use this as the pool's uberblock from now on. If the 5380 * pool is imported as writeable we also write the checkpoint 5381 * uberblock to the labels, making the rewind permanent. 5382 */ 5383 error = spa_ld_checkpoint_rewind(spa); 5384 if (error != 0) 5385 return (error); 5386 5387 /* 5388 * Redo the loading process again with the 5389 * checkpointed uberblock. 5390 */ 5391 spa_ld_prepare_for_reload(spa); 5392 spa_load_note(spa, "LOADING checkpointed uberblock"); 5393 error = spa_ld_mos_with_trusted_config(spa, type, NULL); 5394 if (error != 0) 5395 return (error); 5396 } 5397 5398 /* 5399 * Drop the namespace lock for the rest of the function. 5400 */ 5401 spa->spa_load_thread = curthread; 5402 mutex_exit(&spa_namespace_lock); 5403 5404 /* 5405 * Retrieve the checkpoint txg if the pool has a checkpoint. 5406 */ 5407 spa_import_progress_set_notes(spa, "Loading checkpoint txg"); 5408 error = spa_ld_read_checkpoint_txg(spa); 5409 if (error != 0) 5410 goto fail; 5411 5412 /* 5413 * Retrieve the mapping of indirect vdevs. Those vdevs were removed 5414 * from the pool and their contents were re-mapped to other vdevs. Note 5415 * that everything that we read before this step must have been 5416 * rewritten on concrete vdevs after the last device removal was 5417 * initiated. Otherwise we could be reading from indirect vdevs before 5418 * we have loaded their mappings. 5419 */ 5420 spa_import_progress_set_notes(spa, "Loading indirect vdev metadata"); 5421 error = spa_ld_open_indirect_vdev_metadata(spa); 5422 if (error != 0) 5423 goto fail; 5424 5425 /* 5426 * Retrieve the full list of active features from the MOS and check if 5427 * they are all supported. 5428 */ 5429 spa_import_progress_set_notes(spa, "Checking feature flags"); 5430 error = spa_ld_check_features(spa, &missing_feat_write); 5431 if (error != 0) 5432 goto fail; 5433 5434 /* 5435 * Load several special directories from the MOS needed by the dsl_pool 5436 * layer. 5437 */ 5438 spa_import_progress_set_notes(spa, "Loading special MOS directories"); 5439 error = spa_ld_load_special_directories(spa); 5440 if (error != 0) 5441 goto fail; 5442 5443 /* 5444 * Retrieve pool properties from the MOS. 5445 */ 5446 spa_import_progress_set_notes(spa, "Loading properties"); 5447 error = spa_ld_get_props(spa); 5448 if (error != 0) 5449 goto fail; 5450 5451 /* 5452 * Retrieve the list of auxiliary devices - cache devices and spares - 5453 * and open them. 5454 */ 5455 spa_import_progress_set_notes(spa, "Loading AUX vdevs"); 5456 error = spa_ld_open_aux_vdevs(spa, type); 5457 if (error != 0) 5458 goto fail; 5459 5460 /* 5461 * Load the metadata for all vdevs. Also check if unopenable devices 5462 * should be autoreplaced. 5463 */ 5464 spa_import_progress_set_notes(spa, "Loading vdev metadata"); 5465 error = spa_ld_load_vdev_metadata(spa); 5466 if (error != 0) 5467 goto fail; 5468 5469 spa_import_progress_set_notes(spa, "Loading dedup tables"); 5470 error = spa_ld_load_dedup_tables(spa); 5471 if (error != 0) 5472 goto fail; 5473 5474 spa_import_progress_set_notes(spa, "Loading BRT"); 5475 error = spa_ld_load_brt(spa); 5476 if (error != 0) 5477 goto fail; 5478 5479 /* 5480 * Verify the logs now to make sure we don't have any unexpected errors 5481 * when we claim log blocks later. 
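 *
 * The claiming itself happens further down, in
 * spa_ld_claim_log_blocks(), roughly:
 *
 *	tx = dmu_tx_create_assigned(dp, spa_first_txg(spa));
 *	(void) dmu_objset_find_dp(dp, dp->dp_root_dir_obj, zil_claim,
 *	    tx, DS_FIND_CHILDREN);
 *	dmu_tx_commit(tx);
 *
 * so any log problem is surfaced here, before the pool starts writing.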
5482 */ 5483 spa_import_progress_set_notes(spa, "Verifying Log Devices"); 5484 error = spa_ld_verify_logs(spa, type, ereport); 5485 if (error != 0) 5486 goto fail; 5487 5488 if (missing_feat_write) { 5489 ASSERT(spa->spa_load_state == SPA_LOAD_TRYIMPORT); 5490 5491 /* 5492 * At this point, we know that we can open the pool in 5493 * read-only mode but not read-write mode. We now have enough 5494 * information and can return to userland. 5495 */ 5496 error = spa_vdev_err(spa->spa_root_vdev, VDEV_AUX_UNSUP_FEAT, 5497 ENOTSUP); 5498 goto fail; 5499 } 5500 5501 /* 5502 * Traverse the last txgs to make sure the pool was left off in a safe 5503 * state. When performing an extreme rewind, we verify the whole pool, 5504 * which can take a very long time. 5505 */ 5506 spa_import_progress_set_notes(spa, "Verifying pool data"); 5507 error = spa_ld_verify_pool_data(spa); 5508 if (error != 0) 5509 goto fail; 5510 5511 /* 5512 * Calculate the deflated space for the pool. This must be done before 5513 * we write anything to the pool because we'd need to update the space 5514 * accounting using the deflated sizes. 5515 */ 5516 spa_import_progress_set_notes(spa, "Calculating deflated space"); 5517 spa_update_dspace(spa); 5518 5519 /* 5520 * We have now retrieved all the information we needed to open the 5521 * pool. If we are importing the pool in read-write mode, a few 5522 * additional steps must be performed to finish the import. 5523 */ 5524 spa_import_progress_set_notes(spa, "Starting import"); 5525 if (spa_writeable(spa) && (spa->spa_load_state == SPA_LOAD_RECOVER || 5526 spa->spa_load_max_txg == UINT64_MAX)) { 5527 uint64_t config_cache_txg = spa->spa_config_txg; 5528 5529 ASSERT(spa->spa_load_state != SPA_LOAD_TRYIMPORT); 5530 5531 /* 5532 * Before we do any zio_write's, complete the raidz expansion 5533 * scratch space copying, if necessary. 5534 */ 5535 if (RRSS_GET_STATE(&spa->spa_uberblock) == RRSS_SCRATCH_VALID) 5536 vdev_raidz_reflow_copy_scratch(spa); 5537 5538 /* 5539 * In case of a checkpoint rewind, log the original txg 5540 * of the checkpointed uberblock. 5541 */ 5542 if (checkpoint_rewind) { 5543 spa_history_log_internal(spa, "checkpoint rewind", 5544 NULL, "rewound state to txg=%llu", 5545 (u_longlong_t)spa->spa_uberblock.ub_checkpoint_txg); 5546 } 5547 5548 spa_import_progress_set_notes(spa, "Claiming ZIL blocks"); 5549 /* 5550 * Traverse the ZIL and claim all blocks. 5551 */ 5552 spa_ld_claim_log_blocks(spa); 5553 5554 /* 5555 * Kick-off the syncing thread. 5556 */ 5557 spa->spa_sync_on = B_TRUE; 5558 txg_sync_start(spa->spa_dsl_pool); 5559 mmp_thread_start(spa); 5560 5561 /* 5562 * Wait for all claims to sync. We sync up to the highest 5563 * claimed log block birth time so that claimed log blocks 5564 * don't appear to be from the future. spa_claim_max_txg 5565 * will have been set for us by ZIL traversal operations 5566 * performed above. 5567 */ 5568 spa_import_progress_set_notes(spa, "Syncing ZIL claims"); 5569 txg_wait_synced(spa->spa_dsl_pool, spa->spa_claim_max_txg); 5570 5571 /* 5572 * Check if we need to request an update of the config. On the 5573 * next sync, we would update the config stored in vdev labels 5574 * and the cachefile (by default /etc/zfs/zpool.cache). 5575 */ 5576 spa_import_progress_set_notes(spa, "Updating configs"); 5577 spa_ld_check_for_config_update(spa, config_cache_txg, 5578 update_config_cache); 5579 5580 /* 5581 * Check if a rebuild was in progress and if so resume it. 5582 * Then check all DTLs to see if anything needs resilvering. 
5583 * The resilver will be deferred if a rebuild was started. 5584 */ 5585 spa_import_progress_set_notes(spa, "Starting resilvers"); 5586 if (vdev_rebuild_active(spa->spa_root_vdev)) { 5587 vdev_rebuild_restart(spa); 5588 } else if (!dsl_scan_resilvering(spa->spa_dsl_pool) && 5589 vdev_resilver_needed(spa->spa_root_vdev, NULL, NULL)) { 5590 spa_async_request(spa, SPA_ASYNC_RESILVER); 5591 } 5592 5593 /* 5594 * Log the fact that we booted up (so that we can detect if 5595 * we rebooted in the middle of an operation). 5596 */ 5597 spa_history_log_version(spa, "open", NULL); 5598 5599 spa_import_progress_set_notes(spa, 5600 "Restarting device removals"); 5601 spa_restart_removal(spa); 5602 spa_spawn_aux_threads(spa); 5603 5604 /* 5605 * Delete any inconsistent datasets. 5606 * 5607 * Note: 5608 * Since we may be issuing deletes for clones here, 5609 * we make sure to do so after we've spawned all the 5610 * auxiliary threads above (from which the livelist 5611 * deletion zthr is part of). 5612 */ 5613 spa_import_progress_set_notes(spa, 5614 "Cleaning up inconsistent objsets"); 5615 (void) dmu_objset_find(spa_name(spa), 5616 dsl_destroy_inconsistent, NULL, DS_FIND_CHILDREN); 5617 5618 /* 5619 * Clean up any stale temporary dataset userrefs. 5620 */ 5621 spa_import_progress_set_notes(spa, 5622 "Cleaning up temporary userrefs"); 5623 dsl_pool_clean_tmp_userrefs(spa->spa_dsl_pool); 5624 5625 spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER); 5626 spa_import_progress_set_notes(spa, "Restarting initialize"); 5627 vdev_initialize_restart(spa->spa_root_vdev); 5628 spa_import_progress_set_notes(spa, "Restarting TRIM"); 5629 vdev_trim_restart(spa->spa_root_vdev); 5630 vdev_autotrim_restart(spa); 5631 spa_config_exit(spa, SCL_CONFIG, FTAG); 5632 spa_import_progress_set_notes(spa, "Finished importing"); 5633 } 5634 zio_handle_import_delay(spa, gethrtime() - load_start); 5635 5636 spa_import_progress_remove(spa_guid(spa)); 5637 spa_async_request(spa, SPA_ASYNC_L2CACHE_REBUILD); 5638 5639 spa_load_note(spa, "LOADED"); 5640 fail: 5641 mutex_enter(&spa_namespace_lock); 5642 spa->spa_load_thread = NULL; 5643 cv_broadcast(&spa_namespace_cv); 5644 5645 return (error); 5646 5647 } 5648 5649 static int 5650 spa_load_retry(spa_t *spa, spa_load_state_t state) 5651 { 5652 spa_mode_t mode = spa->spa_mode; 5653 5654 spa_unload(spa); 5655 spa_deactivate(spa); 5656 5657 spa->spa_load_max_txg = spa->spa_uberblock.ub_txg - 1; 5658 5659 spa_activate(spa, mode); 5660 spa_async_suspend(spa); 5661 5662 spa_load_note(spa, "spa_load_retry: rewind, max txg: %llu", 5663 (u_longlong_t)spa->spa_load_max_txg); 5664 5665 return (spa_load(spa, state, SPA_IMPORT_EXISTING)); 5666 } 5667 5668 /* 5669 * If spa_load() fails this function will try loading prior txg's. If 5670 * 'state' is SPA_LOAD_RECOVER and one of these loads succeeds the pool 5671 * will be rewound to that txg. If 'state' is not SPA_LOAD_RECOVER this 5672 * function will not rewind the pool and will return the same error as 5673 * spa_load(). 
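 *
 * Rough sketch of the retry walk (hypothetical numbers): if the best
 * uberblock was txg 1000 and the load failed, then
 *
 *	spa_last_ubsync_txg = 1000;
 *	safe_rewind_txg = 1000 - TXG_DEFER_SIZE;
 *
 * and each retry reloads with spa_load_max_txg set just below the
 * uberblock the previous attempt used (999, then just below whatever
 * txg the next-best uberblock has, and so on), stopping once a load
 * succeeds, no uberblock in range remains, or we would pass
 * safe_rewind_txg without ZPOOL_EXTREME_REWIND.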
5674 */ 5675 static int 5676 spa_load_best(spa_t *spa, spa_load_state_t state, uint64_t max_request, 5677 int rewind_flags) 5678 { 5679 nvlist_t *loadinfo = NULL; 5680 nvlist_t *config = NULL; 5681 int load_error, rewind_error; 5682 uint64_t safe_rewind_txg; 5683 uint64_t min_txg; 5684 5685 if (spa->spa_load_txg && state == SPA_LOAD_RECOVER) { 5686 spa->spa_load_max_txg = spa->spa_load_txg; 5687 spa_set_log_state(spa, SPA_LOG_CLEAR); 5688 } else { 5689 spa->spa_load_max_txg = max_request; 5690 if (max_request != UINT64_MAX) 5691 spa->spa_extreme_rewind = B_TRUE; 5692 } 5693 5694 load_error = rewind_error = spa_load(spa, state, SPA_IMPORT_EXISTING); 5695 if (load_error == 0) 5696 return (0); 5697 if (load_error == ZFS_ERR_NO_CHECKPOINT) { 5698 /* 5699 * When attempting checkpoint-rewind on a pool with no 5700 * checkpoint, we should not attempt to load uberblocks 5701 * from previous txgs when spa_load fails. 5702 */ 5703 ASSERT(spa->spa_import_flags & ZFS_IMPORT_CHECKPOINT); 5704 spa_import_progress_remove(spa_guid(spa)); 5705 return (load_error); 5706 } 5707 5708 if (spa->spa_root_vdev != NULL) 5709 config = spa_config_generate(spa, NULL, -1ULL, B_TRUE); 5710 5711 spa->spa_last_ubsync_txg = spa->spa_uberblock.ub_txg; 5712 spa->spa_last_ubsync_txg_ts = spa->spa_uberblock.ub_timestamp; 5713 5714 if (rewind_flags & ZPOOL_NEVER_REWIND) { 5715 nvlist_free(config); 5716 spa_import_progress_remove(spa_guid(spa)); 5717 return (load_error); 5718 } 5719 5720 if (state == SPA_LOAD_RECOVER) { 5721 /* Price of rolling back is discarding txgs, including log */ 5722 spa_set_log_state(spa, SPA_LOG_CLEAR); 5723 } else { 5724 /* 5725 * If we aren't rolling back save the load info from our first 5726 * import attempt so that we can restore it after attempting 5727 * to rewind. 5728 */ 5729 loadinfo = spa->spa_load_info; 5730 spa->spa_load_info = fnvlist_alloc(); 5731 } 5732 5733 spa->spa_load_max_txg = spa->spa_last_ubsync_txg; 5734 safe_rewind_txg = spa->spa_last_ubsync_txg - TXG_DEFER_SIZE; 5735 min_txg = (rewind_flags & ZPOOL_EXTREME_REWIND) ? 5736 TXG_INITIAL : safe_rewind_txg; 5737 5738 /* 5739 * Continue as long as we're finding errors, we're still within 5740 * the acceptable rewind range, and we're still finding uberblocks 5741 */ 5742 while (rewind_error && spa->spa_uberblock.ub_txg >= min_txg && 5743 spa->spa_uberblock.ub_txg <= spa->spa_load_max_txg) { 5744 if (spa->spa_load_max_txg < safe_rewind_txg) 5745 spa->spa_extreme_rewind = B_TRUE; 5746 rewind_error = spa_load_retry(spa, state); 5747 } 5748 5749 spa->spa_extreme_rewind = B_FALSE; 5750 spa->spa_load_max_txg = UINT64_MAX; 5751 5752 if (config && (rewind_error || state != SPA_LOAD_RECOVER)) 5753 spa_config_set(spa, config); 5754 else 5755 nvlist_free(config); 5756 5757 if (state == SPA_LOAD_RECOVER) { 5758 ASSERT3P(loadinfo, ==, NULL); 5759 spa_import_progress_remove(spa_guid(spa)); 5760 return (rewind_error); 5761 } else { 5762 /* Store the rewind info as part of the initial load info */ 5763 fnvlist_add_nvlist(loadinfo, ZPOOL_CONFIG_REWIND_INFO, 5764 spa->spa_load_info); 5765 5766 /* Restore the initial load info */ 5767 fnvlist_free(spa->spa_load_info); 5768 spa->spa_load_info = loadinfo; 5769 5770 spa_import_progress_remove(spa_guid(spa)); 5771 return (load_error); 5772 } 5773 } 5774 5775 /* 5776 * Pool Open/Import 5777 * 5778 * The import case is identical to an open except that the configuration is sent 5779 * down from userland, instead of grabbed from the configuration cache. 
For the 5780 * case of an open, the pool configuration will exist in the 5781 * POOL_STATE_UNINITIALIZED state. 5782 * 5783 * The stats information (gen/count/ustats) is used to gather vdev statistics at 5784 * the same time open the pool, without having to keep around the spa_t in some 5785 * ambiguous state. 5786 */ 5787 static int 5788 spa_open_common(const char *pool, spa_t **spapp, const void *tag, 5789 nvlist_t *nvpolicy, nvlist_t **config) 5790 { 5791 spa_t *spa; 5792 spa_load_state_t state = SPA_LOAD_OPEN; 5793 int error; 5794 int locked = B_FALSE; 5795 int firstopen = B_FALSE; 5796 5797 *spapp = NULL; 5798 5799 /* 5800 * As disgusting as this is, we need to support recursive calls to this 5801 * function because dsl_dir_open() is called during spa_load(), and ends 5802 * up calling spa_open() again. The real fix is to figure out how to 5803 * avoid dsl_dir_open() calling this in the first place. 5804 */ 5805 if (MUTEX_NOT_HELD(&spa_namespace_lock)) { 5806 mutex_enter(&spa_namespace_lock); 5807 locked = B_TRUE; 5808 } 5809 5810 if ((spa = spa_lookup(pool)) == NULL) { 5811 if (locked) 5812 mutex_exit(&spa_namespace_lock); 5813 return (SET_ERROR(ENOENT)); 5814 } 5815 5816 if (spa->spa_state == POOL_STATE_UNINITIALIZED) { 5817 zpool_load_policy_t policy; 5818 5819 firstopen = B_TRUE; 5820 5821 zpool_get_load_policy(nvpolicy ? nvpolicy : spa->spa_config, 5822 &policy); 5823 if (policy.zlp_rewind & ZPOOL_DO_REWIND) 5824 state = SPA_LOAD_RECOVER; 5825 5826 spa_activate(spa, spa_mode_global); 5827 5828 if (state != SPA_LOAD_RECOVER) 5829 spa->spa_last_ubsync_txg = spa->spa_load_txg = 0; 5830 spa->spa_config_source = SPA_CONFIG_SRC_CACHEFILE; 5831 5832 zfs_dbgmsg("spa_open_common: opening %s", pool); 5833 error = spa_load_best(spa, state, policy.zlp_txg, 5834 policy.zlp_rewind); 5835 5836 if (error == EBADF) { 5837 /* 5838 * If vdev_validate() returns failure (indicated by 5839 * EBADF), it indicates that one of the vdevs indicates 5840 * that the pool has been exported or destroyed. If 5841 * this is the case, the config cache is out of sync and 5842 * we should remove the pool from the namespace. 5843 */ 5844 spa_unload(spa); 5845 spa_deactivate(spa); 5846 spa_write_cachefile(spa, B_TRUE, B_TRUE, B_FALSE); 5847 spa_remove(spa); 5848 if (locked) 5849 mutex_exit(&spa_namespace_lock); 5850 return (SET_ERROR(ENOENT)); 5851 } 5852 5853 if (error) { 5854 /* 5855 * We can't open the pool, but we still have useful 5856 * information: the state of each vdev after the 5857 * attempted vdev_open(). Return this to the user. 5858 */ 5859 if (config != NULL && spa->spa_config) { 5860 *config = fnvlist_dup(spa->spa_config); 5861 fnvlist_add_nvlist(*config, 5862 ZPOOL_CONFIG_LOAD_INFO, 5863 spa->spa_load_info); 5864 } 5865 spa_unload(spa); 5866 spa_deactivate(spa); 5867 spa->spa_last_open_failed = error; 5868 if (locked) 5869 mutex_exit(&spa_namespace_lock); 5870 *spapp = NULL; 5871 return (error); 5872 } 5873 } 5874 5875 spa_open_ref(spa, tag); 5876 5877 if (config != NULL) 5878 *config = spa_config_generate(spa, NULL, -1ULL, B_TRUE); 5879 5880 /* 5881 * If we've recovered the pool, pass back any information we 5882 * gathered while doing the load. 
5883 */ 5884 if (state == SPA_LOAD_RECOVER && config != NULL) { 5885 fnvlist_add_nvlist(*config, ZPOOL_CONFIG_LOAD_INFO, 5886 spa->spa_load_info); 5887 } 5888 5889 if (locked) { 5890 spa->spa_last_open_failed = 0; 5891 spa->spa_last_ubsync_txg = 0; 5892 spa->spa_load_txg = 0; 5893 mutex_exit(&spa_namespace_lock); 5894 } 5895 5896 if (firstopen) 5897 zvol_create_minors_recursive(spa_name(spa)); 5898 5899 *spapp = spa; 5900 5901 return (0); 5902 } 5903 5904 int 5905 spa_open_rewind(const char *name, spa_t **spapp, const void *tag, 5906 nvlist_t *policy, nvlist_t **config) 5907 { 5908 return (spa_open_common(name, spapp, tag, policy, config)); 5909 } 5910 5911 int 5912 spa_open(const char *name, spa_t **spapp, const void *tag) 5913 { 5914 return (spa_open_common(name, spapp, tag, NULL, NULL)); 5915 } 5916 5917 /* 5918 * Lookup the given spa_t, incrementing the inject count in the process, 5919 * preventing it from being exported or destroyed. 5920 */ 5921 spa_t * 5922 spa_inject_addref(char *name) 5923 { 5924 spa_t *spa; 5925 5926 mutex_enter(&spa_namespace_lock); 5927 if ((spa = spa_lookup(name)) == NULL) { 5928 mutex_exit(&spa_namespace_lock); 5929 return (NULL); 5930 } 5931 spa->spa_inject_ref++; 5932 mutex_exit(&spa_namespace_lock); 5933 5934 return (spa); 5935 } 5936 5937 void 5938 spa_inject_delref(spa_t *spa) 5939 { 5940 mutex_enter(&spa_namespace_lock); 5941 spa->spa_inject_ref--; 5942 mutex_exit(&spa_namespace_lock); 5943 } 5944 5945 /* 5946 * Add spares device information to the nvlist. 5947 */ 5948 static void 5949 spa_add_spares(spa_t *spa, nvlist_t *config) 5950 { 5951 nvlist_t **spares; 5952 uint_t i, nspares; 5953 nvlist_t *nvroot; 5954 uint64_t guid; 5955 vdev_stat_t *vs; 5956 uint_t vsc; 5957 uint64_t pool; 5958 5959 ASSERT(spa_config_held(spa, SCL_CONFIG, RW_READER)); 5960 5961 if (spa->spa_spares.sav_count == 0) 5962 return; 5963 5964 nvroot = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE); 5965 VERIFY0(nvlist_lookup_nvlist_array(spa->spa_spares.sav_config, 5966 ZPOOL_CONFIG_SPARES, &spares, &nspares)); 5967 if (nspares != 0) { 5968 fnvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES, 5969 (const nvlist_t * const *)spares, nspares); 5970 VERIFY0(nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES, 5971 &spares, &nspares)); 5972 5973 /* 5974 * Go through and find any spares which have since been 5975 * repurposed as an active spare. If this is the case, update 5976 * their status appropriately. 5977 */ 5978 for (i = 0; i < nspares; i++) { 5979 guid = fnvlist_lookup_uint64(spares[i], 5980 ZPOOL_CONFIG_GUID); 5981 VERIFY0(nvlist_lookup_uint64_array(spares[i], 5982 ZPOOL_CONFIG_VDEV_STATS, (uint64_t **)&vs, &vsc)); 5983 if (spa_spare_exists(guid, &pool, NULL) && 5984 pool != 0ULL) { 5985 vs->vs_state = VDEV_STATE_CANT_OPEN; 5986 vs->vs_aux = VDEV_AUX_SPARED; 5987 } else { 5988 vs->vs_state = 5989 spa->spa_spares.sav_vdevs[i]->vdev_state; 5990 } 5991 } 5992 } 5993 } 5994 5995 /* 5996 * Add l2cache device information to the nvlist, including vdev stats. 
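 *
 * Rough sketch of the nvlist layout this function fills in (for
 * illustration only; the authoritative keys are the ZPOOL_CONFIG_*
 * constants used below):
 *
 *	config
 *	    ZPOOL_CONFIG_VDEV_TREE (nvroot)
 *	        ZPOOL_CONFIG_L2CACHE: array of nvlists, one per cache dev
 *	            ZPOOL_CONFIG_GUID
 *	            ZPOOL_CONFIG_VDEV_STATS (refreshed via vdev_get_stats())
 *
 * Each entry's guid is matched against spa->spa_l2cache.sav_vdevs so
 * that the reported stats reflect the currently open cache vdev.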
5997 */ 5998 static void 5999 spa_add_l2cache(spa_t *spa, nvlist_t *config) 6000 { 6001 nvlist_t **l2cache; 6002 uint_t i, j, nl2cache; 6003 nvlist_t *nvroot; 6004 uint64_t guid; 6005 vdev_t *vd; 6006 vdev_stat_t *vs; 6007 uint_t vsc; 6008 6009 ASSERT(spa_config_held(spa, SCL_CONFIG, RW_READER)); 6010 6011 if (spa->spa_l2cache.sav_count == 0) 6012 return; 6013 6014 nvroot = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE); 6015 VERIFY0(nvlist_lookup_nvlist_array(spa->spa_l2cache.sav_config, 6016 ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache)); 6017 if (nl2cache != 0) { 6018 fnvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE, 6019 (const nvlist_t * const *)l2cache, nl2cache); 6020 VERIFY0(nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE, 6021 &l2cache, &nl2cache)); 6022 6023 /* 6024 * Update level 2 cache device stats. 6025 */ 6026 6027 for (i = 0; i < nl2cache; i++) { 6028 guid = fnvlist_lookup_uint64(l2cache[i], 6029 ZPOOL_CONFIG_GUID); 6030 6031 vd = NULL; 6032 for (j = 0; j < spa->spa_l2cache.sav_count; j++) { 6033 if (guid == 6034 spa->spa_l2cache.sav_vdevs[j]->vdev_guid) { 6035 vd = spa->spa_l2cache.sav_vdevs[j]; 6036 break; 6037 } 6038 } 6039 ASSERT(vd != NULL); 6040 6041 VERIFY0(nvlist_lookup_uint64_array(l2cache[i], 6042 ZPOOL_CONFIG_VDEV_STATS, (uint64_t **)&vs, &vsc)); 6043 vdev_get_stats(vd, vs); 6044 vdev_config_generate_stats(vd, l2cache[i]); 6045 6046 } 6047 } 6048 } 6049 6050 static void 6051 spa_feature_stats_from_disk(spa_t *spa, nvlist_t *features) 6052 { 6053 zap_cursor_t zc; 6054 zap_attribute_t *za = zap_attribute_alloc(); 6055 6056 if (spa->spa_feat_for_read_obj != 0) { 6057 for (zap_cursor_init(&zc, spa->spa_meta_objset, 6058 spa->spa_feat_for_read_obj); 6059 zap_cursor_retrieve(&zc, za) == 0; 6060 zap_cursor_advance(&zc)) { 6061 ASSERT(za->za_integer_length == sizeof (uint64_t) && 6062 za->za_num_integers == 1); 6063 VERIFY0(nvlist_add_uint64(features, za->za_name, 6064 za->za_first_integer)); 6065 } 6066 zap_cursor_fini(&zc); 6067 } 6068 6069 if (spa->spa_feat_for_write_obj != 0) { 6070 for (zap_cursor_init(&zc, spa->spa_meta_objset, 6071 spa->spa_feat_for_write_obj); 6072 zap_cursor_retrieve(&zc, za) == 0; 6073 zap_cursor_advance(&zc)) { 6074 ASSERT(za->za_integer_length == sizeof (uint64_t) && 6075 za->za_num_integers == 1); 6076 VERIFY0(nvlist_add_uint64(features, za->za_name, 6077 za->za_first_integer)); 6078 } 6079 zap_cursor_fini(&zc); 6080 } 6081 zap_attribute_free(za); 6082 } 6083 6084 static void 6085 spa_feature_stats_from_cache(spa_t *spa, nvlist_t *features) 6086 { 6087 int i; 6088 6089 for (i = 0; i < SPA_FEATURES; i++) { 6090 zfeature_info_t feature = spa_feature_table[i]; 6091 uint64_t refcount; 6092 6093 if (feature_get_refcount(spa, &feature, &refcount) != 0) 6094 continue; 6095 6096 VERIFY0(nvlist_add_uint64(features, feature.fi_guid, refcount)); 6097 } 6098 } 6099 6100 /* 6101 * Store a list of pool features and their reference counts in the 6102 * config. 6103 * 6104 * The first time this is called on a spa, allocate a new nvlist, fetch 6105 * the pool features and reference counts from disk, then save the list 6106 * in the spa. In subsequent calls on the same spa use the saved nvlist 6107 * and refresh its values from the cached reference counts. This 6108 * ensures we don't block here on I/O on a suspended pool so 'zpool 6109 * clear' can resume the pool. 
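 *
 * Sketch of the result (assumed shape, shown for illustration only):
 * the nvlist stored under ZPOOL_CONFIG_FEATURE_STATS maps feature
 * guids to reference counts, e.g.
 *
 *	"com.delphix:hole_birth"    -> 1
 *	"com.delphix:embedded_data" -> 1
 *	...
 *
 * On the first call spa_feature_stats_from_disk() walks the
 * for_read/for_write feature ZAPs; on later calls
 * spa_feature_stats_from_cache() only consults the in-core refcounts,
 * so no I/O is issued against a suspended pool.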
6110 */ 6111 static void 6112 spa_add_feature_stats(spa_t *spa, nvlist_t *config) 6113 { 6114 nvlist_t *features; 6115 6116 ASSERT(spa_config_held(spa, SCL_CONFIG, RW_READER)); 6117 6118 mutex_enter(&spa->spa_feat_stats_lock); 6119 features = spa->spa_feat_stats; 6120 6121 if (features != NULL) { 6122 spa_feature_stats_from_cache(spa, features); 6123 } else { 6124 VERIFY0(nvlist_alloc(&features, NV_UNIQUE_NAME, KM_SLEEP)); 6125 spa->spa_feat_stats = features; 6126 spa_feature_stats_from_disk(spa, features); 6127 } 6128 6129 VERIFY0(nvlist_add_nvlist(config, ZPOOL_CONFIG_FEATURE_STATS, 6130 features)); 6131 6132 mutex_exit(&spa->spa_feat_stats_lock); 6133 } 6134 6135 int 6136 spa_get_stats(const char *name, nvlist_t **config, 6137 char *altroot, size_t buflen) 6138 { 6139 int error; 6140 spa_t *spa; 6141 6142 *config = NULL; 6143 error = spa_open_common(name, &spa, FTAG, NULL, config); 6144 6145 if (spa != NULL) { 6146 /* 6147 * This still leaves a window of inconsistency where the spares 6148 * or l2cache devices could change and the config would be 6149 * self-inconsistent. 6150 */ 6151 spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER); 6152 6153 if (*config != NULL) { 6154 uint64_t loadtimes[2]; 6155 6156 loadtimes[0] = spa->spa_loaded_ts.tv_sec; 6157 loadtimes[1] = spa->spa_loaded_ts.tv_nsec; 6158 fnvlist_add_uint64_array(*config, 6159 ZPOOL_CONFIG_LOADED_TIME, loadtimes, 2); 6160 6161 fnvlist_add_uint64(*config, 6162 ZPOOL_CONFIG_ERRCOUNT, 6163 spa_approx_errlog_size(spa)); 6164 6165 if (spa_suspended(spa)) { 6166 fnvlist_add_uint64(*config, 6167 ZPOOL_CONFIG_SUSPENDED, 6168 spa->spa_failmode); 6169 fnvlist_add_uint64(*config, 6170 ZPOOL_CONFIG_SUSPENDED_REASON, 6171 spa->spa_suspended); 6172 } 6173 6174 spa_add_spares(spa, *config); 6175 spa_add_l2cache(spa, *config); 6176 spa_add_feature_stats(spa, *config); 6177 } 6178 } 6179 6180 /* 6181 * We want to get the alternate root even for faulted pools, so we cheat 6182 * and call spa_lookup() directly. 6183 */ 6184 if (altroot) { 6185 if (spa == NULL) { 6186 mutex_enter(&spa_namespace_lock); 6187 spa = spa_lookup(name); 6188 if (spa) 6189 spa_altroot(spa, altroot, buflen); 6190 else 6191 altroot[0] = '\0'; 6192 spa = NULL; 6193 mutex_exit(&spa_namespace_lock); 6194 } else { 6195 spa_altroot(spa, altroot, buflen); 6196 } 6197 } 6198 6199 if (spa != NULL) { 6200 spa_config_exit(spa, SCL_CONFIG, FTAG); 6201 spa_close(spa, FTAG); 6202 } 6203 6204 return (error); 6205 } 6206 6207 /* 6208 * Validate that the auxiliary device array is well formed. We must have an 6209 * array of nvlists, each which describes a valid leaf vdev. If this is an 6210 * import (mode is VDEV_ALLOC_SPARE), then we allow corrupted spares to be 6211 * specified, as long as they are well-formed. 6212 */ 6213 static int 6214 spa_validate_aux_devs(spa_t *spa, nvlist_t *nvroot, uint64_t crtxg, int mode, 6215 spa_aux_vdev_t *sav, const char *config, uint64_t version, 6216 vdev_labeltype_t label) 6217 { 6218 nvlist_t **dev; 6219 uint_t i, ndev; 6220 vdev_t *vd; 6221 int error; 6222 6223 ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL); 6224 6225 /* 6226 * It's acceptable to have no devs specified. 6227 */ 6228 if (nvlist_lookup_nvlist_array(nvroot, config, &dev, &ndev) != 0) 6229 return (0); 6230 6231 if (ndev == 0) 6232 return (SET_ERROR(EINVAL)); 6233 6234 /* 6235 * Make sure the pool is formatted with a version that supports this 6236 * device type. 
6237 */ 6238 if (spa_version(spa) < version) 6239 return (SET_ERROR(ENOTSUP)); 6240 6241 /* 6242 * Set the pending device list so we correctly handle device in-use 6243 * checking. 6244 */ 6245 sav->sav_pending = dev; 6246 sav->sav_npending = ndev; 6247 6248 for (i = 0; i < ndev; i++) { 6249 if ((error = spa_config_parse(spa, &vd, dev[i], NULL, 0, 6250 mode)) != 0) 6251 goto out; 6252 6253 if (!vd->vdev_ops->vdev_op_leaf) { 6254 vdev_free(vd); 6255 error = SET_ERROR(EINVAL); 6256 goto out; 6257 } 6258 6259 vd->vdev_top = vd; 6260 6261 if ((error = vdev_open(vd)) == 0 && 6262 (error = vdev_label_init(vd, crtxg, label)) == 0) { 6263 fnvlist_add_uint64(dev[i], ZPOOL_CONFIG_GUID, 6264 vd->vdev_guid); 6265 } 6266 6267 vdev_free(vd); 6268 6269 if (error && 6270 (mode != VDEV_ALLOC_SPARE && mode != VDEV_ALLOC_L2CACHE)) 6271 goto out; 6272 else 6273 error = 0; 6274 } 6275 6276 out: 6277 sav->sav_pending = NULL; 6278 sav->sav_npending = 0; 6279 return (error); 6280 } 6281 6282 static int 6283 spa_validate_aux(spa_t *spa, nvlist_t *nvroot, uint64_t crtxg, int mode) 6284 { 6285 int error; 6286 6287 ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL); 6288 6289 if ((error = spa_validate_aux_devs(spa, nvroot, crtxg, mode, 6290 &spa->spa_spares, ZPOOL_CONFIG_SPARES, SPA_VERSION_SPARES, 6291 VDEV_LABEL_SPARE)) != 0) { 6292 return (error); 6293 } 6294 6295 return (spa_validate_aux_devs(spa, nvroot, crtxg, mode, 6296 &spa->spa_l2cache, ZPOOL_CONFIG_L2CACHE, SPA_VERSION_L2CACHE, 6297 VDEV_LABEL_L2CACHE)); 6298 } 6299 6300 static void 6301 spa_set_aux_vdevs(spa_aux_vdev_t *sav, nvlist_t **devs, int ndevs, 6302 const char *config) 6303 { 6304 int i; 6305 6306 if (sav->sav_config != NULL) { 6307 nvlist_t **olddevs; 6308 uint_t oldndevs; 6309 nvlist_t **newdevs; 6310 6311 /* 6312 * Generate new dev list by concatenating with the 6313 * current dev list. 6314 */ 6315 VERIFY0(nvlist_lookup_nvlist_array(sav->sav_config, config, 6316 &olddevs, &oldndevs)); 6317 6318 newdevs = kmem_alloc(sizeof (void *) * 6319 (ndevs + oldndevs), KM_SLEEP); 6320 for (i = 0; i < oldndevs; i++) 6321 newdevs[i] = fnvlist_dup(olddevs[i]); 6322 for (i = 0; i < ndevs; i++) 6323 newdevs[i + oldndevs] = fnvlist_dup(devs[i]); 6324 6325 fnvlist_remove(sav->sav_config, config); 6326 6327 fnvlist_add_nvlist_array(sav->sav_config, config, 6328 (const nvlist_t * const *)newdevs, ndevs + oldndevs); 6329 for (i = 0; i < oldndevs + ndevs; i++) 6330 nvlist_free(newdevs[i]); 6331 kmem_free(newdevs, (oldndevs + ndevs) * sizeof (void *)); 6332 } else { 6333 /* 6334 * Generate a new dev list. 6335 */ 6336 sav->sav_config = fnvlist_alloc(); 6337 fnvlist_add_nvlist_array(sav->sav_config, config, 6338 (const nvlist_t * const *)devs, ndevs); 6339 } 6340 } 6341 6342 /* 6343 * Stop and drop level 2 ARC devices 6344 */ 6345 void 6346 spa_l2cache_drop(spa_t *spa) 6347 { 6348 vdev_t *vd; 6349 int i; 6350 spa_aux_vdev_t *sav = &spa->spa_l2cache; 6351 6352 for (i = 0; i < sav->sav_count; i++) { 6353 uint64_t pool; 6354 6355 vd = sav->sav_vdevs[i]; 6356 ASSERT(vd != NULL); 6357 6358 if (spa_l2cache_exists(vd->vdev_guid, &pool) && 6359 pool != 0ULL && l2arc_vdev_present(vd)) 6360 l2arc_remove_vdev(vd); 6361 } 6362 } 6363 6364 /* 6365 * Verify encryption parameters for spa creation. If we are encrypting, we must 6366 * have the encryption feature flag enabled. 
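 *
 * Caller-side sketch (illustrative only): spa_create() derives
 * 'has_encryption' from the "feature@encryption" pool property and,
 * when a dsl_crypto_params_t was supplied, calls
 *
 *	error = spa_create_check_encryption_params(dcp, has_encryption);
 *
 * so requesting any cipher other than "off" or "inherit" without the
 * feature enabled fails with ENOTSUP before anything is written to
 * disk; the remaining key parameters are vetted by
 * dmu_objset_create_crypt_check().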
6367 */ 6368 static int 6369 spa_create_check_encryption_params(dsl_crypto_params_t *dcp, 6370 boolean_t has_encryption) 6371 { 6372 if (dcp->cp_crypt != ZIO_CRYPT_OFF && 6373 dcp->cp_crypt != ZIO_CRYPT_INHERIT && 6374 !has_encryption) 6375 return (SET_ERROR(ENOTSUP)); 6376 6377 return (dmu_objset_create_crypt_check(NULL, dcp, NULL)); 6378 } 6379 6380 /* 6381 * Pool Creation 6382 */ 6383 int 6384 spa_create(const char *pool, nvlist_t *nvroot, nvlist_t *props, 6385 nvlist_t *zplprops, dsl_crypto_params_t *dcp) 6386 { 6387 spa_t *spa; 6388 const char *altroot = NULL; 6389 vdev_t *rvd; 6390 dsl_pool_t *dp; 6391 dmu_tx_t *tx; 6392 int error = 0; 6393 uint64_t txg = TXG_INITIAL; 6394 nvlist_t **spares, **l2cache; 6395 uint_t nspares, nl2cache; 6396 uint64_t version, obj, ndraid = 0; 6397 boolean_t has_features; 6398 boolean_t has_encryption; 6399 boolean_t has_allocclass; 6400 spa_feature_t feat; 6401 const char *feat_name; 6402 const char *poolname; 6403 nvlist_t *nvl; 6404 6405 if (props == NULL || 6406 nvlist_lookup_string(props, 6407 zpool_prop_to_name(ZPOOL_PROP_TNAME), &poolname) != 0) 6408 poolname = (char *)pool; 6409 6410 /* 6411 * If this pool already exists, return failure. 6412 */ 6413 mutex_enter(&spa_namespace_lock); 6414 if (spa_lookup(poolname) != NULL) { 6415 mutex_exit(&spa_namespace_lock); 6416 return (SET_ERROR(EEXIST)); 6417 } 6418 6419 /* 6420 * Allocate a new spa_t structure. 6421 */ 6422 nvl = fnvlist_alloc(); 6423 fnvlist_add_string(nvl, ZPOOL_CONFIG_POOL_NAME, pool); 6424 (void) nvlist_lookup_string(props, 6425 zpool_prop_to_name(ZPOOL_PROP_ALTROOT), &altroot); 6426 spa = spa_add(poolname, nvl, altroot); 6427 fnvlist_free(nvl); 6428 spa_activate(spa, spa_mode_global); 6429 6430 if (props && (error = spa_prop_validate(spa, props))) { 6431 spa_deactivate(spa); 6432 spa_remove(spa); 6433 mutex_exit(&spa_namespace_lock); 6434 return (error); 6435 } 6436 6437 /* 6438 * Temporary pool names should never be written to disk. 
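 *
 * For illustration: when a temporary name is supplied (e.g. via the -t
 * option of zpool create), it arrives as the ZPOOL_PROP_TNAME property
 * while the 'pool' argument still carries the permanent name, roughly
 * (hypothetical names):
 *
 *	pool     = "tank"	written to the labels
 *	props    : zpool_prop_to_name(ZPOOL_PROP_TNAME) = "tanktmp"
 *	poolname = "tanktmp"	names the in-core spa_t only
 *
 * ZFS_IMPORT_TEMP_NAME then keeps the temporary name from ever being
 * written to the labels or the config cache.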
6439 */ 6440 if (poolname != pool) 6441 spa->spa_import_flags |= ZFS_IMPORT_TEMP_NAME; 6442 6443 has_features = B_FALSE; 6444 has_encryption = B_FALSE; 6445 has_allocclass = B_FALSE; 6446 for (nvpair_t *elem = nvlist_next_nvpair(props, NULL); 6447 elem != NULL; elem = nvlist_next_nvpair(props, elem)) { 6448 if (zpool_prop_feature(nvpair_name(elem))) { 6449 has_features = B_TRUE; 6450 6451 feat_name = strchr(nvpair_name(elem), '@') + 1; 6452 VERIFY0(zfeature_lookup_name(feat_name, &feat)); 6453 if (feat == SPA_FEATURE_ENCRYPTION) 6454 has_encryption = B_TRUE; 6455 if (feat == SPA_FEATURE_ALLOCATION_CLASSES) 6456 has_allocclass = B_TRUE; 6457 } 6458 } 6459 6460 /* verify encryption params, if they were provided */ 6461 if (dcp != NULL) { 6462 error = spa_create_check_encryption_params(dcp, has_encryption); 6463 if (error != 0) { 6464 spa_deactivate(spa); 6465 spa_remove(spa); 6466 mutex_exit(&spa_namespace_lock); 6467 return (error); 6468 } 6469 } 6470 if (!has_allocclass && zfs_special_devs(nvroot, NULL)) { 6471 spa_deactivate(spa); 6472 spa_remove(spa); 6473 mutex_exit(&spa_namespace_lock); 6474 return (ENOTSUP); 6475 } 6476 6477 if (has_features || nvlist_lookup_uint64(props, 6478 zpool_prop_to_name(ZPOOL_PROP_VERSION), &version) != 0) { 6479 version = SPA_VERSION; 6480 } 6481 ASSERT(SPA_VERSION_IS_SUPPORTED(version)); 6482 6483 spa->spa_first_txg = txg; 6484 spa->spa_uberblock.ub_txg = txg - 1; 6485 spa->spa_uberblock.ub_version = version; 6486 spa->spa_ubsync = spa->spa_uberblock; 6487 spa->spa_load_state = SPA_LOAD_CREATE; 6488 spa->spa_removing_phys.sr_state = DSS_NONE; 6489 spa->spa_removing_phys.sr_removing_vdev = -1; 6490 spa->spa_removing_phys.sr_prev_indirect_vdev = -1; 6491 spa->spa_indirect_vdevs_loaded = B_TRUE; 6492 6493 /* 6494 * Create "The Godfather" zio to hold all async IOs 6495 */ 6496 spa->spa_async_zio_root = kmem_alloc(max_ncpus * sizeof (void *), 6497 KM_SLEEP); 6498 for (int i = 0; i < max_ncpus; i++) { 6499 spa->spa_async_zio_root[i] = zio_root(spa, NULL, NULL, 6500 ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE | 6501 ZIO_FLAG_GODFATHER); 6502 } 6503 6504 /* 6505 * Create the root vdev. 6506 */ 6507 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 6508 6509 error = spa_config_parse(spa, &rvd, nvroot, NULL, 0, VDEV_ALLOC_ADD); 6510 6511 ASSERT(error != 0 || rvd != NULL); 6512 ASSERT(error != 0 || spa->spa_root_vdev == rvd); 6513 6514 if (error == 0 && !zfs_allocatable_devs(nvroot)) 6515 error = SET_ERROR(EINVAL); 6516 6517 if (error == 0 && 6518 (error = vdev_create(rvd, txg, B_FALSE)) == 0 && 6519 (error = vdev_draid_spare_create(nvroot, rvd, &ndraid, 0)) == 0 && 6520 (error = spa_validate_aux(spa, nvroot, txg, VDEV_ALLOC_ADD)) == 0) { 6521 /* 6522 * instantiate the metaslab groups (this will dirty the vdevs) 6523 * we can no longer error exit past this point 6524 */ 6525 for (int c = 0; error == 0 && c < rvd->vdev_children; c++) { 6526 vdev_t *vd = rvd->vdev_child[c]; 6527 6528 vdev_metaslab_set_size(vd); 6529 vdev_expand(vd, txg); 6530 } 6531 } 6532 6533 spa_config_exit(spa, SCL_ALL, FTAG); 6534 6535 if (error != 0) { 6536 spa_unload(spa); 6537 spa_deactivate(spa); 6538 spa_remove(spa); 6539 mutex_exit(&spa_namespace_lock); 6540 return (error); 6541 } 6542 6543 /* 6544 * Get the list of spares, if specified. 
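 *
 * Sketch of the relevant nvroot layout at this point (illustrative
 * only):
 *
 *	nvroot
 *	    ZPOOL_CONFIG_CHILDREN:	top-level vdevs (parsed above)
 *	    ZPOOL_CONFIG_SPARES:	optional array of leaf vdev nvlists
 *	    ZPOOL_CONFIG_L2CACHE:	optional array of leaf vdev nvlists
 *
 * Both aux arrays were validated by spa_validate_aux() above; here and
 * in the block that follows they are copied into the corresponding
 * sav_config and loaded, so they are part of the pool config from the
 * first sync onward.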
6545 */ 6546 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES, 6547 &spares, &nspares) == 0) { 6548 spa->spa_spares.sav_config = fnvlist_alloc(); 6549 fnvlist_add_nvlist_array(spa->spa_spares.sav_config, 6550 ZPOOL_CONFIG_SPARES, (const nvlist_t * const *)spares, 6551 nspares); 6552 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 6553 spa_load_spares(spa); 6554 spa_config_exit(spa, SCL_ALL, FTAG); 6555 spa->spa_spares.sav_sync = B_TRUE; 6556 } 6557 6558 /* 6559 * Get the list of level 2 cache devices, if specified. 6560 */ 6561 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE, 6562 &l2cache, &nl2cache) == 0) { 6563 VERIFY0(nvlist_alloc(&spa->spa_l2cache.sav_config, 6564 NV_UNIQUE_NAME, KM_SLEEP)); 6565 fnvlist_add_nvlist_array(spa->spa_l2cache.sav_config, 6566 ZPOOL_CONFIG_L2CACHE, (const nvlist_t * const *)l2cache, 6567 nl2cache); 6568 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 6569 spa_load_l2cache(spa); 6570 spa_config_exit(spa, SCL_ALL, FTAG); 6571 spa->spa_l2cache.sav_sync = B_TRUE; 6572 } 6573 6574 spa->spa_is_initializing = B_TRUE; 6575 spa->spa_dsl_pool = dp = dsl_pool_create(spa, zplprops, dcp, txg); 6576 spa->spa_is_initializing = B_FALSE; 6577 6578 /* 6579 * Create DDTs (dedup tables). 6580 */ 6581 ddt_create(spa); 6582 /* 6583 * Create BRT table and BRT table object. 6584 */ 6585 brt_create(spa); 6586 6587 spa_update_dspace(spa); 6588 6589 tx = dmu_tx_create_assigned(dp, txg); 6590 6591 /* 6592 * Create the pool's history object. 6593 */ 6594 if (version >= SPA_VERSION_ZPOOL_HISTORY && !spa->spa_history) 6595 spa_history_create_obj(spa, tx); 6596 6597 spa_event_notify(spa, NULL, NULL, ESC_ZFS_POOL_CREATE); 6598 spa_history_log_version(spa, "create", tx); 6599 6600 /* 6601 * Create the pool config object. 6602 */ 6603 spa->spa_config_object = dmu_object_alloc(spa->spa_meta_objset, 6604 DMU_OT_PACKED_NVLIST, SPA_CONFIG_BLOCKSIZE, 6605 DMU_OT_PACKED_NVLIST_SIZE, sizeof (uint64_t), tx); 6606 6607 if (zap_add(spa->spa_meta_objset, 6608 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_CONFIG, 6609 sizeof (uint64_t), 1, &spa->spa_config_object, tx) != 0) { 6610 cmn_err(CE_PANIC, "failed to add pool config"); 6611 } 6612 6613 if (zap_add(spa->spa_meta_objset, 6614 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_CREATION_VERSION, 6615 sizeof (uint64_t), 1, &version, tx) != 0) { 6616 cmn_err(CE_PANIC, "failed to add pool version"); 6617 } 6618 6619 /* Newly created pools with the right version are always deflated. */ 6620 if (version >= SPA_VERSION_RAIDZ_DEFLATE) { 6621 spa->spa_deflate = TRUE; 6622 if (zap_add(spa->spa_meta_objset, 6623 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_DEFLATE, 6624 sizeof (uint64_t), 1, &spa->spa_deflate, tx) != 0) { 6625 cmn_err(CE_PANIC, "failed to add deflate"); 6626 } 6627 } 6628 6629 /* 6630 * Create the deferred-free bpobj. Turn off compression 6631 * because sync-to-convergence takes longer if the blocksize 6632 * keeps changing. 6633 */ 6634 obj = bpobj_alloc(spa->spa_meta_objset, 1 << 14, tx); 6635 dmu_object_set_compress(spa->spa_meta_objset, obj, 6636 ZIO_COMPRESS_OFF, tx); 6637 if (zap_add(spa->spa_meta_objset, 6638 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_SYNC_BPOBJ, 6639 sizeof (uint64_t), 1, &obj, tx) != 0) { 6640 cmn_err(CE_PANIC, "failed to add bpobj"); 6641 } 6642 VERIFY3U(0, ==, bpobj_open(&spa->spa_deferred_bpobj, 6643 spa->spa_meta_objset, obj)); 6644 6645 /* 6646 * Generate some random noise for salted checksums to operate on. 
6647 */ 6648 (void) random_get_pseudo_bytes(spa->spa_cksum_salt.zcs_bytes, 6649 sizeof (spa->spa_cksum_salt.zcs_bytes)); 6650 6651 /* 6652 * Set pool properties. 6653 */ 6654 spa->spa_bootfs = zpool_prop_default_numeric(ZPOOL_PROP_BOOTFS); 6655 spa->spa_delegation = zpool_prop_default_numeric(ZPOOL_PROP_DELEGATION); 6656 spa->spa_failmode = zpool_prop_default_numeric(ZPOOL_PROP_FAILUREMODE); 6657 spa->spa_autoexpand = zpool_prop_default_numeric(ZPOOL_PROP_AUTOEXPAND); 6658 spa->spa_multihost = zpool_prop_default_numeric(ZPOOL_PROP_MULTIHOST); 6659 spa->spa_autotrim = zpool_prop_default_numeric(ZPOOL_PROP_AUTOTRIM); 6660 spa->spa_dedup_table_quota = 6661 zpool_prop_default_numeric(ZPOOL_PROP_DEDUP_TABLE_QUOTA); 6662 6663 if (props != NULL) { 6664 spa_configfile_set(spa, props, B_FALSE); 6665 spa_sync_props(props, tx); 6666 } 6667 6668 for (int i = 0; i < ndraid; i++) 6669 spa_feature_incr(spa, SPA_FEATURE_DRAID, tx); 6670 6671 dmu_tx_commit(tx); 6672 6673 spa->spa_sync_on = B_TRUE; 6674 txg_sync_start(dp); 6675 mmp_thread_start(spa); 6676 txg_wait_synced(dp, txg); 6677 6678 spa_spawn_aux_threads(spa); 6679 6680 spa_write_cachefile(spa, B_FALSE, B_TRUE, B_TRUE); 6681 6682 /* 6683 * Don't count references from objsets that are already closed 6684 * and are making their way through the eviction process. 6685 */ 6686 spa_evicting_os_wait(spa); 6687 spa->spa_minref = zfs_refcount_count(&spa->spa_refcount); 6688 spa->spa_load_state = SPA_LOAD_NONE; 6689 6690 spa_import_os(spa); 6691 6692 mutex_exit(&spa_namespace_lock); 6693 6694 return (0); 6695 } 6696 6697 /* 6698 * Import a non-root pool into the system. 6699 */ 6700 int 6701 spa_import(char *pool, nvlist_t *config, nvlist_t *props, uint64_t flags) 6702 { 6703 spa_t *spa; 6704 const char *altroot = NULL; 6705 spa_load_state_t state = SPA_LOAD_IMPORT; 6706 zpool_load_policy_t policy; 6707 spa_mode_t mode = spa_mode_global; 6708 uint64_t readonly = B_FALSE; 6709 int error; 6710 nvlist_t *nvroot; 6711 nvlist_t **spares, **l2cache; 6712 uint_t nspares, nl2cache; 6713 6714 /* 6715 * If a pool with this name exists, return failure. 6716 */ 6717 mutex_enter(&spa_namespace_lock); 6718 if (spa_lookup(pool) != NULL) { 6719 mutex_exit(&spa_namespace_lock); 6720 return (SET_ERROR(EEXIST)); 6721 } 6722 6723 /* 6724 * Create and initialize the spa structure. 6725 */ 6726 (void) nvlist_lookup_string(props, 6727 zpool_prop_to_name(ZPOOL_PROP_ALTROOT), &altroot); 6728 (void) nvlist_lookup_uint64(props, 6729 zpool_prop_to_name(ZPOOL_PROP_READONLY), &readonly); 6730 if (readonly) 6731 mode = SPA_MODE_READ; 6732 spa = spa_add(pool, config, altroot); 6733 spa->spa_import_flags = flags; 6734 6735 /* 6736 * Verbatim import - Take a pool and insert it into the namespace 6737 * as if it had been loaded at boot. 6738 */ 6739 if (spa->spa_import_flags & ZFS_IMPORT_VERBATIM) { 6740 if (props != NULL) 6741 spa_configfile_set(spa, props, B_FALSE); 6742 6743 spa_write_cachefile(spa, B_FALSE, B_TRUE, B_FALSE); 6744 spa_event_notify(spa, NULL, NULL, ESC_ZFS_POOL_IMPORT); 6745 zfs_dbgmsg("spa_import: verbatim import of %s", pool); 6746 mutex_exit(&spa_namespace_lock); 6747 return (0); 6748 } 6749 6750 spa_activate(spa, mode); 6751 6752 /* 6753 * Don't start async tasks until we know everything is healthy. 
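 *
 * Rough ordering sketch for the non-verbatim import path (illustrative
 * only):
 *
 *	spa_activate(spa, mode);
 *	spa_async_suspend(spa);		no async work while loading
 *	error = spa_load_best(spa, state, policy.zlp_txg,
 *	    policy.zlp_rewind);
 *	... fix up spares/l2cache, apply properties ...
 *	spa_async_resume(spa);		only once the pool looks healthy
 *
 * Anything queued with spa_async_request() during the load (resilver,
 * autoexpand, etc.) is only dispatched after the resume.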
6754 */ 6755 spa_async_suspend(spa); 6756 6757 zpool_get_load_policy(config, &policy); 6758 if (policy.zlp_rewind & ZPOOL_DO_REWIND) 6759 state = SPA_LOAD_RECOVER; 6760 6761 spa->spa_config_source = SPA_CONFIG_SRC_TRYIMPORT; 6762 6763 if (state != SPA_LOAD_RECOVER) { 6764 spa->spa_last_ubsync_txg = spa->spa_load_txg = 0; 6765 zfs_dbgmsg("spa_import: importing %s", pool); 6766 } else { 6767 zfs_dbgmsg("spa_import: importing %s, max_txg=%lld " 6768 "(RECOVERY MODE)", pool, (longlong_t)policy.zlp_txg); 6769 } 6770 error = spa_load_best(spa, state, policy.zlp_txg, policy.zlp_rewind); 6771 6772 /* 6773 * Propagate anything learned while loading the pool and pass it 6774 * back to caller (i.e. rewind info, missing devices, etc). 6775 */ 6776 fnvlist_add_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, spa->spa_load_info); 6777 6778 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 6779 /* 6780 * Toss any existing sparelist, as it doesn't have any validity 6781 * anymore, and conflicts with spa_has_spare(). 6782 */ 6783 if (spa->spa_spares.sav_config) { 6784 nvlist_free(spa->spa_spares.sav_config); 6785 spa->spa_spares.sav_config = NULL; 6786 spa_load_spares(spa); 6787 } 6788 if (spa->spa_l2cache.sav_config) { 6789 nvlist_free(spa->spa_l2cache.sav_config); 6790 spa->spa_l2cache.sav_config = NULL; 6791 spa_load_l2cache(spa); 6792 } 6793 6794 nvroot = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE); 6795 spa_config_exit(spa, SCL_ALL, FTAG); 6796 6797 if (props != NULL) 6798 spa_configfile_set(spa, props, B_FALSE); 6799 6800 if (error != 0 || (props && spa_writeable(spa) && 6801 (error = spa_prop_set(spa, props)))) { 6802 spa_unload(spa); 6803 spa_deactivate(spa); 6804 spa_remove(spa); 6805 mutex_exit(&spa_namespace_lock); 6806 return (error); 6807 } 6808 6809 spa_async_resume(spa); 6810 6811 /* 6812 * Override any spares and level 2 cache devices as specified by 6813 * the user, as these may have correct device names/devids, etc. 6814 */ 6815 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES, 6816 &spares, &nspares) == 0) { 6817 if (spa->spa_spares.sav_config) 6818 fnvlist_remove(spa->spa_spares.sav_config, 6819 ZPOOL_CONFIG_SPARES); 6820 else 6821 spa->spa_spares.sav_config = fnvlist_alloc(); 6822 fnvlist_add_nvlist_array(spa->spa_spares.sav_config, 6823 ZPOOL_CONFIG_SPARES, (const nvlist_t * const *)spares, 6824 nspares); 6825 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 6826 spa_load_spares(spa); 6827 spa_config_exit(spa, SCL_ALL, FTAG); 6828 spa->spa_spares.sav_sync = B_TRUE; 6829 spa->spa_spares.sav_label_sync = B_TRUE; 6830 } 6831 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE, 6832 &l2cache, &nl2cache) == 0) { 6833 if (spa->spa_l2cache.sav_config) 6834 fnvlist_remove(spa->spa_l2cache.sav_config, 6835 ZPOOL_CONFIG_L2CACHE); 6836 else 6837 spa->spa_l2cache.sav_config = fnvlist_alloc(); 6838 fnvlist_add_nvlist_array(spa->spa_l2cache.sav_config, 6839 ZPOOL_CONFIG_L2CACHE, (const nvlist_t * const *)l2cache, 6840 nl2cache); 6841 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 6842 spa_load_l2cache(spa); 6843 spa_config_exit(spa, SCL_ALL, FTAG); 6844 spa->spa_l2cache.sav_sync = B_TRUE; 6845 spa->spa_l2cache.sav_label_sync = B_TRUE; 6846 } 6847 6848 /* 6849 * Check for any removed devices. 6850 */ 6851 if (spa->spa_autoreplace) { 6852 spa_aux_check_removed(&spa->spa_spares); 6853 spa_aux_check_removed(&spa->spa_l2cache); 6854 } 6855 6856 if (spa_writeable(spa)) { 6857 /* 6858 * Update the config cache to include the newly-imported pool. 
6859 */ 6860 spa_config_update(spa, SPA_CONFIG_UPDATE_POOL); 6861 } 6862 6863 /* 6864 * It's possible that the pool was expanded while it was exported. 6865 * We kick off an async task to handle this for us. 6866 */ 6867 spa_async_request(spa, SPA_ASYNC_AUTOEXPAND); 6868 6869 spa_history_log_version(spa, "import", NULL); 6870 6871 spa_event_notify(spa, NULL, NULL, ESC_ZFS_POOL_IMPORT); 6872 6873 mutex_exit(&spa_namespace_lock); 6874 6875 zvol_create_minors_recursive(pool); 6876 6877 spa_import_os(spa); 6878 6879 return (0); 6880 } 6881 6882 nvlist_t * 6883 spa_tryimport(nvlist_t *tryconfig) 6884 { 6885 nvlist_t *config = NULL; 6886 const char *poolname, *cachefile; 6887 spa_t *spa; 6888 uint64_t state; 6889 int error; 6890 zpool_load_policy_t policy; 6891 6892 if (nvlist_lookup_string(tryconfig, ZPOOL_CONFIG_POOL_NAME, &poolname)) 6893 return (NULL); 6894 6895 if (nvlist_lookup_uint64(tryconfig, ZPOOL_CONFIG_POOL_STATE, &state)) 6896 return (NULL); 6897 6898 /* 6899 * Create and initialize the spa structure. 6900 */ 6901 char *name = kmem_alloc(MAXPATHLEN, KM_SLEEP); 6902 (void) snprintf(name, MAXPATHLEN, "%s-%llx-%s", 6903 TRYIMPORT_NAME, (u_longlong_t)(uintptr_t)curthread, poolname); 6904 6905 mutex_enter(&spa_namespace_lock); 6906 spa = spa_add(name, tryconfig, NULL); 6907 spa_activate(spa, SPA_MODE_READ); 6908 kmem_free(name, MAXPATHLEN); 6909 6910 /* 6911 * Rewind pool if a max txg was provided. 6912 */ 6913 zpool_get_load_policy(spa->spa_config, &policy); 6914 if (policy.zlp_txg != UINT64_MAX) { 6915 spa->spa_load_max_txg = policy.zlp_txg; 6916 spa->spa_extreme_rewind = B_TRUE; 6917 zfs_dbgmsg("spa_tryimport: importing %s, max_txg=%lld", 6918 poolname, (longlong_t)policy.zlp_txg); 6919 } else { 6920 zfs_dbgmsg("spa_tryimport: importing %s", poolname); 6921 } 6922 6923 if (nvlist_lookup_string(tryconfig, ZPOOL_CONFIG_CACHEFILE, &cachefile) 6924 == 0) { 6925 zfs_dbgmsg("spa_tryimport: using cachefile '%s'", cachefile); 6926 spa->spa_config_source = SPA_CONFIG_SRC_CACHEFILE; 6927 } else { 6928 spa->spa_config_source = SPA_CONFIG_SRC_SCAN; 6929 } 6930 6931 /* 6932 * spa_import() relies on the pool config fetched by spa_tryimport() 6933 * for its spare and cache devices. Without import flags, a missing 6934 * log device would make spa_tryimport() return early and never 6935 * retrieve the spare and cache configuration. Passing 6936 * ZFS_IMPORT_MISSING_LOG here makes it fetch the complete 6937 * configuration regardless of the missing log device. 6938 */ 6939 spa->spa_import_flags |= ZFS_IMPORT_MISSING_LOG; 6940 6941 error = spa_load(spa, SPA_LOAD_TRYIMPORT, SPA_IMPORT_EXISTING); 6942 6943 /* 6944 * If 'tryconfig' was at least parsable, return the current config. 6945 */ 6946 if (spa->spa_root_vdev != NULL) { 6947 config = spa_config_generate(spa, NULL, -1ULL, B_TRUE); 6948 fnvlist_add_string(config, ZPOOL_CONFIG_POOL_NAME, poolname); 6949 fnvlist_add_uint64(config, ZPOOL_CONFIG_POOL_STATE, state); 6950 fnvlist_add_uint64(config, ZPOOL_CONFIG_TIMESTAMP, 6951 spa->spa_uberblock.ub_timestamp); 6952 fnvlist_add_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, 6953 spa->spa_load_info); 6954 fnvlist_add_uint64(config, ZPOOL_CONFIG_ERRATA, 6955 spa->spa_errata); 6956 6957 /* 6958 * If the bootfs property exists on this pool then we 6959 * copy it out so that external consumers can tell which 6960 * pools are bootable.
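 *
 * Illustrative example (hypothetical names): if the real pool is
 * "tank" and bootfs is "tank/ROOT/default", the dataset name resolved
 * through this temporary spa comes back prefixed with the
 * TRYIMPORT_NAME-based spa name, i.e. roughly
 * "<TRYIMPORT_NAME>-<tid>-tank/ROOT/default". The code below splits
 * at the first '/' and re-prefixes with 'poolname', so the returned
 * config reports ZPOOL_CONFIG_BOOTFS as "tank/ROOT/default".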
6961 */ 6962 if ((!error || error == EEXIST) && spa->spa_bootfs) { 6963 char *tmpname = kmem_alloc(MAXPATHLEN, KM_SLEEP); 6964 6965 /* 6966 * We have to play games with the name since the 6967 * pool was opened as TRYIMPORT_NAME. 6968 */ 6969 if (dsl_dsobj_to_dsname(spa_name(spa), 6970 spa->spa_bootfs, tmpname) == 0) { 6971 char *cp; 6972 char *dsname; 6973 6974 dsname = kmem_alloc(MAXPATHLEN, KM_SLEEP); 6975 6976 cp = strchr(tmpname, '/'); 6977 if (cp == NULL) { 6978 (void) strlcpy(dsname, tmpname, 6979 MAXPATHLEN); 6980 } else { 6981 (void) snprintf(dsname, MAXPATHLEN, 6982 "%s/%s", poolname, ++cp); 6983 } 6984 fnvlist_add_string(config, ZPOOL_CONFIG_BOOTFS, 6985 dsname); 6986 kmem_free(dsname, MAXPATHLEN); 6987 } 6988 kmem_free(tmpname, MAXPATHLEN); 6989 } 6990 6991 /* 6992 * Add the list of hot spares and level 2 cache devices. 6993 */ 6994 spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER); 6995 spa_add_spares(spa, config); 6996 spa_add_l2cache(spa, config); 6997 spa_config_exit(spa, SCL_CONFIG, FTAG); 6998 } 6999 7000 spa_unload(spa); 7001 spa_deactivate(spa); 7002 spa_remove(spa); 7003 mutex_exit(&spa_namespace_lock); 7004 7005 return (config); 7006 } 7007 7008 /* 7009 * Pool export/destroy 7010 * 7011 * The act of destroying or exporting a pool is very simple. We make sure there 7012 * is no more pending I/O and any references to the pool are gone. Then, we 7013 * update the pool state and sync all the labels to disk, removing the 7014 * configuration from the cache afterwards. If the 'hardforce' flag is set, then 7015 * we don't sync the labels or remove the configuration cache. 7016 */ 7017 static int 7018 spa_export_common(const char *pool, int new_state, nvlist_t **oldconfig, 7019 boolean_t force, boolean_t hardforce) 7020 { 7021 int error = 0; 7022 spa_t *spa; 7023 hrtime_t export_start = gethrtime(); 7024 7025 if (oldconfig) 7026 *oldconfig = NULL; 7027 7028 if (!(spa_mode_global & SPA_MODE_WRITE)) 7029 return (SET_ERROR(EROFS)); 7030 7031 mutex_enter(&spa_namespace_lock); 7032 if ((spa = spa_lookup(pool)) == NULL) { 7033 mutex_exit(&spa_namespace_lock); 7034 return (SET_ERROR(ENOENT)); 7035 } 7036 7037 if (spa->spa_is_exporting) { 7038 /* the pool is being exported by another thread */ 7039 mutex_exit(&spa_namespace_lock); 7040 return (SET_ERROR(ZFS_ERR_EXPORT_IN_PROGRESS)); 7041 } 7042 spa->spa_is_exporting = B_TRUE; 7043 7044 /* 7045 * Put a hold on the pool, drop the namespace lock, stop async tasks 7046 * and see if we can export. 7047 */ 7048 spa_open_ref(spa, FTAG); 7049 mutex_exit(&spa_namespace_lock); 7050 spa_async_suspend(spa); 7051 if (spa->spa_zvol_taskq) { 7052 zvol_remove_minors(spa, spa_name(spa), B_TRUE); 7053 taskq_wait(spa->spa_zvol_taskq); 7054 } 7055 mutex_enter(&spa_namespace_lock); 7056 spa->spa_export_thread = curthread; 7057 spa_close(spa, FTAG); 7058 7059 if (spa->spa_state == POOL_STATE_UNINITIALIZED) { 7060 mutex_exit(&spa_namespace_lock); 7061 goto export_spa; 7062 } 7063 7064 /* 7065 * The pool will be in core if it's openable, in which case we can 7066 * modify its state. Objsets may be open only because they're dirty, 7067 * so we have to force it to sync before checking spa_refcnt. 7068 */ 7069 if (spa->spa_sync_on) { 7070 txg_wait_synced(spa->spa_dsl_pool, 0); 7071 spa_evicting_os_wait(spa); 7072 } 7073 7074 /* 7075 * A pool cannot be exported or destroyed if there are active 7076 * references. If we are resetting a pool, allow references by 7077 * fault injection handlers. 
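 *
 * Illustrative sketch of such a reference (hypothetical pool name): a
 * fault-injection consumer holds the pool with
 *
 *	spa_t *spa = spa_inject_addref("tank");
 *	... install or clear injection handlers ...
 *	spa_inject_delref(spa);
 *
 * and the check below refuses to export or destroy a pool that still
 * has such holders, returning EBUSY to the caller.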
7078 */ 7079 if (!spa_refcount_zero(spa) || (spa->spa_inject_ref != 0)) { 7080 error = SET_ERROR(EBUSY); 7081 goto fail; 7082 } 7083 7084 mutex_exit(&spa_namespace_lock); 7085 /* 7086 * At this point we no longer hold the spa_namespace_lock and 7087 * there were no references on the spa. Future spa_lookups will 7088 * notice the spa->spa_export_thread and wait until we signal 7089 * that we are finished. 7090 */ 7091 7092 if (spa->spa_sync_on) { 7093 vdev_t *rvd = spa->spa_root_vdev; 7094 /* 7095 * A pool cannot be exported if it has an active shared spare. 7096 * This is to prevent other pools stealing the active spare 7097 * from an exported pool. At the user's explicit request, such 7098 * a pool can still be forcibly exported. 7099 */ 7100 if (!force && new_state == POOL_STATE_EXPORTED && 7101 spa_has_active_shared_spare(spa)) { 7102 error = SET_ERROR(EXDEV); 7103 mutex_enter(&spa_namespace_lock); 7104 goto fail; 7105 } 7106 7107 /* 7108 * We're about to export or destroy this pool. Make sure 7109 * we stop all initialization and trim activity here before 7110 * we set the spa_final_txg. This will ensure that all 7111 * dirty data resulting from the initialization is 7112 * committed to disk before we unload the pool. 7113 */ 7114 vdev_initialize_stop_all(rvd, VDEV_INITIALIZE_ACTIVE); 7115 vdev_trim_stop_all(rvd, VDEV_TRIM_ACTIVE); 7116 vdev_autotrim_stop_all(spa); 7117 vdev_rebuild_stop_all(spa); 7118 7119 /* 7120 * We want this to be reflected on every label, 7121 * so mark them all dirty. spa_unload() will do the 7122 * final sync that pushes these changes out. 7123 */ 7124 if (new_state != POOL_STATE_UNINITIALIZED && !hardforce) { 7125 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 7126 spa->spa_state = new_state; 7127 vdev_config_dirty(rvd); 7128 spa_config_exit(spa, SCL_ALL, FTAG); 7129 } 7130 7131 /* 7132 * If the log space map feature is enabled and the pool is 7133 * getting exported (but not destroyed), we want to spend some 7134 * time flushing as many metaslabs as we can in an attempt to 7135 * destroy log space maps and save import time. This has to be 7136 * done before we set the spa_final_txg, otherwise 7137 * spa_sync() -> spa_flush_metaslabs() may dirty the final TXGs. 7138 * spa_should_flush_logs_on_unload() should be called after 7139 * spa_state has been set to the new_state.
7140 */ 7141 if (spa_should_flush_logs_on_unload(spa)) 7142 spa_unload_log_sm_flush_all(spa); 7143 7144 if (new_state != POOL_STATE_UNINITIALIZED && !hardforce) { 7145 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 7146 spa->spa_final_txg = spa_last_synced_txg(spa) + 7147 TXG_DEFER_SIZE + 1; 7148 spa_config_exit(spa, SCL_ALL, FTAG); 7149 } 7150 } 7151 7152 export_spa: 7153 spa_export_os(spa); 7154 7155 if (new_state == POOL_STATE_DESTROYED) 7156 spa_event_notify(spa, NULL, NULL, ESC_ZFS_POOL_DESTROY); 7157 else if (new_state == POOL_STATE_EXPORTED) 7158 spa_event_notify(spa, NULL, NULL, ESC_ZFS_POOL_EXPORT); 7159 7160 if (spa->spa_state != POOL_STATE_UNINITIALIZED) { 7161 spa_unload(spa); 7162 spa_deactivate(spa); 7163 } 7164 7165 if (oldconfig && spa->spa_config) 7166 *oldconfig = fnvlist_dup(spa->spa_config); 7167 7168 if (new_state == POOL_STATE_EXPORTED) 7169 zio_handle_export_delay(spa, gethrtime() - export_start); 7170 7171 /* 7172 * Take the namespace lock for the actual spa_t removal 7173 */ 7174 mutex_enter(&spa_namespace_lock); 7175 if (new_state != POOL_STATE_UNINITIALIZED) { 7176 if (!hardforce) 7177 spa_write_cachefile(spa, B_TRUE, B_TRUE, B_FALSE); 7178 spa_remove(spa); 7179 } else { 7180 /* 7181 * If spa_remove() is not called for this spa_t and 7182 * there is any possibility that it can be reused, 7183 * we make sure to reset the exporting flag. 7184 */ 7185 spa->spa_is_exporting = B_FALSE; 7186 spa->spa_export_thread = NULL; 7187 } 7188 7189 /* 7190 * Wake up any waiters in spa_lookup() 7191 */ 7192 cv_broadcast(&spa_namespace_cv); 7193 mutex_exit(&spa_namespace_lock); 7194 return (0); 7195 7196 fail: 7197 spa->spa_is_exporting = B_FALSE; 7198 spa->spa_export_thread = NULL; 7199 7200 spa_async_resume(spa); 7201 /* 7202 * Wake up any waiters in spa_lookup() 7203 */ 7204 cv_broadcast(&spa_namespace_cv); 7205 mutex_exit(&spa_namespace_lock); 7206 return (error); 7207 } 7208 7209 /* 7210 * Destroy a storage pool. 7211 */ 7212 int 7213 spa_destroy(const char *pool) 7214 { 7215 return (spa_export_common(pool, POOL_STATE_DESTROYED, NULL, 7216 B_FALSE, B_FALSE)); 7217 } 7218 7219 /* 7220 * Export a storage pool. 7221 */ 7222 int 7223 spa_export(const char *pool, nvlist_t **oldconfig, boolean_t force, 7224 boolean_t hardforce) 7225 { 7226 return (spa_export_common(pool, POOL_STATE_EXPORTED, oldconfig, 7227 force, hardforce)); 7228 } 7229 7230 /* 7231 * Similar to spa_export(), this unloads the spa_t without actually removing it 7232 * from the namespace in any way. 7233 */ 7234 int 7235 spa_reset(const char *pool) 7236 { 7237 return (spa_export_common(pool, POOL_STATE_UNINITIALIZED, NULL, 7238 B_FALSE, B_FALSE)); 7239 } 7240 7241 /* 7242 * ========================================================================== 7243 * Device manipulation 7244 * ========================================================================== 7245 */ 7246 7247 /* 7248 * This is called as a synctask to increment the draid feature flag 7249 */ 7250 static void 7251 spa_draid_feature_incr(void *arg, dmu_tx_t *tx) 7252 { 7253 spa_t *spa = dmu_tx_pool(tx)->dp_spa; 7254 int draid = (int)(uintptr_t)arg; 7255 7256 for (int c = 0; c < draid; c++) 7257 spa_feature_incr(spa, SPA_FEATURE_DRAID, tx); 7258 } 7259 7260 /* 7261 * Add a device to a storage pool. 
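 *
 * Illustrative caller sketch (the nvroot normally arrives from
 * userland via the vdev add ioctl; names below are hypothetical):
 *
 *	nvlist_t *nvroot = fnvlist_alloc();
 *	(describe the new top-level vdevs under ZPOOL_CONFIG_CHILDREN,
 *	 and/or aux devices under ZPOOL_CONFIG_SPARES and
 *	 ZPOOL_CONFIG_L2CACHE)
 *	error = spa_vdev_add(spa, nvroot, B_FALSE);
 *	fnvlist_free(nvroot);
 *
 * Passing check_ashift as B_TRUE additionally rejects, with
 * ZFS_ERR_ASHIFT_MISMATCH, a new top-level vdev whose ashift differs
 * from that of a pool whose existing vdevs all share one ashift.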
7262 */ 7263 int 7264 spa_vdev_add(spa_t *spa, nvlist_t *nvroot, boolean_t check_ashift) 7265 { 7266 uint64_t txg, ndraid = 0; 7267 int error; 7268 vdev_t *rvd = spa->spa_root_vdev; 7269 vdev_t *vd, *tvd; 7270 nvlist_t **spares, **l2cache; 7271 uint_t nspares, nl2cache; 7272 7273 ASSERT(spa_writeable(spa)); 7274 7275 txg = spa_vdev_enter(spa); 7276 7277 if ((error = spa_config_parse(spa, &vd, nvroot, NULL, 0, 7278 VDEV_ALLOC_ADD)) != 0) 7279 return (spa_vdev_exit(spa, NULL, txg, error)); 7280 7281 spa->spa_pending_vdev = vd; /* spa_vdev_exit() will clear this */ 7282 7283 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES, &spares, 7284 &nspares) != 0) 7285 nspares = 0; 7286 7287 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE, &l2cache, 7288 &nl2cache) != 0) 7289 nl2cache = 0; 7290 7291 if (vd->vdev_children == 0 && nspares == 0 && nl2cache == 0) 7292 return (spa_vdev_exit(spa, vd, txg, EINVAL)); 7293 7294 if (vd->vdev_children != 0 && 7295 (error = vdev_create(vd, txg, B_FALSE)) != 0) { 7296 return (spa_vdev_exit(spa, vd, txg, error)); 7297 } 7298 7299 /* 7300 * The virtual dRAID spares must be added after vdev tree is created 7301 * and the vdev guids are generated. The guid of their associated 7302 * dRAID is stored in the config and used when opening the spare. 7303 */ 7304 if ((error = vdev_draid_spare_create(nvroot, vd, &ndraid, 7305 rvd->vdev_children)) == 0) { 7306 if (ndraid > 0 && nvlist_lookup_nvlist_array(nvroot, 7307 ZPOOL_CONFIG_SPARES, &spares, &nspares) != 0) 7308 nspares = 0; 7309 } else { 7310 return (spa_vdev_exit(spa, vd, txg, error)); 7311 } 7312 7313 /* 7314 * We must validate the spares and l2cache devices after checking the 7315 * children. Otherwise, vdev_inuse() will blindly overwrite the spare. 7316 */ 7317 if ((error = spa_validate_aux(spa, nvroot, txg, VDEV_ALLOC_ADD)) != 0) 7318 return (spa_vdev_exit(spa, vd, txg, error)); 7319 7320 /* 7321 * If we are in the middle of a device removal, we can only add 7322 * devices which match the existing devices in the pool. 7323 * If we are in the middle of a removal, or have some indirect 7324 * vdevs, we can not add raidz or dRAID top levels. 
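 *
 * Concrete example (hypothetical values): with a device removal in
 * progress on a pool whose vdevs use ashift=12,
 *
 *	adding a new ashift=9 top-level		-> EINVAL (ashift mismatch)
 *	adding a new raidz/dRAID top-level	-> EINVAL (nparity != 0)
 *	adding a mirror of plain leaf disks
 *	    with ashift=12			-> accepted
 *
 * as enforced by the loop below.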
7325 */ 7326 if (spa->spa_vdev_removal != NULL || 7327 spa->spa_removing_phys.sr_prev_indirect_vdev != -1) { 7328 for (int c = 0; c < vd->vdev_children; c++) { 7329 tvd = vd->vdev_child[c]; 7330 if (spa->spa_vdev_removal != NULL && 7331 tvd->vdev_ashift != spa->spa_max_ashift) { 7332 return (spa_vdev_exit(spa, vd, txg, EINVAL)); 7333 } 7334 /* Fail if top level vdev is raidz or a dRAID */ 7335 if (vdev_get_nparity(tvd) != 0) 7336 return (spa_vdev_exit(spa, vd, txg, EINVAL)); 7337 7338 /* 7339 * Need the top level mirror to be 7340 * a mirror of leaf vdevs only 7341 */ 7342 if (tvd->vdev_ops == &vdev_mirror_ops) { 7343 for (uint64_t cid = 0; 7344 cid < tvd->vdev_children; cid++) { 7345 vdev_t *cvd = tvd->vdev_child[cid]; 7346 if (!cvd->vdev_ops->vdev_op_leaf) { 7347 return (spa_vdev_exit(spa, vd, 7348 txg, EINVAL)); 7349 } 7350 } 7351 } 7352 } 7353 } 7354 7355 if (check_ashift && spa->spa_max_ashift == spa->spa_min_ashift) { 7356 for (int c = 0; c < vd->vdev_children; c++) { 7357 tvd = vd->vdev_child[c]; 7358 if (tvd->vdev_ashift != spa->spa_max_ashift) { 7359 return (spa_vdev_exit(spa, vd, txg, 7360 ZFS_ERR_ASHIFT_MISMATCH)); 7361 } 7362 } 7363 } 7364 7365 for (int c = 0; c < vd->vdev_children; c++) { 7366 tvd = vd->vdev_child[c]; 7367 vdev_remove_child(vd, tvd); 7368 tvd->vdev_id = rvd->vdev_children; 7369 vdev_add_child(rvd, tvd); 7370 vdev_config_dirty(tvd); 7371 } 7372 7373 if (nspares != 0) { 7374 spa_set_aux_vdevs(&spa->spa_spares, spares, nspares, 7375 ZPOOL_CONFIG_SPARES); 7376 spa_load_spares(spa); 7377 spa->spa_spares.sav_sync = B_TRUE; 7378 } 7379 7380 if (nl2cache != 0) { 7381 spa_set_aux_vdevs(&spa->spa_l2cache, l2cache, nl2cache, 7382 ZPOOL_CONFIG_L2CACHE); 7383 spa_load_l2cache(spa); 7384 spa->spa_l2cache.sav_sync = B_TRUE; 7385 } 7386 7387 /* 7388 * We can't increment a feature while holding spa_vdev so we 7389 * have to do it in a synctask. 7390 */ 7391 if (ndraid != 0) { 7392 dmu_tx_t *tx; 7393 7394 tx = dmu_tx_create_assigned(spa->spa_dsl_pool, txg); 7395 dsl_sync_task_nowait(spa->spa_dsl_pool, spa_draid_feature_incr, 7396 (void *)(uintptr_t)ndraid, tx); 7397 dmu_tx_commit(tx); 7398 } 7399 7400 /* 7401 * We have to be careful when adding new vdevs to an existing pool. 7402 * If other threads start allocating from these vdevs before we 7403 * sync the config cache, and we lose power, then upon reboot we may 7404 * fail to open the pool because there are DVAs that the config cache 7405 * can't translate. Therefore, we first add the vdevs without 7406 * initializing metaslabs; sync the config cache (via spa_vdev_exit()); 7407 * and then let spa_config_update() initialize the new metaslabs. 7408 * 7409 * spa_load() checks for added-but-not-initialized vdevs, so that 7410 * if we lose power at any point in this sequence, the remaining 7411 * steps will be completed the next time we load the pool. 7412 */ 7413 (void) spa_vdev_exit(spa, vd, txg, 0); 7414 7415 mutex_enter(&spa_namespace_lock); 7416 spa_config_update(spa, SPA_CONFIG_UPDATE_POOL); 7417 spa_event_notify(spa, NULL, NULL, ESC_ZFS_VDEV_ADD); 7418 mutex_exit(&spa_namespace_lock); 7419 7420 return (0); 7421 } 7422 7423 /* 7424 * Attach a device to a vdev specified by its guid. The vdev type can be 7425 * a mirror, a raidz, or a leaf device that is also a top-level (e.g. a 7426 * single device). When the vdev is a single device, a mirror vdev will be 7427 * automatically inserted. 
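 *
 * Topology sketch (illustrative, hypothetical device names): attaching
 * new disk B to a plain top-level disk A turns
 *
 *	tank			tank
 *	    A		into	    mirror-0
 *				        A
 *				        B
 *
 * while attaching a disk to a raidz top-level (with the
 * raidz_expansion feature enabled) widens that raidz by one child
 * instead of inserting a mirror.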
7428 * 7429 * If 'replacing' is specified, the new device is intended to replace the 7430 * existing device; in this case the two devices are made into their own 7431 * mirror using the 'replacing' vdev, which is functionally identical to 7432 * the mirror vdev (it actually reuses all the same ops) but has a few 7433 * extra rules: you can't attach to it after it's been created, and upon 7434 * completion of resilvering, the first disk (the one being replaced) 7435 * is automatically detached. 7436 * 7437 * If 'rebuild' is specified, then sequential reconstruction (a.k.a. rebuild) 7438 * should be performed instead of traditional healing reconstruction. From 7439 * an administrator's perspective these are both resilver operations. 7440 */ 7441 int 7442 spa_vdev_attach(spa_t *spa, uint64_t guid, nvlist_t *nvroot, int replacing, 7443 int rebuild) 7444 { 7445 uint64_t txg, dtl_max_txg; 7446 vdev_t *rvd = spa->spa_root_vdev; 7447 vdev_t *oldvd, *newvd, *newrootvd, *pvd, *tvd; 7448 vdev_ops_t *pvops; 7449 char *oldvdpath, *newvdpath; 7450 int newvd_isspare = B_FALSE; 7451 int error; 7452 7453 ASSERT(spa_writeable(spa)); 7454 7455 txg = spa_vdev_enter(spa); 7456 7457 oldvd = spa_lookup_by_guid(spa, guid, B_FALSE); 7458 7459 ASSERT(MUTEX_HELD(&spa_namespace_lock)); 7460 if (spa_feature_is_active(spa, SPA_FEATURE_POOL_CHECKPOINT)) { 7461 error = (spa_has_checkpoint(spa)) ? 7462 ZFS_ERR_CHECKPOINT_EXISTS : ZFS_ERR_DISCARDING_CHECKPOINT; 7463 return (spa_vdev_exit(spa, NULL, txg, error)); 7464 } 7465 7466 if (rebuild) { 7467 if (!spa_feature_is_enabled(spa, SPA_FEATURE_DEVICE_REBUILD)) 7468 return (spa_vdev_exit(spa, NULL, txg, ENOTSUP)); 7469 7470 if (dsl_scan_resilvering(spa_get_dsl(spa)) || 7471 dsl_scan_resilver_scheduled(spa_get_dsl(spa))) { 7472 return (spa_vdev_exit(spa, NULL, txg, 7473 ZFS_ERR_RESILVER_IN_PROGRESS)); 7474 } 7475 } else { 7476 if (vdev_rebuild_active(rvd)) 7477 return (spa_vdev_exit(spa, NULL, txg, 7478 ZFS_ERR_REBUILD_IN_PROGRESS)); 7479 } 7480 7481 if (spa->spa_vdev_removal != NULL) { 7482 return (spa_vdev_exit(spa, NULL, txg, 7483 ZFS_ERR_DEVRM_IN_PROGRESS)); 7484 } 7485 7486 if (oldvd == NULL) 7487 return (spa_vdev_exit(spa, NULL, txg, ENODEV)); 7488 7489 boolean_t raidz = oldvd->vdev_ops == &vdev_raidz_ops; 7490 7491 if (raidz) { 7492 if (!spa_feature_is_enabled(spa, SPA_FEATURE_RAIDZ_EXPANSION)) 7493 return (spa_vdev_exit(spa, NULL, txg, ENOTSUP)); 7494 7495 /* 7496 * Can't expand a raidz while prior expand is in progress. 7497 */ 7498 if (spa->spa_raidz_expand != NULL) { 7499 return (spa_vdev_exit(spa, NULL, txg, 7500 ZFS_ERR_RAIDZ_EXPAND_IN_PROGRESS)); 7501 } 7502 } else if (!oldvd->vdev_ops->vdev_op_leaf) { 7503 return (spa_vdev_exit(spa, NULL, txg, ENOTSUP)); 7504 } 7505 7506 if (raidz) 7507 pvd = oldvd; 7508 else 7509 pvd = oldvd->vdev_parent; 7510 7511 if (spa_config_parse(spa, &newrootvd, nvroot, NULL, 0, 7512 VDEV_ALLOC_ATTACH) != 0) 7513 return (spa_vdev_exit(spa, NULL, txg, EINVAL)); 7514 7515 if (newrootvd->vdev_children != 1) 7516 return (spa_vdev_exit(spa, newrootvd, txg, EINVAL)); 7517 7518 newvd = newrootvd->vdev_child[0]; 7519 7520 if (!newvd->vdev_ops->vdev_op_leaf) 7521 return (spa_vdev_exit(spa, newrootvd, txg, EINVAL)); 7522 7523 if ((error = vdev_create(newrootvd, txg, replacing)) != 0) 7524 return (spa_vdev_exit(spa, newrootvd, txg, error)); 7525 7526 /* 7527 * log, dedup and special vdevs should not be replaced by spares.
7528 */ 7529 if ((oldvd->vdev_top->vdev_alloc_bias != VDEV_BIAS_NONE || 7530 oldvd->vdev_top->vdev_islog) && newvd->vdev_isspare) { 7531 return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP)); 7532 } 7533 7534 /* 7535 * A dRAID spare can only replace a child of its parent dRAID vdev. 7536 */ 7537 if (newvd->vdev_ops == &vdev_draid_spare_ops && 7538 oldvd->vdev_top != vdev_draid_spare_get_parent(newvd)) { 7539 return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP)); 7540 } 7541 7542 if (rebuild) { 7543 /* 7544 * For rebuilds, the top vdev must support reconstruction 7545 * using only space maps. This means the only allowable 7546 * vdevs types are the root vdev, a mirror, or dRAID. 7547 */ 7548 tvd = pvd; 7549 if (pvd->vdev_top != NULL) 7550 tvd = pvd->vdev_top; 7551 7552 if (tvd->vdev_ops != &vdev_mirror_ops && 7553 tvd->vdev_ops != &vdev_root_ops && 7554 tvd->vdev_ops != &vdev_draid_ops) { 7555 return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP)); 7556 } 7557 } 7558 7559 if (!replacing) { 7560 /* 7561 * For attach, the only allowable parent is a mirror or 7562 * the root vdev. A raidz vdev can be attached to, but 7563 * you cannot attach to a raidz child. 7564 */ 7565 if (pvd->vdev_ops != &vdev_mirror_ops && 7566 pvd->vdev_ops != &vdev_root_ops && 7567 !raidz) 7568 return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP)); 7569 7570 pvops = &vdev_mirror_ops; 7571 } else { 7572 /* 7573 * Active hot spares can only be replaced by inactive hot 7574 * spares. 7575 */ 7576 if (pvd->vdev_ops == &vdev_spare_ops && 7577 oldvd->vdev_isspare && 7578 !spa_has_spare(spa, newvd->vdev_guid)) 7579 return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP)); 7580 7581 /* 7582 * If the source is a hot spare, and the parent isn't already a 7583 * spare, then we want to create a new hot spare. Otherwise, we 7584 * want to create a replacing vdev. The user is not allowed to 7585 * attach to a spared vdev child unless the 'isspare' state is 7586 * the same (spare replaces spare, non-spare replaces 7587 * non-spare). 7588 */ 7589 if (pvd->vdev_ops == &vdev_replacing_ops && 7590 spa_version(spa) < SPA_VERSION_MULTI_REPLACE) { 7591 return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP)); 7592 } else if (pvd->vdev_ops == &vdev_spare_ops && 7593 newvd->vdev_isspare != oldvd->vdev_isspare) { 7594 return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP)); 7595 } 7596 7597 if (newvd->vdev_isspare) 7598 pvops = &vdev_spare_ops; 7599 else 7600 pvops = &vdev_replacing_ops; 7601 } 7602 7603 /* 7604 * Make sure the new device is big enough. 7605 */ 7606 vdev_t *min_vdev = raidz ? oldvd->vdev_child[0] : oldvd; 7607 if (newvd->vdev_asize < vdev_get_min_asize(min_vdev)) 7608 return (spa_vdev_exit(spa, newrootvd, txg, EOVERFLOW)); 7609 7610 /* 7611 * The new device cannot have a higher alignment requirement 7612 * than the top-level vdev. 7613 */ 7614 if (newvd->vdev_ashift > oldvd->vdev_top->vdev_ashift) { 7615 return (spa_vdev_exit(spa, newrootvd, txg, 7616 ZFS_ERR_ASHIFT_MISMATCH)); 7617 } 7618 7619 /* 7620 * RAIDZ-expansion-specific checks. 
7621 */ 7622 if (raidz) { 7623 if (vdev_raidz_attach_check(newvd) != 0) 7624 return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP)); 7625 7626 /* 7627 * Fail early if a child is not healthy or being replaced 7628 */ 7629 for (int i = 0; i < oldvd->vdev_children; i++) { 7630 if (vdev_is_dead(oldvd->vdev_child[i]) || 7631 !oldvd->vdev_child[i]->vdev_ops->vdev_op_leaf) { 7632 return (spa_vdev_exit(spa, newrootvd, txg, 7633 ENXIO)); 7634 } 7635 /* Also fail if reserved boot area is in-use */ 7636 if (vdev_check_boot_reserve(spa, oldvd->vdev_child[i]) 7637 != 0) { 7638 return (spa_vdev_exit(spa, newrootvd, txg, 7639 EADDRINUSE)); 7640 } 7641 } 7642 } 7643 7644 if (raidz) { 7645 /* 7646 * Note: oldvdpath is freed by spa_strfree(), but 7647 * kmem_asprintf() is freed by kmem_strfree(), so we have to 7648 * move it to a spa_strdup-ed string. 7649 */ 7650 char *tmp = kmem_asprintf("raidz%u-%u", 7651 (uint_t)vdev_get_nparity(oldvd), (uint_t)oldvd->vdev_id); 7652 oldvdpath = spa_strdup(tmp); 7653 kmem_strfree(tmp); 7654 } else { 7655 oldvdpath = spa_strdup(oldvd->vdev_path); 7656 } 7657 newvdpath = spa_strdup(newvd->vdev_path); 7658 7659 /* 7660 * If this is an in-place replacement, update oldvd's path and devid 7661 * to make it distinguishable from newvd, and unopenable from now on. 7662 */ 7663 if (strcmp(oldvdpath, newvdpath) == 0) { 7664 spa_strfree(oldvd->vdev_path); 7665 oldvd->vdev_path = kmem_alloc(strlen(newvdpath) + 5, 7666 KM_SLEEP); 7667 (void) sprintf(oldvd->vdev_path, "%s/old", 7668 newvdpath); 7669 if (oldvd->vdev_devid != NULL) { 7670 spa_strfree(oldvd->vdev_devid); 7671 oldvd->vdev_devid = NULL; 7672 } 7673 spa_strfree(oldvdpath); 7674 oldvdpath = spa_strdup(oldvd->vdev_path); 7675 } 7676 7677 /* 7678 * If the parent is not a mirror, or if we're replacing, insert the new 7679 * mirror/replacing/spare vdev above oldvd. 7680 */ 7681 if (!raidz && pvd->vdev_ops != pvops) { 7682 pvd = vdev_add_parent(oldvd, pvops); 7683 ASSERT(pvd->vdev_ops == pvops); 7684 ASSERT(oldvd->vdev_parent == pvd); 7685 } 7686 7687 ASSERT(pvd->vdev_top->vdev_parent == rvd); 7688 7689 /* 7690 * Extract the new device from its root and add it to pvd. 7691 */ 7692 vdev_remove_child(newrootvd, newvd); 7693 newvd->vdev_id = pvd->vdev_children; 7694 newvd->vdev_crtxg = oldvd->vdev_crtxg; 7695 vdev_add_child(pvd, newvd); 7696 7697 /* 7698 * Reevaluate the parent vdev state. 7699 */ 7700 vdev_propagate_state(pvd); 7701 7702 tvd = newvd->vdev_top; 7703 ASSERT(pvd->vdev_top == tvd); 7704 ASSERT(tvd->vdev_parent == rvd); 7705 7706 vdev_config_dirty(tvd); 7707 7708 /* 7709 * Set newvd's DTL to [TXG_INITIAL, dtl_max_txg) so that we account 7710 * for any dmu_sync-ed blocks. It will propagate upward when 7711 * spa_vdev_exit() calls vdev_dtl_reassess(). 7712 */ 7713 dtl_max_txg = txg + TXG_CONCURRENT_STATES; 7714 7715 if (raidz) { 7716 /* 7717 * Wait for the youngest allocations and frees to sync, 7718 * and then wait for the deferral of those frees to finish. 
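 *
 * As a worked example (assuming the usual definitions of
 * TXG_CONCURRENT_STATES == 3 and TXG_DEFER_SIZE == 2): if we entered
 * at txg N, the call below effectively waits for txg N + 5 to sync,
 * which covers every txg that could still hold in-flight allocations
 * plus the window over which their frees may have been deferred.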
*/
7720 spa_vdev_config_exit(spa, NULL,
7721 txg + TXG_CONCURRENT_STATES + TXG_DEFER_SIZE, 0, FTAG);
7722
7723 vdev_initialize_stop_all(tvd, VDEV_INITIALIZE_ACTIVE);
7724 vdev_trim_stop_all(tvd, VDEV_TRIM_ACTIVE);
7725 vdev_autotrim_stop_wait(tvd);
7726
7727 dtl_max_txg = spa_vdev_config_enter(spa);
7728
7729 tvd->vdev_rz_expanding = B_TRUE;
7730
7731 vdev_dirty_leaves(tvd, VDD_DTL, dtl_max_txg);
7732 vdev_config_dirty(tvd);
7733
7734 dmu_tx_t *tx = dmu_tx_create_assigned(spa->spa_dsl_pool,
7735 dtl_max_txg);
7736 dsl_sync_task_nowait(spa->spa_dsl_pool, vdev_raidz_attach_sync,
7737 newvd, tx);
7738 dmu_tx_commit(tx);
7739 } else {
7740 vdev_dtl_dirty(newvd, DTL_MISSING, TXG_INITIAL,
7741 dtl_max_txg - TXG_INITIAL);
7742
7743 if (newvd->vdev_isspare) {
7744 spa_spare_activate(newvd);
7745 spa_event_notify(spa, newvd, NULL, ESC_ZFS_VDEV_SPARE);
7746 }
7747
7748 newvd_isspare = newvd->vdev_isspare;
7749
7750 /*
7751 * Mark newvd's DTL dirty in this txg.
7752 */
7753 vdev_dirty(tvd, VDD_DTL, newvd, txg);
7754
7755 /*
7756 * Schedule the resilver or rebuild to restart in the future.
7757 * We do this to ensure that dmu_sync-ed blocks have been
7758 * stitched into the respective datasets.
7759 */
7760 if (rebuild) {
7761 newvd->vdev_rebuild_txg = txg;
7762
7763 vdev_rebuild(tvd);
7764 } else {
7765 newvd->vdev_resilver_txg = txg;
7766
7767 if (dsl_scan_resilvering(spa_get_dsl(spa)) &&
7768 spa_feature_is_enabled(spa,
7769 SPA_FEATURE_RESILVER_DEFER)) {
7770 vdev_defer_resilver(newvd);
7771 } else {
7772 dsl_scan_restart_resilver(spa->spa_dsl_pool,
7773 dtl_max_txg);
7774 }
7775 }
7776 }
7777
7778 if (spa->spa_bootfs)
7779 spa_event_notify(spa, newvd, NULL, ESC_ZFS_BOOTFS_VDEV_ATTACH);
7780
7781 spa_event_notify(spa, newvd, NULL, ESC_ZFS_VDEV_ATTACH);
7782
7783 /*
7784 * Commit the config
7785 */
7786 (void) spa_vdev_exit(spa, newrootvd, dtl_max_txg, 0);
7787
7788 spa_history_log_internal(spa, "vdev attach", NULL,
7789 "%s vdev=%s %s vdev=%s",
7790 replacing && newvd_isspare ? "spare in" :
7791 replacing ? "replace" : "attach", newvdpath,
7792 replacing ? "for" : "to", oldvdpath);
7793
7794 spa_strfree(oldvdpath);
7795 spa_strfree(newvdpath);
7796
7797 return (0);
7798 }
7799
7800 /*
7801 * Detach a device from a mirror or replacing vdev.
7802 *
7803 * If 'replace_done' is specified, only detach if the parent
7804 * is a replacing or a spare vdev.
7805 */
7806 int
7807 spa_vdev_detach(spa_t *spa, uint64_t guid, uint64_t pguid, int replace_done)
7808 {
7809 uint64_t txg;
7810 int error;
7811 vdev_t *rvd __maybe_unused = spa->spa_root_vdev;
7812 vdev_t *vd, *pvd, *cvd, *tvd;
7813 boolean_t unspare = B_FALSE;
7814 uint64_t unspare_guid = 0;
7815 char *vdpath;
7816
7817 ASSERT(spa_writeable(spa));
7818
7819 txg = spa_vdev_detach_enter(spa, guid);
7820
7821 vd = spa_lookup_by_guid(spa, guid, B_FALSE);
7822
7823 /*
7824 * Besides being called directly from userland through the
7825 * ioctl interface, spa_vdev_detach() can potentially be called
7826 * at the end of spa_vdev_resilver_done().
7827 *
7828 * In the regular case, when we have a checkpoint this shouldn't
7829 * happen as we never empty the DTLs of a vdev during the scrub
7830 * [see comment in dsl_scan_done()]. Thus spa_vdev_resilver_done()
7831 * should never get here when we have a checkpoint.
7832 *
7833 * That said, even in the case where we checkpoint the pool exactly
7834 * as spa_vdev_resilver_done() calls this function, everything
7835 * should be fine as the resilver will return right away.
7836 */ 7837 ASSERT(MUTEX_HELD(&spa_namespace_lock)); 7838 if (spa_feature_is_active(spa, SPA_FEATURE_POOL_CHECKPOINT)) { 7839 error = (spa_has_checkpoint(spa)) ? 7840 ZFS_ERR_CHECKPOINT_EXISTS : ZFS_ERR_DISCARDING_CHECKPOINT; 7841 return (spa_vdev_exit(spa, NULL, txg, error)); 7842 } 7843 7844 if (vd == NULL) 7845 return (spa_vdev_exit(spa, NULL, txg, ENODEV)); 7846 7847 if (!vd->vdev_ops->vdev_op_leaf) 7848 return (spa_vdev_exit(spa, NULL, txg, ENOTSUP)); 7849 7850 pvd = vd->vdev_parent; 7851 7852 /* 7853 * If the parent/child relationship is not as expected, don't do it. 7854 * Consider M(A,R(B,C)) -- that is, a mirror of A with a replacing 7855 * vdev that's replacing B with C. The user's intent in replacing 7856 * is to go from M(A,B) to M(A,C). If the user decides to cancel 7857 * the replace by detaching C, the expected behavior is to end up 7858 * M(A,B). But suppose that right after deciding to detach C, 7859 * the replacement of B completes. We would have M(A,C), and then 7860 * ask to detach C, which would leave us with just A -- not what 7861 * the user wanted. To prevent this, we make sure that the 7862 * parent/child relationship hasn't changed -- in this example, 7863 * that C's parent is still the replacing vdev R. 7864 */ 7865 if (pvd->vdev_guid != pguid && pguid != 0) 7866 return (spa_vdev_exit(spa, NULL, txg, EBUSY)); 7867 7868 /* 7869 * Only 'replacing' or 'spare' vdevs can be replaced. 7870 */ 7871 if (replace_done && pvd->vdev_ops != &vdev_replacing_ops && 7872 pvd->vdev_ops != &vdev_spare_ops) 7873 return (spa_vdev_exit(spa, NULL, txg, ENOTSUP)); 7874 7875 ASSERT(pvd->vdev_ops != &vdev_spare_ops || 7876 spa_version(spa) >= SPA_VERSION_SPARES); 7877 7878 /* 7879 * Only mirror, replacing, and spare vdevs support detach. 7880 */ 7881 if (pvd->vdev_ops != &vdev_replacing_ops && 7882 pvd->vdev_ops != &vdev_mirror_ops && 7883 pvd->vdev_ops != &vdev_spare_ops) 7884 return (spa_vdev_exit(spa, NULL, txg, ENOTSUP)); 7885 7886 /* 7887 * If this device has the only valid copy of some data, 7888 * we cannot safely detach it. 7889 */ 7890 if (vdev_dtl_required(vd)) 7891 return (spa_vdev_exit(spa, NULL, txg, EBUSY)); 7892 7893 ASSERT(pvd->vdev_children >= 2); 7894 7895 /* 7896 * If we are detaching the second disk from a replacing vdev, then 7897 * check to see if we changed the original vdev's path to have "/old" 7898 * at the end in spa_vdev_attach(). If so, undo that change now. 7899 */ 7900 if (pvd->vdev_ops == &vdev_replacing_ops && vd->vdev_id > 0 && 7901 vd->vdev_path != NULL) { 7902 size_t len = strlen(vd->vdev_path); 7903 7904 for (int c = 0; c < pvd->vdev_children; c++) { 7905 cvd = pvd->vdev_child[c]; 7906 7907 if (cvd == vd || cvd->vdev_path == NULL) 7908 continue; 7909 7910 if (strncmp(cvd->vdev_path, vd->vdev_path, len) == 0 && 7911 strcmp(cvd->vdev_path + len, "/old") == 0) { 7912 spa_strfree(cvd->vdev_path); 7913 cvd->vdev_path = spa_strdup(vd->vdev_path); 7914 break; 7915 } 7916 } 7917 } 7918 7919 /* 7920 * If we are detaching the original disk from a normal spare, then it 7921 * implies that the spare should become a real disk, and be removed 7922 * from the active spare list for the pool. dRAID spares on the 7923 * other hand are coupled to the pool and thus should never be removed 7924 * from the spares list. 
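 *
 * For example (illustrative only): if sda failed and hot spare sdb
 * took over, the top-level looks like spare(sda, sdb); detaching sda
 * (vdev_id 0) promotes sdb to an ordinary pool member and drops it
 * from the spares list. Had sdb been a distributed dRAID spare, it
 * would stay on the list because it has no existence outside the pool.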
7925 */ 7926 if (pvd->vdev_ops == &vdev_spare_ops && vd->vdev_id == 0) { 7927 vdev_t *last_cvd = pvd->vdev_child[pvd->vdev_children - 1]; 7928 7929 if (last_cvd->vdev_isspare && 7930 last_cvd->vdev_ops != &vdev_draid_spare_ops) { 7931 unspare = B_TRUE; 7932 } 7933 } 7934 7935 /* 7936 * Erase the disk labels so the disk can be used for other things. 7937 * This must be done after all other error cases are handled, 7938 * but before we disembowel vd (so we can still do I/O to it). 7939 * But if we can't do it, don't treat the error as fatal -- 7940 * it may be that the unwritability of the disk is the reason 7941 * it's being detached! 7942 */ 7943 (void) vdev_label_init(vd, 0, VDEV_LABEL_REMOVE); 7944 7945 /* 7946 * Remove vd from its parent and compact the parent's children. 7947 */ 7948 vdev_remove_child(pvd, vd); 7949 vdev_compact_children(pvd); 7950 7951 /* 7952 * Remember one of the remaining children so we can get tvd below. 7953 */ 7954 cvd = pvd->vdev_child[pvd->vdev_children - 1]; 7955 7956 /* 7957 * If we need to remove the remaining child from the list of hot spares, 7958 * do it now, marking the vdev as no longer a spare in the process. 7959 * We must do this before vdev_remove_parent(), because that can 7960 * change the GUID if it creates a new toplevel GUID. For a similar 7961 * reason, we must remove the spare now, in the same txg as the detach; 7962 * otherwise someone could attach a new sibling, change the GUID, and 7963 * the subsequent attempt to spa_vdev_remove(unspare_guid) would fail. 7964 */ 7965 if (unspare) { 7966 ASSERT(cvd->vdev_isspare); 7967 spa_spare_remove(cvd); 7968 unspare_guid = cvd->vdev_guid; 7969 (void) spa_vdev_remove(spa, unspare_guid, B_TRUE); 7970 cvd->vdev_unspare = B_TRUE; 7971 } 7972 7973 /* 7974 * If the parent mirror/replacing vdev only has one child, 7975 * the parent is no longer needed. Remove it from the tree. 7976 */ 7977 if (pvd->vdev_children == 1) { 7978 if (pvd->vdev_ops == &vdev_spare_ops) 7979 cvd->vdev_unspare = B_FALSE; 7980 vdev_remove_parent(cvd); 7981 } 7982 7983 /* 7984 * We don't set tvd until now because the parent we just removed 7985 * may have been the previous top-level vdev. 7986 */ 7987 tvd = cvd->vdev_top; 7988 ASSERT(tvd->vdev_parent == rvd); 7989 7990 /* 7991 * Reevaluate the parent vdev state. 7992 */ 7993 vdev_propagate_state(cvd); 7994 7995 /* 7996 * If the 'autoexpand' property is set on the pool then automatically 7997 * try to expand the size of the pool. For example if the device we 7998 * just detached was smaller than the others, it may be possible to 7999 * add metaslabs (i.e. grow the pool). We need to reopen the vdev 8000 * first so that we can obtain the updated sizes of the leaf vdevs. 8001 */ 8002 if (spa->spa_autoexpand) { 8003 vdev_reopen(tvd); 8004 vdev_expand(tvd, txg); 8005 } 8006 8007 vdev_config_dirty(tvd); 8008 8009 /* 8010 * Mark vd's DTL as dirty in this txg. vdev_dtl_sync() will see that 8011 * vd->vdev_detached is set and free vd's DTL object in syncing context. 8012 * But first make sure we're not on any *other* txg's DTL list, to 8013 * prevent vd from being accessed after it's freed. 8014 */ 8015 vdpath = spa_strdup(vd->vdev_path ? 
vd->vdev_path : "none"); 8016 for (int t = 0; t < TXG_SIZE; t++) 8017 (void) txg_list_remove_this(&tvd->vdev_dtl_list, vd, t); 8018 vd->vdev_detached = B_TRUE; 8019 vdev_dirty(tvd, VDD_DTL, vd, txg); 8020 8021 spa_event_notify(spa, vd, NULL, ESC_ZFS_VDEV_REMOVE); 8022 spa_notify_waiters(spa); 8023 8024 /* hang on to the spa before we release the lock */ 8025 spa_open_ref(spa, FTAG); 8026 8027 error = spa_vdev_exit(spa, vd, txg, 0); 8028 8029 spa_history_log_internal(spa, "detach", NULL, 8030 "vdev=%s", vdpath); 8031 spa_strfree(vdpath); 8032 8033 /* 8034 * If this was the removal of the original device in a hot spare vdev, 8035 * then we want to go through and remove the device from the hot spare 8036 * list of every other pool. 8037 */ 8038 if (unspare) { 8039 spa_t *altspa = NULL; 8040 8041 mutex_enter(&spa_namespace_lock); 8042 while ((altspa = spa_next(altspa)) != NULL) { 8043 if (altspa->spa_state != POOL_STATE_ACTIVE || 8044 altspa == spa) 8045 continue; 8046 8047 spa_open_ref(altspa, FTAG); 8048 mutex_exit(&spa_namespace_lock); 8049 (void) spa_vdev_remove(altspa, unspare_guid, B_TRUE); 8050 mutex_enter(&spa_namespace_lock); 8051 spa_close(altspa, FTAG); 8052 } 8053 mutex_exit(&spa_namespace_lock); 8054 8055 /* search the rest of the vdevs for spares to remove */ 8056 spa_vdev_resilver_done(spa); 8057 } 8058 8059 /* all done with the spa; OK to release */ 8060 mutex_enter(&spa_namespace_lock); 8061 spa_close(spa, FTAG); 8062 mutex_exit(&spa_namespace_lock); 8063 8064 return (error); 8065 } 8066 8067 static int 8068 spa_vdev_initialize_impl(spa_t *spa, uint64_t guid, uint64_t cmd_type, 8069 list_t *vd_list) 8070 { 8071 ASSERT(MUTEX_HELD(&spa_namespace_lock)); 8072 8073 spa_config_enter(spa, SCL_CONFIG | SCL_STATE, FTAG, RW_READER); 8074 8075 /* Look up vdev and ensure it's a leaf. */ 8076 vdev_t *vd = spa_lookup_by_guid(spa, guid, B_FALSE); 8077 if (vd == NULL || vd->vdev_detached) { 8078 spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG); 8079 return (SET_ERROR(ENODEV)); 8080 } else if (!vd->vdev_ops->vdev_op_leaf || !vdev_is_concrete(vd)) { 8081 spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG); 8082 return (SET_ERROR(EINVAL)); 8083 } else if (!vdev_writeable(vd)) { 8084 spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG); 8085 return (SET_ERROR(EROFS)); 8086 } 8087 mutex_enter(&vd->vdev_initialize_lock); 8088 spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG); 8089 8090 /* 8091 * When we activate an initialize action we check to see 8092 * if the vdev_initialize_thread is NULL. We do this instead 8093 * of using the vdev_initialize_state since there might be 8094 * a previous initialization process which has completed but 8095 * the thread is not exited. 
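 *
 * In short (summarizing the checks that follow): START is refused
 * while a thread is still running or the top-level vdev is being
 * removed or raidz-expanded; CANCEL requires an active or suspended
 * initialization; SUSPEND requires an active one; and UNINIT is
 * refused while a thread is still running.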
8096 */ 8097 if (cmd_type == POOL_INITIALIZE_START && 8098 (vd->vdev_initialize_thread != NULL || 8099 vd->vdev_top->vdev_removing || vd->vdev_top->vdev_rz_expanding)) { 8100 mutex_exit(&vd->vdev_initialize_lock); 8101 return (SET_ERROR(EBUSY)); 8102 } else if (cmd_type == POOL_INITIALIZE_CANCEL && 8103 (vd->vdev_initialize_state != VDEV_INITIALIZE_ACTIVE && 8104 vd->vdev_initialize_state != VDEV_INITIALIZE_SUSPENDED)) { 8105 mutex_exit(&vd->vdev_initialize_lock); 8106 return (SET_ERROR(ESRCH)); 8107 } else if (cmd_type == POOL_INITIALIZE_SUSPEND && 8108 vd->vdev_initialize_state != VDEV_INITIALIZE_ACTIVE) { 8109 mutex_exit(&vd->vdev_initialize_lock); 8110 return (SET_ERROR(ESRCH)); 8111 } else if (cmd_type == POOL_INITIALIZE_UNINIT && 8112 vd->vdev_initialize_thread != NULL) { 8113 mutex_exit(&vd->vdev_initialize_lock); 8114 return (SET_ERROR(EBUSY)); 8115 } 8116 8117 switch (cmd_type) { 8118 case POOL_INITIALIZE_START: 8119 vdev_initialize(vd); 8120 break; 8121 case POOL_INITIALIZE_CANCEL: 8122 vdev_initialize_stop(vd, VDEV_INITIALIZE_CANCELED, vd_list); 8123 break; 8124 case POOL_INITIALIZE_SUSPEND: 8125 vdev_initialize_stop(vd, VDEV_INITIALIZE_SUSPENDED, vd_list); 8126 break; 8127 case POOL_INITIALIZE_UNINIT: 8128 vdev_uninitialize(vd); 8129 break; 8130 default: 8131 panic("invalid cmd_type %llu", (unsigned long long)cmd_type); 8132 } 8133 mutex_exit(&vd->vdev_initialize_lock); 8134 8135 return (0); 8136 } 8137 8138 int 8139 spa_vdev_initialize(spa_t *spa, nvlist_t *nv, uint64_t cmd_type, 8140 nvlist_t *vdev_errlist) 8141 { 8142 int total_errors = 0; 8143 list_t vd_list; 8144 8145 list_create(&vd_list, sizeof (vdev_t), 8146 offsetof(vdev_t, vdev_initialize_node)); 8147 8148 /* 8149 * We hold the namespace lock through the whole function 8150 * to prevent any changes to the pool while we're starting or 8151 * stopping initialization. The config and state locks are held so that 8152 * we can properly assess the vdev state before we commit to 8153 * the initializing operation. 8154 */ 8155 mutex_enter(&spa_namespace_lock); 8156 8157 for (nvpair_t *pair = nvlist_next_nvpair(nv, NULL); 8158 pair != NULL; pair = nvlist_next_nvpair(nv, pair)) { 8159 uint64_t vdev_guid = fnvpair_value_uint64(pair); 8160 8161 int error = spa_vdev_initialize_impl(spa, vdev_guid, cmd_type, 8162 &vd_list); 8163 if (error != 0) { 8164 char guid_as_str[MAXNAMELEN]; 8165 8166 (void) snprintf(guid_as_str, sizeof (guid_as_str), 8167 "%llu", (unsigned long long)vdev_guid); 8168 fnvlist_add_int64(vdev_errlist, guid_as_str, error); 8169 total_errors++; 8170 } 8171 } 8172 8173 /* Wait for all initialize threads to stop. */ 8174 vdev_initialize_stop_wait(spa, &vd_list); 8175 8176 /* Sync out the initializing state */ 8177 txg_wait_synced(spa->spa_dsl_pool, 0); 8178 mutex_exit(&spa_namespace_lock); 8179 8180 list_destroy(&vd_list); 8181 8182 return (total_errors); 8183 } 8184 8185 static int 8186 spa_vdev_trim_impl(spa_t *spa, uint64_t guid, uint64_t cmd_type, 8187 uint64_t rate, boolean_t partial, boolean_t secure, list_t *vd_list) 8188 { 8189 ASSERT(MUTEX_HELD(&spa_namespace_lock)); 8190 8191 spa_config_enter(spa, SCL_CONFIG | SCL_STATE, FTAG, RW_READER); 8192 8193 /* Look up vdev and ensure it's a leaf. 
*/
8194 vdev_t *vd = spa_lookup_by_guid(spa, guid, B_FALSE);
8195 if (vd == NULL || vd->vdev_detached) {
8196 spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
8197 return (SET_ERROR(ENODEV));
8198 } else if (!vd->vdev_ops->vdev_op_leaf || !vdev_is_concrete(vd)) {
8199 spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
8200 return (SET_ERROR(EINVAL));
8201 } else if (!vdev_writeable(vd)) {
8202 spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
8203 return (SET_ERROR(EROFS));
8204 } else if (!vd->vdev_has_trim) {
8205 spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
8206 return (SET_ERROR(EOPNOTSUPP));
8207 } else if (secure && !vd->vdev_has_securetrim) {
8208 spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
8209 return (SET_ERROR(EOPNOTSUPP));
8210 }
8211 mutex_enter(&vd->vdev_trim_lock);
8212 spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
8213
8214 /*
8215 * When we activate a TRIM action we check to see if the
8216 * vdev_trim_thread is NULL. We do this instead of using the
8217 * vdev_trim_state since there might be a previous TRIM process
8218 * which has completed but whose thread has not yet exited.
8219 */
8220 if (cmd_type == POOL_TRIM_START &&
8221 (vd->vdev_trim_thread != NULL || vd->vdev_top->vdev_removing ||
8222 vd->vdev_top->vdev_rz_expanding)) {
8223 mutex_exit(&vd->vdev_trim_lock);
8224 return (SET_ERROR(EBUSY));
8225 } else if (cmd_type == POOL_TRIM_CANCEL &&
8226 (vd->vdev_trim_state != VDEV_TRIM_ACTIVE &&
8227 vd->vdev_trim_state != VDEV_TRIM_SUSPENDED)) {
8228 mutex_exit(&vd->vdev_trim_lock);
8229 return (SET_ERROR(ESRCH));
8230 } else if (cmd_type == POOL_TRIM_SUSPEND &&
8231 vd->vdev_trim_state != VDEV_TRIM_ACTIVE) {
8232 mutex_exit(&vd->vdev_trim_lock);
8233 return (SET_ERROR(ESRCH));
8234 }
8235
8236 switch (cmd_type) {
8237 case POOL_TRIM_START:
8238 vdev_trim(vd, rate, partial, secure);
8239 break;
8240 case POOL_TRIM_CANCEL:
8241 vdev_trim_stop(vd, VDEV_TRIM_CANCELED, vd_list);
8242 break;
8243 case POOL_TRIM_SUSPEND:
8244 vdev_trim_stop(vd, VDEV_TRIM_SUSPENDED, vd_list);
8245 break;
8246 default:
8247 panic("invalid cmd_type %llu", (unsigned long long)cmd_type);
8248 }
8249 mutex_exit(&vd->vdev_trim_lock);
8250
8251 return (0);
8252 }
8253
8254 /*
8255 * Initiates a manual TRIM for the requested vdevs. This kicks off individual
8256 * TRIM threads for each child vdev. These threads pass over all of the free
8257 * space in the vdev's metaslabs and issue TRIM commands for that space.
8258 */
8259 int
8260 spa_vdev_trim(spa_t *spa, nvlist_t *nv, uint64_t cmd_type, uint64_t rate,
8261 boolean_t partial, boolean_t secure, nvlist_t *vdev_errlist)
8262 {
8263 int total_errors = 0;
8264 list_t vd_list;
8265
8266 list_create(&vd_list, sizeof (vdev_t),
8267 offsetof(vdev_t, vdev_trim_node));
8268
8269 /*
8270 * We hold the namespace lock through the whole function
8271 * to prevent any changes to the pool while we're starting or
8272 * stopping TRIM. The config and state locks are held so that
8273 * we can properly assess the vdev state before we commit to
8274 * the TRIM operation.
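 *
 * A minimal caller-side sketch (the names "nv", "errlist" and the
 * pair name "vdev-0" are illustrative, not the actual ioctl plumbing;
 * a rate of 0 is assumed here to mean no rate limit):
 *
 *     nvlist_t *nv = fnvlist_alloc();
 *     nvlist_t *errlist = fnvlist_alloc();
 *     fnvlist_add_uint64(nv, "vdev-0", leaf_vdev_guid);
 *     int err = spa_vdev_trim(spa, nv, POOL_TRIM_START, 0,
 *         B_FALSE, B_FALSE, errlist);
 *
 * Only the uint64 values (the leaf vdev guids) are consumed below;
 * per-vdev failures come back in 'errlist' keyed by guid string.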
8275 */ 8276 mutex_enter(&spa_namespace_lock); 8277 8278 for (nvpair_t *pair = nvlist_next_nvpair(nv, NULL); 8279 pair != NULL; pair = nvlist_next_nvpair(nv, pair)) { 8280 uint64_t vdev_guid = fnvpair_value_uint64(pair); 8281 8282 int error = spa_vdev_trim_impl(spa, vdev_guid, cmd_type, 8283 rate, partial, secure, &vd_list); 8284 if (error != 0) { 8285 char guid_as_str[MAXNAMELEN]; 8286 8287 (void) snprintf(guid_as_str, sizeof (guid_as_str), 8288 "%llu", (unsigned long long)vdev_guid); 8289 fnvlist_add_int64(vdev_errlist, guid_as_str, error); 8290 total_errors++; 8291 } 8292 } 8293 8294 /* Wait for all TRIM threads to stop. */ 8295 vdev_trim_stop_wait(spa, &vd_list); 8296 8297 /* Sync out the TRIM state */ 8298 txg_wait_synced(spa->spa_dsl_pool, 0); 8299 mutex_exit(&spa_namespace_lock); 8300 8301 list_destroy(&vd_list); 8302 8303 return (total_errors); 8304 } 8305 8306 /* 8307 * Split a set of devices from their mirrors, and create a new pool from them. 8308 */ 8309 int 8310 spa_vdev_split_mirror(spa_t *spa, const char *newname, nvlist_t *config, 8311 nvlist_t *props, boolean_t exp) 8312 { 8313 int error = 0; 8314 uint64_t txg, *glist; 8315 spa_t *newspa; 8316 uint_t c, children, lastlog; 8317 nvlist_t **child, *nvl, *tmp; 8318 dmu_tx_t *tx; 8319 const char *altroot = NULL; 8320 vdev_t *rvd, **vml = NULL; /* vdev modify list */ 8321 boolean_t activate_slog; 8322 8323 ASSERT(spa_writeable(spa)); 8324 8325 txg = spa_vdev_enter(spa); 8326 8327 ASSERT(MUTEX_HELD(&spa_namespace_lock)); 8328 if (spa_feature_is_active(spa, SPA_FEATURE_POOL_CHECKPOINT)) { 8329 error = (spa_has_checkpoint(spa)) ? 8330 ZFS_ERR_CHECKPOINT_EXISTS : ZFS_ERR_DISCARDING_CHECKPOINT; 8331 return (spa_vdev_exit(spa, NULL, txg, error)); 8332 } 8333 8334 /* clear the log and flush everything up to now */ 8335 activate_slog = spa_passivate_log(spa); 8336 (void) spa_vdev_config_exit(spa, NULL, txg, 0, FTAG); 8337 error = spa_reset_logs(spa); 8338 txg = spa_vdev_config_enter(spa); 8339 8340 if (activate_slog) 8341 spa_activate_log(spa); 8342 8343 if (error != 0) 8344 return (spa_vdev_exit(spa, NULL, txg, error)); 8345 8346 /* check new spa name before going any further */ 8347 if (spa_lookup(newname) != NULL) 8348 return (spa_vdev_exit(spa, NULL, txg, EEXIST)); 8349 8350 /* 8351 * scan through all the children to ensure they're all mirrors 8352 */ 8353 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &nvl) != 0 || 8354 nvlist_lookup_nvlist_array(nvl, ZPOOL_CONFIG_CHILDREN, &child, 8355 &children) != 0) 8356 return (spa_vdev_exit(spa, NULL, txg, EINVAL)); 8357 8358 /* first, check to ensure we've got the right child count */ 8359 rvd = spa->spa_root_vdev; 8360 lastlog = 0; 8361 for (c = 0; c < rvd->vdev_children; c++) { 8362 vdev_t *vd = rvd->vdev_child[c]; 8363 8364 /* don't count the holes & logs as children */ 8365 if (vd->vdev_islog || (vd->vdev_ops != &vdev_indirect_ops && 8366 !vdev_is_concrete(vd))) { 8367 if (lastlog == 0) 8368 lastlog = c; 8369 continue; 8370 } 8371 8372 lastlog = 0; 8373 } 8374 if (children != (lastlog != 0 ? 
lastlog : rvd->vdev_children)) 8375 return (spa_vdev_exit(spa, NULL, txg, EINVAL)); 8376 8377 /* next, ensure no spare or cache devices are part of the split */ 8378 if (nvlist_lookup_nvlist(nvl, ZPOOL_CONFIG_SPARES, &tmp) == 0 || 8379 nvlist_lookup_nvlist(nvl, ZPOOL_CONFIG_L2CACHE, &tmp) == 0) 8380 return (spa_vdev_exit(spa, NULL, txg, EINVAL)); 8381 8382 vml = kmem_zalloc(children * sizeof (vdev_t *), KM_SLEEP); 8383 glist = kmem_zalloc(children * sizeof (uint64_t), KM_SLEEP); 8384 8385 /* then, loop over each vdev and validate it */ 8386 for (c = 0; c < children; c++) { 8387 uint64_t is_hole = 0; 8388 8389 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_HOLE, 8390 &is_hole); 8391 8392 if (is_hole != 0) { 8393 if (spa->spa_root_vdev->vdev_child[c]->vdev_ishole || 8394 spa->spa_root_vdev->vdev_child[c]->vdev_islog) { 8395 continue; 8396 } else { 8397 error = SET_ERROR(EINVAL); 8398 break; 8399 } 8400 } 8401 8402 /* deal with indirect vdevs */ 8403 if (spa->spa_root_vdev->vdev_child[c]->vdev_ops == 8404 &vdev_indirect_ops) 8405 continue; 8406 8407 /* which disk is going to be split? */ 8408 if (nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_GUID, 8409 &glist[c]) != 0) { 8410 error = SET_ERROR(EINVAL); 8411 break; 8412 } 8413 8414 /* look it up in the spa */ 8415 vml[c] = spa_lookup_by_guid(spa, glist[c], B_FALSE); 8416 if (vml[c] == NULL) { 8417 error = SET_ERROR(ENODEV); 8418 break; 8419 } 8420 8421 /* make sure there's nothing stopping the split */ 8422 if (vml[c]->vdev_parent->vdev_ops != &vdev_mirror_ops || 8423 vml[c]->vdev_islog || 8424 !vdev_is_concrete(vml[c]) || 8425 vml[c]->vdev_isspare || 8426 vml[c]->vdev_isl2cache || 8427 !vdev_writeable(vml[c]) || 8428 vml[c]->vdev_children != 0 || 8429 vml[c]->vdev_state != VDEV_STATE_HEALTHY || 8430 c != spa->spa_root_vdev->vdev_child[c]->vdev_id) { 8431 error = SET_ERROR(EINVAL); 8432 break; 8433 } 8434 8435 if (vdev_dtl_required(vml[c]) || 8436 vdev_resilver_needed(vml[c], NULL, NULL)) { 8437 error = SET_ERROR(EBUSY); 8438 break; 8439 } 8440 8441 /* we need certain info from the top level */ 8442 fnvlist_add_uint64(child[c], ZPOOL_CONFIG_METASLAB_ARRAY, 8443 vml[c]->vdev_top->vdev_ms_array); 8444 fnvlist_add_uint64(child[c], ZPOOL_CONFIG_METASLAB_SHIFT, 8445 vml[c]->vdev_top->vdev_ms_shift); 8446 fnvlist_add_uint64(child[c], ZPOOL_CONFIG_ASIZE, 8447 vml[c]->vdev_top->vdev_asize); 8448 fnvlist_add_uint64(child[c], ZPOOL_CONFIG_ASHIFT, 8449 vml[c]->vdev_top->vdev_ashift); 8450 8451 /* transfer per-vdev ZAPs */ 8452 ASSERT3U(vml[c]->vdev_leaf_zap, !=, 0); 8453 VERIFY0(nvlist_add_uint64(child[c], 8454 ZPOOL_CONFIG_VDEV_LEAF_ZAP, vml[c]->vdev_leaf_zap)); 8455 8456 ASSERT3U(vml[c]->vdev_top->vdev_top_zap, !=, 0); 8457 VERIFY0(nvlist_add_uint64(child[c], 8458 ZPOOL_CONFIG_VDEV_TOP_ZAP, 8459 vml[c]->vdev_parent->vdev_top_zap)); 8460 } 8461 8462 if (error != 0) { 8463 kmem_free(vml, children * sizeof (vdev_t *)); 8464 kmem_free(glist, children * sizeof (uint64_t)); 8465 return (spa_vdev_exit(spa, NULL, txg, error)); 8466 } 8467 8468 /* stop writers from using the disks */ 8469 for (c = 0; c < children; c++) { 8470 if (vml[c] != NULL) 8471 vml[c]->vdev_offline = B_TRUE; 8472 } 8473 vdev_reopen(spa->spa_root_vdev); 8474 8475 /* 8476 * Temporarily record the splitting vdevs in the spa config. This 8477 * will disappear once the config is regenerated. 
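 *
 * Conceptually the record is just a uint64 array stored as
 * ZPOOL_CONFIG_SPLIT_LIST inside an nvlist hung off the pool config
 * under ZPOOL_CONFIG_SPLIT: roughly one slot per top-level vdev,
 * holding the guid of the child being split off (or 0 where there is
 * nothing to split, e.g. holes).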
8478 */ 8479 nvl = fnvlist_alloc(); 8480 fnvlist_add_uint64_array(nvl, ZPOOL_CONFIG_SPLIT_LIST, glist, children); 8481 kmem_free(glist, children * sizeof (uint64_t)); 8482 8483 mutex_enter(&spa->spa_props_lock); 8484 fnvlist_add_nvlist(spa->spa_config, ZPOOL_CONFIG_SPLIT, nvl); 8485 mutex_exit(&spa->spa_props_lock); 8486 spa->spa_config_splitting = nvl; 8487 vdev_config_dirty(spa->spa_root_vdev); 8488 8489 /* configure and create the new pool */ 8490 fnvlist_add_string(config, ZPOOL_CONFIG_POOL_NAME, newname); 8491 fnvlist_add_uint64(config, ZPOOL_CONFIG_POOL_STATE, 8492 exp ? POOL_STATE_EXPORTED : POOL_STATE_ACTIVE); 8493 fnvlist_add_uint64(config, ZPOOL_CONFIG_VERSION, spa_version(spa)); 8494 fnvlist_add_uint64(config, ZPOOL_CONFIG_POOL_TXG, spa->spa_config_txg); 8495 fnvlist_add_uint64(config, ZPOOL_CONFIG_POOL_GUID, 8496 spa_generate_guid(NULL)); 8497 VERIFY0(nvlist_add_boolean(config, ZPOOL_CONFIG_HAS_PER_VDEV_ZAPS)); 8498 (void) nvlist_lookup_string(props, 8499 zpool_prop_to_name(ZPOOL_PROP_ALTROOT), &altroot); 8500 8501 /* add the new pool to the namespace */ 8502 newspa = spa_add(newname, config, altroot); 8503 newspa->spa_avz_action = AVZ_ACTION_REBUILD; 8504 newspa->spa_config_txg = spa->spa_config_txg; 8505 spa_set_log_state(newspa, SPA_LOG_CLEAR); 8506 8507 /* release the spa config lock, retaining the namespace lock */ 8508 spa_vdev_config_exit(spa, NULL, txg, 0, FTAG); 8509 8510 if (zio_injection_enabled) 8511 zio_handle_panic_injection(spa, FTAG, 1); 8512 8513 spa_activate(newspa, spa_mode_global); 8514 spa_async_suspend(newspa); 8515 8516 /* 8517 * Temporarily stop the initializing and TRIM activity. We set the 8518 * state to ACTIVE so that we know to resume initializing or TRIM 8519 * once the split has completed. 8520 */ 8521 list_t vd_initialize_list; 8522 list_create(&vd_initialize_list, sizeof (vdev_t), 8523 offsetof(vdev_t, vdev_initialize_node)); 8524 8525 list_t vd_trim_list; 8526 list_create(&vd_trim_list, sizeof (vdev_t), 8527 offsetof(vdev_t, vdev_trim_node)); 8528 8529 for (c = 0; c < children; c++) { 8530 if (vml[c] != NULL && vml[c]->vdev_ops != &vdev_indirect_ops) { 8531 mutex_enter(&vml[c]->vdev_initialize_lock); 8532 vdev_initialize_stop(vml[c], 8533 VDEV_INITIALIZE_ACTIVE, &vd_initialize_list); 8534 mutex_exit(&vml[c]->vdev_initialize_lock); 8535 8536 mutex_enter(&vml[c]->vdev_trim_lock); 8537 vdev_trim_stop(vml[c], VDEV_TRIM_ACTIVE, &vd_trim_list); 8538 mutex_exit(&vml[c]->vdev_trim_lock); 8539 } 8540 } 8541 8542 vdev_initialize_stop_wait(spa, &vd_initialize_list); 8543 vdev_trim_stop_wait(spa, &vd_trim_list); 8544 8545 list_destroy(&vd_initialize_list); 8546 list_destroy(&vd_trim_list); 8547 8548 newspa->spa_config_source = SPA_CONFIG_SRC_SPLIT; 8549 newspa->spa_is_splitting = B_TRUE; 8550 8551 /* create the new pool from the disks of the original pool */ 8552 error = spa_load(newspa, SPA_LOAD_IMPORT, SPA_IMPORT_ASSEMBLE); 8553 if (error) 8554 goto out; 8555 8556 /* if that worked, generate a real config for the new pool */ 8557 if (newspa->spa_root_vdev != NULL) { 8558 newspa->spa_config_splitting = fnvlist_alloc(); 8559 fnvlist_add_uint64(newspa->spa_config_splitting, 8560 ZPOOL_CONFIG_SPLIT_GUID, spa_guid(spa)); 8561 spa_config_set(newspa, spa_config_generate(newspa, NULL, -1ULL, 8562 B_TRUE)); 8563 } 8564 8565 /* set the props */ 8566 if (props != NULL) { 8567 spa_configfile_set(newspa, props, B_FALSE); 8568 error = spa_prop_set(newspa, props); 8569 if (error) 8570 goto out; 8571 } 8572 8573 /* flush everything */ 8574 txg = 
spa_vdev_config_enter(newspa); 8575 vdev_config_dirty(newspa->spa_root_vdev); 8576 (void) spa_vdev_config_exit(newspa, NULL, txg, 0, FTAG); 8577 8578 if (zio_injection_enabled) 8579 zio_handle_panic_injection(spa, FTAG, 2); 8580 8581 spa_async_resume(newspa); 8582 8583 /* finally, update the original pool's config */ 8584 txg = spa_vdev_config_enter(spa); 8585 tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir); 8586 error = dmu_tx_assign(tx, TXG_WAIT); 8587 if (error != 0) 8588 dmu_tx_abort(tx); 8589 for (c = 0; c < children; c++) { 8590 if (vml[c] != NULL && vml[c]->vdev_ops != &vdev_indirect_ops) { 8591 vdev_t *tvd = vml[c]->vdev_top; 8592 8593 /* 8594 * Need to be sure the detachable VDEV is not 8595 * on any *other* txg's DTL list to prevent it 8596 * from being accessed after it's freed. 8597 */ 8598 for (int t = 0; t < TXG_SIZE; t++) { 8599 (void) txg_list_remove_this( 8600 &tvd->vdev_dtl_list, vml[c], t); 8601 } 8602 8603 vdev_split(vml[c]); 8604 if (error == 0) 8605 spa_history_log_internal(spa, "detach", tx, 8606 "vdev=%s", vml[c]->vdev_path); 8607 8608 vdev_free(vml[c]); 8609 } 8610 } 8611 spa->spa_avz_action = AVZ_ACTION_REBUILD; 8612 vdev_config_dirty(spa->spa_root_vdev); 8613 spa->spa_config_splitting = NULL; 8614 nvlist_free(nvl); 8615 if (error == 0) 8616 dmu_tx_commit(tx); 8617 (void) spa_vdev_exit(spa, NULL, txg, 0); 8618 8619 if (zio_injection_enabled) 8620 zio_handle_panic_injection(spa, FTAG, 3); 8621 8622 /* split is complete; log a history record */ 8623 spa_history_log_internal(newspa, "split", NULL, 8624 "from pool %s", spa_name(spa)); 8625 8626 newspa->spa_is_splitting = B_FALSE; 8627 kmem_free(vml, children * sizeof (vdev_t *)); 8628 8629 /* if we're not going to mount the filesystems in userland, export */ 8630 if (exp) 8631 error = spa_export_common(newname, POOL_STATE_EXPORTED, NULL, 8632 B_FALSE, B_FALSE); 8633 8634 return (error); 8635 8636 out: 8637 spa_unload(newspa); 8638 spa_deactivate(newspa); 8639 spa_remove(newspa); 8640 8641 txg = spa_vdev_config_enter(spa); 8642 8643 /* re-online all offlined disks */ 8644 for (c = 0; c < children; c++) { 8645 if (vml[c] != NULL) 8646 vml[c]->vdev_offline = B_FALSE; 8647 } 8648 8649 /* restart initializing or trimming disks as necessary */ 8650 spa_async_request(spa, SPA_ASYNC_INITIALIZE_RESTART); 8651 spa_async_request(spa, SPA_ASYNC_TRIM_RESTART); 8652 spa_async_request(spa, SPA_ASYNC_AUTOTRIM_RESTART); 8653 8654 vdev_reopen(spa->spa_root_vdev); 8655 8656 nvlist_free(spa->spa_config_splitting); 8657 spa->spa_config_splitting = NULL; 8658 (void) spa_vdev_exit(spa, NULL, txg, error); 8659 8660 kmem_free(vml, children * sizeof (vdev_t *)); 8661 return (error); 8662 } 8663 8664 /* 8665 * Find any device that's done replacing, or a vdev marked 'unspare' that's 8666 * currently spared, so we can detach it. 8667 */ 8668 static vdev_t * 8669 spa_vdev_resilver_done_hunt(vdev_t *vd) 8670 { 8671 vdev_t *newvd, *oldvd; 8672 8673 for (int c = 0; c < vd->vdev_children; c++) { 8674 oldvd = spa_vdev_resilver_done_hunt(vd->vdev_child[c]); 8675 if (oldvd != NULL) 8676 return (oldvd); 8677 } 8678 8679 /* 8680 * Check for a completed replacement. We always consider the first 8681 * vdev in the list to be the oldest vdev, and the last one to be 8682 * the newest (see spa_vdev_attach() for how that works). In 8683 * the case where the newest vdev is faulted, we will not automatically 8684 * remove it after a resilver completes. This is OK as it will require 8685 * user intervention to determine which disk the admin wishes to keep. 
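 *
 * Concretely (an illustrative case): for replacing(A, B), child[0]
 * is the old disk A and child[children - 1] is the new disk B; once
 * B's DTLs are empty and A is no longer required, A is the vdev
 * returned here for detach.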
8686 */ 8687 if (vd->vdev_ops == &vdev_replacing_ops) { 8688 ASSERT(vd->vdev_children > 1); 8689 8690 newvd = vd->vdev_child[vd->vdev_children - 1]; 8691 oldvd = vd->vdev_child[0]; 8692 8693 if (vdev_dtl_empty(newvd, DTL_MISSING) && 8694 vdev_dtl_empty(newvd, DTL_OUTAGE) && 8695 !vdev_dtl_required(oldvd)) 8696 return (oldvd); 8697 } 8698 8699 /* 8700 * Check for a completed resilver with the 'unspare' flag set. 8701 * Also potentially update faulted state. 8702 */ 8703 if (vd->vdev_ops == &vdev_spare_ops) { 8704 vdev_t *first = vd->vdev_child[0]; 8705 vdev_t *last = vd->vdev_child[vd->vdev_children - 1]; 8706 8707 if (last->vdev_unspare) { 8708 oldvd = first; 8709 newvd = last; 8710 } else if (first->vdev_unspare) { 8711 oldvd = last; 8712 newvd = first; 8713 } else { 8714 oldvd = NULL; 8715 } 8716 8717 if (oldvd != NULL && 8718 vdev_dtl_empty(newvd, DTL_MISSING) && 8719 vdev_dtl_empty(newvd, DTL_OUTAGE) && 8720 !vdev_dtl_required(oldvd)) 8721 return (oldvd); 8722 8723 vdev_propagate_state(vd); 8724 8725 /* 8726 * If there are more than two spares attached to a disk, 8727 * and those spares are not required, then we want to 8728 * attempt to free them up now so that they can be used 8729 * by other pools. Once we're back down to a single 8730 * disk+spare, we stop removing them. 8731 */ 8732 if (vd->vdev_children > 2) { 8733 newvd = vd->vdev_child[1]; 8734 8735 if (newvd->vdev_isspare && last->vdev_isspare && 8736 vdev_dtl_empty(last, DTL_MISSING) && 8737 vdev_dtl_empty(last, DTL_OUTAGE) && 8738 !vdev_dtl_required(newvd)) 8739 return (newvd); 8740 } 8741 } 8742 8743 return (NULL); 8744 } 8745 8746 static void 8747 spa_vdev_resilver_done(spa_t *spa) 8748 { 8749 vdev_t *vd, *pvd, *ppvd; 8750 uint64_t guid, sguid, pguid, ppguid; 8751 8752 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 8753 8754 while ((vd = spa_vdev_resilver_done_hunt(spa->spa_root_vdev)) != NULL) { 8755 pvd = vd->vdev_parent; 8756 ppvd = pvd->vdev_parent; 8757 guid = vd->vdev_guid; 8758 pguid = pvd->vdev_guid; 8759 ppguid = ppvd->vdev_guid; 8760 sguid = 0; 8761 /* 8762 * If we have just finished replacing a hot spared device, then 8763 * we need to detach the parent's first child (the original hot 8764 * spare) as well. 8765 */ 8766 if (ppvd->vdev_ops == &vdev_spare_ops && pvd->vdev_id == 0 && 8767 ppvd->vdev_children == 2) { 8768 ASSERT(pvd->vdev_ops == &vdev_replacing_ops); 8769 sguid = ppvd->vdev_child[1]->vdev_guid; 8770 } 8771 ASSERT(vd->vdev_resilver_txg == 0 || !vdev_dtl_required(vd)); 8772 8773 spa_config_exit(spa, SCL_ALL, FTAG); 8774 if (spa_vdev_detach(spa, guid, pguid, B_TRUE) != 0) 8775 return; 8776 if (sguid && spa_vdev_detach(spa, sguid, ppguid, B_TRUE) != 0) 8777 return; 8778 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 8779 } 8780 8781 spa_config_exit(spa, SCL_ALL, FTAG); 8782 8783 /* 8784 * If a detach was not performed above replace waiters will not have 8785 * been notified. In which case we must do so now. 8786 */ 8787 spa_notify_waiters(spa); 8788 } 8789 8790 /* 8791 * Update the stored path or FRU for this vdev. 
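 *
 * Callers normally reach this through the thin wrappers below, e.g.
 * spa_vdev_setpath(spa, guid, "/dev/disk/by-id/...") to record a
 * renamed device node, or spa_vdev_setfru() to record a new FRU
 * label (the path string here is only an illustration).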
8792 */ 8793 static int 8794 spa_vdev_set_common(spa_t *spa, uint64_t guid, const char *value, 8795 boolean_t ispath) 8796 { 8797 vdev_t *vd; 8798 boolean_t sync = B_FALSE; 8799 8800 ASSERT(spa_writeable(spa)); 8801 8802 spa_vdev_state_enter(spa, SCL_ALL); 8803 8804 if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL) 8805 return (spa_vdev_state_exit(spa, NULL, ENOENT)); 8806 8807 if (!vd->vdev_ops->vdev_op_leaf) 8808 return (spa_vdev_state_exit(spa, NULL, ENOTSUP)); 8809 8810 if (ispath) { 8811 if (strcmp(value, vd->vdev_path) != 0) { 8812 spa_strfree(vd->vdev_path); 8813 vd->vdev_path = spa_strdup(value); 8814 sync = B_TRUE; 8815 } 8816 } else { 8817 if (vd->vdev_fru == NULL) { 8818 vd->vdev_fru = spa_strdup(value); 8819 sync = B_TRUE; 8820 } else if (strcmp(value, vd->vdev_fru) != 0) { 8821 spa_strfree(vd->vdev_fru); 8822 vd->vdev_fru = spa_strdup(value); 8823 sync = B_TRUE; 8824 } 8825 } 8826 8827 return (spa_vdev_state_exit(spa, sync ? vd : NULL, 0)); 8828 } 8829 8830 int 8831 spa_vdev_setpath(spa_t *spa, uint64_t guid, const char *newpath) 8832 { 8833 return (spa_vdev_set_common(spa, guid, newpath, B_TRUE)); 8834 } 8835 8836 int 8837 spa_vdev_setfru(spa_t *spa, uint64_t guid, const char *newfru) 8838 { 8839 return (spa_vdev_set_common(spa, guid, newfru, B_FALSE)); 8840 } 8841 8842 /* 8843 * ========================================================================== 8844 * SPA Scanning 8845 * ========================================================================== 8846 */ 8847 int 8848 spa_scrub_pause_resume(spa_t *spa, pool_scrub_cmd_t cmd) 8849 { 8850 ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == 0); 8851 8852 if (dsl_scan_resilvering(spa->spa_dsl_pool)) 8853 return (SET_ERROR(EBUSY)); 8854 8855 return (dsl_scrub_set_pause_resume(spa->spa_dsl_pool, cmd)); 8856 } 8857 8858 int 8859 spa_scan_stop(spa_t *spa) 8860 { 8861 ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == 0); 8862 if (dsl_scan_resilvering(spa->spa_dsl_pool)) 8863 return (SET_ERROR(EBUSY)); 8864 8865 return (dsl_scan_cancel(spa->spa_dsl_pool)); 8866 } 8867 8868 int 8869 spa_scan(spa_t *spa, pool_scan_func_t func) 8870 { 8871 ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == 0); 8872 8873 if (func >= POOL_SCAN_FUNCS || func == POOL_SCAN_NONE) 8874 return (SET_ERROR(ENOTSUP)); 8875 8876 if (func == POOL_SCAN_RESILVER && 8877 !spa_feature_is_enabled(spa, SPA_FEATURE_RESILVER_DEFER)) 8878 return (SET_ERROR(ENOTSUP)); 8879 8880 /* 8881 * If a resilver was requested, but there is no DTL on a 8882 * writeable leaf device, we have nothing to do. 
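 *
 * For instance, a manually requested resilver (e.g. 'zpool resilver')
 * on a pool whose leaves are already fully synced lands here: rather
 * than starting a scan, we just let any waiters know the resilver is
 * effectively complete.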
8883 */ 8884 if (func == POOL_SCAN_RESILVER && 8885 !vdev_resilver_needed(spa->spa_root_vdev, NULL, NULL)) { 8886 spa_async_request(spa, SPA_ASYNC_RESILVER_DONE); 8887 return (0); 8888 } 8889 8890 if (func == POOL_SCAN_ERRORSCRUB && 8891 !spa_feature_is_enabled(spa, SPA_FEATURE_HEAD_ERRLOG)) 8892 return (SET_ERROR(ENOTSUP)); 8893 8894 return (dsl_scan(spa->spa_dsl_pool, func)); 8895 } 8896 8897 /* 8898 * ========================================================================== 8899 * SPA async task processing 8900 * ========================================================================== 8901 */ 8902 8903 static void 8904 spa_async_remove(spa_t *spa, vdev_t *vd) 8905 { 8906 if (vd->vdev_remove_wanted) { 8907 vd->vdev_remove_wanted = B_FALSE; 8908 vd->vdev_delayed_close = B_FALSE; 8909 vdev_set_state(vd, B_FALSE, VDEV_STATE_REMOVED, VDEV_AUX_NONE); 8910 8911 /* 8912 * We want to clear the stats, but we don't want to do a full 8913 * vdev_clear() as that will cause us to throw away 8914 * degraded/faulted state as well as attempt to reopen the 8915 * device, all of which is a waste. 8916 */ 8917 vd->vdev_stat.vs_read_errors = 0; 8918 vd->vdev_stat.vs_write_errors = 0; 8919 vd->vdev_stat.vs_checksum_errors = 0; 8920 8921 vdev_state_dirty(vd->vdev_top); 8922 8923 /* Tell userspace that the vdev is gone. */ 8924 zfs_post_remove(spa, vd); 8925 } 8926 8927 for (int c = 0; c < vd->vdev_children; c++) 8928 spa_async_remove(spa, vd->vdev_child[c]); 8929 } 8930 8931 static void 8932 spa_async_fault_vdev(spa_t *spa, vdev_t *vd) 8933 { 8934 if (vd->vdev_fault_wanted) { 8935 vd->vdev_fault_wanted = B_FALSE; 8936 vdev_set_state(vd, B_TRUE, VDEV_STATE_FAULTED, 8937 VDEV_AUX_ERR_EXCEEDED); 8938 } 8939 8940 for (int c = 0; c < vd->vdev_children; c++) 8941 spa_async_fault_vdev(spa, vd->vdev_child[c]); 8942 } 8943 8944 static void 8945 spa_async_autoexpand(spa_t *spa, vdev_t *vd) 8946 { 8947 if (!spa->spa_autoexpand) 8948 return; 8949 8950 for (int c = 0; c < vd->vdev_children; c++) { 8951 vdev_t *cvd = vd->vdev_child[c]; 8952 spa_async_autoexpand(spa, cvd); 8953 } 8954 8955 if (!vd->vdev_ops->vdev_op_leaf || vd->vdev_physpath == NULL) 8956 return; 8957 8958 spa_event_notify(vd->vdev_spa, vd, NULL, ESC_ZFS_VDEV_AUTOEXPAND); 8959 } 8960 8961 static __attribute__((noreturn)) void 8962 spa_async_thread(void *arg) 8963 { 8964 spa_t *spa = (spa_t *)arg; 8965 dsl_pool_t *dp = spa->spa_dsl_pool; 8966 int tasks; 8967 8968 ASSERT(spa->spa_sync_on); 8969 8970 mutex_enter(&spa->spa_async_lock); 8971 tasks = spa->spa_async_tasks; 8972 spa->spa_async_tasks = 0; 8973 mutex_exit(&spa->spa_async_lock); 8974 8975 /* 8976 * See if the config needs to be updated. 
8977 */ 8978 if (tasks & SPA_ASYNC_CONFIG_UPDATE) { 8979 uint64_t old_space, new_space; 8980 8981 mutex_enter(&spa_namespace_lock); 8982 old_space = metaslab_class_get_space(spa_normal_class(spa)); 8983 old_space += metaslab_class_get_space(spa_special_class(spa)); 8984 old_space += metaslab_class_get_space(spa_dedup_class(spa)); 8985 old_space += metaslab_class_get_space( 8986 spa_embedded_log_class(spa)); 8987 8988 spa_config_update(spa, SPA_CONFIG_UPDATE_POOL); 8989 8990 new_space = metaslab_class_get_space(spa_normal_class(spa)); 8991 new_space += metaslab_class_get_space(spa_special_class(spa)); 8992 new_space += metaslab_class_get_space(spa_dedup_class(spa)); 8993 new_space += metaslab_class_get_space( 8994 spa_embedded_log_class(spa)); 8995 mutex_exit(&spa_namespace_lock); 8996 8997 /* 8998 * If the pool grew as a result of the config update, 8999 * then log an internal history event. 9000 */ 9001 if (new_space != old_space) { 9002 spa_history_log_internal(spa, "vdev online", NULL, 9003 "pool '%s' size: %llu(+%llu)", 9004 spa_name(spa), (u_longlong_t)new_space, 9005 (u_longlong_t)(new_space - old_space)); 9006 } 9007 } 9008 9009 /* 9010 * See if any devices need to be marked REMOVED. 9011 */ 9012 if (tasks & SPA_ASYNC_REMOVE) { 9013 spa_vdev_state_enter(spa, SCL_NONE); 9014 spa_async_remove(spa, spa->spa_root_vdev); 9015 for (int i = 0; i < spa->spa_l2cache.sav_count; i++) 9016 spa_async_remove(spa, spa->spa_l2cache.sav_vdevs[i]); 9017 for (int i = 0; i < spa->spa_spares.sav_count; i++) 9018 spa_async_remove(spa, spa->spa_spares.sav_vdevs[i]); 9019 (void) spa_vdev_state_exit(spa, NULL, 0); 9020 } 9021 9022 if ((tasks & SPA_ASYNC_AUTOEXPAND) && !spa_suspended(spa)) { 9023 spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER); 9024 spa_async_autoexpand(spa, spa->spa_root_vdev); 9025 spa_config_exit(spa, SCL_CONFIG, FTAG); 9026 } 9027 9028 /* 9029 * See if any devices need to be marked faulted. 9030 */ 9031 if (tasks & SPA_ASYNC_FAULT_VDEV) { 9032 spa_vdev_state_enter(spa, SCL_NONE); 9033 spa_async_fault_vdev(spa, spa->spa_root_vdev); 9034 (void) spa_vdev_state_exit(spa, NULL, 0); 9035 } 9036 9037 /* 9038 * If any devices are done replacing, detach them. 9039 */ 9040 if (tasks & SPA_ASYNC_RESILVER_DONE || 9041 tasks & SPA_ASYNC_REBUILD_DONE || 9042 tasks & SPA_ASYNC_DETACH_SPARE) { 9043 spa_vdev_resilver_done(spa); 9044 } 9045 9046 /* 9047 * Kick off a resilver. 9048 */ 9049 if (tasks & SPA_ASYNC_RESILVER && 9050 !vdev_rebuild_active(spa->spa_root_vdev) && 9051 (!dsl_scan_resilvering(dp) || 9052 !spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_RESILVER_DEFER))) 9053 dsl_scan_restart_resilver(dp, 0); 9054 9055 if (tasks & SPA_ASYNC_INITIALIZE_RESTART) { 9056 mutex_enter(&spa_namespace_lock); 9057 spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER); 9058 vdev_initialize_restart(spa->spa_root_vdev); 9059 spa_config_exit(spa, SCL_CONFIG, FTAG); 9060 mutex_exit(&spa_namespace_lock); 9061 } 9062 9063 if (tasks & SPA_ASYNC_TRIM_RESTART) { 9064 mutex_enter(&spa_namespace_lock); 9065 spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER); 9066 vdev_trim_restart(spa->spa_root_vdev); 9067 spa_config_exit(spa, SCL_CONFIG, FTAG); 9068 mutex_exit(&spa_namespace_lock); 9069 } 9070 9071 if (tasks & SPA_ASYNC_AUTOTRIM_RESTART) { 9072 mutex_enter(&spa_namespace_lock); 9073 spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER); 9074 vdev_autotrim_restart(spa); 9075 spa_config_exit(spa, SCL_CONFIG, FTAG); 9076 mutex_exit(&spa_namespace_lock); 9077 } 9078 9079 /* 9080 * Kick off L2 cache whole device TRIM. 
9081 */ 9082 if (tasks & SPA_ASYNC_L2CACHE_TRIM) { 9083 mutex_enter(&spa_namespace_lock); 9084 spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER); 9085 vdev_trim_l2arc(spa); 9086 spa_config_exit(spa, SCL_CONFIG, FTAG); 9087 mutex_exit(&spa_namespace_lock); 9088 } 9089 9090 /* 9091 * Kick off L2 cache rebuilding. 9092 */ 9093 if (tasks & SPA_ASYNC_L2CACHE_REBUILD) { 9094 mutex_enter(&spa_namespace_lock); 9095 spa_config_enter(spa, SCL_L2ARC, FTAG, RW_READER); 9096 l2arc_spa_rebuild_start(spa); 9097 spa_config_exit(spa, SCL_L2ARC, FTAG); 9098 mutex_exit(&spa_namespace_lock); 9099 } 9100 9101 /* 9102 * Let the world know that we're done. 9103 */ 9104 mutex_enter(&spa->spa_async_lock); 9105 spa->spa_async_thread = NULL; 9106 cv_broadcast(&spa->spa_async_cv); 9107 mutex_exit(&spa->spa_async_lock); 9108 thread_exit(); 9109 } 9110 9111 void 9112 spa_async_suspend(spa_t *spa) 9113 { 9114 mutex_enter(&spa->spa_async_lock); 9115 spa->spa_async_suspended++; 9116 while (spa->spa_async_thread != NULL) 9117 cv_wait(&spa->spa_async_cv, &spa->spa_async_lock); 9118 mutex_exit(&spa->spa_async_lock); 9119 9120 spa_vdev_remove_suspend(spa); 9121 9122 zthr_t *condense_thread = spa->spa_condense_zthr; 9123 if (condense_thread != NULL) 9124 zthr_cancel(condense_thread); 9125 9126 zthr_t *raidz_expand_thread = spa->spa_raidz_expand_zthr; 9127 if (raidz_expand_thread != NULL) 9128 zthr_cancel(raidz_expand_thread); 9129 9130 zthr_t *discard_thread = spa->spa_checkpoint_discard_zthr; 9131 if (discard_thread != NULL) 9132 zthr_cancel(discard_thread); 9133 9134 zthr_t *ll_delete_thread = spa->spa_livelist_delete_zthr; 9135 if (ll_delete_thread != NULL) 9136 zthr_cancel(ll_delete_thread); 9137 9138 zthr_t *ll_condense_thread = spa->spa_livelist_condense_zthr; 9139 if (ll_condense_thread != NULL) 9140 zthr_cancel(ll_condense_thread); 9141 } 9142 9143 void 9144 spa_async_resume(spa_t *spa) 9145 { 9146 mutex_enter(&spa->spa_async_lock); 9147 ASSERT(spa->spa_async_suspended != 0); 9148 spa->spa_async_suspended--; 9149 mutex_exit(&spa->spa_async_lock); 9150 spa_restart_removal(spa); 9151 9152 zthr_t *condense_thread = spa->spa_condense_zthr; 9153 if (condense_thread != NULL) 9154 zthr_resume(condense_thread); 9155 9156 zthr_t *raidz_expand_thread = spa->spa_raidz_expand_zthr; 9157 if (raidz_expand_thread != NULL) 9158 zthr_resume(raidz_expand_thread); 9159 9160 zthr_t *discard_thread = spa->spa_checkpoint_discard_zthr; 9161 if (discard_thread != NULL) 9162 zthr_resume(discard_thread); 9163 9164 zthr_t *ll_delete_thread = spa->spa_livelist_delete_zthr; 9165 if (ll_delete_thread != NULL) 9166 zthr_resume(ll_delete_thread); 9167 9168 zthr_t *ll_condense_thread = spa->spa_livelist_condense_zthr; 9169 if (ll_condense_thread != NULL) 9170 zthr_resume(ll_condense_thread); 9171 } 9172 9173 static boolean_t 9174 spa_async_tasks_pending(spa_t *spa) 9175 { 9176 uint_t non_config_tasks; 9177 uint_t config_task; 9178 boolean_t config_task_suspended; 9179 9180 non_config_tasks = spa->spa_async_tasks & ~SPA_ASYNC_CONFIG_UPDATE; 9181 config_task = spa->spa_async_tasks & SPA_ASYNC_CONFIG_UPDATE; 9182 if (spa->spa_ccw_fail_time == 0) { 9183 config_task_suspended = B_FALSE; 9184 } else { 9185 config_task_suspended = 9186 (gethrtime() - spa->spa_ccw_fail_time) < 9187 ((hrtime_t)zfs_ccw_retry_interval * NANOSEC); 9188 } 9189 9190 return (non_config_tasks || (config_task && !config_task_suspended)); 9191 } 9192 9193 static void 9194 spa_async_dispatch(spa_t *spa) 9195 { 9196 mutex_enter(&spa->spa_async_lock); 9197 if 
(spa_async_tasks_pending(spa) && 9198 !spa->spa_async_suspended && 9199 spa->spa_async_thread == NULL) 9200 spa->spa_async_thread = thread_create(NULL, 0, 9201 spa_async_thread, spa, 0, &p0, TS_RUN, maxclsyspri); 9202 mutex_exit(&spa->spa_async_lock); 9203 } 9204 9205 void 9206 spa_async_request(spa_t *spa, int task) 9207 { 9208 zfs_dbgmsg("spa=%s async request task=%u", spa->spa_name, task); 9209 mutex_enter(&spa->spa_async_lock); 9210 spa->spa_async_tasks |= task; 9211 mutex_exit(&spa->spa_async_lock); 9212 } 9213 9214 int 9215 spa_async_tasks(spa_t *spa) 9216 { 9217 return (spa->spa_async_tasks); 9218 } 9219 9220 /* 9221 * ========================================================================== 9222 * SPA syncing routines 9223 * ========================================================================== 9224 */ 9225 9226 9227 static int 9228 bpobj_enqueue_cb(void *arg, const blkptr_t *bp, boolean_t bp_freed, 9229 dmu_tx_t *tx) 9230 { 9231 bpobj_t *bpo = arg; 9232 bpobj_enqueue(bpo, bp, bp_freed, tx); 9233 return (0); 9234 } 9235 9236 int 9237 bpobj_enqueue_alloc_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx) 9238 { 9239 return (bpobj_enqueue_cb(arg, bp, B_FALSE, tx)); 9240 } 9241 9242 int 9243 bpobj_enqueue_free_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx) 9244 { 9245 return (bpobj_enqueue_cb(arg, bp, B_TRUE, tx)); 9246 } 9247 9248 static int 9249 spa_free_sync_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx) 9250 { 9251 zio_t *pio = arg; 9252 9253 zio_nowait(zio_free_sync(pio, pio->io_spa, dmu_tx_get_txg(tx), bp, 9254 pio->io_flags)); 9255 return (0); 9256 } 9257 9258 static int 9259 bpobj_spa_free_sync_cb(void *arg, const blkptr_t *bp, boolean_t bp_freed, 9260 dmu_tx_t *tx) 9261 { 9262 ASSERT(!bp_freed); 9263 return (spa_free_sync_cb(arg, bp, tx)); 9264 } 9265 9266 /* 9267 * Note: this simple function is not inlined to make it easier to dtrace the 9268 * amount of time spent syncing frees. 9269 */ 9270 static void 9271 spa_sync_frees(spa_t *spa, bplist_t *bpl, dmu_tx_t *tx) 9272 { 9273 zio_t *zio = zio_root(spa, NULL, NULL, 0); 9274 bplist_iterate(bpl, spa_free_sync_cb, zio, tx); 9275 VERIFY(zio_wait(zio) == 0); 9276 } 9277 9278 /* 9279 * Note: this simple function is not inlined to make it easier to dtrace the 9280 * amount of time spent syncing deferred frees. 9281 */ 9282 static void 9283 spa_sync_deferred_frees(spa_t *spa, dmu_tx_t *tx) 9284 { 9285 if (spa_sync_pass(spa) != 1) 9286 return; 9287 9288 /* 9289 * Note: 9290 * If the log space map feature is active, we stop deferring 9291 * frees to the next TXG and therefore running this function 9292 * would be considered a no-op as spa_deferred_bpobj should 9293 * not have any entries. 9294 * 9295 * That said we run this function anyway (instead of returning 9296 * immediately) for the edge-case scenario where we just 9297 * activated the log space map feature in this TXG but we have 9298 * deferred frees from the previous TXG. 9299 */ 9300 zio_t *zio = zio_root(spa, NULL, NULL, 0); 9301 VERIFY3U(bpobj_iterate(&spa->spa_deferred_bpobj, 9302 bpobj_spa_free_sync_cb, zio, tx), ==, 0); 9303 VERIFY0(zio_wait(zio)); 9304 } 9305 9306 static void 9307 spa_sync_nvlist(spa_t *spa, uint64_t obj, nvlist_t *nv, dmu_tx_t *tx) 9308 { 9309 char *packed = NULL; 9310 size_t bufsize; 9311 size_t nvsize = 0; 9312 dmu_buf_t *db; 9313 9314 VERIFY(nvlist_size(nv, &nvsize, NV_ENCODE_XDR) == 0); 9315 9316 /* 9317 * Write full (SPA_CONFIG_BLOCKSIZE) blocks of configuration 9318 * information. 
This avoids the dmu_buf_will_dirty() path and 9319 * saves us a pre-read to get data we don't actually care about. 9320 */ 9321 bufsize = P2ROUNDUP((uint64_t)nvsize, SPA_CONFIG_BLOCKSIZE); 9322 packed = vmem_alloc(bufsize, KM_SLEEP); 9323 9324 VERIFY(nvlist_pack(nv, &packed, &nvsize, NV_ENCODE_XDR, 9325 KM_SLEEP) == 0); 9326 memset(packed + nvsize, 0, bufsize - nvsize); 9327 9328 dmu_write(spa->spa_meta_objset, obj, 0, bufsize, packed, tx); 9329 9330 vmem_free(packed, bufsize); 9331 9332 VERIFY(0 == dmu_bonus_hold(spa->spa_meta_objset, obj, FTAG, &db)); 9333 dmu_buf_will_dirty(db, tx); 9334 *(uint64_t *)db->db_data = nvsize; 9335 dmu_buf_rele(db, FTAG); 9336 } 9337 9338 static void 9339 spa_sync_aux_dev(spa_t *spa, spa_aux_vdev_t *sav, dmu_tx_t *tx, 9340 const char *config, const char *entry) 9341 { 9342 nvlist_t *nvroot; 9343 nvlist_t **list; 9344 int i; 9345 9346 if (!sav->sav_sync) 9347 return; 9348 9349 /* 9350 * Update the MOS nvlist describing the list of available devices. 9351 * spa_validate_aux() will have already made sure this nvlist is 9352 * valid and the vdevs are labeled appropriately. 9353 */ 9354 if (sav->sav_object == 0) { 9355 sav->sav_object = dmu_object_alloc(spa->spa_meta_objset, 9356 DMU_OT_PACKED_NVLIST, 1 << 14, DMU_OT_PACKED_NVLIST_SIZE, 9357 sizeof (uint64_t), tx); 9358 VERIFY(zap_update(spa->spa_meta_objset, 9359 DMU_POOL_DIRECTORY_OBJECT, entry, sizeof (uint64_t), 1, 9360 &sav->sav_object, tx) == 0); 9361 } 9362 9363 nvroot = fnvlist_alloc(); 9364 if (sav->sav_count == 0) { 9365 fnvlist_add_nvlist_array(nvroot, config, 9366 (const nvlist_t * const *)NULL, 0); 9367 } else { 9368 list = kmem_alloc(sav->sav_count*sizeof (void *), KM_SLEEP); 9369 for (i = 0; i < sav->sav_count; i++) 9370 list[i] = vdev_config_generate(spa, sav->sav_vdevs[i], 9371 B_FALSE, VDEV_CONFIG_L2CACHE); 9372 fnvlist_add_nvlist_array(nvroot, config, 9373 (const nvlist_t * const *)list, sav->sav_count); 9374 for (i = 0; i < sav->sav_count; i++) 9375 nvlist_free(list[i]); 9376 kmem_free(list, sav->sav_count * sizeof (void *)); 9377 } 9378 9379 spa_sync_nvlist(spa, sav->sav_object, nvroot, tx); 9380 nvlist_free(nvroot); 9381 9382 sav->sav_sync = B_FALSE; 9383 } 9384 9385 /* 9386 * Rebuild spa's all-vdev ZAP from the vdev ZAPs indicated in each vdev_t. 9387 * The all-vdev ZAP must be empty. 9388 */ 9389 static void 9390 spa_avz_build(vdev_t *vd, uint64_t avz, dmu_tx_t *tx) 9391 { 9392 spa_t *spa = vd->vdev_spa; 9393 9394 if (vd->vdev_root_zap != 0 && 9395 spa_feature_is_active(spa, SPA_FEATURE_AVZ_V2)) { 9396 VERIFY0(zap_add_int(spa->spa_meta_objset, avz, 9397 vd->vdev_root_zap, tx)); 9398 } 9399 if (vd->vdev_top_zap != 0) { 9400 VERIFY0(zap_add_int(spa->spa_meta_objset, avz, 9401 vd->vdev_top_zap, tx)); 9402 } 9403 if (vd->vdev_leaf_zap != 0) { 9404 VERIFY0(zap_add_int(spa->spa_meta_objset, avz, 9405 vd->vdev_leaf_zap, tx)); 9406 } 9407 for (uint64_t i = 0; i < vd->vdev_children; i++) { 9408 spa_avz_build(vd->vdev_child[i], avz, tx); 9409 } 9410 } 9411 9412 static void 9413 spa_sync_config_object(spa_t *spa, dmu_tx_t *tx) 9414 { 9415 nvlist_t *config; 9416 9417 /* 9418 * If the pool is being imported from a pre-per-vdev-ZAP version of ZFS, 9419 * its config may not be dirty but we still need to build per-vdev ZAPs. 9420 * Similarly, if the pool is being assembled (e.g. after a split), we 9421 * need to rebuild the AVZ although the config may not be dirty. 
9422 */ 9423 if (list_is_empty(&spa->spa_config_dirty_list) && 9424 spa->spa_avz_action == AVZ_ACTION_NONE) 9425 return; 9426 9427 spa_config_enter(spa, SCL_STATE, FTAG, RW_READER); 9428 9429 ASSERT(spa->spa_avz_action == AVZ_ACTION_NONE || 9430 spa->spa_avz_action == AVZ_ACTION_INITIALIZE || 9431 spa->spa_all_vdev_zaps != 0); 9432 9433 if (spa->spa_avz_action == AVZ_ACTION_REBUILD) { 9434 /* Make and build the new AVZ */ 9435 uint64_t new_avz = zap_create(spa->spa_meta_objset, 9436 DMU_OTN_ZAP_METADATA, DMU_OT_NONE, 0, tx); 9437 spa_avz_build(spa->spa_root_vdev, new_avz, tx); 9438 9439 /* Diff old AVZ with new one */ 9440 zap_cursor_t zc; 9441 zap_attribute_t *za = zap_attribute_alloc(); 9442 9443 for (zap_cursor_init(&zc, spa->spa_meta_objset, 9444 spa->spa_all_vdev_zaps); 9445 zap_cursor_retrieve(&zc, za) == 0; 9446 zap_cursor_advance(&zc)) { 9447 uint64_t vdzap = za->za_first_integer; 9448 if (zap_lookup_int(spa->spa_meta_objset, new_avz, 9449 vdzap) == ENOENT) { 9450 /* 9451 * ZAP is listed in old AVZ but not in new one; 9452 * destroy it 9453 */ 9454 VERIFY0(zap_destroy(spa->spa_meta_objset, vdzap, 9455 tx)); 9456 } 9457 } 9458 9459 zap_cursor_fini(&zc); 9460 zap_attribute_free(za); 9461 9462 /* Destroy the old AVZ */ 9463 VERIFY0(zap_destroy(spa->spa_meta_objset, 9464 spa->spa_all_vdev_zaps, tx)); 9465 9466 /* Replace the old AVZ in the dir obj with the new one */ 9467 VERIFY0(zap_update(spa->spa_meta_objset, 9468 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_VDEV_ZAP_MAP, 9469 sizeof (new_avz), 1, &new_avz, tx)); 9470 9471 spa->spa_all_vdev_zaps = new_avz; 9472 } else if (spa->spa_avz_action == AVZ_ACTION_DESTROY) { 9473 zap_cursor_t zc; 9474 zap_attribute_t *za = zap_attribute_alloc(); 9475 9476 /* Walk through the AVZ and destroy all listed ZAPs */ 9477 for (zap_cursor_init(&zc, spa->spa_meta_objset, 9478 spa->spa_all_vdev_zaps); 9479 zap_cursor_retrieve(&zc, za) == 0; 9480 zap_cursor_advance(&zc)) { 9481 uint64_t zap = za->za_first_integer; 9482 VERIFY0(zap_destroy(spa->spa_meta_objset, zap, tx)); 9483 } 9484 9485 zap_cursor_fini(&zc); 9486 zap_attribute_free(za); 9487 9488 /* Destroy and unlink the AVZ itself */ 9489 VERIFY0(zap_destroy(spa->spa_meta_objset, 9490 spa->spa_all_vdev_zaps, tx)); 9491 VERIFY0(zap_remove(spa->spa_meta_objset, 9492 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_VDEV_ZAP_MAP, tx)); 9493 spa->spa_all_vdev_zaps = 0; 9494 } 9495 9496 if (spa->spa_all_vdev_zaps == 0) { 9497 spa->spa_all_vdev_zaps = zap_create_link(spa->spa_meta_objset, 9498 DMU_OTN_ZAP_METADATA, DMU_POOL_DIRECTORY_OBJECT, 9499 DMU_POOL_VDEV_ZAP_MAP, tx); 9500 } 9501 spa->spa_avz_action = AVZ_ACTION_NONE; 9502 9503 /* Create ZAPs for vdevs that don't have them. */ 9504 vdev_construct_zaps(spa->spa_root_vdev, tx); 9505 9506 config = spa_config_generate(spa, spa->spa_root_vdev, 9507 dmu_tx_get_txg(tx), B_FALSE); 9508 9509 /* 9510 * If we're upgrading the spa version then make sure that 9511 * the config object gets updated with the correct version. 
9512 */ 9513 if (spa->spa_ubsync.ub_version < spa->spa_uberblock.ub_version) 9514 fnvlist_add_uint64(config, ZPOOL_CONFIG_VERSION, 9515 spa->spa_uberblock.ub_version); 9516 9517 spa_config_exit(spa, SCL_STATE, FTAG); 9518 9519 nvlist_free(spa->spa_config_syncing); 9520 spa->spa_config_syncing = config; 9521 9522 spa_sync_nvlist(spa, spa->spa_config_object, config, tx); 9523 } 9524 9525 static void 9526 spa_sync_version(void *arg, dmu_tx_t *tx) 9527 { 9528 uint64_t *versionp = arg; 9529 uint64_t version = *versionp; 9530 spa_t *spa = dmu_tx_pool(tx)->dp_spa; 9531 9532 /* 9533 * Setting the version is special cased when first creating the pool. 9534 */ 9535 ASSERT(tx->tx_txg != TXG_INITIAL); 9536 9537 ASSERT(SPA_VERSION_IS_SUPPORTED(version)); 9538 ASSERT(version >= spa_version(spa)); 9539 9540 spa->spa_uberblock.ub_version = version; 9541 vdev_config_dirty(spa->spa_root_vdev); 9542 spa_history_log_internal(spa, "set", tx, "version=%lld", 9543 (longlong_t)version); 9544 } 9545 9546 /* 9547 * Set zpool properties. 9548 */ 9549 static void 9550 spa_sync_props(void *arg, dmu_tx_t *tx) 9551 { 9552 nvlist_t *nvp = arg; 9553 spa_t *spa = dmu_tx_pool(tx)->dp_spa; 9554 objset_t *mos = spa->spa_meta_objset; 9555 nvpair_t *elem = NULL; 9556 9557 mutex_enter(&spa->spa_props_lock); 9558 9559 while ((elem = nvlist_next_nvpair(nvp, elem))) { 9560 uint64_t intval; 9561 const char *strval, *fname; 9562 zpool_prop_t prop; 9563 const char *propname; 9564 const char *elemname = nvpair_name(elem); 9565 zprop_type_t proptype; 9566 spa_feature_t fid; 9567 9568 switch (prop = zpool_name_to_prop(elemname)) { 9569 case ZPOOL_PROP_VERSION: 9570 intval = fnvpair_value_uint64(elem); 9571 /* 9572 * The version is synced separately before other 9573 * properties and should be correct by now. 9574 */ 9575 ASSERT3U(spa_version(spa), >=, intval); 9576 break; 9577 9578 case ZPOOL_PROP_ALTROOT: 9579 /* 9580 * 'altroot' is a non-persistent property. It should 9581 * have been set temporarily at creation or import time. 9582 */ 9583 ASSERT(spa->spa_root != NULL); 9584 break; 9585 9586 case ZPOOL_PROP_READONLY: 9587 case ZPOOL_PROP_CACHEFILE: 9588 /* 9589 * 'readonly' and 'cachefile' are also non-persistent 9590 * properties. 9591 */ 9592 break; 9593 case ZPOOL_PROP_COMMENT: 9594 strval = fnvpair_value_string(elem); 9595 if (spa->spa_comment != NULL) 9596 spa_strfree(spa->spa_comment); 9597 spa->spa_comment = spa_strdup(strval); 9598 /* 9599 * We need to dirty the configuration on all the vdevs 9600 * so that their labels get updated. We also need to 9601 * update the cache file to keep it in sync with the 9602 * MOS version. It's unnecessary to do this for pool 9603 * creation since the vdev's configuration has already 9604 * been dirtied. 9605 */ 9606 if (tx->tx_txg != TXG_INITIAL) { 9607 vdev_config_dirty(spa->spa_root_vdev); 9608 spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE); 9609 } 9610 spa_history_log_internal(spa, "set", tx, 9611 "%s=%s", elemname, strval); 9612 break; 9613 case ZPOOL_PROP_COMPATIBILITY: 9614 strval = fnvpair_value_string(elem); 9615 if (spa->spa_compatibility != NULL) 9616 spa_strfree(spa->spa_compatibility); 9617 spa->spa_compatibility = spa_strdup(strval); 9618 /* 9619 * Dirty the configuration on vdevs as above. 
9620 */ 9621 if (tx->tx_txg != TXG_INITIAL) { 9622 vdev_config_dirty(spa->spa_root_vdev); 9623 spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE); 9624 } 9625 9626 spa_history_log_internal(spa, "set", tx, 9627 "%s=%s", nvpair_name(elem), strval); 9628 break; 9629 9630 case ZPOOL_PROP_INVAL: 9631 if (zpool_prop_feature(elemname)) { 9632 fname = strchr(elemname, '@') + 1; 9633 VERIFY0(zfeature_lookup_name(fname, &fid)); 9634 9635 spa_feature_enable(spa, fid, tx); 9636 spa_history_log_internal(spa, "set", tx, 9637 "%s=enabled", elemname); 9638 break; 9639 } else if (!zfs_prop_user(elemname)) { 9640 ASSERT(zpool_prop_feature(elemname)); 9641 break; 9642 } 9643 zfs_fallthrough; 9644 default: 9645 /* 9646 * Set pool property values in the poolprops mos object. 9647 */ 9648 if (spa->spa_pool_props_object == 0) { 9649 spa->spa_pool_props_object = 9650 zap_create_link(mos, DMU_OT_POOL_PROPS, 9651 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_PROPS, 9652 tx); 9653 } 9654 9655 /* normalize the property name */ 9656 if (prop == ZPOOL_PROP_INVAL) { 9657 propname = elemname; 9658 proptype = PROP_TYPE_STRING; 9659 } else { 9660 propname = zpool_prop_to_name(prop); 9661 proptype = zpool_prop_get_type(prop); 9662 } 9663 9664 if (nvpair_type(elem) == DATA_TYPE_STRING) { 9665 ASSERT(proptype == PROP_TYPE_STRING); 9666 strval = fnvpair_value_string(elem); 9667 VERIFY0(zap_update(mos, 9668 spa->spa_pool_props_object, propname, 9669 1, strlen(strval) + 1, strval, tx)); 9670 spa_history_log_internal(spa, "set", tx, 9671 "%s=%s", elemname, strval); 9672 } else if (nvpair_type(elem) == DATA_TYPE_UINT64) { 9673 intval = fnvpair_value_uint64(elem); 9674 9675 if (proptype == PROP_TYPE_INDEX) { 9676 const char *unused; 9677 VERIFY0(zpool_prop_index_to_string( 9678 prop, intval, &unused)); 9679 } 9680 VERIFY0(zap_update(mos, 9681 spa->spa_pool_props_object, propname, 9682 8, 1, &intval, tx)); 9683 spa_history_log_internal(spa, "set", tx, 9684 "%s=%lld", elemname, 9685 (longlong_t)intval); 9686 9687 switch (prop) { 9688 case ZPOOL_PROP_DELEGATION: 9689 spa->spa_delegation = intval; 9690 break; 9691 case ZPOOL_PROP_BOOTFS: 9692 spa->spa_bootfs = intval; 9693 break; 9694 case ZPOOL_PROP_FAILUREMODE: 9695 spa->spa_failmode = intval; 9696 break; 9697 case ZPOOL_PROP_AUTOTRIM: 9698 spa->spa_autotrim = intval; 9699 spa_async_request(spa, 9700 SPA_ASYNC_AUTOTRIM_RESTART); 9701 break; 9702 case ZPOOL_PROP_AUTOEXPAND: 9703 spa->spa_autoexpand = intval; 9704 if (tx->tx_txg != TXG_INITIAL) 9705 spa_async_request(spa, 9706 SPA_ASYNC_AUTOEXPAND); 9707 break; 9708 case ZPOOL_PROP_MULTIHOST: 9709 spa->spa_multihost = intval; 9710 break; 9711 case ZPOOL_PROP_DEDUP_TABLE_QUOTA: 9712 spa->spa_dedup_table_quota = intval; 9713 break; 9714 default: 9715 break; 9716 } 9717 } else { 9718 ASSERT(0); /* not allowed */ 9719 } 9720 } 9721 9722 } 9723 9724 mutex_exit(&spa->spa_props_lock); 9725 } 9726 9727 /* 9728 * Perform one-time upgrade on-disk changes. spa_version() does not 9729 * reflect the new version this txg, so there must be no changes this 9730 * txg to anything that the upgrade code depends on after it executes. 9731 * Therefore this must be called after dsl_pool_sync() does the sync 9732 * tasks. 
9733 */ 9734 static void 9735 spa_sync_upgrades(spa_t *spa, dmu_tx_t *tx) 9736 { 9737 if (spa_sync_pass(spa) != 1) 9738 return; 9739 9740 dsl_pool_t *dp = spa->spa_dsl_pool; 9741 rrw_enter(&dp->dp_config_rwlock, RW_WRITER, FTAG); 9742 9743 if (spa->spa_ubsync.ub_version < SPA_VERSION_ORIGIN && 9744 spa->spa_uberblock.ub_version >= SPA_VERSION_ORIGIN) { 9745 dsl_pool_create_origin(dp, tx); 9746 9747 /* Keeping the origin open increases spa_minref */ 9748 spa->spa_minref += 3; 9749 } 9750 9751 if (spa->spa_ubsync.ub_version < SPA_VERSION_NEXT_CLONES && 9752 spa->spa_uberblock.ub_version >= SPA_VERSION_NEXT_CLONES) { 9753 dsl_pool_upgrade_clones(dp, tx); 9754 } 9755 9756 if (spa->spa_ubsync.ub_version < SPA_VERSION_DIR_CLONES && 9757 spa->spa_uberblock.ub_version >= SPA_VERSION_DIR_CLONES) { 9758 dsl_pool_upgrade_dir_clones(dp, tx); 9759 9760 /* Keeping the freedir open increases spa_minref */ 9761 spa->spa_minref += 3; 9762 } 9763 9764 if (spa->spa_ubsync.ub_version < SPA_VERSION_FEATURES && 9765 spa->spa_uberblock.ub_version >= SPA_VERSION_FEATURES) { 9766 spa_feature_create_zap_objects(spa, tx); 9767 } 9768 9769 /* 9770 * LZ4_COMPRESS feature's behaviour was changed to activate_on_enable 9771 * when possibility to use lz4 compression for metadata was added 9772 * Old pools that have this feature enabled must be upgraded to have 9773 * this feature active 9774 */ 9775 if (spa->spa_uberblock.ub_version >= SPA_VERSION_FEATURES) { 9776 boolean_t lz4_en = spa_feature_is_enabled(spa, 9777 SPA_FEATURE_LZ4_COMPRESS); 9778 boolean_t lz4_ac = spa_feature_is_active(spa, 9779 SPA_FEATURE_LZ4_COMPRESS); 9780 9781 if (lz4_en && !lz4_ac) 9782 spa_feature_incr(spa, SPA_FEATURE_LZ4_COMPRESS, tx); 9783 } 9784 9785 /* 9786 * If we haven't written the salt, do so now. Note that the 9787 * feature may not be activated yet, but that's fine since 9788 * the presence of this ZAP entry is backwards compatible. 9789 */ 9790 if (zap_contains(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT, 9791 DMU_POOL_CHECKSUM_SALT) == ENOENT) { 9792 VERIFY0(zap_add(spa->spa_meta_objset, 9793 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_CHECKSUM_SALT, 1, 9794 sizeof (spa->spa_cksum_salt.zcs_bytes), 9795 spa->spa_cksum_salt.zcs_bytes, tx)); 9796 } 9797 9798 rrw_exit(&dp->dp_config_rwlock, FTAG); 9799 } 9800 9801 static void 9802 vdev_indirect_state_sync_verify(vdev_t *vd) 9803 { 9804 vdev_indirect_mapping_t *vim __maybe_unused = vd->vdev_indirect_mapping; 9805 vdev_indirect_births_t *vib __maybe_unused = vd->vdev_indirect_births; 9806 9807 if (vd->vdev_ops == &vdev_indirect_ops) { 9808 ASSERT(vim != NULL); 9809 ASSERT(vib != NULL); 9810 } 9811 9812 uint64_t obsolete_sm_object = 0; 9813 ASSERT0(vdev_obsolete_sm_object(vd, &obsolete_sm_object)); 9814 if (obsolete_sm_object != 0) { 9815 ASSERT(vd->vdev_obsolete_sm != NULL); 9816 ASSERT(vd->vdev_removing || 9817 vd->vdev_ops == &vdev_indirect_ops); 9818 ASSERT(vdev_indirect_mapping_num_entries(vim) > 0); 9819 ASSERT(vdev_indirect_mapping_bytes_mapped(vim) > 0); 9820 ASSERT3U(obsolete_sm_object, ==, 9821 space_map_object(vd->vdev_obsolete_sm)); 9822 ASSERT3U(vdev_indirect_mapping_bytes_mapped(vim), >=, 9823 space_map_allocated(vd->vdev_obsolete_sm)); 9824 } 9825 ASSERT(vd->vdev_obsolete_segments != NULL); 9826 9827 /* 9828 * Since frees / remaps to an indirect vdev can only 9829 * happen in syncing context, the obsolete segments 9830 * tree must be empty when we start syncing. 
9831 */ 9832 ASSERT0(range_tree_space(vd->vdev_obsolete_segments)); 9833 } 9834 9835 /* 9836 * Set the top-level vdev's max queue depth. Evaluate each top-level's 9837 * async write queue depth in case it changed. The max queue depth will 9838 * not change in the middle of syncing out this txg. 9839 */ 9840 static void 9841 spa_sync_adjust_vdev_max_queue_depth(spa_t *spa) 9842 { 9843 ASSERT(spa_writeable(spa)); 9844 9845 vdev_t *rvd = spa->spa_root_vdev; 9846 uint32_t max_queue_depth = zfs_vdev_async_write_max_active * 9847 zfs_vdev_queue_depth_pct / 100; 9848 metaslab_class_t *normal = spa_normal_class(spa); 9849 metaslab_class_t *special = spa_special_class(spa); 9850 metaslab_class_t *dedup = spa_dedup_class(spa); 9851 9852 uint64_t slots_per_allocator = 0; 9853 for (int c = 0; c < rvd->vdev_children; c++) { 9854 vdev_t *tvd = rvd->vdev_child[c]; 9855 9856 metaslab_group_t *mg = tvd->vdev_mg; 9857 if (mg == NULL || !metaslab_group_initialized(mg)) 9858 continue; 9859 9860 metaslab_class_t *mc = mg->mg_class; 9861 if (mc != normal && mc != special && mc != dedup) 9862 continue; 9863 9864 /* 9865 * It is safe to do a lock-free check here because only async 9866 * allocations look at mg_max_alloc_queue_depth, and async 9867 * allocations all happen from spa_sync(). 9868 */ 9869 for (int i = 0; i < mg->mg_allocators; i++) { 9870 ASSERT0(zfs_refcount_count( 9871 &(mg->mg_allocator[i].mga_alloc_queue_depth))); 9872 } 9873 mg->mg_max_alloc_queue_depth = max_queue_depth; 9874 9875 for (int i = 0; i < mg->mg_allocators; i++) { 9876 mg->mg_allocator[i].mga_cur_max_alloc_queue_depth = 9877 zfs_vdev_def_queue_depth; 9878 } 9879 slots_per_allocator += zfs_vdev_def_queue_depth; 9880 } 9881 9882 for (int i = 0; i < spa->spa_alloc_count; i++) { 9883 ASSERT0(zfs_refcount_count(&normal->mc_allocator[i]. 9884 mca_alloc_slots)); 9885 ASSERT0(zfs_refcount_count(&special->mc_allocator[i]. 9886 mca_alloc_slots)); 9887 ASSERT0(zfs_refcount_count(&dedup->mc_allocator[i]. 
9888 mca_alloc_slots)); 9889 normal->mc_allocator[i].mca_alloc_max_slots = 9890 slots_per_allocator; 9891 special->mc_allocator[i].mca_alloc_max_slots = 9892 slots_per_allocator; 9893 dedup->mc_allocator[i].mca_alloc_max_slots = 9894 slots_per_allocator; 9895 } 9896 normal->mc_alloc_throttle_enabled = zio_dva_throttle_enabled; 9897 special->mc_alloc_throttle_enabled = zio_dva_throttle_enabled; 9898 dedup->mc_alloc_throttle_enabled = zio_dva_throttle_enabled; 9899 } 9900 9901 static void 9902 spa_sync_condense_indirect(spa_t *spa, dmu_tx_t *tx) 9903 { 9904 ASSERT(spa_writeable(spa)); 9905 9906 vdev_t *rvd = spa->spa_root_vdev; 9907 for (int c = 0; c < rvd->vdev_children; c++) { 9908 vdev_t *vd = rvd->vdev_child[c]; 9909 vdev_indirect_state_sync_verify(vd); 9910 9911 if (vdev_indirect_should_condense(vd)) { 9912 spa_condense_indirect_start_sync(vd, tx); 9913 break; 9914 } 9915 } 9916 } 9917 9918 static void 9919 spa_sync_iterate_to_convergence(spa_t *spa, dmu_tx_t *tx) 9920 { 9921 objset_t *mos = spa->spa_meta_objset; 9922 dsl_pool_t *dp = spa->spa_dsl_pool; 9923 uint64_t txg = tx->tx_txg; 9924 bplist_t *free_bpl = &spa->spa_free_bplist[txg & TXG_MASK]; 9925 9926 do { 9927 int pass = ++spa->spa_sync_pass; 9928 9929 spa_sync_config_object(spa, tx); 9930 spa_sync_aux_dev(spa, &spa->spa_spares, tx, 9931 ZPOOL_CONFIG_SPARES, DMU_POOL_SPARES); 9932 spa_sync_aux_dev(spa, &spa->spa_l2cache, tx, 9933 ZPOOL_CONFIG_L2CACHE, DMU_POOL_L2CACHE); 9934 spa_errlog_sync(spa, txg); 9935 dsl_pool_sync(dp, txg); 9936 9937 if (pass < zfs_sync_pass_deferred_free || 9938 spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP)) { 9939 /* 9940 * If the log space map feature is active we don't 9941 * care about deferred frees and the deferred bpobj 9942 * as the log space map should effectively have the 9943 * same results (i.e. appending only to one object). 9944 */ 9945 spa_sync_frees(spa, free_bpl, tx); 9946 } else { 9947 /* 9948 * We can not defer frees in pass 1, because 9949 * we sync the deferred frees later in pass 1. 9950 */ 9951 ASSERT3U(pass, >, 1); 9952 bplist_iterate(free_bpl, bpobj_enqueue_alloc_cb, 9953 &spa->spa_deferred_bpobj, tx); 9954 } 9955 9956 brt_sync(spa, txg); 9957 ddt_sync(spa, txg); 9958 dsl_scan_sync(dp, tx); 9959 dsl_errorscrub_sync(dp, tx); 9960 svr_sync(spa, tx); 9961 spa_sync_upgrades(spa, tx); 9962 9963 spa_flush_metaslabs(spa, tx); 9964 9965 vdev_t *vd = NULL; 9966 while ((vd = txg_list_remove(&spa->spa_vdev_txg_list, txg)) 9967 != NULL) 9968 vdev_sync(vd, txg); 9969 9970 if (pass == 1) { 9971 /* 9972 * dsl_pool_sync() -> dp_sync_tasks may have dirtied 9973 * the config. If that happens, this txg should not 9974 * be a no-op. So we must sync the config to the MOS 9975 * before checking for no-op. 9976 * 9977 * Note that when the config is dirty, it will 9978 * be written to the MOS (i.e. the MOS will be 9979 * dirtied) every time we call spa_sync_config_object() 9980 * in this txg. Therefore we can't call this after 9981 * dsl_pool_sync() every pass, because it would 9982 * prevent us from converging, since we'd dirty 9983 * the MOS every pass. 9984 * 9985 * Sync tasks can only be processed in pass 1, so 9986 * there's no need to do this in later passes. 9987 */ 9988 spa_sync_config_object(spa, tx); 9989 } 9990 9991 /* 9992 * Note: We need to check if the MOS is dirty because we could 9993 * have marked the MOS dirty without updating the uberblock 9994 * (e.g. if we have sync tasks but no dirty user data). 
We need 9995 * to check the uberblock's rootbp because it is updated if we 9996 * have synced out dirty data (though in this case the MOS will 9997 * most likely also be dirty due to second order effects, we 9998 * don't want to rely on that here). 9999 */ 10000 if (pass == 1 && 10001 BP_GET_LOGICAL_BIRTH(&spa->spa_uberblock.ub_rootbp) < txg && 10002 !dmu_objset_is_dirty(mos, txg)) { 10003 /* 10004 * Nothing changed on the first pass, therefore this 10005 * TXG is a no-op. Avoid syncing deferred frees, so 10006 * that we can keep this TXG as a no-op. 10007 */ 10008 ASSERT(txg_list_empty(&dp->dp_dirty_datasets, txg)); 10009 ASSERT(txg_list_empty(&dp->dp_dirty_dirs, txg)); 10010 ASSERT(txg_list_empty(&dp->dp_sync_tasks, txg)); 10011 ASSERT(txg_list_empty(&dp->dp_early_sync_tasks, txg)); 10012 break; 10013 } 10014 10015 spa_sync_deferred_frees(spa, tx); 10016 } while (dmu_objset_is_dirty(mos, txg)); 10017 } 10018 10019 /* 10020 * Rewrite the vdev configuration (which includes the uberblock) to 10021 * commit the transaction group. 10022 * 10023 * If there are no dirty vdevs, we sync the uberblock to a few random 10024 * top-level vdevs that are known to be visible in the config cache 10025 * (see spa_vdev_add() for a complete description). If there *are* dirty 10026 * vdevs, sync the uberblock to all vdevs. 10027 */ 10028 static void 10029 spa_sync_rewrite_vdev_config(spa_t *spa, dmu_tx_t *tx) 10030 { 10031 vdev_t *rvd = spa->spa_root_vdev; 10032 uint64_t txg = tx->tx_txg; 10033 10034 for (;;) { 10035 int error = 0; 10036 10037 /* 10038 * We hold SCL_STATE to prevent vdev open/close/etc. 10039 * while we're attempting to write the vdev labels. 10040 */ 10041 spa_config_enter(spa, SCL_STATE, FTAG, RW_READER); 10042 10043 if (list_is_empty(&spa->spa_config_dirty_list)) { 10044 vdev_t *svd[SPA_SYNC_MIN_VDEVS] = { NULL }; 10045 int svdcount = 0; 10046 int children = rvd->vdev_children; 10047 int c0 = random_in_range(children); 10048 10049 for (int c = 0; c < children; c++) { 10050 vdev_t *vd = 10051 rvd->vdev_child[(c0 + c) % children]; 10052 10053 /* Stop when revisiting the first vdev */ 10054 if (c > 0 && svd[0] == vd) 10055 break; 10056 10057 if (vd->vdev_ms_array == 0 || 10058 vd->vdev_islog || 10059 !vdev_is_concrete(vd)) 10060 continue; 10061 10062 svd[svdcount++] = vd; 10063 if (svdcount == SPA_SYNC_MIN_VDEVS) 10064 break; 10065 } 10066 error = vdev_config_sync(svd, svdcount, txg); 10067 } else { 10068 error = vdev_config_sync(rvd->vdev_child, 10069 rvd->vdev_children, txg); 10070 } 10071 10072 if (error == 0) 10073 spa->spa_last_synced_guid = rvd->vdev_guid; 10074 10075 spa_config_exit(spa, SCL_STATE, FTAG); 10076 10077 if (error == 0) 10078 break; 10079 zio_suspend(spa, NULL, ZIO_SUSPEND_IOERR); 10080 zio_resume_wait(spa); 10081 } 10082 } 10083 10084 /* 10085 * Sync the specified transaction group. New blocks may be dirtied as 10086 * part of the process, so we iterate until it converges. 10087 */ 10088 void 10089 spa_sync(spa_t *spa, uint64_t txg) 10090 { 10091 vdev_t *vd = NULL; 10092 10093 VERIFY(spa_writeable(spa)); 10094 10095 /* 10096 * Wait for i/os issued in open context that need to complete 10097 * before this txg syncs. 10098 */ 10099 (void) zio_wait(spa->spa_txg_zio[txg & TXG_MASK]); 10100 spa->spa_txg_zio[txg & TXG_MASK] = zio_root(spa, NULL, NULL, 10101 ZIO_FLAG_CANFAIL); 10102 10103 /* 10104 * Now that there can be no more cloning in this transaction group, 10105 * but we are still before issuing frees, we can process pending BRT 10106 * updates. 
10107 */ 10108 brt_pending_apply(spa, txg); 10109 10110 /* 10111 * Lock out configuration changes. 10112 */ 10113 spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER); 10114 10115 spa->spa_syncing_txg = txg; 10116 spa->spa_sync_pass = 0; 10117 10118 for (int i = 0; i < spa->spa_alloc_count; i++) { 10119 mutex_enter(&spa->spa_allocs[i].spaa_lock); 10120 VERIFY0(avl_numnodes(&spa->spa_allocs[i].spaa_tree)); 10121 mutex_exit(&spa->spa_allocs[i].spaa_lock); 10122 } 10123 10124 /* 10125 * If there are any pending vdev state changes, convert them 10126 * into config changes that go out with this transaction group. 10127 */ 10128 spa_config_enter(spa, SCL_STATE, FTAG, RW_READER); 10129 while ((vd = list_head(&spa->spa_state_dirty_list)) != NULL) { 10130 /* Avoid holding the write lock unless actually necessary */ 10131 if (vd->vdev_aux == NULL) { 10132 vdev_state_clean(vd); 10133 vdev_config_dirty(vd); 10134 continue; 10135 } 10136 /* 10137 * We need the write lock here because, for aux vdevs, 10138 * calling vdev_config_dirty() modifies sav_config. 10139 * This is ugly and will become unnecessary when we 10140 * eliminate the aux vdev wart by integrating all vdevs 10141 * into the root vdev tree. 10142 */ 10143 spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG); 10144 spa_config_enter(spa, SCL_CONFIG | SCL_STATE, FTAG, RW_WRITER); 10145 while ((vd = list_head(&spa->spa_state_dirty_list)) != NULL) { 10146 vdev_state_clean(vd); 10147 vdev_config_dirty(vd); 10148 } 10149 spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG); 10150 spa_config_enter(spa, SCL_CONFIG | SCL_STATE, FTAG, RW_READER); 10151 } 10152 spa_config_exit(spa, SCL_STATE, FTAG); 10153 10154 dsl_pool_t *dp = spa->spa_dsl_pool; 10155 dmu_tx_t *tx = dmu_tx_create_assigned(dp, txg); 10156 10157 spa->spa_sync_starttime = gethrtime(); 10158 taskq_cancel_id(system_delay_taskq, spa->spa_deadman_tqid); 10159 spa->spa_deadman_tqid = taskq_dispatch_delay(system_delay_taskq, 10160 spa_deadman, spa, TQ_SLEEP, ddi_get_lbolt() + 10161 NSEC_TO_TICK(spa->spa_deadman_synctime)); 10162 10163 /* 10164 * If we are upgrading to SPA_VERSION_RAIDZ_DEFLATE this txg, 10165 * set spa_deflate if we have no raid-z vdevs. 10166 */ 10167 if (spa->spa_ubsync.ub_version < SPA_VERSION_RAIDZ_DEFLATE && 10168 spa->spa_uberblock.ub_version >= SPA_VERSION_RAIDZ_DEFLATE) { 10169 vdev_t *rvd = spa->spa_root_vdev; 10170 10171 int i; 10172 for (i = 0; i < rvd->vdev_children; i++) { 10173 vd = rvd->vdev_child[i]; 10174 if (vd->vdev_deflate_ratio != SPA_MINBLOCKSIZE) 10175 break; 10176 } 10177 if (i == rvd->vdev_children) { 10178 spa->spa_deflate = TRUE; 10179 VERIFY0(zap_add(spa->spa_meta_objset, 10180 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_DEFLATE, 10181 sizeof (uint64_t), 1, &spa->spa_deflate, tx)); 10182 } 10183 } 10184 10185 spa_sync_adjust_vdev_max_queue_depth(spa); 10186 10187 spa_sync_condense_indirect(spa, tx); 10188 10189 spa_sync_iterate_to_convergence(spa, tx); 10190 10191 #ifdef ZFS_DEBUG 10192 if (!list_is_empty(&spa->spa_config_dirty_list)) { 10193 /* 10194 * Make sure that the number of ZAPs for all the vdevs matches 10195 * the number of ZAPs in the per-vdev ZAP list. This only gets 10196 * called if the config is dirty; otherwise there may be 10197 * outstanding AVZ operations that weren't completed in 10198 * spa_sync_config_object. 
10199 */ 10200 uint64_t all_vdev_zap_entry_count; 10201 ASSERT0(zap_count(spa->spa_meta_objset, 10202 spa->spa_all_vdev_zaps, &all_vdev_zap_entry_count)); 10203 ASSERT3U(vdev_count_verify_zaps(spa->spa_root_vdev), ==, 10204 all_vdev_zap_entry_count); 10205 } 10206 #endif 10207 10208 if (spa->spa_vdev_removal != NULL) { 10209 ASSERT0(spa->spa_vdev_removal->svr_bytes_done[txg & TXG_MASK]); 10210 } 10211 10212 spa_sync_rewrite_vdev_config(spa, tx); 10213 dmu_tx_commit(tx); 10214 10215 taskq_cancel_id(system_delay_taskq, spa->spa_deadman_tqid); 10216 spa->spa_deadman_tqid = 0; 10217 10218 /* 10219 * Clear the dirty config list. 10220 */ 10221 while ((vd = list_head(&spa->spa_config_dirty_list)) != NULL) 10222 vdev_config_clean(vd); 10223 10224 /* 10225 * Now that the new config has synced transactionally, 10226 * let it become visible to the config cache. 10227 */ 10228 if (spa->spa_config_syncing != NULL) { 10229 spa_config_set(spa, spa->spa_config_syncing); 10230 spa->spa_config_txg = txg; 10231 spa->spa_config_syncing = NULL; 10232 } 10233 10234 dsl_pool_sync_done(dp, txg); 10235 10236 for (int i = 0; i < spa->spa_alloc_count; i++) { 10237 mutex_enter(&spa->spa_allocs[i].spaa_lock); 10238 VERIFY0(avl_numnodes(&spa->spa_allocs[i].spaa_tree)); 10239 mutex_exit(&spa->spa_allocs[i].spaa_lock); 10240 } 10241 10242 /* 10243 * Update usable space statistics. 10244 */ 10245 while ((vd = txg_list_remove(&spa->spa_vdev_txg_list, TXG_CLEAN(txg))) 10246 != NULL) 10247 vdev_sync_done(vd, txg); 10248 10249 metaslab_class_evict_old(spa->spa_normal_class, txg); 10250 metaslab_class_evict_old(spa->spa_log_class, txg); 10251 /* spa_embedded_log_class has only one metaslab per vdev. */ 10252 metaslab_class_evict_old(spa->spa_special_class, txg); 10253 metaslab_class_evict_old(spa->spa_dedup_class, txg); 10254 10255 spa_sync_close_syncing_log_sm(spa); 10256 10257 spa_update_dspace(spa); 10258 10259 if (spa_get_autotrim(spa) == SPA_AUTOTRIM_ON) 10260 vdev_autotrim_kick(spa); 10261 10262 /* 10263 * It had better be the case that we didn't dirty anything 10264 * since vdev_config_sync(). 10265 */ 10266 ASSERT(txg_list_empty(&dp->dp_dirty_datasets, txg)); 10267 ASSERT(txg_list_empty(&dp->dp_dirty_dirs, txg)); 10268 ASSERT(txg_list_empty(&spa->spa_vdev_txg_list, txg)); 10269 10270 while (zfs_pause_spa_sync) 10271 delay(1); 10272 10273 spa->spa_sync_pass = 0; 10274 10275 /* 10276 * Update the last synced uberblock here. We want to do this at 10277 * the end of spa_sync() so that consumers of spa_last_synced_txg() 10278 * will be guaranteed that all the processing associated with 10279 * that txg has been completed. 10280 */ 10281 spa->spa_ubsync = spa->spa_uberblock; 10282 spa_config_exit(spa, SCL_CONFIG, FTAG); 10283 10284 spa_handle_ignored_writes(spa); 10285 10286 /* 10287 * If any async tasks have been requested, kick them off. 10288 */ 10289 spa_async_dispatch(spa); 10290 } 10291 10292 /* 10293 * Sync all pools. We don't want to hold the namespace lock across these 10294 * operations, so we take a reference on the spa_t and drop the lock during the 10295 * sync. 
10296 */ 10297 void 10298 spa_sync_allpools(void) 10299 { 10300 spa_t *spa = NULL; 10301 mutex_enter(&spa_namespace_lock); 10302 while ((spa = spa_next(spa)) != NULL) { 10303 if (spa_state(spa) != POOL_STATE_ACTIVE || 10304 !spa_writeable(spa) || spa_suspended(spa)) 10305 continue; 10306 spa_open_ref(spa, FTAG); 10307 mutex_exit(&spa_namespace_lock); 10308 txg_wait_synced(spa_get_dsl(spa), 0); 10309 mutex_enter(&spa_namespace_lock); 10310 spa_close(spa, FTAG); 10311 } 10312 mutex_exit(&spa_namespace_lock); 10313 } 10314 10315 taskq_t * 10316 spa_sync_tq_create(spa_t *spa, const char *name) 10317 { 10318 kthread_t **kthreads; 10319 10320 ASSERT(spa->spa_sync_tq == NULL); 10321 ASSERT3S(spa->spa_alloc_count, <=, boot_ncpus); 10322 10323 /* 10324 * - do not allow more allocators than cpus. 10325 * - there may be more cpus than allocators. 10326 * - do not allow more sync taskq threads than allocators or cpus. 10327 */ 10328 int nthreads = spa->spa_alloc_count; 10329 spa->spa_syncthreads = kmem_zalloc(sizeof (spa_syncthread_info_t) * 10330 nthreads, KM_SLEEP); 10331 10332 spa->spa_sync_tq = taskq_create_synced(name, nthreads, minclsyspri, 10333 nthreads, INT_MAX, TASKQ_PREPOPULATE, &kthreads); 10334 VERIFY(spa->spa_sync_tq != NULL); 10335 VERIFY(kthreads != NULL); 10336 10337 spa_syncthread_info_t *ti = spa->spa_syncthreads; 10338 for (int i = 0; i < nthreads; i++, ti++) { 10339 ti->sti_thread = kthreads[i]; 10340 ti->sti_allocator = i; 10341 } 10342 10343 kmem_free(kthreads, sizeof (*kthreads) * nthreads); 10344 return (spa->spa_sync_tq); 10345 } 10346 10347 void 10348 spa_sync_tq_destroy(spa_t *spa) 10349 { 10350 ASSERT(spa->spa_sync_tq != NULL); 10351 10352 taskq_wait(spa->spa_sync_tq); 10353 taskq_destroy(spa->spa_sync_tq); 10354 kmem_free(spa->spa_syncthreads, 10355 sizeof (spa_syncthread_info_t) * spa->spa_alloc_count); 10356 spa->spa_sync_tq = NULL; 10357 } 10358 10359 uint_t 10360 spa_acq_allocator(spa_t *spa) 10361 { 10362 int i; 10363 10364 if (spa->spa_alloc_count == 1) 10365 return (0); 10366 10367 mutex_enter(&spa->spa_allocs_use->sau_lock); 10368 uint_t r = spa->spa_allocs_use->sau_rotor; 10369 do { 10370 if (++r == spa->spa_alloc_count) 10371 r = 0; 10372 } while (spa->spa_allocs_use->sau_inuse[r]); 10373 spa->spa_allocs_use->sau_inuse[r] = B_TRUE; 10374 spa->spa_allocs_use->sau_rotor = r; 10375 mutex_exit(&spa->spa_allocs_use->sau_lock); 10376 10377 spa_syncthread_info_t *ti = spa->spa_syncthreads; 10378 for (i = 0; i < spa->spa_alloc_count; i++, ti++) { 10379 if (ti->sti_thread == curthread) { 10380 ti->sti_allocator = r; 10381 break; 10382 } 10383 } 10384 ASSERT3S(i, <, spa->spa_alloc_count); 10385 return (r); 10386 } 10387 10388 void 10389 spa_rel_allocator(spa_t *spa, uint_t allocator) 10390 { 10391 if (spa->spa_alloc_count > 1) 10392 spa->spa_allocs_use->sau_inuse[allocator] = B_FALSE; 10393 } 10394 10395 void 10396 spa_select_allocator(zio_t *zio) 10397 { 10398 zbookmark_phys_t *bm = &zio->io_bookmark; 10399 spa_t *spa = zio->io_spa; 10400 10401 ASSERT(zio->io_type == ZIO_TYPE_WRITE); 10402 10403 /* 10404 * A gang block (for example) may have inherited its parent's 10405 * allocator, in which case there is nothing further to do here. 10406 */ 10407 if (ZIO_HAS_ALLOCATOR(zio)) 10408 return; 10409 10410 ASSERT(spa != NULL); 10411 ASSERT(bm != NULL); 10412 10413 /* 10414 * First try to use an allocator assigned to the syncthread, and set 10415 * the corresponding write issue taskq for the allocator. 10416 * Note, we must have an open pool to do this. 
10417 */ 10418 if (spa->spa_sync_tq != NULL) { 10419 spa_syncthread_info_t *ti = spa->spa_syncthreads; 10420 for (int i = 0; i < spa->spa_alloc_count; i++, ti++) { 10421 if (ti->sti_thread == curthread) { 10422 zio->io_allocator = ti->sti_allocator; 10423 return; 10424 } 10425 } 10426 } 10427 10428 /* 10429 * We want to try to use as many allocators as possible to help improve 10430 * performance, but we also want logically adjacent IOs to be physically 10431 * adjacent to improve sequential read performance. We chunk each object 10432 * into 2^20 block regions, and then hash based on the objset, object, 10433 * level, and region to accomplish both of these goals. 10434 */ 10435 uint64_t hv = cityhash4(bm->zb_objset, bm->zb_object, bm->zb_level, 10436 bm->zb_blkid >> 20); 10437 10438 zio->io_allocator = (uint_t)hv % spa->spa_alloc_count; 10439 } 10440 10441 /* 10442 * ========================================================================== 10443 * Miscellaneous routines 10444 * ========================================================================== 10445 */ 10446 10447 /* 10448 * Remove all pools in the system. 10449 */ 10450 void 10451 spa_evict_all(void) 10452 { 10453 spa_t *spa; 10454 10455 /* 10456 * Remove all cached state. All pools should be closed now, 10457 * so every spa in the AVL tree should be unreferenced. 10458 */ 10459 mutex_enter(&spa_namespace_lock); 10460 while ((spa = spa_next(NULL)) != NULL) { 10461 /* 10462 * Stop async tasks. The async thread may need to detach 10463 * a device that's been replaced, which requires grabbing 10464 * spa_namespace_lock, so we must drop it here. 10465 */ 10466 spa_open_ref(spa, FTAG); 10467 mutex_exit(&spa_namespace_lock); 10468 spa_async_suspend(spa); 10469 mutex_enter(&spa_namespace_lock); 10470 spa_close(spa, FTAG); 10471 10472 if (spa->spa_state != POOL_STATE_UNINITIALIZED) { 10473 spa_unload(spa); 10474 spa_deactivate(spa); 10475 } 10476 spa_remove(spa); 10477 } 10478 mutex_exit(&spa_namespace_lock); 10479 } 10480 10481 vdev_t * 10482 spa_lookup_by_guid(spa_t *spa, uint64_t guid, boolean_t aux) 10483 { 10484 vdev_t *vd; 10485 int i; 10486 10487 if ((vd = vdev_lookup_by_guid(spa->spa_root_vdev, guid)) != NULL) 10488 return (vd); 10489 10490 if (aux) { 10491 for (i = 0; i < spa->spa_l2cache.sav_count; i++) { 10492 vd = spa->spa_l2cache.sav_vdevs[i]; 10493 if (vd->vdev_guid == guid) 10494 return (vd); 10495 } 10496 10497 for (i = 0; i < spa->spa_spares.sav_count; i++) { 10498 vd = spa->spa_spares.sav_vdevs[i]; 10499 if (vd->vdev_guid == guid) 10500 return (vd); 10501 } 10502 } 10503 10504 return (NULL); 10505 } 10506 10507 void 10508 spa_upgrade(spa_t *spa, uint64_t version) 10509 { 10510 ASSERT(spa_writeable(spa)); 10511 10512 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 10513 10514 /* 10515 * This should only be called for a non-faulted pool, and since a 10516 * future version would result in an unopenable pool, this shouldn't be 10517 * possible. 
10518 */ 10519 ASSERT(SPA_VERSION_IS_SUPPORTED(spa->spa_uberblock.ub_version)); 10520 ASSERT3U(version, >=, spa->spa_uberblock.ub_version); 10521 10522 spa->spa_uberblock.ub_version = version; 10523 vdev_config_dirty(spa->spa_root_vdev); 10524 10525 spa_config_exit(spa, SCL_ALL, FTAG); 10526 10527 txg_wait_synced(spa_get_dsl(spa), 0); 10528 } 10529 10530 static boolean_t 10531 spa_has_aux_vdev(spa_t *spa, uint64_t guid, spa_aux_vdev_t *sav) 10532 { 10533 (void) spa; 10534 int i; 10535 uint64_t vdev_guid; 10536 10537 for (i = 0; i < sav->sav_count; i++) 10538 if (sav->sav_vdevs[i]->vdev_guid == guid) 10539 return (B_TRUE); 10540 10541 for (i = 0; i < sav->sav_npending; i++) { 10542 if (nvlist_lookup_uint64(sav->sav_pending[i], ZPOOL_CONFIG_GUID, 10543 &vdev_guid) == 0 && vdev_guid == guid) 10544 return (B_TRUE); 10545 } 10546 10547 return (B_FALSE); 10548 } 10549 10550 boolean_t 10551 spa_has_l2cache(spa_t *spa, uint64_t guid) 10552 { 10553 return (spa_has_aux_vdev(spa, guid, &spa->spa_l2cache)); 10554 } 10555 10556 boolean_t 10557 spa_has_spare(spa_t *spa, uint64_t guid) 10558 { 10559 return (spa_has_aux_vdev(spa, guid, &spa->spa_spares)); 10560 } 10561 10562 /* 10563 * Check if a pool has an active shared spare device. 10564 * Note: reference count of an active spare is 2, as a spare and as a replace 10565 */ 10566 static boolean_t 10567 spa_has_active_shared_spare(spa_t *spa) 10568 { 10569 int i, refcnt; 10570 uint64_t pool; 10571 spa_aux_vdev_t *sav = &spa->spa_spares; 10572 10573 for (i = 0; i < sav->sav_count; i++) { 10574 if (spa_spare_exists(sav->sav_vdevs[i]->vdev_guid, &pool, 10575 &refcnt) && pool != 0ULL && pool == spa_guid(spa) && 10576 refcnt > 2) 10577 return (B_TRUE); 10578 } 10579 10580 return (B_FALSE); 10581 } 10582 10583 uint64_t 10584 spa_total_metaslabs(spa_t *spa) 10585 { 10586 vdev_t *rvd = spa->spa_root_vdev; 10587 10588 uint64_t m = 0; 10589 for (uint64_t c = 0; c < rvd->vdev_children; c++) { 10590 vdev_t *vd = rvd->vdev_child[c]; 10591 if (!vdev_is_concrete(vd)) 10592 continue; 10593 m += vd->vdev_ms_count; 10594 } 10595 return (m); 10596 } 10597 10598 /* 10599 * Notify any waiting threads that some activity has switched from being in- 10600 * progress to not-in-progress so that the thread can wake up and determine 10601 * whether it is finished waiting. 10602 */ 10603 void 10604 spa_notify_waiters(spa_t *spa) 10605 { 10606 /* 10607 * Acquiring spa_activities_lock here prevents the cv_broadcast from 10608 * happening between the waiting thread's check and cv_wait. 10609 */ 10610 mutex_enter(&spa->spa_activities_lock); 10611 cv_broadcast(&spa->spa_activities_cv); 10612 mutex_exit(&spa->spa_activities_lock); 10613 } 10614 10615 /* 10616 * Notify any waiting threads that the pool is exporting, and then block until 10617 * they are finished using the spa_t. 10618 */ 10619 void 10620 spa_wake_waiters(spa_t *spa) 10621 { 10622 mutex_enter(&spa->spa_activities_lock); 10623 spa->spa_waiters_cancel = B_TRUE; 10624 cv_broadcast(&spa->spa_activities_cv); 10625 while (spa->spa_waiters != 0) 10626 cv_wait(&spa->spa_waiters_cv, &spa->spa_activities_lock); 10627 spa->spa_waiters_cancel = B_FALSE; 10628 mutex_exit(&spa->spa_activities_lock); 10629 } 10630 10631 /* Whether the vdev or any of its descendants are being initialized/trimmed. 
*/ 10632 static boolean_t 10633 spa_vdev_activity_in_progress_impl(vdev_t *vd, zpool_wait_activity_t activity) 10634 { 10635 spa_t *spa = vd->vdev_spa; 10636 10637 ASSERT(spa_config_held(spa, SCL_CONFIG | SCL_STATE, RW_READER)); 10638 ASSERT(MUTEX_HELD(&spa->spa_activities_lock)); 10639 ASSERT(activity == ZPOOL_WAIT_INITIALIZE || 10640 activity == ZPOOL_WAIT_TRIM); 10641 10642 kmutex_t *lock = activity == ZPOOL_WAIT_INITIALIZE ? 10643 &vd->vdev_initialize_lock : &vd->vdev_trim_lock; 10644 10645 mutex_exit(&spa->spa_activities_lock); 10646 mutex_enter(lock); 10647 mutex_enter(&spa->spa_activities_lock); 10648 10649 boolean_t in_progress = (activity == ZPOOL_WAIT_INITIALIZE) ? 10650 (vd->vdev_initialize_state == VDEV_INITIALIZE_ACTIVE) : 10651 (vd->vdev_trim_state == VDEV_TRIM_ACTIVE); 10652 mutex_exit(lock); 10653 10654 if (in_progress) 10655 return (B_TRUE); 10656 10657 for (int i = 0; i < vd->vdev_children; i++) { 10658 if (spa_vdev_activity_in_progress_impl(vd->vdev_child[i], 10659 activity)) 10660 return (B_TRUE); 10661 } 10662 10663 return (B_FALSE); 10664 } 10665 10666 /* 10667 * If use_guid is true, this checks whether the vdev specified by guid is 10668 * being initialized/trimmed. Otherwise, it checks whether any vdev in the pool 10669 * is being initialized/trimmed. The caller must hold the config lock and 10670 * spa_activities_lock. 10671 */ 10672 static int 10673 spa_vdev_activity_in_progress(spa_t *spa, boolean_t use_guid, uint64_t guid, 10674 zpool_wait_activity_t activity, boolean_t *in_progress) 10675 { 10676 mutex_exit(&spa->spa_activities_lock); 10677 spa_config_enter(spa, SCL_CONFIG | SCL_STATE, FTAG, RW_READER); 10678 mutex_enter(&spa->spa_activities_lock); 10679 10680 vdev_t *vd; 10681 if (use_guid) { 10682 vd = spa_lookup_by_guid(spa, guid, B_FALSE); 10683 if (vd == NULL || !vd->vdev_ops->vdev_op_leaf) { 10684 spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG); 10685 return (EINVAL); 10686 } 10687 } else { 10688 vd = spa->spa_root_vdev; 10689 } 10690 10691 *in_progress = spa_vdev_activity_in_progress_impl(vd, activity); 10692 10693 spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG); 10694 return (0); 10695 } 10696 10697 /* 10698 * Locking for waiting threads 10699 * --------------------------- 10700 * 10701 * Waiting threads need a way to check whether a given activity is in progress, 10702 * and then, if it is, wait for it to complete. Each activity will have some 10703 * in-memory representation of the relevant on-disk state which can be used to 10704 * determine whether or not the activity is in progress. The in-memory state and 10705 * the locking used to protect it will be different for each activity, and may 10706 * not be suitable for use with a cvar (e.g., some state is protected by the 10707 * config lock). To allow waiting threads to wait without any races, another 10708 * lock, spa_activities_lock, is used. 10709 * 10710 * When the state is checked, both the activity-specific lock (if there is one) 10711 * and spa_activities_lock are held. In some cases, the activity-specific lock 10712 * is acquired explicitly (e.g. the config lock). In others, the locking is 10713 * internal to some check (e.g. bpobj_is_empty). After checking, the waiting 10714 * thread releases the activity-specific lock and, if the activity is in 10715 * progress, then cv_waits using spa_activities_lock. 
10716 * 10717 * The waiting thread is woken when another thread, one completing some 10718 * activity, updates the state of the activity and then calls 10719 * spa_notify_waiters, which will cv_broadcast. This 'completing' thread only 10720 * needs to hold its activity-specific lock when updating the state, and this 10721 * lock can (but doesn't have to) be dropped before calling spa_notify_waiters. 10722 * 10723 * Because spa_notify_waiters acquires spa_activities_lock before broadcasting, 10724 * and because it is held when the waiting thread checks the state of the 10725 * activity, it can never be the case that the completing thread both updates 10726 * the activity state and cv_broadcasts in between the waiting thread's check 10727 * and cv_wait. Thus, a waiting thread can never miss a wakeup. 10728 * 10729 * In order to prevent deadlock, when the waiting thread does its check, in some 10730 * cases it will temporarily drop spa_activities_lock in order to acquire the 10731 * activity-specific lock. The order in which spa_activities_lock and the 10732 * activity specific lock are acquired in the waiting thread is determined by 10733 * the order in which they are acquired in the completing thread; if the 10734 * completing thread calls spa_notify_waiters with the activity-specific lock 10735 * held, then the waiting thread must also acquire the activity-specific lock 10736 * first. 10737 */ 10738 10739 static int 10740 spa_activity_in_progress(spa_t *spa, zpool_wait_activity_t activity, 10741 boolean_t use_tag, uint64_t tag, boolean_t *in_progress) 10742 { 10743 int error = 0; 10744 10745 ASSERT(MUTEX_HELD(&spa->spa_activities_lock)); 10746 10747 switch (activity) { 10748 case ZPOOL_WAIT_CKPT_DISCARD: 10749 *in_progress = 10750 (spa_feature_is_active(spa, SPA_FEATURE_POOL_CHECKPOINT) && 10751 zap_contains(spa_meta_objset(spa), 10752 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_ZPOOL_CHECKPOINT) == 10753 ENOENT); 10754 break; 10755 case ZPOOL_WAIT_FREE: 10756 *in_progress = ((spa_version(spa) >= SPA_VERSION_DEADLISTS && 10757 !bpobj_is_empty(&spa->spa_dsl_pool->dp_free_bpobj)) || 10758 spa_feature_is_active(spa, SPA_FEATURE_ASYNC_DESTROY) || 10759 spa_livelist_delete_check(spa)); 10760 break; 10761 case ZPOOL_WAIT_INITIALIZE: 10762 case ZPOOL_WAIT_TRIM: 10763 error = spa_vdev_activity_in_progress(spa, use_tag, tag, 10764 activity, in_progress); 10765 break; 10766 case ZPOOL_WAIT_REPLACE: 10767 mutex_exit(&spa->spa_activities_lock); 10768 spa_config_enter(spa, SCL_CONFIG | SCL_STATE, FTAG, RW_READER); 10769 mutex_enter(&spa->spa_activities_lock); 10770 10771 *in_progress = vdev_replace_in_progress(spa->spa_root_vdev); 10772 spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG); 10773 break; 10774 case ZPOOL_WAIT_REMOVE: 10775 *in_progress = (spa->spa_removing_phys.sr_state == 10776 DSS_SCANNING); 10777 break; 10778 case ZPOOL_WAIT_RESILVER: 10779 *in_progress = vdev_rebuild_active(spa->spa_root_vdev); 10780 if (*in_progress) 10781 break; 10782 zfs_fallthrough; 10783 case ZPOOL_WAIT_SCRUB: 10784 { 10785 boolean_t scanning, paused, is_scrub; 10786 dsl_scan_t *scn = spa->spa_dsl_pool->dp_scan; 10787 10788 is_scrub = (scn->scn_phys.scn_func == POOL_SCAN_SCRUB); 10789 scanning = (scn->scn_phys.scn_state == DSS_SCANNING); 10790 paused = dsl_scan_is_paused_scrub(scn); 10791 *in_progress = (scanning && !paused && 10792 is_scrub == (activity == ZPOOL_WAIT_SCRUB)); 10793 break; 10794 } 10795 case ZPOOL_WAIT_RAIDZ_EXPAND: 10796 { 10797 vdev_raidz_expand_t *vre = spa->spa_raidz_expand; 10798 *in_progress = (vre 
!= NULL && vre->vre_state == DSS_SCANNING); 10799 break; 10800 } 10801 default: 10802 panic("unrecognized value for activity %d", activity); 10803 } 10804 10805 return (error); 10806 } 10807 10808 static int 10809 spa_wait_common(const char *pool, zpool_wait_activity_t activity, 10810 boolean_t use_tag, uint64_t tag, boolean_t *waited) 10811 { 10812 /* 10813 * The tag is used to distinguish between instances of an activity. 10814 * 'initialize' and 'trim' are the only activities that we use this for. 10815 * The other activities can only have a single instance in progress in a 10816 * pool at one time, making the tag unnecessary. 10817 * 10818 * There can be multiple devices being replaced at once, but since they 10819 * all finish once resilvering finishes, we don't bother keeping track 10820 * of them individually, we just wait for them all to finish. 10821 */ 10822 if (use_tag && activity != ZPOOL_WAIT_INITIALIZE && 10823 activity != ZPOOL_WAIT_TRIM) 10824 return (EINVAL); 10825 10826 if (activity < 0 || activity >= ZPOOL_WAIT_NUM_ACTIVITIES) 10827 return (EINVAL); 10828 10829 spa_t *spa; 10830 int error = spa_open(pool, &spa, FTAG); 10831 if (error != 0) 10832 return (error); 10833 10834 /* 10835 * Increment the spa's waiter count so that we can call spa_close and 10836 * still ensure that the spa_t doesn't get freed before this thread is 10837 * finished with it when the pool is exported. We want to call spa_close 10838 * before we start waiting because otherwise the additional ref would 10839 * prevent the pool from being exported or destroyed throughout the 10840 * potentially long wait. 10841 */ 10842 mutex_enter(&spa->spa_activities_lock); 10843 spa->spa_waiters++; 10844 spa_close(spa, FTAG); 10845 10846 *waited = B_FALSE; 10847 for (;;) { 10848 boolean_t in_progress; 10849 error = spa_activity_in_progress(spa, activity, use_tag, tag, 10850 &in_progress); 10851 10852 if (error || !in_progress || spa->spa_waiters_cancel) 10853 break; 10854 10855 *waited = B_TRUE; 10856 10857 if (cv_wait_sig(&spa->spa_activities_cv, 10858 &spa->spa_activities_lock) == 0) { 10859 error = EINTR; 10860 break; 10861 } 10862 } 10863 10864 spa->spa_waiters--; 10865 cv_signal(&spa->spa_waiters_cv); 10866 mutex_exit(&spa->spa_activities_lock); 10867 10868 return (error); 10869 } 10870 10871 /* 10872 * Wait for a particular instance of the specified activity to complete, where 10873 * the instance is identified by 'tag' 10874 */ 10875 int 10876 spa_wait_tag(const char *pool, zpool_wait_activity_t activity, uint64_t tag, 10877 boolean_t *waited) 10878 { 10879 return (spa_wait_common(pool, activity, B_TRUE, tag, waited)); 10880 } 10881 10882 /* 10883 * Wait for all instances of the specified activity complete 10884 */ 10885 int 10886 spa_wait(const char *pool, zpool_wait_activity_t activity, boolean_t *waited) 10887 { 10888 10889 return (spa_wait_common(pool, activity, B_FALSE, 0, waited)); 10890 } 10891 10892 sysevent_t * 10893 spa_event_create(spa_t *spa, vdev_t *vd, nvlist_t *hist_nvl, const char *name) 10894 { 10895 sysevent_t *ev = NULL; 10896 #ifdef _KERNEL 10897 nvlist_t *resource; 10898 10899 resource = zfs_event_create(spa, vd, FM_SYSEVENT_CLASS, name, hist_nvl); 10900 if (resource) { 10901 ev = kmem_alloc(sizeof (sysevent_t), KM_SLEEP); 10902 ev->resource = resource; 10903 } 10904 #else 10905 (void) spa, (void) vd, (void) hist_nvl, (void) name; 10906 #endif 10907 return (ev); 10908 } 10909 10910 void 10911 spa_event_post(sysevent_t *ev) 10912 { 10913 #ifdef _KERNEL 10914 if (ev) { 10915 
zfs_zevent_post(ev->resource, NULL, zfs_zevent_post_cb); 10916 kmem_free(ev, sizeof (*ev)); 10917 } 10918 #else 10919 (void) ev; 10920 #endif 10921 } 10922 10923 /* 10924 * Post a zevent corresponding to the given sysevent. The 'name' must be one 10925 * of the event definitions in sys/sysevent/eventdefs.h. The payload will be 10926 * filled in from the spa and (optionally) the vdev. This doesn't do anything 10927 * in the userland libzpool, as we don't want consumers to misinterpret ztest 10928 * or zdb as real changes. 10929 */ 10930 void 10931 spa_event_notify(spa_t *spa, vdev_t *vd, nvlist_t *hist_nvl, const char *name) 10932 { 10933 spa_event_post(spa_event_create(spa, vd, hist_nvl, name)); 10934 } 10935 10936 /* state manipulation functions */ 10937 EXPORT_SYMBOL(spa_open); 10938 EXPORT_SYMBOL(spa_open_rewind); 10939 EXPORT_SYMBOL(spa_get_stats); 10940 EXPORT_SYMBOL(spa_create); 10941 EXPORT_SYMBOL(spa_import); 10942 EXPORT_SYMBOL(spa_tryimport); 10943 EXPORT_SYMBOL(spa_destroy); 10944 EXPORT_SYMBOL(spa_export); 10945 EXPORT_SYMBOL(spa_reset); 10946 EXPORT_SYMBOL(spa_async_request); 10947 EXPORT_SYMBOL(spa_async_suspend); 10948 EXPORT_SYMBOL(spa_async_resume); 10949 EXPORT_SYMBOL(spa_inject_addref); 10950 EXPORT_SYMBOL(spa_inject_delref); 10951 EXPORT_SYMBOL(spa_scan_stat_init); 10952 EXPORT_SYMBOL(spa_scan_get_stats); 10953 10954 /* device manipulation */ 10955 EXPORT_SYMBOL(spa_vdev_add); 10956 EXPORT_SYMBOL(spa_vdev_attach); 10957 EXPORT_SYMBOL(spa_vdev_detach); 10958 EXPORT_SYMBOL(spa_vdev_setpath); 10959 EXPORT_SYMBOL(spa_vdev_setfru); 10960 EXPORT_SYMBOL(spa_vdev_split_mirror); 10961 10962 /* spare state (which is global across all pools) */ 10963 EXPORT_SYMBOL(spa_spare_add); 10964 EXPORT_SYMBOL(spa_spare_remove); 10965 EXPORT_SYMBOL(spa_spare_exists); 10966 EXPORT_SYMBOL(spa_spare_activate); 10967 10968 /* L2ARC state (which is global across all pools) */ 10969 EXPORT_SYMBOL(spa_l2cache_add); 10970 EXPORT_SYMBOL(spa_l2cache_remove); 10971 EXPORT_SYMBOL(spa_l2cache_exists); 10972 EXPORT_SYMBOL(spa_l2cache_activate); 10973 EXPORT_SYMBOL(spa_l2cache_drop); 10974 10975 /* scanning */ 10976 EXPORT_SYMBOL(spa_scan); 10977 EXPORT_SYMBOL(spa_scan_stop); 10978 10979 /* spa syncing */ 10980 EXPORT_SYMBOL(spa_sync); /* only for DMU use */ 10981 EXPORT_SYMBOL(spa_sync_allpools); 10982 10983 /* properties */ 10984 EXPORT_SYMBOL(spa_prop_set); 10985 EXPORT_SYMBOL(spa_prop_get); 10986 EXPORT_SYMBOL(spa_prop_clear_bootfs); 10987 10988 /* asynchronous event notification */ 10989 EXPORT_SYMBOL(spa_event_notify); 10990 10991 ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, preload_pct, UINT, ZMOD_RW, 10992 "Percentage of CPUs to run a metaslab preload taskq"); 10993 10994 /* BEGIN CSTYLED */ 10995 ZFS_MODULE_PARAM(zfs_spa, spa_, load_verify_shift, UINT, ZMOD_RW, 10996 "log2 fraction of arc that can be used by inflight I/Os when " 10997 "verifying pool during import"); 10998 /* END CSTYLED */ 10999 11000 ZFS_MODULE_PARAM(zfs_spa, spa_, load_verify_metadata, INT, ZMOD_RW, 11001 "Set to traverse metadata on pool import"); 11002 11003 ZFS_MODULE_PARAM(zfs_spa, spa_, load_verify_data, INT, ZMOD_RW, 11004 "Set to traverse data on pool import"); 11005 11006 ZFS_MODULE_PARAM(zfs_spa, spa_, load_print_vdev_tree, INT, ZMOD_RW, 11007 "Print vdev tree to zfs_dbgmsg during pool import"); 11008 11009 ZFS_MODULE_PARAM(zfs_zio, zio_, taskq_batch_pct, UINT, ZMOD_RW, 11010 "Percentage of CPUs to run an IO worker thread"); 11011 11012 ZFS_MODULE_PARAM(zfs_zio, zio_, taskq_batch_tpq, UINT, ZMOD_RW, 11013 "Number of threads per IO
worker taskqueue"); 11014 11015 /* BEGIN CSTYLED */ 11016 ZFS_MODULE_PARAM(zfs, zfs_, max_missing_tvds, U64, ZMOD_RW, 11017 "Allow importing pool with up to this number of missing top-level " 11018 "vdevs (in read-only mode)"); 11019 /* END CSTYLED */ 11020 11021 ZFS_MODULE_PARAM(zfs_livelist_condense, zfs_livelist_condense_, zthr_pause, INT, 11022 ZMOD_RW, "Set the livelist condense zthr to pause"); 11023 11024 ZFS_MODULE_PARAM(zfs_livelist_condense, zfs_livelist_condense_, sync_pause, INT, 11025 ZMOD_RW, "Set the livelist condense synctask to pause"); 11026 11027 /* BEGIN CSTYLED */ 11028 ZFS_MODULE_PARAM(zfs_livelist_condense, zfs_livelist_condense_, sync_cancel, 11029 INT, ZMOD_RW, 11030 "Whether livelist condensing was canceled in the synctask"); 11031 11032 ZFS_MODULE_PARAM(zfs_livelist_condense, zfs_livelist_condense_, zthr_cancel, 11033 INT, ZMOD_RW, 11034 "Whether livelist condensing was canceled in the zthr function"); 11035 11036 ZFS_MODULE_PARAM(zfs_livelist_condense, zfs_livelist_condense_, new_alloc, INT, 11037 ZMOD_RW, 11038 "Whether extra ALLOC blkptrs were added to a livelist entry while it " 11039 "was being condensed"); 11040 11041 #ifdef _KERNEL 11042 ZFS_MODULE_VIRTUAL_PARAM_CALL(zfs_zio, zio_, taskq_read, 11043 spa_taskq_read_param_set, spa_taskq_read_param_get, ZMOD_RW, 11044 "Configure IO queues for read IO"); 11045 ZFS_MODULE_VIRTUAL_PARAM_CALL(zfs_zio, zio_, taskq_write, 11046 spa_taskq_write_param_set, spa_taskq_write_param_get, ZMOD_RW, 11047 "Configure IO queues for write IO"); 11048 #endif 11049 /* END CSTYLED */ 11050 11051 ZFS_MODULE_PARAM(zfs_zio, zio_, taskq_write_tpq, UINT, ZMOD_RW, 11052 "Number of CPUs per write issue taskq"); 11053