1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or https://opensource.org/licenses/CDDL-1.0. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 22 /* 23 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved. 24 * Copyright (c) 2011, 2020 by Delphix. All rights reserved. 25 * Copyright (c) 2018, Nexenta Systems, Inc. All rights reserved. 26 * Copyright (c) 2014 Spectra Logic Corporation, All rights reserved. 27 * Copyright 2013 Saso Kiselkov. All rights reserved. 28 * Copyright (c) 2014 Integros [integros.com] 29 * Copyright 2016 Toomas Soome <tsoome@me.com> 30 * Copyright (c) 2016 Actifio, Inc. All rights reserved. 31 * Copyright 2018 Joyent, Inc. 32 * Copyright (c) 2017, 2019, Datto Inc. All rights reserved. 33 * Copyright 2017 Joyent, Inc. 34 * Copyright (c) 2017, Intel Corporation. 35 * Copyright (c) 2021, Colm Buckley <colm@tuatha.org> 36 */ 37 38 /* 39 * SPA: Storage Pool Allocator 40 * 41 * This file contains all the routines used when modifying on-disk SPA state. 42 * This includes opening, importing, destroying, exporting a pool, and syncing a 43 * pool. 44 */ 45 46 #include <sys/zfs_context.h> 47 #include <sys/fm/fs/zfs.h> 48 #include <sys/spa_impl.h> 49 #include <sys/zio.h> 50 #include <sys/zio_checksum.h> 51 #include <sys/dmu.h> 52 #include <sys/dmu_tx.h> 53 #include <sys/zap.h> 54 #include <sys/zil.h> 55 #include <sys/ddt.h> 56 #include <sys/vdev_impl.h> 57 #include <sys/vdev_removal.h> 58 #include <sys/vdev_indirect_mapping.h> 59 #include <sys/vdev_indirect_births.h> 60 #include <sys/vdev_initialize.h> 61 #include <sys/vdev_rebuild.h> 62 #include <sys/vdev_trim.h> 63 #include <sys/vdev_disk.h> 64 #include <sys/vdev_draid.h> 65 #include <sys/metaslab.h> 66 #include <sys/metaslab_impl.h> 67 #include <sys/mmp.h> 68 #include <sys/uberblock_impl.h> 69 #include <sys/txg.h> 70 #include <sys/avl.h> 71 #include <sys/bpobj.h> 72 #include <sys/dmu_traverse.h> 73 #include <sys/dmu_objset.h> 74 #include <sys/unique.h> 75 #include <sys/dsl_pool.h> 76 #include <sys/dsl_dataset.h> 77 #include <sys/dsl_dir.h> 78 #include <sys/dsl_prop.h> 79 #include <sys/dsl_synctask.h> 80 #include <sys/fs/zfs.h> 81 #include <sys/arc.h> 82 #include <sys/callb.h> 83 #include <sys/systeminfo.h> 84 #include <sys/zfs_ioctl.h> 85 #include <sys/dsl_scan.h> 86 #include <sys/zfeature.h> 87 #include <sys/dsl_destroy.h> 88 #include <sys/zvol.h> 89 90 #ifdef _KERNEL 91 #include <sys/fm/protocol.h> 92 #include <sys/fm/util.h> 93 #include <sys/callb.h> 94 #include <sys/zone.h> 95 #include <sys/vmsystm.h> 96 #endif /* _KERNEL */ 97 98 #include "zfs_prop.h" 99 #include "zfs_comutil.h" 100 101 /* 102 * The interval, in seconds, at which failed configuration cache file writes 103 * should be retried. 
 */
int zfs_ccw_retry_interval = 300;

typedef enum zti_modes {
	ZTI_MODE_FIXED,			/* value is # of threads (min 1) */
	ZTI_MODE_BATCH,			/* cpu-intensive; value is ignored */
	ZTI_MODE_SCALE,			/* Taskqs scale with CPUs. */
	ZTI_MODE_NULL,			/* don't create a taskq */
	ZTI_NMODES
} zti_modes_t;

#define	ZTI_P(n, q)	{ ZTI_MODE_FIXED, (n), (q) }
#define	ZTI_PCT(n)	{ ZTI_MODE_ONLINE_PERCENT, (n), 1 }
#define	ZTI_BATCH	{ ZTI_MODE_BATCH, 0, 1 }
#define	ZTI_SCALE	{ ZTI_MODE_SCALE, 0, 1 }
#define	ZTI_NULL	{ ZTI_MODE_NULL, 0, 0 }

#define	ZTI_N(n)	ZTI_P(n, 1)
#define	ZTI_ONE		ZTI_N(1)

typedef struct zio_taskq_info {
	zti_modes_t zti_mode;
	uint_t zti_value;
	uint_t zti_count;
} zio_taskq_info_t;

static const char *const zio_taskq_types[ZIO_TASKQ_TYPES] = {
	"iss", "iss_h", "int", "int_h"
};

/*
 * This table defines the taskq settings for each ZFS I/O type. When
 * initializing a pool, we use this table to create an appropriately sized
 * taskq. Some operations are low volume and therefore have a small, static
 * number of threads assigned to their taskqs using the ZTI_N(#) or ZTI_ONE
 * macros. Other operations process a large amount of data; the ZTI_BATCH
 * macro causes us to create a taskq oriented for throughput. Some operations
 * are so high frequency and short-lived that the taskq itself can become a
 * point of lock contention. The ZTI_P(#, #) macro indicates that we need an
 * additional degree of parallelism specified by the number of threads per-
 * taskq and the number of taskqs; when dispatching an event in this case, the
 * particular taskq is chosen at random. ZTI_SCALE is similar to ZTI_BATCH,
 * but with number of taskqs also scaling with number of CPUs.
 *
 * The different taskq priorities are to handle the different contexts (issue
 * and interrupt) and then to reserve threads for ZIO_PRIORITY_NOW I/Os that
 * need to be handled with minimum delay.
 */
static const zio_taskq_info_t zio_taskqs[ZIO_TYPES][ZIO_TASKQ_TYPES] = {
	/* ISSUE	ISSUE_HIGH	INTR		INTR_HIGH */
	{ ZTI_ONE,	ZTI_NULL,	ZTI_ONE,	ZTI_NULL }, /* NULL */
	{ ZTI_N(8),	ZTI_NULL,	ZTI_SCALE,	ZTI_NULL }, /* READ */
	{ ZTI_BATCH,	ZTI_N(5),	ZTI_SCALE,	ZTI_N(5) }, /* WRITE */
	{ ZTI_SCALE,	ZTI_NULL,	ZTI_ONE,	ZTI_NULL }, /* FREE */
	{ ZTI_ONE,	ZTI_NULL,	ZTI_ONE,	ZTI_NULL }, /* CLAIM */
	{ ZTI_ONE,	ZTI_NULL,	ZTI_ONE,	ZTI_NULL }, /* IOCTL */
	{ ZTI_N(4),	ZTI_NULL,	ZTI_ONE,	ZTI_NULL }, /* TRIM */
};

static void spa_sync_version(void *arg, dmu_tx_t *tx);
static void spa_sync_props(void *arg, dmu_tx_t *tx);
static boolean_t spa_has_active_shared_spare(spa_t *spa);
static int spa_load_impl(spa_t *spa, spa_import_type_t type,
    const char **ereport);
static void spa_vdev_resilver_done(spa_t *spa);

static uint_t	zio_taskq_batch_pct = 80;	/* 1 thread per cpu in pset */
static uint_t	zio_taskq_batch_tpq;		/* threads per taskq */
static const boolean_t	zio_taskq_sysdc = B_TRUE; /* use SDC scheduling class */
static const uint_t	zio_taskq_basedc = 80;	/* base duty cycle */

static const boolean_t spa_create_process = B_TRUE; /* no process => no sysdc */
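
/*
 * Illustrative sketch (not part of the pool code): how one entry of the
 * zio_taskqs[][] table above is consumed by spa_taskqs_init() when a pool
 * is activated. The numbers assume the WRITE/ISSUE_HIGH entry ZTI_N(5),
 * i.e. ZTI_P(5, 1): one taskq with five fixed threads.
 *
 *	const zio_taskq_info_t *ztip =
 *	    &zio_taskqs[ZIO_TYPE_WRITE][ZIO_TASKQ_ISSUE_HIGH];
 *	// ztip->zti_mode  == ZTI_MODE_FIXED
 *	// ztip->zti_value == 5	(threads per taskq)
 *	// ztip->zti_count == 1	(number of taskqs)
 *
 * A ZTI_P(4, 8) entry would instead create eight fixed taskqs of four
 * threads each, and spa_taskq_dispatch_ent() would pick one of the eight
 * at random for every dispatch.
 */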

/*
 * Report any spa_load_verify errors found, but do not fail spa_load.
 * This is used by zdb to analyze non-idle pools.
 */
boolean_t	spa_load_verify_dryrun = B_FALSE;

/*
 * Allow read spacemaps in case of readonly import (spa_mode == SPA_MODE_READ).
 * This is used by zdb for spacemaps verification.
 */
boolean_t	spa_mode_readable_spacemaps = B_FALSE;

/*
 * This (illegal) pool name is used when temporarily importing a spa_t in order
 * to get the vdev stats associated with the imported devices.
 */
#define	TRYIMPORT_NAME	"$import"

/*
 * For debugging purposes: print out vdev tree during pool import.
 */
static int	spa_load_print_vdev_tree = B_FALSE;

/*
 * A non-zero value for zfs_max_missing_tvds means that we allow importing
 * pools with missing top-level vdevs. This is strictly intended for advanced
 * pool recovery cases since missing data is almost inevitable. Pools with
 * missing devices can only be imported read-only for safety reasons, and their
 * fail-mode will be automatically set to "continue".
 *
 * With 1 missing vdev we should be able to import the pool and mount all
 * datasets. User data that was not modified after the missing device has been
 * added should be recoverable. This means that snapshots created prior to the
 * addition of that device should be completely intact.
 *
 * With 2 missing vdevs, some datasets may fail to mount since there are
 * dataset statistics that are stored as regular metadata. Some data might be
 * recoverable if those vdevs were added recently.
 *
 * With 3 or more missing vdevs, the pool is severely damaged and MOS entries
 * may be missing entirely. Chances of data recovery are very low. Note that
 * there are also risks of performing an inadvertent rewind as we might be
 * missing all the vdevs with the latest uberblocks.
 */
unsigned long	zfs_max_missing_tvds = 0;

/*
 * The parameters below are similar to zfs_max_missing_tvds but are only
 * intended for a preliminary open of the pool with an untrusted config which
 * might be incomplete or out-dated.
 *
 * We are more tolerant for pools opened from a cachefile since we could have
 * an out-dated cachefile where a device removal was not registered.
 * We could have set the limit arbitrarily high but in the case where devices
 * are really missing we would want to return the proper error codes; we chose
 * SPA_DVAS_PER_BP - 1 so that some copies of the MOS would still be available
 * and we get a chance to retrieve the trusted config.
 */
uint64_t	zfs_max_missing_tvds_cachefile = SPA_DVAS_PER_BP - 1;
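
/*
 * Illustrative note (not part of the pool code): with SPA_DVAS_PER_BP == 3,
 * the cachefile limit above works out to 2 missing top-level vdevs, which
 * matches the reasoning in the comment: with at most two top-level vdevs
 * absent, at least one of the (up to) three MOS block copies can still be
 * read, giving us a chance to retrieve the trusted config.
 */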

/*
 * In the case where config was assembled by scanning device paths (/dev/dsks
 * by default) we are less tolerant since all the existing devices should have
 * been detected and we want spa_load to return the right error codes.
 */
uint64_t	zfs_max_missing_tvds_scan = 0;

/*
 * Debugging aid that pauses spa_sync() towards the end.
 */
static const boolean_t	zfs_pause_spa_sync = B_FALSE;

/*
 * Variables to indicate the livelist condense zthr func should wait at certain
 * points for the livelist to be removed - used to test condense/destroy races
 */
static int zfs_livelist_condense_zthr_pause = 0;
static int zfs_livelist_condense_sync_pause = 0;

/*
 * Variables to track whether or not condense cancellation has been
 * triggered in testing.
 */
static int zfs_livelist_condense_sync_cancel = 0;
static int zfs_livelist_condense_zthr_cancel = 0;

/*
 * Variable to track whether or not extra ALLOC blkptrs were added to a
 * livelist entry while it was being condensed (caused by the way we track
 * remapped blkptrs in dbuf_remap_impl)
 */
static int zfs_livelist_condense_new_alloc = 0;

/*
 * ==========================================================================
 * SPA properties routines
 * ==========================================================================
 */

/*
 * Add a (source=src, propname=propval) list to an nvlist.
 */
static void
spa_prop_add_list(nvlist_t *nvl, zpool_prop_t prop, const char *strval,
    uint64_t intval, zprop_source_t src)
{
	const char *propname = zpool_prop_to_name(prop);
	nvlist_t *propval;

	propval = fnvlist_alloc();
	fnvlist_add_uint64(propval, ZPROP_SOURCE, src);

	if (strval != NULL)
		fnvlist_add_string(propval, ZPROP_VALUE, strval);
	else
		fnvlist_add_uint64(propval, ZPROP_VALUE, intval);

	fnvlist_add_nvlist(nvl, propname, propval);
	nvlist_free(propval);
}
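
/*
 * Illustrative sketch (not part of the pool code): the nvlist layout that
 * spa_prop_add_list() produces. For example, adding ZPOOL_PROP_CAPACITY
 * with intval = 42 and src = ZPROP_SRC_NONE yields an entry equivalent to:
 *
 *	"capacity" -> {
 *		ZPROP_SOURCE ("source") = ZPROP_SRC_NONE,
 *		ZPROP_VALUE  ("value")  = 42
 *	}
 *
 * String-valued properties (strval != NULL) store ZPROP_VALUE as a string
 * instead of a uint64. The "capacity" key is whatever
 * zpool_prop_to_name(ZPOOL_PROP_CAPACITY) returns.
 */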

/*
 * Get property values from the spa configuration.
 */
static void
spa_prop_get_config(spa_t *spa, nvlist_t **nvp)
{
	vdev_t *rvd = spa->spa_root_vdev;
	dsl_pool_t *pool = spa->spa_dsl_pool;
	uint64_t size, alloc, cap, version;
	const zprop_source_t src = ZPROP_SRC_NONE;
	spa_config_dirent_t *dp;
	metaslab_class_t *mc = spa_normal_class(spa);

	ASSERT(MUTEX_HELD(&spa->spa_props_lock));

	if (rvd != NULL) {
		alloc = metaslab_class_get_alloc(mc);
		alloc += metaslab_class_get_alloc(spa_special_class(spa));
		alloc += metaslab_class_get_alloc(spa_dedup_class(spa));
		alloc += metaslab_class_get_alloc(spa_embedded_log_class(spa));

		size = metaslab_class_get_space(mc);
		size += metaslab_class_get_space(spa_special_class(spa));
		size += metaslab_class_get_space(spa_dedup_class(spa));
		size += metaslab_class_get_space(spa_embedded_log_class(spa));

		spa_prop_add_list(*nvp, ZPOOL_PROP_NAME, spa_name(spa), 0, src);
		spa_prop_add_list(*nvp, ZPOOL_PROP_SIZE, NULL, size, src);
		spa_prop_add_list(*nvp, ZPOOL_PROP_ALLOCATED, NULL, alloc, src);
		spa_prop_add_list(*nvp, ZPOOL_PROP_FREE, NULL,
		    size - alloc, src);
		spa_prop_add_list(*nvp, ZPOOL_PROP_CHECKPOINT, NULL,
		    spa->spa_checkpoint_info.sci_dspace, src);

		spa_prop_add_list(*nvp, ZPOOL_PROP_FRAGMENTATION, NULL,
		    metaslab_class_fragmentation(mc), src);
		spa_prop_add_list(*nvp, ZPOOL_PROP_EXPANDSZ, NULL,
		    metaslab_class_expandable_space(mc), src);
		spa_prop_add_list(*nvp, ZPOOL_PROP_READONLY, NULL,
		    (spa_mode(spa) == SPA_MODE_READ), src);

		cap = (size == 0) ? 0 : (alloc * 100 / size);
		spa_prop_add_list(*nvp, ZPOOL_PROP_CAPACITY, NULL, cap, src);

		spa_prop_add_list(*nvp, ZPOOL_PROP_DEDUPRATIO, NULL,
		    ddt_get_pool_dedup_ratio(spa), src);

		spa_prop_add_list(*nvp, ZPOOL_PROP_HEALTH, NULL,
		    rvd->vdev_state, src);

		version = spa_version(spa);
		if (version == zpool_prop_default_numeric(ZPOOL_PROP_VERSION)) {
			spa_prop_add_list(*nvp, ZPOOL_PROP_VERSION, NULL,
			    version, ZPROP_SRC_DEFAULT);
		} else {
			spa_prop_add_list(*nvp, ZPOOL_PROP_VERSION, NULL,
			    version, ZPROP_SRC_LOCAL);
		}
		spa_prop_add_list(*nvp, ZPOOL_PROP_LOAD_GUID,
		    NULL, spa_load_guid(spa), src);
	}

	if (pool != NULL) {
		/*
		 * The $FREE directory was introduced in SPA_VERSION_DEADLISTS;
		 * when opening pools created before this version, freedir
		 * will be NULL.
		 */
		if (pool->dp_free_dir != NULL) {
			spa_prop_add_list(*nvp, ZPOOL_PROP_FREEING, NULL,
			    dsl_dir_phys(pool->dp_free_dir)->dd_used_bytes,
			    src);
		} else {
			spa_prop_add_list(*nvp, ZPOOL_PROP_FREEING,
			    NULL, 0, src);
		}

		if (pool->dp_leak_dir != NULL) {
			spa_prop_add_list(*nvp, ZPOOL_PROP_LEAKED, NULL,
			    dsl_dir_phys(pool->dp_leak_dir)->dd_used_bytes,
			    src);
		} else {
			spa_prop_add_list(*nvp, ZPOOL_PROP_LEAKED,
			    NULL, 0, src);
		}
	}

	spa_prop_add_list(*nvp, ZPOOL_PROP_GUID, NULL, spa_guid(spa), src);

	if (spa->spa_comment != NULL) {
		spa_prop_add_list(*nvp, ZPOOL_PROP_COMMENT, spa->spa_comment,
		    0, ZPROP_SRC_LOCAL);
	}

	if (spa->spa_compatibility != NULL) {
		spa_prop_add_list(*nvp, ZPOOL_PROP_COMPATIBILITY,
		    spa->spa_compatibility, 0, ZPROP_SRC_LOCAL);
	}

	if (spa->spa_root != NULL)
		spa_prop_add_list(*nvp, ZPOOL_PROP_ALTROOT, spa->spa_root,
		    0, ZPROP_SRC_LOCAL);

	if (spa_feature_is_enabled(spa, SPA_FEATURE_LARGE_BLOCKS)) {
		spa_prop_add_list(*nvp, ZPOOL_PROP_MAXBLOCKSIZE, NULL,
		    MIN(zfs_max_recordsize, SPA_MAXBLOCKSIZE), ZPROP_SRC_NONE);
	} else {
		spa_prop_add_list(*nvp, ZPOOL_PROP_MAXBLOCKSIZE, NULL,
		    SPA_OLD_MAXBLOCKSIZE, ZPROP_SRC_NONE);
	}

	if (spa_feature_is_enabled(spa, SPA_FEATURE_LARGE_DNODE)) {
		spa_prop_add_list(*nvp, ZPOOL_PROP_MAXDNODESIZE, NULL,
		    DNODE_MAX_SIZE, ZPROP_SRC_NONE);
	} else {
		spa_prop_add_list(*nvp, ZPOOL_PROP_MAXDNODESIZE, NULL,
		    DNODE_MIN_SIZE, ZPROP_SRC_NONE);
	}

	if ((dp = list_head(&spa->spa_config_list)) != NULL) {
		if (dp->scd_path == NULL) {
			spa_prop_add_list(*nvp, ZPOOL_PROP_CACHEFILE,
			    "none", 0, ZPROP_SRC_LOCAL);
		} else if (strcmp(dp->scd_path, spa_config_path) != 0) {
			spa_prop_add_list(*nvp, ZPOOL_PROP_CACHEFILE,
			    dp->scd_path, 0, ZPROP_SRC_LOCAL);
		}
	}
}
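
/*
 * Illustrative note (not part of the pool code): the ZPOOL_PROP_CAPACITY
 * value computed in spa_prop_get_config() above uses integer arithmetic,
 * so the reported percentage truncates rather than rounds. For example:
 *
 *	size = 1000, alloc = 999
 *	cap = alloc * 100 / size = 99900 / 1000 = 99	(not 100)
 */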

/*
 * Get zpool property values.
 */
int
spa_prop_get(spa_t *spa, nvlist_t **nvp)
{
	objset_t *mos = spa->spa_meta_objset;
	zap_cursor_t zc;
	zap_attribute_t za;
	dsl_pool_t *dp;
	int err;

	err = nvlist_alloc(nvp, NV_UNIQUE_NAME, KM_SLEEP);
	if (err)
		return (err);

	dp = spa_get_dsl(spa);
	dsl_pool_config_enter(dp, FTAG);
	mutex_enter(&spa->spa_props_lock);

	/*
	 * Get properties from the spa config.
	 */
	spa_prop_get_config(spa, nvp);

	/* If no pool property object, no more prop to get. */
	if (mos == NULL || spa->spa_pool_props_object == 0)
		goto out;

	/*
	 * Get properties from the MOS pool property object.
	 */
	for (zap_cursor_init(&zc, mos, spa->spa_pool_props_object);
	    (err = zap_cursor_retrieve(&zc, &za)) == 0;
	    zap_cursor_advance(&zc)) {
		uint64_t intval = 0;
		char *strval = NULL;
		zprop_source_t src = ZPROP_SRC_DEFAULT;
		zpool_prop_t prop;

		if ((prop = zpool_name_to_prop(za.za_name)) == ZPOOL_PROP_INVAL)
			continue;

		switch (za.za_integer_length) {
		case 8:
			/* integer property */
			if (za.za_first_integer !=
			    zpool_prop_default_numeric(prop))
				src = ZPROP_SRC_LOCAL;

			if (prop == ZPOOL_PROP_BOOTFS) {
				dsl_dataset_t *ds = NULL;

				err = dsl_dataset_hold_obj(dp,
				    za.za_first_integer, FTAG, &ds);
				if (err != 0)
					break;

				strval = kmem_alloc(ZFS_MAX_DATASET_NAME_LEN,
				    KM_SLEEP);
				dsl_dataset_name(ds, strval);
				dsl_dataset_rele(ds, FTAG);
			} else {
				strval = NULL;
				intval = za.za_first_integer;
			}

			spa_prop_add_list(*nvp, prop, strval, intval, src);

			if (strval != NULL)
				kmem_free(strval, ZFS_MAX_DATASET_NAME_LEN);

			break;

		case 1:
			/* string property */
			strval = kmem_alloc(za.za_num_integers, KM_SLEEP);
			err = zap_lookup(mos, spa->spa_pool_props_object,
			    za.za_name, 1, za.za_num_integers, strval);
			if (err) {
				kmem_free(strval, za.za_num_integers);
				break;
			}
			spa_prop_add_list(*nvp, prop, strval, 0, src);
			kmem_free(strval, za.za_num_integers);
			break;

		default:
			break;
		}
	}
	zap_cursor_fini(&zc);
out:
	mutex_exit(&spa->spa_props_lock);
	dsl_pool_config_exit(dp, FTAG);
	if (err && err != ENOENT) {
		nvlist_free(*nvp);
		*nvp = NULL;
		return (err);
	}

	return (0);
}
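
/*
 * Illustrative sketch (not part of the pool code): how a caller might
 * consume the nvlist built by spa_prop_get() and the per-property nvlists
 * added by spa_prop_add_list(). Error handling is elided and the caller is
 * assumed to already hold the spa_t.
 *
 *	nvlist_t *props = NULL;
 *	if (spa_prop_get(spa, &props) == 0) {
 *		nvlist_t *pv = fnvlist_lookup_nvlist(props,
 *		    zpool_prop_to_name(ZPOOL_PROP_CAPACITY));
 *		uint64_t cap = fnvlist_lookup_uint64(pv, ZPROP_VALUE);
 *		// cap is the allocated percentage computed in
 *		// spa_prop_get_config()
 *		nvlist_free(props);
 *	}
 */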

/*
 * Validate the given pool properties nvlist and modify the list
 * for the property values to be set.
 */
static int
spa_prop_validate(spa_t *spa, nvlist_t *props)
{
	nvpair_t *elem;
	int error = 0, reset_bootfs = 0;
	uint64_t objnum = 0;
	boolean_t has_feature = B_FALSE;

	elem = NULL;
	while ((elem = nvlist_next_nvpair(props, elem)) != NULL) {
		uint64_t intval;
		char *strval, *slash, *check, *fname;
		const char *propname = nvpair_name(elem);
		zpool_prop_t prop = zpool_name_to_prop(propname);

		switch (prop) {
		case ZPOOL_PROP_INVAL:
			if (!zpool_prop_feature(propname)) {
				error = SET_ERROR(EINVAL);
				break;
			}

			/*
			 * Sanitize the input.
			 */
			if (nvpair_type(elem) != DATA_TYPE_UINT64) {
				error = SET_ERROR(EINVAL);
				break;
			}

			if (nvpair_value_uint64(elem, &intval) != 0) {
				error = SET_ERROR(EINVAL);
				break;
			}

			if (intval != 0) {
				error = SET_ERROR(EINVAL);
				break;
			}

			fname = strchr(propname, '@') + 1;
			if (zfeature_lookup_name(fname, NULL) != 0) {
				error = SET_ERROR(EINVAL);
				break;
			}

			has_feature = B_TRUE;
			break;

		case ZPOOL_PROP_VERSION:
			error = nvpair_value_uint64(elem, &intval);
			if (!error &&
			    (intval < spa_version(spa) ||
			    intval > SPA_VERSION_BEFORE_FEATURES ||
			    has_feature))
				error = SET_ERROR(EINVAL);
			break;

		case ZPOOL_PROP_DELEGATION:
		case ZPOOL_PROP_AUTOREPLACE:
		case ZPOOL_PROP_LISTSNAPS:
		case ZPOOL_PROP_AUTOEXPAND:
		case ZPOOL_PROP_AUTOTRIM:
			error = nvpair_value_uint64(elem, &intval);
			if (!error && intval > 1)
				error = SET_ERROR(EINVAL);
			break;

		case ZPOOL_PROP_MULTIHOST:
			error = nvpair_value_uint64(elem, &intval);
			if (!error && intval > 1)
				error = SET_ERROR(EINVAL);

			if (!error) {
				uint32_t hostid = zone_get_hostid(NULL);
				if (hostid)
					spa->spa_hostid = hostid;
				else
					error = SET_ERROR(ENOTSUP);
			}

			break;

		case ZPOOL_PROP_BOOTFS:
			/*
			 * If the pool version is less than SPA_VERSION_BOOTFS,
			 * or the pool is still being created (version == 0),
			 * the bootfs property cannot be set.
			 */
			if (spa_version(spa) < SPA_VERSION_BOOTFS) {
				error = SET_ERROR(ENOTSUP);
				break;
			}

			/*
			 * Make sure the vdev config is bootable
			 */
			if (!vdev_is_bootable(spa->spa_root_vdev)) {
				error = SET_ERROR(ENOTSUP);
				break;
			}

			reset_bootfs = 1;

			error = nvpair_value_string(elem, &strval);

			if (!error) {
				objset_t *os;

				if (strval == NULL || strval[0] == '\0') {
					objnum = zpool_prop_default_numeric(
					    ZPOOL_PROP_BOOTFS);
					break;
				}

				error = dmu_objset_hold(strval, FTAG, &os);
				if (error != 0)
					break;

				/* Must be ZPL. */
				if (dmu_objset_type(os) != DMU_OST_ZFS) {
					error = SET_ERROR(ENOTSUP);
				} else {
					objnum = dmu_objset_id(os);
				}
				dmu_objset_rele(os, FTAG);
			}
			break;

		case ZPOOL_PROP_FAILUREMODE:
			error = nvpair_value_uint64(elem, &intval);
			if (!error && intval > ZIO_FAILURE_MODE_PANIC)
				error = SET_ERROR(EINVAL);

			/*
			 * This is a special case which only occurs when
			 * the pool has completely failed. This allows
			 * the user to change the in-core failmode property
			 * without syncing it out to disk (I/Os might
			 * currently be blocked). We do this by returning
			 * EIO to the caller (spa_prop_set) to trick it
			 * into thinking we encountered a property validation
			 * error.
678 */ 679 if (!error && spa_suspended(spa)) { 680 spa->spa_failmode = intval; 681 error = SET_ERROR(EIO); 682 } 683 break; 684 685 case ZPOOL_PROP_CACHEFILE: 686 if ((error = nvpair_value_string(elem, &strval)) != 0) 687 break; 688 689 if (strval[0] == '\0') 690 break; 691 692 if (strcmp(strval, "none") == 0) 693 break; 694 695 if (strval[0] != '/') { 696 error = SET_ERROR(EINVAL); 697 break; 698 } 699 700 slash = strrchr(strval, '/'); 701 ASSERT(slash != NULL); 702 703 if (slash[1] == '\0' || strcmp(slash, "/.") == 0 || 704 strcmp(slash, "/..") == 0) 705 error = SET_ERROR(EINVAL); 706 break; 707 708 case ZPOOL_PROP_COMMENT: 709 if ((error = nvpair_value_string(elem, &strval)) != 0) 710 break; 711 for (check = strval; *check != '\0'; check++) { 712 if (!isprint(*check)) { 713 error = SET_ERROR(EINVAL); 714 break; 715 } 716 } 717 if (strlen(strval) > ZPROP_MAX_COMMENT) 718 error = SET_ERROR(E2BIG); 719 break; 720 721 default: 722 break; 723 } 724 725 if (error) 726 break; 727 } 728 729 (void) nvlist_remove_all(props, 730 zpool_prop_to_name(ZPOOL_PROP_DEDUPDITTO)); 731 732 if (!error && reset_bootfs) { 733 error = nvlist_remove(props, 734 zpool_prop_to_name(ZPOOL_PROP_BOOTFS), DATA_TYPE_STRING); 735 736 if (!error) { 737 error = nvlist_add_uint64(props, 738 zpool_prop_to_name(ZPOOL_PROP_BOOTFS), objnum); 739 } 740 } 741 742 return (error); 743 } 744 745 void 746 spa_configfile_set(spa_t *spa, nvlist_t *nvp, boolean_t need_sync) 747 { 748 char *cachefile; 749 spa_config_dirent_t *dp; 750 751 if (nvlist_lookup_string(nvp, zpool_prop_to_name(ZPOOL_PROP_CACHEFILE), 752 &cachefile) != 0) 753 return; 754 755 dp = kmem_alloc(sizeof (spa_config_dirent_t), 756 KM_SLEEP); 757 758 if (cachefile[0] == '\0') 759 dp->scd_path = spa_strdup(spa_config_path); 760 else if (strcmp(cachefile, "none") == 0) 761 dp->scd_path = NULL; 762 else 763 dp->scd_path = spa_strdup(cachefile); 764 765 list_insert_head(&spa->spa_config_list, dp); 766 if (need_sync) 767 spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE); 768 } 769 770 int 771 spa_prop_set(spa_t *spa, nvlist_t *nvp) 772 { 773 int error; 774 nvpair_t *elem = NULL; 775 boolean_t need_sync = B_FALSE; 776 777 if ((error = spa_prop_validate(spa, nvp)) != 0) 778 return (error); 779 780 while ((elem = nvlist_next_nvpair(nvp, elem)) != NULL) { 781 zpool_prop_t prop = zpool_name_to_prop(nvpair_name(elem)); 782 783 if (prop == ZPOOL_PROP_CACHEFILE || 784 prop == ZPOOL_PROP_ALTROOT || 785 prop == ZPOOL_PROP_READONLY) 786 continue; 787 788 if (prop == ZPOOL_PROP_VERSION || prop == ZPOOL_PROP_INVAL) { 789 uint64_t ver = 0; 790 791 if (prop == ZPOOL_PROP_VERSION) { 792 VERIFY(nvpair_value_uint64(elem, &ver) == 0); 793 } else { 794 ASSERT(zpool_prop_feature(nvpair_name(elem))); 795 ver = SPA_VERSION_FEATURES; 796 need_sync = B_TRUE; 797 } 798 799 /* Save time if the version is already set. */ 800 if (ver == spa_version(spa)) 801 continue; 802 803 /* 804 * In addition to the pool directory object, we might 805 * create the pool properties object, the features for 806 * read object, the features for write object, or the 807 * feature descriptions object. 
808 */ 809 error = dsl_sync_task(spa->spa_name, NULL, 810 spa_sync_version, &ver, 811 6, ZFS_SPACE_CHECK_RESERVED); 812 if (error) 813 return (error); 814 continue; 815 } 816 817 need_sync = B_TRUE; 818 break; 819 } 820 821 if (need_sync) { 822 return (dsl_sync_task(spa->spa_name, NULL, spa_sync_props, 823 nvp, 6, ZFS_SPACE_CHECK_RESERVED)); 824 } 825 826 return (0); 827 } 828 829 /* 830 * If the bootfs property value is dsobj, clear it. 831 */ 832 void 833 spa_prop_clear_bootfs(spa_t *spa, uint64_t dsobj, dmu_tx_t *tx) 834 { 835 if (spa->spa_bootfs == dsobj && spa->spa_pool_props_object != 0) { 836 VERIFY(zap_remove(spa->spa_meta_objset, 837 spa->spa_pool_props_object, 838 zpool_prop_to_name(ZPOOL_PROP_BOOTFS), tx) == 0); 839 spa->spa_bootfs = 0; 840 } 841 } 842 843 static int 844 spa_change_guid_check(void *arg, dmu_tx_t *tx) 845 { 846 uint64_t *newguid __maybe_unused = arg; 847 spa_t *spa = dmu_tx_pool(tx)->dp_spa; 848 vdev_t *rvd = spa->spa_root_vdev; 849 uint64_t vdev_state; 850 851 if (spa_feature_is_active(spa, SPA_FEATURE_POOL_CHECKPOINT)) { 852 int error = (spa_has_checkpoint(spa)) ? 853 ZFS_ERR_CHECKPOINT_EXISTS : ZFS_ERR_DISCARDING_CHECKPOINT; 854 return (SET_ERROR(error)); 855 } 856 857 spa_config_enter(spa, SCL_STATE, FTAG, RW_READER); 858 vdev_state = rvd->vdev_state; 859 spa_config_exit(spa, SCL_STATE, FTAG); 860 861 if (vdev_state != VDEV_STATE_HEALTHY) 862 return (SET_ERROR(ENXIO)); 863 864 ASSERT3U(spa_guid(spa), !=, *newguid); 865 866 return (0); 867 } 868 869 static void 870 spa_change_guid_sync(void *arg, dmu_tx_t *tx) 871 { 872 uint64_t *newguid = arg; 873 spa_t *spa = dmu_tx_pool(tx)->dp_spa; 874 uint64_t oldguid; 875 vdev_t *rvd = spa->spa_root_vdev; 876 877 oldguid = spa_guid(spa); 878 879 spa_config_enter(spa, SCL_STATE, FTAG, RW_READER); 880 rvd->vdev_guid = *newguid; 881 rvd->vdev_guid_sum += (*newguid - oldguid); 882 vdev_config_dirty(rvd); 883 spa_config_exit(spa, SCL_STATE, FTAG); 884 885 spa_history_log_internal(spa, "guid change", tx, "old=%llu new=%llu", 886 (u_longlong_t)oldguid, (u_longlong_t)*newguid); 887 } 888 889 /* 890 * Change the GUID for the pool. This is done so that we can later 891 * re-import a pool built from a clone of our own vdevs. We will modify 892 * the root vdev's guid, our own pool guid, and then mark all of our 893 * vdevs dirty. Note that we must make sure that all our vdevs are 894 * online when we do this, or else any vdevs that weren't present 895 * would be orphaned from our pool. We are also going to issue a 896 * sysevent to update any watchers. 
 */
int
spa_change_guid(spa_t *spa)
{
	int error;
	uint64_t guid;

	mutex_enter(&spa->spa_vdev_top_lock);
	mutex_enter(&spa_namespace_lock);
	guid = spa_generate_guid(NULL);

	error = dsl_sync_task(spa->spa_name, spa_change_guid_check,
	    spa_change_guid_sync, &guid, 5, ZFS_SPACE_CHECK_RESERVED);

	if (error == 0) {
		spa_write_cachefile(spa, B_FALSE, B_TRUE);
		spa_event_notify(spa, NULL, NULL, ESC_ZFS_POOL_REGUID);
	}

	mutex_exit(&spa_namespace_lock);
	mutex_exit(&spa->spa_vdev_top_lock);

	return (error);
}

/*
 * ==========================================================================
 * SPA state manipulation (open/create/destroy/import/export)
 * ==========================================================================
 */

static int
spa_error_entry_compare(const void *a, const void *b)
{
	const spa_error_entry_t *sa = (const spa_error_entry_t *)a;
	const spa_error_entry_t *sb = (const spa_error_entry_t *)b;
	int ret;

	ret = memcmp(&sa->se_bookmark, &sb->se_bookmark,
	    sizeof (zbookmark_phys_t));

	return (TREE_ISIGN(ret));
}

/*
 * Utility function which retrieves copies of the current logs and
 * re-initializes them in the process.
 */
void
spa_get_errlists(spa_t *spa, avl_tree_t *last, avl_tree_t *scrub)
{
	ASSERT(MUTEX_HELD(&spa->spa_errlist_lock));

	memcpy(last, &spa->spa_errlist_last, sizeof (avl_tree_t));
	memcpy(scrub, &spa->spa_errlist_scrub, sizeof (avl_tree_t));

	avl_create(&spa->spa_errlist_scrub,
	    spa_error_entry_compare, sizeof (spa_error_entry_t),
	    offsetof(spa_error_entry_t, se_avl));
	avl_create(&spa->spa_errlist_last,
	    spa_error_entry_compare, sizeof (spa_error_entry_t),
	    offsetof(spa_error_entry_t, se_avl));
}

static void
spa_taskqs_init(spa_t *spa, zio_type_t t, zio_taskq_type_t q)
{
	const zio_taskq_info_t *ztip = &zio_taskqs[t][q];
	enum zti_modes mode = ztip->zti_mode;
	uint_t value = ztip->zti_value;
	uint_t count = ztip->zti_count;
	spa_taskqs_t *tqs = &spa->spa_zio_taskq[t][q];
	uint_t cpus, flags = TASKQ_DYNAMIC;
	boolean_t batch = B_FALSE;

	switch (mode) {
	case ZTI_MODE_FIXED:
		ASSERT3U(value, >, 0);
		break;

	case ZTI_MODE_BATCH:
		batch = B_TRUE;
		flags |= TASKQ_THREADS_CPU_PCT;
		value = MIN(zio_taskq_batch_pct, 100);
		break;

	case ZTI_MODE_SCALE:
		flags |= TASKQ_THREADS_CPU_PCT;
		/*
		 * We want more taskqs to reduce lock contention, but we want
		 * fewer for better request ordering and CPU utilization.
		 */
		cpus = MAX(1, boot_ncpus * zio_taskq_batch_pct / 100);
		if (zio_taskq_batch_tpq > 0) {
			count = MAX(1, (cpus + zio_taskq_batch_tpq / 2) /
			    zio_taskq_batch_tpq);
		} else {
			/*
			 * Prefer 6 threads per taskq, but no more taskqs
			 * than threads in them on large systems. For 80%:
			 *
			 *                  taskq    taskq   total
			 *   cpus  taskqs  percent  threads  threads
			 *  ------ ------- -------  -------  -------
			 *     1      1      80%       1        1
			 *     2      1      80%       1        1
			 *     4      1      80%       3        3
			 *     8      2      40%       3        6
			 *    16      3      27%       4       12
			 *    32      5      16%       5       25
			 *    64      7      11%       7       49
			 *   128     10       8%      10      100
			 *   256     14       6%      15      210
			 */
			count = 1 + cpus / 6;
			while (count * count > cpus)
				count--;
		}
		/*
		 * Limit each taskq to at most 100% of the CPUs so that we
		 * do not trigger an assertion in the taskq code.
		 */
		count = MAX(count, (zio_taskq_batch_pct + 99) / 100);
		value = (zio_taskq_batch_pct + count / 2) / count;
		break;

	case ZTI_MODE_NULL:
		tqs->stqs_count = 0;
		tqs->stqs_taskq = NULL;
		return;

	default:
		panic("unrecognized mode for %s_%s taskq (%u:%u) in "
		    "spa_activate()",
		    zio_type_name[t], zio_taskq_types[q], mode, value);
		break;
	}

	ASSERT3U(count, >, 0);
	tqs->stqs_count = count;
	tqs->stqs_taskq = kmem_alloc(count * sizeof (taskq_t *), KM_SLEEP);

	for (uint_t i = 0; i < count; i++) {
		taskq_t *tq;
		char name[32];

		if (count > 1)
			(void) snprintf(name, sizeof (name), "%s_%s_%u",
			    zio_type_name[t], zio_taskq_types[q], i);
		else
			(void) snprintf(name, sizeof (name), "%s_%s",
			    zio_type_name[t], zio_taskq_types[q]);

		if (zio_taskq_sysdc && spa->spa_proc != &p0) {
			if (batch)
				flags |= TASKQ_DC_BATCH;

			(void) zio_taskq_basedc;
			tq = taskq_create_sysdc(name, value, 50, INT_MAX,
			    spa->spa_proc, zio_taskq_basedc, flags);
		} else {
			pri_t pri = maxclsyspri;
			/*
			 * The write issue taskq can be extremely CPU
			 * intensive. Run it at slightly less important
			 * priority than the other taskqs.
			 *
			 * Under Linux and FreeBSD this means incrementing
			 * the priority value as opposed to platforms like
			 * illumos where it should be decremented.
			 *
			 * On FreeBSD, if priorities divided by four (RQ_PPQ)
			 * are equal then a difference between them is
			 * insignificant.
			 */
			if (t == ZIO_TYPE_WRITE && q == ZIO_TASKQ_ISSUE) {
#if defined(__linux__)
				pri++;
#elif defined(__FreeBSD__)
				pri += 4;
#else
#error "unknown OS"
#endif
			}
			tq = taskq_create_proc(name, value, pri, 50,
			    INT_MAX, spa->spa_proc, flags);
		}

		tqs->stqs_taskq[i] = tq;
	}
}

static void
spa_taskqs_fini(spa_t *spa, zio_type_t t, zio_taskq_type_t q)
{
	spa_taskqs_t *tqs = &spa->spa_zio_taskq[t][q];

	if (tqs->stqs_taskq == NULL) {
		ASSERT3U(tqs->stqs_count, ==, 0);
		return;
	}

	for (uint_t i = 0; i < tqs->stqs_count; i++) {
		ASSERT3P(tqs->stqs_taskq[i], !=, NULL);
		taskq_destroy(tqs->stqs_taskq[i]);
	}

	kmem_free(tqs->stqs_taskq, tqs->stqs_count * sizeof (taskq_t *));
	tqs->stqs_taskq = NULL;
}

/*
 * Dispatch a task to the appropriate taskq for the ZFS I/O type and priority.
 * Note that a type may have multiple discrete taskqs to avoid lock contention
 * on the taskq itself. In that case we choose which taskq at random by using
 * the low bits of gethrtime().
 */
void
spa_taskq_dispatch_ent(spa_t *spa, zio_type_t t, zio_taskq_type_t q,
    task_func_t *func, void *arg, uint_t flags, taskq_ent_t *ent)
{
	spa_taskqs_t *tqs = &spa->spa_zio_taskq[t][q];
	taskq_t *tq;

	ASSERT3P(tqs->stqs_taskq, !=, NULL);
	ASSERT3U(tqs->stqs_count, !=, 0);

	if (tqs->stqs_count == 1) {
		tq = tqs->stqs_taskq[0];
	} else {
		tq = tqs->stqs_taskq[((uint64_t)gethrtime()) % tqs->stqs_count];
	}

	taskq_dispatch_ent(tq, func, arg, flags, ent);
}
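
/*
 * Illustrative sketch (not part of the pool code): dispatching work through
 * the per-pool taskqs selected above. The callback and argument names are
 * placeholders; the zio code embeds the taskq_ent_t in the zio itself
 * rather than on the stack as shown here.
 *
 *	static void my_issue_cb(void *arg) { ... }
 *
 *	taskq_ent_t ent;
 *	taskq_init_ent(&ent);
 *	spa_taskq_dispatch_ent(spa, ZIO_TYPE_READ, ZIO_TASKQ_ISSUE,
 *	    my_issue_cb, arg, TQ_SLEEP, &ent);
 */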

/*
 * Same as spa_taskq_dispatch_ent() but block on the task until completion.
 */
void
spa_taskq_dispatch_sync(spa_t *spa, zio_type_t t, zio_taskq_type_t q,
    task_func_t *func, void *arg, uint_t flags)
{
	spa_taskqs_t *tqs = &spa->spa_zio_taskq[t][q];
	taskq_t *tq;
	taskqid_t id;

	ASSERT3P(tqs->stqs_taskq, !=, NULL);
	ASSERT3U(tqs->stqs_count, !=, 0);

	if (tqs->stqs_count == 1) {
		tq = tqs->stqs_taskq[0];
	} else {
		tq = tqs->stqs_taskq[((uint64_t)gethrtime()) % tqs->stqs_count];
	}

	id = taskq_dispatch(tq, func, arg, flags);
	if (id)
		taskq_wait_id(tq, id);
}

static void
spa_create_zio_taskqs(spa_t *spa)
{
	for (int t = 0; t < ZIO_TYPES; t++) {
		for (int q = 0; q < ZIO_TASKQ_TYPES; q++) {
			spa_taskqs_init(spa, t, q);
		}
	}
}

/*
 * Disabled until spa_thread() can be adapted for Linux.
 */
#undef HAVE_SPA_THREAD

#if defined(_KERNEL) && defined(HAVE_SPA_THREAD)
static void
spa_thread(void *arg)
{
	psetid_t zio_taskq_psrset_bind = PS_NONE;
	callb_cpr_t cprinfo;

	spa_t *spa = arg;
	user_t *pu = PTOU(curproc);

	CALLB_CPR_INIT(&cprinfo, &spa->spa_proc_lock, callb_generic_cpr,
	    spa->spa_name);

	ASSERT(curproc != &p0);
	(void) snprintf(pu->u_psargs, sizeof (pu->u_psargs),
	    "zpool-%s", spa->spa_name);
	(void) strlcpy(pu->u_comm, pu->u_psargs, sizeof (pu->u_comm));

	/* bind this thread to the requested psrset */
	if (zio_taskq_psrset_bind != PS_NONE) {
		pool_lock();
		mutex_enter(&cpu_lock);
		mutex_enter(&pidlock);
		mutex_enter(&curproc->p_lock);

		if (cpupart_bind_thread(curthread, zio_taskq_psrset_bind,
		    0, NULL, NULL) == 0) {
			curthread->t_bind_pset = zio_taskq_psrset_bind;
		} else {
			cmn_err(CE_WARN,
			    "Couldn't bind process for zfs pool \"%s\" to "
			    "pset %d\n", spa->spa_name, zio_taskq_psrset_bind);
		}

		mutex_exit(&curproc->p_lock);
		mutex_exit(&pidlock);
		mutex_exit(&cpu_lock);
		pool_unlock();
	}

	if (zio_taskq_sysdc) {
		sysdc_thread_enter(curthread, 100, 0);
	}

	spa->spa_proc = curproc;
	spa->spa_did = curthread->t_did;

	spa_create_zio_taskqs(spa);

	mutex_enter(&spa->spa_proc_lock);
	ASSERT(spa->spa_proc_state == SPA_PROC_CREATED);

	spa->spa_proc_state = SPA_PROC_ACTIVE;
	cv_broadcast(&spa->spa_proc_cv);

	CALLB_CPR_SAFE_BEGIN(&cprinfo);
	while (spa->spa_proc_state == SPA_PROC_ACTIVE)
		cv_wait(&spa->spa_proc_cv, &spa->spa_proc_lock);
	CALLB_CPR_SAFE_END(&cprinfo, &spa->spa_proc_lock);

	ASSERT(spa->spa_proc_state == SPA_PROC_DEACTIVATE);
	spa->spa_proc_state = SPA_PROC_GONE;
	spa->spa_proc = &p0;
	cv_broadcast(&spa->spa_proc_cv);
	CALLB_CPR_EXIT(&cprinfo);	/* drops spa_proc_lock */

	mutex_enter(&curproc->p_lock);
	lwp_exit();
}
#endif

/*
 * Activate an uninitialized pool.
1243 */ 1244 static void 1245 spa_activate(spa_t *spa, spa_mode_t mode) 1246 { 1247 ASSERT(spa->spa_state == POOL_STATE_UNINITIALIZED); 1248 1249 spa->spa_state = POOL_STATE_ACTIVE; 1250 spa->spa_mode = mode; 1251 spa->spa_read_spacemaps = spa_mode_readable_spacemaps; 1252 1253 spa->spa_normal_class = metaslab_class_create(spa, &zfs_metaslab_ops); 1254 spa->spa_log_class = metaslab_class_create(spa, &zfs_metaslab_ops); 1255 spa->spa_embedded_log_class = 1256 metaslab_class_create(spa, &zfs_metaslab_ops); 1257 spa->spa_special_class = metaslab_class_create(spa, &zfs_metaslab_ops); 1258 spa->spa_dedup_class = metaslab_class_create(spa, &zfs_metaslab_ops); 1259 1260 /* Try to create a covering process */ 1261 mutex_enter(&spa->spa_proc_lock); 1262 ASSERT(spa->spa_proc_state == SPA_PROC_NONE); 1263 ASSERT(spa->spa_proc == &p0); 1264 spa->spa_did = 0; 1265 1266 (void) spa_create_process; 1267 #ifdef HAVE_SPA_THREAD 1268 /* Only create a process if we're going to be around a while. */ 1269 if (spa_create_process && strcmp(spa->spa_name, TRYIMPORT_NAME) != 0) { 1270 if (newproc(spa_thread, (caddr_t)spa, syscid, maxclsyspri, 1271 NULL, 0) == 0) { 1272 spa->spa_proc_state = SPA_PROC_CREATED; 1273 while (spa->spa_proc_state == SPA_PROC_CREATED) { 1274 cv_wait(&spa->spa_proc_cv, 1275 &spa->spa_proc_lock); 1276 } 1277 ASSERT(spa->spa_proc_state == SPA_PROC_ACTIVE); 1278 ASSERT(spa->spa_proc != &p0); 1279 ASSERT(spa->spa_did != 0); 1280 } else { 1281 #ifdef _KERNEL 1282 cmn_err(CE_WARN, 1283 "Couldn't create process for zfs pool \"%s\"\n", 1284 spa->spa_name); 1285 #endif 1286 } 1287 } 1288 #endif /* HAVE_SPA_THREAD */ 1289 mutex_exit(&spa->spa_proc_lock); 1290 1291 /* If we didn't create a process, we need to create our taskqs. */ 1292 if (spa->spa_proc == &p0) { 1293 spa_create_zio_taskqs(spa); 1294 } 1295 1296 for (size_t i = 0; i < TXG_SIZE; i++) { 1297 spa->spa_txg_zio[i] = zio_root(spa, NULL, NULL, 1298 ZIO_FLAG_CANFAIL); 1299 } 1300 1301 list_create(&spa->spa_config_dirty_list, sizeof (vdev_t), 1302 offsetof(vdev_t, vdev_config_dirty_node)); 1303 list_create(&spa->spa_evicting_os_list, sizeof (objset_t), 1304 offsetof(objset_t, os_evicting_node)); 1305 list_create(&spa->spa_state_dirty_list, sizeof (vdev_t), 1306 offsetof(vdev_t, vdev_state_dirty_node)); 1307 1308 txg_list_create(&spa->spa_vdev_txg_list, spa, 1309 offsetof(struct vdev, vdev_txg_node)); 1310 1311 avl_create(&spa->spa_errlist_scrub, 1312 spa_error_entry_compare, sizeof (spa_error_entry_t), 1313 offsetof(spa_error_entry_t, se_avl)); 1314 avl_create(&spa->spa_errlist_last, 1315 spa_error_entry_compare, sizeof (spa_error_entry_t), 1316 offsetof(spa_error_entry_t, se_avl)); 1317 avl_create(&spa->spa_errlist_healed, 1318 spa_error_entry_compare, sizeof (spa_error_entry_t), 1319 offsetof(spa_error_entry_t, se_avl)); 1320 1321 spa_activate_os(spa); 1322 1323 spa_keystore_init(&spa->spa_keystore); 1324 1325 /* 1326 * This taskq is used to perform zvol-minor-related tasks 1327 * asynchronously. This has several advantages, including easy 1328 * resolution of various deadlocks. 1329 * 1330 * The taskq must be single threaded to ensure tasks are always 1331 * processed in the order in which they were dispatched. 1332 * 1333 * A taskq per pool allows one to keep the pools independent. 1334 * This way if one pool is suspended, it will not impact another. 1335 * 1336 * The preferred location to dispatch a zvol minor task is a sync 1337 * task. 
In this context, there is easy access to the spa_t and minimal 1338 * error handling is required because the sync task must succeed. 1339 */ 1340 spa->spa_zvol_taskq = taskq_create("z_zvol", 1, defclsyspri, 1341 1, INT_MAX, 0); 1342 1343 /* 1344 * Taskq dedicated to prefetcher threads: this is used to prevent the 1345 * pool traverse code from monopolizing the global (and limited) 1346 * system_taskq by inappropriately scheduling long running tasks on it. 1347 */ 1348 spa->spa_prefetch_taskq = taskq_create("z_prefetch", 100, 1349 defclsyspri, 1, INT_MAX, TASKQ_DYNAMIC | TASKQ_THREADS_CPU_PCT); 1350 1351 /* 1352 * The taskq to upgrade datasets in this pool. Currently used by 1353 * feature SPA_FEATURE_USEROBJ_ACCOUNTING/SPA_FEATURE_PROJECT_QUOTA. 1354 */ 1355 spa->spa_upgrade_taskq = taskq_create("z_upgrade", 100, 1356 defclsyspri, 1, INT_MAX, TASKQ_DYNAMIC | TASKQ_THREADS_CPU_PCT); 1357 } 1358 1359 /* 1360 * Opposite of spa_activate(). 1361 */ 1362 static void 1363 spa_deactivate(spa_t *spa) 1364 { 1365 ASSERT(spa->spa_sync_on == B_FALSE); 1366 ASSERT(spa->spa_dsl_pool == NULL); 1367 ASSERT(spa->spa_root_vdev == NULL); 1368 ASSERT(spa->spa_async_zio_root == NULL); 1369 ASSERT(spa->spa_state != POOL_STATE_UNINITIALIZED); 1370 1371 spa_evicting_os_wait(spa); 1372 1373 if (spa->spa_zvol_taskq) { 1374 taskq_destroy(spa->spa_zvol_taskq); 1375 spa->spa_zvol_taskq = NULL; 1376 } 1377 1378 if (spa->spa_prefetch_taskq) { 1379 taskq_destroy(spa->spa_prefetch_taskq); 1380 spa->spa_prefetch_taskq = NULL; 1381 } 1382 1383 if (spa->spa_upgrade_taskq) { 1384 taskq_destroy(spa->spa_upgrade_taskq); 1385 spa->spa_upgrade_taskq = NULL; 1386 } 1387 1388 txg_list_destroy(&spa->spa_vdev_txg_list); 1389 1390 list_destroy(&spa->spa_config_dirty_list); 1391 list_destroy(&spa->spa_evicting_os_list); 1392 list_destroy(&spa->spa_state_dirty_list); 1393 1394 taskq_cancel_id(system_delay_taskq, spa->spa_deadman_tqid); 1395 1396 for (int t = 0; t < ZIO_TYPES; t++) { 1397 for (int q = 0; q < ZIO_TASKQ_TYPES; q++) { 1398 spa_taskqs_fini(spa, t, q); 1399 } 1400 } 1401 1402 for (size_t i = 0; i < TXG_SIZE; i++) { 1403 ASSERT3P(spa->spa_txg_zio[i], !=, NULL); 1404 VERIFY0(zio_wait(spa->spa_txg_zio[i])); 1405 spa->spa_txg_zio[i] = NULL; 1406 } 1407 1408 metaslab_class_destroy(spa->spa_normal_class); 1409 spa->spa_normal_class = NULL; 1410 1411 metaslab_class_destroy(spa->spa_log_class); 1412 spa->spa_log_class = NULL; 1413 1414 metaslab_class_destroy(spa->spa_embedded_log_class); 1415 spa->spa_embedded_log_class = NULL; 1416 1417 metaslab_class_destroy(spa->spa_special_class); 1418 spa->spa_special_class = NULL; 1419 1420 metaslab_class_destroy(spa->spa_dedup_class); 1421 spa->spa_dedup_class = NULL; 1422 1423 /* 1424 * If this was part of an import or the open otherwise failed, we may 1425 * still have errors left in the queues. Empty them just in case. 
1426 */ 1427 spa_errlog_drain(spa); 1428 avl_destroy(&spa->spa_errlist_scrub); 1429 avl_destroy(&spa->spa_errlist_last); 1430 avl_destroy(&spa->spa_errlist_healed); 1431 1432 spa_keystore_fini(&spa->spa_keystore); 1433 1434 spa->spa_state = POOL_STATE_UNINITIALIZED; 1435 1436 mutex_enter(&spa->spa_proc_lock); 1437 if (spa->spa_proc_state != SPA_PROC_NONE) { 1438 ASSERT(spa->spa_proc_state == SPA_PROC_ACTIVE); 1439 spa->spa_proc_state = SPA_PROC_DEACTIVATE; 1440 cv_broadcast(&spa->spa_proc_cv); 1441 while (spa->spa_proc_state == SPA_PROC_DEACTIVATE) { 1442 ASSERT(spa->spa_proc != &p0); 1443 cv_wait(&spa->spa_proc_cv, &spa->spa_proc_lock); 1444 } 1445 ASSERT(spa->spa_proc_state == SPA_PROC_GONE); 1446 spa->spa_proc_state = SPA_PROC_NONE; 1447 } 1448 ASSERT(spa->spa_proc == &p0); 1449 mutex_exit(&spa->spa_proc_lock); 1450 1451 /* 1452 * We want to make sure spa_thread() has actually exited the ZFS 1453 * module, so that the module can't be unloaded out from underneath 1454 * it. 1455 */ 1456 if (spa->spa_did != 0) { 1457 thread_join(spa->spa_did); 1458 spa->spa_did = 0; 1459 } 1460 1461 spa_deactivate_os(spa); 1462 1463 } 1464 1465 /* 1466 * Verify a pool configuration, and construct the vdev tree appropriately. This 1467 * will create all the necessary vdevs in the appropriate layout, with each vdev 1468 * in the CLOSED state. This will prep the pool before open/creation/import. 1469 * All vdev validation is done by the vdev_alloc() routine. 1470 */ 1471 int 1472 spa_config_parse(spa_t *spa, vdev_t **vdp, nvlist_t *nv, vdev_t *parent, 1473 uint_t id, int atype) 1474 { 1475 nvlist_t **child; 1476 uint_t children; 1477 int error; 1478 1479 if ((error = vdev_alloc(spa, vdp, nv, parent, id, atype)) != 0) 1480 return (error); 1481 1482 if ((*vdp)->vdev_ops->vdev_op_leaf) 1483 return (0); 1484 1485 error = nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN, 1486 &child, &children); 1487 1488 if (error == ENOENT) 1489 return (0); 1490 1491 if (error) { 1492 vdev_free(*vdp); 1493 *vdp = NULL; 1494 return (SET_ERROR(EINVAL)); 1495 } 1496 1497 for (int c = 0; c < children; c++) { 1498 vdev_t *vd; 1499 if ((error = spa_config_parse(spa, &vd, child[c], *vdp, c, 1500 atype)) != 0) { 1501 vdev_free(*vdp); 1502 *vdp = NULL; 1503 return (error); 1504 } 1505 } 1506 1507 ASSERT(*vdp != NULL); 1508 1509 return (0); 1510 } 1511 1512 static boolean_t 1513 spa_should_flush_logs_on_unload(spa_t *spa) 1514 { 1515 if (!spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP)) 1516 return (B_FALSE); 1517 1518 if (!spa_writeable(spa)) 1519 return (B_FALSE); 1520 1521 if (!spa->spa_sync_on) 1522 return (B_FALSE); 1523 1524 if (spa_state(spa) != POOL_STATE_EXPORTED) 1525 return (B_FALSE); 1526 1527 if (zfs_keep_log_spacemaps_at_export) 1528 return (B_FALSE); 1529 1530 return (B_TRUE); 1531 } 1532 1533 /* 1534 * Opens a transaction that will set the flag that will instruct 1535 * spa_sync to attempt to flush all the metaslabs for that txg. 
1536 */ 1537 static void 1538 spa_unload_log_sm_flush_all(spa_t *spa) 1539 { 1540 dmu_tx_t *tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir); 1541 VERIFY0(dmu_tx_assign(tx, TXG_WAIT)); 1542 1543 ASSERT3U(spa->spa_log_flushall_txg, ==, 0); 1544 spa->spa_log_flushall_txg = dmu_tx_get_txg(tx); 1545 1546 dmu_tx_commit(tx); 1547 txg_wait_synced(spa_get_dsl(spa), spa->spa_log_flushall_txg); 1548 } 1549 1550 static void 1551 spa_unload_log_sm_metadata(spa_t *spa) 1552 { 1553 void *cookie = NULL; 1554 spa_log_sm_t *sls; 1555 while ((sls = avl_destroy_nodes(&spa->spa_sm_logs_by_txg, 1556 &cookie)) != NULL) { 1557 VERIFY0(sls->sls_mscount); 1558 kmem_free(sls, sizeof (spa_log_sm_t)); 1559 } 1560 1561 for (log_summary_entry_t *e = list_head(&spa->spa_log_summary); 1562 e != NULL; e = list_head(&spa->spa_log_summary)) { 1563 VERIFY0(e->lse_mscount); 1564 list_remove(&spa->spa_log_summary, e); 1565 kmem_free(e, sizeof (log_summary_entry_t)); 1566 } 1567 1568 spa->spa_unflushed_stats.sus_nblocks = 0; 1569 spa->spa_unflushed_stats.sus_memused = 0; 1570 spa->spa_unflushed_stats.sus_blocklimit = 0; 1571 } 1572 1573 static void 1574 spa_destroy_aux_threads(spa_t *spa) 1575 { 1576 if (spa->spa_condense_zthr != NULL) { 1577 zthr_destroy(spa->spa_condense_zthr); 1578 spa->spa_condense_zthr = NULL; 1579 } 1580 if (spa->spa_checkpoint_discard_zthr != NULL) { 1581 zthr_destroy(spa->spa_checkpoint_discard_zthr); 1582 spa->spa_checkpoint_discard_zthr = NULL; 1583 } 1584 if (spa->spa_livelist_delete_zthr != NULL) { 1585 zthr_destroy(spa->spa_livelist_delete_zthr); 1586 spa->spa_livelist_delete_zthr = NULL; 1587 } 1588 if (spa->spa_livelist_condense_zthr != NULL) { 1589 zthr_destroy(spa->spa_livelist_condense_zthr); 1590 spa->spa_livelist_condense_zthr = NULL; 1591 } 1592 } 1593 1594 /* 1595 * Opposite of spa_load(). 1596 */ 1597 static void 1598 spa_unload(spa_t *spa) 1599 { 1600 ASSERT(MUTEX_HELD(&spa_namespace_lock)); 1601 ASSERT(spa_state(spa) != POOL_STATE_UNINITIALIZED); 1602 1603 spa_import_progress_remove(spa_guid(spa)); 1604 spa_load_note(spa, "UNLOADING"); 1605 1606 spa_wake_waiters(spa); 1607 1608 /* 1609 * If we have set the spa_final_txg, we have already performed the 1610 * tasks below in spa_export_common(). We should not redo it here since 1611 * we delay the final TXGs beyond what spa_final_txg is set at. 1612 */ 1613 if (spa->spa_final_txg == UINT64_MAX) { 1614 /* 1615 * If the log space map feature is enabled and the pool is 1616 * getting exported (but not destroyed), we want to spend some 1617 * time flushing as many metaslabs as we can in an attempt to 1618 * destroy log space maps and save import time. 1619 */ 1620 if (spa_should_flush_logs_on_unload(spa)) 1621 spa_unload_log_sm_flush_all(spa); 1622 1623 /* 1624 * Stop async tasks. 1625 */ 1626 spa_async_suspend(spa); 1627 1628 if (spa->spa_root_vdev) { 1629 vdev_t *root_vdev = spa->spa_root_vdev; 1630 vdev_initialize_stop_all(root_vdev, 1631 VDEV_INITIALIZE_ACTIVE); 1632 vdev_trim_stop_all(root_vdev, VDEV_TRIM_ACTIVE); 1633 vdev_autotrim_stop_all(spa); 1634 vdev_rebuild_stop_all(spa); 1635 } 1636 } 1637 1638 /* 1639 * Stop syncing. 1640 */ 1641 if (spa->spa_sync_on) { 1642 txg_sync_stop(spa->spa_dsl_pool); 1643 spa->spa_sync_on = B_FALSE; 1644 } 1645 1646 /* 1647 * This ensures that there is no async metaslab prefetching 1648 * while we attempt to unload the spa. 
1649 */ 1650 if (spa->spa_root_vdev != NULL) { 1651 for (int c = 0; c < spa->spa_root_vdev->vdev_children; c++) { 1652 vdev_t *vc = spa->spa_root_vdev->vdev_child[c]; 1653 if (vc->vdev_mg != NULL) 1654 taskq_wait(vc->vdev_mg->mg_taskq); 1655 } 1656 } 1657 1658 if (spa->spa_mmp.mmp_thread) 1659 mmp_thread_stop(spa); 1660 1661 /* 1662 * Wait for any outstanding async I/O to complete. 1663 */ 1664 if (spa->spa_async_zio_root != NULL) { 1665 for (int i = 0; i < max_ncpus; i++) 1666 (void) zio_wait(spa->spa_async_zio_root[i]); 1667 kmem_free(spa->spa_async_zio_root, max_ncpus * sizeof (void *)); 1668 spa->spa_async_zio_root = NULL; 1669 } 1670 1671 if (spa->spa_vdev_removal != NULL) { 1672 spa_vdev_removal_destroy(spa->spa_vdev_removal); 1673 spa->spa_vdev_removal = NULL; 1674 } 1675 1676 spa_destroy_aux_threads(spa); 1677 1678 spa_condense_fini(spa); 1679 1680 bpobj_close(&spa->spa_deferred_bpobj); 1681 1682 spa_config_enter(spa, SCL_ALL, spa, RW_WRITER); 1683 1684 /* 1685 * Close all vdevs. 1686 */ 1687 if (spa->spa_root_vdev) 1688 vdev_free(spa->spa_root_vdev); 1689 ASSERT(spa->spa_root_vdev == NULL); 1690 1691 /* 1692 * Close the dsl pool. 1693 */ 1694 if (spa->spa_dsl_pool) { 1695 dsl_pool_close(spa->spa_dsl_pool); 1696 spa->spa_dsl_pool = NULL; 1697 spa->spa_meta_objset = NULL; 1698 } 1699 1700 ddt_unload(spa); 1701 spa_unload_log_sm_metadata(spa); 1702 1703 /* 1704 * Drop and purge level 2 cache 1705 */ 1706 spa_l2cache_drop(spa); 1707 1708 for (int i = 0; i < spa->spa_spares.sav_count; i++) 1709 vdev_free(spa->spa_spares.sav_vdevs[i]); 1710 if (spa->spa_spares.sav_vdevs) { 1711 kmem_free(spa->spa_spares.sav_vdevs, 1712 spa->spa_spares.sav_count * sizeof (void *)); 1713 spa->spa_spares.sav_vdevs = NULL; 1714 } 1715 if (spa->spa_spares.sav_config) { 1716 nvlist_free(spa->spa_spares.sav_config); 1717 spa->spa_spares.sav_config = NULL; 1718 } 1719 spa->spa_spares.sav_count = 0; 1720 1721 for (int i = 0; i < spa->spa_l2cache.sav_count; i++) { 1722 vdev_clear_stats(spa->spa_l2cache.sav_vdevs[i]); 1723 vdev_free(spa->spa_l2cache.sav_vdevs[i]); 1724 } 1725 if (spa->spa_l2cache.sav_vdevs) { 1726 kmem_free(spa->spa_l2cache.sav_vdevs, 1727 spa->spa_l2cache.sav_count * sizeof (void *)); 1728 spa->spa_l2cache.sav_vdevs = NULL; 1729 } 1730 if (spa->spa_l2cache.sav_config) { 1731 nvlist_free(spa->spa_l2cache.sav_config); 1732 spa->spa_l2cache.sav_config = NULL; 1733 } 1734 spa->spa_l2cache.sav_count = 0; 1735 1736 spa->spa_async_suspended = 0; 1737 1738 spa->spa_indirect_vdevs_loaded = B_FALSE; 1739 1740 if (spa->spa_comment != NULL) { 1741 spa_strfree(spa->spa_comment); 1742 spa->spa_comment = NULL; 1743 } 1744 if (spa->spa_compatibility != NULL) { 1745 spa_strfree(spa->spa_compatibility); 1746 spa->spa_compatibility = NULL; 1747 } 1748 1749 spa_config_exit(spa, SCL_ALL, spa); 1750 } 1751 1752 /* 1753 * Load (or re-load) the current list of vdevs describing the active spares for 1754 * this pool. When this is called, we have some form of basic information in 1755 * 'spa_spares.sav_config'. We parse this into vdevs, try to open them, and 1756 * then re-generate a more complete list including status information. 1757 */ 1758 void 1759 spa_load_spares(spa_t *spa) 1760 { 1761 nvlist_t **spares; 1762 uint_t nspares; 1763 int i; 1764 vdev_t *vd, *tvd; 1765 1766 #ifndef _KERNEL 1767 /* 1768 * zdb opens both the current state of the pool and the 1769 * checkpointed state (if present), with a different spa_t. 
	 *
	 * As spare vdevs are shared among open pools, we skip loading
	 * them when we load the checkpointed state of the pool.
	 */
	if (!spa_writeable(spa))
		return;
#endif

	ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);

	/*
	 * First, close and free any existing spare vdevs.
	 */
	for (i = 0; i < spa->spa_spares.sav_count; i++) {
		vd = spa->spa_spares.sav_vdevs[i];

		/* Undo the call to spa_activate() below */
		if ((tvd = spa_lookup_by_guid(spa, vd->vdev_guid,
		    B_FALSE)) != NULL && tvd->vdev_isspare)
			spa_spare_remove(tvd);
		vdev_close(vd);
		vdev_free(vd);
	}

	if (spa->spa_spares.sav_vdevs)
		kmem_free(spa->spa_spares.sav_vdevs,
		    spa->spa_spares.sav_count * sizeof (void *));

	if (spa->spa_spares.sav_config == NULL)
		nspares = 0;
	else
		VERIFY0(nvlist_lookup_nvlist_array(spa->spa_spares.sav_config,
		    ZPOOL_CONFIG_SPARES, &spares, &nspares));

	spa->spa_spares.sav_count = (int)nspares;
	spa->spa_spares.sav_vdevs = NULL;

	if (nspares == 0)
		return;

	/*
	 * Construct the array of vdevs, opening them to get status in the
	 * process. For each spare, there are potentially two different vdev_t
	 * structures associated with it: one in the list of spares (used only
	 * for basic validation purposes) and one in the active vdev
	 * configuration (if it's spared in). During this phase we open and
	 * validate each vdev on the spare list. If the vdev also exists in the
	 * active configuration, then we also mark this vdev as an active spare.
	 */
	spa->spa_spares.sav_vdevs = kmem_zalloc(nspares * sizeof (void *),
	    KM_SLEEP);
	for (i = 0; i < spa->spa_spares.sav_count; i++) {
		VERIFY(spa_config_parse(spa, &vd, spares[i], NULL, 0,
		    VDEV_ALLOC_SPARE) == 0);
		ASSERT(vd != NULL);

		spa->spa_spares.sav_vdevs[i] = vd;

		if ((tvd = spa_lookup_by_guid(spa, vd->vdev_guid,
		    B_FALSE)) != NULL) {
			if (!tvd->vdev_isspare)
				spa_spare_add(tvd);

			/*
			 * We only mark the spare active if we were successfully
			 * able to load the vdev. Otherwise, importing a pool
			 * with a bad active spare would result in strange
			 * behavior, because multiple pools would think the
			 * spare is actively in use.
			 *
			 * There is a vulnerability here to an equally bizarre
			 * circumstance, where a dead active spare is later
			 * brought back to life (onlined or otherwise). Given
			 * the rarity of this scenario, and the extra complexity
			 * it adds, we ignore the possibility.
			 */
			if (!vdev_is_dead(tvd))
				spa_spare_activate(tvd);
		}

		vd->vdev_top = vd;
		vd->vdev_aux = &spa->spa_spares;

		if (vdev_open(vd) != 0)
			continue;

		if (vdev_validate_aux(vd) == 0)
			spa_spare_add(vd);
	}

	/*
	 * Recompute the stashed list of spares, with status information
	 * this time.
1863 */ 1864 fnvlist_remove(spa->spa_spares.sav_config, ZPOOL_CONFIG_SPARES); 1865 1866 spares = kmem_alloc(spa->spa_spares.sav_count * sizeof (void *), 1867 KM_SLEEP); 1868 for (i = 0; i < spa->spa_spares.sav_count; i++) 1869 spares[i] = vdev_config_generate(spa, 1870 spa->spa_spares.sav_vdevs[i], B_TRUE, VDEV_CONFIG_SPARE); 1871 fnvlist_add_nvlist_array(spa->spa_spares.sav_config, 1872 ZPOOL_CONFIG_SPARES, (const nvlist_t * const *)spares, 1873 spa->spa_spares.sav_count); 1874 for (i = 0; i < spa->spa_spares.sav_count; i++) 1875 nvlist_free(spares[i]); 1876 kmem_free(spares, spa->spa_spares.sav_count * sizeof (void *)); 1877 } 1878 1879 /* 1880 * Load (or re-load) the current list of vdevs describing the active l2cache for 1881 * this pool. When this is called, we have some form of basic information in 1882 * 'spa_l2cache.sav_config'. We parse this into vdevs, try to open them, and 1883 * then re-generate a more complete list including status information. 1884 * Devices which are already active have their details maintained, and are 1885 * not re-opened. 1886 */ 1887 void 1888 spa_load_l2cache(spa_t *spa) 1889 { 1890 nvlist_t **l2cache = NULL; 1891 uint_t nl2cache; 1892 int i, j, oldnvdevs; 1893 uint64_t guid; 1894 vdev_t *vd, **oldvdevs, **newvdevs; 1895 spa_aux_vdev_t *sav = &spa->spa_l2cache; 1896 1897 #ifndef _KERNEL 1898 /* 1899 * zdb opens both the current state of the pool and the 1900 * checkpointed state (if present), with a different spa_t. 1901 * 1902 * As L2 caches are part of the ARC which is shared among open 1903 * pools, we skip loading them when we load the checkpointed 1904 * state of the pool. 1905 */ 1906 if (!spa_writeable(spa)) 1907 return; 1908 #endif 1909 1910 ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL); 1911 1912 oldvdevs = sav->sav_vdevs; 1913 oldnvdevs = sav->sav_count; 1914 sav->sav_vdevs = NULL; 1915 sav->sav_count = 0; 1916 1917 if (sav->sav_config == NULL) { 1918 nl2cache = 0; 1919 newvdevs = NULL; 1920 goto out; 1921 } 1922 1923 VERIFY0(nvlist_lookup_nvlist_array(sav->sav_config, 1924 ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache)); 1925 newvdevs = kmem_alloc(nl2cache * sizeof (void *), KM_SLEEP); 1926 1927 /* 1928 * Process new nvlist of vdevs. 1929 */ 1930 for (i = 0; i < nl2cache; i++) { 1931 guid = fnvlist_lookup_uint64(l2cache[i], ZPOOL_CONFIG_GUID); 1932 1933 newvdevs[i] = NULL; 1934 for (j = 0; j < oldnvdevs; j++) { 1935 vd = oldvdevs[j]; 1936 if (vd != NULL && guid == vd->vdev_guid) { 1937 /* 1938 * Retain previous vdev for add/remove ops. 1939 */ 1940 newvdevs[i] = vd; 1941 oldvdevs[j] = NULL; 1942 break; 1943 } 1944 } 1945 1946 if (newvdevs[i] == NULL) { 1947 /* 1948 * Create new vdev 1949 */ 1950 VERIFY(spa_config_parse(spa, &vd, l2cache[i], NULL, 0, 1951 VDEV_ALLOC_L2CACHE) == 0); 1952 ASSERT(vd != NULL); 1953 newvdevs[i] = vd; 1954 1955 /* 1956 * Commit this vdev as an l2cache device, 1957 * even if it fails to open. 1958 */ 1959 spa_l2cache_add(vd); 1960 1961 vd->vdev_top = vd; 1962 vd->vdev_aux = sav; 1963 1964 spa_l2cache_activate(vd); 1965 1966 if (vdev_open(vd) != 0) 1967 continue; 1968 1969 (void) vdev_validate_aux(vd); 1970 1971 if (!vdev_is_dead(vd)) 1972 l2arc_add_vdev(spa, vd); 1973 1974 /* 1975 * Upon cache device addition to a pool or pool 1976 * creation with a cache device or if the header 1977 * of the device is invalid we issue an async 1978 * TRIM command for the whole device which will 1979 * execute if l2arc_trim_ahead > 0. 
1980 */ 1981 spa_async_request(spa, SPA_ASYNC_L2CACHE_TRIM); 1982 } 1983 } 1984 1985 sav->sav_vdevs = newvdevs; 1986 sav->sav_count = (int)nl2cache; 1987 1988 /* 1989 * Recompute the stashed list of l2cache devices, with status 1990 * information this time. 1991 */ 1992 fnvlist_remove(sav->sav_config, ZPOOL_CONFIG_L2CACHE); 1993 1994 if (sav->sav_count > 0) 1995 l2cache = kmem_alloc(sav->sav_count * sizeof (void *), 1996 KM_SLEEP); 1997 for (i = 0; i < sav->sav_count; i++) 1998 l2cache[i] = vdev_config_generate(spa, 1999 sav->sav_vdevs[i], B_TRUE, VDEV_CONFIG_L2CACHE); 2000 fnvlist_add_nvlist_array(sav->sav_config, ZPOOL_CONFIG_L2CACHE, 2001 (const nvlist_t * const *)l2cache, sav->sav_count); 2002 2003 out: 2004 /* 2005 * Purge vdevs that were dropped 2006 */ 2007 for (i = 0; i < oldnvdevs; i++) { 2008 uint64_t pool; 2009 2010 vd = oldvdevs[i]; 2011 if (vd != NULL) { 2012 ASSERT(vd->vdev_isl2cache); 2013 2014 if (spa_l2cache_exists(vd->vdev_guid, &pool) && 2015 pool != 0ULL && l2arc_vdev_present(vd)) 2016 l2arc_remove_vdev(vd); 2017 vdev_clear_stats(vd); 2018 vdev_free(vd); 2019 } 2020 } 2021 2022 if (oldvdevs) 2023 kmem_free(oldvdevs, oldnvdevs * sizeof (void *)); 2024 2025 for (i = 0; i < sav->sav_count; i++) 2026 nvlist_free(l2cache[i]); 2027 if (sav->sav_count) 2028 kmem_free(l2cache, sav->sav_count * sizeof (void *)); 2029 } 2030 2031 static int 2032 load_nvlist(spa_t *spa, uint64_t obj, nvlist_t **value) 2033 { 2034 dmu_buf_t *db; 2035 char *packed = NULL; 2036 size_t nvsize = 0; 2037 int error; 2038 *value = NULL; 2039 2040 error = dmu_bonus_hold(spa->spa_meta_objset, obj, FTAG, &db); 2041 if (error) 2042 return (error); 2043 2044 nvsize = *(uint64_t *)db->db_data; 2045 dmu_buf_rele(db, FTAG); 2046 2047 packed = vmem_alloc(nvsize, KM_SLEEP); 2048 error = dmu_read(spa->spa_meta_objset, obj, 0, nvsize, packed, 2049 DMU_READ_PREFETCH); 2050 if (error == 0) 2051 error = nvlist_unpack(packed, nvsize, value, 0); 2052 vmem_free(packed, nvsize); 2053 2054 return (error); 2055 } 2056 2057 /* 2058 * Concrete top-level vdevs that are not missing and are not logs. At every 2059 * spa_sync we write new uberblocks to at least SPA_SYNC_MIN_VDEVS core tvds. 2060 */ 2061 static uint64_t 2062 spa_healthy_core_tvds(spa_t *spa) 2063 { 2064 vdev_t *rvd = spa->spa_root_vdev; 2065 uint64_t tvds = 0; 2066 2067 for (uint64_t i = 0; i < rvd->vdev_children; i++) { 2068 vdev_t *vd = rvd->vdev_child[i]; 2069 if (vd->vdev_islog) 2070 continue; 2071 if (vdev_is_concrete(vd) && !vdev_is_dead(vd)) 2072 tvds++; 2073 } 2074 2075 return (tvds); 2076 } 2077 2078 /* 2079 * Checks to see if the given vdev could not be opened, in which case we post a 2080 * sysevent to notify the autoreplace code that the device has been removed. 2081 */ 2082 static void 2083 spa_check_removed(vdev_t *vd) 2084 { 2085 for (uint64_t c = 0; c < vd->vdev_children; c++) 2086 spa_check_removed(vd->vdev_child[c]); 2087 2088 if (vd->vdev_ops->vdev_op_leaf && vdev_is_dead(vd) && 2089 vdev_is_concrete(vd)) { 2090 zfs_post_autoreplace(vd->vdev_spa, vd); 2091 spa_event_notify(vd->vdev_spa, vd, NULL, ESC_ZFS_VDEV_CHECK); 2092 } 2093 } 2094 2095 static int 2096 spa_check_for_missing_logs(spa_t *spa) 2097 { 2098 vdev_t *rvd = spa->spa_root_vdev; 2099 2100 /* 2101 * If we're doing a normal import, then build up any additional 2102 * diagnostic information about missing log devices. 2103 * We'll pass this up to the user for further processing. 
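 *
 * Schematically, the nvlist that the code below hangs off spa_load_info
 * looks like this (a sketch of what is constructed here, not an extra
 * structure):
 *
 *      ZPOOL_CONFIG_MISSING_DEVICES
 *          ZPOOL_CONFIG_CHILDREN = [ <config of missing log tvd>, ... ]
 *
 * where each child entry is produced by
 * vdev_config_generate(spa, tvd, B_FALSE, VDEV_CONFIG_MISSING).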
2104 */ 2105 if (!(spa->spa_import_flags & ZFS_IMPORT_MISSING_LOG)) { 2106 nvlist_t **child, *nv; 2107 uint64_t idx = 0; 2108 2109 child = kmem_alloc(rvd->vdev_children * sizeof (nvlist_t *), 2110 KM_SLEEP); 2111 nv = fnvlist_alloc(); 2112 2113 for (uint64_t c = 0; c < rvd->vdev_children; c++) { 2114 vdev_t *tvd = rvd->vdev_child[c]; 2115 2116 /* 2117 * We consider a device as missing only if it failed 2118 * to open (i.e. offline or faulted is not considered 2119 * as missing). 2120 */ 2121 if (tvd->vdev_islog && 2122 tvd->vdev_state == VDEV_STATE_CANT_OPEN) { 2123 child[idx++] = vdev_config_generate(spa, tvd, 2124 B_FALSE, VDEV_CONFIG_MISSING); 2125 } 2126 } 2127 2128 if (idx > 0) { 2129 fnvlist_add_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN, 2130 (const nvlist_t * const *)child, idx); 2131 fnvlist_add_nvlist(spa->spa_load_info, 2132 ZPOOL_CONFIG_MISSING_DEVICES, nv); 2133 2134 for (uint64_t i = 0; i < idx; i++) 2135 nvlist_free(child[i]); 2136 } 2137 nvlist_free(nv); 2138 kmem_free(child, rvd->vdev_children * sizeof (char **)); 2139 2140 if (idx > 0) { 2141 spa_load_failed(spa, "some log devices are missing"); 2142 vdev_dbgmsg_print_tree(rvd, 2); 2143 return (SET_ERROR(ENXIO)); 2144 } 2145 } else { 2146 for (uint64_t c = 0; c < rvd->vdev_children; c++) { 2147 vdev_t *tvd = rvd->vdev_child[c]; 2148 2149 if (tvd->vdev_islog && 2150 tvd->vdev_state == VDEV_STATE_CANT_OPEN) { 2151 spa_set_log_state(spa, SPA_LOG_CLEAR); 2152 spa_load_note(spa, "some log devices are " 2153 "missing, ZIL is dropped."); 2154 vdev_dbgmsg_print_tree(rvd, 2); 2155 break; 2156 } 2157 } 2158 } 2159 2160 return (0); 2161 } 2162 2163 /* 2164 * Check for missing log devices 2165 */ 2166 static boolean_t 2167 spa_check_logs(spa_t *spa) 2168 { 2169 boolean_t rv = B_FALSE; 2170 dsl_pool_t *dp = spa_get_dsl(spa); 2171 2172 switch (spa->spa_log_state) { 2173 default: 2174 break; 2175 case SPA_LOG_MISSING: 2176 /* need to recheck in case slog has been restored */ 2177 case SPA_LOG_UNKNOWN: 2178 rv = (dmu_objset_find_dp(dp, dp->dp_root_dir_obj, 2179 zil_check_log_chain, NULL, DS_FIND_CHILDREN) != 0); 2180 if (rv) 2181 spa_set_log_state(spa, SPA_LOG_MISSING); 2182 break; 2183 } 2184 return (rv); 2185 } 2186 2187 /* 2188 * Passivate any log vdevs (note, does not apply to embedded log metaslabs). 2189 */ 2190 static boolean_t 2191 spa_passivate_log(spa_t *spa) 2192 { 2193 vdev_t *rvd = spa->spa_root_vdev; 2194 boolean_t slog_found = B_FALSE; 2195 2196 ASSERT(spa_config_held(spa, SCL_ALLOC, RW_WRITER)); 2197 2198 for (int c = 0; c < rvd->vdev_children; c++) { 2199 vdev_t *tvd = rvd->vdev_child[c]; 2200 2201 if (tvd->vdev_islog) { 2202 ASSERT3P(tvd->vdev_log_mg, ==, NULL); 2203 metaslab_group_passivate(tvd->vdev_mg); 2204 slog_found = B_TRUE; 2205 } 2206 } 2207 2208 return (slog_found); 2209 } 2210 2211 /* 2212 * Activate any log vdevs (note, does not apply to embedded log metaslabs). 
2213 */ 2214 static void 2215 spa_activate_log(spa_t *spa) 2216 { 2217 vdev_t *rvd = spa->spa_root_vdev; 2218 2219 ASSERT(spa_config_held(spa, SCL_ALLOC, RW_WRITER)); 2220 2221 for (int c = 0; c < rvd->vdev_children; c++) { 2222 vdev_t *tvd = rvd->vdev_child[c]; 2223 2224 if (tvd->vdev_islog) { 2225 ASSERT3P(tvd->vdev_log_mg, ==, NULL); 2226 metaslab_group_activate(tvd->vdev_mg); 2227 } 2228 } 2229 } 2230 2231 int 2232 spa_reset_logs(spa_t *spa) 2233 { 2234 int error; 2235 2236 error = dmu_objset_find(spa_name(spa), zil_reset, 2237 NULL, DS_FIND_CHILDREN); 2238 if (error == 0) { 2239 /* 2240 * We successfully offlined the log device, sync out the 2241 * current txg so that the "stubby" block can be removed 2242 * by zil_sync(). 2243 */ 2244 txg_wait_synced(spa->spa_dsl_pool, 0); 2245 } 2246 return (error); 2247 } 2248 2249 static void 2250 spa_aux_check_removed(spa_aux_vdev_t *sav) 2251 { 2252 for (int i = 0; i < sav->sav_count; i++) 2253 spa_check_removed(sav->sav_vdevs[i]); 2254 } 2255 2256 void 2257 spa_claim_notify(zio_t *zio) 2258 { 2259 spa_t *spa = zio->io_spa; 2260 2261 if (zio->io_error) 2262 return; 2263 2264 mutex_enter(&spa->spa_props_lock); /* any mutex will do */ 2265 if (spa->spa_claim_max_txg < zio->io_bp->blk_birth) 2266 spa->spa_claim_max_txg = zio->io_bp->blk_birth; 2267 mutex_exit(&spa->spa_props_lock); 2268 } 2269 2270 typedef struct spa_load_error { 2271 boolean_t sle_verify_data; 2272 uint64_t sle_meta_count; 2273 uint64_t sle_data_count; 2274 } spa_load_error_t; 2275 2276 static void 2277 spa_load_verify_done(zio_t *zio) 2278 { 2279 blkptr_t *bp = zio->io_bp; 2280 spa_load_error_t *sle = zio->io_private; 2281 dmu_object_type_t type = BP_GET_TYPE(bp); 2282 int error = zio->io_error; 2283 spa_t *spa = zio->io_spa; 2284 2285 abd_free(zio->io_abd); 2286 if (error) { 2287 if ((BP_GET_LEVEL(bp) != 0 || DMU_OT_IS_METADATA(type)) && 2288 type != DMU_OT_INTENT_LOG) 2289 atomic_inc_64(&sle->sle_meta_count); 2290 else 2291 atomic_inc_64(&sle->sle_data_count); 2292 } 2293 2294 mutex_enter(&spa->spa_scrub_lock); 2295 spa->spa_load_verify_bytes -= BP_GET_PSIZE(bp); 2296 cv_broadcast(&spa->spa_scrub_io_cv); 2297 mutex_exit(&spa->spa_scrub_lock); 2298 } 2299 2300 /* 2301 * Maximum number of inflight bytes is the log2 fraction of the arc size. 2302 * By default, we set it to 1/16th of the arc. 2303 */ 2304 static int spa_load_verify_shift = 4; 2305 static int spa_load_verify_metadata = B_TRUE; 2306 static int spa_load_verify_data = B_TRUE; 2307 2308 static int 2309 spa_load_verify_cb(spa_t *spa, zilog_t *zilog, const blkptr_t *bp, 2310 const zbookmark_phys_t *zb, const dnode_phys_t *dnp, void *arg) 2311 { 2312 zio_t *rio = arg; 2313 spa_load_error_t *sle = rio->io_private; 2314 2315 (void) zilog, (void) dnp; 2316 2317 /* 2318 * Note: normally this routine will not be called if 2319 * spa_load_verify_metadata is not set. However, it may be useful 2320 * to manually set the flag after the traversal has begun. 2321 */ 2322 if (!spa_load_verify_metadata) 2323 return (0); 2324 2325 /* 2326 * Sanity check the block pointer in order to detect obvious damage 2327 * before using the contents in subsequent checks or in zio_read(). 2328 * When damaged consider it to be a metadata error since we cannot 2329 * trust the BP_GET_TYPE and BP_GET_LEVEL values. 
2330 */ 2331 if (!zfs_blkptr_verify(spa, bp, B_FALSE, BLK_VERIFY_LOG)) { 2332 atomic_inc_64(&sle->sle_meta_count); 2333 return (0); 2334 } 2335 2336 if (zb->zb_level == ZB_DNODE_LEVEL || BP_IS_HOLE(bp) || 2337 BP_IS_EMBEDDED(bp) || BP_IS_REDACTED(bp)) 2338 return (0); 2339 2340 if (!BP_IS_METADATA(bp) && 2341 (!spa_load_verify_data || !sle->sle_verify_data)) 2342 return (0); 2343 2344 uint64_t maxinflight_bytes = 2345 arc_target_bytes() >> spa_load_verify_shift; 2346 size_t size = BP_GET_PSIZE(bp); 2347 2348 mutex_enter(&spa->spa_scrub_lock); 2349 while (spa->spa_load_verify_bytes >= maxinflight_bytes) 2350 cv_wait(&spa->spa_scrub_io_cv, &spa->spa_scrub_lock); 2351 spa->spa_load_verify_bytes += size; 2352 mutex_exit(&spa->spa_scrub_lock); 2353 2354 zio_nowait(zio_read(rio, spa, bp, abd_alloc_for_io(size, B_FALSE), size, 2355 spa_load_verify_done, rio->io_private, ZIO_PRIORITY_SCRUB, 2356 ZIO_FLAG_SPECULATIVE | ZIO_FLAG_CANFAIL | 2357 ZIO_FLAG_SCRUB | ZIO_FLAG_RAW, zb)); 2358 return (0); 2359 } 2360 2361 static int 2362 verify_dataset_name_len(dsl_pool_t *dp, dsl_dataset_t *ds, void *arg) 2363 { 2364 (void) dp, (void) arg; 2365 2366 if (dsl_dataset_namelen(ds) >= ZFS_MAX_DATASET_NAME_LEN) 2367 return (SET_ERROR(ENAMETOOLONG)); 2368 2369 return (0); 2370 } 2371 2372 static int 2373 spa_load_verify(spa_t *spa) 2374 { 2375 zio_t *rio; 2376 spa_load_error_t sle = { 0 }; 2377 zpool_load_policy_t policy; 2378 boolean_t verify_ok = B_FALSE; 2379 int error = 0; 2380 2381 zpool_get_load_policy(spa->spa_config, &policy); 2382 2383 if (policy.zlp_rewind & ZPOOL_NEVER_REWIND || 2384 policy.zlp_maxmeta == UINT64_MAX) 2385 return (0); 2386 2387 dsl_pool_config_enter(spa->spa_dsl_pool, FTAG); 2388 error = dmu_objset_find_dp(spa->spa_dsl_pool, 2389 spa->spa_dsl_pool->dp_root_dir_obj, verify_dataset_name_len, NULL, 2390 DS_FIND_CHILDREN); 2391 dsl_pool_config_exit(spa->spa_dsl_pool, FTAG); 2392 if (error != 0) 2393 return (error); 2394 2395 /* 2396 * Verify data only if we are rewinding or the error limit was set. 2397 * Otherwise nothing except dbgmsg cares about it, so don't waste the time. 2398 */ 2399 sle.sle_verify_data = (policy.zlp_rewind & ZPOOL_REWIND_MASK) || 2400 (policy.zlp_maxdata < UINT64_MAX); 2401 2402 rio = zio_root(spa, NULL, &sle, 2403 ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE); 2404 2405 if (spa_load_verify_metadata) { 2406 if (spa->spa_extreme_rewind) { 2407 spa_load_note(spa, "performing a complete scan of the " 2408 "pool since extreme rewind is on.
This may take " 2409 "a very long time.\n (spa_load_verify_data=%u, " 2410 "spa_load_verify_metadata=%u)", 2411 spa_load_verify_data, spa_load_verify_metadata); 2412 } 2413 2414 error = traverse_pool(spa, spa->spa_verify_min_txg, 2415 TRAVERSE_PRE | TRAVERSE_PREFETCH_METADATA | 2416 TRAVERSE_NO_DECRYPT, spa_load_verify_cb, rio); 2417 } 2418 2419 (void) zio_wait(rio); 2420 ASSERT0(spa->spa_load_verify_bytes); 2421 2422 spa->spa_load_meta_errors = sle.sle_meta_count; 2423 spa->spa_load_data_errors = sle.sle_data_count; 2424 2425 if (sle.sle_meta_count != 0 || sle.sle_data_count != 0) { 2426 spa_load_note(spa, "spa_load_verify found %llu metadata errors " 2427 "and %llu data errors", (u_longlong_t)sle.sle_meta_count, 2428 (u_longlong_t)sle.sle_data_count); 2429 } 2430 2431 if (spa_load_verify_dryrun || 2432 (!error && sle.sle_meta_count <= policy.zlp_maxmeta && 2433 sle.sle_data_count <= policy.zlp_maxdata)) { 2434 int64_t loss = 0; 2435 2436 verify_ok = B_TRUE; 2437 spa->spa_load_txg = spa->spa_uberblock.ub_txg; 2438 spa->spa_load_txg_ts = spa->spa_uberblock.ub_timestamp; 2439 2440 loss = spa->spa_last_ubsync_txg_ts - spa->spa_load_txg_ts; 2441 fnvlist_add_uint64(spa->spa_load_info, ZPOOL_CONFIG_LOAD_TIME, 2442 spa->spa_load_txg_ts); 2443 fnvlist_add_int64(spa->spa_load_info, ZPOOL_CONFIG_REWIND_TIME, 2444 loss); 2445 fnvlist_add_uint64(spa->spa_load_info, 2446 ZPOOL_CONFIG_LOAD_META_ERRORS, sle.sle_meta_count); 2447 fnvlist_add_uint64(spa->spa_load_info, 2448 ZPOOL_CONFIG_LOAD_DATA_ERRORS, sle.sle_data_count); 2449 } else { 2450 spa->spa_load_max_txg = spa->spa_uberblock.ub_txg; 2451 } 2452 2453 if (spa_load_verify_dryrun) 2454 return (0); 2455 2456 if (error) { 2457 if (error != ENXIO && error != EIO) 2458 error = SET_ERROR(EIO); 2459 return (error); 2460 } 2461 2462 return (verify_ok ? 0 : EIO); 2463 } 2464 2465 /* 2466 * Find a value in the pool props object. 2467 */ 2468 static void 2469 spa_prop_find(spa_t *spa, zpool_prop_t prop, uint64_t *val) 2470 { 2471 (void) zap_lookup(spa->spa_meta_objset, spa->spa_pool_props_object, 2472 zpool_prop_to_name(prop), sizeof (uint64_t), 1, val); 2473 } 2474 2475 /* 2476 * Find a value in the pool directory object. 
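 *
 * Typical usage, as seen later in this file when the MOS config object
 * number is loaded:
 *
 *      if (spa_dir_prop(spa, DMU_POOL_CONFIG, &spa->spa_config_object,
 *          B_TRUE) != 0)
 *              return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));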
2477 */ 2478 static int 2479 spa_dir_prop(spa_t *spa, const char *name, uint64_t *val, boolean_t log_enoent) 2480 { 2481 int error = zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT, 2482 name, sizeof (uint64_t), 1, val); 2483 2484 if (error != 0 && (error != ENOENT || log_enoent)) { 2485 spa_load_failed(spa, "couldn't get '%s' value in MOS directory " 2486 "[error=%d]", name, error); 2487 } 2488 2489 return (error); 2490 } 2491 2492 static int 2493 spa_vdev_err(vdev_t *vdev, vdev_aux_t aux, int err) 2494 { 2495 vdev_set_state(vdev, B_TRUE, VDEV_STATE_CANT_OPEN, aux); 2496 return (SET_ERROR(err)); 2497 } 2498 2499 boolean_t 2500 spa_livelist_delete_check(spa_t *spa) 2501 { 2502 return (spa->spa_livelists_to_delete != 0); 2503 } 2504 2505 static boolean_t 2506 spa_livelist_delete_cb_check(void *arg, zthr_t *z) 2507 { 2508 (void) z; 2509 spa_t *spa = arg; 2510 return (spa_livelist_delete_check(spa)); 2511 } 2512 2513 static int 2514 delete_blkptr_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx) 2515 { 2516 spa_t *spa = arg; 2517 zio_free(spa, tx->tx_txg, bp); 2518 dsl_dir_diduse_space(tx->tx_pool->dp_free_dir, DD_USED_HEAD, 2519 -bp_get_dsize_sync(spa, bp), 2520 -BP_GET_PSIZE(bp), -BP_GET_UCSIZE(bp), tx); 2521 return (0); 2522 } 2523 2524 static int 2525 dsl_get_next_livelist_obj(objset_t *os, uint64_t zap_obj, uint64_t *llp) 2526 { 2527 int err; 2528 zap_cursor_t zc; 2529 zap_attribute_t za; 2530 zap_cursor_init(&zc, os, zap_obj); 2531 err = zap_cursor_retrieve(&zc, &za); 2532 zap_cursor_fini(&zc); 2533 if (err == 0) 2534 *llp = za.za_first_integer; 2535 return (err); 2536 } 2537 2538 /* 2539 * Components of livelist deletion that must be performed in syncing 2540 * context: freeing block pointers and updating the pool-wide data 2541 * structures to indicate how much work is left to do 2542 */ 2543 typedef struct sublist_delete_arg { 2544 spa_t *spa; 2545 dsl_deadlist_t *ll; 2546 uint64_t key; 2547 bplist_t *to_free; 2548 } sublist_delete_arg_t; 2549 2550 static void 2551 sublist_delete_sync(void *arg, dmu_tx_t *tx) 2552 { 2553 sublist_delete_arg_t *sda = arg; 2554 spa_t *spa = sda->spa; 2555 dsl_deadlist_t *ll = sda->ll; 2556 uint64_t key = sda->key; 2557 bplist_t *to_free = sda->to_free; 2558 2559 bplist_iterate(to_free, delete_blkptr_cb, spa, tx); 2560 dsl_deadlist_remove_entry(ll, key, tx); 2561 } 2562 2563 typedef struct livelist_delete_arg { 2564 spa_t *spa; 2565 uint64_t ll_obj; 2566 uint64_t zap_obj; 2567 } livelist_delete_arg_t; 2568 2569 static void 2570 livelist_delete_sync(void *arg, dmu_tx_t *tx) 2571 { 2572 livelist_delete_arg_t *lda = arg; 2573 spa_t *spa = lda->spa; 2574 uint64_t ll_obj = lda->ll_obj; 2575 uint64_t zap_obj = lda->zap_obj; 2576 objset_t *mos = spa->spa_meta_objset; 2577 uint64_t count; 2578 2579 /* free the livelist and decrement the feature count */ 2580 VERIFY0(zap_remove_int(mos, zap_obj, ll_obj, tx)); 2581 dsl_deadlist_free(mos, ll_obj, tx); 2582 spa_feature_decr(spa, SPA_FEATURE_LIVELIST, tx); 2583 VERIFY0(zap_count(mos, zap_obj, &count)); 2584 if (count == 0) { 2585 /* no more livelists to delete */ 2586 VERIFY0(zap_remove(mos, DMU_POOL_DIRECTORY_OBJECT, 2587 DMU_POOL_DELETED_CLONES, tx)); 2588 VERIFY0(zap_destroy(mos, zap_obj, tx)); 2589 spa->spa_livelists_to_delete = 0; 2590 spa_notify_waiters(spa); 2591 } 2592 } 2593 2594 /* 2595 * Load in the value for the livelist to be removed and open it. Then, 2596 * load its first sublist and determine which block pointers should actually 2597 * be freed. 
Then, call a synctask which performs the actual frees and updates 2598 * the pool-wide livelist data. 2599 */ 2600 static void 2601 spa_livelist_delete_cb(void *arg, zthr_t *z) 2602 { 2603 spa_t *spa = arg; 2604 uint64_t ll_obj = 0, count; 2605 objset_t *mos = spa->spa_meta_objset; 2606 uint64_t zap_obj = spa->spa_livelists_to_delete; 2607 /* 2608 * Determine the next livelist to delete. This function should only 2609 * be called if there is at least one deleted clone. 2610 */ 2611 VERIFY0(dsl_get_next_livelist_obj(mos, zap_obj, &ll_obj)); 2612 VERIFY0(zap_count(mos, ll_obj, &count)); 2613 if (count > 0) { 2614 dsl_deadlist_t *ll; 2615 dsl_deadlist_entry_t *dle; 2616 bplist_t to_free; 2617 ll = kmem_zalloc(sizeof (dsl_deadlist_t), KM_SLEEP); 2618 dsl_deadlist_open(ll, mos, ll_obj); 2619 dle = dsl_deadlist_first(ll); 2620 ASSERT3P(dle, !=, NULL); 2621 bplist_create(&to_free); 2622 int err = dsl_process_sub_livelist(&dle->dle_bpobj, &to_free, 2623 z, NULL); 2624 if (err == 0) { 2625 sublist_delete_arg_t sync_arg = { 2626 .spa = spa, 2627 .ll = ll, 2628 .key = dle->dle_mintxg, 2629 .to_free = &to_free 2630 }; 2631 zfs_dbgmsg("deleting sublist (id %llu) from" 2632 " livelist %llu, %lld remaining", 2633 (u_longlong_t)dle->dle_bpobj.bpo_object, 2634 (u_longlong_t)ll_obj, (longlong_t)count - 1); 2635 VERIFY0(dsl_sync_task(spa_name(spa), NULL, 2636 sublist_delete_sync, &sync_arg, 0, 2637 ZFS_SPACE_CHECK_DESTROY)); 2638 } else { 2639 VERIFY3U(err, ==, EINTR); 2640 } 2641 bplist_clear(&to_free); 2642 bplist_destroy(&to_free); 2643 dsl_deadlist_close(ll); 2644 kmem_free(ll, sizeof (dsl_deadlist_t)); 2645 } else { 2646 livelist_delete_arg_t sync_arg = { 2647 .spa = spa, 2648 .ll_obj = ll_obj, 2649 .zap_obj = zap_obj 2650 }; 2651 zfs_dbgmsg("deletion of livelist %llu completed", 2652 (u_longlong_t)ll_obj); 2653 VERIFY0(dsl_sync_task(spa_name(spa), NULL, livelist_delete_sync, 2654 &sync_arg, 0, ZFS_SPACE_CHECK_DESTROY)); 2655 } 2656 } 2657 2658 static void 2659 spa_start_livelist_destroy_thread(spa_t *spa) 2660 { 2661 ASSERT3P(spa->spa_livelist_delete_zthr, ==, NULL); 2662 spa->spa_livelist_delete_zthr = 2663 zthr_create("z_livelist_destroy", 2664 spa_livelist_delete_cb_check, spa_livelist_delete_cb, spa, 2665 minclsyspri); 2666 } 2667 2668 typedef struct livelist_new_arg { 2669 bplist_t *allocs; 2670 bplist_t *frees; 2671 } livelist_new_arg_t; 2672 2673 static int 2674 livelist_track_new_cb(void *arg, const blkptr_t *bp, boolean_t bp_freed, 2675 dmu_tx_t *tx) 2676 { 2677 ASSERT(tx == NULL); 2678 livelist_new_arg_t *lna = arg; 2679 if (bp_freed) { 2680 bplist_append(lna->frees, bp); 2681 } else { 2682 bplist_append(lna->allocs, bp); 2683 zfs_livelist_condense_new_alloc++; 2684 } 2685 return (0); 2686 } 2687 2688 typedef struct livelist_condense_arg { 2689 spa_t *spa; 2690 bplist_t to_keep; 2691 uint64_t first_size; 2692 uint64_t next_size; 2693 } livelist_condense_arg_t; 2694 2695 static void 2696 spa_livelist_condense_sync(void *arg, dmu_tx_t *tx) 2697 { 2698 livelist_condense_arg_t *lca = arg; 2699 spa_t *spa = lca->spa; 2700 bplist_t new_frees; 2701 dsl_dataset_t *ds = spa->spa_to_condense.ds; 2702 2703 /* Have we been cancelled? 
*/ 2704 if (spa->spa_to_condense.cancelled) { 2705 zfs_livelist_condense_sync_cancel++; 2706 goto out; 2707 } 2708 2709 dsl_deadlist_entry_t *first = spa->spa_to_condense.first; 2710 dsl_deadlist_entry_t *next = spa->spa_to_condense.next; 2711 dsl_deadlist_t *ll = &ds->ds_dir->dd_livelist; 2712 2713 /* 2714 * It's possible that the livelist was changed while the zthr was 2715 * running. Therefore, we need to check for new blkptrs in the two 2716 * entries being condensed and continue to track them in the livelist. 2717 * Because of the way we handle remapped blkptrs (see dbuf_remap_impl), 2718 * it's possible that the newly added blkptrs are FREEs or ALLOCs so 2719 * we need to sort them into two different bplists. 2720 */ 2721 uint64_t first_obj = first->dle_bpobj.bpo_object; 2722 uint64_t next_obj = next->dle_bpobj.bpo_object; 2723 uint64_t cur_first_size = first->dle_bpobj.bpo_phys->bpo_num_blkptrs; 2724 uint64_t cur_next_size = next->dle_bpobj.bpo_phys->bpo_num_blkptrs; 2725 2726 bplist_create(&new_frees); 2727 livelist_new_arg_t new_bps = { 2728 .allocs = &lca->to_keep, 2729 .frees = &new_frees, 2730 }; 2731 2732 if (cur_first_size > lca->first_size) { 2733 VERIFY0(livelist_bpobj_iterate_from_nofree(&first->dle_bpobj, 2734 livelist_track_new_cb, &new_bps, lca->first_size)); 2735 } 2736 if (cur_next_size > lca->next_size) { 2737 VERIFY0(livelist_bpobj_iterate_from_nofree(&next->dle_bpobj, 2738 livelist_track_new_cb, &new_bps, lca->next_size)); 2739 } 2740 2741 dsl_deadlist_clear_entry(first, ll, tx); 2742 ASSERT(bpobj_is_empty(&first->dle_bpobj)); 2743 dsl_deadlist_remove_entry(ll, next->dle_mintxg, tx); 2744 2745 bplist_iterate(&lca->to_keep, dsl_deadlist_insert_alloc_cb, ll, tx); 2746 bplist_iterate(&new_frees, dsl_deadlist_insert_free_cb, ll, tx); 2747 bplist_destroy(&new_frees); 2748 2749 char dsname[ZFS_MAX_DATASET_NAME_LEN]; 2750 dsl_dataset_name(ds, dsname); 2751 zfs_dbgmsg("txg %llu condensing livelist of %s (id %llu), bpobj %llu " 2752 "(%llu blkptrs) and bpobj %llu (%llu blkptrs) -> bpobj %llu " 2753 "(%llu blkptrs)", (u_longlong_t)tx->tx_txg, dsname, 2754 (u_longlong_t)ds->ds_object, (u_longlong_t)first_obj, 2755 (u_longlong_t)cur_first_size, (u_longlong_t)next_obj, 2756 (u_longlong_t)cur_next_size, 2757 (u_longlong_t)first->dle_bpobj.bpo_object, 2758 (u_longlong_t)first->dle_bpobj.bpo_phys->bpo_num_blkptrs); 2759 out: 2760 dmu_buf_rele(ds->ds_dbuf, spa); 2761 spa->spa_to_condense.ds = NULL; 2762 bplist_clear(&lca->to_keep); 2763 bplist_destroy(&lca->to_keep); 2764 kmem_free(lca, sizeof (livelist_condense_arg_t)); 2765 spa->spa_to_condense.syncing = B_FALSE; 2766 } 2767 2768 static void 2769 spa_livelist_condense_cb(void *arg, zthr_t *t) 2770 { 2771 while (zfs_livelist_condense_zthr_pause && 2772 !(zthr_has_waiters(t) || zthr_iscancelled(t))) 2773 delay(1); 2774 2775 spa_t *spa = arg; 2776 dsl_deadlist_entry_t *first = spa->spa_to_condense.first; 2777 dsl_deadlist_entry_t *next = spa->spa_to_condense.next; 2778 uint64_t first_size, next_size; 2779 2780 livelist_condense_arg_t *lca = 2781 kmem_alloc(sizeof (livelist_condense_arg_t), KM_SLEEP); 2782 bplist_create(&lca->to_keep); 2783 2784 /* 2785 * Process the livelists (matching FREEs and ALLOCs) in open context 2786 * so we have minimal work in syncing context to condense. 2787 * 2788 * We save bpobj sizes (first_size and next_size) to use later in 2789 * syncing context to determine if entries were added to these sublists 2790 * while in open context. 
This is possible because the clone is still 2791 * active and open for normal writes and we want to make sure the new, 2792 * unprocessed blockpointers are inserted into the livelist normally. 2793 * 2794 * Note that dsl_process_sub_livelist() both stores the size (the 2795 * number of block pointers) and iterates over the entries while the 2796 * bpobj's lock is held, so the sizes returned to us are consistent 2797 * with what was actually processed. 2798 */ 2799 int err = dsl_process_sub_livelist(&first->dle_bpobj, &lca->to_keep, t, 2800 &first_size); 2801 if (err == 0) 2802 err = dsl_process_sub_livelist(&next->dle_bpobj, &lca->to_keep, 2803 t, &next_size); 2804 2805 if (err == 0) { 2806 while (zfs_livelist_condense_sync_pause && 2807 !(zthr_has_waiters(t) || zthr_iscancelled(t))) 2808 delay(1); 2809 2810 dmu_tx_t *tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir); 2811 dmu_tx_mark_netfree(tx); 2812 dmu_tx_hold_space(tx, 1); 2813 err = dmu_tx_assign(tx, TXG_NOWAIT | TXG_NOTHROTTLE); 2814 if (err == 0) { 2815 /* 2816 * Prevent the condense zthr from restarting before 2817 * the synctask completes. 2818 */ 2819 spa->spa_to_condense.syncing = B_TRUE; 2820 lca->spa = spa; 2821 lca->first_size = first_size; 2822 lca->next_size = next_size; 2823 dsl_sync_task_nowait(spa_get_dsl(spa), 2824 spa_livelist_condense_sync, lca, tx); 2825 dmu_tx_commit(tx); 2826 return; 2827 } 2828 } 2829 /* 2830 * Condensing cannot continue: either it was externally stopped or 2831 * we were unable to assign to a tx because the pool has run out of 2832 * space. In the second case, we'll just end up trying to condense 2833 * again in a later txg. 2834 */ 2835 ASSERT(err != 0); 2836 bplist_clear(&lca->to_keep); 2837 bplist_destroy(&lca->to_keep); 2838 kmem_free(lca, sizeof (livelist_condense_arg_t)); 2839 dmu_buf_rele(spa->spa_to_condense.ds->ds_dbuf, spa); 2840 spa->spa_to_condense.ds = NULL; 2841 if (err == EINTR) 2842 zfs_livelist_condense_zthr_cancel++; 2843 } 2844 2845 /* 2846 * Check that there is something to condense but that a condense is not 2847 * already in progress and that condensing has not been cancelled.
2848 */ 2849 static boolean_t 2850 spa_livelist_condense_cb_check(void *arg, zthr_t *z) 2851 { 2852 (void) z; 2853 spa_t *spa = arg; 2854 if ((spa->spa_to_condense.ds != NULL) && 2855 (spa->spa_to_condense.syncing == B_FALSE) && 2856 (spa->spa_to_condense.cancelled == B_FALSE)) { 2857 return (B_TRUE); 2858 } 2859 return (B_FALSE); 2860 } 2861 2862 static void 2863 spa_start_livelist_condensing_thread(spa_t *spa) 2864 { 2865 spa->spa_to_condense.ds = NULL; 2866 spa->spa_to_condense.first = NULL; 2867 spa->spa_to_condense.next = NULL; 2868 spa->spa_to_condense.syncing = B_FALSE; 2869 spa->spa_to_condense.cancelled = B_FALSE; 2870 2871 ASSERT3P(spa->spa_livelist_condense_zthr, ==, NULL); 2872 spa->spa_livelist_condense_zthr = 2873 zthr_create("z_livelist_condense", 2874 spa_livelist_condense_cb_check, 2875 spa_livelist_condense_cb, spa, minclsyspri); 2876 } 2877 2878 static void 2879 spa_spawn_aux_threads(spa_t *spa) 2880 { 2881 ASSERT(spa_writeable(spa)); 2882 2883 ASSERT(MUTEX_HELD(&spa_namespace_lock)); 2884 2885 spa_start_indirect_condensing_thread(spa); 2886 spa_start_livelist_destroy_thread(spa); 2887 spa_start_livelist_condensing_thread(spa); 2888 2889 ASSERT3P(spa->spa_checkpoint_discard_zthr, ==, NULL); 2890 spa->spa_checkpoint_discard_zthr = 2891 zthr_create("z_checkpoint_discard", 2892 spa_checkpoint_discard_thread_check, 2893 spa_checkpoint_discard_thread, spa, minclsyspri); 2894 } 2895 2896 /* 2897 * Fix up config after a partly-completed split. This is done with the 2898 * ZPOOL_CONFIG_SPLIT nvlist. Both the splitting pool and the split-off 2899 * pool have that entry in their config, but only the splitting one contains 2900 * a list of all the guids of the vdevs that are being split off. 2901 * 2902 * This function determines what to do with that list: either rejoin 2903 * all the disks to the pool, or complete the splitting process. To attempt 2904 * the rejoin, each disk that is offlined is marked online again, and 2905 * we do a reopen() call. If the vdev label for every disk that was 2906 * marked online indicates it was successfully split off (VDEV_AUX_SPLIT_POOL) 2907 * then we call vdev_split() on each disk, and complete the split. 2908 * 2909 * Otherwise we leave the config alone, with all the vdevs in place in 2910 * the original pool. 2911 */ 2912 static void 2913 spa_try_repair(spa_t *spa, nvlist_t *config) 2914 { 2915 uint_t extracted; 2916 uint64_t *glist; 2917 uint_t i, gcount; 2918 nvlist_t *nvl; 2919 vdev_t **vd; 2920 boolean_t attempt_reopen; 2921 2922 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_SPLIT, &nvl) != 0) 2923 return; 2924 2925 /* check that the config is complete */ 2926 if (nvlist_lookup_uint64_array(nvl, ZPOOL_CONFIG_SPLIT_LIST, 2927 &glist, &gcount) != 0) 2928 return; 2929 2930 vd = kmem_zalloc(gcount * sizeof (vdev_t *), KM_SLEEP); 2931 2932 /* attempt to online all the vdevs & validate */ 2933 attempt_reopen = B_TRUE; 2934 for (i = 0; i < gcount; i++) { 2935 if (glist[i] == 0) /* vdev is hole */ 2936 continue; 2937 2938 vd[i] = spa_lookup_by_guid(spa, glist[i], B_FALSE); 2939 if (vd[i] == NULL) { 2940 /* 2941 * Don't bother attempting to reopen the disks; 2942 * just do the split. 
2943 */ 2944 attempt_reopen = B_FALSE; 2945 } else { 2946 /* attempt to re-online it */ 2947 vd[i]->vdev_offline = B_FALSE; 2948 } 2949 } 2950 2951 if (attempt_reopen) { 2952 vdev_reopen(spa->spa_root_vdev); 2953 2954 /* check each device to see what state it's in */ 2955 for (extracted = 0, i = 0; i < gcount; i++) { 2956 if (vd[i] != NULL && 2957 vd[i]->vdev_stat.vs_aux != VDEV_AUX_SPLIT_POOL) 2958 break; 2959 ++extracted; 2960 } 2961 } 2962 2963 /* 2964 * If every disk has been moved to the new pool, or if we never 2965 * even attempted to look at them, then we split them off for 2966 * good. 2967 */ 2968 if (!attempt_reopen || gcount == extracted) { 2969 for (i = 0; i < gcount; i++) 2970 if (vd[i] != NULL) 2971 vdev_split(vd[i]); 2972 vdev_reopen(spa->spa_root_vdev); 2973 } 2974 2975 kmem_free(vd, gcount * sizeof (vdev_t *)); 2976 } 2977 2978 static int 2979 spa_load(spa_t *spa, spa_load_state_t state, spa_import_type_t type) 2980 { 2981 const char *ereport = FM_EREPORT_ZFS_POOL; 2982 int error; 2983 2984 spa->spa_load_state = state; 2985 (void) spa_import_progress_set_state(spa_guid(spa), 2986 spa_load_state(spa)); 2987 2988 gethrestime(&spa->spa_loaded_ts); 2989 error = spa_load_impl(spa, type, &ereport); 2990 2991 /* 2992 * Don't count references from objsets that are already closed 2993 * and are making their way through the eviction process. 2994 */ 2995 spa_evicting_os_wait(spa); 2996 spa->spa_minref = zfs_refcount_count(&spa->spa_refcount); 2997 if (error) { 2998 if (error != EEXIST) { 2999 spa->spa_loaded_ts.tv_sec = 0; 3000 spa->spa_loaded_ts.tv_nsec = 0; 3001 } 3002 if (error != EBADF) { 3003 (void) zfs_ereport_post(ereport, spa, 3004 NULL, NULL, NULL, 0); 3005 } 3006 } 3007 spa->spa_load_state = error ? SPA_LOAD_ERROR : SPA_LOAD_NONE; 3008 spa->spa_ena = 0; 3009 3010 (void) spa_import_progress_set_state(spa_guid(spa), 3011 spa_load_state(spa)); 3012 3013 return (error); 3014 } 3015 3016 #ifdef ZFS_DEBUG 3017 /* 3018 * Count the number of per-vdev ZAPs associated with all of the vdevs in the 3019 * vdev tree rooted in the given vd, and ensure that each ZAP is present in the 3020 * spa's per-vdev ZAP list. 3021 */ 3022 static uint64_t 3023 vdev_count_verify_zaps(vdev_t *vd) 3024 { 3025 spa_t *spa = vd->vdev_spa; 3026 uint64_t total = 0; 3027 3028 if (vd->vdev_top_zap != 0) { 3029 total++; 3030 ASSERT0(zap_lookup_int(spa->spa_meta_objset, 3031 spa->spa_all_vdev_zaps, vd->vdev_top_zap)); 3032 } 3033 if (vd->vdev_leaf_zap != 0) { 3034 total++; 3035 ASSERT0(zap_lookup_int(spa->spa_meta_objset, 3036 spa->spa_all_vdev_zaps, vd->vdev_leaf_zap)); 3037 } 3038 3039 for (uint64_t i = 0; i < vd->vdev_children; i++) { 3040 total += vdev_count_verify_zaps(vd->vdev_child[i]); 3041 } 3042 3043 return (total); 3044 } 3045 #else 3046 #define vdev_count_verify_zaps(vd) ((void) sizeof (vd), 0) 3047 #endif 3048 3049 /* 3050 * Determine whether the activity check is required. 
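 *
 * Condensed, the checks below skip the activity test when any of the
 * following holds (this is a summary of the function body, not a second
 * implementation):
 *
 *      - ZFS_IMPORT_SKIP_MMP is set in spa_import_flags (zdb);
 *      - the uberblock shows MMP disabled (ub_mmp_magic == MMP_MAGIC and
 *        ub_mmp_delay == 0);
 *      - the tryconfig txg/timestamp/mmp_seq all match this uberblock,
 *        i.e. the pool has not changed since the earlier tryimport;
 *      - the hostid read from the label matches spa_get_hostid(spa); or
 *      - the pool state is not POOL_STATE_ACTIVE (clean export).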
3051 */ 3052 static boolean_t 3053 spa_activity_check_required(spa_t *spa, uberblock_t *ub, nvlist_t *label, 3054 nvlist_t *config) 3055 { 3056 uint64_t state = 0; 3057 uint64_t hostid = 0; 3058 uint64_t tryconfig_txg = 0; 3059 uint64_t tryconfig_timestamp = 0; 3060 uint16_t tryconfig_mmp_seq = 0; 3061 nvlist_t *nvinfo; 3062 3063 if (nvlist_exists(config, ZPOOL_CONFIG_LOAD_INFO)) { 3064 nvinfo = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO); 3065 (void) nvlist_lookup_uint64(nvinfo, ZPOOL_CONFIG_MMP_TXG, 3066 &tryconfig_txg); 3067 (void) nvlist_lookup_uint64(config, ZPOOL_CONFIG_TIMESTAMP, 3068 &tryconfig_timestamp); 3069 (void) nvlist_lookup_uint16(nvinfo, ZPOOL_CONFIG_MMP_SEQ, 3070 &tryconfig_mmp_seq); 3071 } 3072 3073 (void) nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE, &state); 3074 3075 /* 3076 * Disable the MMP activity check - This is used by zdb which 3077 * is intended to be used on potentially active pools. 3078 */ 3079 if (spa->spa_import_flags & ZFS_IMPORT_SKIP_MMP) 3080 return (B_FALSE); 3081 3082 /* 3083 * Skip the activity check when the MMP feature is disabled. 3084 */ 3085 if (ub->ub_mmp_magic == MMP_MAGIC && ub->ub_mmp_delay == 0) 3086 return (B_FALSE); 3087 3088 /* 3089 * If the tryconfig_ values are nonzero, they are the results of an 3090 * earlier tryimport. If they all match the uberblock we just found, 3091 * then the pool has not changed and we return false so we do not test 3092 * a second time. 3093 */ 3094 if (tryconfig_txg && tryconfig_txg == ub->ub_txg && 3095 tryconfig_timestamp && tryconfig_timestamp == ub->ub_timestamp && 3096 tryconfig_mmp_seq && tryconfig_mmp_seq == 3097 (MMP_SEQ_VALID(ub) ? MMP_SEQ(ub) : 0)) 3098 return (B_FALSE); 3099 3100 /* 3101 * Allow the activity check to be skipped when importing the pool 3102 * on the same host which last imported it. Since the hostid from 3103 * configuration may be stale use the one read from the label. 3104 */ 3105 if (nvlist_exists(label, ZPOOL_CONFIG_HOSTID)) 3106 hostid = fnvlist_lookup_uint64(label, ZPOOL_CONFIG_HOSTID); 3107 3108 if (hostid == spa_get_hostid(spa)) 3109 return (B_FALSE); 3110 3111 /* 3112 * Skip the activity test when the pool was cleanly exported. 3113 */ 3114 if (state != POOL_STATE_ACTIVE) 3115 return (B_FALSE); 3116 3117 return (B_TRUE); 3118 } 3119 3120 /* 3121 * Nanoseconds the activity check must watch for changes on-disk. 3122 */ 3123 static uint64_t 3124 spa_activity_check_duration(spa_t *spa, uberblock_t *ub) 3125 { 3126 uint64_t import_intervals = MAX(zfs_multihost_import_intervals, 1); 3127 uint64_t multihost_interval = MSEC2NSEC( 3128 MMP_INTERVAL_OK(zfs_multihost_interval)); 3129 uint64_t import_delay = MAX(NANOSEC, import_intervals * 3130 multihost_interval); 3131 3132 /* 3133 * Local tunables determine a minimum duration except for the case 3134 * where we know when the remote host will suspend the pool if MMP 3135 * writes do not land. 3136 * 3137 * See Big Theory comment at the top of mmp.c for the reasoning behind 3138 * these cases and times. 
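 *
 * As a worked example (all numbers assumed purely for illustration): if
 * the uberblock advertises MMP_FAIL_INT(ub) == 10 and MMP_INTERVAL(ub) ==
 * 1000 ms, and the import safety factor is 200%, the first branch below
 * computes
 *
 *      import_delay = 10 * MSEC2NSEC(1000) * 200 / 100
 *                   = 20 seconds
 *
 * i.e. roughly twice the time after which the remote host would have
 * suspended the pool.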
3139 */ 3140 3141 ASSERT(MMP_IMPORT_SAFETY_FACTOR >= 100); 3142 3143 if (MMP_INTERVAL_VALID(ub) && MMP_FAIL_INT_VALID(ub) && 3144 MMP_FAIL_INT(ub) > 0) { 3145 3146 /* MMP on remote host will suspend pool after failed writes */ 3147 import_delay = MMP_FAIL_INT(ub) * MSEC2NSEC(MMP_INTERVAL(ub)) * 3148 MMP_IMPORT_SAFETY_FACTOR / 100; 3149 3150 zfs_dbgmsg("fail_intvals>0 import_delay=%llu ub_mmp " 3151 "mmp_fails=%llu ub_mmp mmp_interval=%llu " 3152 "import_intervals=%llu", (u_longlong_t)import_delay, 3153 (u_longlong_t)MMP_FAIL_INT(ub), 3154 (u_longlong_t)MMP_INTERVAL(ub), 3155 (u_longlong_t)import_intervals); 3156 3157 } else if (MMP_INTERVAL_VALID(ub) && MMP_FAIL_INT_VALID(ub) && 3158 MMP_FAIL_INT(ub) == 0) { 3159 3160 /* MMP on remote host will never suspend pool */ 3161 import_delay = MAX(import_delay, (MSEC2NSEC(MMP_INTERVAL(ub)) + 3162 ub->ub_mmp_delay) * import_intervals); 3163 3164 zfs_dbgmsg("fail_intvals=0 import_delay=%llu ub_mmp " 3165 "mmp_interval=%llu ub_mmp_delay=%llu " 3166 "import_intervals=%llu", (u_longlong_t)import_delay, 3167 (u_longlong_t)MMP_INTERVAL(ub), 3168 (u_longlong_t)ub->ub_mmp_delay, 3169 (u_longlong_t)import_intervals); 3170 3171 } else if (MMP_VALID(ub)) { 3172 /* 3173 * zfs-0.7 compatibility case 3174 */ 3175 3176 import_delay = MAX(import_delay, (multihost_interval + 3177 ub->ub_mmp_delay) * import_intervals); 3178 3179 zfs_dbgmsg("import_delay=%llu ub_mmp_delay=%llu " 3180 "import_intervals=%llu leaves=%u", 3181 (u_longlong_t)import_delay, 3182 (u_longlong_t)ub->ub_mmp_delay, 3183 (u_longlong_t)import_intervals, 3184 vdev_count_leaves(spa)); 3185 } else { 3186 /* Using local tunings is the only reasonable option */ 3187 zfs_dbgmsg("pool last imported on non-MMP aware " 3188 "host using import_delay=%llu multihost_interval=%llu " 3189 "import_intervals=%llu", (u_longlong_t)import_delay, 3190 (u_longlong_t)multihost_interval, 3191 (u_longlong_t)import_intervals); 3192 } 3193 3194 return (import_delay); 3195 } 3196 3197 /* 3198 * Perform the import activity check. If the user canceled the import or 3199 * we detected activity then fail. 3200 */ 3201 static int 3202 spa_activity_check(spa_t *spa, uberblock_t *ub, nvlist_t *config) 3203 { 3204 uint64_t txg = ub->ub_txg; 3205 uint64_t timestamp = ub->ub_timestamp; 3206 uint64_t mmp_config = ub->ub_mmp_config; 3207 uint16_t mmp_seq = MMP_SEQ_VALID(ub) ? MMP_SEQ(ub) : 0; 3208 uint64_t import_delay; 3209 hrtime_t import_expire; 3210 nvlist_t *mmp_label = NULL; 3211 vdev_t *rvd = spa->spa_root_vdev; 3212 kcondvar_t cv; 3213 kmutex_t mtx; 3214 int error = 0; 3215 3216 cv_init(&cv, NULL, CV_DEFAULT, NULL); 3217 mutex_init(&mtx, NULL, MUTEX_DEFAULT, NULL); 3218 mutex_enter(&mtx); 3219 3220 /* 3221 * If ZPOOL_CONFIG_MMP_TXG is present an activity check was performed 3222 * during the earlier tryimport. If the txg recorded there is 0 then 3223 * the pool is known to be active on another host. 3224 * 3225 * Otherwise, the pool might be in use on another host. Check for 3226 * changes in the uberblocks on disk if necessary. 
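 *
 * To illustrate the loop below (numbers assumed for the example only):
 * with an import_delay of 20 seconds and a random factor of 10%,
 * import_expire lands about 22 seconds from now; until then the loop
 * re-reads the uberblocks roughly once per second (cv_timedwait_sig()
 * with a one-second timeout) and fails with EREMOTEIO as soon as the
 * txg, timestamp or mmp_seq changes.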
3227 */ 3228 if (nvlist_exists(config, ZPOOL_CONFIG_LOAD_INFO)) { 3229 nvlist_t *nvinfo = fnvlist_lookup_nvlist(config, 3230 ZPOOL_CONFIG_LOAD_INFO); 3231 3232 if (nvlist_exists(nvinfo, ZPOOL_CONFIG_MMP_TXG) && 3233 fnvlist_lookup_uint64(nvinfo, ZPOOL_CONFIG_MMP_TXG) == 0) { 3234 vdev_uberblock_load(rvd, ub, &mmp_label); 3235 error = SET_ERROR(EREMOTEIO); 3236 goto out; 3237 } 3238 } 3239 3240 import_delay = spa_activity_check_duration(spa, ub); 3241 3242 /* Add a small random factor in case of simultaneous imports (0-25%) */ 3243 import_delay += import_delay * random_in_range(250) / 1000; 3244 3245 import_expire = gethrtime() + import_delay; 3246 3247 while (gethrtime() < import_expire) { 3248 (void) spa_import_progress_set_mmp_check(spa_guid(spa), 3249 NSEC2SEC(import_expire - gethrtime())); 3250 3251 vdev_uberblock_load(rvd, ub, &mmp_label); 3252 3253 if (txg != ub->ub_txg || timestamp != ub->ub_timestamp || 3254 mmp_seq != (MMP_SEQ_VALID(ub) ? MMP_SEQ(ub) : 0)) { 3255 zfs_dbgmsg("multihost activity detected " 3256 "txg %llu ub_txg %llu " 3257 "timestamp %llu ub_timestamp %llu " 3258 "mmp_config %#llx ub_mmp_config %#llx", 3259 (u_longlong_t)txg, (u_longlong_t)ub->ub_txg, 3260 (u_longlong_t)timestamp, 3261 (u_longlong_t)ub->ub_timestamp, 3262 (u_longlong_t)mmp_config, 3263 (u_longlong_t)ub->ub_mmp_config); 3264 3265 error = SET_ERROR(EREMOTEIO); 3266 break; 3267 } 3268 3269 if (mmp_label) { 3270 nvlist_free(mmp_label); 3271 mmp_label = NULL; 3272 } 3273 3274 error = cv_timedwait_sig(&cv, &mtx, ddi_get_lbolt() + hz); 3275 if (error != -1) { 3276 error = SET_ERROR(EINTR); 3277 break; 3278 } 3279 error = 0; 3280 } 3281 3282 out: 3283 mutex_exit(&mtx); 3284 mutex_destroy(&mtx); 3285 cv_destroy(&cv); 3286 3287 /* 3288 * If the pool is determined to be active store the status in the 3289 * spa->spa_load_info nvlist. If the remote hostname or hostid are 3290 * available from configuration read from disk store them as well. 3291 * This allows 'zpool import' to generate a more useful message. 
3292 * 3293 * ZPOOL_CONFIG_MMP_STATE - observed pool status (mandatory) 3294 * ZPOOL_CONFIG_MMP_HOSTNAME - hostname from the active pool 3295 * ZPOOL_CONFIG_MMP_HOSTID - hostid from the active pool 3296 */ 3297 if (error == EREMOTEIO) { 3298 const char *hostname = "<unknown>"; 3299 uint64_t hostid = 0; 3300 3301 if (mmp_label) { 3302 if (nvlist_exists(mmp_label, ZPOOL_CONFIG_HOSTNAME)) { 3303 hostname = fnvlist_lookup_string(mmp_label, 3304 ZPOOL_CONFIG_HOSTNAME); 3305 fnvlist_add_string(spa->spa_load_info, 3306 ZPOOL_CONFIG_MMP_HOSTNAME, hostname); 3307 } 3308 3309 if (nvlist_exists(mmp_label, ZPOOL_CONFIG_HOSTID)) { 3310 hostid = fnvlist_lookup_uint64(mmp_label, 3311 ZPOOL_CONFIG_HOSTID); 3312 fnvlist_add_uint64(spa->spa_load_info, 3313 ZPOOL_CONFIG_MMP_HOSTID, hostid); 3314 } 3315 } 3316 3317 fnvlist_add_uint64(spa->spa_load_info, 3318 ZPOOL_CONFIG_MMP_STATE, MMP_STATE_ACTIVE); 3319 fnvlist_add_uint64(spa->spa_load_info, 3320 ZPOOL_CONFIG_MMP_TXG, 0); 3321 3322 error = spa_vdev_err(rvd, VDEV_AUX_ACTIVE, EREMOTEIO); 3323 } 3324 3325 if (mmp_label) 3326 nvlist_free(mmp_label); 3327 3328 return (error); 3329 } 3330 3331 static int 3332 spa_verify_host(spa_t *spa, nvlist_t *mos_config) 3333 { 3334 uint64_t hostid; 3335 char *hostname; 3336 uint64_t myhostid = 0; 3337 3338 if (!spa_is_root(spa) && nvlist_lookup_uint64(mos_config, 3339 ZPOOL_CONFIG_HOSTID, &hostid) == 0) { 3340 hostname = fnvlist_lookup_string(mos_config, 3341 ZPOOL_CONFIG_HOSTNAME); 3342 3343 myhostid = zone_get_hostid(NULL); 3344 3345 if (hostid != 0 && myhostid != 0 && hostid != myhostid) { 3346 cmn_err(CE_WARN, "pool '%s' could not be " 3347 "loaded as it was last accessed by " 3348 "another system (host: %s hostid: 0x%llx). " 3349 "See: https://openzfs.github.io/openzfs-docs/msg/" 3350 "ZFS-8000-EY", 3351 spa_name(spa), hostname, (u_longlong_t)hostid); 3352 spa_load_failed(spa, "hostid verification failed: pool " 3353 "last accessed by host: %s (hostid: 0x%llx)", 3354 hostname, (u_longlong_t)hostid); 3355 return (SET_ERROR(EBADF)); 3356 } 3357 } 3358 3359 return (0); 3360 } 3361 3362 static int 3363 spa_ld_parse_config(spa_t *spa, spa_import_type_t type) 3364 { 3365 int error = 0; 3366 nvlist_t *nvtree, *nvl, *config = spa->spa_config; 3367 int parse; 3368 vdev_t *rvd; 3369 uint64_t pool_guid; 3370 char *comment; 3371 char *compatibility; 3372 3373 /* 3374 * Versioning wasn't explicitly added to the label until later, so if 3375 * it's not present treat it as the initial version. 3376 */ 3377 if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION, 3378 &spa->spa_ubsync.ub_version) != 0) 3379 spa->spa_ubsync.ub_version = SPA_VERSION_INITIAL; 3380 3381 if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID, &pool_guid)) { 3382 spa_load_failed(spa, "invalid config provided: '%s' missing", 3383 ZPOOL_CONFIG_POOL_GUID); 3384 return (SET_ERROR(EINVAL)); 3385 } 3386 3387 /* 3388 * If we are doing an import, ensure that the pool is not already 3389 * imported by checking if its pool guid already exists in the 3390 * spa namespace. 3391 * 3392 * The only case that we allow an already imported pool to be 3393 * imported again, is when the pool is checkpointed and we want to 3394 * look at its checkpointed state from userland tools like zdb. 
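 *
 * For example, zdb can be asked to examine the checkpointed state of a
 * pool that is already imported (its -k option); that opens a second,
 * read-only spa_t for the same guid, which the non-kernel branch below
 * permits by also consulting spa_importing_readonly_checkpoint().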
3395 */ 3396 #ifdef _KERNEL 3397 if ((spa->spa_load_state == SPA_LOAD_IMPORT || 3398 spa->spa_load_state == SPA_LOAD_TRYIMPORT) && 3399 spa_guid_exists(pool_guid, 0)) { 3400 #else 3401 if ((spa->spa_load_state == SPA_LOAD_IMPORT || 3402 spa->spa_load_state == SPA_LOAD_TRYIMPORT) && 3403 spa_guid_exists(pool_guid, 0) && 3404 !spa_importing_readonly_checkpoint(spa)) { 3405 #endif 3406 spa_load_failed(spa, "a pool with guid %llu is already open", 3407 (u_longlong_t)pool_guid); 3408 return (SET_ERROR(EEXIST)); 3409 } 3410 3411 spa->spa_config_guid = pool_guid; 3412 3413 nvlist_free(spa->spa_load_info); 3414 spa->spa_load_info = fnvlist_alloc(); 3415 3416 ASSERT(spa->spa_comment == NULL); 3417 if (nvlist_lookup_string(config, ZPOOL_CONFIG_COMMENT, &comment) == 0) 3418 spa->spa_comment = spa_strdup(comment); 3419 3420 ASSERT(spa->spa_compatibility == NULL); 3421 if (nvlist_lookup_string(config, ZPOOL_CONFIG_COMPATIBILITY, 3422 &compatibility) == 0) 3423 spa->spa_compatibility = spa_strdup(compatibility); 3424 3425 (void) nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_TXG, 3426 &spa->spa_config_txg); 3427 3428 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_SPLIT, &nvl) == 0) 3429 spa->spa_config_splitting = fnvlist_dup(nvl); 3430 3431 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &nvtree)) { 3432 spa_load_failed(spa, "invalid config provided: '%s' missing", 3433 ZPOOL_CONFIG_VDEV_TREE); 3434 return (SET_ERROR(EINVAL)); 3435 } 3436 3437 /* 3438 * Create "The Godfather" zio to hold all async IOs 3439 */ 3440 spa->spa_async_zio_root = kmem_alloc(max_ncpus * sizeof (void *), 3441 KM_SLEEP); 3442 for (int i = 0; i < max_ncpus; i++) { 3443 spa->spa_async_zio_root[i] = zio_root(spa, NULL, NULL, 3444 ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE | 3445 ZIO_FLAG_GODFATHER); 3446 } 3447 3448 /* 3449 * Parse the configuration into a vdev tree. We explicitly set the 3450 * value that will be returned by spa_version() since parsing the 3451 * configuration requires knowing the version number. 3452 */ 3453 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 3454 parse = (type == SPA_IMPORT_EXISTING ? 3455 VDEV_ALLOC_LOAD : VDEV_ALLOC_SPLIT); 3456 error = spa_config_parse(spa, &rvd, nvtree, NULL, 0, parse); 3457 spa_config_exit(spa, SCL_ALL, FTAG); 3458 3459 if (error != 0) { 3460 spa_load_failed(spa, "unable to parse config [error=%d]", 3461 error); 3462 return (error); 3463 } 3464 3465 ASSERT(spa->spa_root_vdev == rvd); 3466 ASSERT3U(spa->spa_min_ashift, >=, SPA_MINBLOCKSHIFT); 3467 ASSERT3U(spa->spa_max_ashift, <=, SPA_MAXBLOCKSHIFT); 3468 3469 if (type != SPA_IMPORT_ASSEMBLE) { 3470 ASSERT(spa_guid(spa) == pool_guid); 3471 } 3472 3473 return (0); 3474 } 3475 3476 /* 3477 * Recursively open all vdevs in the vdev tree. This function is called twice: 3478 * first with the untrusted config, then with the trusted config. 3479 */ 3480 static int 3481 spa_ld_open_vdevs(spa_t *spa) 3482 { 3483 int error = 0; 3484 3485 /* 3486 * spa_missing_tvds_allowed defines how many top-level vdevs can be 3487 * missing/unopenable for the root vdev to be still considered openable. 
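 *
 * A condensed view of the selection performed below (the values come from
 * the zfs_max_missing_tvds* tunables; no particular defaults are implied
 * here):
 *
 *      trusted config                  -> zfs_max_missing_tvds
 *      untrusted, from the cachefile   -> zfs_max_missing_tvds_cachefile
 *      untrusted, from a device scan   -> zfs_max_missing_tvds_scan
 *      anything else                   -> 0
 *
 * The result is then raised to at least zfs_max_missing_tvds.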
3488 */ 3489 if (spa->spa_trust_config) { 3490 spa->spa_missing_tvds_allowed = zfs_max_missing_tvds; 3491 } else if (spa->spa_config_source == SPA_CONFIG_SRC_CACHEFILE) { 3492 spa->spa_missing_tvds_allowed = zfs_max_missing_tvds_cachefile; 3493 } else if (spa->spa_config_source == SPA_CONFIG_SRC_SCAN) { 3494 spa->spa_missing_tvds_allowed = zfs_max_missing_tvds_scan; 3495 } else { 3496 spa->spa_missing_tvds_allowed = 0; 3497 } 3498 3499 spa->spa_missing_tvds_allowed = 3500 MAX(zfs_max_missing_tvds, spa->spa_missing_tvds_allowed); 3501 3502 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 3503 error = vdev_open(spa->spa_root_vdev); 3504 spa_config_exit(spa, SCL_ALL, FTAG); 3505 3506 if (spa->spa_missing_tvds != 0) { 3507 spa_load_note(spa, "vdev tree has %lld missing top-level " 3508 "vdevs.", (u_longlong_t)spa->spa_missing_tvds); 3509 if (spa->spa_trust_config && (spa->spa_mode & SPA_MODE_WRITE)) { 3510 /* 3511 * Although theoretically we could allow users to open 3512 * incomplete pools in RW mode, we'd need to add a lot 3513 * of extra logic (e.g. adjust pool space to account 3514 * for missing vdevs). 3515 * This limitation also prevents users from accidentally 3516 * opening the pool in RW mode during data recovery and 3517 * damaging it further. 3518 */ 3519 spa_load_note(spa, "pools with missing top-level " 3520 "vdevs can only be opened in read-only mode."); 3521 error = SET_ERROR(ENXIO); 3522 } else { 3523 spa_load_note(spa, "current settings allow for maximum " 3524 "%lld missing top-level vdevs at this stage.", 3525 (u_longlong_t)spa->spa_missing_tvds_allowed); 3526 } 3527 } 3528 if (error != 0) { 3529 spa_load_failed(spa, "unable to open vdev tree [error=%d]", 3530 error); 3531 } 3532 if (spa->spa_missing_tvds != 0 || error != 0) 3533 vdev_dbgmsg_print_tree(spa->spa_root_vdev, 2); 3534 3535 return (error); 3536 } 3537 3538 /* 3539 * We need to validate the vdev labels against the configuration that 3540 * we have in hand. This function is called twice: first with an untrusted 3541 * config, then with a trusted config. The validation is more strict when the 3542 * config is trusted. 3543 */ 3544 static int 3545 spa_ld_validate_vdevs(spa_t *spa) 3546 { 3547 int error = 0; 3548 vdev_t *rvd = spa->spa_root_vdev; 3549 3550 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 3551 error = vdev_validate(rvd); 3552 spa_config_exit(spa, SCL_ALL, FTAG); 3553 3554 if (error != 0) { 3555 spa_load_failed(spa, "vdev_validate failed [error=%d]", error); 3556 return (error); 3557 } 3558 3559 if (rvd->vdev_state <= VDEV_STATE_CANT_OPEN) { 3560 spa_load_failed(spa, "cannot open vdev tree after invalidating " 3561 "some vdevs"); 3562 vdev_dbgmsg_print_tree(rvd, 2); 3563 return (SET_ERROR(ENXIO)); 3564 } 3565 3566 return (0); 3567 } 3568 3569 static void 3570 spa_ld_select_uberblock_done(spa_t *spa, uberblock_t *ub) 3571 { 3572 spa->spa_state = POOL_STATE_ACTIVE; 3573 spa->spa_ubsync = spa->spa_uberblock; 3574 spa->spa_verify_min_txg = spa->spa_extreme_rewind ? 3575 TXG_INITIAL - 1 : spa_last_synced_txg(spa) - TXG_DEFER_SIZE - 1; 3576 spa->spa_first_txg = spa->spa_last_ubsync_txg ? 
3577 spa->spa_last_ubsync_txg : spa_last_synced_txg(spa) + 1; 3578 spa->spa_claim_max_txg = spa->spa_first_txg; 3579 spa->spa_prev_software_version = ub->ub_software_version; 3580 } 3581 3582 static int 3583 spa_ld_select_uberblock(spa_t *spa, spa_import_type_t type) 3584 { 3585 vdev_t *rvd = spa->spa_root_vdev; 3586 nvlist_t *label; 3587 uberblock_t *ub = &spa->spa_uberblock; 3588 boolean_t activity_check = B_FALSE; 3589 3590 /* 3591 * If we are opening the checkpointed state of the pool by 3592 * rewinding to it, at this point we will have written the 3593 * checkpointed uberblock to the vdev labels, so searching 3594 * the labels will find the right uberblock. However, if 3595 * we are opening the checkpointed state read-only, we have 3596 * not modified the labels. Therefore, we must ignore the 3597 * labels and continue using the spa_uberblock that was set 3598 * by spa_ld_checkpoint_rewind. 3599 * 3600 * Note that it would be fine to ignore the labels when 3601 * rewinding (opening writeable) as well. However, if we 3602 * crash just after writing the labels, we will end up 3603 * searching the labels. Doing so in the common case means 3604 * that this code path gets exercised normally, rather than 3605 * just in the edge case. 3606 */ 3607 if (ub->ub_checkpoint_txg != 0 && 3608 spa_importing_readonly_checkpoint(spa)) { 3609 spa_ld_select_uberblock_done(spa, ub); 3610 return (0); 3611 } 3612 3613 /* 3614 * Find the best uberblock. 3615 */ 3616 vdev_uberblock_load(rvd, ub, &label); 3617 3618 /* 3619 * If we weren't able to find a single valid uberblock, return failure. 3620 */ 3621 if (ub->ub_txg == 0) { 3622 nvlist_free(label); 3623 spa_load_failed(spa, "no valid uberblock found"); 3624 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, ENXIO)); 3625 } 3626 3627 if (spa->spa_load_max_txg != UINT64_MAX) { 3628 (void) spa_import_progress_set_max_txg(spa_guid(spa), 3629 (u_longlong_t)spa->spa_load_max_txg); 3630 } 3631 spa_load_note(spa, "using uberblock with txg=%llu", 3632 (u_longlong_t)ub->ub_txg); 3633 3634 3635 /* 3636 * For pools which have the multihost property on determine if the 3637 * pool is truly inactive and can be safely imported. Prevent 3638 * hosts which don't have a hostid set from importing the pool. 3639 */ 3640 activity_check = spa_activity_check_required(spa, ub, label, 3641 spa->spa_config); 3642 if (activity_check) { 3643 if (ub->ub_mmp_magic == MMP_MAGIC && ub->ub_mmp_delay && 3644 spa_get_hostid(spa) == 0) { 3645 nvlist_free(label); 3646 fnvlist_add_uint64(spa->spa_load_info, 3647 ZPOOL_CONFIG_MMP_STATE, MMP_STATE_NO_HOSTID); 3648 return (spa_vdev_err(rvd, VDEV_AUX_ACTIVE, EREMOTEIO)); 3649 } 3650 3651 int error = spa_activity_check(spa, ub, spa->spa_config); 3652 if (error) { 3653 nvlist_free(label); 3654 return (error); 3655 } 3656 3657 fnvlist_add_uint64(spa->spa_load_info, 3658 ZPOOL_CONFIG_MMP_STATE, MMP_STATE_INACTIVE); 3659 fnvlist_add_uint64(spa->spa_load_info, 3660 ZPOOL_CONFIG_MMP_TXG, ub->ub_txg); 3661 fnvlist_add_uint16(spa->spa_load_info, 3662 ZPOOL_CONFIG_MMP_SEQ, 3663 (MMP_SEQ_VALID(ub) ? MMP_SEQ(ub) : 0)); 3664 } 3665 3666 /* 3667 * If the pool has an unsupported version we can't open it. 
3668 */ 3669 if (!SPA_VERSION_IS_SUPPORTED(ub->ub_version)) { 3670 nvlist_free(label); 3671 spa_load_failed(spa, "version %llu is not supported", 3672 (u_longlong_t)ub->ub_version); 3673 return (spa_vdev_err(rvd, VDEV_AUX_VERSION_NEWER, ENOTSUP)); 3674 } 3675 3676 if (ub->ub_version >= SPA_VERSION_FEATURES) { 3677 nvlist_t *features; 3678 3679 /* 3680 * If we weren't able to find what's necessary for reading the 3681 * MOS in the label, return failure. 3682 */ 3683 if (label == NULL) { 3684 spa_load_failed(spa, "label config unavailable"); 3685 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, 3686 ENXIO)); 3687 } 3688 3689 if (nvlist_lookup_nvlist(label, ZPOOL_CONFIG_FEATURES_FOR_READ, 3690 &features) != 0) { 3691 nvlist_free(label); 3692 spa_load_failed(spa, "invalid label: '%s' missing", 3693 ZPOOL_CONFIG_FEATURES_FOR_READ); 3694 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, 3695 ENXIO)); 3696 } 3697 3698 /* 3699 * Update our in-core representation with the definitive values 3700 * from the label. 3701 */ 3702 nvlist_free(spa->spa_label_features); 3703 spa->spa_label_features = fnvlist_dup(features); 3704 } 3705 3706 nvlist_free(label); 3707 3708 /* 3709 * Look through entries in the label nvlist's features_for_read. If 3710 * there is a feature listed there which we don't understand then we 3711 * cannot open a pool. 3712 */ 3713 if (ub->ub_version >= SPA_VERSION_FEATURES) { 3714 nvlist_t *unsup_feat; 3715 3716 unsup_feat = fnvlist_alloc(); 3717 3718 for (nvpair_t *nvp = nvlist_next_nvpair(spa->spa_label_features, 3719 NULL); nvp != NULL; 3720 nvp = nvlist_next_nvpair(spa->spa_label_features, nvp)) { 3721 if (!zfeature_is_supported(nvpair_name(nvp))) { 3722 fnvlist_add_string(unsup_feat, 3723 nvpair_name(nvp), ""); 3724 } 3725 } 3726 3727 if (!nvlist_empty(unsup_feat)) { 3728 fnvlist_add_nvlist(spa->spa_load_info, 3729 ZPOOL_CONFIG_UNSUP_FEAT, unsup_feat); 3730 nvlist_free(unsup_feat); 3731 spa_load_failed(spa, "some features are unsupported"); 3732 return (spa_vdev_err(rvd, VDEV_AUX_UNSUP_FEAT, 3733 ENOTSUP)); 3734 } 3735 3736 nvlist_free(unsup_feat); 3737 } 3738 3739 if (type != SPA_IMPORT_ASSEMBLE && spa->spa_config_splitting) { 3740 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 3741 spa_try_repair(spa, spa->spa_config); 3742 spa_config_exit(spa, SCL_ALL, FTAG); 3743 nvlist_free(spa->spa_config_splitting); 3744 spa->spa_config_splitting = NULL; 3745 } 3746 3747 /* 3748 * Initialize internal SPA structures. 
3749 */ 3750 spa_ld_select_uberblock_done(spa, ub); 3751 3752 return (0); 3753 } 3754 3755 static int 3756 spa_ld_open_rootbp(spa_t *spa) 3757 { 3758 int error = 0; 3759 vdev_t *rvd = spa->spa_root_vdev; 3760 3761 error = dsl_pool_init(spa, spa->spa_first_txg, &spa->spa_dsl_pool); 3762 if (error != 0) { 3763 spa_load_failed(spa, "unable to open rootbp in dsl_pool_init " 3764 "[error=%d]", error); 3765 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 3766 } 3767 spa->spa_meta_objset = spa->spa_dsl_pool->dp_meta_objset; 3768 3769 return (0); 3770 } 3771 3772 static int 3773 spa_ld_trusted_config(spa_t *spa, spa_import_type_t type, 3774 boolean_t reloading) 3775 { 3776 vdev_t *mrvd, *rvd = spa->spa_root_vdev; 3777 nvlist_t *nv, *mos_config, *policy; 3778 int error = 0, copy_error; 3779 uint64_t healthy_tvds, healthy_tvds_mos; 3780 uint64_t mos_config_txg; 3781 3782 if (spa_dir_prop(spa, DMU_POOL_CONFIG, &spa->spa_config_object, B_TRUE) 3783 != 0) 3784 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 3785 3786 /* 3787 * If we're assembling a pool from a split, the config provided is 3788 * already trusted so there is nothing to do. 3789 */ 3790 if (type == SPA_IMPORT_ASSEMBLE) 3791 return (0); 3792 3793 healthy_tvds = spa_healthy_core_tvds(spa); 3794 3795 if (load_nvlist(spa, spa->spa_config_object, &mos_config) 3796 != 0) { 3797 spa_load_failed(spa, "unable to retrieve MOS config"); 3798 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 3799 } 3800 3801 /* 3802 * If we are doing an open, pool owner wasn't verified yet, thus do 3803 * the verification here. 3804 */ 3805 if (spa->spa_load_state == SPA_LOAD_OPEN) { 3806 error = spa_verify_host(spa, mos_config); 3807 if (error != 0) { 3808 nvlist_free(mos_config); 3809 return (error); 3810 } 3811 } 3812 3813 nv = fnvlist_lookup_nvlist(mos_config, ZPOOL_CONFIG_VDEV_TREE); 3814 3815 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 3816 3817 /* 3818 * Build a new vdev tree from the trusted config 3819 */ 3820 error = spa_config_parse(spa, &mrvd, nv, NULL, 0, VDEV_ALLOC_LOAD); 3821 if (error != 0) { 3822 nvlist_free(mos_config); 3823 spa_config_exit(spa, SCL_ALL, FTAG); 3824 spa_load_failed(spa, "spa_config_parse failed [error=%d]", 3825 error); 3826 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, error)); 3827 } 3828 3829 /* 3830 * Vdev paths in the MOS may be obsolete. If the untrusted config was 3831 * obtained by scanning /dev/dsk, then it will have the right vdev 3832 * paths. We update the trusted MOS config with this information. 3833 * We first try to copy the paths with vdev_copy_path_strict, which 3834 * succeeds only when both configs have exactly the same vdev tree. 3835 * If that fails, we fall back to a more flexible method that has a 3836 * best effort policy. 3837 */ 3838 copy_error = vdev_copy_path_strict(rvd, mrvd); 3839 if (copy_error != 0 || spa_load_print_vdev_tree) { 3840 spa_load_note(spa, "provided vdev tree:"); 3841 vdev_dbgmsg_print_tree(rvd, 2); 3842 spa_load_note(spa, "MOS vdev tree:"); 3843 vdev_dbgmsg_print_tree(mrvd, 2); 3844 } 3845 if (copy_error != 0) { 3846 spa_load_note(spa, "vdev_copy_path_strict failed, falling " 3847 "back to vdev_copy_path_relaxed"); 3848 vdev_copy_path_relaxed(rvd, mrvd); 3849 } 3850 3851 vdev_close(rvd); 3852 vdev_free(rvd); 3853 spa->spa_root_vdev = mrvd; 3854 rvd = mrvd; 3855 spa_config_exit(spa, SCL_ALL, FTAG); 3856 3857 /* 3858 * We will use spa_config if we decide to reload the spa or if spa_load 3859 * fails and we rewind. 
We must thus regenerate the config using the 3860 * MOS information with the updated paths. ZPOOL_LOAD_POLICY is used to 3861 * pass settings on how to load the pool and is not stored in the MOS. 3862 * We copy it over to our new, trusted config. 3863 */ 3864 mos_config_txg = fnvlist_lookup_uint64(mos_config, 3865 ZPOOL_CONFIG_POOL_TXG); 3866 nvlist_free(mos_config); 3867 mos_config = spa_config_generate(spa, NULL, mos_config_txg, B_FALSE); 3868 if (nvlist_lookup_nvlist(spa->spa_config, ZPOOL_LOAD_POLICY, 3869 &policy) == 0) 3870 fnvlist_add_nvlist(mos_config, ZPOOL_LOAD_POLICY, policy); 3871 spa_config_set(spa, mos_config); 3872 spa->spa_config_source = SPA_CONFIG_SRC_MOS; 3873 3874 /* 3875 * Now that we got the config from the MOS, we should be more strict 3876 * in checking blkptrs and can make assumptions about the consistency 3877 * of the vdev tree. spa_trust_config must be set to true before opening 3878 * vdevs in order for them to be writeable. 3879 */ 3880 spa->spa_trust_config = B_TRUE; 3881 3882 /* 3883 * Open and validate the new vdev tree 3884 */ 3885 error = spa_ld_open_vdevs(spa); 3886 if (error != 0) 3887 return (error); 3888 3889 error = spa_ld_validate_vdevs(spa); 3890 if (error != 0) 3891 return (error); 3892 3893 if (copy_error != 0 || spa_load_print_vdev_tree) { 3894 spa_load_note(spa, "final vdev tree:"); 3895 vdev_dbgmsg_print_tree(rvd, 2); 3896 } 3897 3898 if (spa->spa_load_state != SPA_LOAD_TRYIMPORT && 3899 !spa->spa_extreme_rewind && zfs_max_missing_tvds == 0) { 3900 /* 3901 * Sanity check to make sure that we are indeed loading the 3902 * latest uberblock. If we missed SPA_SYNC_MIN_VDEVS tvds 3903 * in the config provided and they happened to be the only ones 3904 * to have the latest uberblock, we could involuntarily perform 3905 * an extreme rewind. 3906 */ 3907 healthy_tvds_mos = spa_healthy_core_tvds(spa); 3908 if (healthy_tvds_mos - healthy_tvds >= 3909 SPA_SYNC_MIN_VDEVS) { 3910 spa_load_note(spa, "config provided misses too many " 3911 "top-level vdevs compared to MOS (%lld vs %lld). ", 3912 (u_longlong_t)healthy_tvds, 3913 (u_longlong_t)healthy_tvds_mos); 3914 spa_load_note(spa, "vdev tree:"); 3915 vdev_dbgmsg_print_tree(rvd, 2); 3916 if (reloading) { 3917 spa_load_failed(spa, "config was already " 3918 "provided from MOS. Aborting."); 3919 return (spa_vdev_err(rvd, 3920 VDEV_AUX_CORRUPT_DATA, EIO)); 3921 } 3922 spa_load_note(spa, "spa must be reloaded using MOS " 3923 "config"); 3924 return (SET_ERROR(EAGAIN)); 3925 } 3926 } 3927 3928 error = spa_check_for_missing_logs(spa); 3929 if (error != 0) 3930 return (spa_vdev_err(rvd, VDEV_AUX_BAD_GUID_SUM, ENXIO)); 3931 3932 if (rvd->vdev_guid_sum != spa->spa_uberblock.ub_guid_sum) { 3933 spa_load_failed(spa, "uberblock guid sum doesn't match MOS " 3934 "guid sum (%llu != %llu)", 3935 (u_longlong_t)spa->spa_uberblock.ub_guid_sum, 3936 (u_longlong_t)rvd->vdev_guid_sum); 3937 return (spa_vdev_err(rvd, VDEV_AUX_BAD_GUID_SUM, 3938 ENXIO)); 3939 } 3940 3941 return (0); 3942 } 3943 3944 static int 3945 spa_ld_open_indirect_vdev_metadata(spa_t *spa) 3946 { 3947 int error = 0; 3948 vdev_t *rvd = spa->spa_root_vdev; 3949 3950 /* 3951 * Everything that we read before spa_remove_init() must be stored 3952 * on concreted vdevs. Therefore we do this as early as possible. 
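 *
 * "Concrete" vdevs are those that still have physical storage behind
 * them; a removed device is represented by an indirect vdev whose blocks
 * are remapped through the tables loaded here. A rough sketch of the
 * guard used for this distinction elsewhere in this file (for example in
 * spa_ld_checkpoint_rewind()) is simply:
 *
 *	if (!vdev_is_concrete(vd))
 *		continue;
 *
 * This is illustrative only; the mappings themselves are read in by
 * spa_remove_init() and spa_condense_init() below.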
3953 */ 3954 error = spa_remove_init(spa); 3955 if (error != 0) { 3956 spa_load_failed(spa, "spa_remove_init failed [error=%d]", 3957 error); 3958 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 3959 } 3960 3961 /* 3962 * Retrieve information needed to condense indirect vdev mappings. 3963 */ 3964 error = spa_condense_init(spa); 3965 if (error != 0) { 3966 spa_load_failed(spa, "spa_condense_init failed [error=%d]", 3967 error); 3968 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, error)); 3969 } 3970 3971 return (0); 3972 } 3973 3974 static int 3975 spa_ld_check_features(spa_t *spa, boolean_t *missing_feat_writep) 3976 { 3977 int error = 0; 3978 vdev_t *rvd = spa->spa_root_vdev; 3979 3980 if (spa_version(spa) >= SPA_VERSION_FEATURES) { 3981 boolean_t missing_feat_read = B_FALSE; 3982 nvlist_t *unsup_feat, *enabled_feat; 3983 3984 if (spa_dir_prop(spa, DMU_POOL_FEATURES_FOR_READ, 3985 &spa->spa_feat_for_read_obj, B_TRUE) != 0) { 3986 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 3987 } 3988 3989 if (spa_dir_prop(spa, DMU_POOL_FEATURES_FOR_WRITE, 3990 &spa->spa_feat_for_write_obj, B_TRUE) != 0) { 3991 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 3992 } 3993 3994 if (spa_dir_prop(spa, DMU_POOL_FEATURE_DESCRIPTIONS, 3995 &spa->spa_feat_desc_obj, B_TRUE) != 0) { 3996 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 3997 } 3998 3999 enabled_feat = fnvlist_alloc(); 4000 unsup_feat = fnvlist_alloc(); 4001 4002 if (!spa_features_check(spa, B_FALSE, 4003 unsup_feat, enabled_feat)) 4004 missing_feat_read = B_TRUE; 4005 4006 if (spa_writeable(spa) || 4007 spa->spa_load_state == SPA_LOAD_TRYIMPORT) { 4008 if (!spa_features_check(spa, B_TRUE, 4009 unsup_feat, enabled_feat)) { 4010 *missing_feat_writep = B_TRUE; 4011 } 4012 } 4013 4014 fnvlist_add_nvlist(spa->spa_load_info, 4015 ZPOOL_CONFIG_ENABLED_FEAT, enabled_feat); 4016 4017 if (!nvlist_empty(unsup_feat)) { 4018 fnvlist_add_nvlist(spa->spa_load_info, 4019 ZPOOL_CONFIG_UNSUP_FEAT, unsup_feat); 4020 } 4021 4022 fnvlist_free(enabled_feat); 4023 fnvlist_free(unsup_feat); 4024 4025 if (!missing_feat_read) { 4026 fnvlist_add_boolean(spa->spa_load_info, 4027 ZPOOL_CONFIG_CAN_RDONLY); 4028 } 4029 4030 /* 4031 * If the state is SPA_LOAD_TRYIMPORT, our objective is 4032 * twofold: to determine whether the pool is available for 4033 * import in read-write mode and (if it is not) whether the 4034 * pool is available for import in read-only mode. If the pool 4035 * is available for import in read-write mode, it is displayed 4036 * as available in userland; if it is not available for import 4037 * in read-only mode, it is displayed as unavailable in 4038 * userland. If the pool is available for import in read-only 4039 * mode but not read-write mode, it is displayed as unavailable 4040 * in userland with a special note that the pool is actually 4041 * available for open in read-only mode. 4042 * 4043 * As a result, if the state is SPA_LOAD_TRYIMPORT and we are 4044 * missing a feature for write, we must first determine whether 4045 * the pool can be opened read-only before returning to 4046 * userland in order to know whether to display the 4047 * abovementioned note. 4048 */ 4049 if (missing_feat_read || (*missing_feat_writep && 4050 spa_writeable(spa))) { 4051 spa_load_failed(spa, "pool uses unsupported features"); 4052 return (spa_vdev_err(rvd, VDEV_AUX_UNSUP_FEAT, 4053 ENOTSUP)); 4054 } 4055 4056 /* 4057 * Load refcounts for ZFS features from disk into an in-memory 4058 * cache during SPA initialization. 
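 *
 * Once populated, the cache lets later feature queries be answered
 * without touching the MOS; spa_feature_stats_from_cache() further down
 * in this file refreshes its nvlist from these counters via
 * feature_get_refcount(). A minimal sketch of such a lookup, assuming
 * the cache has already been filled by the loop below:
 *
 *	uint64_t refcount;
 *	if (feature_get_refcount(spa, &spa_feature_table[f],
 *	    &refcount) == 0 && refcount > 0)
 *		... feature 'f' is in use by the pool ...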
4059 */ 4060 for (spa_feature_t i = 0; i < SPA_FEATURES; i++) { 4061 uint64_t refcount; 4062 4063 error = feature_get_refcount_from_disk(spa, 4064 &spa_feature_table[i], &refcount); 4065 if (error == 0) { 4066 spa->spa_feat_refcount_cache[i] = refcount; 4067 } else if (error == ENOTSUP) { 4068 spa->spa_feat_refcount_cache[i] = 4069 SPA_FEATURE_DISABLED; 4070 } else { 4071 spa_load_failed(spa, "error getting refcount " 4072 "for feature %s [error=%d]", 4073 spa_feature_table[i].fi_guid, error); 4074 return (spa_vdev_err(rvd, 4075 VDEV_AUX_CORRUPT_DATA, EIO)); 4076 } 4077 } 4078 } 4079 4080 if (spa_feature_is_active(spa, SPA_FEATURE_ENABLED_TXG)) { 4081 if (spa_dir_prop(spa, DMU_POOL_FEATURE_ENABLED_TXG, 4082 &spa->spa_feat_enabled_txg_obj, B_TRUE) != 0) 4083 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 4084 } 4085 4086 /* 4087 * Encryption was added before bookmark_v2, even though bookmark_v2 4088 * is now a dependency. If this pool has encryption enabled without 4089 * bookmark_v2, trigger an errata message. 4090 */ 4091 if (spa_feature_is_enabled(spa, SPA_FEATURE_ENCRYPTION) && 4092 !spa_feature_is_enabled(spa, SPA_FEATURE_BOOKMARK_V2)) { 4093 spa->spa_errata = ZPOOL_ERRATA_ZOL_8308_ENCRYPTION; 4094 } 4095 4096 return (0); 4097 } 4098 4099 static int 4100 spa_ld_load_special_directories(spa_t *spa) 4101 { 4102 int error = 0; 4103 vdev_t *rvd = spa->spa_root_vdev; 4104 4105 spa->spa_is_initializing = B_TRUE; 4106 error = dsl_pool_open(spa->spa_dsl_pool); 4107 spa->spa_is_initializing = B_FALSE; 4108 if (error != 0) { 4109 spa_load_failed(spa, "dsl_pool_open failed [error=%d]", error); 4110 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 4111 } 4112 4113 return (0); 4114 } 4115 4116 static int 4117 spa_ld_get_props(spa_t *spa) 4118 { 4119 int error = 0; 4120 uint64_t obj; 4121 vdev_t *rvd = spa->spa_root_vdev; 4122 4123 /* Grab the checksum salt from the MOS. */ 4124 error = zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT, 4125 DMU_POOL_CHECKSUM_SALT, 1, 4126 sizeof (spa->spa_cksum_salt.zcs_bytes), 4127 spa->spa_cksum_salt.zcs_bytes); 4128 if (error == ENOENT) { 4129 /* Generate a new salt for subsequent use */ 4130 (void) random_get_pseudo_bytes(spa->spa_cksum_salt.zcs_bytes, 4131 sizeof (spa->spa_cksum_salt.zcs_bytes)); 4132 } else if (error != 0) { 4133 spa_load_failed(spa, "unable to retrieve checksum salt from " 4134 "MOS [error=%d]", error); 4135 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 4136 } 4137 4138 if (spa_dir_prop(spa, DMU_POOL_SYNC_BPOBJ, &obj, B_TRUE) != 0) 4139 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 4140 error = bpobj_open(&spa->spa_deferred_bpobj, spa->spa_meta_objset, obj); 4141 if (error != 0) { 4142 spa_load_failed(spa, "error opening deferred-frees bpobj " 4143 "[error=%d]", error); 4144 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 4145 } 4146 4147 /* 4148 * Load the bit that tells us to use the new accounting function 4149 * (raid-z deflation). If we have an older pool, this will not 4150 * be present. 4151 */ 4152 error = spa_dir_prop(spa, DMU_POOL_DEFLATE, &spa->spa_deflate, B_FALSE); 4153 if (error != 0 && error != ENOENT) 4154 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 4155 4156 error = spa_dir_prop(spa, DMU_POOL_CREATION_VERSION, 4157 &spa->spa_creation_version, B_FALSE); 4158 if (error != 0 && error != ENOENT) 4159 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 4160 4161 /* 4162 * Load the persistent error log. If we have an older pool, this will 4163 * not be present. 
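 *
 * As with most optional MOS directory entries read by this function,
 * ENOENT simply means the pool predates the object and is not treated as
 * an error; any other failure is fatal. The recurring pattern is:
 *
 *	error = spa_dir_prop(spa, DMU_POOL_<entry>, &obj, B_FALSE);
 *	if (error != 0 && error != ENOENT)
 *		return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
 *
 * (Schematic; DMU_POOL_<entry> stands for whichever directory key is
 * being loaded.)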
4164 */ 4165 error = spa_dir_prop(spa, DMU_POOL_ERRLOG_LAST, &spa->spa_errlog_last, 4166 B_FALSE); 4167 if (error != 0 && error != ENOENT) 4168 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 4169 4170 error = spa_dir_prop(spa, DMU_POOL_ERRLOG_SCRUB, 4171 &spa->spa_errlog_scrub, B_FALSE); 4172 if (error != 0 && error != ENOENT) 4173 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 4174 4175 /* 4176 * Load the livelist deletion field. If a livelist is queued for 4177 * deletion, indicate that in the spa 4178 */ 4179 error = spa_dir_prop(spa, DMU_POOL_DELETED_CLONES, 4180 &spa->spa_livelists_to_delete, B_FALSE); 4181 if (error != 0 && error != ENOENT) 4182 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 4183 4184 /* 4185 * Load the history object. If we have an older pool, this 4186 * will not be present. 4187 */ 4188 error = spa_dir_prop(spa, DMU_POOL_HISTORY, &spa->spa_history, B_FALSE); 4189 if (error != 0 && error != ENOENT) 4190 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 4191 4192 /* 4193 * Load the per-vdev ZAP map. If we have an older pool, this will not 4194 * be present; in this case, defer its creation to a later time to 4195 * avoid dirtying the MOS this early / out of sync context. See 4196 * spa_sync_config_object. 4197 */ 4198 4199 /* The sentinel is only available in the MOS config. */ 4200 nvlist_t *mos_config; 4201 if (load_nvlist(spa, spa->spa_config_object, &mos_config) != 0) { 4202 spa_load_failed(spa, "unable to retrieve MOS config"); 4203 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 4204 } 4205 4206 error = spa_dir_prop(spa, DMU_POOL_VDEV_ZAP_MAP, 4207 &spa->spa_all_vdev_zaps, B_FALSE); 4208 4209 if (error == ENOENT) { 4210 VERIFY(!nvlist_exists(mos_config, 4211 ZPOOL_CONFIG_HAS_PER_VDEV_ZAPS)); 4212 spa->spa_avz_action = AVZ_ACTION_INITIALIZE; 4213 ASSERT0(vdev_count_verify_zaps(spa->spa_root_vdev)); 4214 } else if (error != 0) { 4215 nvlist_free(mos_config); 4216 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 4217 } else if (!nvlist_exists(mos_config, ZPOOL_CONFIG_HAS_PER_VDEV_ZAPS)) { 4218 /* 4219 * An older version of ZFS overwrote the sentinel value, so 4220 * we have orphaned per-vdev ZAPs in the MOS. Defer their 4221 * destruction to later; see spa_sync_config_object. 4222 */ 4223 spa->spa_avz_action = AVZ_ACTION_DESTROY; 4224 /* 4225 * We're assuming that no vdevs have had their ZAPs created 4226 * before this. Better be sure of it. 
4227 */ 4228 ASSERT0(vdev_count_verify_zaps(spa->spa_root_vdev)); 4229 } 4230 nvlist_free(mos_config); 4231 4232 spa->spa_delegation = zpool_prop_default_numeric(ZPOOL_PROP_DELEGATION); 4233 4234 error = spa_dir_prop(spa, DMU_POOL_PROPS, &spa->spa_pool_props_object, 4235 B_FALSE); 4236 if (error && error != ENOENT) 4237 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 4238 4239 if (error == 0) { 4240 uint64_t autoreplace = 0; 4241 4242 spa_prop_find(spa, ZPOOL_PROP_BOOTFS, &spa->spa_bootfs); 4243 spa_prop_find(spa, ZPOOL_PROP_AUTOREPLACE, &autoreplace); 4244 spa_prop_find(spa, ZPOOL_PROP_DELEGATION, &spa->spa_delegation); 4245 spa_prop_find(spa, ZPOOL_PROP_FAILUREMODE, &spa->spa_failmode); 4246 spa_prop_find(spa, ZPOOL_PROP_AUTOEXPAND, &spa->spa_autoexpand); 4247 spa_prop_find(spa, ZPOOL_PROP_MULTIHOST, &spa->spa_multihost); 4248 spa_prop_find(spa, ZPOOL_PROP_AUTOTRIM, &spa->spa_autotrim); 4249 spa->spa_autoreplace = (autoreplace != 0); 4250 } 4251 4252 /* 4253 * If we are importing a pool with missing top-level vdevs, 4254 * we enforce that the pool doesn't panic or get suspended on 4255 * error since the likelihood of missing data is extremely high. 4256 */ 4257 if (spa->spa_missing_tvds > 0 && 4258 spa->spa_failmode != ZIO_FAILURE_MODE_CONTINUE && 4259 spa->spa_load_state != SPA_LOAD_TRYIMPORT) { 4260 spa_load_note(spa, "forcing failmode to 'continue' " 4261 "as some top level vdevs are missing"); 4262 spa->spa_failmode = ZIO_FAILURE_MODE_CONTINUE; 4263 } 4264 4265 return (0); 4266 } 4267 4268 static int 4269 spa_ld_open_aux_vdevs(spa_t *spa, spa_import_type_t type) 4270 { 4271 int error = 0; 4272 vdev_t *rvd = spa->spa_root_vdev; 4273 4274 /* 4275 * If we're assembling the pool from the split-off vdevs of 4276 * an existing pool, we don't want to attach the spares & cache 4277 * devices. 4278 */ 4279 4280 /* 4281 * Load any hot spares for this pool. 4282 */ 4283 error = spa_dir_prop(spa, DMU_POOL_SPARES, &spa->spa_spares.sav_object, 4284 B_FALSE); 4285 if (error != 0 && error != ENOENT) 4286 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 4287 if (error == 0 && type != SPA_IMPORT_ASSEMBLE) { 4288 ASSERT(spa_version(spa) >= SPA_VERSION_SPARES); 4289 if (load_nvlist(spa, spa->spa_spares.sav_object, 4290 &spa->spa_spares.sav_config) != 0) { 4291 spa_load_failed(spa, "error loading spares nvlist"); 4292 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 4293 } 4294 4295 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 4296 spa_load_spares(spa); 4297 spa_config_exit(spa, SCL_ALL, FTAG); 4298 } else if (error == 0) { 4299 spa->spa_spares.sav_sync = B_TRUE; 4300 } 4301 4302 /* 4303 * Load any level 2 ARC devices for this pool. 
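 *
 * The logic mirrors the hot-spare case above: the directory entry may
 * legitimately be absent (ENOENT), it is skipped for SPA_IMPORT_ASSEMBLE,
 * and otherwise the saved nvlist is loaded and instantiated under SCL_ALL
 * via spa_load_l2cache(). Setting sav_sync in the assemble case
 * presumably arranges for the aux-device config to be rewritten on the
 * next sync.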
4304 */ 4305 error = spa_dir_prop(spa, DMU_POOL_L2CACHE, 4306 &spa->spa_l2cache.sav_object, B_FALSE); 4307 if (error != 0 && error != ENOENT) 4308 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 4309 if (error == 0 && type != SPA_IMPORT_ASSEMBLE) { 4310 ASSERT(spa_version(spa) >= SPA_VERSION_L2CACHE); 4311 if (load_nvlist(spa, spa->spa_l2cache.sav_object, 4312 &spa->spa_l2cache.sav_config) != 0) { 4313 spa_load_failed(spa, "error loading l2cache nvlist"); 4314 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 4315 } 4316 4317 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 4318 spa_load_l2cache(spa); 4319 spa_config_exit(spa, SCL_ALL, FTAG); 4320 } else if (error == 0) { 4321 spa->spa_l2cache.sav_sync = B_TRUE; 4322 } 4323 4324 return (0); 4325 } 4326 4327 static int 4328 spa_ld_load_vdev_metadata(spa_t *spa) 4329 { 4330 int error = 0; 4331 vdev_t *rvd = spa->spa_root_vdev; 4332 4333 /* 4334 * If the 'multihost' property is set, then never allow a pool to 4335 * be imported when the system hostid is zero. The exception to 4336 * this rule is zdb which is always allowed to access pools. 4337 */ 4338 if (spa_multihost(spa) && spa_get_hostid(spa) == 0 && 4339 (spa->spa_import_flags & ZFS_IMPORT_SKIP_MMP) == 0) { 4340 fnvlist_add_uint64(spa->spa_load_info, 4341 ZPOOL_CONFIG_MMP_STATE, MMP_STATE_NO_HOSTID); 4342 return (spa_vdev_err(rvd, VDEV_AUX_ACTIVE, EREMOTEIO)); 4343 } 4344 4345 /* 4346 * If the 'autoreplace' property is set, then post a resource notifying 4347 * the ZFS DE that it should not issue any faults for unopenable 4348 * devices. We also iterate over the vdevs, and post a sysevent for any 4349 * unopenable vdevs so that the normal autoreplace handler can take 4350 * over. 4351 */ 4352 if (spa->spa_autoreplace && spa->spa_load_state != SPA_LOAD_TRYIMPORT) { 4353 spa_check_removed(spa->spa_root_vdev); 4354 /* 4355 * For the import case, this is done in spa_import(), because 4356 * at this point we're using the spare definitions from 4357 * the MOS config, not necessarily from the userland config. 4358 */ 4359 if (spa->spa_load_state != SPA_LOAD_IMPORT) { 4360 spa_aux_check_removed(&spa->spa_spares); 4361 spa_aux_check_removed(&spa->spa_l2cache); 4362 } 4363 } 4364 4365 /* 4366 * Load the vdev metadata such as metaslabs, DTLs, spacemap object, etc. 4367 */ 4368 error = vdev_load(rvd); 4369 if (error != 0) { 4370 spa_load_failed(spa, "vdev_load failed [error=%d]", error); 4371 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, error)); 4372 } 4373 4374 error = spa_ld_log_spacemaps(spa); 4375 if (error != 0) { 4376 spa_load_failed(spa, "spa_ld_log_spacemaps failed [error=%d]", 4377 error); 4378 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, error)); 4379 } 4380 4381 /* 4382 * Propagate the leaf DTLs we just loaded all the way up the vdev tree. 
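 *
 * A DTL ("dirty time log") records the ranges of txgs during which a
 * vdev was unable to accept or retain writes. For example, if a leaf
 * was missing for txgs 100-200, its DTL_MISSING covers [100, 200];
 * vdev_dtl_reassess() then recomputes, level by level, whether the
 * remaining redundancy at each interior vdev can still reconstruct
 * those txgs. (Illustrative summary; see the DTL discussion around
 * vdev_dtl_reassess() for the authoritative semantics.)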
4383 */ 4384 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 4385 vdev_dtl_reassess(rvd, 0, 0, B_FALSE, B_FALSE); 4386 spa_config_exit(spa, SCL_ALL, FTAG); 4387 4388 return (0); 4389 } 4390 4391 static int 4392 spa_ld_load_dedup_tables(spa_t *spa) 4393 { 4394 int error = 0; 4395 vdev_t *rvd = spa->spa_root_vdev; 4396 4397 error = ddt_load(spa); 4398 if (error != 0) { 4399 spa_load_failed(spa, "ddt_load failed [error=%d]", error); 4400 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 4401 } 4402 4403 return (0); 4404 } 4405 4406 static int 4407 spa_ld_verify_logs(spa_t *spa, spa_import_type_t type, const char **ereport) 4408 { 4409 vdev_t *rvd = spa->spa_root_vdev; 4410 4411 if (type != SPA_IMPORT_ASSEMBLE && spa_writeable(spa)) { 4412 boolean_t missing = spa_check_logs(spa); 4413 if (missing) { 4414 if (spa->spa_missing_tvds != 0) { 4415 spa_load_note(spa, "spa_check_logs failed " 4416 "so dropping the logs"); 4417 } else { 4418 *ereport = FM_EREPORT_ZFS_LOG_REPLAY; 4419 spa_load_failed(spa, "spa_check_logs failed"); 4420 return (spa_vdev_err(rvd, VDEV_AUX_BAD_LOG, 4421 ENXIO)); 4422 } 4423 } 4424 } 4425 4426 return (0); 4427 } 4428 4429 static int 4430 spa_ld_verify_pool_data(spa_t *spa) 4431 { 4432 int error = 0; 4433 vdev_t *rvd = spa->spa_root_vdev; 4434 4435 /* 4436 * We've successfully opened the pool, verify that we're ready 4437 * to start pushing transactions. 4438 */ 4439 if (spa->spa_load_state != SPA_LOAD_TRYIMPORT) { 4440 error = spa_load_verify(spa); 4441 if (error != 0) { 4442 spa_load_failed(spa, "spa_load_verify failed " 4443 "[error=%d]", error); 4444 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, 4445 error)); 4446 } 4447 } 4448 4449 return (0); 4450 } 4451 4452 static void 4453 spa_ld_claim_log_blocks(spa_t *spa) 4454 { 4455 dmu_tx_t *tx; 4456 dsl_pool_t *dp = spa_get_dsl(spa); 4457 4458 /* 4459 * Claim log blocks that haven't been committed yet. 4460 * This must all happen in a single txg. 4461 * Note: spa_claim_max_txg is updated by spa_claim_notify(), 4462 * invoked from zil_claim_log_block()'s i/o done callback. 4463 * Price of rollback is that we abandon the log. 4464 */ 4465 spa->spa_claiming = B_TRUE; 4466 4467 tx = dmu_tx_create_assigned(dp, spa_first_txg(spa)); 4468 (void) dmu_objset_find_dp(dp, dp->dp_root_dir_obj, 4469 zil_claim, tx, DS_FIND_CHILDREN); 4470 dmu_tx_commit(tx); 4471 4472 spa->spa_claiming = B_FALSE; 4473 4474 spa_set_log_state(spa, SPA_LOG_GOOD); 4475 } 4476 4477 static void 4478 spa_ld_check_for_config_update(spa_t *spa, uint64_t config_cache_txg, 4479 boolean_t update_config_cache) 4480 { 4481 vdev_t *rvd = spa->spa_root_vdev; 4482 int need_update = B_FALSE; 4483 4484 /* 4485 * If the config cache is stale, or we have uninitialized 4486 * metaslabs (see spa_vdev_add()), then update the config. 4487 * 4488 * If this is a verbatim import, trust the current 4489 * in-core spa_config and update the disk labels. 4490 */ 4491 if (update_config_cache || config_cache_txg != spa->spa_config_txg || 4492 spa->spa_load_state == SPA_LOAD_IMPORT || 4493 spa->spa_load_state == SPA_LOAD_RECOVER || 4494 (spa->spa_import_flags & ZFS_IMPORT_VERBATIM)) 4495 need_update = B_TRUE; 4496 4497 for (int c = 0; c < rvd->vdev_children; c++) 4498 if (rvd->vdev_child[c]->vdev_ms_array == 0) 4499 need_update = B_TRUE; 4500 4501 /* 4502 * Update the config cache asynchronously in case we're the 4503 * root pool, in which case the config cache isn't writable yet. 
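 *
 * SPA_ASYNC_CONFIG_UPDATE only records the request here; it is picked up
 * later by the spa async thread, which arranges for the config stored in
 * the vdev labels and in the cachefile to be refreshed once the pool can
 * be written (see the related note about /etc/zfs/zpool.cache in
 * spa_load_impl() below).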
4504 */ 4505 if (need_update) 4506 spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE); 4507 } 4508 4509 static void 4510 spa_ld_prepare_for_reload(spa_t *spa) 4511 { 4512 spa_mode_t mode = spa->spa_mode; 4513 int async_suspended = spa->spa_async_suspended; 4514 4515 spa_unload(spa); 4516 spa_deactivate(spa); 4517 spa_activate(spa, mode); 4518 4519 /* 4520 * We save the value of spa_async_suspended as it gets reset to 0 by 4521 * spa_unload(). We want to restore it back to the original value before 4522 * returning as we might be calling spa_async_resume() later. 4523 */ 4524 spa->spa_async_suspended = async_suspended; 4525 } 4526 4527 static int 4528 spa_ld_read_checkpoint_txg(spa_t *spa) 4529 { 4530 uberblock_t checkpoint; 4531 int error = 0; 4532 4533 ASSERT0(spa->spa_checkpoint_txg); 4534 ASSERT(MUTEX_HELD(&spa_namespace_lock)); 4535 4536 error = zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT, 4537 DMU_POOL_ZPOOL_CHECKPOINT, sizeof (uint64_t), 4538 sizeof (uberblock_t) / sizeof (uint64_t), &checkpoint); 4539 4540 if (error == ENOENT) 4541 return (0); 4542 4543 if (error != 0) 4544 return (error); 4545 4546 ASSERT3U(checkpoint.ub_txg, !=, 0); 4547 ASSERT3U(checkpoint.ub_checkpoint_txg, !=, 0); 4548 ASSERT3U(checkpoint.ub_timestamp, !=, 0); 4549 spa->spa_checkpoint_txg = checkpoint.ub_txg; 4550 spa->spa_checkpoint_info.sci_timestamp = checkpoint.ub_timestamp; 4551 4552 return (0); 4553 } 4554 4555 static int 4556 spa_ld_mos_init(spa_t *spa, spa_import_type_t type) 4557 { 4558 int error = 0; 4559 4560 ASSERT(MUTEX_HELD(&spa_namespace_lock)); 4561 ASSERT(spa->spa_config_source != SPA_CONFIG_SRC_NONE); 4562 4563 /* 4564 * Never trust the config that is provided unless we are assembling 4565 * a pool following a split. 4566 * This means don't trust blkptrs and the vdev tree in general. This 4567 * also effectively puts the spa in read-only mode since 4568 * spa_writeable() checks for spa_trust_config to be true. 4569 * We will later load a trusted config from the MOS. 4570 */ 4571 if (type != SPA_IMPORT_ASSEMBLE) 4572 spa->spa_trust_config = B_FALSE; 4573 4574 /* 4575 * Parse the config provided to create a vdev tree. 4576 */ 4577 error = spa_ld_parse_config(spa, type); 4578 if (error != 0) 4579 return (error); 4580 4581 spa_import_progress_add(spa); 4582 4583 /* 4584 * Now that we have the vdev tree, try to open each vdev. This involves 4585 * opening the underlying physical device, retrieving its geometry and 4586 * probing the vdev with a dummy I/O. The state of each vdev will be set 4587 * based on the success of those operations. After this we'll be ready 4588 * to read from the vdevs. 4589 */ 4590 error = spa_ld_open_vdevs(spa); 4591 if (error != 0) 4592 return (error); 4593 4594 /* 4595 * Read the label of each vdev and make sure that the GUIDs stored 4596 * there match the GUIDs in the config provided. 4597 * If we're assembling a new pool that's been split off from an 4598 * existing pool, the labels haven't yet been updated so we skip 4599 * validation for now. 4600 */ 4601 if (type != SPA_IMPORT_ASSEMBLE) { 4602 error = spa_ld_validate_vdevs(spa); 4603 if (error != 0) 4604 return (error); 4605 } 4606 4607 /* 4608 * Read all vdev labels to find the best uberblock (i.e. latest, 4609 * unless spa_load_max_txg is set) and store it in spa_uberblock. We 4610 * get the list of features required to read blkptrs in the MOS from 4611 * the vdev label with the best uberblock and verify that our version 4612 * of zfs supports them all. 
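 *
 * "Best" follows the ordering used by the uberblock comparator referenced
 * in spa_ld_checkpoint_rewind() below: a higher txg wins, with the
 * timestamp as the tie-breaker, which is also why that function bumps
 * both fields to make the checkpointed uberblock win. Roughly:
 *
 *	if (ub1->ub_txg != ub2->ub_txg)
 *		best = (ub1->ub_txg > ub2->ub_txg) ? ub1 : ub2;
 *	else
 *		best = (ub1->ub_timestamp >= ub2->ub_timestamp) ? ub1 : ub2;
 *
 * (Simplified sketch; the real comparison in vdev_uberblock_compare()
 * has additional tie-breakers, e.g. for MMP.)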
4613 */ 4614 error = spa_ld_select_uberblock(spa, type); 4615 if (error != 0) 4616 return (error); 4617 4618 /* 4619 * Pass that uberblock to the dsl_pool layer which will open the root 4620 * blkptr. This blkptr points to the latest version of the MOS and will 4621 * allow us to read its contents. 4622 */ 4623 error = spa_ld_open_rootbp(spa); 4624 if (error != 0) 4625 return (error); 4626 4627 return (0); 4628 } 4629 4630 static int 4631 spa_ld_checkpoint_rewind(spa_t *spa) 4632 { 4633 uberblock_t checkpoint; 4634 int error = 0; 4635 4636 ASSERT(MUTEX_HELD(&spa_namespace_lock)); 4637 ASSERT(spa->spa_import_flags & ZFS_IMPORT_CHECKPOINT); 4638 4639 error = zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT, 4640 DMU_POOL_ZPOOL_CHECKPOINT, sizeof (uint64_t), 4641 sizeof (uberblock_t) / sizeof (uint64_t), &checkpoint); 4642 4643 if (error != 0) { 4644 spa_load_failed(spa, "unable to retrieve checkpointed " 4645 "uberblock from the MOS config [error=%d]", error); 4646 4647 if (error == ENOENT) 4648 error = ZFS_ERR_NO_CHECKPOINT; 4649 4650 return (error); 4651 } 4652 4653 ASSERT3U(checkpoint.ub_txg, <, spa->spa_uberblock.ub_txg); 4654 ASSERT3U(checkpoint.ub_txg, ==, checkpoint.ub_checkpoint_txg); 4655 4656 /* 4657 * We need to update the txg and timestamp of the checkpointed 4658 * uberblock to be higher than the latest one. This ensures that 4659 * the checkpointed uberblock is selected if we were to close and 4660 * reopen the pool right after we've written it in the vdev labels. 4661 * (also see block comment in vdev_uberblock_compare) 4662 */ 4663 checkpoint.ub_txg = spa->spa_uberblock.ub_txg + 1; 4664 checkpoint.ub_timestamp = gethrestime_sec(); 4665 4666 /* 4667 * Set current uberblock to be the checkpointed uberblock. 4668 */ 4669 spa->spa_uberblock = checkpoint; 4670 4671 /* 4672 * If we are doing a normal rewind, then the pool is open for 4673 * writing and we sync the "updated" checkpointed uberblock to 4674 * disk. Once this is done, we've basically rewound the whole 4675 * pool and there is no way back. 4676 * 4677 * There are cases when we don't want to attempt and sync the 4678 * checkpointed uberblock to disk because we are opening a 4679 * pool as read-only. Specifically, verifying the checkpointed 4680 * state with zdb, and importing the checkpointed state to get 4681 * a "preview" of its content. 
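 *
 * From userland those read-only paths correspond roughly to
 *
 *	zdb -k <pool>
 *	zpool import -o readonly=on --rewind-to-checkpoint <pool>
 *
 * (illustrative command forms; zdb(8) and zpool-import(8) are
 * authoritative for the exact options).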
4682 */ 4683 if (spa_writeable(spa)) { 4684 vdev_t *rvd = spa->spa_root_vdev; 4685 4686 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 4687 vdev_t *svd[SPA_SYNC_MIN_VDEVS] = { NULL }; 4688 int svdcount = 0; 4689 int children = rvd->vdev_children; 4690 int c0 = random_in_range(children); 4691 4692 for (int c = 0; c < children; c++) { 4693 vdev_t *vd = rvd->vdev_child[(c0 + c) % children]; 4694 4695 /* Stop when revisiting the first vdev */ 4696 if (c > 0 && svd[0] == vd) 4697 break; 4698 4699 if (vd->vdev_ms_array == 0 || vd->vdev_islog || 4700 !vdev_is_concrete(vd)) 4701 continue; 4702 4703 svd[svdcount++] = vd; 4704 if (svdcount == SPA_SYNC_MIN_VDEVS) 4705 break; 4706 } 4707 error = vdev_config_sync(svd, svdcount, spa->spa_first_txg); 4708 if (error == 0) 4709 spa->spa_last_synced_guid = rvd->vdev_guid; 4710 spa_config_exit(spa, SCL_ALL, FTAG); 4711 4712 if (error != 0) { 4713 spa_load_failed(spa, "failed to write checkpointed " 4714 "uberblock to the vdev labels [error=%d]", error); 4715 return (error); 4716 } 4717 } 4718 4719 return (0); 4720 } 4721 4722 static int 4723 spa_ld_mos_with_trusted_config(spa_t *spa, spa_import_type_t type, 4724 boolean_t *update_config_cache) 4725 { 4726 int error; 4727 4728 /* 4729 * Parse the config for pool, open and validate vdevs, 4730 * select an uberblock, and use that uberblock to open 4731 * the MOS. 4732 */ 4733 error = spa_ld_mos_init(spa, type); 4734 if (error != 0) 4735 return (error); 4736 4737 /* 4738 * Retrieve the trusted config stored in the MOS and use it to create 4739 * a new, exact version of the vdev tree, then reopen all vdevs. 4740 */ 4741 error = spa_ld_trusted_config(spa, type, B_FALSE); 4742 if (error == EAGAIN) { 4743 if (update_config_cache != NULL) 4744 *update_config_cache = B_TRUE; 4745 4746 /* 4747 * Redo the loading process with the trusted config if it is 4748 * too different from the untrusted config. 4749 */ 4750 spa_ld_prepare_for_reload(spa); 4751 spa_load_note(spa, "RELOADING"); 4752 error = spa_ld_mos_init(spa, type); 4753 if (error != 0) 4754 return (error); 4755 4756 error = spa_ld_trusted_config(spa, type, B_TRUE); 4757 if (error != 0) 4758 return (error); 4759 4760 } else if (error != 0) { 4761 return (error); 4762 } 4763 4764 return (0); 4765 } 4766 4767 /* 4768 * Load an existing storage pool, using the config provided. This config 4769 * describes which vdevs are part of the pool and is later validated against 4770 * partial configs present in each vdev's label and an entire copy of the 4771 * config stored in the MOS. 4772 */ 4773 static int 4774 spa_load_impl(spa_t *spa, spa_import_type_t type, const char **ereport) 4775 { 4776 int error = 0; 4777 boolean_t missing_feat_write = B_FALSE; 4778 boolean_t checkpoint_rewind = 4779 (spa->spa_import_flags & ZFS_IMPORT_CHECKPOINT); 4780 boolean_t update_config_cache = B_FALSE; 4781 4782 ASSERT(MUTEX_HELD(&spa_namespace_lock)); 4783 ASSERT(spa->spa_config_source != SPA_CONFIG_SRC_NONE); 4784 4785 spa_load_note(spa, "LOADING"); 4786 4787 error = spa_ld_mos_with_trusted_config(spa, type, &update_config_cache); 4788 if (error != 0) 4789 return (error); 4790 4791 /* 4792 * If we are rewinding to the checkpoint then we need to repeat 4793 * everything we've done so far in this function but this time 4794 * selecting the checkpointed uberblock and using that to open 4795 * the MOS. 4796 */ 4797 if (checkpoint_rewind) { 4798 /* 4799 * If we are rewinding to the checkpoint update config cache 4800 * anyway. 
4800 * anyway.
4801 */ 4802 update_config_cache = B_TRUE; 4803 4804 /* 4805 * Extract the checkpointed uberblock from the current MOS 4806 * and use this as the pool's uberblock from now on. If the 4807 * pool is imported as writeable we also write the checkpoint 4808 * uberblock to the labels, making the rewind permanent. 4809 */ 4810 error = spa_ld_checkpoint_rewind(spa); 4811 if (error != 0) 4812 return (error); 4813 4814 /* 4815 * Redo the loading process again with the 4816 * checkpointed uberblock. 4817 */ 4818 spa_ld_prepare_for_reload(spa); 4819 spa_load_note(spa, "LOADING checkpointed uberblock"); 4820 error = spa_ld_mos_with_trusted_config(spa, type, NULL); 4821 if (error != 0) 4822 return (error); 4823 } 4824 4825 /* 4826 * Retrieve the checkpoint txg if the pool has a checkpoint. 4827 */ 4828 error = spa_ld_read_checkpoint_txg(spa); 4829 if (error != 0) 4830 return (error); 4831 4832 /* 4833 * Retrieve the mapping of indirect vdevs. Those vdevs were removed 4834 * from the pool and their contents were re-mapped to other vdevs. Note 4835 * that everything that we read before this step must have been 4836 * rewritten on concrete vdevs after the last device removal was 4837 * initiated. Otherwise we could be reading from indirect vdevs before 4838 * we have loaded their mappings. 4839 */ 4840 error = spa_ld_open_indirect_vdev_metadata(spa); 4841 if (error != 0) 4842 return (error); 4843 4844 /* 4845 * Retrieve the full list of active features from the MOS and check if 4846 * they are all supported. 4847 */ 4848 error = spa_ld_check_features(spa, &missing_feat_write); 4849 if (error != 0) 4850 return (error); 4851 4852 /* 4853 * Load several special directories from the MOS needed by the dsl_pool 4854 * layer. 4855 */ 4856 error = spa_ld_load_special_directories(spa); 4857 if (error != 0) 4858 return (error); 4859 4860 /* 4861 * Retrieve pool properties from the MOS. 4862 */ 4863 error = spa_ld_get_props(spa); 4864 if (error != 0) 4865 return (error); 4866 4867 /* 4868 * Retrieve the list of auxiliary devices - cache devices and spares - 4869 * and open them. 4870 */ 4871 error = spa_ld_open_aux_vdevs(spa, type); 4872 if (error != 0) 4873 return (error); 4874 4875 /* 4876 * Load the metadata for all vdevs. Also check if unopenable devices 4877 * should be autoreplaced. 4878 */ 4879 error = spa_ld_load_vdev_metadata(spa); 4880 if (error != 0) 4881 return (error); 4882 4883 error = spa_ld_load_dedup_tables(spa); 4884 if (error != 0) 4885 return (error); 4886 4887 /* 4888 * Verify the logs now to make sure we don't have any unexpected errors 4889 * when we claim log blocks later. 4890 */ 4891 error = spa_ld_verify_logs(spa, type, ereport); 4892 if (error != 0) 4893 return (error); 4894 4895 if (missing_feat_write) { 4896 ASSERT(spa->spa_load_state == SPA_LOAD_TRYIMPORT); 4897 4898 /* 4899 * At this point, we know that we can open the pool in 4900 * read-only mode but not read-write mode. We now have enough 4901 * information and can return to userland. 4902 */ 4903 return (spa_vdev_err(spa->spa_root_vdev, VDEV_AUX_UNSUP_FEAT, 4904 ENOTSUP)); 4905 } 4906 4907 /* 4908 * Traverse the last txgs to make sure the pool was left off in a safe 4909 * state. When performing an extreme rewind, we verify the whole pool, 4910 * which can take a very long time. 4911 */ 4912 error = spa_ld_verify_pool_data(spa); 4913 if (error != 0) 4914 return (error); 4915 4916 /* 4917 * Calculate the deflated space for the pool. 
This must be done before 4918 * we write anything to the pool because we'd need to update the space 4919 * accounting using the deflated sizes. 4920 */ 4921 spa_update_dspace(spa); 4922 4923 /* 4924 * We have now retrieved all the information we needed to open the 4925 * pool. If we are importing the pool in read-write mode, a few 4926 * additional steps must be performed to finish the import. 4927 */ 4928 if (spa_writeable(spa) && (spa->spa_load_state == SPA_LOAD_RECOVER || 4929 spa->spa_load_max_txg == UINT64_MAX)) { 4930 uint64_t config_cache_txg = spa->spa_config_txg; 4931 4932 ASSERT(spa->spa_load_state != SPA_LOAD_TRYIMPORT); 4933 4934 /* 4935 * In case of a checkpoint rewind, log the original txg 4936 * of the checkpointed uberblock. 4937 */ 4938 if (checkpoint_rewind) { 4939 spa_history_log_internal(spa, "checkpoint rewind", 4940 NULL, "rewound state to txg=%llu", 4941 (u_longlong_t)spa->spa_uberblock.ub_checkpoint_txg); 4942 } 4943 4944 /* 4945 * Traverse the ZIL and claim all blocks. 4946 */ 4947 spa_ld_claim_log_blocks(spa); 4948 4949 /* 4950 * Kick-off the syncing thread. 4951 */ 4952 spa->spa_sync_on = B_TRUE; 4953 txg_sync_start(spa->spa_dsl_pool); 4954 mmp_thread_start(spa); 4955 4956 /* 4957 * Wait for all claims to sync. We sync up to the highest 4958 * claimed log block birth time so that claimed log blocks 4959 * don't appear to be from the future. spa_claim_max_txg 4960 * will have been set for us by ZIL traversal operations 4961 * performed above. 4962 */ 4963 txg_wait_synced(spa->spa_dsl_pool, spa->spa_claim_max_txg); 4964 4965 /* 4966 * Check if we need to request an update of the config. On the 4967 * next sync, we would update the config stored in vdev labels 4968 * and the cachefile (by default /etc/zfs/zpool.cache). 4969 */ 4970 spa_ld_check_for_config_update(spa, config_cache_txg, 4971 update_config_cache); 4972 4973 /* 4974 * Check if a rebuild was in progress and if so resume it. 4975 * Then check all DTLs to see if anything needs resilvering. 4976 * The resilver will be deferred if a rebuild was started. 4977 */ 4978 if (vdev_rebuild_active(spa->spa_root_vdev)) { 4979 vdev_rebuild_restart(spa); 4980 } else if (!dsl_scan_resilvering(spa->spa_dsl_pool) && 4981 vdev_resilver_needed(spa->spa_root_vdev, NULL, NULL)) { 4982 spa_async_request(spa, SPA_ASYNC_RESILVER); 4983 } 4984 4985 /* 4986 * Log the fact that we booted up (so that we can detect if 4987 * we rebooted in the middle of an operation). 4988 */ 4989 spa_history_log_version(spa, "open", NULL); 4990 4991 spa_restart_removal(spa); 4992 spa_spawn_aux_threads(spa); 4993 4994 /* 4995 * Delete any inconsistent datasets. 4996 * 4997 * Note: 4998 * Since we may be issuing deletes for clones here, 4999 * we make sure to do so after we've spawned all the 5000 * auxiliary threads above (from which the livelist 5001 * deletion zthr is part of). 5002 */ 5003 (void) dmu_objset_find(spa_name(spa), 5004 dsl_destroy_inconsistent, NULL, DS_FIND_CHILDREN); 5005 5006 /* 5007 * Clean up any stale temporary dataset userrefs. 
5008 */ 5009 dsl_pool_clean_tmp_userrefs(spa->spa_dsl_pool); 5010 5011 spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER); 5012 vdev_initialize_restart(spa->spa_root_vdev); 5013 vdev_trim_restart(spa->spa_root_vdev); 5014 vdev_autotrim_restart(spa); 5015 spa_config_exit(spa, SCL_CONFIG, FTAG); 5016 } 5017 5018 spa_import_progress_remove(spa_guid(spa)); 5019 spa_async_request(spa, SPA_ASYNC_L2CACHE_REBUILD); 5020 5021 spa_load_note(spa, "LOADED"); 5022 5023 return (0); 5024 } 5025 5026 static int 5027 spa_load_retry(spa_t *spa, spa_load_state_t state) 5028 { 5029 spa_mode_t mode = spa->spa_mode; 5030 5031 spa_unload(spa); 5032 spa_deactivate(spa); 5033 5034 spa->spa_load_max_txg = spa->spa_uberblock.ub_txg - 1; 5035 5036 spa_activate(spa, mode); 5037 spa_async_suspend(spa); 5038 5039 spa_load_note(spa, "spa_load_retry: rewind, max txg: %llu", 5040 (u_longlong_t)spa->spa_load_max_txg); 5041 5042 return (spa_load(spa, state, SPA_IMPORT_EXISTING)); 5043 } 5044 5045 /* 5046 * If spa_load() fails this function will try loading prior txg's. If 5047 * 'state' is SPA_LOAD_RECOVER and one of these loads succeeds the pool 5048 * will be rewound to that txg. If 'state' is not SPA_LOAD_RECOVER this 5049 * function will not rewind the pool and will return the same error as 5050 * spa_load(). 5051 */ 5052 static int 5053 spa_load_best(spa_t *spa, spa_load_state_t state, uint64_t max_request, 5054 int rewind_flags) 5055 { 5056 nvlist_t *loadinfo = NULL; 5057 nvlist_t *config = NULL; 5058 int load_error, rewind_error; 5059 uint64_t safe_rewind_txg; 5060 uint64_t min_txg; 5061 5062 if (spa->spa_load_txg && state == SPA_LOAD_RECOVER) { 5063 spa->spa_load_max_txg = spa->spa_load_txg; 5064 spa_set_log_state(spa, SPA_LOG_CLEAR); 5065 } else { 5066 spa->spa_load_max_txg = max_request; 5067 if (max_request != UINT64_MAX) 5068 spa->spa_extreme_rewind = B_TRUE; 5069 } 5070 5071 load_error = rewind_error = spa_load(spa, state, SPA_IMPORT_EXISTING); 5072 if (load_error == 0) 5073 return (0); 5074 if (load_error == ZFS_ERR_NO_CHECKPOINT) { 5075 /* 5076 * When attempting checkpoint-rewind on a pool with no 5077 * checkpoint, we should not attempt to load uberblocks 5078 * from previous txgs when spa_load fails. 5079 */ 5080 ASSERT(spa->spa_import_flags & ZFS_IMPORT_CHECKPOINT); 5081 spa_import_progress_remove(spa_guid(spa)); 5082 return (load_error); 5083 } 5084 5085 if (spa->spa_root_vdev != NULL) 5086 config = spa_config_generate(spa, NULL, -1ULL, B_TRUE); 5087 5088 spa->spa_last_ubsync_txg = spa->spa_uberblock.ub_txg; 5089 spa->spa_last_ubsync_txg_ts = spa->spa_uberblock.ub_timestamp; 5090 5091 if (rewind_flags & ZPOOL_NEVER_REWIND) { 5092 nvlist_free(config); 5093 spa_import_progress_remove(spa_guid(spa)); 5094 return (load_error); 5095 } 5096 5097 if (state == SPA_LOAD_RECOVER) { 5098 /* Price of rolling back is discarding txgs, including log */ 5099 spa_set_log_state(spa, SPA_LOG_CLEAR); 5100 } else { 5101 /* 5102 * If we aren't rolling back save the load info from our first 5103 * import attempt so that we can restore it after attempting 5104 * to rewind. 5105 */ 5106 loadinfo = spa->spa_load_info; 5107 spa->spa_load_info = fnvlist_alloc(); 5108 } 5109 5110 spa->spa_load_max_txg = spa->spa_last_ubsync_txg; 5111 safe_rewind_txg = spa->spa_last_ubsync_txg - TXG_DEFER_SIZE; 5112 min_txg = (rewind_flags & ZPOOL_EXTREME_REWIND) ? 
5113 TXG_INITIAL : safe_rewind_txg; 5114 5115 /* 5116 * Continue as long as we're finding errors, we're still within 5117 * the acceptable rewind range, and we're still finding uberblocks 5118 */ 5119 while (rewind_error && spa->spa_uberblock.ub_txg >= min_txg && 5120 spa->spa_uberblock.ub_txg <= spa->spa_load_max_txg) { 5121 if (spa->spa_load_max_txg < safe_rewind_txg) 5122 spa->spa_extreme_rewind = B_TRUE; 5123 rewind_error = spa_load_retry(spa, state); 5124 } 5125 5126 spa->spa_extreme_rewind = B_FALSE; 5127 spa->spa_load_max_txg = UINT64_MAX; 5128 5129 if (config && (rewind_error || state != SPA_LOAD_RECOVER)) 5130 spa_config_set(spa, config); 5131 else 5132 nvlist_free(config); 5133 5134 if (state == SPA_LOAD_RECOVER) { 5135 ASSERT3P(loadinfo, ==, NULL); 5136 spa_import_progress_remove(spa_guid(spa)); 5137 return (rewind_error); 5138 } else { 5139 /* Store the rewind info as part of the initial load info */ 5140 fnvlist_add_nvlist(loadinfo, ZPOOL_CONFIG_REWIND_INFO, 5141 spa->spa_load_info); 5142 5143 /* Restore the initial load info */ 5144 fnvlist_free(spa->spa_load_info); 5145 spa->spa_load_info = loadinfo; 5146 5147 spa_import_progress_remove(spa_guid(spa)); 5148 return (load_error); 5149 } 5150 } 5151 5152 /* 5153 * Pool Open/Import 5154 * 5155 * The import case is identical to an open except that the configuration is sent 5156 * down from userland, instead of grabbed from the configuration cache. For the 5157 * case of an open, the pool configuration will exist in the 5158 * POOL_STATE_UNINITIALIZED state. 5159 * 5160 * The stats information (gen/count/ustats) is used to gather vdev statistics at 5161 * the same time open the pool, without having to keep around the spa_t in some 5162 * ambiguous state. 5163 */ 5164 static int 5165 spa_open_common(const char *pool, spa_t **spapp, const void *tag, 5166 nvlist_t *nvpolicy, nvlist_t **config) 5167 { 5168 spa_t *spa; 5169 spa_load_state_t state = SPA_LOAD_OPEN; 5170 int error; 5171 int locked = B_FALSE; 5172 int firstopen = B_FALSE; 5173 5174 *spapp = NULL; 5175 5176 /* 5177 * As disgusting as this is, we need to support recursive calls to this 5178 * function because dsl_dir_open() is called during spa_load(), and ends 5179 * up calling spa_open() again. The real fix is to figure out how to 5180 * avoid dsl_dir_open() calling this in the first place. 5181 */ 5182 if (MUTEX_NOT_HELD(&spa_namespace_lock)) { 5183 mutex_enter(&spa_namespace_lock); 5184 locked = B_TRUE; 5185 } 5186 5187 if ((spa = spa_lookup(pool)) == NULL) { 5188 if (locked) 5189 mutex_exit(&spa_namespace_lock); 5190 return (SET_ERROR(ENOENT)); 5191 } 5192 5193 if (spa->spa_state == POOL_STATE_UNINITIALIZED) { 5194 zpool_load_policy_t policy; 5195 5196 firstopen = B_TRUE; 5197 5198 zpool_get_load_policy(nvpolicy ? nvpolicy : spa->spa_config, 5199 &policy); 5200 if (policy.zlp_rewind & ZPOOL_DO_REWIND) 5201 state = SPA_LOAD_RECOVER; 5202 5203 spa_activate(spa, spa_mode_global); 5204 5205 if (state != SPA_LOAD_RECOVER) 5206 spa->spa_last_ubsync_txg = spa->spa_load_txg = 0; 5207 spa->spa_config_source = SPA_CONFIG_SRC_CACHEFILE; 5208 5209 zfs_dbgmsg("spa_open_common: opening %s", pool); 5210 error = spa_load_best(spa, state, policy.zlp_txg, 5211 policy.zlp_rewind); 5212 5213 if (error == EBADF) { 5214 /* 5215 * If vdev_validate() returns failure (indicated by 5216 * EBADF), it indicates that one of the vdevs indicates 5217 * that the pool has been exported or destroyed. 
If 5218 * this is the case, the config cache is out of sync and 5219 * we should remove the pool from the namespace. 5220 */ 5221 spa_unload(spa); 5222 spa_deactivate(spa); 5223 spa_write_cachefile(spa, B_TRUE, B_TRUE); 5224 spa_remove(spa); 5225 if (locked) 5226 mutex_exit(&spa_namespace_lock); 5227 return (SET_ERROR(ENOENT)); 5228 } 5229 5230 if (error) { 5231 /* 5232 * We can't open the pool, but we still have useful 5233 * information: the state of each vdev after the 5234 * attempted vdev_open(). Return this to the user. 5235 */ 5236 if (config != NULL && spa->spa_config) { 5237 *config = fnvlist_dup(spa->spa_config); 5238 fnvlist_add_nvlist(*config, 5239 ZPOOL_CONFIG_LOAD_INFO, 5240 spa->spa_load_info); 5241 } 5242 spa_unload(spa); 5243 spa_deactivate(spa); 5244 spa->spa_last_open_failed = error; 5245 if (locked) 5246 mutex_exit(&spa_namespace_lock); 5247 *spapp = NULL; 5248 return (error); 5249 } 5250 } 5251 5252 spa_open_ref(spa, tag); 5253 5254 if (config != NULL) 5255 *config = spa_config_generate(spa, NULL, -1ULL, B_TRUE); 5256 5257 /* 5258 * If we've recovered the pool, pass back any information we 5259 * gathered while doing the load. 5260 */ 5261 if (state == SPA_LOAD_RECOVER) { 5262 fnvlist_add_nvlist(*config, ZPOOL_CONFIG_LOAD_INFO, 5263 spa->spa_load_info); 5264 } 5265 5266 if (locked) { 5267 spa->spa_last_open_failed = 0; 5268 spa->spa_last_ubsync_txg = 0; 5269 spa->spa_load_txg = 0; 5270 mutex_exit(&spa_namespace_lock); 5271 } 5272 5273 if (firstopen) 5274 zvol_create_minors_recursive(spa_name(spa)); 5275 5276 *spapp = spa; 5277 5278 return (0); 5279 } 5280 5281 int 5282 spa_open_rewind(const char *name, spa_t **spapp, const void *tag, 5283 nvlist_t *policy, nvlist_t **config) 5284 { 5285 return (spa_open_common(name, spapp, tag, policy, config)); 5286 } 5287 5288 int 5289 spa_open(const char *name, spa_t **spapp, const void *tag) 5290 { 5291 return (spa_open_common(name, spapp, tag, NULL, NULL)); 5292 } 5293 5294 /* 5295 * Lookup the given spa_t, incrementing the inject count in the process, 5296 * preventing it from being exported or destroyed. 5297 */ 5298 spa_t * 5299 spa_inject_addref(char *name) 5300 { 5301 spa_t *spa; 5302 5303 mutex_enter(&spa_namespace_lock); 5304 if ((spa = spa_lookup(name)) == NULL) { 5305 mutex_exit(&spa_namespace_lock); 5306 return (NULL); 5307 } 5308 spa->spa_inject_ref++; 5309 mutex_exit(&spa_namespace_lock); 5310 5311 return (spa); 5312 } 5313 5314 void 5315 spa_inject_delref(spa_t *spa) 5316 { 5317 mutex_enter(&spa_namespace_lock); 5318 spa->spa_inject_ref--; 5319 mutex_exit(&spa_namespace_lock); 5320 } 5321 5322 /* 5323 * Add spares device information to the nvlist. 5324 */ 5325 static void 5326 spa_add_spares(spa_t *spa, nvlist_t *config) 5327 { 5328 nvlist_t **spares; 5329 uint_t i, nspares; 5330 nvlist_t *nvroot; 5331 uint64_t guid; 5332 vdev_stat_t *vs; 5333 uint_t vsc; 5334 uint64_t pool; 5335 5336 ASSERT(spa_config_held(spa, SCL_CONFIG, RW_READER)); 5337 5338 if (spa->spa_spares.sav_count == 0) 5339 return; 5340 5341 nvroot = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE); 5342 VERIFY0(nvlist_lookup_nvlist_array(spa->spa_spares.sav_config, 5343 ZPOOL_CONFIG_SPARES, &spares, &nspares)); 5344 if (nspares != 0) { 5345 fnvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES, 5346 (const nvlist_t * const *)spares, nspares); 5347 VERIFY0(nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES, 5348 &spares, &nspares)); 5349 5350 /* 5351 * Go through and find any spares which have since been 5352 * repurposed as an active spare. 
If this is the case, update 5353 * their status appropriately. 5354 */ 5355 for (i = 0; i < nspares; i++) { 5356 guid = fnvlist_lookup_uint64(spares[i], 5357 ZPOOL_CONFIG_GUID); 5358 if (spa_spare_exists(guid, &pool, NULL) && 5359 pool != 0ULL) { 5360 VERIFY0(nvlist_lookup_uint64_array(spares[i], 5361 ZPOOL_CONFIG_VDEV_STATS, (uint64_t **)&vs, 5362 &vsc)); 5363 vs->vs_state = VDEV_STATE_CANT_OPEN; 5364 vs->vs_aux = VDEV_AUX_SPARED; 5365 } 5366 } 5367 } 5368 } 5369 5370 /* 5371 * Add l2cache device information to the nvlist, including vdev stats. 5372 */ 5373 static void 5374 spa_add_l2cache(spa_t *spa, nvlist_t *config) 5375 { 5376 nvlist_t **l2cache; 5377 uint_t i, j, nl2cache; 5378 nvlist_t *nvroot; 5379 uint64_t guid; 5380 vdev_t *vd; 5381 vdev_stat_t *vs; 5382 uint_t vsc; 5383 5384 ASSERT(spa_config_held(spa, SCL_CONFIG, RW_READER)); 5385 5386 if (spa->spa_l2cache.sav_count == 0) 5387 return; 5388 5389 nvroot = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE); 5390 VERIFY0(nvlist_lookup_nvlist_array(spa->spa_l2cache.sav_config, 5391 ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache)); 5392 if (nl2cache != 0) { 5393 fnvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE, 5394 (const nvlist_t * const *)l2cache, nl2cache); 5395 VERIFY0(nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE, 5396 &l2cache, &nl2cache)); 5397 5398 /* 5399 * Update level 2 cache device stats. 5400 */ 5401 5402 for (i = 0; i < nl2cache; i++) { 5403 guid = fnvlist_lookup_uint64(l2cache[i], 5404 ZPOOL_CONFIG_GUID); 5405 5406 vd = NULL; 5407 for (j = 0; j < spa->spa_l2cache.sav_count; j++) { 5408 if (guid == 5409 spa->spa_l2cache.sav_vdevs[j]->vdev_guid) { 5410 vd = spa->spa_l2cache.sav_vdevs[j]; 5411 break; 5412 } 5413 } 5414 ASSERT(vd != NULL); 5415 5416 VERIFY0(nvlist_lookup_uint64_array(l2cache[i], 5417 ZPOOL_CONFIG_VDEV_STATS, (uint64_t **)&vs, &vsc)); 5418 vdev_get_stats(vd, vs); 5419 vdev_config_generate_stats(vd, l2cache[i]); 5420 5421 } 5422 } 5423 } 5424 5425 static void 5426 spa_feature_stats_from_disk(spa_t *spa, nvlist_t *features) 5427 { 5428 zap_cursor_t zc; 5429 zap_attribute_t za; 5430 5431 if (spa->spa_feat_for_read_obj != 0) { 5432 for (zap_cursor_init(&zc, spa->spa_meta_objset, 5433 spa->spa_feat_for_read_obj); 5434 zap_cursor_retrieve(&zc, &za) == 0; 5435 zap_cursor_advance(&zc)) { 5436 ASSERT(za.za_integer_length == sizeof (uint64_t) && 5437 za.za_num_integers == 1); 5438 VERIFY0(nvlist_add_uint64(features, za.za_name, 5439 za.za_first_integer)); 5440 } 5441 zap_cursor_fini(&zc); 5442 } 5443 5444 if (spa->spa_feat_for_write_obj != 0) { 5445 for (zap_cursor_init(&zc, spa->spa_meta_objset, 5446 spa->spa_feat_for_write_obj); 5447 zap_cursor_retrieve(&zc, &za) == 0; 5448 zap_cursor_advance(&zc)) { 5449 ASSERT(za.za_integer_length == sizeof (uint64_t) && 5450 za.za_num_integers == 1); 5451 VERIFY0(nvlist_add_uint64(features, za.za_name, 5452 za.za_first_integer)); 5453 } 5454 zap_cursor_fini(&zc); 5455 } 5456 } 5457 5458 static void 5459 spa_feature_stats_from_cache(spa_t *spa, nvlist_t *features) 5460 { 5461 int i; 5462 5463 for (i = 0; i < SPA_FEATURES; i++) { 5464 zfeature_info_t feature = spa_feature_table[i]; 5465 uint64_t refcount; 5466 5467 if (feature_get_refcount(spa, &feature, &refcount) != 0) 5468 continue; 5469 5470 VERIFY0(nvlist_add_uint64(features, feature.fi_guid, refcount)); 5471 } 5472 } 5473 5474 /* 5475 * Store a list of pool features and their reference counts in the 5476 * config. 
5477 * 5478 * The first time this is called on a spa, allocate a new nvlist, fetch 5479 * the pool features and reference counts from disk, then save the list 5480 * in the spa. In subsequent calls on the same spa use the saved nvlist 5481 * and refresh its values from the cached reference counts. This 5482 * ensures we don't block here on I/O on a suspended pool so 'zpool 5483 * clear' can resume the pool. 5484 */ 5485 static void 5486 spa_add_feature_stats(spa_t *spa, nvlist_t *config) 5487 { 5488 nvlist_t *features; 5489 5490 ASSERT(spa_config_held(spa, SCL_CONFIG, RW_READER)); 5491 5492 mutex_enter(&spa->spa_feat_stats_lock); 5493 features = spa->spa_feat_stats; 5494 5495 if (features != NULL) { 5496 spa_feature_stats_from_cache(spa, features); 5497 } else { 5498 VERIFY0(nvlist_alloc(&features, NV_UNIQUE_NAME, KM_SLEEP)); 5499 spa->spa_feat_stats = features; 5500 spa_feature_stats_from_disk(spa, features); 5501 } 5502 5503 VERIFY0(nvlist_add_nvlist(config, ZPOOL_CONFIG_FEATURE_STATS, 5504 features)); 5505 5506 mutex_exit(&spa->spa_feat_stats_lock); 5507 } 5508 5509 int 5510 spa_get_stats(const char *name, nvlist_t **config, 5511 char *altroot, size_t buflen) 5512 { 5513 int error; 5514 spa_t *spa; 5515 5516 *config = NULL; 5517 error = spa_open_common(name, &spa, FTAG, NULL, config); 5518 5519 if (spa != NULL) { 5520 /* 5521 * This still leaves a window of inconsistency where the spares 5522 * or l2cache devices could change and the config would be 5523 * self-inconsistent. 5524 */ 5525 spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER); 5526 5527 if (*config != NULL) { 5528 uint64_t loadtimes[2]; 5529 5530 loadtimes[0] = spa->spa_loaded_ts.tv_sec; 5531 loadtimes[1] = spa->spa_loaded_ts.tv_nsec; 5532 fnvlist_add_uint64_array(*config, 5533 ZPOOL_CONFIG_LOADED_TIME, loadtimes, 2); 5534 5535 fnvlist_add_uint64(*config, 5536 ZPOOL_CONFIG_ERRCOUNT, 5537 spa_get_errlog_size(spa)); 5538 5539 if (spa_suspended(spa)) { 5540 fnvlist_add_uint64(*config, 5541 ZPOOL_CONFIG_SUSPENDED, 5542 spa->spa_failmode); 5543 fnvlist_add_uint64(*config, 5544 ZPOOL_CONFIG_SUSPENDED_REASON, 5545 spa->spa_suspended); 5546 } 5547 5548 spa_add_spares(spa, *config); 5549 spa_add_l2cache(spa, *config); 5550 spa_add_feature_stats(spa, *config); 5551 } 5552 } 5553 5554 /* 5555 * We want to get the alternate root even for faulted pools, so we cheat 5556 * and call spa_lookup() directly. 5557 */ 5558 if (altroot) { 5559 if (spa == NULL) { 5560 mutex_enter(&spa_namespace_lock); 5561 spa = spa_lookup(name); 5562 if (spa) 5563 spa_altroot(spa, altroot, buflen); 5564 else 5565 altroot[0] = '\0'; 5566 spa = NULL; 5567 mutex_exit(&spa_namespace_lock); 5568 } else { 5569 spa_altroot(spa, altroot, buflen); 5570 } 5571 } 5572 5573 if (spa != NULL) { 5574 spa_config_exit(spa, SCL_CONFIG, FTAG); 5575 spa_close(spa, FTAG); 5576 } 5577 5578 return (error); 5579 } 5580 5581 /* 5582 * Validate that the auxiliary device array is well formed. We must have an 5583 * array of nvlists, each which describes a valid leaf vdev. If this is an 5584 * import (mode is VDEV_ALLOC_SPARE), then we allow corrupted spares to be 5585 * specified, as long as they are well-formed. 
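 *
 * The nvroot handed in here has the same shape userland builds for
 * create/add, conceptually something like:
 *
 *	nvroot = {
 *		"spares"  : [ { "type" = "disk", "path" = "/dev/..." }, ... ]
 *		"l2cache" : [ { "type" = "disk", "path" = "/dev/..." }, ... ]
 *	}
 *
 * (schematic only; the keys actually used are the ZPOOL_CONFIG_* names
 * passed in via the 'config' argument, and each per-device entry is a
 * full leaf-vdev nvlist).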
5586 */ 5587 static int 5588 spa_validate_aux_devs(spa_t *spa, nvlist_t *nvroot, uint64_t crtxg, int mode, 5589 spa_aux_vdev_t *sav, const char *config, uint64_t version, 5590 vdev_labeltype_t label) 5591 { 5592 nvlist_t **dev; 5593 uint_t i, ndev; 5594 vdev_t *vd; 5595 int error; 5596 5597 ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL); 5598 5599 /* 5600 * It's acceptable to have no devs specified. 5601 */ 5602 if (nvlist_lookup_nvlist_array(nvroot, config, &dev, &ndev) != 0) 5603 return (0); 5604 5605 if (ndev == 0) 5606 return (SET_ERROR(EINVAL)); 5607 5608 /* 5609 * Make sure the pool is formatted with a version that supports this 5610 * device type. 5611 */ 5612 if (spa_version(spa) < version) 5613 return (SET_ERROR(ENOTSUP)); 5614 5615 /* 5616 * Set the pending device list so we correctly handle device in-use 5617 * checking. 5618 */ 5619 sav->sav_pending = dev; 5620 sav->sav_npending = ndev; 5621 5622 for (i = 0; i < ndev; i++) { 5623 if ((error = spa_config_parse(spa, &vd, dev[i], NULL, 0, 5624 mode)) != 0) 5625 goto out; 5626 5627 if (!vd->vdev_ops->vdev_op_leaf) { 5628 vdev_free(vd); 5629 error = SET_ERROR(EINVAL); 5630 goto out; 5631 } 5632 5633 vd->vdev_top = vd; 5634 5635 if ((error = vdev_open(vd)) == 0 && 5636 (error = vdev_label_init(vd, crtxg, label)) == 0) { 5637 fnvlist_add_uint64(dev[i], ZPOOL_CONFIG_GUID, 5638 vd->vdev_guid); 5639 } 5640 5641 vdev_free(vd); 5642 5643 if (error && 5644 (mode != VDEV_ALLOC_SPARE && mode != VDEV_ALLOC_L2CACHE)) 5645 goto out; 5646 else 5647 error = 0; 5648 } 5649 5650 out: 5651 sav->sav_pending = NULL; 5652 sav->sav_npending = 0; 5653 return (error); 5654 } 5655 5656 static int 5657 spa_validate_aux(spa_t *spa, nvlist_t *nvroot, uint64_t crtxg, int mode) 5658 { 5659 int error; 5660 5661 ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL); 5662 5663 if ((error = spa_validate_aux_devs(spa, nvroot, crtxg, mode, 5664 &spa->spa_spares, ZPOOL_CONFIG_SPARES, SPA_VERSION_SPARES, 5665 VDEV_LABEL_SPARE)) != 0) { 5666 return (error); 5667 } 5668 5669 return (spa_validate_aux_devs(spa, nvroot, crtxg, mode, 5670 &spa->spa_l2cache, ZPOOL_CONFIG_L2CACHE, SPA_VERSION_L2CACHE, 5671 VDEV_LABEL_L2CACHE)); 5672 } 5673 5674 static void 5675 spa_set_aux_vdevs(spa_aux_vdev_t *sav, nvlist_t **devs, int ndevs, 5676 const char *config) 5677 { 5678 int i; 5679 5680 if (sav->sav_config != NULL) { 5681 nvlist_t **olddevs; 5682 uint_t oldndevs; 5683 nvlist_t **newdevs; 5684 5685 /* 5686 * Generate new dev list by concatenating with the 5687 * current dev list. 5688 */ 5689 VERIFY0(nvlist_lookup_nvlist_array(sav->sav_config, config, 5690 &olddevs, &oldndevs)); 5691 5692 newdevs = kmem_alloc(sizeof (void *) * 5693 (ndevs + oldndevs), KM_SLEEP); 5694 for (i = 0; i < oldndevs; i++) 5695 newdevs[i] = fnvlist_dup(olddevs[i]); 5696 for (i = 0; i < ndevs; i++) 5697 newdevs[i + oldndevs] = fnvlist_dup(devs[i]); 5698 5699 fnvlist_remove(sav->sav_config, config); 5700 5701 fnvlist_add_nvlist_array(sav->sav_config, config, 5702 (const nvlist_t * const *)newdevs, ndevs + oldndevs); 5703 for (i = 0; i < oldndevs + ndevs; i++) 5704 nvlist_free(newdevs[i]); 5705 kmem_free(newdevs, (oldndevs + ndevs) * sizeof (void *)); 5706 } else { 5707 /* 5708 * Generate a new dev list. 
5709 */ 5710 sav->sav_config = fnvlist_alloc(); 5711 fnvlist_add_nvlist_array(sav->sav_config, config, 5712 (const nvlist_t * const *)devs, ndevs); 5713 } 5714 } 5715 5716 /* 5717 * Stop and drop level 2 ARC devices 5718 */ 5719 void 5720 spa_l2cache_drop(spa_t *spa) 5721 { 5722 vdev_t *vd; 5723 int i; 5724 spa_aux_vdev_t *sav = &spa->spa_l2cache; 5725 5726 for (i = 0; i < sav->sav_count; i++) { 5727 uint64_t pool; 5728 5729 vd = sav->sav_vdevs[i]; 5730 ASSERT(vd != NULL); 5731 5732 if (spa_l2cache_exists(vd->vdev_guid, &pool) && 5733 pool != 0ULL && l2arc_vdev_present(vd)) 5734 l2arc_remove_vdev(vd); 5735 } 5736 } 5737 5738 /* 5739 * Verify encryption parameters for spa creation. If we are encrypting, we must 5740 * have the encryption feature flag enabled. 5741 */ 5742 static int 5743 spa_create_check_encryption_params(dsl_crypto_params_t *dcp, 5744 boolean_t has_encryption) 5745 { 5746 if (dcp->cp_crypt != ZIO_CRYPT_OFF && 5747 dcp->cp_crypt != ZIO_CRYPT_INHERIT && 5748 !has_encryption) 5749 return (SET_ERROR(ENOTSUP)); 5750 5751 return (dmu_objset_create_crypt_check(NULL, dcp, NULL)); 5752 } 5753 5754 /* 5755 * Pool Creation 5756 */ 5757 int 5758 spa_create(const char *pool, nvlist_t *nvroot, nvlist_t *props, 5759 nvlist_t *zplprops, dsl_crypto_params_t *dcp) 5760 { 5761 spa_t *spa; 5762 char *altroot = NULL; 5763 vdev_t *rvd; 5764 dsl_pool_t *dp; 5765 dmu_tx_t *tx; 5766 int error = 0; 5767 uint64_t txg = TXG_INITIAL; 5768 nvlist_t **spares, **l2cache; 5769 uint_t nspares, nl2cache; 5770 uint64_t version, obj, ndraid = 0; 5771 boolean_t has_features; 5772 boolean_t has_encryption; 5773 boolean_t has_allocclass; 5774 spa_feature_t feat; 5775 char *feat_name; 5776 char *poolname; 5777 nvlist_t *nvl; 5778 5779 if (props == NULL || 5780 nvlist_lookup_string(props, "tname", &poolname) != 0) 5781 poolname = (char *)pool; 5782 5783 /* 5784 * If this pool already exists, return failure. 5785 */ 5786 mutex_enter(&spa_namespace_lock); 5787 if (spa_lookup(poolname) != NULL) { 5788 mutex_exit(&spa_namespace_lock); 5789 return (SET_ERROR(EEXIST)); 5790 } 5791 5792 /* 5793 * Allocate a new spa_t structure. 5794 */ 5795 nvl = fnvlist_alloc(); 5796 fnvlist_add_string(nvl, ZPOOL_CONFIG_POOL_NAME, pool); 5797 (void) nvlist_lookup_string(props, 5798 zpool_prop_to_name(ZPOOL_PROP_ALTROOT), &altroot); 5799 spa = spa_add(poolname, nvl, altroot); 5800 fnvlist_free(nvl); 5801 spa_activate(spa, spa_mode_global); 5802 5803 if (props && (error = spa_prop_validate(spa, props))) { 5804 spa_deactivate(spa); 5805 spa_remove(spa); 5806 mutex_exit(&spa_namespace_lock); 5807 return (error); 5808 } 5809 5810 /* 5811 * Temporary pool names should never be written to disk. 
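 *
 * (The temporary name arrives via the "tname" entry looked up above and
 * is typically supplied by 'zpool create -t'; the ZFS_IMPORT_TEMP_NAME
 * flag set below simply records that fact.)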
5812 */ 5813 if (poolname != pool) 5814 spa->spa_import_flags |= ZFS_IMPORT_TEMP_NAME; 5815 5816 has_features = B_FALSE; 5817 has_encryption = B_FALSE; 5818 has_allocclass = B_FALSE; 5819 for (nvpair_t *elem = nvlist_next_nvpair(props, NULL); 5820 elem != NULL; elem = nvlist_next_nvpair(props, elem)) { 5821 if (zpool_prop_feature(nvpair_name(elem))) { 5822 has_features = B_TRUE; 5823 5824 feat_name = strchr(nvpair_name(elem), '@') + 1; 5825 VERIFY0(zfeature_lookup_name(feat_name, &feat)); 5826 if (feat == SPA_FEATURE_ENCRYPTION) 5827 has_encryption = B_TRUE; 5828 if (feat == SPA_FEATURE_ALLOCATION_CLASSES) 5829 has_allocclass = B_TRUE; 5830 } 5831 } 5832 5833 /* verify encryption params, if they were provided */ 5834 if (dcp != NULL) { 5835 error = spa_create_check_encryption_params(dcp, has_encryption); 5836 if (error != 0) { 5837 spa_deactivate(spa); 5838 spa_remove(spa); 5839 mutex_exit(&spa_namespace_lock); 5840 return (error); 5841 } 5842 } 5843 if (!has_allocclass && zfs_special_devs(nvroot, NULL)) { 5844 spa_deactivate(spa); 5845 spa_remove(spa); 5846 mutex_exit(&spa_namespace_lock); 5847 return (ENOTSUP); 5848 } 5849 5850 if (has_features || nvlist_lookup_uint64(props, 5851 zpool_prop_to_name(ZPOOL_PROP_VERSION), &version) != 0) { 5852 version = SPA_VERSION; 5853 } 5854 ASSERT(SPA_VERSION_IS_SUPPORTED(version)); 5855 5856 spa->spa_first_txg = txg; 5857 spa->spa_uberblock.ub_txg = txg - 1; 5858 spa->spa_uberblock.ub_version = version; 5859 spa->spa_ubsync = spa->spa_uberblock; 5860 spa->spa_load_state = SPA_LOAD_CREATE; 5861 spa->spa_removing_phys.sr_state = DSS_NONE; 5862 spa->spa_removing_phys.sr_removing_vdev = -1; 5863 spa->spa_removing_phys.sr_prev_indirect_vdev = -1; 5864 spa->spa_indirect_vdevs_loaded = B_TRUE; 5865 5866 /* 5867 * Create "The Godfather" zio to hold all async IOs 5868 */ 5869 spa->spa_async_zio_root = kmem_alloc(max_ncpus * sizeof (void *), 5870 KM_SLEEP); 5871 for (int i = 0; i < max_ncpus; i++) { 5872 spa->spa_async_zio_root[i] = zio_root(spa, NULL, NULL, 5873 ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE | 5874 ZIO_FLAG_GODFATHER); 5875 } 5876 5877 /* 5878 * Create the root vdev. 5879 */ 5880 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 5881 5882 error = spa_config_parse(spa, &rvd, nvroot, NULL, 0, VDEV_ALLOC_ADD); 5883 5884 ASSERT(error != 0 || rvd != NULL); 5885 ASSERT(error != 0 || spa->spa_root_vdev == rvd); 5886 5887 if (error == 0 && !zfs_allocatable_devs(nvroot)) 5888 error = SET_ERROR(EINVAL); 5889 5890 if (error == 0 && 5891 (error = vdev_create(rvd, txg, B_FALSE)) == 0 && 5892 (error = vdev_draid_spare_create(nvroot, rvd, &ndraid, 0)) == 0 && 5893 (error = spa_validate_aux(spa, nvroot, txg, VDEV_ALLOC_ADD)) == 0) { 5894 /* 5895 * instantiate the metaslab groups (this will dirty the vdevs) 5896 * we can no longer error exit past this point 5897 */ 5898 for (int c = 0; error == 0 && c < rvd->vdev_children; c++) { 5899 vdev_t *vd = rvd->vdev_child[c]; 5900 5901 vdev_metaslab_set_size(vd); 5902 vdev_expand(vd, txg); 5903 } 5904 } 5905 5906 spa_config_exit(spa, SCL_ALL, FTAG); 5907 5908 if (error != 0) { 5909 spa_unload(spa); 5910 spa_deactivate(spa); 5911 spa_remove(spa); 5912 mutex_exit(&spa_namespace_lock); 5913 return (error); 5914 } 5915 5916 /* 5917 * Get the list of spares, if specified. 
5918 */ 5919 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES, 5920 &spares, &nspares) == 0) { 5921 spa->spa_spares.sav_config = fnvlist_alloc(); 5922 fnvlist_add_nvlist_array(spa->spa_spares.sav_config, 5923 ZPOOL_CONFIG_SPARES, (const nvlist_t * const *)spares, 5924 nspares); 5925 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 5926 spa_load_spares(spa); 5927 spa_config_exit(spa, SCL_ALL, FTAG); 5928 spa->spa_spares.sav_sync = B_TRUE; 5929 } 5930 5931 /* 5932 * Get the list of level 2 cache devices, if specified. 5933 */ 5934 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE, 5935 &l2cache, &nl2cache) == 0) { 5936 VERIFY0(nvlist_alloc(&spa->spa_l2cache.sav_config, 5937 NV_UNIQUE_NAME, KM_SLEEP)); 5938 fnvlist_add_nvlist_array(spa->spa_l2cache.sav_config, 5939 ZPOOL_CONFIG_L2CACHE, (const nvlist_t * const *)l2cache, 5940 nl2cache); 5941 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 5942 spa_load_l2cache(spa); 5943 spa_config_exit(spa, SCL_ALL, FTAG); 5944 spa->spa_l2cache.sav_sync = B_TRUE; 5945 } 5946 5947 spa->spa_is_initializing = B_TRUE; 5948 spa->spa_dsl_pool = dp = dsl_pool_create(spa, zplprops, dcp, txg); 5949 spa->spa_is_initializing = B_FALSE; 5950 5951 /* 5952 * Create DDTs (dedup tables). 5953 */ 5954 ddt_create(spa); 5955 5956 spa_update_dspace(spa); 5957 5958 tx = dmu_tx_create_assigned(dp, txg); 5959 5960 /* 5961 * Create the pool's history object. 5962 */ 5963 if (version >= SPA_VERSION_ZPOOL_HISTORY && !spa->spa_history) 5964 spa_history_create_obj(spa, tx); 5965 5966 spa_event_notify(spa, NULL, NULL, ESC_ZFS_POOL_CREATE); 5967 spa_history_log_version(spa, "create", tx); 5968 5969 /* 5970 * Create the pool config object. 5971 */ 5972 spa->spa_config_object = dmu_object_alloc(spa->spa_meta_objset, 5973 DMU_OT_PACKED_NVLIST, SPA_CONFIG_BLOCKSIZE, 5974 DMU_OT_PACKED_NVLIST_SIZE, sizeof (uint64_t), tx); 5975 5976 if (zap_add(spa->spa_meta_objset, 5977 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_CONFIG, 5978 sizeof (uint64_t), 1, &spa->spa_config_object, tx) != 0) { 5979 cmn_err(CE_PANIC, "failed to add pool config"); 5980 } 5981 5982 if (zap_add(spa->spa_meta_objset, 5983 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_CREATION_VERSION, 5984 sizeof (uint64_t), 1, &version, tx) != 0) { 5985 cmn_err(CE_PANIC, "failed to add pool version"); 5986 } 5987 5988 /* Newly created pools with the right version are always deflated. */ 5989 if (version >= SPA_VERSION_RAIDZ_DEFLATE) { 5990 spa->spa_deflate = TRUE; 5991 if (zap_add(spa->spa_meta_objset, 5992 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_DEFLATE, 5993 sizeof (uint64_t), 1, &spa->spa_deflate, tx) != 0) { 5994 cmn_err(CE_PANIC, "failed to add deflate"); 5995 } 5996 } 5997 5998 /* 5999 * Create the deferred-free bpobj. Turn off compression 6000 * because sync-to-convergence takes longer if the blocksize 6001 * keeps changing. 6002 */ 6003 obj = bpobj_alloc(spa->spa_meta_objset, 1 << 14, tx); 6004 dmu_object_set_compress(spa->spa_meta_objset, obj, 6005 ZIO_COMPRESS_OFF, tx); 6006 if (zap_add(spa->spa_meta_objset, 6007 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_SYNC_BPOBJ, 6008 sizeof (uint64_t), 1, &obj, tx) != 0) { 6009 cmn_err(CE_PANIC, "failed to add bpobj"); 6010 } 6011 VERIFY3U(0, ==, bpobj_open(&spa->spa_deferred_bpobj, 6012 spa->spa_meta_objset, obj)); 6013 6014 /* 6015 * Generate some random noise for salted checksums to operate on. 6016 */ 6017 (void) random_get_pseudo_bytes(spa->spa_cksum_salt.zcs_bytes, 6018 sizeof (spa->spa_cksum_salt.zcs_bytes)); 6019 6020 /* 6021 * Set pool properties. 
6022 */ 6023 spa->spa_bootfs = zpool_prop_default_numeric(ZPOOL_PROP_BOOTFS); 6024 spa->spa_delegation = zpool_prop_default_numeric(ZPOOL_PROP_DELEGATION); 6025 spa->spa_failmode = zpool_prop_default_numeric(ZPOOL_PROP_FAILUREMODE); 6026 spa->spa_autoexpand = zpool_prop_default_numeric(ZPOOL_PROP_AUTOEXPAND); 6027 spa->spa_multihost = zpool_prop_default_numeric(ZPOOL_PROP_MULTIHOST); 6028 spa->spa_autotrim = zpool_prop_default_numeric(ZPOOL_PROP_AUTOTRIM); 6029 6030 if (props != NULL) { 6031 spa_configfile_set(spa, props, B_FALSE); 6032 spa_sync_props(props, tx); 6033 } 6034 6035 for (int i = 0; i < ndraid; i++) 6036 spa_feature_incr(spa, SPA_FEATURE_DRAID, tx); 6037 6038 dmu_tx_commit(tx); 6039 6040 spa->spa_sync_on = B_TRUE; 6041 txg_sync_start(dp); 6042 mmp_thread_start(spa); 6043 txg_wait_synced(dp, txg); 6044 6045 spa_spawn_aux_threads(spa); 6046 6047 spa_write_cachefile(spa, B_FALSE, B_TRUE); 6048 6049 /* 6050 * Don't count references from objsets that are already closed 6051 * and are making their way through the eviction process. 6052 */ 6053 spa_evicting_os_wait(spa); 6054 spa->spa_minref = zfs_refcount_count(&spa->spa_refcount); 6055 spa->spa_load_state = SPA_LOAD_NONE; 6056 6057 spa_import_os(spa); 6058 6059 mutex_exit(&spa_namespace_lock); 6060 6061 return (0); 6062 } 6063 6064 /* 6065 * Import a non-root pool into the system. 6066 */ 6067 int 6068 spa_import(char *pool, nvlist_t *config, nvlist_t *props, uint64_t flags) 6069 { 6070 spa_t *spa; 6071 char *altroot = NULL; 6072 spa_load_state_t state = SPA_LOAD_IMPORT; 6073 zpool_load_policy_t policy; 6074 spa_mode_t mode = spa_mode_global; 6075 uint64_t readonly = B_FALSE; 6076 int error; 6077 nvlist_t *nvroot; 6078 nvlist_t **spares, **l2cache; 6079 uint_t nspares, nl2cache; 6080 6081 /* 6082 * If a pool with this name exists, return failure. 6083 */ 6084 mutex_enter(&spa_namespace_lock); 6085 if (spa_lookup(pool) != NULL) { 6086 mutex_exit(&spa_namespace_lock); 6087 return (SET_ERROR(EEXIST)); 6088 } 6089 6090 /* 6091 * Create and initialize the spa structure. 6092 */ 6093 (void) nvlist_lookup_string(props, 6094 zpool_prop_to_name(ZPOOL_PROP_ALTROOT), &altroot); 6095 (void) nvlist_lookup_uint64(props, 6096 zpool_prop_to_name(ZPOOL_PROP_READONLY), &readonly); 6097 if (readonly) 6098 mode = SPA_MODE_READ; 6099 spa = spa_add(pool, config, altroot); 6100 spa->spa_import_flags = flags; 6101 6102 /* 6103 * Verbatim import - Take a pool and insert it into the namespace 6104 * as if it had been loaded at boot. 6105 */ 6106 if (spa->spa_import_flags & ZFS_IMPORT_VERBATIM) { 6107 if (props != NULL) 6108 spa_configfile_set(spa, props, B_FALSE); 6109 6110 spa_write_cachefile(spa, B_FALSE, B_TRUE); 6111 spa_event_notify(spa, NULL, NULL, ESC_ZFS_POOL_IMPORT); 6112 zfs_dbgmsg("spa_import: verbatim import of %s", pool); 6113 mutex_exit(&spa_namespace_lock); 6114 return (0); 6115 } 6116 6117 spa_activate(spa, mode); 6118 6119 /* 6120 * Don't start async tasks until we know everything is healthy. 
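 *
 * (spa_async_resume() is called further down, once the load has
 * succeeded and any user-supplied properties have been applied.)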
6121 */ 6122 spa_async_suspend(spa); 6123 6124 zpool_get_load_policy(config, &policy); 6125 if (policy.zlp_rewind & ZPOOL_DO_REWIND) 6126 state = SPA_LOAD_RECOVER; 6127 6128 spa->spa_config_source = SPA_CONFIG_SRC_TRYIMPORT; 6129 6130 if (state != SPA_LOAD_RECOVER) { 6131 spa->spa_last_ubsync_txg = spa->spa_load_txg = 0; 6132 zfs_dbgmsg("spa_import: importing %s", pool); 6133 } else { 6134 zfs_dbgmsg("spa_import: importing %s, max_txg=%lld " 6135 "(RECOVERY MODE)", pool, (longlong_t)policy.zlp_txg); 6136 } 6137 error = spa_load_best(spa, state, policy.zlp_txg, policy.zlp_rewind); 6138 6139 /* 6140 * Propagate anything learned while loading the pool and pass it 6141 * back to caller (i.e. rewind info, missing devices, etc). 6142 */ 6143 fnvlist_add_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, spa->spa_load_info); 6144 6145 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 6146 /* 6147 * Toss any existing sparelist, as it doesn't have any validity 6148 * anymore, and conflicts with spa_has_spare(). 6149 */ 6150 if (spa->spa_spares.sav_config) { 6151 nvlist_free(spa->spa_spares.sav_config); 6152 spa->spa_spares.sav_config = NULL; 6153 spa_load_spares(spa); 6154 } 6155 if (spa->spa_l2cache.sav_config) { 6156 nvlist_free(spa->spa_l2cache.sav_config); 6157 spa->spa_l2cache.sav_config = NULL; 6158 spa_load_l2cache(spa); 6159 } 6160 6161 nvroot = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE); 6162 spa_config_exit(spa, SCL_ALL, FTAG); 6163 6164 if (props != NULL) 6165 spa_configfile_set(spa, props, B_FALSE); 6166 6167 if (error != 0 || (props && spa_writeable(spa) && 6168 (error = spa_prop_set(spa, props)))) { 6169 spa_unload(spa); 6170 spa_deactivate(spa); 6171 spa_remove(spa); 6172 mutex_exit(&spa_namespace_lock); 6173 return (error); 6174 } 6175 6176 spa_async_resume(spa); 6177 6178 /* 6179 * Override any spares and level 2 cache devices as specified by 6180 * the user, as these may have correct device names/devids, etc. 6181 */ 6182 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES, 6183 &spares, &nspares) == 0) { 6184 if (spa->spa_spares.sav_config) 6185 fnvlist_remove(spa->spa_spares.sav_config, 6186 ZPOOL_CONFIG_SPARES); 6187 else 6188 spa->spa_spares.sav_config = fnvlist_alloc(); 6189 fnvlist_add_nvlist_array(spa->spa_spares.sav_config, 6190 ZPOOL_CONFIG_SPARES, (const nvlist_t * const *)spares, 6191 nspares); 6192 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 6193 spa_load_spares(spa); 6194 spa_config_exit(spa, SCL_ALL, FTAG); 6195 spa->spa_spares.sav_sync = B_TRUE; 6196 } 6197 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE, 6198 &l2cache, &nl2cache) == 0) { 6199 if (spa->spa_l2cache.sav_config) 6200 fnvlist_remove(spa->spa_l2cache.sav_config, 6201 ZPOOL_CONFIG_L2CACHE); 6202 else 6203 spa->spa_l2cache.sav_config = fnvlist_alloc(); 6204 fnvlist_add_nvlist_array(spa->spa_l2cache.sav_config, 6205 ZPOOL_CONFIG_L2CACHE, (const nvlist_t * const *)l2cache, 6206 nl2cache); 6207 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 6208 spa_load_l2cache(spa); 6209 spa_config_exit(spa, SCL_ALL, FTAG); 6210 spa->spa_l2cache.sav_sync = B_TRUE; 6211 } 6212 6213 /* 6214 * Check for any removed devices. 6215 */ 6216 if (spa->spa_autoreplace) { 6217 spa_aux_check_removed(&spa->spa_spares); 6218 spa_aux_check_removed(&spa->spa_l2cache); 6219 } 6220 6221 if (spa_writeable(spa)) { 6222 /* 6223 * Update the config cache to include the newly-imported pool. 
6224 */ 6225 spa_config_update(spa, SPA_CONFIG_UPDATE_POOL); 6226 } 6227 6228 /* 6229 * It's possible that the pool was expanded while it was exported. 6230 * We kick off an async task to handle this for us. 6231 */ 6232 spa_async_request(spa, SPA_ASYNC_AUTOEXPAND); 6233 6234 spa_history_log_version(spa, "import", NULL); 6235 6236 spa_event_notify(spa, NULL, NULL, ESC_ZFS_POOL_IMPORT); 6237 6238 mutex_exit(&spa_namespace_lock); 6239 6240 zvol_create_minors_recursive(pool); 6241 6242 spa_import_os(spa); 6243 6244 return (0); 6245 } 6246 6247 nvlist_t * 6248 spa_tryimport(nvlist_t *tryconfig) 6249 { 6250 nvlist_t *config = NULL; 6251 char *poolname, *cachefile; 6252 spa_t *spa; 6253 uint64_t state; 6254 int error; 6255 zpool_load_policy_t policy; 6256 6257 if (nvlist_lookup_string(tryconfig, ZPOOL_CONFIG_POOL_NAME, &poolname)) 6258 return (NULL); 6259 6260 if (nvlist_lookup_uint64(tryconfig, ZPOOL_CONFIG_POOL_STATE, &state)) 6261 return (NULL); 6262 6263 /* 6264 * Create and initialize the spa structure. 6265 */ 6266 mutex_enter(&spa_namespace_lock); 6267 spa = spa_add(TRYIMPORT_NAME, tryconfig, NULL); 6268 spa_activate(spa, SPA_MODE_READ); 6269 6270 /* 6271 * Rewind pool if a max txg was provided. 6272 */ 6273 zpool_get_load_policy(spa->spa_config, &policy); 6274 if (policy.zlp_txg != UINT64_MAX) { 6275 spa->spa_load_max_txg = policy.zlp_txg; 6276 spa->spa_extreme_rewind = B_TRUE; 6277 zfs_dbgmsg("spa_tryimport: importing %s, max_txg=%lld", 6278 poolname, (longlong_t)policy.zlp_txg); 6279 } else { 6280 zfs_dbgmsg("spa_tryimport: importing %s", poolname); 6281 } 6282 6283 if (nvlist_lookup_string(tryconfig, ZPOOL_CONFIG_CACHEFILE, &cachefile) 6284 == 0) { 6285 zfs_dbgmsg("spa_tryimport: using cachefile '%s'", cachefile); 6286 spa->spa_config_source = SPA_CONFIG_SRC_CACHEFILE; 6287 } else { 6288 spa->spa_config_source = SPA_CONFIG_SRC_SCAN; 6289 } 6290 6291 error = spa_load(spa, SPA_LOAD_TRYIMPORT, SPA_IMPORT_EXISTING); 6292 6293 /* 6294 * If 'tryconfig' was at least parsable, return the current config. 6295 */ 6296 if (spa->spa_root_vdev != NULL) { 6297 config = spa_config_generate(spa, NULL, -1ULL, B_TRUE); 6298 fnvlist_add_string(config, ZPOOL_CONFIG_POOL_NAME, poolname); 6299 fnvlist_add_uint64(config, ZPOOL_CONFIG_POOL_STATE, state); 6300 fnvlist_add_uint64(config, ZPOOL_CONFIG_TIMESTAMP, 6301 spa->spa_uberblock.ub_timestamp); 6302 fnvlist_add_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, 6303 spa->spa_load_info); 6304 fnvlist_add_uint64(config, ZPOOL_CONFIG_ERRATA, 6305 spa->spa_errata); 6306 6307 /* 6308 * If the bootfs property exists on this pool then we 6309 * copy it out so that external consumers can tell which 6310 * pools are bootable. 6311 */ 6312 if ((!error || error == EEXIST) && spa->spa_bootfs) { 6313 char *tmpname = kmem_alloc(MAXPATHLEN, KM_SLEEP); 6314 6315 /* 6316 * We have to play games with the name since the 6317 * pool was opened as TRYIMPORT_NAME. 6318 */ 6319 if (dsl_dsobj_to_dsname(spa_name(spa), 6320 spa->spa_bootfs, tmpname) == 0) { 6321 char *cp; 6322 char *dsname; 6323 6324 dsname = kmem_alloc(MAXPATHLEN, KM_SLEEP); 6325 6326 cp = strchr(tmpname, '/'); 6327 if (cp == NULL) { 6328 (void) strlcpy(dsname, tmpname, 6329 MAXPATHLEN); 6330 } else { 6331 (void) snprintf(dsname, MAXPATHLEN, 6332 "%s/%s", poolname, ++cp); 6333 } 6334 fnvlist_add_string(config, ZPOOL_CONFIG_BOOTFS, 6335 dsname); 6336 kmem_free(dsname, MAXPATHLEN); 6337 } 6338 kmem_free(tmpname, MAXPATHLEN); 6339 } 6340 6341 /* 6342 * Add the list of hot spares and level 2 cache devices. 
6343 */ 6344 spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER); 6345 spa_add_spares(spa, config); 6346 spa_add_l2cache(spa, config); 6347 spa_config_exit(spa, SCL_CONFIG, FTAG); 6348 } 6349 6350 spa_unload(spa); 6351 spa_deactivate(spa); 6352 spa_remove(spa); 6353 mutex_exit(&spa_namespace_lock); 6354 6355 return (config); 6356 } 6357 6358 /* 6359 * Pool export/destroy 6360 * 6361 * The act of destroying or exporting a pool is very simple. We make sure there 6362 * is no more pending I/O and any references to the pool are gone. Then, we 6363 * update the pool state and sync all the labels to disk, removing the 6364 * configuration from the cache afterwards. If the 'hardforce' flag is set, then 6365 * we don't sync the labels or remove the configuration cache. 6366 */ 6367 static int 6368 spa_export_common(const char *pool, int new_state, nvlist_t **oldconfig, 6369 boolean_t force, boolean_t hardforce) 6370 { 6371 int error; 6372 spa_t *spa; 6373 6374 if (oldconfig) 6375 *oldconfig = NULL; 6376 6377 if (!(spa_mode_global & SPA_MODE_WRITE)) 6378 return (SET_ERROR(EROFS)); 6379 6380 mutex_enter(&spa_namespace_lock); 6381 if ((spa = spa_lookup(pool)) == NULL) { 6382 mutex_exit(&spa_namespace_lock); 6383 return (SET_ERROR(ENOENT)); 6384 } 6385 6386 if (spa->spa_is_exporting) { 6387 /* the pool is being exported by another thread */ 6388 mutex_exit(&spa_namespace_lock); 6389 return (SET_ERROR(ZFS_ERR_EXPORT_IN_PROGRESS)); 6390 } 6391 spa->spa_is_exporting = B_TRUE; 6392 6393 /* 6394 * Put a hold on the pool, drop the namespace lock, stop async tasks, 6395 * reacquire the namespace lock, and see if we can export. 6396 */ 6397 spa_open_ref(spa, FTAG); 6398 mutex_exit(&spa_namespace_lock); 6399 spa_async_suspend(spa); 6400 if (spa->spa_zvol_taskq) { 6401 zvol_remove_minors(spa, spa_name(spa), B_TRUE); 6402 taskq_wait(spa->spa_zvol_taskq); 6403 } 6404 mutex_enter(&spa_namespace_lock); 6405 spa_close(spa, FTAG); 6406 6407 if (spa->spa_state == POOL_STATE_UNINITIALIZED) 6408 goto export_spa; 6409 /* 6410 * The pool will be in core if it's openable, in which case we can 6411 * modify its state. Objsets may be open only because they're dirty, 6412 * so we have to force it to sync before checking spa_refcnt. 6413 */ 6414 if (spa->spa_sync_on) { 6415 txg_wait_synced(spa->spa_dsl_pool, 0); 6416 spa_evicting_os_wait(spa); 6417 } 6418 6419 /* 6420 * A pool cannot be exported or destroyed if there are active 6421 * references. If we are resetting a pool, allow references by 6422 * fault injection handlers. 6423 */ 6424 if (!spa_refcount_zero(spa) || (spa->spa_inject_ref != 0)) { 6425 error = SET_ERROR(EBUSY); 6426 goto fail; 6427 } 6428 6429 if (spa->spa_sync_on) { 6430 /* 6431 * A pool cannot be exported if it has an active shared spare. 6432 * This is to prevent other pools stealing the active spare 6433 * from an exported pool. At user's own will, such pool can 6434 * be forcedly exported. 6435 */ 6436 if (!force && new_state == POOL_STATE_EXPORTED && 6437 spa_has_active_shared_spare(spa)) { 6438 error = SET_ERROR(EXDEV); 6439 goto fail; 6440 } 6441 6442 /* 6443 * We're about to export or destroy this pool. Make sure 6444 * we stop all initialization and trim activity here before 6445 * we set the spa_final_txg. This will ensure that all 6446 * dirty data resulting from the initialization is 6447 * committed to disk before we unload the pool. 
6448 */ 6449 if (spa->spa_root_vdev != NULL) { 6450 vdev_t *rvd = spa->spa_root_vdev; 6451 vdev_initialize_stop_all(rvd, VDEV_INITIALIZE_ACTIVE); 6452 vdev_trim_stop_all(rvd, VDEV_TRIM_ACTIVE); 6453 vdev_autotrim_stop_all(spa); 6454 vdev_rebuild_stop_all(spa); 6455 } 6456 6457 /* 6458 * We want this to be reflected on every label, 6459 * so mark them all dirty. spa_unload() will do the 6460 * final sync that pushes these changes out. 6461 */ 6462 if (new_state != POOL_STATE_UNINITIALIZED && !hardforce) { 6463 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 6464 spa->spa_state = new_state; 6465 vdev_config_dirty(spa->spa_root_vdev); 6466 spa_config_exit(spa, SCL_ALL, FTAG); 6467 } 6468 6469 /* 6470 * If the log space map feature is enabled and the pool is 6471 * getting exported (but not destroyed), we want to spend some 6472 * time flushing as many metaslabs as we can in an attempt to 6473 * destroy log space maps and save import time. This has to be 6474 * done before we set the spa_final_txg, otherwise 6475 * spa_sync() -> spa_flush_metaslabs() may dirty the final TXGs. 6476 * spa_should_flush_logs_on_unload() should be called after 6477 * spa_state has been set to the new_state. 6478 */ 6479 if (spa_should_flush_logs_on_unload(spa)) 6480 spa_unload_log_sm_flush_all(spa); 6481 6482 if (new_state != POOL_STATE_UNINITIALIZED && !hardforce) { 6483 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 6484 spa->spa_final_txg = spa_last_synced_txg(spa) + 6485 TXG_DEFER_SIZE + 1; 6486 spa_config_exit(spa, SCL_ALL, FTAG); 6487 } 6488 } 6489 6490 export_spa: 6491 spa_export_os(spa); 6492 6493 if (new_state == POOL_STATE_DESTROYED) 6494 spa_event_notify(spa, NULL, NULL, ESC_ZFS_POOL_DESTROY); 6495 else if (new_state == POOL_STATE_EXPORTED) 6496 spa_event_notify(spa, NULL, NULL, ESC_ZFS_POOL_EXPORT); 6497 6498 if (spa->spa_state != POOL_STATE_UNINITIALIZED) { 6499 spa_unload(spa); 6500 spa_deactivate(spa); 6501 } 6502 6503 if (oldconfig && spa->spa_config) 6504 *oldconfig = fnvlist_dup(spa->spa_config); 6505 6506 if (new_state != POOL_STATE_UNINITIALIZED) { 6507 if (!hardforce) 6508 spa_write_cachefile(spa, B_TRUE, B_TRUE); 6509 spa_remove(spa); 6510 } else { 6511 /* 6512 * If spa_remove() is not called for this spa_t and 6513 * there is any possibility that it can be reused, 6514 * we make sure to reset the exporting flag. 6515 */ 6516 spa->spa_is_exporting = B_FALSE; 6517 } 6518 6519 mutex_exit(&spa_namespace_lock); 6520 return (0); 6521 6522 fail: 6523 spa->spa_is_exporting = B_FALSE; 6524 spa_async_resume(spa); 6525 mutex_exit(&spa_namespace_lock); 6526 return (error); 6527 } 6528 6529 /* 6530 * Destroy a storage pool. 6531 */ 6532 int 6533 spa_destroy(const char *pool) 6534 { 6535 return (spa_export_common(pool, POOL_STATE_DESTROYED, NULL, 6536 B_FALSE, B_FALSE)); 6537 } 6538 6539 /* 6540 * Export a storage pool. 6541 */ 6542 int 6543 spa_export(const char *pool, nvlist_t **oldconfig, boolean_t force, 6544 boolean_t hardforce) 6545 { 6546 return (spa_export_common(pool, POOL_STATE_EXPORTED, oldconfig, 6547 force, hardforce)); 6548 } 6549 6550 /* 6551 * Similar to spa_export(), this unloads the spa_t without actually removing it 6552 * from the namespace in any way. 
6553 */ 6554 int 6555 spa_reset(const char *pool) 6556 { 6557 return (spa_export_common(pool, POOL_STATE_UNINITIALIZED, NULL, 6558 B_FALSE, B_FALSE)); 6559 } 6560 6561 /* 6562 * ========================================================================== 6563 * Device manipulation 6564 * ========================================================================== 6565 */ 6566 6567 /* 6568 * This is called as a synctask to increment the draid feature flag 6569 */ 6570 static void 6571 spa_draid_feature_incr(void *arg, dmu_tx_t *tx) 6572 { 6573 spa_t *spa = dmu_tx_pool(tx)->dp_spa; 6574 int draid = (int)(uintptr_t)arg; 6575 6576 for (int c = 0; c < draid; c++) 6577 spa_feature_incr(spa, SPA_FEATURE_DRAID, tx); 6578 } 6579 6580 /* 6581 * Add a device to a storage pool. 6582 */ 6583 int 6584 spa_vdev_add(spa_t *spa, nvlist_t *nvroot) 6585 { 6586 uint64_t txg, ndraid = 0; 6587 int error; 6588 vdev_t *rvd = spa->spa_root_vdev; 6589 vdev_t *vd, *tvd; 6590 nvlist_t **spares, **l2cache; 6591 uint_t nspares, nl2cache; 6592 6593 ASSERT(spa_writeable(spa)); 6594 6595 txg = spa_vdev_enter(spa); 6596 6597 if ((error = spa_config_parse(spa, &vd, nvroot, NULL, 0, 6598 VDEV_ALLOC_ADD)) != 0) 6599 return (spa_vdev_exit(spa, NULL, txg, error)); 6600 6601 spa->spa_pending_vdev = vd; /* spa_vdev_exit() will clear this */ 6602 6603 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES, &spares, 6604 &nspares) != 0) 6605 nspares = 0; 6606 6607 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE, &l2cache, 6608 &nl2cache) != 0) 6609 nl2cache = 0; 6610 6611 if (vd->vdev_children == 0 && nspares == 0 && nl2cache == 0) 6612 return (spa_vdev_exit(spa, vd, txg, EINVAL)); 6613 6614 if (vd->vdev_children != 0 && 6615 (error = vdev_create(vd, txg, B_FALSE)) != 0) { 6616 return (spa_vdev_exit(spa, vd, txg, error)); 6617 } 6618 6619 /* 6620 * The virtual dRAID spares must be added after vdev tree is created 6621 * and the vdev guids are generated. The guid of their associated 6622 * dRAID is stored in the config and used when opening the spare. 6623 */ 6624 if ((error = vdev_draid_spare_create(nvroot, vd, &ndraid, 6625 rvd->vdev_children)) == 0) { 6626 if (ndraid > 0 && nvlist_lookup_nvlist_array(nvroot, 6627 ZPOOL_CONFIG_SPARES, &spares, &nspares) != 0) 6628 nspares = 0; 6629 } else { 6630 return (spa_vdev_exit(spa, vd, txg, error)); 6631 } 6632 6633 /* 6634 * We must validate the spares and l2cache devices after checking the 6635 * children. Otherwise, vdev_inuse() will blindly overwrite the spare. 6636 */ 6637 if ((error = spa_validate_aux(spa, nvroot, txg, VDEV_ALLOC_ADD)) != 0) 6638 return (spa_vdev_exit(spa, vd, txg, error)); 6639 6640 /* 6641 * If we are in the middle of a device removal, we can only add 6642 * devices which match the existing devices in the pool. 6643 * If we are in the middle of a removal, or have some indirect 6644 * vdevs, we can not add raidz or dRAID top levels. 
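 *
 * (Concretely, the loop below rejects any new top-level vdev whose
 * ashift differs from spa_max_ashift while a removal is in progress,
 * and any new top-level vdev with parity, i.e. raidz or dRAID.)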
6645 */ 6646 if (spa->spa_vdev_removal != NULL || 6647 spa->spa_removing_phys.sr_prev_indirect_vdev != -1) { 6648 for (int c = 0; c < vd->vdev_children; c++) { 6649 tvd = vd->vdev_child[c]; 6650 if (spa->spa_vdev_removal != NULL && 6651 tvd->vdev_ashift != spa->spa_max_ashift) { 6652 return (spa_vdev_exit(spa, vd, txg, EINVAL)); 6653 } 6654 /* Fail if top level vdev is raidz or a dRAID */ 6655 if (vdev_get_nparity(tvd) != 0) 6656 return (spa_vdev_exit(spa, vd, txg, EINVAL)); 6657 6658 /* 6659 * Need the top level mirror to be 6660 * a mirror of leaf vdevs only 6661 */ 6662 if (tvd->vdev_ops == &vdev_mirror_ops) { 6663 for (uint64_t cid = 0; 6664 cid < tvd->vdev_children; cid++) { 6665 vdev_t *cvd = tvd->vdev_child[cid]; 6666 if (!cvd->vdev_ops->vdev_op_leaf) { 6667 return (spa_vdev_exit(spa, vd, 6668 txg, EINVAL)); 6669 } 6670 } 6671 } 6672 } 6673 } 6674 6675 for (int c = 0; c < vd->vdev_children; c++) { 6676 tvd = vd->vdev_child[c]; 6677 vdev_remove_child(vd, tvd); 6678 tvd->vdev_id = rvd->vdev_children; 6679 vdev_add_child(rvd, tvd); 6680 vdev_config_dirty(tvd); 6681 } 6682 6683 if (nspares != 0) { 6684 spa_set_aux_vdevs(&spa->spa_spares, spares, nspares, 6685 ZPOOL_CONFIG_SPARES); 6686 spa_load_spares(spa); 6687 spa->spa_spares.sav_sync = B_TRUE; 6688 } 6689 6690 if (nl2cache != 0) { 6691 spa_set_aux_vdevs(&spa->spa_l2cache, l2cache, nl2cache, 6692 ZPOOL_CONFIG_L2CACHE); 6693 spa_load_l2cache(spa); 6694 spa->spa_l2cache.sav_sync = B_TRUE; 6695 } 6696 6697 /* 6698 * We can't increment a feature while holding spa_vdev so we 6699 * have to do it in a synctask. 6700 */ 6701 if (ndraid != 0) { 6702 dmu_tx_t *tx; 6703 6704 tx = dmu_tx_create_assigned(spa->spa_dsl_pool, txg); 6705 dsl_sync_task_nowait(spa->spa_dsl_pool, spa_draid_feature_incr, 6706 (void *)(uintptr_t)ndraid, tx); 6707 dmu_tx_commit(tx); 6708 } 6709 6710 /* 6711 * We have to be careful when adding new vdevs to an existing pool. 6712 * If other threads start allocating from these vdevs before we 6713 * sync the config cache, and we lose power, then upon reboot we may 6714 * fail to open the pool because there are DVAs that the config cache 6715 * can't translate. Therefore, we first add the vdevs without 6716 * initializing metaslabs; sync the config cache (via spa_vdev_exit()); 6717 * and then let spa_config_update() initialize the new metaslabs. 6718 * 6719 * spa_load() checks for added-but-not-initialized vdevs, so that 6720 * if we lose power at any point in this sequence, the remaining 6721 * steps will be completed the next time we load the pool. 6722 */ 6723 (void) spa_vdev_exit(spa, vd, txg, 0); 6724 6725 mutex_enter(&spa_namespace_lock); 6726 spa_config_update(spa, SPA_CONFIG_UPDATE_POOL); 6727 spa_event_notify(spa, NULL, NULL, ESC_ZFS_VDEV_ADD); 6728 mutex_exit(&spa_namespace_lock); 6729 6730 return (0); 6731 } 6732 6733 /* 6734 * Attach a device to a mirror. The arguments are the path to any device 6735 * in the mirror, and the nvroot for the new device. If the path specifies 6736 * a device that is not mirrored, we automatically insert the mirror vdev. 
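 *
 * For example, through the usual zpool(8) front end (the pool and
 * device names here are hypothetical), 'zpool attach tank sda sdb'
 * ends up in this function and turns the single-disk top-level vdev
 * sda into a two-way mirror of sda and sdb.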
6737 *
6738 * If 'replacing' is specified, the new device is intended to replace the
6739 * existing device; in this case the two devices are made into their own
6740 * mirror using the 'replacing' vdev, which is functionally identical to
6741 * the mirror vdev (it actually reuses all the same ops) but has a few
6742 * extra rules: you can't attach to it after it's been created, and upon
6743 * completion of resilvering, the first disk (the one being replaced)
6744 * is automatically detached.
6745 *
6746 * If 'rebuild' is specified, then sequential reconstruction (a.k.a. rebuild)
6747 * should be performed instead of traditional healing reconstruction. From
6748 * an administrator's perspective these are both resilver operations.
6749 */
6750 int
6751 spa_vdev_attach(spa_t *spa, uint64_t guid, nvlist_t *nvroot, int replacing,
6752 int rebuild)
6753 {
6754 uint64_t txg, dtl_max_txg;
6755 vdev_t *rvd = spa->spa_root_vdev;
6756 vdev_t *oldvd, *newvd, *newrootvd, *pvd, *tvd;
6757 vdev_ops_t *pvops;
6758 char *oldvdpath, *newvdpath;
6759 int newvd_isspare;
6760 int error;
6761
6762 ASSERT(spa_writeable(spa));
6763
6764 txg = spa_vdev_enter(spa);
6765
6766 oldvd = spa_lookup_by_guid(spa, guid, B_FALSE);
6767
6768 ASSERT(MUTEX_HELD(&spa_namespace_lock));
6769 if (spa_feature_is_active(spa, SPA_FEATURE_POOL_CHECKPOINT)) {
6770 error = (spa_has_checkpoint(spa)) ?
6771 ZFS_ERR_CHECKPOINT_EXISTS : ZFS_ERR_DISCARDING_CHECKPOINT;
6772 return (spa_vdev_exit(spa, NULL, txg, error));
6773 }
6774
6775 if (rebuild) {
6776 if (!spa_feature_is_enabled(spa, SPA_FEATURE_DEVICE_REBUILD))
6777 return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
6778
6779 if (dsl_scan_resilvering(spa_get_dsl(spa)))
6780 return (spa_vdev_exit(spa, NULL, txg,
6781 ZFS_ERR_RESILVER_IN_PROGRESS));
6782 } else {
6783 if (vdev_rebuild_active(rvd))
6784 return (spa_vdev_exit(spa, NULL, txg,
6785 ZFS_ERR_REBUILD_IN_PROGRESS));
6786 }
6787
6788 if (spa->spa_vdev_removal != NULL)
6789 return (spa_vdev_exit(spa, NULL, txg, EBUSY));
6790
6791 if (oldvd == NULL)
6792 return (spa_vdev_exit(spa, NULL, txg, ENODEV));
6793
6794 if (!oldvd->vdev_ops->vdev_op_leaf)
6795 return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
6796
6797 pvd = oldvd->vdev_parent;
6798
6799 if ((error = spa_config_parse(spa, &newrootvd, nvroot, NULL, 0,
6800 VDEV_ALLOC_ATTACH)) != 0)
6801 return (spa_vdev_exit(spa, NULL, txg, EINVAL));
6802
6803 if (newrootvd->vdev_children != 1)
6804 return (spa_vdev_exit(spa, newrootvd, txg, EINVAL));
6805
6806 newvd = newrootvd->vdev_child[0];
6807
6808 if (!newvd->vdev_ops->vdev_op_leaf)
6809 return (spa_vdev_exit(spa, newrootvd, txg, EINVAL));
6810
6811 if ((error = vdev_create(newrootvd, txg, replacing)) != 0)
6812 return (spa_vdev_exit(spa, newrootvd, txg, error));
6813
6814 /*
6815 * Spares can't replace logs
6816 */
6817 if (oldvd->vdev_top->vdev_islog && newvd->vdev_isspare)
6818 return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
6819
6820 /*
6821 * A dRAID spare can only replace a child of its parent dRAID vdev.
6822 */
6823 if (newvd->vdev_ops == &vdev_draid_spare_ops &&
6824 oldvd->vdev_top != vdev_draid_spare_get_parent(newvd)) {
6825 return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
6826 }
6827
6828 if (rebuild) {
6829 /*
6830 * For rebuilds, the top vdev must support reconstruction
6831 * using only space maps. This means the only allowable
6832 * vdev types are the root vdev, a mirror, or dRAID.
6833 */ 6834 tvd = pvd; 6835 if (pvd->vdev_top != NULL) 6836 tvd = pvd->vdev_top; 6837 6838 if (tvd->vdev_ops != &vdev_mirror_ops && 6839 tvd->vdev_ops != &vdev_root_ops && 6840 tvd->vdev_ops != &vdev_draid_ops) { 6841 return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP)); 6842 } 6843 } 6844 6845 if (!replacing) { 6846 /* 6847 * For attach, the only allowable parent is a mirror or the root 6848 * vdev. 6849 */ 6850 if (pvd->vdev_ops != &vdev_mirror_ops && 6851 pvd->vdev_ops != &vdev_root_ops) 6852 return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP)); 6853 6854 pvops = &vdev_mirror_ops; 6855 } else { 6856 /* 6857 * Active hot spares can only be replaced by inactive hot 6858 * spares. 6859 */ 6860 if (pvd->vdev_ops == &vdev_spare_ops && 6861 oldvd->vdev_isspare && 6862 !spa_has_spare(spa, newvd->vdev_guid)) 6863 return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP)); 6864 6865 /* 6866 * If the source is a hot spare, and the parent isn't already a 6867 * spare, then we want to create a new hot spare. Otherwise, we 6868 * want to create a replacing vdev. The user is not allowed to 6869 * attach to a spared vdev child unless the 'isspare' state is 6870 * the same (spare replaces spare, non-spare replaces 6871 * non-spare). 6872 */ 6873 if (pvd->vdev_ops == &vdev_replacing_ops && 6874 spa_version(spa) < SPA_VERSION_MULTI_REPLACE) { 6875 return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP)); 6876 } else if (pvd->vdev_ops == &vdev_spare_ops && 6877 newvd->vdev_isspare != oldvd->vdev_isspare) { 6878 return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP)); 6879 } 6880 6881 if (newvd->vdev_isspare) 6882 pvops = &vdev_spare_ops; 6883 else 6884 pvops = &vdev_replacing_ops; 6885 } 6886 6887 /* 6888 * Make sure the new device is big enough. 6889 */ 6890 if (newvd->vdev_asize < vdev_get_min_asize(oldvd)) 6891 return (spa_vdev_exit(spa, newrootvd, txg, EOVERFLOW)); 6892 6893 /* 6894 * The new device cannot have a higher alignment requirement 6895 * than the top-level vdev. 6896 */ 6897 if (newvd->vdev_ashift > oldvd->vdev_top->vdev_ashift) 6898 return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP)); 6899 6900 /* 6901 * If this is an in-place replacement, update oldvd's path and devid 6902 * to make it distinguishable from newvd, and unopenable from now on. 6903 */ 6904 if (strcmp(oldvd->vdev_path, newvd->vdev_path) == 0) { 6905 spa_strfree(oldvd->vdev_path); 6906 oldvd->vdev_path = kmem_alloc(strlen(newvd->vdev_path) + 5, 6907 KM_SLEEP); 6908 (void) snprintf(oldvd->vdev_path, strlen(newvd->vdev_path) + 5, 6909 "%s/%s", newvd->vdev_path, "old"); 6910 if (oldvd->vdev_devid != NULL) { 6911 spa_strfree(oldvd->vdev_devid); 6912 oldvd->vdev_devid = NULL; 6913 } 6914 } 6915 6916 /* 6917 * If the parent is not a mirror, or if we're replacing, insert the new 6918 * mirror/replacing/spare vdev above oldvd. 6919 */ 6920 if (pvd->vdev_ops != pvops) 6921 pvd = vdev_add_parent(oldvd, pvops); 6922 6923 ASSERT(pvd->vdev_top->vdev_parent == rvd); 6924 ASSERT(pvd->vdev_ops == pvops); 6925 ASSERT(oldvd->vdev_parent == pvd); 6926 6927 /* 6928 * Extract the new device from its root and add it to pvd. 6929 */ 6930 vdev_remove_child(newrootvd, newvd); 6931 newvd->vdev_id = pvd->vdev_children; 6932 newvd->vdev_crtxg = oldvd->vdev_crtxg; 6933 vdev_add_child(pvd, newvd); 6934 6935 /* 6936 * Reevaluate the parent vdev state. 
6937 */
6938 vdev_propagate_state(pvd);
6939
6940 tvd = newvd->vdev_top;
6941 ASSERT(pvd->vdev_top == tvd);
6942 ASSERT(tvd->vdev_parent == rvd);
6943
6944 vdev_config_dirty(tvd);
6945
6946 /*
6947 * Set newvd's DTL to [TXG_INITIAL, dtl_max_txg) so that we account
6948 * for any dmu_sync-ed blocks. It will propagate upward when
6949 * spa_vdev_exit() calls vdev_dtl_reassess().
6950 */
6951 dtl_max_txg = txg + TXG_CONCURRENT_STATES;
6952
6953 vdev_dtl_dirty(newvd, DTL_MISSING,
6954 TXG_INITIAL, dtl_max_txg - TXG_INITIAL);
6955
6956 if (newvd->vdev_isspare) {
6957 spa_spare_activate(newvd);
6958 spa_event_notify(spa, newvd, NULL, ESC_ZFS_VDEV_SPARE);
6959 }
6960
6961 oldvdpath = spa_strdup(oldvd->vdev_path);
6962 newvdpath = spa_strdup(newvd->vdev_path);
6963 newvd_isspare = newvd->vdev_isspare;
6964
6965 /*
6966 * Mark newvd's DTL dirty in this txg.
6967 */
6968 vdev_dirty(tvd, VDD_DTL, newvd, txg);
6969
6970 /*
6971 * Schedule the resilver or rebuild to restart in the future. We do
6972 * this to ensure that dmu_sync-ed blocks have been stitched into the
6973 * respective datasets.
6974 */
6975 if (rebuild) {
6976 newvd->vdev_rebuild_txg = txg;
6977
6978 vdev_rebuild(tvd);
6979 } else {
6980 newvd->vdev_resilver_txg = txg;
6981
6982 if (dsl_scan_resilvering(spa_get_dsl(spa)) &&
6983 spa_feature_is_enabled(spa, SPA_FEATURE_RESILVER_DEFER)) {
6984 vdev_defer_resilver(newvd);
6985 } else {
6986 dsl_scan_restart_resilver(spa->spa_dsl_pool,
6987 dtl_max_txg);
6988 }
6989 }
6990
6991 if (spa->spa_bootfs)
6992 spa_event_notify(spa, newvd, NULL, ESC_ZFS_BOOTFS_VDEV_ATTACH);
6993
6994 spa_event_notify(spa, newvd, NULL, ESC_ZFS_VDEV_ATTACH);
6995
6996 /*
6997 * Commit the config
6998 */
6999 (void) spa_vdev_exit(spa, newrootvd, dtl_max_txg, 0);
7000
7001 spa_history_log_internal(spa, "vdev attach", NULL,
7002 "%s vdev=%s %s vdev=%s",
7003 replacing && newvd_isspare ? "spare in" :
7004 replacing ? "replace" : "attach", newvdpath,
7005 replacing ? "for" : "to", oldvdpath);
7006
7007 spa_strfree(oldvdpath);
7008 spa_strfree(newvdpath);
7009
7010 return (0);
7011 }
7012
7013 /*
7014 * Detach a device from a mirror or replacing vdev.
7015 *
7016 * If 'replace_done' is specified, only detach if the parent
7017 * is a replacing vdev.
7018 */
7019 int
7020 spa_vdev_detach(spa_t *spa, uint64_t guid, uint64_t pguid, int replace_done)
7021 {
7022 uint64_t txg;
7023 int error;
7024 vdev_t *rvd __maybe_unused = spa->spa_root_vdev;
7025 vdev_t *vd, *pvd, *cvd, *tvd;
7026 boolean_t unspare = B_FALSE;
7027 uint64_t unspare_guid = 0;
7028 char *vdpath;
7029
7030 ASSERT(spa_writeable(spa));
7031
7032 txg = spa_vdev_detach_enter(spa, guid);
7033
7034 vd = spa_lookup_by_guid(spa, guid, B_FALSE);
7035
7036 /*
7037 * Besides being called directly from userland through the
7038 * ioctl interface, spa_vdev_detach() can be potentially called
7039 * at the end of spa_vdev_resilver_done().
7040 *
7041 * In the regular case, when we have a checkpoint this shouldn't
7042 * happen as we never empty the DTLs of a vdev during the scrub
7043 * [see comment in dsl_scan_done()]. Thus spa_vdev_resilver_done()
7044 * should never get here when we have a checkpoint.
7045 *
7046 * That said, even if we checkpoint the pool exactly as
7047 * spa_vdev_resilver_done() calls this function, everything
7048 * should be fine as the resilver will return right away.
7049 */
7050 ASSERT(MUTEX_HELD(&spa_namespace_lock));
7051 if (spa_feature_is_active(spa, SPA_FEATURE_POOL_CHECKPOINT)) {
7052 error = (spa_has_checkpoint(spa)) ?
7053 ZFS_ERR_CHECKPOINT_EXISTS : ZFS_ERR_DISCARDING_CHECKPOINT; 7054 return (spa_vdev_exit(spa, NULL, txg, error)); 7055 } 7056 7057 if (vd == NULL) 7058 return (spa_vdev_exit(spa, NULL, txg, ENODEV)); 7059 7060 if (!vd->vdev_ops->vdev_op_leaf) 7061 return (spa_vdev_exit(spa, NULL, txg, ENOTSUP)); 7062 7063 pvd = vd->vdev_parent; 7064 7065 /* 7066 * If the parent/child relationship is not as expected, don't do it. 7067 * Consider M(A,R(B,C)) -- that is, a mirror of A with a replacing 7068 * vdev that's replacing B with C. The user's intent in replacing 7069 * is to go from M(A,B) to M(A,C). If the user decides to cancel 7070 * the replace by detaching C, the expected behavior is to end up 7071 * M(A,B). But suppose that right after deciding to detach C, 7072 * the replacement of B completes. We would have M(A,C), and then 7073 * ask to detach C, which would leave us with just A -- not what 7074 * the user wanted. To prevent this, we make sure that the 7075 * parent/child relationship hasn't changed -- in this example, 7076 * that C's parent is still the replacing vdev R. 7077 */ 7078 if (pvd->vdev_guid != pguid && pguid != 0) 7079 return (spa_vdev_exit(spa, NULL, txg, EBUSY)); 7080 7081 /* 7082 * Only 'replacing' or 'spare' vdevs can be replaced. 7083 */ 7084 if (replace_done && pvd->vdev_ops != &vdev_replacing_ops && 7085 pvd->vdev_ops != &vdev_spare_ops) 7086 return (spa_vdev_exit(spa, NULL, txg, ENOTSUP)); 7087 7088 ASSERT(pvd->vdev_ops != &vdev_spare_ops || 7089 spa_version(spa) >= SPA_VERSION_SPARES); 7090 7091 /* 7092 * Only mirror, replacing, and spare vdevs support detach. 7093 */ 7094 if (pvd->vdev_ops != &vdev_replacing_ops && 7095 pvd->vdev_ops != &vdev_mirror_ops && 7096 pvd->vdev_ops != &vdev_spare_ops) 7097 return (spa_vdev_exit(spa, NULL, txg, ENOTSUP)); 7098 7099 /* 7100 * If this device has the only valid copy of some data, 7101 * we cannot safely detach it. 7102 */ 7103 if (vdev_dtl_required(vd)) 7104 return (spa_vdev_exit(spa, NULL, txg, EBUSY)); 7105 7106 ASSERT(pvd->vdev_children >= 2); 7107 7108 /* 7109 * If we are detaching the second disk from a replacing vdev, then 7110 * check to see if we changed the original vdev's path to have "/old" 7111 * at the end in spa_vdev_attach(). If so, undo that change now. 7112 */ 7113 if (pvd->vdev_ops == &vdev_replacing_ops && vd->vdev_id > 0 && 7114 vd->vdev_path != NULL) { 7115 size_t len = strlen(vd->vdev_path); 7116 7117 for (int c = 0; c < pvd->vdev_children; c++) { 7118 cvd = pvd->vdev_child[c]; 7119 7120 if (cvd == vd || cvd->vdev_path == NULL) 7121 continue; 7122 7123 if (strncmp(cvd->vdev_path, vd->vdev_path, len) == 0 && 7124 strcmp(cvd->vdev_path + len, "/old") == 0) { 7125 spa_strfree(cvd->vdev_path); 7126 cvd->vdev_path = spa_strdup(vd->vdev_path); 7127 break; 7128 } 7129 } 7130 } 7131 7132 /* 7133 * If we are detaching the original disk from a normal spare, then it 7134 * implies that the spare should become a real disk, and be removed 7135 * from the active spare list for the pool. dRAID spares on the 7136 * other hand are coupled to the pool and thus should never be removed 7137 * from the spares list. 7138 */ 7139 if (pvd->vdev_ops == &vdev_spare_ops && vd->vdev_id == 0) { 7140 vdev_t *last_cvd = pvd->vdev_child[pvd->vdev_children - 1]; 7141 7142 if (last_cvd->vdev_isspare && 7143 last_cvd->vdev_ops != &vdev_draid_spare_ops) { 7144 unspare = B_TRUE; 7145 } 7146 } 7147 7148 /* 7149 * Erase the disk labels so the disk can be used for other things. 
7150 * This must be done after all other error cases are handled, 7151 * but before we disembowel vd (so we can still do I/O to it). 7152 * But if we can't do it, don't treat the error as fatal -- 7153 * it may be that the unwritability of the disk is the reason 7154 * it's being detached! 7155 */ 7156 error = vdev_label_init(vd, 0, VDEV_LABEL_REMOVE); 7157 7158 /* 7159 * Remove vd from its parent and compact the parent's children. 7160 */ 7161 vdev_remove_child(pvd, vd); 7162 vdev_compact_children(pvd); 7163 7164 /* 7165 * Remember one of the remaining children so we can get tvd below. 7166 */ 7167 cvd = pvd->vdev_child[pvd->vdev_children - 1]; 7168 7169 /* 7170 * If we need to remove the remaining child from the list of hot spares, 7171 * do it now, marking the vdev as no longer a spare in the process. 7172 * We must do this before vdev_remove_parent(), because that can 7173 * change the GUID if it creates a new toplevel GUID. For a similar 7174 * reason, we must remove the spare now, in the same txg as the detach; 7175 * otherwise someone could attach a new sibling, change the GUID, and 7176 * the subsequent attempt to spa_vdev_remove(unspare_guid) would fail. 7177 */ 7178 if (unspare) { 7179 ASSERT(cvd->vdev_isspare); 7180 spa_spare_remove(cvd); 7181 unspare_guid = cvd->vdev_guid; 7182 (void) spa_vdev_remove(spa, unspare_guid, B_TRUE); 7183 cvd->vdev_unspare = B_TRUE; 7184 } 7185 7186 /* 7187 * If the parent mirror/replacing vdev only has one child, 7188 * the parent is no longer needed. Remove it from the tree. 7189 */ 7190 if (pvd->vdev_children == 1) { 7191 if (pvd->vdev_ops == &vdev_spare_ops) 7192 cvd->vdev_unspare = B_FALSE; 7193 vdev_remove_parent(cvd); 7194 } 7195 7196 /* 7197 * We don't set tvd until now because the parent we just removed 7198 * may have been the previous top-level vdev. 7199 */ 7200 tvd = cvd->vdev_top; 7201 ASSERT(tvd->vdev_parent == rvd); 7202 7203 /* 7204 * Reevaluate the parent vdev state. 7205 */ 7206 vdev_propagate_state(cvd); 7207 7208 /* 7209 * If the 'autoexpand' property is set on the pool then automatically 7210 * try to expand the size of the pool. For example if the device we 7211 * just detached was smaller than the others, it may be possible to 7212 * add metaslabs (i.e. grow the pool). We need to reopen the vdev 7213 * first so that we can obtain the updated sizes of the leaf vdevs. 7214 */ 7215 if (spa->spa_autoexpand) { 7216 vdev_reopen(tvd); 7217 vdev_expand(tvd, txg); 7218 } 7219 7220 vdev_config_dirty(tvd); 7221 7222 /* 7223 * Mark vd's DTL as dirty in this txg. vdev_dtl_sync() will see that 7224 * vd->vdev_detached is set and free vd's DTL object in syncing context. 7225 * But first make sure we're not on any *other* txg's DTL list, to 7226 * prevent vd from being accessed after it's freed. 7227 */ 7228 vdpath = spa_strdup(vd->vdev_path ? 
vd->vdev_path : "none"); 7229 for (int t = 0; t < TXG_SIZE; t++) 7230 (void) txg_list_remove_this(&tvd->vdev_dtl_list, vd, t); 7231 vd->vdev_detached = B_TRUE; 7232 vdev_dirty(tvd, VDD_DTL, vd, txg); 7233 7234 spa_event_notify(spa, vd, NULL, ESC_ZFS_VDEV_REMOVE); 7235 spa_notify_waiters(spa); 7236 7237 /* hang on to the spa before we release the lock */ 7238 spa_open_ref(spa, FTAG); 7239 7240 error = spa_vdev_exit(spa, vd, txg, 0); 7241 7242 spa_history_log_internal(spa, "detach", NULL, 7243 "vdev=%s", vdpath); 7244 spa_strfree(vdpath); 7245 7246 /* 7247 * If this was the removal of the original device in a hot spare vdev, 7248 * then we want to go through and remove the device from the hot spare 7249 * list of every other pool. 7250 */ 7251 if (unspare) { 7252 spa_t *altspa = NULL; 7253 7254 mutex_enter(&spa_namespace_lock); 7255 while ((altspa = spa_next(altspa)) != NULL) { 7256 if (altspa->spa_state != POOL_STATE_ACTIVE || 7257 altspa == spa) 7258 continue; 7259 7260 spa_open_ref(altspa, FTAG); 7261 mutex_exit(&spa_namespace_lock); 7262 (void) spa_vdev_remove(altspa, unspare_guid, B_TRUE); 7263 mutex_enter(&spa_namespace_lock); 7264 spa_close(altspa, FTAG); 7265 } 7266 mutex_exit(&spa_namespace_lock); 7267 7268 /* search the rest of the vdevs for spares to remove */ 7269 spa_vdev_resilver_done(spa); 7270 } 7271 7272 /* all done with the spa; OK to release */ 7273 mutex_enter(&spa_namespace_lock); 7274 spa_close(spa, FTAG); 7275 mutex_exit(&spa_namespace_lock); 7276 7277 return (error); 7278 } 7279 7280 static int 7281 spa_vdev_initialize_impl(spa_t *spa, uint64_t guid, uint64_t cmd_type, 7282 list_t *vd_list) 7283 { 7284 ASSERT(MUTEX_HELD(&spa_namespace_lock)); 7285 7286 spa_config_enter(spa, SCL_CONFIG | SCL_STATE, FTAG, RW_READER); 7287 7288 /* Look up vdev and ensure it's a leaf. */ 7289 vdev_t *vd = spa_lookup_by_guid(spa, guid, B_FALSE); 7290 if (vd == NULL || vd->vdev_detached) { 7291 spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG); 7292 return (SET_ERROR(ENODEV)); 7293 } else if (!vd->vdev_ops->vdev_op_leaf || !vdev_is_concrete(vd)) { 7294 spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG); 7295 return (SET_ERROR(EINVAL)); 7296 } else if (!vdev_writeable(vd)) { 7297 spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG); 7298 return (SET_ERROR(EROFS)); 7299 } 7300 mutex_enter(&vd->vdev_initialize_lock); 7301 spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG); 7302 7303 /* 7304 * When we activate an initialize action we check to see 7305 * if the vdev_initialize_thread is NULL. We do this instead 7306 * of using the vdev_initialize_state since there might be 7307 * a previous initialization process which has completed but 7308 * the thread is not exited. 
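 *
 * In short: START requires that no initialize thread exist and that the
 * top-level vdev not be in the middle of being removed; CANCEL requires
 * an active or suspended initialization; SUSPEND requires an active one.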
7309 */ 7310 if (cmd_type == POOL_INITIALIZE_START && 7311 (vd->vdev_initialize_thread != NULL || 7312 vd->vdev_top->vdev_removing)) { 7313 mutex_exit(&vd->vdev_initialize_lock); 7314 return (SET_ERROR(EBUSY)); 7315 } else if (cmd_type == POOL_INITIALIZE_CANCEL && 7316 (vd->vdev_initialize_state != VDEV_INITIALIZE_ACTIVE && 7317 vd->vdev_initialize_state != VDEV_INITIALIZE_SUSPENDED)) { 7318 mutex_exit(&vd->vdev_initialize_lock); 7319 return (SET_ERROR(ESRCH)); 7320 } else if (cmd_type == POOL_INITIALIZE_SUSPEND && 7321 vd->vdev_initialize_state != VDEV_INITIALIZE_ACTIVE) { 7322 mutex_exit(&vd->vdev_initialize_lock); 7323 return (SET_ERROR(ESRCH)); 7324 } 7325 7326 switch (cmd_type) { 7327 case POOL_INITIALIZE_START: 7328 vdev_initialize(vd); 7329 break; 7330 case POOL_INITIALIZE_CANCEL: 7331 vdev_initialize_stop(vd, VDEV_INITIALIZE_CANCELED, vd_list); 7332 break; 7333 case POOL_INITIALIZE_SUSPEND: 7334 vdev_initialize_stop(vd, VDEV_INITIALIZE_SUSPENDED, vd_list); 7335 break; 7336 default: 7337 panic("invalid cmd_type %llu", (unsigned long long)cmd_type); 7338 } 7339 mutex_exit(&vd->vdev_initialize_lock); 7340 7341 return (0); 7342 } 7343 7344 int 7345 spa_vdev_initialize(spa_t *spa, nvlist_t *nv, uint64_t cmd_type, 7346 nvlist_t *vdev_errlist) 7347 { 7348 int total_errors = 0; 7349 list_t vd_list; 7350 7351 list_create(&vd_list, sizeof (vdev_t), 7352 offsetof(vdev_t, vdev_initialize_node)); 7353 7354 /* 7355 * We hold the namespace lock through the whole function 7356 * to prevent any changes to the pool while we're starting or 7357 * stopping initialization. The config and state locks are held so that 7358 * we can properly assess the vdev state before we commit to 7359 * the initializing operation. 7360 */ 7361 mutex_enter(&spa_namespace_lock); 7362 7363 for (nvpair_t *pair = nvlist_next_nvpair(nv, NULL); 7364 pair != NULL; pair = nvlist_next_nvpair(nv, pair)) { 7365 uint64_t vdev_guid = fnvpair_value_uint64(pair); 7366 7367 int error = spa_vdev_initialize_impl(spa, vdev_guid, cmd_type, 7368 &vd_list); 7369 if (error != 0) { 7370 char guid_as_str[MAXNAMELEN]; 7371 7372 (void) snprintf(guid_as_str, sizeof (guid_as_str), 7373 "%llu", (unsigned long long)vdev_guid); 7374 fnvlist_add_int64(vdev_errlist, guid_as_str, error); 7375 total_errors++; 7376 } 7377 } 7378 7379 /* Wait for all initialize threads to stop. */ 7380 vdev_initialize_stop_wait(spa, &vd_list); 7381 7382 /* Sync out the initializing state */ 7383 txg_wait_synced(spa->spa_dsl_pool, 0); 7384 mutex_exit(&spa_namespace_lock); 7385 7386 list_destroy(&vd_list); 7387 7388 return (total_errors); 7389 } 7390 7391 static int 7392 spa_vdev_trim_impl(spa_t *spa, uint64_t guid, uint64_t cmd_type, 7393 uint64_t rate, boolean_t partial, boolean_t secure, list_t *vd_list) 7394 { 7395 ASSERT(MUTEX_HELD(&spa_namespace_lock)); 7396 7397 spa_config_enter(spa, SCL_CONFIG | SCL_STATE, FTAG, RW_READER); 7398 7399 /* Look up vdev and ensure it's a leaf. 
*/
7400 vdev_t *vd = spa_lookup_by_guid(spa, guid, B_FALSE);
7401 if (vd == NULL || vd->vdev_detached) {
7402 spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
7403 return (SET_ERROR(ENODEV));
7404 } else if (!vd->vdev_ops->vdev_op_leaf || !vdev_is_concrete(vd)) {
7405 spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
7406 return (SET_ERROR(EINVAL));
7407 } else if (!vdev_writeable(vd)) {
7408 spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
7409 return (SET_ERROR(EROFS));
7410 } else if (!vd->vdev_has_trim) {
7411 spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
7412 return (SET_ERROR(EOPNOTSUPP));
7413 } else if (secure && !vd->vdev_has_securetrim) {
7414 spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
7415 return (SET_ERROR(EOPNOTSUPP));
7416 }
7417 mutex_enter(&vd->vdev_trim_lock);
7418 spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
7419
7420 /*
7421 * When we activate a TRIM action we check to see if the
7422 * vdev_trim_thread is NULL. We do this instead of using the
7423 * vdev_trim_state since there might be a previous TRIM process
7424 * which has completed but the thread is not exited.
7425 */
7426 if (cmd_type == POOL_TRIM_START &&
7427 (vd->vdev_trim_thread != NULL || vd->vdev_top->vdev_removing)) {
7428 mutex_exit(&vd->vdev_trim_lock);
7429 return (SET_ERROR(EBUSY));
7430 } else if (cmd_type == POOL_TRIM_CANCEL &&
7431 (vd->vdev_trim_state != VDEV_TRIM_ACTIVE &&
7432 vd->vdev_trim_state != VDEV_TRIM_SUSPENDED)) {
7433 mutex_exit(&vd->vdev_trim_lock);
7434 return (SET_ERROR(ESRCH));
7435 } else if (cmd_type == POOL_TRIM_SUSPEND &&
7436 vd->vdev_trim_state != VDEV_TRIM_ACTIVE) {
7437 mutex_exit(&vd->vdev_trim_lock);
7438 return (SET_ERROR(ESRCH));
7439 }
7440
7441 switch (cmd_type) {
7442 case POOL_TRIM_START:
7443 vdev_trim(vd, rate, partial, secure);
7444 break;
7445 case POOL_TRIM_CANCEL:
7446 vdev_trim_stop(vd, VDEV_TRIM_CANCELED, vd_list);
7447 break;
7448 case POOL_TRIM_SUSPEND:
7449 vdev_trim_stop(vd, VDEV_TRIM_SUSPENDED, vd_list);
7450 break;
7451 default:
7452 panic("invalid cmd_type %llu", (unsigned long long)cmd_type);
7453 }
7454 mutex_exit(&vd->vdev_trim_lock);
7455
7456 return (0);
7457 }
7458
7459 /*
7460 * Initiates a manual TRIM for the requested vdevs. This kicks off individual
7461 * TRIM threads for each child vdev. These threads pass over all of the free
7462 * space in the vdev's metaslabs and issue TRIM commands for that space.
7463 */
7464 int
7465 spa_vdev_trim(spa_t *spa, nvlist_t *nv, uint64_t cmd_type, uint64_t rate,
7466 boolean_t partial, boolean_t secure, nvlist_t *vdev_errlist)
7467 {
7468 int total_errors = 0;
7469 list_t vd_list;
7470
7471 list_create(&vd_list, sizeof (vdev_t),
7472 offsetof(vdev_t, vdev_trim_node));
7473
7474 /*
7475 * We hold the namespace lock through the whole function
7476 * to prevent any changes to the pool while we're starting or
7477 * stopping TRIM. The config and state locks are held so that
7478 * we can properly assess the vdev state before we commit to
7479 * the TRIM operation.
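 *
 * Per-vdev failures are reported to the caller through 'vdev_errlist'
 * (keyed by vdev guid); the return value is the number of vdevs that
 * failed rather than a single errno.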
7480 */ 7481 mutex_enter(&spa_namespace_lock); 7482 7483 for (nvpair_t *pair = nvlist_next_nvpair(nv, NULL); 7484 pair != NULL; pair = nvlist_next_nvpair(nv, pair)) { 7485 uint64_t vdev_guid = fnvpair_value_uint64(pair); 7486 7487 int error = spa_vdev_trim_impl(spa, vdev_guid, cmd_type, 7488 rate, partial, secure, &vd_list); 7489 if (error != 0) { 7490 char guid_as_str[MAXNAMELEN]; 7491 7492 (void) snprintf(guid_as_str, sizeof (guid_as_str), 7493 "%llu", (unsigned long long)vdev_guid); 7494 fnvlist_add_int64(vdev_errlist, guid_as_str, error); 7495 total_errors++; 7496 } 7497 } 7498 7499 /* Wait for all TRIM threads to stop. */ 7500 vdev_trim_stop_wait(spa, &vd_list); 7501 7502 /* Sync out the TRIM state */ 7503 txg_wait_synced(spa->spa_dsl_pool, 0); 7504 mutex_exit(&spa_namespace_lock); 7505 7506 list_destroy(&vd_list); 7507 7508 return (total_errors); 7509 } 7510 7511 /* 7512 * Split a set of devices from their mirrors, and create a new pool from them. 7513 */ 7514 int 7515 spa_vdev_split_mirror(spa_t *spa, const char *newname, nvlist_t *config, 7516 nvlist_t *props, boolean_t exp) 7517 { 7518 int error = 0; 7519 uint64_t txg, *glist; 7520 spa_t *newspa; 7521 uint_t c, children, lastlog; 7522 nvlist_t **child, *nvl, *tmp; 7523 dmu_tx_t *tx; 7524 char *altroot = NULL; 7525 vdev_t *rvd, **vml = NULL; /* vdev modify list */ 7526 boolean_t activate_slog; 7527 7528 ASSERT(spa_writeable(spa)); 7529 7530 txg = spa_vdev_enter(spa); 7531 7532 ASSERT(MUTEX_HELD(&spa_namespace_lock)); 7533 if (spa_feature_is_active(spa, SPA_FEATURE_POOL_CHECKPOINT)) { 7534 error = (spa_has_checkpoint(spa)) ? 7535 ZFS_ERR_CHECKPOINT_EXISTS : ZFS_ERR_DISCARDING_CHECKPOINT; 7536 return (spa_vdev_exit(spa, NULL, txg, error)); 7537 } 7538 7539 /* clear the log and flush everything up to now */ 7540 activate_slog = spa_passivate_log(spa); 7541 (void) spa_vdev_config_exit(spa, NULL, txg, 0, FTAG); 7542 error = spa_reset_logs(spa); 7543 txg = spa_vdev_config_enter(spa); 7544 7545 if (activate_slog) 7546 spa_activate_log(spa); 7547 7548 if (error != 0) 7549 return (spa_vdev_exit(spa, NULL, txg, error)); 7550 7551 /* check new spa name before going any further */ 7552 if (spa_lookup(newname) != NULL) 7553 return (spa_vdev_exit(spa, NULL, txg, EEXIST)); 7554 7555 /* 7556 * scan through all the children to ensure they're all mirrors 7557 */ 7558 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &nvl) != 0 || 7559 nvlist_lookup_nvlist_array(nvl, ZPOOL_CONFIG_CHILDREN, &child, 7560 &children) != 0) 7561 return (spa_vdev_exit(spa, NULL, txg, EINVAL)); 7562 7563 /* first, check to ensure we've got the right child count */ 7564 rvd = spa->spa_root_vdev; 7565 lastlog = 0; 7566 for (c = 0; c < rvd->vdev_children; c++) { 7567 vdev_t *vd = rvd->vdev_child[c]; 7568 7569 /* don't count the holes & logs as children */ 7570 if (vd->vdev_islog || (vd->vdev_ops != &vdev_indirect_ops && 7571 !vdev_is_concrete(vd))) { 7572 if (lastlog == 0) 7573 lastlog = c; 7574 continue; 7575 } 7576 7577 lastlog = 0; 7578 } 7579 if (children != (lastlog != 0 ? 
lastlog : rvd->vdev_children)) 7580 return (spa_vdev_exit(spa, NULL, txg, EINVAL)); 7581 7582 /* next, ensure no spare or cache devices are part of the split */ 7583 if (nvlist_lookup_nvlist(nvl, ZPOOL_CONFIG_SPARES, &tmp) == 0 || 7584 nvlist_lookup_nvlist(nvl, ZPOOL_CONFIG_L2CACHE, &tmp) == 0) 7585 return (spa_vdev_exit(spa, NULL, txg, EINVAL)); 7586 7587 vml = kmem_zalloc(children * sizeof (vdev_t *), KM_SLEEP); 7588 glist = kmem_zalloc(children * sizeof (uint64_t), KM_SLEEP); 7589 7590 /* then, loop over each vdev and validate it */ 7591 for (c = 0; c < children; c++) { 7592 uint64_t is_hole = 0; 7593 7594 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_HOLE, 7595 &is_hole); 7596 7597 if (is_hole != 0) { 7598 if (spa->spa_root_vdev->vdev_child[c]->vdev_ishole || 7599 spa->spa_root_vdev->vdev_child[c]->vdev_islog) { 7600 continue; 7601 } else { 7602 error = SET_ERROR(EINVAL); 7603 break; 7604 } 7605 } 7606 7607 /* deal with indirect vdevs */ 7608 if (spa->spa_root_vdev->vdev_child[c]->vdev_ops == 7609 &vdev_indirect_ops) 7610 continue; 7611 7612 /* which disk is going to be split? */ 7613 if (nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_GUID, 7614 &glist[c]) != 0) { 7615 error = SET_ERROR(EINVAL); 7616 break; 7617 } 7618 7619 /* look it up in the spa */ 7620 vml[c] = spa_lookup_by_guid(spa, glist[c], B_FALSE); 7621 if (vml[c] == NULL) { 7622 error = SET_ERROR(ENODEV); 7623 break; 7624 } 7625 7626 /* make sure there's nothing stopping the split */ 7627 if (vml[c]->vdev_parent->vdev_ops != &vdev_mirror_ops || 7628 vml[c]->vdev_islog || 7629 !vdev_is_concrete(vml[c]) || 7630 vml[c]->vdev_isspare || 7631 vml[c]->vdev_isl2cache || 7632 !vdev_writeable(vml[c]) || 7633 vml[c]->vdev_children != 0 || 7634 vml[c]->vdev_state != VDEV_STATE_HEALTHY || 7635 c != spa->spa_root_vdev->vdev_child[c]->vdev_id) { 7636 error = SET_ERROR(EINVAL); 7637 break; 7638 } 7639 7640 if (vdev_dtl_required(vml[c]) || 7641 vdev_resilver_needed(vml[c], NULL, NULL)) { 7642 error = SET_ERROR(EBUSY); 7643 break; 7644 } 7645 7646 /* we need certain info from the top level */ 7647 fnvlist_add_uint64(child[c], ZPOOL_CONFIG_METASLAB_ARRAY, 7648 vml[c]->vdev_top->vdev_ms_array); 7649 fnvlist_add_uint64(child[c], ZPOOL_CONFIG_METASLAB_SHIFT, 7650 vml[c]->vdev_top->vdev_ms_shift); 7651 fnvlist_add_uint64(child[c], ZPOOL_CONFIG_ASIZE, 7652 vml[c]->vdev_top->vdev_asize); 7653 fnvlist_add_uint64(child[c], ZPOOL_CONFIG_ASHIFT, 7654 vml[c]->vdev_top->vdev_ashift); 7655 7656 /* transfer per-vdev ZAPs */ 7657 ASSERT3U(vml[c]->vdev_leaf_zap, !=, 0); 7658 VERIFY0(nvlist_add_uint64(child[c], 7659 ZPOOL_CONFIG_VDEV_LEAF_ZAP, vml[c]->vdev_leaf_zap)); 7660 7661 ASSERT3U(vml[c]->vdev_top->vdev_top_zap, !=, 0); 7662 VERIFY0(nvlist_add_uint64(child[c], 7663 ZPOOL_CONFIG_VDEV_TOP_ZAP, 7664 vml[c]->vdev_parent->vdev_top_zap)); 7665 } 7666 7667 if (error != 0) { 7668 kmem_free(vml, children * sizeof (vdev_t *)); 7669 kmem_free(glist, children * sizeof (uint64_t)); 7670 return (spa_vdev_exit(spa, NULL, txg, error)); 7671 } 7672 7673 /* stop writers from using the disks */ 7674 for (c = 0; c < children; c++) { 7675 if (vml[c] != NULL) 7676 vml[c]->vdev_offline = B_TRUE; 7677 } 7678 vdev_reopen(spa->spa_root_vdev); 7679 7680 /* 7681 * Temporarily record the splitting vdevs in the spa config. This 7682 * will disappear once the config is regenerated. 
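*
* As a rough sketch (the guids shown are placeholders), the transient
* entry built just below looks like:
*
*	ZPOOL_CONFIG_SPLIT:
*	    ZPOOL_CONFIG_SPLIT_LIST: [ guid0, guid1, ... ]
*
* and is hung off spa->spa_config under spa_props_lock.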
7683 */ 7684 nvl = fnvlist_alloc(); 7685 fnvlist_add_uint64_array(nvl, ZPOOL_CONFIG_SPLIT_LIST, glist, children); 7686 kmem_free(glist, children * sizeof (uint64_t)); 7687 7688 mutex_enter(&spa->spa_props_lock); 7689 fnvlist_add_nvlist(spa->spa_config, ZPOOL_CONFIG_SPLIT, nvl); 7690 mutex_exit(&spa->spa_props_lock); 7691 spa->spa_config_splitting = nvl; 7692 vdev_config_dirty(spa->spa_root_vdev); 7693 7694 /* configure and create the new pool */ 7695 fnvlist_add_string(config, ZPOOL_CONFIG_POOL_NAME, newname); 7696 fnvlist_add_uint64(config, ZPOOL_CONFIG_POOL_STATE, 7697 exp ? POOL_STATE_EXPORTED : POOL_STATE_ACTIVE); 7698 fnvlist_add_uint64(config, ZPOOL_CONFIG_VERSION, spa_version(spa)); 7699 fnvlist_add_uint64(config, ZPOOL_CONFIG_POOL_TXG, spa->spa_config_txg); 7700 fnvlist_add_uint64(config, ZPOOL_CONFIG_POOL_GUID, 7701 spa_generate_guid(NULL)); 7702 VERIFY0(nvlist_add_boolean(config, ZPOOL_CONFIG_HAS_PER_VDEV_ZAPS)); 7703 (void) nvlist_lookup_string(props, 7704 zpool_prop_to_name(ZPOOL_PROP_ALTROOT), &altroot); 7705 7706 /* add the new pool to the namespace */ 7707 newspa = spa_add(newname, config, altroot); 7708 newspa->spa_avz_action = AVZ_ACTION_REBUILD; 7709 newspa->spa_config_txg = spa->spa_config_txg; 7710 spa_set_log_state(newspa, SPA_LOG_CLEAR); 7711 7712 /* release the spa config lock, retaining the namespace lock */ 7713 spa_vdev_config_exit(spa, NULL, txg, 0, FTAG); 7714 7715 if (zio_injection_enabled) 7716 zio_handle_panic_injection(spa, FTAG, 1); 7717 7718 spa_activate(newspa, spa_mode_global); 7719 spa_async_suspend(newspa); 7720 7721 /* 7722 * Temporarily stop the initializing and TRIM activity. We set the 7723 * state to ACTIVE so that we know to resume initializing or TRIM 7724 * once the split has completed. 7725 */ 7726 list_t vd_initialize_list; 7727 list_create(&vd_initialize_list, sizeof (vdev_t), 7728 offsetof(vdev_t, vdev_initialize_node)); 7729 7730 list_t vd_trim_list; 7731 list_create(&vd_trim_list, sizeof (vdev_t), 7732 offsetof(vdev_t, vdev_trim_node)); 7733 7734 for (c = 0; c < children; c++) { 7735 if (vml[c] != NULL && vml[c]->vdev_ops != &vdev_indirect_ops) { 7736 mutex_enter(&vml[c]->vdev_initialize_lock); 7737 vdev_initialize_stop(vml[c], 7738 VDEV_INITIALIZE_ACTIVE, &vd_initialize_list); 7739 mutex_exit(&vml[c]->vdev_initialize_lock); 7740 7741 mutex_enter(&vml[c]->vdev_trim_lock); 7742 vdev_trim_stop(vml[c], VDEV_TRIM_ACTIVE, &vd_trim_list); 7743 mutex_exit(&vml[c]->vdev_trim_lock); 7744 } 7745 } 7746 7747 vdev_initialize_stop_wait(spa, &vd_initialize_list); 7748 vdev_trim_stop_wait(spa, &vd_trim_list); 7749 7750 list_destroy(&vd_initialize_list); 7751 list_destroy(&vd_trim_list); 7752 7753 newspa->spa_config_source = SPA_CONFIG_SRC_SPLIT; 7754 newspa->spa_is_splitting = B_TRUE; 7755 7756 /* create the new pool from the disks of the original pool */ 7757 error = spa_load(newspa, SPA_LOAD_IMPORT, SPA_IMPORT_ASSEMBLE); 7758 if (error) 7759 goto out; 7760 7761 /* if that worked, generate a real config for the new pool */ 7762 if (newspa->spa_root_vdev != NULL) { 7763 newspa->spa_config_splitting = fnvlist_alloc(); 7764 fnvlist_add_uint64(newspa->spa_config_splitting, 7765 ZPOOL_CONFIG_SPLIT_GUID, spa_guid(spa)); 7766 spa_config_set(newspa, spa_config_generate(newspa, NULL, -1ULL, 7767 B_TRUE)); 7768 } 7769 7770 /* set the props */ 7771 if (props != NULL) { 7772 spa_configfile_set(newspa, props, B_FALSE); 7773 error = spa_prop_set(newspa, props); 7774 if (error) 7775 goto out; 7776 } 7777 7778 /* flush everything */ 7779 txg = 
spa_vdev_config_enter(newspa); 7780 vdev_config_dirty(newspa->spa_root_vdev); 7781 (void) spa_vdev_config_exit(newspa, NULL, txg, 0, FTAG); 7782 7783 if (zio_injection_enabled) 7784 zio_handle_panic_injection(spa, FTAG, 2); 7785 7786 spa_async_resume(newspa); 7787 7788 /* finally, update the original pool's config */ 7789 txg = spa_vdev_config_enter(spa); 7790 tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir); 7791 error = dmu_tx_assign(tx, TXG_WAIT); 7792 if (error != 0) 7793 dmu_tx_abort(tx); 7794 for (c = 0; c < children; c++) { 7795 if (vml[c] != NULL && vml[c]->vdev_ops != &vdev_indirect_ops) { 7796 vdev_t *tvd = vml[c]->vdev_top; 7797 7798 /* 7799 * Need to be sure the detachable VDEV is not 7800 * on any *other* txg's DTL list to prevent it 7801 * from being accessed after it's freed. 7802 */ 7803 for (int t = 0; t < TXG_SIZE; t++) { 7804 (void) txg_list_remove_this( 7805 &tvd->vdev_dtl_list, vml[c], t); 7806 } 7807 7808 vdev_split(vml[c]); 7809 if (error == 0) 7810 spa_history_log_internal(spa, "detach", tx, 7811 "vdev=%s", vml[c]->vdev_path); 7812 7813 vdev_free(vml[c]); 7814 } 7815 } 7816 spa->spa_avz_action = AVZ_ACTION_REBUILD; 7817 vdev_config_dirty(spa->spa_root_vdev); 7818 spa->spa_config_splitting = NULL; 7819 nvlist_free(nvl); 7820 if (error == 0) 7821 dmu_tx_commit(tx); 7822 (void) spa_vdev_exit(spa, NULL, txg, 0); 7823 7824 if (zio_injection_enabled) 7825 zio_handle_panic_injection(spa, FTAG, 3); 7826 7827 /* split is complete; log a history record */ 7828 spa_history_log_internal(newspa, "split", NULL, 7829 "from pool %s", spa_name(spa)); 7830 7831 newspa->spa_is_splitting = B_FALSE; 7832 kmem_free(vml, children * sizeof (vdev_t *)); 7833 7834 /* if we're not going to mount the filesystems in userland, export */ 7835 if (exp) 7836 error = spa_export_common(newname, POOL_STATE_EXPORTED, NULL, 7837 B_FALSE, B_FALSE); 7838 7839 return (error); 7840 7841 out: 7842 spa_unload(newspa); 7843 spa_deactivate(newspa); 7844 spa_remove(newspa); 7845 7846 txg = spa_vdev_config_enter(spa); 7847 7848 /* re-online all offlined disks */ 7849 for (c = 0; c < children; c++) { 7850 if (vml[c] != NULL) 7851 vml[c]->vdev_offline = B_FALSE; 7852 } 7853 7854 /* restart initializing or trimming disks as necessary */ 7855 spa_async_request(spa, SPA_ASYNC_INITIALIZE_RESTART); 7856 spa_async_request(spa, SPA_ASYNC_TRIM_RESTART); 7857 spa_async_request(spa, SPA_ASYNC_AUTOTRIM_RESTART); 7858 7859 vdev_reopen(spa->spa_root_vdev); 7860 7861 nvlist_free(spa->spa_config_splitting); 7862 spa->spa_config_splitting = NULL; 7863 (void) spa_vdev_exit(spa, NULL, txg, error); 7864 7865 kmem_free(vml, children * sizeof (vdev_t *)); 7866 return (error); 7867 } 7868 7869 /* 7870 * Find any device that's done replacing, or a vdev marked 'unspare' that's 7871 * currently spared, so we can detach it. 7872 */ 7873 static vdev_t * 7874 spa_vdev_resilver_done_hunt(vdev_t *vd) 7875 { 7876 vdev_t *newvd, *oldvd; 7877 7878 for (int c = 0; c < vd->vdev_children; c++) { 7879 oldvd = spa_vdev_resilver_done_hunt(vd->vdev_child[c]); 7880 if (oldvd != NULL) 7881 return (oldvd); 7882 } 7883 7884 /* 7885 * Check for a completed replacement. We always consider the first 7886 * vdev in the list to be the oldest vdev, and the last one to be 7887 * the newest (see spa_vdev_attach() for how that works). In 7888 * the case where the newest vdev is faulted, we will not automatically 7889 * remove it after a resilver completes. This is OK as it will require 7890 * user intervention to determine which disk the admin wishes to keep. 
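*
* For example (layout only, no new behavior implied), a finished
* replacement typically looks like:
*
*	replacing-0
*	    child[0]			oldvd, the original disk
*	    child[children - 1]	newvd, the freshly resilvered disk
*
* and oldvd is returned for detach once newvd's DTLs are empty and
* oldvd is no longer required.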
7891 */ 7892 if (vd->vdev_ops == &vdev_replacing_ops) { 7893 ASSERT(vd->vdev_children > 1); 7894 7895 newvd = vd->vdev_child[vd->vdev_children - 1]; 7896 oldvd = vd->vdev_child[0]; 7897 7898 if (vdev_dtl_empty(newvd, DTL_MISSING) && 7899 vdev_dtl_empty(newvd, DTL_OUTAGE) && 7900 !vdev_dtl_required(oldvd)) 7901 return (oldvd); 7902 } 7903 7904 /* 7905 * Check for a completed resilver with the 'unspare' flag set. 7906 * Also potentially update faulted state. 7907 */ 7908 if (vd->vdev_ops == &vdev_spare_ops) { 7909 vdev_t *first = vd->vdev_child[0]; 7910 vdev_t *last = vd->vdev_child[vd->vdev_children - 1]; 7911 7912 if (last->vdev_unspare) { 7913 oldvd = first; 7914 newvd = last; 7915 } else if (first->vdev_unspare) { 7916 oldvd = last; 7917 newvd = first; 7918 } else { 7919 oldvd = NULL; 7920 } 7921 7922 if (oldvd != NULL && 7923 vdev_dtl_empty(newvd, DTL_MISSING) && 7924 vdev_dtl_empty(newvd, DTL_OUTAGE) && 7925 !vdev_dtl_required(oldvd)) 7926 return (oldvd); 7927 7928 vdev_propagate_state(vd); 7929 7930 /* 7931 * If there are more than two spares attached to a disk, 7932 * and those spares are not required, then we want to 7933 * attempt to free them up now so that they can be used 7934 * by other pools. Once we're back down to a single 7935 * disk+spare, we stop removing them. 7936 */ 7937 if (vd->vdev_children > 2) { 7938 newvd = vd->vdev_child[1]; 7939 7940 if (newvd->vdev_isspare && last->vdev_isspare && 7941 vdev_dtl_empty(last, DTL_MISSING) && 7942 vdev_dtl_empty(last, DTL_OUTAGE) && 7943 !vdev_dtl_required(newvd)) 7944 return (newvd); 7945 } 7946 } 7947 7948 return (NULL); 7949 } 7950 7951 static void 7952 spa_vdev_resilver_done(spa_t *spa) 7953 { 7954 vdev_t *vd, *pvd, *ppvd; 7955 uint64_t guid, sguid, pguid, ppguid; 7956 7957 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 7958 7959 while ((vd = spa_vdev_resilver_done_hunt(spa->spa_root_vdev)) != NULL) { 7960 pvd = vd->vdev_parent; 7961 ppvd = pvd->vdev_parent; 7962 guid = vd->vdev_guid; 7963 pguid = pvd->vdev_guid; 7964 ppguid = ppvd->vdev_guid; 7965 sguid = 0; 7966 /* 7967 * If we have just finished replacing a hot spared device, then 7968 * we need to detach the parent's first child (the original hot 7969 * spare) as well. 7970 */ 7971 if (ppvd->vdev_ops == &vdev_spare_ops && pvd->vdev_id == 0 && 7972 ppvd->vdev_children == 2) { 7973 ASSERT(pvd->vdev_ops == &vdev_replacing_ops); 7974 sguid = ppvd->vdev_child[1]->vdev_guid; 7975 } 7976 ASSERT(vd->vdev_resilver_txg == 0 || !vdev_dtl_required(vd)); 7977 7978 spa_config_exit(spa, SCL_ALL, FTAG); 7979 if (spa_vdev_detach(spa, guid, pguid, B_TRUE) != 0) 7980 return; 7981 if (sguid && spa_vdev_detach(spa, sguid, ppguid, B_TRUE) != 0) 7982 return; 7983 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 7984 } 7985 7986 spa_config_exit(spa, SCL_ALL, FTAG); 7987 7988 /* 7989 * If a detach was not performed above replace waiters will not have 7990 * been notified. In which case we must do so now. 7991 */ 7992 spa_notify_waiters(spa); 7993 } 7994 7995 /* 7996 * Update the stored path or FRU for this vdev. 
7997 */ 7998 static int 7999 spa_vdev_set_common(spa_t *spa, uint64_t guid, const char *value, 8000 boolean_t ispath) 8001 { 8002 vdev_t *vd; 8003 boolean_t sync = B_FALSE; 8004 8005 ASSERT(spa_writeable(spa)); 8006 8007 spa_vdev_state_enter(spa, SCL_ALL); 8008 8009 if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL) 8010 return (spa_vdev_state_exit(spa, NULL, ENOENT)); 8011 8012 if (!vd->vdev_ops->vdev_op_leaf) 8013 return (spa_vdev_state_exit(spa, NULL, ENOTSUP)); 8014 8015 if (ispath) { 8016 if (strcmp(value, vd->vdev_path) != 0) { 8017 spa_strfree(vd->vdev_path); 8018 vd->vdev_path = spa_strdup(value); 8019 sync = B_TRUE; 8020 } 8021 } else { 8022 if (vd->vdev_fru == NULL) { 8023 vd->vdev_fru = spa_strdup(value); 8024 sync = B_TRUE; 8025 } else if (strcmp(value, vd->vdev_fru) != 0) { 8026 spa_strfree(vd->vdev_fru); 8027 vd->vdev_fru = spa_strdup(value); 8028 sync = B_TRUE; 8029 } 8030 } 8031 8032 return (spa_vdev_state_exit(spa, sync ? vd : NULL, 0)); 8033 } 8034 8035 int 8036 spa_vdev_setpath(spa_t *spa, uint64_t guid, const char *newpath) 8037 { 8038 return (spa_vdev_set_common(spa, guid, newpath, B_TRUE)); 8039 } 8040 8041 int 8042 spa_vdev_setfru(spa_t *spa, uint64_t guid, const char *newfru) 8043 { 8044 return (spa_vdev_set_common(spa, guid, newfru, B_FALSE)); 8045 } 8046 8047 /* 8048 * ========================================================================== 8049 * SPA Scanning 8050 * ========================================================================== 8051 */ 8052 int 8053 spa_scrub_pause_resume(spa_t *spa, pool_scrub_cmd_t cmd) 8054 { 8055 ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == 0); 8056 8057 if (dsl_scan_resilvering(spa->spa_dsl_pool)) 8058 return (SET_ERROR(EBUSY)); 8059 8060 return (dsl_scrub_set_pause_resume(spa->spa_dsl_pool, cmd)); 8061 } 8062 8063 int 8064 spa_scan_stop(spa_t *spa) 8065 { 8066 ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == 0); 8067 if (dsl_scan_resilvering(spa->spa_dsl_pool)) 8068 return (SET_ERROR(EBUSY)); 8069 return (dsl_scan_cancel(spa->spa_dsl_pool)); 8070 } 8071 8072 int 8073 spa_scan(spa_t *spa, pool_scan_func_t func) 8074 { 8075 ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == 0); 8076 8077 if (func >= POOL_SCAN_FUNCS || func == POOL_SCAN_NONE) 8078 return (SET_ERROR(ENOTSUP)); 8079 8080 if (func == POOL_SCAN_RESILVER && 8081 !spa_feature_is_enabled(spa, SPA_FEATURE_RESILVER_DEFER)) 8082 return (SET_ERROR(ENOTSUP)); 8083 8084 /* 8085 * If a resilver was requested, but there is no DTL on a 8086 * writeable leaf device, we have nothing to do. 8087 */ 8088 if (func == POOL_SCAN_RESILVER && 8089 !vdev_resilver_needed(spa->spa_root_vdev, NULL, NULL)) { 8090 spa_async_request(spa, SPA_ASYNC_RESILVER_DONE); 8091 return (0); 8092 } 8093 8094 return (dsl_scan(spa->spa_dsl_pool, func)); 8095 } 8096 8097 /* 8098 * ========================================================================== 8099 * SPA async task processing 8100 * ========================================================================== 8101 */ 8102 8103 static void 8104 spa_async_remove(spa_t *spa, vdev_t *vd) 8105 { 8106 if (vd->vdev_remove_wanted) { 8107 vd->vdev_remove_wanted = B_FALSE; 8108 vd->vdev_delayed_close = B_FALSE; 8109 vdev_set_state(vd, B_FALSE, VDEV_STATE_REMOVED, VDEV_AUX_NONE); 8110 8111 /* 8112 * We want to clear the stats, but we don't want to do a full 8113 * vdev_clear() as that will cause us to throw away 8114 * degraded/faulted state as well as attempt to reopen the 8115 * device, all of which is a waste. 
8116 */ 8117 vd->vdev_stat.vs_read_errors = 0; 8118 vd->vdev_stat.vs_write_errors = 0; 8119 vd->vdev_stat.vs_checksum_errors = 0; 8120 8121 vdev_state_dirty(vd->vdev_top); 8122 8123 /* Tell userspace that the vdev is gone. */ 8124 zfs_post_remove(spa, vd); 8125 } 8126 8127 for (int c = 0; c < vd->vdev_children; c++) 8128 spa_async_remove(spa, vd->vdev_child[c]); 8129 } 8130 8131 static void 8132 spa_async_probe(spa_t *spa, vdev_t *vd) 8133 { 8134 if (vd->vdev_probe_wanted) { 8135 vd->vdev_probe_wanted = B_FALSE; 8136 vdev_reopen(vd); /* vdev_open() does the actual probe */ 8137 } 8138 8139 for (int c = 0; c < vd->vdev_children; c++) 8140 spa_async_probe(spa, vd->vdev_child[c]); 8141 } 8142 8143 static void 8144 spa_async_autoexpand(spa_t *spa, vdev_t *vd) 8145 { 8146 if (!spa->spa_autoexpand) 8147 return; 8148 8149 for (int c = 0; c < vd->vdev_children; c++) { 8150 vdev_t *cvd = vd->vdev_child[c]; 8151 spa_async_autoexpand(spa, cvd); 8152 } 8153 8154 if (!vd->vdev_ops->vdev_op_leaf || vd->vdev_physpath == NULL) 8155 return; 8156 8157 spa_event_notify(vd->vdev_spa, vd, NULL, ESC_ZFS_VDEV_AUTOEXPAND); 8158 } 8159 8160 static __attribute__((noreturn)) void 8161 spa_async_thread(void *arg) 8162 { 8163 spa_t *spa = (spa_t *)arg; 8164 dsl_pool_t *dp = spa->spa_dsl_pool; 8165 int tasks; 8166 8167 ASSERT(spa->spa_sync_on); 8168 8169 mutex_enter(&spa->spa_async_lock); 8170 tasks = spa->spa_async_tasks; 8171 spa->spa_async_tasks = 0; 8172 mutex_exit(&spa->spa_async_lock); 8173 8174 /* 8175 * See if the config needs to be updated. 8176 */ 8177 if (tasks & SPA_ASYNC_CONFIG_UPDATE) { 8178 uint64_t old_space, new_space; 8179 8180 mutex_enter(&spa_namespace_lock); 8181 old_space = metaslab_class_get_space(spa_normal_class(spa)); 8182 old_space += metaslab_class_get_space(spa_special_class(spa)); 8183 old_space += metaslab_class_get_space(spa_dedup_class(spa)); 8184 old_space += metaslab_class_get_space( 8185 spa_embedded_log_class(spa)); 8186 8187 spa_config_update(spa, SPA_CONFIG_UPDATE_POOL); 8188 8189 new_space = metaslab_class_get_space(spa_normal_class(spa)); 8190 new_space += metaslab_class_get_space(spa_special_class(spa)); 8191 new_space += metaslab_class_get_space(spa_dedup_class(spa)); 8192 new_space += metaslab_class_get_space( 8193 spa_embedded_log_class(spa)); 8194 mutex_exit(&spa_namespace_lock); 8195 8196 /* 8197 * If the pool grew as a result of the config update, 8198 * then log an internal history event. 8199 */ 8200 if (new_space != old_space) { 8201 spa_history_log_internal(spa, "vdev online", NULL, 8202 "pool '%s' size: %llu(+%llu)", 8203 spa_name(spa), (u_longlong_t)new_space, 8204 (u_longlong_t)(new_space - old_space)); 8205 } 8206 } 8207 8208 /* 8209 * See if any devices need to be marked REMOVED. 8210 */ 8211 if (tasks & SPA_ASYNC_REMOVE) { 8212 spa_vdev_state_enter(spa, SCL_NONE); 8213 spa_async_remove(spa, spa->spa_root_vdev); 8214 for (int i = 0; i < spa->spa_l2cache.sav_count; i++) 8215 spa_async_remove(spa, spa->spa_l2cache.sav_vdevs[i]); 8216 for (int i = 0; i < spa->spa_spares.sav_count; i++) 8217 spa_async_remove(spa, spa->spa_spares.sav_vdevs[i]); 8218 (void) spa_vdev_state_exit(spa, NULL, 0); 8219 } 8220 8221 if ((tasks & SPA_ASYNC_AUTOEXPAND) && !spa_suspended(spa)) { 8222 spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER); 8223 spa_async_autoexpand(spa, spa->spa_root_vdev); 8224 spa_config_exit(spa, SCL_CONFIG, FTAG); 8225 } 8226 8227 /* 8228 * See if any devices need to be probed. 
8229 */ 8230 if (tasks & SPA_ASYNC_PROBE) { 8231 spa_vdev_state_enter(spa, SCL_NONE); 8232 spa_async_probe(spa, spa->spa_root_vdev); 8233 (void) spa_vdev_state_exit(spa, NULL, 0); 8234 } 8235 8236 /* 8237 * If any devices are done replacing, detach them. 8238 */ 8239 if (tasks & SPA_ASYNC_RESILVER_DONE || 8240 tasks & SPA_ASYNC_REBUILD_DONE) { 8241 spa_vdev_resilver_done(spa); 8242 } 8243 8244 /* 8245 * Kick off a resilver. 8246 */ 8247 if (tasks & SPA_ASYNC_RESILVER && 8248 !vdev_rebuild_active(spa->spa_root_vdev) && 8249 (!dsl_scan_resilvering(dp) || 8250 !spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_RESILVER_DEFER))) 8251 dsl_scan_restart_resilver(dp, 0); 8252 8253 if (tasks & SPA_ASYNC_INITIALIZE_RESTART) { 8254 mutex_enter(&spa_namespace_lock); 8255 spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER); 8256 vdev_initialize_restart(spa->spa_root_vdev); 8257 spa_config_exit(spa, SCL_CONFIG, FTAG); 8258 mutex_exit(&spa_namespace_lock); 8259 } 8260 8261 if (tasks & SPA_ASYNC_TRIM_RESTART) { 8262 mutex_enter(&spa_namespace_lock); 8263 spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER); 8264 vdev_trim_restart(spa->spa_root_vdev); 8265 spa_config_exit(spa, SCL_CONFIG, FTAG); 8266 mutex_exit(&spa_namespace_lock); 8267 } 8268 8269 if (tasks & SPA_ASYNC_AUTOTRIM_RESTART) { 8270 mutex_enter(&spa_namespace_lock); 8271 spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER); 8272 vdev_autotrim_restart(spa); 8273 spa_config_exit(spa, SCL_CONFIG, FTAG); 8274 mutex_exit(&spa_namespace_lock); 8275 } 8276 8277 /* 8278 * Kick off L2 cache whole device TRIM. 8279 */ 8280 if (tasks & SPA_ASYNC_L2CACHE_TRIM) { 8281 mutex_enter(&spa_namespace_lock); 8282 spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER); 8283 vdev_trim_l2arc(spa); 8284 spa_config_exit(spa, SCL_CONFIG, FTAG); 8285 mutex_exit(&spa_namespace_lock); 8286 } 8287 8288 /* 8289 * Kick off L2 cache rebuilding. 8290 */ 8291 if (tasks & SPA_ASYNC_L2CACHE_REBUILD) { 8292 mutex_enter(&spa_namespace_lock); 8293 spa_config_enter(spa, SCL_L2ARC, FTAG, RW_READER); 8294 l2arc_spa_rebuild_start(spa); 8295 spa_config_exit(spa, SCL_L2ARC, FTAG); 8296 mutex_exit(&spa_namespace_lock); 8297 } 8298 8299 /* 8300 * Let the world know that we're done. 
8301 */ 8302 mutex_enter(&spa->spa_async_lock); 8303 spa->spa_async_thread = NULL; 8304 cv_broadcast(&spa->spa_async_cv); 8305 mutex_exit(&spa->spa_async_lock); 8306 thread_exit(); 8307 } 8308 8309 void 8310 spa_async_suspend(spa_t *spa) 8311 { 8312 mutex_enter(&spa->spa_async_lock); 8313 spa->spa_async_suspended++; 8314 while (spa->spa_async_thread != NULL) 8315 cv_wait(&spa->spa_async_cv, &spa->spa_async_lock); 8316 mutex_exit(&spa->spa_async_lock); 8317 8318 spa_vdev_remove_suspend(spa); 8319 8320 zthr_t *condense_thread = spa->spa_condense_zthr; 8321 if (condense_thread != NULL) 8322 zthr_cancel(condense_thread); 8323 8324 zthr_t *discard_thread = spa->spa_checkpoint_discard_zthr; 8325 if (discard_thread != NULL) 8326 zthr_cancel(discard_thread); 8327 8328 zthr_t *ll_delete_thread = spa->spa_livelist_delete_zthr; 8329 if (ll_delete_thread != NULL) 8330 zthr_cancel(ll_delete_thread); 8331 8332 zthr_t *ll_condense_thread = spa->spa_livelist_condense_zthr; 8333 if (ll_condense_thread != NULL) 8334 zthr_cancel(ll_condense_thread); 8335 } 8336 8337 void 8338 spa_async_resume(spa_t *spa) 8339 { 8340 mutex_enter(&spa->spa_async_lock); 8341 ASSERT(spa->spa_async_suspended != 0); 8342 spa->spa_async_suspended--; 8343 mutex_exit(&spa->spa_async_lock); 8344 spa_restart_removal(spa); 8345 8346 zthr_t *condense_thread = spa->spa_condense_zthr; 8347 if (condense_thread != NULL) 8348 zthr_resume(condense_thread); 8349 8350 zthr_t *discard_thread = spa->spa_checkpoint_discard_zthr; 8351 if (discard_thread != NULL) 8352 zthr_resume(discard_thread); 8353 8354 zthr_t *ll_delete_thread = spa->spa_livelist_delete_zthr; 8355 if (ll_delete_thread != NULL) 8356 zthr_resume(ll_delete_thread); 8357 8358 zthr_t *ll_condense_thread = spa->spa_livelist_condense_zthr; 8359 if (ll_condense_thread != NULL) 8360 zthr_resume(ll_condense_thread); 8361 } 8362 8363 static boolean_t 8364 spa_async_tasks_pending(spa_t *spa) 8365 { 8366 uint_t non_config_tasks; 8367 uint_t config_task; 8368 boolean_t config_task_suspended; 8369 8370 non_config_tasks = spa->spa_async_tasks & ~SPA_ASYNC_CONFIG_UPDATE; 8371 config_task = spa->spa_async_tasks & SPA_ASYNC_CONFIG_UPDATE; 8372 if (spa->spa_ccw_fail_time == 0) { 8373 config_task_suspended = B_FALSE; 8374 } else { 8375 config_task_suspended = 8376 (gethrtime() - spa->spa_ccw_fail_time) < 8377 ((hrtime_t)zfs_ccw_retry_interval * NANOSEC); 8378 } 8379 8380 return (non_config_tasks || (config_task && !config_task_suspended)); 8381 } 8382 8383 static void 8384 spa_async_dispatch(spa_t *spa) 8385 { 8386 mutex_enter(&spa->spa_async_lock); 8387 if (spa_async_tasks_pending(spa) && 8388 !spa->spa_async_suspended && 8389 spa->spa_async_thread == NULL) 8390 spa->spa_async_thread = thread_create(NULL, 0, 8391 spa_async_thread, spa, 0, &p0, TS_RUN, maxclsyspri); 8392 mutex_exit(&spa->spa_async_lock); 8393 } 8394 8395 void 8396 spa_async_request(spa_t *spa, int task) 8397 { 8398 zfs_dbgmsg("spa=%s async request task=%u", spa->spa_name, task); 8399 mutex_enter(&spa->spa_async_lock); 8400 spa->spa_async_tasks |= task; 8401 mutex_exit(&spa->spa_async_lock); 8402 } 8403 8404 int 8405 spa_async_tasks(spa_t *spa) 8406 { 8407 return (spa->spa_async_tasks); 8408 } 8409 8410 /* 8411 * ========================================================================== 8412 * SPA syncing routines 8413 * ========================================================================== 8414 */ 8415 8416 8417 static int 8418 bpobj_enqueue_cb(void *arg, const blkptr_t *bp, boolean_t bp_freed, 8419 dmu_tx_t *tx) 8420 { 8421 
bpobj_t *bpo = arg; 8422 bpobj_enqueue(bpo, bp, bp_freed, tx); 8423 return (0); 8424 } 8425 8426 int 8427 bpobj_enqueue_alloc_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx) 8428 { 8429 return (bpobj_enqueue_cb(arg, bp, B_FALSE, tx)); 8430 } 8431 8432 int 8433 bpobj_enqueue_free_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx) 8434 { 8435 return (bpobj_enqueue_cb(arg, bp, B_TRUE, tx)); 8436 } 8437 8438 static int 8439 spa_free_sync_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx) 8440 { 8441 zio_t *pio = arg; 8442 8443 zio_nowait(zio_free_sync(pio, pio->io_spa, dmu_tx_get_txg(tx), bp, 8444 pio->io_flags)); 8445 return (0); 8446 } 8447 8448 static int 8449 bpobj_spa_free_sync_cb(void *arg, const blkptr_t *bp, boolean_t bp_freed, 8450 dmu_tx_t *tx) 8451 { 8452 ASSERT(!bp_freed); 8453 return (spa_free_sync_cb(arg, bp, tx)); 8454 } 8455 8456 /* 8457 * Note: this simple function is not inlined to make it easier to dtrace the 8458 * amount of time spent syncing frees. 8459 */ 8460 static void 8461 spa_sync_frees(spa_t *spa, bplist_t *bpl, dmu_tx_t *tx) 8462 { 8463 zio_t *zio = zio_root(spa, NULL, NULL, 0); 8464 bplist_iterate(bpl, spa_free_sync_cb, zio, tx); 8465 VERIFY(zio_wait(zio) == 0); 8466 } 8467 8468 /* 8469 * Note: this simple function is not inlined to make it easier to dtrace the 8470 * amount of time spent syncing deferred frees. 8471 */ 8472 static void 8473 spa_sync_deferred_frees(spa_t *spa, dmu_tx_t *tx) 8474 { 8475 if (spa_sync_pass(spa) != 1) 8476 return; 8477 8478 /* 8479 * Note: 8480 * If the log space map feature is active, we stop deferring 8481 * frees to the next TXG and therefore running this function 8482 * would be considered a no-op as spa_deferred_bpobj should 8483 * not have any entries. 8484 * 8485 * That said we run this function anyway (instead of returning 8486 * immediately) for the edge-case scenario where we just 8487 * activated the log space map feature in this TXG but we have 8488 * deferred frees from the previous TXG. 8489 */ 8490 zio_t *zio = zio_root(spa, NULL, NULL, 0); 8491 VERIFY3U(bpobj_iterate(&spa->spa_deferred_bpobj, 8492 bpobj_spa_free_sync_cb, zio, tx), ==, 0); 8493 VERIFY0(zio_wait(zio)); 8494 } 8495 8496 static void 8497 spa_sync_nvlist(spa_t *spa, uint64_t obj, nvlist_t *nv, dmu_tx_t *tx) 8498 { 8499 char *packed = NULL; 8500 size_t bufsize; 8501 size_t nvsize = 0; 8502 dmu_buf_t *db; 8503 8504 VERIFY(nvlist_size(nv, &nvsize, NV_ENCODE_XDR) == 0); 8505 8506 /* 8507 * Write full (SPA_CONFIG_BLOCKSIZE) blocks of configuration 8508 * information. This avoids the dmu_buf_will_dirty() path and 8509 * saves us a pre-read to get data we don't actually care about. 8510 */ 8511 bufsize = P2ROUNDUP((uint64_t)nvsize, SPA_CONFIG_BLOCKSIZE); 8512 packed = vmem_alloc(bufsize, KM_SLEEP); 8513 8514 VERIFY(nvlist_pack(nv, &packed, &nvsize, NV_ENCODE_XDR, 8515 KM_SLEEP) == 0); 8516 memset(packed + nvsize, 0, bufsize - nvsize); 8517 8518 dmu_write(spa->spa_meta_objset, obj, 0, bufsize, packed, tx); 8519 8520 vmem_free(packed, bufsize); 8521 8522 VERIFY(0 == dmu_bonus_hold(spa->spa_meta_objset, obj, FTAG, &db)); 8523 dmu_buf_will_dirty(db, tx); 8524 *(uint64_t *)db->db_data = nvsize; 8525 dmu_buf_rele(db, FTAG); 8526 } 8527 8528 static void 8529 spa_sync_aux_dev(spa_t *spa, spa_aux_vdev_t *sav, dmu_tx_t *tx, 8530 const char *config, const char *entry) 8531 { 8532 nvlist_t *nvroot; 8533 nvlist_t **list; 8534 int i; 8535 8536 if (!sav->sav_sync) 8537 return; 8538 8539 /* 8540 * Update the MOS nvlist describing the list of available devices. 
8541 * spa_validate_aux() will have already made sure this nvlist is 8542 * valid and the vdevs are labeled appropriately. 8543 */ 8544 if (sav->sav_object == 0) { 8545 sav->sav_object = dmu_object_alloc(spa->spa_meta_objset, 8546 DMU_OT_PACKED_NVLIST, 1 << 14, DMU_OT_PACKED_NVLIST_SIZE, 8547 sizeof (uint64_t), tx); 8548 VERIFY(zap_update(spa->spa_meta_objset, 8549 DMU_POOL_DIRECTORY_OBJECT, entry, sizeof (uint64_t), 1, 8550 &sav->sav_object, tx) == 0); 8551 } 8552 8553 nvroot = fnvlist_alloc(); 8554 if (sav->sav_count == 0) { 8555 fnvlist_add_nvlist_array(nvroot, config, 8556 (const nvlist_t * const *)NULL, 0); 8557 } else { 8558 list = kmem_alloc(sav->sav_count*sizeof (void *), KM_SLEEP); 8559 for (i = 0; i < sav->sav_count; i++) 8560 list[i] = vdev_config_generate(spa, sav->sav_vdevs[i], 8561 B_FALSE, VDEV_CONFIG_L2CACHE); 8562 fnvlist_add_nvlist_array(nvroot, config, 8563 (const nvlist_t * const *)list, sav->sav_count); 8564 for (i = 0; i < sav->sav_count; i++) 8565 nvlist_free(list[i]); 8566 kmem_free(list, sav->sav_count * sizeof (void *)); 8567 } 8568 8569 spa_sync_nvlist(spa, sav->sav_object, nvroot, tx); 8570 nvlist_free(nvroot); 8571 8572 sav->sav_sync = B_FALSE; 8573 } 8574 8575 /* 8576 * Rebuild spa's all-vdev ZAP from the vdev ZAPs indicated in each vdev_t. 8577 * The all-vdev ZAP must be empty. 8578 */ 8579 static void 8580 spa_avz_build(vdev_t *vd, uint64_t avz, dmu_tx_t *tx) 8581 { 8582 spa_t *spa = vd->vdev_spa; 8583 8584 if (vd->vdev_top_zap != 0) { 8585 VERIFY0(zap_add_int(spa->spa_meta_objset, avz, 8586 vd->vdev_top_zap, tx)); 8587 } 8588 if (vd->vdev_leaf_zap != 0) { 8589 VERIFY0(zap_add_int(spa->spa_meta_objset, avz, 8590 vd->vdev_leaf_zap, tx)); 8591 } 8592 for (uint64_t i = 0; i < vd->vdev_children; i++) { 8593 spa_avz_build(vd->vdev_child[i], avz, tx); 8594 } 8595 } 8596 8597 static void 8598 spa_sync_config_object(spa_t *spa, dmu_tx_t *tx) 8599 { 8600 nvlist_t *config; 8601 8602 /* 8603 * If the pool is being imported from a pre-per-vdev-ZAP version of ZFS, 8604 * its config may not be dirty but we still need to build per-vdev ZAPs. 8605 * Similarly, if the pool is being assembled (e.g. after a split), we 8606 * need to rebuild the AVZ although the config may not be dirty. 
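*
* Rough roadmap of the AVZ_ACTION_REBUILD case handled below (this is a
* summary of the existing code, not additional steps):
*
*	new_avz = zap_create(mos, DMU_OTN_ZAP_METADATA, ...);
*	spa_avz_build(spa->spa_root_vdev, new_avz, tx);
*	destroy every ZAP listed in the old AVZ but absent from new_avz;
*	destroy the old AVZ and point DMU_POOL_VDEV_ZAP_MAP at new_avz.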
8607 */ 8608 if (list_is_empty(&spa->spa_config_dirty_list) && 8609 spa->spa_avz_action == AVZ_ACTION_NONE) 8610 return; 8611 8612 spa_config_enter(spa, SCL_STATE, FTAG, RW_READER); 8613 8614 ASSERT(spa->spa_avz_action == AVZ_ACTION_NONE || 8615 spa->spa_avz_action == AVZ_ACTION_INITIALIZE || 8616 spa->spa_all_vdev_zaps != 0); 8617 8618 if (spa->spa_avz_action == AVZ_ACTION_REBUILD) { 8619 /* Make and build the new AVZ */ 8620 uint64_t new_avz = zap_create(spa->spa_meta_objset, 8621 DMU_OTN_ZAP_METADATA, DMU_OT_NONE, 0, tx); 8622 spa_avz_build(spa->spa_root_vdev, new_avz, tx); 8623 8624 /* Diff old AVZ with new one */ 8625 zap_cursor_t zc; 8626 zap_attribute_t za; 8627 8628 for (zap_cursor_init(&zc, spa->spa_meta_objset, 8629 spa->spa_all_vdev_zaps); 8630 zap_cursor_retrieve(&zc, &za) == 0; 8631 zap_cursor_advance(&zc)) { 8632 uint64_t vdzap = za.za_first_integer; 8633 if (zap_lookup_int(spa->spa_meta_objset, new_avz, 8634 vdzap) == ENOENT) { 8635 /* 8636 * ZAP is listed in old AVZ but not in new one; 8637 * destroy it 8638 */ 8639 VERIFY0(zap_destroy(spa->spa_meta_objset, vdzap, 8640 tx)); 8641 } 8642 } 8643 8644 zap_cursor_fini(&zc); 8645 8646 /* Destroy the old AVZ */ 8647 VERIFY0(zap_destroy(spa->spa_meta_objset, 8648 spa->spa_all_vdev_zaps, tx)); 8649 8650 /* Replace the old AVZ in the dir obj with the new one */ 8651 VERIFY0(zap_update(spa->spa_meta_objset, 8652 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_VDEV_ZAP_MAP, 8653 sizeof (new_avz), 1, &new_avz, tx)); 8654 8655 spa->spa_all_vdev_zaps = new_avz; 8656 } else if (spa->spa_avz_action == AVZ_ACTION_DESTROY) { 8657 zap_cursor_t zc; 8658 zap_attribute_t za; 8659 8660 /* Walk through the AVZ and destroy all listed ZAPs */ 8661 for (zap_cursor_init(&zc, spa->spa_meta_objset, 8662 spa->spa_all_vdev_zaps); 8663 zap_cursor_retrieve(&zc, &za) == 0; 8664 zap_cursor_advance(&zc)) { 8665 uint64_t zap = za.za_first_integer; 8666 VERIFY0(zap_destroy(spa->spa_meta_objset, zap, tx)); 8667 } 8668 8669 zap_cursor_fini(&zc); 8670 8671 /* Destroy and unlink the AVZ itself */ 8672 VERIFY0(zap_destroy(spa->spa_meta_objset, 8673 spa->spa_all_vdev_zaps, tx)); 8674 VERIFY0(zap_remove(spa->spa_meta_objset, 8675 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_VDEV_ZAP_MAP, tx)); 8676 spa->spa_all_vdev_zaps = 0; 8677 } 8678 8679 if (spa->spa_all_vdev_zaps == 0) { 8680 spa->spa_all_vdev_zaps = zap_create_link(spa->spa_meta_objset, 8681 DMU_OTN_ZAP_METADATA, DMU_POOL_DIRECTORY_OBJECT, 8682 DMU_POOL_VDEV_ZAP_MAP, tx); 8683 } 8684 spa->spa_avz_action = AVZ_ACTION_NONE; 8685 8686 /* Create ZAPs for vdevs that don't have them. */ 8687 vdev_construct_zaps(spa->spa_root_vdev, tx); 8688 8689 config = spa_config_generate(spa, spa->spa_root_vdev, 8690 dmu_tx_get_txg(tx), B_FALSE); 8691 8692 /* 8693 * If we're upgrading the spa version then make sure that 8694 * the config object gets updated with the correct version. 8695 */ 8696 if (spa->spa_ubsync.ub_version < spa->spa_uberblock.ub_version) 8697 fnvlist_add_uint64(config, ZPOOL_CONFIG_VERSION, 8698 spa->spa_uberblock.ub_version); 8699 8700 spa_config_exit(spa, SCL_STATE, FTAG); 8701 8702 nvlist_free(spa->spa_config_syncing); 8703 spa->spa_config_syncing = config; 8704 8705 spa_sync_nvlist(spa, spa->spa_config_object, config, tx); 8706 } 8707 8708 static void 8709 spa_sync_version(void *arg, dmu_tx_t *tx) 8710 { 8711 uint64_t *versionp = arg; 8712 uint64_t version = *versionp; 8713 spa_t *spa = dmu_tx_pool(tx)->dp_spa; 8714 8715 /* 8716 * Setting the version is special cased when first creating the pool. 
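*
* (Illustrative note: outside of pool creation, a version bump normally
* reaches this function as a sync task, dispatched along the lines of
*
*	dsl_sync_task(spa_name(spa), NULL, spa_sync_version, &version,
*	    6, ZFS_SPACE_CHECK_RESERVED);
*
* so by the time we run here we are already in syncing context.)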
8717 */ 8718 ASSERT(tx->tx_txg != TXG_INITIAL); 8719 8720 ASSERT(SPA_VERSION_IS_SUPPORTED(version)); 8721 ASSERT(version >= spa_version(spa)); 8722 8723 spa->spa_uberblock.ub_version = version; 8724 vdev_config_dirty(spa->spa_root_vdev); 8725 spa_history_log_internal(spa, "set", tx, "version=%lld", 8726 (longlong_t)version); 8727 } 8728 8729 /* 8730 * Set zpool properties. 8731 */ 8732 static void 8733 spa_sync_props(void *arg, dmu_tx_t *tx) 8734 { 8735 nvlist_t *nvp = arg; 8736 spa_t *spa = dmu_tx_pool(tx)->dp_spa; 8737 objset_t *mos = spa->spa_meta_objset; 8738 nvpair_t *elem = NULL; 8739 8740 mutex_enter(&spa->spa_props_lock); 8741 8742 while ((elem = nvlist_next_nvpair(nvp, elem))) { 8743 uint64_t intval; 8744 char *strval, *fname; 8745 zpool_prop_t prop; 8746 const char *propname; 8747 zprop_type_t proptype; 8748 spa_feature_t fid; 8749 8750 switch (prop = zpool_name_to_prop(nvpair_name(elem))) { 8751 case ZPOOL_PROP_INVAL: 8752 /* 8753 * We checked this earlier in spa_prop_validate(). 8754 */ 8755 ASSERT(zpool_prop_feature(nvpair_name(elem))); 8756 8757 fname = strchr(nvpair_name(elem), '@') + 1; 8758 VERIFY0(zfeature_lookup_name(fname, &fid)); 8759 8760 spa_feature_enable(spa, fid, tx); 8761 spa_history_log_internal(spa, "set", tx, 8762 "%s=enabled", nvpair_name(elem)); 8763 break; 8764 8765 case ZPOOL_PROP_VERSION: 8766 intval = fnvpair_value_uint64(elem); 8767 /* 8768 * The version is synced separately before other 8769 * properties and should be correct by now. 8770 */ 8771 ASSERT3U(spa_version(spa), >=, intval); 8772 break; 8773 8774 case ZPOOL_PROP_ALTROOT: 8775 /* 8776 * 'altroot' is a non-persistent property. It should 8777 * have been set temporarily at creation or import time. 8778 */ 8779 ASSERT(spa->spa_root != NULL); 8780 break; 8781 8782 case ZPOOL_PROP_READONLY: 8783 case ZPOOL_PROP_CACHEFILE: 8784 /* 8785 * 'readonly' and 'cachefile' are also non-persistent 8786 * properties. 8787 */ 8788 break; 8789 case ZPOOL_PROP_COMMENT: 8790 strval = fnvpair_value_string(elem); 8791 if (spa->spa_comment != NULL) 8792 spa_strfree(spa->spa_comment); 8793 spa->spa_comment = spa_strdup(strval); 8794 /* 8795 * We need to dirty the configuration on all the vdevs 8796 * so that their labels get updated. We also need to 8797 * update the cache file to keep it in sync with the 8798 * MOS version. It's unnecessary to do this for pool 8799 * creation since the vdev's configuration has already 8800 * been dirtied. 8801 */ 8802 if (tx->tx_txg != TXG_INITIAL) { 8803 vdev_config_dirty(spa->spa_root_vdev); 8804 spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE); 8805 } 8806 spa_history_log_internal(spa, "set", tx, 8807 "%s=%s", nvpair_name(elem), strval); 8808 break; 8809 case ZPOOL_PROP_COMPATIBILITY: 8810 strval = fnvpair_value_string(elem); 8811 if (spa->spa_compatibility != NULL) 8812 spa_strfree(spa->spa_compatibility); 8813 spa->spa_compatibility = spa_strdup(strval); 8814 /* 8815 * Dirty the configuration on vdevs as above. 8816 */ 8817 if (tx->tx_txg != TXG_INITIAL) { 8818 vdev_config_dirty(spa->spa_root_vdev); 8819 spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE); 8820 } 8821 8822 spa_history_log_internal(spa, "set", tx, 8823 "%s=%s", nvpair_name(elem), strval); 8824 break; 8825 8826 default: 8827 /* 8828 * Set pool property values in the poolprops mos object. 
8829 */ 8830 if (spa->spa_pool_props_object == 0) { 8831 spa->spa_pool_props_object = 8832 zap_create_link(mos, DMU_OT_POOL_PROPS, 8833 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_PROPS, 8834 tx); 8835 } 8836 8837 /* normalize the property name */ 8838 propname = zpool_prop_to_name(prop); 8839 proptype = zpool_prop_get_type(prop); 8840 8841 if (nvpair_type(elem) == DATA_TYPE_STRING) { 8842 ASSERT(proptype == PROP_TYPE_STRING); 8843 strval = fnvpair_value_string(elem); 8844 VERIFY0(zap_update(mos, 8845 spa->spa_pool_props_object, propname, 8846 1, strlen(strval) + 1, strval, tx)); 8847 spa_history_log_internal(spa, "set", tx, 8848 "%s=%s", nvpair_name(elem), strval); 8849 } else if (nvpair_type(elem) == DATA_TYPE_UINT64) { 8850 intval = fnvpair_value_uint64(elem); 8851 8852 if (proptype == PROP_TYPE_INDEX) { 8853 const char *unused; 8854 VERIFY0(zpool_prop_index_to_string( 8855 prop, intval, &unused)); 8856 } 8857 VERIFY0(zap_update(mos, 8858 spa->spa_pool_props_object, propname, 8859 8, 1, &intval, tx)); 8860 spa_history_log_internal(spa, "set", tx, 8861 "%s=%lld", nvpair_name(elem), 8862 (longlong_t)intval); 8863 } else { 8864 ASSERT(0); /* not allowed */ 8865 } 8866 8867 switch (prop) { 8868 case ZPOOL_PROP_DELEGATION: 8869 spa->spa_delegation = intval; 8870 break; 8871 case ZPOOL_PROP_BOOTFS: 8872 spa->spa_bootfs = intval; 8873 break; 8874 case ZPOOL_PROP_FAILUREMODE: 8875 spa->spa_failmode = intval; 8876 break; 8877 case ZPOOL_PROP_AUTOTRIM: 8878 spa->spa_autotrim = intval; 8879 spa_async_request(spa, 8880 SPA_ASYNC_AUTOTRIM_RESTART); 8881 break; 8882 case ZPOOL_PROP_AUTOEXPAND: 8883 spa->spa_autoexpand = intval; 8884 if (tx->tx_txg != TXG_INITIAL) 8885 spa_async_request(spa, 8886 SPA_ASYNC_AUTOEXPAND); 8887 break; 8888 case ZPOOL_PROP_MULTIHOST: 8889 spa->spa_multihost = intval; 8890 break; 8891 default: 8892 break; 8893 } 8894 } 8895 8896 } 8897 8898 mutex_exit(&spa->spa_props_lock); 8899 } 8900 8901 /* 8902 * Perform one-time upgrade on-disk changes. spa_version() does not 8903 * reflect the new version this txg, so there must be no changes this 8904 * txg to anything that the upgrade code depends on after it executes. 8905 * Therefore this must be called after dsl_pool_sync() does the sync 8906 * tasks. 
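*
* For reference, the per-pass ordering in spa_sync_iterate_to_convergence()
* is roughly:
*
*	spa_sync_config_object(spa, tx);
*	dsl_pool_sync(dp, txg);		<- runs the sync tasks
*	...
*	spa_sync_upgrades(spa, tx);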
8907 */
8908 static void
8909 spa_sync_upgrades(spa_t *spa, dmu_tx_t *tx)
8910 {
8911 if (spa_sync_pass(spa) != 1)
8912 return;
8913
8914 dsl_pool_t *dp = spa->spa_dsl_pool;
8915 rrw_enter(&dp->dp_config_rwlock, RW_WRITER, FTAG);
8916
8917 if (spa->spa_ubsync.ub_version < SPA_VERSION_ORIGIN &&
8918 spa->spa_uberblock.ub_version >= SPA_VERSION_ORIGIN) {
8919 dsl_pool_create_origin(dp, tx);
8920
8921 /* Keeping the origin open increases spa_minref */
8922 spa->spa_minref += 3;
8923 }
8924
8925 if (spa->spa_ubsync.ub_version < SPA_VERSION_NEXT_CLONES &&
8926 spa->spa_uberblock.ub_version >= SPA_VERSION_NEXT_CLONES) {
8927 dsl_pool_upgrade_clones(dp, tx);
8928 }
8929
8930 if (spa->spa_ubsync.ub_version < SPA_VERSION_DIR_CLONES &&
8931 spa->spa_uberblock.ub_version >= SPA_VERSION_DIR_CLONES) {
8932 dsl_pool_upgrade_dir_clones(dp, tx);
8933
8934 /* Keeping the freedir open increases spa_minref */
8935 spa->spa_minref += 3;
8936 }
8937
8938 if (spa->spa_ubsync.ub_version < SPA_VERSION_FEATURES &&
8939 spa->spa_uberblock.ub_version >= SPA_VERSION_FEATURES) {
8940 spa_feature_create_zap_objects(spa, tx);
8941 }
8942
8943 /*
8944 * The LZ4_COMPRESS feature's behaviour was changed to activate_on_enable
8945 * when the ability to use lz4 compression for metadata was added. Old
8946 * pools that have this feature enabled must be upgraded to have this
8947 * feature active.
8948 */
8949 if (spa->spa_uberblock.ub_version >= SPA_VERSION_FEATURES) {
8950 boolean_t lz4_en = spa_feature_is_enabled(spa,
8951 SPA_FEATURE_LZ4_COMPRESS);
8952 boolean_t lz4_ac = spa_feature_is_active(spa,
8953 SPA_FEATURE_LZ4_COMPRESS);
8954
8955 if (lz4_en && !lz4_ac)
8956 spa_feature_incr(spa, SPA_FEATURE_LZ4_COMPRESS, tx);
8957 }
8958
8959 /*
8960 * If we haven't written the salt, do so now. Note that the
8961 * feature may not be activated yet, but that's fine since
8962 * the presence of this ZAP entry is backwards compatible.
8963 */
8964 if (zap_contains(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
8965 DMU_POOL_CHECKSUM_SALT) == ENOENT) {
8966 VERIFY0(zap_add(spa->spa_meta_objset,
8967 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_CHECKSUM_SALT, 1,
8968 sizeof (spa->spa_cksum_salt.zcs_bytes),
8969 spa->spa_cksum_salt.zcs_bytes, tx));
8970 }
8971
8972 rrw_exit(&dp->dp_config_rwlock, FTAG);
8973 }
8974
8975 static void
8976 vdev_indirect_state_sync_verify(vdev_t *vd)
8977 {
8978 vdev_indirect_mapping_t *vim __maybe_unused = vd->vdev_indirect_mapping;
8979 vdev_indirect_births_t *vib __maybe_unused = vd->vdev_indirect_births;
8980
8981 if (vd->vdev_ops == &vdev_indirect_ops) {
8982 ASSERT(vim != NULL);
8983 ASSERT(vib != NULL);
8984 }
8985
8986 uint64_t obsolete_sm_object = 0;
8987 ASSERT0(vdev_obsolete_sm_object(vd, &obsolete_sm_object));
8988 if (obsolete_sm_object != 0) {
8989 ASSERT(vd->vdev_obsolete_sm != NULL);
8990 ASSERT(vd->vdev_removing ||
8991 vd->vdev_ops == &vdev_indirect_ops);
8992 ASSERT(vdev_indirect_mapping_num_entries(vim) > 0);
8993 ASSERT(vdev_indirect_mapping_bytes_mapped(vim) > 0);
8994 ASSERT3U(obsolete_sm_object, ==,
8995 space_map_object(vd->vdev_obsolete_sm));
8996 ASSERT3U(vdev_indirect_mapping_bytes_mapped(vim), >=,
8997 space_map_allocated(vd->vdev_obsolete_sm));
8998 }
8999 ASSERT(vd->vdev_obsolete_segments != NULL);
9000
9001 /*
9002 * Since frees / remaps to an indirect vdev can only
9003 * happen in syncing context, the obsolete segments
9004 * tree must be empty when we start syncing.
9005 */ 9006 ASSERT0(range_tree_space(vd->vdev_obsolete_segments)); 9007 } 9008 9009 /* 9010 * Set the top-level vdev's max queue depth. Evaluate each top-level's 9011 * async write queue depth in case it changed. The max queue depth will 9012 * not change in the middle of syncing out this txg. 9013 */ 9014 static void 9015 spa_sync_adjust_vdev_max_queue_depth(spa_t *spa) 9016 { 9017 ASSERT(spa_writeable(spa)); 9018 9019 vdev_t *rvd = spa->spa_root_vdev; 9020 uint32_t max_queue_depth = zfs_vdev_async_write_max_active * 9021 zfs_vdev_queue_depth_pct / 100; 9022 metaslab_class_t *normal = spa_normal_class(spa); 9023 metaslab_class_t *special = spa_special_class(spa); 9024 metaslab_class_t *dedup = spa_dedup_class(spa); 9025 9026 uint64_t slots_per_allocator = 0; 9027 for (int c = 0; c < rvd->vdev_children; c++) { 9028 vdev_t *tvd = rvd->vdev_child[c]; 9029 9030 metaslab_group_t *mg = tvd->vdev_mg; 9031 if (mg == NULL || !metaslab_group_initialized(mg)) 9032 continue; 9033 9034 metaslab_class_t *mc = mg->mg_class; 9035 if (mc != normal && mc != special && mc != dedup) 9036 continue; 9037 9038 /* 9039 * It is safe to do a lock-free check here because only async 9040 * allocations look at mg_max_alloc_queue_depth, and async 9041 * allocations all happen from spa_sync(). 9042 */ 9043 for (int i = 0; i < mg->mg_allocators; i++) { 9044 ASSERT0(zfs_refcount_count( 9045 &(mg->mg_allocator[i].mga_alloc_queue_depth))); 9046 } 9047 mg->mg_max_alloc_queue_depth = max_queue_depth; 9048 9049 for (int i = 0; i < mg->mg_allocators; i++) { 9050 mg->mg_allocator[i].mga_cur_max_alloc_queue_depth = 9051 zfs_vdev_def_queue_depth; 9052 } 9053 slots_per_allocator += zfs_vdev_def_queue_depth; 9054 } 9055 9056 for (int i = 0; i < spa->spa_alloc_count; i++) { 9057 ASSERT0(zfs_refcount_count(&normal->mc_allocator[i]. 9058 mca_alloc_slots)); 9059 ASSERT0(zfs_refcount_count(&special->mc_allocator[i]. 9060 mca_alloc_slots)); 9061 ASSERT0(zfs_refcount_count(&dedup->mc_allocator[i]. 
9062 mca_alloc_slots)); 9063 normal->mc_allocator[i].mca_alloc_max_slots = 9064 slots_per_allocator; 9065 special->mc_allocator[i].mca_alloc_max_slots = 9066 slots_per_allocator; 9067 dedup->mc_allocator[i].mca_alloc_max_slots = 9068 slots_per_allocator; 9069 } 9070 normal->mc_alloc_throttle_enabled = zio_dva_throttle_enabled; 9071 special->mc_alloc_throttle_enabled = zio_dva_throttle_enabled; 9072 dedup->mc_alloc_throttle_enabled = zio_dva_throttle_enabled; 9073 } 9074 9075 static void 9076 spa_sync_condense_indirect(spa_t *spa, dmu_tx_t *tx) 9077 { 9078 ASSERT(spa_writeable(spa)); 9079 9080 vdev_t *rvd = spa->spa_root_vdev; 9081 for (int c = 0; c < rvd->vdev_children; c++) { 9082 vdev_t *vd = rvd->vdev_child[c]; 9083 vdev_indirect_state_sync_verify(vd); 9084 9085 if (vdev_indirect_should_condense(vd)) { 9086 spa_condense_indirect_start_sync(vd, tx); 9087 break; 9088 } 9089 } 9090 } 9091 9092 static void 9093 spa_sync_iterate_to_convergence(spa_t *spa, dmu_tx_t *tx) 9094 { 9095 objset_t *mos = spa->spa_meta_objset; 9096 dsl_pool_t *dp = spa->spa_dsl_pool; 9097 uint64_t txg = tx->tx_txg; 9098 bplist_t *free_bpl = &spa->spa_free_bplist[txg & TXG_MASK]; 9099 9100 do { 9101 int pass = ++spa->spa_sync_pass; 9102 9103 spa_sync_config_object(spa, tx); 9104 spa_sync_aux_dev(spa, &spa->spa_spares, tx, 9105 ZPOOL_CONFIG_SPARES, DMU_POOL_SPARES); 9106 spa_sync_aux_dev(spa, &spa->spa_l2cache, tx, 9107 ZPOOL_CONFIG_L2CACHE, DMU_POOL_L2CACHE); 9108 spa_errlog_sync(spa, txg); 9109 dsl_pool_sync(dp, txg); 9110 9111 if (pass < zfs_sync_pass_deferred_free || 9112 spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP)) { 9113 /* 9114 * If the log space map feature is active we don't 9115 * care about deferred frees and the deferred bpobj 9116 * as the log space map should effectively have the 9117 * same results (i.e. appending only to one object). 9118 */ 9119 spa_sync_frees(spa, free_bpl, tx); 9120 } else { 9121 /* 9122 * We can not defer frees in pass 1, because 9123 * we sync the deferred frees later in pass 1. 9124 */ 9125 ASSERT3U(pass, >, 1); 9126 bplist_iterate(free_bpl, bpobj_enqueue_alloc_cb, 9127 &spa->spa_deferred_bpobj, tx); 9128 } 9129 9130 ddt_sync(spa, txg); 9131 dsl_scan_sync(dp, tx); 9132 svr_sync(spa, tx); 9133 spa_sync_upgrades(spa, tx); 9134 9135 spa_flush_metaslabs(spa, tx); 9136 9137 vdev_t *vd = NULL; 9138 while ((vd = txg_list_remove(&spa->spa_vdev_txg_list, txg)) 9139 != NULL) 9140 vdev_sync(vd, txg); 9141 9142 /* 9143 * Note: We need to check if the MOS is dirty because we could 9144 * have marked the MOS dirty without updating the uberblock 9145 * (e.g. if we have sync tasks but no dirty user data). We need 9146 * to check the uberblock's rootbp because it is updated if we 9147 * have synced out dirty data (though in this case the MOS will 9148 * most likely also be dirty due to second order effects, we 9149 * don't want to rely on that here). 9150 */ 9151 if (pass == 1 && 9152 spa->spa_uberblock.ub_rootbp.blk_birth < txg && 9153 !dmu_objset_is_dirty(mos, txg)) { 9154 /* 9155 * Nothing changed on the first pass, therefore this 9156 * TXG is a no-op. Avoid syncing deferred frees, so 9157 * that we can keep this TXG as a no-op. 
9158 */ 9159 ASSERT(txg_list_empty(&dp->dp_dirty_datasets, txg)); 9160 ASSERT(txg_list_empty(&dp->dp_dirty_dirs, txg)); 9161 ASSERT(txg_list_empty(&dp->dp_sync_tasks, txg)); 9162 ASSERT(txg_list_empty(&dp->dp_early_sync_tasks, txg)); 9163 break; 9164 } 9165 9166 spa_sync_deferred_frees(spa, tx); 9167 } while (dmu_objset_is_dirty(mos, txg)); 9168 } 9169 9170 /* 9171 * Rewrite the vdev configuration (which includes the uberblock) to 9172 * commit the transaction group. 9173 * 9174 * If there are no dirty vdevs, we sync the uberblock to a few random 9175 * top-level vdevs that are known to be visible in the config cache 9176 * (see spa_vdev_add() for a complete description). If there *are* dirty 9177 * vdevs, sync the uberblock to all vdevs. 9178 */ 9179 static void 9180 spa_sync_rewrite_vdev_config(spa_t *spa, dmu_tx_t *tx) 9181 { 9182 vdev_t *rvd = spa->spa_root_vdev; 9183 uint64_t txg = tx->tx_txg; 9184 9185 for (;;) { 9186 int error = 0; 9187 9188 /* 9189 * We hold SCL_STATE to prevent vdev open/close/etc. 9190 * while we're attempting to write the vdev labels. 9191 */ 9192 spa_config_enter(spa, SCL_STATE, FTAG, RW_READER); 9193 9194 if (list_is_empty(&spa->spa_config_dirty_list)) { 9195 vdev_t *svd[SPA_SYNC_MIN_VDEVS] = { NULL }; 9196 int svdcount = 0; 9197 int children = rvd->vdev_children; 9198 int c0 = random_in_range(children); 9199 9200 for (int c = 0; c < children; c++) { 9201 vdev_t *vd = 9202 rvd->vdev_child[(c0 + c) % children]; 9203 9204 /* Stop when revisiting the first vdev */ 9205 if (c > 0 && svd[0] == vd) 9206 break; 9207 9208 if (vd->vdev_ms_array == 0 || 9209 vd->vdev_islog || 9210 !vdev_is_concrete(vd)) 9211 continue; 9212 9213 svd[svdcount++] = vd; 9214 if (svdcount == SPA_SYNC_MIN_VDEVS) 9215 break; 9216 } 9217 error = vdev_config_sync(svd, svdcount, txg); 9218 } else { 9219 error = vdev_config_sync(rvd->vdev_child, 9220 rvd->vdev_children, txg); 9221 } 9222 9223 if (error == 0) 9224 spa->spa_last_synced_guid = rvd->vdev_guid; 9225 9226 spa_config_exit(spa, SCL_STATE, FTAG); 9227 9228 if (error == 0) 9229 break; 9230 zio_suspend(spa, NULL, ZIO_SUSPEND_IOERR); 9231 zio_resume_wait(spa); 9232 } 9233 } 9234 9235 /* 9236 * Sync the specified transaction group. New blocks may be dirtied as 9237 * part of the process, so we iterate until it converges. 9238 */ 9239 void 9240 spa_sync(spa_t *spa, uint64_t txg) 9241 { 9242 vdev_t *vd = NULL; 9243 9244 VERIFY(spa_writeable(spa)); 9245 9246 /* 9247 * Wait for i/os issued in open context that need to complete 9248 * before this txg syncs. 9249 */ 9250 (void) zio_wait(spa->spa_txg_zio[txg & TXG_MASK]); 9251 spa->spa_txg_zio[txg & TXG_MASK] = zio_root(spa, NULL, NULL, 9252 ZIO_FLAG_CANFAIL); 9253 9254 /* 9255 * Lock out configuration changes. 9256 */ 9257 spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER); 9258 9259 spa->spa_syncing_txg = txg; 9260 spa->spa_sync_pass = 0; 9261 9262 for (int i = 0; i < spa->spa_alloc_count; i++) { 9263 mutex_enter(&spa->spa_allocs[i].spaa_lock); 9264 VERIFY0(avl_numnodes(&spa->spa_allocs[i].spaa_tree)); 9265 mutex_exit(&spa->spa_allocs[i].spaa_lock); 9266 } 9267 9268 /* 9269 * If there are any pending vdev state changes, convert them 9270 * into config changes that go out with this transaction group. 9271 */ 9272 spa_config_enter(spa, SCL_STATE, FTAG, RW_READER); 9273 while (list_head(&spa->spa_state_dirty_list) != NULL) { 9274 /* 9275 * We need the write lock here because, for aux vdevs, 9276 * calling vdev_config_dirty() modifies sav_config. 
9277 * This is ugly and will become unnecessary when we 9278 * eliminate the aux vdev wart by integrating all vdevs 9279 * into the root vdev tree. 9280 */ 9281 spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG); 9282 spa_config_enter(spa, SCL_CONFIG | SCL_STATE, FTAG, RW_WRITER); 9283 while ((vd = list_head(&spa->spa_state_dirty_list)) != NULL) { 9284 vdev_state_clean(vd); 9285 vdev_config_dirty(vd); 9286 } 9287 spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG); 9288 spa_config_enter(spa, SCL_CONFIG | SCL_STATE, FTAG, RW_READER); 9289 } 9290 spa_config_exit(spa, SCL_STATE, FTAG); 9291 9292 dsl_pool_t *dp = spa->spa_dsl_pool; 9293 dmu_tx_t *tx = dmu_tx_create_assigned(dp, txg); 9294 9295 spa->spa_sync_starttime = gethrtime(); 9296 taskq_cancel_id(system_delay_taskq, spa->spa_deadman_tqid); 9297 spa->spa_deadman_tqid = taskq_dispatch_delay(system_delay_taskq, 9298 spa_deadman, spa, TQ_SLEEP, ddi_get_lbolt() + 9299 NSEC_TO_TICK(spa->spa_deadman_synctime)); 9300 9301 /* 9302 * If we are upgrading to SPA_VERSION_RAIDZ_DEFLATE this txg, 9303 * set spa_deflate if we have no raid-z vdevs. 9304 */ 9305 if (spa->spa_ubsync.ub_version < SPA_VERSION_RAIDZ_DEFLATE && 9306 spa->spa_uberblock.ub_version >= SPA_VERSION_RAIDZ_DEFLATE) { 9307 vdev_t *rvd = spa->spa_root_vdev; 9308 9309 int i; 9310 for (i = 0; i < rvd->vdev_children; i++) { 9311 vd = rvd->vdev_child[i]; 9312 if (vd->vdev_deflate_ratio != SPA_MINBLOCKSIZE) 9313 break; 9314 } 9315 if (i == rvd->vdev_children) { 9316 spa->spa_deflate = TRUE; 9317 VERIFY0(zap_add(spa->spa_meta_objset, 9318 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_DEFLATE, 9319 sizeof (uint64_t), 1, &spa->spa_deflate, tx)); 9320 } 9321 } 9322 9323 spa_sync_adjust_vdev_max_queue_depth(spa); 9324 9325 spa_sync_condense_indirect(spa, tx); 9326 9327 spa_sync_iterate_to_convergence(spa, tx); 9328 9329 #ifdef ZFS_DEBUG 9330 if (!list_is_empty(&spa->spa_config_dirty_list)) { 9331 /* 9332 * Make sure that the number of ZAPs for all the vdevs matches 9333 * the number of ZAPs in the per-vdev ZAP list. This only gets 9334 * called if the config is dirty; otherwise there may be 9335 * outstanding AVZ operations that weren't completed in 9336 * spa_sync_config_object. 9337 */ 9338 uint64_t all_vdev_zap_entry_count; 9339 ASSERT0(zap_count(spa->spa_meta_objset, 9340 spa->spa_all_vdev_zaps, &all_vdev_zap_entry_count)); 9341 ASSERT3U(vdev_count_verify_zaps(spa->spa_root_vdev), ==, 9342 all_vdev_zap_entry_count); 9343 } 9344 #endif 9345 9346 if (spa->spa_vdev_removal != NULL) { 9347 ASSERT0(spa->spa_vdev_removal->svr_bytes_done[txg & TXG_MASK]); 9348 } 9349 9350 spa_sync_rewrite_vdev_config(spa, tx); 9351 dmu_tx_commit(tx); 9352 9353 taskq_cancel_id(system_delay_taskq, spa->spa_deadman_tqid); 9354 spa->spa_deadman_tqid = 0; 9355 9356 /* 9357 * Clear the dirty config list. 9358 */ 9359 while ((vd = list_head(&spa->spa_config_dirty_list)) != NULL) 9360 vdev_config_clean(vd); 9361 9362 /* 9363 * Now that the new config has synced transactionally, 9364 * let it become visible to the config cache. 9365 */ 9366 if (spa->spa_config_syncing != NULL) { 9367 spa_config_set(spa, spa->spa_config_syncing); 9368 spa->spa_config_txg = txg; 9369 spa->spa_config_syncing = NULL; 9370 } 9371 9372 dsl_pool_sync_done(dp, txg); 9373 9374 for (int i = 0; i < spa->spa_alloc_count; i++) { 9375 mutex_enter(&spa->spa_allocs[i].spaa_lock); 9376 VERIFY0(avl_numnodes(&spa->spa_allocs[i].spaa_tree)); 9377 mutex_exit(&spa->spa_allocs[i].spaa_lock); 9378 } 9379 9380 /* 9381 * Update usable space statistics. 
9382 */ 9383 while ((vd = txg_list_remove(&spa->spa_vdev_txg_list, TXG_CLEAN(txg))) 9384 != NULL) 9385 vdev_sync_done(vd, txg); 9386 9387 metaslab_class_evict_old(spa->spa_normal_class, txg); 9388 metaslab_class_evict_old(spa->spa_log_class, txg); 9389 9390 spa_sync_close_syncing_log_sm(spa); 9391 9392 spa_update_dspace(spa); 9393 9394 /* 9395 * It had better be the case that we didn't dirty anything 9396 * since vdev_config_sync(). 9397 */ 9398 ASSERT(txg_list_empty(&dp->dp_dirty_datasets, txg)); 9399 ASSERT(txg_list_empty(&dp->dp_dirty_dirs, txg)); 9400 ASSERT(txg_list_empty(&spa->spa_vdev_txg_list, txg)); 9401 9402 while (zfs_pause_spa_sync) 9403 delay(1); 9404 9405 spa->spa_sync_pass = 0; 9406 9407 /* 9408 * Update the last synced uberblock here. We want to do this at 9409 * the end of spa_sync() so that consumers of spa_last_synced_txg() 9410 * will be guaranteed that all the processing associated with 9411 * that txg has been completed. 9412 */ 9413 spa->spa_ubsync = spa->spa_uberblock; 9414 spa_config_exit(spa, SCL_CONFIG, FTAG); 9415 9416 spa_handle_ignored_writes(spa); 9417 9418 /* 9419 * If any async tasks have been requested, kick them off. 9420 */ 9421 spa_async_dispatch(spa); 9422 } 9423 9424 /* 9425 * Sync all pools. We don't want to hold the namespace lock across these 9426 * operations, so we take a reference on the spa_t and drop the lock during the 9427 * sync. 9428 */ 9429 void 9430 spa_sync_allpools(void) 9431 { 9432 spa_t *spa = NULL; 9433 mutex_enter(&spa_namespace_lock); 9434 while ((spa = spa_next(spa)) != NULL) { 9435 if (spa_state(spa) != POOL_STATE_ACTIVE || 9436 !spa_writeable(spa) || spa_suspended(spa)) 9437 continue; 9438 spa_open_ref(spa, FTAG); 9439 mutex_exit(&spa_namespace_lock); 9440 txg_wait_synced(spa_get_dsl(spa), 0); 9441 mutex_enter(&spa_namespace_lock); 9442 spa_close(spa, FTAG); 9443 } 9444 mutex_exit(&spa_namespace_lock); 9445 } 9446 9447 /* 9448 * ========================================================================== 9449 * Miscellaneous routines 9450 * ========================================================================== 9451 */ 9452 9453 /* 9454 * Remove all pools in the system. 9455 */ 9456 void 9457 spa_evict_all(void) 9458 { 9459 spa_t *spa; 9460 9461 /* 9462 * Remove all cached state. All pools should be closed now, 9463 * so every spa in the AVL tree should be unreferenced. 9464 */ 9465 mutex_enter(&spa_namespace_lock); 9466 while ((spa = spa_next(NULL)) != NULL) { 9467 /* 9468 * Stop async tasks. The async thread may need to detach 9469 * a device that's been replaced, which requires grabbing 9470 * spa_namespace_lock, so we must drop it here. 
9471 */ 9472 spa_open_ref(spa, FTAG); 9473 mutex_exit(&spa_namespace_lock); 9474 spa_async_suspend(spa); 9475 mutex_enter(&spa_namespace_lock); 9476 spa_close(spa, FTAG); 9477 9478 if (spa->spa_state != POOL_STATE_UNINITIALIZED) { 9479 spa_unload(spa); 9480 spa_deactivate(spa); 9481 } 9482 spa_remove(spa); 9483 } 9484 mutex_exit(&spa_namespace_lock); 9485 } 9486 9487 vdev_t * 9488 spa_lookup_by_guid(spa_t *spa, uint64_t guid, boolean_t aux) 9489 { 9490 vdev_t *vd; 9491 int i; 9492 9493 if ((vd = vdev_lookup_by_guid(spa->spa_root_vdev, guid)) != NULL) 9494 return (vd); 9495 9496 if (aux) { 9497 for (i = 0; i < spa->spa_l2cache.sav_count; i++) { 9498 vd = spa->spa_l2cache.sav_vdevs[i]; 9499 if (vd->vdev_guid == guid) 9500 return (vd); 9501 } 9502 9503 for (i = 0; i < spa->spa_spares.sav_count; i++) { 9504 vd = spa->spa_spares.sav_vdevs[i]; 9505 if (vd->vdev_guid == guid) 9506 return (vd); 9507 } 9508 } 9509 9510 return (NULL); 9511 } 9512 9513 void 9514 spa_upgrade(spa_t *spa, uint64_t version) 9515 { 9516 ASSERT(spa_writeable(spa)); 9517 9518 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 9519 9520 /* 9521 * This should only be called for a non-faulted pool, and since a 9522 * future version would result in an unopenable pool, this shouldn't be 9523 * possible. 9524 */ 9525 ASSERT(SPA_VERSION_IS_SUPPORTED(spa->spa_uberblock.ub_version)); 9526 ASSERT3U(version, >=, spa->spa_uberblock.ub_version); 9527 9528 spa->spa_uberblock.ub_version = version; 9529 vdev_config_dirty(spa->spa_root_vdev); 9530 9531 spa_config_exit(spa, SCL_ALL, FTAG); 9532 9533 txg_wait_synced(spa_get_dsl(spa), 0); 9534 } 9535 9536 static boolean_t 9537 spa_has_aux_vdev(spa_t *spa, uint64_t guid, spa_aux_vdev_t *sav) 9538 { 9539 (void) spa; 9540 int i; 9541 uint64_t vdev_guid; 9542 9543 for (i = 0; i < sav->sav_count; i++) 9544 if (sav->sav_vdevs[i]->vdev_guid == guid) 9545 return (B_TRUE); 9546 9547 for (i = 0; i < sav->sav_npending; i++) { 9548 if (nvlist_lookup_uint64(sav->sav_pending[i], ZPOOL_CONFIG_GUID, 9549 &vdev_guid) == 0 && vdev_guid == guid) 9550 return (B_TRUE); 9551 } 9552 9553 return (B_FALSE); 9554 } 9555 9556 boolean_t 9557 spa_has_l2cache(spa_t *spa, uint64_t guid) 9558 { 9559 return (spa_has_aux_vdev(spa, guid, &spa->spa_l2cache)); 9560 } 9561 9562 boolean_t 9563 spa_has_spare(spa_t *spa, uint64_t guid) 9564 { 9565 return (spa_has_aux_vdev(spa, guid, &spa->spa_spares)); 9566 } 9567 9568 /* 9569 * Check if a pool has an active shared spare device. 9570 * Note: reference count of an active spare is 2, as a spare and as a replace 9571 */ 9572 static boolean_t 9573 spa_has_active_shared_spare(spa_t *spa) 9574 { 9575 int i, refcnt; 9576 uint64_t pool; 9577 spa_aux_vdev_t *sav = &spa->spa_spares; 9578 9579 for (i = 0; i < sav->sav_count; i++) { 9580 if (spa_spare_exists(sav->sav_vdevs[i]->vdev_guid, &pool, 9581 &refcnt) && pool != 0ULL && pool == spa_guid(spa) && 9582 refcnt > 2) 9583 return (B_TRUE); 9584 } 9585 9586 return (B_FALSE); 9587 } 9588 9589 uint64_t 9590 spa_total_metaslabs(spa_t *spa) 9591 { 9592 vdev_t *rvd = spa->spa_root_vdev; 9593 9594 uint64_t m = 0; 9595 for (uint64_t c = 0; c < rvd->vdev_children; c++) { 9596 vdev_t *vd = rvd->vdev_child[c]; 9597 if (!vdev_is_concrete(vd)) 9598 continue; 9599 m += vd->vdev_ms_count; 9600 } 9601 return (m); 9602 } 9603 9604 /* 9605 * Notify any waiting threads that some activity has switched from being in- 9606 * progress to not-in-progress so that the thread can wake up and determine 9607 * whether it is finished waiting. 
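 *
 * As a rough sketch (foo_lock, foo_state, and FOO_DONE are hypothetical
 * stand-ins for an activity's own lock and in-memory state), a thread that
 * completes an activity does something like:
 *
 *	mutex_enter(&foo_lock);
 *	foo_state = FOO_DONE;
 *	mutex_exit(&foo_lock);
 *	spa_notify_waiters(spa);
 *
 * The "Locking for waiting threads" comment below explains why a waiter
 * cannot miss the resulting wakeup.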
9608 */ 9609 void 9610 spa_notify_waiters(spa_t *spa) 9611 { 9612 /* 9613 * Acquiring spa_activities_lock here prevents the cv_broadcast from 9614 * happening between the waiting thread's check and cv_wait. 9615 */ 9616 mutex_enter(&spa->spa_activities_lock); 9617 cv_broadcast(&spa->spa_activities_cv); 9618 mutex_exit(&spa->spa_activities_lock); 9619 } 9620 9621 /* 9622 * Notify any waiting threads that the pool is exporting, and then block until 9623 * they are finished using the spa_t. 9624 */ 9625 void 9626 spa_wake_waiters(spa_t *spa) 9627 { 9628 mutex_enter(&spa->spa_activities_lock); 9629 spa->spa_waiters_cancel = B_TRUE; 9630 cv_broadcast(&spa->spa_activities_cv); 9631 while (spa->spa_waiters != 0) 9632 cv_wait(&spa->spa_waiters_cv, &spa->spa_activities_lock); 9633 spa->spa_waiters_cancel = B_FALSE; 9634 mutex_exit(&spa->spa_activities_lock); 9635 } 9636 9637 /* Whether the vdev or any of its descendants are being initialized/trimmed. */ 9638 static boolean_t 9639 spa_vdev_activity_in_progress_impl(vdev_t *vd, zpool_wait_activity_t activity) 9640 { 9641 spa_t *spa = vd->vdev_spa; 9642 9643 ASSERT(spa_config_held(spa, SCL_CONFIG | SCL_STATE, RW_READER)); 9644 ASSERT(MUTEX_HELD(&spa->spa_activities_lock)); 9645 ASSERT(activity == ZPOOL_WAIT_INITIALIZE || 9646 activity == ZPOOL_WAIT_TRIM); 9647 9648 kmutex_t *lock = activity == ZPOOL_WAIT_INITIALIZE ? 9649 &vd->vdev_initialize_lock : &vd->vdev_trim_lock; 9650 9651 mutex_exit(&spa->spa_activities_lock); 9652 mutex_enter(lock); 9653 mutex_enter(&spa->spa_activities_lock); 9654 9655 boolean_t in_progress = (activity == ZPOOL_WAIT_INITIALIZE) ? 9656 (vd->vdev_initialize_state == VDEV_INITIALIZE_ACTIVE) : 9657 (vd->vdev_trim_state == VDEV_TRIM_ACTIVE); 9658 mutex_exit(lock); 9659 9660 if (in_progress) 9661 return (B_TRUE); 9662 9663 for (int i = 0; i < vd->vdev_children; i++) { 9664 if (spa_vdev_activity_in_progress_impl(vd->vdev_child[i], 9665 activity)) 9666 return (B_TRUE); 9667 } 9668 9669 return (B_FALSE); 9670 } 9671 9672 /* 9673 * If use_guid is true, this checks whether the vdev specified by guid is 9674 * being initialized/trimmed. Otherwise, it checks whether any vdev in the pool 9675 * is being initialized/trimmed. The caller must hold the config lock and 9676 * spa_activities_lock. 9677 */ 9678 static int 9679 spa_vdev_activity_in_progress(spa_t *spa, boolean_t use_guid, uint64_t guid, 9680 zpool_wait_activity_t activity, boolean_t *in_progress) 9681 { 9682 mutex_exit(&spa->spa_activities_lock); 9683 spa_config_enter(spa, SCL_CONFIG | SCL_STATE, FTAG, RW_READER); 9684 mutex_enter(&spa->spa_activities_lock); 9685 9686 vdev_t *vd; 9687 if (use_guid) { 9688 vd = spa_lookup_by_guid(spa, guid, B_FALSE); 9689 if (vd == NULL || !vd->vdev_ops->vdev_op_leaf) { 9690 spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG); 9691 return (EINVAL); 9692 } 9693 } else { 9694 vd = spa->spa_root_vdev; 9695 } 9696 9697 *in_progress = spa_vdev_activity_in_progress_impl(vd, activity); 9698 9699 spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG); 9700 return (0); 9701 } 9702 9703 /* 9704 * Locking for waiting threads 9705 * --------------------------- 9706 * 9707 * Waiting threads need a way to check whether a given activity is in progress, 9708 * and then, if it is, wait for it to complete. Each activity will have some 9709 * in-memory representation of the relevant on-disk state which can be used to 9710 * determine whether or not the activity is in progress. 
The in-memory state and 9711 * the locking used to protect it will be different for each activity, and may 9712 * not be suitable for use with a cvar (e.g., some state is protected by the 9713 * config lock). To allow waiting threads to wait without any races, another 9714 * lock, spa_activities_lock, is used. 9715 * 9716 * When the state is checked, both the activity-specific lock (if there is one) 9717 * and spa_activities_lock are held. In some cases, the activity-specific lock 9718 * is acquired explicitly (e.g. the config lock). In others, the locking is 9719 * internal to some check (e.g. bpobj_is_empty). After checking, the waiting 9720 * thread releases the activity-specific lock and, if the activity is in 9721 * progress, then cv_waits using spa_activities_lock. 9722 * 9723 * The waiting thread is woken when another thread, one completing some 9724 * activity, updates the state of the activity and then calls 9725 * spa_notify_waiters, which will cv_broadcast. This 'completing' thread only 9726 * needs to hold its activity-specific lock when updating the state, and this 9727 * lock can (but doesn't have to) be dropped before calling spa_notify_waiters. 9728 * 9729 * Because spa_notify_waiters acquires spa_activities_lock before broadcasting, 9730 * and because it is held when the waiting thread checks the state of the 9731 * activity, it can never be the case that the completing thread both updates 9732 * the activity state and cv_broadcasts in between the waiting thread's check 9733 * and cv_wait. Thus, a waiting thread can never miss a wakeup. 9734 * 9735 * In order to prevent deadlock, when the waiting thread does its check, in some 9736 * cases it will temporarily drop spa_activities_lock in order to acquire the 9737 * activity-specific lock. The order in which spa_activities_lock and the 9738 * activity specific lock are acquired in the waiting thread is determined by 9739 * the order in which they are acquired in the completing thread; if the 9740 * completing thread calls spa_notify_waiters with the activity-specific lock 9741 * held, then the waiting thread must also acquire the activity-specific lock 9742 * first. 
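 *
 * Putting the two halves together, the waiter side reduces to roughly the
 * following (a simplified sketch of what spa_wait_common() below does; error
 * handling, signal handling, and the waiter reference count are omitted):
 *
 *	mutex_enter(&spa->spa_activities_lock);
 *	while (activity_is_in_progress(spa)) {
 *		cv_wait(&spa->spa_activities_cv, &spa->spa_activities_lock);
 *	}
 *	mutex_exit(&spa->spa_activities_lock);
 *
 * Here activity_is_in_progress() is a stand-in for spa_activity_in_progress()
 * below, which may itself drop and re-acquire spa_activities_lock in order to
 * take the activity-specific lock in the order described above.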
9743 */ 9744 9745 static int 9746 spa_activity_in_progress(spa_t *spa, zpool_wait_activity_t activity, 9747 boolean_t use_tag, uint64_t tag, boolean_t *in_progress) 9748 { 9749 int error = 0; 9750 9751 ASSERT(MUTEX_HELD(&spa->spa_activities_lock)); 9752 9753 switch (activity) { 9754 case ZPOOL_WAIT_CKPT_DISCARD: 9755 *in_progress = 9756 (spa_feature_is_active(spa, SPA_FEATURE_POOL_CHECKPOINT) && 9757 zap_contains(spa_meta_objset(spa), 9758 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_ZPOOL_CHECKPOINT) == 9759 ENOENT); 9760 break; 9761 case ZPOOL_WAIT_FREE: 9762 *in_progress = ((spa_version(spa) >= SPA_VERSION_DEADLISTS && 9763 !bpobj_is_empty(&spa->spa_dsl_pool->dp_free_bpobj)) || 9764 spa_feature_is_active(spa, SPA_FEATURE_ASYNC_DESTROY) || 9765 spa_livelist_delete_check(spa)); 9766 break; 9767 case ZPOOL_WAIT_INITIALIZE: 9768 case ZPOOL_WAIT_TRIM: 9769 error = spa_vdev_activity_in_progress(spa, use_tag, tag, 9770 activity, in_progress); 9771 break; 9772 case ZPOOL_WAIT_REPLACE: 9773 mutex_exit(&spa->spa_activities_lock); 9774 spa_config_enter(spa, SCL_CONFIG | SCL_STATE, FTAG, RW_READER); 9775 mutex_enter(&spa->spa_activities_lock); 9776 9777 *in_progress = vdev_replace_in_progress(spa->spa_root_vdev); 9778 spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG); 9779 break; 9780 case ZPOOL_WAIT_REMOVE: 9781 *in_progress = (spa->spa_removing_phys.sr_state == 9782 DSS_SCANNING); 9783 break; 9784 case ZPOOL_WAIT_RESILVER: 9785 if ((*in_progress = vdev_rebuild_active(spa->spa_root_vdev))) 9786 break; 9787 zfs_fallthrough; 9788 case ZPOOL_WAIT_SCRUB: 9789 { 9790 boolean_t scanning, paused, is_scrub; 9791 dsl_scan_t *scn = spa->spa_dsl_pool->dp_scan; 9792 9793 is_scrub = (scn->scn_phys.scn_func == POOL_SCAN_SCRUB); 9794 scanning = (scn->scn_phys.scn_state == DSS_SCANNING); 9795 paused = dsl_scan_is_paused_scrub(scn); 9796 *in_progress = (scanning && !paused && 9797 is_scrub == (activity == ZPOOL_WAIT_SCRUB)); 9798 break; 9799 } 9800 default: 9801 panic("unrecognized value for activity %d", activity); 9802 } 9803 9804 return (error); 9805 } 9806 9807 static int 9808 spa_wait_common(const char *pool, zpool_wait_activity_t activity, 9809 boolean_t use_tag, uint64_t tag, boolean_t *waited) 9810 { 9811 /* 9812 * The tag is used to distinguish between instances of an activity. 9813 * 'initialize' and 'trim' are the only activities that we use this for. 9814 * The other activities can only have a single instance in progress in a 9815 * pool at one time, making the tag unnecessary. 9816 * 9817 * There can be multiple devices being replaced at once, but since they 9818 * all finish once resilvering finishes, we don't bother keeping track 9819 * of them individually, we just wait for them all to finish. 9820 */ 9821 if (use_tag && activity != ZPOOL_WAIT_INITIALIZE && 9822 activity != ZPOOL_WAIT_TRIM) 9823 return (EINVAL); 9824 9825 if (activity < 0 || activity >= ZPOOL_WAIT_NUM_ACTIVITIES) 9826 return (EINVAL); 9827 9828 spa_t *spa; 9829 int error = spa_open(pool, &spa, FTAG); 9830 if (error != 0) 9831 return (error); 9832 9833 /* 9834 * Increment the spa's waiter count so that we can call spa_close and 9835 * still ensure that the spa_t doesn't get freed before this thread is 9836 * finished with it when the pool is exported. We want to call spa_close 9837 * before we start waiting because otherwise the additional ref would 9838 * prevent the pool from being exported or destroyed throughout the 9839 * potentially long wait. 
9840 */
9841 mutex_enter(&spa->spa_activities_lock);
9842 spa->spa_waiters++;
9843 spa_close(spa, FTAG);
9844 
9845 *waited = B_FALSE;
9846 for (;;) {
9847 boolean_t in_progress;
9848 error = spa_activity_in_progress(spa, activity, use_tag, tag,
9849 &in_progress);
9850 
9851 if (error || !in_progress || spa->spa_waiters_cancel)
9852 break;
9853 
9854 *waited = B_TRUE;
9855 
9856 if (cv_wait_sig(&spa->spa_activities_cv,
9857 &spa->spa_activities_lock) == 0) {
9858 error = EINTR;
9859 break;
9860 }
9861 }
9862 
9863 spa->spa_waiters--;
9864 cv_signal(&spa->spa_waiters_cv);
9865 mutex_exit(&spa->spa_activities_lock);
9866 
9867 return (error);
9868 }
9869 
9870 /*
9871 * Wait for a particular instance of the specified activity to complete, where
9872 * the instance is identified by 'tag'
9873 */
9874 int
9875 spa_wait_tag(const char *pool, zpool_wait_activity_t activity, uint64_t tag,
9876 boolean_t *waited)
9877 {
9878 return (spa_wait_common(pool, activity, B_TRUE, tag, waited));
9879 }
9880 
9881 /*
9882 * Wait for all instances of the specified activity to complete
9883 */
9884 int
9885 spa_wait(const char *pool, zpool_wait_activity_t activity, boolean_t *waited)
9886 {
9887 
9888 return (spa_wait_common(pool, activity, B_FALSE, 0, waited));
9889 }
9890 
9891 sysevent_t *
9892 spa_event_create(spa_t *spa, vdev_t *vd, nvlist_t *hist_nvl, const char *name)
9893 {
9894 sysevent_t *ev = NULL;
9895 #ifdef _KERNEL
9896 nvlist_t *resource;
9897 
9898 resource = zfs_event_create(spa, vd, FM_SYSEVENT_CLASS, name, hist_nvl);
9899 if (resource) {
9900 ev = kmem_alloc(sizeof (sysevent_t), KM_SLEEP);
9901 ev->resource = resource;
9902 }
9903 #else
9904 (void) spa, (void) vd, (void) hist_nvl, (void) name;
9905 #endif
9906 return (ev);
9907 }
9908 
9909 void
9910 spa_event_post(sysevent_t *ev)
9911 {
9912 #ifdef _KERNEL
9913 if (ev) {
9914 zfs_zevent_post(ev->resource, NULL, zfs_zevent_post_cb);
9915 kmem_free(ev, sizeof (*ev));
9916 }
9917 #else
9918 (void) ev;
9919 #endif
9920 }
9921 
9922 /*
9923 * Post a zevent corresponding to the given sysevent. The 'name' must be one
9924 * of the event definitions in sys/sysevent/eventdefs.h. The payload will be
9925 * filled in from the spa and (optionally) the vdev. This doesn't do anything
9926 * in the userland libzpool, as we don't want consumers to misinterpret ztest
9927 * or zdb as real changes.
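 *
 * For example, a caller with no associated vdev and no extra payload would
 * post an event like this (ESC_ZFS_POOL_IMPORT is one of the names defined
 * in sys/sysevent/eventdefs.h):
 *
 *	spa_event_notify(spa, NULL, NULL, ESC_ZFS_POOL_IMPORT);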
9928 */
9929 void
9930 spa_event_notify(spa_t *spa, vdev_t *vd, nvlist_t *hist_nvl, const char *name)
9931 {
9932 spa_event_post(spa_event_create(spa, vd, hist_nvl, name));
9933 }
9934 
9935 /* state manipulation functions */
9936 EXPORT_SYMBOL(spa_open);
9937 EXPORT_SYMBOL(spa_open_rewind);
9938 EXPORT_SYMBOL(spa_get_stats);
9939 EXPORT_SYMBOL(spa_create);
9940 EXPORT_SYMBOL(spa_import);
9941 EXPORT_SYMBOL(spa_tryimport);
9942 EXPORT_SYMBOL(spa_destroy);
9943 EXPORT_SYMBOL(spa_export);
9944 EXPORT_SYMBOL(spa_reset);
9945 EXPORT_SYMBOL(spa_async_request);
9946 EXPORT_SYMBOL(spa_async_suspend);
9947 EXPORT_SYMBOL(spa_async_resume);
9948 EXPORT_SYMBOL(spa_inject_addref);
9949 EXPORT_SYMBOL(spa_inject_delref);
9950 EXPORT_SYMBOL(spa_scan_stat_init);
9951 EXPORT_SYMBOL(spa_scan_get_stats);
9952 
9953 /* device manipulation */
9954 EXPORT_SYMBOL(spa_vdev_add);
9955 EXPORT_SYMBOL(spa_vdev_attach);
9956 EXPORT_SYMBOL(spa_vdev_detach);
9957 EXPORT_SYMBOL(spa_vdev_setpath);
9958 EXPORT_SYMBOL(spa_vdev_setfru);
9959 EXPORT_SYMBOL(spa_vdev_split_mirror);
9960 
9961 /* spare state (which is global across all pools) */
9962 EXPORT_SYMBOL(spa_spare_add);
9963 EXPORT_SYMBOL(spa_spare_remove);
9964 EXPORT_SYMBOL(spa_spare_exists);
9965 EXPORT_SYMBOL(spa_spare_activate);
9966 
9967 /* L2ARC state (which is global across all pools) */
9968 EXPORT_SYMBOL(spa_l2cache_add);
9969 EXPORT_SYMBOL(spa_l2cache_remove);
9970 EXPORT_SYMBOL(spa_l2cache_exists);
9971 EXPORT_SYMBOL(spa_l2cache_activate);
9972 EXPORT_SYMBOL(spa_l2cache_drop);
9973 
9974 /* scanning */
9975 EXPORT_SYMBOL(spa_scan);
9976 EXPORT_SYMBOL(spa_scan_stop);
9977 
9978 /* spa syncing */
9979 EXPORT_SYMBOL(spa_sync); /* only for DMU use */
9980 EXPORT_SYMBOL(spa_sync_allpools);
9981 
9982 /* properties */
9983 EXPORT_SYMBOL(spa_prop_set);
9984 EXPORT_SYMBOL(spa_prop_get);
9985 EXPORT_SYMBOL(spa_prop_clear_bootfs);
9986 
9987 /* asynchronous event notification */
9988 EXPORT_SYMBOL(spa_event_notify);
9989 
9990 /* BEGIN CSTYLED */
9991 ZFS_MODULE_PARAM(zfs_spa, spa_, load_verify_shift, INT, ZMOD_RW,
9992 "log2 fraction of arc that can be used by inflight I/Os when "
9993 "verifying pool during import");
9994 /* END CSTYLED */
9995 
9996 ZFS_MODULE_PARAM(zfs_spa, spa_, load_verify_metadata, INT, ZMOD_RW,
9997 "Set to traverse metadata on pool import");
9998 
9999 ZFS_MODULE_PARAM(zfs_spa, spa_, load_verify_data, INT, ZMOD_RW,
10000 "Set to traverse data on pool import");
10001 
10002 ZFS_MODULE_PARAM(zfs_spa, spa_, load_print_vdev_tree, INT, ZMOD_RW,
10003 "Print vdev tree to zfs_dbgmsg during pool import");
10004 
10005 ZFS_MODULE_PARAM(zfs_zio, zio_, taskq_batch_pct, UINT, ZMOD_RD,
10006 "Percentage of CPUs to run an IO worker thread");
10007 
10008 ZFS_MODULE_PARAM(zfs_zio, zio_, taskq_batch_tpq, UINT, ZMOD_RD,
10009 "Number of threads per IO worker taskqueue");
10010 
10011 /* BEGIN CSTYLED */
10012 ZFS_MODULE_PARAM(zfs, zfs_, max_missing_tvds, ULONG, ZMOD_RW,
10013 "Allow importing pool with up to this number of missing top-level "
10014 "vdevs (in read-only mode)");
10015 /* END CSTYLED */
10016 
10017 ZFS_MODULE_PARAM(zfs_livelist_condense, zfs_livelist_condense_, zthr_pause, INT,
10018 ZMOD_RW, "Set the livelist condense zthr to pause");
10019 
10020 ZFS_MODULE_PARAM(zfs_livelist_condense, zfs_livelist_condense_, sync_pause, INT,
10021 ZMOD_RW, "Set the livelist condense synctask to pause");
10022 
10023 /* BEGIN CSTYLED */
10024 ZFS_MODULE_PARAM(zfs_livelist_condense, zfs_livelist_condense_, sync_cancel,
10025 INT, ZMOD_RW,
10026 "Whether livelist condensing was canceled in the synctask");
10027 
10028 ZFS_MODULE_PARAM(zfs_livelist_condense, zfs_livelist_condense_, zthr_cancel,
10029 INT, ZMOD_RW,
10030 "Whether livelist condensing was canceled in the zthr function");
10031 
10032 ZFS_MODULE_PARAM(zfs_livelist_condense, zfs_livelist_condense_, new_alloc, INT,
10033 ZMOD_RW,
10034 "Whether extra ALLOC blkptrs were added to a livelist entry while it "
10035 "was being condensed");
10036 /* END CSTYLED */
10037