1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 22 /* 23 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved. 24 * Copyright (c) 2011, 2020 by Delphix. All rights reserved. 25 * Copyright (c) 2018, Nexenta Systems, Inc. All rights reserved. 26 * Copyright (c) 2014 Spectra Logic Corporation, All rights reserved. 27 * Copyright 2013 Saso Kiselkov. All rights reserved. 28 * Copyright (c) 2014 Integros [integros.com] 29 * Copyright 2016 Toomas Soome <tsoome@me.com> 30 * Copyright (c) 2016 Actifio, Inc. All rights reserved. 31 * Copyright 2018 Joyent, Inc. 32 * Copyright (c) 2017, 2019, Datto Inc. All rights reserved. 33 * Copyright 2017 Joyent, Inc. 34 * Copyright (c) 2017, Intel Corporation. 35 * Copyright (c) 2021, Colm Buckley <colm@tuatha.org> 36 */ 37 38 /* 39 * SPA: Storage Pool Allocator 40 * 41 * This file contains all the routines used when modifying on-disk SPA state. 42 * This includes opening, importing, destroying, exporting a pool, and syncing a 43 * pool. 44 */ 45 46 #include <sys/zfs_context.h> 47 #include <sys/fm/fs/zfs.h> 48 #include <sys/spa_impl.h> 49 #include <sys/zio.h> 50 #include <sys/zio_checksum.h> 51 #include <sys/dmu.h> 52 #include <sys/dmu_tx.h> 53 #include <sys/zap.h> 54 #include <sys/zil.h> 55 #include <sys/ddt.h> 56 #include <sys/vdev_impl.h> 57 #include <sys/vdev_removal.h> 58 #include <sys/vdev_indirect_mapping.h> 59 #include <sys/vdev_indirect_births.h> 60 #include <sys/vdev_initialize.h> 61 #include <sys/vdev_rebuild.h> 62 #include <sys/vdev_trim.h> 63 #include <sys/vdev_disk.h> 64 #include <sys/vdev_draid.h> 65 #include <sys/metaslab.h> 66 #include <sys/metaslab_impl.h> 67 #include <sys/mmp.h> 68 #include <sys/uberblock_impl.h> 69 #include <sys/txg.h> 70 #include <sys/avl.h> 71 #include <sys/bpobj.h> 72 #include <sys/dmu_traverse.h> 73 #include <sys/dmu_objset.h> 74 #include <sys/unique.h> 75 #include <sys/dsl_pool.h> 76 #include <sys/dsl_dataset.h> 77 #include <sys/dsl_dir.h> 78 #include <sys/dsl_prop.h> 79 #include <sys/dsl_synctask.h> 80 #include <sys/fs/zfs.h> 81 #include <sys/arc.h> 82 #include <sys/callb.h> 83 #include <sys/systeminfo.h> 84 #include <sys/spa_boot.h> 85 #include <sys/zfs_ioctl.h> 86 #include <sys/dsl_scan.h> 87 #include <sys/zfeature.h> 88 #include <sys/dsl_destroy.h> 89 #include <sys/zvol.h> 90 91 #ifdef _KERNEL 92 #include <sys/fm/protocol.h> 93 #include <sys/fm/util.h> 94 #include <sys/callb.h> 95 #include <sys/zone.h> 96 #include <sys/vmsystm.h> 97 #endif /* _KERNEL */ 98 99 #include "zfs_prop.h" 100 #include "zfs_comutil.h" 101 102 /* 103 * The interval, in seconds, at which failed configuration cache file writes 104 * should be retried. 
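 *
 * When a cache file write fails, the failure time is recorded on the spa
 * and the async config-update task retries the write once this interval
 * has elapsed.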
 */
int zfs_ccw_retry_interval = 300;

typedef enum zti_modes {
	ZTI_MODE_FIXED,			/* value is # of threads (min 1) */
	ZTI_MODE_BATCH,			/* cpu-intensive; value is ignored */
	ZTI_MODE_NULL,			/* don't create a taskq */
	ZTI_NMODES
} zti_modes_t;

#define	ZTI_P(n, q)	{ ZTI_MODE_FIXED, (n), (q) }
#define	ZTI_PCT(n)	{ ZTI_MODE_ONLINE_PERCENT, (n), 1 }
#define	ZTI_BATCH	{ ZTI_MODE_BATCH, 0, 1 }
#define	ZTI_NULL	{ ZTI_MODE_NULL, 0, 0 }

#define	ZTI_N(n)	ZTI_P(n, 1)
#define	ZTI_ONE		ZTI_N(1)

typedef struct zio_taskq_info {
	zti_modes_t zti_mode;
	uint_t zti_value;
	uint_t zti_count;
} zio_taskq_info_t;

static const char *const zio_taskq_types[ZIO_TASKQ_TYPES] = {
	"iss", "iss_h", "int", "int_h"
};

/*
 * This table defines the taskq settings for each ZFS I/O type. When
 * initializing a pool, we use this table to create an appropriately sized
 * taskq. Some operations are low volume and therefore have a small, static
 * number of threads assigned to their taskqs using the ZTI_N(#) or ZTI_ONE
 * macros. Other operations process a large amount of data; the ZTI_BATCH
 * macro causes us to create a taskq oriented for throughput. Some operations
 * are so high frequency and short-lived that the taskq itself can become a
 * point of lock contention. The ZTI_P(#, #) macro indicates that we need an
 * additional degree of parallelism specified by the number of threads per-
 * taskq and the number of taskqs; when dispatching an event in this case, the
 * particular taskq is chosen at random.
 *
 * The different taskq priorities are to handle the different contexts (issue
 * and interrupt) and then to reserve threads for ZIO_PRIORITY_NOW I/Os that
 * need to be handled with minimum delay.
 */
const zio_taskq_info_t zio_taskqs[ZIO_TYPES][ZIO_TASKQ_TYPES] = {
	/* ISSUE	ISSUE_HIGH	INTR		INTR_HIGH */
	{ ZTI_ONE,	ZTI_NULL,	ZTI_ONE,	ZTI_NULL }, /* NULL */
	{ ZTI_N(8),	ZTI_NULL,	ZTI_P(12, 8),	ZTI_NULL }, /* READ */
	{ ZTI_BATCH,	ZTI_N(5),	ZTI_P(12, 8),	ZTI_N(5) }, /* WRITE */
	{ ZTI_P(12, 8),	ZTI_NULL,	ZTI_ONE,	ZTI_NULL }, /* FREE */
	{ ZTI_ONE,	ZTI_NULL,	ZTI_ONE,	ZTI_NULL }, /* CLAIM */
	{ ZTI_ONE,	ZTI_NULL,	ZTI_ONE,	ZTI_NULL }, /* IOCTL */
	{ ZTI_N(4),	ZTI_NULL,	ZTI_ONE,	ZTI_NULL }, /* TRIM */
};

static void spa_sync_version(void *arg, dmu_tx_t *tx);
static void spa_sync_props(void *arg, dmu_tx_t *tx);
static boolean_t spa_has_active_shared_spare(spa_t *spa);
static int spa_load_impl(spa_t *spa, spa_import_type_t type, char **ereport);
static void spa_vdev_resilver_done(spa_t *spa);

uint_t		zio_taskq_batch_pct = 75;	/* 1 thread per cpu in pset */
boolean_t	zio_taskq_sysdc = B_TRUE;	/* use SDC scheduling class */
uint_t		zio_taskq_basedc = 80;		/* base duty cycle */

boolean_t	spa_create_process = B_TRUE;	/* no process ==> no sysdc */

/*
 * Report any spa_load_verify errors found, but do not fail spa_load.
 * This is used by zdb to analyze non-idle pools.
 */
boolean_t	spa_load_verify_dryrun = B_FALSE;

/*
 * This (illegal) pool name is used when temporarily importing a spa_t in order
 * to get the vdev stats associated with the imported devices.
 */
#define	TRYIMPORT_NAME	"$import"

/*
 * For debugging purposes: print out vdev tree during pool import.
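 * A non-zero value enables the dump; the tree is written to the ZFS debug
 * log via vdev_dbgmsg_print_tree() while the configuration is being parsed
 * and validated during spa_load().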
 */
int spa_load_print_vdev_tree = B_FALSE;

/*
 * A non-zero value for zfs_max_missing_tvds means that we allow importing
 * pools with missing top-level vdevs. This is strictly intended for advanced
 * pool recovery cases since missing data is almost inevitable. Pools with
 * missing devices can only be imported read-only for safety reasons, and their
 * fail-mode will be automatically set to "continue".
 *
 * With 1 missing vdev we should be able to import the pool and mount all
 * datasets. User data that was not modified after the missing device has been
 * added should be recoverable. This means that snapshots created prior to the
 * addition of that device should be completely intact.
 *
 * With 2 missing vdevs, some datasets may fail to mount since there are
 * dataset statistics that are stored as regular metadata. Some data might be
 * recoverable if those vdevs were added recently.
 *
 * With 3 or more missing vdevs, the pool is severely damaged and MOS entries
 * may be missing entirely. Chances of data recovery are very low. Note that
 * there are also risks of performing an inadvertent rewind as we might be
 * missing all the vdevs with the latest uberblocks.
 */
unsigned long zfs_max_missing_tvds = 0;

/*
 * The parameters below are similar to zfs_max_missing_tvds but are only
 * intended for a preliminary open of the pool with an untrusted config which
 * might be incomplete or out-dated.
 *
 * We are more tolerant for pools opened from a cachefile since we could have
 * an out-dated cachefile where a device removal was not registered.
 * We could have set the limit arbitrarily high but in the case where devices
 * are really missing we would want to return the proper error codes; we chose
 * SPA_DVAS_PER_BP - 1 so that some copies of the MOS would still be available
 * and we get a chance to retrieve the trusted config.
 */
uint64_t zfs_max_missing_tvds_cachefile = SPA_DVAS_PER_BP - 1;

/*
 * In the case where config was assembled by scanning device paths (/dev/dsks
 * by default) we are less tolerant since all the existing devices should have
 * been detected and we want spa_load to return the right error codes.
 */
uint64_t zfs_max_missing_tvds_scan = 0;

/*
 * Debugging aid that pauses spa_sync() towards the end.
 */
boolean_t	zfs_pause_spa_sync = B_FALSE;

/*
 * Variables to indicate the livelist condense zthr func should wait at certain
 * points for the livelist to be removed - used to test condense/destroy races
 */
int zfs_livelist_condense_zthr_pause = 0;
int zfs_livelist_condense_sync_pause = 0;

/*
 * Variables to track whether or not condense cancellation has been
 * triggered in testing.
 */
int zfs_livelist_condense_sync_cancel = 0;
int zfs_livelist_condense_zthr_cancel = 0;

/*
 * Variable to track whether or not extra ALLOC blkptrs were added to a
 * livelist entry while it was being condensed (caused by the way we track
 * remapped blkptrs in dbuf_remap_impl)
 */
int zfs_livelist_condense_new_alloc = 0;

/*
 * ==========================================================================
 * SPA properties routines
 * ==========================================================================
 */

/*
 * Add a (source=src, propname=propval) list to an nvlist.
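 * The entry added for each property has the form (illustrative):
 *
 *	"<propname>" -> {
 *		ZPROP_SOURCE = src,
 *		ZPROP_VALUE  = strval	(if strval != NULL)
 *		ZPROP_VALUE  = intval	(otherwise)
 *	}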
268 */ 269 static void 270 spa_prop_add_list(nvlist_t *nvl, zpool_prop_t prop, char *strval, 271 uint64_t intval, zprop_source_t src) 272 { 273 const char *propname = zpool_prop_to_name(prop); 274 nvlist_t *propval; 275 276 VERIFY(nvlist_alloc(&propval, NV_UNIQUE_NAME, KM_SLEEP) == 0); 277 VERIFY(nvlist_add_uint64(propval, ZPROP_SOURCE, src) == 0); 278 279 if (strval != NULL) 280 VERIFY(nvlist_add_string(propval, ZPROP_VALUE, strval) == 0); 281 else 282 VERIFY(nvlist_add_uint64(propval, ZPROP_VALUE, intval) == 0); 283 284 VERIFY(nvlist_add_nvlist(nvl, propname, propval) == 0); 285 nvlist_free(propval); 286 } 287 288 /* 289 * Get property values from the spa configuration. 290 */ 291 static void 292 spa_prop_get_config(spa_t *spa, nvlist_t **nvp) 293 { 294 vdev_t *rvd = spa->spa_root_vdev; 295 dsl_pool_t *pool = spa->spa_dsl_pool; 296 uint64_t size, alloc, cap, version; 297 const zprop_source_t src = ZPROP_SRC_NONE; 298 spa_config_dirent_t *dp; 299 metaslab_class_t *mc = spa_normal_class(spa); 300 301 ASSERT(MUTEX_HELD(&spa->spa_props_lock)); 302 303 if (rvd != NULL) { 304 alloc = metaslab_class_get_alloc(mc); 305 alloc += metaslab_class_get_alloc(spa_special_class(spa)); 306 alloc += metaslab_class_get_alloc(spa_dedup_class(spa)); 307 alloc += metaslab_class_get_alloc(spa_embedded_log_class(spa)); 308 309 size = metaslab_class_get_space(mc); 310 size += metaslab_class_get_space(spa_special_class(spa)); 311 size += metaslab_class_get_space(spa_dedup_class(spa)); 312 size += metaslab_class_get_space(spa_embedded_log_class(spa)); 313 314 spa_prop_add_list(*nvp, ZPOOL_PROP_NAME, spa_name(spa), 0, src); 315 spa_prop_add_list(*nvp, ZPOOL_PROP_SIZE, NULL, size, src); 316 spa_prop_add_list(*nvp, ZPOOL_PROP_ALLOCATED, NULL, alloc, src); 317 spa_prop_add_list(*nvp, ZPOOL_PROP_FREE, NULL, 318 size - alloc, src); 319 spa_prop_add_list(*nvp, ZPOOL_PROP_CHECKPOINT, NULL, 320 spa->spa_checkpoint_info.sci_dspace, src); 321 322 spa_prop_add_list(*nvp, ZPOOL_PROP_FRAGMENTATION, NULL, 323 metaslab_class_fragmentation(mc), src); 324 spa_prop_add_list(*nvp, ZPOOL_PROP_EXPANDSZ, NULL, 325 metaslab_class_expandable_space(mc), src); 326 spa_prop_add_list(*nvp, ZPOOL_PROP_READONLY, NULL, 327 (spa_mode(spa) == SPA_MODE_READ), src); 328 329 cap = (size == 0) ? 0 : (alloc * 100 / size); 330 spa_prop_add_list(*nvp, ZPOOL_PROP_CAPACITY, NULL, cap, src); 331 332 spa_prop_add_list(*nvp, ZPOOL_PROP_DEDUPRATIO, NULL, 333 ddt_get_pool_dedup_ratio(spa), src); 334 335 spa_prop_add_list(*nvp, ZPOOL_PROP_HEALTH, NULL, 336 rvd->vdev_state, src); 337 338 version = spa_version(spa); 339 if (version == zpool_prop_default_numeric(ZPOOL_PROP_VERSION)) { 340 spa_prop_add_list(*nvp, ZPOOL_PROP_VERSION, NULL, 341 version, ZPROP_SRC_DEFAULT); 342 } else { 343 spa_prop_add_list(*nvp, ZPOOL_PROP_VERSION, NULL, 344 version, ZPROP_SRC_LOCAL); 345 } 346 spa_prop_add_list(*nvp, ZPOOL_PROP_LOAD_GUID, 347 NULL, spa_load_guid(spa), src); 348 } 349 350 if (pool != NULL) { 351 /* 352 * The $FREE directory was introduced in SPA_VERSION_DEADLISTS, 353 * when opening pools before this version freedir will be NULL. 
354 */ 355 if (pool->dp_free_dir != NULL) { 356 spa_prop_add_list(*nvp, ZPOOL_PROP_FREEING, NULL, 357 dsl_dir_phys(pool->dp_free_dir)->dd_used_bytes, 358 src); 359 } else { 360 spa_prop_add_list(*nvp, ZPOOL_PROP_FREEING, 361 NULL, 0, src); 362 } 363 364 if (pool->dp_leak_dir != NULL) { 365 spa_prop_add_list(*nvp, ZPOOL_PROP_LEAKED, NULL, 366 dsl_dir_phys(pool->dp_leak_dir)->dd_used_bytes, 367 src); 368 } else { 369 spa_prop_add_list(*nvp, ZPOOL_PROP_LEAKED, 370 NULL, 0, src); 371 } 372 } 373 374 spa_prop_add_list(*nvp, ZPOOL_PROP_GUID, NULL, spa_guid(spa), src); 375 376 if (spa->spa_comment != NULL) { 377 spa_prop_add_list(*nvp, ZPOOL_PROP_COMMENT, spa->spa_comment, 378 0, ZPROP_SRC_LOCAL); 379 } 380 381 if (spa->spa_compatibility != NULL) { 382 spa_prop_add_list(*nvp, ZPOOL_PROP_COMPATIBILITY, 383 spa->spa_compatibility, 0, ZPROP_SRC_LOCAL); 384 } 385 386 if (spa->spa_root != NULL) 387 spa_prop_add_list(*nvp, ZPOOL_PROP_ALTROOT, spa->spa_root, 388 0, ZPROP_SRC_LOCAL); 389 390 if (spa_feature_is_enabled(spa, SPA_FEATURE_LARGE_BLOCKS)) { 391 spa_prop_add_list(*nvp, ZPOOL_PROP_MAXBLOCKSIZE, NULL, 392 MIN(zfs_max_recordsize, SPA_MAXBLOCKSIZE), ZPROP_SRC_NONE); 393 } else { 394 spa_prop_add_list(*nvp, ZPOOL_PROP_MAXBLOCKSIZE, NULL, 395 SPA_OLD_MAXBLOCKSIZE, ZPROP_SRC_NONE); 396 } 397 398 if (spa_feature_is_enabled(spa, SPA_FEATURE_LARGE_DNODE)) { 399 spa_prop_add_list(*nvp, ZPOOL_PROP_MAXDNODESIZE, NULL, 400 DNODE_MAX_SIZE, ZPROP_SRC_NONE); 401 } else { 402 spa_prop_add_list(*nvp, ZPOOL_PROP_MAXDNODESIZE, NULL, 403 DNODE_MIN_SIZE, ZPROP_SRC_NONE); 404 } 405 406 if ((dp = list_head(&spa->spa_config_list)) != NULL) { 407 if (dp->scd_path == NULL) { 408 spa_prop_add_list(*nvp, ZPOOL_PROP_CACHEFILE, 409 "none", 0, ZPROP_SRC_LOCAL); 410 } else if (strcmp(dp->scd_path, spa_config_path) != 0) { 411 spa_prop_add_list(*nvp, ZPOOL_PROP_CACHEFILE, 412 dp->scd_path, 0, ZPROP_SRC_LOCAL); 413 } 414 } 415 } 416 417 /* 418 * Get zpool property values. 419 */ 420 int 421 spa_prop_get(spa_t *spa, nvlist_t **nvp) 422 { 423 objset_t *mos = spa->spa_meta_objset; 424 zap_cursor_t zc; 425 zap_attribute_t za; 426 dsl_pool_t *dp; 427 int err; 428 429 err = nvlist_alloc(nvp, NV_UNIQUE_NAME, KM_SLEEP); 430 if (err) 431 return (err); 432 433 dp = spa_get_dsl(spa); 434 dsl_pool_config_enter(dp, FTAG); 435 mutex_enter(&spa->spa_props_lock); 436 437 /* 438 * Get properties from the spa config. 439 */ 440 spa_prop_get_config(spa, nvp); 441 442 /* If no pool property object, no more prop to get. */ 443 if (mos == NULL || spa->spa_pool_props_object == 0) 444 goto out; 445 446 /* 447 * Get properties from the MOS pool property object. 
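 * Integer properties (za_integer_length == 8) are read directly from the
 * ZAP entry; string properties (za_integer_length == 1) require an extra
 * zap_lookup().  ZPOOL_PROP_BOOTFS is special-cased: it is stored as a
 * dataset object number and is converted back to a dataset name here.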
448 */ 449 for (zap_cursor_init(&zc, mos, spa->spa_pool_props_object); 450 (err = zap_cursor_retrieve(&zc, &za)) == 0; 451 zap_cursor_advance(&zc)) { 452 uint64_t intval = 0; 453 char *strval = NULL; 454 zprop_source_t src = ZPROP_SRC_DEFAULT; 455 zpool_prop_t prop; 456 457 if ((prop = zpool_name_to_prop(za.za_name)) == ZPOOL_PROP_INVAL) 458 continue; 459 460 switch (za.za_integer_length) { 461 case 8: 462 /* integer property */ 463 if (za.za_first_integer != 464 zpool_prop_default_numeric(prop)) 465 src = ZPROP_SRC_LOCAL; 466 467 if (prop == ZPOOL_PROP_BOOTFS) { 468 dsl_dataset_t *ds = NULL; 469 470 err = dsl_dataset_hold_obj(dp, 471 za.za_first_integer, FTAG, &ds); 472 if (err != 0) 473 break; 474 475 strval = kmem_alloc(ZFS_MAX_DATASET_NAME_LEN, 476 KM_SLEEP); 477 dsl_dataset_name(ds, strval); 478 dsl_dataset_rele(ds, FTAG); 479 } else { 480 strval = NULL; 481 intval = za.za_first_integer; 482 } 483 484 spa_prop_add_list(*nvp, prop, strval, intval, src); 485 486 if (strval != NULL) 487 kmem_free(strval, ZFS_MAX_DATASET_NAME_LEN); 488 489 break; 490 491 case 1: 492 /* string property */ 493 strval = kmem_alloc(za.za_num_integers, KM_SLEEP); 494 err = zap_lookup(mos, spa->spa_pool_props_object, 495 za.za_name, 1, za.za_num_integers, strval); 496 if (err) { 497 kmem_free(strval, za.za_num_integers); 498 break; 499 } 500 spa_prop_add_list(*nvp, prop, strval, 0, src); 501 kmem_free(strval, za.za_num_integers); 502 break; 503 504 default: 505 break; 506 } 507 } 508 zap_cursor_fini(&zc); 509 out: 510 mutex_exit(&spa->spa_props_lock); 511 dsl_pool_config_exit(dp, FTAG); 512 if (err && err != ENOENT) { 513 nvlist_free(*nvp); 514 *nvp = NULL; 515 return (err); 516 } 517 518 return (0); 519 } 520 521 /* 522 * Validate the given pool properties nvlist and modify the list 523 * for the property values to be set. 524 */ 525 static int 526 spa_prop_validate(spa_t *spa, nvlist_t *props) 527 { 528 nvpair_t *elem; 529 int error = 0, reset_bootfs = 0; 530 uint64_t objnum = 0; 531 boolean_t has_feature = B_FALSE; 532 533 elem = NULL; 534 while ((elem = nvlist_next_nvpair(props, elem)) != NULL) { 535 uint64_t intval; 536 char *strval, *slash, *check, *fname; 537 const char *propname = nvpair_name(elem); 538 zpool_prop_t prop = zpool_name_to_prop(propname); 539 540 switch (prop) { 541 case ZPOOL_PROP_INVAL: 542 if (!zpool_prop_feature(propname)) { 543 error = SET_ERROR(EINVAL); 544 break; 545 } 546 547 /* 548 * Sanitize the input. 
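 * A request to enable a feature arrives as a uint64 nvpair whose name is
 * "feature@<name>" and whose value is 0; anything else (wrong nvpair type,
 * a non-zero value, or an unknown feature name) is rejected with EINVAL.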
549 */ 550 if (nvpair_type(elem) != DATA_TYPE_UINT64) { 551 error = SET_ERROR(EINVAL); 552 break; 553 } 554 555 if (nvpair_value_uint64(elem, &intval) != 0) { 556 error = SET_ERROR(EINVAL); 557 break; 558 } 559 560 if (intval != 0) { 561 error = SET_ERROR(EINVAL); 562 break; 563 } 564 565 fname = strchr(propname, '@') + 1; 566 if (zfeature_lookup_name(fname, NULL) != 0) { 567 error = SET_ERROR(EINVAL); 568 break; 569 } 570 571 has_feature = B_TRUE; 572 break; 573 574 case ZPOOL_PROP_VERSION: 575 error = nvpair_value_uint64(elem, &intval); 576 if (!error && 577 (intval < spa_version(spa) || 578 intval > SPA_VERSION_BEFORE_FEATURES || 579 has_feature)) 580 error = SET_ERROR(EINVAL); 581 break; 582 583 case ZPOOL_PROP_DELEGATION: 584 case ZPOOL_PROP_AUTOREPLACE: 585 case ZPOOL_PROP_LISTSNAPS: 586 case ZPOOL_PROP_AUTOEXPAND: 587 case ZPOOL_PROP_AUTOTRIM: 588 error = nvpair_value_uint64(elem, &intval); 589 if (!error && intval > 1) 590 error = SET_ERROR(EINVAL); 591 break; 592 593 case ZPOOL_PROP_MULTIHOST: 594 error = nvpair_value_uint64(elem, &intval); 595 if (!error && intval > 1) 596 error = SET_ERROR(EINVAL); 597 598 if (!error) { 599 uint32_t hostid = zone_get_hostid(NULL); 600 if (hostid) 601 spa->spa_hostid = hostid; 602 else 603 error = SET_ERROR(ENOTSUP); 604 } 605 606 break; 607 608 case ZPOOL_PROP_BOOTFS: 609 /* 610 * If the pool version is less than SPA_VERSION_BOOTFS, 611 * or the pool is still being created (version == 0), 612 * the bootfs property cannot be set. 613 */ 614 if (spa_version(spa) < SPA_VERSION_BOOTFS) { 615 error = SET_ERROR(ENOTSUP); 616 break; 617 } 618 619 /* 620 * Make sure the vdev config is bootable 621 */ 622 if (!vdev_is_bootable(spa->spa_root_vdev)) { 623 error = SET_ERROR(ENOTSUP); 624 break; 625 } 626 627 reset_bootfs = 1; 628 629 error = nvpair_value_string(elem, &strval); 630 631 if (!error) { 632 objset_t *os; 633 634 if (strval == NULL || strval[0] == '\0') { 635 objnum = zpool_prop_default_numeric( 636 ZPOOL_PROP_BOOTFS); 637 break; 638 } 639 640 error = dmu_objset_hold(strval, FTAG, &os); 641 if (error != 0) 642 break; 643 644 /* Must be ZPL. */ 645 if (dmu_objset_type(os) != DMU_OST_ZFS) { 646 error = SET_ERROR(ENOTSUP); 647 } else { 648 objnum = dmu_objset_id(os); 649 } 650 dmu_objset_rele(os, FTAG); 651 } 652 break; 653 654 case ZPOOL_PROP_FAILUREMODE: 655 error = nvpair_value_uint64(elem, &intval); 656 if (!error && intval > ZIO_FAILURE_MODE_PANIC) 657 error = SET_ERROR(EINVAL); 658 659 /* 660 * This is a special case which only occurs when 661 * the pool has completely failed. This allows 662 * the user to change the in-core failmode property 663 * without syncing it out to disk (I/Os might 664 * currently be blocked). We do this by returning 665 * EIO to the caller (spa_prop_set) to trick it 666 * into thinking we encountered a property validation 667 * error. 
668 */ 669 if (!error && spa_suspended(spa)) { 670 spa->spa_failmode = intval; 671 error = SET_ERROR(EIO); 672 } 673 break; 674 675 case ZPOOL_PROP_CACHEFILE: 676 if ((error = nvpair_value_string(elem, &strval)) != 0) 677 break; 678 679 if (strval[0] == '\0') 680 break; 681 682 if (strcmp(strval, "none") == 0) 683 break; 684 685 if (strval[0] != '/') { 686 error = SET_ERROR(EINVAL); 687 break; 688 } 689 690 slash = strrchr(strval, '/'); 691 ASSERT(slash != NULL); 692 693 if (slash[1] == '\0' || strcmp(slash, "/.") == 0 || 694 strcmp(slash, "/..") == 0) 695 error = SET_ERROR(EINVAL); 696 break; 697 698 case ZPOOL_PROP_COMMENT: 699 if ((error = nvpair_value_string(elem, &strval)) != 0) 700 break; 701 for (check = strval; *check != '\0'; check++) { 702 if (!isprint(*check)) { 703 error = SET_ERROR(EINVAL); 704 break; 705 } 706 } 707 if (strlen(strval) > ZPROP_MAX_COMMENT) 708 error = SET_ERROR(E2BIG); 709 break; 710 711 default: 712 break; 713 } 714 715 if (error) 716 break; 717 } 718 719 (void) nvlist_remove_all(props, 720 zpool_prop_to_name(ZPOOL_PROP_DEDUPDITTO)); 721 722 if (!error && reset_bootfs) { 723 error = nvlist_remove(props, 724 zpool_prop_to_name(ZPOOL_PROP_BOOTFS), DATA_TYPE_STRING); 725 726 if (!error) { 727 error = nvlist_add_uint64(props, 728 zpool_prop_to_name(ZPOOL_PROP_BOOTFS), objnum); 729 } 730 } 731 732 return (error); 733 } 734 735 void 736 spa_configfile_set(spa_t *spa, nvlist_t *nvp, boolean_t need_sync) 737 { 738 char *cachefile; 739 spa_config_dirent_t *dp; 740 741 if (nvlist_lookup_string(nvp, zpool_prop_to_name(ZPOOL_PROP_CACHEFILE), 742 &cachefile) != 0) 743 return; 744 745 dp = kmem_alloc(sizeof (spa_config_dirent_t), 746 KM_SLEEP); 747 748 if (cachefile[0] == '\0') 749 dp->scd_path = spa_strdup(spa_config_path); 750 else if (strcmp(cachefile, "none") == 0) 751 dp->scd_path = NULL; 752 else 753 dp->scd_path = spa_strdup(cachefile); 754 755 list_insert_head(&spa->spa_config_list, dp); 756 if (need_sync) 757 spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE); 758 } 759 760 int 761 spa_prop_set(spa_t *spa, nvlist_t *nvp) 762 { 763 int error; 764 nvpair_t *elem = NULL; 765 boolean_t need_sync = B_FALSE; 766 767 if ((error = spa_prop_validate(spa, nvp)) != 0) 768 return (error); 769 770 while ((elem = nvlist_next_nvpair(nvp, elem)) != NULL) { 771 zpool_prop_t prop = zpool_name_to_prop(nvpair_name(elem)); 772 773 if (prop == ZPOOL_PROP_CACHEFILE || 774 prop == ZPOOL_PROP_ALTROOT || 775 prop == ZPOOL_PROP_READONLY) 776 continue; 777 778 if (prop == ZPOOL_PROP_VERSION || prop == ZPOOL_PROP_INVAL) { 779 uint64_t ver; 780 781 if (prop == ZPOOL_PROP_VERSION) { 782 VERIFY(nvpair_value_uint64(elem, &ver) == 0); 783 } else { 784 ASSERT(zpool_prop_feature(nvpair_name(elem))); 785 ver = SPA_VERSION_FEATURES; 786 need_sync = B_TRUE; 787 } 788 789 /* Save time if the version is already set. */ 790 if (ver == spa_version(spa)) 791 continue; 792 793 /* 794 * In addition to the pool directory object, we might 795 * create the pool properties object, the features for 796 * read object, the features for write object, or the 797 * feature descriptions object. 
798 */ 799 error = dsl_sync_task(spa->spa_name, NULL, 800 spa_sync_version, &ver, 801 6, ZFS_SPACE_CHECK_RESERVED); 802 if (error) 803 return (error); 804 continue; 805 } 806 807 need_sync = B_TRUE; 808 break; 809 } 810 811 if (need_sync) { 812 return (dsl_sync_task(spa->spa_name, NULL, spa_sync_props, 813 nvp, 6, ZFS_SPACE_CHECK_RESERVED)); 814 } 815 816 return (0); 817 } 818 819 /* 820 * If the bootfs property value is dsobj, clear it. 821 */ 822 void 823 spa_prop_clear_bootfs(spa_t *spa, uint64_t dsobj, dmu_tx_t *tx) 824 { 825 if (spa->spa_bootfs == dsobj && spa->spa_pool_props_object != 0) { 826 VERIFY(zap_remove(spa->spa_meta_objset, 827 spa->spa_pool_props_object, 828 zpool_prop_to_name(ZPOOL_PROP_BOOTFS), tx) == 0); 829 spa->spa_bootfs = 0; 830 } 831 } 832 833 /*ARGSUSED*/ 834 static int 835 spa_change_guid_check(void *arg, dmu_tx_t *tx) 836 { 837 uint64_t *newguid __maybe_unused = arg; 838 spa_t *spa = dmu_tx_pool(tx)->dp_spa; 839 vdev_t *rvd = spa->spa_root_vdev; 840 uint64_t vdev_state; 841 842 if (spa_feature_is_active(spa, SPA_FEATURE_POOL_CHECKPOINT)) { 843 int error = (spa_has_checkpoint(spa)) ? 844 ZFS_ERR_CHECKPOINT_EXISTS : ZFS_ERR_DISCARDING_CHECKPOINT; 845 return (SET_ERROR(error)); 846 } 847 848 spa_config_enter(spa, SCL_STATE, FTAG, RW_READER); 849 vdev_state = rvd->vdev_state; 850 spa_config_exit(spa, SCL_STATE, FTAG); 851 852 if (vdev_state != VDEV_STATE_HEALTHY) 853 return (SET_ERROR(ENXIO)); 854 855 ASSERT3U(spa_guid(spa), !=, *newguid); 856 857 return (0); 858 } 859 860 static void 861 spa_change_guid_sync(void *arg, dmu_tx_t *tx) 862 { 863 uint64_t *newguid = arg; 864 spa_t *spa = dmu_tx_pool(tx)->dp_spa; 865 uint64_t oldguid; 866 vdev_t *rvd = spa->spa_root_vdev; 867 868 oldguid = spa_guid(spa); 869 870 spa_config_enter(spa, SCL_STATE, FTAG, RW_READER); 871 rvd->vdev_guid = *newguid; 872 rvd->vdev_guid_sum += (*newguid - oldguid); 873 vdev_config_dirty(rvd); 874 spa_config_exit(spa, SCL_STATE, FTAG); 875 876 spa_history_log_internal(spa, "guid change", tx, "old=%llu new=%llu", 877 (u_longlong_t)oldguid, (u_longlong_t)*newguid); 878 } 879 880 /* 881 * Change the GUID for the pool. This is done so that we can later 882 * re-import a pool built from a clone of our own vdevs. We will modify 883 * the root vdev's guid, our own pool guid, and then mark all of our 884 * vdevs dirty. Note that we must make sure that all our vdevs are 885 * online when we do this, or else any vdevs that weren't present 886 * would be orphaned from our pool. We are also going to issue a 887 * sysevent to update any watchers. 
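 * This is reached through the pool reguid ioctl (e.g. "zpool reguid <pool>").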
888 */ 889 int 890 spa_change_guid(spa_t *spa) 891 { 892 int error; 893 uint64_t guid; 894 895 mutex_enter(&spa->spa_vdev_top_lock); 896 mutex_enter(&spa_namespace_lock); 897 guid = spa_generate_guid(NULL); 898 899 error = dsl_sync_task(spa->spa_name, spa_change_guid_check, 900 spa_change_guid_sync, &guid, 5, ZFS_SPACE_CHECK_RESERVED); 901 902 if (error == 0) { 903 spa_write_cachefile(spa, B_FALSE, B_TRUE); 904 spa_event_notify(spa, NULL, NULL, ESC_ZFS_POOL_REGUID); 905 } 906 907 mutex_exit(&spa_namespace_lock); 908 mutex_exit(&spa->spa_vdev_top_lock); 909 910 return (error); 911 } 912 913 /* 914 * ========================================================================== 915 * SPA state manipulation (open/create/destroy/import/export) 916 * ========================================================================== 917 */ 918 919 static int 920 spa_error_entry_compare(const void *a, const void *b) 921 { 922 const spa_error_entry_t *sa = (const spa_error_entry_t *)a; 923 const spa_error_entry_t *sb = (const spa_error_entry_t *)b; 924 int ret; 925 926 ret = memcmp(&sa->se_bookmark, &sb->se_bookmark, 927 sizeof (zbookmark_phys_t)); 928 929 return (TREE_ISIGN(ret)); 930 } 931 932 /* 933 * Utility function which retrieves copies of the current logs and 934 * re-initializes them in the process. 935 */ 936 void 937 spa_get_errlists(spa_t *spa, avl_tree_t *last, avl_tree_t *scrub) 938 { 939 ASSERT(MUTEX_HELD(&spa->spa_errlist_lock)); 940 941 bcopy(&spa->spa_errlist_last, last, sizeof (avl_tree_t)); 942 bcopy(&spa->spa_errlist_scrub, scrub, sizeof (avl_tree_t)); 943 944 avl_create(&spa->spa_errlist_scrub, 945 spa_error_entry_compare, sizeof (spa_error_entry_t), 946 offsetof(spa_error_entry_t, se_avl)); 947 avl_create(&spa->spa_errlist_last, 948 spa_error_entry_compare, sizeof (spa_error_entry_t), 949 offsetof(spa_error_entry_t, se_avl)); 950 } 951 952 static void 953 spa_taskqs_init(spa_t *spa, zio_type_t t, zio_taskq_type_t q) 954 { 955 const zio_taskq_info_t *ztip = &zio_taskqs[t][q]; 956 enum zti_modes mode = ztip->zti_mode; 957 uint_t value = ztip->zti_value; 958 uint_t count = ztip->zti_count; 959 spa_taskqs_t *tqs = &spa->spa_zio_taskq[t][q]; 960 uint_t flags = 0; 961 boolean_t batch = B_FALSE; 962 963 if (mode == ZTI_MODE_NULL) { 964 tqs->stqs_count = 0; 965 tqs->stqs_taskq = NULL; 966 return; 967 } 968 969 ASSERT3U(count, >, 0); 970 971 tqs->stqs_count = count; 972 tqs->stqs_taskq = kmem_alloc(count * sizeof (taskq_t *), KM_SLEEP); 973 974 switch (mode) { 975 case ZTI_MODE_FIXED: 976 ASSERT3U(value, >=, 1); 977 value = MAX(value, 1); 978 flags |= TASKQ_DYNAMIC; 979 break; 980 981 case ZTI_MODE_BATCH: 982 batch = B_TRUE; 983 flags |= TASKQ_THREADS_CPU_PCT; 984 value = MIN(zio_taskq_batch_pct, 100); 985 break; 986 987 default: 988 panic("unrecognized mode for %s_%s taskq (%u:%u) in " 989 "spa_activate()", 990 zio_type_name[t], zio_taskq_types[q], mode, value); 991 break; 992 } 993 994 for (uint_t i = 0; i < count; i++) { 995 taskq_t *tq; 996 char name[32]; 997 998 (void) snprintf(name, sizeof (name), "%s_%s", 999 zio_type_name[t], zio_taskq_types[q]); 1000 1001 if (zio_taskq_sysdc && spa->spa_proc != &p0) { 1002 if (batch) 1003 flags |= TASKQ_DC_BATCH; 1004 1005 tq = taskq_create_sysdc(name, value, 50, INT_MAX, 1006 spa->spa_proc, zio_taskq_basedc, flags); 1007 } else { 1008 pri_t pri = maxclsyspri; 1009 /* 1010 * The write issue taskq can be extremely CPU 1011 * intensive. Run it at slightly less important 1012 * priority than the other taskqs. 
1013 * 1014 * Under Linux and FreeBSD this means incrementing 1015 * the priority value as opposed to platforms like 1016 * illumos where it should be decremented. 1017 * 1018 * On FreeBSD, if priorities divided by four (RQ_PPQ) 1019 * are equal then a difference between them is 1020 * insignificant. 1021 */ 1022 if (t == ZIO_TYPE_WRITE && q == ZIO_TASKQ_ISSUE) { 1023 #if defined(__linux__) 1024 pri++; 1025 #elif defined(__FreeBSD__) 1026 pri += 4; 1027 #else 1028 #error "unknown OS" 1029 #endif 1030 } 1031 tq = taskq_create_proc(name, value, pri, 50, 1032 INT_MAX, spa->spa_proc, flags); 1033 } 1034 1035 tqs->stqs_taskq[i] = tq; 1036 } 1037 } 1038 1039 static void 1040 spa_taskqs_fini(spa_t *spa, zio_type_t t, zio_taskq_type_t q) 1041 { 1042 spa_taskqs_t *tqs = &spa->spa_zio_taskq[t][q]; 1043 1044 if (tqs->stqs_taskq == NULL) { 1045 ASSERT3U(tqs->stqs_count, ==, 0); 1046 return; 1047 } 1048 1049 for (uint_t i = 0; i < tqs->stqs_count; i++) { 1050 ASSERT3P(tqs->stqs_taskq[i], !=, NULL); 1051 taskq_destroy(tqs->stqs_taskq[i]); 1052 } 1053 1054 kmem_free(tqs->stqs_taskq, tqs->stqs_count * sizeof (taskq_t *)); 1055 tqs->stqs_taskq = NULL; 1056 } 1057 1058 /* 1059 * Dispatch a task to the appropriate taskq for the ZFS I/O type and priority. 1060 * Note that a type may have multiple discrete taskqs to avoid lock contention 1061 * on the taskq itself. In that case we choose which taskq at random by using 1062 * the low bits of gethrtime(). 1063 */ 1064 void 1065 spa_taskq_dispatch_ent(spa_t *spa, zio_type_t t, zio_taskq_type_t q, 1066 task_func_t *func, void *arg, uint_t flags, taskq_ent_t *ent) 1067 { 1068 spa_taskqs_t *tqs = &spa->spa_zio_taskq[t][q]; 1069 taskq_t *tq; 1070 1071 ASSERT3P(tqs->stqs_taskq, !=, NULL); 1072 ASSERT3U(tqs->stqs_count, !=, 0); 1073 1074 if (tqs->stqs_count == 1) { 1075 tq = tqs->stqs_taskq[0]; 1076 } else { 1077 tq = tqs->stqs_taskq[((uint64_t)gethrtime()) % tqs->stqs_count]; 1078 } 1079 1080 taskq_dispatch_ent(tq, func, arg, flags, ent); 1081 } 1082 1083 /* 1084 * Same as spa_taskq_dispatch_ent() but block on the task until completion. 1085 */ 1086 void 1087 spa_taskq_dispatch_sync(spa_t *spa, zio_type_t t, zio_taskq_type_t q, 1088 task_func_t *func, void *arg, uint_t flags) 1089 { 1090 spa_taskqs_t *tqs = &spa->spa_zio_taskq[t][q]; 1091 taskq_t *tq; 1092 taskqid_t id; 1093 1094 ASSERT3P(tqs->stqs_taskq, !=, NULL); 1095 ASSERT3U(tqs->stqs_count, !=, 0); 1096 1097 if (tqs->stqs_count == 1) { 1098 tq = tqs->stqs_taskq[0]; 1099 } else { 1100 tq = tqs->stqs_taskq[((uint64_t)gethrtime()) % tqs->stqs_count]; 1101 } 1102 1103 id = taskq_dispatch(tq, func, arg, flags); 1104 if (id) 1105 taskq_wait_id(tq, id); 1106 } 1107 1108 static void 1109 spa_create_zio_taskqs(spa_t *spa) 1110 { 1111 for (int t = 0; t < ZIO_TYPES; t++) { 1112 for (int q = 0; q < ZIO_TASKQ_TYPES; q++) { 1113 spa_taskqs_init(spa, t, q); 1114 } 1115 } 1116 } 1117 1118 /* 1119 * Disabled until spa_thread() can be adapted for Linux. 
1120 */ 1121 #undef HAVE_SPA_THREAD 1122 1123 #if defined(_KERNEL) && defined(HAVE_SPA_THREAD) 1124 static void 1125 spa_thread(void *arg) 1126 { 1127 psetid_t zio_taskq_psrset_bind = PS_NONE; 1128 callb_cpr_t cprinfo; 1129 1130 spa_t *spa = arg; 1131 user_t *pu = PTOU(curproc); 1132 1133 CALLB_CPR_INIT(&cprinfo, &spa->spa_proc_lock, callb_generic_cpr, 1134 spa->spa_name); 1135 1136 ASSERT(curproc != &p0); 1137 (void) snprintf(pu->u_psargs, sizeof (pu->u_psargs), 1138 "zpool-%s", spa->spa_name); 1139 (void) strlcpy(pu->u_comm, pu->u_psargs, sizeof (pu->u_comm)); 1140 1141 /* bind this thread to the requested psrset */ 1142 if (zio_taskq_psrset_bind != PS_NONE) { 1143 pool_lock(); 1144 mutex_enter(&cpu_lock); 1145 mutex_enter(&pidlock); 1146 mutex_enter(&curproc->p_lock); 1147 1148 if (cpupart_bind_thread(curthread, zio_taskq_psrset_bind, 1149 0, NULL, NULL) == 0) { 1150 curthread->t_bind_pset = zio_taskq_psrset_bind; 1151 } else { 1152 cmn_err(CE_WARN, 1153 "Couldn't bind process for zfs pool \"%s\" to " 1154 "pset %d\n", spa->spa_name, zio_taskq_psrset_bind); 1155 } 1156 1157 mutex_exit(&curproc->p_lock); 1158 mutex_exit(&pidlock); 1159 mutex_exit(&cpu_lock); 1160 pool_unlock(); 1161 } 1162 1163 if (zio_taskq_sysdc) { 1164 sysdc_thread_enter(curthread, 100, 0); 1165 } 1166 1167 spa->spa_proc = curproc; 1168 spa->spa_did = curthread->t_did; 1169 1170 spa_create_zio_taskqs(spa); 1171 1172 mutex_enter(&spa->spa_proc_lock); 1173 ASSERT(spa->spa_proc_state == SPA_PROC_CREATED); 1174 1175 spa->spa_proc_state = SPA_PROC_ACTIVE; 1176 cv_broadcast(&spa->spa_proc_cv); 1177 1178 CALLB_CPR_SAFE_BEGIN(&cprinfo); 1179 while (spa->spa_proc_state == SPA_PROC_ACTIVE) 1180 cv_wait(&spa->spa_proc_cv, &spa->spa_proc_lock); 1181 CALLB_CPR_SAFE_END(&cprinfo, &spa->spa_proc_lock); 1182 1183 ASSERT(spa->spa_proc_state == SPA_PROC_DEACTIVATE); 1184 spa->spa_proc_state = SPA_PROC_GONE; 1185 spa->spa_proc = &p0; 1186 cv_broadcast(&spa->spa_proc_cv); 1187 CALLB_CPR_EXIT(&cprinfo); /* drops spa_proc_lock */ 1188 1189 mutex_enter(&curproc->p_lock); 1190 lwp_exit(); 1191 } 1192 #endif 1193 1194 /* 1195 * Activate an uninitialized pool. 1196 */ 1197 static void 1198 spa_activate(spa_t *spa, spa_mode_t mode) 1199 { 1200 ASSERT(spa->spa_state == POOL_STATE_UNINITIALIZED); 1201 1202 spa->spa_state = POOL_STATE_ACTIVE; 1203 spa->spa_mode = mode; 1204 1205 spa->spa_normal_class = metaslab_class_create(spa, zfs_metaslab_ops); 1206 spa->spa_log_class = metaslab_class_create(spa, zfs_metaslab_ops); 1207 spa->spa_embedded_log_class = 1208 metaslab_class_create(spa, zfs_metaslab_ops); 1209 spa->spa_special_class = metaslab_class_create(spa, zfs_metaslab_ops); 1210 spa->spa_dedup_class = metaslab_class_create(spa, zfs_metaslab_ops); 1211 1212 /* Try to create a covering process */ 1213 mutex_enter(&spa->spa_proc_lock); 1214 ASSERT(spa->spa_proc_state == SPA_PROC_NONE); 1215 ASSERT(spa->spa_proc == &p0); 1216 spa->spa_did = 0; 1217 1218 #ifdef HAVE_SPA_THREAD 1219 /* Only create a process if we're going to be around a while. 
*/ 1220 if (spa_create_process && strcmp(spa->spa_name, TRYIMPORT_NAME) != 0) { 1221 if (newproc(spa_thread, (caddr_t)spa, syscid, maxclsyspri, 1222 NULL, 0) == 0) { 1223 spa->spa_proc_state = SPA_PROC_CREATED; 1224 while (spa->spa_proc_state == SPA_PROC_CREATED) { 1225 cv_wait(&spa->spa_proc_cv, 1226 &spa->spa_proc_lock); 1227 } 1228 ASSERT(spa->spa_proc_state == SPA_PROC_ACTIVE); 1229 ASSERT(spa->spa_proc != &p0); 1230 ASSERT(spa->spa_did != 0); 1231 } else { 1232 #ifdef _KERNEL 1233 cmn_err(CE_WARN, 1234 "Couldn't create process for zfs pool \"%s\"\n", 1235 spa->spa_name); 1236 #endif 1237 } 1238 } 1239 #endif /* HAVE_SPA_THREAD */ 1240 mutex_exit(&spa->spa_proc_lock); 1241 1242 /* If we didn't create a process, we need to create our taskqs. */ 1243 if (spa->spa_proc == &p0) { 1244 spa_create_zio_taskqs(spa); 1245 } 1246 1247 for (size_t i = 0; i < TXG_SIZE; i++) { 1248 spa->spa_txg_zio[i] = zio_root(spa, NULL, NULL, 1249 ZIO_FLAG_CANFAIL); 1250 } 1251 1252 list_create(&spa->spa_config_dirty_list, sizeof (vdev_t), 1253 offsetof(vdev_t, vdev_config_dirty_node)); 1254 list_create(&spa->spa_evicting_os_list, sizeof (objset_t), 1255 offsetof(objset_t, os_evicting_node)); 1256 list_create(&spa->spa_state_dirty_list, sizeof (vdev_t), 1257 offsetof(vdev_t, vdev_state_dirty_node)); 1258 1259 txg_list_create(&spa->spa_vdev_txg_list, spa, 1260 offsetof(struct vdev, vdev_txg_node)); 1261 1262 avl_create(&spa->spa_errlist_scrub, 1263 spa_error_entry_compare, sizeof (spa_error_entry_t), 1264 offsetof(spa_error_entry_t, se_avl)); 1265 avl_create(&spa->spa_errlist_last, 1266 spa_error_entry_compare, sizeof (spa_error_entry_t), 1267 offsetof(spa_error_entry_t, se_avl)); 1268 1269 spa_keystore_init(&spa->spa_keystore); 1270 1271 /* 1272 * This taskq is used to perform zvol-minor-related tasks 1273 * asynchronously. This has several advantages, including easy 1274 * resolution of various deadlocks. 1275 * 1276 * The taskq must be single threaded to ensure tasks are always 1277 * processed in the order in which they were dispatched. 1278 * 1279 * A taskq per pool allows one to keep the pools independent. 1280 * This way if one pool is suspended, it will not impact another. 1281 * 1282 * The preferred location to dispatch a zvol minor task is a sync 1283 * task. In this context, there is easy access to the spa_t and minimal 1284 * error handling is required because the sync task must succeed. 1285 */ 1286 spa->spa_zvol_taskq = taskq_create("z_zvol", 1, defclsyspri, 1287 1, INT_MAX, 0); 1288 1289 /* 1290 * Taskq dedicated to prefetcher threads: this is used to prevent the 1291 * pool traverse code from monopolizing the global (and limited) 1292 * system_taskq by inappropriately scheduling long running tasks on it. 1293 */ 1294 spa->spa_prefetch_taskq = taskq_create("z_prefetch", 100, 1295 defclsyspri, 1, INT_MAX, TASKQ_DYNAMIC | TASKQ_THREADS_CPU_PCT); 1296 1297 /* 1298 * The taskq to upgrade datasets in this pool. Currently used by 1299 * feature SPA_FEATURE_USEROBJ_ACCOUNTING/SPA_FEATURE_PROJECT_QUOTA. 1300 */ 1301 spa->spa_upgrade_taskq = taskq_create("z_upgrade", 100, 1302 defclsyspri, 1, INT_MAX, TASKQ_DYNAMIC | TASKQ_THREADS_CPU_PCT); 1303 } 1304 1305 /* 1306 * Opposite of spa_activate(). 
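 * Tears down the taskqs, dirty lists, error trees, and metaslab classes
 * created by spa_activate(), and waits for the covering process/thread
 * (if one was created) to exit before returning.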
1307 */ 1308 static void 1309 spa_deactivate(spa_t *spa) 1310 { 1311 ASSERT(spa->spa_sync_on == B_FALSE); 1312 ASSERT(spa->spa_dsl_pool == NULL); 1313 ASSERT(spa->spa_root_vdev == NULL); 1314 ASSERT(spa->spa_async_zio_root == NULL); 1315 ASSERT(spa->spa_state != POOL_STATE_UNINITIALIZED); 1316 1317 spa_evicting_os_wait(spa); 1318 1319 if (spa->spa_zvol_taskq) { 1320 taskq_destroy(spa->spa_zvol_taskq); 1321 spa->spa_zvol_taskq = NULL; 1322 } 1323 1324 if (spa->spa_prefetch_taskq) { 1325 taskq_destroy(spa->spa_prefetch_taskq); 1326 spa->spa_prefetch_taskq = NULL; 1327 } 1328 1329 if (spa->spa_upgrade_taskq) { 1330 taskq_destroy(spa->spa_upgrade_taskq); 1331 spa->spa_upgrade_taskq = NULL; 1332 } 1333 1334 txg_list_destroy(&spa->spa_vdev_txg_list); 1335 1336 list_destroy(&spa->spa_config_dirty_list); 1337 list_destroy(&spa->spa_evicting_os_list); 1338 list_destroy(&spa->spa_state_dirty_list); 1339 1340 taskq_cancel_id(system_delay_taskq, spa->spa_deadman_tqid); 1341 1342 for (int t = 0; t < ZIO_TYPES; t++) { 1343 for (int q = 0; q < ZIO_TASKQ_TYPES; q++) { 1344 spa_taskqs_fini(spa, t, q); 1345 } 1346 } 1347 1348 for (size_t i = 0; i < TXG_SIZE; i++) { 1349 ASSERT3P(spa->spa_txg_zio[i], !=, NULL); 1350 VERIFY0(zio_wait(spa->spa_txg_zio[i])); 1351 spa->spa_txg_zio[i] = NULL; 1352 } 1353 1354 metaslab_class_destroy(spa->spa_normal_class); 1355 spa->spa_normal_class = NULL; 1356 1357 metaslab_class_destroy(spa->spa_log_class); 1358 spa->spa_log_class = NULL; 1359 1360 metaslab_class_destroy(spa->spa_embedded_log_class); 1361 spa->spa_embedded_log_class = NULL; 1362 1363 metaslab_class_destroy(spa->spa_special_class); 1364 spa->spa_special_class = NULL; 1365 1366 metaslab_class_destroy(spa->spa_dedup_class); 1367 spa->spa_dedup_class = NULL; 1368 1369 /* 1370 * If this was part of an import or the open otherwise failed, we may 1371 * still have errors left in the queues. Empty them just in case. 1372 */ 1373 spa_errlog_drain(spa); 1374 avl_destroy(&spa->spa_errlist_scrub); 1375 avl_destroy(&spa->spa_errlist_last); 1376 1377 spa_keystore_fini(&spa->spa_keystore); 1378 1379 spa->spa_state = POOL_STATE_UNINITIALIZED; 1380 1381 mutex_enter(&spa->spa_proc_lock); 1382 if (spa->spa_proc_state != SPA_PROC_NONE) { 1383 ASSERT(spa->spa_proc_state == SPA_PROC_ACTIVE); 1384 spa->spa_proc_state = SPA_PROC_DEACTIVATE; 1385 cv_broadcast(&spa->spa_proc_cv); 1386 while (spa->spa_proc_state == SPA_PROC_DEACTIVATE) { 1387 ASSERT(spa->spa_proc != &p0); 1388 cv_wait(&spa->spa_proc_cv, &spa->spa_proc_lock); 1389 } 1390 ASSERT(spa->spa_proc_state == SPA_PROC_GONE); 1391 spa->spa_proc_state = SPA_PROC_NONE; 1392 } 1393 ASSERT(spa->spa_proc == &p0); 1394 mutex_exit(&spa->spa_proc_lock); 1395 1396 /* 1397 * We want to make sure spa_thread() has actually exited the ZFS 1398 * module, so that the module can't be unloaded out from underneath 1399 * it. 1400 */ 1401 if (spa->spa_did != 0) { 1402 thread_join(spa->spa_did); 1403 spa->spa_did = 0; 1404 } 1405 } 1406 1407 /* 1408 * Verify a pool configuration, and construct the vdev tree appropriately. This 1409 * will create all the necessary vdevs in the appropriate layout, with each vdev 1410 * in the CLOSED state. This will prep the pool before open/creation/import. 1411 * All vdev validation is done by the vdev_alloc() routine. 
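 *
 * For example (illustrative only), a config nvlist of the form
 *
 *	root
 *	  children[0]: mirror
 *	    children[0]: disk
 *	    children[1]: disk
 *
 * is parsed depth-first: vdev_alloc() is called for each nvlist, and each
 * ZPOOL_CONFIG_CHILDREN array becomes the children of the vdev allocated
 * for the enclosing nvlist, yielding a CLOSED vdev tree of the same shape.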
1412 */ 1413 int 1414 spa_config_parse(spa_t *spa, vdev_t **vdp, nvlist_t *nv, vdev_t *parent, 1415 uint_t id, int atype) 1416 { 1417 nvlist_t **child; 1418 uint_t children; 1419 int error; 1420 1421 if ((error = vdev_alloc(spa, vdp, nv, parent, id, atype)) != 0) 1422 return (error); 1423 1424 if ((*vdp)->vdev_ops->vdev_op_leaf) 1425 return (0); 1426 1427 error = nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN, 1428 &child, &children); 1429 1430 if (error == ENOENT) 1431 return (0); 1432 1433 if (error) { 1434 vdev_free(*vdp); 1435 *vdp = NULL; 1436 return (SET_ERROR(EINVAL)); 1437 } 1438 1439 for (int c = 0; c < children; c++) { 1440 vdev_t *vd; 1441 if ((error = spa_config_parse(spa, &vd, child[c], *vdp, c, 1442 atype)) != 0) { 1443 vdev_free(*vdp); 1444 *vdp = NULL; 1445 return (error); 1446 } 1447 } 1448 1449 ASSERT(*vdp != NULL); 1450 1451 return (0); 1452 } 1453 1454 static boolean_t 1455 spa_should_flush_logs_on_unload(spa_t *spa) 1456 { 1457 if (!spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP)) 1458 return (B_FALSE); 1459 1460 if (!spa_writeable(spa)) 1461 return (B_FALSE); 1462 1463 if (!spa->spa_sync_on) 1464 return (B_FALSE); 1465 1466 if (spa_state(spa) != POOL_STATE_EXPORTED) 1467 return (B_FALSE); 1468 1469 if (zfs_keep_log_spacemaps_at_export) 1470 return (B_FALSE); 1471 1472 return (B_TRUE); 1473 } 1474 1475 /* 1476 * Opens a transaction that will set the flag that will instruct 1477 * spa_sync to attempt to flush all the metaslabs for that txg. 1478 */ 1479 static void 1480 spa_unload_log_sm_flush_all(spa_t *spa) 1481 { 1482 dmu_tx_t *tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir); 1483 VERIFY0(dmu_tx_assign(tx, TXG_WAIT)); 1484 1485 ASSERT3U(spa->spa_log_flushall_txg, ==, 0); 1486 spa->spa_log_flushall_txg = dmu_tx_get_txg(tx); 1487 1488 dmu_tx_commit(tx); 1489 txg_wait_synced(spa_get_dsl(spa), spa->spa_log_flushall_txg); 1490 } 1491 1492 static void 1493 spa_unload_log_sm_metadata(spa_t *spa) 1494 { 1495 void *cookie = NULL; 1496 spa_log_sm_t *sls; 1497 while ((sls = avl_destroy_nodes(&spa->spa_sm_logs_by_txg, 1498 &cookie)) != NULL) { 1499 VERIFY0(sls->sls_mscount); 1500 kmem_free(sls, sizeof (spa_log_sm_t)); 1501 } 1502 1503 for (log_summary_entry_t *e = list_head(&spa->spa_log_summary); 1504 e != NULL; e = list_head(&spa->spa_log_summary)) { 1505 VERIFY0(e->lse_mscount); 1506 list_remove(&spa->spa_log_summary, e); 1507 kmem_free(e, sizeof (log_summary_entry_t)); 1508 } 1509 1510 spa->spa_unflushed_stats.sus_nblocks = 0; 1511 spa->spa_unflushed_stats.sus_memused = 0; 1512 spa->spa_unflushed_stats.sus_blocklimit = 0; 1513 } 1514 1515 static void 1516 spa_destroy_aux_threads(spa_t *spa) 1517 { 1518 if (spa->spa_condense_zthr != NULL) { 1519 zthr_destroy(spa->spa_condense_zthr); 1520 spa->spa_condense_zthr = NULL; 1521 } 1522 if (spa->spa_checkpoint_discard_zthr != NULL) { 1523 zthr_destroy(spa->spa_checkpoint_discard_zthr); 1524 spa->spa_checkpoint_discard_zthr = NULL; 1525 } 1526 if (spa->spa_livelist_delete_zthr != NULL) { 1527 zthr_destroy(spa->spa_livelist_delete_zthr); 1528 spa->spa_livelist_delete_zthr = NULL; 1529 } 1530 if (spa->spa_livelist_condense_zthr != NULL) { 1531 zthr_destroy(spa->spa_livelist_condense_zthr); 1532 spa->spa_livelist_condense_zthr = NULL; 1533 } 1534 } 1535 1536 /* 1537 * Opposite of spa_load(). 
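 * Stops async tasks, syncing, and MMP, optionally flushes log space maps,
 * and then frees the vdev tree, DSL pool, DDT state, and the spare/l2cache
 * state that spa_load() set up.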
1538 */ 1539 static void 1540 spa_unload(spa_t *spa) 1541 { 1542 ASSERT(MUTEX_HELD(&spa_namespace_lock)); 1543 ASSERT(spa_state(spa) != POOL_STATE_UNINITIALIZED); 1544 1545 spa_import_progress_remove(spa_guid(spa)); 1546 spa_load_note(spa, "UNLOADING"); 1547 1548 spa_wake_waiters(spa); 1549 1550 /* 1551 * If the log space map feature is enabled and the pool is getting 1552 * exported (but not destroyed), we want to spend some time flushing 1553 * as many metaslabs as we can in an attempt to destroy log space 1554 * maps and save import time. 1555 */ 1556 if (spa_should_flush_logs_on_unload(spa)) 1557 spa_unload_log_sm_flush_all(spa); 1558 1559 /* 1560 * Stop async tasks. 1561 */ 1562 spa_async_suspend(spa); 1563 1564 if (spa->spa_root_vdev) { 1565 vdev_t *root_vdev = spa->spa_root_vdev; 1566 vdev_initialize_stop_all(root_vdev, VDEV_INITIALIZE_ACTIVE); 1567 vdev_trim_stop_all(root_vdev, VDEV_TRIM_ACTIVE); 1568 vdev_autotrim_stop_all(spa); 1569 vdev_rebuild_stop_all(spa); 1570 } 1571 1572 /* 1573 * Stop syncing. 1574 */ 1575 if (spa->spa_sync_on) { 1576 txg_sync_stop(spa->spa_dsl_pool); 1577 spa->spa_sync_on = B_FALSE; 1578 } 1579 1580 /* 1581 * This ensures that there is no async metaslab prefetching 1582 * while we attempt to unload the spa. 1583 */ 1584 if (spa->spa_root_vdev != NULL) { 1585 for (int c = 0; c < spa->spa_root_vdev->vdev_children; c++) { 1586 vdev_t *vc = spa->spa_root_vdev->vdev_child[c]; 1587 if (vc->vdev_mg != NULL) 1588 taskq_wait(vc->vdev_mg->mg_taskq); 1589 } 1590 } 1591 1592 if (spa->spa_mmp.mmp_thread) 1593 mmp_thread_stop(spa); 1594 1595 /* 1596 * Wait for any outstanding async I/O to complete. 1597 */ 1598 if (spa->spa_async_zio_root != NULL) { 1599 for (int i = 0; i < max_ncpus; i++) 1600 (void) zio_wait(spa->spa_async_zio_root[i]); 1601 kmem_free(spa->spa_async_zio_root, max_ncpus * sizeof (void *)); 1602 spa->spa_async_zio_root = NULL; 1603 } 1604 1605 if (spa->spa_vdev_removal != NULL) { 1606 spa_vdev_removal_destroy(spa->spa_vdev_removal); 1607 spa->spa_vdev_removal = NULL; 1608 } 1609 1610 spa_destroy_aux_threads(spa); 1611 1612 spa_condense_fini(spa); 1613 1614 bpobj_close(&spa->spa_deferred_bpobj); 1615 1616 spa_config_enter(spa, SCL_ALL, spa, RW_WRITER); 1617 1618 /* 1619 * Close all vdevs. 1620 */ 1621 if (spa->spa_root_vdev) 1622 vdev_free(spa->spa_root_vdev); 1623 ASSERT(spa->spa_root_vdev == NULL); 1624 1625 /* 1626 * Close the dsl pool. 
1627 */ 1628 if (spa->spa_dsl_pool) { 1629 dsl_pool_close(spa->spa_dsl_pool); 1630 spa->spa_dsl_pool = NULL; 1631 spa->spa_meta_objset = NULL; 1632 } 1633 1634 ddt_unload(spa); 1635 spa_unload_log_sm_metadata(spa); 1636 1637 /* 1638 * Drop and purge level 2 cache 1639 */ 1640 spa_l2cache_drop(spa); 1641 1642 for (int i = 0; i < spa->spa_spares.sav_count; i++) 1643 vdev_free(spa->spa_spares.sav_vdevs[i]); 1644 if (spa->spa_spares.sav_vdevs) { 1645 kmem_free(spa->spa_spares.sav_vdevs, 1646 spa->spa_spares.sav_count * sizeof (void *)); 1647 spa->spa_spares.sav_vdevs = NULL; 1648 } 1649 if (spa->spa_spares.sav_config) { 1650 nvlist_free(spa->spa_spares.sav_config); 1651 spa->spa_spares.sav_config = NULL; 1652 } 1653 spa->spa_spares.sav_count = 0; 1654 1655 for (int i = 0; i < spa->spa_l2cache.sav_count; i++) { 1656 vdev_clear_stats(spa->spa_l2cache.sav_vdevs[i]); 1657 vdev_free(spa->spa_l2cache.sav_vdevs[i]); 1658 } 1659 if (spa->spa_l2cache.sav_vdevs) { 1660 kmem_free(spa->spa_l2cache.sav_vdevs, 1661 spa->spa_l2cache.sav_count * sizeof (void *)); 1662 spa->spa_l2cache.sav_vdevs = NULL; 1663 } 1664 if (spa->spa_l2cache.sav_config) { 1665 nvlist_free(spa->spa_l2cache.sav_config); 1666 spa->spa_l2cache.sav_config = NULL; 1667 } 1668 spa->spa_l2cache.sav_count = 0; 1669 1670 spa->spa_async_suspended = 0; 1671 1672 spa->spa_indirect_vdevs_loaded = B_FALSE; 1673 1674 if (spa->spa_comment != NULL) { 1675 spa_strfree(spa->spa_comment); 1676 spa->spa_comment = NULL; 1677 } 1678 if (spa->spa_compatibility != NULL) { 1679 spa_strfree(spa->spa_compatibility); 1680 spa->spa_compatibility = NULL; 1681 } 1682 1683 spa_config_exit(spa, SCL_ALL, spa); 1684 } 1685 1686 /* 1687 * Load (or re-load) the current list of vdevs describing the active spares for 1688 * this pool. When this is called, we have some form of basic information in 1689 * 'spa_spares.sav_config'. We parse this into vdevs, try to open them, and 1690 * then re-generate a more complete list including status information. 1691 */ 1692 void 1693 spa_load_spares(spa_t *spa) 1694 { 1695 nvlist_t **spares; 1696 uint_t nspares; 1697 int i; 1698 vdev_t *vd, *tvd; 1699 1700 #ifndef _KERNEL 1701 /* 1702 * zdb opens both the current state of the pool and the 1703 * checkpointed state (if present), with a different spa_t. 1704 * 1705 * As spare vdevs are shared among open pools, we skip loading 1706 * them when we load the checkpointed state of the pool. 1707 */ 1708 if (!spa_writeable(spa)) 1709 return; 1710 #endif 1711 1712 ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL); 1713 1714 /* 1715 * First, close and free any existing spare vdevs. 1716 */ 1717 for (i = 0; i < spa->spa_spares.sav_count; i++) { 1718 vd = spa->spa_spares.sav_vdevs[i]; 1719 1720 /* Undo the call to spa_activate() below */ 1721 if ((tvd = spa_lookup_by_guid(spa, vd->vdev_guid, 1722 B_FALSE)) != NULL && tvd->vdev_isspare) 1723 spa_spare_remove(tvd); 1724 vdev_close(vd); 1725 vdev_free(vd); 1726 } 1727 1728 if (spa->spa_spares.sav_vdevs) 1729 kmem_free(spa->spa_spares.sav_vdevs, 1730 spa->spa_spares.sav_count * sizeof (void *)); 1731 1732 if (spa->spa_spares.sav_config == NULL) 1733 nspares = 0; 1734 else 1735 VERIFY(nvlist_lookup_nvlist_array(spa->spa_spares.sav_config, 1736 ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0); 1737 1738 spa->spa_spares.sav_count = (int)nspares; 1739 spa->spa_spares.sav_vdevs = NULL; 1740 1741 if (nspares == 0) 1742 return; 1743 1744 /* 1745 * Construct the array of vdevs, opening them to get status in the 1746 * process. 
For each spare, there is potentially two different vdev_t 1747 * structures associated with it: one in the list of spares (used only 1748 * for basic validation purposes) and one in the active vdev 1749 * configuration (if it's spared in). During this phase we open and 1750 * validate each vdev on the spare list. If the vdev also exists in the 1751 * active configuration, then we also mark this vdev as an active spare. 1752 */ 1753 spa->spa_spares.sav_vdevs = kmem_zalloc(nspares * sizeof (void *), 1754 KM_SLEEP); 1755 for (i = 0; i < spa->spa_spares.sav_count; i++) { 1756 VERIFY(spa_config_parse(spa, &vd, spares[i], NULL, 0, 1757 VDEV_ALLOC_SPARE) == 0); 1758 ASSERT(vd != NULL); 1759 1760 spa->spa_spares.sav_vdevs[i] = vd; 1761 1762 if ((tvd = spa_lookup_by_guid(spa, vd->vdev_guid, 1763 B_FALSE)) != NULL) { 1764 if (!tvd->vdev_isspare) 1765 spa_spare_add(tvd); 1766 1767 /* 1768 * We only mark the spare active if we were successfully 1769 * able to load the vdev. Otherwise, importing a pool 1770 * with a bad active spare would result in strange 1771 * behavior, because multiple pool would think the spare 1772 * is actively in use. 1773 * 1774 * There is a vulnerability here to an equally bizarre 1775 * circumstance, where a dead active spare is later 1776 * brought back to life (onlined or otherwise). Given 1777 * the rarity of this scenario, and the extra complexity 1778 * it adds, we ignore the possibility. 1779 */ 1780 if (!vdev_is_dead(tvd)) 1781 spa_spare_activate(tvd); 1782 } 1783 1784 vd->vdev_top = vd; 1785 vd->vdev_aux = &spa->spa_spares; 1786 1787 if (vdev_open(vd) != 0) 1788 continue; 1789 1790 if (vdev_validate_aux(vd) == 0) 1791 spa_spare_add(vd); 1792 } 1793 1794 /* 1795 * Recompute the stashed list of spares, with status information 1796 * this time. 1797 */ 1798 VERIFY(nvlist_remove(spa->spa_spares.sav_config, ZPOOL_CONFIG_SPARES, 1799 DATA_TYPE_NVLIST_ARRAY) == 0); 1800 1801 spares = kmem_alloc(spa->spa_spares.sav_count * sizeof (void *), 1802 KM_SLEEP); 1803 for (i = 0; i < spa->spa_spares.sav_count; i++) 1804 spares[i] = vdev_config_generate(spa, 1805 spa->spa_spares.sav_vdevs[i], B_TRUE, VDEV_CONFIG_SPARE); 1806 VERIFY(nvlist_add_nvlist_array(spa->spa_spares.sav_config, 1807 ZPOOL_CONFIG_SPARES, spares, spa->spa_spares.sav_count) == 0); 1808 for (i = 0; i < spa->spa_spares.sav_count; i++) 1809 nvlist_free(spares[i]); 1810 kmem_free(spares, spa->spa_spares.sav_count * sizeof (void *)); 1811 } 1812 1813 /* 1814 * Load (or re-load) the current list of vdevs describing the active l2cache for 1815 * this pool. When this is called, we have some form of basic information in 1816 * 'spa_l2cache.sav_config'. We parse this into vdevs, try to open them, and 1817 * then re-generate a more complete list including status information. 1818 * Devices which are already active have their details maintained, and are 1819 * not re-opened. 1820 */ 1821 void 1822 spa_load_l2cache(spa_t *spa) 1823 { 1824 nvlist_t **l2cache = NULL; 1825 uint_t nl2cache; 1826 int i, j, oldnvdevs; 1827 uint64_t guid; 1828 vdev_t *vd, **oldvdevs, **newvdevs; 1829 spa_aux_vdev_t *sav = &spa->spa_l2cache; 1830 1831 #ifndef _KERNEL 1832 /* 1833 * zdb opens both the current state of the pool and the 1834 * checkpointed state (if present), with a different spa_t. 1835 * 1836 * As L2 caches are part of the ARC which is shared among open 1837 * pools, we skip loading them when we load the checkpointed 1838 * state of the pool. 
1839 */ 1840 if (!spa_writeable(spa)) 1841 return; 1842 #endif 1843 1844 ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL); 1845 1846 oldvdevs = sav->sav_vdevs; 1847 oldnvdevs = sav->sav_count; 1848 sav->sav_vdevs = NULL; 1849 sav->sav_count = 0; 1850 1851 if (sav->sav_config == NULL) { 1852 nl2cache = 0; 1853 newvdevs = NULL; 1854 goto out; 1855 } 1856 1857 VERIFY(nvlist_lookup_nvlist_array(sav->sav_config, 1858 ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0); 1859 newvdevs = kmem_alloc(nl2cache * sizeof (void *), KM_SLEEP); 1860 1861 /* 1862 * Process new nvlist of vdevs. 1863 */ 1864 for (i = 0; i < nl2cache; i++) { 1865 VERIFY(nvlist_lookup_uint64(l2cache[i], ZPOOL_CONFIG_GUID, 1866 &guid) == 0); 1867 1868 newvdevs[i] = NULL; 1869 for (j = 0; j < oldnvdevs; j++) { 1870 vd = oldvdevs[j]; 1871 if (vd != NULL && guid == vd->vdev_guid) { 1872 /* 1873 * Retain previous vdev for add/remove ops. 1874 */ 1875 newvdevs[i] = vd; 1876 oldvdevs[j] = NULL; 1877 break; 1878 } 1879 } 1880 1881 if (newvdevs[i] == NULL) { 1882 /* 1883 * Create new vdev 1884 */ 1885 VERIFY(spa_config_parse(spa, &vd, l2cache[i], NULL, 0, 1886 VDEV_ALLOC_L2CACHE) == 0); 1887 ASSERT(vd != NULL); 1888 newvdevs[i] = vd; 1889 1890 /* 1891 * Commit this vdev as an l2cache device, 1892 * even if it fails to open. 1893 */ 1894 spa_l2cache_add(vd); 1895 1896 vd->vdev_top = vd; 1897 vd->vdev_aux = sav; 1898 1899 spa_l2cache_activate(vd); 1900 1901 if (vdev_open(vd) != 0) 1902 continue; 1903 1904 (void) vdev_validate_aux(vd); 1905 1906 if (!vdev_is_dead(vd)) 1907 l2arc_add_vdev(spa, vd); 1908 1909 /* 1910 * Upon cache device addition to a pool or pool 1911 * creation with a cache device or if the header 1912 * of the device is invalid we issue an async 1913 * TRIM command for the whole device which will 1914 * execute if l2arc_trim_ahead > 0. 1915 */ 1916 spa_async_request(spa, SPA_ASYNC_L2CACHE_TRIM); 1917 } 1918 } 1919 1920 sav->sav_vdevs = newvdevs; 1921 sav->sav_count = (int)nl2cache; 1922 1923 /* 1924 * Recompute the stashed list of l2cache devices, with status 1925 * information this time. 
1926 */ 1927 VERIFY(nvlist_remove(sav->sav_config, ZPOOL_CONFIG_L2CACHE, 1928 DATA_TYPE_NVLIST_ARRAY) == 0); 1929 1930 if (sav->sav_count > 0) 1931 l2cache = kmem_alloc(sav->sav_count * sizeof (void *), 1932 KM_SLEEP); 1933 for (i = 0; i < sav->sav_count; i++) 1934 l2cache[i] = vdev_config_generate(spa, 1935 sav->sav_vdevs[i], B_TRUE, VDEV_CONFIG_L2CACHE); 1936 VERIFY(nvlist_add_nvlist_array(sav->sav_config, 1937 ZPOOL_CONFIG_L2CACHE, l2cache, sav->sav_count) == 0); 1938 1939 out: 1940 /* 1941 * Purge vdevs that were dropped 1942 */ 1943 for (i = 0; i < oldnvdevs; i++) { 1944 uint64_t pool; 1945 1946 vd = oldvdevs[i]; 1947 if (vd != NULL) { 1948 ASSERT(vd->vdev_isl2cache); 1949 1950 if (spa_l2cache_exists(vd->vdev_guid, &pool) && 1951 pool != 0ULL && l2arc_vdev_present(vd)) 1952 l2arc_remove_vdev(vd); 1953 vdev_clear_stats(vd); 1954 vdev_free(vd); 1955 } 1956 } 1957 1958 if (oldvdevs) 1959 kmem_free(oldvdevs, oldnvdevs * sizeof (void *)); 1960 1961 for (i = 0; i < sav->sav_count; i++) 1962 nvlist_free(l2cache[i]); 1963 if (sav->sav_count) 1964 kmem_free(l2cache, sav->sav_count * sizeof (void *)); 1965 } 1966 1967 static int 1968 load_nvlist(spa_t *spa, uint64_t obj, nvlist_t **value) 1969 { 1970 dmu_buf_t *db; 1971 char *packed = NULL; 1972 size_t nvsize = 0; 1973 int error; 1974 *value = NULL; 1975 1976 error = dmu_bonus_hold(spa->spa_meta_objset, obj, FTAG, &db); 1977 if (error) 1978 return (error); 1979 1980 nvsize = *(uint64_t *)db->db_data; 1981 dmu_buf_rele(db, FTAG); 1982 1983 packed = vmem_alloc(nvsize, KM_SLEEP); 1984 error = dmu_read(spa->spa_meta_objset, obj, 0, nvsize, packed, 1985 DMU_READ_PREFETCH); 1986 if (error == 0) 1987 error = nvlist_unpack(packed, nvsize, value, 0); 1988 vmem_free(packed, nvsize); 1989 1990 return (error); 1991 } 1992 1993 /* 1994 * Concrete top-level vdevs that are not missing and are not logs. At every 1995 * spa_sync we write new uberblocks to at least SPA_SYNC_MIN_VDEVS core tvds. 1996 */ 1997 static uint64_t 1998 spa_healthy_core_tvds(spa_t *spa) 1999 { 2000 vdev_t *rvd = spa->spa_root_vdev; 2001 uint64_t tvds = 0; 2002 2003 for (uint64_t i = 0; i < rvd->vdev_children; i++) { 2004 vdev_t *vd = rvd->vdev_child[i]; 2005 if (vd->vdev_islog) 2006 continue; 2007 if (vdev_is_concrete(vd) && !vdev_is_dead(vd)) 2008 tvds++; 2009 } 2010 2011 return (tvds); 2012 } 2013 2014 /* 2015 * Checks to see if the given vdev could not be opened, in which case we post a 2016 * sysevent to notify the autoreplace code that the device has been removed. 2017 */ 2018 static void 2019 spa_check_removed(vdev_t *vd) 2020 { 2021 for (uint64_t c = 0; c < vd->vdev_children; c++) 2022 spa_check_removed(vd->vdev_child[c]); 2023 2024 if (vd->vdev_ops->vdev_op_leaf && vdev_is_dead(vd) && 2025 vdev_is_concrete(vd)) { 2026 zfs_post_autoreplace(vd->vdev_spa, vd); 2027 spa_event_notify(vd->vdev_spa, vd, NULL, ESC_ZFS_VDEV_CHECK); 2028 } 2029 } 2030 2031 static int 2032 spa_check_for_missing_logs(spa_t *spa) 2033 { 2034 vdev_t *rvd = spa->spa_root_vdev; 2035 2036 /* 2037 * If we're doing a normal import, then build up any additional 2038 * diagnostic information about missing log devices. 2039 * We'll pass this up to the user for further processing. 
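 *
 * As a sketch (layout approximate), the diagnostic information handed
 * back through spa_load_info looks like
 *
 *     ZPOOL_CONFIG_MISSING_DEVICES = {
 *         ZPOOL_CONFIG_CHILDREN = [
 *             <config of each unopenable log top-level vdev>,
 *             ...
 *         ]
 *     }
 *
 * with each child generated below via vdev_config_generate(...,
 * VDEV_CONFIG_MISSING).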
2040 */ 2041 if (!(spa->spa_import_flags & ZFS_IMPORT_MISSING_LOG)) { 2042 nvlist_t **child, *nv; 2043 uint64_t idx = 0; 2044 2045 child = kmem_alloc(rvd->vdev_children * sizeof (nvlist_t *), 2046 KM_SLEEP); 2047 VERIFY(nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) == 0); 2048 2049 for (uint64_t c = 0; c < rvd->vdev_children; c++) { 2050 vdev_t *tvd = rvd->vdev_child[c]; 2051 2052 /* 2053 * We consider a device as missing only if it failed 2054 * to open (i.e. offline or faulted is not considered 2055 * as missing). 2056 */ 2057 if (tvd->vdev_islog && 2058 tvd->vdev_state == VDEV_STATE_CANT_OPEN) { 2059 child[idx++] = vdev_config_generate(spa, tvd, 2060 B_FALSE, VDEV_CONFIG_MISSING); 2061 } 2062 } 2063 2064 if (idx > 0) { 2065 fnvlist_add_nvlist_array(nv, 2066 ZPOOL_CONFIG_CHILDREN, child, idx); 2067 fnvlist_add_nvlist(spa->spa_load_info, 2068 ZPOOL_CONFIG_MISSING_DEVICES, nv); 2069 2070 for (uint64_t i = 0; i < idx; i++) 2071 nvlist_free(child[i]); 2072 } 2073 nvlist_free(nv); 2074 kmem_free(child, rvd->vdev_children * sizeof (char **)); 2075 2076 if (idx > 0) { 2077 spa_load_failed(spa, "some log devices are missing"); 2078 vdev_dbgmsg_print_tree(rvd, 2); 2079 return (SET_ERROR(ENXIO)); 2080 } 2081 } else { 2082 for (uint64_t c = 0; c < rvd->vdev_children; c++) { 2083 vdev_t *tvd = rvd->vdev_child[c]; 2084 2085 if (tvd->vdev_islog && 2086 tvd->vdev_state == VDEV_STATE_CANT_OPEN) { 2087 spa_set_log_state(spa, SPA_LOG_CLEAR); 2088 spa_load_note(spa, "some log devices are " 2089 "missing, ZIL is dropped."); 2090 vdev_dbgmsg_print_tree(rvd, 2); 2091 break; 2092 } 2093 } 2094 } 2095 2096 return (0); 2097 } 2098 2099 /* 2100 * Check for missing log devices 2101 */ 2102 static boolean_t 2103 spa_check_logs(spa_t *spa) 2104 { 2105 boolean_t rv = B_FALSE; 2106 dsl_pool_t *dp = spa_get_dsl(spa); 2107 2108 switch (spa->spa_log_state) { 2109 default: 2110 break; 2111 case SPA_LOG_MISSING: 2112 /* need to recheck in case slog has been restored */ 2113 case SPA_LOG_UNKNOWN: 2114 rv = (dmu_objset_find_dp(dp, dp->dp_root_dir_obj, 2115 zil_check_log_chain, NULL, DS_FIND_CHILDREN) != 0); 2116 if (rv) 2117 spa_set_log_state(spa, SPA_LOG_MISSING); 2118 break; 2119 } 2120 return (rv); 2121 } 2122 2123 /* 2124 * Passivate any log vdevs (note, does not apply to embedded log metaslabs). 2125 */ 2126 static boolean_t 2127 spa_passivate_log(spa_t *spa) 2128 { 2129 vdev_t *rvd = spa->spa_root_vdev; 2130 boolean_t slog_found = B_FALSE; 2131 2132 ASSERT(spa_config_held(spa, SCL_ALLOC, RW_WRITER)); 2133 2134 for (int c = 0; c < rvd->vdev_children; c++) { 2135 vdev_t *tvd = rvd->vdev_child[c]; 2136 2137 if (tvd->vdev_islog) { 2138 ASSERT3P(tvd->vdev_log_mg, ==, NULL); 2139 metaslab_group_passivate(tvd->vdev_mg); 2140 slog_found = B_TRUE; 2141 } 2142 } 2143 2144 return (slog_found); 2145 } 2146 2147 /* 2148 * Activate any log vdevs (note, does not apply to embedded log metaslabs). 
2149 */ 2150 static void 2151 spa_activate_log(spa_t *spa) 2152 { 2153 vdev_t *rvd = spa->spa_root_vdev; 2154 2155 ASSERT(spa_config_held(spa, SCL_ALLOC, RW_WRITER)); 2156 2157 for (int c = 0; c < rvd->vdev_children; c++) { 2158 vdev_t *tvd = rvd->vdev_child[c]; 2159 2160 if (tvd->vdev_islog) { 2161 ASSERT3P(tvd->vdev_log_mg, ==, NULL); 2162 metaslab_group_activate(tvd->vdev_mg); 2163 } 2164 } 2165 } 2166 2167 int 2168 spa_reset_logs(spa_t *spa) 2169 { 2170 int error; 2171 2172 error = dmu_objset_find(spa_name(spa), zil_reset, 2173 NULL, DS_FIND_CHILDREN); 2174 if (error == 0) { 2175 /* 2176 * We successfully offlined the log device, sync out the 2177 * current txg so that the "stubby" block can be removed 2178 * by zil_sync(). 2179 */ 2180 txg_wait_synced(spa->spa_dsl_pool, 0); 2181 } 2182 return (error); 2183 } 2184 2185 static void 2186 spa_aux_check_removed(spa_aux_vdev_t *sav) 2187 { 2188 for (int i = 0; i < sav->sav_count; i++) 2189 spa_check_removed(sav->sav_vdevs[i]); 2190 } 2191 2192 void 2193 spa_claim_notify(zio_t *zio) 2194 { 2195 spa_t *spa = zio->io_spa; 2196 2197 if (zio->io_error) 2198 return; 2199 2200 mutex_enter(&spa->spa_props_lock); /* any mutex will do */ 2201 if (spa->spa_claim_max_txg < zio->io_bp->blk_birth) 2202 spa->spa_claim_max_txg = zio->io_bp->blk_birth; 2203 mutex_exit(&spa->spa_props_lock); 2204 } 2205 2206 typedef struct spa_load_error { 2207 uint64_t sle_meta_count; 2208 uint64_t sle_data_count; 2209 } spa_load_error_t; 2210 2211 static void 2212 spa_load_verify_done(zio_t *zio) 2213 { 2214 blkptr_t *bp = zio->io_bp; 2215 spa_load_error_t *sle = zio->io_private; 2216 dmu_object_type_t type = BP_GET_TYPE(bp); 2217 int error = zio->io_error; 2218 spa_t *spa = zio->io_spa; 2219 2220 abd_free(zio->io_abd); 2221 if (error) { 2222 if ((BP_GET_LEVEL(bp) != 0 || DMU_OT_IS_METADATA(type)) && 2223 type != DMU_OT_INTENT_LOG) 2224 atomic_inc_64(&sle->sle_meta_count); 2225 else 2226 atomic_inc_64(&sle->sle_data_count); 2227 } 2228 2229 mutex_enter(&spa->spa_scrub_lock); 2230 spa->spa_load_verify_bytes -= BP_GET_PSIZE(bp); 2231 cv_broadcast(&spa->spa_scrub_io_cv); 2232 mutex_exit(&spa->spa_scrub_lock); 2233 } 2234 2235 /* 2236 * Maximum number of inflight bytes is the log2 fraction of the arc size. 2237 * By default, we set it to 1/16th of the arc. 2238 */ 2239 int spa_load_verify_shift = 4; 2240 int spa_load_verify_metadata = B_TRUE; 2241 int spa_load_verify_data = B_TRUE; 2242 2243 /*ARGSUSED*/ 2244 static int 2245 spa_load_verify_cb(spa_t *spa, zilog_t *zilog, const blkptr_t *bp, 2246 const zbookmark_phys_t *zb, const dnode_phys_t *dnp, void *arg) 2247 { 2248 if (zb->zb_level == ZB_DNODE_LEVEL || BP_IS_HOLE(bp) || 2249 BP_IS_EMBEDDED(bp) || BP_IS_REDACTED(bp)) 2250 return (0); 2251 /* 2252 * Note: normally this routine will not be called if 2253 * spa_load_verify_metadata is not set. However, it may be useful 2254 * to manually set the flag after the traversal has begun. 
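 *
 * For a concrete sense of the throttle used below (example figures
 * only): with the default spa_load_verify_shift of 4,
 *
 *     maxinflight_bytes = arc_target_bytes() >> 4;
 *
 * i.e. 1/16th of the ARC target, so an 8 GiB ARC target allows roughly
 * 512 MiB of verify I/O in flight before this callback blocks on
 * spa_scrub_io_cv.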
2255 */ 2256 if (!spa_load_verify_metadata) 2257 return (0); 2258 if (!BP_IS_METADATA(bp) && !spa_load_verify_data) 2259 return (0); 2260 2261 uint64_t maxinflight_bytes = 2262 arc_target_bytes() >> spa_load_verify_shift; 2263 zio_t *rio = arg; 2264 size_t size = BP_GET_PSIZE(bp); 2265 2266 mutex_enter(&spa->spa_scrub_lock); 2267 while (spa->spa_load_verify_bytes >= maxinflight_bytes) 2268 cv_wait(&spa->spa_scrub_io_cv, &spa->spa_scrub_lock); 2269 spa->spa_load_verify_bytes += size; 2270 mutex_exit(&spa->spa_scrub_lock); 2271 2272 zio_nowait(zio_read(rio, spa, bp, abd_alloc_for_io(size, B_FALSE), size, 2273 spa_load_verify_done, rio->io_private, ZIO_PRIORITY_SCRUB, 2274 ZIO_FLAG_SPECULATIVE | ZIO_FLAG_CANFAIL | 2275 ZIO_FLAG_SCRUB | ZIO_FLAG_RAW, zb)); 2276 return (0); 2277 } 2278 2279 /* ARGSUSED */ 2280 static int 2281 verify_dataset_name_len(dsl_pool_t *dp, dsl_dataset_t *ds, void *arg) 2282 { 2283 if (dsl_dataset_namelen(ds) >= ZFS_MAX_DATASET_NAME_LEN) 2284 return (SET_ERROR(ENAMETOOLONG)); 2285 2286 return (0); 2287 } 2288 2289 static int 2290 spa_load_verify(spa_t *spa) 2291 { 2292 zio_t *rio; 2293 spa_load_error_t sle = { 0 }; 2294 zpool_load_policy_t policy; 2295 boolean_t verify_ok = B_FALSE; 2296 int error = 0; 2297 2298 zpool_get_load_policy(spa->spa_config, &policy); 2299 2300 if (policy.zlp_rewind & ZPOOL_NEVER_REWIND) 2301 return (0); 2302 2303 dsl_pool_config_enter(spa->spa_dsl_pool, FTAG); 2304 error = dmu_objset_find_dp(spa->spa_dsl_pool, 2305 spa->spa_dsl_pool->dp_root_dir_obj, verify_dataset_name_len, NULL, 2306 DS_FIND_CHILDREN); 2307 dsl_pool_config_exit(spa->spa_dsl_pool, FTAG); 2308 if (error != 0) 2309 return (error); 2310 2311 rio = zio_root(spa, NULL, &sle, 2312 ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE); 2313 2314 if (spa_load_verify_metadata) { 2315 if (spa->spa_extreme_rewind) { 2316 spa_load_note(spa, "performing a complete scan of the " 2317 "pool since extreme rewind is on. 
This may take " 2318 "a very long time.\n (spa_load_verify_data=%u, " 2319 "spa_load_verify_metadata=%u)", 2320 spa_load_verify_data, spa_load_verify_metadata); 2321 } 2322 2323 error = traverse_pool(spa, spa->spa_verify_min_txg, 2324 TRAVERSE_PRE | TRAVERSE_PREFETCH_METADATA | 2325 TRAVERSE_NO_DECRYPT, spa_load_verify_cb, rio); 2326 } 2327 2328 (void) zio_wait(rio); 2329 ASSERT0(spa->spa_load_verify_bytes); 2330 2331 spa->spa_load_meta_errors = sle.sle_meta_count; 2332 spa->spa_load_data_errors = sle.sle_data_count; 2333 2334 if (sle.sle_meta_count != 0 || sle.sle_data_count != 0) { 2335 spa_load_note(spa, "spa_load_verify found %llu metadata errors " 2336 "and %llu data errors", (u_longlong_t)sle.sle_meta_count, 2337 (u_longlong_t)sle.sle_data_count); 2338 } 2339 2340 if (spa_load_verify_dryrun || 2341 (!error && sle.sle_meta_count <= policy.zlp_maxmeta && 2342 sle.sle_data_count <= policy.zlp_maxdata)) { 2343 int64_t loss = 0; 2344 2345 verify_ok = B_TRUE; 2346 spa->spa_load_txg = spa->spa_uberblock.ub_txg; 2347 spa->spa_load_txg_ts = spa->spa_uberblock.ub_timestamp; 2348 2349 loss = spa->spa_last_ubsync_txg_ts - spa->spa_load_txg_ts; 2350 VERIFY(nvlist_add_uint64(spa->spa_load_info, 2351 ZPOOL_CONFIG_LOAD_TIME, spa->spa_load_txg_ts) == 0); 2352 VERIFY(nvlist_add_int64(spa->spa_load_info, 2353 ZPOOL_CONFIG_REWIND_TIME, loss) == 0); 2354 VERIFY(nvlist_add_uint64(spa->spa_load_info, 2355 ZPOOL_CONFIG_LOAD_DATA_ERRORS, sle.sle_data_count) == 0); 2356 } else { 2357 spa->spa_load_max_txg = spa->spa_uberblock.ub_txg; 2358 } 2359 2360 if (spa_load_verify_dryrun) 2361 return (0); 2362 2363 if (error) { 2364 if (error != ENXIO && error != EIO) 2365 error = SET_ERROR(EIO); 2366 return (error); 2367 } 2368 2369 return (verify_ok ? 0 : EIO); 2370 } 2371 2372 /* 2373 * Find a value in the pool props object. 2374 */ 2375 static void 2376 spa_prop_find(spa_t *spa, zpool_prop_t prop, uint64_t *val) 2377 { 2378 (void) zap_lookup(spa->spa_meta_objset, spa->spa_pool_props_object, 2379 zpool_prop_to_name(prop), sizeof (uint64_t), 1, val); 2380 } 2381 2382 /* 2383 * Find a value in the pool directory object. 
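 *
 * Typical usage (as seen later in this file):
 *
 *     error = spa_dir_prop(spa, DMU_POOL_CONFIG,
 *         &spa->spa_config_object, B_TRUE);
 *
 * A missing entry returns ENOENT; log_enoent controls whether that
 * case is reported via spa_load_failed().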
2384 */ 2385 static int 2386 spa_dir_prop(spa_t *spa, const char *name, uint64_t *val, boolean_t log_enoent) 2387 { 2388 int error = zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT, 2389 name, sizeof (uint64_t), 1, val); 2390 2391 if (error != 0 && (error != ENOENT || log_enoent)) { 2392 spa_load_failed(spa, "couldn't get '%s' value in MOS directory " 2393 "[error=%d]", name, error); 2394 } 2395 2396 return (error); 2397 } 2398 2399 static int 2400 spa_vdev_err(vdev_t *vdev, vdev_aux_t aux, int err) 2401 { 2402 vdev_set_state(vdev, B_TRUE, VDEV_STATE_CANT_OPEN, aux); 2403 return (SET_ERROR(err)); 2404 } 2405 2406 boolean_t 2407 spa_livelist_delete_check(spa_t *spa) 2408 { 2409 return (spa->spa_livelists_to_delete != 0); 2410 } 2411 2412 /* ARGSUSED */ 2413 static boolean_t 2414 spa_livelist_delete_cb_check(void *arg, zthr_t *z) 2415 { 2416 spa_t *spa = arg; 2417 return (spa_livelist_delete_check(spa)); 2418 } 2419 2420 static int 2421 delete_blkptr_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx) 2422 { 2423 spa_t *spa = arg; 2424 zio_free(spa, tx->tx_txg, bp); 2425 dsl_dir_diduse_space(tx->tx_pool->dp_free_dir, DD_USED_HEAD, 2426 -bp_get_dsize_sync(spa, bp), 2427 -BP_GET_PSIZE(bp), -BP_GET_UCSIZE(bp), tx); 2428 return (0); 2429 } 2430 2431 static int 2432 dsl_get_next_livelist_obj(objset_t *os, uint64_t zap_obj, uint64_t *llp) 2433 { 2434 int err; 2435 zap_cursor_t zc; 2436 zap_attribute_t za; 2437 zap_cursor_init(&zc, os, zap_obj); 2438 err = zap_cursor_retrieve(&zc, &za); 2439 zap_cursor_fini(&zc); 2440 if (err == 0) 2441 *llp = za.za_first_integer; 2442 return (err); 2443 } 2444 2445 /* 2446 * Components of livelist deletion that must be performed in syncing 2447 * context: freeing block pointers and updating the pool-wide data 2448 * structures to indicate how much work is left to do 2449 */ 2450 typedef struct sublist_delete_arg { 2451 spa_t *spa; 2452 dsl_deadlist_t *ll; 2453 uint64_t key; 2454 bplist_t *to_free; 2455 } sublist_delete_arg_t; 2456 2457 static void 2458 sublist_delete_sync(void *arg, dmu_tx_t *tx) 2459 { 2460 sublist_delete_arg_t *sda = arg; 2461 spa_t *spa = sda->spa; 2462 dsl_deadlist_t *ll = sda->ll; 2463 uint64_t key = sda->key; 2464 bplist_t *to_free = sda->to_free; 2465 2466 bplist_iterate(to_free, delete_blkptr_cb, spa, tx); 2467 dsl_deadlist_remove_entry(ll, key, tx); 2468 } 2469 2470 typedef struct livelist_delete_arg { 2471 spa_t *spa; 2472 uint64_t ll_obj; 2473 uint64_t zap_obj; 2474 } livelist_delete_arg_t; 2475 2476 static void 2477 livelist_delete_sync(void *arg, dmu_tx_t *tx) 2478 { 2479 livelist_delete_arg_t *lda = arg; 2480 spa_t *spa = lda->spa; 2481 uint64_t ll_obj = lda->ll_obj; 2482 uint64_t zap_obj = lda->zap_obj; 2483 objset_t *mos = spa->spa_meta_objset; 2484 uint64_t count; 2485 2486 /* free the livelist and decrement the feature count */ 2487 VERIFY0(zap_remove_int(mos, zap_obj, ll_obj, tx)); 2488 dsl_deadlist_free(mos, ll_obj, tx); 2489 spa_feature_decr(spa, SPA_FEATURE_LIVELIST, tx); 2490 VERIFY0(zap_count(mos, zap_obj, &count)); 2491 if (count == 0) { 2492 /* no more livelists to delete */ 2493 VERIFY0(zap_remove(mos, DMU_POOL_DIRECTORY_OBJECT, 2494 DMU_POOL_DELETED_CLONES, tx)); 2495 VERIFY0(zap_destroy(mos, zap_obj, tx)); 2496 spa->spa_livelists_to_delete = 0; 2497 spa_notify_waiters(spa); 2498 } 2499 } 2500 2501 /* 2502 * Load in the value for the livelist to be removed and open it. Then, 2503 * load its first sublist and determine which block pointers should actually 2504 * be freed. 
Then, call a synctask which performs the actual frees and updates 2505 * the pool-wide livelist data. 2506 */ 2507 /* ARGSUSED */ 2508 static void 2509 spa_livelist_delete_cb(void *arg, zthr_t *z) 2510 { 2511 spa_t *spa = arg; 2512 uint64_t ll_obj = 0, count; 2513 objset_t *mos = spa->spa_meta_objset; 2514 uint64_t zap_obj = spa->spa_livelists_to_delete; 2515 /* 2516 * Determine the next livelist to delete. This function should only 2517 * be called if there is at least one deleted clone. 2518 */ 2519 VERIFY0(dsl_get_next_livelist_obj(mos, zap_obj, &ll_obj)); 2520 VERIFY0(zap_count(mos, ll_obj, &count)); 2521 if (count > 0) { 2522 dsl_deadlist_t *ll; 2523 dsl_deadlist_entry_t *dle; 2524 bplist_t to_free; 2525 ll = kmem_zalloc(sizeof (dsl_deadlist_t), KM_SLEEP); 2526 dsl_deadlist_open(ll, mos, ll_obj); 2527 dle = dsl_deadlist_first(ll); 2528 ASSERT3P(dle, !=, NULL); 2529 bplist_create(&to_free); 2530 int err = dsl_process_sub_livelist(&dle->dle_bpobj, &to_free, 2531 z, NULL); 2532 if (err == 0) { 2533 sublist_delete_arg_t sync_arg = { 2534 .spa = spa, 2535 .ll = ll, 2536 .key = dle->dle_mintxg, 2537 .to_free = &to_free 2538 }; 2539 zfs_dbgmsg("deleting sublist (id %llu) from" 2540 " livelist %llu, %d remaining", 2541 dle->dle_bpobj.bpo_object, ll_obj, count - 1); 2542 VERIFY0(dsl_sync_task(spa_name(spa), NULL, 2543 sublist_delete_sync, &sync_arg, 0, 2544 ZFS_SPACE_CHECK_DESTROY)); 2545 } else { 2546 VERIFY3U(err, ==, EINTR); 2547 } 2548 bplist_clear(&to_free); 2549 bplist_destroy(&to_free); 2550 dsl_deadlist_close(ll); 2551 kmem_free(ll, sizeof (dsl_deadlist_t)); 2552 } else { 2553 livelist_delete_arg_t sync_arg = { 2554 .spa = spa, 2555 .ll_obj = ll_obj, 2556 .zap_obj = zap_obj 2557 }; 2558 zfs_dbgmsg("deletion of livelist %llu completed", ll_obj); 2559 VERIFY0(dsl_sync_task(spa_name(spa), NULL, livelist_delete_sync, 2560 &sync_arg, 0, ZFS_SPACE_CHECK_DESTROY)); 2561 } 2562 } 2563 2564 static void 2565 spa_start_livelist_destroy_thread(spa_t *spa) 2566 { 2567 ASSERT3P(spa->spa_livelist_delete_zthr, ==, NULL); 2568 spa->spa_livelist_delete_zthr = 2569 zthr_create("z_livelist_destroy", 2570 spa_livelist_delete_cb_check, spa_livelist_delete_cb, spa); 2571 } 2572 2573 typedef struct livelist_new_arg { 2574 bplist_t *allocs; 2575 bplist_t *frees; 2576 } livelist_new_arg_t; 2577 2578 static int 2579 livelist_track_new_cb(void *arg, const blkptr_t *bp, boolean_t bp_freed, 2580 dmu_tx_t *tx) 2581 { 2582 ASSERT(tx == NULL); 2583 livelist_new_arg_t *lna = arg; 2584 if (bp_freed) { 2585 bplist_append(lna->frees, bp); 2586 } else { 2587 bplist_append(lna->allocs, bp); 2588 zfs_livelist_condense_new_alloc++; 2589 } 2590 return (0); 2591 } 2592 2593 typedef struct livelist_condense_arg { 2594 spa_t *spa; 2595 bplist_t to_keep; 2596 uint64_t first_size; 2597 uint64_t next_size; 2598 } livelist_condense_arg_t; 2599 2600 static void 2601 spa_livelist_condense_sync(void *arg, dmu_tx_t *tx) 2602 { 2603 livelist_condense_arg_t *lca = arg; 2604 spa_t *spa = lca->spa; 2605 bplist_t new_frees; 2606 dsl_dataset_t *ds = spa->spa_to_condense.ds; 2607 2608 /* Have we been cancelled? */ 2609 if (spa->spa_to_condense.cancelled) { 2610 zfs_livelist_condense_sync_cancel++; 2611 goto out; 2612 } 2613 2614 dsl_deadlist_entry_t *first = spa->spa_to_condense.first; 2615 dsl_deadlist_entry_t *next = spa->spa_to_condense.next; 2616 dsl_deadlist_t *ll = &ds->ds_dir->dd_livelist; 2617 2618 /* 2619 * It's possible that the livelist was changed while the zthr was 2620 * running. 
Therefore, we need to check for new blkptrs in the two
2621 * entries being condensed and continue to track them in the livelist.
2622 * Because of the way we handle remapped blkptrs (see dbuf_remap_impl),
2623 * it's possible that the newly added blkptrs are FREEs or ALLOCs so
2624 * we need to sort them into two different bplists.
2625 */
2626 uint64_t first_obj = first->dle_bpobj.bpo_object;
2627 uint64_t next_obj = next->dle_bpobj.bpo_object;
2628 uint64_t cur_first_size = first->dle_bpobj.bpo_phys->bpo_num_blkptrs;
2629 uint64_t cur_next_size = next->dle_bpobj.bpo_phys->bpo_num_blkptrs;
2630
2631 bplist_create(&new_frees);
2632 livelist_new_arg_t new_bps = {
2633 .allocs = &lca->to_keep,
2634 .frees = &new_frees,
2635 };
2636
2637 if (cur_first_size > lca->first_size) {
2638 VERIFY0(livelist_bpobj_iterate_from_nofree(&first->dle_bpobj,
2639 livelist_track_new_cb, &new_bps, lca->first_size));
2640 }
2641 if (cur_next_size > lca->next_size) {
2642 VERIFY0(livelist_bpobj_iterate_from_nofree(&next->dle_bpobj,
2643 livelist_track_new_cb, &new_bps, lca->next_size));
2644 }
2645
2646 dsl_deadlist_clear_entry(first, ll, tx);
2647 ASSERT(bpobj_is_empty(&first->dle_bpobj));
2648 dsl_deadlist_remove_entry(ll, next->dle_mintxg, tx);
2649
2650 bplist_iterate(&lca->to_keep, dsl_deadlist_insert_alloc_cb, ll, tx);
2651 bplist_iterate(&new_frees, dsl_deadlist_insert_free_cb, ll, tx);
2652 bplist_destroy(&new_frees);
2653
2654 char dsname[ZFS_MAX_DATASET_NAME_LEN];
2655 dsl_dataset_name(ds, dsname);
2656 zfs_dbgmsg("txg %llu condensing livelist of %s (id %llu), bpobj %llu "
2657 "(%llu blkptrs) and bpobj %llu (%llu blkptrs) -> bpobj %llu "
2658 "(%llu blkptrs)", tx->tx_txg, dsname, ds->ds_object, first_obj,
2659 cur_first_size, next_obj, cur_next_size,
2660 first->dle_bpobj.bpo_object,
2661 first->dle_bpobj.bpo_phys->bpo_num_blkptrs);
2662 out:
2663 dmu_buf_rele(ds->ds_dbuf, spa);
2664 spa->spa_to_condense.ds = NULL;
2665 bplist_clear(&lca->to_keep);
2666 bplist_destroy(&lca->to_keep);
2667 kmem_free(lca, sizeof (livelist_condense_arg_t));
2668 spa->spa_to_condense.syncing = B_FALSE;
2669 }
2670
2671 static void
2672 spa_livelist_condense_cb(void *arg, zthr_t *t)
2673 {
2674 while (zfs_livelist_condense_zthr_pause &&
2675 !(zthr_has_waiters(t) || zthr_iscancelled(t)))
2676 delay(1);
2677
2678 spa_t *spa = arg;
2679 dsl_deadlist_entry_t *first = spa->spa_to_condense.first;
2680 dsl_deadlist_entry_t *next = spa->spa_to_condense.next;
2681 uint64_t first_size, next_size;
2682
2683 livelist_condense_arg_t *lca =
2684 kmem_alloc(sizeof (livelist_condense_arg_t), KM_SLEEP);
2685 bplist_create(&lca->to_keep);
2686
2687 /*
2688 * Process the livelists (matching FREEs and ALLOCs) in open context
2689 * so we have minimal work in syncing context to condense.
2690 *
2691 * We save bpobj sizes (first_size and next_size) to use later in
2692 * syncing context to determine if entries were added to these sublists
2693 * while in open context. This is possible because the clone is still
2694 * active and open for normal writes and we want to make sure the new,
2695 * unprocessed blockpointers are inserted into the livelist normally.
2696 *
2697 * Note that dsl_process_sub_livelist() both stores the size (number
2698 * of block pointers) and iterates over them while the bpobj's lock is
2699 * held, so the sizes returned to us are consistent with what was
2700 * actually processed.
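 *
 * In outline (illustrative sketch of the hand-off described above):
 *
 *     open context:    first_size = blkptrs processed in 'first'
 *                      next_size  = blkptrs processed in 'next'
 *     sync context:    if either sublist has grown past the recorded
 *                      size, pick up only the new entries with
 *                      livelist_bpobj_iterate_from_nofree() starting
 *                      at that offset (see spa_livelist_condense_sync()
 *                      above).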
2701 */ 2702 int err = dsl_process_sub_livelist(&first->dle_bpobj, &lca->to_keep, t, 2703 &first_size); 2704 if (err == 0) 2705 err = dsl_process_sub_livelist(&next->dle_bpobj, &lca->to_keep, 2706 t, &next_size); 2707 2708 if (err == 0) { 2709 while (zfs_livelist_condense_sync_pause && 2710 !(zthr_has_waiters(t) || zthr_iscancelled(t))) 2711 delay(1); 2712 2713 dmu_tx_t *tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir); 2714 dmu_tx_mark_netfree(tx); 2715 dmu_tx_hold_space(tx, 1); 2716 err = dmu_tx_assign(tx, TXG_NOWAIT | TXG_NOTHROTTLE); 2717 if (err == 0) { 2718 /* 2719 * Prevent the condense zthr restarting before 2720 * the synctask completes. 2721 */ 2722 spa->spa_to_condense.syncing = B_TRUE; 2723 lca->spa = spa; 2724 lca->first_size = first_size; 2725 lca->next_size = next_size; 2726 dsl_sync_task_nowait(spa_get_dsl(spa), 2727 spa_livelist_condense_sync, lca, tx); 2728 dmu_tx_commit(tx); 2729 return; 2730 } 2731 } 2732 /* 2733 * Condensing can not continue: either it was externally stopped or 2734 * we were unable to assign to a tx because the pool has run out of 2735 * space. In the second case, we'll just end up trying to condense 2736 * again in a later txg. 2737 */ 2738 ASSERT(err != 0); 2739 bplist_clear(&lca->to_keep); 2740 bplist_destroy(&lca->to_keep); 2741 kmem_free(lca, sizeof (livelist_condense_arg_t)); 2742 dmu_buf_rele(spa->spa_to_condense.ds->ds_dbuf, spa); 2743 spa->spa_to_condense.ds = NULL; 2744 if (err == EINTR) 2745 zfs_livelist_condense_zthr_cancel++; 2746 } 2747 2748 /* ARGSUSED */ 2749 /* 2750 * Check that there is something to condense but that a condense is not 2751 * already in progress and that condensing has not been cancelled. 2752 */ 2753 static boolean_t 2754 spa_livelist_condense_cb_check(void *arg, zthr_t *z) 2755 { 2756 spa_t *spa = arg; 2757 if ((spa->spa_to_condense.ds != NULL) && 2758 (spa->spa_to_condense.syncing == B_FALSE) && 2759 (spa->spa_to_condense.cancelled == B_FALSE)) { 2760 return (B_TRUE); 2761 } 2762 return (B_FALSE); 2763 } 2764 2765 static void 2766 spa_start_livelist_condensing_thread(spa_t *spa) 2767 { 2768 spa->spa_to_condense.ds = NULL; 2769 spa->spa_to_condense.first = NULL; 2770 spa->spa_to_condense.next = NULL; 2771 spa->spa_to_condense.syncing = B_FALSE; 2772 spa->spa_to_condense.cancelled = B_FALSE; 2773 2774 ASSERT3P(spa->spa_livelist_condense_zthr, ==, NULL); 2775 spa->spa_livelist_condense_zthr = 2776 zthr_create("z_livelist_condense", 2777 spa_livelist_condense_cb_check, 2778 spa_livelist_condense_cb, spa); 2779 } 2780 2781 static void 2782 spa_spawn_aux_threads(spa_t *spa) 2783 { 2784 ASSERT(spa_writeable(spa)); 2785 2786 ASSERT(MUTEX_HELD(&spa_namespace_lock)); 2787 2788 spa_start_indirect_condensing_thread(spa); 2789 spa_start_livelist_destroy_thread(spa); 2790 spa_start_livelist_condensing_thread(spa); 2791 2792 ASSERT3P(spa->spa_checkpoint_discard_zthr, ==, NULL); 2793 spa->spa_checkpoint_discard_zthr = 2794 zthr_create("z_checkpoint_discard", 2795 spa_checkpoint_discard_thread_check, 2796 spa_checkpoint_discard_thread, spa); 2797 } 2798 2799 /* 2800 * Fix up config after a partly-completed split. This is done with the 2801 * ZPOOL_CONFIG_SPLIT nvlist. Both the splitting pool and the split-off 2802 * pool have that entry in their config, but only the splitting one contains 2803 * a list of all the guids of the vdevs that are being split off. 2804 * 2805 * This function determines what to do with that list: either rejoin 2806 * all the disks to the pool, or complete the splitting process. 
To attempt 2807 * the rejoin, each disk that is offlined is marked online again, and 2808 * we do a reopen() call. If the vdev label for every disk that was 2809 * marked online indicates it was successfully split off (VDEV_AUX_SPLIT_POOL) 2810 * then we call vdev_split() on each disk, and complete the split. 2811 * 2812 * Otherwise we leave the config alone, with all the vdevs in place in 2813 * the original pool. 2814 */ 2815 static void 2816 spa_try_repair(spa_t *spa, nvlist_t *config) 2817 { 2818 uint_t extracted; 2819 uint64_t *glist; 2820 uint_t i, gcount; 2821 nvlist_t *nvl; 2822 vdev_t **vd; 2823 boolean_t attempt_reopen; 2824 2825 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_SPLIT, &nvl) != 0) 2826 return; 2827 2828 /* check that the config is complete */ 2829 if (nvlist_lookup_uint64_array(nvl, ZPOOL_CONFIG_SPLIT_LIST, 2830 &glist, &gcount) != 0) 2831 return; 2832 2833 vd = kmem_zalloc(gcount * sizeof (vdev_t *), KM_SLEEP); 2834 2835 /* attempt to online all the vdevs & validate */ 2836 attempt_reopen = B_TRUE; 2837 for (i = 0; i < gcount; i++) { 2838 if (glist[i] == 0) /* vdev is hole */ 2839 continue; 2840 2841 vd[i] = spa_lookup_by_guid(spa, glist[i], B_FALSE); 2842 if (vd[i] == NULL) { 2843 /* 2844 * Don't bother attempting to reopen the disks; 2845 * just do the split. 2846 */ 2847 attempt_reopen = B_FALSE; 2848 } else { 2849 /* attempt to re-online it */ 2850 vd[i]->vdev_offline = B_FALSE; 2851 } 2852 } 2853 2854 if (attempt_reopen) { 2855 vdev_reopen(spa->spa_root_vdev); 2856 2857 /* check each device to see what state it's in */ 2858 for (extracted = 0, i = 0; i < gcount; i++) { 2859 if (vd[i] != NULL && 2860 vd[i]->vdev_stat.vs_aux != VDEV_AUX_SPLIT_POOL) 2861 break; 2862 ++extracted; 2863 } 2864 } 2865 2866 /* 2867 * If every disk has been moved to the new pool, or if we never 2868 * even attempted to look at them, then we split them off for 2869 * good. 2870 */ 2871 if (!attempt_reopen || gcount == extracted) { 2872 for (i = 0; i < gcount; i++) 2873 if (vd[i] != NULL) 2874 vdev_split(vd[i]); 2875 vdev_reopen(spa->spa_root_vdev); 2876 } 2877 2878 kmem_free(vd, gcount * sizeof (vdev_t *)); 2879 } 2880 2881 static int 2882 spa_load(spa_t *spa, spa_load_state_t state, spa_import_type_t type) 2883 { 2884 char *ereport = FM_EREPORT_ZFS_POOL; 2885 int error; 2886 2887 spa->spa_load_state = state; 2888 (void) spa_import_progress_set_state(spa_guid(spa), 2889 spa_load_state(spa)); 2890 2891 gethrestime(&spa->spa_loaded_ts); 2892 error = spa_load_impl(spa, type, &ereport); 2893 2894 /* 2895 * Don't count references from objsets that are already closed 2896 * and are making their way through the eviction process. 2897 */ 2898 spa_evicting_os_wait(spa); 2899 spa->spa_minref = zfs_refcount_count(&spa->spa_refcount); 2900 if (error) { 2901 if (error != EEXIST) { 2902 spa->spa_loaded_ts.tv_sec = 0; 2903 spa->spa_loaded_ts.tv_nsec = 0; 2904 } 2905 if (error != EBADF) { 2906 (void) zfs_ereport_post(ereport, spa, 2907 NULL, NULL, NULL, 0); 2908 } 2909 } 2910 spa->spa_load_state = error ? SPA_LOAD_ERROR : SPA_LOAD_NONE; 2911 spa->spa_ena = 0; 2912 2913 (void) spa_import_progress_set_state(spa_guid(spa), 2914 spa_load_state(spa)); 2915 2916 return (error); 2917 } 2918 2919 #ifdef ZFS_DEBUG 2920 /* 2921 * Count the number of per-vdev ZAPs associated with all of the vdevs in the 2922 * vdev tree rooted in the given vd, and ensure that each ZAP is present in the 2923 * spa's per-vdev ZAP list. 
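 *
 * As a hypothetical cross-check sketch (caller side, debug builds):
 *
 *     uint64_t all_vdev_zap_entry_count;
 *     ASSERT0(zap_count(spa->spa_meta_objset, spa->spa_all_vdev_zaps,
 *         &all_vdev_zap_entry_count));
 *     ASSERT3U(vdev_count_verify_zaps(spa->spa_root_vdev), ==,
 *         all_vdev_zap_entry_count);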
2924 */ 2925 static uint64_t 2926 vdev_count_verify_zaps(vdev_t *vd) 2927 { 2928 spa_t *spa = vd->vdev_spa; 2929 uint64_t total = 0; 2930 2931 if (vd->vdev_top_zap != 0) { 2932 total++; 2933 ASSERT0(zap_lookup_int(spa->spa_meta_objset, 2934 spa->spa_all_vdev_zaps, vd->vdev_top_zap)); 2935 } 2936 if (vd->vdev_leaf_zap != 0) { 2937 total++; 2938 ASSERT0(zap_lookup_int(spa->spa_meta_objset, 2939 spa->spa_all_vdev_zaps, vd->vdev_leaf_zap)); 2940 } 2941 2942 for (uint64_t i = 0; i < vd->vdev_children; i++) { 2943 total += vdev_count_verify_zaps(vd->vdev_child[i]); 2944 } 2945 2946 return (total); 2947 } 2948 #endif 2949 2950 /* 2951 * Determine whether the activity check is required. 2952 */ 2953 static boolean_t 2954 spa_activity_check_required(spa_t *spa, uberblock_t *ub, nvlist_t *label, 2955 nvlist_t *config) 2956 { 2957 uint64_t state = 0; 2958 uint64_t hostid = 0; 2959 uint64_t tryconfig_txg = 0; 2960 uint64_t tryconfig_timestamp = 0; 2961 uint16_t tryconfig_mmp_seq = 0; 2962 nvlist_t *nvinfo; 2963 2964 if (nvlist_exists(config, ZPOOL_CONFIG_LOAD_INFO)) { 2965 nvinfo = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO); 2966 (void) nvlist_lookup_uint64(nvinfo, ZPOOL_CONFIG_MMP_TXG, 2967 &tryconfig_txg); 2968 (void) nvlist_lookup_uint64(config, ZPOOL_CONFIG_TIMESTAMP, 2969 &tryconfig_timestamp); 2970 (void) nvlist_lookup_uint16(nvinfo, ZPOOL_CONFIG_MMP_SEQ, 2971 &tryconfig_mmp_seq); 2972 } 2973 2974 (void) nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE, &state); 2975 2976 /* 2977 * Disable the MMP activity check - This is used by zdb which 2978 * is intended to be used on potentially active pools. 2979 */ 2980 if (spa->spa_import_flags & ZFS_IMPORT_SKIP_MMP) 2981 return (B_FALSE); 2982 2983 /* 2984 * Skip the activity check when the MMP feature is disabled. 2985 */ 2986 if (ub->ub_mmp_magic == MMP_MAGIC && ub->ub_mmp_delay == 0) 2987 return (B_FALSE); 2988 2989 /* 2990 * If the tryconfig_ values are nonzero, they are the results of an 2991 * earlier tryimport. If they all match the uberblock we just found, 2992 * then the pool has not changed and we return false so we do not test 2993 * a second time. 2994 */ 2995 if (tryconfig_txg && tryconfig_txg == ub->ub_txg && 2996 tryconfig_timestamp && tryconfig_timestamp == ub->ub_timestamp && 2997 tryconfig_mmp_seq && tryconfig_mmp_seq == 2998 (MMP_SEQ_VALID(ub) ? MMP_SEQ(ub) : 0)) 2999 return (B_FALSE); 3000 3001 /* 3002 * Allow the activity check to be skipped when importing the pool 3003 * on the same host which last imported it. Since the hostid from 3004 * configuration may be stale use the one read from the label. 3005 */ 3006 if (nvlist_exists(label, ZPOOL_CONFIG_HOSTID)) 3007 hostid = fnvlist_lookup_uint64(label, ZPOOL_CONFIG_HOSTID); 3008 3009 if (hostid == spa_get_hostid(spa)) 3010 return (B_FALSE); 3011 3012 /* 3013 * Skip the activity test when the pool was cleanly exported. 3014 */ 3015 if (state != POOL_STATE_ACTIVE) 3016 return (B_FALSE); 3017 3018 return (B_TRUE); 3019 } 3020 3021 /* 3022 * Nanoseconds the activity check must watch for changes on-disk. 
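 *
 * Worked example (assumed tunable values, for illustration only): with
 * zfs_multihost_import_intervals = 20 and zfs_multihost_interval =
 * 1000 ms, the baseline computed below is
 *
 *     import_delay = MAX(NANOSEC, 20 * 1 s) = 20 s
 *
 * and if the on-disk uberblock advertises MMP_FAIL_INT(ub) = 10 with
 * MMP_INTERVAL(ub) = 1000 ms, the first branch instead yields
 *
 *     import_delay = 10 * 1 s * MMP_IMPORT_SAFETY_FACTOR / 100
 *
 * i.e. 10 seconds scaled up by the safety factor.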
3023 */ 3024 static uint64_t 3025 spa_activity_check_duration(spa_t *spa, uberblock_t *ub) 3026 { 3027 uint64_t import_intervals = MAX(zfs_multihost_import_intervals, 1); 3028 uint64_t multihost_interval = MSEC2NSEC( 3029 MMP_INTERVAL_OK(zfs_multihost_interval)); 3030 uint64_t import_delay = MAX(NANOSEC, import_intervals * 3031 multihost_interval); 3032 3033 /* 3034 * Local tunables determine a minimum duration except for the case 3035 * where we know when the remote host will suspend the pool if MMP 3036 * writes do not land. 3037 * 3038 * See Big Theory comment at the top of mmp.c for the reasoning behind 3039 * these cases and times. 3040 */ 3041 3042 ASSERT(MMP_IMPORT_SAFETY_FACTOR >= 100); 3043 3044 if (MMP_INTERVAL_VALID(ub) && MMP_FAIL_INT_VALID(ub) && 3045 MMP_FAIL_INT(ub) > 0) { 3046 3047 /* MMP on remote host will suspend pool after failed writes */ 3048 import_delay = MMP_FAIL_INT(ub) * MSEC2NSEC(MMP_INTERVAL(ub)) * 3049 MMP_IMPORT_SAFETY_FACTOR / 100; 3050 3051 zfs_dbgmsg("fail_intvals>0 import_delay=%llu ub_mmp " 3052 "mmp_fails=%llu ub_mmp mmp_interval=%llu " 3053 "import_intervals=%u", import_delay, MMP_FAIL_INT(ub), 3054 MMP_INTERVAL(ub), import_intervals); 3055 3056 } else if (MMP_INTERVAL_VALID(ub) && MMP_FAIL_INT_VALID(ub) && 3057 MMP_FAIL_INT(ub) == 0) { 3058 3059 /* MMP on remote host will never suspend pool */ 3060 import_delay = MAX(import_delay, (MSEC2NSEC(MMP_INTERVAL(ub)) + 3061 ub->ub_mmp_delay) * import_intervals); 3062 3063 zfs_dbgmsg("fail_intvals=0 import_delay=%llu ub_mmp " 3064 "mmp_interval=%llu ub_mmp_delay=%llu " 3065 "import_intervals=%u", import_delay, MMP_INTERVAL(ub), 3066 ub->ub_mmp_delay, import_intervals); 3067 3068 } else if (MMP_VALID(ub)) { 3069 /* 3070 * zfs-0.7 compatibility case 3071 */ 3072 3073 import_delay = MAX(import_delay, (multihost_interval + 3074 ub->ub_mmp_delay) * import_intervals); 3075 3076 zfs_dbgmsg("import_delay=%llu ub_mmp_delay=%llu " 3077 "import_intervals=%u leaves=%u", import_delay, 3078 ub->ub_mmp_delay, import_intervals, 3079 vdev_count_leaves(spa)); 3080 } else { 3081 /* Using local tunings is the only reasonable option */ 3082 zfs_dbgmsg("pool last imported on non-MMP aware " 3083 "host using import_delay=%llu multihost_interval=%llu " 3084 "import_intervals=%u", import_delay, multihost_interval, 3085 import_intervals); 3086 } 3087 3088 return (import_delay); 3089 } 3090 3091 /* 3092 * Perform the import activity check. If the user canceled the import or 3093 * we detected activity then fail. 3094 */ 3095 static int 3096 spa_activity_check(spa_t *spa, uberblock_t *ub, nvlist_t *config) 3097 { 3098 uint64_t txg = ub->ub_txg; 3099 uint64_t timestamp = ub->ub_timestamp; 3100 uint64_t mmp_config = ub->ub_mmp_config; 3101 uint16_t mmp_seq = MMP_SEQ_VALID(ub) ? MMP_SEQ(ub) : 0; 3102 uint64_t import_delay; 3103 hrtime_t import_expire; 3104 nvlist_t *mmp_label = NULL; 3105 vdev_t *rvd = spa->spa_root_vdev; 3106 kcondvar_t cv; 3107 kmutex_t mtx; 3108 int error = 0; 3109 3110 cv_init(&cv, NULL, CV_DEFAULT, NULL); 3111 mutex_init(&mtx, NULL, MUTEX_DEFAULT, NULL); 3112 mutex_enter(&mtx); 3113 3114 /* 3115 * If ZPOOL_CONFIG_MMP_TXG is present an activity check was performed 3116 * during the earlier tryimport. If the txg recorded there is 0 then 3117 * the pool is known to be active on another host. 3118 * 3119 * Otherwise, the pool might be in use on another host. Check for 3120 * changes in the uberblocks on disk if necessary. 
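 *
 * The wait below is also jittered; since spa_get_random(250) returns a
 * value in [0, 250), the line
 *
 *     import_delay += import_delay * spa_get_random(250) / 1000;
 *
 * stretches, for example, a 20 s delay to anywhere between 20 s and
 * ~25 s, reducing the chance that two hosts finish their checks at the
 * same moment.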
3121 */ 3122 if (nvlist_exists(config, ZPOOL_CONFIG_LOAD_INFO)) { 3123 nvlist_t *nvinfo = fnvlist_lookup_nvlist(config, 3124 ZPOOL_CONFIG_LOAD_INFO); 3125 3126 if (nvlist_exists(nvinfo, ZPOOL_CONFIG_MMP_TXG) && 3127 fnvlist_lookup_uint64(nvinfo, ZPOOL_CONFIG_MMP_TXG) == 0) { 3128 vdev_uberblock_load(rvd, ub, &mmp_label); 3129 error = SET_ERROR(EREMOTEIO); 3130 goto out; 3131 } 3132 } 3133 3134 import_delay = spa_activity_check_duration(spa, ub); 3135 3136 /* Add a small random factor in case of simultaneous imports (0-25%) */ 3137 import_delay += import_delay * spa_get_random(250) / 1000; 3138 3139 import_expire = gethrtime() + import_delay; 3140 3141 while (gethrtime() < import_expire) { 3142 (void) spa_import_progress_set_mmp_check(spa_guid(spa), 3143 NSEC2SEC(import_expire - gethrtime())); 3144 3145 vdev_uberblock_load(rvd, ub, &mmp_label); 3146 3147 if (txg != ub->ub_txg || timestamp != ub->ub_timestamp || 3148 mmp_seq != (MMP_SEQ_VALID(ub) ? MMP_SEQ(ub) : 0)) { 3149 zfs_dbgmsg("multihost activity detected " 3150 "txg %llu ub_txg %llu " 3151 "timestamp %llu ub_timestamp %llu " 3152 "mmp_config %#llx ub_mmp_config %#llx", 3153 txg, ub->ub_txg, timestamp, ub->ub_timestamp, 3154 mmp_config, ub->ub_mmp_config); 3155 3156 error = SET_ERROR(EREMOTEIO); 3157 break; 3158 } 3159 3160 if (mmp_label) { 3161 nvlist_free(mmp_label); 3162 mmp_label = NULL; 3163 } 3164 3165 error = cv_timedwait_sig(&cv, &mtx, ddi_get_lbolt() + hz); 3166 if (error != -1) { 3167 error = SET_ERROR(EINTR); 3168 break; 3169 } 3170 error = 0; 3171 } 3172 3173 out: 3174 mutex_exit(&mtx); 3175 mutex_destroy(&mtx); 3176 cv_destroy(&cv); 3177 3178 /* 3179 * If the pool is determined to be active store the status in the 3180 * spa->spa_load_info nvlist. If the remote hostname or hostid are 3181 * available from configuration read from disk store them as well. 3182 * This allows 'zpool import' to generate a more useful message. 
3183 * 3184 * ZPOOL_CONFIG_MMP_STATE - observed pool status (mandatory) 3185 * ZPOOL_CONFIG_MMP_HOSTNAME - hostname from the active pool 3186 * ZPOOL_CONFIG_MMP_HOSTID - hostid from the active pool 3187 */ 3188 if (error == EREMOTEIO) { 3189 char *hostname = "<unknown>"; 3190 uint64_t hostid = 0; 3191 3192 if (mmp_label) { 3193 if (nvlist_exists(mmp_label, ZPOOL_CONFIG_HOSTNAME)) { 3194 hostname = fnvlist_lookup_string(mmp_label, 3195 ZPOOL_CONFIG_HOSTNAME); 3196 fnvlist_add_string(spa->spa_load_info, 3197 ZPOOL_CONFIG_MMP_HOSTNAME, hostname); 3198 } 3199 3200 if (nvlist_exists(mmp_label, ZPOOL_CONFIG_HOSTID)) { 3201 hostid = fnvlist_lookup_uint64(mmp_label, 3202 ZPOOL_CONFIG_HOSTID); 3203 fnvlist_add_uint64(spa->spa_load_info, 3204 ZPOOL_CONFIG_MMP_HOSTID, hostid); 3205 } 3206 } 3207 3208 fnvlist_add_uint64(spa->spa_load_info, 3209 ZPOOL_CONFIG_MMP_STATE, MMP_STATE_ACTIVE); 3210 fnvlist_add_uint64(spa->spa_load_info, 3211 ZPOOL_CONFIG_MMP_TXG, 0); 3212 3213 error = spa_vdev_err(rvd, VDEV_AUX_ACTIVE, EREMOTEIO); 3214 } 3215 3216 if (mmp_label) 3217 nvlist_free(mmp_label); 3218 3219 return (error); 3220 } 3221 3222 static int 3223 spa_verify_host(spa_t *spa, nvlist_t *mos_config) 3224 { 3225 uint64_t hostid; 3226 char *hostname; 3227 uint64_t myhostid = 0; 3228 3229 if (!spa_is_root(spa) && nvlist_lookup_uint64(mos_config, 3230 ZPOOL_CONFIG_HOSTID, &hostid) == 0) { 3231 hostname = fnvlist_lookup_string(mos_config, 3232 ZPOOL_CONFIG_HOSTNAME); 3233 3234 myhostid = zone_get_hostid(NULL); 3235 3236 if (hostid != 0 && myhostid != 0 && hostid != myhostid) { 3237 cmn_err(CE_WARN, "pool '%s' could not be " 3238 "loaded as it was last accessed by " 3239 "another system (host: %s hostid: 0x%llx). " 3240 "See: https://openzfs.github.io/openzfs-docs/msg/" 3241 "ZFS-8000-EY", 3242 spa_name(spa), hostname, (u_longlong_t)hostid); 3243 spa_load_failed(spa, "hostid verification failed: pool " 3244 "last accessed by host: %s (hostid: 0x%llx)", 3245 hostname, (u_longlong_t)hostid); 3246 return (SET_ERROR(EBADF)); 3247 } 3248 } 3249 3250 return (0); 3251 } 3252 3253 static int 3254 spa_ld_parse_config(spa_t *spa, spa_import_type_t type) 3255 { 3256 int error = 0; 3257 nvlist_t *nvtree, *nvl, *config = spa->spa_config; 3258 int parse; 3259 vdev_t *rvd; 3260 uint64_t pool_guid; 3261 char *comment; 3262 char *compatibility; 3263 3264 /* 3265 * Versioning wasn't explicitly added to the label until later, so if 3266 * it's not present treat it as the initial version. 3267 */ 3268 if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION, 3269 &spa->spa_ubsync.ub_version) != 0) 3270 spa->spa_ubsync.ub_version = SPA_VERSION_INITIAL; 3271 3272 if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID, &pool_guid)) { 3273 spa_load_failed(spa, "invalid config provided: '%s' missing", 3274 ZPOOL_CONFIG_POOL_GUID); 3275 return (SET_ERROR(EINVAL)); 3276 } 3277 3278 /* 3279 * If we are doing an import, ensure that the pool is not already 3280 * imported by checking if its pool guid already exists in the 3281 * spa namespace. 3282 * 3283 * The only case that we allow an already imported pool to be 3284 * imported again, is when the pool is checkpointed and we want to 3285 * look at its checkpointed state from userland tools like zdb. 
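 *
 * For reference, a sketch of what this function consumes from the
 * caller-provided config (approximate, not exhaustive):
 *
 *     config = {
 *         ZPOOL_CONFIG_POOL_GUID = <guid>      (required, else EINVAL)
 *         ZPOOL_CONFIG_VDEV_TREE = { ... }     (required, else EINVAL)
 *         ZPOOL_CONFIG_VERSION   = <version>   (optional, defaults to
 *                                               SPA_VERSION_INITIAL)
 *         ZPOOL_CONFIG_POOL_TXG, ZPOOL_CONFIG_COMMENT,
 *         ZPOOL_CONFIG_COMPATIBILITY, ZPOOL_CONFIG_SPLIT   (optional)
 *     }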
3286 */ 3287 #ifdef _KERNEL 3288 if ((spa->spa_load_state == SPA_LOAD_IMPORT || 3289 spa->spa_load_state == SPA_LOAD_TRYIMPORT) && 3290 spa_guid_exists(pool_guid, 0)) { 3291 #else 3292 if ((spa->spa_load_state == SPA_LOAD_IMPORT || 3293 spa->spa_load_state == SPA_LOAD_TRYIMPORT) && 3294 spa_guid_exists(pool_guid, 0) && 3295 !spa_importing_readonly_checkpoint(spa)) { 3296 #endif 3297 spa_load_failed(spa, "a pool with guid %llu is already open", 3298 (u_longlong_t)pool_guid); 3299 return (SET_ERROR(EEXIST)); 3300 } 3301 3302 spa->spa_config_guid = pool_guid; 3303 3304 nvlist_free(spa->spa_load_info); 3305 spa->spa_load_info = fnvlist_alloc(); 3306 3307 ASSERT(spa->spa_comment == NULL); 3308 if (nvlist_lookup_string(config, ZPOOL_CONFIG_COMMENT, &comment) == 0) 3309 spa->spa_comment = spa_strdup(comment); 3310 3311 ASSERT(spa->spa_compatibility == NULL); 3312 if (nvlist_lookup_string(config, ZPOOL_CONFIG_COMPATIBILITY, 3313 &compatibility) == 0) 3314 spa->spa_compatibility = spa_strdup(compatibility); 3315 3316 (void) nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_TXG, 3317 &spa->spa_config_txg); 3318 3319 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_SPLIT, &nvl) == 0) 3320 spa->spa_config_splitting = fnvlist_dup(nvl); 3321 3322 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &nvtree)) { 3323 spa_load_failed(spa, "invalid config provided: '%s' missing", 3324 ZPOOL_CONFIG_VDEV_TREE); 3325 return (SET_ERROR(EINVAL)); 3326 } 3327 3328 /* 3329 * Create "The Godfather" zio to hold all async IOs 3330 */ 3331 spa->spa_async_zio_root = kmem_alloc(max_ncpus * sizeof (void *), 3332 KM_SLEEP); 3333 for (int i = 0; i < max_ncpus; i++) { 3334 spa->spa_async_zio_root[i] = zio_root(spa, NULL, NULL, 3335 ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE | 3336 ZIO_FLAG_GODFATHER); 3337 } 3338 3339 /* 3340 * Parse the configuration into a vdev tree. We explicitly set the 3341 * value that will be returned by spa_version() since parsing the 3342 * configuration requires knowing the version number. 3343 */ 3344 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 3345 parse = (type == SPA_IMPORT_EXISTING ? 3346 VDEV_ALLOC_LOAD : VDEV_ALLOC_SPLIT); 3347 error = spa_config_parse(spa, &rvd, nvtree, NULL, 0, parse); 3348 spa_config_exit(spa, SCL_ALL, FTAG); 3349 3350 if (error != 0) { 3351 spa_load_failed(spa, "unable to parse config [error=%d]", 3352 error); 3353 return (error); 3354 } 3355 3356 ASSERT(spa->spa_root_vdev == rvd); 3357 ASSERT3U(spa->spa_min_ashift, >=, SPA_MINBLOCKSHIFT); 3358 ASSERT3U(spa->spa_max_ashift, <=, SPA_MAXBLOCKSHIFT); 3359 3360 if (type != SPA_IMPORT_ASSEMBLE) { 3361 ASSERT(spa_guid(spa) == pool_guid); 3362 } 3363 3364 return (0); 3365 } 3366 3367 /* 3368 * Recursively open all vdevs in the vdev tree. This function is called twice: 3369 * first with the untrusted config, then with the trusted config. 3370 */ 3371 static int 3372 spa_ld_open_vdevs(spa_t *spa) 3373 { 3374 int error = 0; 3375 3376 /* 3377 * spa_missing_tvds_allowed defines how many top-level vdevs can be 3378 * missing/unopenable for the root vdev to be still considered openable. 
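 *
 * Summary of the selection below (for illustration):
 *
 *     trusted config                -> zfs_max_missing_tvds
 *     untrusted, from cachefile     -> zfs_max_missing_tvds_cachefile
 *     untrusted, from device scan   -> zfs_max_missing_tvds_scan
 *     any other source              -> 0
 *
 * with zfs_max_missing_tvds then applied as a floor in every case.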
3379 */ 3380 if (spa->spa_trust_config) { 3381 spa->spa_missing_tvds_allowed = zfs_max_missing_tvds; 3382 } else if (spa->spa_config_source == SPA_CONFIG_SRC_CACHEFILE) { 3383 spa->spa_missing_tvds_allowed = zfs_max_missing_tvds_cachefile; 3384 } else if (spa->spa_config_source == SPA_CONFIG_SRC_SCAN) { 3385 spa->spa_missing_tvds_allowed = zfs_max_missing_tvds_scan; 3386 } else { 3387 spa->spa_missing_tvds_allowed = 0; 3388 } 3389 3390 spa->spa_missing_tvds_allowed = 3391 MAX(zfs_max_missing_tvds, spa->spa_missing_tvds_allowed); 3392 3393 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 3394 error = vdev_open(spa->spa_root_vdev); 3395 spa_config_exit(spa, SCL_ALL, FTAG); 3396 3397 if (spa->spa_missing_tvds != 0) { 3398 spa_load_note(spa, "vdev tree has %lld missing top-level " 3399 "vdevs.", (u_longlong_t)spa->spa_missing_tvds); 3400 if (spa->spa_trust_config && (spa->spa_mode & SPA_MODE_WRITE)) { 3401 /* 3402 * Although theoretically we could allow users to open 3403 * incomplete pools in RW mode, we'd need to add a lot 3404 * of extra logic (e.g. adjust pool space to account 3405 * for missing vdevs). 3406 * This limitation also prevents users from accidentally 3407 * opening the pool in RW mode during data recovery and 3408 * damaging it further. 3409 */ 3410 spa_load_note(spa, "pools with missing top-level " 3411 "vdevs can only be opened in read-only mode."); 3412 error = SET_ERROR(ENXIO); 3413 } else { 3414 spa_load_note(spa, "current settings allow for maximum " 3415 "%lld missing top-level vdevs at this stage.", 3416 (u_longlong_t)spa->spa_missing_tvds_allowed); 3417 } 3418 } 3419 if (error != 0) { 3420 spa_load_failed(spa, "unable to open vdev tree [error=%d]", 3421 error); 3422 } 3423 if (spa->spa_missing_tvds != 0 || error != 0) 3424 vdev_dbgmsg_print_tree(spa->spa_root_vdev, 2); 3425 3426 return (error); 3427 } 3428 3429 /* 3430 * We need to validate the vdev labels against the configuration that 3431 * we have in hand. This function is called twice: first with an untrusted 3432 * config, then with a trusted config. The validation is more strict when the 3433 * config is trusted. 3434 */ 3435 static int 3436 spa_ld_validate_vdevs(spa_t *spa) 3437 { 3438 int error = 0; 3439 vdev_t *rvd = spa->spa_root_vdev; 3440 3441 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 3442 error = vdev_validate(rvd); 3443 spa_config_exit(spa, SCL_ALL, FTAG); 3444 3445 if (error != 0) { 3446 spa_load_failed(spa, "vdev_validate failed [error=%d]", error); 3447 return (error); 3448 } 3449 3450 if (rvd->vdev_state <= VDEV_STATE_CANT_OPEN) { 3451 spa_load_failed(spa, "cannot open vdev tree after invalidating " 3452 "some vdevs"); 3453 vdev_dbgmsg_print_tree(rvd, 2); 3454 return (SET_ERROR(ENXIO)); 3455 } 3456 3457 return (0); 3458 } 3459 3460 static void 3461 spa_ld_select_uberblock_done(spa_t *spa, uberblock_t *ub) 3462 { 3463 spa->spa_state = POOL_STATE_ACTIVE; 3464 spa->spa_ubsync = spa->spa_uberblock; 3465 spa->spa_verify_min_txg = spa->spa_extreme_rewind ? 3466 TXG_INITIAL - 1 : spa_last_synced_txg(spa) - TXG_DEFER_SIZE - 1; 3467 spa->spa_first_txg = spa->spa_last_ubsync_txg ? 
3468 spa->spa_last_ubsync_txg : spa_last_synced_txg(spa) + 1; 3469 spa->spa_claim_max_txg = spa->spa_first_txg; 3470 spa->spa_prev_software_version = ub->ub_software_version; 3471 } 3472 3473 static int 3474 spa_ld_select_uberblock(spa_t *spa, spa_import_type_t type) 3475 { 3476 vdev_t *rvd = spa->spa_root_vdev; 3477 nvlist_t *label; 3478 uberblock_t *ub = &spa->spa_uberblock; 3479 boolean_t activity_check = B_FALSE; 3480 3481 /* 3482 * If we are opening the checkpointed state of the pool by 3483 * rewinding to it, at this point we will have written the 3484 * checkpointed uberblock to the vdev labels, so searching 3485 * the labels will find the right uberblock. However, if 3486 * we are opening the checkpointed state read-only, we have 3487 * not modified the labels. Therefore, we must ignore the 3488 * labels and continue using the spa_uberblock that was set 3489 * by spa_ld_checkpoint_rewind. 3490 * 3491 * Note that it would be fine to ignore the labels when 3492 * rewinding (opening writeable) as well. However, if we 3493 * crash just after writing the labels, we will end up 3494 * searching the labels. Doing so in the common case means 3495 * that this code path gets exercised normally, rather than 3496 * just in the edge case. 3497 */ 3498 if (ub->ub_checkpoint_txg != 0 && 3499 spa_importing_readonly_checkpoint(spa)) { 3500 spa_ld_select_uberblock_done(spa, ub); 3501 return (0); 3502 } 3503 3504 /* 3505 * Find the best uberblock. 3506 */ 3507 vdev_uberblock_load(rvd, ub, &label); 3508 3509 /* 3510 * If we weren't able to find a single valid uberblock, return failure. 3511 */ 3512 if (ub->ub_txg == 0) { 3513 nvlist_free(label); 3514 spa_load_failed(spa, "no valid uberblock found"); 3515 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, ENXIO)); 3516 } 3517 3518 if (spa->spa_load_max_txg != UINT64_MAX) { 3519 (void) spa_import_progress_set_max_txg(spa_guid(spa), 3520 (u_longlong_t)spa->spa_load_max_txg); 3521 } 3522 spa_load_note(spa, "using uberblock with txg=%llu", 3523 (u_longlong_t)ub->ub_txg); 3524 3525 3526 /* 3527 * For pools which have the multihost property on determine if the 3528 * pool is truly inactive and can be safely imported. Prevent 3529 * hosts which don't have a hostid set from importing the pool. 3530 */ 3531 activity_check = spa_activity_check_required(spa, ub, label, 3532 spa->spa_config); 3533 if (activity_check) { 3534 if (ub->ub_mmp_magic == MMP_MAGIC && ub->ub_mmp_delay && 3535 spa_get_hostid(spa) == 0) { 3536 nvlist_free(label); 3537 fnvlist_add_uint64(spa->spa_load_info, 3538 ZPOOL_CONFIG_MMP_STATE, MMP_STATE_NO_HOSTID); 3539 return (spa_vdev_err(rvd, VDEV_AUX_ACTIVE, EREMOTEIO)); 3540 } 3541 3542 int error = spa_activity_check(spa, ub, spa->spa_config); 3543 if (error) { 3544 nvlist_free(label); 3545 return (error); 3546 } 3547 3548 fnvlist_add_uint64(spa->spa_load_info, 3549 ZPOOL_CONFIG_MMP_STATE, MMP_STATE_INACTIVE); 3550 fnvlist_add_uint64(spa->spa_load_info, 3551 ZPOOL_CONFIG_MMP_TXG, ub->ub_txg); 3552 fnvlist_add_uint16(spa->spa_load_info, 3553 ZPOOL_CONFIG_MMP_SEQ, 3554 (MMP_SEQ_VALID(ub) ? MMP_SEQ(ub) : 0)); 3555 } 3556 3557 /* 3558 * If the pool has an unsupported version we can't open it. 
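 *
 * Unsupported read features are handled just below in the same spirit;
 * as a sketch (the feature name is a made-up example), they are
 * reported back through spa_load_info roughly as
 *
 *     ZPOOL_CONFIG_UNSUP_FEAT = {
 *         "com.example:some_feature" = ""
 *         ...
 *     }
 *
 * so that userland tools can name exactly which features block the
 * open.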
3559 */ 3560 if (!SPA_VERSION_IS_SUPPORTED(ub->ub_version)) { 3561 nvlist_free(label); 3562 spa_load_failed(spa, "version %llu is not supported", 3563 (u_longlong_t)ub->ub_version); 3564 return (spa_vdev_err(rvd, VDEV_AUX_VERSION_NEWER, ENOTSUP)); 3565 } 3566 3567 if (ub->ub_version >= SPA_VERSION_FEATURES) { 3568 nvlist_t *features; 3569 3570 /* 3571 * If we weren't able to find what's necessary for reading the 3572 * MOS in the label, return failure. 3573 */ 3574 if (label == NULL) { 3575 spa_load_failed(spa, "label config unavailable"); 3576 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, 3577 ENXIO)); 3578 } 3579 3580 if (nvlist_lookup_nvlist(label, ZPOOL_CONFIG_FEATURES_FOR_READ, 3581 &features) != 0) { 3582 nvlist_free(label); 3583 spa_load_failed(spa, "invalid label: '%s' missing", 3584 ZPOOL_CONFIG_FEATURES_FOR_READ); 3585 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, 3586 ENXIO)); 3587 } 3588 3589 /* 3590 * Update our in-core representation with the definitive values 3591 * from the label. 3592 */ 3593 nvlist_free(spa->spa_label_features); 3594 VERIFY(nvlist_dup(features, &spa->spa_label_features, 0) == 0); 3595 } 3596 3597 nvlist_free(label); 3598 3599 /* 3600 * Look through entries in the label nvlist's features_for_read. If 3601 * there is a feature listed there which we don't understand then we 3602 * cannot open a pool. 3603 */ 3604 if (ub->ub_version >= SPA_VERSION_FEATURES) { 3605 nvlist_t *unsup_feat; 3606 3607 VERIFY(nvlist_alloc(&unsup_feat, NV_UNIQUE_NAME, KM_SLEEP) == 3608 0); 3609 3610 for (nvpair_t *nvp = nvlist_next_nvpair(spa->spa_label_features, 3611 NULL); nvp != NULL; 3612 nvp = nvlist_next_nvpair(spa->spa_label_features, nvp)) { 3613 if (!zfeature_is_supported(nvpair_name(nvp))) { 3614 VERIFY(nvlist_add_string(unsup_feat, 3615 nvpair_name(nvp), "") == 0); 3616 } 3617 } 3618 3619 if (!nvlist_empty(unsup_feat)) { 3620 VERIFY(nvlist_add_nvlist(spa->spa_load_info, 3621 ZPOOL_CONFIG_UNSUP_FEAT, unsup_feat) == 0); 3622 nvlist_free(unsup_feat); 3623 spa_load_failed(spa, "some features are unsupported"); 3624 return (spa_vdev_err(rvd, VDEV_AUX_UNSUP_FEAT, 3625 ENOTSUP)); 3626 } 3627 3628 nvlist_free(unsup_feat); 3629 } 3630 3631 if (type != SPA_IMPORT_ASSEMBLE && spa->spa_config_splitting) { 3632 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 3633 spa_try_repair(spa, spa->spa_config); 3634 spa_config_exit(spa, SCL_ALL, FTAG); 3635 nvlist_free(spa->spa_config_splitting); 3636 spa->spa_config_splitting = NULL; 3637 } 3638 3639 /* 3640 * Initialize internal SPA structures. 
3641 */ 3642 spa_ld_select_uberblock_done(spa, ub); 3643 3644 return (0); 3645 } 3646 3647 static int 3648 spa_ld_open_rootbp(spa_t *spa) 3649 { 3650 int error = 0; 3651 vdev_t *rvd = spa->spa_root_vdev; 3652 3653 error = dsl_pool_init(spa, spa->spa_first_txg, &spa->spa_dsl_pool); 3654 if (error != 0) { 3655 spa_load_failed(spa, "unable to open rootbp in dsl_pool_init " 3656 "[error=%d]", error); 3657 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 3658 } 3659 spa->spa_meta_objset = spa->spa_dsl_pool->dp_meta_objset; 3660 3661 return (0); 3662 } 3663 3664 static int 3665 spa_ld_trusted_config(spa_t *spa, spa_import_type_t type, 3666 boolean_t reloading) 3667 { 3668 vdev_t *mrvd, *rvd = spa->spa_root_vdev; 3669 nvlist_t *nv, *mos_config, *policy; 3670 int error = 0, copy_error; 3671 uint64_t healthy_tvds, healthy_tvds_mos; 3672 uint64_t mos_config_txg; 3673 3674 if (spa_dir_prop(spa, DMU_POOL_CONFIG, &spa->spa_config_object, B_TRUE) 3675 != 0) 3676 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 3677 3678 /* 3679 * If we're assembling a pool from a split, the config provided is 3680 * already trusted so there is nothing to do. 3681 */ 3682 if (type == SPA_IMPORT_ASSEMBLE) 3683 return (0); 3684 3685 healthy_tvds = spa_healthy_core_tvds(spa); 3686 3687 if (load_nvlist(spa, spa->spa_config_object, &mos_config) 3688 != 0) { 3689 spa_load_failed(spa, "unable to retrieve MOS config"); 3690 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 3691 } 3692 3693 /* 3694 * If we are doing an open, pool owner wasn't verified yet, thus do 3695 * the verification here. 3696 */ 3697 if (spa->spa_load_state == SPA_LOAD_OPEN) { 3698 error = spa_verify_host(spa, mos_config); 3699 if (error != 0) { 3700 nvlist_free(mos_config); 3701 return (error); 3702 } 3703 } 3704 3705 nv = fnvlist_lookup_nvlist(mos_config, ZPOOL_CONFIG_VDEV_TREE); 3706 3707 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 3708 3709 /* 3710 * Build a new vdev tree from the trusted config 3711 */ 3712 error = spa_config_parse(spa, &mrvd, nv, NULL, 0, VDEV_ALLOC_LOAD); 3713 if (error != 0) { 3714 nvlist_free(mos_config); 3715 spa_config_exit(spa, SCL_ALL, FTAG); 3716 spa_load_failed(spa, "spa_config_parse failed [error=%d]", 3717 error); 3718 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, error)); 3719 } 3720 3721 /* 3722 * Vdev paths in the MOS may be obsolete. If the untrusted config was 3723 * obtained by scanning /dev/dsk, then it will have the right vdev 3724 * paths. We update the trusted MOS config with this information. 3725 * We first try to copy the paths with vdev_copy_path_strict, which 3726 * succeeds only when both configs have exactly the same vdev tree. 3727 * If that fails, we fall back to a more flexible method that has a 3728 * best effort policy. 3729 */ 3730 copy_error = vdev_copy_path_strict(rvd, mrvd); 3731 if (copy_error != 0 || spa_load_print_vdev_tree) { 3732 spa_load_note(spa, "provided vdev tree:"); 3733 vdev_dbgmsg_print_tree(rvd, 2); 3734 spa_load_note(spa, "MOS vdev tree:"); 3735 vdev_dbgmsg_print_tree(mrvd, 2); 3736 } 3737 if (copy_error != 0) { 3738 spa_load_note(spa, "vdev_copy_path_strict failed, falling " 3739 "back to vdev_copy_path_relaxed"); 3740 vdev_copy_path_relaxed(rvd, mrvd); 3741 } 3742 3743 vdev_close(rvd); 3744 vdev_free(rvd); 3745 spa->spa_root_vdev = mrvd; 3746 rvd = mrvd; 3747 spa_config_exit(spa, SCL_ALL, FTAG); 3748 3749 /* 3750 * We will use spa_config if we decide to reload the spa or if spa_load 3751 * fails and we rewind. 
We must thus regenerate the config using the 3752 * MOS information with the updated paths. ZPOOL_LOAD_POLICY is used to 3753 * pass settings on how to load the pool and is not stored in the MOS. 3754 * We copy it over to our new, trusted config. 3755 */ 3756 mos_config_txg = fnvlist_lookup_uint64(mos_config, 3757 ZPOOL_CONFIG_POOL_TXG); 3758 nvlist_free(mos_config); 3759 mos_config = spa_config_generate(spa, NULL, mos_config_txg, B_FALSE); 3760 if (nvlist_lookup_nvlist(spa->spa_config, ZPOOL_LOAD_POLICY, 3761 &policy) == 0) 3762 fnvlist_add_nvlist(mos_config, ZPOOL_LOAD_POLICY, policy); 3763 spa_config_set(spa, mos_config); 3764 spa->spa_config_source = SPA_CONFIG_SRC_MOS; 3765 3766 /* 3767 * Now that we got the config from the MOS, we should be more strict 3768 * in checking blkptrs and can make assumptions about the consistency 3769 * of the vdev tree. spa_trust_config must be set to true before opening 3770 * vdevs in order for them to be writeable. 3771 */ 3772 spa->spa_trust_config = B_TRUE; 3773 3774 /* 3775 * Open and validate the new vdev tree 3776 */ 3777 error = spa_ld_open_vdevs(spa); 3778 if (error != 0) 3779 return (error); 3780 3781 error = spa_ld_validate_vdevs(spa); 3782 if (error != 0) 3783 return (error); 3784 3785 if (copy_error != 0 || spa_load_print_vdev_tree) { 3786 spa_load_note(spa, "final vdev tree:"); 3787 vdev_dbgmsg_print_tree(rvd, 2); 3788 } 3789 3790 if (spa->spa_load_state != SPA_LOAD_TRYIMPORT && 3791 !spa->spa_extreme_rewind && zfs_max_missing_tvds == 0) { 3792 /* 3793 * Sanity check to make sure that we are indeed loading the 3794 * latest uberblock. If we missed SPA_SYNC_MIN_VDEVS tvds 3795 * in the config provided and they happened to be the only ones 3796 * to have the latest uberblock, we could involuntarily perform 3797 * an extreme rewind. 3798 */ 3799 healthy_tvds_mos = spa_healthy_core_tvds(spa); 3800 if (healthy_tvds_mos - healthy_tvds >= 3801 SPA_SYNC_MIN_VDEVS) { 3802 spa_load_note(spa, "config provided misses too many " 3803 "top-level vdevs compared to MOS (%lld vs %lld). ", 3804 (u_longlong_t)healthy_tvds, 3805 (u_longlong_t)healthy_tvds_mos); 3806 spa_load_note(spa, "vdev tree:"); 3807 vdev_dbgmsg_print_tree(rvd, 2); 3808 if (reloading) { 3809 spa_load_failed(spa, "config was already " 3810 "provided from MOS. Aborting."); 3811 return (spa_vdev_err(rvd, 3812 VDEV_AUX_CORRUPT_DATA, EIO)); 3813 } 3814 spa_load_note(spa, "spa must be reloaded using MOS " 3815 "config"); 3816 return (SET_ERROR(EAGAIN)); 3817 } 3818 } 3819 3820 error = spa_check_for_missing_logs(spa); 3821 if (error != 0) 3822 return (spa_vdev_err(rvd, VDEV_AUX_BAD_GUID_SUM, ENXIO)); 3823 3824 if (rvd->vdev_guid_sum != spa->spa_uberblock.ub_guid_sum) { 3825 spa_load_failed(spa, "uberblock guid sum doesn't match MOS " 3826 "guid sum (%llu != %llu)", 3827 (u_longlong_t)spa->spa_uberblock.ub_guid_sum, 3828 (u_longlong_t)rvd->vdev_guid_sum); 3829 return (spa_vdev_err(rvd, VDEV_AUX_BAD_GUID_SUM, 3830 ENXIO)); 3831 } 3832 3833 return (0); 3834 } 3835 3836 static int 3837 spa_ld_open_indirect_vdev_metadata(spa_t *spa) 3838 { 3839 int error = 0; 3840 vdev_t *rvd = spa->spa_root_vdev; 3841 3842 /* 3843 * Everything that we read before spa_remove_init() must be stored 3844 * on concreted vdevs. Therefore we do this as early as possible. 
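 *
 * (The reason: the indirect vdev mappings have not been loaded yet at
 * this point, so a read that landed on a removed -- indirect -- vdev
 * could not be remapped. spa_load_impl() below makes the same ordering
 * argument when it calls spa_ld_open_indirect_vdev_metadata().)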
3845 */ 3846 error = spa_remove_init(spa); 3847 if (error != 0) { 3848 spa_load_failed(spa, "spa_remove_init failed [error=%d]", 3849 error); 3850 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 3851 } 3852 3853 /* 3854 * Retrieve information needed to condense indirect vdev mappings. 3855 */ 3856 error = spa_condense_init(spa); 3857 if (error != 0) { 3858 spa_load_failed(spa, "spa_condense_init failed [error=%d]", 3859 error); 3860 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, error)); 3861 } 3862 3863 return (0); 3864 } 3865 3866 static int 3867 spa_ld_check_features(spa_t *spa, boolean_t *missing_feat_writep) 3868 { 3869 int error = 0; 3870 vdev_t *rvd = spa->spa_root_vdev; 3871 3872 if (spa_version(spa) >= SPA_VERSION_FEATURES) { 3873 boolean_t missing_feat_read = B_FALSE; 3874 nvlist_t *unsup_feat, *enabled_feat; 3875 3876 if (spa_dir_prop(spa, DMU_POOL_FEATURES_FOR_READ, 3877 &spa->spa_feat_for_read_obj, B_TRUE) != 0) { 3878 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 3879 } 3880 3881 if (spa_dir_prop(spa, DMU_POOL_FEATURES_FOR_WRITE, 3882 &spa->spa_feat_for_write_obj, B_TRUE) != 0) { 3883 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 3884 } 3885 3886 if (spa_dir_prop(spa, DMU_POOL_FEATURE_DESCRIPTIONS, 3887 &spa->spa_feat_desc_obj, B_TRUE) != 0) { 3888 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 3889 } 3890 3891 enabled_feat = fnvlist_alloc(); 3892 unsup_feat = fnvlist_alloc(); 3893 3894 if (!spa_features_check(spa, B_FALSE, 3895 unsup_feat, enabled_feat)) 3896 missing_feat_read = B_TRUE; 3897 3898 if (spa_writeable(spa) || 3899 spa->spa_load_state == SPA_LOAD_TRYIMPORT) { 3900 if (!spa_features_check(spa, B_TRUE, 3901 unsup_feat, enabled_feat)) { 3902 *missing_feat_writep = B_TRUE; 3903 } 3904 } 3905 3906 fnvlist_add_nvlist(spa->spa_load_info, 3907 ZPOOL_CONFIG_ENABLED_FEAT, enabled_feat); 3908 3909 if (!nvlist_empty(unsup_feat)) { 3910 fnvlist_add_nvlist(spa->spa_load_info, 3911 ZPOOL_CONFIG_UNSUP_FEAT, unsup_feat); 3912 } 3913 3914 fnvlist_free(enabled_feat); 3915 fnvlist_free(unsup_feat); 3916 3917 if (!missing_feat_read) { 3918 fnvlist_add_boolean(spa->spa_load_info, 3919 ZPOOL_CONFIG_CAN_RDONLY); 3920 } 3921 3922 /* 3923 * If the state is SPA_LOAD_TRYIMPORT, our objective is 3924 * twofold: to determine whether the pool is available for 3925 * import in read-write mode and (if it is not) whether the 3926 * pool is available for import in read-only mode. If the pool 3927 * is available for import in read-write mode, it is displayed 3928 * as available in userland; if it is not available for import 3929 * in read-only mode, it is displayed as unavailable in 3930 * userland. If the pool is available for import in read-only 3931 * mode but not read-write mode, it is displayed as unavailable 3932 * in userland with a special note that the pool is actually 3933 * available for open in read-only mode. 3934 * 3935 * As a result, if the state is SPA_LOAD_TRYIMPORT and we are 3936 * missing a feature for write, we must first determine whether 3937 * the pool can be opened read-only before returning to 3938 * userland in order to know whether to display the 3939 * abovementioned note. 3940 */ 3941 if (missing_feat_read || (*missing_feat_writep && 3942 spa_writeable(spa))) { 3943 spa_load_failed(spa, "pool uses unsupported features"); 3944 return (spa_vdev_err(rvd, VDEV_AUX_UNSUP_FEAT, 3945 ENOTSUP)); 3946 } 3947 3948 /* 3949 * Load refcounts for ZFS features from disk into an in-memory 3950 * cache during SPA initialization. 
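 *
 * Note that a feature with no refcount object on disk reports ENOTSUP
 * below and is cached as SPA_FEATURE_DISABLED; any other lookup error
 * is treated as corrupt pool data. The cache is what later consumers
 * read, roughly:
 *
 *	uint64_t rc;
 *	if (feature_get_refcount(spa, &spa_feature_table[f], &rc) == 0)
 *		(feature f is enabled with rc references)
 *
 * which is how spa_feature_stats_from_cache() further down reports
 * feature state without touching a possibly-suspended pool.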
3951 */ 3952 for (spa_feature_t i = 0; i < SPA_FEATURES; i++) { 3953 uint64_t refcount; 3954 3955 error = feature_get_refcount_from_disk(spa, 3956 &spa_feature_table[i], &refcount); 3957 if (error == 0) { 3958 spa->spa_feat_refcount_cache[i] = refcount; 3959 } else if (error == ENOTSUP) { 3960 spa->spa_feat_refcount_cache[i] = 3961 SPA_FEATURE_DISABLED; 3962 } else { 3963 spa_load_failed(spa, "error getting refcount " 3964 "for feature %s [error=%d]", 3965 spa_feature_table[i].fi_guid, error); 3966 return (spa_vdev_err(rvd, 3967 VDEV_AUX_CORRUPT_DATA, EIO)); 3968 } 3969 } 3970 } 3971 3972 if (spa_feature_is_active(spa, SPA_FEATURE_ENABLED_TXG)) { 3973 if (spa_dir_prop(spa, DMU_POOL_FEATURE_ENABLED_TXG, 3974 &spa->spa_feat_enabled_txg_obj, B_TRUE) != 0) 3975 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 3976 } 3977 3978 /* 3979 * Encryption was added before bookmark_v2, even though bookmark_v2 3980 * is now a dependency. If this pool has encryption enabled without 3981 * bookmark_v2, trigger an errata message. 3982 */ 3983 if (spa_feature_is_enabled(spa, SPA_FEATURE_ENCRYPTION) && 3984 !spa_feature_is_enabled(spa, SPA_FEATURE_BOOKMARK_V2)) { 3985 spa->spa_errata = ZPOOL_ERRATA_ZOL_8308_ENCRYPTION; 3986 } 3987 3988 return (0); 3989 } 3990 3991 static int 3992 spa_ld_load_special_directories(spa_t *spa) 3993 { 3994 int error = 0; 3995 vdev_t *rvd = spa->spa_root_vdev; 3996 3997 spa->spa_is_initializing = B_TRUE; 3998 error = dsl_pool_open(spa->spa_dsl_pool); 3999 spa->spa_is_initializing = B_FALSE; 4000 if (error != 0) { 4001 spa_load_failed(spa, "dsl_pool_open failed [error=%d]", error); 4002 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 4003 } 4004 4005 return (0); 4006 } 4007 4008 static int 4009 spa_ld_get_props(spa_t *spa) 4010 { 4011 int error = 0; 4012 uint64_t obj; 4013 vdev_t *rvd = spa->spa_root_vdev; 4014 4015 /* Grab the checksum salt from the MOS. */ 4016 error = zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT, 4017 DMU_POOL_CHECKSUM_SALT, 1, 4018 sizeof (spa->spa_cksum_salt.zcs_bytes), 4019 spa->spa_cksum_salt.zcs_bytes); 4020 if (error == ENOENT) { 4021 /* Generate a new salt for subsequent use */ 4022 (void) random_get_pseudo_bytes(spa->spa_cksum_salt.zcs_bytes, 4023 sizeof (spa->spa_cksum_salt.zcs_bytes)); 4024 } else if (error != 0) { 4025 spa_load_failed(spa, "unable to retrieve checksum salt from " 4026 "MOS [error=%d]", error); 4027 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 4028 } 4029 4030 if (spa_dir_prop(spa, DMU_POOL_SYNC_BPOBJ, &obj, B_TRUE) != 0) 4031 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 4032 error = bpobj_open(&spa->spa_deferred_bpobj, spa->spa_meta_objset, obj); 4033 if (error != 0) { 4034 spa_load_failed(spa, "error opening deferred-frees bpobj " 4035 "[error=%d]", error); 4036 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 4037 } 4038 4039 /* 4040 * Load the bit that tells us to use the new accounting function 4041 * (raid-z deflation). If we have an older pool, this will not 4042 * be present. 4043 */ 4044 error = spa_dir_prop(spa, DMU_POOL_DEFLATE, &spa->spa_deflate, B_FALSE); 4045 if (error != 0 && error != ENOENT) 4046 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 4047 4048 error = spa_dir_prop(spa, DMU_POOL_CREATION_VERSION, 4049 &spa->spa_creation_version, B_FALSE); 4050 if (error != 0 && error != ENOENT) 4051 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 4052 4053 /* 4054 * Load the persistent error log. If we have an older pool, this will 4055 * not be present. 
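 *
 * As with the other optional MOS directory entries loaded here, ENOENT
 * just means the object was never created and is not an error; anything
 * else is treated as corruption. The recurring pattern is, roughly:
 *
 *	error = spa_dir_prop(spa, DMU_POOL_<entry>, &obj, B_FALSE);
 *	if (error != 0 && error != ENOENT)
 *		return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
 *
 * with DMU_POOL_<entry> standing in for the specific directory key.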
4056 */ 4057 error = spa_dir_prop(spa, DMU_POOL_ERRLOG_LAST, &spa->spa_errlog_last, 4058 B_FALSE); 4059 if (error != 0 && error != ENOENT) 4060 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 4061 4062 error = spa_dir_prop(spa, DMU_POOL_ERRLOG_SCRUB, 4063 &spa->spa_errlog_scrub, B_FALSE); 4064 if (error != 0 && error != ENOENT) 4065 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 4066 4067 /* 4068 * Load the livelist deletion field. If a livelist is queued for 4069 * deletion, indicate that in the spa 4070 */ 4071 error = spa_dir_prop(spa, DMU_POOL_DELETED_CLONES, 4072 &spa->spa_livelists_to_delete, B_FALSE); 4073 if (error != 0 && error != ENOENT) 4074 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 4075 4076 /* 4077 * Load the history object. If we have an older pool, this 4078 * will not be present. 4079 */ 4080 error = spa_dir_prop(spa, DMU_POOL_HISTORY, &spa->spa_history, B_FALSE); 4081 if (error != 0 && error != ENOENT) 4082 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 4083 4084 /* 4085 * Load the per-vdev ZAP map. If we have an older pool, this will not 4086 * be present; in this case, defer its creation to a later time to 4087 * avoid dirtying the MOS this early / out of sync context. See 4088 * spa_sync_config_object. 4089 */ 4090 4091 /* The sentinel is only available in the MOS config. */ 4092 nvlist_t *mos_config; 4093 if (load_nvlist(spa, spa->spa_config_object, &mos_config) != 0) { 4094 spa_load_failed(spa, "unable to retrieve MOS config"); 4095 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 4096 } 4097 4098 error = spa_dir_prop(spa, DMU_POOL_VDEV_ZAP_MAP, 4099 &spa->spa_all_vdev_zaps, B_FALSE); 4100 4101 if (error == ENOENT) { 4102 VERIFY(!nvlist_exists(mos_config, 4103 ZPOOL_CONFIG_HAS_PER_VDEV_ZAPS)); 4104 spa->spa_avz_action = AVZ_ACTION_INITIALIZE; 4105 ASSERT0(vdev_count_verify_zaps(spa->spa_root_vdev)); 4106 } else if (error != 0) { 4107 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 4108 } else if (!nvlist_exists(mos_config, ZPOOL_CONFIG_HAS_PER_VDEV_ZAPS)) { 4109 /* 4110 * An older version of ZFS overwrote the sentinel value, so 4111 * we have orphaned per-vdev ZAPs in the MOS. Defer their 4112 * destruction to later; see spa_sync_config_object. 4113 */ 4114 spa->spa_avz_action = AVZ_ACTION_DESTROY; 4115 /* 4116 * We're assuming that no vdevs have had their ZAPs created 4117 * before this. Better be sure of it. 
4118 */ 4119 ASSERT0(vdev_count_verify_zaps(spa->spa_root_vdev)); 4120 } 4121 nvlist_free(mos_config); 4122 4123 spa->spa_delegation = zpool_prop_default_numeric(ZPOOL_PROP_DELEGATION); 4124 4125 error = spa_dir_prop(spa, DMU_POOL_PROPS, &spa->spa_pool_props_object, 4126 B_FALSE); 4127 if (error && error != ENOENT) 4128 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 4129 4130 if (error == 0) { 4131 uint64_t autoreplace; 4132 4133 spa_prop_find(spa, ZPOOL_PROP_BOOTFS, &spa->spa_bootfs); 4134 spa_prop_find(spa, ZPOOL_PROP_AUTOREPLACE, &autoreplace); 4135 spa_prop_find(spa, ZPOOL_PROP_DELEGATION, &spa->spa_delegation); 4136 spa_prop_find(spa, ZPOOL_PROP_FAILUREMODE, &spa->spa_failmode); 4137 spa_prop_find(spa, ZPOOL_PROP_AUTOEXPAND, &spa->spa_autoexpand); 4138 spa_prop_find(spa, ZPOOL_PROP_MULTIHOST, &spa->spa_multihost); 4139 spa_prop_find(spa, ZPOOL_PROP_AUTOTRIM, &spa->spa_autotrim); 4140 spa->spa_autoreplace = (autoreplace != 0); 4141 } 4142 4143 /* 4144 * If we are importing a pool with missing top-level vdevs, 4145 * we enforce that the pool doesn't panic or get suspended on 4146 * error since the likelihood of missing data is extremely high. 4147 */ 4148 if (spa->spa_missing_tvds > 0 && 4149 spa->spa_failmode != ZIO_FAILURE_MODE_CONTINUE && 4150 spa->spa_load_state != SPA_LOAD_TRYIMPORT) { 4151 spa_load_note(spa, "forcing failmode to 'continue' " 4152 "as some top level vdevs are missing"); 4153 spa->spa_failmode = ZIO_FAILURE_MODE_CONTINUE; 4154 } 4155 4156 return (0); 4157 } 4158 4159 static int 4160 spa_ld_open_aux_vdevs(spa_t *spa, spa_import_type_t type) 4161 { 4162 int error = 0; 4163 vdev_t *rvd = spa->spa_root_vdev; 4164 4165 /* 4166 * If we're assembling the pool from the split-off vdevs of 4167 * an existing pool, we don't want to attach the spares & cache 4168 * devices. 4169 */ 4170 4171 /* 4172 * Load any hot spares for this pool. 4173 */ 4174 error = spa_dir_prop(spa, DMU_POOL_SPARES, &spa->spa_spares.sav_object, 4175 B_FALSE); 4176 if (error != 0 && error != ENOENT) 4177 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 4178 if (error == 0 && type != SPA_IMPORT_ASSEMBLE) { 4179 ASSERT(spa_version(spa) >= SPA_VERSION_SPARES); 4180 if (load_nvlist(spa, spa->spa_spares.sav_object, 4181 &spa->spa_spares.sav_config) != 0) { 4182 spa_load_failed(spa, "error loading spares nvlist"); 4183 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 4184 } 4185 4186 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 4187 spa_load_spares(spa); 4188 spa_config_exit(spa, SCL_ALL, FTAG); 4189 } else if (error == 0) { 4190 spa->spa_spares.sav_sync = B_TRUE; 4191 } 4192 4193 /* 4194 * Load any level 2 ARC devices for this pool. 
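 *
 * This mirrors the hot-spare path above: look up the sav object in the
 * MOS directory, load its nvlist into sav_config, and rebuild the
 * in-core aux vdevs under SCL_ALL. Both paths are skipped for
 * SPA_IMPORT_ASSEMBLE, since a pool assembled from split-off vdevs
 * does not take the spares and cache devices with it.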
4195 */ 4196 error = spa_dir_prop(spa, DMU_POOL_L2CACHE, 4197 &spa->spa_l2cache.sav_object, B_FALSE); 4198 if (error != 0 && error != ENOENT) 4199 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 4200 if (error == 0 && type != SPA_IMPORT_ASSEMBLE) { 4201 ASSERT(spa_version(spa) >= SPA_VERSION_L2CACHE); 4202 if (load_nvlist(spa, spa->spa_l2cache.sav_object, 4203 &spa->spa_l2cache.sav_config) != 0) { 4204 spa_load_failed(spa, "error loading l2cache nvlist"); 4205 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 4206 } 4207 4208 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 4209 spa_load_l2cache(spa); 4210 spa_config_exit(spa, SCL_ALL, FTAG); 4211 } else if (error == 0) { 4212 spa->spa_l2cache.sav_sync = B_TRUE; 4213 } 4214 4215 return (0); 4216 } 4217 4218 static int 4219 spa_ld_load_vdev_metadata(spa_t *spa) 4220 { 4221 int error = 0; 4222 vdev_t *rvd = spa->spa_root_vdev; 4223 4224 /* 4225 * If the 'multihost' property is set, then never allow a pool to 4226 * be imported when the system hostid is zero. The exception to 4227 * this rule is zdb which is always allowed to access pools. 4228 */ 4229 if (spa_multihost(spa) && spa_get_hostid(spa) == 0 && 4230 (spa->spa_import_flags & ZFS_IMPORT_SKIP_MMP) == 0) { 4231 fnvlist_add_uint64(spa->spa_load_info, 4232 ZPOOL_CONFIG_MMP_STATE, MMP_STATE_NO_HOSTID); 4233 return (spa_vdev_err(rvd, VDEV_AUX_ACTIVE, EREMOTEIO)); 4234 } 4235 4236 /* 4237 * If the 'autoreplace' property is set, then post a resource notifying 4238 * the ZFS DE that it should not issue any faults for unopenable 4239 * devices. We also iterate over the vdevs, and post a sysevent for any 4240 * unopenable vdevs so that the normal autoreplace handler can take 4241 * over. 4242 */ 4243 if (spa->spa_autoreplace && spa->spa_load_state != SPA_LOAD_TRYIMPORT) { 4244 spa_check_removed(spa->spa_root_vdev); 4245 /* 4246 * For the import case, this is done in spa_import(), because 4247 * at this point we're using the spare definitions from 4248 * the MOS config, not necessarily from the userland config. 4249 */ 4250 if (spa->spa_load_state != SPA_LOAD_IMPORT) { 4251 spa_aux_check_removed(&spa->spa_spares); 4252 spa_aux_check_removed(&spa->spa_l2cache); 4253 } 4254 } 4255 4256 /* 4257 * Load the vdev metadata such as metaslabs, DTLs, spacemap object, etc. 4258 */ 4259 error = vdev_load(rvd); 4260 if (error != 0) { 4261 spa_load_failed(spa, "vdev_load failed [error=%d]", error); 4262 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, error)); 4263 } 4264 4265 error = spa_ld_log_spacemaps(spa); 4266 if (error != 0) { 4267 spa_load_failed(spa, "spa_ld_log_sm_data failed [error=%d]", 4268 error); 4269 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, error)); 4270 } 4271 4272 /* 4273 * Propagate the leaf DTLs we just loaded all the way up the vdev tree. 
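 *
 * (The per-leaf DTLs -- dirty time logs of txgs a device may have
 * missed -- were read as part of vdev_load() above; reassessing rolls
 * them up the tree so that the resilver/rebuild checks performed later
 * in spa_load_impl() see an accurate picture.)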
4274 */ 4275 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 4276 vdev_dtl_reassess(rvd, 0, 0, B_FALSE, B_FALSE); 4277 spa_config_exit(spa, SCL_ALL, FTAG); 4278 4279 return (0); 4280 } 4281 4282 static int 4283 spa_ld_load_dedup_tables(spa_t *spa) 4284 { 4285 int error = 0; 4286 vdev_t *rvd = spa->spa_root_vdev; 4287 4288 error = ddt_load(spa); 4289 if (error != 0) { 4290 spa_load_failed(spa, "ddt_load failed [error=%d]", error); 4291 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 4292 } 4293 4294 return (0); 4295 } 4296 4297 static int 4298 spa_ld_verify_logs(spa_t *spa, spa_import_type_t type, char **ereport) 4299 { 4300 vdev_t *rvd = spa->spa_root_vdev; 4301 4302 if (type != SPA_IMPORT_ASSEMBLE && spa_writeable(spa)) { 4303 boolean_t missing = spa_check_logs(spa); 4304 if (missing) { 4305 if (spa->spa_missing_tvds != 0) { 4306 spa_load_note(spa, "spa_check_logs failed " 4307 "so dropping the logs"); 4308 } else { 4309 *ereport = FM_EREPORT_ZFS_LOG_REPLAY; 4310 spa_load_failed(spa, "spa_check_logs failed"); 4311 return (spa_vdev_err(rvd, VDEV_AUX_BAD_LOG, 4312 ENXIO)); 4313 } 4314 } 4315 } 4316 4317 return (0); 4318 } 4319 4320 static int 4321 spa_ld_verify_pool_data(spa_t *spa) 4322 { 4323 int error = 0; 4324 vdev_t *rvd = spa->spa_root_vdev; 4325 4326 /* 4327 * We've successfully opened the pool, verify that we're ready 4328 * to start pushing transactions. 4329 */ 4330 if (spa->spa_load_state != SPA_LOAD_TRYIMPORT) { 4331 error = spa_load_verify(spa); 4332 if (error != 0) { 4333 spa_load_failed(spa, "spa_load_verify failed " 4334 "[error=%d]", error); 4335 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, 4336 error)); 4337 } 4338 } 4339 4340 return (0); 4341 } 4342 4343 static void 4344 spa_ld_claim_log_blocks(spa_t *spa) 4345 { 4346 dmu_tx_t *tx; 4347 dsl_pool_t *dp = spa_get_dsl(spa); 4348 4349 /* 4350 * Claim log blocks that haven't been committed yet. 4351 * This must all happen in a single txg. 4352 * Note: spa_claim_max_txg is updated by spa_claim_notify(), 4353 * invoked from zil_claim_log_block()'s i/o done callback. 4354 * Price of rollback is that we abandon the log. 4355 */ 4356 spa->spa_claiming = B_TRUE; 4357 4358 tx = dmu_tx_create_assigned(dp, spa_first_txg(spa)); 4359 (void) dmu_objset_find_dp(dp, dp->dp_root_dir_obj, 4360 zil_claim, tx, DS_FIND_CHILDREN); 4361 dmu_tx_commit(tx); 4362 4363 spa->spa_claiming = B_FALSE; 4364 4365 spa_set_log_state(spa, SPA_LOG_GOOD); 4366 } 4367 4368 static void 4369 spa_ld_check_for_config_update(spa_t *spa, uint64_t config_cache_txg, 4370 boolean_t update_config_cache) 4371 { 4372 vdev_t *rvd = spa->spa_root_vdev; 4373 int need_update = B_FALSE; 4374 4375 /* 4376 * If the config cache is stale, or we have uninitialized 4377 * metaslabs (see spa_vdev_add()), then update the config. 4378 * 4379 * If this is a verbatim import, trust the current 4380 * in-core spa_config and update the disk labels. 4381 */ 4382 if (update_config_cache || config_cache_txg != spa->spa_config_txg || 4383 spa->spa_load_state == SPA_LOAD_IMPORT || 4384 spa->spa_load_state == SPA_LOAD_RECOVER || 4385 (spa->spa_import_flags & ZFS_IMPORT_VERBATIM)) 4386 need_update = B_TRUE; 4387 4388 for (int c = 0; c < rvd->vdev_children; c++) 4389 if (rvd->vdev_child[c]->vdev_ms_array == 0) 4390 need_update = B_TRUE; 4391 4392 /* 4393 * Update the config cache asynchronously in case we're the 4394 * root pool, in which case the config cache isn't writable yet. 
4395 */ 4396 if (need_update) 4397 spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE); 4398 } 4399 4400 static void 4401 spa_ld_prepare_for_reload(spa_t *spa) 4402 { 4403 spa_mode_t mode = spa->spa_mode; 4404 int async_suspended = spa->spa_async_suspended; 4405 4406 spa_unload(spa); 4407 spa_deactivate(spa); 4408 spa_activate(spa, mode); 4409 4410 /* 4411 * We save the value of spa_async_suspended as it gets reset to 0 by 4412 * spa_unload(). We want to restore it back to the original value before 4413 * returning as we might be calling spa_async_resume() later. 4414 */ 4415 spa->spa_async_suspended = async_suspended; 4416 } 4417 4418 static int 4419 spa_ld_read_checkpoint_txg(spa_t *spa) 4420 { 4421 uberblock_t checkpoint; 4422 int error = 0; 4423 4424 ASSERT0(spa->spa_checkpoint_txg); 4425 ASSERT(MUTEX_HELD(&spa_namespace_lock)); 4426 4427 error = zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT, 4428 DMU_POOL_ZPOOL_CHECKPOINT, sizeof (uint64_t), 4429 sizeof (uberblock_t) / sizeof (uint64_t), &checkpoint); 4430 4431 if (error == ENOENT) 4432 return (0); 4433 4434 if (error != 0) 4435 return (error); 4436 4437 ASSERT3U(checkpoint.ub_txg, !=, 0); 4438 ASSERT3U(checkpoint.ub_checkpoint_txg, !=, 0); 4439 ASSERT3U(checkpoint.ub_timestamp, !=, 0); 4440 spa->spa_checkpoint_txg = checkpoint.ub_txg; 4441 spa->spa_checkpoint_info.sci_timestamp = checkpoint.ub_timestamp; 4442 4443 return (0); 4444 } 4445 4446 static int 4447 spa_ld_mos_init(spa_t *spa, spa_import_type_t type) 4448 { 4449 int error = 0; 4450 4451 ASSERT(MUTEX_HELD(&spa_namespace_lock)); 4452 ASSERT(spa->spa_config_source != SPA_CONFIG_SRC_NONE); 4453 4454 /* 4455 * Never trust the config that is provided unless we are assembling 4456 * a pool following a split. 4457 * This means don't trust blkptrs and the vdev tree in general. This 4458 * also effectively puts the spa in read-only mode since 4459 * spa_writeable() checks for spa_trust_config to be true. 4460 * We will later load a trusted config from the MOS. 4461 */ 4462 if (type != SPA_IMPORT_ASSEMBLE) 4463 spa->spa_trust_config = B_FALSE; 4464 4465 /* 4466 * Parse the config provided to create a vdev tree. 4467 */ 4468 error = spa_ld_parse_config(spa, type); 4469 if (error != 0) 4470 return (error); 4471 4472 spa_import_progress_add(spa); 4473 4474 /* 4475 * Now that we have the vdev tree, try to open each vdev. This involves 4476 * opening the underlying physical device, retrieving its geometry and 4477 * probing the vdev with a dummy I/O. The state of each vdev will be set 4478 * based on the success of those operations. After this we'll be ready 4479 * to read from the vdevs. 4480 */ 4481 error = spa_ld_open_vdevs(spa); 4482 if (error != 0) 4483 return (error); 4484 4485 /* 4486 * Read the label of each vdev and make sure that the GUIDs stored 4487 * there match the GUIDs in the config provided. 4488 * If we're assembling a new pool that's been split off from an 4489 * existing pool, the labels haven't yet been updated so we skip 4490 * validation for now. 4491 */ 4492 if (type != SPA_IMPORT_ASSEMBLE) { 4493 error = spa_ld_validate_vdevs(spa); 4494 if (error != 0) 4495 return (error); 4496 } 4497 4498 /* 4499 * Read all vdev labels to find the best uberblock (i.e. latest, 4500 * unless spa_load_max_txg is set) and store it in spa_uberblock. We 4501 * get the list of features required to read blkptrs in the MOS from 4502 * the vdev label with the best uberblock and verify that our version 4503 * of zfs supports them all. 
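 *
 * (An entry in that features_for_read list which zfeature_is_supported()
 * rejects fails the load with VDEV_AUX_UNSUP_FEAT, as seen in the
 * uberblock-selection code earlier in this file.)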
4504 */ 4505 error = spa_ld_select_uberblock(spa, type); 4506 if (error != 0) 4507 return (error); 4508 4509 /* 4510 * Pass that uberblock to the dsl_pool layer which will open the root 4511 * blkptr. This blkptr points to the latest version of the MOS and will 4512 * allow us to read its contents. 4513 */ 4514 error = spa_ld_open_rootbp(spa); 4515 if (error != 0) 4516 return (error); 4517 4518 return (0); 4519 } 4520 4521 static int 4522 spa_ld_checkpoint_rewind(spa_t *spa) 4523 { 4524 uberblock_t checkpoint; 4525 int error = 0; 4526 4527 ASSERT(MUTEX_HELD(&spa_namespace_lock)); 4528 ASSERT(spa->spa_import_flags & ZFS_IMPORT_CHECKPOINT); 4529 4530 error = zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT, 4531 DMU_POOL_ZPOOL_CHECKPOINT, sizeof (uint64_t), 4532 sizeof (uberblock_t) / sizeof (uint64_t), &checkpoint); 4533 4534 if (error != 0) { 4535 spa_load_failed(spa, "unable to retrieve checkpointed " 4536 "uberblock from the MOS config [error=%d]", error); 4537 4538 if (error == ENOENT) 4539 error = ZFS_ERR_NO_CHECKPOINT; 4540 4541 return (error); 4542 } 4543 4544 ASSERT3U(checkpoint.ub_txg, <, spa->spa_uberblock.ub_txg); 4545 ASSERT3U(checkpoint.ub_txg, ==, checkpoint.ub_checkpoint_txg); 4546 4547 /* 4548 * We need to update the txg and timestamp of the checkpointed 4549 * uberblock to be higher than the latest one. This ensures that 4550 * the checkpointed uberblock is selected if we were to close and 4551 * reopen the pool right after we've written it in the vdev labels. 4552 * (also see block comment in vdev_uberblock_compare) 4553 */ 4554 checkpoint.ub_txg = spa->spa_uberblock.ub_txg + 1; 4555 checkpoint.ub_timestamp = gethrestime_sec(); 4556 4557 /* 4558 * Set current uberblock to be the checkpointed uberblock. 4559 */ 4560 spa->spa_uberblock = checkpoint; 4561 4562 /* 4563 * If we are doing a normal rewind, then the pool is open for 4564 * writing and we sync the "updated" checkpointed uberblock to 4565 * disk. Once this is done, we've basically rewound the whole 4566 * pool and there is no way back. 4567 * 4568 * There are cases when we don't want to attempt and sync the 4569 * checkpointed uberblock to disk because we are opening a 4570 * pool as read-only. Specifically, verifying the checkpointed 4571 * state with zdb, and importing the checkpointed state to get 4572 * a "preview" of its content. 
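 *
 * (Both of those read-only cases still reach this function -- the
 * ZFS_IMPORT_CHECKPOINT flag is asserted above -- but spa_writeable()
 * is false for them, so the label sync below is skipped and the rewind
 * stays entirely in-core.)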
4573 */ 4574 if (spa_writeable(spa)) { 4575 vdev_t *rvd = spa->spa_root_vdev; 4576 4577 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 4578 vdev_t *svd[SPA_SYNC_MIN_VDEVS] = { NULL }; 4579 int svdcount = 0; 4580 int children = rvd->vdev_children; 4581 int c0 = spa_get_random(children); 4582 4583 for (int c = 0; c < children; c++) { 4584 vdev_t *vd = rvd->vdev_child[(c0 + c) % children]; 4585 4586 /* Stop when revisiting the first vdev */ 4587 if (c > 0 && svd[0] == vd) 4588 break; 4589 4590 if (vd->vdev_ms_array == 0 || vd->vdev_islog || 4591 !vdev_is_concrete(vd)) 4592 continue; 4593 4594 svd[svdcount++] = vd; 4595 if (svdcount == SPA_SYNC_MIN_VDEVS) 4596 break; 4597 } 4598 error = vdev_config_sync(svd, svdcount, spa->spa_first_txg); 4599 if (error == 0) 4600 spa->spa_last_synced_guid = rvd->vdev_guid; 4601 spa_config_exit(spa, SCL_ALL, FTAG); 4602 4603 if (error != 0) { 4604 spa_load_failed(spa, "failed to write checkpointed " 4605 "uberblock to the vdev labels [error=%d]", error); 4606 return (error); 4607 } 4608 } 4609 4610 return (0); 4611 } 4612 4613 static int 4614 spa_ld_mos_with_trusted_config(spa_t *spa, spa_import_type_t type, 4615 boolean_t *update_config_cache) 4616 { 4617 int error; 4618 4619 /* 4620 * Parse the config for pool, open and validate vdevs, 4621 * select an uberblock, and use that uberblock to open 4622 * the MOS. 4623 */ 4624 error = spa_ld_mos_init(spa, type); 4625 if (error != 0) 4626 return (error); 4627 4628 /* 4629 * Retrieve the trusted config stored in the MOS and use it to create 4630 * a new, exact version of the vdev tree, then reopen all vdevs. 4631 */ 4632 error = spa_ld_trusted_config(spa, type, B_FALSE); 4633 if (error == EAGAIN) { 4634 if (update_config_cache != NULL) 4635 *update_config_cache = B_TRUE; 4636 4637 /* 4638 * Redo the loading process with the trusted config if it is 4639 * too different from the untrusted config. 4640 */ 4641 spa_ld_prepare_for_reload(spa); 4642 spa_load_note(spa, "RELOADING"); 4643 error = spa_ld_mos_init(spa, type); 4644 if (error != 0) 4645 return (error); 4646 4647 error = spa_ld_trusted_config(spa, type, B_TRUE); 4648 if (error != 0) 4649 return (error); 4650 4651 } else if (error != 0) { 4652 return (error); 4653 } 4654 4655 return (0); 4656 } 4657 4658 /* 4659 * Load an existing storage pool, using the config provided. This config 4660 * describes which vdevs are part of the pool and is later validated against 4661 * partial configs present in each vdev's label and an entire copy of the 4662 * config stored in the MOS. 4663 */ 4664 static int 4665 spa_load_impl(spa_t *spa, spa_import_type_t type, char **ereport) 4666 { 4667 int error = 0; 4668 boolean_t missing_feat_write = B_FALSE; 4669 boolean_t checkpoint_rewind = 4670 (spa->spa_import_flags & ZFS_IMPORT_CHECKPOINT); 4671 boolean_t update_config_cache = B_FALSE; 4672 4673 ASSERT(MUTEX_HELD(&spa_namespace_lock)); 4674 ASSERT(spa->spa_config_source != SPA_CONFIG_SRC_NONE); 4675 4676 spa_load_note(spa, "LOADING"); 4677 4678 error = spa_ld_mos_with_trusted_config(spa, type, &update_config_cache); 4679 if (error != 0) 4680 return (error); 4681 4682 /* 4683 * If we are rewinding to the checkpoint then we need to repeat 4684 * everything we've done so far in this function but this time 4685 * selecting the checkpointed uberblock and using that to open 4686 * the MOS. 4687 */ 4688 if (checkpoint_rewind) { 4689 /* 4690 * If we are rewinding to the checkpoint update config cache 4691 * anyway. 
4692 */ 4693 update_config_cache = B_TRUE; 4694 4695 /* 4696 * Extract the checkpointed uberblock from the current MOS 4697 * and use this as the pool's uberblock from now on. If the 4698 * pool is imported as writeable we also write the checkpoint 4699 * uberblock to the labels, making the rewind permanent. 4700 */ 4701 error = spa_ld_checkpoint_rewind(spa); 4702 if (error != 0) 4703 return (error); 4704 4705 /* 4706 * Redo the loading process again with the 4707 * checkpointed uberblock. 4708 */ 4709 spa_ld_prepare_for_reload(spa); 4710 spa_load_note(spa, "LOADING checkpointed uberblock"); 4711 error = spa_ld_mos_with_trusted_config(spa, type, NULL); 4712 if (error != 0) 4713 return (error); 4714 } 4715 4716 /* 4717 * Retrieve the checkpoint txg if the pool has a checkpoint. 4718 */ 4719 error = spa_ld_read_checkpoint_txg(spa); 4720 if (error != 0) 4721 return (error); 4722 4723 /* 4724 * Retrieve the mapping of indirect vdevs. Those vdevs were removed 4725 * from the pool and their contents were re-mapped to other vdevs. Note 4726 * that everything that we read before this step must have been 4727 * rewritten on concrete vdevs after the last device removal was 4728 * initiated. Otherwise we could be reading from indirect vdevs before 4729 * we have loaded their mappings. 4730 */ 4731 error = spa_ld_open_indirect_vdev_metadata(spa); 4732 if (error != 0) 4733 return (error); 4734 4735 /* 4736 * Retrieve the full list of active features from the MOS and check if 4737 * they are all supported. 4738 */ 4739 error = spa_ld_check_features(spa, &missing_feat_write); 4740 if (error != 0) 4741 return (error); 4742 4743 /* 4744 * Load several special directories from the MOS needed by the dsl_pool 4745 * layer. 4746 */ 4747 error = spa_ld_load_special_directories(spa); 4748 if (error != 0) 4749 return (error); 4750 4751 /* 4752 * Retrieve pool properties from the MOS. 4753 */ 4754 error = spa_ld_get_props(spa); 4755 if (error != 0) 4756 return (error); 4757 4758 /* 4759 * Retrieve the list of auxiliary devices - cache devices and spares - 4760 * and open them. 4761 */ 4762 error = spa_ld_open_aux_vdevs(spa, type); 4763 if (error != 0) 4764 return (error); 4765 4766 /* 4767 * Load the metadata for all vdevs. Also check if unopenable devices 4768 * should be autoreplaced. 4769 */ 4770 error = spa_ld_load_vdev_metadata(spa); 4771 if (error != 0) 4772 return (error); 4773 4774 error = spa_ld_load_dedup_tables(spa); 4775 if (error != 0) 4776 return (error); 4777 4778 /* 4779 * Verify the logs now to make sure we don't have any unexpected errors 4780 * when we claim log blocks later. 4781 */ 4782 error = spa_ld_verify_logs(spa, type, ereport); 4783 if (error != 0) 4784 return (error); 4785 4786 if (missing_feat_write) { 4787 ASSERT(spa->spa_load_state == SPA_LOAD_TRYIMPORT); 4788 4789 /* 4790 * At this point, we know that we can open the pool in 4791 * read-only mode but not read-write mode. We now have enough 4792 * information and can return to userland. 4793 */ 4794 return (spa_vdev_err(spa->spa_root_vdev, VDEV_AUX_UNSUP_FEAT, 4795 ENOTSUP)); 4796 } 4797 4798 /* 4799 * Traverse the last txgs to make sure the pool was left off in a safe 4800 * state. When performing an extreme rewind, we verify the whole pool, 4801 * which can take a very long time. 4802 */ 4803 error = spa_ld_verify_pool_data(spa); 4804 if (error != 0) 4805 return (error); 4806 4807 /* 4808 * Calculate the deflated space for the pool. 
This must be done before 4809 * we write anything to the pool because we'd need to update the space 4810 * accounting using the deflated sizes. 4811 */ 4812 spa_update_dspace(spa); 4813 4814 /* 4815 * We have now retrieved all the information we needed to open the 4816 * pool. If we are importing the pool in read-write mode, a few 4817 * additional steps must be performed to finish the import. 4818 */ 4819 if (spa_writeable(spa) && (spa->spa_load_state == SPA_LOAD_RECOVER || 4820 spa->spa_load_max_txg == UINT64_MAX)) { 4821 uint64_t config_cache_txg = spa->spa_config_txg; 4822 4823 ASSERT(spa->spa_load_state != SPA_LOAD_TRYIMPORT); 4824 4825 /* 4826 * In case of a checkpoint rewind, log the original txg 4827 * of the checkpointed uberblock. 4828 */ 4829 if (checkpoint_rewind) { 4830 spa_history_log_internal(spa, "checkpoint rewind", 4831 NULL, "rewound state to txg=%llu", 4832 (u_longlong_t)spa->spa_uberblock.ub_checkpoint_txg); 4833 } 4834 4835 /* 4836 * Traverse the ZIL and claim all blocks. 4837 */ 4838 spa_ld_claim_log_blocks(spa); 4839 4840 /* 4841 * Kick-off the syncing thread. 4842 */ 4843 spa->spa_sync_on = B_TRUE; 4844 txg_sync_start(spa->spa_dsl_pool); 4845 mmp_thread_start(spa); 4846 4847 /* 4848 * Wait for all claims to sync. We sync up to the highest 4849 * claimed log block birth time so that claimed log blocks 4850 * don't appear to be from the future. spa_claim_max_txg 4851 * will have been set for us by ZIL traversal operations 4852 * performed above. 4853 */ 4854 txg_wait_synced(spa->spa_dsl_pool, spa->spa_claim_max_txg); 4855 4856 /* 4857 * Check if we need to request an update of the config. On the 4858 * next sync, we would update the config stored in vdev labels 4859 * and the cachefile (by default /etc/zfs/zpool.cache). 4860 */ 4861 spa_ld_check_for_config_update(spa, config_cache_txg, 4862 update_config_cache); 4863 4864 /* 4865 * Check if a rebuild was in progress and if so resume it. 4866 * Then check all DTLs to see if anything needs resilvering. 4867 * The resilver will be deferred if a rebuild was started. 4868 */ 4869 if (vdev_rebuild_active(spa->spa_root_vdev)) { 4870 vdev_rebuild_restart(spa); 4871 } else if (!dsl_scan_resilvering(spa->spa_dsl_pool) && 4872 vdev_resilver_needed(spa->spa_root_vdev, NULL, NULL)) { 4873 spa_async_request(spa, SPA_ASYNC_RESILVER); 4874 } 4875 4876 /* 4877 * Log the fact that we booted up (so that we can detect if 4878 * we rebooted in the middle of an operation). 4879 */ 4880 spa_history_log_version(spa, "open", NULL); 4881 4882 spa_restart_removal(spa); 4883 spa_spawn_aux_threads(spa); 4884 4885 /* 4886 * Delete any inconsistent datasets. 4887 * 4888 * Note: 4889 * Since we may be issuing deletes for clones here, 4890 * we make sure to do so after we've spawned all the 4891 * auxiliary threads above (from which the livelist 4892 * deletion zthr is part of). 4893 */ 4894 (void) dmu_objset_find(spa_name(spa), 4895 dsl_destroy_inconsistent, NULL, DS_FIND_CHILDREN); 4896 4897 /* 4898 * Clean up any stale temporary dataset userrefs. 
4899 */ 4900 dsl_pool_clean_tmp_userrefs(spa->spa_dsl_pool); 4901 4902 spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER); 4903 vdev_initialize_restart(spa->spa_root_vdev); 4904 vdev_trim_restart(spa->spa_root_vdev); 4905 vdev_autotrim_restart(spa); 4906 spa_config_exit(spa, SCL_CONFIG, FTAG); 4907 } 4908 4909 spa_import_progress_remove(spa_guid(spa)); 4910 spa_async_request(spa, SPA_ASYNC_L2CACHE_REBUILD); 4911 4912 spa_load_note(spa, "LOADED"); 4913 4914 return (0); 4915 } 4916 4917 static int 4918 spa_load_retry(spa_t *spa, spa_load_state_t state) 4919 { 4920 spa_mode_t mode = spa->spa_mode; 4921 4922 spa_unload(spa); 4923 spa_deactivate(spa); 4924 4925 spa->spa_load_max_txg = spa->spa_uberblock.ub_txg - 1; 4926 4927 spa_activate(spa, mode); 4928 spa_async_suspend(spa); 4929 4930 spa_load_note(spa, "spa_load_retry: rewind, max txg: %llu", 4931 (u_longlong_t)spa->spa_load_max_txg); 4932 4933 return (spa_load(spa, state, SPA_IMPORT_EXISTING)); 4934 } 4935 4936 /* 4937 * If spa_load() fails this function will try loading prior txg's. If 4938 * 'state' is SPA_LOAD_RECOVER and one of these loads succeeds the pool 4939 * will be rewound to that txg. If 'state' is not SPA_LOAD_RECOVER this 4940 * function will not rewind the pool and will return the same error as 4941 * spa_load(). 4942 */ 4943 static int 4944 spa_load_best(spa_t *spa, spa_load_state_t state, uint64_t max_request, 4945 int rewind_flags) 4946 { 4947 nvlist_t *loadinfo = NULL; 4948 nvlist_t *config = NULL; 4949 int load_error, rewind_error; 4950 uint64_t safe_rewind_txg; 4951 uint64_t min_txg; 4952 4953 if (spa->spa_load_txg && state == SPA_LOAD_RECOVER) { 4954 spa->spa_load_max_txg = spa->spa_load_txg; 4955 spa_set_log_state(spa, SPA_LOG_CLEAR); 4956 } else { 4957 spa->spa_load_max_txg = max_request; 4958 if (max_request != UINT64_MAX) 4959 spa->spa_extreme_rewind = B_TRUE; 4960 } 4961 4962 load_error = rewind_error = spa_load(spa, state, SPA_IMPORT_EXISTING); 4963 if (load_error == 0) 4964 return (0); 4965 if (load_error == ZFS_ERR_NO_CHECKPOINT) { 4966 /* 4967 * When attempting checkpoint-rewind on a pool with no 4968 * checkpoint, we should not attempt to load uberblocks 4969 * from previous txgs when spa_load fails. 4970 */ 4971 ASSERT(spa->spa_import_flags & ZFS_IMPORT_CHECKPOINT); 4972 spa_import_progress_remove(spa_guid(spa)); 4973 return (load_error); 4974 } 4975 4976 if (spa->spa_root_vdev != NULL) 4977 config = spa_config_generate(spa, NULL, -1ULL, B_TRUE); 4978 4979 spa->spa_last_ubsync_txg = spa->spa_uberblock.ub_txg; 4980 spa->spa_last_ubsync_txg_ts = spa->spa_uberblock.ub_timestamp; 4981 4982 if (rewind_flags & ZPOOL_NEVER_REWIND) { 4983 nvlist_free(config); 4984 spa_import_progress_remove(spa_guid(spa)); 4985 return (load_error); 4986 } 4987 4988 if (state == SPA_LOAD_RECOVER) { 4989 /* Price of rolling back is discarding txgs, including log */ 4990 spa_set_log_state(spa, SPA_LOG_CLEAR); 4991 } else { 4992 /* 4993 * If we aren't rolling back save the load info from our first 4994 * import attempt so that we can restore it after attempting 4995 * to rewind. 4996 */ 4997 loadinfo = spa->spa_load_info; 4998 spa->spa_load_info = fnvlist_alloc(); 4999 } 5000 5001 spa->spa_load_max_txg = spa->spa_last_ubsync_txg; 5002 safe_rewind_txg = spa->spa_last_ubsync_txg - TXG_DEFER_SIZE; 5003 min_txg = (rewind_flags & ZPOOL_EXTREME_REWIND) ? 
5004 TXG_INITIAL : safe_rewind_txg; 5005 5006 /* 5007 * Continue as long as we're finding errors, we're still within 5008 * the acceptable rewind range, and we're still finding uberblocks 5009 */ 5010 while (rewind_error && spa->spa_uberblock.ub_txg >= min_txg && 5011 spa->spa_uberblock.ub_txg <= spa->spa_load_max_txg) { 5012 if (spa->spa_load_max_txg < safe_rewind_txg) 5013 spa->spa_extreme_rewind = B_TRUE; 5014 rewind_error = spa_load_retry(spa, state); 5015 } 5016 5017 spa->spa_extreme_rewind = B_FALSE; 5018 spa->spa_load_max_txg = UINT64_MAX; 5019 5020 if (config && (rewind_error || state != SPA_LOAD_RECOVER)) 5021 spa_config_set(spa, config); 5022 else 5023 nvlist_free(config); 5024 5025 if (state == SPA_LOAD_RECOVER) { 5026 ASSERT3P(loadinfo, ==, NULL); 5027 spa_import_progress_remove(spa_guid(spa)); 5028 return (rewind_error); 5029 } else { 5030 /* Store the rewind info as part of the initial load info */ 5031 fnvlist_add_nvlist(loadinfo, ZPOOL_CONFIG_REWIND_INFO, 5032 spa->spa_load_info); 5033 5034 /* Restore the initial load info */ 5035 fnvlist_free(spa->spa_load_info); 5036 spa->spa_load_info = loadinfo; 5037 5038 spa_import_progress_remove(spa_guid(spa)); 5039 return (load_error); 5040 } 5041 } 5042 5043 /* 5044 * Pool Open/Import 5045 * 5046 * The import case is identical to an open except that the configuration is sent 5047 * down from userland, instead of grabbed from the configuration cache. For the 5048 * case of an open, the pool configuration will exist in the 5049 * POOL_STATE_UNINITIALIZED state. 5050 * 5051 * The stats information (gen/count/ustats) is used to gather vdev statistics at 5052 * the same time open the pool, without having to keep around the spa_t in some 5053 * ambiguous state. 5054 */ 5055 static int 5056 spa_open_common(const char *pool, spa_t **spapp, void *tag, nvlist_t *nvpolicy, 5057 nvlist_t **config) 5058 { 5059 spa_t *spa; 5060 spa_load_state_t state = SPA_LOAD_OPEN; 5061 int error; 5062 int locked = B_FALSE; 5063 int firstopen = B_FALSE; 5064 5065 *spapp = NULL; 5066 5067 /* 5068 * As disgusting as this is, we need to support recursive calls to this 5069 * function because dsl_dir_open() is called during spa_load(), and ends 5070 * up calling spa_open() again. The real fix is to figure out how to 5071 * avoid dsl_dir_open() calling this in the first place. 5072 */ 5073 if (MUTEX_NOT_HELD(&spa_namespace_lock)) { 5074 mutex_enter(&spa_namespace_lock); 5075 locked = B_TRUE; 5076 } 5077 5078 if ((spa = spa_lookup(pool)) == NULL) { 5079 if (locked) 5080 mutex_exit(&spa_namespace_lock); 5081 return (SET_ERROR(ENOENT)); 5082 } 5083 5084 if (spa->spa_state == POOL_STATE_UNINITIALIZED) { 5085 zpool_load_policy_t policy; 5086 5087 firstopen = B_TRUE; 5088 5089 zpool_get_load_policy(nvpolicy ? nvpolicy : spa->spa_config, 5090 &policy); 5091 if (policy.zlp_rewind & ZPOOL_DO_REWIND) 5092 state = SPA_LOAD_RECOVER; 5093 5094 spa_activate(spa, spa_mode_global); 5095 5096 if (state != SPA_LOAD_RECOVER) 5097 spa->spa_last_ubsync_txg = spa->spa_load_txg = 0; 5098 spa->spa_config_source = SPA_CONFIG_SRC_CACHEFILE; 5099 5100 zfs_dbgmsg("spa_open_common: opening %s", pool); 5101 error = spa_load_best(spa, state, policy.zlp_txg, 5102 policy.zlp_rewind); 5103 5104 if (error == EBADF) { 5105 /* 5106 * If vdev_validate() returns failure (indicated by 5107 * EBADF), it indicates that one of the vdevs indicates 5108 * that the pool has been exported or destroyed. 
If 5109 * this is the case, the config cache is out of sync and 5110 * we should remove the pool from the namespace. 5111 */ 5112 spa_unload(spa); 5113 spa_deactivate(spa); 5114 spa_write_cachefile(spa, B_TRUE, B_TRUE); 5115 spa_remove(spa); 5116 if (locked) 5117 mutex_exit(&spa_namespace_lock); 5118 return (SET_ERROR(ENOENT)); 5119 } 5120 5121 if (error) { 5122 /* 5123 * We can't open the pool, but we still have useful 5124 * information: the state of each vdev after the 5125 * attempted vdev_open(). Return this to the user. 5126 */ 5127 if (config != NULL && spa->spa_config) { 5128 VERIFY(nvlist_dup(spa->spa_config, config, 5129 KM_SLEEP) == 0); 5130 VERIFY(nvlist_add_nvlist(*config, 5131 ZPOOL_CONFIG_LOAD_INFO, 5132 spa->spa_load_info) == 0); 5133 } 5134 spa_unload(spa); 5135 spa_deactivate(spa); 5136 spa->spa_last_open_failed = error; 5137 if (locked) 5138 mutex_exit(&spa_namespace_lock); 5139 *spapp = NULL; 5140 return (error); 5141 } 5142 } 5143 5144 spa_open_ref(spa, tag); 5145 5146 if (config != NULL) 5147 *config = spa_config_generate(spa, NULL, -1ULL, B_TRUE); 5148 5149 /* 5150 * If we've recovered the pool, pass back any information we 5151 * gathered while doing the load. 5152 */ 5153 if (state == SPA_LOAD_RECOVER) { 5154 VERIFY(nvlist_add_nvlist(*config, ZPOOL_CONFIG_LOAD_INFO, 5155 spa->spa_load_info) == 0); 5156 } 5157 5158 if (locked) { 5159 spa->spa_last_open_failed = 0; 5160 spa->spa_last_ubsync_txg = 0; 5161 spa->spa_load_txg = 0; 5162 mutex_exit(&spa_namespace_lock); 5163 } 5164 5165 if (firstopen) 5166 zvol_create_minors_recursive(spa_name(spa)); 5167 5168 *spapp = spa; 5169 5170 return (0); 5171 } 5172 5173 int 5174 spa_open_rewind(const char *name, spa_t **spapp, void *tag, nvlist_t *policy, 5175 nvlist_t **config) 5176 { 5177 return (spa_open_common(name, spapp, tag, policy, config)); 5178 } 5179 5180 int 5181 spa_open(const char *name, spa_t **spapp, void *tag) 5182 { 5183 return (spa_open_common(name, spapp, tag, NULL, NULL)); 5184 } 5185 5186 /* 5187 * Lookup the given spa_t, incrementing the inject count in the process, 5188 * preventing it from being exported or destroyed. 5189 */ 5190 spa_t * 5191 spa_inject_addref(char *name) 5192 { 5193 spa_t *spa; 5194 5195 mutex_enter(&spa_namespace_lock); 5196 if ((spa = spa_lookup(name)) == NULL) { 5197 mutex_exit(&spa_namespace_lock); 5198 return (NULL); 5199 } 5200 spa->spa_inject_ref++; 5201 mutex_exit(&spa_namespace_lock); 5202 5203 return (spa); 5204 } 5205 5206 void 5207 spa_inject_delref(spa_t *spa) 5208 { 5209 mutex_enter(&spa_namespace_lock); 5210 spa->spa_inject_ref--; 5211 mutex_exit(&spa_namespace_lock); 5212 } 5213 5214 /* 5215 * Add spares device information to the nvlist. 
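 *
 * The spares are published as a ZPOOL_CONFIG_SPARES nvlist array under
 * the config's vdev tree; any spare that has since been activated is
 * additionally reported as CANT_OPEN with aux state SPARED so userland
 * can show it as in use.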
5216 */ 5217 static void 5218 spa_add_spares(spa_t *spa, nvlist_t *config) 5219 { 5220 nvlist_t **spares; 5221 uint_t i, nspares; 5222 nvlist_t *nvroot; 5223 uint64_t guid; 5224 vdev_stat_t *vs; 5225 uint_t vsc; 5226 uint64_t pool; 5227 5228 ASSERT(spa_config_held(spa, SCL_CONFIG, RW_READER)); 5229 5230 if (spa->spa_spares.sav_count == 0) 5231 return; 5232 5233 VERIFY(nvlist_lookup_nvlist(config, 5234 ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0); 5235 VERIFY(nvlist_lookup_nvlist_array(spa->spa_spares.sav_config, 5236 ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0); 5237 if (nspares != 0) { 5238 VERIFY(nvlist_add_nvlist_array(nvroot, 5239 ZPOOL_CONFIG_SPARES, spares, nspares) == 0); 5240 VERIFY(nvlist_lookup_nvlist_array(nvroot, 5241 ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0); 5242 5243 /* 5244 * Go through and find any spares which have since been 5245 * repurposed as an active spare. If this is the case, update 5246 * their status appropriately. 5247 */ 5248 for (i = 0; i < nspares; i++) { 5249 VERIFY(nvlist_lookup_uint64(spares[i], 5250 ZPOOL_CONFIG_GUID, &guid) == 0); 5251 if (spa_spare_exists(guid, &pool, NULL) && 5252 pool != 0ULL) { 5253 VERIFY(nvlist_lookup_uint64_array( 5254 spares[i], ZPOOL_CONFIG_VDEV_STATS, 5255 (uint64_t **)&vs, &vsc) == 0); 5256 vs->vs_state = VDEV_STATE_CANT_OPEN; 5257 vs->vs_aux = VDEV_AUX_SPARED; 5258 } 5259 } 5260 } 5261 } 5262 5263 /* 5264 * Add l2cache device information to the nvlist, including vdev stats. 5265 */ 5266 static void 5267 spa_add_l2cache(spa_t *spa, nvlist_t *config) 5268 { 5269 nvlist_t **l2cache; 5270 uint_t i, j, nl2cache; 5271 nvlist_t *nvroot; 5272 uint64_t guid; 5273 vdev_t *vd; 5274 vdev_stat_t *vs; 5275 uint_t vsc; 5276 5277 ASSERT(spa_config_held(spa, SCL_CONFIG, RW_READER)); 5278 5279 if (spa->spa_l2cache.sav_count == 0) 5280 return; 5281 5282 VERIFY(nvlist_lookup_nvlist(config, 5283 ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0); 5284 VERIFY(nvlist_lookup_nvlist_array(spa->spa_l2cache.sav_config, 5285 ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0); 5286 if (nl2cache != 0) { 5287 VERIFY(nvlist_add_nvlist_array(nvroot, 5288 ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache) == 0); 5289 VERIFY(nvlist_lookup_nvlist_array(nvroot, 5290 ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0); 5291 5292 /* 5293 * Update level 2 cache device stats. 
5294 */ 5295 5296 for (i = 0; i < nl2cache; i++) { 5297 VERIFY(nvlist_lookup_uint64(l2cache[i], 5298 ZPOOL_CONFIG_GUID, &guid) == 0); 5299 5300 vd = NULL; 5301 for (j = 0; j < spa->spa_l2cache.sav_count; j++) { 5302 if (guid == 5303 spa->spa_l2cache.sav_vdevs[j]->vdev_guid) { 5304 vd = spa->spa_l2cache.sav_vdevs[j]; 5305 break; 5306 } 5307 } 5308 ASSERT(vd != NULL); 5309 5310 VERIFY(nvlist_lookup_uint64_array(l2cache[i], 5311 ZPOOL_CONFIG_VDEV_STATS, (uint64_t **)&vs, &vsc) 5312 == 0); 5313 vdev_get_stats(vd, vs); 5314 vdev_config_generate_stats(vd, l2cache[i]); 5315 5316 } 5317 } 5318 } 5319 5320 static void 5321 spa_feature_stats_from_disk(spa_t *spa, nvlist_t *features) 5322 { 5323 zap_cursor_t zc; 5324 zap_attribute_t za; 5325 5326 if (spa->spa_feat_for_read_obj != 0) { 5327 for (zap_cursor_init(&zc, spa->spa_meta_objset, 5328 spa->spa_feat_for_read_obj); 5329 zap_cursor_retrieve(&zc, &za) == 0; 5330 zap_cursor_advance(&zc)) { 5331 ASSERT(za.za_integer_length == sizeof (uint64_t) && 5332 za.za_num_integers == 1); 5333 VERIFY0(nvlist_add_uint64(features, za.za_name, 5334 za.za_first_integer)); 5335 } 5336 zap_cursor_fini(&zc); 5337 } 5338 5339 if (spa->spa_feat_for_write_obj != 0) { 5340 for (zap_cursor_init(&zc, spa->spa_meta_objset, 5341 spa->spa_feat_for_write_obj); 5342 zap_cursor_retrieve(&zc, &za) == 0; 5343 zap_cursor_advance(&zc)) { 5344 ASSERT(za.za_integer_length == sizeof (uint64_t) && 5345 za.za_num_integers == 1); 5346 VERIFY0(nvlist_add_uint64(features, za.za_name, 5347 za.za_first_integer)); 5348 } 5349 zap_cursor_fini(&zc); 5350 } 5351 } 5352 5353 static void 5354 spa_feature_stats_from_cache(spa_t *spa, nvlist_t *features) 5355 { 5356 int i; 5357 5358 for (i = 0; i < SPA_FEATURES; i++) { 5359 zfeature_info_t feature = spa_feature_table[i]; 5360 uint64_t refcount; 5361 5362 if (feature_get_refcount(spa, &feature, &refcount) != 0) 5363 continue; 5364 5365 VERIFY0(nvlist_add_uint64(features, feature.fi_guid, refcount)); 5366 } 5367 } 5368 5369 /* 5370 * Store a list of pool features and their reference counts in the 5371 * config. 5372 * 5373 * The first time this is called on a spa, allocate a new nvlist, fetch 5374 * the pool features and reference counts from disk, then save the list 5375 * in the spa. In subsequent calls on the same spa use the saved nvlist 5376 * and refresh its values from the cached reference counts. This 5377 * ensures we don't block here on I/O on a suspended pool so 'zpool 5378 * clear' can resume the pool. 
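 *
 * The result is a ZPOOL_CONFIG_FEATURE_STATS nvlist in the returned
 * config mapping each feature GUID (e.g. "com.delphix:hole_birth") to
 * its current reference count.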
5379 */ 5380 static void 5381 spa_add_feature_stats(spa_t *spa, nvlist_t *config) 5382 { 5383 nvlist_t *features; 5384 5385 ASSERT(spa_config_held(spa, SCL_CONFIG, RW_READER)); 5386 5387 mutex_enter(&spa->spa_feat_stats_lock); 5388 features = spa->spa_feat_stats; 5389 5390 if (features != NULL) { 5391 spa_feature_stats_from_cache(spa, features); 5392 } else { 5393 VERIFY0(nvlist_alloc(&features, NV_UNIQUE_NAME, KM_SLEEP)); 5394 spa->spa_feat_stats = features; 5395 spa_feature_stats_from_disk(spa, features); 5396 } 5397 5398 VERIFY0(nvlist_add_nvlist(config, ZPOOL_CONFIG_FEATURE_STATS, 5399 features)); 5400 5401 mutex_exit(&spa->spa_feat_stats_lock); 5402 } 5403 5404 int 5405 spa_get_stats(const char *name, nvlist_t **config, 5406 char *altroot, size_t buflen) 5407 { 5408 int error; 5409 spa_t *spa; 5410 5411 *config = NULL; 5412 error = spa_open_common(name, &spa, FTAG, NULL, config); 5413 5414 if (spa != NULL) { 5415 /* 5416 * This still leaves a window of inconsistency where the spares 5417 * or l2cache devices could change and the config would be 5418 * self-inconsistent. 5419 */ 5420 spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER); 5421 5422 if (*config != NULL) { 5423 uint64_t loadtimes[2]; 5424 5425 loadtimes[0] = spa->spa_loaded_ts.tv_sec; 5426 loadtimes[1] = spa->spa_loaded_ts.tv_nsec; 5427 VERIFY(nvlist_add_uint64_array(*config, 5428 ZPOOL_CONFIG_LOADED_TIME, loadtimes, 2) == 0); 5429 5430 VERIFY(nvlist_add_uint64(*config, 5431 ZPOOL_CONFIG_ERRCOUNT, 5432 spa_get_errlog_size(spa)) == 0); 5433 5434 if (spa_suspended(spa)) { 5435 VERIFY(nvlist_add_uint64(*config, 5436 ZPOOL_CONFIG_SUSPENDED, 5437 spa->spa_failmode) == 0); 5438 VERIFY(nvlist_add_uint64(*config, 5439 ZPOOL_CONFIG_SUSPENDED_REASON, 5440 spa->spa_suspended) == 0); 5441 } 5442 5443 spa_add_spares(spa, *config); 5444 spa_add_l2cache(spa, *config); 5445 spa_add_feature_stats(spa, *config); 5446 } 5447 } 5448 5449 /* 5450 * We want to get the alternate root even for faulted pools, so we cheat 5451 * and call spa_lookup() directly. 5452 */ 5453 if (altroot) { 5454 if (spa == NULL) { 5455 mutex_enter(&spa_namespace_lock); 5456 spa = spa_lookup(name); 5457 if (spa) 5458 spa_altroot(spa, altroot, buflen); 5459 else 5460 altroot[0] = '\0'; 5461 spa = NULL; 5462 mutex_exit(&spa_namespace_lock); 5463 } else { 5464 spa_altroot(spa, altroot, buflen); 5465 } 5466 } 5467 5468 if (spa != NULL) { 5469 spa_config_exit(spa, SCL_CONFIG, FTAG); 5470 spa_close(spa, FTAG); 5471 } 5472 5473 return (error); 5474 } 5475 5476 /* 5477 * Validate that the auxiliary device array is well formed. We must have an 5478 * array of nvlists, each which describes a valid leaf vdev. If this is an 5479 * import (mode is VDEV_ALLOC_SPARE), then we allow corrupted spares to be 5480 * specified, as long as they are well-formed. 5481 */ 5482 static int 5483 spa_validate_aux_devs(spa_t *spa, nvlist_t *nvroot, uint64_t crtxg, int mode, 5484 spa_aux_vdev_t *sav, const char *config, uint64_t version, 5485 vdev_labeltype_t label) 5486 { 5487 nvlist_t **dev; 5488 uint_t i, ndev; 5489 vdev_t *vd; 5490 int error; 5491 5492 ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL); 5493 5494 /* 5495 * It's acceptable to have no devs specified. 5496 */ 5497 if (nvlist_lookup_nvlist_array(nvroot, config, &dev, &ndev) != 0) 5498 return (0); 5499 5500 if (ndev == 0) 5501 return (SET_ERROR(EINVAL)); 5502 5503 /* 5504 * Make sure the pool is formatted with a version that supports this 5505 * device type. 
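 *
 * ("version" here is SPA_VERSION_SPARES or SPA_VERSION_L2CACHE,
 * whichever spa_validate_aux() below passed for the aux class being
 * validated.)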
5506 */ 5507 if (spa_version(spa) < version) 5508 return (SET_ERROR(ENOTSUP)); 5509 5510 /* 5511 * Set the pending device list so we correctly handle device in-use 5512 * checking. 5513 */ 5514 sav->sav_pending = dev; 5515 sav->sav_npending = ndev; 5516 5517 for (i = 0; i < ndev; i++) { 5518 if ((error = spa_config_parse(spa, &vd, dev[i], NULL, 0, 5519 mode)) != 0) 5520 goto out; 5521 5522 if (!vd->vdev_ops->vdev_op_leaf) { 5523 vdev_free(vd); 5524 error = SET_ERROR(EINVAL); 5525 goto out; 5526 } 5527 5528 vd->vdev_top = vd; 5529 5530 if ((error = vdev_open(vd)) == 0 && 5531 (error = vdev_label_init(vd, crtxg, label)) == 0) { 5532 VERIFY(nvlist_add_uint64(dev[i], ZPOOL_CONFIG_GUID, 5533 vd->vdev_guid) == 0); 5534 } 5535 5536 vdev_free(vd); 5537 5538 if (error && 5539 (mode != VDEV_ALLOC_SPARE && mode != VDEV_ALLOC_L2CACHE)) 5540 goto out; 5541 else 5542 error = 0; 5543 } 5544 5545 out: 5546 sav->sav_pending = NULL; 5547 sav->sav_npending = 0; 5548 return (error); 5549 } 5550 5551 static int 5552 spa_validate_aux(spa_t *spa, nvlist_t *nvroot, uint64_t crtxg, int mode) 5553 { 5554 int error; 5555 5556 ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL); 5557 5558 if ((error = spa_validate_aux_devs(spa, nvroot, crtxg, mode, 5559 &spa->spa_spares, ZPOOL_CONFIG_SPARES, SPA_VERSION_SPARES, 5560 VDEV_LABEL_SPARE)) != 0) { 5561 return (error); 5562 } 5563 5564 return (spa_validate_aux_devs(spa, nvroot, crtxg, mode, 5565 &spa->spa_l2cache, ZPOOL_CONFIG_L2CACHE, SPA_VERSION_L2CACHE, 5566 VDEV_LABEL_L2CACHE)); 5567 } 5568 5569 static void 5570 spa_set_aux_vdevs(spa_aux_vdev_t *sav, nvlist_t **devs, int ndevs, 5571 const char *config) 5572 { 5573 int i; 5574 5575 if (sav->sav_config != NULL) { 5576 nvlist_t **olddevs; 5577 uint_t oldndevs; 5578 nvlist_t **newdevs; 5579 5580 /* 5581 * Generate new dev list by concatenating with the 5582 * current dev list. 5583 */ 5584 VERIFY(nvlist_lookup_nvlist_array(sav->sav_config, config, 5585 &olddevs, &oldndevs) == 0); 5586 5587 newdevs = kmem_alloc(sizeof (void *) * 5588 (ndevs + oldndevs), KM_SLEEP); 5589 for (i = 0; i < oldndevs; i++) 5590 VERIFY(nvlist_dup(olddevs[i], &newdevs[i], 5591 KM_SLEEP) == 0); 5592 for (i = 0; i < ndevs; i++) 5593 VERIFY(nvlist_dup(devs[i], &newdevs[i + oldndevs], 5594 KM_SLEEP) == 0); 5595 5596 VERIFY(nvlist_remove(sav->sav_config, config, 5597 DATA_TYPE_NVLIST_ARRAY) == 0); 5598 5599 VERIFY(nvlist_add_nvlist_array(sav->sav_config, 5600 config, newdevs, ndevs + oldndevs) == 0); 5601 for (i = 0; i < oldndevs + ndevs; i++) 5602 nvlist_free(newdevs[i]); 5603 kmem_free(newdevs, (oldndevs + ndevs) * sizeof (void *)); 5604 } else { 5605 /* 5606 * Generate a new dev list. 5607 */ 5608 VERIFY(nvlist_alloc(&sav->sav_config, NV_UNIQUE_NAME, 5609 KM_SLEEP) == 0); 5610 VERIFY(nvlist_add_nvlist_array(sav->sav_config, config, 5611 devs, ndevs) == 0); 5612 } 5613 } 5614 5615 /* 5616 * Stop and drop level 2 ARC devices 5617 */ 5618 void 5619 spa_l2cache_drop(spa_t *spa) 5620 { 5621 vdev_t *vd; 5622 int i; 5623 spa_aux_vdev_t *sav = &spa->spa_l2cache; 5624 5625 for (i = 0; i < sav->sav_count; i++) { 5626 uint64_t pool; 5627 5628 vd = sav->sav_vdevs[i]; 5629 ASSERT(vd != NULL); 5630 5631 if (spa_l2cache_exists(vd->vdev_guid, &pool) && 5632 pool != 0ULL && l2arc_vdev_present(vd)) 5633 l2arc_remove_vdev(vd); 5634 } 5635 } 5636 5637 /* 5638 * Verify encryption parameters for spa creation. If we are encrypting, we must 5639 * have the encryption feature flag enabled. 
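 *
 * Concretely: asking for any cipher (cp_crypt other than OFF or
 * INHERIT) without feature@encryption enabled in the requested
 * properties fails with ENOTSUP; the remaining crypto parameter checks
 * are delegated to dmu_objset_create_crypt_check().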
5640 */ 5641 static int 5642 spa_create_check_encryption_params(dsl_crypto_params_t *dcp, 5643 boolean_t has_encryption) 5644 { 5645 if (dcp->cp_crypt != ZIO_CRYPT_OFF && 5646 dcp->cp_crypt != ZIO_CRYPT_INHERIT && 5647 !has_encryption) 5648 return (SET_ERROR(ENOTSUP)); 5649 5650 return (dmu_objset_create_crypt_check(NULL, dcp, NULL)); 5651 } 5652 5653 /* 5654 * Pool Creation 5655 */ 5656 int 5657 spa_create(const char *pool, nvlist_t *nvroot, nvlist_t *props, 5658 nvlist_t *zplprops, dsl_crypto_params_t *dcp) 5659 { 5660 spa_t *spa; 5661 char *altroot = NULL; 5662 vdev_t *rvd; 5663 dsl_pool_t *dp; 5664 dmu_tx_t *tx; 5665 int error = 0; 5666 uint64_t txg = TXG_INITIAL; 5667 nvlist_t **spares, **l2cache; 5668 uint_t nspares, nl2cache; 5669 uint64_t version, obj, ndraid = 0; 5670 boolean_t has_features; 5671 boolean_t has_encryption; 5672 boolean_t has_allocclass; 5673 spa_feature_t feat; 5674 char *feat_name; 5675 char *poolname; 5676 nvlist_t *nvl; 5677 5678 if (props == NULL || 5679 nvlist_lookup_string(props, "tname", &poolname) != 0) 5680 poolname = (char *)pool; 5681 5682 /* 5683 * If this pool already exists, return failure. 5684 */ 5685 mutex_enter(&spa_namespace_lock); 5686 if (spa_lookup(poolname) != NULL) { 5687 mutex_exit(&spa_namespace_lock); 5688 return (SET_ERROR(EEXIST)); 5689 } 5690 5691 /* 5692 * Allocate a new spa_t structure. 5693 */ 5694 nvl = fnvlist_alloc(); 5695 fnvlist_add_string(nvl, ZPOOL_CONFIG_POOL_NAME, pool); 5696 (void) nvlist_lookup_string(props, 5697 zpool_prop_to_name(ZPOOL_PROP_ALTROOT), &altroot); 5698 spa = spa_add(poolname, nvl, altroot); 5699 fnvlist_free(nvl); 5700 spa_activate(spa, spa_mode_global); 5701 5702 if (props && (error = spa_prop_validate(spa, props))) { 5703 spa_deactivate(spa); 5704 spa_remove(spa); 5705 mutex_exit(&spa_namespace_lock); 5706 return (error); 5707 } 5708 5709 /* 5710 * Temporary pool names should never be written to disk. 
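 * If a temporary name was supplied via the "tname" property, the spa is
 * flagged with ZFS_IMPORT_TEMP_NAME below so that only the real pool name
 * is ever persisted.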
5711 */ 5712 if (poolname != pool) 5713 spa->spa_import_flags |= ZFS_IMPORT_TEMP_NAME; 5714 5715 has_features = B_FALSE; 5716 has_encryption = B_FALSE; 5717 has_allocclass = B_FALSE; 5718 for (nvpair_t *elem = nvlist_next_nvpair(props, NULL); 5719 elem != NULL; elem = nvlist_next_nvpair(props, elem)) { 5720 if (zpool_prop_feature(nvpair_name(elem))) { 5721 has_features = B_TRUE; 5722 5723 feat_name = strchr(nvpair_name(elem), '@') + 1; 5724 VERIFY0(zfeature_lookup_name(feat_name, &feat)); 5725 if (feat == SPA_FEATURE_ENCRYPTION) 5726 has_encryption = B_TRUE; 5727 if (feat == SPA_FEATURE_ALLOCATION_CLASSES) 5728 has_allocclass = B_TRUE; 5729 } 5730 } 5731 5732 /* verify encryption params, if they were provided */ 5733 if (dcp != NULL) { 5734 error = spa_create_check_encryption_params(dcp, has_encryption); 5735 if (error != 0) { 5736 spa_deactivate(spa); 5737 spa_remove(spa); 5738 mutex_exit(&spa_namespace_lock); 5739 return (error); 5740 } 5741 } 5742 if (!has_allocclass && zfs_special_devs(nvroot, NULL)) { 5743 spa_deactivate(spa); 5744 spa_remove(spa); 5745 mutex_exit(&spa_namespace_lock); 5746 return (ENOTSUP); 5747 } 5748 5749 if (has_features || nvlist_lookup_uint64(props, 5750 zpool_prop_to_name(ZPOOL_PROP_VERSION), &version) != 0) { 5751 version = SPA_VERSION; 5752 } 5753 ASSERT(SPA_VERSION_IS_SUPPORTED(version)); 5754 5755 spa->spa_first_txg = txg; 5756 spa->spa_uberblock.ub_txg = txg - 1; 5757 spa->spa_uberblock.ub_version = version; 5758 spa->spa_ubsync = spa->spa_uberblock; 5759 spa->spa_load_state = SPA_LOAD_CREATE; 5760 spa->spa_removing_phys.sr_state = DSS_NONE; 5761 spa->spa_removing_phys.sr_removing_vdev = -1; 5762 spa->spa_removing_phys.sr_prev_indirect_vdev = -1; 5763 spa->spa_indirect_vdevs_loaded = B_TRUE; 5764 5765 /* 5766 * Create "The Godfather" zio to hold all async IOs 5767 */ 5768 spa->spa_async_zio_root = kmem_alloc(max_ncpus * sizeof (void *), 5769 KM_SLEEP); 5770 for (int i = 0; i < max_ncpus; i++) { 5771 spa->spa_async_zio_root[i] = zio_root(spa, NULL, NULL, 5772 ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE | 5773 ZIO_FLAG_GODFATHER); 5774 } 5775 5776 /* 5777 * Create the root vdev. 5778 */ 5779 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 5780 5781 error = spa_config_parse(spa, &rvd, nvroot, NULL, 0, VDEV_ALLOC_ADD); 5782 5783 ASSERT(error != 0 || rvd != NULL); 5784 ASSERT(error != 0 || spa->spa_root_vdev == rvd); 5785 5786 if (error == 0 && !zfs_allocatable_devs(nvroot)) 5787 error = SET_ERROR(EINVAL); 5788 5789 if (error == 0 && 5790 (error = vdev_create(rvd, txg, B_FALSE)) == 0 && 5791 (error = vdev_draid_spare_create(nvroot, rvd, &ndraid, 0)) == 0 && 5792 (error = spa_validate_aux(spa, nvroot, txg, VDEV_ALLOC_ADD)) == 0) { 5793 /* 5794 * instantiate the metaslab groups (this will dirty the vdevs) 5795 * we can no longer error exit past this point 5796 */ 5797 for (int c = 0; error == 0 && c < rvd->vdev_children; c++) { 5798 vdev_t *vd = rvd->vdev_child[c]; 5799 5800 vdev_metaslab_set_size(vd); 5801 vdev_expand(vd, txg); 5802 } 5803 } 5804 5805 spa_config_exit(spa, SCL_ALL, FTAG); 5806 5807 if (error != 0) { 5808 spa_unload(spa); 5809 spa_deactivate(spa); 5810 spa_remove(spa); 5811 mutex_exit(&spa_namespace_lock); 5812 return (error); 5813 } 5814 5815 /* 5816 * Get the list of spares, if specified. 
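 * The spares come from the ZPOOL_CONFIG_SPARES nvlist array in the
 * caller-provided nvroot; sav_sync is set so they are written out with the
 * initial sync.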
5817 */ 5818 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES, 5819 &spares, &nspares) == 0) { 5820 VERIFY(nvlist_alloc(&spa->spa_spares.sav_config, NV_UNIQUE_NAME, 5821 KM_SLEEP) == 0); 5822 VERIFY(nvlist_add_nvlist_array(spa->spa_spares.sav_config, 5823 ZPOOL_CONFIG_SPARES, spares, nspares) == 0); 5824 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 5825 spa_load_spares(spa); 5826 spa_config_exit(spa, SCL_ALL, FTAG); 5827 spa->spa_spares.sav_sync = B_TRUE; 5828 } 5829 5830 /* 5831 * Get the list of level 2 cache devices, if specified. 5832 */ 5833 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE, 5834 &l2cache, &nl2cache) == 0) { 5835 VERIFY(nvlist_alloc(&spa->spa_l2cache.sav_config, 5836 NV_UNIQUE_NAME, KM_SLEEP) == 0); 5837 VERIFY(nvlist_add_nvlist_array(spa->spa_l2cache.sav_config, 5838 ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache) == 0); 5839 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 5840 spa_load_l2cache(spa); 5841 spa_config_exit(spa, SCL_ALL, FTAG); 5842 spa->spa_l2cache.sav_sync = B_TRUE; 5843 } 5844 5845 spa->spa_is_initializing = B_TRUE; 5846 spa->spa_dsl_pool = dp = dsl_pool_create(spa, zplprops, dcp, txg); 5847 spa->spa_is_initializing = B_FALSE; 5848 5849 /* 5850 * Create DDTs (dedup tables). 5851 */ 5852 ddt_create(spa); 5853 5854 spa_update_dspace(spa); 5855 5856 tx = dmu_tx_create_assigned(dp, txg); 5857 5858 /* 5859 * Create the pool's history object. 5860 */ 5861 if (version >= SPA_VERSION_ZPOOL_HISTORY && !spa->spa_history) 5862 spa_history_create_obj(spa, tx); 5863 5864 spa_event_notify(spa, NULL, NULL, ESC_ZFS_POOL_CREATE); 5865 spa_history_log_version(spa, "create", tx); 5866 5867 /* 5868 * Create the pool config object. 5869 */ 5870 spa->spa_config_object = dmu_object_alloc(spa->spa_meta_objset, 5871 DMU_OT_PACKED_NVLIST, SPA_CONFIG_BLOCKSIZE, 5872 DMU_OT_PACKED_NVLIST_SIZE, sizeof (uint64_t), tx); 5873 5874 if (zap_add(spa->spa_meta_objset, 5875 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_CONFIG, 5876 sizeof (uint64_t), 1, &spa->spa_config_object, tx) != 0) { 5877 cmn_err(CE_PANIC, "failed to add pool config"); 5878 } 5879 5880 if (zap_add(spa->spa_meta_objset, 5881 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_CREATION_VERSION, 5882 sizeof (uint64_t), 1, &version, tx) != 0) { 5883 cmn_err(CE_PANIC, "failed to add pool version"); 5884 } 5885 5886 /* Newly created pools with the right version are always deflated. */ 5887 if (version >= SPA_VERSION_RAIDZ_DEFLATE) { 5888 spa->spa_deflate = TRUE; 5889 if (zap_add(spa->spa_meta_objset, 5890 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_DEFLATE, 5891 sizeof (uint64_t), 1, &spa->spa_deflate, tx) != 0) { 5892 cmn_err(CE_PANIC, "failed to add deflate"); 5893 } 5894 } 5895 5896 /* 5897 * Create the deferred-free bpobj. Turn off compression 5898 * because sync-to-convergence takes longer if the blocksize 5899 * keeps changing. 5900 */ 5901 obj = bpobj_alloc(spa->spa_meta_objset, 1 << 14, tx); 5902 dmu_object_set_compress(spa->spa_meta_objset, obj, 5903 ZIO_COMPRESS_OFF, tx); 5904 if (zap_add(spa->spa_meta_objset, 5905 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_SYNC_BPOBJ, 5906 sizeof (uint64_t), 1, &obj, tx) != 0) { 5907 cmn_err(CE_PANIC, "failed to add bpobj"); 5908 } 5909 VERIFY3U(0, ==, bpobj_open(&spa->spa_deferred_bpobj, 5910 spa->spa_meta_objset, obj)); 5911 5912 /* 5913 * Generate some random noise for salted checksums to operate on. 5914 */ 5915 (void) random_get_pseudo_bytes(spa->spa_cksum_salt.zcs_bytes, 5916 sizeof (spa->spa_cksum_salt.zcs_bytes)); 5917 5918 /* 5919 * Set pool properties. 
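 * Defaults are taken from the zpool property table first; any properties
 * supplied by the caller are then applied via spa_sync_props() in this
 * same initial transaction.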
5920 */ 5921 spa->spa_bootfs = zpool_prop_default_numeric(ZPOOL_PROP_BOOTFS); 5922 spa->spa_delegation = zpool_prop_default_numeric(ZPOOL_PROP_DELEGATION); 5923 spa->spa_failmode = zpool_prop_default_numeric(ZPOOL_PROP_FAILUREMODE); 5924 spa->spa_autoexpand = zpool_prop_default_numeric(ZPOOL_PROP_AUTOEXPAND); 5925 spa->spa_multihost = zpool_prop_default_numeric(ZPOOL_PROP_MULTIHOST); 5926 spa->spa_autotrim = zpool_prop_default_numeric(ZPOOL_PROP_AUTOTRIM); 5927 5928 if (props != NULL) { 5929 spa_configfile_set(spa, props, B_FALSE); 5930 spa_sync_props(props, tx); 5931 } 5932 5933 for (int i = 0; i < ndraid; i++) 5934 spa_feature_incr(spa, SPA_FEATURE_DRAID, tx); 5935 5936 dmu_tx_commit(tx); 5937 5938 spa->spa_sync_on = B_TRUE; 5939 txg_sync_start(dp); 5940 mmp_thread_start(spa); 5941 txg_wait_synced(dp, txg); 5942 5943 spa_spawn_aux_threads(spa); 5944 5945 spa_write_cachefile(spa, B_FALSE, B_TRUE); 5946 5947 /* 5948 * Don't count references from objsets that are already closed 5949 * and are making their way through the eviction process. 5950 */ 5951 spa_evicting_os_wait(spa); 5952 spa->spa_minref = zfs_refcount_count(&spa->spa_refcount); 5953 spa->spa_load_state = SPA_LOAD_NONE; 5954 5955 mutex_exit(&spa_namespace_lock); 5956 5957 return (0); 5958 } 5959 5960 /* 5961 * Import a non-root pool into the system. 5962 */ 5963 int 5964 spa_import(char *pool, nvlist_t *config, nvlist_t *props, uint64_t flags) 5965 { 5966 spa_t *spa; 5967 char *altroot = NULL; 5968 spa_load_state_t state = SPA_LOAD_IMPORT; 5969 zpool_load_policy_t policy; 5970 spa_mode_t mode = spa_mode_global; 5971 uint64_t readonly = B_FALSE; 5972 int error; 5973 nvlist_t *nvroot; 5974 nvlist_t **spares, **l2cache; 5975 uint_t nspares, nl2cache; 5976 5977 /* 5978 * If a pool with this name exists, return failure. 5979 */ 5980 mutex_enter(&spa_namespace_lock); 5981 if (spa_lookup(pool) != NULL) { 5982 mutex_exit(&spa_namespace_lock); 5983 return (SET_ERROR(EEXIST)); 5984 } 5985 5986 /* 5987 * Create and initialize the spa structure. 5988 */ 5989 (void) nvlist_lookup_string(props, 5990 zpool_prop_to_name(ZPOOL_PROP_ALTROOT), &altroot); 5991 (void) nvlist_lookup_uint64(props, 5992 zpool_prop_to_name(ZPOOL_PROP_READONLY), &readonly); 5993 if (readonly) 5994 mode = SPA_MODE_READ; 5995 spa = spa_add(pool, config, altroot); 5996 spa->spa_import_flags = flags; 5997 5998 /* 5999 * Verbatim import - Take a pool and insert it into the namespace 6000 * as if it had been loaded at boot. 6001 */ 6002 if (spa->spa_import_flags & ZFS_IMPORT_VERBATIM) { 6003 if (props != NULL) 6004 spa_configfile_set(spa, props, B_FALSE); 6005 6006 spa_write_cachefile(spa, B_FALSE, B_TRUE); 6007 spa_event_notify(spa, NULL, NULL, ESC_ZFS_POOL_IMPORT); 6008 zfs_dbgmsg("spa_import: verbatim import of %s", pool); 6009 mutex_exit(&spa_namespace_lock); 6010 return (0); 6011 } 6012 6013 spa_activate(spa, mode); 6014 6015 /* 6016 * Don't start async tasks until we know everything is healthy. 
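 * Async work is suspended here right after activation and resumed with
 * spa_async_resume() once the load has succeeded and any supplied
 * properties have been applied.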
6017 */ 6018 spa_async_suspend(spa); 6019 6020 zpool_get_load_policy(config, &policy); 6021 if (policy.zlp_rewind & ZPOOL_DO_REWIND) 6022 state = SPA_LOAD_RECOVER; 6023 6024 spa->spa_config_source = SPA_CONFIG_SRC_TRYIMPORT; 6025 6026 if (state != SPA_LOAD_RECOVER) { 6027 spa->spa_last_ubsync_txg = spa->spa_load_txg = 0; 6028 zfs_dbgmsg("spa_import: importing %s", pool); 6029 } else { 6030 zfs_dbgmsg("spa_import: importing %s, max_txg=%lld " 6031 "(RECOVERY MODE)", pool, (longlong_t)policy.zlp_txg); 6032 } 6033 error = spa_load_best(spa, state, policy.zlp_txg, policy.zlp_rewind); 6034 6035 /* 6036 * Propagate anything learned while loading the pool and pass it 6037 * back to caller (i.e. rewind info, missing devices, etc). 6038 */ 6039 VERIFY(nvlist_add_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, 6040 spa->spa_load_info) == 0); 6041 6042 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 6043 /* 6044 * Toss any existing sparelist, as it doesn't have any validity 6045 * anymore, and conflicts with spa_has_spare(). 6046 */ 6047 if (spa->spa_spares.sav_config) { 6048 nvlist_free(spa->spa_spares.sav_config); 6049 spa->spa_spares.sav_config = NULL; 6050 spa_load_spares(spa); 6051 } 6052 if (spa->spa_l2cache.sav_config) { 6053 nvlist_free(spa->spa_l2cache.sav_config); 6054 spa->spa_l2cache.sav_config = NULL; 6055 spa_load_l2cache(spa); 6056 } 6057 6058 VERIFY(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, 6059 &nvroot) == 0); 6060 spa_config_exit(spa, SCL_ALL, FTAG); 6061 6062 if (props != NULL) 6063 spa_configfile_set(spa, props, B_FALSE); 6064 6065 if (error != 0 || (props && spa_writeable(spa) && 6066 (error = spa_prop_set(spa, props)))) { 6067 spa_unload(spa); 6068 spa_deactivate(spa); 6069 spa_remove(spa); 6070 mutex_exit(&spa_namespace_lock); 6071 return (error); 6072 } 6073 6074 spa_async_resume(spa); 6075 6076 /* 6077 * Override any spares and level 2 cache devices as specified by 6078 * the user, as these may have correct device names/devids, etc. 6079 */ 6080 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES, 6081 &spares, &nspares) == 0) { 6082 if (spa->spa_spares.sav_config) 6083 VERIFY(nvlist_remove(spa->spa_spares.sav_config, 6084 ZPOOL_CONFIG_SPARES, DATA_TYPE_NVLIST_ARRAY) == 0); 6085 else 6086 VERIFY(nvlist_alloc(&spa->spa_spares.sav_config, 6087 NV_UNIQUE_NAME, KM_SLEEP) == 0); 6088 VERIFY(nvlist_add_nvlist_array(spa->spa_spares.sav_config, 6089 ZPOOL_CONFIG_SPARES, spares, nspares) == 0); 6090 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 6091 spa_load_spares(spa); 6092 spa_config_exit(spa, SCL_ALL, FTAG); 6093 spa->spa_spares.sav_sync = B_TRUE; 6094 } 6095 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE, 6096 &l2cache, &nl2cache) == 0) { 6097 if (spa->spa_l2cache.sav_config) 6098 VERIFY(nvlist_remove(spa->spa_l2cache.sav_config, 6099 ZPOOL_CONFIG_L2CACHE, DATA_TYPE_NVLIST_ARRAY) == 0); 6100 else 6101 VERIFY(nvlist_alloc(&spa->spa_l2cache.sav_config, 6102 NV_UNIQUE_NAME, KM_SLEEP) == 0); 6103 VERIFY(nvlist_add_nvlist_array(spa->spa_l2cache.sav_config, 6104 ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache) == 0); 6105 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 6106 spa_load_l2cache(spa); 6107 spa_config_exit(spa, SCL_ALL, FTAG); 6108 spa->spa_l2cache.sav_sync = B_TRUE; 6109 } 6110 6111 /* 6112 * Check for any removed devices. 
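 * This is only done when the 'autoreplace' pool property is set; both the
 * spare and L2ARC aux vdev lists are checked.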
6113 */ 6114 if (spa->spa_autoreplace) { 6115 spa_aux_check_removed(&spa->spa_spares); 6116 spa_aux_check_removed(&spa->spa_l2cache); 6117 } 6118 6119 if (spa_writeable(spa)) { 6120 /* 6121 * Update the config cache to include the newly-imported pool. 6122 */ 6123 spa_config_update(spa, SPA_CONFIG_UPDATE_POOL); 6124 } 6125 6126 /* 6127 * It's possible that the pool was expanded while it was exported. 6128 * We kick off an async task to handle this for us. 6129 */ 6130 spa_async_request(spa, SPA_ASYNC_AUTOEXPAND); 6131 6132 spa_history_log_version(spa, "import", NULL); 6133 6134 spa_event_notify(spa, NULL, NULL, ESC_ZFS_POOL_IMPORT); 6135 6136 mutex_exit(&spa_namespace_lock); 6137 6138 zvol_create_minors_recursive(pool); 6139 6140 return (0); 6141 } 6142 6143 nvlist_t * 6144 spa_tryimport(nvlist_t *tryconfig) 6145 { 6146 nvlist_t *config = NULL; 6147 char *poolname, *cachefile; 6148 spa_t *spa; 6149 uint64_t state; 6150 int error; 6151 zpool_load_policy_t policy; 6152 6153 if (nvlist_lookup_string(tryconfig, ZPOOL_CONFIG_POOL_NAME, &poolname)) 6154 return (NULL); 6155 6156 if (nvlist_lookup_uint64(tryconfig, ZPOOL_CONFIG_POOL_STATE, &state)) 6157 return (NULL); 6158 6159 /* 6160 * Create and initialize the spa structure. 6161 */ 6162 mutex_enter(&spa_namespace_lock); 6163 spa = spa_add(TRYIMPORT_NAME, tryconfig, NULL); 6164 spa_activate(spa, SPA_MODE_READ); 6165 6166 /* 6167 * Rewind pool if a max txg was provided. 6168 */ 6169 zpool_get_load_policy(spa->spa_config, &policy); 6170 if (policy.zlp_txg != UINT64_MAX) { 6171 spa->spa_load_max_txg = policy.zlp_txg; 6172 spa->spa_extreme_rewind = B_TRUE; 6173 zfs_dbgmsg("spa_tryimport: importing %s, max_txg=%lld", 6174 poolname, (longlong_t)policy.zlp_txg); 6175 } else { 6176 zfs_dbgmsg("spa_tryimport: importing %s", poolname); 6177 } 6178 6179 if (nvlist_lookup_string(tryconfig, ZPOOL_CONFIG_CACHEFILE, &cachefile) 6180 == 0) { 6181 zfs_dbgmsg("spa_tryimport: using cachefile '%s'", cachefile); 6182 spa->spa_config_source = SPA_CONFIG_SRC_CACHEFILE; 6183 } else { 6184 spa->spa_config_source = SPA_CONFIG_SRC_SCAN; 6185 } 6186 6187 error = spa_load(spa, SPA_LOAD_TRYIMPORT, SPA_IMPORT_EXISTING); 6188 6189 /* 6190 * If 'tryconfig' was at least parsable, return the current config. 6191 */ 6192 if (spa->spa_root_vdev != NULL) { 6193 config = spa_config_generate(spa, NULL, -1ULL, B_TRUE); 6194 VERIFY(nvlist_add_string(config, ZPOOL_CONFIG_POOL_NAME, 6195 poolname) == 0); 6196 VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_POOL_STATE, 6197 state) == 0); 6198 VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_TIMESTAMP, 6199 spa->spa_uberblock.ub_timestamp) == 0); 6200 VERIFY(nvlist_add_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, 6201 spa->spa_load_info) == 0); 6202 VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_ERRATA, 6203 spa->spa_errata) == 0); 6204 6205 /* 6206 * If the bootfs property exists on this pool then we 6207 * copy it out so that external consumers can tell which 6208 * pools are bootable. 6209 */ 6210 if ((!error || error == EEXIST) && spa->spa_bootfs) { 6211 char *tmpname = kmem_alloc(MAXPATHLEN, KM_SLEEP); 6212 6213 /* 6214 * We have to play games with the name since the 6215 * pool was opened as TRYIMPORT_NAME. 
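 * The dataset name resolved below comes back prefixed with that temporary
 * name, so it is rewritten to start with the pool's real name.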
6216 */ 6217 if (dsl_dsobj_to_dsname(spa_name(spa), 6218 spa->spa_bootfs, tmpname) == 0) { 6219 char *cp; 6220 char *dsname; 6221 6222 dsname = kmem_alloc(MAXPATHLEN, KM_SLEEP); 6223 6224 cp = strchr(tmpname, '/'); 6225 if (cp == NULL) { 6226 (void) strlcpy(dsname, tmpname, 6227 MAXPATHLEN); 6228 } else { 6229 (void) snprintf(dsname, MAXPATHLEN, 6230 "%s/%s", poolname, ++cp); 6231 } 6232 VERIFY(nvlist_add_string(config, 6233 ZPOOL_CONFIG_BOOTFS, dsname) == 0); 6234 kmem_free(dsname, MAXPATHLEN); 6235 } 6236 kmem_free(tmpname, MAXPATHLEN); 6237 } 6238 6239 /* 6240 * Add the list of hot spares and level 2 cache devices. 6241 */ 6242 spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER); 6243 spa_add_spares(spa, config); 6244 spa_add_l2cache(spa, config); 6245 spa_config_exit(spa, SCL_CONFIG, FTAG); 6246 } 6247 6248 spa_unload(spa); 6249 spa_deactivate(spa); 6250 spa_remove(spa); 6251 mutex_exit(&spa_namespace_lock); 6252 6253 return (config); 6254 } 6255 6256 /* 6257 * Pool export/destroy 6258 * 6259 * The act of destroying or exporting a pool is very simple. We make sure there 6260 * is no more pending I/O and any references to the pool are gone. Then, we 6261 * update the pool state and sync all the labels to disk, removing the 6262 * configuration from the cache afterwards. If the 'hardforce' flag is set, then 6263 * we don't sync the labels or remove the configuration cache. 6264 */ 6265 static int 6266 spa_export_common(const char *pool, int new_state, nvlist_t **oldconfig, 6267 boolean_t force, boolean_t hardforce) 6268 { 6269 int error; 6270 spa_t *spa; 6271 6272 if (oldconfig) 6273 *oldconfig = NULL; 6274 6275 if (!(spa_mode_global & SPA_MODE_WRITE)) 6276 return (SET_ERROR(EROFS)); 6277 6278 mutex_enter(&spa_namespace_lock); 6279 if ((spa = spa_lookup(pool)) == NULL) { 6280 mutex_exit(&spa_namespace_lock); 6281 return (SET_ERROR(ENOENT)); 6282 } 6283 6284 if (spa->spa_is_exporting) { 6285 /* the pool is being exported by another thread */ 6286 mutex_exit(&spa_namespace_lock); 6287 return (SET_ERROR(ZFS_ERR_EXPORT_IN_PROGRESS)); 6288 } 6289 spa->spa_is_exporting = B_TRUE; 6290 6291 /* 6292 * Put a hold on the pool, drop the namespace lock, stop async tasks, 6293 * reacquire the namespace lock, and see if we can export. 6294 */ 6295 spa_open_ref(spa, FTAG); 6296 mutex_exit(&spa_namespace_lock); 6297 spa_async_suspend(spa); 6298 if (spa->spa_zvol_taskq) { 6299 zvol_remove_minors(spa, spa_name(spa), B_TRUE); 6300 taskq_wait(spa->spa_zvol_taskq); 6301 } 6302 mutex_enter(&spa_namespace_lock); 6303 spa_close(spa, FTAG); 6304 6305 if (spa->spa_state == POOL_STATE_UNINITIALIZED) 6306 goto export_spa; 6307 /* 6308 * The pool will be in core if it's openable, in which case we can 6309 * modify its state. Objsets may be open only because they're dirty, 6310 * so we have to force the pool to sync before checking spa_refcnt. 6311 */ 6312 if (spa->spa_sync_on) { 6313 txg_wait_synced(spa->spa_dsl_pool, 0); 6314 spa_evicting_os_wait(spa); 6315 } 6316 6317 /* 6318 * A pool cannot be exported or destroyed if there are active 6319 * references. If we are resetting a pool, allow references by 6320 * fault injection handlers. 6321 */ 6322 if (!spa_refcount_zero(spa) || (spa->spa_inject_ref != 0)) { 6323 error = SET_ERROR(EBUSY); 6324 goto fail; 6325 } 6326 6327 if (spa->spa_sync_on) { 6328 /* 6329 * A pool cannot be exported if it has an active shared spare. 6330 * This is to prevent other pools from stealing the active spare 6331 * from an exported pool. If the user insists, such a pool can 6332 * still be forcibly exported.
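 * (The 'force' argument, e.g. from 'zpool export -f', bypasses this
 * check below.)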
6333 */ 6334 if (!force && new_state == POOL_STATE_EXPORTED && 6335 spa_has_active_shared_spare(spa)) { 6336 error = SET_ERROR(EXDEV); 6337 goto fail; 6338 } 6339 6340 /* 6341 * We're about to export or destroy this pool. Make sure 6342 * we stop all initialization and trim activity here before 6343 * we set the spa_final_txg. This will ensure that all 6344 * dirty data resulting from the initialization is 6345 * committed to disk before we unload the pool. 6346 */ 6347 if (spa->spa_root_vdev != NULL) { 6348 vdev_t *rvd = spa->spa_root_vdev; 6349 vdev_initialize_stop_all(rvd, VDEV_INITIALIZE_ACTIVE); 6350 vdev_trim_stop_all(rvd, VDEV_TRIM_ACTIVE); 6351 vdev_autotrim_stop_all(spa); 6352 vdev_rebuild_stop_all(spa); 6353 } 6354 6355 /* 6356 * We want this to be reflected on every label, 6357 * so mark them all dirty. spa_unload() will do the 6358 * final sync that pushes these changes out. 6359 */ 6360 if (new_state != POOL_STATE_UNINITIALIZED && !hardforce) { 6361 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 6362 spa->spa_state = new_state; 6363 spa->spa_final_txg = spa_last_synced_txg(spa) + 6364 TXG_DEFER_SIZE + 1; 6365 vdev_config_dirty(spa->spa_root_vdev); 6366 spa_config_exit(spa, SCL_ALL, FTAG); 6367 } 6368 } 6369 6370 export_spa: 6371 if (new_state == POOL_STATE_DESTROYED) 6372 spa_event_notify(spa, NULL, NULL, ESC_ZFS_POOL_DESTROY); 6373 else if (new_state == POOL_STATE_EXPORTED) 6374 spa_event_notify(spa, NULL, NULL, ESC_ZFS_POOL_EXPORT); 6375 6376 if (spa->spa_state != POOL_STATE_UNINITIALIZED) { 6377 spa_unload(spa); 6378 spa_deactivate(spa); 6379 } 6380 6381 if (oldconfig && spa->spa_config) 6382 VERIFY(nvlist_dup(spa->spa_config, oldconfig, 0) == 0); 6383 6384 if (new_state != POOL_STATE_UNINITIALIZED) { 6385 if (!hardforce) 6386 spa_write_cachefile(spa, B_TRUE, B_TRUE); 6387 spa_remove(spa); 6388 } else { 6389 /* 6390 * If spa_remove() is not called for this spa_t and 6391 * there is any possibility that it can be reused, 6392 * we make sure to reset the exporting flag. 6393 */ 6394 spa->spa_is_exporting = B_FALSE; 6395 } 6396 6397 mutex_exit(&spa_namespace_lock); 6398 return (0); 6399 6400 fail: 6401 spa->spa_is_exporting = B_FALSE; 6402 spa_async_resume(spa); 6403 mutex_exit(&spa_namespace_lock); 6404 return (error); 6405 } 6406 6407 /* 6408 * Destroy a storage pool. 6409 */ 6410 int 6411 spa_destroy(const char *pool) 6412 { 6413 return (spa_export_common(pool, POOL_STATE_DESTROYED, NULL, 6414 B_FALSE, B_FALSE)); 6415 } 6416 6417 /* 6418 * Export a storage pool. 6419 */ 6420 int 6421 spa_export(const char *pool, nvlist_t **oldconfig, boolean_t force, 6422 boolean_t hardforce) 6423 { 6424 return (spa_export_common(pool, POOL_STATE_EXPORTED, oldconfig, 6425 force, hardforce)); 6426 } 6427 6428 /* 6429 * Similar to spa_export(), this unloads the spa_t without actually removing it 6430 * from the namespace in any way. 
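 * It does this by passing POOL_STATE_UNINITIALIZED to spa_export_common(),
 * which unloads the pool but skips the cache file update and leaves the
 * spa_t registered.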
6431 */ 6432 int 6433 spa_reset(const char *pool) 6434 { 6435 return (spa_export_common(pool, POOL_STATE_UNINITIALIZED, NULL, 6436 B_FALSE, B_FALSE)); 6437 } 6438 6439 /* 6440 * ========================================================================== 6441 * Device manipulation 6442 * ========================================================================== 6443 */ 6444 6445 /* 6446 * This is called as a synctask to increment the draid feature flag 6447 */ 6448 static void 6449 spa_draid_feature_incr(void *arg, dmu_tx_t *tx) 6450 { 6451 spa_t *spa = dmu_tx_pool(tx)->dp_spa; 6452 int draid = (int)(uintptr_t)arg; 6453 6454 for (int c = 0; c < draid; c++) 6455 spa_feature_incr(spa, SPA_FEATURE_DRAID, tx); 6456 } 6457 6458 /* 6459 * Add a device to a storage pool. 6460 */ 6461 int 6462 spa_vdev_add(spa_t *spa, nvlist_t *nvroot) 6463 { 6464 uint64_t txg, ndraid = 0; 6465 int error; 6466 vdev_t *rvd = spa->spa_root_vdev; 6467 vdev_t *vd, *tvd; 6468 nvlist_t **spares, **l2cache; 6469 uint_t nspares, nl2cache; 6470 6471 ASSERT(spa_writeable(spa)); 6472 6473 txg = spa_vdev_enter(spa); 6474 6475 if ((error = spa_config_parse(spa, &vd, nvroot, NULL, 0, 6476 VDEV_ALLOC_ADD)) != 0) 6477 return (spa_vdev_exit(spa, NULL, txg, error)); 6478 6479 spa->spa_pending_vdev = vd; /* spa_vdev_exit() will clear this */ 6480 6481 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES, &spares, 6482 &nspares) != 0) 6483 nspares = 0; 6484 6485 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE, &l2cache, 6486 &nl2cache) != 0) 6487 nl2cache = 0; 6488 6489 if (vd->vdev_children == 0 && nspares == 0 && nl2cache == 0) 6490 return (spa_vdev_exit(spa, vd, txg, EINVAL)); 6491 6492 if (vd->vdev_children != 0 && 6493 (error = vdev_create(vd, txg, B_FALSE)) != 0) { 6494 return (spa_vdev_exit(spa, vd, txg, error)); 6495 } 6496 6497 /* 6498 * The virtual dRAID spares must be added after the vdev tree is created 6499 * and the vdev guids are generated. The guid of their associated 6500 * dRAID is stored in the config and used when opening the spare. 6501 */ 6502 if ((error = vdev_draid_spare_create(nvroot, vd, &ndraid, 6503 rvd->vdev_children)) == 0) { 6504 if (ndraid > 0 && nvlist_lookup_nvlist_array(nvroot, 6505 ZPOOL_CONFIG_SPARES, &spares, &nspares) != 0) 6506 nspares = 0; 6507 } else { 6508 return (spa_vdev_exit(spa, vd, txg, error)); 6509 } 6510 6511 /* 6512 * We must validate the spares and l2cache devices after checking the 6513 * children. Otherwise, vdev_inuse() will blindly overwrite the spare. 6514 */ 6515 if ((error = spa_validate_aux(spa, nvroot, txg, VDEV_ALLOC_ADD)) != 0) 6516 return (spa_vdev_exit(spa, vd, txg, error)); 6517 6518 /* 6519 * If we are in the middle of a device removal, we can only add 6520 * devices which match the existing devices in the pool. 6521 * If we are in the middle of a removal, or have some indirect 6522 * vdevs, we cannot add raidz or dRAID top levels.
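 * Concretely, while a removal is in progress each new top-level vdev must
 * match spa_max_ashift, and any vdev with parity (raidz or dRAID) is
 * rejected with EINVAL.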
6523 */ 6524 if (spa->spa_vdev_removal != NULL || 6525 spa->spa_removing_phys.sr_prev_indirect_vdev != -1) { 6526 for (int c = 0; c < vd->vdev_children; c++) { 6527 tvd = vd->vdev_child[c]; 6528 if (spa->spa_vdev_removal != NULL && 6529 tvd->vdev_ashift != spa->spa_max_ashift) { 6530 return (spa_vdev_exit(spa, vd, txg, EINVAL)); 6531 } 6532 /* Fail if top level vdev is raidz or a dRAID */ 6533 if (vdev_get_nparity(tvd) != 0) 6534 return (spa_vdev_exit(spa, vd, txg, EINVAL)); 6535 6536 /* 6537 * Need the top level mirror to be 6538 * a mirror of leaf vdevs only 6539 */ 6540 if (tvd->vdev_ops == &vdev_mirror_ops) { 6541 for (uint64_t cid = 0; 6542 cid < tvd->vdev_children; cid++) { 6543 vdev_t *cvd = tvd->vdev_child[cid]; 6544 if (!cvd->vdev_ops->vdev_op_leaf) { 6545 return (spa_vdev_exit(spa, vd, 6546 txg, EINVAL)); 6547 } 6548 } 6549 } 6550 } 6551 } 6552 6553 for (int c = 0; c < vd->vdev_children; c++) { 6554 tvd = vd->vdev_child[c]; 6555 vdev_remove_child(vd, tvd); 6556 tvd->vdev_id = rvd->vdev_children; 6557 vdev_add_child(rvd, tvd); 6558 vdev_config_dirty(tvd); 6559 } 6560 6561 if (nspares != 0) { 6562 spa_set_aux_vdevs(&spa->spa_spares, spares, nspares, 6563 ZPOOL_CONFIG_SPARES); 6564 spa_load_spares(spa); 6565 spa->spa_spares.sav_sync = B_TRUE; 6566 } 6567 6568 if (nl2cache != 0) { 6569 spa_set_aux_vdevs(&spa->spa_l2cache, l2cache, nl2cache, 6570 ZPOOL_CONFIG_L2CACHE); 6571 spa_load_l2cache(spa); 6572 spa->spa_l2cache.sav_sync = B_TRUE; 6573 } 6574 6575 /* 6576 * We can't increment a feature while holding spa_vdev so we 6577 * have to do it in a synctask. 6578 */ 6579 if (ndraid != 0) { 6580 dmu_tx_t *tx; 6581 6582 tx = dmu_tx_create_assigned(spa->spa_dsl_pool, txg); 6583 dsl_sync_task_nowait(spa->spa_dsl_pool, spa_draid_feature_incr, 6584 (void *)(uintptr_t)ndraid, tx); 6585 dmu_tx_commit(tx); 6586 } 6587 6588 /* 6589 * We have to be careful when adding new vdevs to an existing pool. 6590 * If other threads start allocating from these vdevs before we 6591 * sync the config cache, and we lose power, then upon reboot we may 6592 * fail to open the pool because there are DVAs that the config cache 6593 * can't translate. Therefore, we first add the vdevs without 6594 * initializing metaslabs; sync the config cache (via spa_vdev_exit()); 6595 * and then let spa_config_update() initialize the new metaslabs. 6596 * 6597 * spa_load() checks for added-but-not-initialized vdevs, so that 6598 * if we lose power at any point in this sequence, the remaining 6599 * steps will be completed the next time we load the pool. 6600 */ 6601 (void) spa_vdev_exit(spa, vd, txg, 0); 6602 6603 mutex_enter(&spa_namespace_lock); 6604 spa_config_update(spa, SPA_CONFIG_UPDATE_POOL); 6605 spa_event_notify(spa, NULL, NULL, ESC_ZFS_VDEV_ADD); 6606 mutex_exit(&spa_namespace_lock); 6607 6608 return (0); 6609 } 6610 6611 /* 6612 * Attach a device to a mirror. The arguments are the path to any device 6613 * in the mirror, and the nvroot for the new device. If the path specifies 6614 * a device that is not mirrored, we automatically insert the mirror vdev. 
6615 * 6616 * If 'replacing' is specified, the new device is intended to replace the 6617 * existing device; in this case the two devices are made into their own 6618 * mirror using the 'replacing' vdev, which is functionally identical to 6619 * the mirror vdev (it actually reuses all the same ops) but has a few 6620 * extra rules: you can't attach to it after it's been created, and upon 6621 * completion of resilvering, the first disk (the one being replaced) 6622 * is automatically detached. 6623 * 6624 * If 'rebuild' is specified, then sequential reconstruction (a.k.a. rebuild) 6625 * should be performed instead of traditional healing reconstruction. From 6626 * an administrator's perspective these are both resilver operations. 6627 */ 6628 int 6629 spa_vdev_attach(spa_t *spa, uint64_t guid, nvlist_t *nvroot, int replacing, 6630 int rebuild) 6631 { 6632 uint64_t txg, dtl_max_txg; 6633 vdev_t *rvd = spa->spa_root_vdev; 6634 vdev_t *oldvd, *newvd, *newrootvd, *pvd, *tvd; 6635 vdev_ops_t *pvops; 6636 char *oldvdpath, *newvdpath; 6637 int newvd_isspare; 6638 int error; 6639 6640 ASSERT(spa_writeable(spa)); 6641 6642 txg = spa_vdev_enter(spa); 6643 6644 oldvd = spa_lookup_by_guid(spa, guid, B_FALSE); 6645 6646 ASSERT(MUTEX_HELD(&spa_namespace_lock)); 6647 if (spa_feature_is_active(spa, SPA_FEATURE_POOL_CHECKPOINT)) { 6648 error = (spa_has_checkpoint(spa)) ? 6649 ZFS_ERR_CHECKPOINT_EXISTS : ZFS_ERR_DISCARDING_CHECKPOINT; 6650 return (spa_vdev_exit(spa, NULL, txg, error)); 6651 } 6652 6653 if (rebuild) { 6654 if (!spa_feature_is_enabled(spa, SPA_FEATURE_DEVICE_REBUILD)) 6655 return (spa_vdev_exit(spa, NULL, txg, ENOTSUP)); 6656 6657 if (dsl_scan_resilvering(spa_get_dsl(spa))) 6658 return (spa_vdev_exit(spa, NULL, txg, 6659 ZFS_ERR_RESILVER_IN_PROGRESS)); 6660 } else { 6661 if (vdev_rebuild_active(rvd)) 6662 return (spa_vdev_exit(spa, NULL, txg, 6663 ZFS_ERR_REBUILD_IN_PROGRESS)); 6664 } 6665 6666 if (spa->spa_vdev_removal != NULL) 6667 return (spa_vdev_exit(spa, NULL, txg, EBUSY)); 6668 6669 if (oldvd == NULL) 6670 return (spa_vdev_exit(spa, NULL, txg, ENODEV)); 6671 6672 if (!oldvd->vdev_ops->vdev_op_leaf) 6673 return (spa_vdev_exit(spa, NULL, txg, ENOTSUP)); 6674 6675 pvd = oldvd->vdev_parent; 6676 6677 if ((error = spa_config_parse(spa, &newrootvd, nvroot, NULL, 0, 6678 VDEV_ALLOC_ATTACH)) != 0) 6679 return (spa_vdev_exit(spa, NULL, txg, EINVAL)); 6680 6681 if (newrootvd->vdev_children != 1) 6682 return (spa_vdev_exit(spa, newrootvd, txg, EINVAL)); 6683 6684 newvd = newrootvd->vdev_child[0]; 6685 6686 if (!newvd->vdev_ops->vdev_op_leaf) 6687 return (spa_vdev_exit(spa, newrootvd, txg, EINVAL)); 6688 6689 if ((error = vdev_create(newrootvd, txg, replacing)) != 0) 6690 return (spa_vdev_exit(spa, newrootvd, txg, error)); 6691 6692 /* 6693 * Spares can't replace logs 6694 */ 6695 if (oldvd->vdev_top->vdev_islog && newvd->vdev_isspare) 6696 return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP)); 6697 6698 /* 6699 * A dRAID spare can only replace a child of its parent dRAID vdev. 6700 */ 6701 if (newvd->vdev_ops == &vdev_draid_spare_ops && 6702 oldvd->vdev_top != vdev_draid_spare_get_parent(newvd)) { 6703 return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP)); 6704 } 6705 6706 if (rebuild) { 6707 /* 6708 * For rebuilds, the top vdev must support reconstruction 6709 * using only space maps. This means the only allowable 6710 * vdev types are the root vdev, a mirror, or dRAID.
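 * (The check below walks up to the top-level vdev via pvd->vdev_top before
 * comparing its ops against mirror, root and dRAID.)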
6711 */ 6712 tvd = pvd; 6713 if (pvd->vdev_top != NULL) 6714 tvd = pvd->vdev_top; 6715 6716 if (tvd->vdev_ops != &vdev_mirror_ops && 6717 tvd->vdev_ops != &vdev_root_ops && 6718 tvd->vdev_ops != &vdev_draid_ops) { 6719 return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP)); 6720 } 6721 } 6722 6723 if (!replacing) { 6724 /* 6725 * For attach, the only allowable parent is a mirror or the root 6726 * vdev. 6727 */ 6728 if (pvd->vdev_ops != &vdev_mirror_ops && 6729 pvd->vdev_ops != &vdev_root_ops) 6730 return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP)); 6731 6732 pvops = &vdev_mirror_ops; 6733 } else { 6734 /* 6735 * Active hot spares can only be replaced by inactive hot 6736 * spares. 6737 */ 6738 if (pvd->vdev_ops == &vdev_spare_ops && 6739 oldvd->vdev_isspare && 6740 !spa_has_spare(spa, newvd->vdev_guid)) 6741 return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP)); 6742 6743 /* 6744 * If the source is a hot spare, and the parent isn't already a 6745 * spare, then we want to create a new hot spare. Otherwise, we 6746 * want to create a replacing vdev. The user is not allowed to 6747 * attach to a spared vdev child unless the 'isspare' state is 6748 * the same (spare replaces spare, non-spare replaces 6749 * non-spare). 6750 */ 6751 if (pvd->vdev_ops == &vdev_replacing_ops && 6752 spa_version(spa) < SPA_VERSION_MULTI_REPLACE) { 6753 return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP)); 6754 } else if (pvd->vdev_ops == &vdev_spare_ops && 6755 newvd->vdev_isspare != oldvd->vdev_isspare) { 6756 return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP)); 6757 } 6758 6759 if (newvd->vdev_isspare) 6760 pvops = &vdev_spare_ops; 6761 else 6762 pvops = &vdev_replacing_ops; 6763 } 6764 6765 /* 6766 * Make sure the new device is big enough. 6767 */ 6768 if (newvd->vdev_asize < vdev_get_min_asize(oldvd)) 6769 return (spa_vdev_exit(spa, newrootvd, txg, EOVERFLOW)); 6770 6771 /* 6772 * The new device cannot have a higher alignment requirement 6773 * than the top-level vdev. 6774 */ 6775 if (newvd->vdev_ashift > oldvd->vdev_top->vdev_ashift) 6776 return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP)); 6777 6778 /* 6779 * If this is an in-place replacement, update oldvd's path and devid 6780 * to make it distinguishable from newvd, and unopenable from now on. 6781 */ 6782 if (strcmp(oldvd->vdev_path, newvd->vdev_path) == 0) { 6783 spa_strfree(oldvd->vdev_path); 6784 oldvd->vdev_path = kmem_alloc(strlen(newvd->vdev_path) + 5, 6785 KM_SLEEP); 6786 (void) snprintf(oldvd->vdev_path, strlen(newvd->vdev_path) + 5, 6787 "%s/%s", newvd->vdev_path, "old"); 6788 if (oldvd->vdev_devid != NULL) { 6789 spa_strfree(oldvd->vdev_devid); 6790 oldvd->vdev_devid = NULL; 6791 } 6792 } 6793 6794 /* 6795 * If the parent is not a mirror, or if we're replacing, insert the new 6796 * mirror/replacing/spare vdev above oldvd. 6797 */ 6798 if (pvd->vdev_ops != pvops) 6799 pvd = vdev_add_parent(oldvd, pvops); 6800 6801 ASSERT(pvd->vdev_top->vdev_parent == rvd); 6802 ASSERT(pvd->vdev_ops == pvops); 6803 ASSERT(oldvd->vdev_parent == pvd); 6804 6805 /* 6806 * Extract the new device from its root and add it to pvd. 6807 */ 6808 vdev_remove_child(newrootvd, newvd); 6809 newvd->vdev_id = pvd->vdev_children; 6810 newvd->vdev_crtxg = oldvd->vdev_crtxg; 6811 vdev_add_child(pvd, newvd); 6812 6813 /* 6814 * Reevaluate the parent vdev state. 
6815 */ 6816 vdev_propagate_state(pvd); 6817 6818 tvd = newvd->vdev_top; 6819 ASSERT(pvd->vdev_top == tvd); 6820 ASSERT(tvd->vdev_parent == rvd); 6821 6822 vdev_config_dirty(tvd); 6823 6824 /* 6825 * Set newvd's DTL to [TXG_INITIAL, dtl_max_txg) so that we account 6826 * for any dmu_sync-ed blocks. It will propagate upward when 6827 * spa_vdev_exit() calls vdev_dtl_reassess(). 6828 */ 6829 dtl_max_txg = txg + TXG_CONCURRENT_STATES; 6830 6831 vdev_dtl_dirty(newvd, DTL_MISSING, 6832 TXG_INITIAL, dtl_max_txg - TXG_INITIAL); 6833 6834 if (newvd->vdev_isspare) { 6835 spa_spare_activate(newvd); 6836 spa_event_notify(spa, newvd, NULL, ESC_ZFS_VDEV_SPARE); 6837 } 6838 6839 oldvdpath = spa_strdup(oldvd->vdev_path); 6840 newvdpath = spa_strdup(newvd->vdev_path); 6841 newvd_isspare = newvd->vdev_isspare; 6842 6843 /* 6844 * Mark newvd's DTL dirty in this txg. 6845 */ 6846 vdev_dirty(tvd, VDD_DTL, newvd, txg); 6847 6848 /* 6849 * Schedule the resilver or rebuild to restart in the future. We do 6850 * this to ensure that dmu_sync-ed blocks have been stitched into the 6851 * respective datasets. 6852 */ 6853 if (rebuild) { 6854 newvd->vdev_rebuild_txg = txg; 6855 6856 vdev_rebuild(tvd); 6857 } else { 6858 newvd->vdev_resilver_txg = txg; 6859 6860 if (dsl_scan_resilvering(spa_get_dsl(spa)) && 6861 spa_feature_is_enabled(spa, SPA_FEATURE_RESILVER_DEFER)) { 6862 vdev_defer_resilver(newvd); 6863 } else { 6864 dsl_scan_restart_resilver(spa->spa_dsl_pool, 6865 dtl_max_txg); 6866 } 6867 } 6868 6869 if (spa->spa_bootfs) 6870 spa_event_notify(spa, newvd, NULL, ESC_ZFS_BOOTFS_VDEV_ATTACH); 6871 6872 spa_event_notify(spa, newvd, NULL, ESC_ZFS_VDEV_ATTACH); 6873 6874 /* 6875 * Commit the config 6876 */ 6877 (void) spa_vdev_exit(spa, newrootvd, dtl_max_txg, 0); 6878 6879 spa_history_log_internal(spa, "vdev attach", NULL, 6880 "%s vdev=%s %s vdev=%s", 6881 replacing && newvd_isspare ? "spare in" : 6882 replacing ? "replace" : "attach", newvdpath, 6883 replacing ? "for" : "to", oldvdpath); 6884 6885 spa_strfree(oldvdpath); 6886 spa_strfree(newvdpath); 6887 6888 return (0); 6889 } 6890 6891 /* 6892 * Detach a device from a mirror or replacing vdev. 6893 * 6894 * If 'replace_done' is specified, only detach if the parent 6895 * is a replacing vdev. 6896 */ 6897 int 6898 spa_vdev_detach(spa_t *spa, uint64_t guid, uint64_t pguid, int replace_done) 6899 { 6900 uint64_t txg; 6901 int error; 6902 vdev_t *rvd __maybe_unused = spa->spa_root_vdev; 6903 vdev_t *vd, *pvd, *cvd, *tvd; 6904 boolean_t unspare = B_FALSE; 6905 uint64_t unspare_guid = 0; 6906 char *vdpath; 6907 6908 ASSERT(spa_writeable(spa)); 6909 6910 txg = spa_vdev_detach_enter(spa, guid); 6911 6912 vd = spa_lookup_by_guid(spa, guid, B_FALSE); 6913 6914 /* 6915 * Besides being called directly from userland through the 6916 * ioctl interface, spa_vdev_detach() can be potentially called 6917 * at the end of spa_vdev_resilver_done(). 6918 * 6919 * In the regular case, when we have a checkpoint this shouldn't 6920 * happen as we never empty the DTLs of a vdev during the scrub 6921 * [see comment in dsl_scan_done()]. Thus spa_vdev_resilver_done() 6922 * should never get here when we have a checkpoint. 6923 * 6924 * That said, even in the case where we checkpoint the pool exactly 6925 * as spa_vdev_resilver_done() calls this function, everything 6926 * should be fine as the resilver will return right away. 6927 */ 6928 ASSERT(MUTEX_HELD(&spa_namespace_lock)); 6929 if (spa_feature_is_active(spa, SPA_FEATURE_POOL_CHECKPOINT)) { 6930 error = (spa_has_checkpoint(spa)) ?
6931 ZFS_ERR_CHECKPOINT_EXISTS : ZFS_ERR_DISCARDING_CHECKPOINT; 6932 return (spa_vdev_exit(spa, NULL, txg, error)); 6933 } 6934 6935 if (vd == NULL) 6936 return (spa_vdev_exit(spa, NULL, txg, ENODEV)); 6937 6938 if (!vd->vdev_ops->vdev_op_leaf) 6939 return (spa_vdev_exit(spa, NULL, txg, ENOTSUP)); 6940 6941 pvd = vd->vdev_parent; 6942 6943 /* 6944 * If the parent/child relationship is not as expected, don't do it. 6945 * Consider M(A,R(B,C)) -- that is, a mirror of A with a replacing 6946 * vdev that's replacing B with C. The user's intent in replacing 6947 * is to go from M(A,B) to M(A,C). If the user decides to cancel 6948 * the replace by detaching C, the expected behavior is to end up 6949 * M(A,B). But suppose that right after deciding to detach C, 6950 * the replacement of B completes. We would have M(A,C), and then 6951 * ask to detach C, which would leave us with just A -- not what 6952 * the user wanted. To prevent this, we make sure that the 6953 * parent/child relationship hasn't changed -- in this example, 6954 * that C's parent is still the replacing vdev R. 6955 */ 6956 if (pvd->vdev_guid != pguid && pguid != 0) 6957 return (spa_vdev_exit(spa, NULL, txg, EBUSY)); 6958 6959 /* 6960 * Only 'replacing' or 'spare' vdevs can be replaced. 6961 */ 6962 if (replace_done && pvd->vdev_ops != &vdev_replacing_ops && 6963 pvd->vdev_ops != &vdev_spare_ops) 6964 return (spa_vdev_exit(spa, NULL, txg, ENOTSUP)); 6965 6966 ASSERT(pvd->vdev_ops != &vdev_spare_ops || 6967 spa_version(spa) >= SPA_VERSION_SPARES); 6968 6969 /* 6970 * Only mirror, replacing, and spare vdevs support detach. 6971 */ 6972 if (pvd->vdev_ops != &vdev_replacing_ops && 6973 pvd->vdev_ops != &vdev_mirror_ops && 6974 pvd->vdev_ops != &vdev_spare_ops) 6975 return (spa_vdev_exit(spa, NULL, txg, ENOTSUP)); 6976 6977 /* 6978 * If this device has the only valid copy of some data, 6979 * we cannot safely detach it. 6980 */ 6981 if (vdev_dtl_required(vd)) 6982 return (spa_vdev_exit(spa, NULL, txg, EBUSY)); 6983 6984 ASSERT(pvd->vdev_children >= 2); 6985 6986 /* 6987 * If we are detaching the second disk from a replacing vdev, then 6988 * check to see if we changed the original vdev's path to have "/old" 6989 * at the end in spa_vdev_attach(). If so, undo that change now. 6990 */ 6991 if (pvd->vdev_ops == &vdev_replacing_ops && vd->vdev_id > 0 && 6992 vd->vdev_path != NULL) { 6993 size_t len = strlen(vd->vdev_path); 6994 6995 for (int c = 0; c < pvd->vdev_children; c++) { 6996 cvd = pvd->vdev_child[c]; 6997 6998 if (cvd == vd || cvd->vdev_path == NULL) 6999 continue; 7000 7001 if (strncmp(cvd->vdev_path, vd->vdev_path, len) == 0 && 7002 strcmp(cvd->vdev_path + len, "/old") == 0) { 7003 spa_strfree(cvd->vdev_path); 7004 cvd->vdev_path = spa_strdup(vd->vdev_path); 7005 break; 7006 } 7007 } 7008 } 7009 7010 /* 7011 * If we are detaching the original disk from a normal spare, then it 7012 * implies that the spare should become a real disk, and be removed 7013 * from the active spare list for the pool. dRAID spares on the 7014 * other hand are coupled to the pool and thus should never be removed 7015 * from the spares list. 7016 */ 7017 if (pvd->vdev_ops == &vdev_spare_ops && vd->vdev_id == 0) { 7018 vdev_t *last_cvd = pvd->vdev_child[pvd->vdev_children - 1]; 7019 7020 if (last_cvd->vdev_isspare && 7021 last_cvd->vdev_ops != &vdev_draid_spare_ops) { 7022 unspare = B_TRUE; 7023 } 7024 } 7025 7026 /* 7027 * Erase the disk labels so the disk can be used for other things. 
7028 * This must be done after all other error cases are handled, 7029 * but before we disembowel vd (so we can still do I/O to it). 7030 * But if we can't do it, don't treat the error as fatal -- 7031 * it may be that the unwritability of the disk is the reason 7032 * it's being detached! 7033 */ 7034 error = vdev_label_init(vd, 0, VDEV_LABEL_REMOVE); 7035 7036 /* 7037 * Remove vd from its parent and compact the parent's children. 7038 */ 7039 vdev_remove_child(pvd, vd); 7040 vdev_compact_children(pvd); 7041 7042 /* 7043 * Remember one of the remaining children so we can get tvd below. 7044 */ 7045 cvd = pvd->vdev_child[pvd->vdev_children - 1]; 7046 7047 /* 7048 * If we need to remove the remaining child from the list of hot spares, 7049 * do it now, marking the vdev as no longer a spare in the process. 7050 * We must do this before vdev_remove_parent(), because that can 7051 * change the GUID if it creates a new toplevel GUID. For a similar 7052 * reason, we must remove the spare now, in the same txg as the detach; 7053 * otherwise someone could attach a new sibling, change the GUID, and 7054 * the subsequent attempt to spa_vdev_remove(unspare_guid) would fail. 7055 */ 7056 if (unspare) { 7057 ASSERT(cvd->vdev_isspare); 7058 spa_spare_remove(cvd); 7059 unspare_guid = cvd->vdev_guid; 7060 (void) spa_vdev_remove(spa, unspare_guid, B_TRUE); 7061 cvd->vdev_unspare = B_TRUE; 7062 } 7063 7064 /* 7065 * If the parent mirror/replacing vdev only has one child, 7066 * the parent is no longer needed. Remove it from the tree. 7067 */ 7068 if (pvd->vdev_children == 1) { 7069 if (pvd->vdev_ops == &vdev_spare_ops) 7070 cvd->vdev_unspare = B_FALSE; 7071 vdev_remove_parent(cvd); 7072 } 7073 7074 /* 7075 * We don't set tvd until now because the parent we just removed 7076 * may have been the previous top-level vdev. 7077 */ 7078 tvd = cvd->vdev_top; 7079 ASSERT(tvd->vdev_parent == rvd); 7080 7081 /* 7082 * Reevaluate the parent vdev state. 7083 */ 7084 vdev_propagate_state(cvd); 7085 7086 /* 7087 * If the 'autoexpand' property is set on the pool then automatically 7088 * try to expand the size of the pool. For example if the device we 7089 * just detached was smaller than the others, it may be possible to 7090 * add metaslabs (i.e. grow the pool). We need to reopen the vdev 7091 * first so that we can obtain the updated sizes of the leaf vdevs. 7092 */ 7093 if (spa->spa_autoexpand) { 7094 vdev_reopen(tvd); 7095 vdev_expand(tvd, txg); 7096 } 7097 7098 vdev_config_dirty(tvd); 7099 7100 /* 7101 * Mark vd's DTL as dirty in this txg. vdev_dtl_sync() will see that 7102 * vd->vdev_detached is set and free vd's DTL object in syncing context. 7103 * But first make sure we're not on any *other* txg's DTL list, to 7104 * prevent vd from being accessed after it's freed. 7105 */ 7106 vdpath = spa_strdup(vd->vdev_path ? 
vd->vdev_path : "none"); 7107 for (int t = 0; t < TXG_SIZE; t++) 7108 (void) txg_list_remove_this(&tvd->vdev_dtl_list, vd, t); 7109 vd->vdev_detached = B_TRUE; 7110 vdev_dirty(tvd, VDD_DTL, vd, txg); 7111 7112 spa_event_notify(spa, vd, NULL, ESC_ZFS_VDEV_REMOVE); 7113 spa_notify_waiters(spa); 7114 7115 /* hang on to the spa before we release the lock */ 7116 spa_open_ref(spa, FTAG); 7117 7118 error = spa_vdev_exit(spa, vd, txg, 0); 7119 7120 spa_history_log_internal(spa, "detach", NULL, 7121 "vdev=%s", vdpath); 7122 spa_strfree(vdpath); 7123 7124 /* 7125 * If this was the removal of the original device in a hot spare vdev, 7126 * then we want to go through and remove the device from the hot spare 7127 * list of every other pool. 7128 */ 7129 if (unspare) { 7130 spa_t *altspa = NULL; 7131 7132 mutex_enter(&spa_namespace_lock); 7133 while ((altspa = spa_next(altspa)) != NULL) { 7134 if (altspa->spa_state != POOL_STATE_ACTIVE || 7135 altspa == spa) 7136 continue; 7137 7138 spa_open_ref(altspa, FTAG); 7139 mutex_exit(&spa_namespace_lock); 7140 (void) spa_vdev_remove(altspa, unspare_guid, B_TRUE); 7141 mutex_enter(&spa_namespace_lock); 7142 spa_close(altspa, FTAG); 7143 } 7144 mutex_exit(&spa_namespace_lock); 7145 7146 /* search the rest of the vdevs for spares to remove */ 7147 spa_vdev_resilver_done(spa); 7148 } 7149 7150 /* all done with the spa; OK to release */ 7151 mutex_enter(&spa_namespace_lock); 7152 spa_close(spa, FTAG); 7153 mutex_exit(&spa_namespace_lock); 7154 7155 return (error); 7156 } 7157 7158 static int 7159 spa_vdev_initialize_impl(spa_t *spa, uint64_t guid, uint64_t cmd_type, 7160 list_t *vd_list) 7161 { 7162 ASSERT(MUTEX_HELD(&spa_namespace_lock)); 7163 7164 spa_config_enter(spa, SCL_CONFIG | SCL_STATE, FTAG, RW_READER); 7165 7166 /* Look up vdev and ensure it's a leaf. */ 7167 vdev_t *vd = spa_lookup_by_guid(spa, guid, B_FALSE); 7168 if (vd == NULL || vd->vdev_detached) { 7169 spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG); 7170 return (SET_ERROR(ENODEV)); 7171 } else if (!vd->vdev_ops->vdev_op_leaf || !vdev_is_concrete(vd)) { 7172 spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG); 7173 return (SET_ERROR(EINVAL)); 7174 } else if (!vdev_writeable(vd)) { 7175 spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG); 7176 return (SET_ERROR(EROFS)); 7177 } 7178 mutex_enter(&vd->vdev_initialize_lock); 7179 spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG); 7180 7181 /* 7182 * When we activate an initialize action we check to see 7183 * if the vdev_initialize_thread is NULL. We do this instead 7184 * of using the vdev_initialize_state since there might be 7185 * a previous initialization process which has completed but 7186 * the thread is not exited. 
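 * The same POOL_INITIALIZE_START check also refuses to start on a
 * top-level vdev that is currently being removed
 * (vdev_top->vdev_removing), returning EBUSY.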
7187 */ 7188 if (cmd_type == POOL_INITIALIZE_START && 7189 (vd->vdev_initialize_thread != NULL || 7190 vd->vdev_top->vdev_removing)) { 7191 mutex_exit(&vd->vdev_initialize_lock); 7192 return (SET_ERROR(EBUSY)); 7193 } else if (cmd_type == POOL_INITIALIZE_CANCEL && 7194 (vd->vdev_initialize_state != VDEV_INITIALIZE_ACTIVE && 7195 vd->vdev_initialize_state != VDEV_INITIALIZE_SUSPENDED)) { 7196 mutex_exit(&vd->vdev_initialize_lock); 7197 return (SET_ERROR(ESRCH)); 7198 } else if (cmd_type == POOL_INITIALIZE_SUSPEND && 7199 vd->vdev_initialize_state != VDEV_INITIALIZE_ACTIVE) { 7200 mutex_exit(&vd->vdev_initialize_lock); 7201 return (SET_ERROR(ESRCH)); 7202 } 7203 7204 switch (cmd_type) { 7205 case POOL_INITIALIZE_START: 7206 vdev_initialize(vd); 7207 break; 7208 case POOL_INITIALIZE_CANCEL: 7209 vdev_initialize_stop(vd, VDEV_INITIALIZE_CANCELED, vd_list); 7210 break; 7211 case POOL_INITIALIZE_SUSPEND: 7212 vdev_initialize_stop(vd, VDEV_INITIALIZE_SUSPENDED, vd_list); 7213 break; 7214 default: 7215 panic("invalid cmd_type %llu", (unsigned long long)cmd_type); 7216 } 7217 mutex_exit(&vd->vdev_initialize_lock); 7218 7219 return (0); 7220 } 7221 7222 int 7223 spa_vdev_initialize(spa_t *spa, nvlist_t *nv, uint64_t cmd_type, 7224 nvlist_t *vdev_errlist) 7225 { 7226 int total_errors = 0; 7227 list_t vd_list; 7228 7229 list_create(&vd_list, sizeof (vdev_t), 7230 offsetof(vdev_t, vdev_initialize_node)); 7231 7232 /* 7233 * We hold the namespace lock through the whole function 7234 * to prevent any changes to the pool while we're starting or 7235 * stopping initialization. The config and state locks are held so that 7236 * we can properly assess the vdev state before we commit to 7237 * the initializing operation. 7238 */ 7239 mutex_enter(&spa_namespace_lock); 7240 7241 for (nvpair_t *pair = nvlist_next_nvpair(nv, NULL); 7242 pair != NULL; pair = nvlist_next_nvpair(nv, pair)) { 7243 uint64_t vdev_guid = fnvpair_value_uint64(pair); 7244 7245 int error = spa_vdev_initialize_impl(spa, vdev_guid, cmd_type, 7246 &vd_list); 7247 if (error != 0) { 7248 char guid_as_str[MAXNAMELEN]; 7249 7250 (void) snprintf(guid_as_str, sizeof (guid_as_str), 7251 "%llu", (unsigned long long)vdev_guid); 7252 fnvlist_add_int64(vdev_errlist, guid_as_str, error); 7253 total_errors++; 7254 } 7255 } 7256 7257 /* Wait for all initialize threads to stop. */ 7258 vdev_initialize_stop_wait(spa, &vd_list); 7259 7260 /* Sync out the initializing state */ 7261 txg_wait_synced(spa->spa_dsl_pool, 0); 7262 mutex_exit(&spa_namespace_lock); 7263 7264 list_destroy(&vd_list); 7265 7266 return (total_errors); 7267 } 7268 7269 static int 7270 spa_vdev_trim_impl(spa_t *spa, uint64_t guid, uint64_t cmd_type, 7271 uint64_t rate, boolean_t partial, boolean_t secure, list_t *vd_list) 7272 { 7273 ASSERT(MUTEX_HELD(&spa_namespace_lock)); 7274 7275 spa_config_enter(spa, SCL_CONFIG | SCL_STATE, FTAG, RW_READER); 7276 7277 /* Look up vdev and ensure it's a leaf. 
*/ 7278 vdev_t *vd = spa_lookup_by_guid(spa, guid, B_FALSE); 7279 if (vd == NULL || vd->vdev_detached) { 7280 spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG); 7281 return (SET_ERROR(ENODEV)); 7282 } else if (!vd->vdev_ops->vdev_op_leaf || !vdev_is_concrete(vd)) { 7283 spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG); 7284 return (SET_ERROR(EINVAL)); 7285 } else if (!vdev_writeable(vd)) { 7286 spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG); 7287 return (SET_ERROR(EROFS)); 7288 } else if (!vd->vdev_has_trim) { 7289 spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG); 7290 return (SET_ERROR(EOPNOTSUPP)); 7291 } else if (secure && !vd->vdev_has_securetrim) { 7292 spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG); 7293 return (SET_ERROR(EOPNOTSUPP)); 7294 } 7295 mutex_enter(&vd->vdev_trim_lock); 7296 spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG); 7297 7298 /* 7299 * When we activate a TRIM action we check to see if the 7300 * vdev_trim_thread is NULL. We do this instead of using the 7301 * vdev_trim_state since there might be a previous TRIM process 7302 * which has completed but the thread is not exited. 7303 */ 7304 if (cmd_type == POOL_TRIM_START && 7305 (vd->vdev_trim_thread != NULL || vd->vdev_top->vdev_removing)) { 7306 mutex_exit(&vd->vdev_trim_lock); 7307 return (SET_ERROR(EBUSY)); 7308 } else if (cmd_type == POOL_TRIM_CANCEL && 7309 (vd->vdev_trim_state != VDEV_TRIM_ACTIVE && 7310 vd->vdev_trim_state != VDEV_TRIM_SUSPENDED)) { 7311 mutex_exit(&vd->vdev_trim_lock); 7312 return (SET_ERROR(ESRCH)); 7313 } else if (cmd_type == POOL_TRIM_SUSPEND && 7314 vd->vdev_trim_state != VDEV_TRIM_ACTIVE) { 7315 mutex_exit(&vd->vdev_trim_lock); 7316 return (SET_ERROR(ESRCH)); 7317 } 7318 7319 switch (cmd_type) { 7320 case POOL_TRIM_START: 7321 vdev_trim(vd, rate, partial, secure); 7322 break; 7323 case POOL_TRIM_CANCEL: 7324 vdev_trim_stop(vd, VDEV_TRIM_CANCELED, vd_list); 7325 break; 7326 case POOL_TRIM_SUSPEND: 7327 vdev_trim_stop(vd, VDEV_TRIM_SUSPENDED, vd_list); 7328 break; 7329 default: 7330 panic("invalid cmd_type %llu", (unsigned long long)cmd_type); 7331 } 7332 mutex_exit(&vd->vdev_trim_lock); 7333 7334 return (0); 7335 } 7336 7337 /* 7338 * Initiates a manual TRIM for the requested vdevs. This kicks off individual 7339 * TRIM threads for each child vdev. These threads pass over all of the free 7340 * space in the vdev's metaslabs and issue TRIM commands for that space. 7341 */ 7342 int 7343 spa_vdev_trim(spa_t *spa, nvlist_t *nv, uint64_t cmd_type, uint64_t rate, 7344 boolean_t partial, boolean_t secure, nvlist_t *vdev_errlist) 7345 { 7346 int total_errors = 0; 7347 list_t vd_list; 7348 7349 list_create(&vd_list, sizeof (vdev_t), 7350 offsetof(vdev_t, vdev_trim_node)); 7351 7352 /* 7353 * We hold the namespace lock through the whole function 7354 * to prevent any changes to the pool while we're starting or 7355 * stopping TRIM. The config and state locks are held so that 7356 * we can properly assess the vdev state before we commit to 7357 * the TRIM operation.
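 * Per-vdev failures are collected in 'vdev_errlist' (keyed by vdev guid)
 * and the total number of failed vdevs is returned to the caller.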
7358 */ 7359 mutex_enter(&spa_namespace_lock); 7360 7361 for (nvpair_t *pair = nvlist_next_nvpair(nv, NULL); 7362 pair != NULL; pair = nvlist_next_nvpair(nv, pair)) { 7363 uint64_t vdev_guid = fnvpair_value_uint64(pair); 7364 7365 int error = spa_vdev_trim_impl(spa, vdev_guid, cmd_type, 7366 rate, partial, secure, &vd_list); 7367 if (error != 0) { 7368 char guid_as_str[MAXNAMELEN]; 7369 7370 (void) snprintf(guid_as_str, sizeof (guid_as_str), 7371 "%llu", (unsigned long long)vdev_guid); 7372 fnvlist_add_int64(vdev_errlist, guid_as_str, error); 7373 total_errors++; 7374 } 7375 } 7376 7377 /* Wait for all TRIM threads to stop. */ 7378 vdev_trim_stop_wait(spa, &vd_list); 7379 7380 /* Sync out the TRIM state */ 7381 txg_wait_synced(spa->spa_dsl_pool, 0); 7382 mutex_exit(&spa_namespace_lock); 7383 7384 list_destroy(&vd_list); 7385 7386 return (total_errors); 7387 } 7388 7389 /* 7390 * Split a set of devices from their mirrors, and create a new pool from them. 7391 */ 7392 int 7393 spa_vdev_split_mirror(spa_t *spa, char *newname, nvlist_t *config, 7394 nvlist_t *props, boolean_t exp) 7395 { 7396 int error = 0; 7397 uint64_t txg, *glist; 7398 spa_t *newspa; 7399 uint_t c, children, lastlog; 7400 nvlist_t **child, *nvl, *tmp; 7401 dmu_tx_t *tx; 7402 char *altroot = NULL; 7403 vdev_t *rvd, **vml = NULL; /* vdev modify list */ 7404 boolean_t activate_slog; 7405 7406 ASSERT(spa_writeable(spa)); 7407 7408 txg = spa_vdev_enter(spa); 7409 7410 ASSERT(MUTEX_HELD(&spa_namespace_lock)); 7411 if (spa_feature_is_active(spa, SPA_FEATURE_POOL_CHECKPOINT)) { 7412 error = (spa_has_checkpoint(spa)) ? 7413 ZFS_ERR_CHECKPOINT_EXISTS : ZFS_ERR_DISCARDING_CHECKPOINT; 7414 return (spa_vdev_exit(spa, NULL, txg, error)); 7415 } 7416 7417 /* clear the log and flush everything up to now */ 7418 activate_slog = spa_passivate_log(spa); 7419 (void) spa_vdev_config_exit(spa, NULL, txg, 0, FTAG); 7420 error = spa_reset_logs(spa); 7421 txg = spa_vdev_config_enter(spa); 7422 7423 if (activate_slog) 7424 spa_activate_log(spa); 7425 7426 if (error != 0) 7427 return (spa_vdev_exit(spa, NULL, txg, error)); 7428 7429 /* check new spa name before going any further */ 7430 if (spa_lookup(newname) != NULL) 7431 return (spa_vdev_exit(spa, NULL, txg, EEXIST)); 7432 7433 /* 7434 * scan through all the children to ensure they're all mirrors 7435 */ 7436 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &nvl) != 0 || 7437 nvlist_lookup_nvlist_array(nvl, ZPOOL_CONFIG_CHILDREN, &child, 7438 &children) != 0) 7439 return (spa_vdev_exit(spa, NULL, txg, EINVAL)); 7440 7441 /* first, check to ensure we've got the right child count */ 7442 rvd = spa->spa_root_vdev; 7443 lastlog = 0; 7444 for (c = 0; c < rvd->vdev_children; c++) { 7445 vdev_t *vd = rvd->vdev_child[c]; 7446 7447 /* don't count the holes & logs as children */ 7448 if (vd->vdev_islog || (vd->vdev_ops != &vdev_indirect_ops && 7449 !vdev_is_concrete(vd))) { 7450 if (lastlog == 0) 7451 lastlog = c; 7452 continue; 7453 } 7454 7455 lastlog = 0; 7456 } 7457 if (children != (lastlog != 0 ? 
lastlog : rvd->vdev_children)) 7458 return (spa_vdev_exit(spa, NULL, txg, EINVAL)); 7459 7460 /* next, ensure no spare or cache devices are part of the split */ 7461 if (nvlist_lookup_nvlist(nvl, ZPOOL_CONFIG_SPARES, &tmp) == 0 || 7462 nvlist_lookup_nvlist(nvl, ZPOOL_CONFIG_L2CACHE, &tmp) == 0) 7463 return (spa_vdev_exit(spa, NULL, txg, EINVAL)); 7464 7465 vml = kmem_zalloc(children * sizeof (vdev_t *), KM_SLEEP); 7466 glist = kmem_zalloc(children * sizeof (uint64_t), KM_SLEEP); 7467 7468 /* then, loop over each vdev and validate it */ 7469 for (c = 0; c < children; c++) { 7470 uint64_t is_hole = 0; 7471 7472 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_HOLE, 7473 &is_hole); 7474 7475 if (is_hole != 0) { 7476 if (spa->spa_root_vdev->vdev_child[c]->vdev_ishole || 7477 spa->spa_root_vdev->vdev_child[c]->vdev_islog) { 7478 continue; 7479 } else { 7480 error = SET_ERROR(EINVAL); 7481 break; 7482 } 7483 } 7484 7485 /* deal with indirect vdevs */ 7486 if (spa->spa_root_vdev->vdev_child[c]->vdev_ops == 7487 &vdev_indirect_ops) 7488 continue; 7489 7490 /* which disk is going to be split? */ 7491 if (nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_GUID, 7492 &glist[c]) != 0) { 7493 error = SET_ERROR(EINVAL); 7494 break; 7495 } 7496 7497 /* look it up in the spa */ 7498 vml[c] = spa_lookup_by_guid(spa, glist[c], B_FALSE); 7499 if (vml[c] == NULL) { 7500 error = SET_ERROR(ENODEV); 7501 break; 7502 } 7503 7504 /* make sure there's nothing stopping the split */ 7505 if (vml[c]->vdev_parent->vdev_ops != &vdev_mirror_ops || 7506 vml[c]->vdev_islog || 7507 !vdev_is_concrete(vml[c]) || 7508 vml[c]->vdev_isspare || 7509 vml[c]->vdev_isl2cache || 7510 !vdev_writeable(vml[c]) || 7511 vml[c]->vdev_children != 0 || 7512 vml[c]->vdev_state != VDEV_STATE_HEALTHY || 7513 c != spa->spa_root_vdev->vdev_child[c]->vdev_id) { 7514 error = SET_ERROR(EINVAL); 7515 break; 7516 } 7517 7518 if (vdev_dtl_required(vml[c]) || 7519 vdev_resilver_needed(vml[c], NULL, NULL)) { 7520 error = SET_ERROR(EBUSY); 7521 break; 7522 } 7523 7524 /* we need certain info from the top level */ 7525 VERIFY(nvlist_add_uint64(child[c], ZPOOL_CONFIG_METASLAB_ARRAY, 7526 vml[c]->vdev_top->vdev_ms_array) == 0); 7527 VERIFY(nvlist_add_uint64(child[c], ZPOOL_CONFIG_METASLAB_SHIFT, 7528 vml[c]->vdev_top->vdev_ms_shift) == 0); 7529 VERIFY(nvlist_add_uint64(child[c], ZPOOL_CONFIG_ASIZE, 7530 vml[c]->vdev_top->vdev_asize) == 0); 7531 VERIFY(nvlist_add_uint64(child[c], ZPOOL_CONFIG_ASHIFT, 7532 vml[c]->vdev_top->vdev_ashift) == 0); 7533 7534 /* transfer per-vdev ZAPs */ 7535 ASSERT3U(vml[c]->vdev_leaf_zap, !=, 0); 7536 VERIFY0(nvlist_add_uint64(child[c], 7537 ZPOOL_CONFIG_VDEV_LEAF_ZAP, vml[c]->vdev_leaf_zap)); 7538 7539 ASSERT3U(vml[c]->vdev_top->vdev_top_zap, !=, 0); 7540 VERIFY0(nvlist_add_uint64(child[c], 7541 ZPOOL_CONFIG_VDEV_TOP_ZAP, 7542 vml[c]->vdev_parent->vdev_top_zap)); 7543 } 7544 7545 if (error != 0) { 7546 kmem_free(vml, children * sizeof (vdev_t *)); 7547 kmem_free(glist, children * sizeof (uint64_t)); 7548 return (spa_vdev_exit(spa, NULL, txg, error)); 7549 } 7550 7551 /* stop writers from using the disks */ 7552 for (c = 0; c < children; c++) { 7553 if (vml[c] != NULL) 7554 vml[c]->vdev_offline = B_TRUE; 7555 } 7556 vdev_reopen(spa->spa_root_vdev); 7557 7558 /* 7559 * Temporarily record the splitting vdevs in the spa config. This 7560 * will disappear once the config is regenerated. 
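	 * The guid list is stored in spa_config_splitting under
	 * ZPOOL_CONFIG_SPLIT_LIST and attached to the pool config as
	 * ZPOOL_CONFIG_SPLIT before the root vdev's config is dirtied.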
7561 */ 7562 VERIFY(nvlist_alloc(&nvl, NV_UNIQUE_NAME, KM_SLEEP) == 0); 7563 VERIFY(nvlist_add_uint64_array(nvl, ZPOOL_CONFIG_SPLIT_LIST, 7564 glist, children) == 0); 7565 kmem_free(glist, children * sizeof (uint64_t)); 7566 7567 mutex_enter(&spa->spa_props_lock); 7568 VERIFY(nvlist_add_nvlist(spa->spa_config, ZPOOL_CONFIG_SPLIT, 7569 nvl) == 0); 7570 mutex_exit(&spa->spa_props_lock); 7571 spa->spa_config_splitting = nvl; 7572 vdev_config_dirty(spa->spa_root_vdev); 7573 7574 /* configure and create the new pool */ 7575 VERIFY(nvlist_add_string(config, ZPOOL_CONFIG_POOL_NAME, newname) == 0); 7576 VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_POOL_STATE, 7577 exp ? POOL_STATE_EXPORTED : POOL_STATE_ACTIVE) == 0); 7578 VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_VERSION, 7579 spa_version(spa)) == 0); 7580 VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_POOL_TXG, 7581 spa->spa_config_txg) == 0); 7582 VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_POOL_GUID, 7583 spa_generate_guid(NULL)) == 0); 7584 VERIFY0(nvlist_add_boolean(config, ZPOOL_CONFIG_HAS_PER_VDEV_ZAPS)); 7585 (void) nvlist_lookup_string(props, 7586 zpool_prop_to_name(ZPOOL_PROP_ALTROOT), &altroot); 7587 7588 /* add the new pool to the namespace */ 7589 newspa = spa_add(newname, config, altroot); 7590 newspa->spa_avz_action = AVZ_ACTION_REBUILD; 7591 newspa->spa_config_txg = spa->spa_config_txg; 7592 spa_set_log_state(newspa, SPA_LOG_CLEAR); 7593 7594 /* release the spa config lock, retaining the namespace lock */ 7595 spa_vdev_config_exit(spa, NULL, txg, 0, FTAG); 7596 7597 if (zio_injection_enabled) 7598 zio_handle_panic_injection(spa, FTAG, 1); 7599 7600 spa_activate(newspa, spa_mode_global); 7601 spa_async_suspend(newspa); 7602 7603 /* 7604 * Temporarily stop the initializing and TRIM activity. We set the 7605 * state to ACTIVE so that we know to resume initializing or TRIM 7606 * once the split has completed. 
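	 * The stopped vdevs are collected on local lists so that we can wait
	 * for their threads to exit before the new pool is loaded.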
7607 */ 7608 list_t vd_initialize_list; 7609 list_create(&vd_initialize_list, sizeof (vdev_t), 7610 offsetof(vdev_t, vdev_initialize_node)); 7611 7612 list_t vd_trim_list; 7613 list_create(&vd_trim_list, sizeof (vdev_t), 7614 offsetof(vdev_t, vdev_trim_node)); 7615 7616 for (c = 0; c < children; c++) { 7617 if (vml[c] != NULL && vml[c]->vdev_ops != &vdev_indirect_ops) { 7618 mutex_enter(&vml[c]->vdev_initialize_lock); 7619 vdev_initialize_stop(vml[c], 7620 VDEV_INITIALIZE_ACTIVE, &vd_initialize_list); 7621 mutex_exit(&vml[c]->vdev_initialize_lock); 7622 7623 mutex_enter(&vml[c]->vdev_trim_lock); 7624 vdev_trim_stop(vml[c], VDEV_TRIM_ACTIVE, &vd_trim_list); 7625 mutex_exit(&vml[c]->vdev_trim_lock); 7626 } 7627 } 7628 7629 vdev_initialize_stop_wait(spa, &vd_initialize_list); 7630 vdev_trim_stop_wait(spa, &vd_trim_list); 7631 7632 list_destroy(&vd_initialize_list); 7633 list_destroy(&vd_trim_list); 7634 7635 newspa->spa_config_source = SPA_CONFIG_SRC_SPLIT; 7636 newspa->spa_is_splitting = B_TRUE; 7637 7638 /* create the new pool from the disks of the original pool */ 7639 error = spa_load(newspa, SPA_LOAD_IMPORT, SPA_IMPORT_ASSEMBLE); 7640 if (error) 7641 goto out; 7642 7643 /* if that worked, generate a real config for the new pool */ 7644 if (newspa->spa_root_vdev != NULL) { 7645 VERIFY(nvlist_alloc(&newspa->spa_config_splitting, 7646 NV_UNIQUE_NAME, KM_SLEEP) == 0); 7647 VERIFY(nvlist_add_uint64(newspa->spa_config_splitting, 7648 ZPOOL_CONFIG_SPLIT_GUID, spa_guid(spa)) == 0); 7649 spa_config_set(newspa, spa_config_generate(newspa, NULL, -1ULL, 7650 B_TRUE)); 7651 } 7652 7653 /* set the props */ 7654 if (props != NULL) { 7655 spa_configfile_set(newspa, props, B_FALSE); 7656 error = spa_prop_set(newspa, props); 7657 if (error) 7658 goto out; 7659 } 7660 7661 /* flush everything */ 7662 txg = spa_vdev_config_enter(newspa); 7663 vdev_config_dirty(newspa->spa_root_vdev); 7664 (void) spa_vdev_config_exit(newspa, NULL, txg, 0, FTAG); 7665 7666 if (zio_injection_enabled) 7667 zio_handle_panic_injection(spa, FTAG, 2); 7668 7669 spa_async_resume(newspa); 7670 7671 /* finally, update the original pool's config */ 7672 txg = spa_vdev_config_enter(spa); 7673 tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir); 7674 error = dmu_tx_assign(tx, TXG_WAIT); 7675 if (error != 0) 7676 dmu_tx_abort(tx); 7677 for (c = 0; c < children; c++) { 7678 if (vml[c] != NULL && vml[c]->vdev_ops != &vdev_indirect_ops) { 7679 vdev_t *tvd = vml[c]->vdev_top; 7680 7681 /* 7682 * Need to be sure the detachable VDEV is not 7683 * on any *other* txg's DTL list to prevent it 7684 * from being accessed after it's freed. 
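			 * Walk every txg slot and remove it explicitly before
			 * vdev_split() and vdev_free() are called.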
7685 */ 7686 for (int t = 0; t < TXG_SIZE; t++) { 7687 (void) txg_list_remove_this( 7688 &tvd->vdev_dtl_list, vml[c], t); 7689 } 7690 7691 vdev_split(vml[c]); 7692 if (error == 0) 7693 spa_history_log_internal(spa, "detach", tx, 7694 "vdev=%s", vml[c]->vdev_path); 7695 7696 vdev_free(vml[c]); 7697 } 7698 } 7699 spa->spa_avz_action = AVZ_ACTION_REBUILD; 7700 vdev_config_dirty(spa->spa_root_vdev); 7701 spa->spa_config_splitting = NULL; 7702 nvlist_free(nvl); 7703 if (error == 0) 7704 dmu_tx_commit(tx); 7705 (void) spa_vdev_exit(spa, NULL, txg, 0); 7706 7707 if (zio_injection_enabled) 7708 zio_handle_panic_injection(spa, FTAG, 3); 7709 7710 /* split is complete; log a history record */ 7711 spa_history_log_internal(newspa, "split", NULL, 7712 "from pool %s", spa_name(spa)); 7713 7714 newspa->spa_is_splitting = B_FALSE; 7715 kmem_free(vml, children * sizeof (vdev_t *)); 7716 7717 /* if we're not going to mount the filesystems in userland, export */ 7718 if (exp) 7719 error = spa_export_common(newname, POOL_STATE_EXPORTED, NULL, 7720 B_FALSE, B_FALSE); 7721 7722 return (error); 7723 7724 out: 7725 spa_unload(newspa); 7726 spa_deactivate(newspa); 7727 spa_remove(newspa); 7728 7729 txg = spa_vdev_config_enter(spa); 7730 7731 /* re-online all offlined disks */ 7732 for (c = 0; c < children; c++) { 7733 if (vml[c] != NULL) 7734 vml[c]->vdev_offline = B_FALSE; 7735 } 7736 7737 /* restart initializing or trimming disks as necessary */ 7738 spa_async_request(spa, SPA_ASYNC_INITIALIZE_RESTART); 7739 spa_async_request(spa, SPA_ASYNC_TRIM_RESTART); 7740 spa_async_request(spa, SPA_ASYNC_AUTOTRIM_RESTART); 7741 7742 vdev_reopen(spa->spa_root_vdev); 7743 7744 nvlist_free(spa->spa_config_splitting); 7745 spa->spa_config_splitting = NULL; 7746 (void) spa_vdev_exit(spa, NULL, txg, error); 7747 7748 kmem_free(vml, children * sizeof (vdev_t *)); 7749 return (error); 7750 } 7751 7752 /* 7753 * Find any device that's done replacing, or a vdev marked 'unspare' that's 7754 * currently spared, so we can detach it. 7755 */ 7756 static vdev_t * 7757 spa_vdev_resilver_done_hunt(vdev_t *vd) 7758 { 7759 vdev_t *newvd, *oldvd; 7760 7761 for (int c = 0; c < vd->vdev_children; c++) { 7762 oldvd = spa_vdev_resilver_done_hunt(vd->vdev_child[c]); 7763 if (oldvd != NULL) 7764 return (oldvd); 7765 } 7766 7767 /* 7768 * Check for a completed replacement. We always consider the first 7769 * vdev in the list to be the oldest vdev, and the last one to be 7770 * the newest (see spa_vdev_attach() for how that works). In 7771 * the case where the newest vdev is faulted, we will not automatically 7772 * remove it after a resilver completes. This is OK as it will require 7773 * user intervention to determine which disk the admin wishes to keep. 7774 */ 7775 if (vd->vdev_ops == &vdev_replacing_ops) { 7776 ASSERT(vd->vdev_children > 1); 7777 7778 newvd = vd->vdev_child[vd->vdev_children - 1]; 7779 oldvd = vd->vdev_child[0]; 7780 7781 if (vdev_dtl_empty(newvd, DTL_MISSING) && 7782 vdev_dtl_empty(newvd, DTL_OUTAGE) && 7783 !vdev_dtl_required(oldvd)) 7784 return (oldvd); 7785 } 7786 7787 /* 7788 * Check for a completed resilver with the 'unspare' flag set. 7789 * Also potentially update faulted state. 
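	 * Either the first or the last child may carry the 'unspare' flag;
	 * the flagged child is kept (newvd) and the other is the detach
	 * candidate (oldvd).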
	 */
	if (vd->vdev_ops == &vdev_spare_ops) {
		vdev_t *first = vd->vdev_child[0];
		vdev_t *last = vd->vdev_child[vd->vdev_children - 1];

		if (last->vdev_unspare) {
			oldvd = first;
			newvd = last;
		} else if (first->vdev_unspare) {
			oldvd = last;
			newvd = first;
		} else {
			oldvd = NULL;
		}

		if (oldvd != NULL &&
		    vdev_dtl_empty(newvd, DTL_MISSING) &&
		    vdev_dtl_empty(newvd, DTL_OUTAGE) &&
		    !vdev_dtl_required(oldvd))
			return (oldvd);

		vdev_propagate_state(vd);

		/*
		 * If there is more than one spare attached to a disk,
		 * and those spares are not required, then we want to
		 * attempt to free them up now so that they can be used
		 * by other pools.  Once we're back down to a single
		 * disk+spare, we stop removing them.
		 */
		if (vd->vdev_children > 2) {
			newvd = vd->vdev_child[1];

			if (newvd->vdev_isspare && last->vdev_isspare &&
			    vdev_dtl_empty(last, DTL_MISSING) &&
			    vdev_dtl_empty(last, DTL_OUTAGE) &&
			    !vdev_dtl_required(newvd))
				return (newvd);
		}
	}

	return (NULL);
}

static void
spa_vdev_resilver_done(spa_t *spa)
{
	vdev_t *vd, *pvd, *ppvd;
	uint64_t guid, sguid, pguid, ppguid;

	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);

	while ((vd = spa_vdev_resilver_done_hunt(spa->spa_root_vdev)) != NULL) {
		pvd = vd->vdev_parent;
		ppvd = pvd->vdev_parent;
		guid = vd->vdev_guid;
		pguid = pvd->vdev_guid;
		ppguid = ppvd->vdev_guid;
		sguid = 0;
		/*
		 * If we have just finished replacing a hot spared device, then
		 * we need to detach the parent's first child (the original hot
		 * spare) as well.
		 */
		if (ppvd->vdev_ops == &vdev_spare_ops && pvd->vdev_id == 0 &&
		    ppvd->vdev_children == 2) {
			ASSERT(pvd->vdev_ops == &vdev_replacing_ops);
			sguid = ppvd->vdev_child[1]->vdev_guid;
		}
		ASSERT(vd->vdev_resilver_txg == 0 || !vdev_dtl_required(vd));

		spa_config_exit(spa, SCL_ALL, FTAG);
		if (spa_vdev_detach(spa, guid, pguid, B_TRUE) != 0)
			return;
		if (sguid && spa_vdev_detach(spa, sguid, ppguid, B_TRUE) != 0)
			return;
		spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
	}

	spa_config_exit(spa, SCL_ALL, FTAG);

	/*
	 * If a detach was not performed above, the replace waiters will not
	 * have been notified, in which case we must do so now.
	 */
	spa_notify_waiters(spa);
}

/*
 * Update the stored path or FRU for this vdev.
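 * Only leaf vdevs are accepted, and the config is synced out only when the
 * value actually changes.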
7880 */ 7881 static int 7882 spa_vdev_set_common(spa_t *spa, uint64_t guid, const char *value, 7883 boolean_t ispath) 7884 { 7885 vdev_t *vd; 7886 boolean_t sync = B_FALSE; 7887 7888 ASSERT(spa_writeable(spa)); 7889 7890 spa_vdev_state_enter(spa, SCL_ALL); 7891 7892 if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL) 7893 return (spa_vdev_state_exit(spa, NULL, ENOENT)); 7894 7895 if (!vd->vdev_ops->vdev_op_leaf) 7896 return (spa_vdev_state_exit(spa, NULL, ENOTSUP)); 7897 7898 if (ispath) { 7899 if (strcmp(value, vd->vdev_path) != 0) { 7900 spa_strfree(vd->vdev_path); 7901 vd->vdev_path = spa_strdup(value); 7902 sync = B_TRUE; 7903 } 7904 } else { 7905 if (vd->vdev_fru == NULL) { 7906 vd->vdev_fru = spa_strdup(value); 7907 sync = B_TRUE; 7908 } else if (strcmp(value, vd->vdev_fru) != 0) { 7909 spa_strfree(vd->vdev_fru); 7910 vd->vdev_fru = spa_strdup(value); 7911 sync = B_TRUE; 7912 } 7913 } 7914 7915 return (spa_vdev_state_exit(spa, sync ? vd : NULL, 0)); 7916 } 7917 7918 int 7919 spa_vdev_setpath(spa_t *spa, uint64_t guid, const char *newpath) 7920 { 7921 return (spa_vdev_set_common(spa, guid, newpath, B_TRUE)); 7922 } 7923 7924 int 7925 spa_vdev_setfru(spa_t *spa, uint64_t guid, const char *newfru) 7926 { 7927 return (spa_vdev_set_common(spa, guid, newfru, B_FALSE)); 7928 } 7929 7930 /* 7931 * ========================================================================== 7932 * SPA Scanning 7933 * ========================================================================== 7934 */ 7935 int 7936 spa_scrub_pause_resume(spa_t *spa, pool_scrub_cmd_t cmd) 7937 { 7938 ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == 0); 7939 7940 if (dsl_scan_resilvering(spa->spa_dsl_pool)) 7941 return (SET_ERROR(EBUSY)); 7942 7943 return (dsl_scrub_set_pause_resume(spa->spa_dsl_pool, cmd)); 7944 } 7945 7946 int 7947 spa_scan_stop(spa_t *spa) 7948 { 7949 ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == 0); 7950 if (dsl_scan_resilvering(spa->spa_dsl_pool)) 7951 return (SET_ERROR(EBUSY)); 7952 return (dsl_scan_cancel(spa->spa_dsl_pool)); 7953 } 7954 7955 int 7956 spa_scan(spa_t *spa, pool_scan_func_t func) 7957 { 7958 ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == 0); 7959 7960 if (func >= POOL_SCAN_FUNCS || func == POOL_SCAN_NONE) 7961 return (SET_ERROR(ENOTSUP)); 7962 7963 if (func == POOL_SCAN_RESILVER && 7964 !spa_feature_is_enabled(spa, SPA_FEATURE_RESILVER_DEFER)) 7965 return (SET_ERROR(ENOTSUP)); 7966 7967 /* 7968 * If a resilver was requested, but there is no DTL on a 7969 * writeable leaf device, we have nothing to do. 7970 */ 7971 if (func == POOL_SCAN_RESILVER && 7972 !vdev_resilver_needed(spa->spa_root_vdev, NULL, NULL)) { 7973 spa_async_request(spa, SPA_ASYNC_RESILVER_DONE); 7974 return (0); 7975 } 7976 7977 return (dsl_scan(spa->spa_dsl_pool, func)); 7978 } 7979 7980 /* 7981 * ========================================================================== 7982 * SPA async task processing 7983 * ========================================================================== 7984 */ 7985 7986 static void 7987 spa_async_remove(spa_t *spa, vdev_t *vd) 7988 { 7989 if (vd->vdev_remove_wanted) { 7990 vd->vdev_remove_wanted = B_FALSE; 7991 vd->vdev_delayed_close = B_FALSE; 7992 vdev_set_state(vd, B_FALSE, VDEV_STATE_REMOVED, VDEV_AUX_NONE); 7993 7994 /* 7995 * We want to clear the stats, but we don't want to do a full 7996 * vdev_clear() as that will cause us to throw away 7997 * degraded/faulted state as well as attempt to reopen the 7998 * device, all of which is a waste. 
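		 * Instead, zero the error counters directly and mark the
		 * top-level vdev state dirty so the change is synced out.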
7999 */ 8000 vd->vdev_stat.vs_read_errors = 0; 8001 vd->vdev_stat.vs_write_errors = 0; 8002 vd->vdev_stat.vs_checksum_errors = 0; 8003 8004 vdev_state_dirty(vd->vdev_top); 8005 8006 /* Tell userspace that the vdev is gone. */ 8007 zfs_post_remove(spa, vd); 8008 } 8009 8010 for (int c = 0; c < vd->vdev_children; c++) 8011 spa_async_remove(spa, vd->vdev_child[c]); 8012 } 8013 8014 static void 8015 spa_async_probe(spa_t *spa, vdev_t *vd) 8016 { 8017 if (vd->vdev_probe_wanted) { 8018 vd->vdev_probe_wanted = B_FALSE; 8019 vdev_reopen(vd); /* vdev_open() does the actual probe */ 8020 } 8021 8022 for (int c = 0; c < vd->vdev_children; c++) 8023 spa_async_probe(spa, vd->vdev_child[c]); 8024 } 8025 8026 static void 8027 spa_async_autoexpand(spa_t *spa, vdev_t *vd) 8028 { 8029 if (!spa->spa_autoexpand) 8030 return; 8031 8032 for (int c = 0; c < vd->vdev_children; c++) { 8033 vdev_t *cvd = vd->vdev_child[c]; 8034 spa_async_autoexpand(spa, cvd); 8035 } 8036 8037 if (!vd->vdev_ops->vdev_op_leaf || vd->vdev_physpath == NULL) 8038 return; 8039 8040 spa_event_notify(vd->vdev_spa, vd, NULL, ESC_ZFS_VDEV_AUTOEXPAND); 8041 } 8042 8043 static void 8044 spa_async_thread(void *arg) 8045 { 8046 spa_t *spa = (spa_t *)arg; 8047 dsl_pool_t *dp = spa->spa_dsl_pool; 8048 int tasks; 8049 8050 ASSERT(spa->spa_sync_on); 8051 8052 mutex_enter(&spa->spa_async_lock); 8053 tasks = spa->spa_async_tasks; 8054 spa->spa_async_tasks = 0; 8055 mutex_exit(&spa->spa_async_lock); 8056 8057 /* 8058 * See if the config needs to be updated. 8059 */ 8060 if (tasks & SPA_ASYNC_CONFIG_UPDATE) { 8061 uint64_t old_space, new_space; 8062 8063 mutex_enter(&spa_namespace_lock); 8064 old_space = metaslab_class_get_space(spa_normal_class(spa)); 8065 old_space += metaslab_class_get_space(spa_special_class(spa)); 8066 old_space += metaslab_class_get_space(spa_dedup_class(spa)); 8067 old_space += metaslab_class_get_space( 8068 spa_embedded_log_class(spa)); 8069 8070 spa_config_update(spa, SPA_CONFIG_UPDATE_POOL); 8071 8072 new_space = metaslab_class_get_space(spa_normal_class(spa)); 8073 new_space += metaslab_class_get_space(spa_special_class(spa)); 8074 new_space += metaslab_class_get_space(spa_dedup_class(spa)); 8075 new_space += metaslab_class_get_space( 8076 spa_embedded_log_class(spa)); 8077 mutex_exit(&spa_namespace_lock); 8078 8079 /* 8080 * If the pool grew as a result of the config update, 8081 * then log an internal history event. 8082 */ 8083 if (new_space != old_space) { 8084 spa_history_log_internal(spa, "vdev online", NULL, 8085 "pool '%s' size: %llu(+%llu)", 8086 spa_name(spa), (u_longlong_t)new_space, 8087 (u_longlong_t)(new_space - old_space)); 8088 } 8089 } 8090 8091 /* 8092 * See if any devices need to be marked REMOVED. 8093 */ 8094 if (tasks & SPA_ASYNC_REMOVE) { 8095 spa_vdev_state_enter(spa, SCL_NONE); 8096 spa_async_remove(spa, spa->spa_root_vdev); 8097 for (int i = 0; i < spa->spa_l2cache.sav_count; i++) 8098 spa_async_remove(spa, spa->spa_l2cache.sav_vdevs[i]); 8099 for (int i = 0; i < spa->spa_spares.sav_count; i++) 8100 spa_async_remove(spa, spa->spa_spares.sav_vdevs[i]); 8101 (void) spa_vdev_state_exit(spa, NULL, 0); 8102 } 8103 8104 if ((tasks & SPA_ASYNC_AUTOEXPAND) && !spa_suspended(spa)) { 8105 spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER); 8106 spa_async_autoexpand(spa, spa->spa_root_vdev); 8107 spa_config_exit(spa, SCL_CONFIG, FTAG); 8108 } 8109 8110 /* 8111 * See if any devices need to be probed. 
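	 * A probe is requested by setting vdev_probe_wanted;
	 * spa_async_probe() clears the flag and reopens the vdev, which
	 * performs the actual probe.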
8112 */ 8113 if (tasks & SPA_ASYNC_PROBE) { 8114 spa_vdev_state_enter(spa, SCL_NONE); 8115 spa_async_probe(spa, spa->spa_root_vdev); 8116 (void) spa_vdev_state_exit(spa, NULL, 0); 8117 } 8118 8119 /* 8120 * If any devices are done replacing, detach them. 8121 */ 8122 if (tasks & SPA_ASYNC_RESILVER_DONE || 8123 tasks & SPA_ASYNC_REBUILD_DONE) { 8124 spa_vdev_resilver_done(spa); 8125 } 8126 8127 /* 8128 * Kick off a resilver. 8129 */ 8130 if (tasks & SPA_ASYNC_RESILVER && 8131 !vdev_rebuild_active(spa->spa_root_vdev) && 8132 (!dsl_scan_resilvering(dp) || 8133 !spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_RESILVER_DEFER))) 8134 dsl_scan_restart_resilver(dp, 0); 8135 8136 if (tasks & SPA_ASYNC_INITIALIZE_RESTART) { 8137 mutex_enter(&spa_namespace_lock); 8138 spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER); 8139 vdev_initialize_restart(spa->spa_root_vdev); 8140 spa_config_exit(spa, SCL_CONFIG, FTAG); 8141 mutex_exit(&spa_namespace_lock); 8142 } 8143 8144 if (tasks & SPA_ASYNC_TRIM_RESTART) { 8145 mutex_enter(&spa_namespace_lock); 8146 spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER); 8147 vdev_trim_restart(spa->spa_root_vdev); 8148 spa_config_exit(spa, SCL_CONFIG, FTAG); 8149 mutex_exit(&spa_namespace_lock); 8150 } 8151 8152 if (tasks & SPA_ASYNC_AUTOTRIM_RESTART) { 8153 mutex_enter(&spa_namespace_lock); 8154 spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER); 8155 vdev_autotrim_restart(spa); 8156 spa_config_exit(spa, SCL_CONFIG, FTAG); 8157 mutex_exit(&spa_namespace_lock); 8158 } 8159 8160 /* 8161 * Kick off L2 cache whole device TRIM. 8162 */ 8163 if (tasks & SPA_ASYNC_L2CACHE_TRIM) { 8164 mutex_enter(&spa_namespace_lock); 8165 spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER); 8166 vdev_trim_l2arc(spa); 8167 spa_config_exit(spa, SCL_CONFIG, FTAG); 8168 mutex_exit(&spa_namespace_lock); 8169 } 8170 8171 /* 8172 * Kick off L2 cache rebuilding. 8173 */ 8174 if (tasks & SPA_ASYNC_L2CACHE_REBUILD) { 8175 mutex_enter(&spa_namespace_lock); 8176 spa_config_enter(spa, SCL_L2ARC, FTAG, RW_READER); 8177 l2arc_spa_rebuild_start(spa); 8178 spa_config_exit(spa, SCL_L2ARC, FTAG); 8179 mutex_exit(&spa_namespace_lock); 8180 } 8181 8182 /* 8183 * Let the world know that we're done. 
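	 * Clear spa_async_thread under spa_async_lock and broadcast
	 * spa_async_cv so that spa_async_suspend() callers can proceed.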
8184 */ 8185 mutex_enter(&spa->spa_async_lock); 8186 spa->spa_async_thread = NULL; 8187 cv_broadcast(&spa->spa_async_cv); 8188 mutex_exit(&spa->spa_async_lock); 8189 thread_exit(); 8190 } 8191 8192 void 8193 spa_async_suspend(spa_t *spa) 8194 { 8195 mutex_enter(&spa->spa_async_lock); 8196 spa->spa_async_suspended++; 8197 while (spa->spa_async_thread != NULL) 8198 cv_wait(&spa->spa_async_cv, &spa->spa_async_lock); 8199 mutex_exit(&spa->spa_async_lock); 8200 8201 spa_vdev_remove_suspend(spa); 8202 8203 zthr_t *condense_thread = spa->spa_condense_zthr; 8204 if (condense_thread != NULL) 8205 zthr_cancel(condense_thread); 8206 8207 zthr_t *discard_thread = spa->spa_checkpoint_discard_zthr; 8208 if (discard_thread != NULL) 8209 zthr_cancel(discard_thread); 8210 8211 zthr_t *ll_delete_thread = spa->spa_livelist_delete_zthr; 8212 if (ll_delete_thread != NULL) 8213 zthr_cancel(ll_delete_thread); 8214 8215 zthr_t *ll_condense_thread = spa->spa_livelist_condense_zthr; 8216 if (ll_condense_thread != NULL) 8217 zthr_cancel(ll_condense_thread); 8218 } 8219 8220 void 8221 spa_async_resume(spa_t *spa) 8222 { 8223 mutex_enter(&spa->spa_async_lock); 8224 ASSERT(spa->spa_async_suspended != 0); 8225 spa->spa_async_suspended--; 8226 mutex_exit(&spa->spa_async_lock); 8227 spa_restart_removal(spa); 8228 8229 zthr_t *condense_thread = spa->spa_condense_zthr; 8230 if (condense_thread != NULL) 8231 zthr_resume(condense_thread); 8232 8233 zthr_t *discard_thread = spa->spa_checkpoint_discard_zthr; 8234 if (discard_thread != NULL) 8235 zthr_resume(discard_thread); 8236 8237 zthr_t *ll_delete_thread = spa->spa_livelist_delete_zthr; 8238 if (ll_delete_thread != NULL) 8239 zthr_resume(ll_delete_thread); 8240 8241 zthr_t *ll_condense_thread = spa->spa_livelist_condense_zthr; 8242 if (ll_condense_thread != NULL) 8243 zthr_resume(ll_condense_thread); 8244 } 8245 8246 static boolean_t 8247 spa_async_tasks_pending(spa_t *spa) 8248 { 8249 uint_t non_config_tasks; 8250 uint_t config_task; 8251 boolean_t config_task_suspended; 8252 8253 non_config_tasks = spa->spa_async_tasks & ~SPA_ASYNC_CONFIG_UPDATE; 8254 config_task = spa->spa_async_tasks & SPA_ASYNC_CONFIG_UPDATE; 8255 if (spa->spa_ccw_fail_time == 0) { 8256 config_task_suspended = B_FALSE; 8257 } else { 8258 config_task_suspended = 8259 (gethrtime() - spa->spa_ccw_fail_time) < 8260 ((hrtime_t)zfs_ccw_retry_interval * NANOSEC); 8261 } 8262 8263 return (non_config_tasks || (config_task && !config_task_suspended)); 8264 } 8265 8266 static void 8267 spa_async_dispatch(spa_t *spa) 8268 { 8269 mutex_enter(&spa->spa_async_lock); 8270 if (spa_async_tasks_pending(spa) && 8271 !spa->spa_async_suspended && 8272 spa->spa_async_thread == NULL) 8273 spa->spa_async_thread = thread_create(NULL, 0, 8274 spa_async_thread, spa, 0, &p0, TS_RUN, maxclsyspri); 8275 mutex_exit(&spa->spa_async_lock); 8276 } 8277 8278 void 8279 spa_async_request(spa_t *spa, int task) 8280 { 8281 zfs_dbgmsg("spa=%s async request task=%u", spa->spa_name, task); 8282 mutex_enter(&spa->spa_async_lock); 8283 spa->spa_async_tasks |= task; 8284 mutex_exit(&spa->spa_async_lock); 8285 } 8286 8287 int 8288 spa_async_tasks(spa_t *spa) 8289 { 8290 return (spa->spa_async_tasks); 8291 } 8292 8293 /* 8294 * ========================================================================== 8295 * SPA syncing routines 8296 * ========================================================================== 8297 */ 8298 8299 8300 static int 8301 bpobj_enqueue_cb(void *arg, const blkptr_t *bp, boolean_t bp_freed, 8302 dmu_tx_t *tx) 8303 { 8304 
bpobj_t *bpo = arg; 8305 bpobj_enqueue(bpo, bp, bp_freed, tx); 8306 return (0); 8307 } 8308 8309 int 8310 bpobj_enqueue_alloc_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx) 8311 { 8312 return (bpobj_enqueue_cb(arg, bp, B_FALSE, tx)); 8313 } 8314 8315 int 8316 bpobj_enqueue_free_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx) 8317 { 8318 return (bpobj_enqueue_cb(arg, bp, B_TRUE, tx)); 8319 } 8320 8321 static int 8322 spa_free_sync_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx) 8323 { 8324 zio_t *pio = arg; 8325 8326 zio_nowait(zio_free_sync(pio, pio->io_spa, dmu_tx_get_txg(tx), bp, 8327 pio->io_flags)); 8328 return (0); 8329 } 8330 8331 static int 8332 bpobj_spa_free_sync_cb(void *arg, const blkptr_t *bp, boolean_t bp_freed, 8333 dmu_tx_t *tx) 8334 { 8335 ASSERT(!bp_freed); 8336 return (spa_free_sync_cb(arg, bp, tx)); 8337 } 8338 8339 /* 8340 * Note: this simple function is not inlined to make it easier to dtrace the 8341 * amount of time spent syncing frees. 8342 */ 8343 static void 8344 spa_sync_frees(spa_t *spa, bplist_t *bpl, dmu_tx_t *tx) 8345 { 8346 zio_t *zio = zio_root(spa, NULL, NULL, 0); 8347 bplist_iterate(bpl, spa_free_sync_cb, zio, tx); 8348 VERIFY(zio_wait(zio) == 0); 8349 } 8350 8351 /* 8352 * Note: this simple function is not inlined to make it easier to dtrace the 8353 * amount of time spent syncing deferred frees. 8354 */ 8355 static void 8356 spa_sync_deferred_frees(spa_t *spa, dmu_tx_t *tx) 8357 { 8358 if (spa_sync_pass(spa) != 1) 8359 return; 8360 8361 /* 8362 * Note: 8363 * If the log space map feature is active, we stop deferring 8364 * frees to the next TXG and therefore running this function 8365 * would be considered a no-op as spa_deferred_bpobj should 8366 * not have any entries. 8367 * 8368 * That said we run this function anyway (instead of returning 8369 * immediately) for the edge-case scenario where we just 8370 * activated the log space map feature in this TXG but we have 8371 * deferred frees from the previous TXG. 8372 */ 8373 zio_t *zio = zio_root(spa, NULL, NULL, 0); 8374 VERIFY3U(bpobj_iterate(&spa->spa_deferred_bpobj, 8375 bpobj_spa_free_sync_cb, zio, tx), ==, 0); 8376 VERIFY0(zio_wait(zio)); 8377 } 8378 8379 static void 8380 spa_sync_nvlist(spa_t *spa, uint64_t obj, nvlist_t *nv, dmu_tx_t *tx) 8381 { 8382 char *packed = NULL; 8383 size_t bufsize; 8384 size_t nvsize = 0; 8385 dmu_buf_t *db; 8386 8387 VERIFY(nvlist_size(nv, &nvsize, NV_ENCODE_XDR) == 0); 8388 8389 /* 8390 * Write full (SPA_CONFIG_BLOCKSIZE) blocks of configuration 8391 * information. This avoids the dmu_buf_will_dirty() path and 8392 * saves us a pre-read to get data we don't actually care about. 8393 */ 8394 bufsize = P2ROUNDUP((uint64_t)nvsize, SPA_CONFIG_BLOCKSIZE); 8395 packed = vmem_alloc(bufsize, KM_SLEEP); 8396 8397 VERIFY(nvlist_pack(nv, &packed, &nvsize, NV_ENCODE_XDR, 8398 KM_SLEEP) == 0); 8399 bzero(packed + nvsize, bufsize - nvsize); 8400 8401 dmu_write(spa->spa_meta_objset, obj, 0, bufsize, packed, tx); 8402 8403 vmem_free(packed, bufsize); 8404 8405 VERIFY(0 == dmu_bonus_hold(spa->spa_meta_objset, obj, FTAG, &db)); 8406 dmu_buf_will_dirty(db, tx); 8407 *(uint64_t *)db->db_data = nvsize; 8408 dmu_buf_rele(db, FTAG); 8409 } 8410 8411 static void 8412 spa_sync_aux_dev(spa_t *spa, spa_aux_vdev_t *sav, dmu_tx_t *tx, 8413 const char *config, const char *entry) 8414 { 8415 nvlist_t *nvroot; 8416 nvlist_t **list; 8417 int i; 8418 8419 if (!sav->sav_sync) 8420 return; 8421 8422 /* 8423 * Update the MOS nvlist describing the list of available devices. 
8424 * spa_validate_aux() will have already made sure this nvlist is 8425 * valid and the vdevs are labeled appropriately. 8426 */ 8427 if (sav->sav_object == 0) { 8428 sav->sav_object = dmu_object_alloc(spa->spa_meta_objset, 8429 DMU_OT_PACKED_NVLIST, 1 << 14, DMU_OT_PACKED_NVLIST_SIZE, 8430 sizeof (uint64_t), tx); 8431 VERIFY(zap_update(spa->spa_meta_objset, 8432 DMU_POOL_DIRECTORY_OBJECT, entry, sizeof (uint64_t), 1, 8433 &sav->sav_object, tx) == 0); 8434 } 8435 8436 VERIFY(nvlist_alloc(&nvroot, NV_UNIQUE_NAME, KM_SLEEP) == 0); 8437 if (sav->sav_count == 0) { 8438 VERIFY(nvlist_add_nvlist_array(nvroot, config, NULL, 0) == 0); 8439 } else { 8440 list = kmem_alloc(sav->sav_count*sizeof (void *), KM_SLEEP); 8441 for (i = 0; i < sav->sav_count; i++) 8442 list[i] = vdev_config_generate(spa, sav->sav_vdevs[i], 8443 B_FALSE, VDEV_CONFIG_L2CACHE); 8444 VERIFY(nvlist_add_nvlist_array(nvroot, config, list, 8445 sav->sav_count) == 0); 8446 for (i = 0; i < sav->sav_count; i++) 8447 nvlist_free(list[i]); 8448 kmem_free(list, sav->sav_count * sizeof (void *)); 8449 } 8450 8451 spa_sync_nvlist(spa, sav->sav_object, nvroot, tx); 8452 nvlist_free(nvroot); 8453 8454 sav->sav_sync = B_FALSE; 8455 } 8456 8457 /* 8458 * Rebuild spa's all-vdev ZAP from the vdev ZAPs indicated in each vdev_t. 8459 * The all-vdev ZAP must be empty. 8460 */ 8461 static void 8462 spa_avz_build(vdev_t *vd, uint64_t avz, dmu_tx_t *tx) 8463 { 8464 spa_t *spa = vd->vdev_spa; 8465 8466 if (vd->vdev_top_zap != 0) { 8467 VERIFY0(zap_add_int(spa->spa_meta_objset, avz, 8468 vd->vdev_top_zap, tx)); 8469 } 8470 if (vd->vdev_leaf_zap != 0) { 8471 VERIFY0(zap_add_int(spa->spa_meta_objset, avz, 8472 vd->vdev_leaf_zap, tx)); 8473 } 8474 for (uint64_t i = 0; i < vd->vdev_children; i++) { 8475 spa_avz_build(vd->vdev_child[i], avz, tx); 8476 } 8477 } 8478 8479 static void 8480 spa_sync_config_object(spa_t *spa, dmu_tx_t *tx) 8481 { 8482 nvlist_t *config; 8483 8484 /* 8485 * If the pool is being imported from a pre-per-vdev-ZAP version of ZFS, 8486 * its config may not be dirty but we still need to build per-vdev ZAPs. 8487 * Similarly, if the pool is being assembled (e.g. after a split), we 8488 * need to rebuild the AVZ although the config may not be dirty. 
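	 * In both cases spa_avz_action will have been set to something other
	 * than AVZ_ACTION_NONE, so we do not bail out below even though the
	 * config dirty list may be empty.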
8489 */ 8490 if (list_is_empty(&spa->spa_config_dirty_list) && 8491 spa->spa_avz_action == AVZ_ACTION_NONE) 8492 return; 8493 8494 spa_config_enter(spa, SCL_STATE, FTAG, RW_READER); 8495 8496 ASSERT(spa->spa_avz_action == AVZ_ACTION_NONE || 8497 spa->spa_avz_action == AVZ_ACTION_INITIALIZE || 8498 spa->spa_all_vdev_zaps != 0); 8499 8500 if (spa->spa_avz_action == AVZ_ACTION_REBUILD) { 8501 /* Make and build the new AVZ */ 8502 uint64_t new_avz = zap_create(spa->spa_meta_objset, 8503 DMU_OTN_ZAP_METADATA, DMU_OT_NONE, 0, tx); 8504 spa_avz_build(spa->spa_root_vdev, new_avz, tx); 8505 8506 /* Diff old AVZ with new one */ 8507 zap_cursor_t zc; 8508 zap_attribute_t za; 8509 8510 for (zap_cursor_init(&zc, spa->spa_meta_objset, 8511 spa->spa_all_vdev_zaps); 8512 zap_cursor_retrieve(&zc, &za) == 0; 8513 zap_cursor_advance(&zc)) { 8514 uint64_t vdzap = za.za_first_integer; 8515 if (zap_lookup_int(spa->spa_meta_objset, new_avz, 8516 vdzap) == ENOENT) { 8517 /* 8518 * ZAP is listed in old AVZ but not in new one; 8519 * destroy it 8520 */ 8521 VERIFY0(zap_destroy(spa->spa_meta_objset, vdzap, 8522 tx)); 8523 } 8524 } 8525 8526 zap_cursor_fini(&zc); 8527 8528 /* Destroy the old AVZ */ 8529 VERIFY0(zap_destroy(spa->spa_meta_objset, 8530 spa->spa_all_vdev_zaps, tx)); 8531 8532 /* Replace the old AVZ in the dir obj with the new one */ 8533 VERIFY0(zap_update(spa->spa_meta_objset, 8534 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_VDEV_ZAP_MAP, 8535 sizeof (new_avz), 1, &new_avz, tx)); 8536 8537 spa->spa_all_vdev_zaps = new_avz; 8538 } else if (spa->spa_avz_action == AVZ_ACTION_DESTROY) { 8539 zap_cursor_t zc; 8540 zap_attribute_t za; 8541 8542 /* Walk through the AVZ and destroy all listed ZAPs */ 8543 for (zap_cursor_init(&zc, spa->spa_meta_objset, 8544 spa->spa_all_vdev_zaps); 8545 zap_cursor_retrieve(&zc, &za) == 0; 8546 zap_cursor_advance(&zc)) { 8547 uint64_t zap = za.za_first_integer; 8548 VERIFY0(zap_destroy(spa->spa_meta_objset, zap, tx)); 8549 } 8550 8551 zap_cursor_fini(&zc); 8552 8553 /* Destroy and unlink the AVZ itself */ 8554 VERIFY0(zap_destroy(spa->spa_meta_objset, 8555 spa->spa_all_vdev_zaps, tx)); 8556 VERIFY0(zap_remove(spa->spa_meta_objset, 8557 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_VDEV_ZAP_MAP, tx)); 8558 spa->spa_all_vdev_zaps = 0; 8559 } 8560 8561 if (spa->spa_all_vdev_zaps == 0) { 8562 spa->spa_all_vdev_zaps = zap_create_link(spa->spa_meta_objset, 8563 DMU_OTN_ZAP_METADATA, DMU_POOL_DIRECTORY_OBJECT, 8564 DMU_POOL_VDEV_ZAP_MAP, tx); 8565 } 8566 spa->spa_avz_action = AVZ_ACTION_NONE; 8567 8568 /* Create ZAPs for vdevs that don't have them. */ 8569 vdev_construct_zaps(spa->spa_root_vdev, tx); 8570 8571 config = spa_config_generate(spa, spa->spa_root_vdev, 8572 dmu_tx_get_txg(tx), B_FALSE); 8573 8574 /* 8575 * If we're upgrading the spa version then make sure that 8576 * the config object gets updated with the correct version. 8577 */ 8578 if (spa->spa_ubsync.ub_version < spa->spa_uberblock.ub_version) 8579 fnvlist_add_uint64(config, ZPOOL_CONFIG_VERSION, 8580 spa->spa_uberblock.ub_version); 8581 8582 spa_config_exit(spa, SCL_STATE, FTAG); 8583 8584 nvlist_free(spa->spa_config_syncing); 8585 spa->spa_config_syncing = config; 8586 8587 spa_sync_nvlist(spa, spa->spa_config_object, config, tx); 8588 } 8589 8590 static void 8591 spa_sync_version(void *arg, dmu_tx_t *tx) 8592 { 8593 uint64_t *versionp = arg; 8594 uint64_t version = *versionp; 8595 spa_t *spa = dmu_tx_pool(tx)->dp_spa; 8596 8597 /* 8598 * Setting the version is special cased when first creating the pool. 
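	 * This sync task should therefore never run in the initial txg,
	 * which the assertion below enforces.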
8599 */ 8600 ASSERT(tx->tx_txg != TXG_INITIAL); 8601 8602 ASSERT(SPA_VERSION_IS_SUPPORTED(version)); 8603 ASSERT(version >= spa_version(spa)); 8604 8605 spa->spa_uberblock.ub_version = version; 8606 vdev_config_dirty(spa->spa_root_vdev); 8607 spa_history_log_internal(spa, "set", tx, "version=%lld", 8608 (longlong_t)version); 8609 } 8610 8611 /* 8612 * Set zpool properties. 8613 */ 8614 static void 8615 spa_sync_props(void *arg, dmu_tx_t *tx) 8616 { 8617 nvlist_t *nvp = arg; 8618 spa_t *spa = dmu_tx_pool(tx)->dp_spa; 8619 objset_t *mos = spa->spa_meta_objset; 8620 nvpair_t *elem = NULL; 8621 8622 mutex_enter(&spa->spa_props_lock); 8623 8624 while ((elem = nvlist_next_nvpair(nvp, elem))) { 8625 uint64_t intval; 8626 char *strval, *fname; 8627 zpool_prop_t prop; 8628 const char *propname; 8629 zprop_type_t proptype; 8630 spa_feature_t fid; 8631 8632 switch (prop = zpool_name_to_prop(nvpair_name(elem))) { 8633 case ZPOOL_PROP_INVAL: 8634 /* 8635 * We checked this earlier in spa_prop_validate(). 8636 */ 8637 ASSERT(zpool_prop_feature(nvpair_name(elem))); 8638 8639 fname = strchr(nvpair_name(elem), '@') + 1; 8640 VERIFY0(zfeature_lookup_name(fname, &fid)); 8641 8642 spa_feature_enable(spa, fid, tx); 8643 spa_history_log_internal(spa, "set", tx, 8644 "%s=enabled", nvpair_name(elem)); 8645 break; 8646 8647 case ZPOOL_PROP_VERSION: 8648 intval = fnvpair_value_uint64(elem); 8649 /* 8650 * The version is synced separately before other 8651 * properties and should be correct by now. 8652 */ 8653 ASSERT3U(spa_version(spa), >=, intval); 8654 break; 8655 8656 case ZPOOL_PROP_ALTROOT: 8657 /* 8658 * 'altroot' is a non-persistent property. It should 8659 * have been set temporarily at creation or import time. 8660 */ 8661 ASSERT(spa->spa_root != NULL); 8662 break; 8663 8664 case ZPOOL_PROP_READONLY: 8665 case ZPOOL_PROP_CACHEFILE: 8666 /* 8667 * 'readonly' and 'cachefile' are also non-persistent 8668 * properties. 8669 */ 8670 break; 8671 case ZPOOL_PROP_COMMENT: 8672 strval = fnvpair_value_string(elem); 8673 if (spa->spa_comment != NULL) 8674 spa_strfree(spa->spa_comment); 8675 spa->spa_comment = spa_strdup(strval); 8676 /* 8677 * We need to dirty the configuration on all the vdevs 8678 * so that their labels get updated. It's unnecessary 8679 * to do this for pool creation since the vdev's 8680 * configuration has already been dirtied. 8681 */ 8682 if (tx->tx_txg != TXG_INITIAL) 8683 vdev_config_dirty(spa->spa_root_vdev); 8684 spa_history_log_internal(spa, "set", tx, 8685 "%s=%s", nvpair_name(elem), strval); 8686 break; 8687 case ZPOOL_PROP_COMPATIBILITY: 8688 strval = fnvpair_value_string(elem); 8689 if (spa->spa_compatibility != NULL) 8690 spa_strfree(spa->spa_compatibility); 8691 spa->spa_compatibility = spa_strdup(strval); 8692 /* 8693 * Dirty the configuration on vdevs as above. 8694 */ 8695 if (tx->tx_txg != TXG_INITIAL) 8696 vdev_config_dirty(spa->spa_root_vdev); 8697 spa_history_log_internal(spa, "set", tx, 8698 "%s=%s", nvpair_name(elem), strval); 8699 break; 8700 8701 default: 8702 /* 8703 * Set pool property values in the poolprops mos object. 
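			 * The object is created on first use; string and
			 * index properties are type-checked before being
			 * written with zap_update().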
8704 */ 8705 if (spa->spa_pool_props_object == 0) { 8706 spa->spa_pool_props_object = 8707 zap_create_link(mos, DMU_OT_POOL_PROPS, 8708 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_PROPS, 8709 tx); 8710 } 8711 8712 /* normalize the property name */ 8713 propname = zpool_prop_to_name(prop); 8714 proptype = zpool_prop_get_type(prop); 8715 8716 if (nvpair_type(elem) == DATA_TYPE_STRING) { 8717 ASSERT(proptype == PROP_TYPE_STRING); 8718 strval = fnvpair_value_string(elem); 8719 VERIFY0(zap_update(mos, 8720 spa->spa_pool_props_object, propname, 8721 1, strlen(strval) + 1, strval, tx)); 8722 spa_history_log_internal(spa, "set", tx, 8723 "%s=%s", nvpair_name(elem), strval); 8724 } else if (nvpair_type(elem) == DATA_TYPE_UINT64) { 8725 intval = fnvpair_value_uint64(elem); 8726 8727 if (proptype == PROP_TYPE_INDEX) { 8728 const char *unused; 8729 VERIFY0(zpool_prop_index_to_string( 8730 prop, intval, &unused)); 8731 } 8732 VERIFY0(zap_update(mos, 8733 spa->spa_pool_props_object, propname, 8734 8, 1, &intval, tx)); 8735 spa_history_log_internal(spa, "set", tx, 8736 "%s=%lld", nvpair_name(elem), 8737 (longlong_t)intval); 8738 } else { 8739 ASSERT(0); /* not allowed */ 8740 } 8741 8742 switch (prop) { 8743 case ZPOOL_PROP_DELEGATION: 8744 spa->spa_delegation = intval; 8745 break; 8746 case ZPOOL_PROP_BOOTFS: 8747 spa->spa_bootfs = intval; 8748 break; 8749 case ZPOOL_PROP_FAILUREMODE: 8750 spa->spa_failmode = intval; 8751 break; 8752 case ZPOOL_PROP_AUTOTRIM: 8753 spa->spa_autotrim = intval; 8754 spa_async_request(spa, 8755 SPA_ASYNC_AUTOTRIM_RESTART); 8756 break; 8757 case ZPOOL_PROP_AUTOEXPAND: 8758 spa->spa_autoexpand = intval; 8759 if (tx->tx_txg != TXG_INITIAL) 8760 spa_async_request(spa, 8761 SPA_ASYNC_AUTOEXPAND); 8762 break; 8763 case ZPOOL_PROP_MULTIHOST: 8764 spa->spa_multihost = intval; 8765 break; 8766 default: 8767 break; 8768 } 8769 } 8770 8771 } 8772 8773 mutex_exit(&spa->spa_props_lock); 8774 } 8775 8776 /* 8777 * Perform one-time upgrade on-disk changes. spa_version() does not 8778 * reflect the new version this txg, so there must be no changes this 8779 * txg to anything that the upgrade code depends on after it executes. 8780 * Therefore this must be called after dsl_pool_sync() does the sync 8781 * tasks. 
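 * Only the first sync pass performs upgrades; later passes return
 * immediately.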
 */
static void
spa_sync_upgrades(spa_t *spa, dmu_tx_t *tx)
{
	if (spa_sync_pass(spa) != 1)
		return;

	dsl_pool_t *dp = spa->spa_dsl_pool;
	rrw_enter(&dp->dp_config_rwlock, RW_WRITER, FTAG);

	if (spa->spa_ubsync.ub_version < SPA_VERSION_ORIGIN &&
	    spa->spa_uberblock.ub_version >= SPA_VERSION_ORIGIN) {
		dsl_pool_create_origin(dp, tx);

		/* Keeping the origin open increases spa_minref */
		spa->spa_minref += 3;
	}

	if (spa->spa_ubsync.ub_version < SPA_VERSION_NEXT_CLONES &&
	    spa->spa_uberblock.ub_version >= SPA_VERSION_NEXT_CLONES) {
		dsl_pool_upgrade_clones(dp, tx);
	}

	if (spa->spa_ubsync.ub_version < SPA_VERSION_DIR_CLONES &&
	    spa->spa_uberblock.ub_version >= SPA_VERSION_DIR_CLONES) {
		dsl_pool_upgrade_dir_clones(dp, tx);

		/* Keeping the freedir open increases spa_minref */
		spa->spa_minref += 3;
	}

	if (spa->spa_ubsync.ub_version < SPA_VERSION_FEATURES &&
	    spa->spa_uberblock.ub_version >= SPA_VERSION_FEATURES) {
		spa_feature_create_zap_objects(spa, tx);
	}

	/*
	 * The LZ4_COMPRESS feature's behaviour was changed to
	 * activate_on_enable when the ability to use lz4 compression for
	 * metadata was added. Old pools that have this feature enabled must
	 * be upgraded to have this feature active.
	 */
	if (spa->spa_uberblock.ub_version >= SPA_VERSION_FEATURES) {
		boolean_t lz4_en = spa_feature_is_enabled(spa,
		    SPA_FEATURE_LZ4_COMPRESS);
		boolean_t lz4_ac = spa_feature_is_active(spa,
		    SPA_FEATURE_LZ4_COMPRESS);

		if (lz4_en && !lz4_ac)
			spa_feature_incr(spa, SPA_FEATURE_LZ4_COMPRESS, tx);
	}

	/*
	 * If we haven't written the salt, do so now. Note that the
	 * feature may not be activated yet, but that's fine since
	 * the presence of this ZAP entry is backwards compatible.
	 */
	if (zap_contains(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_CHECKSUM_SALT) == ENOENT) {
		VERIFY0(zap_add(spa->spa_meta_objset,
		    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_CHECKSUM_SALT, 1,
		    sizeof (spa->spa_cksum_salt.zcs_bytes),
		    spa->spa_cksum_salt.zcs_bytes, tx));
	}

	rrw_exit(&dp->dp_config_rwlock, FTAG);
}

static void
vdev_indirect_state_sync_verify(vdev_t *vd)
{
	vdev_indirect_mapping_t *vim __maybe_unused = vd->vdev_indirect_mapping;
	vdev_indirect_births_t *vib __maybe_unused = vd->vdev_indirect_births;

	if (vd->vdev_ops == &vdev_indirect_ops) {
		ASSERT(vim != NULL);
		ASSERT(vib != NULL);
	}

	uint64_t obsolete_sm_object = 0;
	ASSERT0(vdev_obsolete_sm_object(vd, &obsolete_sm_object));
	if (obsolete_sm_object != 0) {
		ASSERT(vd->vdev_obsolete_sm != NULL);
		ASSERT(vd->vdev_removing ||
		    vd->vdev_ops == &vdev_indirect_ops);
		ASSERT(vdev_indirect_mapping_num_entries(vim) > 0);
		ASSERT(vdev_indirect_mapping_bytes_mapped(vim) > 0);
		ASSERT3U(obsolete_sm_object, ==,
		    space_map_object(vd->vdev_obsolete_sm));
		ASSERT3U(vdev_indirect_mapping_bytes_mapped(vim), >=,
		    space_map_allocated(vd->vdev_obsolete_sm));
	}
	ASSERT(vd->vdev_obsolete_segments != NULL);

	/*
	 * Since frees / remaps to an indirect vdev can only
	 * happen in syncing context, the obsolete segments
	 * tree must be empty when we start syncing.
8880 */ 8881 ASSERT0(range_tree_space(vd->vdev_obsolete_segments)); 8882 } 8883 8884 /* 8885 * Set the top-level vdev's max queue depth. Evaluate each top-level's 8886 * async write queue depth in case it changed. The max queue depth will 8887 * not change in the middle of syncing out this txg. 8888 */ 8889 static void 8890 spa_sync_adjust_vdev_max_queue_depth(spa_t *spa) 8891 { 8892 ASSERT(spa_writeable(spa)); 8893 8894 vdev_t *rvd = spa->spa_root_vdev; 8895 uint32_t max_queue_depth = zfs_vdev_async_write_max_active * 8896 zfs_vdev_queue_depth_pct / 100; 8897 metaslab_class_t *normal = spa_normal_class(spa); 8898 metaslab_class_t *special = spa_special_class(spa); 8899 metaslab_class_t *dedup = spa_dedup_class(spa); 8900 8901 uint64_t slots_per_allocator = 0; 8902 for (int c = 0; c < rvd->vdev_children; c++) { 8903 vdev_t *tvd = rvd->vdev_child[c]; 8904 8905 metaslab_group_t *mg = tvd->vdev_mg; 8906 if (mg == NULL || !metaslab_group_initialized(mg)) 8907 continue; 8908 8909 metaslab_class_t *mc = mg->mg_class; 8910 if (mc != normal && mc != special && mc != dedup) 8911 continue; 8912 8913 /* 8914 * It is safe to do a lock-free check here because only async 8915 * allocations look at mg_max_alloc_queue_depth, and async 8916 * allocations all happen from spa_sync(). 8917 */ 8918 for (int i = 0; i < mg->mg_allocators; i++) { 8919 ASSERT0(zfs_refcount_count( 8920 &(mg->mg_allocator[i].mga_alloc_queue_depth))); 8921 } 8922 mg->mg_max_alloc_queue_depth = max_queue_depth; 8923 8924 for (int i = 0; i < mg->mg_allocators; i++) { 8925 mg->mg_allocator[i].mga_cur_max_alloc_queue_depth = 8926 zfs_vdev_def_queue_depth; 8927 } 8928 slots_per_allocator += zfs_vdev_def_queue_depth; 8929 } 8930 8931 for (int i = 0; i < spa->spa_alloc_count; i++) { 8932 ASSERT0(zfs_refcount_count(&normal->mc_allocator[i]. 8933 mca_alloc_slots)); 8934 ASSERT0(zfs_refcount_count(&special->mc_allocator[i]. 8935 mca_alloc_slots)); 8936 ASSERT0(zfs_refcount_count(&dedup->mc_allocator[i]. 
8937 mca_alloc_slots)); 8938 normal->mc_allocator[i].mca_alloc_max_slots = 8939 slots_per_allocator; 8940 special->mc_allocator[i].mca_alloc_max_slots = 8941 slots_per_allocator; 8942 dedup->mc_allocator[i].mca_alloc_max_slots = 8943 slots_per_allocator; 8944 } 8945 normal->mc_alloc_throttle_enabled = zio_dva_throttle_enabled; 8946 special->mc_alloc_throttle_enabled = zio_dva_throttle_enabled; 8947 dedup->mc_alloc_throttle_enabled = zio_dva_throttle_enabled; 8948 } 8949 8950 static void 8951 spa_sync_condense_indirect(spa_t *spa, dmu_tx_t *tx) 8952 { 8953 ASSERT(spa_writeable(spa)); 8954 8955 vdev_t *rvd = spa->spa_root_vdev; 8956 for (int c = 0; c < rvd->vdev_children; c++) { 8957 vdev_t *vd = rvd->vdev_child[c]; 8958 vdev_indirect_state_sync_verify(vd); 8959 8960 if (vdev_indirect_should_condense(vd)) { 8961 spa_condense_indirect_start_sync(vd, tx); 8962 break; 8963 } 8964 } 8965 } 8966 8967 static void 8968 spa_sync_iterate_to_convergence(spa_t *spa, dmu_tx_t *tx) 8969 { 8970 objset_t *mos = spa->spa_meta_objset; 8971 dsl_pool_t *dp = spa->spa_dsl_pool; 8972 uint64_t txg = tx->tx_txg; 8973 bplist_t *free_bpl = &spa->spa_free_bplist[txg & TXG_MASK]; 8974 8975 do { 8976 int pass = ++spa->spa_sync_pass; 8977 8978 spa_sync_config_object(spa, tx); 8979 spa_sync_aux_dev(spa, &spa->spa_spares, tx, 8980 ZPOOL_CONFIG_SPARES, DMU_POOL_SPARES); 8981 spa_sync_aux_dev(spa, &spa->spa_l2cache, tx, 8982 ZPOOL_CONFIG_L2CACHE, DMU_POOL_L2CACHE); 8983 spa_errlog_sync(spa, txg); 8984 dsl_pool_sync(dp, txg); 8985 8986 if (pass < zfs_sync_pass_deferred_free || 8987 spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP)) { 8988 /* 8989 * If the log space map feature is active we don't 8990 * care about deferred frees and the deferred bpobj 8991 * as the log space map should effectively have the 8992 * same results (i.e. appending only to one object). 8993 */ 8994 spa_sync_frees(spa, free_bpl, tx); 8995 } else { 8996 /* 8997 * We can not defer frees in pass 1, because 8998 * we sync the deferred frees later in pass 1. 8999 */ 9000 ASSERT3U(pass, >, 1); 9001 bplist_iterate(free_bpl, bpobj_enqueue_alloc_cb, 9002 &spa->spa_deferred_bpobj, tx); 9003 } 9004 9005 ddt_sync(spa, txg); 9006 dsl_scan_sync(dp, tx); 9007 svr_sync(spa, tx); 9008 spa_sync_upgrades(spa, tx); 9009 9010 spa_flush_metaslabs(spa, tx); 9011 9012 vdev_t *vd = NULL; 9013 while ((vd = txg_list_remove(&spa->spa_vdev_txg_list, txg)) 9014 != NULL) 9015 vdev_sync(vd, txg); 9016 9017 /* 9018 * Note: We need to check if the MOS is dirty because we could 9019 * have marked the MOS dirty without updating the uberblock 9020 * (e.g. if we have sync tasks but no dirty user data). We need 9021 * to check the uberblock's rootbp because it is updated if we 9022 * have synced out dirty data (though in this case the MOS will 9023 * most likely also be dirty due to second order effects, we 9024 * don't want to rely on that here). 9025 */ 9026 if (pass == 1 && 9027 spa->spa_uberblock.ub_rootbp.blk_birth < txg && 9028 !dmu_objset_is_dirty(mos, txg)) { 9029 /* 9030 * Nothing changed on the first pass, therefore this 9031 * TXG is a no-op. Avoid syncing deferred frees, so 9032 * that we can keep this TXG as a no-op. 
9033 */ 9034 ASSERT(txg_list_empty(&dp->dp_dirty_datasets, txg)); 9035 ASSERT(txg_list_empty(&dp->dp_dirty_dirs, txg)); 9036 ASSERT(txg_list_empty(&dp->dp_sync_tasks, txg)); 9037 ASSERT(txg_list_empty(&dp->dp_early_sync_tasks, txg)); 9038 break; 9039 } 9040 9041 spa_sync_deferred_frees(spa, tx); 9042 } while (dmu_objset_is_dirty(mos, txg)); 9043 } 9044 9045 /* 9046 * Rewrite the vdev configuration (which includes the uberblock) to 9047 * commit the transaction group. 9048 * 9049 * If there are no dirty vdevs, we sync the uberblock to a few random 9050 * top-level vdevs that are known to be visible in the config cache 9051 * (see spa_vdev_add() for a complete description). If there *are* dirty 9052 * vdevs, sync the uberblock to all vdevs. 9053 */ 9054 static void 9055 spa_sync_rewrite_vdev_config(spa_t *spa, dmu_tx_t *tx) 9056 { 9057 vdev_t *rvd = spa->spa_root_vdev; 9058 uint64_t txg = tx->tx_txg; 9059 9060 for (;;) { 9061 int error = 0; 9062 9063 /* 9064 * We hold SCL_STATE to prevent vdev open/close/etc. 9065 * while we're attempting to write the vdev labels. 9066 */ 9067 spa_config_enter(spa, SCL_STATE, FTAG, RW_READER); 9068 9069 if (list_is_empty(&spa->spa_config_dirty_list)) { 9070 vdev_t *svd[SPA_SYNC_MIN_VDEVS] = { NULL }; 9071 int svdcount = 0; 9072 int children = rvd->vdev_children; 9073 int c0 = spa_get_random(children); 9074 9075 for (int c = 0; c < children; c++) { 9076 vdev_t *vd = 9077 rvd->vdev_child[(c0 + c) % children]; 9078 9079 /* Stop when revisiting the first vdev */ 9080 if (c > 0 && svd[0] == vd) 9081 break; 9082 9083 if (vd->vdev_ms_array == 0 || 9084 vd->vdev_islog || 9085 !vdev_is_concrete(vd)) 9086 continue; 9087 9088 svd[svdcount++] = vd; 9089 if (svdcount == SPA_SYNC_MIN_VDEVS) 9090 break; 9091 } 9092 error = vdev_config_sync(svd, svdcount, txg); 9093 } else { 9094 error = vdev_config_sync(rvd->vdev_child, 9095 rvd->vdev_children, txg); 9096 } 9097 9098 if (error == 0) 9099 spa->spa_last_synced_guid = rvd->vdev_guid; 9100 9101 spa_config_exit(spa, SCL_STATE, FTAG); 9102 9103 if (error == 0) 9104 break; 9105 zio_suspend(spa, NULL, ZIO_SUSPEND_IOERR); 9106 zio_resume_wait(spa); 9107 } 9108 } 9109 9110 /* 9111 * Sync the specified transaction group. New blocks may be dirtied as 9112 * part of the process, so we iterate until it converges. 9113 */ 9114 void 9115 spa_sync(spa_t *spa, uint64_t txg) 9116 { 9117 vdev_t *vd = NULL; 9118 9119 VERIFY(spa_writeable(spa)); 9120 9121 /* 9122 * Wait for i/os issued in open context that need to complete 9123 * before this txg syncs. 9124 */ 9125 (void) zio_wait(spa->spa_txg_zio[txg & TXG_MASK]); 9126 spa->spa_txg_zio[txg & TXG_MASK] = zio_root(spa, NULL, NULL, 9127 ZIO_FLAG_CANFAIL); 9128 9129 /* 9130 * Lock out configuration changes. 9131 */ 9132 spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER); 9133 9134 spa->spa_syncing_txg = txg; 9135 spa->spa_sync_pass = 0; 9136 9137 for (int i = 0; i < spa->spa_alloc_count; i++) { 9138 mutex_enter(&spa->spa_alloc_locks[i]); 9139 VERIFY0(avl_numnodes(&spa->spa_alloc_trees[i])); 9140 mutex_exit(&spa->spa_alloc_locks[i]); 9141 } 9142 9143 /* 9144 * If there are any pending vdev state changes, convert them 9145 * into config changes that go out with this transaction group. 9146 */ 9147 spa_config_enter(spa, SCL_STATE, FTAG, RW_READER); 9148 while (list_head(&spa->spa_state_dirty_list) != NULL) { 9149 /* 9150 * We need the write lock here because, for aux vdevs, 9151 * calling vdev_config_dirty() modifies sav_config. 
9152 * This is ugly and will become unnecessary when we 9153 * eliminate the aux vdev wart by integrating all vdevs 9154 * into the root vdev tree. 9155 */ 9156 spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG); 9157 spa_config_enter(spa, SCL_CONFIG | SCL_STATE, FTAG, RW_WRITER); 9158 while ((vd = list_head(&spa->spa_state_dirty_list)) != NULL) { 9159 vdev_state_clean(vd); 9160 vdev_config_dirty(vd); 9161 } 9162 spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG); 9163 spa_config_enter(spa, SCL_CONFIG | SCL_STATE, FTAG, RW_READER); 9164 } 9165 spa_config_exit(spa, SCL_STATE, FTAG); 9166 9167 dsl_pool_t *dp = spa->spa_dsl_pool; 9168 dmu_tx_t *tx = dmu_tx_create_assigned(dp, txg); 9169 9170 spa->spa_sync_starttime = gethrtime(); 9171 taskq_cancel_id(system_delay_taskq, spa->spa_deadman_tqid); 9172 spa->spa_deadman_tqid = taskq_dispatch_delay(system_delay_taskq, 9173 spa_deadman, spa, TQ_SLEEP, ddi_get_lbolt() + 9174 NSEC_TO_TICK(spa->spa_deadman_synctime)); 9175 9176 /* 9177 * If we are upgrading to SPA_VERSION_RAIDZ_DEFLATE this txg, 9178 * set spa_deflate if we have no raid-z vdevs. 9179 */ 9180 if (spa->spa_ubsync.ub_version < SPA_VERSION_RAIDZ_DEFLATE && 9181 spa->spa_uberblock.ub_version >= SPA_VERSION_RAIDZ_DEFLATE) { 9182 vdev_t *rvd = spa->spa_root_vdev; 9183 9184 int i; 9185 for (i = 0; i < rvd->vdev_children; i++) { 9186 vd = rvd->vdev_child[i]; 9187 if (vd->vdev_deflate_ratio != SPA_MINBLOCKSIZE) 9188 break; 9189 } 9190 if (i == rvd->vdev_children) { 9191 spa->spa_deflate = TRUE; 9192 VERIFY0(zap_add(spa->spa_meta_objset, 9193 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_DEFLATE, 9194 sizeof (uint64_t), 1, &spa->spa_deflate, tx)); 9195 } 9196 } 9197 9198 spa_sync_adjust_vdev_max_queue_depth(spa); 9199 9200 spa_sync_condense_indirect(spa, tx); 9201 9202 spa_sync_iterate_to_convergence(spa, tx); 9203 9204 #ifdef ZFS_DEBUG 9205 if (!list_is_empty(&spa->spa_config_dirty_list)) { 9206 /* 9207 * Make sure that the number of ZAPs for all the vdevs matches 9208 * the number of ZAPs in the per-vdev ZAP list. This only gets 9209 * called if the config is dirty; otherwise there may be 9210 * outstanding AVZ operations that weren't completed in 9211 * spa_sync_config_object. 9212 */ 9213 uint64_t all_vdev_zap_entry_count; 9214 ASSERT0(zap_count(spa->spa_meta_objset, 9215 spa->spa_all_vdev_zaps, &all_vdev_zap_entry_count)); 9216 ASSERT3U(vdev_count_verify_zaps(spa->spa_root_vdev), ==, 9217 all_vdev_zap_entry_count); 9218 } 9219 #endif 9220 9221 if (spa->spa_vdev_removal != NULL) { 9222 ASSERT0(spa->spa_vdev_removal->svr_bytes_done[txg & TXG_MASK]); 9223 } 9224 9225 spa_sync_rewrite_vdev_config(spa, tx); 9226 dmu_tx_commit(tx); 9227 9228 taskq_cancel_id(system_delay_taskq, spa->spa_deadman_tqid); 9229 spa->spa_deadman_tqid = 0; 9230 9231 /* 9232 * Clear the dirty config list. 9233 */ 9234 while ((vd = list_head(&spa->spa_config_dirty_list)) != NULL) 9235 vdev_config_clean(vd); 9236 9237 /* 9238 * Now that the new config has synced transactionally, 9239 * let it become visible to the config cache. 9240 */ 9241 if (spa->spa_config_syncing != NULL) { 9242 spa_config_set(spa, spa->spa_config_syncing); 9243 spa->spa_config_txg = txg; 9244 spa->spa_config_syncing = NULL; 9245 } 9246 9247 dsl_pool_sync_done(dp, txg); 9248 9249 for (int i = 0; i < spa->spa_alloc_count; i++) { 9250 mutex_enter(&spa->spa_alloc_locks[i]); 9251 VERIFY0(avl_numnodes(&spa->spa_alloc_trees[i])); 9252 mutex_exit(&spa->spa_alloc_locks[i]); 9253 } 9254 9255 /* 9256 * Update usable space statistics. 
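	 * vdev_sync_done() runs for every vdev on this txg's clean list, and
	 * older metaslabs are evicted from the normal and log classes.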
9257 */ 9258 while ((vd = txg_list_remove(&spa->spa_vdev_txg_list, TXG_CLEAN(txg))) 9259 != NULL) 9260 vdev_sync_done(vd, txg); 9261 9262 metaslab_class_evict_old(spa->spa_normal_class, txg); 9263 metaslab_class_evict_old(spa->spa_log_class, txg); 9264 9265 spa_sync_close_syncing_log_sm(spa); 9266 9267 spa_update_dspace(spa); 9268 9269 /* 9270 * It had better be the case that we didn't dirty anything 9271 * since vdev_config_sync(). 9272 */ 9273 ASSERT(txg_list_empty(&dp->dp_dirty_datasets, txg)); 9274 ASSERT(txg_list_empty(&dp->dp_dirty_dirs, txg)); 9275 ASSERT(txg_list_empty(&spa->spa_vdev_txg_list, txg)); 9276 9277 while (zfs_pause_spa_sync) 9278 delay(1); 9279 9280 spa->spa_sync_pass = 0; 9281 9282 /* 9283 * Update the last synced uberblock here. We want to do this at 9284 * the end of spa_sync() so that consumers of spa_last_synced_txg() 9285 * will be guaranteed that all the processing associated with 9286 * that txg has been completed. 9287 */ 9288 spa->spa_ubsync = spa->spa_uberblock; 9289 spa_config_exit(spa, SCL_CONFIG, FTAG); 9290 9291 spa_handle_ignored_writes(spa); 9292 9293 /* 9294 * If any async tasks have been requested, kick them off. 9295 */ 9296 spa_async_dispatch(spa); 9297 } 9298 9299 /* 9300 * Sync all pools. We don't want to hold the namespace lock across these 9301 * operations, so we take a reference on the spa_t and drop the lock during the 9302 * sync. 9303 */ 9304 void 9305 spa_sync_allpools(void) 9306 { 9307 spa_t *spa = NULL; 9308 mutex_enter(&spa_namespace_lock); 9309 while ((spa = spa_next(spa)) != NULL) { 9310 if (spa_state(spa) != POOL_STATE_ACTIVE || 9311 !spa_writeable(spa) || spa_suspended(spa)) 9312 continue; 9313 spa_open_ref(spa, FTAG); 9314 mutex_exit(&spa_namespace_lock); 9315 txg_wait_synced(spa_get_dsl(spa), 0); 9316 mutex_enter(&spa_namespace_lock); 9317 spa_close(spa, FTAG); 9318 } 9319 mutex_exit(&spa_namespace_lock); 9320 } 9321 9322 /* 9323 * ========================================================================== 9324 * Miscellaneous routines 9325 * ========================================================================== 9326 */ 9327 9328 /* 9329 * Remove all pools in the system. 9330 */ 9331 void 9332 spa_evict_all(void) 9333 { 9334 spa_t *spa; 9335 9336 /* 9337 * Remove all cached state. All pools should be closed now, 9338 * so every spa in the AVL tree should be unreferenced. 9339 */ 9340 mutex_enter(&spa_namespace_lock); 9341 while ((spa = spa_next(NULL)) != NULL) { 9342 /* 9343 * Stop async tasks. The async thread may need to detach 9344 * a device that's been replaced, which requires grabbing 9345 * spa_namespace_lock, so we must drop it here. 
9346 */ 9347 spa_open_ref(spa, FTAG); 9348 mutex_exit(&spa_namespace_lock); 9349 spa_async_suspend(spa); 9350 mutex_enter(&spa_namespace_lock); 9351 spa_close(spa, FTAG); 9352 9353 if (spa->spa_state != POOL_STATE_UNINITIALIZED) { 9354 spa_unload(spa); 9355 spa_deactivate(spa); 9356 } 9357 spa_remove(spa); 9358 } 9359 mutex_exit(&spa_namespace_lock); 9360 } 9361 9362 vdev_t * 9363 spa_lookup_by_guid(spa_t *spa, uint64_t guid, boolean_t aux) 9364 { 9365 vdev_t *vd; 9366 int i; 9367 9368 if ((vd = vdev_lookup_by_guid(spa->spa_root_vdev, guid)) != NULL) 9369 return (vd); 9370 9371 if (aux) { 9372 for (i = 0; i < spa->spa_l2cache.sav_count; i++) { 9373 vd = spa->spa_l2cache.sav_vdevs[i]; 9374 if (vd->vdev_guid == guid) 9375 return (vd); 9376 } 9377 9378 for (i = 0; i < spa->spa_spares.sav_count; i++) { 9379 vd = spa->spa_spares.sav_vdevs[i]; 9380 if (vd->vdev_guid == guid) 9381 return (vd); 9382 } 9383 } 9384 9385 return (NULL); 9386 } 9387 9388 void 9389 spa_upgrade(spa_t *spa, uint64_t version) 9390 { 9391 ASSERT(spa_writeable(spa)); 9392 9393 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 9394 9395 /* 9396 * This should only be called for a non-faulted pool, and since a 9397 * future version would result in an unopenable pool, this shouldn't be 9398 * possible. 9399 */ 9400 ASSERT(SPA_VERSION_IS_SUPPORTED(spa->spa_uberblock.ub_version)); 9401 ASSERT3U(version, >=, spa->spa_uberblock.ub_version); 9402 9403 spa->spa_uberblock.ub_version = version; 9404 vdev_config_dirty(spa->spa_root_vdev); 9405 9406 spa_config_exit(spa, SCL_ALL, FTAG); 9407 9408 txg_wait_synced(spa_get_dsl(spa), 0); 9409 } 9410 9411 boolean_t 9412 spa_has_spare(spa_t *spa, uint64_t guid) 9413 { 9414 int i; 9415 uint64_t spareguid; 9416 spa_aux_vdev_t *sav = &spa->spa_spares; 9417 9418 for (i = 0; i < sav->sav_count; i++) 9419 if (sav->sav_vdevs[i]->vdev_guid == guid) 9420 return (B_TRUE); 9421 9422 for (i = 0; i < sav->sav_npending; i++) { 9423 if (nvlist_lookup_uint64(sav->sav_pending[i], ZPOOL_CONFIG_GUID, 9424 &spareguid) == 0 && spareguid == guid) 9425 return (B_TRUE); 9426 } 9427 9428 return (B_FALSE); 9429 } 9430 9431 /* 9432 * Check if a pool has an active shared spare device. 9433 * Note: reference count of an active spare is 2, as a spare and as a replace 9434 */ 9435 static boolean_t 9436 spa_has_active_shared_spare(spa_t *spa) 9437 { 9438 int i, refcnt; 9439 uint64_t pool; 9440 spa_aux_vdev_t *sav = &spa->spa_spares; 9441 9442 for (i = 0; i < sav->sav_count; i++) { 9443 if (spa_spare_exists(sav->sav_vdevs[i]->vdev_guid, &pool, 9444 &refcnt) && pool != 0ULL && pool == spa_guid(spa) && 9445 refcnt > 2) 9446 return (B_TRUE); 9447 } 9448 9449 return (B_FALSE); 9450 } 9451 9452 uint64_t 9453 spa_total_metaslabs(spa_t *spa) 9454 { 9455 vdev_t *rvd = spa->spa_root_vdev; 9456 9457 uint64_t m = 0; 9458 for (uint64_t c = 0; c < rvd->vdev_children; c++) { 9459 vdev_t *vd = rvd->vdev_child[c]; 9460 if (!vdev_is_concrete(vd)) 9461 continue; 9462 m += vd->vdev_ms_count; 9463 } 9464 return (m); 9465 } 9466 9467 /* 9468 * Notify any waiting threads that some activity has switched from being in- 9469 * progress to not-in-progress so that the thread can wake up and determine 9470 * whether it is finished waiting. 9471 */ 9472 void 9473 spa_notify_waiters(spa_t *spa) 9474 { 9475 /* 9476 * Acquiring spa_activities_lock here prevents the cv_broadcast from 9477 * happening between the waiting thread's check and cv_wait. 
9478 */ 9479 mutex_enter(&spa->spa_activities_lock); 9480 cv_broadcast(&spa->spa_activities_cv); 9481 mutex_exit(&spa->spa_activities_lock); 9482 } 9483 9484 /* 9485 * Notify any waiting threads that the pool is exporting, and then block until 9486 * they are finished using the spa_t. 9487 */ 9488 void 9489 spa_wake_waiters(spa_t *spa) 9490 { 9491 mutex_enter(&spa->spa_activities_lock); 9492 spa->spa_waiters_cancel = B_TRUE; 9493 cv_broadcast(&spa->spa_activities_cv); 9494 while (spa->spa_waiters != 0) 9495 cv_wait(&spa->spa_waiters_cv, &spa->spa_activities_lock); 9496 spa->spa_waiters_cancel = B_FALSE; 9497 mutex_exit(&spa->spa_activities_lock); 9498 } 9499 9500 /* Whether the vdev or any of its descendants are being initialized/trimmed. */ 9501 static boolean_t 9502 spa_vdev_activity_in_progress_impl(vdev_t *vd, zpool_wait_activity_t activity) 9503 { 9504 spa_t *spa = vd->vdev_spa; 9505 9506 ASSERT(spa_config_held(spa, SCL_CONFIG | SCL_STATE, RW_READER)); 9507 ASSERT(MUTEX_HELD(&spa->spa_activities_lock)); 9508 ASSERT(activity == ZPOOL_WAIT_INITIALIZE || 9509 activity == ZPOOL_WAIT_TRIM); 9510 9511 kmutex_t *lock = activity == ZPOOL_WAIT_INITIALIZE ? 9512 &vd->vdev_initialize_lock : &vd->vdev_trim_lock; 9513 9514 mutex_exit(&spa->spa_activities_lock); 9515 mutex_enter(lock); 9516 mutex_enter(&spa->spa_activities_lock); 9517 9518 boolean_t in_progress = (activity == ZPOOL_WAIT_INITIALIZE) ? 9519 (vd->vdev_initialize_state == VDEV_INITIALIZE_ACTIVE) : 9520 (vd->vdev_trim_state == VDEV_TRIM_ACTIVE); 9521 mutex_exit(lock); 9522 9523 if (in_progress) 9524 return (B_TRUE); 9525 9526 for (int i = 0; i < vd->vdev_children; i++) { 9527 if (spa_vdev_activity_in_progress_impl(vd->vdev_child[i], 9528 activity)) 9529 return (B_TRUE); 9530 } 9531 9532 return (B_FALSE); 9533 } 9534 9535 /* 9536 * If use_guid is true, this checks whether the vdev specified by guid is 9537 * being initialized/trimmed. Otherwise, it checks whether any vdev in the pool 9538 * is being initialized/trimmed. The caller must hold the config lock and 9539 * spa_activities_lock. 9540 */ 9541 static int 9542 spa_vdev_activity_in_progress(spa_t *spa, boolean_t use_guid, uint64_t guid, 9543 zpool_wait_activity_t activity, boolean_t *in_progress) 9544 { 9545 mutex_exit(&spa->spa_activities_lock); 9546 spa_config_enter(spa, SCL_CONFIG | SCL_STATE, FTAG, RW_READER); 9547 mutex_enter(&spa->spa_activities_lock); 9548 9549 vdev_t *vd; 9550 if (use_guid) { 9551 vd = spa_lookup_by_guid(spa, guid, B_FALSE); 9552 if (vd == NULL || !vd->vdev_ops->vdev_op_leaf) { 9553 spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG); 9554 return (EINVAL); 9555 } 9556 } else { 9557 vd = spa->spa_root_vdev; 9558 } 9559 9560 *in_progress = spa_vdev_activity_in_progress_impl(vd, activity); 9561 9562 spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG); 9563 return (0); 9564 } 9565 9566 /* 9567 * Locking for waiting threads 9568 * --------------------------- 9569 * 9570 * Waiting threads need a way to check whether a given activity is in progress, 9571 * and then, if it is, wait for it to complete. Each activity will have some 9572 * in-memory representation of the relevant on-disk state which can be used to 9573 * determine whether or not the activity is in progress. The in-memory state and 9574 * the locking used to protect it will be different for each activity, and may 9575 * not be suitable for use with a cvar (e.g., some state is protected by the 9576 * config lock). 
 * To allow waiting threads to wait without any races, another lock,
 * spa_activities_lock, is used.
 *
 * When the state is checked, both the activity-specific lock (if there is
 * one) and spa_activities_lock are held. In some cases, the activity-specific
 * lock is acquired explicitly (e.g. the config lock). In others, the locking
 * is internal to some check (e.g. bpobj_is_empty). After checking, the
 * waiting thread releases the activity-specific lock and, if the activity is
 * in progress, then cv_waits using spa_activities_lock.
 *
 * The waiting thread is woken when another thread, one completing some
 * activity, updates the state of the activity and then calls
 * spa_notify_waiters, which will cv_broadcast. This 'completing' thread only
 * needs to hold its activity-specific lock when updating the state, and this
 * lock can (but doesn't have to) be dropped before calling
 * spa_notify_waiters.
 *
 * Because spa_notify_waiters acquires spa_activities_lock before
 * broadcasting, and because it is held when the waiting thread checks the
 * state of the activity, it can never be the case that the completing thread
 * both updates the activity state and cv_broadcasts in between the waiting
 * thread's check and cv_wait. Thus, a waiting thread can never miss a wakeup.
 *
 * In order to prevent deadlock, when the waiting thread does its check, in
 * some cases it will temporarily drop spa_activities_lock in order to acquire
 * the activity-specific lock. The order in which spa_activities_lock and the
 * activity-specific lock are acquired in the waiting thread is determined by
 * the order in which they are acquired in the completing thread; if the
 * completing thread calls spa_notify_waiters with the activity-specific lock
 * held, then the waiting thread must also acquire the activity-specific lock
 * first.
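 *
 * In outline (a simplified sketch of the pattern described above, not
 * verbatim code):
 *
 *   waiter                                 completer
 *   ------                                 ---------
 *   mutex_enter(&spa_activities_lock)      <update activity state>
 *   <check activity state>                 spa_notify_waiters():
 *   if (in progress)                         mutex_enter(&spa_activities_lock)
 *     cv_wait(&spa_activities_cv,            cv_broadcast(&spa_activities_cv)
 *         &spa_activities_lock)              mutex_exit(&spa_activities_lock)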
9606 */ 9607 9608 static int 9609 spa_activity_in_progress(spa_t *spa, zpool_wait_activity_t activity, 9610 boolean_t use_tag, uint64_t tag, boolean_t *in_progress) 9611 { 9612 int error = 0; 9613 9614 ASSERT(MUTEX_HELD(&spa->spa_activities_lock)); 9615 9616 switch (activity) { 9617 case ZPOOL_WAIT_CKPT_DISCARD: 9618 *in_progress = 9619 (spa_feature_is_active(spa, SPA_FEATURE_POOL_CHECKPOINT) && 9620 zap_contains(spa_meta_objset(spa), 9621 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_ZPOOL_CHECKPOINT) == 9622 ENOENT); 9623 break; 9624 case ZPOOL_WAIT_FREE: 9625 *in_progress = ((spa_version(spa) >= SPA_VERSION_DEADLISTS && 9626 !bpobj_is_empty(&spa->spa_dsl_pool->dp_free_bpobj)) || 9627 spa_feature_is_active(spa, SPA_FEATURE_ASYNC_DESTROY) || 9628 spa_livelist_delete_check(spa)); 9629 break; 9630 case ZPOOL_WAIT_INITIALIZE: 9631 case ZPOOL_WAIT_TRIM: 9632 error = spa_vdev_activity_in_progress(spa, use_tag, tag, 9633 activity, in_progress); 9634 break; 9635 case ZPOOL_WAIT_REPLACE: 9636 mutex_exit(&spa->spa_activities_lock); 9637 spa_config_enter(spa, SCL_CONFIG | SCL_STATE, FTAG, RW_READER); 9638 mutex_enter(&spa->spa_activities_lock); 9639 9640 *in_progress = vdev_replace_in_progress(spa->spa_root_vdev); 9641 spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG); 9642 break; 9643 case ZPOOL_WAIT_REMOVE: 9644 *in_progress = (spa->spa_removing_phys.sr_state == 9645 DSS_SCANNING); 9646 break; 9647 case ZPOOL_WAIT_RESILVER: 9648 if ((*in_progress = vdev_rebuild_active(spa->spa_root_vdev))) 9649 break; 9650 /* fall through */ 9651 case ZPOOL_WAIT_SCRUB: 9652 { 9653 boolean_t scanning, paused, is_scrub; 9654 dsl_scan_t *scn = spa->spa_dsl_pool->dp_scan; 9655 9656 is_scrub = (scn->scn_phys.scn_func == POOL_SCAN_SCRUB); 9657 scanning = (scn->scn_phys.scn_state == DSS_SCANNING); 9658 paused = dsl_scan_is_paused_scrub(scn); 9659 *in_progress = (scanning && !paused && 9660 is_scrub == (activity == ZPOOL_WAIT_SCRUB)); 9661 break; 9662 } 9663 default: 9664 panic("unrecognized value for activity %d", activity); 9665 } 9666 9667 return (error); 9668 } 9669 9670 static int 9671 spa_wait_common(const char *pool, zpool_wait_activity_t activity, 9672 boolean_t use_tag, uint64_t tag, boolean_t *waited) 9673 { 9674 /* 9675 * The tag is used to distinguish between instances of an activity. 9676 * 'initialize' and 'trim' are the only activities that we use this for. 9677 * The other activities can only have a single instance in progress in a 9678 * pool at one time, making the tag unnecessary. 9679 * 9680 * There can be multiple devices being replaced at once, but since they 9681 * all finish once resilvering finishes, we don't bother keeping track 9682 * of them individually, we just wait for them all to finish. 9683 */ 9684 if (use_tag && activity != ZPOOL_WAIT_INITIALIZE && 9685 activity != ZPOOL_WAIT_TRIM) 9686 return (EINVAL); 9687 9688 if (activity < 0 || activity >= ZPOOL_WAIT_NUM_ACTIVITIES) 9689 return (EINVAL); 9690 9691 spa_t *spa; 9692 int error = spa_open(pool, &spa, FTAG); 9693 if (error != 0) 9694 return (error); 9695 9696 /* 9697 * Increment the spa's waiter count so that we can call spa_close and 9698 * still ensure that the spa_t doesn't get freed before this thread is 9699 * finished with it when the pool is exported. We want to call spa_close 9700 * before we start waiting because otherwise the additional ref would 9701 * prevent the pool from being exported or destroyed throughout the 9702 * potentially long wait. 
9703 */ 9704 mutex_enter(&spa->spa_activities_lock); 9705 spa->spa_waiters++; 9706 spa_close(spa, FTAG); 9707 9708 *waited = B_FALSE; 9709 for (;;) { 9710 boolean_t in_progress; 9711 error = spa_activity_in_progress(spa, activity, use_tag, tag, 9712 &in_progress); 9713 9714 if (error || !in_progress || spa->spa_waiters_cancel) 9715 break; 9716 9717 *waited = B_TRUE; 9718 9719 if (cv_wait_sig(&spa->spa_activities_cv, 9720 &spa->spa_activities_lock) == 0) { 9721 error = EINTR; 9722 break; 9723 } 9724 } 9725 9726 spa->spa_waiters--; 9727 cv_signal(&spa->spa_waiters_cv); 9728 mutex_exit(&spa->spa_activities_lock); 9729 9730 return (error); 9731 } 9732 9733 /* 9734 * Wait for a particular instance of the specified activity to complete, where 9735 * the instance is identified by 'tag' 9736 */ 9737 int 9738 spa_wait_tag(const char *pool, zpool_wait_activity_t activity, uint64_t tag, 9739 boolean_t *waited) 9740 { 9741 return (spa_wait_common(pool, activity, B_TRUE, tag, waited)); 9742 } 9743 9744 /* 9745 * Wait for all instances of the specified activity complete 9746 */ 9747 int 9748 spa_wait(const char *pool, zpool_wait_activity_t activity, boolean_t *waited) 9749 { 9750 9751 return (spa_wait_common(pool, activity, B_FALSE, 0, waited)); 9752 } 9753 9754 sysevent_t * 9755 spa_event_create(spa_t *spa, vdev_t *vd, nvlist_t *hist_nvl, const char *name) 9756 { 9757 sysevent_t *ev = NULL; 9758 #ifdef _KERNEL 9759 nvlist_t *resource; 9760 9761 resource = zfs_event_create(spa, vd, FM_SYSEVENT_CLASS, name, hist_nvl); 9762 if (resource) { 9763 ev = kmem_alloc(sizeof (sysevent_t), KM_SLEEP); 9764 ev->resource = resource; 9765 } 9766 #endif 9767 return (ev); 9768 } 9769 9770 void 9771 spa_event_post(sysevent_t *ev) 9772 { 9773 #ifdef _KERNEL 9774 if (ev) { 9775 zfs_zevent_post(ev->resource, NULL, zfs_zevent_post_cb); 9776 kmem_free(ev, sizeof (*ev)); 9777 } 9778 #endif 9779 } 9780 9781 /* 9782 * Post a zevent corresponding to the given sysevent. The 'name' must be one 9783 * of the event definitions in sys/sysevent/eventdefs.h. The payload will be 9784 * filled in from the spa and (optionally) the vdev. This doesn't do anything 9785 * in the userland libzpool, as we don't want consumers to misinterpret ztest 9786 * or zdb as real changes. 
9787 */ 9788 void 9789 spa_event_notify(spa_t *spa, vdev_t *vd, nvlist_t *hist_nvl, const char *name) 9790 { 9791 spa_event_post(spa_event_create(spa, vd, hist_nvl, name)); 9792 } 9793 9794 /* state manipulation functions */ 9795 EXPORT_SYMBOL(spa_open); 9796 EXPORT_SYMBOL(spa_open_rewind); 9797 EXPORT_SYMBOL(spa_get_stats); 9798 EXPORT_SYMBOL(spa_create); 9799 EXPORT_SYMBOL(spa_import); 9800 EXPORT_SYMBOL(spa_tryimport); 9801 EXPORT_SYMBOL(spa_destroy); 9802 EXPORT_SYMBOL(spa_export); 9803 EXPORT_SYMBOL(spa_reset); 9804 EXPORT_SYMBOL(spa_async_request); 9805 EXPORT_SYMBOL(spa_async_suspend); 9806 EXPORT_SYMBOL(spa_async_resume); 9807 EXPORT_SYMBOL(spa_inject_addref); 9808 EXPORT_SYMBOL(spa_inject_delref); 9809 EXPORT_SYMBOL(spa_scan_stat_init); 9810 EXPORT_SYMBOL(spa_scan_get_stats); 9811 9812 /* device manipulation */ 9813 EXPORT_SYMBOL(spa_vdev_add); 9814 EXPORT_SYMBOL(spa_vdev_attach); 9815 EXPORT_SYMBOL(spa_vdev_detach); 9816 EXPORT_SYMBOL(spa_vdev_setpath); 9817 EXPORT_SYMBOL(spa_vdev_setfru); 9818 EXPORT_SYMBOL(spa_vdev_split_mirror); 9819 9820 /* spare statech is global across all pools) */ 9821 EXPORT_SYMBOL(spa_spare_add); 9822 EXPORT_SYMBOL(spa_spare_remove); 9823 EXPORT_SYMBOL(spa_spare_exists); 9824 EXPORT_SYMBOL(spa_spare_activate); 9825 9826 /* L2ARC statech is global across all pools) */ 9827 EXPORT_SYMBOL(spa_l2cache_add); 9828 EXPORT_SYMBOL(spa_l2cache_remove); 9829 EXPORT_SYMBOL(spa_l2cache_exists); 9830 EXPORT_SYMBOL(spa_l2cache_activate); 9831 EXPORT_SYMBOL(spa_l2cache_drop); 9832 9833 /* scanning */ 9834 EXPORT_SYMBOL(spa_scan); 9835 EXPORT_SYMBOL(spa_scan_stop); 9836 9837 /* spa syncing */ 9838 EXPORT_SYMBOL(spa_sync); /* only for DMU use */ 9839 EXPORT_SYMBOL(spa_sync_allpools); 9840 9841 /* properties */ 9842 EXPORT_SYMBOL(spa_prop_set); 9843 EXPORT_SYMBOL(spa_prop_get); 9844 EXPORT_SYMBOL(spa_prop_clear_bootfs); 9845 9846 /* asynchronous event notification */ 9847 EXPORT_SYMBOL(spa_event_notify); 9848 9849 /* BEGIN CSTYLED */ 9850 ZFS_MODULE_PARAM(zfs_spa, spa_, load_verify_shift, INT, ZMOD_RW, 9851 "log2(fraction of arc that can be used by inflight I/Os when " 9852 "verifying pool during import"); 9853 9854 ZFS_MODULE_PARAM(zfs_spa, spa_, load_verify_metadata, INT, ZMOD_RW, 9855 "Set to traverse metadata on pool import"); 9856 9857 ZFS_MODULE_PARAM(zfs_spa, spa_, load_verify_data, INT, ZMOD_RW, 9858 "Set to traverse data on pool import"); 9859 9860 ZFS_MODULE_PARAM(zfs_spa, spa_, load_print_vdev_tree, INT, ZMOD_RW, 9861 "Print vdev tree to zfs_dbgmsg during pool import"); 9862 9863 ZFS_MODULE_PARAM(zfs_zio, zio_, taskq_batch_pct, UINT, ZMOD_RD, 9864 "Percentage of CPUs to run an IO worker thread"); 9865 9866 ZFS_MODULE_PARAM(zfs, zfs_, max_missing_tvds, ULONG, ZMOD_RW, 9867 "Allow importing pool with up to this number of missing top-level " 9868 "vdevs (in read-only mode)"); 9869 9870 ZFS_MODULE_PARAM(zfs_livelist_condense, zfs_livelist_condense_, zthr_pause, INT, ZMOD_RW, 9871 "Set the livelist condense zthr to pause"); 9872 9873 ZFS_MODULE_PARAM(zfs_livelist_condense, zfs_livelist_condense_, sync_pause, INT, ZMOD_RW, 9874 "Set the livelist condense synctask to pause"); 9875 9876 ZFS_MODULE_PARAM(zfs_livelist_condense, zfs_livelist_condense_, sync_cancel, INT, ZMOD_RW, 9877 "Whether livelist condensing was canceled in the synctask"); 9878 9879 ZFS_MODULE_PARAM(zfs_livelist_condense, zfs_livelist_condense_, zthr_cancel, INT, ZMOD_RW, 9880 "Whether livelist condensing was canceled in the zthr function"); 9881 9882 ZFS_MODULE_PARAM(zfs_livelist_condense, 
	zfs_livelist_condense_, new_alloc, INT, ZMOD_RW,
	"Whether extra ALLOC blkptrs were added to a livelist entry while it "
	"was being condensed");
/* END CSTYLED */