/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2019 by Delphix. All rights reserved.
 * Copyright (c) 2015, Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
 * Copyright 2013 Saso Kiselkov. All rights reserved.
 * Copyright (c) 2014 Integros [integros.com]
 * Copyright 2016 Toomas Soome <tsoome@me.com>
 * Copyright 2018 Joyent, Inc.
 * Copyright (c) 2017, Intel Corporation.
 * Copyright (c) 2017 Datto Inc.
 * Copyright 2018 OmniOS Community Edition (OmniOSce) Association.
 */

/*
 * SPA: Storage Pool Allocator
 *
 * This file contains all the routines used when modifying on-disk SPA state.
 * This includes opening, importing, destroying, exporting a pool, and syncing a
 * pool.
 */

#include <sys/zfs_context.h>
#include <sys/fm/fs/zfs.h>
#include <sys/spa_impl.h>
#include <sys/zio.h>
#include <sys/zio_checksum.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/zap.h>
#include <sys/zil.h>
#include <sys/ddt.h>
#include <sys/vdev_impl.h>
#include <sys/vdev_removal.h>
#include <sys/vdev_indirect_mapping.h>
#include <sys/vdev_indirect_births.h>
#include <sys/vdev_initialize.h>
#include <sys/metaslab.h>
#include <sys/metaslab_impl.h>
#include <sys/mmp.h>
#include <sys/uberblock_impl.h>
#include <sys/txg.h>
#include <sys/avl.h>
#include <sys/bpobj.h>
#include <sys/dmu_traverse.h>
#include <sys/dmu_objset.h>
#include <sys/unique.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_synctask.h>
#include <sys/fs/zfs.h>
#include <sys/arc.h>
#include <sys/callb.h>
#include <sys/systeminfo.h>
#include <sys/spa_boot.h>
#include <sys/zfs_ioctl.h>
#include <sys/dsl_scan.h>
#include <sys/zfeature.h>
#include <sys/dsl_destroy.h>
#include <sys/abd.h>

#ifdef _KERNEL
#include <sys/bootprops.h>
#include <sys/callb.h>
#include <sys/cpupart.h>
#include <sys/pool.h>
#include <sys/sysdc.h>
#include <sys/zone.h>
#endif	/* _KERNEL */

#include "zfs_prop.h"
#include "zfs_comutil.h"

/*
 * The interval, in seconds, at which failed configuration cache file writes
 * should be retried.
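 * The default value of 300 seconds below corresponds to five minutes.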
 */
int zfs_ccw_retry_interval = 300;

typedef enum zti_modes {
	ZTI_MODE_FIXED,		/* value is # of threads (min 1) */
	ZTI_MODE_BATCH,		/* cpu-intensive; value is ignored */
	ZTI_MODE_NULL,		/* don't create a taskq */
	ZTI_NMODES
} zti_modes_t;

#define	ZTI_P(n, q)	{ ZTI_MODE_FIXED, (n), (q) }
#define	ZTI_BATCH	{ ZTI_MODE_BATCH, 0, 1 }
#define	ZTI_NULL	{ ZTI_MODE_NULL, 0, 0 }

#define	ZTI_N(n)	ZTI_P(n, 1)
#define	ZTI_ONE		ZTI_N(1)

typedef struct zio_taskq_info {
	zti_modes_t zti_mode;
	uint_t zti_value;
	uint_t zti_count;
} zio_taskq_info_t;

static const char *const zio_taskq_types[ZIO_TASKQ_TYPES] = {
	"issue", "issue_high", "intr", "intr_high"
};

/*
 * This table defines the taskq settings for each ZFS I/O type. When
 * initializing a pool, we use this table to create an appropriately sized
 * taskq. Some operations are low volume and therefore have a small, static
 * number of threads assigned to their taskqs using the ZTI_N(#) or ZTI_ONE
 * macros. Other operations process a large amount of data; the ZTI_BATCH
 * macro causes us to create a taskq oriented for throughput. Some operations
 * are so high frequency and short-lived that the taskq itself can become a
 * point of lock contention. The ZTI_P(#, #) macro indicates that we need an
 * additional degree of parallelism specified by the number of threads per-
 * taskq and the number of taskqs; when dispatching an event in this case, the
 * particular taskq is chosen at random.
 *
 * The different taskq priorities are to handle the different contexts (issue
 * and interrupt) and then to reserve threads for ZIO_PRIORITY_NOW I/Os that
 * need to be handled with minimum delay.
 */
const zio_taskq_info_t zio_taskqs[ZIO_TYPES][ZIO_TASKQ_TYPES] = {
	/* ISSUE	ISSUE_HIGH	INTR		INTR_HIGH */
	{ ZTI_ONE,	ZTI_NULL,	ZTI_ONE,	ZTI_NULL }, /* NULL */
	{ ZTI_N(8),	ZTI_NULL,	ZTI_P(12, 8),	ZTI_NULL }, /* READ */
	{ ZTI_BATCH,	ZTI_N(5),	ZTI_N(8),	ZTI_N(5) }, /* WRITE */
	{ ZTI_P(12, 8),	ZTI_NULL,	ZTI_ONE,	ZTI_NULL }, /* FREE */
	{ ZTI_ONE,	ZTI_NULL,	ZTI_ONE,	ZTI_NULL }, /* CLAIM */
	{ ZTI_ONE,	ZTI_NULL,	ZTI_ONE,	ZTI_NULL }, /* IOCTL */
};

static void spa_sync_version(void *arg, dmu_tx_t *tx);
static void spa_sync_props(void *arg, dmu_tx_t *tx);
static boolean_t spa_has_active_shared_spare(spa_t *spa);
static int spa_load_impl(spa_t *spa, spa_import_type_t type, char **ereport);
static void spa_vdev_resilver_done(spa_t *spa);

uint_t		zio_taskq_batch_pct = 75;	/* 1 thread per cpu in pset */
id_t		zio_taskq_psrset_bind = PS_NONE;
boolean_t	zio_taskq_sysdc = B_TRUE;	/* use SDC scheduling class */
uint_t		zio_taskq_basedc = 80;		/* base duty cycle */

boolean_t	spa_create_process = B_TRUE;	/* no process ==> no sysdc */
extern int	zfs_sync_pass_deferred_free;

/*
 * Report any spa_load_verify errors found, but do not fail spa_load.
 * This is used by zdb to analyze non-idle pools.
 */
boolean_t	spa_load_verify_dryrun = B_FALSE;

/*
 * This (illegal) pool name is used when temporarily importing a spa_t in order
 * to get the vdev stats associated with the imported devices.
 */
#define	TRYIMPORT_NAME	"$import"

/*
 * For debugging purposes: print out vdev tree during pool import.
 */
boolean_t spa_load_print_vdev_tree = B_FALSE;

/*
 * A non-zero value for zfs_max_missing_tvds means that we allow importing
 * pools with missing top-level vdevs. This is strictly intended for advanced
 * pool recovery cases since missing data is almost inevitable. Pools with
 * missing devices can only be imported read-only for safety reasons, and their
 * fail-mode will be automatically set to "continue".
 *
 * With 1 missing vdev we should be able to import the pool and mount all
 * datasets. User data that was not modified after the missing device has been
 * added should be recoverable. This means that snapshots created prior to the
 * addition of that device should be completely intact.
 *
 * With 2 missing vdevs, some datasets may fail to mount since there are
 * dataset statistics that are stored as regular metadata. Some data might be
 * recoverable if those vdevs were added recently.
 *
 * With 3 or more missing vdevs, the pool is severely damaged and MOS entries
 * may be missing entirely. Chances of data recovery are very low. Note that
 * there are also risks of performing an inadvertent rewind as we might be
 * missing all the vdevs with the latest uberblocks.
 */
uint64_t	zfs_max_missing_tvds = 0;

/*
 * The parameters below are similar to zfs_max_missing_tvds but are only
 * intended for a preliminary open of the pool with an untrusted config which
 * might be incomplete or out-dated.
 *
 * We are more tolerant for pools opened from a cachefile since we could have
 * an out-dated cachefile where a device removal was not registered.
 * We could have set the limit arbitrarily high but in the case where devices
 * are really missing we would want to return the proper error codes; we chose
 * SPA_DVAS_PER_BP - 1 so that some copies of the MOS would still be available
 * and we get a chance to retrieve the trusted config.
 */
uint64_t	zfs_max_missing_tvds_cachefile = SPA_DVAS_PER_BP - 1;

/*
 * In the case where config was assembled by scanning device paths (/dev/dsks
 * by default) we are less tolerant since all the existing devices should have
 * been detected and we want spa_load to return the right error codes.
 */
uint64_t	zfs_max_missing_tvds_scan = 0;

/*
 * Debugging aid that pauses spa_sync() towards the end.
 */
boolean_t	zfs_pause_spa_sync = B_FALSE;

/*
 * ==========================================================================
 * SPA properties routines
 * ==========================================================================
 */

/*
 * Add a (source=src, propname=propval) list to an nvlist.
 */
static void
spa_prop_add_list(nvlist_t *nvl, zpool_prop_t prop, char *strval,
    uint64_t intval, zprop_source_t src)
{
	const char *propname = zpool_prop_to_name(prop);
	nvlist_t *propval;

	VERIFY(nvlist_alloc(&propval, NV_UNIQUE_NAME, KM_SLEEP) == 0);
	VERIFY(nvlist_add_uint64(propval, ZPROP_SOURCE, src) == 0);

	if (strval != NULL)
		VERIFY(nvlist_add_string(propval, ZPROP_VALUE, strval) == 0);
	else
		VERIFY(nvlist_add_uint64(propval, ZPROP_VALUE, intval) == 0);

	VERIFY(nvlist_add_nvlist(nvl, propname, propval) == 0);
	nvlist_free(propval);
}

/*
 * Get property values from the spa configuration.
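 * These values are derived from the in-core state (vdev tree, metaslab
 * classes, spa fields) rather than from the MOS pool properties object,
 * so most of them are reported with source ZPROP_SRC_NONE.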
 */
static void
spa_prop_get_config(spa_t *spa, nvlist_t **nvp)
{
	vdev_t *rvd = spa->spa_root_vdev;
	dsl_pool_t *pool = spa->spa_dsl_pool;
	uint64_t size, alloc, cap, version;
	zprop_source_t src = ZPROP_SRC_NONE;
	spa_config_dirent_t *dp;
	metaslab_class_t *mc = spa_normal_class(spa);

	ASSERT(MUTEX_HELD(&spa->spa_props_lock));

	if (rvd != NULL) {
		alloc = metaslab_class_get_alloc(mc);
		alloc += metaslab_class_get_alloc(spa_special_class(spa));
		alloc += metaslab_class_get_alloc(spa_dedup_class(spa));

		size = metaslab_class_get_space(mc);
		size += metaslab_class_get_space(spa_special_class(spa));
		size += metaslab_class_get_space(spa_dedup_class(spa));

		spa_prop_add_list(*nvp, ZPOOL_PROP_NAME, spa_name(spa), 0, src);
		spa_prop_add_list(*nvp, ZPOOL_PROP_SIZE, NULL, size, src);
		spa_prop_add_list(*nvp, ZPOOL_PROP_ALLOCATED, NULL, alloc, src);
		spa_prop_add_list(*nvp, ZPOOL_PROP_FREE, NULL,
		    size - alloc, src);
		spa_prop_add_list(*nvp, ZPOOL_PROP_CHECKPOINT, NULL,
		    spa->spa_checkpoint_info.sci_dspace, src);

		spa_prop_add_list(*nvp, ZPOOL_PROP_FRAGMENTATION, NULL,
		    metaslab_class_fragmentation(mc), src);
		spa_prop_add_list(*nvp, ZPOOL_PROP_EXPANDSZ, NULL,
		    metaslab_class_expandable_space(mc), src);
		spa_prop_add_list(*nvp, ZPOOL_PROP_READONLY, NULL,
		    (spa_mode(spa) == FREAD), src);

		cap = (size == 0) ? 0 : (alloc * 100 / size);
		spa_prop_add_list(*nvp, ZPOOL_PROP_CAPACITY, NULL, cap, src);

		spa_prop_add_list(*nvp, ZPOOL_PROP_DEDUPRATIO, NULL,
		    ddt_get_pool_dedup_ratio(spa), src);

		spa_prop_add_list(*nvp, ZPOOL_PROP_HEALTH, NULL,
		    rvd->vdev_state, src);

		version = spa_version(spa);
		if (version == zpool_prop_default_numeric(ZPOOL_PROP_VERSION))
			src = ZPROP_SRC_DEFAULT;
		else
			src = ZPROP_SRC_LOCAL;
		spa_prop_add_list(*nvp, ZPOOL_PROP_VERSION, NULL, version, src);
	}

	if (pool != NULL) {
		/*
		 * The $FREE directory was introduced in SPA_VERSION_DEADLISTS;
		 * when opening pools created before this version, freedir
		 * will be NULL.
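		 * In that case we simply report 0 bytes freeing.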
		 */
		if (pool->dp_free_dir != NULL) {
			spa_prop_add_list(*nvp, ZPOOL_PROP_FREEING, NULL,
			    dsl_dir_phys(pool->dp_free_dir)->dd_used_bytes,
			    src);
		} else {
			spa_prop_add_list(*nvp, ZPOOL_PROP_FREEING,
			    NULL, 0, src);
		}

		if (pool->dp_leak_dir != NULL) {
			spa_prop_add_list(*nvp, ZPOOL_PROP_LEAKED, NULL,
			    dsl_dir_phys(pool->dp_leak_dir)->dd_used_bytes,
			    src);
		} else {
			spa_prop_add_list(*nvp, ZPOOL_PROP_LEAKED,
			    NULL, 0, src);
		}
	}

	spa_prop_add_list(*nvp, ZPOOL_PROP_GUID, NULL, spa_guid(spa), src);

	if (spa->spa_comment != NULL) {
		spa_prop_add_list(*nvp, ZPOOL_PROP_COMMENT, spa->spa_comment,
		    0, ZPROP_SRC_LOCAL);
	}

	if (spa->spa_root != NULL)
		spa_prop_add_list(*nvp, ZPOOL_PROP_ALTROOT, spa->spa_root,
		    0, ZPROP_SRC_LOCAL);

	if (spa_feature_is_enabled(spa, SPA_FEATURE_LARGE_BLOCKS)) {
		spa_prop_add_list(*nvp, ZPOOL_PROP_MAXBLOCKSIZE, NULL,
		    MIN(zfs_max_recordsize, SPA_MAXBLOCKSIZE), ZPROP_SRC_NONE);
	} else {
		spa_prop_add_list(*nvp, ZPOOL_PROP_MAXBLOCKSIZE, NULL,
		    SPA_OLD_MAXBLOCKSIZE, ZPROP_SRC_NONE);
	}

	if (spa_feature_is_enabled(spa, SPA_FEATURE_LARGE_DNODE)) {
		spa_prop_add_list(*nvp, ZPOOL_PROP_MAXDNODESIZE, NULL,
		    DNODE_MAX_SIZE, ZPROP_SRC_NONE);
	} else {
		spa_prop_add_list(*nvp, ZPOOL_PROP_MAXDNODESIZE, NULL,
		    DNODE_MIN_SIZE, ZPROP_SRC_NONE);
	}

	if ((dp = list_head(&spa->spa_config_list)) != NULL) {
		if (dp->scd_path == NULL) {
			spa_prop_add_list(*nvp, ZPOOL_PROP_CACHEFILE,
			    "none", 0, ZPROP_SRC_LOCAL);
		} else if (strcmp(dp->scd_path, spa_config_path) != 0) {
			spa_prop_add_list(*nvp, ZPOOL_PROP_CACHEFILE,
			    dp->scd_path, 0, ZPROP_SRC_LOCAL);
		}
	}
}

/*
 * Get zpool property values.
 */
int
spa_prop_get(spa_t *spa, nvlist_t **nvp)
{
	objset_t *mos = spa->spa_meta_objset;
	zap_cursor_t zc;
	zap_attribute_t za;
	int err;

	VERIFY(nvlist_alloc(nvp, NV_UNIQUE_NAME, KM_SLEEP) == 0);

	mutex_enter(&spa->spa_props_lock);

	/*
	 * Get properties from the spa config.
	 */
	spa_prop_get_config(spa, nvp);

	/* If no pool property object, no more prop to get. */
	if (mos == NULL || spa->spa_pool_props_object == 0) {
		mutex_exit(&spa->spa_props_lock);
		return (0);
	}

	/*
	 * Get properties from the MOS pool property object.
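	 * Each ZAP entry is either an integer (za_integer_length == 8) or a
	 * string (za_integer_length == 1); bootfs is special-cased because
	 * the stored object number is converted back into a dataset name.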
	 */
	for (zap_cursor_init(&zc, mos, spa->spa_pool_props_object);
	    (err = zap_cursor_retrieve(&zc, &za)) == 0;
	    zap_cursor_advance(&zc)) {
		uint64_t intval = 0;
		char *strval = NULL;
		zprop_source_t src = ZPROP_SRC_DEFAULT;
		zpool_prop_t prop;

		if ((prop = zpool_name_to_prop(za.za_name)) == ZPOOL_PROP_INVAL)
			continue;

		switch (za.za_integer_length) {
		case 8:
			/* integer property */
			if (za.za_first_integer !=
			    zpool_prop_default_numeric(prop))
				src = ZPROP_SRC_LOCAL;

			if (prop == ZPOOL_PROP_BOOTFS) {
				dsl_pool_t *dp;
				dsl_dataset_t *ds = NULL;

				dp = spa_get_dsl(spa);
				dsl_pool_config_enter(dp, FTAG);
				err = dsl_dataset_hold_obj(dp,
				    za.za_first_integer, FTAG, &ds);
				if (err != 0) {
					dsl_pool_config_exit(dp, FTAG);
					break;
				}

				strval = kmem_alloc(ZFS_MAX_DATASET_NAME_LEN,
				    KM_SLEEP);
				dsl_dataset_name(ds, strval);
				dsl_dataset_rele(ds, FTAG);
				dsl_pool_config_exit(dp, FTAG);
			} else {
				strval = NULL;
				intval = za.za_first_integer;
			}

			spa_prop_add_list(*nvp, prop, strval, intval, src);

			if (strval != NULL)
				kmem_free(strval, ZFS_MAX_DATASET_NAME_LEN);

			break;

		case 1:
			/* string property */
			strval = kmem_alloc(za.za_num_integers, KM_SLEEP);
			err = zap_lookup(mos, spa->spa_pool_props_object,
			    za.za_name, 1, za.za_num_integers, strval);
			if (err) {
				kmem_free(strval, za.za_num_integers);
				break;
			}
			spa_prop_add_list(*nvp, prop, strval, 0, src);
			kmem_free(strval, za.za_num_integers);
			break;

		default:
			break;
		}
	}
	zap_cursor_fini(&zc);
	mutex_exit(&spa->spa_props_lock);
out:
	if (err && err != ENOENT) {
		nvlist_free(*nvp);
		*nvp = NULL;
		return (err);
	}

	return (0);
}

/*
 * Validate the given pool properties nvlist and modify the list
 * for the property values to be set.
 */
static int
spa_prop_validate(spa_t *spa, nvlist_t *props)
{
	nvpair_t *elem;
	int error = 0, reset_bootfs = 0;
	uint64_t objnum = 0;
	boolean_t has_feature = B_FALSE;

	elem = NULL;
	while ((elem = nvlist_next_nvpair(props, elem)) != NULL) {
		uint64_t intval;
		char *strval, *slash, *check, *fname;
		const char *propname = nvpair_name(elem);
		zpool_prop_t prop = zpool_name_to_prop(propname);

		switch (prop) {
		case ZPOOL_PROP_INVAL:
			if (!zpool_prop_feature(propname)) {
				error = SET_ERROR(EINVAL);
				break;
			}

			/*
			 * Sanitize the input.
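			 * A feature property must arrive as a uint64 nvpair
			 * whose value is 0 ("enabled") and whose name refers
			 * to a known feature ("feature@<name>").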
			 */
			if (nvpair_type(elem) != DATA_TYPE_UINT64) {
				error = SET_ERROR(EINVAL);
				break;
			}

			if (nvpair_value_uint64(elem, &intval) != 0) {
				error = SET_ERROR(EINVAL);
				break;
			}

			if (intval != 0) {
				error = SET_ERROR(EINVAL);
				break;
			}

			fname = strchr(propname, '@') + 1;
			if (zfeature_lookup_name(fname, NULL) != 0) {
				error = SET_ERROR(EINVAL);
				break;
			}

			has_feature = B_TRUE;
			break;

		case ZPOOL_PROP_VERSION:
			error = nvpair_value_uint64(elem, &intval);
			if (!error &&
			    (intval < spa_version(spa) ||
			    intval > SPA_VERSION_BEFORE_FEATURES ||
			    has_feature))
				error = SET_ERROR(EINVAL);
			break;

		case ZPOOL_PROP_DELEGATION:
		case ZPOOL_PROP_AUTOREPLACE:
		case ZPOOL_PROP_LISTSNAPS:
		case ZPOOL_PROP_AUTOEXPAND:
			error = nvpair_value_uint64(elem, &intval);
			if (!error && intval > 1)
				error = SET_ERROR(EINVAL);
			break;

		case ZPOOL_PROP_MULTIHOST:
			error = nvpair_value_uint64(elem, &intval);
			if (!error && intval > 1)
				error = SET_ERROR(EINVAL);

			if (!error && !spa_get_hostid())
				error = SET_ERROR(ENOTSUP);

			break;

		case ZPOOL_PROP_BOOTFS:
			/*
			 * If the pool version is less than SPA_VERSION_BOOTFS,
			 * or the pool is still being created (version == 0),
			 * the bootfs property cannot be set.
			 */
			if (spa_version(spa) < SPA_VERSION_BOOTFS) {
				error = SET_ERROR(ENOTSUP);
				break;
			}

			/*
			 * Make sure the vdev config is bootable
			 */
			if (!vdev_is_bootable(spa->spa_root_vdev)) {
				error = SET_ERROR(ENOTSUP);
				break;
			}

			reset_bootfs = 1;

			error = nvpair_value_string(elem, &strval);

			if (!error) {
				objset_t *os;
				uint64_t propval;

				if (strval == NULL || strval[0] == '\0') {
					objnum = zpool_prop_default_numeric(
					    ZPOOL_PROP_BOOTFS);
					break;
				}

				error = dmu_objset_hold(strval, FTAG, &os);
				if (error != 0)
					break;

				/*
				 * Must be ZPL, and its property settings
				 * must be supported.
				 */

				if (dmu_objset_type(os) != DMU_OST_ZFS) {
					error = SET_ERROR(ENOTSUP);
				} else if ((error =
				    dsl_prop_get_int_ds(dmu_objset_ds(os),
				    zfs_prop_to_name(ZFS_PROP_COMPRESSION),
				    &propval)) == 0 &&
				    !BOOTFS_COMPRESS_VALID(propval)) {
					error = SET_ERROR(ENOTSUP);
				} else {
					objnum = dmu_objset_id(os);
				}
				dmu_objset_rele(os, FTAG);
			}
			break;

		case ZPOOL_PROP_FAILUREMODE:
			error = nvpair_value_uint64(elem, &intval);
			if (!error && (intval < ZIO_FAILURE_MODE_WAIT ||
			    intval > ZIO_FAILURE_MODE_PANIC))
				error = SET_ERROR(EINVAL);

			/*
			 * This is a special case which only occurs when
			 * the pool has completely failed. This allows
			 * the user to change the in-core failmode property
			 * without syncing it out to disk (I/Os might
			 * currently be blocked). We do this by returning
			 * EIO to the caller (spa_prop_set) to trick it
			 * into thinking we encountered a property validation
			 * error.
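			 * spa_prop_set() then returns that EIO to its own
			 * caller instead of starting a sync task.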
			 */
			if (!error && spa_suspended(spa)) {
				spa->spa_failmode = intval;
				error = SET_ERROR(EIO);
			}
			break;

		case ZPOOL_PROP_CACHEFILE:
			if ((error = nvpair_value_string(elem, &strval)) != 0)
				break;

			if (strval[0] == '\0')
				break;

			if (strcmp(strval, "none") == 0)
				break;

			if (strval[0] != '/') {
				error = SET_ERROR(EINVAL);
				break;
			}

			slash = strrchr(strval, '/');
			ASSERT(slash != NULL);

			if (slash[1] == '\0' || strcmp(slash, "/.") == 0 ||
			    strcmp(slash, "/..") == 0)
				error = SET_ERROR(EINVAL);
			break;

		case ZPOOL_PROP_COMMENT:
			if ((error = nvpair_value_string(elem, &strval)) != 0)
				break;
			for (check = strval; *check != '\0'; check++) {
				/*
				 * The kernel doesn't have an easy isprint()
				 * check. For this kernel check, we merely
				 * check ASCII apart from DEL. Fix this if
				 * there is an easy-to-use kernel isprint().
				 */
				if (*check >= 0x7f) {
					error = SET_ERROR(EINVAL);
					break;
				}
			}
			if (strlen(strval) > ZPROP_MAX_COMMENT)
				error = E2BIG;
			break;

		case ZPOOL_PROP_DEDUPDITTO:
			if (spa_version(spa) < SPA_VERSION_DEDUP)
				error = SET_ERROR(ENOTSUP);
			else
				error = nvpair_value_uint64(elem, &intval);
			if (error == 0 &&
			    intval != 0 && intval < ZIO_DEDUPDITTO_MIN)
				error = SET_ERROR(EINVAL);
			break;
		}

		if (error)
			break;
	}

	if (!error && reset_bootfs) {
		error = nvlist_remove(props,
		    zpool_prop_to_name(ZPOOL_PROP_BOOTFS), DATA_TYPE_STRING);

		if (!error) {
			error = nvlist_add_uint64(props,
			    zpool_prop_to_name(ZPOOL_PROP_BOOTFS), objnum);
		}
	}

	return (error);
}

void
spa_configfile_set(spa_t *spa, nvlist_t *nvp, boolean_t need_sync)
{
	char *cachefile;
	spa_config_dirent_t *dp;

	if (nvlist_lookup_string(nvp, zpool_prop_to_name(ZPOOL_PROP_CACHEFILE),
	    &cachefile) != 0)
		return;

	dp = kmem_alloc(sizeof (spa_config_dirent_t),
	    KM_SLEEP);

	if (cachefile[0] == '\0')
		dp->scd_path = spa_strdup(spa_config_path);
	else if (strcmp(cachefile, "none") == 0)
		dp->scd_path = NULL;
	else
		dp->scd_path = spa_strdup(cachefile);

	list_insert_head(&spa->spa_config_list, dp);
	if (need_sync)
		spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE);
}

int
spa_prop_set(spa_t *spa, nvlist_t *nvp)
{
	int error;
	nvpair_t *elem = NULL;
	boolean_t need_sync = B_FALSE;

	if ((error = spa_prop_validate(spa, nvp)) != 0)
		return (error);

	while ((elem = nvlist_next_nvpair(nvp, elem)) != NULL) {
		zpool_prop_t prop = zpool_name_to_prop(nvpair_name(elem));

		if (prop == ZPOOL_PROP_CACHEFILE ||
		    prop == ZPOOL_PROP_ALTROOT ||
		    prop == ZPOOL_PROP_READONLY)
			continue;

		if (prop == ZPOOL_PROP_VERSION || prop == ZPOOL_PROP_INVAL) {
			uint64_t ver;

			if (prop == ZPOOL_PROP_VERSION) {
				VERIFY(nvpair_value_uint64(elem, &ver) == 0);
			} else {
				ASSERT(zpool_prop_feature(nvpair_name(elem)));
				ver = SPA_VERSION_FEATURES;
				need_sync = B_TRUE;
			}

			/* Save time if the version is already set. */
			if (ver == spa_version(spa))
				continue;

			/*
			 * In addition to the pool directory object, we might
			 * create the pool properties object, the features for
			 * read object, the features for write object, or the
			 * feature descriptions object.
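			 * (That is the reason the dsl_sync_task() call below
			 * budgets for several modified blocks.)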
			 */
			error = dsl_sync_task(spa->spa_name, NULL,
			    spa_sync_version, &ver,
			    6, ZFS_SPACE_CHECK_RESERVED);
			if (error)
				return (error);
			continue;
		}

		need_sync = B_TRUE;
		break;
	}

	if (need_sync) {
		return (dsl_sync_task(spa->spa_name, NULL, spa_sync_props,
		    nvp, 6, ZFS_SPACE_CHECK_RESERVED));
	}

	return (0);
}

/*
 * If the bootfs property value is dsobj, clear it.
 */
void
spa_prop_clear_bootfs(spa_t *spa, uint64_t dsobj, dmu_tx_t *tx)
{
	if (spa->spa_bootfs == dsobj && spa->spa_pool_props_object != 0) {
		VERIFY(zap_remove(spa->spa_meta_objset,
		    spa->spa_pool_props_object,
		    zpool_prop_to_name(ZPOOL_PROP_BOOTFS), tx) == 0);
		spa->spa_bootfs = 0;
	}
}

/*ARGSUSED*/
static int
spa_change_guid_check(void *arg, dmu_tx_t *tx)
{
	uint64_t *newguid = arg;
	spa_t *spa = dmu_tx_pool(tx)->dp_spa;
	vdev_t *rvd = spa->spa_root_vdev;
	uint64_t vdev_state;

	if (spa_feature_is_active(spa, SPA_FEATURE_POOL_CHECKPOINT)) {
		int error = (spa_has_checkpoint(spa)) ?
		    ZFS_ERR_CHECKPOINT_EXISTS : ZFS_ERR_DISCARDING_CHECKPOINT;
		return (SET_ERROR(error));
	}

	spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
	vdev_state = rvd->vdev_state;
	spa_config_exit(spa, SCL_STATE, FTAG);

	if (vdev_state != VDEV_STATE_HEALTHY)
		return (SET_ERROR(ENXIO));

	ASSERT3U(spa_guid(spa), !=, *newguid);

	return (0);
}

static void
spa_change_guid_sync(void *arg, dmu_tx_t *tx)
{
	uint64_t *newguid = arg;
	spa_t *spa = dmu_tx_pool(tx)->dp_spa;
	uint64_t oldguid;
	vdev_t *rvd = spa->spa_root_vdev;

	oldguid = spa_guid(spa);

	spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
	rvd->vdev_guid = *newguid;
	rvd->vdev_guid_sum += (*newguid - oldguid);
	vdev_config_dirty(rvd);
	spa_config_exit(spa, SCL_STATE, FTAG);

	spa_history_log_internal(spa, "guid change", tx, "old=%llu new=%llu",
	    oldguid, *newguid);
}

/*
 * Change the GUID for the pool.  This is done so that we can later
 * re-import a pool built from a clone of our own vdevs.  We will modify
 * the root vdev's guid, our own pool guid, and then mark all of our
 * vdevs dirty.  Note that we must make sure that all our vdevs are
 * online when we do this, or else any vdevs that weren't present
 * would be orphaned from our pool.  We are also going to issue a
 * sysevent to update any watchers.
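 *
 * The check and the actual guid rewrite are performed in syncing context
 * via the spa_change_guid_check()/spa_change_guid_sync() sync task above,
 * after which the updated config is written out to the cache file.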
 */
int
spa_change_guid(spa_t *spa)
{
	int error;
	uint64_t guid;

	mutex_enter(&spa->spa_vdev_top_lock);
	mutex_enter(&spa_namespace_lock);
	guid = spa_generate_guid(NULL);

	error = dsl_sync_task(spa->spa_name, spa_change_guid_check,
	    spa_change_guid_sync, &guid, 5, ZFS_SPACE_CHECK_RESERVED);

	if (error == 0) {
		spa_write_cachefile(spa, B_FALSE, B_TRUE);
		spa_event_notify(spa, NULL, NULL, ESC_ZFS_POOL_REGUID);
	}

	mutex_exit(&spa_namespace_lock);
	mutex_exit(&spa->spa_vdev_top_lock);

	return (error);
}

/*
 * ==========================================================================
 * SPA state manipulation (open/create/destroy/import/export)
 * ==========================================================================
 */

static int
spa_error_entry_compare(const void *a, const void *b)
{
	const spa_error_entry_t *sa = (const spa_error_entry_t *)a;
	const spa_error_entry_t *sb = (const spa_error_entry_t *)b;
	int ret;

	ret = memcmp(&sa->se_bookmark, &sb->se_bookmark,
	    sizeof (zbookmark_phys_t));

	return (AVL_ISIGN(ret));
}

/*
 * Utility function which retrieves copies of the current logs and
 * re-initializes them in the process.
 */
void
spa_get_errlists(spa_t *spa, avl_tree_t *last, avl_tree_t *scrub)
{
	ASSERT(MUTEX_HELD(&spa->spa_errlist_lock));

	bcopy(&spa->spa_errlist_last, last, sizeof (avl_tree_t));
	bcopy(&spa->spa_errlist_scrub, scrub, sizeof (avl_tree_t));

	avl_create(&spa->spa_errlist_scrub,
	    spa_error_entry_compare, sizeof (spa_error_entry_t),
	    offsetof(spa_error_entry_t, se_avl));
	avl_create(&spa->spa_errlist_last,
	    spa_error_entry_compare, sizeof (spa_error_entry_t),
	    offsetof(spa_error_entry_t, se_avl));
}

static void
spa_taskqs_init(spa_t *spa, zio_type_t t, zio_taskq_type_t q)
{
	const zio_taskq_info_t *ztip = &zio_taskqs[t][q];
	enum zti_modes mode = ztip->zti_mode;
	uint_t value = ztip->zti_value;
	uint_t count = ztip->zti_count;
	spa_taskqs_t *tqs = &spa->spa_zio_taskq[t][q];
	char name[32];
	uint_t flags = 0;
	boolean_t batch = B_FALSE;

	if (mode == ZTI_MODE_NULL) {
		tqs->stqs_count = 0;
		tqs->stqs_taskq = NULL;
		return;
	}

	ASSERT3U(count, >, 0);

	tqs->stqs_count = count;
	tqs->stqs_taskq = kmem_alloc(count * sizeof (taskq_t *), KM_SLEEP);

	switch (mode) {
	case ZTI_MODE_FIXED:
		ASSERT3U(value, >=, 1);
		value = MAX(value, 1);
		break;

	case ZTI_MODE_BATCH:
		batch = B_TRUE;
		flags |= TASKQ_THREADS_CPU_PCT;
		value = zio_taskq_batch_pct;
		break;

	default:
		panic("unrecognized mode for %s_%s taskq (%u:%u) in "
		    "spa_activate()",
		    zio_type_name[t], zio_taskq_types[q], mode, value);
		break;
	}

	for (uint_t i = 0; i < count; i++) {
		taskq_t *tq;

		if (count > 1) {
			(void) snprintf(name, sizeof (name), "%s_%s_%u",
			    zio_type_name[t], zio_taskq_types[q], i);
		} else {
			(void) snprintf(name, sizeof (name), "%s_%s",
			    zio_type_name[t], zio_taskq_types[q]);
		}

		if (zio_taskq_sysdc && spa->spa_proc != &p0) {
			if (batch)
				flags |= TASKQ_DC_BATCH;

			tq = taskq_create_sysdc(name, value, 50, INT_MAX,
			    spa->spa_proc, zio_taskq_basedc, flags);
		} else {
			pri_t pri = maxclsyspri;
			/*
			 * The write issue taskq can be extremely CPU
			 * intensive.
			 * Run it at slightly lower priority
			 * than the other taskqs.
			 */
			if (t == ZIO_TYPE_WRITE && q == ZIO_TASKQ_ISSUE)
				pri--;

			tq = taskq_create_proc(name, value, pri, 50,
			    INT_MAX, spa->spa_proc, flags);
		}

		tqs->stqs_taskq[i] = tq;
	}
}

static void
spa_taskqs_fini(spa_t *spa, zio_type_t t, zio_taskq_type_t q)
{
	spa_taskqs_t *tqs = &spa->spa_zio_taskq[t][q];

	if (tqs->stqs_taskq == NULL) {
		ASSERT0(tqs->stqs_count);
		return;
	}

	for (uint_t i = 0; i < tqs->stqs_count; i++) {
		ASSERT3P(tqs->stqs_taskq[i], !=, NULL);
		taskq_destroy(tqs->stqs_taskq[i]);
	}

	kmem_free(tqs->stqs_taskq, tqs->stqs_count * sizeof (taskq_t *));
	tqs->stqs_taskq = NULL;
}

/*
 * Dispatch a task to the appropriate taskq for the ZFS I/O type and priority.
 * Note that a type may have multiple discrete taskqs to avoid lock contention
 * on the taskq itself. In that case we choose which taskq at random by using
 * the low bits of gethrtime().
 */
void
spa_taskq_dispatch_ent(spa_t *spa, zio_type_t t, zio_taskq_type_t q,
    task_func_t *func, void *arg, uint_t flags, taskq_ent_t *ent)
{
	spa_taskqs_t *tqs = &spa->spa_zio_taskq[t][q];
	taskq_t *tq;

	ASSERT3P(tqs->stqs_taskq, !=, NULL);
	ASSERT3U(tqs->stqs_count, !=, 0);

	if (tqs->stqs_count == 1) {
		tq = tqs->stqs_taskq[0];
	} else {
		tq = tqs->stqs_taskq[gethrtime() % tqs->stqs_count];
	}

	taskq_dispatch_ent(tq, func, arg, flags, ent);
}

static void
spa_create_zio_taskqs(spa_t *spa)
{
	for (int t = 0; t < ZIO_TYPES; t++) {
		for (int q = 0; q < ZIO_TASKQ_TYPES; q++) {
			spa_taskqs_init(spa, t, q);
		}
	}
}

#ifdef _KERNEL
static void
spa_thread(void *arg)
{
	callb_cpr_t cprinfo;

	spa_t *spa = arg;
	user_t *pu = PTOU(curproc);

	CALLB_CPR_INIT(&cprinfo, &spa->spa_proc_lock, callb_generic_cpr,
	    spa->spa_name);

	ASSERT(curproc != &p0);
	(void) snprintf(pu->u_psargs, sizeof (pu->u_psargs),
	    "zpool-%s", spa->spa_name);
	(void) strlcpy(pu->u_comm, pu->u_psargs, sizeof (pu->u_comm));

	/* bind this thread to the requested psrset */
	if (zio_taskq_psrset_bind != PS_NONE) {
		pool_lock();
		mutex_enter(&cpu_lock);
		mutex_enter(&pidlock);
		mutex_enter(&curproc->p_lock);

		if (cpupart_bind_thread(curthread, zio_taskq_psrset_bind,
		    0, NULL, NULL) == 0)  {
			curthread->t_bind_pset = zio_taskq_psrset_bind;
		} else {
			cmn_err(CE_WARN,
			    "Couldn't bind process for zfs pool \"%s\" to "
			    "pset %d\n", spa->spa_name, zio_taskq_psrset_bind);
		}

		mutex_exit(&curproc->p_lock);
		mutex_exit(&pidlock);
		mutex_exit(&cpu_lock);
		pool_unlock();
	}

	if (zio_taskq_sysdc) {
		sysdc_thread_enter(curthread, 100, 0);
	}

	spa->spa_proc = curproc;
	spa->spa_did = curthread->t_did;

	spa_create_zio_taskqs(spa);

	mutex_enter(&spa->spa_proc_lock);
	ASSERT(spa->spa_proc_state == SPA_PROC_CREATED);

	spa->spa_proc_state = SPA_PROC_ACTIVE;
	cv_broadcast(&spa->spa_proc_cv);

	CALLB_CPR_SAFE_BEGIN(&cprinfo);
	while (spa->spa_proc_state == SPA_PROC_ACTIVE)
		cv_wait(&spa->spa_proc_cv, &spa->spa_proc_lock);
	CALLB_CPR_SAFE_END(&cprinfo, &spa->spa_proc_lock);

	ASSERT(spa->spa_proc_state == SPA_PROC_DEACTIVATE);
	spa->spa_proc_state = SPA_PROC_GONE;
	spa->spa_proc = &p0;
	cv_broadcast(&spa->spa_proc_cv);
	CALLB_CPR_EXIT(&cprinfo);	/* drops spa_proc_lock */

	mutex_enter(&curproc->p_lock);
	lwp_exit();
}
#endif

/*
 * Activate an uninitialized pool.
 */
static void
spa_activate(spa_t *spa, int mode)
{
	ASSERT(spa->spa_state == POOL_STATE_UNINITIALIZED);

	spa->spa_state = POOL_STATE_ACTIVE;
	spa->spa_mode = mode;

	spa->spa_normal_class = metaslab_class_create(spa, zfs_metaslab_ops);
	spa->spa_log_class = metaslab_class_create(spa, zfs_metaslab_ops);
	spa->spa_special_class = metaslab_class_create(spa, zfs_metaslab_ops);
	spa->spa_dedup_class = metaslab_class_create(spa, zfs_metaslab_ops);

	/* Try to create a covering process */
	mutex_enter(&spa->spa_proc_lock);
	ASSERT(spa->spa_proc_state == SPA_PROC_NONE);
	ASSERT(spa->spa_proc == &p0);
	spa->spa_did = 0;

	/* Only create a process if we're going to be around a while. */
	if (spa_create_process && strcmp(spa->spa_name, TRYIMPORT_NAME) != 0) {
		if (newproc(spa_thread, (caddr_t)spa, syscid, maxclsyspri,
		    NULL, 0) == 0) {
			spa->spa_proc_state = SPA_PROC_CREATED;
			while (spa->spa_proc_state == SPA_PROC_CREATED) {
				cv_wait(&spa->spa_proc_cv,
				    &spa->spa_proc_lock);
			}
			ASSERT(spa->spa_proc_state == SPA_PROC_ACTIVE);
			ASSERT(spa->spa_proc != &p0);
			ASSERT(spa->spa_did != 0);
		} else {
#ifdef _KERNEL
			cmn_err(CE_WARN,
			    "Couldn't create process for zfs pool \"%s\"\n",
			    spa->spa_name);
#endif
		}
	}
	mutex_exit(&spa->spa_proc_lock);

	/* If we didn't create a process, we need to create our taskqs. */
	if (spa->spa_proc == &p0) {
		spa_create_zio_taskqs(spa);
	}

	for (size_t i = 0; i < TXG_SIZE; i++) {
		spa->spa_txg_zio[i] = zio_root(spa, NULL, NULL,
		    ZIO_FLAG_CANFAIL);
	}

	list_create(&spa->spa_config_dirty_list, sizeof (vdev_t),
	    offsetof(vdev_t, vdev_config_dirty_node));
	list_create(&spa->spa_evicting_os_list, sizeof (objset_t),
	    offsetof(objset_t, os_evicting_node));
	list_create(&spa->spa_state_dirty_list, sizeof (vdev_t),
	    offsetof(vdev_t, vdev_state_dirty_node));

	txg_list_create(&spa->spa_vdev_txg_list, spa,
	    offsetof(struct vdev, vdev_txg_node));

	avl_create(&spa->spa_errlist_scrub,
	    spa_error_entry_compare, sizeof (spa_error_entry_t),
	    offsetof(spa_error_entry_t, se_avl));
	avl_create(&spa->spa_errlist_last,
	    spa_error_entry_compare, sizeof (spa_error_entry_t),
	    offsetof(spa_error_entry_t, se_avl));

	spa_keystore_init(&spa->spa_keystore);

	/*
	 * The taskq to upgrade datasets in this pool. Currently used by
	 * feature SPA_FEATURE_USEROBJ_ACCOUNTING/SPA_FEATURE_PROJECT_QUOTA.
	 */
	spa->spa_upgrade_taskq = taskq_create("z_upgrade", boot_ncpus,
	    minclsyspri, 1, INT_MAX, TASKQ_DYNAMIC);
}

/*
 * Opposite of spa_activate().
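 * Tears down the zio taskqs, the per-txg zio roots, the metaslab classes,
 * the error lists and, if one was created, the covering spa_thread()
 * process.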
 */
static void
spa_deactivate(spa_t *spa)
{
	ASSERT(spa->spa_sync_on == B_FALSE);
	ASSERT(spa->spa_dsl_pool == NULL);
	ASSERT(spa->spa_root_vdev == NULL);
	ASSERT(spa->spa_async_zio_root == NULL);
	ASSERT(spa->spa_state != POOL_STATE_UNINITIALIZED);

	spa_evicting_os_wait(spa);

	if (spa->spa_upgrade_taskq) {
		taskq_destroy(spa->spa_upgrade_taskq);
		spa->spa_upgrade_taskq = NULL;
	}

	txg_list_destroy(&spa->spa_vdev_txg_list);

	list_destroy(&spa->spa_config_dirty_list);
	list_destroy(&spa->spa_evicting_os_list);
	list_destroy(&spa->spa_state_dirty_list);

	for (int t = 0; t < ZIO_TYPES; t++) {
		for (int q = 0; q < ZIO_TASKQ_TYPES; q++) {
			spa_taskqs_fini(spa, t, q);
		}
	}

	for (size_t i = 0; i < TXG_SIZE; i++) {
		ASSERT3P(spa->spa_txg_zio[i], !=, NULL);
		VERIFY0(zio_wait(spa->spa_txg_zio[i]));
		spa->spa_txg_zio[i] = NULL;
	}

	metaslab_class_destroy(spa->spa_normal_class);
	spa->spa_normal_class = NULL;

	metaslab_class_destroy(spa->spa_log_class);
	spa->spa_log_class = NULL;

	metaslab_class_destroy(spa->spa_special_class);
	spa->spa_special_class = NULL;

	metaslab_class_destroy(spa->spa_dedup_class);
	spa->spa_dedup_class = NULL;

	/*
	 * If this was part of an import or the open otherwise failed, we may
	 * still have errors left in the queues. Empty them just in case.
	 */
	spa_errlog_drain(spa);
	avl_destroy(&spa->spa_errlist_scrub);
	avl_destroy(&spa->spa_errlist_last);

	spa_keystore_fini(&spa->spa_keystore);

	spa->spa_state = POOL_STATE_UNINITIALIZED;

	mutex_enter(&spa->spa_proc_lock);
	if (spa->spa_proc_state != SPA_PROC_NONE) {
		ASSERT(spa->spa_proc_state == SPA_PROC_ACTIVE);
		spa->spa_proc_state = SPA_PROC_DEACTIVATE;
		cv_broadcast(&spa->spa_proc_cv);
		while (spa->spa_proc_state == SPA_PROC_DEACTIVATE) {
			ASSERT(spa->spa_proc != &p0);
			cv_wait(&spa->spa_proc_cv, &spa->spa_proc_lock);
		}
		ASSERT(spa->spa_proc_state == SPA_PROC_GONE);
		spa->spa_proc_state = SPA_PROC_NONE;
	}
	ASSERT(spa->spa_proc == &p0);
	mutex_exit(&spa->spa_proc_lock);

	/*
	 * We want to make sure spa_thread() has actually exited the ZFS
	 * module, so that the module can't be unloaded out from underneath
	 * it.
	 */
	if (spa->spa_did != 0) {
		thread_join(spa->spa_did);
		spa->spa_did = 0;
	}
}

/*
 * Verify a pool configuration, and construct the vdev tree appropriately. This
 * will create all the necessary vdevs in the appropriate layout, with each vdev
 * in the CLOSED state. This will prep the pool before open/creation/import.
 * All vdev validation is done by the vdev_alloc() routine.
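 *
 * The function recurses over the ZPOOL_CONFIG_CHILDREN nvlist array, so a
 * parent vdev is always allocated before its children; leaf vdevs terminate
 * the recursion.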
 */
static int
spa_config_parse(spa_t *spa, vdev_t **vdp, nvlist_t *nv, vdev_t *parent,
    uint_t id, int atype)
{
	nvlist_t **child;
	uint_t children;
	int error;

	if ((error = vdev_alloc(spa, vdp, nv, parent, id, atype)) != 0)
		return (error);

	if ((*vdp)->vdev_ops->vdev_op_leaf)
		return (0);

	error = nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
	    &child, &children);

	if (error == ENOENT)
		return (0);

	if (error) {
		vdev_free(*vdp);
		*vdp = NULL;
		return (SET_ERROR(EINVAL));
	}

	for (int c = 0; c < children; c++) {
		vdev_t *vd;
		if ((error = spa_config_parse(spa, &vd, child[c], *vdp, c,
		    atype)) != 0) {
			vdev_free(*vdp);
			*vdp = NULL;
			return (error);
		}
	}

	ASSERT(*vdp != NULL);

	return (0);
}

/*
 * Opposite of spa_load().
 */
static void
spa_unload(spa_t *spa)
{
	int i;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	spa_load_note(spa, "UNLOADING");

	/*
	 * Stop async tasks.
	 */
	spa_async_suspend(spa);

	if (spa->spa_root_vdev) {
		vdev_initialize_stop_all(spa->spa_root_vdev,
		    VDEV_INITIALIZE_ACTIVE);
	}

	/*
	 * Stop syncing.
	 */
	if (spa->spa_sync_on) {
		txg_sync_stop(spa->spa_dsl_pool);
		spa->spa_sync_on = B_FALSE;
	}

	/*
	 * Even though vdev_free() also calls vdev_metaslab_fini, we need
	 * to call it earlier, before we wait for async i/o to complete.
	 * This ensures that there is no async metaslab prefetching, by
	 * calling taskq_wait(mg_taskq).
	 */
	if (spa->spa_root_vdev != NULL) {
		spa_config_enter(spa, SCL_ALL, spa, RW_WRITER);
		for (int c = 0; c < spa->spa_root_vdev->vdev_children; c++)
			vdev_metaslab_fini(spa->spa_root_vdev->vdev_child[c]);
		spa_config_exit(spa, SCL_ALL, spa);
	}

	if (spa->spa_mmp.mmp_thread)
		mmp_thread_stop(spa);

	/*
	 * Wait for any outstanding async I/O to complete.
	 */
	if (spa->spa_async_zio_root != NULL) {
		for (int i = 0; i < max_ncpus; i++)
			(void) zio_wait(spa->spa_async_zio_root[i]);
		kmem_free(spa->spa_async_zio_root, max_ncpus * sizeof (void *));
		spa->spa_async_zio_root = NULL;
	}

	if (spa->spa_vdev_removal != NULL) {
		spa_vdev_removal_destroy(spa->spa_vdev_removal);
		spa->spa_vdev_removal = NULL;
	}

	if (spa->spa_condense_zthr != NULL) {
		zthr_destroy(spa->spa_condense_zthr);
		spa->spa_condense_zthr = NULL;
	}

	if (spa->spa_checkpoint_discard_zthr != NULL) {
		zthr_destroy(spa->spa_checkpoint_discard_zthr);
		spa->spa_checkpoint_discard_zthr = NULL;
	}

	spa_condense_fini(spa);

	bpobj_close(&spa->spa_deferred_bpobj);

	spa_config_enter(spa, SCL_ALL, spa, RW_WRITER);

	/*
	 * Close all vdevs.
	 */
	if (spa->spa_root_vdev)
		vdev_free(spa->spa_root_vdev);
	ASSERT(spa->spa_root_vdev == NULL);

	/*
	 * Close the dsl pool.
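	 * (spa_meta_objset belongs to the dsl pool, so the pointer is simply
	 * cleared below.)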
	 */
	if (spa->spa_dsl_pool) {
		dsl_pool_close(spa->spa_dsl_pool);
		spa->spa_dsl_pool = NULL;
		spa->spa_meta_objset = NULL;
	}

	ddt_unload(spa);

	/*
	 * Drop and purge level 2 cache
	 */
	spa_l2cache_drop(spa);

	for (i = 0; i < spa->spa_spares.sav_count; i++)
		vdev_free(spa->spa_spares.sav_vdevs[i]);
	if (spa->spa_spares.sav_vdevs) {
		kmem_free(spa->spa_spares.sav_vdevs,
		    spa->spa_spares.sav_count * sizeof (void *));
		spa->spa_spares.sav_vdevs = NULL;
	}
	if (spa->spa_spares.sav_config) {
		nvlist_free(spa->spa_spares.sav_config);
		spa->spa_spares.sav_config = NULL;
	}
	spa->spa_spares.sav_count = 0;

	for (i = 0; i < spa->spa_l2cache.sav_count; i++) {
		vdev_clear_stats(spa->spa_l2cache.sav_vdevs[i]);
		vdev_free(spa->spa_l2cache.sav_vdevs[i]);
	}
	if (spa->spa_l2cache.sav_vdevs) {
		kmem_free(spa->spa_l2cache.sav_vdevs,
		    spa->spa_l2cache.sav_count * sizeof (void *));
		spa->spa_l2cache.sav_vdevs = NULL;
	}
	if (spa->spa_l2cache.sav_config) {
		nvlist_free(spa->spa_l2cache.sav_config);
		spa->spa_l2cache.sav_config = NULL;
	}
	spa->spa_l2cache.sav_count = 0;

	spa->spa_async_suspended = 0;

	spa->spa_indirect_vdevs_loaded = B_FALSE;

	if (spa->spa_comment != NULL) {
		spa_strfree(spa->spa_comment);
		spa->spa_comment = NULL;
	}

	spa_config_exit(spa, SCL_ALL, spa);
}

/*
 * Load (or re-load) the current list of vdevs describing the active spares for
 * this pool. When this is called, we have some form of basic information in
 * 'spa_spares.sav_config'. We parse this into vdevs, try to open them, and
 * then re-generate a more complete list including status information.
 */
void
spa_load_spares(spa_t *spa)
{
	nvlist_t **spares;
	uint_t nspares;
	int i;
	vdev_t *vd, *tvd;

#ifndef _KERNEL
	/*
	 * zdb opens both the current state of the pool and the
	 * checkpointed state (if present), with a different spa_t.
	 *
	 * As spare vdevs are shared among open pools, we skip loading
	 * them when we load the checkpointed state of the pool.
	 */
	if (!spa_writeable(spa))
		return;
#endif

	ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);

	/*
	 * First, close and free any existing spare vdevs.
	 */
	for (i = 0; i < spa->spa_spares.sav_count; i++) {
		vd = spa->spa_spares.sav_vdevs[i];

		/* Undo the call to spa_activate() below */
		if ((tvd = spa_lookup_by_guid(spa, vd->vdev_guid,
		    B_FALSE)) != NULL && tvd->vdev_isspare)
			spa_spare_remove(tvd);
		vdev_close(vd);
		vdev_free(vd);
	}

	if (spa->spa_spares.sav_vdevs)
		kmem_free(spa->spa_spares.sav_vdevs,
		    spa->spa_spares.sav_count * sizeof (void *));

	if (spa->spa_spares.sav_config == NULL)
		nspares = 0;
	else
		VERIFY(nvlist_lookup_nvlist_array(spa->spa_spares.sav_config,
		    ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0);

	spa->spa_spares.sav_count = (int)nspares;
	spa->spa_spares.sav_vdevs = NULL;

	if (nspares == 0)
		return;

	/*
	 * Construct the array of vdevs, opening them to get status in the
	 * process.
	 * For each spare, there are potentially two different vdev_t
	 * structures associated with it: one in the list of spares (used only
	 * for basic validation purposes) and one in the active vdev
	 * configuration (if it's spared in). During this phase we open and
	 * validate each vdev on the spare list. If the vdev also exists in the
	 * active configuration, then we also mark this vdev as an active spare.
	 */
	spa->spa_spares.sav_vdevs = kmem_alloc(nspares * sizeof (void *),
	    KM_SLEEP);
	for (i = 0; i < spa->spa_spares.sav_count; i++) {
		VERIFY(spa_config_parse(spa, &vd, spares[i], NULL, 0,
		    VDEV_ALLOC_SPARE) == 0);
		ASSERT(vd != NULL);

		spa->spa_spares.sav_vdevs[i] = vd;

		if ((tvd = spa_lookup_by_guid(spa, vd->vdev_guid,
		    B_FALSE)) != NULL) {
			if (!tvd->vdev_isspare)
				spa_spare_add(tvd);

			/*
			 * We only mark the spare active if we were successfully
			 * able to load the vdev. Otherwise, importing a pool
			 * with a bad active spare would result in strange
			 * behavior, because multiple pools would think the
			 * spare is actively in use.
			 *
			 * There is a vulnerability here to an equally bizarre
			 * circumstance, where a dead active spare is later
			 * brought back to life (onlined or otherwise). Given
			 * the rarity of this scenario, and the extra complexity
			 * it adds, we ignore the possibility.
			 */
			if (!vdev_is_dead(tvd))
				spa_spare_activate(tvd);
		}

		vd->vdev_top = vd;
		vd->vdev_aux = &spa->spa_spares;

		if (vdev_open(vd) != 0)
			continue;

		if (vdev_validate_aux(vd) == 0)
			spa_spare_add(vd);
	}

	/*
	 * Recompute the stashed list of spares, with status information
	 * this time.
	 */
	VERIFY(nvlist_remove(spa->spa_spares.sav_config, ZPOOL_CONFIG_SPARES,
	    DATA_TYPE_NVLIST_ARRAY) == 0);

	spares = kmem_alloc(spa->spa_spares.sav_count * sizeof (void *),
	    KM_SLEEP);
	for (i = 0; i < spa->spa_spares.sav_count; i++)
		spares[i] = vdev_config_generate(spa,
		    spa->spa_spares.sav_vdevs[i], B_TRUE, VDEV_CONFIG_SPARE);
	VERIFY(nvlist_add_nvlist_array(spa->spa_spares.sav_config,
	    ZPOOL_CONFIG_SPARES, spares, spa->spa_spares.sav_count) == 0);
	for (i = 0; i < spa->spa_spares.sav_count; i++)
		nvlist_free(spares[i]);
	kmem_free(spares, spa->spa_spares.sav_count * sizeof (void *));
}

/*
 * Load (or re-load) the current list of vdevs describing the active l2cache for
 * this pool. When this is called, we have some form of basic information in
 * 'spa_l2cache.sav_config'. We parse this into vdevs, try to open them, and
 * then re-generate a more complete list including status information.
 * Devices which are already active have their details maintained, and are
 * not re-opened.
 */
void
spa_load_l2cache(spa_t *spa)
{
	nvlist_t **l2cache;
	uint_t nl2cache;
	int i, j, oldnvdevs;
	uint64_t guid;
	vdev_t *vd, **oldvdevs, **newvdevs;
	spa_aux_vdev_t *sav = &spa->spa_l2cache;

#ifndef _KERNEL
	/*
	 * zdb opens both the current state of the pool and the
	 * checkpointed state (if present), with a different spa_t.
	 *
	 * As L2 caches are part of the ARC which is shared among open
	 * pools, we skip loading them when we load the checkpointed
	 * state of the pool.
	 */
	if (!spa_writeable(spa))
		return;
#endif

	ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);

	if (sav->sav_config != NULL) {
		VERIFY(nvlist_lookup_nvlist_array(sav->sav_config,
		    ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0);
		newvdevs = kmem_alloc(nl2cache * sizeof (void *), KM_SLEEP);
	} else {
		nl2cache = 0;
		newvdevs = NULL;
	}

	oldvdevs = sav->sav_vdevs;
	oldnvdevs = sav->sav_count;
	sav->sav_vdevs = NULL;
	sav->sav_count = 0;

	/*
	 * Process new nvlist of vdevs.
	 */
	for (i = 0; i < nl2cache; i++) {
		VERIFY(nvlist_lookup_uint64(l2cache[i], ZPOOL_CONFIG_GUID,
		    &guid) == 0);

		newvdevs[i] = NULL;
		for (j = 0; j < oldnvdevs; j++) {
			vd = oldvdevs[j];
			if (vd != NULL && guid == vd->vdev_guid) {
				/*
				 * Retain previous vdev for add/remove ops.
				 */
				newvdevs[i] = vd;
				oldvdevs[j] = NULL;
				break;
			}
		}

		if (newvdevs[i] == NULL) {
			/*
			 * Create new vdev
			 */
			VERIFY(spa_config_parse(spa, &vd, l2cache[i], NULL, 0,
			    VDEV_ALLOC_L2CACHE) == 0);
			ASSERT(vd != NULL);
			newvdevs[i] = vd;

			/*
			 * Commit this vdev as an l2cache device,
			 * even if it fails to open.
			 */
			spa_l2cache_add(vd);

			vd->vdev_top = vd;
			vd->vdev_aux = sav;

			spa_l2cache_activate(vd);

			if (vdev_open(vd) != 0)
				continue;

			(void) vdev_validate_aux(vd);

			if (!vdev_is_dead(vd))
				l2arc_add_vdev(spa, vd);
		}
	}

	/*
	 * Purge vdevs that were dropped
	 */
	for (i = 0; i < oldnvdevs; i++) {
		uint64_t pool;

		vd = oldvdevs[i];
		if (vd != NULL) {
			ASSERT(vd->vdev_isl2cache);

			if (spa_l2cache_exists(vd->vdev_guid, &pool) &&
			    pool != 0ULL && l2arc_vdev_present(vd))
				l2arc_remove_vdev(vd);
			vdev_clear_stats(vd);
			vdev_free(vd);
		}
	}

	if (oldvdevs)
		kmem_free(oldvdevs, oldnvdevs * sizeof (void *));

	if (sav->sav_config == NULL)
		goto out;

	sav->sav_vdevs = newvdevs;
	sav->sav_count = (int)nl2cache;

	/*
	 * Recompute the stashed list of l2cache devices, with status
	 * information this time.
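	 * As with the spares above, the existing ZPOOL_CONFIG_L2CACHE entry
	 * is removed and rebuilt from vdev_config_generate() output.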
	 */
	VERIFY(nvlist_remove(sav->sav_config, ZPOOL_CONFIG_L2CACHE,
	    DATA_TYPE_NVLIST_ARRAY) == 0);

	l2cache = kmem_alloc(sav->sav_count * sizeof (void *), KM_SLEEP);
	for (i = 0; i < sav->sav_count; i++)
		l2cache[i] = vdev_config_generate(spa,
		    sav->sav_vdevs[i], B_TRUE, VDEV_CONFIG_L2CACHE);
	VERIFY(nvlist_add_nvlist_array(sav->sav_config,
	    ZPOOL_CONFIG_L2CACHE, l2cache, sav->sav_count) == 0);
out:
	for (i = 0; i < sav->sav_count; i++)
		nvlist_free(l2cache[i]);
	if (sav->sav_count)
		kmem_free(l2cache, sav->sav_count * sizeof (void *));
}

static int
load_nvlist(spa_t *spa, uint64_t obj, nvlist_t **value)
{
	dmu_buf_t *db;
	char *packed = NULL;
	size_t nvsize = 0;
	int error;
	*value = NULL;

	error = dmu_bonus_hold(spa->spa_meta_objset, obj, FTAG, &db);
	if (error != 0)
		return (error);

	nvsize = *(uint64_t *)db->db_data;
	dmu_buf_rele(db, FTAG);

	packed = kmem_alloc(nvsize, KM_SLEEP);
	error = dmu_read(spa->spa_meta_objset, obj, 0, nvsize, packed,
	    DMU_READ_PREFETCH);
	if (error == 0)
		error = nvlist_unpack(packed, nvsize, value, 0);
	kmem_free(packed, nvsize);

	return (error);
}

/*
 * Concrete top-level vdevs that are not missing and are not logs. At every
 * spa_sync we write new uberblocks to at least SPA_SYNC_MIN_VDEVS core tvds.
 */
static uint64_t
spa_healthy_core_tvds(spa_t *spa)
{
	vdev_t *rvd = spa->spa_root_vdev;
	uint64_t tvds = 0;

	for (uint64_t i = 0; i < rvd->vdev_children; i++) {
		vdev_t *vd = rvd->vdev_child[i];
		if (vd->vdev_islog)
			continue;
		if (vdev_is_concrete(vd) && !vdev_is_dead(vd))
			tvds++;
	}

	return (tvds);
}

/*
 * Checks to see if the given vdev could not be opened, in which case we post a
 * sysevent to notify the autoreplace code that the device has been removed.
 */
static void
spa_check_removed(vdev_t *vd)
{
	for (uint64_t c = 0; c < vd->vdev_children; c++)
		spa_check_removed(vd->vdev_child[c]);

	if (vd->vdev_ops->vdev_op_leaf && vdev_is_dead(vd) &&
	    vdev_is_concrete(vd)) {
		zfs_post_autoreplace(vd->vdev_spa, vd);
		spa_event_notify(vd->vdev_spa, vd, NULL, ESC_ZFS_VDEV_CHECK);
	}
}

static int
spa_check_for_missing_logs(spa_t *spa)
{
	vdev_t *rvd = spa->spa_root_vdev;

	/*
	 * If we're doing a normal import, then build up any additional
	 * diagnostic information about missing log devices.
	 * We'll pass this up to the user for further processing.
	 */
	if (!(spa->spa_import_flags & ZFS_IMPORT_MISSING_LOG)) {
		nvlist_t **child, *nv;
		uint64_t idx = 0;

		child = kmem_alloc(rvd->vdev_children * sizeof (nvlist_t **),
		    KM_SLEEP);
		VERIFY(nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) == 0);

		for (uint64_t c = 0; c < rvd->vdev_children; c++) {
			vdev_t *tvd = rvd->vdev_child[c];

			/*
			 * We consider a device as missing only if it failed
			 * to open (i.e. offline or faulted is not considered
			 * as missing).
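			 * In other words, only log tvds whose state is
			 * VDEV_STATE_CANT_OPEN are reported below.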
			 */
			if (tvd->vdev_islog &&
			    tvd->vdev_state == VDEV_STATE_CANT_OPEN) {
				child[idx++] = vdev_config_generate(spa, tvd,
				    B_FALSE, VDEV_CONFIG_MISSING);
			}
		}

		if (idx > 0) {
			fnvlist_add_nvlist_array(nv,
			    ZPOOL_CONFIG_CHILDREN, child, idx);
			fnvlist_add_nvlist(spa->spa_load_info,
			    ZPOOL_CONFIG_MISSING_DEVICES, nv);

			for (uint64_t i = 0; i < idx; i++)
				nvlist_free(child[i]);
		}
		nvlist_free(nv);
		kmem_free(child, rvd->vdev_children * sizeof (char **));

		if (idx > 0) {
			spa_load_failed(spa, "some log devices are missing");
			vdev_dbgmsg_print_tree(rvd, 2);
			return (SET_ERROR(ENXIO));
		}
	} else {
		for (uint64_t c = 0; c < rvd->vdev_children; c++) {
			vdev_t *tvd = rvd->vdev_child[c];

			if (tvd->vdev_islog &&
			    tvd->vdev_state == VDEV_STATE_CANT_OPEN) {
				spa_set_log_state(spa, SPA_LOG_CLEAR);
				spa_load_note(spa, "some log devices are "
				    "missing, ZIL is dropped.");
				vdev_dbgmsg_print_tree(rvd, 2);
				break;
			}
		}
	}

	return (0);
}

/*
 * Check for missing log devices
 */
static boolean_t
spa_check_logs(spa_t *spa)
{
	boolean_t rv = B_FALSE;
	dsl_pool_t *dp = spa_get_dsl(spa);

	switch (spa->spa_log_state) {
	case SPA_LOG_MISSING:
		/* need to recheck in case slog has been restored */
	case SPA_LOG_UNKNOWN:
		rv = (dmu_objset_find_dp(dp, dp->dp_root_dir_obj,
		    zil_check_log_chain, NULL, DS_FIND_CHILDREN) != 0);
		if (rv)
			spa_set_log_state(spa, SPA_LOG_MISSING);
		break;
	}
	return (rv);
}

static boolean_t
spa_passivate_log(spa_t *spa)
{
	vdev_t *rvd = spa->spa_root_vdev;
	boolean_t slog_found = B_FALSE;

	ASSERT(spa_config_held(spa, SCL_ALLOC, RW_WRITER));

	if (!spa_has_slogs(spa))
		return (B_FALSE);

	for (int c = 0; c < rvd->vdev_children; c++) {
		vdev_t *tvd = rvd->vdev_child[c];
		metaslab_group_t *mg = tvd->vdev_mg;

		if (tvd->vdev_islog) {
			metaslab_group_passivate(mg);
			slog_found = B_TRUE;
		}
	}

	return (slog_found);
}

static void
spa_activate_log(spa_t *spa)
{
	vdev_t *rvd = spa->spa_root_vdev;

	ASSERT(spa_config_held(spa, SCL_ALLOC, RW_WRITER));

	for (int c = 0; c < rvd->vdev_children; c++) {
		vdev_t *tvd = rvd->vdev_child[c];
		metaslab_group_t *mg = tvd->vdev_mg;

		if (tvd->vdev_islog)
			metaslab_group_activate(mg);
	}
}

int
spa_reset_logs(spa_t *spa)
{
	int error;

	error = dmu_objset_find(spa_name(spa), zil_reset,
	    NULL, DS_FIND_CHILDREN);
	if (error == 0) {
		/*
		 * We successfully offlined the log device; sync out the
		 * current txg so that the "stubby" block can be removed
		 * by zil_sync().
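		 * (A txg argument of 0 means "wait for the currently open
		 * txg to sync".)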
1966 */ 1967 txg_wait_synced(spa->spa_dsl_pool, 0); 1968 } 1969 return (error); 1970 } 1971 1972 static void 1973 spa_aux_check_removed(spa_aux_vdev_t *sav) 1974 { 1975 for (int i = 0; i < sav->sav_count; i++) 1976 spa_check_removed(sav->sav_vdevs[i]); 1977 } 1978 1979 void 1980 spa_claim_notify(zio_t *zio) 1981 { 1982 spa_t *spa = zio->io_spa; 1983 1984 if (zio->io_error) 1985 return; 1986 1987 mutex_enter(&spa->spa_props_lock); /* any mutex will do */ 1988 if (spa->spa_claim_max_txg < zio->io_bp->blk_birth) 1989 spa->spa_claim_max_txg = zio->io_bp->blk_birth; 1990 mutex_exit(&spa->spa_props_lock); 1991 } 1992 1993 typedef struct spa_load_error { 1994 uint64_t sle_meta_count; 1995 uint64_t sle_data_count; 1996 } spa_load_error_t; 1997 1998 static void 1999 spa_load_verify_done(zio_t *zio) 2000 { 2001 blkptr_t *bp = zio->io_bp; 2002 spa_load_error_t *sle = zio->io_private; 2003 dmu_object_type_t type = BP_GET_TYPE(bp); 2004 int error = zio->io_error; 2005 spa_t *spa = zio->io_spa; 2006 2007 abd_free(zio->io_abd); 2008 if (error) { 2009 if ((BP_GET_LEVEL(bp) != 0 || DMU_OT_IS_METADATA(type)) && 2010 type != DMU_OT_INTENT_LOG) 2011 atomic_inc_64(&sle->sle_meta_count); 2012 else 2013 atomic_inc_64(&sle->sle_data_count); 2014 } 2015 2016 mutex_enter(&spa->spa_scrub_lock); 2017 spa->spa_load_verify_ios--; 2018 cv_broadcast(&spa->spa_scrub_io_cv); 2019 mutex_exit(&spa->spa_scrub_lock); 2020 } 2021 2022 /* 2023 * Maximum number of concurrent scrub i/os to create while verifying 2024 * a pool while importing it. 2025 */ 2026 int spa_load_verify_maxinflight = 10000; 2027 boolean_t spa_load_verify_metadata = B_TRUE; 2028 boolean_t spa_load_verify_data = B_TRUE; 2029 2030 /*ARGSUSED*/ 2031 static int 2032 spa_load_verify_cb(spa_t *spa, zilog_t *zilog, const blkptr_t *bp, 2033 const zbookmark_phys_t *zb, const dnode_phys_t *dnp, void *arg) 2034 { 2035 if (bp == NULL || BP_IS_HOLE(bp) || BP_IS_EMBEDDED(bp)) 2036 return (0); 2037 /* 2038 * Note: normally this routine will not be called if 2039 * spa_load_verify_metadata is not set. However, it may be useful 2040 * to manually set the flag after the traversal has begun. 
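* The reads issued from this callback are throttled: when spa_load_verify_maxinflight I/Os are already outstanding we block on spa_scrub_io_cv, and spa_load_verify_done() wakes us as completions drain spa_load_verify_ios.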
2041 */ 2042 if (!spa_load_verify_metadata) 2043 return (0); 2044 if (!BP_IS_METADATA(bp) && !spa_load_verify_data) 2045 return (0); 2046 2047 zio_t *rio = arg; 2048 size_t size = BP_GET_PSIZE(bp); 2049 2050 mutex_enter(&spa->spa_scrub_lock); 2051 while (spa->spa_load_verify_ios >= spa_load_verify_maxinflight) 2052 cv_wait(&spa->spa_scrub_io_cv, &spa->spa_scrub_lock); 2053 spa->spa_load_verify_ios++; 2054 mutex_exit(&spa->spa_scrub_lock); 2055 2056 zio_nowait(zio_read(rio, spa, bp, abd_alloc_for_io(size, B_FALSE), size, 2057 spa_load_verify_done, rio->io_private, ZIO_PRIORITY_SCRUB, 2058 ZIO_FLAG_SPECULATIVE | ZIO_FLAG_CANFAIL | 2059 ZIO_FLAG_SCRUB | ZIO_FLAG_RAW, zb)); 2060 return (0); 2061 } 2062 2063 /* ARGSUSED */ 2064 int 2065 verify_dataset_name_len(dsl_pool_t *dp, dsl_dataset_t *ds, void *arg) 2066 { 2067 if (dsl_dataset_namelen(ds) >= ZFS_MAX_DATASET_NAME_LEN) 2068 return (SET_ERROR(ENAMETOOLONG)); 2069 2070 return (0); 2071 } 2072 2073 static int 2074 spa_load_verify(spa_t *spa) 2075 { 2076 zio_t *rio; 2077 spa_load_error_t sle = { 0 }; 2078 zpool_load_policy_t policy; 2079 boolean_t verify_ok = B_FALSE; 2080 int error = 0; 2081 2082 zpool_get_load_policy(spa->spa_config, &policy); 2083 2084 if (policy.zlp_rewind & ZPOOL_NEVER_REWIND) 2085 return (0); 2086 2087 dsl_pool_config_enter(spa->spa_dsl_pool, FTAG); 2088 error = dmu_objset_find_dp(spa->spa_dsl_pool, 2089 spa->spa_dsl_pool->dp_root_dir_obj, verify_dataset_name_len, NULL, 2090 DS_FIND_CHILDREN); 2091 dsl_pool_config_exit(spa->spa_dsl_pool, FTAG); 2092 if (error != 0) 2093 return (error); 2094 2095 rio = zio_root(spa, NULL, &sle, 2096 ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE); 2097 2098 if (spa_load_verify_metadata) { 2099 if (spa->spa_extreme_rewind) { 2100 spa_load_note(spa, "performing a complete scan of the " 2101 "pool since extreme rewind is on. This may take " 2102 "a very long time.\n (spa_load_verify_data=%u, " 2103 "spa_load_verify_metadata=%u)", 2104 spa_load_verify_data, spa_load_verify_metadata); 2105 } 2106 error = traverse_pool(spa, spa->spa_verify_min_txg, 2107 TRAVERSE_PRE | TRAVERSE_PREFETCH_METADATA | 2108 TRAVERSE_NO_DECRYPT, spa_load_verify_cb, rio); 2109 } 2110 2111 (void) zio_wait(rio); 2112 2113 spa->spa_load_meta_errors = sle.sle_meta_count; 2114 spa->spa_load_data_errors = sle.sle_data_count; 2115 2116 if (sle.sle_meta_count != 0 || sle.sle_data_count != 0) { 2117 spa_load_note(spa, "spa_load_verify found %llu metadata errors " 2118 "and %llu data errors", (u_longlong_t)sle.sle_meta_count, 2119 (u_longlong_t)sle.sle_data_count); 2120 } 2121 2122 if (spa_load_verify_dryrun || 2123 (!error && sle.sle_meta_count <= policy.zlp_maxmeta && 2124 sle.sle_data_count <= policy.zlp_maxdata)) { 2125 int64_t loss = 0; 2126 2127 verify_ok = B_TRUE; 2128 spa->spa_load_txg = spa->spa_uberblock.ub_txg; 2129 spa->spa_load_txg_ts = spa->spa_uberblock.ub_timestamp; 2130 2131 loss = spa->spa_last_ubsync_txg_ts - spa->spa_load_txg_ts; 2132 VERIFY(nvlist_add_uint64(spa->spa_load_info, 2133 ZPOOL_CONFIG_LOAD_TIME, spa->spa_load_txg_ts) == 0); 2134 VERIFY(nvlist_add_int64(spa->spa_load_info, 2135 ZPOOL_CONFIG_REWIND_TIME, loss) == 0); 2136 VERIFY(nvlist_add_uint64(spa->spa_load_info, 2137 ZPOOL_CONFIG_LOAD_DATA_ERRORS, sle.sle_data_count) == 0); 2138 } else { 2139 spa->spa_load_max_txg = spa->spa_uberblock.ub_txg; 2140 } 2141 2142 if (spa_load_verify_dryrun) 2143 return (0); 2144 2145 if (error) { 2146 if (error != ENXIO && error != EIO) 2147 error = SET_ERROR(EIO); 2148 return (error); 2149 } 2150 2151 return (verify_ok ? 
0 : EIO); 2152 } 2153 2154 /* 2155 * Find a value in the pool props object. 2156 */ 2157 static void 2158 spa_prop_find(spa_t *spa, zpool_prop_t prop, uint64_t *val) 2159 { 2160 (void) zap_lookup(spa->spa_meta_objset, spa->spa_pool_props_object, 2161 zpool_prop_to_name(prop), sizeof (uint64_t), 1, val); 2162 } 2163 2164 /* 2165 * Find a value in the pool directory object. 2166 */ 2167 static int 2168 spa_dir_prop(spa_t *spa, const char *name, uint64_t *val, boolean_t log_enoent) 2169 { 2170 int error = zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT, 2171 name, sizeof (uint64_t), 1, val); 2172 2173 if (error != 0 && (error != ENOENT || log_enoent)) { 2174 spa_load_failed(spa, "couldn't get '%s' value in MOS directory " 2175 "[error=%d]", name, error); 2176 } 2177 2178 return (error); 2179 } 2180 2181 static int 2182 spa_vdev_err(vdev_t *vdev, vdev_aux_t aux, int err) 2183 { 2184 vdev_set_state(vdev, B_TRUE, VDEV_STATE_CANT_OPEN, aux); 2185 return (SET_ERROR(err)); 2186 } 2187 2188 static void 2189 spa_spawn_aux_threads(spa_t *spa) 2190 { 2191 ASSERT(spa_writeable(spa)); 2192 2193 ASSERT(MUTEX_HELD(&spa_namespace_lock)); 2194 2195 spa_start_indirect_condensing_thread(spa); 2196 2197 ASSERT3P(spa->spa_checkpoint_discard_zthr, ==, NULL); 2198 spa->spa_checkpoint_discard_zthr = 2199 zthr_create(spa_checkpoint_discard_thread_check, 2200 spa_checkpoint_discard_thread, spa); 2201 } 2202 2203 /* 2204 * Fix up config after a partly-completed split. This is done with the 2205 * ZPOOL_CONFIG_SPLIT nvlist. Both the splitting pool and the split-off 2206 * pool have that entry in their config, but only the splitting one contains 2207 * a list of all the guids of the vdevs that are being split off. 2208 * 2209 * This function determines what to do with that list: either rejoin 2210 * all the disks to the pool, or complete the splitting process. To attempt 2211 * the rejoin, each disk that is offlined is marked online again, and 2212 * we do a reopen() call. If the vdev label for every disk that was 2213 * marked online indicates it was successfully split off (VDEV_AUX_SPLIT_POOL) 2214 * then we call vdev_split() on each disk, and complete the split. 2215 * 2216 * Otherwise we leave the config alone, with all the vdevs in place in 2217 * the original pool. 2218 */ 2219 static void 2220 spa_try_repair(spa_t *spa, nvlist_t *config) 2221 { 2222 uint_t extracted; 2223 uint64_t *glist; 2224 uint_t i, gcount; 2225 nvlist_t *nvl; 2226 vdev_t **vd; 2227 boolean_t attempt_reopen; 2228 2229 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_SPLIT, &nvl) != 0) 2230 return; 2231 2232 /* check that the config is complete */ 2233 if (nvlist_lookup_uint64_array(nvl, ZPOOL_CONFIG_SPLIT_LIST, 2234 &glist, &gcount) != 0) 2235 return; 2236 2237 vd = kmem_zalloc(gcount * sizeof (vdev_t *), KM_SLEEP); 2238 2239 /* attempt to online all the vdevs & validate */ 2240 attempt_reopen = B_TRUE; 2241 for (i = 0; i < gcount; i++) { 2242 if (glist[i] == 0) /* vdev is hole */ 2243 continue; 2244 2245 vd[i] = spa_lookup_by_guid(spa, glist[i], B_FALSE); 2246 if (vd[i] == NULL) { 2247 /* 2248 * Don't bother attempting to reopen the disks; 2249 * just do the split. 
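* (If any guid from the split list can no longer be found in this pool's vdev tree, a rejoin is not possible, so we fall through and complete the split below.)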
2250 */ 2251 attempt_reopen = B_FALSE; 2252 } else { 2253 /* attempt to re-online it */ 2254 vd[i]->vdev_offline = B_FALSE; 2255 } 2256 } 2257 2258 if (attempt_reopen) { 2259 vdev_reopen(spa->spa_root_vdev); 2260 2261 /* check each device to see what state it's in */ 2262 for (extracted = 0, i = 0; i < gcount; i++) { 2263 if (vd[i] != NULL && 2264 vd[i]->vdev_stat.vs_aux != VDEV_AUX_SPLIT_POOL) 2265 break; 2266 ++extracted; 2267 } 2268 } 2269 2270 /* 2271 * If every disk has been moved to the new pool, or if we never 2272 * even attempted to look at them, then we split them off for 2273 * good. 2274 */ 2275 if (!attempt_reopen || gcount == extracted) { 2276 for (i = 0; i < gcount; i++) 2277 if (vd[i] != NULL) 2278 vdev_split(vd[i]); 2279 vdev_reopen(spa->spa_root_vdev); 2280 } 2281 2282 kmem_free(vd, gcount * sizeof (vdev_t *)); 2283 } 2284 2285 static int 2286 spa_load(spa_t *spa, spa_load_state_t state, spa_import_type_t type) 2287 { 2288 char *ereport = FM_EREPORT_ZFS_POOL; 2289 int error; 2290 2291 spa->spa_load_state = state; 2292 2293 gethrestime(&spa->spa_loaded_ts); 2294 error = spa_load_impl(spa, type, &ereport); 2295 2296 /* 2297 * Don't count references from objsets that are already closed 2298 * and are making their way through the eviction process. 2299 */ 2300 spa_evicting_os_wait(spa); 2301 spa->spa_minref = zfs_refcount_count(&spa->spa_refcount); 2302 if (error) { 2303 if (error != EEXIST) { 2304 spa->spa_loaded_ts.tv_sec = 0; 2305 spa->spa_loaded_ts.tv_nsec = 0; 2306 } 2307 if (error != EBADF) { 2308 zfs_ereport_post(ereport, spa, NULL, NULL, NULL, 0, 0); 2309 } 2310 } 2311 spa->spa_load_state = error ? SPA_LOAD_ERROR : SPA_LOAD_NONE; 2312 spa->spa_ena = 0; 2313 2314 return (error); 2315 } 2316 2317 /* 2318 * Count the number of per-vdev ZAPs associated with all of the vdevs in the 2319 * vdev tree rooted in the given vd, and ensure that each ZAP is present in the 2320 * spa's per-vdev ZAP list. 2321 */ 2322 static uint64_t 2323 vdev_count_verify_zaps(vdev_t *vd) 2324 { 2325 spa_t *spa = vd->vdev_spa; 2326 uint64_t total = 0; 2327 if (vd->vdev_top_zap != 0) { 2328 total++; 2329 ASSERT0(zap_lookup_int(spa->spa_meta_objset, 2330 spa->spa_all_vdev_zaps, vd->vdev_top_zap)); 2331 } 2332 if (vd->vdev_leaf_zap != 0) { 2333 total++; 2334 ASSERT0(zap_lookup_int(spa->spa_meta_objset, 2335 spa->spa_all_vdev_zaps, vd->vdev_leaf_zap)); 2336 } 2337 2338 for (uint64_t i = 0; i < vd->vdev_children; i++) { 2339 total += vdev_count_verify_zaps(vd->vdev_child[i]); 2340 } 2341 2342 return (total); 2343 } 2344 2345 /* 2346 * Determine whether the activity check is required. 2347 */ 2348 static boolean_t 2349 spa_activity_check_required(spa_t *spa, uberblock_t *ub, nvlist_t *label, 2350 nvlist_t *config) 2351 { 2352 uint64_t state = 0; 2353 uint64_t hostid = 0; 2354 uint64_t tryconfig_txg = 0; 2355 uint64_t tryconfig_timestamp = 0; 2356 nvlist_t *nvinfo; 2357 2358 if (nvlist_exists(config, ZPOOL_CONFIG_LOAD_INFO)) { 2359 nvinfo = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO); 2360 (void) nvlist_lookup_uint64(nvinfo, ZPOOL_CONFIG_MMP_TXG, 2361 &tryconfig_txg); 2362 (void) nvlist_lookup_uint64(config, ZPOOL_CONFIG_TIMESTAMP, 2363 &tryconfig_timestamp); 2364 } 2365 2366 (void) nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE, &state); 2367 2368 /* 2369 * Disable the MMP activity check - This is used by zdb which 2370 * is intended to be used on potentially active pools. 
2371 */ 2372 if (spa->spa_import_flags & ZFS_IMPORT_SKIP_MMP) 2373 return (B_FALSE); 2374 2375 /* 2376 * Skip the activity check when the MMP feature is disabled. 2377 */ 2378 if (ub->ub_mmp_magic == MMP_MAGIC && ub->ub_mmp_delay == 0) 2379 return (B_FALSE); 2380 /* 2381 * If the tryconfig_* values are nonzero, they are the results of an 2382 * earlier tryimport. If they match the uberblock we just found, then 2383 * the pool has not changed and we return false so we do not test a 2384 * second time. 2385 */ 2386 if (tryconfig_txg && tryconfig_txg == ub->ub_txg && 2387 tryconfig_timestamp && tryconfig_timestamp == ub->ub_timestamp) 2388 return (B_FALSE); 2389 2390 /* 2391 * Allow the activity check to be skipped when importing the pool 2392 * on the same host which last imported it. Since the hostid from 2393 * configuration may be stale use the one read from the label. 2394 */ 2395 if (nvlist_exists(label, ZPOOL_CONFIG_HOSTID)) 2396 hostid = fnvlist_lookup_uint64(label, ZPOOL_CONFIG_HOSTID); 2397 2398 if (hostid == spa_get_hostid()) 2399 return (B_FALSE); 2400 2401 /* 2402 * Skip the activity test when the pool was cleanly exported. 2403 */ 2404 if (state != POOL_STATE_ACTIVE) 2405 return (B_FALSE); 2406 2407 return (B_TRUE); 2408 } 2409 2410 /* 2411 * Perform the import activity check. If the user canceled the import or 2412 * we detected activity then fail. 2413 */ 2414 static int 2415 spa_activity_check(spa_t *spa, uberblock_t *ub, nvlist_t *config) 2416 { 2417 uint64_t import_intervals = MAX(zfs_multihost_import_intervals, 1); 2418 uint64_t txg = ub->ub_txg; 2419 uint64_t timestamp = ub->ub_timestamp; 2420 uint64_t import_delay = NANOSEC; 2421 hrtime_t import_expire; 2422 nvlist_t *mmp_label = NULL; 2423 vdev_t *rvd = spa->spa_root_vdev; 2424 kcondvar_t cv; 2425 kmutex_t mtx; 2426 int error = 0; 2427 2428 cv_init(&cv, NULL, CV_DEFAULT, NULL); 2429 mutex_init(&mtx, NULL, MUTEX_DEFAULT, NULL); 2430 mutex_enter(&mtx); 2431 2432 /* 2433 * If ZPOOL_CONFIG_MMP_TXG is present an activity check was performed 2434 * during the earlier tryimport. If the txg recorded there is 0 then 2435 * the pool is known to be active on another host. 2436 * 2437 * Otherwise, the pool might be in use on another node. Check for 2438 * changes in the uberblocks on disk if necessary. 2439 */ 2440 if (nvlist_exists(config, ZPOOL_CONFIG_LOAD_INFO)) { 2441 nvlist_t *nvinfo = fnvlist_lookup_nvlist(config, 2442 ZPOOL_CONFIG_LOAD_INFO); 2443 2444 if (nvlist_exists(nvinfo, ZPOOL_CONFIG_MMP_TXG) && 2445 fnvlist_lookup_uint64(nvinfo, ZPOOL_CONFIG_MMP_TXG) == 0) { 2446 vdev_uberblock_load(rvd, ub, &mmp_label); 2447 error = SET_ERROR(EREMOTEIO); 2448 goto out; 2449 } 2450 } 2451 2452 /* 2453 * Preferentially use the zfs_multihost_interval from the node which 2454 * last imported the pool. This value is stored in an MMP uberblock as. 2455 * 2456 * ub_mmp_delay * vdev_count_leaves() == zfs_multihost_interval 2457 */ 2458 if (ub->ub_mmp_magic == MMP_MAGIC && ub->ub_mmp_delay) 2459 import_delay = MAX(import_delay, import_intervals * 2460 ub->ub_mmp_delay * MAX(vdev_count_leaves(spa), 1)); 2461 2462 /* Apply a floor using the local default values. 
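* For illustration only, assuming the common defaults of zfs_multihost_import_intervals = 10 and an effective multihost interval of 1000 ms (these numbers are assumptions, not guarantees), the wait below comes to roughly 10 * 1000 ms = 10 seconds, before the random 0-25% slack added further down to break ties between simultaneous imports.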
*/ 2463 import_delay = MAX(import_delay, import_intervals * 2464 MSEC2NSEC(MAX(zfs_multihost_interval, MMP_MIN_INTERVAL))); 2465 2466 zfs_dbgmsg("import_delay=%llu ub_mmp_delay=%llu import_intervals=%u " 2467 "leaves=%u", import_delay, ub->ub_mmp_delay, import_intervals, 2468 vdev_count_leaves(spa)); 2469 2470 /* Add a small random factor in case of simultaneous imports (0-25%) */ 2471 import_expire = gethrtime() + import_delay + 2472 (import_delay * spa_get_random(250) / 1000); 2473 2474 while (gethrtime() < import_expire) { 2475 vdev_uberblock_load(rvd, ub, &mmp_label); 2476 2477 if (txg != ub->ub_txg || timestamp != ub->ub_timestamp) { 2478 error = SET_ERROR(EREMOTEIO); 2479 break; 2480 } 2481 2482 if (mmp_label) { 2483 nvlist_free(mmp_label); 2484 mmp_label = NULL; 2485 } 2486 2487 error = cv_timedwait_sig(&cv, &mtx, ddi_get_lbolt() + hz); 2488 if (error != -1) { 2489 error = SET_ERROR(EINTR); 2490 break; 2491 } 2492 error = 0; 2493 } 2494 2495 out: 2496 mutex_exit(&mtx); 2497 mutex_destroy(&mtx); 2498 cv_destroy(&cv); 2499 2500 /* 2501 * If the pool is determined to be active store the status in the 2502 * spa->spa_load_info nvlist. If the remote hostname or hostid are 2503 * available from configuration read from disk store them as well. 2504 * This allows 'zpool import' to generate a more useful message. 2505 * 2506 * ZPOOL_CONFIG_MMP_STATE - observed pool status (mandatory) 2507 * ZPOOL_CONFIG_MMP_HOSTNAME - hostname from the active pool 2508 * ZPOOL_CONFIG_MMP_HOSTID - hostid from the active pool 2509 */ 2510 if (error == EREMOTEIO) { 2511 char *hostname = "<unknown>"; 2512 uint64_t hostid = 0; 2513 2514 if (mmp_label) { 2515 if (nvlist_exists(mmp_label, ZPOOL_CONFIG_HOSTNAME)) { 2516 hostname = fnvlist_lookup_string(mmp_label, 2517 ZPOOL_CONFIG_HOSTNAME); 2518 fnvlist_add_string(spa->spa_load_info, 2519 ZPOOL_CONFIG_MMP_HOSTNAME, hostname); 2520 } 2521 2522 if (nvlist_exists(mmp_label, ZPOOL_CONFIG_HOSTID)) { 2523 hostid = fnvlist_lookup_uint64(mmp_label, 2524 ZPOOL_CONFIG_HOSTID); 2525 fnvlist_add_uint64(spa->spa_load_info, 2526 ZPOOL_CONFIG_MMP_HOSTID, hostid); 2527 } 2528 } 2529 2530 fnvlist_add_uint64(spa->spa_load_info, 2531 ZPOOL_CONFIG_MMP_STATE, MMP_STATE_ACTIVE); 2532 fnvlist_add_uint64(spa->spa_load_info, 2533 ZPOOL_CONFIG_MMP_TXG, 0); 2534 2535 error = spa_vdev_err(rvd, VDEV_AUX_ACTIVE, EREMOTEIO); 2536 } 2537 2538 if (mmp_label) 2539 nvlist_free(mmp_label); 2540 2541 return (error); 2542 } 2543 2544 static int 2545 spa_verify_host(spa_t *spa, nvlist_t *mos_config) 2546 { 2547 uint64_t hostid; 2548 char *hostname; 2549 uint64_t myhostid = 0; 2550 2551 if (!spa_is_root(spa) && nvlist_lookup_uint64(mos_config, 2552 ZPOOL_CONFIG_HOSTID, &hostid) == 0) { 2553 hostname = fnvlist_lookup_string(mos_config, 2554 ZPOOL_CONFIG_HOSTNAME); 2555 2556 myhostid = zone_get_hostid(NULL); 2557 2558 if (hostid != 0 && myhostid != 0 && hostid != myhostid) { 2559 cmn_err(CE_WARN, "pool '%s' could not be " 2560 "loaded as it was last accessed by " 2561 "another system (host: %s hostid: 0x%llx). 
" 2562 "See: http://illumos.org/msg/ZFS-8000-EY", 2563 spa_name(spa), hostname, (u_longlong_t)hostid); 2564 spa_load_failed(spa, "hostid verification failed: pool " 2565 "last accessed by host: %s (hostid: 0x%llx)", 2566 hostname, (u_longlong_t)hostid); 2567 return (SET_ERROR(EBADF)); 2568 } 2569 } 2570 2571 return (0); 2572 } 2573 2574 static int 2575 spa_ld_parse_config(spa_t *spa, spa_import_type_t type) 2576 { 2577 int error = 0; 2578 nvlist_t *nvtree, *nvl, *config = spa->spa_config; 2579 int parse; 2580 vdev_t *rvd; 2581 uint64_t pool_guid; 2582 char *comment; 2583 2584 /* 2585 * Versioning wasn't explicitly added to the label until later, so if 2586 * it's not present treat it as the initial version. 2587 */ 2588 if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION, 2589 &spa->spa_ubsync.ub_version) != 0) 2590 spa->spa_ubsync.ub_version = SPA_VERSION_INITIAL; 2591 2592 if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID, &pool_guid)) { 2593 spa_load_failed(spa, "invalid config provided: '%s' missing", 2594 ZPOOL_CONFIG_POOL_GUID); 2595 return (SET_ERROR(EINVAL)); 2596 } 2597 2598 /* 2599 * If we are doing an import, ensure that the pool is not already 2600 * imported by checking if its pool guid already exists in the 2601 * spa namespace. 2602 * 2603 * The only case that we allow an already imported pool to be 2604 * imported again, is when the pool is checkpointed and we want to 2605 * look at its checkpointed state from userland tools like zdb. 2606 */ 2607 #ifdef _KERNEL 2608 if ((spa->spa_load_state == SPA_LOAD_IMPORT || 2609 spa->spa_load_state == SPA_LOAD_TRYIMPORT) && 2610 spa_guid_exists(pool_guid, 0)) { 2611 #else 2612 if ((spa->spa_load_state == SPA_LOAD_IMPORT || 2613 spa->spa_load_state == SPA_LOAD_TRYIMPORT) && 2614 spa_guid_exists(pool_guid, 0) && 2615 !spa_importing_readonly_checkpoint(spa)) { 2616 #endif 2617 spa_load_failed(spa, "a pool with guid %llu is already open", 2618 (u_longlong_t)pool_guid); 2619 return (SET_ERROR(EEXIST)); 2620 } 2621 2622 spa->spa_config_guid = pool_guid; 2623 2624 nvlist_free(spa->spa_load_info); 2625 spa->spa_load_info = fnvlist_alloc(); 2626 2627 ASSERT(spa->spa_comment == NULL); 2628 if (nvlist_lookup_string(config, ZPOOL_CONFIG_COMMENT, &comment) == 0) 2629 spa->spa_comment = spa_strdup(comment); 2630 2631 (void) nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_TXG, 2632 &spa->spa_config_txg); 2633 2634 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_SPLIT, &nvl) == 0) 2635 spa->spa_config_splitting = fnvlist_dup(nvl); 2636 2637 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &nvtree)) { 2638 spa_load_failed(spa, "invalid config provided: '%s' missing", 2639 ZPOOL_CONFIG_VDEV_TREE); 2640 return (SET_ERROR(EINVAL)); 2641 } 2642 2643 /* 2644 * Create "The Godfather" zio to hold all async IOs 2645 */ 2646 spa->spa_async_zio_root = kmem_alloc(max_ncpus * sizeof (void *), 2647 KM_SLEEP); 2648 for (int i = 0; i < max_ncpus; i++) { 2649 spa->spa_async_zio_root[i] = zio_root(spa, NULL, NULL, 2650 ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE | 2651 ZIO_FLAG_GODFATHER); 2652 } 2653 2654 /* 2655 * Parse the configuration into a vdev tree. We explicitly set the 2656 * value that will be returned by spa_version() since parsing the 2657 * configuration requires knowing the version number. 2658 */ 2659 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 2660 parse = (type == SPA_IMPORT_EXISTING ? 
2661 VDEV_ALLOC_LOAD : VDEV_ALLOC_SPLIT); 2662 error = spa_config_parse(spa, &rvd, nvtree, NULL, 0, parse); 2663 spa_config_exit(spa, SCL_ALL, FTAG); 2664 2665 if (error != 0) { 2666 spa_load_failed(spa, "unable to parse config [error=%d]", 2667 error); 2668 return (error); 2669 } 2670 2671 ASSERT(spa->spa_root_vdev == rvd); 2672 ASSERT3U(spa->spa_min_ashift, >=, SPA_MINBLOCKSHIFT); 2673 ASSERT3U(spa->spa_max_ashift, <=, SPA_MAXBLOCKSHIFT); 2674 2675 if (type != SPA_IMPORT_ASSEMBLE) { 2676 ASSERT(spa_guid(spa) == pool_guid); 2677 } 2678 2679 return (0); 2680 } 2681 2682 /* 2683 * Recursively open all vdevs in the vdev tree. This function is called twice: 2684 * first with the untrusted config, then with the trusted config. 2685 */ 2686 static int 2687 spa_ld_open_vdevs(spa_t *spa) 2688 { 2689 int error = 0; 2690 2691 /* 2692 * spa_missing_tvds_allowed defines how many top-level vdevs can be 2693 * missing/unopenable for the root vdev to be still considered openable. 2694 */ 2695 if (spa->spa_trust_config) { 2696 spa->spa_missing_tvds_allowed = zfs_max_missing_tvds; 2697 } else if (spa->spa_config_source == SPA_CONFIG_SRC_CACHEFILE) { 2698 spa->spa_missing_tvds_allowed = zfs_max_missing_tvds_cachefile; 2699 } else if (spa->spa_config_source == SPA_CONFIG_SRC_SCAN) { 2700 spa->spa_missing_tvds_allowed = zfs_max_missing_tvds_scan; 2701 } else { 2702 spa->spa_missing_tvds_allowed = 0; 2703 } 2704 2705 spa->spa_missing_tvds_allowed = 2706 MAX(zfs_max_missing_tvds, spa->spa_missing_tvds_allowed); 2707 2708 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 2709 error = vdev_open(spa->spa_root_vdev); 2710 spa_config_exit(spa, SCL_ALL, FTAG); 2711 2712 if (spa->spa_missing_tvds != 0) { 2713 spa_load_note(spa, "vdev tree has %lld missing top-level " 2714 "vdevs.", (u_longlong_t)spa->spa_missing_tvds); 2715 if (spa->spa_trust_config && (spa->spa_mode & FWRITE)) { 2716 /* 2717 * Although theoretically we could allow users to open 2718 * incomplete pools in RW mode, we'd need to add a lot 2719 * of extra logic (e.g. adjust pool space to account 2720 * for missing vdevs). 2721 * This limitation also prevents users from accidentally 2722 * opening the pool in RW mode during data recovery and 2723 * damaging it further. 2724 */ 2725 spa_load_note(spa, "pools with missing top-level " 2726 "vdevs can only be opened in read-only mode."); 2727 error = SET_ERROR(ENXIO); 2728 } else { 2729 spa_load_note(spa, "current settings allow for maximum " 2730 "%lld missing top-level vdevs at this stage.", 2731 (u_longlong_t)spa->spa_missing_tvds_allowed); 2732 } 2733 } 2734 if (error != 0) { 2735 spa_load_failed(spa, "unable to open vdev tree [error=%d]", 2736 error); 2737 } 2738 if (spa->spa_missing_tvds != 0 || error != 0) 2739 vdev_dbgmsg_print_tree(spa->spa_root_vdev, 2); 2740 2741 return (error); 2742 } 2743 2744 /* 2745 * We need to validate the vdev labels against the configuration that 2746 * we have in hand. This function is called twice: first with an untrusted 2747 * config, then with a trusted config. The validation is more strict when the 2748 * config is trusted. 
2749 */ 2750 static int 2751 spa_ld_validate_vdevs(spa_t *spa) 2752 { 2753 int error = 0; 2754 vdev_t *rvd = spa->spa_root_vdev; 2755 2756 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 2757 error = vdev_validate(rvd); 2758 spa_config_exit(spa, SCL_ALL, FTAG); 2759 2760 if (error != 0) { 2761 spa_load_failed(spa, "vdev_validate failed [error=%d]", error); 2762 return (error); 2763 } 2764 2765 if (rvd->vdev_state <= VDEV_STATE_CANT_OPEN) { 2766 spa_load_failed(spa, "cannot open vdev tree after invalidating " 2767 "some vdevs"); 2768 vdev_dbgmsg_print_tree(rvd, 2); 2769 return (SET_ERROR(ENXIO)); 2770 } 2771 2772 return (0); 2773 } 2774 2775 static void 2776 spa_ld_select_uberblock_done(spa_t *spa, uberblock_t *ub) 2777 { 2778 spa->spa_state = POOL_STATE_ACTIVE; 2779 spa->spa_ubsync = spa->spa_uberblock; 2780 spa->spa_verify_min_txg = spa->spa_extreme_rewind ? 2781 TXG_INITIAL - 1 : spa_last_synced_txg(spa) - TXG_DEFER_SIZE - 1; 2782 spa->spa_first_txg = spa->spa_last_ubsync_txg ? 2783 spa->spa_last_ubsync_txg : spa_last_synced_txg(spa) + 1; 2784 spa->spa_claim_max_txg = spa->spa_first_txg; 2785 spa->spa_prev_software_version = ub->ub_software_version; 2786 } 2787 2788 static int 2789 spa_ld_select_uberblock(spa_t *spa, spa_import_type_t type) 2790 { 2791 vdev_t *rvd = spa->spa_root_vdev; 2792 nvlist_t *label; 2793 uberblock_t *ub = &spa->spa_uberblock; 2794 boolean_t activity_check = B_FALSE; 2795 2796 /* 2797 * If we are opening the checkpointed state of the pool by 2798 * rewinding to it, at this point we will have written the 2799 * checkpointed uberblock to the vdev labels, so searching 2800 * the labels will find the right uberblock. However, if 2801 * we are opening the checkpointed state read-only, we have 2802 * not modified the labels. Therefore, we must ignore the 2803 * labels and continue using the spa_uberblock that was set 2804 * by spa_ld_checkpoint_rewind. 2805 * 2806 * Note that it would be fine to ignore the labels when 2807 * rewinding (opening writeable) as well. However, if we 2808 * crash just after writing the labels, we will end up 2809 * searching the labels. Doing so in the common case means 2810 * that this code path gets exercised normally, rather than 2811 * just in the edge case. 2812 */ 2813 if (ub->ub_checkpoint_txg != 0 && 2814 spa_importing_readonly_checkpoint(spa)) { 2815 spa_ld_select_uberblock_done(spa, ub); 2816 return (0); 2817 } 2818 2819 /* 2820 * Find the best uberblock. 2821 */ 2822 vdev_uberblock_load(rvd, ub, &label); 2823 2824 /* 2825 * If we weren't able to find a single valid uberblock, return failure. 2826 */ 2827 if (ub->ub_txg == 0) { 2828 nvlist_free(label); 2829 spa_load_failed(spa, "no valid uberblock found"); 2830 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, ENXIO)); 2831 } 2832 2833 spa_load_note(spa, "using uberblock with txg=%llu", 2834 (u_longlong_t)ub->ub_txg); 2835 2836 /* 2837 * For pools which have the multihost property on determine if the 2838 * pool is truly inactive and can be safely imported. Prevent 2839 * hosts which don't have a hostid set from importing the pool. 
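* Depending on the outcome, spa_load_info is tagged below with MMP_STATE_NO_HOSTID (this host has no hostid), MMP_STATE_ACTIVE (the activity check decided another host holds the pool), or MMP_STATE_INACTIVE together with the txg that was checked, in which case the import may proceed.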
2840 */ 2841 activity_check = spa_activity_check_required(spa, ub, label, 2842 spa->spa_config); 2843 if (activity_check) { 2844 if (ub->ub_mmp_magic == MMP_MAGIC && ub->ub_mmp_delay && 2845 spa_get_hostid() == 0) { 2846 nvlist_free(label); 2847 fnvlist_add_uint64(spa->spa_load_info, 2848 ZPOOL_CONFIG_MMP_STATE, MMP_STATE_NO_HOSTID); 2849 return (spa_vdev_err(rvd, VDEV_AUX_ACTIVE, EREMOTEIO)); 2850 } 2851 2852 int error = spa_activity_check(spa, ub, spa->spa_config); 2853 if (error) { 2854 nvlist_free(label); 2855 return (error); 2856 } 2857 2858 fnvlist_add_uint64(spa->spa_load_info, 2859 ZPOOL_CONFIG_MMP_STATE, MMP_STATE_INACTIVE); 2860 fnvlist_add_uint64(spa->spa_load_info, 2861 ZPOOL_CONFIG_MMP_TXG, ub->ub_txg); 2862 } 2863 2864 /* 2865 * If the pool has an unsupported version we can't open it. 2866 */ 2867 if (!SPA_VERSION_IS_SUPPORTED(ub->ub_version)) { 2868 nvlist_free(label); 2869 spa_load_failed(spa, "version %llu is not supported", 2870 (u_longlong_t)ub->ub_version); 2871 return (spa_vdev_err(rvd, VDEV_AUX_VERSION_NEWER, ENOTSUP)); 2872 } 2873 2874 if (ub->ub_version >= SPA_VERSION_FEATURES) { 2875 nvlist_t *features; 2876 2877 /* 2878 * If we weren't able to find what's necessary for reading the 2879 * MOS in the label, return failure. 2880 */ 2881 if (label == NULL) { 2882 spa_load_failed(spa, "label config unavailable"); 2883 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, 2884 ENXIO)); 2885 } 2886 2887 if (nvlist_lookup_nvlist(label, ZPOOL_CONFIG_FEATURES_FOR_READ, 2888 &features) != 0) { 2889 nvlist_free(label); 2890 spa_load_failed(spa, "invalid label: '%s' missing", 2891 ZPOOL_CONFIG_FEATURES_FOR_READ); 2892 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, 2893 ENXIO)); 2894 } 2895 2896 /* 2897 * Update our in-core representation with the definitive values 2898 * from the label. 2899 */ 2900 nvlist_free(spa->spa_label_features); 2901 VERIFY(nvlist_dup(features, &spa->spa_label_features, 0) == 0); 2902 } 2903 2904 nvlist_free(label); 2905 2906 /* 2907 * Look through entries in the label nvlist's features_for_read. If 2908 * there is a feature listed there which we don't understand then we 2909 * cannot open a pool. 2910 */ 2911 if (ub->ub_version >= SPA_VERSION_FEATURES) { 2912 nvlist_t *unsup_feat; 2913 2914 VERIFY(nvlist_alloc(&unsup_feat, NV_UNIQUE_NAME, KM_SLEEP) == 2915 0); 2916 2917 for (nvpair_t *nvp = nvlist_next_nvpair(spa->spa_label_features, 2918 NULL); nvp != NULL; 2919 nvp = nvlist_next_nvpair(spa->spa_label_features, nvp)) { 2920 if (!zfeature_is_supported(nvpair_name(nvp))) { 2921 VERIFY(nvlist_add_string(unsup_feat, 2922 nvpair_name(nvp), "") == 0); 2923 } 2924 } 2925 2926 if (!nvlist_empty(unsup_feat)) { 2927 VERIFY(nvlist_add_nvlist(spa->spa_load_info, 2928 ZPOOL_CONFIG_UNSUP_FEAT, unsup_feat) == 0); 2929 nvlist_free(unsup_feat); 2930 spa_load_failed(spa, "some features are unsupported"); 2931 return (spa_vdev_err(rvd, VDEV_AUX_UNSUP_FEAT, 2932 ENOTSUP)); 2933 } 2934 2935 nvlist_free(unsup_feat); 2936 } 2937 2938 if (type != SPA_IMPORT_ASSEMBLE && spa->spa_config_splitting) { 2939 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 2940 spa_try_repair(spa, spa->spa_config); 2941 spa_config_exit(spa, SCL_ALL, FTAG); 2942 nvlist_free(spa->spa_config_splitting); 2943 spa->spa_config_splitting = NULL; 2944 } 2945 2946 /* 2947 * Initialize internal SPA structures. 
2948 */ 2949 spa_ld_select_uberblock_done(spa, ub); 2950 2951 return (0); 2952 } 2953 2954 static int 2955 spa_ld_open_rootbp(spa_t *spa) 2956 { 2957 int error = 0; 2958 vdev_t *rvd = spa->spa_root_vdev; 2959 2960 error = dsl_pool_init(spa, spa->spa_first_txg, &spa->spa_dsl_pool); 2961 if (error != 0) { 2962 spa_load_failed(spa, "unable to open rootbp in dsl_pool_init " 2963 "[error=%d]", error); 2964 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 2965 } 2966 spa->spa_meta_objset = spa->spa_dsl_pool->dp_meta_objset; 2967 2968 return (0); 2969 } 2970 2971 static int 2972 spa_ld_trusted_config(spa_t *spa, spa_import_type_t type, 2973 boolean_t reloading) 2974 { 2975 vdev_t *mrvd, *rvd = spa->spa_root_vdev; 2976 nvlist_t *nv, *mos_config, *policy; 2977 int error = 0, copy_error; 2978 uint64_t healthy_tvds, healthy_tvds_mos; 2979 uint64_t mos_config_txg; 2980 2981 if (spa_dir_prop(spa, DMU_POOL_CONFIG, &spa->spa_config_object, B_TRUE) 2982 != 0) 2983 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 2984 2985 /* 2986 * If we're assembling a pool from a split, the config provided is 2987 * already trusted so there is nothing to do. 2988 */ 2989 if (type == SPA_IMPORT_ASSEMBLE) 2990 return (0); 2991 2992 healthy_tvds = spa_healthy_core_tvds(spa); 2993 2994 if (load_nvlist(spa, spa->spa_config_object, &mos_config) 2995 != 0) { 2996 spa_load_failed(spa, "unable to retrieve MOS config"); 2997 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 2998 } 2999 3000 /* 3001 * If we are doing an open, pool owner wasn't verified yet, thus do 3002 * the verification here. 3003 */ 3004 if (spa->spa_load_state == SPA_LOAD_OPEN) { 3005 error = spa_verify_host(spa, mos_config); 3006 if (error != 0) { 3007 nvlist_free(mos_config); 3008 return (error); 3009 } 3010 } 3011 3012 nv = fnvlist_lookup_nvlist(mos_config, ZPOOL_CONFIG_VDEV_TREE); 3013 3014 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 3015 3016 /* 3017 * Build a new vdev tree from the trusted config 3018 */ 3019 VERIFY(spa_config_parse(spa, &mrvd, nv, NULL, 0, VDEV_ALLOC_LOAD) == 0); 3020 3021 /* 3022 * Vdev paths in the MOS may be obsolete. If the untrusted config was 3023 * obtained by scanning /dev/dsk, then it will have the right vdev 3024 * paths. We update the trusted MOS config with this information. 3025 * We first try to copy the paths with vdev_copy_path_strict, which 3026 * succeeds only when both configs have exactly the same vdev tree. 3027 * If that fails, we fall back to a more flexible method that has a 3028 * best effort policy. 3029 */ 3030 copy_error = vdev_copy_path_strict(rvd, mrvd); 3031 if (copy_error != 0 || spa_load_print_vdev_tree) { 3032 spa_load_note(spa, "provided vdev tree:"); 3033 vdev_dbgmsg_print_tree(rvd, 2); 3034 spa_load_note(spa, "MOS vdev tree:"); 3035 vdev_dbgmsg_print_tree(mrvd, 2); 3036 } 3037 if (copy_error != 0) { 3038 spa_load_note(spa, "vdev_copy_path_strict failed, falling " 3039 "back to vdev_copy_path_relaxed"); 3040 vdev_copy_path_relaxed(rvd, mrvd); 3041 } 3042 3043 vdev_close(rvd); 3044 vdev_free(rvd); 3045 spa->spa_root_vdev = mrvd; 3046 rvd = mrvd; 3047 spa_config_exit(spa, SCL_ALL, FTAG); 3048 3049 /* 3050 * We will use spa_config if we decide to reload the spa or if spa_load 3051 * fails and we rewind. We must thus regenerate the config using the 3052 * MOS information with the updated paths. ZPOOL_LOAD_POLICY is used to 3053 * pass settings on how to load the pool and is not stored in the MOS. 3054 * We copy it over to our new, trusted config. 
3055 */ 3056 mos_config_txg = fnvlist_lookup_uint64(mos_config, 3057 ZPOOL_CONFIG_POOL_TXG); 3058 nvlist_free(mos_config); 3059 mos_config = spa_config_generate(spa, NULL, mos_config_txg, B_FALSE); 3060 if (nvlist_lookup_nvlist(spa->spa_config, ZPOOL_LOAD_POLICY, 3061 &policy) == 0) 3062 fnvlist_add_nvlist(mos_config, ZPOOL_LOAD_POLICY, policy); 3063 spa_config_set(spa, mos_config); 3064 spa->spa_config_source = SPA_CONFIG_SRC_MOS; 3065 3066 /* 3067 * Now that we have the config from the MOS, we should be more strict 3068 * in checking blkptrs and can make assumptions about the consistency 3069 * of the vdev tree. spa_trust_config must be set to true before opening 3070 * vdevs in order for them to be writeable. 3071 */ 3072 spa->spa_trust_config = B_TRUE; 3073 3074 /* 3075 * Open and validate the new vdev tree 3076 */ 3077 error = spa_ld_open_vdevs(spa); 3078 if (error != 0) 3079 return (error); 3080 3081 error = spa_ld_validate_vdevs(spa); 3082 if (error != 0) 3083 return (error); 3084 3085 if (copy_error != 0 || spa_load_print_vdev_tree) { 3086 spa_load_note(spa, "final vdev tree:"); 3087 vdev_dbgmsg_print_tree(rvd, 2); 3088 } 3089 3090 if (spa->spa_load_state != SPA_LOAD_TRYIMPORT && 3091 !spa->spa_extreme_rewind && zfs_max_missing_tvds == 0) { 3092 /* 3093 * Sanity check to make sure that we are indeed loading the 3094 * latest uberblock. If we missed SPA_SYNC_MIN_VDEVS tvds 3095 * in the config provided and they happened to be the only ones 3096 * to have the latest uberblock, we could involuntarily perform 3097 * an extreme rewind. 3098 */ 3099 healthy_tvds_mos = spa_healthy_core_tvds(spa); 3100 if (healthy_tvds_mos - healthy_tvds >= 3101 SPA_SYNC_MIN_VDEVS) { 3102 spa_load_note(spa, "config provided misses too many " 3103 "top-level vdevs compared to MOS (%lld vs %lld). ", 3104 (u_longlong_t)healthy_tvds, 3105 (u_longlong_t)healthy_tvds_mos); 3106 spa_load_note(spa, "vdev tree:"); 3107 vdev_dbgmsg_print_tree(rvd, 2); 3108 if (reloading) { 3109 spa_load_failed(spa, "config was already " 3110 "provided from MOS. Aborting."); 3111 return (spa_vdev_err(rvd, 3112 VDEV_AUX_CORRUPT_DATA, EIO)); 3113 } 3114 spa_load_note(spa, "spa must be reloaded using MOS " 3115 "config"); 3116 return (SET_ERROR(EAGAIN)); 3117 } 3118 } 3119 3120 error = spa_check_for_missing_logs(spa); 3121 if (error != 0) 3122 return (spa_vdev_err(rvd, VDEV_AUX_BAD_GUID_SUM, ENXIO)); 3123 3124 if (rvd->vdev_guid_sum != spa->spa_uberblock.ub_guid_sum) { 3125 spa_load_failed(spa, "uberblock guid sum doesn't match MOS " 3126 "guid sum (%llu != %llu)", 3127 (u_longlong_t)spa->spa_uberblock.ub_guid_sum, 3128 (u_longlong_t)rvd->vdev_guid_sum); 3129 return (spa_vdev_err(rvd, VDEV_AUX_BAD_GUID_SUM, 3130 ENXIO)); 3131 } 3132 3133 return (0); 3134 } 3135 3136 static int 3137 spa_ld_open_indirect_vdev_metadata(spa_t *spa) 3138 { 3139 int error = 0; 3140 vdev_t *rvd = spa->spa_root_vdev; 3141 3142 /* 3143 * Everything that we read before spa_remove_init() must be stored 3144 * on concrete vdevs. Therefore we do this as early as possible. 3145 */ 3146 error = spa_remove_init(spa); 3147 if (error != 0) { 3148 spa_load_failed(spa, "spa_remove_init failed [error=%d]", 3149 error); 3150 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 3151 } 3152 3153 /* 3154 * Retrieve information needed to condense indirect vdev mappings.
3155 */ 3156 error = spa_condense_init(spa); 3157 if (error != 0) { 3158 spa_load_failed(spa, "spa_condense_init failed [error=%d]", 3159 error); 3160 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, error)); 3161 } 3162 3163 return (0); 3164 } 3165 3166 static int 3167 spa_ld_check_features(spa_t *spa, boolean_t *missing_feat_writep) 3168 { 3169 int error = 0; 3170 vdev_t *rvd = spa->spa_root_vdev; 3171 3172 if (spa_version(spa) >= SPA_VERSION_FEATURES) { 3173 boolean_t missing_feat_read = B_FALSE; 3174 nvlist_t *unsup_feat, *enabled_feat; 3175 3176 if (spa_dir_prop(spa, DMU_POOL_FEATURES_FOR_READ, 3177 &spa->spa_feat_for_read_obj, B_TRUE) != 0) { 3178 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 3179 } 3180 3181 if (spa_dir_prop(spa, DMU_POOL_FEATURES_FOR_WRITE, 3182 &spa->spa_feat_for_write_obj, B_TRUE) != 0) { 3183 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 3184 } 3185 3186 if (spa_dir_prop(spa, DMU_POOL_FEATURE_DESCRIPTIONS, 3187 &spa->spa_feat_desc_obj, B_TRUE) != 0) { 3188 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 3189 } 3190 3191 enabled_feat = fnvlist_alloc(); 3192 unsup_feat = fnvlist_alloc(); 3193 3194 if (!spa_features_check(spa, B_FALSE, 3195 unsup_feat, enabled_feat)) 3196 missing_feat_read = B_TRUE; 3197 3198 if (spa_writeable(spa) || 3199 spa->spa_load_state == SPA_LOAD_TRYIMPORT) { 3200 if (!spa_features_check(spa, B_TRUE, 3201 unsup_feat, enabled_feat)) { 3202 *missing_feat_writep = B_TRUE; 3203 } 3204 } 3205 3206 fnvlist_add_nvlist(spa->spa_load_info, 3207 ZPOOL_CONFIG_ENABLED_FEAT, enabled_feat); 3208 3209 if (!nvlist_empty(unsup_feat)) { 3210 fnvlist_add_nvlist(spa->spa_load_info, 3211 ZPOOL_CONFIG_UNSUP_FEAT, unsup_feat); 3212 } 3213 3214 fnvlist_free(enabled_feat); 3215 fnvlist_free(unsup_feat); 3216 3217 if (!missing_feat_read) { 3218 fnvlist_add_boolean(spa->spa_load_info, 3219 ZPOOL_CONFIG_CAN_RDONLY); 3220 } 3221 3222 /* 3223 * If the state is SPA_LOAD_TRYIMPORT, our objective is 3224 * twofold: to determine whether the pool is available for 3225 * import in read-write mode and (if it is not) whether the 3226 * pool is available for import in read-only mode. If the pool 3227 * is available for import in read-write mode, it is displayed 3228 * as available in userland; if it is not available for import 3229 * in read-only mode, it is displayed as unavailable in 3230 * userland. If the pool is available for import in read-only 3231 * mode but not read-write mode, it is displayed as unavailable 3232 * in userland with a special note that the pool is actually 3233 * available for open in read-only mode. 3234 * 3235 * As a result, if the state is SPA_LOAD_TRYIMPORT and we are 3236 * missing a feature for write, we must first determine whether 3237 * the pool can be opened read-only before returning to 3238 * userland in order to know whether to display the 3239 * abovementioned note. 3240 */ 3241 if (missing_feat_read || (*missing_feat_writep && 3242 spa_writeable(spa))) { 3243 spa_load_failed(spa, "pool uses unsupported features"); 3244 return (spa_vdev_err(rvd, VDEV_AUX_UNSUP_FEAT, 3245 ENOTSUP)); 3246 } 3247 3248 /* 3249 * Load refcounts for ZFS features from disk into an in-memory 3250 * cache during SPA initialization. 
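* A feature that has never been enabled on this pool reports ENOTSUP and is cached as SPA_FEATURE_DISABLED; any other lookup error is treated as corrupt MOS data and aborts the load.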
3251 */ 3252 for (spa_feature_t i = 0; i < SPA_FEATURES; i++) { 3253 uint64_t refcount; 3254 3255 error = feature_get_refcount_from_disk(spa, 3256 &spa_feature_table[i], &refcount); 3257 if (error == 0) { 3258 spa->spa_feat_refcount_cache[i] = refcount; 3259 } else if (error == ENOTSUP) { 3260 spa->spa_feat_refcount_cache[i] = 3261 SPA_FEATURE_DISABLED; 3262 } else { 3263 spa_load_failed(spa, "error getting refcount " 3264 "for feature %s [error=%d]", 3265 spa_feature_table[i].fi_guid, error); 3266 return (spa_vdev_err(rvd, 3267 VDEV_AUX_CORRUPT_DATA, EIO)); 3268 } 3269 } 3270 } 3271 3272 if (spa_feature_is_active(spa, SPA_FEATURE_ENABLED_TXG)) { 3273 if (spa_dir_prop(spa, DMU_POOL_FEATURE_ENABLED_TXG, 3274 &spa->spa_feat_enabled_txg_obj, B_TRUE) != 0) 3275 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 3276 } 3277 3278 /* 3279 * Encryption was added before bookmark_v2, even though bookmark_v2 3280 * is now a dependency. If this pool has encryption enabled without 3281 * bookmark_v2, trigger an errata message. 3282 */ 3283 if (spa_feature_is_enabled(spa, SPA_FEATURE_ENCRYPTION) && 3284 !spa_feature_is_enabled(spa, SPA_FEATURE_BOOKMARK_V2)) { 3285 spa->spa_errata = ZPOOL_ERRATA_ZOL_8308_ENCRYPTION; 3286 } 3287 3288 return (0); 3289 } 3290 3291 static int 3292 spa_ld_load_special_directories(spa_t *spa) 3293 { 3294 int error = 0; 3295 vdev_t *rvd = spa->spa_root_vdev; 3296 3297 spa->spa_is_initializing = B_TRUE; 3298 error = dsl_pool_open(spa->spa_dsl_pool); 3299 spa->spa_is_initializing = B_FALSE; 3300 if (error != 0) { 3301 spa_load_failed(spa, "dsl_pool_open failed [error=%d]", error); 3302 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 3303 } 3304 3305 return (0); 3306 } 3307 3308 static int 3309 spa_ld_get_props(spa_t *spa) 3310 { 3311 int error = 0; 3312 uint64_t obj; 3313 vdev_t *rvd = spa->spa_root_vdev; 3314 3315 /* Grab the secret checksum salt from the MOS. */ 3316 error = zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT, 3317 DMU_POOL_CHECKSUM_SALT, 1, 3318 sizeof (spa->spa_cksum_salt.zcs_bytes), 3319 spa->spa_cksum_salt.zcs_bytes); 3320 if (error == ENOENT) { 3321 /* Generate a new salt for subsequent use */ 3322 (void) random_get_pseudo_bytes(spa->spa_cksum_salt.zcs_bytes, 3323 sizeof (spa->spa_cksum_salt.zcs_bytes)); 3324 } else if (error != 0) { 3325 spa_load_failed(spa, "unable to retrieve checksum salt from " 3326 "MOS [error=%d]", error); 3327 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 3328 } 3329 3330 if (spa_dir_prop(spa, DMU_POOL_SYNC_BPOBJ, &obj, B_TRUE) != 0) 3331 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 3332 error = bpobj_open(&spa->spa_deferred_bpobj, spa->spa_meta_objset, obj); 3333 if (error != 0) { 3334 spa_load_failed(spa, "error opening deferred-frees bpobj " 3335 "[error=%d]", error); 3336 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 3337 } 3338 3339 /* 3340 * Load the bit that tells us to use the new accounting function 3341 * (raid-z deflation). If we have an older pool, this will not 3342 * be present. 3343 */ 3344 error = spa_dir_prop(spa, DMU_POOL_DEFLATE, &spa->spa_deflate, B_FALSE); 3345 if (error != 0 && error != ENOENT) 3346 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 3347 3348 error = spa_dir_prop(spa, DMU_POOL_CREATION_VERSION, 3349 &spa->spa_creation_version, B_FALSE); 3350 if (error != 0 && error != ENOENT) 3351 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 3352 3353 /* 3354 * Load the persistent error log. 
If we have an older pool, this will 3355 * not be present. 3356 */ 3357 error = spa_dir_prop(spa, DMU_POOL_ERRLOG_LAST, &spa->spa_errlog_last, 3358 B_FALSE); 3359 if (error != 0 && error != ENOENT) 3360 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 3361 3362 error = spa_dir_prop(spa, DMU_POOL_ERRLOG_SCRUB, 3363 &spa->spa_errlog_scrub, B_FALSE); 3364 if (error != 0 && error != ENOENT) 3365 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 3366 3367 /* 3368 * Load the history object. If we have an older pool, this 3369 * will not be present. 3370 */ 3371 error = spa_dir_prop(spa, DMU_POOL_HISTORY, &spa->spa_history, B_FALSE); 3372 if (error != 0 && error != ENOENT) 3373 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 3374 3375 /* 3376 * Load the per-vdev ZAP map. If we have an older pool, this will not 3377 * be present; in this case, defer its creation to a later time to 3378 * avoid dirtying the MOS this early / out of sync context. See 3379 * spa_sync_config_object. 3380 */ 3381 3382 /* The sentinel is only available in the MOS config. */ 3383 nvlist_t *mos_config; 3384 if (load_nvlist(spa, spa->spa_config_object, &mos_config) != 0) { 3385 spa_load_failed(spa, "unable to retrieve MOS config"); 3386 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 3387 } 3388 3389 error = spa_dir_prop(spa, DMU_POOL_VDEV_ZAP_MAP, 3390 &spa->spa_all_vdev_zaps, B_FALSE); 3391 3392 if (error == ENOENT) { 3393 VERIFY(!nvlist_exists(mos_config, 3394 ZPOOL_CONFIG_HAS_PER_VDEV_ZAPS)); 3395 spa->spa_avz_action = AVZ_ACTION_INITIALIZE; 3396 ASSERT0(vdev_count_verify_zaps(spa->spa_root_vdev)); 3397 } else if (error != 0) { 3398 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 3399 } else if (!nvlist_exists(mos_config, ZPOOL_CONFIG_HAS_PER_VDEV_ZAPS)) { 3400 /* 3401 * An older version of ZFS overwrote the sentinel value, so 3402 * we have orphaned per-vdev ZAPs in the MOS. Defer their 3403 * destruction to later; see spa_sync_config_object. 3404 */ 3405 spa->spa_avz_action = AVZ_ACTION_DESTROY; 3406 /* 3407 * We're assuming that no vdevs have had their ZAPs created 3408 * before this. Better be sure of it. 3409 */ 3410 ASSERT0(vdev_count_verify_zaps(spa->spa_root_vdev)); 3411 } 3412 nvlist_free(mos_config); 3413 3414 spa->spa_delegation = zpool_prop_default_numeric(ZPOOL_PROP_DELEGATION); 3415 3416 error = spa_dir_prop(spa, DMU_POOL_PROPS, &spa->spa_pool_props_object, 3417 B_FALSE); 3418 if (error && error != ENOENT) 3419 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 3420 3421 if (error == 0) { 3422 uint64_t autoreplace; 3423 3424 spa_prop_find(spa, ZPOOL_PROP_BOOTFS, &spa->spa_bootfs); 3425 spa_prop_find(spa, ZPOOL_PROP_AUTOREPLACE, &autoreplace); 3426 spa_prop_find(spa, ZPOOL_PROP_DELEGATION, &spa->spa_delegation); 3427 spa_prop_find(spa, ZPOOL_PROP_FAILUREMODE, &spa->spa_failmode); 3428 spa_prop_find(spa, ZPOOL_PROP_AUTOEXPAND, &spa->spa_autoexpand); 3429 spa_prop_find(spa, ZPOOL_PROP_MULTIHOST, &spa->spa_multihost); 3430 spa_prop_find(spa, ZPOOL_PROP_DEDUPDITTO, 3431 &spa->spa_dedup_ditto); 3432 3433 spa->spa_autoreplace = (autoreplace != 0); 3434 } 3435 3436 /* 3437 * If we are importing a pool with missing top-level vdevs, 3438 * we enforce that the pool doesn't panic or get suspended on 3439 * error since the likelihood of missing data is extremely high. 
3440 */ 3441 if (spa->spa_missing_tvds > 0 && 3442 spa->spa_failmode != ZIO_FAILURE_MODE_CONTINUE && 3443 spa->spa_load_state != SPA_LOAD_TRYIMPORT) { 3444 spa_load_note(spa, "forcing failmode to 'continue' " 3445 "as some top level vdevs are missing"); 3446 spa->spa_failmode = ZIO_FAILURE_MODE_CONTINUE; 3447 } 3448 3449 return (0); 3450 } 3451 3452 static int 3453 spa_ld_open_aux_vdevs(spa_t *spa, spa_import_type_t type) 3454 { 3455 int error = 0; 3456 vdev_t *rvd = spa->spa_root_vdev; 3457 3458 /* 3459 * If we're assembling the pool from the split-off vdevs of 3460 * an existing pool, we don't want to attach the spares & cache 3461 * devices. 3462 */ 3463 3464 /* 3465 * Load any hot spares for this pool. 3466 */ 3467 error = spa_dir_prop(spa, DMU_POOL_SPARES, &spa->spa_spares.sav_object, 3468 B_FALSE); 3469 if (error != 0 && error != ENOENT) 3470 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 3471 if (error == 0 && type != SPA_IMPORT_ASSEMBLE) { 3472 ASSERT(spa_version(spa) >= SPA_VERSION_SPARES); 3473 if (load_nvlist(spa, spa->spa_spares.sav_object, 3474 &spa->spa_spares.sav_config) != 0) { 3475 spa_load_failed(spa, "error loading spares nvlist"); 3476 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 3477 } 3478 3479 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 3480 spa_load_spares(spa); 3481 spa_config_exit(spa, SCL_ALL, FTAG); 3482 } else if (error == 0) { 3483 spa->spa_spares.sav_sync = B_TRUE; 3484 } 3485 3486 /* 3487 * Load any level 2 ARC devices for this pool. 3488 */ 3489 error = spa_dir_prop(spa, DMU_POOL_L2CACHE, 3490 &spa->spa_l2cache.sav_object, B_FALSE); 3491 if (error != 0 && error != ENOENT) 3492 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 3493 if (error == 0 && type != SPA_IMPORT_ASSEMBLE) { 3494 ASSERT(spa_version(spa) >= SPA_VERSION_L2CACHE); 3495 if (load_nvlist(spa, spa->spa_l2cache.sav_object, 3496 &spa->spa_l2cache.sav_config) != 0) { 3497 spa_load_failed(spa, "error loading l2cache nvlist"); 3498 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 3499 } 3500 3501 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 3502 spa_load_l2cache(spa); 3503 spa_config_exit(spa, SCL_ALL, FTAG); 3504 } else if (error == 0) { 3505 spa->spa_l2cache.sav_sync = B_TRUE; 3506 } 3507 3508 return (0); 3509 } 3510 3511 static int 3512 spa_ld_load_vdev_metadata(spa_t *spa) 3513 { 3514 int error = 0; 3515 vdev_t *rvd = spa->spa_root_vdev; 3516 3517 /* 3518 * If the 'multihost' property is set, then never allow a pool to 3519 * be imported when the system hostid is zero. The exception to 3520 * this rule is zdb which is always allowed to access pools. 3521 */ 3522 if (spa_multihost(spa) && spa_get_hostid() == 0 && 3523 (spa->spa_import_flags & ZFS_IMPORT_SKIP_MMP) == 0) { 3524 fnvlist_add_uint64(spa->spa_load_info, 3525 ZPOOL_CONFIG_MMP_STATE, MMP_STATE_NO_HOSTID); 3526 return (spa_vdev_err(rvd, VDEV_AUX_ACTIVE, EREMOTEIO)); 3527 } 3528 3529 /* 3530 * If the 'autoreplace' property is set, then post a resource notifying 3531 * the ZFS DE that it should not issue any faults for unopenable 3532 * devices. We also iterate over the vdevs, and post a sysevent for any 3533 * unopenable vdevs so that the normal autoreplace handler can take 3534 * over. 
3535 */ 3536 if (spa->spa_autoreplace && spa->spa_load_state != SPA_LOAD_TRYIMPORT) { 3537 spa_check_removed(spa->spa_root_vdev); 3538 /* 3539 * For the import case, this is done in spa_import(), because 3540 * at this point we're using the spare definitions from 3541 * the MOS config, not necessarily from the userland config. 3542 */ 3543 if (spa->spa_load_state != SPA_LOAD_IMPORT) { 3544 spa_aux_check_removed(&spa->spa_spares); 3545 spa_aux_check_removed(&spa->spa_l2cache); 3546 } 3547 } 3548 3549 /* 3550 * Load the vdev metadata such as metaslabs, DTLs, spacemap object, etc. 3551 */ 3552 error = vdev_load(rvd); 3553 if (error != 0) { 3554 spa_load_failed(spa, "vdev_load failed [error=%d]", error); 3555 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, error)); 3556 } 3557 3558 /* 3559 * Propagate the leaf DTLs we just loaded all the way up the vdev tree. 3560 */ 3561 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 3562 vdev_dtl_reassess(rvd, 0, 0, B_FALSE); 3563 spa_config_exit(spa, SCL_ALL, FTAG); 3564 3565 return (0); 3566 } 3567 3568 static int 3569 spa_ld_load_dedup_tables(spa_t *spa) 3570 { 3571 int error = 0; 3572 vdev_t *rvd = spa->spa_root_vdev; 3573 3574 error = ddt_load(spa); 3575 if (error != 0) { 3576 spa_load_failed(spa, "ddt_load failed [error=%d]", error); 3577 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 3578 } 3579 3580 return (0); 3581 } 3582 3583 static int 3584 spa_ld_verify_logs(spa_t *spa, spa_import_type_t type, char **ereport) 3585 { 3586 vdev_t *rvd = spa->spa_root_vdev; 3587 3588 if (type != SPA_IMPORT_ASSEMBLE && spa_writeable(spa)) { 3589 boolean_t missing = spa_check_logs(spa); 3590 if (missing) { 3591 if (spa->spa_missing_tvds != 0) { 3592 spa_load_note(spa, "spa_check_logs failed " 3593 "so dropping the logs"); 3594 } else { 3595 *ereport = FM_EREPORT_ZFS_LOG_REPLAY; 3596 spa_load_failed(spa, "spa_check_logs failed"); 3597 return (spa_vdev_err(rvd, VDEV_AUX_BAD_LOG, 3598 ENXIO)); 3599 } 3600 } 3601 } 3602 3603 return (0); 3604 } 3605 3606 static int 3607 spa_ld_verify_pool_data(spa_t *spa) 3608 { 3609 int error = 0; 3610 vdev_t *rvd = spa->spa_root_vdev; 3611 3612 /* 3613 * We've successfully opened the pool, verify that we're ready 3614 * to start pushing transactions. 3615 */ 3616 if (spa->spa_load_state != SPA_LOAD_TRYIMPORT) { 3617 error = spa_load_verify(spa); 3618 if (error != 0) { 3619 spa_load_failed(spa, "spa_load_verify failed " 3620 "[error=%d]", error); 3621 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, 3622 error)); 3623 } 3624 } 3625 3626 return (0); 3627 } 3628 3629 static void 3630 spa_ld_claim_log_blocks(spa_t *spa) 3631 { 3632 dmu_tx_t *tx; 3633 dsl_pool_t *dp = spa_get_dsl(spa); 3634 3635 /* 3636 * Claim log blocks that haven't been committed yet. 3637 * This must all happen in a single txg. 3638 * Note: spa_claim_max_txg is updated by spa_claim_notify(), 3639 * invoked from zil_claim_log_block()'s i/o done callback. 3640 * Price of rollback is that we abandon the log. 
3641 */ 3642 spa->spa_claiming = B_TRUE; 3643 3644 tx = dmu_tx_create_assigned(dp, spa_first_txg(spa)); 3645 (void) dmu_objset_find_dp(dp, dp->dp_root_dir_obj, 3646 zil_claim, tx, DS_FIND_CHILDREN); 3647 dmu_tx_commit(tx); 3648 3649 spa->spa_claiming = B_FALSE; 3650 3651 spa_set_log_state(spa, SPA_LOG_GOOD); 3652 } 3653 3654 static void 3655 spa_ld_check_for_config_update(spa_t *spa, uint64_t config_cache_txg, 3656 boolean_t update_config_cache) 3657 { 3658 vdev_t *rvd = spa->spa_root_vdev; 3659 int need_update = B_FALSE; 3660 3661 /* 3662 * If the config cache is stale, or we have uninitialized 3663 * metaslabs (see spa_vdev_add()), then update the config. 3664 * 3665 * If this is a verbatim import, trust the current 3666 * in-core spa_config and update the disk labels. 3667 */ 3668 if (update_config_cache || config_cache_txg != spa->spa_config_txg || 3669 spa->spa_load_state == SPA_LOAD_IMPORT || 3670 spa->spa_load_state == SPA_LOAD_RECOVER || 3671 (spa->spa_import_flags & ZFS_IMPORT_VERBATIM)) 3672 need_update = B_TRUE; 3673 3674 for (int c = 0; c < rvd->vdev_children; c++) 3675 if (rvd->vdev_child[c]->vdev_ms_array == 0) 3676 need_update = B_TRUE; 3677 3678 /* 3679 * Update the config cache asynchronously in case we're the 3680 * root pool, in which case the config cache isn't writable yet. 3681 */ 3682 if (need_update) 3683 spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE); 3684 } 3685 3686 static void 3687 spa_ld_prepare_for_reload(spa_t *spa) 3688 { 3689 int mode = spa->spa_mode; 3690 int async_suspended = spa->spa_async_suspended; 3691 3692 spa_unload(spa); 3693 spa_deactivate(spa); 3694 spa_activate(spa, mode); 3695 3696 /* 3697 * We save the value of spa_async_suspended as it gets reset to 0 by 3698 * spa_unload(). We want to restore it to the original value before 3699 * returning, as we might call spa_async_resume() later. 3700 */ 3701 spa->spa_async_suspended = async_suspended; 3702 } 3703 3704 static int 3705 spa_ld_read_checkpoint_txg(spa_t *spa) 3706 { 3707 uberblock_t checkpoint; 3708 int error = 0; 3709 3710 ASSERT0(spa->spa_checkpoint_txg); 3711 ASSERT(MUTEX_HELD(&spa_namespace_lock)); 3712 3713 error = zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT, 3714 DMU_POOL_ZPOOL_CHECKPOINT, sizeof (uint64_t), 3715 sizeof (uberblock_t) / sizeof (uint64_t), &checkpoint); 3716 3717 if (error == ENOENT) 3718 return (0); 3719 3720 if (error != 0) 3721 return (error); 3722 3723 ASSERT3U(checkpoint.ub_txg, !=, 0); 3724 ASSERT3U(checkpoint.ub_checkpoint_txg, !=, 0); 3725 ASSERT3U(checkpoint.ub_timestamp, !=, 0); 3726 spa->spa_checkpoint_txg = checkpoint.ub_txg; 3727 spa->spa_checkpoint_info.sci_timestamp = checkpoint.ub_timestamp; 3728 3729 return (0); 3730 } 3731 3732 static int 3733 spa_ld_mos_init(spa_t *spa, spa_import_type_t type) 3734 { 3735 int error = 0; 3736 3737 ASSERT(MUTEX_HELD(&spa_namespace_lock)); 3738 ASSERT(spa->spa_config_source != SPA_CONFIG_SRC_NONE); 3739 3740 /* 3741 * Never trust the config that is provided unless we are assembling 3742 * a pool following a split. 3743 * This means don't trust blkptrs and the vdev tree in general. This 3744 * also effectively puts the spa in read-only mode since 3745 * spa_writeable() checks for spa_trust_config to be true. 3746 * We will later load a trusted config from the MOS. 3747 */ 3748 if (type != SPA_IMPORT_ASSEMBLE) 3749 spa->spa_trust_config = B_FALSE; 3750 3751 /* 3752 * Parse the config provided to create a vdev tree.
3753 */ 3754 error = spa_ld_parse_config(spa, type); 3755 if (error != 0) 3756 return (error); 3757 3758 /* 3759 * Now that we have the vdev tree, try to open each vdev. This involves 3760 * opening the underlying physical device, retrieving its geometry and 3761 * probing the vdev with a dummy I/O. The state of each vdev will be set 3762 * based on the success of those operations. After this we'll be ready 3763 * to read from the vdevs. 3764 */ 3765 error = spa_ld_open_vdevs(spa); 3766 if (error != 0) 3767 return (error); 3768 3769 /* 3770 * Read the label of each vdev and make sure that the GUIDs stored 3771 * there match the GUIDs in the config provided. 3772 * If we're assembling a new pool that's been split off from an 3773 * existing pool, the labels haven't yet been updated so we skip 3774 * validation for now. 3775 */ 3776 if (type != SPA_IMPORT_ASSEMBLE) { 3777 error = spa_ld_validate_vdevs(spa); 3778 if (error != 0) 3779 return (error); 3780 } 3781 3782 /* 3783 * Read all vdev labels to find the best uberblock (i.e. latest, 3784 * unless spa_load_max_txg is set) and store it in spa_uberblock. We 3785 * get the list of features required to read blkptrs in the MOS from 3786 * the vdev label with the best uberblock and verify that our version 3787 * of zfs supports them all. 3788 */ 3789 error = spa_ld_select_uberblock(spa, type); 3790 if (error != 0) 3791 return (error); 3792 3793 /* 3794 * Pass that uberblock to the dsl_pool layer which will open the root 3795 * blkptr. This blkptr points to the latest version of the MOS and will 3796 * allow us to read its contents. 3797 */ 3798 error = spa_ld_open_rootbp(spa); 3799 if (error != 0) 3800 return (error); 3801 3802 return (0); 3803 } 3804 3805 static int 3806 spa_ld_checkpoint_rewind(spa_t *spa) 3807 { 3808 uberblock_t checkpoint; 3809 int error = 0; 3810 3811 ASSERT(MUTEX_HELD(&spa_namespace_lock)); 3812 ASSERT(spa->spa_import_flags & ZFS_IMPORT_CHECKPOINT); 3813 3814 error = zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT, 3815 DMU_POOL_ZPOOL_CHECKPOINT, sizeof (uint64_t), 3816 sizeof (uberblock_t) / sizeof (uint64_t), &checkpoint); 3817 3818 if (error != 0) { 3819 spa_load_failed(spa, "unable to retrieve checkpointed " 3820 "uberblock from the MOS config [error=%d]", error); 3821 3822 if (error == ENOENT) 3823 error = ZFS_ERR_NO_CHECKPOINT; 3824 3825 return (error); 3826 } 3827 3828 ASSERT3U(checkpoint.ub_txg, <, spa->spa_uberblock.ub_txg); 3829 ASSERT3U(checkpoint.ub_txg, ==, checkpoint.ub_checkpoint_txg); 3830 3831 /* 3832 * We need to update the txg and timestamp of the checkpointed 3833 * uberblock to be higher than the latest one. This ensures that 3834 * the checkpointed uberblock is selected if we were to close and 3835 * reopen the pool right after we've written it in the vdev labels. 3836 * (also see block comment in vdev_uberblock_compare) 3837 */ 3838 checkpoint.ub_txg = spa->spa_uberblock.ub_txg + 1; 3839 checkpoint.ub_timestamp = gethrestime_sec(); 3840 3841 /* 3842 * Set current uberblock to be the checkpointed uberblock. 3843 */ 3844 spa->spa_uberblock = checkpoint; 3845 3846 /* 3847 * If we are doing a normal rewind, then the pool is open for 3848 * writing and we sync the "updated" checkpointed uberblock to 3849 * disk. Once this is done, we've basically rewound the whole 3850 * pool and there is no way back. 3851 * 3852 * There are cases when we don't want to attempt and sync the 3853 * checkpointed uberblock to disk because we are opening a 3854 * pool as read-only. 
Specifically, verifying the checkpointed 3855 * state with zdb, and importing the checkpointed state to get 3856 * a "preview" of its content. 3857 */ 3858 if (spa_writeable(spa)) { 3859 vdev_t *rvd = spa->spa_root_vdev; 3860 3861 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 3862 vdev_t *svd[SPA_SYNC_MIN_VDEVS] = { NULL }; 3863 int svdcount = 0; 3864 int children = rvd->vdev_children; 3865 int c0 = spa_get_random(children); 3866 3867 for (int c = 0; c < children; c++) { 3868 vdev_t *vd = rvd->vdev_child[(c0 + c) % children]; 3869 3870 /* Stop when revisiting the first vdev */ 3871 if (c > 0 && svd[0] == vd) 3872 break; 3873 3874 if (vd->vdev_ms_array == 0 || vd->vdev_islog || 3875 !vdev_is_concrete(vd)) 3876 continue; 3877 3878 svd[svdcount++] = vd; 3879 if (svdcount == SPA_SYNC_MIN_VDEVS) 3880 break; 3881 } 3882 error = vdev_config_sync(svd, svdcount, spa->spa_first_txg); 3883 if (error == 0) 3884 spa->spa_last_synced_guid = rvd->vdev_guid; 3885 spa_config_exit(spa, SCL_ALL, FTAG); 3886 3887 if (error != 0) { 3888 spa_load_failed(spa, "failed to write checkpointed " 3889 "uberblock to the vdev labels [error=%d]", error); 3890 return (error); 3891 } 3892 } 3893 3894 return (0); 3895 } 3896 3897 static int 3898 spa_ld_mos_with_trusted_config(spa_t *spa, spa_import_type_t type, 3899 boolean_t *update_config_cache) 3900 { 3901 int error; 3902 3903 /* 3904 * Parse the config for pool, open and validate vdevs, 3905 * select an uberblock, and use that uberblock to open 3906 * the MOS. 3907 */ 3908 error = spa_ld_mos_init(spa, type); 3909 if (error != 0) 3910 return (error); 3911 3912 /* 3913 * Retrieve the trusted config stored in the MOS and use it to create 3914 * a new, exact version of the vdev tree, then reopen all vdevs. 3915 */ 3916 error = spa_ld_trusted_config(spa, type, B_FALSE); 3917 if (error == EAGAIN) { 3918 if (update_config_cache != NULL) 3919 *update_config_cache = B_TRUE; 3920 3921 /* 3922 * Redo the loading process with the trusted config if it is 3923 * too different from the untrusted config. 3924 */ 3925 spa_ld_prepare_for_reload(spa); 3926 spa_load_note(spa, "RELOADING"); 3927 error = spa_ld_mos_init(spa, type); 3928 if (error != 0) 3929 return (error); 3930 3931 error = spa_ld_trusted_config(spa, type, B_TRUE); 3932 if (error != 0) 3933 return (error); 3934 3935 } else if (error != 0) { 3936 return (error); 3937 } 3938 3939 return (0); 3940 } 3941 3942 /* 3943 * Load an existing storage pool, using the config provided. This config 3944 * describes which vdevs are part of the pool and is later validated against 3945 * partial configs present in each vdev's label and an entire copy of the 3946 * config stored in the MOS. 3947 */ 3948 static int 3949 spa_load_impl(spa_t *spa, spa_import_type_t type, char **ereport) 3950 { 3951 int error = 0; 3952 boolean_t missing_feat_write = B_FALSE; 3953 boolean_t checkpoint_rewind = 3954 (spa->spa_import_flags & ZFS_IMPORT_CHECKPOINT); 3955 boolean_t update_config_cache = B_FALSE; 3956 3957 ASSERT(MUTEX_HELD(&spa_namespace_lock)); 3958 ASSERT(spa->spa_config_source != SPA_CONFIG_SRC_NONE); 3959 3960 spa_load_note(spa, "LOADING"); 3961 3962 error = spa_ld_mos_with_trusted_config(spa, type, &update_config_cache); 3963 if (error != 0) 3964 return (error); 3965 3966 /* 3967 * If we are rewinding to the checkpoint then we need to repeat 3968 * everything we've done so far in this function but this time 3969 * selecting the checkpointed uberblock and using that to open 3970 * the MOS. 
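	 * (This path is requested from userland, e.g. by
	 * "zpool import --rewind-to-checkpoint", which sets
	 * ZFS_IMPORT_CHECKPOINT in spa_import_flags.)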
3971	 */
3972	if (checkpoint_rewind) {
3973		/*
3974		 * If we are rewinding to the checkpoint, update the config
3975		 * cache anyway.
3976		 */
3977		update_config_cache = B_TRUE;
3978
3979		/*
3980		 * Extract the checkpointed uberblock from the current MOS
3981		 * and use this as the pool's uberblock from now on. If the
3982		 * pool is imported as writeable we also write the checkpoint
3983		 * uberblock to the labels, making the rewind permanent.
3984		 */
3985		error = spa_ld_checkpoint_rewind(spa);
3986		if (error != 0)
3987			return (error);
3988
3989		/*
3990		 * Redo the loading process again with the
3991		 * checkpointed uberblock.
3992		 */
3993		spa_ld_prepare_for_reload(spa);
3994		spa_load_note(spa, "LOADING checkpointed uberblock");
3995		error = spa_ld_mos_with_trusted_config(spa, type, NULL);
3996		if (error != 0)
3997			return (error);
3998	}
3999
4000	/*
4001	 * Retrieve the checkpoint txg if the pool has a checkpoint.
4002	 */
4003	error = spa_ld_read_checkpoint_txg(spa);
4004	if (error != 0)
4005		return (error);
4006
4007	/*
4008	 * Retrieve the mapping of indirect vdevs. Those vdevs were removed
4009	 * from the pool and their contents were re-mapped to other vdevs. Note
4010	 * that everything that we read before this step must have been
4011	 * rewritten on concrete vdevs after the last device removal was
4012	 * initiated. Otherwise we could be reading from indirect vdevs before
4013	 * we have loaded their mappings.
4014	 */
4015	error = spa_ld_open_indirect_vdev_metadata(spa);
4016	if (error != 0)
4017		return (error);
4018
4019	/*
4020	 * Retrieve the full list of active features from the MOS and check if
4021	 * they are all supported.
4022	 */
4023	error = spa_ld_check_features(spa, &missing_feat_write);
4024	if (error != 0)
4025		return (error);
4026
4027	/*
4028	 * Load several special directories from the MOS needed by the dsl_pool
4029	 * layer.
4030	 */
4031	error = spa_ld_load_special_directories(spa);
4032	if (error != 0)
4033		return (error);
4034
4035	/*
4036	 * Retrieve pool properties from the MOS.
4037	 */
4038	error = spa_ld_get_props(spa);
4039	if (error != 0)
4040		return (error);
4041
4042	/*
4043	 * Retrieve the list of auxiliary devices - cache devices and spares -
4044	 * and open them.
4045	 */
4046	error = spa_ld_open_aux_vdevs(spa, type);
4047	if (error != 0)
4048		return (error);
4049
4050	/*
4051	 * Load the metadata for all vdevs. Also check if unopenable devices
4052	 * should be autoreplaced.
4053	 */
4054	error = spa_ld_load_vdev_metadata(spa);
4055	if (error != 0)
4056		return (error);
4057
4058	error = spa_ld_load_dedup_tables(spa);
4059	if (error != 0)
4060		return (error);
4061
4062	/*
4063	 * Verify the logs now to make sure we don't have any unexpected errors
4064	 * when we claim log blocks later.
4065	 */
4066	error = spa_ld_verify_logs(spa, type, ereport);
4067	if (error != 0)
4068		return (error);
4069
4070	if (missing_feat_write) {
4071		ASSERT(spa->spa_load_state == SPA_LOAD_TRYIMPORT);
4072
4073		/*
4074		 * At this point, we know that we can open the pool in
4075		 * read-only mode but not read-write mode. We now have enough
4076		 * information and can return to userland.
4077		 */
4078		return (spa_vdev_err(spa->spa_root_vdev, VDEV_AUX_UNSUP_FEAT,
4079		    ENOTSUP));
4080	}
4081
4082	/*
4083	 * Traverse the last txgs to make sure the pool was left off in a safe
4084	 * state. When performing an extreme rewind, we verify the whole pool,
4085	 * which can take a very long time.
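	 * The verification itself is done by spa_ld_verify_pool_data() and
	 * spa_load_verify(); "extreme rewind" corresponds to
	 * spa_extreme_rewind, which spa_load_best() sets when rewinding past
	 * the safe (TXG_DEFER_SIZE) window or when an explicit rewind txg
	 * was requested.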
4086 */ 4087 error = spa_ld_verify_pool_data(spa); 4088 if (error != 0) 4089 return (error); 4090 4091 /* 4092 * Calculate the deflated space for the pool. This must be done before 4093 * we write anything to the pool because we'd need to update the space 4094 * accounting using the deflated sizes. 4095 */ 4096 spa_update_dspace(spa); 4097 4098 /* 4099 * We have now retrieved all the information we needed to open the 4100 * pool. If we are importing the pool in read-write mode, a few 4101 * additional steps must be performed to finish the import. 4102 */ 4103 if (spa_writeable(spa) && (spa->spa_load_state == SPA_LOAD_RECOVER || 4104 spa->spa_load_max_txg == UINT64_MAX)) { 4105 uint64_t config_cache_txg = spa->spa_config_txg; 4106 4107 ASSERT(spa->spa_load_state != SPA_LOAD_TRYIMPORT); 4108 4109 /* 4110 * In case of a checkpoint rewind, log the original txg 4111 * of the checkpointed uberblock. 4112 */ 4113 if (checkpoint_rewind) { 4114 spa_history_log_internal(spa, "checkpoint rewind", 4115 NULL, "rewound state to txg=%llu", 4116 (u_longlong_t)spa->spa_uberblock.ub_checkpoint_txg); 4117 } 4118 4119 /* 4120 * Traverse the ZIL and claim all blocks. 4121 */ 4122 spa_ld_claim_log_blocks(spa); 4123 4124 /* 4125 * Kick-off the syncing thread. 4126 */ 4127 spa->spa_sync_on = B_TRUE; 4128 txg_sync_start(spa->spa_dsl_pool); 4129 mmp_thread_start(spa); 4130 4131 /* 4132 * Wait for all claims to sync. We sync up to the highest 4133 * claimed log block birth time so that claimed log blocks 4134 * don't appear to be from the future. spa_claim_max_txg 4135 * will have been set for us by ZIL traversal operations 4136 * performed above. 4137 */ 4138 txg_wait_synced(spa->spa_dsl_pool, spa->spa_claim_max_txg); 4139 4140 /* 4141 * Check if we need to request an update of the config. On the 4142 * next sync, we would update the config stored in vdev labels 4143 * and the cachefile (by default /etc/zfs/zpool.cache). 4144 */ 4145 spa_ld_check_for_config_update(spa, config_cache_txg, 4146 update_config_cache); 4147 4148 /* 4149 * Check all DTLs to see if anything needs resilvering. 4150 */ 4151 if (!dsl_scan_resilvering(spa->spa_dsl_pool) && 4152 vdev_resilver_needed(spa->spa_root_vdev, NULL, NULL)) 4153 spa_async_request(spa, SPA_ASYNC_RESILVER); 4154 4155 /* 4156 * Log the fact that we booted up (so that we can detect if 4157 * we rebooted in the middle of an operation). 4158 */ 4159 spa_history_log_version(spa, "open"); 4160 4161 spa_restart_removal(spa); 4162 spa_spawn_aux_threads(spa); 4163 4164 /* 4165 * Delete any inconsistent datasets. 4166 * 4167 * Note: 4168 * Since we may be issuing deletes for clones here, 4169 * we make sure to do so after we've spawned all the 4170 * auxiliary threads above (from which the livelist 4171 * deletion zthr is part of). 4172 */ 4173 (void) dmu_objset_find(spa_name(spa), 4174 dsl_destroy_inconsistent, NULL, DS_FIND_CHILDREN); 4175 4176 /* 4177 * Clean up any stale temporary dataset userrefs. 
4178 */ 4179 dsl_pool_clean_tmp_userrefs(spa->spa_dsl_pool); 4180 4181 spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER); 4182 vdev_initialize_restart(spa->spa_root_vdev); 4183 spa_config_exit(spa, SCL_CONFIG, FTAG); 4184 } 4185 4186 spa_load_note(spa, "LOADED"); 4187 4188 return (0); 4189 } 4190 4191 static int 4192 spa_load_retry(spa_t *spa, spa_load_state_t state) 4193 { 4194 int mode = spa->spa_mode; 4195 4196 spa_unload(spa); 4197 spa_deactivate(spa); 4198 4199 spa->spa_load_max_txg = spa->spa_uberblock.ub_txg - 1; 4200 4201 spa_activate(spa, mode); 4202 spa_async_suspend(spa); 4203 4204 spa_load_note(spa, "spa_load_retry: rewind, max txg: %llu", 4205 (u_longlong_t)spa->spa_load_max_txg); 4206 4207 return (spa_load(spa, state, SPA_IMPORT_EXISTING)); 4208 } 4209 4210 /* 4211 * If spa_load() fails this function will try loading prior txg's. If 4212 * 'state' is SPA_LOAD_RECOVER and one of these loads succeeds the pool 4213 * will be rewound to that txg. If 'state' is not SPA_LOAD_RECOVER this 4214 * function will not rewind the pool and will return the same error as 4215 * spa_load(). 4216 */ 4217 static int 4218 spa_load_best(spa_t *spa, spa_load_state_t state, uint64_t max_request, 4219 int rewind_flags) 4220 { 4221 nvlist_t *loadinfo = NULL; 4222 nvlist_t *config = NULL; 4223 int load_error, rewind_error; 4224 uint64_t safe_rewind_txg; 4225 uint64_t min_txg; 4226 4227 if (spa->spa_load_txg && state == SPA_LOAD_RECOVER) { 4228 spa->spa_load_max_txg = spa->spa_load_txg; 4229 spa_set_log_state(spa, SPA_LOG_CLEAR); 4230 } else { 4231 spa->spa_load_max_txg = max_request; 4232 if (max_request != UINT64_MAX) 4233 spa->spa_extreme_rewind = B_TRUE; 4234 } 4235 4236 load_error = rewind_error = spa_load(spa, state, SPA_IMPORT_EXISTING); 4237 if (load_error == 0) 4238 return (0); 4239 if (load_error == ZFS_ERR_NO_CHECKPOINT) { 4240 /* 4241 * When attempting checkpoint-rewind on a pool with no 4242 * checkpoint, we should not attempt to load uberblocks 4243 * from previous txgs when spa_load fails. 4244 */ 4245 ASSERT(spa->spa_import_flags & ZFS_IMPORT_CHECKPOINT); 4246 return (load_error); 4247 } 4248 4249 if (spa->spa_root_vdev != NULL) 4250 config = spa_config_generate(spa, NULL, -1ULL, B_TRUE); 4251 4252 spa->spa_last_ubsync_txg = spa->spa_uberblock.ub_txg; 4253 spa->spa_last_ubsync_txg_ts = spa->spa_uberblock.ub_timestamp; 4254 4255 if (rewind_flags & ZPOOL_NEVER_REWIND) { 4256 nvlist_free(config); 4257 return (load_error); 4258 } 4259 4260 if (state == SPA_LOAD_RECOVER) { 4261 /* Price of rolling back is discarding txgs, including log */ 4262 spa_set_log_state(spa, SPA_LOG_CLEAR); 4263 } else { 4264 /* 4265 * If we aren't rolling back save the load info from our first 4266 * import attempt so that we can restore it after attempting 4267 * to rewind. 4268 */ 4269 loadinfo = spa->spa_load_info; 4270 spa->spa_load_info = fnvlist_alloc(); 4271 } 4272 4273 spa->spa_load_max_txg = spa->spa_last_ubsync_txg; 4274 safe_rewind_txg = spa->spa_last_ubsync_txg - TXG_DEFER_SIZE; 4275 min_txg = (rewind_flags & ZPOOL_EXTREME_REWIND) ? 
4276 TXG_INITIAL : safe_rewind_txg; 4277 4278 /* 4279 * Continue as long as we're finding errors, we're still within 4280 * the acceptable rewind range, and we're still finding uberblocks 4281 */ 4282 while (rewind_error && spa->spa_uberblock.ub_txg >= min_txg && 4283 spa->spa_uberblock.ub_txg <= spa->spa_load_max_txg) { 4284 if (spa->spa_load_max_txg < safe_rewind_txg) 4285 spa->spa_extreme_rewind = B_TRUE; 4286 rewind_error = spa_load_retry(spa, state); 4287 } 4288 4289 spa->spa_extreme_rewind = B_FALSE; 4290 spa->spa_load_max_txg = UINT64_MAX; 4291 4292 if (config && (rewind_error || state != SPA_LOAD_RECOVER)) 4293 spa_config_set(spa, config); 4294 else 4295 nvlist_free(config); 4296 4297 if (state == SPA_LOAD_RECOVER) { 4298 ASSERT3P(loadinfo, ==, NULL); 4299 return (rewind_error); 4300 } else { 4301 /* Store the rewind info as part of the initial load info */ 4302 fnvlist_add_nvlist(loadinfo, ZPOOL_CONFIG_REWIND_INFO, 4303 spa->spa_load_info); 4304 4305 /* Restore the initial load info */ 4306 fnvlist_free(spa->spa_load_info); 4307 spa->spa_load_info = loadinfo; 4308 4309 return (load_error); 4310 } 4311 } 4312 4313 /* 4314 * Pool Open/Import 4315 * 4316 * The import case is identical to an open except that the configuration is sent 4317 * down from userland, instead of grabbed from the configuration cache. For the 4318 * case of an open, the pool configuration will exist in the 4319 * POOL_STATE_UNINITIALIZED state. 4320 * 4321 * The stats information (gen/count/ustats) is used to gather vdev statistics at 4322 * the same time open the pool, without having to keep around the spa_t in some 4323 * ambiguous state. 4324 */ 4325 static int 4326 spa_open_common(const char *pool, spa_t **spapp, void *tag, nvlist_t *nvpolicy, 4327 nvlist_t **config) 4328 { 4329 spa_t *spa; 4330 spa_load_state_t state = SPA_LOAD_OPEN; 4331 int error; 4332 int locked = B_FALSE; 4333 4334 *spapp = NULL; 4335 4336 /* 4337 * As disgusting as this is, we need to support recursive calls to this 4338 * function because dsl_dir_open() is called during spa_load(), and ends 4339 * up calling spa_open() again. The real fix is to figure out how to 4340 * avoid dsl_dir_open() calling this in the first place. 4341 */ 4342 if (mutex_owner(&spa_namespace_lock) != curthread) { 4343 mutex_enter(&spa_namespace_lock); 4344 locked = B_TRUE; 4345 } 4346 4347 if ((spa = spa_lookup(pool)) == NULL) { 4348 if (locked) 4349 mutex_exit(&spa_namespace_lock); 4350 return (SET_ERROR(ENOENT)); 4351 } 4352 4353 if (spa->spa_state == POOL_STATE_UNINITIALIZED) { 4354 zpool_load_policy_t policy; 4355 4356 zpool_get_load_policy(nvpolicy ? nvpolicy : spa->spa_config, 4357 &policy); 4358 if (policy.zlp_rewind & ZPOOL_DO_REWIND) 4359 state = SPA_LOAD_RECOVER; 4360 4361 spa_activate(spa, spa_mode_global); 4362 4363 if (state != SPA_LOAD_RECOVER) 4364 spa->spa_last_ubsync_txg = spa->spa_load_txg = 0; 4365 spa->spa_config_source = SPA_CONFIG_SRC_CACHEFILE; 4366 4367 zfs_dbgmsg("spa_open_common: opening %s", pool); 4368 error = spa_load_best(spa, state, policy.zlp_txg, 4369 policy.zlp_rewind); 4370 4371 if (error == EBADF) { 4372 /* 4373 * If vdev_validate() returns failure (indicated by 4374 * EBADF), it indicates that one of the vdevs indicates 4375 * that the pool has been exported or destroyed. If 4376 * this is the case, the config cache is out of sync and 4377 * we should remove the pool from the namespace. 
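			 * Below we unload and deactivate the spa, rewrite the
			 * config cache so that it no longer lists this pool,
			 * and drop the pool from the namespace, so the caller
			 * simply sees ENOENT until the pool is re-imported.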
4378 */ 4379 spa_unload(spa); 4380 spa_deactivate(spa); 4381 spa_write_cachefile(spa, B_TRUE, B_TRUE); 4382 spa_remove(spa); 4383 if (locked) 4384 mutex_exit(&spa_namespace_lock); 4385 return (SET_ERROR(ENOENT)); 4386 } 4387 4388 if (error) { 4389 /* 4390 * We can't open the pool, but we still have useful 4391 * information: the state of each vdev after the 4392 * attempted vdev_open(). Return this to the user. 4393 */ 4394 if (config != NULL && spa->spa_config) { 4395 VERIFY(nvlist_dup(spa->spa_config, config, 4396 KM_SLEEP) == 0); 4397 VERIFY(nvlist_add_nvlist(*config, 4398 ZPOOL_CONFIG_LOAD_INFO, 4399 spa->spa_load_info) == 0); 4400 } 4401 spa_unload(spa); 4402 spa_deactivate(spa); 4403 spa->spa_last_open_failed = error; 4404 if (locked) 4405 mutex_exit(&spa_namespace_lock); 4406 *spapp = NULL; 4407 return (error); 4408 } 4409 } 4410 4411 spa_open_ref(spa, tag); 4412 4413 if (config != NULL) 4414 *config = spa_config_generate(spa, NULL, -1ULL, B_TRUE); 4415 4416 /* 4417 * If we've recovered the pool, pass back any information we 4418 * gathered while doing the load. 4419 */ 4420 if (state == SPA_LOAD_RECOVER) { 4421 VERIFY(nvlist_add_nvlist(*config, ZPOOL_CONFIG_LOAD_INFO, 4422 spa->spa_load_info) == 0); 4423 } 4424 4425 if (locked) { 4426 spa->spa_last_open_failed = 0; 4427 spa->spa_last_ubsync_txg = 0; 4428 spa->spa_load_txg = 0; 4429 mutex_exit(&spa_namespace_lock); 4430 } 4431 4432 *spapp = spa; 4433 4434 return (0); 4435 } 4436 4437 int 4438 spa_open_rewind(const char *name, spa_t **spapp, void *tag, nvlist_t *policy, 4439 nvlist_t **config) 4440 { 4441 return (spa_open_common(name, spapp, tag, policy, config)); 4442 } 4443 4444 int 4445 spa_open(const char *name, spa_t **spapp, void *tag) 4446 { 4447 return (spa_open_common(name, spapp, tag, NULL, NULL)); 4448 } 4449 4450 /* 4451 * Lookup the given spa_t, incrementing the inject count in the process, 4452 * preventing it from being exported or destroyed. 4453 */ 4454 spa_t * 4455 spa_inject_addref(char *name) 4456 { 4457 spa_t *spa; 4458 4459 mutex_enter(&spa_namespace_lock); 4460 if ((spa = spa_lookup(name)) == NULL) { 4461 mutex_exit(&spa_namespace_lock); 4462 return (NULL); 4463 } 4464 spa->spa_inject_ref++; 4465 mutex_exit(&spa_namespace_lock); 4466 4467 return (spa); 4468 } 4469 4470 void 4471 spa_inject_delref(spa_t *spa) 4472 { 4473 mutex_enter(&spa_namespace_lock); 4474 spa->spa_inject_ref--; 4475 mutex_exit(&spa_namespace_lock); 4476 } 4477 4478 /* 4479 * Add spares device information to the nvlist. 4480 */ 4481 static void 4482 spa_add_spares(spa_t *spa, nvlist_t *config) 4483 { 4484 nvlist_t **spares; 4485 uint_t i, nspares; 4486 nvlist_t *nvroot; 4487 uint64_t guid; 4488 vdev_stat_t *vs; 4489 uint_t vsc; 4490 uint64_t pool; 4491 4492 ASSERT(spa_config_held(spa, SCL_CONFIG, RW_READER)); 4493 4494 if (spa->spa_spares.sav_count == 0) 4495 return; 4496 4497 VERIFY(nvlist_lookup_nvlist(config, 4498 ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0); 4499 VERIFY(nvlist_lookup_nvlist_array(spa->spa_spares.sav_config, 4500 ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0); 4501 if (nspares != 0) { 4502 VERIFY(nvlist_add_nvlist_array(nvroot, 4503 ZPOOL_CONFIG_SPARES, spares, nspares) == 0); 4504 VERIFY(nvlist_lookup_nvlist_array(nvroot, 4505 ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0); 4506 4507 /* 4508 * Go through and find any spares which have since been 4509 * repurposed as an active spare. If this is the case, update 4510 * their status appropriately. 
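		 * "Active" here means spa_spare_exists() reports the spare
		 * as currently in use by some pool (pool guid != 0); such
		 * spares are reported as CANT_OPEN/SPARED so that userland
		 * tools can show them as in use rather than available.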
4511 */ 4512 for (i = 0; i < nspares; i++) { 4513 VERIFY(nvlist_lookup_uint64(spares[i], 4514 ZPOOL_CONFIG_GUID, &guid) == 0); 4515 if (spa_spare_exists(guid, &pool, NULL) && 4516 pool != 0ULL) { 4517 VERIFY(nvlist_lookup_uint64_array( 4518 spares[i], ZPOOL_CONFIG_VDEV_STATS, 4519 (uint64_t **)&vs, &vsc) == 0); 4520 vs->vs_state = VDEV_STATE_CANT_OPEN; 4521 vs->vs_aux = VDEV_AUX_SPARED; 4522 } 4523 } 4524 } 4525 } 4526 4527 /* 4528 * Add l2cache device information to the nvlist, including vdev stats. 4529 */ 4530 static void 4531 spa_add_l2cache(spa_t *spa, nvlist_t *config) 4532 { 4533 nvlist_t **l2cache; 4534 uint_t i, j, nl2cache; 4535 nvlist_t *nvroot; 4536 uint64_t guid; 4537 vdev_t *vd; 4538 vdev_stat_t *vs; 4539 uint_t vsc; 4540 4541 ASSERT(spa_config_held(spa, SCL_CONFIG, RW_READER)); 4542 4543 if (spa->spa_l2cache.sav_count == 0) 4544 return; 4545 4546 VERIFY(nvlist_lookup_nvlist(config, 4547 ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0); 4548 VERIFY(nvlist_lookup_nvlist_array(spa->spa_l2cache.sav_config, 4549 ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0); 4550 if (nl2cache != 0) { 4551 VERIFY(nvlist_add_nvlist_array(nvroot, 4552 ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache) == 0); 4553 VERIFY(nvlist_lookup_nvlist_array(nvroot, 4554 ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0); 4555 4556 /* 4557 * Update level 2 cache device stats. 4558 */ 4559 4560 for (i = 0; i < nl2cache; i++) { 4561 VERIFY(nvlist_lookup_uint64(l2cache[i], 4562 ZPOOL_CONFIG_GUID, &guid) == 0); 4563 4564 vd = NULL; 4565 for (j = 0; j < spa->spa_l2cache.sav_count; j++) { 4566 if (guid == 4567 spa->spa_l2cache.sav_vdevs[j]->vdev_guid) { 4568 vd = spa->spa_l2cache.sav_vdevs[j]; 4569 break; 4570 } 4571 } 4572 ASSERT(vd != NULL); 4573 4574 VERIFY(nvlist_lookup_uint64_array(l2cache[i], 4575 ZPOOL_CONFIG_VDEV_STATS, (uint64_t **)&vs, &vsc) 4576 == 0); 4577 vdev_get_stats(vd, vs); 4578 } 4579 } 4580 } 4581 4582 static void 4583 spa_add_feature_stats(spa_t *spa, nvlist_t *config) 4584 { 4585 nvlist_t *features; 4586 zap_cursor_t zc; 4587 zap_attribute_t za; 4588 4589 ASSERT(spa_config_held(spa, SCL_CONFIG, RW_READER)); 4590 VERIFY(nvlist_alloc(&features, NV_UNIQUE_NAME, KM_SLEEP) == 0); 4591 4592 if (spa->spa_feat_for_read_obj != 0) { 4593 for (zap_cursor_init(&zc, spa->spa_meta_objset, 4594 spa->spa_feat_for_read_obj); 4595 zap_cursor_retrieve(&zc, &za) == 0; 4596 zap_cursor_advance(&zc)) { 4597 ASSERT(za.za_integer_length == sizeof (uint64_t) && 4598 za.za_num_integers == 1); 4599 VERIFY3U(0, ==, nvlist_add_uint64(features, za.za_name, 4600 za.za_first_integer)); 4601 } 4602 zap_cursor_fini(&zc); 4603 } 4604 4605 if (spa->spa_feat_for_write_obj != 0) { 4606 for (zap_cursor_init(&zc, spa->spa_meta_objset, 4607 spa->spa_feat_for_write_obj); 4608 zap_cursor_retrieve(&zc, &za) == 0; 4609 zap_cursor_advance(&zc)) { 4610 ASSERT(za.za_integer_length == sizeof (uint64_t) && 4611 za.za_num_integers == 1); 4612 VERIFY3U(0, ==, nvlist_add_uint64(features, za.za_name, 4613 za.za_first_integer)); 4614 } 4615 zap_cursor_fini(&zc); 4616 } 4617 4618 VERIFY(nvlist_add_nvlist(config, ZPOOL_CONFIG_FEATURE_STATS, 4619 features) == 0); 4620 nvlist_free(features); 4621 } 4622 4623 int 4624 spa_get_stats(const char *name, nvlist_t **config, 4625 char *altroot, size_t buflen) 4626 { 4627 int error; 4628 spa_t *spa; 4629 4630 *config = NULL; 4631 error = spa_open_common(name, &spa, FTAG, NULL, config); 4632 4633 if (spa != NULL) { 4634 /* 4635 * This still leaves a window of inconsistency where the spares 4636 * or l2cache devices could change and 
the config would be 4637 * self-inconsistent. 4638 */ 4639 spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER); 4640 4641 if (*config != NULL) { 4642 uint64_t loadtimes[2]; 4643 4644 loadtimes[0] = spa->spa_loaded_ts.tv_sec; 4645 loadtimes[1] = spa->spa_loaded_ts.tv_nsec; 4646 VERIFY(nvlist_add_uint64_array(*config, 4647 ZPOOL_CONFIG_LOADED_TIME, loadtimes, 2) == 0); 4648 4649 VERIFY(nvlist_add_uint64(*config, 4650 ZPOOL_CONFIG_ERRCOUNT, 4651 spa_get_errlog_size(spa)) == 0); 4652 4653 if (spa_suspended(spa)) { 4654 VERIFY(nvlist_add_uint64(*config, 4655 ZPOOL_CONFIG_SUSPENDED, 4656 spa->spa_failmode) == 0); 4657 VERIFY(nvlist_add_uint64(*config, 4658 ZPOOL_CONFIG_SUSPENDED_REASON, 4659 spa->spa_suspended) == 0); 4660 } 4661 4662 spa_add_spares(spa, *config); 4663 spa_add_l2cache(spa, *config); 4664 spa_add_feature_stats(spa, *config); 4665 } 4666 } 4667 4668 /* 4669 * We want to get the alternate root even for faulted pools, so we cheat 4670 * and call spa_lookup() directly. 4671 */ 4672 if (altroot) { 4673 if (spa == NULL) { 4674 mutex_enter(&spa_namespace_lock); 4675 spa = spa_lookup(name); 4676 if (spa) 4677 spa_altroot(spa, altroot, buflen); 4678 else 4679 altroot[0] = '\0'; 4680 spa = NULL; 4681 mutex_exit(&spa_namespace_lock); 4682 } else { 4683 spa_altroot(spa, altroot, buflen); 4684 } 4685 } 4686 4687 if (spa != NULL) { 4688 spa_config_exit(spa, SCL_CONFIG, FTAG); 4689 spa_close(spa, FTAG); 4690 } 4691 4692 return (error); 4693 } 4694 4695 /* 4696 * Validate that the auxiliary device array is well formed. We must have an 4697 * array of nvlists, each which describes a valid leaf vdev. If this is an 4698 * import (mode is VDEV_ALLOC_SPARE), then we allow corrupted spares to be 4699 * specified, as long as they are well-formed. 4700 */ 4701 static int 4702 spa_validate_aux_devs(spa_t *spa, nvlist_t *nvroot, uint64_t crtxg, int mode, 4703 spa_aux_vdev_t *sav, const char *config, uint64_t version, 4704 vdev_labeltype_t label) 4705 { 4706 nvlist_t **dev; 4707 uint_t i, ndev; 4708 vdev_t *vd; 4709 int error; 4710 4711 ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL); 4712 4713 /* 4714 * It's acceptable to have no devs specified. 4715 */ 4716 if (nvlist_lookup_nvlist_array(nvroot, config, &dev, &ndev) != 0) 4717 return (0); 4718 4719 if (ndev == 0) 4720 return (SET_ERROR(EINVAL)); 4721 4722 /* 4723 * Make sure the pool is formatted with a version that supports this 4724 * device type. 4725 */ 4726 if (spa_version(spa) < version) 4727 return (SET_ERROR(ENOTSUP)); 4728 4729 /* 4730 * Set the pending device list so we correctly handle device in-use 4731 * checking. 
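	 * Each candidate below is parsed, opened and labeled. In the import
	 * path (mode VDEV_ALLOC_SPARE or VDEV_ALLOC_L2CACHE) a per-device
	 * failure is tolerated and error is reset to 0, so a pool with a
	 * damaged aux device can still be imported. The pending list is
	 * cleared again at the "out" label once validation completes.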
4732	 */
4733	sav->sav_pending = dev;
4734	sav->sav_npending = ndev;
4735
4736	for (i = 0; i < ndev; i++) {
4737		if ((error = spa_config_parse(spa, &vd, dev[i], NULL, 0,
4738		    mode)) != 0)
4739			goto out;
4740
4741		if (!vd->vdev_ops->vdev_op_leaf) {
4742			vdev_free(vd);
4743			error = SET_ERROR(EINVAL);
4744			goto out;
4745		}
4746
4747		vd->vdev_top = vd;
4748
4749		if ((error = vdev_open(vd)) == 0 &&
4750		    (error = vdev_label_init(vd, crtxg, label)) == 0) {
4751			VERIFY(nvlist_add_uint64(dev[i], ZPOOL_CONFIG_GUID,
4752			    vd->vdev_guid) == 0);
4753		}
4754
4755		vdev_free(vd);
4756
4757		if (error &&
4758		    (mode != VDEV_ALLOC_SPARE && mode != VDEV_ALLOC_L2CACHE))
4759			goto out;
4760		else
4761			error = 0;
4762	}
4763
4764 out:
4765	sav->sav_pending = NULL;
4766	sav->sav_npending = 0;
4767	return (error);
4768 }
4769
4770 static int
4771 spa_validate_aux(spa_t *spa, nvlist_t *nvroot, uint64_t crtxg, int mode)
4772 {
4773	int error;
4774
4775	ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
4776
4777	if ((error = spa_validate_aux_devs(spa, nvroot, crtxg, mode,
4778	    &spa->spa_spares, ZPOOL_CONFIG_SPARES, SPA_VERSION_SPARES,
4779	    VDEV_LABEL_SPARE)) != 0) {
4780		return (error);
4781	}
4782
4783	return (spa_validate_aux_devs(spa, nvroot, crtxg, mode,
4784	    &spa->spa_l2cache, ZPOOL_CONFIG_L2CACHE, SPA_VERSION_L2CACHE,
4785	    VDEV_LABEL_L2CACHE));
4786 }
4787
4788 static void
4789 spa_set_aux_vdevs(spa_aux_vdev_t *sav, nvlist_t **devs, int ndevs,
4790     const char *config)
4791 {
4792	int i;
4793
4794	if (sav->sav_config != NULL) {
4795		nvlist_t **olddevs;
4796		uint_t oldndevs;
4797		nvlist_t **newdevs;
4798
4799		/*
4800		 * Generate a new dev list by concatenating the new devs
4801		 * with the current dev list.
4802		 */
4803		VERIFY(nvlist_lookup_nvlist_array(sav->sav_config, config,
4804		    &olddevs, &oldndevs) == 0);
4805
4806		newdevs = kmem_alloc(sizeof (void *) *
4807		    (ndevs + oldndevs), KM_SLEEP);
4808		for (i = 0; i < oldndevs; i++)
4809			VERIFY(nvlist_dup(olddevs[i], &newdevs[i],
4810			    KM_SLEEP) == 0);
4811		for (i = 0; i < ndevs; i++)
4812			VERIFY(nvlist_dup(devs[i], &newdevs[i + oldndevs],
4813			    KM_SLEEP) == 0);
4814
4815		VERIFY(nvlist_remove(sav->sav_config, config,
4816		    DATA_TYPE_NVLIST_ARRAY) == 0);
4817
4818		VERIFY(nvlist_add_nvlist_array(sav->sav_config,
4819		    config, newdevs, ndevs + oldndevs) == 0);
4820		for (i = 0; i < oldndevs + ndevs; i++)
4821			nvlist_free(newdevs[i]);
4822		kmem_free(newdevs, (oldndevs + ndevs) * sizeof (void *));
4823	} else {
4824		/*
4825		 * Generate a new dev list.
4826		 */
4827		VERIFY(nvlist_alloc(&sav->sav_config, NV_UNIQUE_NAME,
4828		    KM_SLEEP) == 0);
4829		VERIFY(nvlist_add_nvlist_array(sav->sav_config, config,
4830		    devs, ndevs) == 0);
4831	}
4832 }
4833
4834 /*
4835  * Stop and drop level 2 ARC devices
4836  */
4837 void
4838 spa_l2cache_drop(spa_t *spa)
4839 {
4840	vdev_t *vd;
4841	int i;
4842	spa_aux_vdev_t *sav = &spa->spa_l2cache;
4843
4844	for (i = 0; i < sav->sav_count; i++) {
4845		uint64_t pool;
4846
4847		vd = sav->sav_vdevs[i];
4848		ASSERT(vd != NULL);
4849
4850		if (spa_l2cache_exists(vd->vdev_guid, &pool) &&
4851		    pool != 0ULL && l2arc_vdev_present(vd))
4852			l2arc_remove_vdev(vd);
4853	}
4854 }
4855
4856 /*
4857  * Verify encryption parameters for spa creation. If we are encrypting, we must
4858  * have the encryption feature flag enabled.
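 * A minimal illustration, assuming the usual userland behavior of passing
 * enabled features to spa_create() as "feature@..." properties: a request
 * equivalent to
 *
 *	zpool create -d -O encryption=on tank c0t0d0
 *
 * arrives here with has_encryption == B_FALSE and is rejected with ENOTSUP,
 * whereas adding -o feature@encryption=enabled (or simply not using -d)
 * satisfies this check.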
4859 */ 4860 static int 4861 spa_create_check_encryption_params(dsl_crypto_params_t *dcp, 4862 boolean_t has_encryption) 4863 { 4864 if (dcp->cp_crypt != ZIO_CRYPT_OFF && 4865 dcp->cp_crypt != ZIO_CRYPT_INHERIT && 4866 !has_encryption) 4867 return (SET_ERROR(ENOTSUP)); 4868 4869 return (dmu_objset_create_crypt_check(NULL, dcp, NULL)); 4870 } 4871 4872 /* 4873 * Pool Creation 4874 */ 4875 int 4876 spa_create(const char *pool, nvlist_t *nvroot, nvlist_t *props, 4877 nvlist_t *zplprops, dsl_crypto_params_t *dcp) 4878 { 4879 spa_t *spa; 4880 char *altroot = NULL; 4881 vdev_t *rvd; 4882 dsl_pool_t *dp; 4883 dmu_tx_t *tx; 4884 int error = 0; 4885 uint64_t txg = TXG_INITIAL; 4886 nvlist_t **spares, **l2cache; 4887 uint_t nspares, nl2cache; 4888 uint64_t version, obj; 4889 boolean_t has_features; 4890 char *poolname; 4891 nvlist_t *nvl; 4892 boolean_t has_encryption; 4893 spa_feature_t feat; 4894 char *feat_name; 4895 4896 if (props == NULL || 4897 nvlist_lookup_string(props, 4898 zpool_prop_to_name(ZPOOL_PROP_TNAME), &poolname) != 0) 4899 poolname = (char *)pool; 4900 4901 /* 4902 * If this pool already exists, return failure. 4903 */ 4904 mutex_enter(&spa_namespace_lock); 4905 if (spa_lookup(poolname) != NULL) { 4906 mutex_exit(&spa_namespace_lock); 4907 return (SET_ERROR(EEXIST)); 4908 } 4909 4910 /* 4911 * Allocate a new spa_t structure. 4912 */ 4913 nvl = fnvlist_alloc(); 4914 fnvlist_add_string(nvl, ZPOOL_CONFIG_POOL_NAME, pool); 4915 (void) nvlist_lookup_string(props, 4916 zpool_prop_to_name(ZPOOL_PROP_ALTROOT), &altroot); 4917 spa = spa_add(poolname, nvl, altroot); 4918 fnvlist_free(nvl); 4919 spa_activate(spa, spa_mode_global); 4920 4921 if (props && (error = spa_prop_validate(spa, props))) { 4922 spa_deactivate(spa); 4923 spa_remove(spa); 4924 mutex_exit(&spa_namespace_lock); 4925 return (error); 4926 } 4927 4928 /* 4929 * Temporary pool names should never be written to disk. 
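	 * (A temporary name arrives via the ZPOOL_PROP_TNAME property looked
	 * up above, e.g. from "zpool create -t tmpname pool ...";
	 * ZFS_IMPORT_TEMP_NAME marks the pool so that only the real name is
	 * ever synced out.)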
4930 */ 4931 if (poolname != pool) 4932 spa->spa_import_flags |= ZFS_IMPORT_TEMP_NAME; 4933 4934 has_features = B_FALSE; 4935 has_encryption = B_FALSE; 4936 for (nvpair_t *elem = nvlist_next_nvpair(props, NULL); 4937 elem != NULL; elem = nvlist_next_nvpair(props, elem)) { 4938 if (zpool_prop_feature(nvpair_name(elem))) { 4939 has_features = B_TRUE; 4940 feat_name = strchr(nvpair_name(elem), '@') + 1; 4941 VERIFY0(zfeature_lookup_name(feat_name, &feat)); 4942 if (feat == SPA_FEATURE_ENCRYPTION) 4943 has_encryption = B_TRUE; 4944 } 4945 } 4946 4947 /* verify encryption params, if they were provided */ 4948 if (dcp != NULL) { 4949 error = spa_create_check_encryption_params(dcp, has_encryption); 4950 if (error != 0) { 4951 spa_deactivate(spa); 4952 spa_remove(spa); 4953 mutex_exit(&spa_namespace_lock); 4954 return (error); 4955 } 4956 } 4957 4958 if (has_features || nvlist_lookup_uint64(props, 4959 zpool_prop_to_name(ZPOOL_PROP_VERSION), &version) != 0) { 4960 version = SPA_VERSION; 4961 } 4962 ASSERT(SPA_VERSION_IS_SUPPORTED(version)); 4963 4964 spa->spa_first_txg = txg; 4965 spa->spa_uberblock.ub_txg = txg - 1; 4966 spa->spa_uberblock.ub_version = version; 4967 spa->spa_ubsync = spa->spa_uberblock; 4968 spa->spa_load_state = SPA_LOAD_CREATE; 4969 spa->spa_removing_phys.sr_state = DSS_NONE; 4970 spa->spa_removing_phys.sr_removing_vdev = -1; 4971 spa->spa_removing_phys.sr_prev_indirect_vdev = -1; 4972 spa->spa_indirect_vdevs_loaded = B_TRUE; 4973 4974 /* 4975 * Create "The Godfather" zio to hold all async IOs 4976 */ 4977 spa->spa_async_zio_root = kmem_alloc(max_ncpus * sizeof (void *), 4978 KM_SLEEP); 4979 for (int i = 0; i < max_ncpus; i++) { 4980 spa->spa_async_zio_root[i] = zio_root(spa, NULL, NULL, 4981 ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE | 4982 ZIO_FLAG_GODFATHER); 4983 } 4984 4985 /* 4986 * Create the root vdev. 4987 */ 4988 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 4989 4990 error = spa_config_parse(spa, &rvd, nvroot, NULL, 0, VDEV_ALLOC_ADD); 4991 4992 ASSERT(error != 0 || rvd != NULL); 4993 ASSERT(error != 0 || spa->spa_root_vdev == rvd); 4994 4995 if (error == 0 && !zfs_allocatable_devs(nvroot)) 4996 error = SET_ERROR(EINVAL); 4997 4998 if (error == 0 && 4999 (error = vdev_create(rvd, txg, B_FALSE)) == 0 && 5000 (error = spa_validate_aux(spa, nvroot, txg, 5001 VDEV_ALLOC_ADD)) == 0) { 5002 /* 5003 * instantiate the metaslab groups (this will dirty the vdevs) 5004 * we can no longer error exit past this point 5005 */ 5006 for (int c = 0; error == 0 && c < rvd->vdev_children; c++) { 5007 vdev_t *vd = rvd->vdev_child[c]; 5008 5009 vdev_metaslab_set_size(vd); 5010 vdev_expand(vd, txg); 5011 } 5012 } 5013 5014 spa_config_exit(spa, SCL_ALL, FTAG); 5015 5016 if (error != 0) { 5017 spa_unload(spa); 5018 spa_deactivate(spa); 5019 spa_remove(spa); 5020 mutex_exit(&spa_namespace_lock); 5021 return (error); 5022 } 5023 5024 /* 5025 * Get the list of spares, if specified. 5026 */ 5027 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES, 5028 &spares, &nspares) == 0) { 5029 VERIFY(nvlist_alloc(&spa->spa_spares.sav_config, NV_UNIQUE_NAME, 5030 KM_SLEEP) == 0); 5031 VERIFY(nvlist_add_nvlist_array(spa->spa_spares.sav_config, 5032 ZPOOL_CONFIG_SPARES, spares, nspares) == 0); 5033 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 5034 spa_load_spares(spa); 5035 spa_config_exit(spa, SCL_ALL, FTAG); 5036 spa->spa_spares.sav_sync = B_TRUE; 5037 } 5038 5039 /* 5040 * Get the list of level 2 cache devices, if specified. 
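	 * Roughly, the nvroot supplied by userland looks like:
	 *
	 *	ZPOOL_CONFIG_CHILDREN	array of top-level vdev nvlists
	 *	ZPOOL_CONFIG_SPARES	array of leaf nvlists (optional)
	 *	ZPOOL_CONFIG_L2CACHE	array of leaf nvlists (optional)
	 *
	 * e.g. "zpool create tank mirror d1 d2 spare s1 cache c1" supplies
	 * all three.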
5041 */ 5042 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE, 5043 &l2cache, &nl2cache) == 0) { 5044 VERIFY(nvlist_alloc(&spa->spa_l2cache.sav_config, 5045 NV_UNIQUE_NAME, KM_SLEEP) == 0); 5046 VERIFY(nvlist_add_nvlist_array(spa->spa_l2cache.sav_config, 5047 ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache) == 0); 5048 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 5049 spa_load_l2cache(spa); 5050 spa_config_exit(spa, SCL_ALL, FTAG); 5051 spa->spa_l2cache.sav_sync = B_TRUE; 5052 } 5053 5054 spa->spa_is_initializing = B_TRUE; 5055 spa->spa_dsl_pool = dp = dsl_pool_create(spa, zplprops, dcp, txg); 5056 spa->spa_is_initializing = B_FALSE; 5057 5058 /* 5059 * Create DDTs (dedup tables). 5060 */ 5061 ddt_create(spa); 5062 5063 spa_update_dspace(spa); 5064 5065 tx = dmu_tx_create_assigned(dp, txg); 5066 5067 /* 5068 * Create the pool config object. 5069 */ 5070 spa->spa_config_object = dmu_object_alloc(spa->spa_meta_objset, 5071 DMU_OT_PACKED_NVLIST, SPA_CONFIG_BLOCKSIZE, 5072 DMU_OT_PACKED_NVLIST_SIZE, sizeof (uint64_t), tx); 5073 5074 if (zap_add(spa->spa_meta_objset, 5075 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_CONFIG, 5076 sizeof (uint64_t), 1, &spa->spa_config_object, tx) != 0) { 5077 cmn_err(CE_PANIC, "failed to add pool config"); 5078 } 5079 5080 if (zap_add(spa->spa_meta_objset, 5081 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_CREATION_VERSION, 5082 sizeof (uint64_t), 1, &version, tx) != 0) { 5083 cmn_err(CE_PANIC, "failed to add pool version"); 5084 } 5085 5086 /* Newly created pools with the right version are always deflated. */ 5087 if (version >= SPA_VERSION_RAIDZ_DEFLATE) { 5088 spa->spa_deflate = TRUE; 5089 if (zap_add(spa->spa_meta_objset, 5090 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_DEFLATE, 5091 sizeof (uint64_t), 1, &spa->spa_deflate, tx) != 0) { 5092 cmn_err(CE_PANIC, "failed to add deflate"); 5093 } 5094 } 5095 5096 /* 5097 * Create the deferred-free bpobj. Turn off compression 5098 * because sync-to-convergence takes longer if the blocksize 5099 * keeps changing. 5100 */ 5101 obj = bpobj_alloc(spa->spa_meta_objset, 1 << 14, tx); 5102 dmu_object_set_compress(spa->spa_meta_objset, obj, 5103 ZIO_COMPRESS_OFF, tx); 5104 if (zap_add(spa->spa_meta_objset, 5105 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_SYNC_BPOBJ, 5106 sizeof (uint64_t), 1, &obj, tx) != 0) { 5107 cmn_err(CE_PANIC, "failed to add bpobj"); 5108 } 5109 VERIFY3U(0, ==, bpobj_open(&spa->spa_deferred_bpobj, 5110 spa->spa_meta_objset, obj)); 5111 5112 /* 5113 * Create the pool's history object. 5114 */ 5115 if (version >= SPA_VERSION_ZPOOL_HISTORY) 5116 spa_history_create_obj(spa, tx); 5117 5118 /* 5119 * Generate some random noise for salted checksums to operate on. 5120 */ 5121 (void) random_get_pseudo_bytes(spa->spa_cksum_salt.zcs_bytes, 5122 sizeof (spa->spa_cksum_salt.zcs_bytes)); 5123 5124 /* 5125 * Set pool properties. 
5126 */ 5127 spa->spa_bootfs = zpool_prop_default_numeric(ZPOOL_PROP_BOOTFS); 5128 spa->spa_delegation = zpool_prop_default_numeric(ZPOOL_PROP_DELEGATION); 5129 spa->spa_failmode = zpool_prop_default_numeric(ZPOOL_PROP_FAILUREMODE); 5130 spa->spa_autoexpand = zpool_prop_default_numeric(ZPOOL_PROP_AUTOEXPAND); 5131 spa->spa_multihost = zpool_prop_default_numeric(ZPOOL_PROP_MULTIHOST); 5132 5133 if (props != NULL) { 5134 spa_configfile_set(spa, props, B_FALSE); 5135 spa_sync_props(props, tx); 5136 } 5137 5138 dmu_tx_commit(tx); 5139 5140 spa->spa_sync_on = B_TRUE; 5141 txg_sync_start(spa->spa_dsl_pool); 5142 mmp_thread_start(spa); 5143 5144 /* 5145 * We explicitly wait for the first transaction to complete so that our 5146 * bean counters are appropriately updated. 5147 */ 5148 txg_wait_synced(spa->spa_dsl_pool, txg); 5149 5150 spa_spawn_aux_threads(spa); 5151 5152 spa_write_cachefile(spa, B_FALSE, B_TRUE); 5153 spa_event_notify(spa, NULL, NULL, ESC_ZFS_POOL_CREATE); 5154 5155 spa_history_log_version(spa, "create"); 5156 5157 /* 5158 * Don't count references from objsets that are already closed 5159 * and are making their way through the eviction process. 5160 */ 5161 spa_evicting_os_wait(spa); 5162 spa->spa_minref = zfs_refcount_count(&spa->spa_refcount); 5163 spa->spa_load_state = SPA_LOAD_NONE; 5164 5165 mutex_exit(&spa_namespace_lock); 5166 5167 return (0); 5168 } 5169 5170 #ifdef _KERNEL 5171 /* 5172 * Get the root pool information from the root disk, then import the root pool 5173 * during the system boot up time. 5174 */ 5175 extern int vdev_disk_read_rootlabel(char *, char *, nvlist_t **); 5176 5177 static nvlist_t * 5178 spa_generate_rootconf(char *devpath, char *devid, uint64_t *guid) 5179 { 5180 nvlist_t *config; 5181 nvlist_t *nvtop, *nvroot; 5182 uint64_t pgid; 5183 5184 if (vdev_disk_read_rootlabel(devpath, devid, &config) != 0) 5185 return (NULL); 5186 5187 /* 5188 * Add this top-level vdev to the child array. 5189 */ 5190 VERIFY(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, 5191 &nvtop) == 0); 5192 VERIFY(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID, 5193 &pgid) == 0); 5194 VERIFY(nvlist_lookup_uint64(config, ZPOOL_CONFIG_GUID, guid) == 0); 5195 5196 /* 5197 * Put this pool's top-level vdevs into a root vdev. 5198 */ 5199 VERIFY(nvlist_alloc(&nvroot, NV_UNIQUE_NAME, KM_SLEEP) == 0); 5200 VERIFY(nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE, 5201 VDEV_TYPE_ROOT) == 0); 5202 VERIFY(nvlist_add_uint64(nvroot, ZPOOL_CONFIG_ID, 0ULL) == 0); 5203 VERIFY(nvlist_add_uint64(nvroot, ZPOOL_CONFIG_GUID, pgid) == 0); 5204 VERIFY(nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN, 5205 &nvtop, 1) == 0); 5206 5207 /* 5208 * Replace the existing vdev_tree with the new root vdev in 5209 * this pool's configuration (remove the old, add the new). 5210 */ 5211 VERIFY(nvlist_add_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, nvroot) == 0); 5212 nvlist_free(nvroot); 5213 return (config); 5214 } 5215 5216 /* 5217 * Walk the vdev tree and see if we can find a device with "better" 5218 * configuration. A configuration is "better" if the label on that 5219 * device has a more recent txg. 
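 * This matters mainly for a mirrored root pool: if firmware boots from a
 * stale side of the mirror, another leaf's label will carry a newer txg,
 * and spa_import_rootpool() below refuses the import and points the user
 * at the healthier device instead.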
5220  */
5221 static void
5222 spa_alt_rootvdev(vdev_t *vd, vdev_t **avd, uint64_t *txg)
5223 {
5224	for (int c = 0; c < vd->vdev_children; c++)
5225		spa_alt_rootvdev(vd->vdev_child[c], avd, txg);
5226
5227	if (vd->vdev_ops->vdev_op_leaf) {
5228		nvlist_t *label;
5229		uint64_t label_txg;
5230
5231		if (vdev_disk_read_rootlabel(vd->vdev_physpath, vd->vdev_devid,
5232		    &label) != 0)
5233			return;
5234
5235		VERIFY(nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_TXG,
5236		    &label_txg) == 0);
5237
5238		/*
5239		 * Do we have a better boot device?
5240		 */
5241		if (label_txg > *txg) {
5242			*txg = label_txg;
5243			*avd = vd;
5244		}
5245		nvlist_free(label);
5246	}
5247 }
5248
5249 /*
5250  * Import a root pool.
5251  *
5252  * For x86, devpath_list will consist of the devid and/or physpath name of
5253  * the vdev (e.g. "id1,sd@SSEAGATE..." or "/pci@1f,0/ide@d/disk@0,0:a").
5254  * The GRUB "findroot" command will return the vdev we should boot.
5255  *
5256  * For Sparc, devpath_list consists of the physpath name of the booting
5257  * device, whether the root pool is a single-device pool or a mirrored pool,
5258  * e.g.
5259  *	"/pci@1f,0/ide@d/disk@0,0:a"
5260  */
5261 int
5262 spa_import_rootpool(char *devpath, char *devid)
5263 {
5264	spa_t *spa;
5265	vdev_t *rvd, *bvd, *avd = NULL;
5266	nvlist_t *config, *nvtop;
5267	uint64_t guid, txg;
5268	char *pname;
5269	int error;
5270
5271	/*
5272	 * Read the label from the boot device and generate a configuration.
5273	 */
5274	config = spa_generate_rootconf(devpath, devid, &guid);
5275 #if defined(_OBP) && defined(_KERNEL)
5276	if (config == NULL) {
5277		if (strstr(devpath, "/iscsi/ssd") != NULL) {
5278			/* iscsi boot */
5279			get_iscsi_bootpath_phy(devpath);
5280			config = spa_generate_rootconf(devpath, devid, &guid);
5281		}
5282	}
5283 #endif
5284	if (config == NULL) {
5285		cmn_err(CE_NOTE, "Cannot read the pool label from '%s'",
5286		    devpath);
5287		return (SET_ERROR(EIO));
5288	}
5289
5290	VERIFY(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
5291	    &pname) == 0);
5292	VERIFY(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_TXG, &txg) == 0);
5293
5294	mutex_enter(&spa_namespace_lock);
5295	if ((spa = spa_lookup(pname)) != NULL) {
5296		/*
5297		 * Remove the existing root pool from the namespace so that we
5298		 * can replace it with the correct config we just read in.
5299		 */
5300		spa_remove(spa);
5301	}
5302
5303	spa = spa_add(pname, config, NULL);
5304	spa->spa_is_root = B_TRUE;
5305	spa->spa_import_flags = ZFS_IMPORT_VERBATIM;
5306	if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
5307	    &spa->spa_ubsync.ub_version) != 0)
5308		spa->spa_ubsync.ub_version = SPA_VERSION_INITIAL;
5309
5310	/*
5311	 * Build up a vdev tree based on the boot device's label config.
5312	 */
5313	VERIFY(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
5314	    &nvtop) == 0);
5315	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
5316	error = spa_config_parse(spa, &rvd, nvtop, NULL, 0,
5317	    VDEV_ALLOC_ROOTPOOL);
5318	spa_config_exit(spa, SCL_ALL, FTAG);
5319	if (error) {
5320		mutex_exit(&spa_namespace_lock);
5321		nvlist_free(config);
5322		cmn_err(CE_NOTE, "Can not parse the config for pool '%s'",
5323		    pname);
5324		return (error);
5325	}
5326
5327	/*
5328	 * Get the boot vdev.
5329	 */
5330	if ((bvd = vdev_lookup_by_guid(rvd, guid)) == NULL) {
5331		cmn_err(CE_NOTE, "Can not find the boot vdev for guid %llu",
5332		    (u_longlong_t)guid);
5333		error = SET_ERROR(ENOENT);
5334		goto out;
5335	}
5336
5337	/*
5338	 * Determine if there is a better boot device.
5339 */ 5340 avd = bvd; 5341 spa_alt_rootvdev(rvd, &avd, &txg); 5342 if (avd != bvd) { 5343 cmn_err(CE_NOTE, "The boot device is 'degraded'. Please " 5344 "try booting from '%s'", avd->vdev_path); 5345 error = SET_ERROR(EINVAL); 5346 goto out; 5347 } 5348 5349 /* 5350 * If the boot device is part of a spare vdev then ensure that 5351 * we're booting off the active spare. 5352 */ 5353 if (bvd->vdev_parent->vdev_ops == &vdev_spare_ops && 5354 !bvd->vdev_isspare) { 5355 cmn_err(CE_NOTE, "The boot device is currently spared. Please " 5356 "try booting from '%s'", 5357 bvd->vdev_parent-> 5358 vdev_child[bvd->vdev_parent->vdev_children - 1]->vdev_path); 5359 error = SET_ERROR(EINVAL); 5360 goto out; 5361 } 5362 5363 error = 0; 5364 out: 5365 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 5366 vdev_free(rvd); 5367 spa_config_exit(spa, SCL_ALL, FTAG); 5368 mutex_exit(&spa_namespace_lock); 5369 5370 nvlist_free(config); 5371 return (error); 5372 } 5373 5374 #endif 5375 5376 /* 5377 * Import a non-root pool into the system. 5378 */ 5379 int 5380 spa_import(const char *pool, nvlist_t *config, nvlist_t *props, uint64_t flags) 5381 { 5382 spa_t *spa; 5383 char *altroot = NULL; 5384 spa_load_state_t state = SPA_LOAD_IMPORT; 5385 zpool_load_policy_t policy; 5386 uint64_t mode = spa_mode_global; 5387 uint64_t readonly = B_FALSE; 5388 int error; 5389 nvlist_t *nvroot; 5390 nvlist_t **spares, **l2cache; 5391 uint_t nspares, nl2cache; 5392 5393 /* 5394 * If a pool with this name exists, return failure. 5395 */ 5396 mutex_enter(&spa_namespace_lock); 5397 if (spa_lookup(pool) != NULL) { 5398 mutex_exit(&spa_namespace_lock); 5399 return (SET_ERROR(EEXIST)); 5400 } 5401 5402 /* 5403 * Create and initialize the spa structure. 5404 */ 5405 (void) nvlist_lookup_string(props, 5406 zpool_prop_to_name(ZPOOL_PROP_ALTROOT), &altroot); 5407 (void) nvlist_lookup_uint64(props, 5408 zpool_prop_to_name(ZPOOL_PROP_READONLY), &readonly); 5409 if (readonly) 5410 mode = FREAD; 5411 spa = spa_add(pool, config, altroot); 5412 spa->spa_import_flags = flags; 5413 5414 /* 5415 * Verbatim import - Take a pool and insert it into the namespace 5416 * as if it had been loaded at boot. 5417 */ 5418 if (spa->spa_import_flags & ZFS_IMPORT_VERBATIM) { 5419 if (props != NULL) 5420 spa_configfile_set(spa, props, B_FALSE); 5421 5422 spa_write_cachefile(spa, B_FALSE, B_TRUE); 5423 spa_event_notify(spa, NULL, NULL, ESC_ZFS_POOL_IMPORT); 5424 zfs_dbgmsg("spa_import: verbatim import of %s", pool); 5425 mutex_exit(&spa_namespace_lock); 5426 return (0); 5427 } 5428 5429 spa_activate(spa, mode); 5430 5431 /* 5432 * Don't start async tasks until we know everything is healthy. 5433 */ 5434 spa_async_suspend(spa); 5435 5436 zpool_get_load_policy(config, &policy); 5437 if (policy.zlp_rewind & ZPOOL_DO_REWIND) 5438 state = SPA_LOAD_RECOVER; 5439 5440 spa->spa_config_source = SPA_CONFIG_SRC_TRYIMPORT; 5441 5442 if (state != SPA_LOAD_RECOVER) { 5443 spa->spa_last_ubsync_txg = spa->spa_load_txg = 0; 5444 zfs_dbgmsg("spa_import: importing %s", pool); 5445 } else { 5446 zfs_dbgmsg("spa_import: importing %s, max_txg=%lld " 5447 "(RECOVERY MODE)", pool, (longlong_t)policy.zlp_txg); 5448 } 5449 error = spa_load_best(spa, state, policy.zlp_txg, policy.zlp_rewind); 5450 5451 /* 5452 * Propagate anything learned while loading the pool and pass it 5453 * back to caller (i.e. rewind info, missing devices, etc). 
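	 * The information is attached to the caller's config nvlist under
	 * ZPOOL_CONFIG_LOAD_INFO just below, so that userland can report,
	 * for instance, how far the pool was rewound or which features are
	 * missing.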
5454 */ 5455 VERIFY(nvlist_add_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, 5456 spa->spa_load_info) == 0); 5457 5458 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 5459 /* 5460 * Toss any existing sparelist, as it doesn't have any validity 5461 * anymore, and conflicts with spa_has_spare(). 5462 */ 5463 if (spa->spa_spares.sav_config) { 5464 nvlist_free(spa->spa_spares.sav_config); 5465 spa->spa_spares.sav_config = NULL; 5466 spa_load_spares(spa); 5467 } 5468 if (spa->spa_l2cache.sav_config) { 5469 nvlist_free(spa->spa_l2cache.sav_config); 5470 spa->spa_l2cache.sav_config = NULL; 5471 spa_load_l2cache(spa); 5472 } 5473 5474 VERIFY(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, 5475 &nvroot) == 0); 5476 if (error == 0) 5477 error = spa_validate_aux(spa, nvroot, -1ULL, 5478 VDEV_ALLOC_SPARE); 5479 if (error == 0) 5480 error = spa_validate_aux(spa, nvroot, -1ULL, 5481 VDEV_ALLOC_L2CACHE); 5482 spa_config_exit(spa, SCL_ALL, FTAG); 5483 5484 if (props != NULL) 5485 spa_configfile_set(spa, props, B_FALSE); 5486 5487 if (error != 0 || (props && spa_writeable(spa) && 5488 (error = spa_prop_set(spa, props)))) { 5489 spa_unload(spa); 5490 spa_deactivate(spa); 5491 spa_remove(spa); 5492 mutex_exit(&spa_namespace_lock); 5493 return (error); 5494 } 5495 5496 spa_async_resume(spa); 5497 5498 /* 5499 * Override any spares and level 2 cache devices as specified by 5500 * the user, as these may have correct device names/devids, etc. 5501 */ 5502 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES, 5503 &spares, &nspares) == 0) { 5504 if (spa->spa_spares.sav_config) 5505 VERIFY(nvlist_remove(spa->spa_spares.sav_config, 5506 ZPOOL_CONFIG_SPARES, DATA_TYPE_NVLIST_ARRAY) == 0); 5507 else 5508 VERIFY(nvlist_alloc(&spa->spa_spares.sav_config, 5509 NV_UNIQUE_NAME, KM_SLEEP) == 0); 5510 VERIFY(nvlist_add_nvlist_array(spa->spa_spares.sav_config, 5511 ZPOOL_CONFIG_SPARES, spares, nspares) == 0); 5512 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 5513 spa_load_spares(spa); 5514 spa_config_exit(spa, SCL_ALL, FTAG); 5515 spa->spa_spares.sav_sync = B_TRUE; 5516 } 5517 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE, 5518 &l2cache, &nl2cache) == 0) { 5519 if (spa->spa_l2cache.sav_config) 5520 VERIFY(nvlist_remove(spa->spa_l2cache.sav_config, 5521 ZPOOL_CONFIG_L2CACHE, DATA_TYPE_NVLIST_ARRAY) == 0); 5522 else 5523 VERIFY(nvlist_alloc(&spa->spa_l2cache.sav_config, 5524 NV_UNIQUE_NAME, KM_SLEEP) == 0); 5525 VERIFY(nvlist_add_nvlist_array(spa->spa_l2cache.sav_config, 5526 ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache) == 0); 5527 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 5528 spa_load_l2cache(spa); 5529 spa_config_exit(spa, SCL_ALL, FTAG); 5530 spa->spa_l2cache.sav_sync = B_TRUE; 5531 } 5532 5533 /* 5534 * Check for any removed devices. 5535 */ 5536 if (spa->spa_autoreplace) { 5537 spa_aux_check_removed(&spa->spa_spares); 5538 spa_aux_check_removed(&spa->spa_l2cache); 5539 } 5540 5541 if (spa_writeable(spa)) { 5542 /* 5543 * Update the config cache to include the newly-imported pool. 5544 */ 5545 spa_config_update(spa, SPA_CONFIG_UPDATE_POOL); 5546 } 5547 5548 /* 5549 * It's possible that the pool was expanded while it was exported. 5550 * We kick off an async task to handle this for us. 
5551 */ 5552 spa_async_request(spa, SPA_ASYNC_AUTOEXPAND); 5553 5554 spa_history_log_version(spa, "import"); 5555 5556 spa_event_notify(spa, NULL, NULL, ESC_ZFS_POOL_IMPORT); 5557 5558 mutex_exit(&spa_namespace_lock); 5559 5560 return (0); 5561 } 5562 5563 nvlist_t * 5564 spa_tryimport(nvlist_t *tryconfig) 5565 { 5566 nvlist_t *config = NULL; 5567 char *poolname, *cachefile; 5568 spa_t *spa; 5569 uint64_t state; 5570 int error; 5571 zpool_load_policy_t policy; 5572 5573 if (nvlist_lookup_string(tryconfig, ZPOOL_CONFIG_POOL_NAME, &poolname)) 5574 return (NULL); 5575 5576 if (nvlist_lookup_uint64(tryconfig, ZPOOL_CONFIG_POOL_STATE, &state)) 5577 return (NULL); 5578 5579 /* 5580 * Create and initialize the spa structure. 5581 */ 5582 mutex_enter(&spa_namespace_lock); 5583 spa = spa_add(TRYIMPORT_NAME, tryconfig, NULL); 5584 spa_activate(spa, FREAD); 5585 5586 /* 5587 * Rewind pool if a max txg was provided. 5588 */ 5589 zpool_get_load_policy(spa->spa_config, &policy); 5590 if (policy.zlp_txg != UINT64_MAX) { 5591 spa->spa_load_max_txg = policy.zlp_txg; 5592 spa->spa_extreme_rewind = B_TRUE; 5593 zfs_dbgmsg("spa_tryimport: importing %s, max_txg=%lld", 5594 poolname, (longlong_t)policy.zlp_txg); 5595 } else { 5596 zfs_dbgmsg("spa_tryimport: importing %s", poolname); 5597 } 5598 5599 if (nvlist_lookup_string(tryconfig, ZPOOL_CONFIG_CACHEFILE, &cachefile) 5600 == 0) { 5601 zfs_dbgmsg("spa_tryimport: using cachefile '%s'", cachefile); 5602 spa->spa_config_source = SPA_CONFIG_SRC_CACHEFILE; 5603 } else { 5604 spa->spa_config_source = SPA_CONFIG_SRC_SCAN; 5605 } 5606 5607 error = spa_load(spa, SPA_LOAD_TRYIMPORT, SPA_IMPORT_EXISTING); 5608 5609 /* 5610 * If 'tryconfig' was at least parsable, return the current config. 5611 */ 5612 if (spa->spa_root_vdev != NULL) { 5613 config = spa_config_generate(spa, NULL, -1ULL, B_TRUE); 5614 VERIFY(nvlist_add_string(config, ZPOOL_CONFIG_POOL_NAME, 5615 poolname) == 0); 5616 VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_POOL_STATE, 5617 state) == 0); 5618 VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_TIMESTAMP, 5619 spa->spa_uberblock.ub_timestamp) == 0); 5620 VERIFY(nvlist_add_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, 5621 spa->spa_load_info) == 0); 5622 5623 /* 5624 * If the bootfs property exists on this pool then we 5625 * copy it out so that external consumers can tell which 5626 * pools are bootable. 5627 */ 5628 if ((!error || error == EEXIST) && spa->spa_bootfs) { 5629 char *tmpname = kmem_alloc(MAXPATHLEN, KM_SLEEP); 5630 5631 /* 5632 * We have to play games with the name since the 5633 * pool was opened as TRYIMPORT_NAME. 5634 */ 5635 if (dsl_dsobj_to_dsname(spa_name(spa), 5636 spa->spa_bootfs, tmpname) == 0) { 5637 char *cp; 5638 char *dsname = kmem_alloc(MAXPATHLEN, KM_SLEEP); 5639 5640 cp = strchr(tmpname, '/'); 5641 if (cp == NULL) { 5642 (void) strlcpy(dsname, tmpname, 5643 MAXPATHLEN); 5644 } else { 5645 (void) snprintf(dsname, MAXPATHLEN, 5646 "%s/%s", poolname, ++cp); 5647 } 5648 VERIFY(nvlist_add_string(config, 5649 ZPOOL_CONFIG_BOOTFS, dsname) == 0); 5650 kmem_free(dsname, MAXPATHLEN); 5651 } 5652 kmem_free(tmpname, MAXPATHLEN); 5653 } 5654 5655 /* 5656 * Add the list of hot spares and level 2 cache devices. 
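		 * spa_add_spares() and spa_add_l2cache() assert that
		 * SCL_CONFIG is held, hence the explicit spa_config_enter()
		 * below even though this spa_t is private to tryimport.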
5657 */
5658 spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
5659 spa_add_spares(spa, config);
5660 spa_add_l2cache(spa, config);
5661 spa_config_exit(spa, SCL_CONFIG, FTAG);
5662 }
5663
5664 spa_unload(spa);
5665 spa_deactivate(spa);
5666 spa_remove(spa);
5667 mutex_exit(&spa_namespace_lock);
5668
5669 return (config);
5670 }
5671
5672 /*
5673 * Pool export/destroy
5674 *
5675 * The act of destroying or exporting a pool is very simple. We make sure there
5676 * is no more pending I/O and any references to the pool are gone. Then, we
5677 * update the pool state and sync all the labels to disk, removing the
5678 * configuration from the cache afterwards. If the 'hardforce' flag is set, then
5679 * we don't sync the labels or remove the configuration cache.
5680 */
5681 static int
5682 spa_export_common(char *pool, int new_state, nvlist_t **oldconfig,
5683 boolean_t force, boolean_t hardforce)
5684 {
5685 spa_t *spa;
5686
5687 if (oldconfig)
5688 *oldconfig = NULL;
5689
5690 if (!(spa_mode_global & FWRITE))
5691 return (SET_ERROR(EROFS));
5692
5693 mutex_enter(&spa_namespace_lock);
5694 if ((spa = spa_lookup(pool)) == NULL) {
5695 mutex_exit(&spa_namespace_lock);
5696 return (SET_ERROR(ENOENT));
5697 }
5698
5699 /*
5700 * Put a hold on the pool, drop the namespace lock, stop async tasks,
5701 * reacquire the namespace lock, and see if we can export.
5702 */
5703 spa_open_ref(spa, FTAG);
5704 mutex_exit(&spa_namespace_lock);
5705 spa_async_suspend(spa);
5706 mutex_enter(&spa_namespace_lock);
5707 spa_close(spa, FTAG);
5708
5709 /*
5710 * The pool will be in core if it's openable,
5711 * in which case we can modify its state.
5712 */
5713 if (spa->spa_state != POOL_STATE_UNINITIALIZED && spa->spa_sync_on) {
5714
5715 /*
5716 * Objsets may be open only because they're dirty, so we
5717 * have to force the pool to sync before checking spa_refcnt.
5718 */
5719 txg_wait_synced(spa->spa_dsl_pool, 0);
5720 spa_evicting_os_wait(spa);
5721
5722 /*
5723 * A pool cannot be exported or destroyed if there are active
5724 * references. If we are resetting a pool, allow references by
5725 * fault injection handlers.
5726 */
5727 if (!spa_refcount_zero(spa) ||
5728 (spa->spa_inject_ref != 0 &&
5729 new_state != POOL_STATE_UNINITIALIZED)) {
5730 spa_async_resume(spa);
5731 mutex_exit(&spa_namespace_lock);
5732 return (SET_ERROR(EBUSY));
5733 }
5734
5735 /*
5736 * A pool cannot be exported if it has an active shared spare.
5737 * This is to prevent other pools from stealing the active spare
5738 * from an exported pool. The user may still force the export of
5739 * such a pool at their own risk.
5740 */
5741 if (!force && new_state == POOL_STATE_EXPORTED &&
5742 spa_has_active_shared_spare(spa)) {
5743 spa_async_resume(spa);
5744 mutex_exit(&spa_namespace_lock);
5745 return (SET_ERROR(EXDEV));
5746 }
5747
5748 /*
5749 * We're about to export or destroy this pool. Make sure
5750 * we stop all initialization activity here before we
5751 * set the spa_final_txg. This will ensure that all
5752 * dirty data resulting from the initialization is
5753 * committed to disk before we unload the pool.
5754 */
5755 if (spa->spa_root_vdev != NULL) {
5756 vdev_initialize_stop_all(spa->spa_root_vdev,
5757 VDEV_INITIALIZE_ACTIVE);
5758 }
5759
5760 /*
5761 * We want this to be reflected on every label,
5762 * so mark them all dirty. spa_unload() will do the
5763 * final sync that pushes these changes out.
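 * For example (an illustrative sketch, assuming TXG_DEFER_SIZE is 2):
 * if the last synced txg was 1000, spa_final_txg below becomes 1003,
 * so txgs up to 1003 may still sync, giving the dirtied labels and any
 * deferred frees a chance to reach disk before spa_unload().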
5764 */ 5765 if (new_state != POOL_STATE_UNINITIALIZED && !hardforce) { 5766 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 5767 spa->spa_state = new_state; 5768 spa->spa_final_txg = spa_last_synced_txg(spa) + 5769 TXG_DEFER_SIZE + 1; 5770 vdev_config_dirty(spa->spa_root_vdev); 5771 spa_config_exit(spa, SCL_ALL, FTAG); 5772 } 5773 } 5774 5775 spa_event_notify(spa, NULL, NULL, ESC_ZFS_POOL_DESTROY); 5776 5777 if (spa->spa_state != POOL_STATE_UNINITIALIZED) { 5778 spa_unload(spa); 5779 spa_deactivate(spa); 5780 } 5781 5782 if (oldconfig && spa->spa_config) 5783 VERIFY(nvlist_dup(spa->spa_config, oldconfig, 0) == 0); 5784 5785 if (new_state != POOL_STATE_UNINITIALIZED) { 5786 if (!hardforce) 5787 spa_write_cachefile(spa, B_TRUE, B_TRUE); 5788 spa_remove(spa); 5789 } 5790 mutex_exit(&spa_namespace_lock); 5791 5792 return (0); 5793 } 5794 5795 /* 5796 * Destroy a storage pool. 5797 */ 5798 int 5799 spa_destroy(char *pool) 5800 { 5801 return (spa_export_common(pool, POOL_STATE_DESTROYED, NULL, 5802 B_FALSE, B_FALSE)); 5803 } 5804 5805 /* 5806 * Export a storage pool. 5807 */ 5808 int 5809 spa_export(char *pool, nvlist_t **oldconfig, boolean_t force, 5810 boolean_t hardforce) 5811 { 5812 return (spa_export_common(pool, POOL_STATE_EXPORTED, oldconfig, 5813 force, hardforce)); 5814 } 5815 5816 /* 5817 * Similar to spa_export(), this unloads the spa_t without actually removing it 5818 * from the namespace in any way. 5819 */ 5820 int 5821 spa_reset(char *pool) 5822 { 5823 return (spa_export_common(pool, POOL_STATE_UNINITIALIZED, NULL, 5824 B_FALSE, B_FALSE)); 5825 } 5826 5827 /* 5828 * ========================================================================== 5829 * Device manipulation 5830 * ========================================================================== 5831 */ 5832 5833 /* 5834 * Add a device to a storage pool. 5835 */ 5836 int 5837 spa_vdev_add(spa_t *spa, nvlist_t *nvroot) 5838 { 5839 uint64_t txg, id; 5840 int error; 5841 vdev_t *rvd = spa->spa_root_vdev; 5842 vdev_t *vd, *tvd; 5843 nvlist_t **spares, **l2cache; 5844 uint_t nspares, nl2cache; 5845 5846 ASSERT(spa_writeable(spa)); 5847 5848 txg = spa_vdev_enter(spa); 5849 5850 if ((error = spa_config_parse(spa, &vd, nvroot, NULL, 0, 5851 VDEV_ALLOC_ADD)) != 0) 5852 return (spa_vdev_exit(spa, NULL, txg, error)); 5853 5854 spa->spa_pending_vdev = vd; /* spa_vdev_exit() will clear this */ 5855 5856 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES, &spares, 5857 &nspares) != 0) 5858 nspares = 0; 5859 5860 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE, &l2cache, 5861 &nl2cache) != 0) 5862 nl2cache = 0; 5863 5864 if (vd->vdev_children == 0 && nspares == 0 && nl2cache == 0) 5865 return (spa_vdev_exit(spa, vd, txg, EINVAL)); 5866 5867 if (vd->vdev_children != 0 && 5868 (error = vdev_create(vd, txg, B_FALSE)) != 0) 5869 return (spa_vdev_exit(spa, vd, txg, error)); 5870 5871 /* 5872 * We must validate the spares and l2cache devices after checking the 5873 * children. Otherwise, vdev_inuse() will blindly overwrite the spare. 5874 */ 5875 if ((error = spa_validate_aux(spa, nvroot, txg, VDEV_ALLOC_ADD)) != 0) 5876 return (spa_vdev_exit(spa, vd, txg, error)); 5877 5878 /* 5879 * If we are in the middle of a device removal, we can only add 5880 * devices which match the existing devices in the pool. 5881 * If we are in the middle of a removal, or have some indirect 5882 * vdevs, we can not add raidz toplevels. 
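 * For example (illustrative): with a removal in progress on a pool
 * whose existing top-level vdevs all use ashift=12, adding another
 * mirror of ashift=12 leaf disks is allowed, while adding a raidz
 * top-level, or a disk whose ashift is not 12, fails with EINVAL.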
5883 */ 5884 if (spa->spa_vdev_removal != NULL || 5885 spa->spa_removing_phys.sr_prev_indirect_vdev != -1) { 5886 for (int c = 0; c < vd->vdev_children; c++) { 5887 tvd = vd->vdev_child[c]; 5888 if (spa->spa_vdev_removal != NULL && 5889 tvd->vdev_ashift != spa->spa_max_ashift) { 5890 return (spa_vdev_exit(spa, vd, txg, EINVAL)); 5891 } 5892 /* Fail if top level vdev is raidz */ 5893 if (tvd->vdev_ops == &vdev_raidz_ops) { 5894 return (spa_vdev_exit(spa, vd, txg, EINVAL)); 5895 } 5896 /* 5897 * Need the top level mirror to be 5898 * a mirror of leaf vdevs only 5899 */ 5900 if (tvd->vdev_ops == &vdev_mirror_ops) { 5901 for (uint64_t cid = 0; 5902 cid < tvd->vdev_children; cid++) { 5903 vdev_t *cvd = tvd->vdev_child[cid]; 5904 if (!cvd->vdev_ops->vdev_op_leaf) { 5905 return (spa_vdev_exit(spa, vd, 5906 txg, EINVAL)); 5907 } 5908 } 5909 } 5910 } 5911 } 5912 5913 for (int c = 0; c < vd->vdev_children; c++) { 5914 5915 /* 5916 * Set the vdev id to the first hole, if one exists. 5917 */ 5918 for (id = 0; id < rvd->vdev_children; id++) { 5919 if (rvd->vdev_child[id]->vdev_ishole) { 5920 vdev_free(rvd->vdev_child[id]); 5921 break; 5922 } 5923 } 5924 tvd = vd->vdev_child[c]; 5925 vdev_remove_child(vd, tvd); 5926 tvd->vdev_id = id; 5927 vdev_add_child(rvd, tvd); 5928 vdev_config_dirty(tvd); 5929 } 5930 5931 if (nspares != 0) { 5932 spa_set_aux_vdevs(&spa->spa_spares, spares, nspares, 5933 ZPOOL_CONFIG_SPARES); 5934 spa_load_spares(spa); 5935 spa->spa_spares.sav_sync = B_TRUE; 5936 } 5937 5938 if (nl2cache != 0) { 5939 spa_set_aux_vdevs(&spa->spa_l2cache, l2cache, nl2cache, 5940 ZPOOL_CONFIG_L2CACHE); 5941 spa_load_l2cache(spa); 5942 spa->spa_l2cache.sav_sync = B_TRUE; 5943 } 5944 5945 /* 5946 * We have to be careful when adding new vdevs to an existing pool. 5947 * If other threads start allocating from these vdevs before we 5948 * sync the config cache, and we lose power, then upon reboot we may 5949 * fail to open the pool because there are DVAs that the config cache 5950 * can't translate. Therefore, we first add the vdevs without 5951 * initializing metaslabs; sync the config cache (via spa_vdev_exit()); 5952 * and then let spa_config_update() initialize the new metaslabs. 5953 * 5954 * spa_load() checks for added-but-not-initialized vdevs, so that 5955 * if we lose power at any point in this sequence, the remaining 5956 * steps will be completed the next time we load the pool. 5957 */ 5958 (void) spa_vdev_exit(spa, vd, txg, 0); 5959 5960 mutex_enter(&spa_namespace_lock); 5961 spa_config_update(spa, SPA_CONFIG_UPDATE_POOL); 5962 spa_event_notify(spa, NULL, NULL, ESC_ZFS_VDEV_ADD); 5963 mutex_exit(&spa_namespace_lock); 5964 5965 return (0); 5966 } 5967 5968 /* 5969 * Attach a device to a mirror. The arguments are the path to any device 5970 * in the mirror, and the nvroot for the new device. If the path specifies 5971 * a device that is not mirrored, we automatically insert the mirror vdev. 5972 * 5973 * If 'replacing' is specified, the new device is intended to replace the 5974 * existing device; in this case the two devices are made into their own 5975 * mirror using the 'replacing' vdev, which is functionally identical to 5976 * the mirror vdev (it actually reuses all the same ops) but has a few 5977 * extra rules: you can't attach to it after it's been created, and upon 5978 * completion of resilvering, the first disk (the one being replaced) 5979 * is automatically detached. 
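 * For example (illustrative): attaching new disk C to existing disk B
 * in a top-level mirror(A, B) with 'replacing' set produces
 * mirror(A, replacing(B, C)); once C has fully resilvered, B is
 * detached automatically, leaving mirror(A, C).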
5980 */ 5981 int 5982 spa_vdev_attach(spa_t *spa, uint64_t guid, nvlist_t *nvroot, int replacing) 5983 { 5984 uint64_t txg, dtl_max_txg; 5985 vdev_t *rvd = spa->spa_root_vdev; 5986 vdev_t *oldvd, *newvd, *newrootvd, *pvd, *tvd; 5987 vdev_ops_t *pvops; 5988 char *oldvdpath, *newvdpath; 5989 int newvd_isspare; 5990 int error; 5991 5992 ASSERT(spa_writeable(spa)); 5993 5994 txg = spa_vdev_enter(spa); 5995 5996 oldvd = spa_lookup_by_guid(spa, guid, B_FALSE); 5997 5998 ASSERT(MUTEX_HELD(&spa_namespace_lock)); 5999 if (spa_feature_is_active(spa, SPA_FEATURE_POOL_CHECKPOINT)) { 6000 error = (spa_has_checkpoint(spa)) ? 6001 ZFS_ERR_CHECKPOINT_EXISTS : ZFS_ERR_DISCARDING_CHECKPOINT; 6002 return (spa_vdev_exit(spa, NULL, txg, error)); 6003 } 6004 6005 if (spa->spa_vdev_removal != NULL) 6006 return (spa_vdev_exit(spa, NULL, txg, EBUSY)); 6007 6008 if (oldvd == NULL) 6009 return (spa_vdev_exit(spa, NULL, txg, ENODEV)); 6010 6011 if (!oldvd->vdev_ops->vdev_op_leaf) 6012 return (spa_vdev_exit(spa, NULL, txg, ENOTSUP)); 6013 6014 pvd = oldvd->vdev_parent; 6015 6016 if ((error = spa_config_parse(spa, &newrootvd, nvroot, NULL, 0, 6017 VDEV_ALLOC_ATTACH)) != 0) 6018 return (spa_vdev_exit(spa, NULL, txg, EINVAL)); 6019 6020 if (newrootvd->vdev_children != 1) 6021 return (spa_vdev_exit(spa, newrootvd, txg, EINVAL)); 6022 6023 newvd = newrootvd->vdev_child[0]; 6024 6025 if (!newvd->vdev_ops->vdev_op_leaf) 6026 return (spa_vdev_exit(spa, newrootvd, txg, EINVAL)); 6027 6028 if ((error = vdev_create(newrootvd, txg, replacing)) != 0) 6029 return (spa_vdev_exit(spa, newrootvd, txg, error)); 6030 6031 /* 6032 * Spares can't replace logs 6033 */ 6034 if (oldvd->vdev_top->vdev_islog && newvd->vdev_isspare) 6035 return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP)); 6036 6037 if (!replacing) { 6038 /* 6039 * For attach, the only allowable parent is a mirror or the root 6040 * vdev. 6041 */ 6042 if (pvd->vdev_ops != &vdev_mirror_ops && 6043 pvd->vdev_ops != &vdev_root_ops) 6044 return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP)); 6045 6046 pvops = &vdev_mirror_ops; 6047 } else { 6048 /* 6049 * Active hot spares can only be replaced by inactive hot 6050 * spares. 6051 */ 6052 if (pvd->vdev_ops == &vdev_spare_ops && 6053 oldvd->vdev_isspare && 6054 !spa_has_spare(spa, newvd->vdev_guid)) 6055 return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP)); 6056 6057 /* 6058 * If the source is a hot spare, and the parent isn't already a 6059 * spare, then we want to create a new hot spare. Otherwise, we 6060 * want to create a replacing vdev. The user is not allowed to 6061 * attach to a spared vdev child unless the 'isspare' state is 6062 * the same (spare replaces spare, non-spare replaces 6063 * non-spare). 6064 */ 6065 if (pvd->vdev_ops == &vdev_replacing_ops && 6066 spa_version(spa) < SPA_VERSION_MULTI_REPLACE) { 6067 return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP)); 6068 } else if (pvd->vdev_ops == &vdev_spare_ops && 6069 newvd->vdev_isspare != oldvd->vdev_isspare) { 6070 return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP)); 6071 } 6072 6073 if (newvd->vdev_isspare) 6074 pvops = &vdev_spare_ops; 6075 else 6076 pvops = &vdev_replacing_ops; 6077 } 6078 6079 /* 6080 * Make sure the new device is big enough. 6081 */ 6082 if (newvd->vdev_asize < vdev_get_min_asize(oldvd)) 6083 return (spa_vdev_exit(spa, newrootvd, txg, EOVERFLOW)); 6084 6085 /* 6086 * The new device cannot have a higher alignment requirement 6087 * than the top-level vdev. 
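 * For example, a leaf that opens with ashift=12 (4K sectors) cannot be
 * attached to a top-level vdev that was created with ashift=9
 * (512-byte sectors); the check below fails such an attach with EDOM.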
6088 */ 6089 if (newvd->vdev_ashift > oldvd->vdev_top->vdev_ashift) 6090 return (spa_vdev_exit(spa, newrootvd, txg, EDOM)); 6091 6092 /* 6093 * If this is an in-place replacement, update oldvd's path and devid 6094 * to make it distinguishable from newvd, and unopenable from now on. 6095 */ 6096 if (strcmp(oldvd->vdev_path, newvd->vdev_path) == 0) { 6097 spa_strfree(oldvd->vdev_path); 6098 oldvd->vdev_path = kmem_alloc(strlen(newvd->vdev_path) + 5, 6099 KM_SLEEP); 6100 (void) sprintf(oldvd->vdev_path, "%s/%s", 6101 newvd->vdev_path, "old"); 6102 if (oldvd->vdev_devid != NULL) { 6103 spa_strfree(oldvd->vdev_devid); 6104 oldvd->vdev_devid = NULL; 6105 } 6106 } 6107 6108 /* mark the device being resilvered */ 6109 newvd->vdev_resilver_txg = txg; 6110 6111 /* 6112 * If the parent is not a mirror, or if we're replacing, insert the new 6113 * mirror/replacing/spare vdev above oldvd. 6114 */ 6115 if (pvd->vdev_ops != pvops) 6116 pvd = vdev_add_parent(oldvd, pvops); 6117 6118 ASSERT(pvd->vdev_top->vdev_parent == rvd); 6119 ASSERT(pvd->vdev_ops == pvops); 6120 ASSERT(oldvd->vdev_parent == pvd); 6121 6122 /* 6123 * Extract the new device from its root and add it to pvd. 6124 */ 6125 vdev_remove_child(newrootvd, newvd); 6126 newvd->vdev_id = pvd->vdev_children; 6127 newvd->vdev_crtxg = oldvd->vdev_crtxg; 6128 vdev_add_child(pvd, newvd); 6129 6130 tvd = newvd->vdev_top; 6131 ASSERT(pvd->vdev_top == tvd); 6132 ASSERT(tvd->vdev_parent == rvd); 6133 6134 vdev_config_dirty(tvd); 6135 6136 /* 6137 * Set newvd's DTL to [TXG_INITIAL, dtl_max_txg) so that we account 6138 * for any dmu_sync-ed blocks. It will propagate upward when 6139 * spa_vdev_exit() calls vdev_dtl_reassess(). 6140 */ 6141 dtl_max_txg = txg + TXG_CONCURRENT_STATES; 6142 6143 vdev_dtl_dirty(newvd, DTL_MISSING, TXG_INITIAL, 6144 dtl_max_txg - TXG_INITIAL); 6145 6146 if (newvd->vdev_isspare) { 6147 spa_spare_activate(newvd); 6148 spa_event_notify(spa, newvd, NULL, ESC_ZFS_VDEV_SPARE); 6149 } 6150 6151 oldvdpath = spa_strdup(oldvd->vdev_path); 6152 newvdpath = spa_strdup(newvd->vdev_path); 6153 newvd_isspare = newvd->vdev_isspare; 6154 6155 /* 6156 * Mark newvd's DTL dirty in this txg. 6157 */ 6158 vdev_dirty(tvd, VDD_DTL, newvd, txg); 6159 6160 /* 6161 * Schedule the resilver to restart in the future. We do this to 6162 * ensure that dmu_sync-ed blocks have been stitched into the 6163 * respective datasets. We do not do this if resilvers have been 6164 * deferred. 6165 */ 6166 if (dsl_scan_resilvering(spa_get_dsl(spa)) && 6167 spa_feature_is_enabled(spa, SPA_FEATURE_RESILVER_DEFER)) 6168 vdev_set_deferred_resilver(spa, newvd); 6169 else 6170 dsl_resilver_restart(spa->spa_dsl_pool, dtl_max_txg); 6171 6172 if (spa->spa_bootfs) 6173 spa_event_notify(spa, newvd, NULL, ESC_ZFS_BOOTFS_VDEV_ATTACH); 6174 6175 spa_event_notify(spa, newvd, NULL, ESC_ZFS_VDEV_ATTACH); 6176 6177 /* 6178 * Commit the config 6179 */ 6180 (void) spa_vdev_exit(spa, newrootvd, dtl_max_txg, 0); 6181 6182 spa_history_log_internal(spa, "vdev attach", NULL, 6183 "%s vdev=%s %s vdev=%s", 6184 replacing && newvd_isspare ? "spare in" : 6185 replacing ? "replace" : "attach", newvdpath, 6186 replacing ? "for" : "to", oldvdpath); 6187 6188 spa_strfree(oldvdpath); 6189 spa_strfree(newvdpath); 6190 6191 return (0); 6192 } 6193 6194 /* 6195 * Detach a device from a mirror or replacing vdev. 6196 * 6197 * If 'replace_done' is specified, only detach if the parent 6198 * is a replacing vdev. 
6199 */
6200 int
6201 spa_vdev_detach(spa_t *spa, uint64_t guid, uint64_t pguid, int replace_done)
6202 {
6203 uint64_t txg;
6204 int error;
6205 vdev_t *rvd = spa->spa_root_vdev;
6206 vdev_t *vd, *pvd, *cvd, *tvd;
6207 boolean_t unspare = B_FALSE;
6208 uint64_t unspare_guid = 0;
6209 char *vdpath;
6210
6211 ASSERT(spa_writeable(spa));
6212
6213 txg = spa_vdev_enter(spa);
6214
6215 vd = spa_lookup_by_guid(spa, guid, B_FALSE);
6216
6217 /*
6218 * Besides being called directly from userland through the
6219 * ioctl interface, spa_vdev_detach() can potentially be called
6220 * at the end of spa_vdev_resilver_done().
6221 *
6222 * In the regular case, when we have a checkpoint this shouldn't
6223 * happen as we never empty the DTLs of a vdev during the scrub
6224 * [see comment in dsl_scan_done()]. Thus spa_vdev_resilver_done()
6225 * should never get here when we have a checkpoint.
6226 *
6227 * That said, even if the pool is checkpointed exactly as
6228 * spa_vdev_resilver_done() calls this function, everything
6229 * should be fine as the resilver will return right away.
6230 */
6231 ASSERT(MUTEX_HELD(&spa_namespace_lock));
6232 if (spa_feature_is_active(spa, SPA_FEATURE_POOL_CHECKPOINT)) {
6233 error = (spa_has_checkpoint(spa)) ?
6234 ZFS_ERR_CHECKPOINT_EXISTS : ZFS_ERR_DISCARDING_CHECKPOINT;
6235 return (spa_vdev_exit(spa, NULL, txg, error));
6236 }
6237
6238 if (vd == NULL)
6239 return (spa_vdev_exit(spa, NULL, txg, ENODEV));
6240
6241 if (!vd->vdev_ops->vdev_op_leaf)
6242 return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
6243
6244 pvd = vd->vdev_parent;
6245
6246 /*
6247 * If the parent/child relationship is not as expected, don't do it.
6248 * Consider M(A,R(B,C)) -- that is, a mirror of A with a replacing
6249 * vdev that's replacing B with C. The user's intent in replacing
6250 * is to go from M(A,B) to M(A,C). If the user decides to cancel
6251 * the replace by detaching C, the expected behavior is to end up
6252 * M(A,B). But suppose that right after deciding to detach C,
6253 * the replacement of B completes. We would have M(A,C), and then
6254 * ask to detach C, which would leave us with just A -- not what
6255 * the user wanted. To prevent this, we make sure that the
6256 * parent/child relationship hasn't changed -- in this example,
6257 * that C's parent is still the replacing vdev R.
6258 */
6259 if (pvd->vdev_guid != pguid && pguid != 0)
6260 return (spa_vdev_exit(spa, NULL, txg, EBUSY));
6261
6262 /*
6263 * If 'replace_done' is set, only detach from a 'replacing' or 'spare' vdev.
6264 */
6265 if (replace_done && pvd->vdev_ops != &vdev_replacing_ops &&
6266 pvd->vdev_ops != &vdev_spare_ops)
6267 return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
6268
6269 ASSERT(pvd->vdev_ops != &vdev_spare_ops ||
6270 spa_version(spa) >= SPA_VERSION_SPARES);
6271
6272 /*
6273 * Only mirror, replacing, and spare vdevs support detach.
6274 */
6275 if (pvd->vdev_ops != &vdev_replacing_ops &&
6276 pvd->vdev_ops != &vdev_mirror_ops &&
6277 pvd->vdev_ops != &vdev_spare_ops)
6278 return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
6279
6280 /*
6281 * If this device has the only valid copy of some data,
6282 * we cannot safely detach it.
6283 */
6284 if (vdev_dtl_required(vd))
6285 return (spa_vdev_exit(spa, NULL, txg, EBUSY));
6286
6287 ASSERT(pvd->vdev_children >= 2);
6288
6289 /*
6290 * If we are detaching the second disk from a replacing vdev, then
6291 * check to see if we changed the original vdev's path to have "/old"
6292 * at the end in spa_vdev_attach(). If so, undo that change now.
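 * For example (paths are illustrative): after an in-place replacement,
 * the original child's path may have been rewritten from
 * /dev/dsk/c0t0d0s0 to /dev/dsk/c0t0d0s0/old while its replacement kept
 * /dev/dsk/c0t0d0s0; detaching that replacement restores the surviving
 * child's path to /dev/dsk/c0t0d0s0.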
6293 */ 6294 if (pvd->vdev_ops == &vdev_replacing_ops && vd->vdev_id > 0 && 6295 vd->vdev_path != NULL) { 6296 size_t len = strlen(vd->vdev_path); 6297 6298 for (int c = 0; c < pvd->vdev_children; c++) { 6299 cvd = pvd->vdev_child[c]; 6300 6301 if (cvd == vd || cvd->vdev_path == NULL) 6302 continue; 6303 6304 if (strncmp(cvd->vdev_path, vd->vdev_path, len) == 0 && 6305 strcmp(cvd->vdev_path + len, "/old") == 0) { 6306 spa_strfree(cvd->vdev_path); 6307 cvd->vdev_path = spa_strdup(vd->vdev_path); 6308 break; 6309 } 6310 } 6311 } 6312 6313 /* 6314 * If we are detaching the original disk from a spare, then it implies 6315 * that the spare should become a real disk, and be removed from the 6316 * active spare list for the pool. 6317 */ 6318 if (pvd->vdev_ops == &vdev_spare_ops && 6319 vd->vdev_id == 0 && 6320 pvd->vdev_child[pvd->vdev_children - 1]->vdev_isspare) 6321 unspare = B_TRUE; 6322 6323 /* 6324 * Erase the disk labels so the disk can be used for other things. 6325 * This must be done after all other error cases are handled, 6326 * but before we disembowel vd (so we can still do I/O to it). 6327 * But if we can't do it, don't treat the error as fatal -- 6328 * it may be that the unwritability of the disk is the reason 6329 * it's being detached! 6330 */ 6331 error = vdev_label_init(vd, 0, VDEV_LABEL_REMOVE); 6332 6333 /* 6334 * Remove vd from its parent and compact the parent's children. 6335 */ 6336 vdev_remove_child(pvd, vd); 6337 vdev_compact_children(pvd); 6338 6339 /* 6340 * Remember one of the remaining children so we can get tvd below. 6341 */ 6342 cvd = pvd->vdev_child[pvd->vdev_children - 1]; 6343 6344 /* 6345 * If we need to remove the remaining child from the list of hot spares, 6346 * do it now, marking the vdev as no longer a spare in the process. 6347 * We must do this before vdev_remove_parent(), because that can 6348 * change the GUID if it creates a new toplevel GUID. For a similar 6349 * reason, we must remove the spare now, in the same txg as the detach; 6350 * otherwise someone could attach a new sibling, change the GUID, and 6351 * the subsequent attempt to spa_vdev_remove(unspare_guid) would fail. 6352 */ 6353 if (unspare) { 6354 ASSERT(cvd->vdev_isspare); 6355 spa_spare_remove(cvd); 6356 unspare_guid = cvd->vdev_guid; 6357 (void) spa_vdev_remove(spa, unspare_guid, B_TRUE); 6358 cvd->vdev_unspare = B_TRUE; 6359 } 6360 6361 /* 6362 * If the parent mirror/replacing vdev only has one child, 6363 * the parent is no longer needed. Remove it from the tree. 6364 */ 6365 if (pvd->vdev_children == 1) { 6366 if (pvd->vdev_ops == &vdev_spare_ops) 6367 cvd->vdev_unspare = B_FALSE; 6368 vdev_remove_parent(cvd); 6369 } 6370 6371 6372 /* 6373 * We don't set tvd until now because the parent we just removed 6374 * may have been the previous top-level vdev. 6375 */ 6376 tvd = cvd->vdev_top; 6377 ASSERT(tvd->vdev_parent == rvd); 6378 6379 /* 6380 * Reevaluate the parent vdev state. 6381 */ 6382 vdev_propagate_state(cvd); 6383 6384 /* 6385 * If the 'autoexpand' property is set on the pool then automatically 6386 * try to expand the size of the pool. For example if the device we 6387 * just detached was smaller than the others, it may be possible to 6388 * add metaslabs (i.e. grow the pool). We need to reopen the vdev 6389 * first so that we can obtain the updated sizes of the leaf vdevs. 6390 */ 6391 if (spa->spa_autoexpand) { 6392 vdev_reopen(tvd); 6393 vdev_expand(tvd, txg); 6394 } 6395 6396 vdev_config_dirty(tvd); 6397 6398 /* 6399 * Mark vd's DTL as dirty in this txg. 
vdev_dtl_sync() will see that 6400 * vd->vdev_detached is set and free vd's DTL object in syncing context. 6401 * But first make sure we're not on any *other* txg's DTL list, to 6402 * prevent vd from being accessed after it's freed. 6403 */ 6404 vdpath = spa_strdup(vd->vdev_path); 6405 for (int t = 0; t < TXG_SIZE; t++) 6406 (void) txg_list_remove_this(&tvd->vdev_dtl_list, vd, t); 6407 vd->vdev_detached = B_TRUE; 6408 vdev_dirty(tvd, VDD_DTL, vd, txg); 6409 6410 spa_event_notify(spa, vd, NULL, ESC_ZFS_VDEV_REMOVE); 6411 6412 /* hang on to the spa before we release the lock */ 6413 spa_open_ref(spa, FTAG); 6414 6415 error = spa_vdev_exit(spa, vd, txg, 0); 6416 6417 spa_history_log_internal(spa, "detach", NULL, 6418 "vdev=%s", vdpath); 6419 spa_strfree(vdpath); 6420 6421 /* 6422 * If this was the removal of the original device in a hot spare vdev, 6423 * then we want to go through and remove the device from the hot spare 6424 * list of every other pool. 6425 */ 6426 if (unspare) { 6427 spa_t *altspa = NULL; 6428 6429 mutex_enter(&spa_namespace_lock); 6430 while ((altspa = spa_next(altspa)) != NULL) { 6431 if (altspa->spa_state != POOL_STATE_ACTIVE || 6432 altspa == spa) 6433 continue; 6434 6435 spa_open_ref(altspa, FTAG); 6436 mutex_exit(&spa_namespace_lock); 6437 (void) spa_vdev_remove(altspa, unspare_guid, B_TRUE); 6438 mutex_enter(&spa_namespace_lock); 6439 spa_close(altspa, FTAG); 6440 } 6441 mutex_exit(&spa_namespace_lock); 6442 6443 /* search the rest of the vdevs for spares to remove */ 6444 spa_vdev_resilver_done(spa); 6445 } 6446 6447 /* all done with the spa; OK to release */ 6448 mutex_enter(&spa_namespace_lock); 6449 spa_close(spa, FTAG); 6450 mutex_exit(&spa_namespace_lock); 6451 6452 return (error); 6453 } 6454 6455 int 6456 spa_vdev_initialize(spa_t *spa, uint64_t guid, uint64_t cmd_type) 6457 { 6458 /* 6459 * We hold the namespace lock through the whole function 6460 * to prevent any changes to the pool while we're starting or 6461 * stopping initialization. The config and state locks are held so that 6462 * we can properly assess the vdev state before we commit to 6463 * the initializing operation. 6464 */ 6465 mutex_enter(&spa_namespace_lock); 6466 spa_config_enter(spa, SCL_CONFIG | SCL_STATE, FTAG, RW_READER); 6467 6468 /* Look up vdev and ensure it's a leaf. */ 6469 vdev_t *vd = spa_lookup_by_guid(spa, guid, B_FALSE); 6470 if (vd == NULL || vd->vdev_detached) { 6471 spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG); 6472 mutex_exit(&spa_namespace_lock); 6473 return (SET_ERROR(ENODEV)); 6474 } else if (!vd->vdev_ops->vdev_op_leaf || !vdev_is_concrete(vd)) { 6475 spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG); 6476 mutex_exit(&spa_namespace_lock); 6477 return (SET_ERROR(EINVAL)); 6478 } else if (!vdev_writeable(vd)) { 6479 spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG); 6480 mutex_exit(&spa_namespace_lock); 6481 return (SET_ERROR(EROFS)); 6482 } 6483 mutex_enter(&vd->vdev_initialize_lock); 6484 spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG); 6485 6486 /* 6487 * When we activate an initialize action we check to see 6488 * if the vdev_initialize_thread is NULL. We do this instead 6489 * of using the vdev_initialize_state since there might be 6490 * a previous initialization process which has completed but 6491 * the thread is not exited. 
6492 */ 6493 if (cmd_type == POOL_INITIALIZE_DO && 6494 (vd->vdev_initialize_thread != NULL || 6495 vd->vdev_top->vdev_removing)) { 6496 mutex_exit(&vd->vdev_initialize_lock); 6497 mutex_exit(&spa_namespace_lock); 6498 return (SET_ERROR(EBUSY)); 6499 } else if (cmd_type == POOL_INITIALIZE_CANCEL && 6500 (vd->vdev_initialize_state != VDEV_INITIALIZE_ACTIVE && 6501 vd->vdev_initialize_state != VDEV_INITIALIZE_SUSPENDED)) { 6502 mutex_exit(&vd->vdev_initialize_lock); 6503 mutex_exit(&spa_namespace_lock); 6504 return (SET_ERROR(ESRCH)); 6505 } else if (cmd_type == POOL_INITIALIZE_SUSPEND && 6506 vd->vdev_initialize_state != VDEV_INITIALIZE_ACTIVE) { 6507 mutex_exit(&vd->vdev_initialize_lock); 6508 mutex_exit(&spa_namespace_lock); 6509 return (SET_ERROR(ESRCH)); 6510 } 6511 6512 switch (cmd_type) { 6513 case POOL_INITIALIZE_DO: 6514 vdev_initialize(vd); 6515 break; 6516 case POOL_INITIALIZE_CANCEL: 6517 vdev_initialize_stop(vd, VDEV_INITIALIZE_CANCELED); 6518 break; 6519 case POOL_INITIALIZE_SUSPEND: 6520 vdev_initialize_stop(vd, VDEV_INITIALIZE_SUSPENDED); 6521 break; 6522 default: 6523 panic("invalid cmd_type %llu", (unsigned long long)cmd_type); 6524 } 6525 mutex_exit(&vd->vdev_initialize_lock); 6526 6527 /* Sync out the initializing state */ 6528 txg_wait_synced(spa->spa_dsl_pool, 0); 6529 mutex_exit(&spa_namespace_lock); 6530 6531 return (0); 6532 } 6533 6534 6535 /* 6536 * Split a set of devices from their mirrors, and create a new pool from them. 6537 */ 6538 int 6539 spa_vdev_split_mirror(spa_t *spa, char *newname, nvlist_t *config, 6540 nvlist_t *props, boolean_t exp) 6541 { 6542 int error = 0; 6543 uint64_t txg, *glist; 6544 spa_t *newspa; 6545 uint_t c, children, lastlog; 6546 nvlist_t **child, *nvl, *tmp; 6547 dmu_tx_t *tx; 6548 char *altroot = NULL; 6549 vdev_t *rvd, **vml = NULL; /* vdev modify list */ 6550 boolean_t activate_slog; 6551 6552 ASSERT(spa_writeable(spa)); 6553 6554 txg = spa_vdev_enter(spa); 6555 6556 ASSERT(MUTEX_HELD(&spa_namespace_lock)); 6557 if (spa_feature_is_active(spa, SPA_FEATURE_POOL_CHECKPOINT)) { 6558 error = (spa_has_checkpoint(spa)) ? 6559 ZFS_ERR_CHECKPOINT_EXISTS : ZFS_ERR_DISCARDING_CHECKPOINT; 6560 return (spa_vdev_exit(spa, NULL, txg, error)); 6561 } 6562 6563 /* clear the log and flush everything up to now */ 6564 activate_slog = spa_passivate_log(spa); 6565 (void) spa_vdev_config_exit(spa, NULL, txg, 0, FTAG); 6566 error = spa_reset_logs(spa); 6567 txg = spa_vdev_config_enter(spa); 6568 6569 if (activate_slog) 6570 spa_activate_log(spa); 6571 6572 if (error != 0) 6573 return (spa_vdev_exit(spa, NULL, txg, error)); 6574 6575 /* check new spa name before going any further */ 6576 if (spa_lookup(newname) != NULL) 6577 return (spa_vdev_exit(spa, NULL, txg, EEXIST)); 6578 6579 /* 6580 * scan through all the children to ensure they're all mirrors 6581 */ 6582 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &nvl) != 0 || 6583 nvlist_lookup_nvlist_array(nvl, ZPOOL_CONFIG_CHILDREN, &child, 6584 &children) != 0) 6585 return (spa_vdev_exit(spa, NULL, txg, EINVAL)); 6586 6587 /* first, check to ensure we've got the right child count */ 6588 rvd = spa->spa_root_vdev; 6589 lastlog = 0; 6590 for (c = 0; c < rvd->vdev_children; c++) { 6591 vdev_t *vd = rvd->vdev_child[c]; 6592 6593 /* don't count the holes & logs as children */ 6594 if (vd->vdev_islog || !vdev_is_concrete(vd)) { 6595 if (lastlog == 0) 6596 lastlog = c; 6597 continue; 6598 } 6599 6600 lastlog = 0; 6601 } 6602 if (children != (lastlog != 0 ? 
lastlog : rvd->vdev_children)) 6603 return (spa_vdev_exit(spa, NULL, txg, EINVAL)); 6604 6605 /* next, ensure no spare or cache devices are part of the split */ 6606 if (nvlist_lookup_nvlist(nvl, ZPOOL_CONFIG_SPARES, &tmp) == 0 || 6607 nvlist_lookup_nvlist(nvl, ZPOOL_CONFIG_L2CACHE, &tmp) == 0) 6608 return (spa_vdev_exit(spa, NULL, txg, EINVAL)); 6609 6610 vml = kmem_zalloc(children * sizeof (vdev_t *), KM_SLEEP); 6611 glist = kmem_zalloc(children * sizeof (uint64_t), KM_SLEEP); 6612 6613 /* then, loop over each vdev and validate it */ 6614 for (c = 0; c < children; c++) { 6615 uint64_t is_hole = 0; 6616 6617 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_HOLE, 6618 &is_hole); 6619 6620 if (is_hole != 0) { 6621 if (spa->spa_root_vdev->vdev_child[c]->vdev_ishole || 6622 spa->spa_root_vdev->vdev_child[c]->vdev_islog) { 6623 continue; 6624 } else { 6625 error = SET_ERROR(EINVAL); 6626 break; 6627 } 6628 } 6629 6630 /* which disk is going to be split? */ 6631 if (nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_GUID, 6632 &glist[c]) != 0) { 6633 error = SET_ERROR(EINVAL); 6634 break; 6635 } 6636 6637 /* look it up in the spa */ 6638 vml[c] = spa_lookup_by_guid(spa, glist[c], B_FALSE); 6639 if (vml[c] == NULL) { 6640 error = SET_ERROR(ENODEV); 6641 break; 6642 } 6643 6644 /* make sure there's nothing stopping the split */ 6645 if (vml[c]->vdev_parent->vdev_ops != &vdev_mirror_ops || 6646 vml[c]->vdev_islog || 6647 !vdev_is_concrete(vml[c]) || 6648 vml[c]->vdev_isspare || 6649 vml[c]->vdev_isl2cache || 6650 !vdev_writeable(vml[c]) || 6651 vml[c]->vdev_children != 0 || 6652 vml[c]->vdev_state != VDEV_STATE_HEALTHY || 6653 c != spa->spa_root_vdev->vdev_child[c]->vdev_id) { 6654 error = SET_ERROR(EINVAL); 6655 break; 6656 } 6657 6658 if (vdev_dtl_required(vml[c])) { 6659 error = SET_ERROR(EBUSY); 6660 break; 6661 } 6662 6663 /* we need certain info from the top level */ 6664 VERIFY(nvlist_add_uint64(child[c], ZPOOL_CONFIG_METASLAB_ARRAY, 6665 vml[c]->vdev_top->vdev_ms_array) == 0); 6666 VERIFY(nvlist_add_uint64(child[c], ZPOOL_CONFIG_METASLAB_SHIFT, 6667 vml[c]->vdev_top->vdev_ms_shift) == 0); 6668 VERIFY(nvlist_add_uint64(child[c], ZPOOL_CONFIG_ASIZE, 6669 vml[c]->vdev_top->vdev_asize) == 0); 6670 VERIFY(nvlist_add_uint64(child[c], ZPOOL_CONFIG_ASHIFT, 6671 vml[c]->vdev_top->vdev_ashift) == 0); 6672 6673 /* transfer per-vdev ZAPs */ 6674 ASSERT3U(vml[c]->vdev_leaf_zap, !=, 0); 6675 VERIFY0(nvlist_add_uint64(child[c], 6676 ZPOOL_CONFIG_VDEV_LEAF_ZAP, vml[c]->vdev_leaf_zap)); 6677 6678 ASSERT3U(vml[c]->vdev_top->vdev_top_zap, !=, 0); 6679 VERIFY0(nvlist_add_uint64(child[c], 6680 ZPOOL_CONFIG_VDEV_TOP_ZAP, 6681 vml[c]->vdev_parent->vdev_top_zap)); 6682 } 6683 6684 if (error != 0) { 6685 kmem_free(vml, children * sizeof (vdev_t *)); 6686 kmem_free(glist, children * sizeof (uint64_t)); 6687 return (spa_vdev_exit(spa, NULL, txg, error)); 6688 } 6689 6690 /* stop writers from using the disks */ 6691 for (c = 0; c < children; c++) { 6692 if (vml[c] != NULL) 6693 vml[c]->vdev_offline = B_TRUE; 6694 } 6695 vdev_reopen(spa->spa_root_vdev); 6696 6697 /* 6698 * Temporarily record the splitting vdevs in the spa config. This 6699 * will disappear once the config is regenerated. 
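 * For example, the guids of the children being split off are stored as
 * a uint64 array under ZPOOL_CONFIG_SPLIT_LIST inside the
 * ZPOOL_CONFIG_SPLIT nvlist; the import path (see spa_try_repair())
 * can consult this record to back out a split that was interrupted by
 * a crash.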
6700 */ 6701 VERIFY(nvlist_alloc(&nvl, NV_UNIQUE_NAME, KM_SLEEP) == 0); 6702 VERIFY(nvlist_add_uint64_array(nvl, ZPOOL_CONFIG_SPLIT_LIST, 6703 glist, children) == 0); 6704 kmem_free(glist, children * sizeof (uint64_t)); 6705 6706 mutex_enter(&spa->spa_props_lock); 6707 VERIFY(nvlist_add_nvlist(spa->spa_config, ZPOOL_CONFIG_SPLIT, 6708 nvl) == 0); 6709 mutex_exit(&spa->spa_props_lock); 6710 spa->spa_config_splitting = nvl; 6711 vdev_config_dirty(spa->spa_root_vdev); 6712 6713 /* configure and create the new pool */ 6714 VERIFY(nvlist_add_string(config, ZPOOL_CONFIG_POOL_NAME, newname) == 0); 6715 VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_POOL_STATE, 6716 exp ? POOL_STATE_EXPORTED : POOL_STATE_ACTIVE) == 0); 6717 VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_VERSION, 6718 spa_version(spa)) == 0); 6719 VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_POOL_TXG, 6720 spa->spa_config_txg) == 0); 6721 VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_POOL_GUID, 6722 spa_generate_guid(NULL)) == 0); 6723 VERIFY0(nvlist_add_boolean(config, ZPOOL_CONFIG_HAS_PER_VDEV_ZAPS)); 6724 (void) nvlist_lookup_string(props, 6725 zpool_prop_to_name(ZPOOL_PROP_ALTROOT), &altroot); 6726 6727 /* add the new pool to the namespace */ 6728 newspa = spa_add(newname, config, altroot); 6729 newspa->spa_avz_action = AVZ_ACTION_REBUILD; 6730 newspa->spa_config_txg = spa->spa_config_txg; 6731 spa_set_log_state(newspa, SPA_LOG_CLEAR); 6732 6733 /* release the spa config lock, retaining the namespace lock */ 6734 spa_vdev_config_exit(spa, NULL, txg, 0, FTAG); 6735 6736 if (zio_injection_enabled) 6737 zio_handle_panic_injection(spa, FTAG, 1); 6738 6739 spa_activate(newspa, spa_mode_global); 6740 spa_async_suspend(newspa); 6741 6742 for (c = 0; c < children; c++) { 6743 if (vml[c] != NULL) { 6744 /* 6745 * Temporarily stop the initializing activity. We set 6746 * the state to ACTIVE so that we know to resume 6747 * the initializing once the split has completed. 
6748 */ 6749 mutex_enter(&vml[c]->vdev_initialize_lock); 6750 vdev_initialize_stop(vml[c], VDEV_INITIALIZE_ACTIVE); 6751 mutex_exit(&vml[c]->vdev_initialize_lock); 6752 } 6753 } 6754 6755 newspa->spa_config_source = SPA_CONFIG_SRC_SPLIT; 6756 6757 /* create the new pool from the disks of the original pool */ 6758 error = spa_load(newspa, SPA_LOAD_IMPORT, SPA_IMPORT_ASSEMBLE); 6759 if (error) 6760 goto out; 6761 6762 /* if that worked, generate a real config for the new pool */ 6763 if (newspa->spa_root_vdev != NULL) { 6764 VERIFY(nvlist_alloc(&newspa->spa_config_splitting, 6765 NV_UNIQUE_NAME, KM_SLEEP) == 0); 6766 VERIFY(nvlist_add_uint64(newspa->spa_config_splitting, 6767 ZPOOL_CONFIG_SPLIT_GUID, spa_guid(spa)) == 0); 6768 spa_config_set(newspa, spa_config_generate(newspa, NULL, -1ULL, 6769 B_TRUE)); 6770 } 6771 6772 /* set the props */ 6773 if (props != NULL) { 6774 spa_configfile_set(newspa, props, B_FALSE); 6775 error = spa_prop_set(newspa, props); 6776 if (error) 6777 goto out; 6778 } 6779 6780 /* flush everything */ 6781 txg = spa_vdev_config_enter(newspa); 6782 vdev_config_dirty(newspa->spa_root_vdev); 6783 (void) spa_vdev_config_exit(newspa, NULL, txg, 0, FTAG); 6784 6785 if (zio_injection_enabled) 6786 zio_handle_panic_injection(spa, FTAG, 2); 6787 6788 spa_async_resume(newspa); 6789 6790 /* finally, update the original pool's config */ 6791 txg = spa_vdev_config_enter(spa); 6792 tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir); 6793 error = dmu_tx_assign(tx, TXG_WAIT); 6794 if (error != 0) 6795 dmu_tx_abort(tx); 6796 for (c = 0; c < children; c++) { 6797 if (vml[c] != NULL) { 6798 vdev_split(vml[c]); 6799 if (error == 0) 6800 spa_history_log_internal(spa, "detach", tx, 6801 "vdev=%s", vml[c]->vdev_path); 6802 6803 vdev_free(vml[c]); 6804 } 6805 } 6806 spa->spa_avz_action = AVZ_ACTION_REBUILD; 6807 vdev_config_dirty(spa->spa_root_vdev); 6808 spa->spa_config_splitting = NULL; 6809 nvlist_free(nvl); 6810 if (error == 0) 6811 dmu_tx_commit(tx); 6812 (void) spa_vdev_exit(spa, NULL, txg, 0); 6813 6814 if (zio_injection_enabled) 6815 zio_handle_panic_injection(spa, FTAG, 3); 6816 6817 /* split is complete; log a history record */ 6818 spa_history_log_internal(newspa, "split", NULL, 6819 "from pool %s", spa_name(spa)); 6820 6821 kmem_free(vml, children * sizeof (vdev_t *)); 6822 6823 /* if we're not going to mount the filesystems in userland, export */ 6824 if (exp) 6825 error = spa_export_common(newname, POOL_STATE_EXPORTED, NULL, 6826 B_FALSE, B_FALSE); 6827 6828 return (error); 6829 6830 out: 6831 spa_unload(newspa); 6832 spa_deactivate(newspa); 6833 spa_remove(newspa); 6834 6835 txg = spa_vdev_config_enter(spa); 6836 6837 /* re-online all offlined disks */ 6838 for (c = 0; c < children; c++) { 6839 if (vml[c] != NULL) 6840 vml[c]->vdev_offline = B_FALSE; 6841 } 6842 6843 /* restart initializing disks as necessary */ 6844 spa_async_request(spa, SPA_ASYNC_INITIALIZE_RESTART); 6845 6846 vdev_reopen(spa->spa_root_vdev); 6847 6848 nvlist_free(spa->spa_config_splitting); 6849 spa->spa_config_splitting = NULL; 6850 (void) spa_vdev_exit(spa, NULL, txg, error); 6851 6852 kmem_free(vml, children * sizeof (vdev_t *)); 6853 return (error); 6854 } 6855 6856 /* 6857 * Find any device that's done replacing, or a vdev marked 'unspare' that's 6858 * currently spared, so we can detach it. 
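 * For example, in mirror(A, replacing(B, C)): once C's DTLs are empty
 * and B is no longer required, the hunt returns B so the caller can
 * detach it, collapsing the tree back to mirror(A, C).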
6859 */ 6860 static vdev_t * 6861 spa_vdev_resilver_done_hunt(vdev_t *vd) 6862 { 6863 vdev_t *newvd, *oldvd; 6864 6865 for (int c = 0; c < vd->vdev_children; c++) { 6866 oldvd = spa_vdev_resilver_done_hunt(vd->vdev_child[c]); 6867 if (oldvd != NULL) 6868 return (oldvd); 6869 } 6870 6871 /* 6872 * Check for a completed replacement. We always consider the first 6873 * vdev in the list to be the oldest vdev, and the last one to be 6874 * the newest (see spa_vdev_attach() for how that works). In 6875 * the case where the newest vdev is faulted, we will not automatically 6876 * remove it after a resilver completes. This is OK as it will require 6877 * user intervention to determine which disk the admin wishes to keep. 6878 */ 6879 if (vd->vdev_ops == &vdev_replacing_ops) { 6880 ASSERT(vd->vdev_children > 1); 6881 6882 newvd = vd->vdev_child[vd->vdev_children - 1]; 6883 oldvd = vd->vdev_child[0]; 6884 6885 if (vdev_dtl_empty(newvd, DTL_MISSING) && 6886 vdev_dtl_empty(newvd, DTL_OUTAGE) && 6887 !vdev_dtl_required(oldvd)) 6888 return (oldvd); 6889 } 6890 6891 /* 6892 * Check for a completed resilver with the 'unspare' flag set. 6893 * Also potentially update faulted state. 6894 */ 6895 if (vd->vdev_ops == &vdev_spare_ops) { 6896 vdev_t *first = vd->vdev_child[0]; 6897 vdev_t *last = vd->vdev_child[vd->vdev_children - 1]; 6898 6899 if (last->vdev_unspare) { 6900 oldvd = first; 6901 newvd = last; 6902 } else if (first->vdev_unspare) { 6903 oldvd = last; 6904 newvd = first; 6905 } else { 6906 oldvd = NULL; 6907 } 6908 6909 if (oldvd != NULL && 6910 vdev_dtl_empty(newvd, DTL_MISSING) && 6911 vdev_dtl_empty(newvd, DTL_OUTAGE) && 6912 !vdev_dtl_required(oldvd)) 6913 return (oldvd); 6914 6915 vdev_propagate_state(vd); 6916 6917 /* 6918 * If there are more than two spares attached to a disk, 6919 * and those spares are not required, then we want to 6920 * attempt to free them up now so that they can be used 6921 * by other pools. Once we're back down to a single 6922 * disk+spare, we stop removing them. 6923 */ 6924 if (vd->vdev_children > 2) { 6925 newvd = vd->vdev_child[1]; 6926 6927 if (newvd->vdev_isspare && last->vdev_isspare && 6928 vdev_dtl_empty(last, DTL_MISSING) && 6929 vdev_dtl_empty(last, DTL_OUTAGE) && 6930 !vdev_dtl_required(newvd)) 6931 return (newvd); 6932 } 6933 } 6934 6935 return (NULL); 6936 } 6937 6938 static void 6939 spa_vdev_resilver_done(spa_t *spa) 6940 { 6941 vdev_t *vd, *pvd, *ppvd; 6942 uint64_t guid, sguid, pguid, ppguid; 6943 6944 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 6945 6946 while ((vd = spa_vdev_resilver_done_hunt(spa->spa_root_vdev)) != NULL) { 6947 pvd = vd->vdev_parent; 6948 ppvd = pvd->vdev_parent; 6949 guid = vd->vdev_guid; 6950 pguid = pvd->vdev_guid; 6951 ppguid = ppvd->vdev_guid; 6952 sguid = 0; 6953 /* 6954 * If we have just finished replacing a hot spared device, then 6955 * we need to detach the parent's first child (the original hot 6956 * spare) as well. 
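 * For example (illustrative): if failed disk B had been taken over by
 * hot spare S and was later replaced by new disk C, the tree looks like
 * spare(replacing(B, C), S); once C finishes resilvering, B is detached
 * and then S is detached as well, returning it to the pool's list of
 * available spares and leaving C in place.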
6957 */ 6958 if (ppvd->vdev_ops == &vdev_spare_ops && pvd->vdev_id == 0 && 6959 ppvd->vdev_children == 2) { 6960 ASSERT(pvd->vdev_ops == &vdev_replacing_ops); 6961 sguid = ppvd->vdev_child[1]->vdev_guid; 6962 } 6963 ASSERT(vd->vdev_resilver_txg == 0 || !vdev_dtl_required(vd)); 6964 6965 spa_config_exit(spa, SCL_ALL, FTAG); 6966 if (spa_vdev_detach(spa, guid, pguid, B_TRUE) != 0) 6967 return; 6968 if (sguid && spa_vdev_detach(spa, sguid, ppguid, B_TRUE) != 0) 6969 return; 6970 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 6971 } 6972 6973 spa_config_exit(spa, SCL_ALL, FTAG); 6974 } 6975 6976 /* 6977 * Update the stored path or FRU for this vdev. 6978 */ 6979 int 6980 spa_vdev_set_common(spa_t *spa, uint64_t guid, const char *value, 6981 boolean_t ispath) 6982 { 6983 vdev_t *vd; 6984 boolean_t sync = B_FALSE; 6985 6986 ASSERT(spa_writeable(spa)); 6987 6988 spa_vdev_state_enter(spa, SCL_ALL); 6989 6990 if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL) 6991 return (spa_vdev_state_exit(spa, NULL, ENOENT)); 6992 6993 if (!vd->vdev_ops->vdev_op_leaf) 6994 return (spa_vdev_state_exit(spa, NULL, ENOTSUP)); 6995 6996 if (ispath) { 6997 if (strcmp(value, vd->vdev_path) != 0) { 6998 spa_strfree(vd->vdev_path); 6999 vd->vdev_path = spa_strdup(value); 7000 sync = B_TRUE; 7001 } 7002 } else { 7003 if (vd->vdev_fru == NULL) { 7004 vd->vdev_fru = spa_strdup(value); 7005 sync = B_TRUE; 7006 } else if (strcmp(value, vd->vdev_fru) != 0) { 7007 spa_strfree(vd->vdev_fru); 7008 vd->vdev_fru = spa_strdup(value); 7009 sync = B_TRUE; 7010 } 7011 } 7012 7013 return (spa_vdev_state_exit(spa, sync ? vd : NULL, 0)); 7014 } 7015 7016 int 7017 spa_vdev_setpath(spa_t *spa, uint64_t guid, const char *newpath) 7018 { 7019 return (spa_vdev_set_common(spa, guid, newpath, B_TRUE)); 7020 } 7021 7022 int 7023 spa_vdev_setfru(spa_t *spa, uint64_t guid, const char *newfru) 7024 { 7025 return (spa_vdev_set_common(spa, guid, newfru, B_FALSE)); 7026 } 7027 7028 /* 7029 * ========================================================================== 7030 * SPA Scanning 7031 * ========================================================================== 7032 */ 7033 int 7034 spa_scrub_pause_resume(spa_t *spa, pool_scrub_cmd_t cmd) 7035 { 7036 ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == 0); 7037 7038 if (dsl_scan_resilvering(spa->spa_dsl_pool)) 7039 return (SET_ERROR(EBUSY)); 7040 7041 return (dsl_scrub_set_pause_resume(spa->spa_dsl_pool, cmd)); 7042 } 7043 7044 int 7045 spa_scan_stop(spa_t *spa) 7046 { 7047 ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == 0); 7048 if (dsl_scan_resilvering(spa->spa_dsl_pool)) 7049 return (SET_ERROR(EBUSY)); 7050 return (dsl_scan_cancel(spa->spa_dsl_pool)); 7051 } 7052 7053 int 7054 spa_scan(spa_t *spa, pool_scan_func_t func) 7055 { 7056 ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == 0); 7057 7058 if (func >= POOL_SCAN_FUNCS || func == POOL_SCAN_NONE) 7059 return (SET_ERROR(ENOTSUP)); 7060 7061 if (func == POOL_SCAN_RESILVER && 7062 !spa_feature_is_enabled(spa, SPA_FEATURE_RESILVER_DEFER)) 7063 return (SET_ERROR(ENOTSUP)); 7064 7065 /* 7066 * If a resilver was requested, but there is no DTL on a 7067 * writeable leaf device, we have nothing to do. 
7068 */ 7069 if (func == POOL_SCAN_RESILVER && 7070 !vdev_resilver_needed(spa->spa_root_vdev, NULL, NULL)) { 7071 spa_async_request(spa, SPA_ASYNC_RESILVER_DONE); 7072 return (0); 7073 } 7074 7075 return (dsl_scan(spa->spa_dsl_pool, func)); 7076 } 7077 7078 /* 7079 * ========================================================================== 7080 * SPA async task processing 7081 * ========================================================================== 7082 */ 7083 7084 static void 7085 spa_async_remove(spa_t *spa, vdev_t *vd) 7086 { 7087 if (vd->vdev_remove_wanted) { 7088 vd->vdev_remove_wanted = B_FALSE; 7089 vd->vdev_delayed_close = B_FALSE; 7090 vdev_set_state(vd, B_FALSE, VDEV_STATE_REMOVED, VDEV_AUX_NONE); 7091 7092 /* 7093 * We want to clear the stats, but we don't want to do a full 7094 * vdev_clear() as that will cause us to throw away 7095 * degraded/faulted state as well as attempt to reopen the 7096 * device, all of which is a waste. 7097 */ 7098 vd->vdev_stat.vs_read_errors = 0; 7099 vd->vdev_stat.vs_write_errors = 0; 7100 vd->vdev_stat.vs_checksum_errors = 0; 7101 7102 vdev_state_dirty(vd->vdev_top); 7103 } 7104 7105 for (int c = 0; c < vd->vdev_children; c++) 7106 spa_async_remove(spa, vd->vdev_child[c]); 7107 } 7108 7109 static void 7110 spa_async_probe(spa_t *spa, vdev_t *vd) 7111 { 7112 if (vd->vdev_probe_wanted) { 7113 vd->vdev_probe_wanted = B_FALSE; 7114 vdev_reopen(vd); /* vdev_open() does the actual probe */ 7115 } 7116 7117 for (int c = 0; c < vd->vdev_children; c++) 7118 spa_async_probe(spa, vd->vdev_child[c]); 7119 } 7120 7121 static void 7122 spa_async_autoexpand(spa_t *spa, vdev_t *vd) 7123 { 7124 sysevent_id_t eid; 7125 nvlist_t *attr; 7126 char *physpath; 7127 7128 if (!spa->spa_autoexpand) 7129 return; 7130 7131 for (int c = 0; c < vd->vdev_children; c++) { 7132 vdev_t *cvd = vd->vdev_child[c]; 7133 spa_async_autoexpand(spa, cvd); 7134 } 7135 7136 if (!vd->vdev_ops->vdev_op_leaf || vd->vdev_physpath == NULL) 7137 return; 7138 7139 physpath = kmem_zalloc(MAXPATHLEN, KM_SLEEP); 7140 (void) snprintf(physpath, MAXPATHLEN, "/devices%s", vd->vdev_physpath); 7141 7142 VERIFY(nvlist_alloc(&attr, NV_UNIQUE_NAME, KM_SLEEP) == 0); 7143 VERIFY(nvlist_add_string(attr, DEV_PHYS_PATH, physpath) == 0); 7144 7145 (void) ddi_log_sysevent(zfs_dip, SUNW_VENDOR, EC_DEV_STATUS, 7146 ESC_DEV_DLE, attr, &eid, DDI_SLEEP); 7147 7148 nvlist_free(attr); 7149 kmem_free(physpath, MAXPATHLEN); 7150 } 7151 7152 static void 7153 spa_async_thread(void *arg) 7154 { 7155 spa_t *spa = (spa_t *)arg; 7156 dsl_pool_t *dp = spa->spa_dsl_pool; 7157 int tasks; 7158 7159 ASSERT(spa->spa_sync_on); 7160 7161 mutex_enter(&spa->spa_async_lock); 7162 tasks = spa->spa_async_tasks; 7163 spa->spa_async_tasks = 0; 7164 mutex_exit(&spa->spa_async_lock); 7165 7166 /* 7167 * See if the config needs to be updated. 
7168 */ 7169 if (tasks & SPA_ASYNC_CONFIG_UPDATE) { 7170 uint64_t old_space, new_space; 7171 7172 mutex_enter(&spa_namespace_lock); 7173 old_space = metaslab_class_get_space(spa_normal_class(spa)); 7174 old_space += metaslab_class_get_space(spa_special_class(spa)); 7175 old_space += metaslab_class_get_space(spa_dedup_class(spa)); 7176 7177 spa_config_update(spa, SPA_CONFIG_UPDATE_POOL); 7178 7179 new_space = metaslab_class_get_space(spa_normal_class(spa)); 7180 new_space += metaslab_class_get_space(spa_special_class(spa)); 7181 new_space += metaslab_class_get_space(spa_dedup_class(spa)); 7182 mutex_exit(&spa_namespace_lock); 7183 7184 /* 7185 * If the pool grew as a result of the config update, 7186 * then log an internal history event. 7187 */ 7188 if (new_space != old_space) { 7189 spa_history_log_internal(spa, "vdev online", NULL, 7190 "pool '%s' size: %llu(+%llu)", 7191 spa_name(spa), new_space, new_space - old_space); 7192 } 7193 } 7194 7195 /* 7196 * See if any devices need to be marked REMOVED. 7197 */ 7198 if (tasks & SPA_ASYNC_REMOVE) { 7199 spa_vdev_state_enter(spa, SCL_NONE); 7200 spa_async_remove(spa, spa->spa_root_vdev); 7201 for (int i = 0; i < spa->spa_l2cache.sav_count; i++) 7202 spa_async_remove(spa, spa->spa_l2cache.sav_vdevs[i]); 7203 for (int i = 0; i < spa->spa_spares.sav_count; i++) 7204 spa_async_remove(spa, spa->spa_spares.sav_vdevs[i]); 7205 (void) spa_vdev_state_exit(spa, NULL, 0); 7206 } 7207 7208 if ((tasks & SPA_ASYNC_AUTOEXPAND) && !spa_suspended(spa)) { 7209 spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER); 7210 spa_async_autoexpand(spa, spa->spa_root_vdev); 7211 spa_config_exit(spa, SCL_CONFIG, FTAG); 7212 } 7213 7214 /* 7215 * See if any devices need to be probed. 7216 */ 7217 if (tasks & SPA_ASYNC_PROBE) { 7218 spa_vdev_state_enter(spa, SCL_NONE); 7219 spa_async_probe(spa, spa->spa_root_vdev); 7220 (void) spa_vdev_state_exit(spa, NULL, 0); 7221 } 7222 7223 /* 7224 * If any devices are done replacing, detach them. 7225 */ 7226 if (tasks & SPA_ASYNC_RESILVER_DONE) 7227 spa_vdev_resilver_done(spa); 7228 7229 /* 7230 * Kick off a resilver. 7231 */ 7232 if (tasks & SPA_ASYNC_RESILVER && 7233 (!dsl_scan_resilvering(dp) || 7234 !spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_RESILVER_DEFER))) 7235 dsl_resilver_restart(dp, 0); 7236 7237 if (tasks & SPA_ASYNC_INITIALIZE_RESTART) { 7238 mutex_enter(&spa_namespace_lock); 7239 spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER); 7240 vdev_initialize_restart(spa->spa_root_vdev); 7241 spa_config_exit(spa, SCL_CONFIG, FTAG); 7242 mutex_exit(&spa_namespace_lock); 7243 } 7244 7245 /* 7246 * Let the world know that we're done. 
7247 */ 7248 mutex_enter(&spa->spa_async_lock); 7249 spa->spa_async_thread = NULL; 7250 cv_broadcast(&spa->spa_async_cv); 7251 mutex_exit(&spa->spa_async_lock); 7252 thread_exit(); 7253 } 7254 7255 void 7256 spa_async_suspend(spa_t *spa) 7257 { 7258 mutex_enter(&spa->spa_async_lock); 7259 spa->spa_async_suspended++; 7260 while (spa->spa_async_thread != NULL) 7261 cv_wait(&spa->spa_async_cv, &spa->spa_async_lock); 7262 mutex_exit(&spa->spa_async_lock); 7263 7264 spa_vdev_remove_suspend(spa); 7265 7266 zthr_t *condense_thread = spa->spa_condense_zthr; 7267 if (condense_thread != NULL) 7268 zthr_cancel(condense_thread); 7269 7270 zthr_t *discard_thread = spa->spa_checkpoint_discard_zthr; 7271 if (discard_thread != NULL) 7272 zthr_cancel(discard_thread); 7273 } 7274 7275 void 7276 spa_async_resume(spa_t *spa) 7277 { 7278 mutex_enter(&spa->spa_async_lock); 7279 ASSERT(spa->spa_async_suspended != 0); 7280 spa->spa_async_suspended--; 7281 mutex_exit(&spa->spa_async_lock); 7282 spa_restart_removal(spa); 7283 7284 zthr_t *condense_thread = spa->spa_condense_zthr; 7285 if (condense_thread != NULL) 7286 zthr_resume(condense_thread); 7287 7288 zthr_t *discard_thread = spa->spa_checkpoint_discard_zthr; 7289 if (discard_thread != NULL) 7290 zthr_resume(discard_thread); 7291 } 7292 7293 static boolean_t 7294 spa_async_tasks_pending(spa_t *spa) 7295 { 7296 uint_t non_config_tasks; 7297 uint_t config_task; 7298 boolean_t config_task_suspended; 7299 7300 non_config_tasks = spa->spa_async_tasks & ~SPA_ASYNC_CONFIG_UPDATE; 7301 config_task = spa->spa_async_tasks & SPA_ASYNC_CONFIG_UPDATE; 7302 if (spa->spa_ccw_fail_time == 0) { 7303 config_task_suspended = B_FALSE; 7304 } else { 7305 config_task_suspended = 7306 (gethrtime() - spa->spa_ccw_fail_time) < 7307 (zfs_ccw_retry_interval * NANOSEC); 7308 } 7309 7310 return (non_config_tasks || (config_task && !config_task_suspended)); 7311 } 7312 7313 static void 7314 spa_async_dispatch(spa_t *spa) 7315 { 7316 mutex_enter(&spa->spa_async_lock); 7317 if (spa_async_tasks_pending(spa) && 7318 !spa->spa_async_suspended && 7319 spa->spa_async_thread == NULL && 7320 rootdir != NULL) 7321 spa->spa_async_thread = thread_create(NULL, 0, 7322 spa_async_thread, spa, 0, &p0, TS_RUN, maxclsyspri); 7323 mutex_exit(&spa->spa_async_lock); 7324 } 7325 7326 void 7327 spa_async_request(spa_t *spa, int task) 7328 { 7329 zfs_dbgmsg("spa=%s async request task=%u", spa->spa_name, task); 7330 mutex_enter(&spa->spa_async_lock); 7331 spa->spa_async_tasks |= task; 7332 mutex_exit(&spa->spa_async_lock); 7333 } 7334 7335 /* 7336 * ========================================================================== 7337 * SPA syncing routines 7338 * ========================================================================== 7339 */ 7340 7341 static int 7342 bpobj_enqueue_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx) 7343 { 7344 bpobj_t *bpo = arg; 7345 bpobj_enqueue(bpo, bp, tx); 7346 return (0); 7347 } 7348 7349 static int 7350 spa_free_sync_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx) 7351 { 7352 zio_t *zio = arg; 7353 7354 zio_nowait(zio_free_sync(zio, zio->io_spa, dmu_tx_get_txg(tx), bp, 7355 zio->io_flags)); 7356 return (0); 7357 } 7358 7359 /* 7360 * Note: this simple function is not inlined to make it easier to dtrace the 7361 * amount of time spent syncing frees. 
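 * For example, an fbt entry/return probe pair on spa_sync_frees() (and
 * on spa_sync_deferred_frees() below) can be used to aggregate how long
 * each call takes during spa_sync().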
7362 */ 7363 static void 7364 spa_sync_frees(spa_t *spa, bplist_t *bpl, dmu_tx_t *tx) 7365 { 7366 zio_t *zio = zio_root(spa, NULL, NULL, 0); 7367 bplist_iterate(bpl, spa_free_sync_cb, zio, tx); 7368 VERIFY(zio_wait(zio) == 0); 7369 } 7370 7371 /* 7372 * Note: this simple function is not inlined to make it easier to dtrace the 7373 * amount of time spent syncing deferred frees. 7374 */ 7375 static void 7376 spa_sync_deferred_frees(spa_t *spa, dmu_tx_t *tx) 7377 { 7378 if (spa_sync_pass(spa) != 1) 7379 return; 7380 7381 zio_t *zio = zio_root(spa, NULL, NULL, 0); 7382 VERIFY3U(bpobj_iterate(&spa->spa_deferred_bpobj, 7383 spa_free_sync_cb, zio, tx), ==, 0); 7384 VERIFY0(zio_wait(zio)); 7385 } 7386 7387 7388 static void 7389 spa_sync_nvlist(spa_t *spa, uint64_t obj, nvlist_t *nv, dmu_tx_t *tx) 7390 { 7391 char *packed = NULL; 7392 size_t bufsize; 7393 size_t nvsize = 0; 7394 dmu_buf_t *db; 7395 7396 VERIFY(nvlist_size(nv, &nvsize, NV_ENCODE_XDR) == 0); 7397 7398 /* 7399 * Write full (SPA_CONFIG_BLOCKSIZE) blocks of configuration 7400 * information. This avoids the dmu_buf_will_dirty() path and 7401 * saves us a pre-read to get data we don't actually care about. 7402 */ 7403 bufsize = P2ROUNDUP((uint64_t)nvsize, SPA_CONFIG_BLOCKSIZE); 7404 packed = kmem_alloc(bufsize, KM_SLEEP); 7405 7406 VERIFY(nvlist_pack(nv, &packed, &nvsize, NV_ENCODE_XDR, 7407 KM_SLEEP) == 0); 7408 bzero(packed + nvsize, bufsize - nvsize); 7409 7410 dmu_write(spa->spa_meta_objset, obj, 0, bufsize, packed, tx); 7411 7412 kmem_free(packed, bufsize); 7413 7414 VERIFY(0 == dmu_bonus_hold(spa->spa_meta_objset, obj, FTAG, &db)); 7415 dmu_buf_will_dirty(db, tx); 7416 *(uint64_t *)db->db_data = nvsize; 7417 dmu_buf_rele(db, FTAG); 7418 } 7419 7420 static void 7421 spa_sync_aux_dev(spa_t *spa, spa_aux_vdev_t *sav, dmu_tx_t *tx, 7422 const char *config, const char *entry) 7423 { 7424 nvlist_t *nvroot; 7425 nvlist_t **list; 7426 int i; 7427 7428 if (!sav->sav_sync) 7429 return; 7430 7431 /* 7432 * Update the MOS nvlist describing the list of available devices. 7433 * spa_validate_aux() will have already made sure this nvlist is 7434 * valid and the vdevs are labeled appropriately. 7435 */ 7436 if (sav->sav_object == 0) { 7437 sav->sav_object = dmu_object_alloc(spa->spa_meta_objset, 7438 DMU_OT_PACKED_NVLIST, 1 << 14, DMU_OT_PACKED_NVLIST_SIZE, 7439 sizeof (uint64_t), tx); 7440 VERIFY(zap_update(spa->spa_meta_objset, 7441 DMU_POOL_DIRECTORY_OBJECT, entry, sizeof (uint64_t), 1, 7442 &sav->sav_object, tx) == 0); 7443 } 7444 7445 VERIFY(nvlist_alloc(&nvroot, NV_UNIQUE_NAME, KM_SLEEP) == 0); 7446 if (sav->sav_count == 0) { 7447 VERIFY(nvlist_add_nvlist_array(nvroot, config, NULL, 0) == 0); 7448 } else { 7449 list = kmem_alloc(sav->sav_count * sizeof (void *), KM_SLEEP); 7450 for (i = 0; i < sav->sav_count; i++) 7451 list[i] = vdev_config_generate(spa, sav->sav_vdevs[i], 7452 B_FALSE, VDEV_CONFIG_L2CACHE); 7453 VERIFY(nvlist_add_nvlist_array(nvroot, config, list, 7454 sav->sav_count) == 0); 7455 for (i = 0; i < sav->sav_count; i++) 7456 nvlist_free(list[i]); 7457 kmem_free(list, sav->sav_count * sizeof (void *)); 7458 } 7459 7460 spa_sync_nvlist(spa, sav->sav_object, nvroot, tx); 7461 nvlist_free(nvroot); 7462 7463 sav->sav_sync = B_FALSE; 7464 } 7465 7466 /* 7467 * Rebuild spa's all-vdev ZAP from the vdev ZAPs indicated in each vdev_t. 7468 * The all-vdev ZAP must be empty. 
7469 */ 7470 static void 7471 spa_avz_build(vdev_t *vd, uint64_t avz, dmu_tx_t *tx) 7472 { 7473 spa_t *spa = vd->vdev_spa; 7474 if (vd->vdev_top_zap != 0) { 7475 VERIFY0(zap_add_int(spa->spa_meta_objset, avz, 7476 vd->vdev_top_zap, tx)); 7477 } 7478 if (vd->vdev_leaf_zap != 0) { 7479 VERIFY0(zap_add_int(spa->spa_meta_objset, avz, 7480 vd->vdev_leaf_zap, tx)); 7481 } 7482 for (uint64_t i = 0; i < vd->vdev_children; i++) { 7483 spa_avz_build(vd->vdev_child[i], avz, tx); 7484 } 7485 } 7486 7487 static void 7488 spa_sync_config_object(spa_t *spa, dmu_tx_t *tx) 7489 { 7490 nvlist_t *config; 7491 7492 /* 7493 * If the pool is being imported from a pre-per-vdev-ZAP version of ZFS, 7494 * its config may not be dirty but we still need to build per-vdev ZAPs. 7495 * Similarly, if the pool is being assembled (e.g. after a split), we 7496 * need to rebuild the AVZ although the config may not be dirty. 7497 */ 7498 if (list_is_empty(&spa->spa_config_dirty_list) && 7499 spa->spa_avz_action == AVZ_ACTION_NONE) 7500 return; 7501 7502 spa_config_enter(spa, SCL_STATE, FTAG, RW_READER); 7503 7504 ASSERT(spa->spa_avz_action == AVZ_ACTION_NONE || 7505 spa->spa_avz_action == AVZ_ACTION_INITIALIZE || 7506 spa->spa_all_vdev_zaps != 0); 7507 7508 if (spa->spa_avz_action == AVZ_ACTION_REBUILD) { 7509 /* Make and build the new AVZ */ 7510 uint64_t new_avz = zap_create(spa->spa_meta_objset, 7511 DMU_OTN_ZAP_METADATA, DMU_OT_NONE, 0, tx); 7512 spa_avz_build(spa->spa_root_vdev, new_avz, tx); 7513 7514 /* Diff old AVZ with new one */ 7515 zap_cursor_t zc; 7516 zap_attribute_t za; 7517 7518 for (zap_cursor_init(&zc, spa->spa_meta_objset, 7519 spa->spa_all_vdev_zaps); 7520 zap_cursor_retrieve(&zc, &za) == 0; 7521 zap_cursor_advance(&zc)) { 7522 uint64_t vdzap = za.za_first_integer; 7523 if (zap_lookup_int(spa->spa_meta_objset, new_avz, 7524 vdzap) == ENOENT) { 7525 /* 7526 * ZAP is listed in old AVZ but not in new one; 7527 * destroy it 7528 */ 7529 VERIFY0(zap_destroy(spa->spa_meta_objset, vdzap, 7530 tx)); 7531 } 7532 } 7533 7534 zap_cursor_fini(&zc); 7535 7536 /* Destroy the old AVZ */ 7537 VERIFY0(zap_destroy(spa->spa_meta_objset, 7538 spa->spa_all_vdev_zaps, tx)); 7539 7540 /* Replace the old AVZ in the dir obj with the new one */ 7541 VERIFY0(zap_update(spa->spa_meta_objset, 7542 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_VDEV_ZAP_MAP, 7543 sizeof (new_avz), 1, &new_avz, tx)); 7544 7545 spa->spa_all_vdev_zaps = new_avz; 7546 } else if (spa->spa_avz_action == AVZ_ACTION_DESTROY) { 7547 zap_cursor_t zc; 7548 zap_attribute_t za; 7549 7550 /* Walk through the AVZ and destroy all listed ZAPs */ 7551 for (zap_cursor_init(&zc, spa->spa_meta_objset, 7552 spa->spa_all_vdev_zaps); 7553 zap_cursor_retrieve(&zc, &za) == 0; 7554 zap_cursor_advance(&zc)) { 7555 uint64_t zap = za.za_first_integer; 7556 VERIFY0(zap_destroy(spa->spa_meta_objset, zap, tx)); 7557 } 7558 7559 zap_cursor_fini(&zc); 7560 7561 /* Destroy and unlink the AVZ itself */ 7562 VERIFY0(zap_destroy(spa->spa_meta_objset, 7563 spa->spa_all_vdev_zaps, tx)); 7564 VERIFY0(zap_remove(spa->spa_meta_objset, 7565 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_VDEV_ZAP_MAP, tx)); 7566 spa->spa_all_vdev_zaps = 0; 7567 } 7568 7569 if (spa->spa_all_vdev_zaps == 0) { 7570 spa->spa_all_vdev_zaps = zap_create_link(spa->spa_meta_objset, 7571 DMU_OTN_ZAP_METADATA, DMU_POOL_DIRECTORY_OBJECT, 7572 DMU_POOL_VDEV_ZAP_MAP, tx); 7573 } 7574 spa->spa_avz_action = AVZ_ACTION_NONE; 7575 7576 /* Create ZAPs for vdevs that don't have them. 
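 * (typically vdevs last written by a version of ZFS that predates
 * per-vdev ZAPs)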
 */
7577     vdev_construct_zaps(spa->spa_root_vdev, tx);
7578
7579     config = spa_config_generate(spa, spa->spa_root_vdev,
7580         dmu_tx_get_txg(tx), B_FALSE);
7581
7582     /*
7583      * If we're upgrading the spa version then make sure that
7584      * the config object gets updated with the correct version.
7585      */
7586     if (spa->spa_ubsync.ub_version < spa->spa_uberblock.ub_version)
7587         fnvlist_add_uint64(config, ZPOOL_CONFIG_VERSION,
7588             spa->spa_uberblock.ub_version);
7589
7590     spa_config_exit(spa, SCL_STATE, FTAG);
7591
7592     nvlist_free(spa->spa_config_syncing);
7593     spa->spa_config_syncing = config;
7594
7595     spa_sync_nvlist(spa, spa->spa_config_object, config, tx);
7596 }
7597
7598 static void
7599 spa_sync_version(void *arg, dmu_tx_t *tx)
7600 {
7601     uint64_t *versionp = arg;
7602     uint64_t version = *versionp;
7603     spa_t *spa = dmu_tx_pool(tx)->dp_spa;
7604
7605     /*
7606      * Setting the version is special cased when first creating the pool.
7607      */
7608     ASSERT(tx->tx_txg != TXG_INITIAL);
7609
7610     ASSERT(SPA_VERSION_IS_SUPPORTED(version));
7611     ASSERT(version >= spa_version(spa));
7612
7613     spa->spa_uberblock.ub_version = version;
7614     vdev_config_dirty(spa->spa_root_vdev);
7615     spa_history_log_internal(spa, "set", tx, "version=%lld", version);
7616 }
7617
7618 /*
7619  * Set zpool properties.
7620  */
7621 static void
7622 spa_sync_props(void *arg, dmu_tx_t *tx)
7623 {
7624     nvlist_t *nvp = arg;
7625     spa_t *spa = dmu_tx_pool(tx)->dp_spa;
7626     objset_t *mos = spa->spa_meta_objset;
7627     nvpair_t *elem = NULL;
7628
7629     mutex_enter(&spa->spa_props_lock);
7630
7631     while ((elem = nvlist_next_nvpair(nvp, elem))) {
7632         uint64_t intval;
7633         char *strval, *fname;
7634         zpool_prop_t prop;
7635         const char *propname;
7636         zprop_type_t proptype;
7637         spa_feature_t fid;
7638
7639         switch (prop = zpool_name_to_prop(nvpair_name(elem))) {
7640         case ZPOOL_PROP_INVAL:
7641             /*
7642              * We checked this earlier in spa_prop_validate().
7643              */
7644             ASSERT(zpool_prop_feature(nvpair_name(elem)));
7645
7646             fname = strchr(nvpair_name(elem), '@') + 1;
7647             VERIFY0(zfeature_lookup_name(fname, &fid));
7648
7649             spa_feature_enable(spa, fid, tx);
7650             spa_history_log_internal(spa, "set", tx,
7651                 "%s=enabled", nvpair_name(elem));
7652             break;
7653
7654         case ZPOOL_PROP_VERSION:
7655             intval = fnvpair_value_uint64(elem);
7656             /*
7657              * The version is synced separately before other
7658              * properties and should be correct by now.
7659              */
7660             ASSERT3U(spa_version(spa), >=, intval);
7661             break;
7662
7663         case ZPOOL_PROP_ALTROOT:
7664             /*
7665              * 'altroot' is a non-persistent property. It should
7666              * have been set temporarily at creation or import time.
7667              */
7668             ASSERT(spa->spa_root != NULL);
7669             break;
7670
7671         case ZPOOL_PROP_READONLY:
7672         case ZPOOL_PROP_CACHEFILE:
7673             /*
7674              * 'readonly' and 'cachefile' are also non-persistent
7675              * properties.
7676              */
7677             break;
7678         case ZPOOL_PROP_COMMENT:
7679             strval = fnvpair_value_string(elem);
7680             if (spa->spa_comment != NULL)
7681                 spa_strfree(spa->spa_comment);
7682             spa->spa_comment = spa_strdup(strval);
7683             /*
7684              * We need to dirty the configuration on all the vdevs
7685              * so that their labels get updated. It's unnecessary
7686              * to do this for pool creation since the vdev's
7687              * configuration has already been dirtied.
7688 */ 7689 if (tx->tx_txg != TXG_INITIAL) 7690 vdev_config_dirty(spa->spa_root_vdev); 7691 spa_history_log_internal(spa, "set", tx, 7692 "%s=%s", nvpair_name(elem), strval); 7693 break; 7694 default: 7695 /* 7696 * Set pool property values in the poolprops mos object. 7697 */ 7698 if (spa->spa_pool_props_object == 0) { 7699 spa->spa_pool_props_object = 7700 zap_create_link(mos, DMU_OT_POOL_PROPS, 7701 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_PROPS, 7702 tx); 7703 } 7704 7705 /* normalize the property name */ 7706 propname = zpool_prop_to_name(prop); 7707 proptype = zpool_prop_get_type(prop); 7708 7709 if (nvpair_type(elem) == DATA_TYPE_STRING) { 7710 ASSERT(proptype == PROP_TYPE_STRING); 7711 strval = fnvpair_value_string(elem); 7712 VERIFY0(zap_update(mos, 7713 spa->spa_pool_props_object, propname, 7714 1, strlen(strval) + 1, strval, tx)); 7715 spa_history_log_internal(spa, "set", tx, 7716 "%s=%s", nvpair_name(elem), strval); 7717 } else if (nvpair_type(elem) == DATA_TYPE_UINT64) { 7718 intval = fnvpair_value_uint64(elem); 7719 7720 if (proptype == PROP_TYPE_INDEX) { 7721 const char *unused; 7722 VERIFY0(zpool_prop_index_to_string( 7723 prop, intval, &unused)); 7724 } 7725 VERIFY0(zap_update(mos, 7726 spa->spa_pool_props_object, propname, 7727 8, 1, &intval, tx)); 7728 spa_history_log_internal(spa, "set", tx, 7729 "%s=%lld", nvpair_name(elem), intval); 7730 } else { 7731 ASSERT(0); /* not allowed */ 7732 } 7733 7734 switch (prop) { 7735 case ZPOOL_PROP_DELEGATION: 7736 spa->spa_delegation = intval; 7737 break; 7738 case ZPOOL_PROP_BOOTFS: 7739 spa->spa_bootfs = intval; 7740 break; 7741 case ZPOOL_PROP_FAILUREMODE: 7742 spa->spa_failmode = intval; 7743 break; 7744 case ZPOOL_PROP_AUTOEXPAND: 7745 spa->spa_autoexpand = intval; 7746 if (tx->tx_txg != TXG_INITIAL) 7747 spa_async_request(spa, 7748 SPA_ASYNC_AUTOEXPAND); 7749 break; 7750 case ZPOOL_PROP_MULTIHOST: 7751 spa->spa_multihost = intval; 7752 break; 7753 case ZPOOL_PROP_DEDUPDITTO: 7754 spa->spa_dedup_ditto = intval; 7755 break; 7756 default: 7757 break; 7758 } 7759 } 7760 7761 } 7762 7763 mutex_exit(&spa->spa_props_lock); 7764 } 7765 7766 /* 7767 * Perform one-time upgrade on-disk changes. spa_version() does not 7768 * reflect the new version this txg, so there must be no changes this 7769 * txg to anything that the upgrade code depends on after it executes. 7770 * Therefore this must be called after dsl_pool_sync() does the sync 7771 * tasks. 
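 *
 * Each upgrade below is gated on a check of the form
 * (spa_ubsync.ub_version < V && spa_uberblock.ub_version >= V):
 * spa_ubsync is the last uberblock actually synced and spa_uberblock is
 * the one being synced now, so the check is true exactly once, in the
 * txg that crosses version V.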
7772  */
7773 static void
7774 spa_sync_upgrades(spa_t *spa, dmu_tx_t *tx)
7775 {
7776     if (spa_sync_pass(spa) != 1)
7777         return;
7778
7779     dsl_pool_t *dp = spa->spa_dsl_pool;
7780     rrw_enter(&dp->dp_config_rwlock, RW_WRITER, FTAG);
7781
7782     if (spa->spa_ubsync.ub_version < SPA_VERSION_ORIGIN &&
7783         spa->spa_uberblock.ub_version >= SPA_VERSION_ORIGIN) {
7784         dsl_pool_create_origin(dp, tx);
7785
7786         /* Keeping the origin open increases spa_minref */
7787         spa->spa_minref += 3;
7788     }
7789
7790     if (spa->spa_ubsync.ub_version < SPA_VERSION_NEXT_CLONES &&
7791         spa->spa_uberblock.ub_version >= SPA_VERSION_NEXT_CLONES) {
7792         dsl_pool_upgrade_clones(dp, tx);
7793     }
7794
7795     if (spa->spa_ubsync.ub_version < SPA_VERSION_DIR_CLONES &&
7796         spa->spa_uberblock.ub_version >= SPA_VERSION_DIR_CLONES) {
7797         dsl_pool_upgrade_dir_clones(dp, tx);
7798
7799         /* Keeping the freedir open increases spa_minref */
7800         spa->spa_minref += 3;
7801     }
7802
7803     if (spa->spa_ubsync.ub_version < SPA_VERSION_FEATURES &&
7804         spa->spa_uberblock.ub_version >= SPA_VERSION_FEATURES) {
7805         spa_feature_create_zap_objects(spa, tx);
7806     }
7807
7808     /*
7809      * The LZ4_COMPRESS feature's behaviour was changed to
7810      * activate_on_enable when the possibility to use lz4 compression
7811      * for metadata was added. Old pools that have this feature enabled
7812      * must be upgraded to have this feature active.
7813      */
7814     if (spa->spa_uberblock.ub_version >= SPA_VERSION_FEATURES) {
7815         boolean_t lz4_en = spa_feature_is_enabled(spa,
7816             SPA_FEATURE_LZ4_COMPRESS);
7817         boolean_t lz4_ac = spa_feature_is_active(spa,
7818             SPA_FEATURE_LZ4_COMPRESS);
7819
7820         if (lz4_en && !lz4_ac)
7821             spa_feature_incr(spa, SPA_FEATURE_LZ4_COMPRESS, tx);
7822     }
7823
7824     /*
7825      * If we haven't written the salt, do so now. Note that the
7826      * feature may not be activated yet, but that's fine since
7827      * the presence of this ZAP entry is backwards compatible.
7828      */
7829     if (zap_contains(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
7830         DMU_POOL_CHECKSUM_SALT) == ENOENT) {
7831         VERIFY0(zap_add(spa->spa_meta_objset,
7832             DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_CHECKSUM_SALT, 1,
7833             sizeof (spa->spa_cksum_salt.zcs_bytes),
7834             spa->spa_cksum_salt.zcs_bytes, tx));
7835     }
7836
7837     rrw_exit(&dp->dp_config_rwlock, FTAG);
7838 }
7839
7840 static void
7841 vdev_indirect_state_sync_verify(vdev_t *vd)
7842 {
7843     vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping;
7844     vdev_indirect_births_t *vib = vd->vdev_indirect_births;
7845
7846     if (vd->vdev_ops == &vdev_indirect_ops) {
7847         ASSERT(vim != NULL);
7848         ASSERT(vib != NULL);
7849     }
7850
7851     if (vdev_obsolete_sm_object(vd) != 0) {
7852         ASSERT(vd->vdev_obsolete_sm != NULL);
7853         ASSERT(vd->vdev_removing ||
7854             vd->vdev_ops == &vdev_indirect_ops);
7855         ASSERT(vdev_indirect_mapping_num_entries(vim) > 0);
7856         ASSERT(vdev_indirect_mapping_bytes_mapped(vim) > 0);
7857
7858         ASSERT3U(vdev_obsolete_sm_object(vd), ==,
7859             space_map_object(vd->vdev_obsolete_sm));
7860         ASSERT3U(vdev_indirect_mapping_bytes_mapped(vim), >=,
7861             space_map_allocated(vd->vdev_obsolete_sm));
7862     }
7863     ASSERT(vd->vdev_obsolete_segments != NULL);
7864
7865     /*
7866      * Since frees / remaps to an indirect vdev can only
7867      * happen in syncing context, the obsolete segments
7868      * tree must be empty when we start syncing.
7869      */
7870     ASSERT0(range_tree_space(vd->vdev_obsolete_segments));
7871 }
7872
7873 /*
7874  * Set the top-level vdev's max queue depth. Evaluate each top-level's
7875  * async write queue depth in case it changed.
The max queue depth will 7876 * not change in the middle of syncing out this txg. 7877 */ 7878 static void 7879 spa_sync_adjust_vdev_max_queue_depth(spa_t *spa) 7880 { 7881 ASSERT(spa_writeable(spa)); 7882 7883 vdev_t *rvd = spa->spa_root_vdev; 7884 uint32_t max_queue_depth = zfs_vdev_async_write_max_active * 7885 zfs_vdev_queue_depth_pct / 100; 7886 metaslab_class_t *normal = spa_normal_class(spa); 7887 metaslab_class_t *special = spa_special_class(spa); 7888 metaslab_class_t *dedup = spa_dedup_class(spa); 7889 7890 uint64_t slots_per_allocator = 0; 7891 for (int c = 0; c < rvd->vdev_children; c++) { 7892 vdev_t *tvd = rvd->vdev_child[c]; 7893 7894 metaslab_group_t *mg = tvd->vdev_mg; 7895 if (mg == NULL || !metaslab_group_initialized(mg)) 7896 continue; 7897 7898 metaslab_class_t *mc = mg->mg_class; 7899 if (mc != normal && mc != special && mc != dedup) 7900 continue; 7901 7902 /* 7903 * It is safe to do a lock-free check here because only async 7904 * allocations look at mg_max_alloc_queue_depth, and async 7905 * allocations all happen from spa_sync(). 7906 */ 7907 for (int i = 0; i < spa->spa_alloc_count; i++) 7908 ASSERT0(zfs_refcount_count( 7909 &(mg->mg_alloc_queue_depth[i]))); 7910 mg->mg_max_alloc_queue_depth = max_queue_depth; 7911 7912 for (int i = 0; i < spa->spa_alloc_count; i++) { 7913 mg->mg_cur_max_alloc_queue_depth[i] = 7914 zfs_vdev_def_queue_depth; 7915 } 7916 slots_per_allocator += zfs_vdev_def_queue_depth; 7917 } 7918 7919 for (int i = 0; i < spa->spa_alloc_count; i++) { 7920 ASSERT0(zfs_refcount_count(&normal->mc_alloc_slots[i])); 7921 ASSERT0(zfs_refcount_count(&special->mc_alloc_slots[i])); 7922 ASSERT0(zfs_refcount_count(&dedup->mc_alloc_slots[i])); 7923 normal->mc_alloc_max_slots[i] = slots_per_allocator; 7924 special->mc_alloc_max_slots[i] = slots_per_allocator; 7925 dedup->mc_alloc_max_slots[i] = slots_per_allocator; 7926 } 7927 normal->mc_alloc_throttle_enabled = zio_dva_throttle_enabled; 7928 special->mc_alloc_throttle_enabled = zio_dva_throttle_enabled; 7929 dedup->mc_alloc_throttle_enabled = zio_dva_throttle_enabled; 7930 } 7931 7932 static void 7933 spa_sync_condense_indirect(spa_t *spa, dmu_tx_t *tx) 7934 { 7935 ASSERT(spa_writeable(spa)); 7936 7937 vdev_t *rvd = spa->spa_root_vdev; 7938 for (int c = 0; c < rvd->vdev_children; c++) { 7939 vdev_t *vd = rvd->vdev_child[c]; 7940 vdev_indirect_state_sync_verify(vd); 7941 7942 if (vdev_indirect_should_condense(vd)) { 7943 spa_condense_indirect_start_sync(vd, tx); 7944 break; 7945 } 7946 } 7947 } 7948 7949 static void 7950 spa_sync_iterate_to_convergence(spa_t *spa, dmu_tx_t *tx) 7951 { 7952 objset_t *mos = spa->spa_meta_objset; 7953 dsl_pool_t *dp = spa->spa_dsl_pool; 7954 uint64_t txg = tx->tx_txg; 7955 bplist_t *free_bpl = &spa->spa_free_bplist[txg & TXG_MASK]; 7956 7957 do { 7958 int pass = ++spa->spa_sync_pass; 7959 7960 spa_sync_config_object(spa, tx); 7961 spa_sync_aux_dev(spa, &spa->spa_spares, tx, 7962 ZPOOL_CONFIG_SPARES, DMU_POOL_SPARES); 7963 spa_sync_aux_dev(spa, &spa->spa_l2cache, tx, 7964 ZPOOL_CONFIG_L2CACHE, DMU_POOL_L2CACHE); 7965 spa_errlog_sync(spa, txg); 7966 dsl_pool_sync(dp, txg); 7967 7968 if (pass < zfs_sync_pass_deferred_free) { 7969 spa_sync_frees(spa, free_bpl, tx); 7970 } else { 7971 /* 7972 * We can not defer frees in pass 1, because 7973 * we sync the deferred frees later in pass 1. 
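 * (spa_sync_deferred_frees(), called at the end of pass 1 below, drains
 * the deferred bpobj, so a free enqueued onto it during pass 1 would be
 * processed this txg rather than actually deferred)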
7974 */ 7975 ASSERT3U(pass, >, 1); 7976 bplist_iterate(free_bpl, bpobj_enqueue_cb, 7977 &spa->spa_deferred_bpobj, tx); 7978 } 7979 7980 ddt_sync(spa, txg); 7981 dsl_scan_sync(dp, tx); 7982 svr_sync(spa, tx); 7983 spa_sync_upgrades(spa, tx); 7984 7985 vdev_t *vd = NULL; 7986 while ((vd = txg_list_remove(&spa->spa_vdev_txg_list, txg)) 7987 != NULL) 7988 vdev_sync(vd, txg); 7989 7990 /* 7991 * Note: We need to check if the MOS is dirty because we could 7992 * have marked the MOS dirty without updating the uberblock 7993 * (e.g. if we have sync tasks but no dirty user data). We need 7994 * to check the uberblock's rootbp because it is updated if we 7995 * have synced out dirty data (though in this case the MOS will 7996 * most likely also be dirty due to second order effects, we 7997 * don't want to rely on that here). 7998 */ 7999 if (pass == 1 && 8000 spa->spa_uberblock.ub_rootbp.blk_birth < txg && 8001 !dmu_objset_is_dirty(mos, txg)) { 8002 /* 8003 * Nothing changed on the first pass, therefore this 8004 * TXG is a no-op. Avoid syncing deferred frees, so 8005 * that we can keep this TXG as a no-op. 8006 */ 8007 ASSERT(txg_list_empty(&dp->dp_dirty_datasets, txg)); 8008 ASSERT(txg_list_empty(&dp->dp_dirty_dirs, txg)); 8009 ASSERT(txg_list_empty(&dp->dp_sync_tasks, txg)); 8010 ASSERT(txg_list_empty(&dp->dp_early_sync_tasks, txg)); 8011 break; 8012 } 8013 8014 spa_sync_deferred_frees(spa, tx); 8015 } while (dmu_objset_is_dirty(mos, txg)); 8016 } 8017 8018 /* 8019 * Rewrite the vdev configuration (which includes the uberblock) to 8020 * commit the transaction group. 8021 * 8022 * If there are no dirty vdevs, we sync the uberblock to a few random 8023 * top-level vdevs that are known to be visible in the config cache 8024 * (see spa_vdev_add() for a complete description). If there *are* dirty 8025 * vdevs, sync the uberblock to all vdevs. 8026 */ 8027 static void 8028 spa_sync_rewrite_vdev_config(spa_t *spa, dmu_tx_t *tx) 8029 { 8030 vdev_t *rvd = spa->spa_root_vdev; 8031 uint64_t txg = tx->tx_txg; 8032 8033 for (;;) { 8034 int error = 0; 8035 8036 /* 8037 * We hold SCL_STATE to prevent vdev open/close/etc. 8038 * while we're attempting to write the vdev labels. 8039 */ 8040 spa_config_enter(spa, SCL_STATE, FTAG, RW_READER); 8041 8042 if (list_is_empty(&spa->spa_config_dirty_list)) { 8043 vdev_t *svd[SPA_SYNC_MIN_VDEVS] = { NULL }; 8044 int svdcount = 0; 8045 int children = rvd->vdev_children; 8046 int c0 = spa_get_random(children); 8047 8048 for (int c = 0; c < children; c++) { 8049 vdev_t *vd = 8050 rvd->vdev_child[(c0 + c) % children]; 8051 8052 /* Stop when revisiting the first vdev */ 8053 if (c > 0 && svd[0] == vd) 8054 break; 8055 8056 if (vd->vdev_ms_array == 0 || 8057 vd->vdev_islog || 8058 !vdev_is_concrete(vd)) 8059 continue; 8060 8061 svd[svdcount++] = vd; 8062 if (svdcount == SPA_SYNC_MIN_VDEVS) 8063 break; 8064 } 8065 error = vdev_config_sync(svd, svdcount, txg); 8066 } else { 8067 error = vdev_config_sync(rvd->vdev_child, 8068 rvd->vdev_children, txg); 8069 } 8070 8071 if (error == 0) 8072 spa->spa_last_synced_guid = rvd->vdev_guid; 8073 8074 spa_config_exit(spa, SCL_STATE, FTAG); 8075 8076 if (error == 0) 8077 break; 8078 zio_suspend(spa, NULL, ZIO_SUSPEND_IOERR); 8079 zio_resume_wait(spa); 8080 } 8081 } 8082 8083 /* 8084 * Sync the specified transaction group. New blocks may be dirtied as 8085 * part of the process, so we iterate until it converges. 
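 *
 * A rough sketch of the flow implemented below:
 *
 *	spa_sync(spa, txg)
 *		wait for open-context zios; verify allocation trees are empty
 *		turn dirty vdev state into dirty vdev configs
 *		spa_sync_adjust_vdev_max_queue_depth()
 *		spa_sync_condense_indirect()
 *		spa_sync_iterate_to_convergence()   <- passes until MOS is clean
 *		spa_sync_rewrite_vdev_config()      <- labels + uberblock
 *		dsl_pool_sync_done(); spa_update_dspace()
 *		spa->spa_ubsync = spa->spa_uberblock
 *		spa_async_dispatch()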
8086 */ 8087 void 8088 spa_sync(spa_t *spa, uint64_t txg) 8089 { 8090 vdev_t *vd = NULL; 8091 8092 VERIFY(spa_writeable(spa)); 8093 8094 /* 8095 * Wait for i/os issued in open context that need to complete 8096 * before this txg syncs. 8097 */ 8098 (void) zio_wait(spa->spa_txg_zio[txg & TXG_MASK]); 8099 spa->spa_txg_zio[txg & TXG_MASK] = zio_root(spa, NULL, NULL, 8100 ZIO_FLAG_CANFAIL); 8101 8102 /* 8103 * Lock out configuration changes. 8104 */ 8105 spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER); 8106 8107 spa->spa_syncing_txg = txg; 8108 spa->spa_sync_pass = 0; 8109 8110 for (int i = 0; i < spa->spa_alloc_count; i++) { 8111 mutex_enter(&spa->spa_alloc_locks[i]); 8112 VERIFY0(avl_numnodes(&spa->spa_alloc_trees[i])); 8113 mutex_exit(&spa->spa_alloc_locks[i]); 8114 } 8115 8116 /* 8117 * If there are any pending vdev state changes, convert them 8118 * into config changes that go out with this transaction group. 8119 */ 8120 spa_config_enter(spa, SCL_STATE, FTAG, RW_READER); 8121 while (list_head(&spa->spa_state_dirty_list) != NULL) { 8122 /* 8123 * We need the write lock here because, for aux vdevs, 8124 * calling vdev_config_dirty() modifies sav_config. 8125 * This is ugly and will become unnecessary when we 8126 * eliminate the aux vdev wart by integrating all vdevs 8127 * into the root vdev tree. 8128 */ 8129 spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG); 8130 spa_config_enter(spa, SCL_CONFIG | SCL_STATE, FTAG, RW_WRITER); 8131 while ((vd = list_head(&spa->spa_state_dirty_list)) != NULL) { 8132 vdev_state_clean(vd); 8133 vdev_config_dirty(vd); 8134 } 8135 spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG); 8136 spa_config_enter(spa, SCL_CONFIG | SCL_STATE, FTAG, RW_READER); 8137 } 8138 spa_config_exit(spa, SCL_STATE, FTAG); 8139 8140 dsl_pool_t *dp = spa->spa_dsl_pool; 8141 dmu_tx_t *tx = dmu_tx_create_assigned(dp, txg); 8142 8143 spa->spa_sync_starttime = gethrtime(); 8144 VERIFY(cyclic_reprogram(spa->spa_deadman_cycid, 8145 spa->spa_sync_starttime + spa->spa_deadman_synctime)); 8146 8147 /* 8148 * If we are upgrading to SPA_VERSION_RAIDZ_DEFLATE this txg, 8149 * set spa_deflate if we have no raid-z vdevs. 8150 */ 8151 if (spa->spa_ubsync.ub_version < SPA_VERSION_RAIDZ_DEFLATE && 8152 spa->spa_uberblock.ub_version >= SPA_VERSION_RAIDZ_DEFLATE) { 8153 vdev_t *rvd = spa->spa_root_vdev; 8154 8155 int i; 8156 for (i = 0; i < rvd->vdev_children; i++) { 8157 vd = rvd->vdev_child[i]; 8158 if (vd->vdev_deflate_ratio != SPA_MINBLOCKSIZE) 8159 break; 8160 } 8161 if (i == rvd->vdev_children) { 8162 spa->spa_deflate = TRUE; 8163 VERIFY0(zap_add(spa->spa_meta_objset, 8164 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_DEFLATE, 8165 sizeof (uint64_t), 1, &spa->spa_deflate, tx)); 8166 } 8167 } 8168 8169 spa_sync_adjust_vdev_max_queue_depth(spa); 8170 8171 spa_sync_condense_indirect(spa, tx); 8172 8173 spa_sync_iterate_to_convergence(spa, tx); 8174 8175 #ifdef ZFS_DEBUG 8176 if (!list_is_empty(&spa->spa_config_dirty_list)) { 8177 /* 8178 * Make sure that the number of ZAPs for all the vdevs matches 8179 * the number of ZAPs in the per-vdev ZAP list. This only gets 8180 * called if the config is dirty; otherwise there may be 8181 * outstanding AVZ operations that weren't completed in 8182 * spa_sync_config_object. 
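 * (zap_count() returns the number of entries in the AVZ object, while
 * vdev_count_verify_zaps() recounts the ZAPs by walking the vdev tree)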
8183 */ 8184 uint64_t all_vdev_zap_entry_count; 8185 ASSERT0(zap_count(spa->spa_meta_objset, 8186 spa->spa_all_vdev_zaps, &all_vdev_zap_entry_count)); 8187 ASSERT3U(vdev_count_verify_zaps(spa->spa_root_vdev), ==, 8188 all_vdev_zap_entry_count); 8189 } 8190 #endif 8191 8192 if (spa->spa_vdev_removal != NULL) { 8193 ASSERT0(spa->spa_vdev_removal->svr_bytes_done[txg & TXG_MASK]); 8194 } 8195 8196 spa_sync_rewrite_vdev_config(spa, tx); 8197 dmu_tx_commit(tx); 8198 8199 VERIFY(cyclic_reprogram(spa->spa_deadman_cycid, CY_INFINITY)); 8200 8201 /* 8202 * Clear the dirty config list. 8203 */ 8204 while ((vd = list_head(&spa->spa_config_dirty_list)) != NULL) 8205 vdev_config_clean(vd); 8206 8207 /* 8208 * Now that the new config has synced transactionally, 8209 * let it become visible to the config cache. 8210 */ 8211 if (spa->spa_config_syncing != NULL) { 8212 spa_config_set(spa, spa->spa_config_syncing); 8213 spa->spa_config_txg = txg; 8214 spa->spa_config_syncing = NULL; 8215 } 8216 8217 dsl_pool_sync_done(dp, txg); 8218 8219 for (int i = 0; i < spa->spa_alloc_count; i++) { 8220 mutex_enter(&spa->spa_alloc_locks[i]); 8221 VERIFY0(avl_numnodes(&spa->spa_alloc_trees[i])); 8222 mutex_exit(&spa->spa_alloc_locks[i]); 8223 } 8224 8225 /* 8226 * Update usable space statistics. 8227 */ 8228 while ((vd = txg_list_remove(&spa->spa_vdev_txg_list, TXG_CLEAN(txg))) 8229 != NULL) 8230 vdev_sync_done(vd, txg); 8231 8232 spa_update_dspace(spa); 8233 8234 /* 8235 * It had better be the case that we didn't dirty anything 8236 * since vdev_config_sync(). 8237 */ 8238 ASSERT(txg_list_empty(&dp->dp_dirty_datasets, txg)); 8239 ASSERT(txg_list_empty(&dp->dp_dirty_dirs, txg)); 8240 ASSERT(txg_list_empty(&spa->spa_vdev_txg_list, txg)); 8241 8242 while (zfs_pause_spa_sync) 8243 delay(1); 8244 8245 spa->spa_sync_pass = 0; 8246 8247 /* 8248 * Update the last synced uberblock here. We want to do this at 8249 * the end of spa_sync() so that consumers of spa_last_synced_txg() 8250 * will be guaranteed that all the processing associated with 8251 * that txg has been completed. 8252 */ 8253 spa->spa_ubsync = spa->spa_uberblock; 8254 spa_config_exit(spa, SCL_CONFIG, FTAG); 8255 8256 spa_handle_ignored_writes(spa); 8257 8258 /* 8259 * If any async tasks have been requested, kick them off. 8260 */ 8261 spa_async_dispatch(spa); 8262 } 8263 8264 /* 8265 * Sync all pools. We don't want to hold the namespace lock across these 8266 * operations, so we take a reference on the spa_t and drop the lock during the 8267 * sync. 8268 */ 8269 void 8270 spa_sync_allpools(void) 8271 { 8272 spa_t *spa = NULL; 8273 mutex_enter(&spa_namespace_lock); 8274 while ((spa = spa_next(spa)) != NULL) { 8275 if (spa_state(spa) != POOL_STATE_ACTIVE || 8276 !spa_writeable(spa) || spa_suspended(spa)) 8277 continue; 8278 spa_open_ref(spa, FTAG); 8279 mutex_exit(&spa_namespace_lock); 8280 txg_wait_synced(spa_get_dsl(spa), 0); 8281 mutex_enter(&spa_namespace_lock); 8282 spa_close(spa, FTAG); 8283 } 8284 mutex_exit(&spa_namespace_lock); 8285 } 8286 8287 /* 8288 * ========================================================================== 8289 * Miscellaneous routines 8290 * ========================================================================== 8291 */ 8292 8293 /* 8294 * Remove all pools in the system. 8295 */ 8296 void 8297 spa_evict_all(void) 8298 { 8299 spa_t *spa; 8300 8301 /* 8302 * Remove all cached state. All pools should be closed now, 8303 * so every spa in the AVL tree should be unreferenced. 
8304 */ 8305 mutex_enter(&spa_namespace_lock); 8306 while ((spa = spa_next(NULL)) != NULL) { 8307 /* 8308 * Stop async tasks. The async thread may need to detach 8309 * a device that's been replaced, which requires grabbing 8310 * spa_namespace_lock, so we must drop it here. 8311 */ 8312 spa_open_ref(spa, FTAG); 8313 mutex_exit(&spa_namespace_lock); 8314 spa_async_suspend(spa); 8315 mutex_enter(&spa_namespace_lock); 8316 spa_close(spa, FTAG); 8317 8318 if (spa->spa_state != POOL_STATE_UNINITIALIZED) { 8319 spa_unload(spa); 8320 spa_deactivate(spa); 8321 } 8322 spa_remove(spa); 8323 } 8324 mutex_exit(&spa_namespace_lock); 8325 } 8326 8327 vdev_t * 8328 spa_lookup_by_guid(spa_t *spa, uint64_t guid, boolean_t aux) 8329 { 8330 vdev_t *vd; 8331 int i; 8332 8333 if ((vd = vdev_lookup_by_guid(spa->spa_root_vdev, guid)) != NULL) 8334 return (vd); 8335 8336 if (aux) { 8337 for (i = 0; i < spa->spa_l2cache.sav_count; i++) { 8338 vd = spa->spa_l2cache.sav_vdevs[i]; 8339 if (vd->vdev_guid == guid) 8340 return (vd); 8341 } 8342 8343 for (i = 0; i < spa->spa_spares.sav_count; i++) { 8344 vd = spa->spa_spares.sav_vdevs[i]; 8345 if (vd->vdev_guid == guid) 8346 return (vd); 8347 } 8348 } 8349 8350 return (NULL); 8351 } 8352 8353 void 8354 spa_upgrade(spa_t *spa, uint64_t version) 8355 { 8356 ASSERT(spa_writeable(spa)); 8357 8358 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 8359 8360 /* 8361 * This should only be called for a non-faulted pool, and since a 8362 * future version would result in an unopenable pool, this shouldn't be 8363 * possible. 8364 */ 8365 ASSERT(SPA_VERSION_IS_SUPPORTED(spa->spa_uberblock.ub_version)); 8366 ASSERT3U(version, >=, spa->spa_uberblock.ub_version); 8367 8368 spa->spa_uberblock.ub_version = version; 8369 vdev_config_dirty(spa->spa_root_vdev); 8370 8371 spa_config_exit(spa, SCL_ALL, FTAG); 8372 8373 txg_wait_synced(spa_get_dsl(spa), 0); 8374 } 8375 8376 boolean_t 8377 spa_has_spare(spa_t *spa, uint64_t guid) 8378 { 8379 int i; 8380 uint64_t spareguid; 8381 spa_aux_vdev_t *sav = &spa->spa_spares; 8382 8383 for (i = 0; i < sav->sav_count; i++) 8384 if (sav->sav_vdevs[i]->vdev_guid == guid) 8385 return (B_TRUE); 8386 8387 for (i = 0; i < sav->sav_npending; i++) { 8388 if (nvlist_lookup_uint64(sav->sav_pending[i], ZPOOL_CONFIG_GUID, 8389 &spareguid) == 0 && spareguid == guid) 8390 return (B_TRUE); 8391 } 8392 8393 return (B_FALSE); 8394 } 8395 8396 /* 8397 * Check if a pool has an active shared spare device. 
8398  * Note: the reference count of an active spare is 2 (once as a spare and once as a replacing vdev).
8399  */
8400 static boolean_t
8401 spa_has_active_shared_spare(spa_t *spa)
8402 {
8403     int i, refcnt;
8404     uint64_t pool;
8405     spa_aux_vdev_t *sav = &spa->spa_spares;
8406
8407     for (i = 0; i < sav->sav_count; i++) {
8408         if (spa_spare_exists(sav->sav_vdevs[i]->vdev_guid, &pool,
8409             &refcnt) && pool != 0ULL && pool == spa_guid(spa) &&
8410             refcnt > 2)
8411             return (B_TRUE);
8412     }
8413
8414     return (B_FALSE);
8415 }
8416
8417 sysevent_t *
8418 spa_event_create(spa_t *spa, vdev_t *vd, nvlist_t *hist_nvl, const char *name)
8419 {
8420     sysevent_t *ev = NULL;
8421 #ifdef _KERNEL
8422     sysevent_attr_list_t *attr = NULL;
8423     sysevent_value_t value;
8424
8425     ev = sysevent_alloc(EC_ZFS, (char *)name, SUNW_KERN_PUB "zfs",
8426         SE_SLEEP);
8427     ASSERT(ev != NULL);
8428
8429     value.value_type = SE_DATA_TYPE_STRING;
8430     value.value.sv_string = spa_name(spa);
8431     if (sysevent_add_attr(&attr, ZFS_EV_POOL_NAME, &value, SE_SLEEP) != 0)
8432         goto done;
8433
8434     value.value_type = SE_DATA_TYPE_UINT64;
8435     value.value.sv_uint64 = spa_guid(spa);
8436     if (sysevent_add_attr(&attr, ZFS_EV_POOL_GUID, &value, SE_SLEEP) != 0)
8437         goto done;
8438
8439     if (vd) {
8440         value.value_type = SE_DATA_TYPE_UINT64;
8441         value.value.sv_uint64 = vd->vdev_guid;
8442         if (sysevent_add_attr(&attr, ZFS_EV_VDEV_GUID, &value,
8443             SE_SLEEP) != 0)
8444             goto done;
8445
8446         if (vd->vdev_path) {
8447             value.value_type = SE_DATA_TYPE_STRING;
8448             value.value.sv_string = vd->vdev_path;
8449             if (sysevent_add_attr(&attr, ZFS_EV_VDEV_PATH,
8450                 &value, SE_SLEEP) != 0)
8451                 goto done;
8452         }
8453     }
8454
8455     if (hist_nvl != NULL) {
8456         fnvlist_merge((nvlist_t *)attr, hist_nvl);
8457     }
8458
8459     if (sysevent_attach_attributes(ev, attr) != 0)
8460         goto done;
8461     attr = NULL;
8462
8463 done:
8464     if (attr)
8465         sysevent_free_attr(attr);
8466
8467 #endif
8468     return (ev);
8469 }
8470
8471 void
8472 spa_event_post(sysevent_t *ev)
8473 {
8474 #ifdef _KERNEL
8475     sysevent_id_t eid;
8476
8477     (void) log_sysevent(ev, SE_SLEEP, &eid);
8478     sysevent_free(ev);
8479 #endif
8480 }
8481
8482 void
8483 spa_event_discard(sysevent_t *ev)
8484 {
8485 #ifdef _KERNEL
8486     sysevent_free(ev);
8487 #endif
8488 }
8489
8490 /*
8491  * Post a sysevent corresponding to the given event. The 'name' must be one of
8492  * the event definitions in sys/sysevent/eventdefs.h. The payload will be
8493  * filled in from the spa and (optionally) the vdev and history nvl. This
8494  * doesn't do anything in the userland libzpool, as we don't want consumers to
8495  * misinterpret ztest or zdb as real changes.
8496  */
8497 void
8498 spa_event_notify(spa_t *spa, vdev_t *vd, nvlist_t *hist_nvl, const char *name)
8499 {
8500     spa_event_post(spa_event_create(spa, vd, hist_nvl, name));
8501 }
8502