/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * This file contains all the routines used when modifying on-disk SPA state.
 * This includes opening, importing, destroying, exporting a pool, and syncing
 * a pool.
 */

#include <sys/zfs_context.h>
#include <sys/fm/fs/zfs.h>
#include <sys/spa_impl.h>
#include <sys/zio.h>
#include <sys/zio_checksum.h>
#include <sys/zio_compress.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/zap.h>
#include <sys/zil.h>
#include <sys/vdev_impl.h>
#include <sys/metaslab.h>
#include <sys/uberblock_impl.h>
#include <sys/txg.h>
#include <sys/avl.h>
#include <sys/dmu_traverse.h>
#include <sys/dmu_objset.h>
#include <sys/unique.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_synctask.h>
#include <sys/fs/zfs.h>
#include <sys/arc.h>
#include <sys/callb.h>
#include <sys/systeminfo.h>
#include <sys/sunddi.h>
#include <sys/spa_boot.h>

#ifdef	_KERNEL
#include <sys/zone.h>
#endif	/* _KERNEL */

#include "zfs_prop.h"
#include "zfs_comutil.h"

enum zti_modes {
	zti_mode_fixed,			/* value is # of threads (min 1) */
	zti_mode_online_percent,	/* value is % of online CPUs */
	zti_mode_tune,			/* fill from zio_taskq_tune_* */
	zti_nmodes
};

#define	ZTI_THREAD_FIX(n)	{ zti_mode_fixed, (n) }
#define	ZTI_THREAD_PCT(n)	{ zti_mode_online_percent, (n) }
#define	ZTI_THREAD_TUNE		{ zti_mode_tune, 0 }

#define	ZTI_THREAD_ONE		ZTI_THREAD_FIX(1)

typedef struct zio_taskq_info {
	const char *zti_name;
	struct {
		enum zti_modes zti_mode;
		uint_t zti_value;
	} zti_nthreads[ZIO_TASKQ_TYPES];
} zio_taskq_info_t;

static const char *const zio_taskq_types[ZIO_TASKQ_TYPES] = {
	"issue", "intr"
};

const zio_taskq_info_t zio_taskqs[ZIO_TYPES] = {
	/*			ISSUE			INTR		*/
	{ "spa_zio_null",	{ ZTI_THREAD_ONE,	ZTI_THREAD_ONE } },
	{ "spa_zio_read",	{ ZTI_THREAD_FIX(8),	ZTI_THREAD_TUNE } },
	{ "spa_zio_write",	{ ZTI_THREAD_TUNE,	ZTI_THREAD_FIX(8) } },
	{ "spa_zio_free",	{ ZTI_THREAD_ONE,	ZTI_THREAD_ONE } },
	{ "spa_zio_claim",	{ ZTI_THREAD_ONE,	ZTI_THREAD_ONE } },
	{ "spa_zio_ioctl",	{ ZTI_THREAD_ONE,	ZTI_THREAD_ONE } },
};

enum zti_modes zio_taskq_tune_mode = zti_mode_online_percent;
uint_t zio_taskq_tune_value = 80;	/* #threads = 80% of # online CPUs */
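
/*
 * Worked example (illustrative, values assumed): zti_mode_fixed entries
 * such as ZTI_THREAD_FIX(8) always create exactly 8 taskq threads, while
 * zti_mode_tune entries fall back to the tunables above -- by default
 * zti_mode_online_percent at 80%, so a machine with 8 online CPUs would
 * get roughly 0.8 * 8 = 6 threads for the tuned read-intr and
 * write-issue taskqs.
 */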

static void spa_sync_props(void *arg1, void *arg2, cred_t *cr, dmu_tx_t *tx);
static boolean_t spa_has_active_shared_spare(spa_t *spa);

/*
 * ==========================================================================
 * SPA properties routines
 * ==========================================================================
 */

/*
 * Add a (source=src, propname=propval) list to an nvlist.
 */
static void
spa_prop_add_list(nvlist_t *nvl, zpool_prop_t prop, char *strval,
    uint64_t intval, zprop_source_t src)
{
	const char *propname = zpool_prop_to_name(prop);
	nvlist_t *propval;

	VERIFY(nvlist_alloc(&propval, NV_UNIQUE_NAME, KM_SLEEP) == 0);
	VERIFY(nvlist_add_uint64(propval, ZPROP_SOURCE, src) == 0);

	if (strval != NULL)
		VERIFY(nvlist_add_string(propval, ZPROP_VALUE, strval) == 0);
	else
		VERIFY(nvlist_add_uint64(propval, ZPROP_VALUE, intval) == 0);

	VERIFY(nvlist_add_nvlist(nvl, propname, propval) == 0);
	nvlist_free(propval);
}

/*
 * Get property values from the spa configuration.
 */
static void
spa_prop_get_config(spa_t *spa, nvlist_t **nvp)
{
	uint64_t size;
	uint64_t used;
	uint64_t cap, version;
	zprop_source_t src = ZPROP_SRC_NONE;
	spa_config_dirent_t *dp;

	ASSERT(MUTEX_HELD(&spa->spa_props_lock));

	if (spa->spa_root_vdev != NULL) {
		size = spa_get_space(spa);
		used = spa_get_alloc(spa);
		spa_prop_add_list(*nvp, ZPOOL_PROP_NAME, spa_name(spa), 0, src);
		spa_prop_add_list(*nvp, ZPOOL_PROP_SIZE, NULL, size, src);
		spa_prop_add_list(*nvp, ZPOOL_PROP_USED, NULL, used, src);
		spa_prop_add_list(*nvp, ZPOOL_PROP_AVAILABLE, NULL,
		    size - used, src);

		cap = (size == 0) ? 0 : (used * 100 / size);
		spa_prop_add_list(*nvp, ZPOOL_PROP_CAPACITY, NULL, cap, src);

		spa_prop_add_list(*nvp, ZPOOL_PROP_HEALTH, NULL,
		    spa->spa_root_vdev->vdev_state, src);

		version = spa_version(spa);
		if (version == zpool_prop_default_numeric(ZPOOL_PROP_VERSION))
			src = ZPROP_SRC_DEFAULT;
		else
			src = ZPROP_SRC_LOCAL;
		spa_prop_add_list(*nvp, ZPOOL_PROP_VERSION, NULL, version, src);
	}

	spa_prop_add_list(*nvp, ZPOOL_PROP_GUID, NULL, spa_guid(spa), src);

	if (spa->spa_root != NULL)
		spa_prop_add_list(*nvp, ZPOOL_PROP_ALTROOT, spa->spa_root,
		    0, ZPROP_SRC_LOCAL);

	if ((dp = list_head(&spa->spa_config_list)) != NULL) {
		if (dp->scd_path == NULL) {
			spa_prop_add_list(*nvp, ZPOOL_PROP_CACHEFILE,
			    "none", 0, ZPROP_SRC_LOCAL);
		} else if (strcmp(dp->scd_path, spa_config_path) != 0) {
			spa_prop_add_list(*nvp, ZPOOL_PROP_CACHEFILE,
			    dp->scd_path, 0, ZPROP_SRC_LOCAL);
		}
	}
}

/*
 * Get zpool property values.
 */
int
spa_prop_get(spa_t *spa, nvlist_t **nvp)
{
	zap_cursor_t zc;
	zap_attribute_t za;
	objset_t *mos = spa->spa_meta_objset;
	int err;

	VERIFY(nvlist_alloc(nvp, NV_UNIQUE_NAME, KM_SLEEP) == 0);

	mutex_enter(&spa->spa_props_lock);

	/*
	 * Get properties from the spa config.
	 */
	spa_prop_get_config(spa, nvp);

	/* If no pool property object, no more prop to get. */
	if (spa->spa_pool_props_object == 0) {
		mutex_exit(&spa->spa_props_lock);
		return (0);
	}

	/*
	 * Get properties from the MOS pool property object.
	 */
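	/*
	 * Each ZAP attribute below is either an integer property
	 * (za_integer_length == 8) or a string property
	 * (za_integer_length == 1, with za_num_integers giving the
	 * string length); the switch distinguishes the two encodings.
	 */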
	for (zap_cursor_init(&zc, mos, spa->spa_pool_props_object);
	    (err = zap_cursor_retrieve(&zc, &za)) == 0;
	    zap_cursor_advance(&zc)) {
		uint64_t intval = 0;
		char *strval = NULL;
		zprop_source_t src = ZPROP_SRC_DEFAULT;
		zpool_prop_t prop;

		if ((prop = zpool_name_to_prop(za.za_name)) == ZPROP_INVAL)
			continue;

		switch (za.za_integer_length) {
		case 8:
			/* integer property */
			if (za.za_first_integer !=
			    zpool_prop_default_numeric(prop))
				src = ZPROP_SRC_LOCAL;

			if (prop == ZPOOL_PROP_BOOTFS) {
				dsl_pool_t *dp;
				dsl_dataset_t *ds = NULL;

				dp = spa_get_dsl(spa);
				rw_enter(&dp->dp_config_rwlock, RW_READER);
				if (err = dsl_dataset_hold_obj(dp,
				    za.za_first_integer, FTAG, &ds)) {
					rw_exit(&dp->dp_config_rwlock);
					break;
				}

				strval = kmem_alloc(
				    MAXNAMELEN + strlen(MOS_DIR_NAME) + 1,
				    KM_SLEEP);
				dsl_dataset_name(ds, strval);
				dsl_dataset_rele(ds, FTAG);
				rw_exit(&dp->dp_config_rwlock);
			} else {
				strval = NULL;
				intval = za.za_first_integer;
			}

			spa_prop_add_list(*nvp, prop, strval, intval, src);

			if (strval != NULL)
				kmem_free(strval,
				    MAXNAMELEN + strlen(MOS_DIR_NAME) + 1);

			break;

		case 1:
			/* string property */
			strval = kmem_alloc(za.za_num_integers, KM_SLEEP);
			err = zap_lookup(mos, spa->spa_pool_props_object,
			    za.za_name, 1, za.za_num_integers, strval);
			if (err) {
				kmem_free(strval, za.za_num_integers);
				break;
			}
			spa_prop_add_list(*nvp, prop, strval, 0, src);
			kmem_free(strval, za.za_num_integers);
			break;

		default:
			break;
		}
	}
	zap_cursor_fini(&zc);
	mutex_exit(&spa->spa_props_lock);
out:
	if (err && err != ENOENT) {
		nvlist_free(*nvp);
		*nvp = NULL;
		return (err);
	}

	return (0);
}

/*
 * Validate the given pool properties nvlist and modify the list
 * for the property values to be set.
 */
static int
spa_prop_validate(spa_t *spa, nvlist_t *props)
{
	nvpair_t *elem;
	int error = 0, reset_bootfs = 0;
	uint64_t objnum;

	elem = NULL;
	while ((elem = nvlist_next_nvpair(props, elem)) != NULL) {
		zpool_prop_t prop;
		char *propname, *strval;
		uint64_t intval;
		objset_t *os;
		char *slash;

		propname = nvpair_name(elem);

		if ((prop = zpool_name_to_prop(propname)) == ZPROP_INVAL)
			return (EINVAL);

		switch (prop) {
		case ZPOOL_PROP_VERSION:
			error = nvpair_value_uint64(elem, &intval);
			if (!error &&
			    (intval < spa_version(spa) || intval > SPA_VERSION))
				error = EINVAL;
			break;

		case ZPOOL_PROP_DELEGATION:
		case ZPOOL_PROP_AUTOREPLACE:
		case ZPOOL_PROP_LISTSNAPS:
			error = nvpair_value_uint64(elem, &intval);
			if (!error && intval > 1)
				error = EINVAL;
			break;

		case ZPOOL_PROP_BOOTFS:
			/*
			 * If the pool version is less than SPA_VERSION_BOOTFS,
			 * or the pool is still being created (version == 0),
			 * the bootfs property cannot be set.
			 */
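			/*
			 * (A version of 0 sorts below SPA_VERSION_BOOTFS,
			 * so the single comparison below also covers the
			 * pool-creation case.)
			 */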
			if (spa_version(spa) < SPA_VERSION_BOOTFS) {
				error = ENOTSUP;
				break;
			}

			/*
			 * Make sure the vdev config is bootable
			 */
			if (!vdev_is_bootable(spa->spa_root_vdev)) {
				error = ENOTSUP;
				break;
			}

			reset_bootfs = 1;

			error = nvpair_value_string(elem, &strval);

			if (!error) {
				uint64_t compress;

				if (strval == NULL || strval[0] == '\0') {
					objnum = zpool_prop_default_numeric(
					    ZPOOL_PROP_BOOTFS);
					break;
				}

				if (error = dmu_objset_open(strval, DMU_OST_ZFS,
				    DS_MODE_USER | DS_MODE_READONLY, &os))
					break;

				/* We don't support gzip bootable datasets */
				if ((error = dsl_prop_get_integer(strval,
				    zfs_prop_to_name(ZFS_PROP_COMPRESSION),
				    &compress, NULL)) == 0 &&
				    !BOOTFS_COMPRESS_VALID(compress)) {
					error = ENOTSUP;
				} else {
					objnum = dmu_objset_id(os);
				}
				dmu_objset_close(os);
			}
			break;

		case ZPOOL_PROP_FAILUREMODE:
			error = nvpair_value_uint64(elem, &intval);
			if (!error && (intval < ZIO_FAILURE_MODE_WAIT ||
			    intval > ZIO_FAILURE_MODE_PANIC))
				error = EINVAL;

			/*
			 * This is a special case which only occurs when
			 * the pool has completely failed. This allows
			 * the user to change the in-core failmode property
			 * without syncing it out to disk (I/Os might
			 * currently be blocked). We do this by returning
			 * EIO to the caller (spa_prop_set) to trick it
			 * into thinking we encountered a property validation
			 * error.
			 */
			if (!error && spa_suspended(spa)) {
				spa->spa_failmode = intval;
				error = EIO;
			}
			break;

		case ZPOOL_PROP_CACHEFILE:
			if ((error = nvpair_value_string(elem, &strval)) != 0)
				break;

			if (strval[0] == '\0')
				break;

			if (strcmp(strval, "none") == 0)
				break;

			if (strval[0] != '/') {
				error = EINVAL;
				break;
			}

			slash = strrchr(strval, '/');
			ASSERT(slash != NULL);

			if (slash[1] == '\0' || strcmp(slash, "/.") == 0 ||
			    strcmp(slash, "/..") == 0)
				error = EINVAL;
			break;
		}

		if (error)
			break;
	}

	if (!error && reset_bootfs) {
		error = nvlist_remove(props,
		    zpool_prop_to_name(ZPOOL_PROP_BOOTFS), DATA_TYPE_STRING);

		if (!error) {
			error = nvlist_add_uint64(props,
			    zpool_prop_to_name(ZPOOL_PROP_BOOTFS), objnum);
		}
	}

	return (error);
}

void
spa_configfile_set(spa_t *spa, nvlist_t *nvp, boolean_t need_sync)
{
	char *cachefile;
	spa_config_dirent_t *dp;

	if (nvlist_lookup_string(nvp, zpool_prop_to_name(ZPOOL_PROP_CACHEFILE),
	    &cachefile) != 0)
		return;

	dp = kmem_alloc(sizeof (spa_config_dirent_t),
	    KM_SLEEP);

	if (cachefile[0] == '\0')
		dp->scd_path = spa_strdup(spa_config_path);
	else if (strcmp(cachefile, "none") == 0)
		dp->scd_path = NULL;
	else
		dp->scd_path = spa_strdup(cachefile);

	list_insert_head(&spa->spa_config_list, dp);
	if (need_sync)
		spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE);
}

int
spa_prop_set(spa_t *spa, nvlist_t *nvp)
{
	int error;
	nvpair_t *elem;
	boolean_t need_sync = B_FALSE;
	zpool_prop_t prop;

	if ((error = spa_prop_validate(spa, nvp)) != 0)
		return (error);

	elem = NULL;
	while ((elem = nvlist_next_nvpair(nvp, elem)) != NULL) {
		if ((prop = zpool_name_to_prop(
		    nvpair_name(elem))) == ZPROP_INVAL)
			return (EINVAL);

		if (prop == ZPOOL_PROP_CACHEFILE || prop == ZPOOL_PROP_ALTROOT)
			continue;
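
		/*
		 * Any other property change must be written out to the
		 * MOS, so arrange for the spa_sync_props() sync task below.
		 */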
		need_sync = B_TRUE;
		break;
	}

	if (need_sync)
		return (dsl_sync_task_do(spa_get_dsl(spa), NULL, spa_sync_props,
		    spa, nvp, 3));
	else
		return (0);
}

/*
 * If the bootfs property value is dsobj, clear it.
 */
void
spa_prop_clear_bootfs(spa_t *spa, uint64_t dsobj, dmu_tx_t *tx)
{
	if (spa->spa_bootfs == dsobj && spa->spa_pool_props_object != 0) {
		VERIFY(zap_remove(spa->spa_meta_objset,
		    spa->spa_pool_props_object,
		    zpool_prop_to_name(ZPOOL_PROP_BOOTFS), tx) == 0);
		spa->spa_bootfs = 0;
	}
}

/*
 * ==========================================================================
 * SPA state manipulation (open/create/destroy/import/export)
 * ==========================================================================
 */

static int
spa_error_entry_compare(const void *a, const void *b)
{
	spa_error_entry_t *sa = (spa_error_entry_t *)a;
	spa_error_entry_t *sb = (spa_error_entry_t *)b;
	int ret;

	ret = bcmp(&sa->se_bookmark, &sb->se_bookmark,
	    sizeof (zbookmark_t));

	if (ret < 0)
		return (-1);
	else if (ret > 0)
		return (1);
	else
		return (0);
}

/*
 * Utility function which retrieves copies of the current logs and
 * re-initializes them in the process.
 */
void
spa_get_errlists(spa_t *spa, avl_tree_t *last, avl_tree_t *scrub)
{
	ASSERT(MUTEX_HELD(&spa->spa_errlist_lock));

	bcopy(&spa->spa_errlist_last, last, sizeof (avl_tree_t));
	bcopy(&spa->spa_errlist_scrub, scrub, sizeof (avl_tree_t));

	avl_create(&spa->spa_errlist_scrub,
	    spa_error_entry_compare, sizeof (spa_error_entry_t),
	    offsetof(spa_error_entry_t, se_avl));
	avl_create(&spa->spa_errlist_last,
	    spa_error_entry_compare, sizeof (spa_error_entry_t),
	    offsetof(spa_error_entry_t, se_avl));
}
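
/*
 * Informal lifecycle note: a spa_t goes POOL_STATE_UNINITIALIZED ->
 * spa_activate() -> spa_load() or pool creation -> spa_unload() ->
 * spa_deactivate() -> POOL_STATE_UNINITIALIZED. spa_activate() and
 * spa_deactivate() below construct and tear down the in-core state
 * (taskqs, metaslab classes, dirty lists and error trees) symmetrically.
 */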

/*
 * Activate an uninitialized pool.
 */
static void
spa_activate(spa_t *spa, int mode)
{
	ASSERT(spa->spa_state == POOL_STATE_UNINITIALIZED);

	spa->spa_state = POOL_STATE_ACTIVE;
	spa->spa_mode = mode;

	spa->spa_normal_class = metaslab_class_create(zfs_metaslab_ops);
	spa->spa_log_class = metaslab_class_create(zfs_metaslab_ops);

	for (int t = 0; t < ZIO_TYPES; t++) {
		const zio_taskq_info_t *ztip = &zio_taskqs[t];
		for (int q = 0; q < ZIO_TASKQ_TYPES; q++) {
			enum zti_modes mode = ztip->zti_nthreads[q].zti_mode;
			uint_t value = ztip->zti_nthreads[q].zti_value;
			char name[32];

			(void) snprintf(name, sizeof (name),
			    "%s_%s", ztip->zti_name, zio_taskq_types[q]);

			if (mode == zti_mode_tune) {
				mode = zio_taskq_tune_mode;
				value = zio_taskq_tune_value;
				if (mode == zti_mode_tune)
					mode = zti_mode_online_percent;
			}

			switch (mode) {
			case zti_mode_fixed:
				ASSERT3U(value, >=, 1);
				value = MAX(value, 1);

				spa->spa_zio_taskq[t][q] = taskq_create(name,
				    value, maxclsyspri, 50, INT_MAX,
				    TASKQ_PREPOPULATE);
				break;

			case zti_mode_online_percent:
				spa->spa_zio_taskq[t][q] = taskq_create(name,
				    value, maxclsyspri, 50, INT_MAX,
				    TASKQ_PREPOPULATE | TASKQ_THREADS_CPU_PCT);
				break;

			case zti_mode_tune:
			default:
				panic("unrecognized mode for "
				    "zio_taskqs[%u]->zti_nthreads[%u] (%u:%u) "
				    "in spa_activate()",
				    t, q, mode, value);
				break;
			}
		}
	}

	list_create(&spa->spa_config_dirty_list, sizeof (vdev_t),
	    offsetof(vdev_t, vdev_config_dirty_node));
	list_create(&spa->spa_state_dirty_list, sizeof (vdev_t),
	    offsetof(vdev_t, vdev_state_dirty_node));

	txg_list_create(&spa->spa_vdev_txg_list,
	    offsetof(struct vdev, vdev_txg_node));

	avl_create(&spa->spa_errlist_scrub,
	    spa_error_entry_compare, sizeof (spa_error_entry_t),
	    offsetof(spa_error_entry_t, se_avl));
	avl_create(&spa->spa_errlist_last,
	    spa_error_entry_compare, sizeof (spa_error_entry_t),
	    offsetof(spa_error_entry_t, se_avl));
}

/*
 * Opposite of spa_activate().
 */
static void
spa_deactivate(spa_t *spa)
{
	ASSERT(spa->spa_sync_on == B_FALSE);
	ASSERT(spa->spa_dsl_pool == NULL);
	ASSERT(spa->spa_root_vdev == NULL);
	ASSERT(spa->spa_async_zio_root == NULL);
	ASSERT(spa->spa_state != POOL_STATE_UNINITIALIZED);

	txg_list_destroy(&spa->spa_vdev_txg_list);

	list_destroy(&spa->spa_config_dirty_list);
	list_destroy(&spa->spa_state_dirty_list);

	for (int t = 0; t < ZIO_TYPES; t++) {
		for (int q = 0; q < ZIO_TASKQ_TYPES; q++) {
			taskq_destroy(spa->spa_zio_taskq[t][q]);
			spa->spa_zio_taskq[t][q] = NULL;
		}
	}

	metaslab_class_destroy(spa->spa_normal_class);
	spa->spa_normal_class = NULL;

	metaslab_class_destroy(spa->spa_log_class);
	spa->spa_log_class = NULL;

	/*
	 * If this was part of an import or the open otherwise failed, we may
	 * still have errors left in the queues. Empty them just in case.
	 */
	spa_errlog_drain(spa);

	avl_destroy(&spa->spa_errlist_scrub);
	avl_destroy(&spa->spa_errlist_last);

	spa->spa_state = POOL_STATE_UNINITIALIZED;
}
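
/*
 * For reference, a sketch (illustrative, not exhaustive) of the nvlist
 * layout that spa_config_parse() below consumes -- a vdev tree is a set
 * of nested nvlists:
 *
 *	vdev_tree: { type: "root",
 *	    children: [ { type: "mirror",
 *		children: [ { type: "disk", path: "/dev/dsk/..." },
 *			    { type: "disk", path: "/dev/dsk/..." } ] } ] }
 *
 * Each nvlist becomes one vdev_t via vdev_alloc(), and the
 * ZPOOL_CONFIG_CHILDREN array is walked recursively.
 */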

/*
 * Verify a pool configuration, and construct the vdev tree appropriately. This
 * will create all the necessary vdevs in the appropriate layout, with each vdev
 * in the CLOSED state. This will prep the pool before open/creation/import.
 * All vdev validation is done by the vdev_alloc() routine.
 */
static int
spa_config_parse(spa_t *spa, vdev_t **vdp, nvlist_t *nv, vdev_t *parent,
    uint_t id, int atype)
{
	nvlist_t **child;
	uint_t c, children;
	int error;

	if ((error = vdev_alloc(spa, vdp, nv, parent, id, atype)) != 0)
		return (error);

	if ((*vdp)->vdev_ops->vdev_op_leaf)
		return (0);

	error = nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
	    &child, &children);

	if (error == ENOENT)
		return (0);

	if (error) {
		vdev_free(*vdp);
		*vdp = NULL;
		return (EINVAL);
	}

	for (c = 0; c < children; c++) {
		vdev_t *vd;
		if ((error = spa_config_parse(spa, &vd, child[c], *vdp, c,
		    atype)) != 0) {
			vdev_free(*vdp);
			*vdp = NULL;
			return (error);
		}
	}

	ASSERT(*vdp != NULL);

	return (0);
}

/*
 * Opposite of spa_load().
 */
static void
spa_unload(spa_t *spa)
{
	int i;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	/*
	 * Stop async tasks.
	 */
	spa_async_suspend(spa);

	/*
	 * Stop syncing.
	 */
	if (spa->spa_sync_on) {
		txg_sync_stop(spa->spa_dsl_pool);
		spa->spa_sync_on = B_FALSE;
	}

	/*
	 * Wait for any outstanding async I/O to complete.
	 */
	if (spa->spa_async_zio_root != NULL) {
		(void) zio_wait(spa->spa_async_zio_root);
		spa->spa_async_zio_root = NULL;
	}

	/*
	 * Close the dsl pool.
	 */
	if (spa->spa_dsl_pool) {
		dsl_pool_close(spa->spa_dsl_pool);
		spa->spa_dsl_pool = NULL;
	}

	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);

	/*
	 * Drop and purge level 2 cache
	 */
	spa_l2cache_drop(spa);

	/*
	 * Close all vdevs.
	 */
	if (spa->spa_root_vdev)
		vdev_free(spa->spa_root_vdev);
	ASSERT(spa->spa_root_vdev == NULL);

	for (i = 0; i < spa->spa_spares.sav_count; i++)
		vdev_free(spa->spa_spares.sav_vdevs[i]);
	if (spa->spa_spares.sav_vdevs) {
		kmem_free(spa->spa_spares.sav_vdevs,
		    spa->spa_spares.sav_count * sizeof (void *));
		spa->spa_spares.sav_vdevs = NULL;
	}
	if (spa->spa_spares.sav_config) {
		nvlist_free(spa->spa_spares.sav_config);
		spa->spa_spares.sav_config = NULL;
	}
	spa->spa_spares.sav_count = 0;

	for (i = 0; i < spa->spa_l2cache.sav_count; i++)
		vdev_free(spa->spa_l2cache.sav_vdevs[i]);
	if (spa->spa_l2cache.sav_vdevs) {
		kmem_free(spa->spa_l2cache.sav_vdevs,
		    spa->spa_l2cache.sav_count * sizeof (void *));
		spa->spa_l2cache.sav_vdevs = NULL;
	}
	if (spa->spa_l2cache.sav_config) {
		nvlist_free(spa->spa_l2cache.sav_config);
		spa->spa_l2cache.sav_config = NULL;
	}
	spa->spa_l2cache.sav_count = 0;

	spa->spa_async_suspended = 0;

	spa_config_exit(spa, SCL_ALL, FTAG);
}
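
/*
 * Note on the teardown order in spa_unload() above: async tasks are
 * suspended before the syncing thread stops, the godfather zio is waited
 * on before the DSL pool closes, and the spare and l2cache vdevs are
 * freed only after the root vdev tree itself.
 */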

/*
 * Load (or re-load) the current list of vdevs describing the active spares for
 * this pool. When this is called, we have some form of basic information in
 * 'spa_spares.sav_config'. We parse this into vdevs, try to open them, and
 * then re-generate a more complete list including status information.
 */
static void
spa_load_spares(spa_t *spa)
{
	nvlist_t **spares;
	uint_t nspares;
	int i;
	vdev_t *vd, *tvd;

	ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);

	/*
	 * First, close and free any existing spare vdevs.
	 */
	for (i = 0; i < spa->spa_spares.sav_count; i++) {
		vd = spa->spa_spares.sav_vdevs[i];

		/* Undo the call to spa_activate() below */
		if ((tvd = spa_lookup_by_guid(spa, vd->vdev_guid,
		    B_FALSE)) != NULL && tvd->vdev_isspare)
			spa_spare_remove(tvd);
		vdev_close(vd);
		vdev_free(vd);
	}

	if (spa->spa_spares.sav_vdevs)
		kmem_free(spa->spa_spares.sav_vdevs,
		    spa->spa_spares.sav_count * sizeof (void *));

	if (spa->spa_spares.sav_config == NULL)
		nspares = 0;
	else
		VERIFY(nvlist_lookup_nvlist_array(spa->spa_spares.sav_config,
		    ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0);

	spa->spa_spares.sav_count = (int)nspares;
	spa->spa_spares.sav_vdevs = NULL;

	if (nspares == 0)
		return;

	/*
	 * Construct the array of vdevs, opening them to get status in the
	 * process. For each spare, there are potentially two different vdev_t
	 * structures associated with it: one in the list of spares (used only
	 * for basic validation purposes) and one in the active vdev
	 * configuration (if it's spared in). During this phase we open and
	 * validate each vdev on the spare list. If the vdev also exists in the
	 * active configuration, then we also mark this vdev as an active spare.
	 */
	spa->spa_spares.sav_vdevs = kmem_alloc(nspares * sizeof (void *),
	    KM_SLEEP);
	for (i = 0; i < spa->spa_spares.sav_count; i++) {
		VERIFY(spa_config_parse(spa, &vd, spares[i], NULL, 0,
		    VDEV_ALLOC_SPARE) == 0);
		ASSERT(vd != NULL);

		spa->spa_spares.sav_vdevs[i] = vd;

		if ((tvd = spa_lookup_by_guid(spa, vd->vdev_guid,
		    B_FALSE)) != NULL) {
			if (!tvd->vdev_isspare)
				spa_spare_add(tvd);

			/*
			 * We only mark the spare active if we were successfully
			 * able to load the vdev. Otherwise, importing a pool
			 * with a bad active spare would result in strange
			 * behavior, because multiple pools would think the
			 * spare is actively in use.
			 *
			 * There is a vulnerability here to an equally bizarre
			 * circumstance, where a dead active spare is later
			 * brought back to life (onlined or otherwise). Given
			 * the rarity of this scenario, and the extra complexity
			 * it adds, we ignore the possibility.
			 */
			if (!vdev_is_dead(tvd))
				spa_spare_activate(tvd);
		}

		vd->vdev_top = vd;
		vd->vdev_aux = &spa->spa_spares;

		if (vdev_open(vd) != 0)
			continue;

		if (vdev_validate_aux(vd) == 0)
			spa_spare_add(vd);
	}

	/*
	 * Recompute the stashed list of spares, with status information
	 * this time.
	 */
	VERIFY(nvlist_remove(spa->spa_spares.sav_config, ZPOOL_CONFIG_SPARES,
	    DATA_TYPE_NVLIST_ARRAY) == 0);

	spares = kmem_alloc(spa->spa_spares.sav_count * sizeof (void *),
	    KM_SLEEP);
	for (i = 0; i < spa->spa_spares.sav_count; i++)
		spares[i] = vdev_config_generate(spa,
		    spa->spa_spares.sav_vdevs[i], B_TRUE, B_TRUE, B_FALSE);
	VERIFY(nvlist_add_nvlist_array(spa->spa_spares.sav_config,
	    ZPOOL_CONFIG_SPARES, spares, spa->spa_spares.sav_count) == 0);
	for (i = 0; i < spa->spa_spares.sav_count; i++)
		nvlist_free(spares[i]);
	kmem_free(spares, spa->spa_spares.sav_count * sizeof (void *));
}
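
/*
 * spa_load_spares() above and spa_load_l2cache() below share the same
 * shape: parse the stashed sav_config into vdev_t's, open and validate
 * them, then regenerate the config array so that it carries current
 * status information.
 */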

/*
 * Load (or re-load) the current list of vdevs describing the active l2cache
 * for this pool. When this is called, we have some form of basic information
 * in 'spa_l2cache.sav_config'. We parse this into vdevs, try to open them, and
 * then re-generate a more complete list including status information.
 * Devices which are already active have their details maintained, and are
 * not re-opened.
 */
static void
spa_load_l2cache(spa_t *spa)
{
	nvlist_t **l2cache;
	uint_t nl2cache;
	int i, j, oldnvdevs;
	uint64_t guid, size;
	vdev_t *vd, **oldvdevs, **newvdevs;
	spa_aux_vdev_t *sav = &spa->spa_l2cache;

	ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);

	if (sav->sav_config != NULL) {
		VERIFY(nvlist_lookup_nvlist_array(sav->sav_config,
		    ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0);
		newvdevs = kmem_alloc(nl2cache * sizeof (void *), KM_SLEEP);
	} else {
		nl2cache = 0;
	}

	oldvdevs = sav->sav_vdevs;
	oldnvdevs = sav->sav_count;
	sav->sav_vdevs = NULL;
	sav->sav_count = 0;

	/*
	 * Process new nvlist of vdevs.
	 */
	for (i = 0; i < nl2cache; i++) {
		VERIFY(nvlist_lookup_uint64(l2cache[i], ZPOOL_CONFIG_GUID,
		    &guid) == 0);

		newvdevs[i] = NULL;
		for (j = 0; j < oldnvdevs; j++) {
			vd = oldvdevs[j];
			if (vd != NULL && guid == vd->vdev_guid) {
				/*
				 * Retain previous vdev for add/remove ops.
				 */
				newvdevs[i] = vd;
				oldvdevs[j] = NULL;
				break;
			}
		}

		if (newvdevs[i] == NULL) {
			/*
			 * Create new vdev
			 */
			VERIFY(spa_config_parse(spa, &vd, l2cache[i], NULL, 0,
			    VDEV_ALLOC_L2CACHE) == 0);
			ASSERT(vd != NULL);
			newvdevs[i] = vd;

			/*
			 * Commit this vdev as an l2cache device,
			 * even if it fails to open.
			 */
			spa_l2cache_add(vd);

			vd->vdev_top = vd;
			vd->vdev_aux = sav;

			spa_l2cache_activate(vd);

			if (vdev_open(vd) != 0)
				continue;

			(void) vdev_validate_aux(vd);

			if (!vdev_is_dead(vd)) {
				size = vdev_get_rsize(vd);
				l2arc_add_vdev(spa, vd,
				    VDEV_LABEL_START_SIZE,
				    size - VDEV_LABEL_START_SIZE);
			}
		}
	}

	/*
	 * Purge vdevs that were dropped
	 */
	for (i = 0; i < oldnvdevs; i++) {
		uint64_t pool;

		vd = oldvdevs[i];
		if (vd != NULL) {
			if (spa_l2cache_exists(vd->vdev_guid, &pool) &&
			    pool != 0ULL && l2arc_vdev_present(vd))
				l2arc_remove_vdev(vd);
			(void) vdev_close(vd);
			spa_l2cache_remove(vd);
		}
	}

	if (oldvdevs)
		kmem_free(oldvdevs, oldnvdevs * sizeof (void *));

	if (sav->sav_config == NULL)
		goto out;

	sav->sav_vdevs = newvdevs;
	sav->sav_count = (int)nl2cache;

	/*
	 * Recompute the stashed list of l2cache devices, with status
	 * information this time.
	 */
	VERIFY(nvlist_remove(sav->sav_config, ZPOOL_CONFIG_L2CACHE,
	    DATA_TYPE_NVLIST_ARRAY) == 0);

	l2cache = kmem_alloc(sav->sav_count * sizeof (void *), KM_SLEEP);
	for (i = 0; i < sav->sav_count; i++)
		l2cache[i] = vdev_config_generate(spa,
		    sav->sav_vdevs[i], B_TRUE, B_FALSE, B_TRUE);
	VERIFY(nvlist_add_nvlist_array(sav->sav_config,
	    ZPOOL_CONFIG_L2CACHE, l2cache, sav->sav_count) == 0);
out:
	for (i = 0; i < sav->sav_count; i++)
		nvlist_free(l2cache[i]);
	if (sav->sav_count)
		kmem_free(l2cache, sav->sav_count * sizeof (void *));
}

static int
load_nvlist(spa_t *spa, uint64_t obj, nvlist_t **value)
{
	dmu_buf_t *db;
	char *packed = NULL;
	size_t nvsize = 0;
	int error;
	*value = NULL;

	VERIFY(0 == dmu_bonus_hold(spa->spa_meta_objset, obj, FTAG, &db));
	nvsize = *(uint64_t *)db->db_data;
	dmu_buf_rele(db, FTAG);

	packed = kmem_alloc(nvsize, KM_SLEEP);
	error = dmu_read(spa->spa_meta_objset, obj, 0, nvsize, packed,
	    DMU_READ_PREFETCH);
	if (error == 0)
		error = nvlist_unpack(packed, nvsize, value, 0);
	kmem_free(packed, nvsize);

	return (error);
}

/*
 * Checks to see if the given vdev could not be opened, in which case we post a
 * sysevent to notify the autoreplace code that the device has been removed.
 */
static void
spa_check_removed(vdev_t *vd)
{
	int c;

	for (c = 0; c < vd->vdev_children; c++)
		spa_check_removed(vd->vdev_child[c]);

	if (vd->vdev_ops->vdev_op_leaf && vdev_is_dead(vd)) {
		zfs_post_autoreplace(vd->vdev_spa, vd);
		spa_event_notify(vd->vdev_spa, vd, ESC_ZFS_VDEV_CHECK);
	}
}

/*
 * Load the slog device state from the config object since it's possible
 * that the label does not contain the most up-to-date information.
 */
void
spa_load_log_state(spa_t *spa)
{
	nvlist_t *nv, *nvroot, **child;
	uint64_t is_log;
	uint_t children, c;
	vdev_t *rvd = spa->spa_root_vdev;

	VERIFY(load_nvlist(spa, spa->spa_config_object, &nv) == 0);
	VERIFY(nvlist_lookup_nvlist(nv, ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
	VERIFY(nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) == 0);

	for (c = 0; c < children; c++) {
		vdev_t *tvd = rvd->vdev_child[c];

		if (nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
		    &is_log) == 0 && is_log)
			vdev_load_log_state(tvd, child[c]);
	}
	nvlist_free(nv);
}

/*
 * Check for missing log devices
 */
int
spa_check_logs(spa_t *spa)
{
	switch (spa->spa_log_state) {
	case SPA_LOG_MISSING:
		/* need to recheck in case slog has been restored */
	case SPA_LOG_UNKNOWN:
		if (dmu_objset_find(spa->spa_name, zil_check_log_chain, NULL,
		    DS_FIND_CHILDREN)) {
			spa->spa_log_state = SPA_LOG_MISSING;
			return (1);
		}
		break;
	}
	return (0);
}
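
/*
 * Informal outline of spa_load() below: parse the vdev tree and open the
 * vdevs, validate their labels, select the best uberblock, open the DSL
 * pool, then pull the config, spares, l2cache, log state and pool
 * properties out of the MOS. When called with an untrusted config
 * (mosconfig == B_FALSE), spa_load() restarts itself once against the
 * trusted copy found in the MOS.
 */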

/*
 * Load an existing storage pool, using the pool's builtin spa_config as a
 * source of configuration information.
 */
static int
spa_load(spa_t *spa, nvlist_t *config, spa_load_state_t state, int mosconfig)
{
	int error = 0;
	nvlist_t *nvroot = NULL;
	vdev_t *rvd;
	uberblock_t *ub = &spa->spa_uberblock;
	uint64_t config_cache_txg = spa->spa_config_txg;
	uint64_t pool_guid;
	uint64_t version;
	uint64_t autoreplace = 0;
	int orig_mode = spa->spa_mode;
	char *ereport = FM_EREPORT_ZFS_POOL;

	/*
	 * If this is an untrusted config, access the pool in read-only mode.
	 * This prevents things like resilvering recently removed devices.
	 */
	if (!mosconfig)
		spa->spa_mode = FREAD;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	spa->spa_load_state = state;

	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &nvroot) ||
	    nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID, &pool_guid)) {
		error = EINVAL;
		goto out;
	}

	/*
	 * Versioning wasn't explicitly added to the label until later, so if
	 * it's not present treat it as the initial version.
	 */
	if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION, &version) != 0)
		version = SPA_VERSION_INITIAL;

	(void) nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_TXG,
	    &spa->spa_config_txg);

	if ((state == SPA_LOAD_IMPORT || state == SPA_LOAD_TRYIMPORT) &&
	    spa_guid_exists(pool_guid, 0)) {
		error = EEXIST;
		goto out;
	}

	spa->spa_load_guid = pool_guid;

	/*
	 * Create "The Godfather" zio to hold all async IOs
	 */
	spa->spa_async_zio_root = zio_root(spa, NULL, NULL,
	    ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE | ZIO_FLAG_GODFATHER);

	/*
	 * Parse the configuration into a vdev tree. We explicitly set the
	 * value that will be returned by spa_version() since parsing the
	 * configuration requires knowing the version number.
	 */
	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
	spa->spa_ubsync.ub_version = version;
	error = spa_config_parse(spa, &rvd, nvroot, NULL, 0, VDEV_ALLOC_LOAD);
	spa_config_exit(spa, SCL_ALL, FTAG);

	if (error != 0)
		goto out;

	ASSERT(spa->spa_root_vdev == rvd);
	ASSERT(spa_guid(spa) == pool_guid);

	/*
	 * Try to open all vdevs, loading each label in the process.
	 */
	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
	error = vdev_open(rvd);
	spa_config_exit(spa, SCL_ALL, FTAG);
	if (error != 0)
		goto out;

	/*
	 * We need to validate the vdev labels against the configuration that
	 * we have in hand, which is dependent on the setting of mosconfig. If
	 * mosconfig is true then we're validating the vdev labels based on
	 * that config. Otherwise, we're validating against the cached config
	 * (zpool.cache) that was read when we loaded the zfs module, and then
	 * later we will recursively call spa_load() and validate against
	 * the vdev config.
	 */
	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
	error = vdev_validate(rvd);
	spa_config_exit(spa, SCL_ALL, FTAG);
	if (error != 0)
		goto out;

	if (rvd->vdev_state <= VDEV_STATE_CANT_OPEN) {
		error = ENXIO;
		goto out;
	}

	/*
	 * Find the best uberblock.
	 */
	vdev_uberblock_load(NULL, rvd, ub);

	/*
	 * If we weren't able to find a single valid uberblock, return failure.
	 */
	if (ub->ub_txg == 0) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_CORRUPT_DATA);
		error = ENXIO;
		goto out;
	}

	/*
	 * If the pool is newer than the code, we can't open it.
	 */
	if (ub->ub_version > SPA_VERSION) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_VERSION_NEWER);
		error = ENOTSUP;
		goto out;
	}

	/*
	 * If the vdev guid sum doesn't match the uberblock, we have an
	 * incomplete configuration.
	 */
	if (rvd->vdev_guid_sum != ub->ub_guid_sum && mosconfig) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_BAD_GUID_SUM);
		error = ENXIO;
		goto out;
	}

	/*
	 * Initialize internal SPA structures.
	 */
	spa->spa_state = POOL_STATE_ACTIVE;
	spa->spa_ubsync = spa->spa_uberblock;
	spa->spa_first_txg = spa_last_synced_txg(spa) + 1;
	error = dsl_pool_open(spa, spa->spa_first_txg, &spa->spa_dsl_pool);
	if (error) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_CORRUPT_DATA);
		goto out;
	}
	spa->spa_meta_objset = spa->spa_dsl_pool->dp_meta_objset;

	if (zap_lookup(spa->spa_meta_objset,
	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_CONFIG,
	    sizeof (uint64_t), 1, &spa->spa_config_object) != 0) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_CORRUPT_DATA);
		error = EIO;
		goto out;
	}

	if (!mosconfig) {
		nvlist_t *newconfig;
		uint64_t hostid;

		if (load_nvlist(spa, spa->spa_config_object, &newconfig) != 0) {
			vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
			    VDEV_AUX_CORRUPT_DATA);
			error = EIO;
			goto out;
		}

		if (!spa_is_root(spa) && nvlist_lookup_uint64(newconfig,
		    ZPOOL_CONFIG_HOSTID, &hostid) == 0) {
			char *hostname;
			unsigned long myhostid = 0;

			VERIFY(nvlist_lookup_string(newconfig,
			    ZPOOL_CONFIG_HOSTNAME, &hostname) == 0);

#ifdef	_KERNEL
			myhostid = zone_get_hostid(NULL);
#else	/* _KERNEL */
			/*
			 * We're emulating the system's hostid in userland, so
			 * we can't use zone_get_hostid().
			 */
			(void) ddi_strtoul(hw_serial, NULL, 10, &myhostid);
#endif	/* _KERNEL */
			if (hostid != 0 && myhostid != 0 &&
			    hostid != myhostid) {
				cmn_err(CE_WARN, "pool '%s' could not be "
				    "loaded as it was last accessed by "
				    "another system (host: %s hostid: 0x%lx). "
				    "See: http://www.sun.com/msg/ZFS-8000-EY",
				    spa_name(spa), hostname,
				    (unsigned long)hostid);
				error = EBADF;
				goto out;
			}
		}

		spa_config_set(spa, newconfig);
		spa_unload(spa);
		spa_deactivate(spa);
		spa_activate(spa, orig_mode);

		return (spa_load(spa, newconfig, state, B_TRUE));
	}

	if (zap_lookup(spa->spa_meta_objset,
	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_SYNC_BPLIST,
	    sizeof (uint64_t), 1, &spa->spa_sync_bplist_obj) != 0) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_CORRUPT_DATA);
		error = EIO;
		goto out;
	}

	/*
	 * Load the bit that tells us to use the new accounting function
	 * (raid-z deflation). If we have an older pool, this will not
	 * be present.
	 */
	error = zap_lookup(spa->spa_meta_objset,
	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_DEFLATE,
	    sizeof (uint64_t), 1, &spa->spa_deflate);
	if (error != 0 && error != ENOENT) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_CORRUPT_DATA);
		error = EIO;
		goto out;
	}

	/*
	 * Load the persistent error log. If we have an older pool, this will
	 * not be present.
	 */
	error = zap_lookup(spa->spa_meta_objset,
	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_ERRLOG_LAST,
	    sizeof (uint64_t), 1, &spa->spa_errlog_last);
	if (error != 0 && error != ENOENT) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_CORRUPT_DATA);
		error = EIO;
		goto out;
	}

	error = zap_lookup(spa->spa_meta_objset,
	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_ERRLOG_SCRUB,
	    sizeof (uint64_t), 1, &spa->spa_errlog_scrub);
	if (error != 0 && error != ENOENT) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_CORRUPT_DATA);
		error = EIO;
		goto out;
	}

	/*
	 * Load the history object. If we have an older pool, this
	 * will not be present.
	 */
	error = zap_lookup(spa->spa_meta_objset,
	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_HISTORY,
	    sizeof (uint64_t), 1, &spa->spa_history);
	if (error != 0 && error != ENOENT) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_CORRUPT_DATA);
		error = EIO;
		goto out;
	}

	/*
	 * Load any hot spares for this pool.
	 */
	error = zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_SPARES, sizeof (uint64_t), 1, &spa->spa_spares.sav_object);
	if (error != 0 && error != ENOENT) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_CORRUPT_DATA);
		error = EIO;
		goto out;
	}
	if (error == 0) {
		ASSERT(spa_version(spa) >= SPA_VERSION_SPARES);
		if (load_nvlist(spa, spa->spa_spares.sav_object,
		    &spa->spa_spares.sav_config) != 0) {
			vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
			    VDEV_AUX_CORRUPT_DATA);
			error = EIO;
			goto out;
		}

		spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
		spa_load_spares(spa);
		spa_config_exit(spa, SCL_ALL, FTAG);
	}

	/*
	 * Load any level 2 ARC devices for this pool.
	 */
	error = zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_L2CACHE, sizeof (uint64_t), 1,
	    &spa->spa_l2cache.sav_object);
	if (error != 0 && error != ENOENT) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_CORRUPT_DATA);
		error = EIO;
		goto out;
	}
	if (error == 0) {
		ASSERT(spa_version(spa) >= SPA_VERSION_L2CACHE);
		if (load_nvlist(spa, spa->spa_l2cache.sav_object,
		    &spa->spa_l2cache.sav_config) != 0) {
			vdev_set_state(rvd, B_TRUE,
			    VDEV_STATE_CANT_OPEN,
			    VDEV_AUX_CORRUPT_DATA);
			error = EIO;
			goto out;
		}

		spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
		spa_load_l2cache(spa);
		spa_config_exit(spa, SCL_ALL, FTAG);
	}

	spa_load_log_state(spa);

	if (spa_check_logs(spa)) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_BAD_LOG);
		error = ENXIO;
		ereport = FM_EREPORT_ZFS_LOG_REPLAY;
		goto out;
	}

	spa->spa_delegation = zpool_prop_default_numeric(ZPOOL_PROP_DELEGATION);

	error = zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_PROPS, sizeof (uint64_t), 1, &spa->spa_pool_props_object);

	if (error && error != ENOENT) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_CORRUPT_DATA);
		error = EIO;
		goto out;
	}

	if (error == 0) {
		(void) zap_lookup(spa->spa_meta_objset,
		    spa->spa_pool_props_object,
		    zpool_prop_to_name(ZPOOL_PROP_BOOTFS),
		    sizeof (uint64_t), 1, &spa->spa_bootfs);
		(void) zap_lookup(spa->spa_meta_objset,
		    spa->spa_pool_props_object,
		    zpool_prop_to_name(ZPOOL_PROP_AUTOREPLACE),
		    sizeof (uint64_t), 1, &autoreplace);
		(void) zap_lookup(spa->spa_meta_objset,
		    spa->spa_pool_props_object,
		    zpool_prop_to_name(ZPOOL_PROP_DELEGATION),
		    sizeof (uint64_t), 1, &spa->spa_delegation);
		(void) zap_lookup(spa->spa_meta_objset,
		    spa->spa_pool_props_object,
		    zpool_prop_to_name(ZPOOL_PROP_FAILUREMODE),
		    sizeof (uint64_t), 1, &spa->spa_failmode);
	}

	/*
	 * If the 'autoreplace' property is set, then post a resource notifying
	 * the ZFS DE that it should not issue any faults for unopenable
	 * devices. We also iterate over the vdevs, and post a sysevent for any
	 * unopenable vdevs so that the normal autoreplace handler can take
	 * over.
	 */
	if (autoreplace && state != SPA_LOAD_TRYIMPORT)
		spa_check_removed(spa->spa_root_vdev);

	/*
	 * Load the vdev state for all toplevel vdevs.
	 */
	vdev_load(rvd);

	/*
	 * Propagate the leaf DTLs we just loaded all the way up the tree.
	 */
	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
	vdev_dtl_reassess(rvd, 0, 0, B_FALSE);
	spa_config_exit(spa, SCL_ALL, FTAG);

	/*
	 * Check the state of the root vdev. If it can't be opened, it
	 * indicates one or more toplevel vdevs are faulted.
	 */
	if (rvd->vdev_state <= VDEV_STATE_CANT_OPEN) {
		error = ENXIO;
		goto out;
	}

	if (spa_writeable(spa)) {
		dmu_tx_t *tx;
		int need_update = B_FALSE;

		ASSERT(state != SPA_LOAD_TRYIMPORT);

		/*
		 * Claim log blocks that haven't been committed yet.
		 * This must all happen in a single txg.
		 */
		tx = dmu_tx_create_assigned(spa_get_dsl(spa),
		    spa_first_txg(spa));
		(void) dmu_objset_find(spa_name(spa),
		    zil_claim, tx, DS_FIND_CHILDREN);
		dmu_tx_commit(tx);

		spa->spa_log_state = SPA_LOG_GOOD;
		spa->spa_sync_on = B_TRUE;
		txg_sync_start(spa->spa_dsl_pool);

		/*
		 * Wait for all claims to sync.
		 */
		txg_wait_synced(spa->spa_dsl_pool, 0);

		/*
		 * If the config cache is stale, or we have uninitialized
		 * metaslabs (see spa_vdev_add()), then update the config.
		 */
		if (config_cache_txg != spa->spa_config_txg ||
		    state == SPA_LOAD_IMPORT)
			need_update = B_TRUE;

		for (int c = 0; c < rvd->vdev_children; c++)
			if (rvd->vdev_child[c]->vdev_ms_array == 0)
				need_update = B_TRUE;

		/*
		 * Update the config cache asynchronously in case we're the
		 * root pool, in which case the config cache isn't writable yet.
		 */
		if (need_update)
			spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE);

		/*
		 * Check all DTLs to see if anything needs resilvering.
		 */
		if (vdev_resilver_needed(rvd, NULL, NULL))
			spa_async_request(spa, SPA_ASYNC_RESILVER);
	}

	error = 0;
out:
	spa->spa_minref = refcount_count(&spa->spa_refcount);
	if (error && error != EBADF)
		zfs_ereport_post(ereport, spa, NULL, NULL, 0, 0);
	spa->spa_load_state = SPA_LOAD_NONE;
	spa->spa_ena = 0;

	return (error);
}

/*
 * Pool Open/Import
 *
 * The import case is identical to an open except that the configuration is
 * sent down from userland, instead of grabbed from the configuration cache.
 * For the case of an open, the pool configuration will exist in the
 * POOL_STATE_UNINITIALIZED state.
 *
 * The stats information (gen/count/ustats) is used to gather vdev statistics
 * at the same time we open the pool, without having to keep around the spa_t
 * in some ambiguous state.
 */
static int
spa_open_common(const char *pool, spa_t **spapp, void *tag, nvlist_t **config)
{
	spa_t *spa;
	int error;
	int locked = B_FALSE;

	*spapp = NULL;

	/*
	 * As disgusting as this is, we need to support recursive calls to this
	 * function because dsl_dir_open() is called during spa_load(), and ends
	 * up calling spa_open() again. The real fix is to figure out how to
	 * avoid dsl_dir_open() calling this in the first place.
	 */
	if (mutex_owner(&spa_namespace_lock) != curthread) {
		mutex_enter(&spa_namespace_lock);
		locked = B_TRUE;
	}

	if ((spa = spa_lookup(pool)) == NULL) {
		if (locked)
			mutex_exit(&spa_namespace_lock);
		return (ENOENT);
	}
	if (spa->spa_state == POOL_STATE_UNINITIALIZED) {

		spa_activate(spa, spa_mode_global);

		error = spa_load(spa, spa->spa_config, SPA_LOAD_OPEN, B_FALSE);

		if (error == EBADF) {
			/*
			 * If vdev_validate() returns failure (indicated by
			 * EBADF), one of the vdevs indicates that the pool
			 * has been exported or destroyed. If this is the
			 * case, the config cache is out of sync and we
			 * should remove the pool from the namespace.
			 */
			spa_unload(spa);
			spa_deactivate(spa);
			spa_config_sync(spa, B_TRUE, B_TRUE);
			spa_remove(spa);
			if (locked)
				mutex_exit(&spa_namespace_lock);
			return (ENOENT);
		}

		if (error) {
			/*
			 * We can't open the pool, but we still have useful
			 * information: the state of each vdev after the
			 * attempted vdev_open(). Return this to the user.
			 */
			if (config != NULL && spa->spa_root_vdev != NULL)
				*config = spa_config_generate(spa, NULL, -1ULL,
				    B_TRUE);
			spa_unload(spa);
			spa_deactivate(spa);
			spa->spa_last_open_failed = B_TRUE;
			if (locked)
				mutex_exit(&spa_namespace_lock);
			*spapp = NULL;
			return (error);
		} else {
			spa->spa_last_open_failed = B_FALSE;
		}
	}

	spa_open_ref(spa, tag);

	if (locked)
		mutex_exit(&spa_namespace_lock);

	*spapp = spa;

	if (config != NULL)
		*config = spa_config_generate(spa, NULL, -1ULL, B_TRUE);

	return (0);
}

int
spa_open(const char *name, spa_t **spapp, void *tag)
{
	return (spa_open_common(name, spapp, tag, NULL));
}

/*
 * Lookup the given spa_t, incrementing the inject count in the process,
 * preventing it from being exported or destroyed.
 */
spa_t *
spa_inject_addref(char *name)
{
	spa_t *spa;

	mutex_enter(&spa_namespace_lock);
	if ((spa = spa_lookup(name)) == NULL) {
		mutex_exit(&spa_namespace_lock);
		return (NULL);
	}
	spa->spa_inject_ref++;
	mutex_exit(&spa_namespace_lock);

	return (spa);
}

void
spa_inject_delref(spa_t *spa)
{
	mutex_enter(&spa_namespace_lock);
	spa->spa_inject_ref--;
	mutex_exit(&spa_namespace_lock);
}

/*
 * Add spares device information to the nvlist.
 */
static void
spa_add_spares(spa_t *spa, nvlist_t *config)
{
	nvlist_t **spares;
	uint_t i, nspares;
	nvlist_t *nvroot;
	uint64_t guid;
	vdev_stat_t *vs;
	uint_t vsc;
	uint64_t pool;

	ASSERT(spa_config_held(spa, SCL_CONFIG, RW_READER));

	if (spa->spa_spares.sav_count == 0)
		return;

	VERIFY(nvlist_lookup_nvlist(config,
	    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
	VERIFY(nvlist_lookup_nvlist_array(spa->spa_spares.sav_config,
	    ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0);
	if (nspares != 0) {
		VERIFY(nvlist_add_nvlist_array(nvroot,
		    ZPOOL_CONFIG_SPARES, spares, nspares) == 0);
		VERIFY(nvlist_lookup_nvlist_array(nvroot,
		    ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0);

		/*
		 * Go through and find any spares which have since been
		 * repurposed as an active spare. If this is the case, update
		 * their status appropriately.
		 */
		for (i = 0; i < nspares; i++) {
			VERIFY(nvlist_lookup_uint64(spares[i],
			    ZPOOL_CONFIG_GUID, &guid) == 0);
			if (spa_spare_exists(guid, &pool, NULL) &&
			    pool != 0ULL) {
				VERIFY(nvlist_lookup_uint64_array(
				    spares[i], ZPOOL_CONFIG_STATS,
				    (uint64_t **)&vs, &vsc) == 0);
				vs->vs_state = VDEV_STATE_CANT_OPEN;
				vs->vs_aux = VDEV_AUX_SPARED;
			}
		}
	}
}

/*
 * Add l2cache device information to the nvlist, including vdev stats.
 */
static void
spa_add_l2cache(spa_t *spa, nvlist_t *config)
{
	nvlist_t **l2cache;
	uint_t i, j, nl2cache;
	nvlist_t *nvroot;
	uint64_t guid;
	vdev_t *vd;
	vdev_stat_t *vs;
	uint_t vsc;

	ASSERT(spa_config_held(spa, SCL_CONFIG, RW_READER));

	if (spa->spa_l2cache.sav_count == 0)
		return;

	VERIFY(nvlist_lookup_nvlist(config,
	    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
	VERIFY(nvlist_lookup_nvlist_array(spa->spa_l2cache.sav_config,
	    ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0);
	if (nl2cache != 0) {
		VERIFY(nvlist_add_nvlist_array(nvroot,
		    ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache) == 0);
		VERIFY(nvlist_lookup_nvlist_array(nvroot,
		    ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0);

		/*
		 * Update level 2 cache device stats.
		 */

		for (i = 0; i < nl2cache; i++) {
			VERIFY(nvlist_lookup_uint64(l2cache[i],
			    ZPOOL_CONFIG_GUID, &guid) == 0);

			vd = NULL;
			for (j = 0; j < spa->spa_l2cache.sav_count; j++) {
				if (guid ==
				    spa->spa_l2cache.sav_vdevs[j]->vdev_guid) {
					vd = spa->spa_l2cache.sav_vdevs[j];
					break;
				}
			}
			ASSERT(vd != NULL);

			VERIFY(nvlist_lookup_uint64_array(l2cache[i],
			    ZPOOL_CONFIG_STATS, (uint64_t **)&vs, &vsc) == 0);
			vdev_get_stats(vd, vs);
		}
	}
}

int
spa_get_stats(const char *name, nvlist_t **config, char *altroot, size_t buflen)
{
	int error;
	spa_t *spa;

	*config = NULL;
	error = spa_open_common(name, &spa, FTAG, config);

	if (spa != NULL) {
		/*
		 * This still leaves a window of inconsistency where the spares
		 * or l2cache devices could change and the config would be
		 * self-inconsistent.
		 */
		spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);

		if (*config != NULL) {
			VERIFY(nvlist_add_uint64(*config,
			    ZPOOL_CONFIG_ERRCOUNT,
			    spa_get_errlog_size(spa)) == 0);

			if (spa_suspended(spa))
				VERIFY(nvlist_add_uint64(*config,
				    ZPOOL_CONFIG_SUSPENDED,
				    spa->spa_failmode) == 0);

			spa_add_spares(spa, *config);
			spa_add_l2cache(spa, *config);
		}
	}

	/*
	 * We want to get the alternate root even for faulted pools, so we cheat
	 * and call spa_lookup() directly.
	 */
	if (altroot) {
		if (spa == NULL) {
			mutex_enter(&spa_namespace_lock);
			spa = spa_lookup(name);
			if (spa)
				spa_altroot(spa, altroot, buflen);
			else
				altroot[0] = '\0';
			spa = NULL;
			mutex_exit(&spa_namespace_lock);
		} else {
			spa_altroot(spa, altroot, buflen);
		}
	}

	if (spa != NULL) {
		spa_config_exit(spa, SCL_CONFIG, FTAG);
		spa_close(spa, FTAG);
	}

	return (error);
}
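
/*
 * Hypothetical caller sketch (illustration only, not taken from the
 * ioctl code): a consumer would obtain pool status roughly as
 *
 *	nvlist_t *config;
 *	char altroot[MAXPATHLEN];
 *	int error = spa_get_stats("tank", &config, altroot,
 *	    sizeof (altroot));
 *
 * and may receive a non-NULL config even on failure, describing the
 * per-vdev open states (see spa_open_common() above).
 */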

/*
 * Validate that the auxiliary device array is well formed. We must have an
 * array of nvlists, each of which describes a valid leaf vdev. If this is an
 * import (mode is VDEV_ALLOC_SPARE), then we allow corrupted spares to be
 * specified, as long as they are well-formed.
 */
static int
spa_validate_aux_devs(spa_t *spa, nvlist_t *nvroot, uint64_t crtxg, int mode,
    spa_aux_vdev_t *sav, const char *config, uint64_t version,
    vdev_labeltype_t label)
{
	nvlist_t **dev;
	uint_t i, ndev;
	vdev_t *vd;
	int error;

	ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);

	/*
	 * It's acceptable to have no devs specified.
	 */
	if (nvlist_lookup_nvlist_array(nvroot, config, &dev, &ndev) != 0)
		return (0);

	if (ndev == 0)
		return (EINVAL);

	/*
	 * Make sure the pool is formatted with a version that supports this
	 * device type.
	 */
	if (spa_version(spa) < version)
		return (ENOTSUP);

	/*
	 * Set the pending device list so we correctly handle device in-use
	 * checking.
	 */
	sav->sav_pending = dev;
	sav->sav_npending = ndev;

	for (i = 0; i < ndev; i++) {
		if ((error = spa_config_parse(spa, &vd, dev[i], NULL, 0,
		    mode)) != 0)
			goto out;

		if (!vd->vdev_ops->vdev_op_leaf) {
			vdev_free(vd);
			error = EINVAL;
			goto out;
		}

		/*
		 * The L2ARC currently only supports disk devices in
		 * kernel context. For user-level testing, we allow it.
		 */
#ifdef	_KERNEL
		if ((strcmp(config, ZPOOL_CONFIG_L2CACHE) == 0) &&
		    strcmp(vd->vdev_ops->vdev_op_type, VDEV_TYPE_DISK) != 0) {
			error = ENOTBLK;
			goto out;
		}
#endif
		vd->vdev_top = vd;

		if ((error = vdev_open(vd)) == 0 &&
		    (error = vdev_label_init(vd, crtxg, label)) == 0) {
			VERIFY(nvlist_add_uint64(dev[i], ZPOOL_CONFIG_GUID,
			    vd->vdev_guid) == 0);
		}

		vdev_free(vd);

		if (error &&
		    (mode != VDEV_ALLOC_SPARE && mode != VDEV_ALLOC_L2CACHE))
			goto out;
		else
			error = 0;
	}

out:
	sav->sav_pending = NULL;
	sav->sav_npending = 0;
	return (error);
}

static int
spa_validate_aux(spa_t *spa, nvlist_t *nvroot, uint64_t crtxg, int mode)
{
	int error;

	ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);

	if ((error = spa_validate_aux_devs(spa, nvroot, crtxg, mode,
	    &spa->spa_spares, ZPOOL_CONFIG_SPARES, SPA_VERSION_SPARES,
	    VDEV_LABEL_SPARE)) != 0) {
		return (error);
	}

	return (spa_validate_aux_devs(spa, nvroot, crtxg, mode,
	    &spa->spa_l2cache, ZPOOL_CONFIG_L2CACHE, SPA_VERSION_L2CACHE,
	    VDEV_LABEL_L2CACHE));
}

static void
spa_set_aux_vdevs(spa_aux_vdev_t *sav, nvlist_t **devs, int ndevs,
    const char *config)
{
	int i;

	if (sav->sav_config != NULL) {
		nvlist_t **olddevs;
		uint_t oldndevs;
		nvlist_t **newdevs;

		/*
		 * Generate new dev list by concatenating with the
		 * current dev list.
		 */
		VERIFY(nvlist_lookup_nvlist_array(sav->sav_config, config,
		    &olddevs, &oldndevs) == 0);

		newdevs = kmem_alloc(sizeof (void *) *
		    (ndevs + oldndevs), KM_SLEEP);
		for (i = 0; i < oldndevs; i++)
			VERIFY(nvlist_dup(olddevs[i], &newdevs[i],
			    KM_SLEEP) == 0);
		for (i = 0; i < ndevs; i++)
			VERIFY(nvlist_dup(devs[i], &newdevs[i + oldndevs],
			    KM_SLEEP) == 0);

		VERIFY(nvlist_remove(sav->sav_config, config,
		    DATA_TYPE_NVLIST_ARRAY) == 0);

		VERIFY(nvlist_add_nvlist_array(sav->sav_config,
		    config, newdevs, ndevs + oldndevs) == 0);
		for (i = 0; i < oldndevs + ndevs; i++)
			nvlist_free(newdevs[i]);
		kmem_free(newdevs, (oldndevs + ndevs) * sizeof (void *));
	} else {
		/*
		 * Generate a new dev list.
		 */
2043 */ 2044 VERIFY(nvlist_alloc(&sav->sav_config, NV_UNIQUE_NAME, 2045 KM_SLEEP) == 0); 2046 VERIFY(nvlist_add_nvlist_array(sav->sav_config, config, 2047 devs, ndevs) == 0); 2048 } 2049 } 2050 2051 /* 2052 * Stop and drop level 2 ARC devices 2053 */ 2054 void 2055 spa_l2cache_drop(spa_t *spa) 2056 { 2057 vdev_t *vd; 2058 int i; 2059 spa_aux_vdev_t *sav = &spa->spa_l2cache; 2060 2061 for (i = 0; i < sav->sav_count; i++) { 2062 uint64_t pool; 2063 2064 vd = sav->sav_vdevs[i]; 2065 ASSERT(vd != NULL); 2066 2067 if (spa_l2cache_exists(vd->vdev_guid, &pool) && 2068 pool != 0ULL && l2arc_vdev_present(vd)) 2069 l2arc_remove_vdev(vd); 2070 if (vd->vdev_isl2cache) 2071 spa_l2cache_remove(vd); 2072 vdev_clear_stats(vd); 2073 (void) vdev_close(vd); 2074 } 2075 } 2076 2077 /* 2078 * Pool Creation 2079 */ 2080 int 2081 spa_create(const char *pool, nvlist_t *nvroot, nvlist_t *props, 2082 const char *history_str, nvlist_t *zplprops) 2083 { 2084 spa_t *spa; 2085 char *altroot = NULL; 2086 vdev_t *rvd; 2087 dsl_pool_t *dp; 2088 dmu_tx_t *tx; 2089 int c, error = 0; 2090 uint64_t txg = TXG_INITIAL; 2091 nvlist_t **spares, **l2cache; 2092 uint_t nspares, nl2cache; 2093 uint64_t version; 2094 2095 /* 2096 * If this pool already exists, return failure. 2097 */ 2098 mutex_enter(&spa_namespace_lock); 2099 if (spa_lookup(pool) != NULL) { 2100 mutex_exit(&spa_namespace_lock); 2101 return (EEXIST); 2102 } 2103 2104 /* 2105 * Allocate a new spa_t structure. 2106 */ 2107 (void) nvlist_lookup_string(props, 2108 zpool_prop_to_name(ZPOOL_PROP_ALTROOT), &altroot); 2109 spa = spa_add(pool, altroot); 2110 spa_activate(spa, spa_mode_global); 2111 2112 spa->spa_uberblock.ub_txg = txg - 1; 2113 2114 if (props && (error = spa_prop_validate(spa, props))) { 2115 spa_deactivate(spa); 2116 spa_remove(spa); 2117 mutex_exit(&spa_namespace_lock); 2118 return (error); 2119 } 2120 2121 if (nvlist_lookup_uint64(props, zpool_prop_to_name(ZPOOL_PROP_VERSION), 2122 &version) != 0) 2123 version = SPA_VERSION; 2124 ASSERT(version <= SPA_VERSION); 2125 spa->spa_uberblock.ub_version = version; 2126 spa->spa_ubsync = spa->spa_uberblock; 2127 2128 /* 2129 * Create "The Godfather" zio to hold all async IOs 2130 */ 2131 spa->spa_async_zio_root = zio_root(spa, NULL, NULL, 2132 ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE | ZIO_FLAG_GODFATHER); 2133 2134 /* 2135 * Create the root vdev. 2136 */ 2137 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 2138 2139 error = spa_config_parse(spa, &rvd, nvroot, NULL, 0, VDEV_ALLOC_ADD); 2140 2141 ASSERT(error != 0 || rvd != NULL); 2142 ASSERT(error != 0 || spa->spa_root_vdev == rvd); 2143 2144 if (error == 0 && !zfs_allocatable_devs(nvroot)) 2145 error = EINVAL; 2146 2147 if (error == 0 && 2148 (error = vdev_create(rvd, txg, B_FALSE)) == 0 && 2149 (error = spa_validate_aux(spa, nvroot, txg, 2150 VDEV_ALLOC_ADD)) == 0) { 2151 for (c = 0; c < rvd->vdev_children; c++) 2152 vdev_init(rvd->vdev_child[c], txg); 2153 vdev_config_dirty(rvd); 2154 } 2155 2156 spa_config_exit(spa, SCL_ALL, FTAG); 2157 2158 if (error != 0) { 2159 spa_unload(spa); 2160 spa_deactivate(spa); 2161 spa_remove(spa); 2162 mutex_exit(&spa_namespace_lock); 2163 return (error); 2164 } 2165 2166 /* 2167 * Get the list of spares, if specified. 
2168 */ 2169 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES, 2170 &spares, &nspares) == 0) { 2171 VERIFY(nvlist_alloc(&spa->spa_spares.sav_config, NV_UNIQUE_NAME, 2172 KM_SLEEP) == 0); 2173 VERIFY(nvlist_add_nvlist_array(spa->spa_spares.sav_config, 2174 ZPOOL_CONFIG_SPARES, spares, nspares) == 0); 2175 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 2176 spa_load_spares(spa); 2177 spa_config_exit(spa, SCL_ALL, FTAG); 2178 spa->spa_spares.sav_sync = B_TRUE; 2179 } 2180 2181 /* 2182 * Get the list of level 2 cache devices, if specified. 2183 */ 2184 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE, 2185 &l2cache, &nl2cache) == 0) { 2186 VERIFY(nvlist_alloc(&spa->spa_l2cache.sav_config, 2187 NV_UNIQUE_NAME, KM_SLEEP) == 0); 2188 VERIFY(nvlist_add_nvlist_array(spa->spa_l2cache.sav_config, 2189 ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache) == 0); 2190 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 2191 spa_load_l2cache(spa); 2192 spa_config_exit(spa, SCL_ALL, FTAG); 2193 spa->spa_l2cache.sav_sync = B_TRUE; 2194 } 2195 2196 spa->spa_dsl_pool = dp = dsl_pool_create(spa, zplprops, txg); 2197 spa->spa_meta_objset = dp->dp_meta_objset; 2198 2199 tx = dmu_tx_create_assigned(dp, txg); 2200 2201 /* 2202 * Create the pool config object. 2203 */ 2204 spa->spa_config_object = dmu_object_alloc(spa->spa_meta_objset, 2205 DMU_OT_PACKED_NVLIST, SPA_CONFIG_BLOCKSIZE, 2206 DMU_OT_PACKED_NVLIST_SIZE, sizeof (uint64_t), tx); 2207 2208 if (zap_add(spa->spa_meta_objset, 2209 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_CONFIG, 2210 sizeof (uint64_t), 1, &spa->spa_config_object, tx) != 0) { 2211 cmn_err(CE_PANIC, "failed to add pool config"); 2212 } 2213 2214 /* Newly created pools with the right version are always deflated. */ 2215 if (version >= SPA_VERSION_RAIDZ_DEFLATE) { 2216 spa->spa_deflate = TRUE; 2217 if (zap_add(spa->spa_meta_objset, 2218 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_DEFLATE, 2219 sizeof (uint64_t), 1, &spa->spa_deflate, tx) != 0) { 2220 cmn_err(CE_PANIC, "failed to add deflate"); 2221 } 2222 } 2223 2224 /* 2225 * Create the deferred-free bplist object. Turn off compression 2226 * because sync-to-convergence takes longer if the blocksize 2227 * keeps changing. 2228 */ 2229 spa->spa_sync_bplist_obj = bplist_create(spa->spa_meta_objset, 2230 1 << 14, tx); 2231 dmu_object_set_compress(spa->spa_meta_objset, spa->spa_sync_bplist_obj, 2232 ZIO_COMPRESS_OFF, tx); 2233 2234 if (zap_add(spa->spa_meta_objset, 2235 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_SYNC_BPLIST, 2236 sizeof (uint64_t), 1, &spa->spa_sync_bplist_obj, tx) != 0) { 2237 cmn_err(CE_PANIC, "failed to add bplist"); 2238 } 2239 2240 /* 2241 * Create the pool's history object. 2242 */ 2243 if (version >= SPA_VERSION_ZPOOL_HISTORY) 2244 spa_history_create_obj(spa, tx); 2245 2246 /* 2247 * Set pool properties. 2248 */ 2249 spa->spa_bootfs = zpool_prop_default_numeric(ZPOOL_PROP_BOOTFS); 2250 spa->spa_delegation = zpool_prop_default_numeric(ZPOOL_PROP_DELEGATION); 2251 spa->spa_failmode = zpool_prop_default_numeric(ZPOOL_PROP_FAILUREMODE); 2252 if (props != NULL) { 2253 spa_configfile_set(spa, props, B_FALSE); 2254 spa_sync_props(spa, props, CRED(), tx); 2255 } 2256 2257 dmu_tx_commit(tx); 2258 2259 spa->spa_sync_on = B_TRUE; 2260 txg_sync_start(spa->spa_dsl_pool); 2261 2262 /* 2263 * We explicitly wait for the first transaction to complete so that our 2264 * bean counters are appropriately updated. 
2265 */ 2266 txg_wait_synced(spa->spa_dsl_pool, txg); 2267 2268 spa_config_sync(spa, B_FALSE, B_TRUE); 2269 2270 if (version >= SPA_VERSION_ZPOOL_HISTORY && history_str != NULL) 2271 (void) spa_history_log(spa, history_str, LOG_CMD_POOL_CREATE); 2272 2273 spa->spa_minref = refcount_count(&spa->spa_refcount); 2274 2275 mutex_exit(&spa_namespace_lock); 2276 2277 return (0); 2278 } 2279 2280 #ifdef _KERNEL 2281 /* 2282 * Get the root pool information from the root disk, then import the root pool 2283 * during system boot. 2284 */ 2285 extern int vdev_disk_read_rootlabel(char *, char *, nvlist_t **); 2286 2287 static nvlist_t * 2288 spa_generate_rootconf(char *devpath, char *devid, uint64_t *guid) 2289 { 2290 nvlist_t *config; 2291 nvlist_t *nvtop, *nvroot; 2292 uint64_t pgid; 2293 2294 if (vdev_disk_read_rootlabel(devpath, devid, &config) != 0) 2295 return (NULL); 2296 2297 /* 2298 * Add this top-level vdev to the child array. 2299 */ 2300 VERIFY(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, 2301 &nvtop) == 0); 2302 VERIFY(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID, 2303 &pgid) == 0); 2304 VERIFY(nvlist_lookup_uint64(config, ZPOOL_CONFIG_GUID, guid) == 0); 2305 2306 /* 2307 * Put this pool's top-level vdevs into a root vdev. 2308 */ 2309 VERIFY(nvlist_alloc(&nvroot, NV_UNIQUE_NAME, KM_SLEEP) == 0); 2310 VERIFY(nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE, 2311 VDEV_TYPE_ROOT) == 0); 2312 VERIFY(nvlist_add_uint64(nvroot, ZPOOL_CONFIG_ID, 0ULL) == 0); 2313 VERIFY(nvlist_add_uint64(nvroot, ZPOOL_CONFIG_GUID, pgid) == 0); 2314 VERIFY(nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN, 2315 &nvtop, 1) == 0); 2316 2317 /* 2318 * Replace the existing vdev_tree with the new root vdev in 2319 * this pool's configuration (remove the old, add the new). 2320 */ 2321 VERIFY(nvlist_add_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, nvroot) == 0); 2322 nvlist_free(nvroot); 2323 return (config); 2324 } 2325 2326 /* 2327 * Walk the vdev tree and see if we can find a device with "better" 2328 * configuration. A configuration is "better" if the label on that 2329 * device has a more recent txg. 2330 */ 2331 static void 2332 spa_alt_rootvdev(vdev_t *vd, vdev_t **avd, uint64_t *txg) 2333 { 2334 int c; 2335 2336 for (c = 0; c < vd->vdev_children; c++) 2337 spa_alt_rootvdev(vd->vdev_child[c], avd, txg); 2338 2339 if (vd->vdev_ops->vdev_op_leaf) { 2340 nvlist_t *label; 2341 uint64_t label_txg; 2342 2343 if (vdev_disk_read_rootlabel(vd->vdev_physpath, vd->vdev_devid, 2344 &label) != 0) 2345 return; 2346 2347 VERIFY(nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_TXG, 2348 &label_txg) == 0); 2349 2350 /* 2351 * Do we have a better boot device? 2352 */ 2353 if (label_txg > *txg) { 2354 *txg = label_txg; 2355 *avd = vd; 2356 } 2357 nvlist_free(label); 2358 } 2359 } 2360 2361 /* 2362 * Import a root pool. 2363 * 2364 * For x86, devpath_list will consist of the devid and/or physpath name of 2365 * the vdev (e.g. "id1,sd@SSEAGATE..." or "/pci@1f,0/ide@d/disk@0,0:a"). 2366 * The GRUB "findroot" command will return the vdev we should boot. 2367 * 2368 * For Sparc, devpath_list consists of the physpath name of the booting device, 2369 * whether the root pool is a single-device pool or a mirrored pool. 2370 * e.g.
2371 * "/pci@1f,0/ide@d/disk@0,0:a" 2372 */ 2373 int 2374 spa_import_rootpool(char *devpath, char *devid) 2375 { 2376 spa_t *spa; 2377 vdev_t *rvd, *bvd, *avd = NULL; 2378 nvlist_t *config, *nvtop; 2379 uint64_t guid, txg; 2380 char *pname; 2381 int error; 2382 2383 /* 2384 * Read the label from the boot device and generate a configuration. 2385 */ 2386 if ((config = spa_generate_rootconf(devpath, devid, &guid)) == NULL) { 2387 cmn_err(CE_NOTE, "Can not read the pool label from '%s'", 2388 devpath); 2389 return (EIO); 2390 } 2391 2392 VERIFY(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME, 2393 &pname) == 0); 2394 VERIFY(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_TXG, &txg) == 0); 2395 2396 mutex_enter(&spa_namespace_lock); 2397 if ((spa = spa_lookup(pname)) != NULL) { 2398 /* 2399 * Remove the existing root pool from the namespace so that we 2400 * can replace it with the correct config we just read in. 2401 */ 2402 spa_remove(spa); 2403 } 2404 2405 spa = spa_add(pname, NULL); 2406 spa->spa_is_root = B_TRUE; 2407 2408 /* 2409 * Build up a vdev tree based on the boot device's label config. 2410 */ 2411 VERIFY(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, 2412 &nvtop) == 0); 2413 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 2414 error = spa_config_parse(spa, &rvd, nvtop, NULL, 0, 2415 VDEV_ALLOC_ROOTPOOL); 2416 spa_config_exit(spa, SCL_ALL, FTAG); 2417 if (error) { 2418 mutex_exit(&spa_namespace_lock); 2419 nvlist_free(config); 2420 cmn_err(CE_NOTE, "Can not parse the config for pool '%s'", 2421 pname); 2422 return (error); 2423 } 2424 2425 /* 2426 * Get the boot vdev. 2427 */ 2428 if ((bvd = vdev_lookup_by_guid(rvd, guid)) == NULL) { 2429 cmn_err(CE_NOTE, "Can not find the boot vdev for guid %llu", 2430 (u_longlong_t)guid); 2431 error = ENOENT; 2432 goto out; 2433 } 2434 2435 /* 2436 * Determine if there is a better boot device. 2437 */ 2438 avd = bvd; 2439 spa_alt_rootvdev(rvd, &avd, &txg); 2440 if (avd != bvd) { 2441 cmn_err(CE_NOTE, "The boot device is 'degraded'. Please " 2442 "try booting from '%s'", avd->vdev_path); 2443 error = EINVAL; 2444 goto out; 2445 } 2446 2447 /* 2448 * If the boot device is part of a spare vdev then ensure that 2449 * we're booting off the active spare. 2450 */ 2451 if (bvd->vdev_parent->vdev_ops == &vdev_spare_ops && 2452 !bvd->vdev_isspare) { 2453 cmn_err(CE_NOTE, "The boot device is currently spared. Please " 2454 "try booting from '%s'", 2455 bvd->vdev_parent->vdev_child[1]->vdev_path); 2456 error = EINVAL; 2457 goto out; 2458 } 2459 2460 VERIFY(nvlist_dup(config, &spa->spa_config, 0) == 0); 2461 error = 0; 2462 out: 2463 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 2464 vdev_free(rvd); 2465 spa_config_exit(spa, SCL_ALL, FTAG); 2466 mutex_exit(&spa_namespace_lock); 2467 2468 nvlist_free(config); 2469 return (error); 2470 } 2471 2472 #endif 2473 2474 /* 2475 * Take a pool and insert it into the namespace as if it had been loaded at 2476 * boot. 
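 *
 * A minimal caller sketch (illustrative only; the pool name "tank" is
 * hypothetical, and in practice the config comes from an unpacked
 * cachefile entry):
 *
 *	nvlist_t *config = ...;			(cached config for "tank")
 *	int err = spa_import_verbatim("tank", config, NULL);
 *
 * Unlike spa_import() below, no vdevs are opened and spa_load() is never
 * called; the supplied config is trusted verbatim, hence the name.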
2477 */ 2478 int 2479 spa_import_verbatim(const char *pool, nvlist_t *config, nvlist_t *props) 2480 { 2481 spa_t *spa; 2482 char *altroot = NULL; 2483 2484 mutex_enter(&spa_namespace_lock); 2485 if (spa_lookup(pool) != NULL) { 2486 mutex_exit(&spa_namespace_lock); 2487 return (EEXIST); 2488 } 2489 2490 (void) nvlist_lookup_string(props, 2491 zpool_prop_to_name(ZPOOL_PROP_ALTROOT), &altroot); 2492 spa = spa_add(pool, altroot); 2493 2494 VERIFY(nvlist_dup(config, &spa->spa_config, 0) == 0); 2495 2496 if (props != NULL) 2497 spa_configfile_set(spa, props, B_FALSE); 2498 2499 spa_config_sync(spa, B_FALSE, B_TRUE); 2500 2501 mutex_exit(&spa_namespace_lock); 2502 2503 return (0); 2504 } 2505 2506 /* 2507 * Import a non-root pool into the system. 2508 */ 2509 int 2510 spa_import(const char *pool, nvlist_t *config, nvlist_t *props) 2511 { 2512 spa_t *spa; 2513 char *altroot = NULL; 2514 int error; 2515 nvlist_t *nvroot; 2516 nvlist_t **spares, **l2cache; 2517 uint_t nspares, nl2cache; 2518 2519 /* 2520 * If a pool with this name exists, return failure. 2521 */ 2522 mutex_enter(&spa_namespace_lock); 2523 if ((spa = spa_lookup(pool)) != NULL) { 2524 mutex_exit(&spa_namespace_lock); 2525 return (EEXIST); 2526 } 2527 2528 /* 2529 * Create and initialize the spa structure. 2530 */ 2531 (void) nvlist_lookup_string(props, 2532 zpool_prop_to_name(ZPOOL_PROP_ALTROOT), &altroot); 2533 spa = spa_add(pool, altroot); 2534 spa_activate(spa, spa_mode_global); 2535 2536 /* 2537 * Don't start async tasks until we know everything is healthy. 2538 */ 2539 spa_async_suspend(spa); 2540 2541 /* 2542 * Pass off the heavy lifting to spa_load(). Pass TRUE for mosconfig 2543 * because the user-supplied config is actually the one to trust when 2544 * doing an import. 2545 */ 2546 error = spa_load(spa, config, SPA_LOAD_IMPORT, B_TRUE); 2547 2548 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 2549 /* 2550 * Toss any existing sparelist, as it doesn't have any validity 2551 * anymore, and conflicts with spa_has_spare(). 2552 */ 2553 if (spa->spa_spares.sav_config) { 2554 nvlist_free(spa->spa_spares.sav_config); 2555 spa->spa_spares.sav_config = NULL; 2556 spa_load_spares(spa); 2557 } 2558 if (spa->spa_l2cache.sav_config) { 2559 nvlist_free(spa->spa_l2cache.sav_config); 2560 spa->spa_l2cache.sav_config = NULL; 2561 spa_load_l2cache(spa); 2562 } 2563 2564 VERIFY(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, 2565 &nvroot) == 0); 2566 if (error == 0) 2567 error = spa_validate_aux(spa, nvroot, -1ULL, 2568 VDEV_ALLOC_SPARE); 2569 if (error == 0) 2570 error = spa_validate_aux(spa, nvroot, -1ULL, 2571 VDEV_ALLOC_L2CACHE); 2572 spa_config_exit(spa, SCL_ALL, FTAG); 2573 2574 if (props != NULL) 2575 spa_configfile_set(spa, props, B_FALSE); 2576 2577 if (error != 0 || (props && spa_writeable(spa) && 2578 (error = spa_prop_set(spa, props)))) { 2579 spa_unload(spa); 2580 spa_deactivate(spa); 2581 spa_remove(spa); 2582 mutex_exit(&spa_namespace_lock); 2583 return (error); 2584 } 2585 2586 spa_async_resume(spa); 2587 2588 /* 2589 * Override any spares and level 2 cache devices as specified by 2590 * the user, as these may have correct device names/devids, etc. 
2591 */ 2592 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES, 2593 &spares, &nspares) == 0) { 2594 if (spa->spa_spares.sav_config) 2595 VERIFY(nvlist_remove(spa->spa_spares.sav_config, 2596 ZPOOL_CONFIG_SPARES, DATA_TYPE_NVLIST_ARRAY) == 0); 2597 else 2598 VERIFY(nvlist_alloc(&spa->spa_spares.sav_config, 2599 NV_UNIQUE_NAME, KM_SLEEP) == 0); 2600 VERIFY(nvlist_add_nvlist_array(spa->spa_spares.sav_config, 2601 ZPOOL_CONFIG_SPARES, spares, nspares) == 0); 2602 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 2603 spa_load_spares(spa); 2604 spa_config_exit(spa, SCL_ALL, FTAG); 2605 spa->spa_spares.sav_sync = B_TRUE; 2606 } 2607 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE, 2608 &l2cache, &nl2cache) == 0) { 2609 if (spa->spa_l2cache.sav_config) 2610 VERIFY(nvlist_remove(spa->spa_l2cache.sav_config, 2611 ZPOOL_CONFIG_L2CACHE, DATA_TYPE_NVLIST_ARRAY) == 0); 2612 else 2613 VERIFY(nvlist_alloc(&spa->spa_l2cache.sav_config, 2614 NV_UNIQUE_NAME, KM_SLEEP) == 0); 2615 VERIFY(nvlist_add_nvlist_array(spa->spa_l2cache.sav_config, 2616 ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache) == 0); 2617 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 2618 spa_load_l2cache(spa); 2619 spa_config_exit(spa, SCL_ALL, FTAG); 2620 spa->spa_l2cache.sav_sync = B_TRUE; 2621 } 2622 2623 if (spa_writeable(spa)) { 2624 /* 2625 * Update the config cache to include the newly-imported pool. 2626 */ 2627 spa_config_update_common(spa, SPA_CONFIG_UPDATE_POOL, B_FALSE); 2628 } 2629 2630 mutex_exit(&spa_namespace_lock); 2631 2632 return (0); 2633 } 2634 2635 2636 /* 2637 * This (illegal) pool name is used when temporarily importing a spa_t in order 2638 * to get the vdev stats associated with the imported devices. 2639 */ 2640 #define TRYIMPORT_NAME "$import" 2641 2642 nvlist_t * 2643 spa_tryimport(nvlist_t *tryconfig) 2644 { 2645 nvlist_t *config = NULL; 2646 char *poolname; 2647 spa_t *spa; 2648 uint64_t state; 2649 int error; 2650 2651 if (nvlist_lookup_string(tryconfig, ZPOOL_CONFIG_POOL_NAME, &poolname)) 2652 return (NULL); 2653 2654 if (nvlist_lookup_uint64(tryconfig, ZPOOL_CONFIG_POOL_STATE, &state)) 2655 return (NULL); 2656 2657 /* 2658 * Create and initialize the spa structure. 2659 */ 2660 mutex_enter(&spa_namespace_lock); 2661 spa = spa_add(TRYIMPORT_NAME, NULL); 2662 spa_activate(spa, FREAD); 2663 2664 /* 2665 * Pass off the heavy lifting to spa_load(). 2666 * Pass TRUE for mosconfig because the user-supplied config 2667 * is actually the one to trust when doing an import. 2668 */ 2669 error = spa_load(spa, tryconfig, SPA_LOAD_TRYIMPORT, B_TRUE); 2670 2671 /* 2672 * If 'tryconfig' was at least parsable, return the current config. 2673 */ 2674 if (spa->spa_root_vdev != NULL) { 2675 config = spa_config_generate(spa, NULL, -1ULL, B_TRUE); 2676 VERIFY(nvlist_add_string(config, ZPOOL_CONFIG_POOL_NAME, 2677 poolname) == 0); 2678 VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_POOL_STATE, 2679 state) == 0); 2680 VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_TIMESTAMP, 2681 spa->spa_uberblock.ub_timestamp) == 0); 2682 2683 /* 2684 * If the bootfs property exists on this pool then we 2685 * copy it out so that external consumers can tell which 2686 * pools are bootable. 2687 */ 2688 if ((!error || error == EEXIST) && spa->spa_bootfs) { 2689 char *tmpname = kmem_alloc(MAXPATHLEN, KM_SLEEP); 2690 2691 /* 2692 * We have to play games with the name since the 2693 * pool was opened as TRYIMPORT_NAME. 
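 *
 * For example (hypothetical names): dsl_dsobj_to_dsname() hands back
 * something like "$import/ROOT/zfsroot"; the code below finds the first
 * '/' and splices the real pool name back in, yielding "tank/ROOT/zfsroot".
 * If there is no '/', the name is copied through unchanged.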
2694 */ 2695 if (dsl_dsobj_to_dsname(spa_name(spa), 2696 spa->spa_bootfs, tmpname) == 0) { 2697 char *cp; 2698 char *dsname = kmem_alloc(MAXPATHLEN, KM_SLEEP); 2699 2700 cp = strchr(tmpname, '/'); 2701 if (cp == NULL) { 2702 (void) strlcpy(dsname, tmpname, 2703 MAXPATHLEN); 2704 } else { 2705 (void) snprintf(dsname, MAXPATHLEN, 2706 "%s/%s", poolname, ++cp); 2707 } 2708 VERIFY(nvlist_add_string(config, 2709 ZPOOL_CONFIG_BOOTFS, dsname) == 0); 2710 kmem_free(dsname, MAXPATHLEN); 2711 } 2712 kmem_free(tmpname, MAXPATHLEN); 2713 } 2714 2715 /* 2716 * Add the list of hot spares and level 2 cache devices. 2717 */ 2718 spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER); 2719 spa_add_spares(spa, config); 2720 spa_add_l2cache(spa, config); 2721 spa_config_exit(spa, SCL_CONFIG, FTAG); 2722 } 2723 2724 spa_unload(spa); 2725 spa_deactivate(spa); 2726 spa_remove(spa); 2727 mutex_exit(&spa_namespace_lock); 2728 2729 return (config); 2730 } 2731 2732 /* 2733 * Pool export/destroy 2734 * 2735 * The act of destroying or exporting a pool is very simple. We make sure there 2736 * is no more pending I/O and that any references to the pool are gone. Then, we 2737 * update the pool state and sync all the labels to disk, removing the 2738 * configuration from the cache afterwards. If the 'hardforce' flag is set, then 2739 * we don't sync the labels or remove the configuration cache. 2740 */ 2741 static int 2742 spa_export_common(char *pool, int new_state, nvlist_t **oldconfig, 2743 boolean_t force, boolean_t hardforce) 2744 { 2745 spa_t *spa; 2746 2747 if (oldconfig) 2748 *oldconfig = NULL; 2749 2750 if (!(spa_mode_global & FWRITE)) 2751 return (EROFS); 2752 2753 mutex_enter(&spa_namespace_lock); 2754 if ((spa = spa_lookup(pool)) == NULL) { 2755 mutex_exit(&spa_namespace_lock); 2756 return (ENOENT); 2757 } 2758 2759 /* 2760 * Put a hold on the pool, drop the namespace lock, stop async tasks, 2761 * reacquire the namespace lock, and see if we can export. 2762 */ 2763 spa_open_ref(spa, FTAG); 2764 mutex_exit(&spa_namespace_lock); 2765 spa_async_suspend(spa); 2766 mutex_enter(&spa_namespace_lock); 2767 spa_close(spa, FTAG); 2768 2769 /* 2770 * The pool will be in core if it's openable, 2771 * in which case we can modify its state. 2772 */ 2773 if (spa->spa_state != POOL_STATE_UNINITIALIZED && spa->spa_sync_on) { 2774 /* 2775 * Objsets may be open only because they're dirty, so we 2776 * have to force it to sync before checking spa_refcnt. 2777 */ 2778 txg_wait_synced(spa->spa_dsl_pool, 0); 2779 2780 /* 2781 * A pool cannot be exported or destroyed if there are active 2782 * references. If we are resetting a pool, allow references by 2783 * fault injection handlers. 2784 */ 2785 if (!spa_refcount_zero(spa) || 2786 (spa->spa_inject_ref != 0 && 2787 new_state != POOL_STATE_UNINITIALIZED)) { 2788 spa_async_resume(spa); 2789 mutex_exit(&spa_namespace_lock); 2790 return (EBUSY); 2791 } 2792 2793 /* 2794 * A pool cannot be exported if it has an active shared spare. 2795 * This is to prevent other pools stealing the active spare 2796 * from an exported pool. The user may, however, forcibly 2797 * export such a pool. 2798 */ 2799 if (!force && new_state == POOL_STATE_EXPORTED && 2800 spa_has_active_shared_spare(spa)) { 2801 spa_async_resume(spa); 2802 mutex_exit(&spa_namespace_lock); 2803 return (EXDEV); 2804 } 2805 2806 /* 2807 * We want this to be reflected on every label, 2808 * so mark them all dirty. spa_unload() will do the 2809 * final sync that pushes these changes out.
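 *
 * For example, if txg 100 was the last txg to sync, spa_final_txg
 * becomes 101 below, and the new pool state written in that final txg
 * lands on every label.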
2810 */ 2811 if (new_state != POOL_STATE_UNINITIALIZED && !hardforce) { 2812 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 2813 spa->spa_state = new_state; 2814 spa->spa_final_txg = spa_last_synced_txg(spa) + 1; 2815 vdev_config_dirty(spa->spa_root_vdev); 2816 spa_config_exit(spa, SCL_ALL, FTAG); 2817 } 2818 } 2819 2820 spa_event_notify(spa, NULL, ESC_ZFS_POOL_DESTROY); 2821 2822 if (spa->spa_state != POOL_STATE_UNINITIALIZED) { 2823 spa_unload(spa); 2824 spa_deactivate(spa); 2825 } 2826 2827 if (oldconfig && spa->spa_config) 2828 VERIFY(nvlist_dup(spa->spa_config, oldconfig, 0) == 0); 2829 2830 if (new_state != POOL_STATE_UNINITIALIZED) { 2831 if (!hardforce) 2832 spa_config_sync(spa, B_TRUE, B_TRUE); 2833 spa_remove(spa); 2834 } 2835 mutex_exit(&spa_namespace_lock); 2836 2837 return (0); 2838 } 2839 2840 /* 2841 * Destroy a storage pool. 2842 */ 2843 int 2844 spa_destroy(char *pool) 2845 { 2846 return (spa_export_common(pool, POOL_STATE_DESTROYED, NULL, 2847 B_FALSE, B_FALSE)); 2848 } 2849 2850 /* 2851 * Export a storage pool. 2852 */ 2853 int 2854 spa_export(char *pool, nvlist_t **oldconfig, boolean_t force, 2855 boolean_t hardforce) 2856 { 2857 return (spa_export_common(pool, POOL_STATE_EXPORTED, oldconfig, 2858 force, hardforce)); 2859 } 2860 2861 /* 2862 * Similar to spa_export(), this unloads the spa_t without actually removing it 2863 * from the namespace in any way. 2864 */ 2865 int 2866 spa_reset(char *pool) 2867 { 2868 return (spa_export_common(pool, POOL_STATE_UNINITIALIZED, NULL, 2869 B_FALSE, B_FALSE)); 2870 } 2871 2872 /* 2873 * ========================================================================== 2874 * Device manipulation 2875 * ========================================================================== 2876 */ 2877 2878 /* 2879 * Add a device to a storage pool. 2880 */ 2881 int 2882 spa_vdev_add(spa_t *spa, nvlist_t *nvroot) 2883 { 2884 uint64_t txg; 2885 int error; 2886 vdev_t *rvd = spa->spa_root_vdev; 2887 vdev_t *vd, *tvd; 2888 nvlist_t **spares, **l2cache; 2889 uint_t nspares, nl2cache; 2890 2891 txg = spa_vdev_enter(spa); 2892 2893 if ((error = spa_config_parse(spa, &vd, nvroot, NULL, 0, 2894 VDEV_ALLOC_ADD)) != 0) 2895 return (spa_vdev_exit(spa, NULL, txg, error)); 2896 2897 spa->spa_pending_vdev = vd; /* spa_vdev_exit() will clear this */ 2898 2899 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES, &spares, 2900 &nspares) != 0) 2901 nspares = 0; 2902 2903 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE, &l2cache, 2904 &nl2cache) != 0) 2905 nl2cache = 0; 2906 2907 if (vd->vdev_children == 0 && nspares == 0 && nl2cache == 0) 2908 return (spa_vdev_exit(spa, vd, txg, EINVAL)); 2909 2910 if (vd->vdev_children != 0 && 2911 (error = vdev_create(vd, txg, B_FALSE)) != 0) 2912 return (spa_vdev_exit(spa, vd, txg, error)); 2913 2914 /* 2915 * We must validate the spares and l2cache devices after checking the 2916 * children. Otherwise, vdev_inuse() will blindly overwrite the spare. 2917 */ 2918 if ((error = spa_validate_aux(spa, nvroot, txg, VDEV_ALLOC_ADD)) != 0) 2919 return (spa_vdev_exit(spa, vd, txg, error)); 2920 2921 /* 2922 * Transfer each new top-level vdev from vd to rvd. 
2923 */ 2924 for (int c = 0; c < vd->vdev_children; c++) { 2925 tvd = vd->vdev_child[c]; 2926 vdev_remove_child(vd, tvd); 2927 tvd->vdev_id = rvd->vdev_children; 2928 vdev_add_child(rvd, tvd); 2929 vdev_config_dirty(tvd); 2930 } 2931 2932 if (nspares != 0) { 2933 spa_set_aux_vdevs(&spa->spa_spares, spares, nspares, 2934 ZPOOL_CONFIG_SPARES); 2935 spa_load_spares(spa); 2936 spa->spa_spares.sav_sync = B_TRUE; 2937 } 2938 2939 if (nl2cache != 0) { 2940 spa_set_aux_vdevs(&spa->spa_l2cache, l2cache, nl2cache, 2941 ZPOOL_CONFIG_L2CACHE); 2942 spa_load_l2cache(spa); 2943 spa->spa_l2cache.sav_sync = B_TRUE; 2944 } 2945 2946 /* 2947 * We have to be careful when adding new vdevs to an existing pool. 2948 * If other threads start allocating from these vdevs before we 2949 * sync the config cache, and we lose power, then upon reboot we may 2950 * fail to open the pool because there are DVAs that the config cache 2951 * can't translate. Therefore, we first add the vdevs without 2952 * initializing metaslabs; sync the config cache (via spa_vdev_exit()); 2953 * and then let spa_config_update() initialize the new metaslabs. 2954 * 2955 * spa_load() checks for added-but-not-initialized vdevs, so that 2956 * if we lose power at any point in this sequence, the remaining 2957 * steps will be completed the next time we load the pool. 2958 */ 2959 (void) spa_vdev_exit(spa, vd, txg, 0); 2960 2961 mutex_enter(&spa_namespace_lock); 2962 spa_config_update(spa, SPA_CONFIG_UPDATE_POOL); 2963 mutex_exit(&spa_namespace_lock); 2964 2965 return (0); 2966 } 2967 2968 /* 2969 * Attach a device to a mirror. The arguments are the path to any device 2970 * in the mirror, and the nvroot for the new device. If the path specifies 2971 * a device that is not mirrored, we automatically insert the mirror vdev. 2972 * 2973 * If 'replacing' is specified, the new device is intended to replace the 2974 * existing device; in this case the two devices are made into their own 2975 * mirror using the 'replacing' vdev, which is functionally identical to 2976 * the mirror vdev (it actually reuses all the same ops) but has a few 2977 * extra rules: you can't attach to it after it's been created, and upon 2978 * completion of resilvering, the first disk (the one being replaced) 2979 * is automatically detached. 
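 *
 * To illustrate with hypothetical disks A, B and C, in the M(...) notation
 * used by spa_vdev_detach() below: attaching C to a lone disk A yields the
 * mirror M(A,C), and attaching C to an existing mirror M(A,B) yields
 * M(A,B,C). With 'replacing' set, replacing B in M(A,B) yields M(A,R(B,C)),
 * and once C has fully resilvered, B is detached automatically, leaving
 * M(A,C).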
2980 */ 2981 int 2982 spa_vdev_attach(spa_t *spa, uint64_t guid, nvlist_t *nvroot, int replacing) 2983 { 2984 uint64_t txg, open_txg; 2985 vdev_t *rvd = spa->spa_root_vdev; 2986 vdev_t *oldvd, *newvd, *newrootvd, *pvd, *tvd; 2987 vdev_ops_t *pvops; 2988 dmu_tx_t *tx; 2989 char *oldvdpath, *newvdpath; 2990 int newvd_isspare; 2991 int error; 2992 2993 txg = spa_vdev_enter(spa); 2994 2995 oldvd = spa_lookup_by_guid(spa, guid, B_FALSE); 2996 2997 if (oldvd == NULL) 2998 return (spa_vdev_exit(spa, NULL, txg, ENODEV)); 2999 3000 if (!oldvd->vdev_ops->vdev_op_leaf) 3001 return (spa_vdev_exit(spa, NULL, txg, ENOTSUP)); 3002 3003 pvd = oldvd->vdev_parent; 3004 3005 if ((error = spa_config_parse(spa, &newrootvd, nvroot, NULL, 0, 3006 VDEV_ALLOC_ADD)) != 0) 3007 return (spa_vdev_exit(spa, NULL, txg, EINVAL)); 3008 3009 if (newrootvd->vdev_children != 1) 3010 return (spa_vdev_exit(spa, newrootvd, txg, EINVAL)); 3011 3012 newvd = newrootvd->vdev_child[0]; 3013 3014 if (!newvd->vdev_ops->vdev_op_leaf) 3015 return (spa_vdev_exit(spa, newrootvd, txg, EINVAL)); 3016 3017 if ((error = vdev_create(newrootvd, txg, replacing)) != 0) 3018 return (spa_vdev_exit(spa, newrootvd, txg, error)); 3019 3020 /* 3021 * Spares can't replace logs 3022 */ 3023 if (oldvd->vdev_top->vdev_islog && newvd->vdev_isspare) 3024 return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP)); 3025 3026 if (!replacing) { 3027 /* 3028 * For attach, the only allowable parent is a mirror or the root 3029 * vdev. 3030 */ 3031 if (pvd->vdev_ops != &vdev_mirror_ops && 3032 pvd->vdev_ops != &vdev_root_ops) 3033 return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP)); 3034 3035 pvops = &vdev_mirror_ops; 3036 } else { 3037 /* 3038 * Active hot spares can only be replaced by inactive hot 3039 * spares. 3040 */ 3041 if (pvd->vdev_ops == &vdev_spare_ops && 3042 pvd->vdev_child[1] == oldvd && 3043 !spa_has_spare(spa, newvd->vdev_guid)) 3044 return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP)); 3045 3046 /* 3047 * If the source is a hot spare, and the parent isn't already a 3048 * spare, then we want to create a new hot spare. Otherwise, we 3049 * want to create a replacing vdev. The user is not allowed to 3050 * attach to a spared vdev child unless the 'isspare' state is 3051 * the same (spare replaces spare, non-spare replaces 3052 * non-spare). 3053 */ 3054 if (pvd->vdev_ops == &vdev_replacing_ops) 3055 return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP)); 3056 else if (pvd->vdev_ops == &vdev_spare_ops && 3057 newvd->vdev_isspare != oldvd->vdev_isspare) 3058 return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP)); 3059 else if (pvd->vdev_ops != &vdev_spare_ops && 3060 newvd->vdev_isspare) 3061 pvops = &vdev_spare_ops; 3062 else 3063 pvops = &vdev_replacing_ops; 3064 } 3065 3066 /* 3067 * Compare the new device size with the replaceable/attachable 3068 * device size. 3069 */ 3070 if (newvd->vdev_psize < vdev_get_rsize(oldvd)) 3071 return (spa_vdev_exit(spa, newrootvd, txg, EOVERFLOW)); 3072 3073 /* 3074 * The new device cannot have a higher alignment requirement 3075 * than the top-level vdev. 3076 */ 3077 if (newvd->vdev_ashift > oldvd->vdev_top->vdev_ashift) 3078 return (spa_vdev_exit(spa, newrootvd, txg, EDOM)); 3079 3080 /* 3081 * If this is an in-place replacement, update oldvd's path and devid 3082 * to make it distinguishable from newvd, and unopenable from now on. 
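 *
 * For example (hypothetical device), replacing /dev/dsk/c0t0d0s0 with
 * itself renames the old vdev's path to /dev/dsk/c0t0d0s0/old, which can
 * never correspond to a real device node.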
3083 */ 3084 if (strcmp(oldvd->vdev_path, newvd->vdev_path) == 0) { 3085 spa_strfree(oldvd->vdev_path); 3086 oldvd->vdev_path = kmem_alloc(strlen(newvd->vdev_path) + 5, 3087 KM_SLEEP); 3088 (void) sprintf(oldvd->vdev_path, "%s/%s", 3089 newvd->vdev_path, "old"); 3090 if (oldvd->vdev_devid != NULL) { 3091 spa_strfree(oldvd->vdev_devid); 3092 oldvd->vdev_devid = NULL; 3093 } 3094 } 3095 3096 /* 3097 * If the parent is not a mirror, or if we're replacing, insert the new 3098 * mirror/replacing/spare vdev above oldvd. 3099 */ 3100 if (pvd->vdev_ops != pvops) 3101 pvd = vdev_add_parent(oldvd, pvops); 3102 3103 ASSERT(pvd->vdev_top->vdev_parent == rvd); 3104 ASSERT(pvd->vdev_ops == pvops); 3105 ASSERT(oldvd->vdev_parent == pvd); 3106 3107 /* 3108 * Extract the new device from its root and add it to pvd. 3109 */ 3110 vdev_remove_child(newrootvd, newvd); 3111 newvd->vdev_id = pvd->vdev_children; 3112 vdev_add_child(pvd, newvd); 3113 3114 /* 3115 * If newvd is smaller than oldvd, but larger than its rsize, 3116 * the addition of newvd may have decreased our parent's asize. 3117 */ 3118 pvd->vdev_asize = MIN(pvd->vdev_asize, newvd->vdev_asize); 3119 3120 tvd = newvd->vdev_top; 3121 ASSERT(pvd->vdev_top == tvd); 3122 ASSERT(tvd->vdev_parent == rvd); 3123 3124 vdev_config_dirty(tvd); 3125 3126 /* 3127 * Set newvd's DTL to [TXG_INITIAL, open_txg]. It will propagate 3128 * upward when spa_vdev_exit() calls vdev_dtl_reassess(). 3129 */ 3130 open_txg = txg + TXG_CONCURRENT_STATES - 1; 3131 3132 vdev_dtl_dirty(newvd, DTL_MISSING, 3133 TXG_INITIAL, open_txg - TXG_INITIAL + 1); 3134 3135 if (newvd->vdev_isspare) { 3136 spa_spare_activate(newvd); 3137 spa_event_notify(spa, newvd, ESC_ZFS_VDEV_SPARE); 3138 } 3139 3140 oldvdpath = spa_strdup(oldvd->vdev_path); 3141 newvdpath = spa_strdup(newvd->vdev_path); 3142 newvd_isspare = newvd->vdev_isspare; 3143 3144 /* 3145 * Mark newvd's DTL dirty in this txg. 3146 */ 3147 vdev_dirty(tvd, VDD_DTL, newvd, txg); 3148 3149 (void) spa_vdev_exit(spa, newrootvd, open_txg, 0); 3150 3151 tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir); 3152 if (dmu_tx_assign(tx, TXG_WAIT) == 0) { 3153 spa_history_internal_log(LOG_POOL_VDEV_ATTACH, spa, tx, 3154 CRED(), "%s vdev=%s %s vdev=%s", 3155 replacing && newvd_isspare ? "spare in" : 3156 replacing ? "replace" : "attach", newvdpath, 3157 replacing ? "for" : "to", oldvdpath); 3158 dmu_tx_commit(tx); 3159 } else { 3160 dmu_tx_abort(tx); 3161 } 3162 3163 spa_strfree(oldvdpath); 3164 spa_strfree(newvdpath); 3165 3166 /* 3167 * Kick off a resilver to update newvd. 3168 */ 3169 VERIFY3U(spa_scrub(spa, POOL_SCRUB_RESILVER), ==, 0); 3170 3171 return (0); 3172 } 3173 3174 /* 3175 * Detach a device from a mirror or replacing vdev. 3176 * If 'replace_done' is specified, only detach if the parent 3177 * is a replacing vdev. 3178 */ 3179 int 3180 spa_vdev_detach(spa_t *spa, uint64_t guid, uint64_t pguid, int replace_done) 3181 { 3182 uint64_t txg; 3183 int error; 3184 vdev_t *rvd = spa->spa_root_vdev; 3185 vdev_t *vd, *pvd, *cvd, *tvd; 3186 boolean_t unspare = B_FALSE; 3187 uint64_t unspare_guid; 3188 size_t len; 3189 3190 txg = spa_vdev_enter(spa); 3191 3192 vd = spa_lookup_by_guid(spa, guid, B_FALSE); 3193 3194 if (vd == NULL) 3195 return (spa_vdev_exit(spa, NULL, txg, ENODEV)); 3196 3197 if (!vd->vdev_ops->vdev_op_leaf) 3198 return (spa_vdev_exit(spa, NULL, txg, ENOTSUP)); 3199 3200 pvd = vd->vdev_parent; 3201 3202 /* 3203 * If the parent/child relationship is not as expected, don't do it. 
3204 * Consider M(A,R(B,C)) -- that is, a mirror of A with a replacing 3205 * vdev that's replacing B with C. The user's intent in replacing 3206 * is to go from M(A,B) to M(A,C). If the user decides to cancel 3207 * the replace by detaching C, the expected behavior is to end up 3208 * M(A,B). But suppose that right after deciding to detach C, 3209 * the replacement of B completes. We would have M(A,C), and then 3210 * ask to detach C, which would leave us with just A -- not what 3211 * the user wanted. To prevent this, we make sure that the 3212 * parent/child relationship hasn't changed -- in this example, 3213 * that C's parent is still the replacing vdev R. 3214 */ 3215 if (pvd->vdev_guid != pguid && pguid != 0) 3216 return (spa_vdev_exit(spa, NULL, txg, EBUSY)); 3217 3218 /* 3219 * If replace_done is specified, only remove this device if it's 3220 * the first child of a replacing vdev. For the 'spare' vdev, either 3221 * disk can be removed. 3222 */ 3223 if (replace_done) { 3224 if (pvd->vdev_ops == &vdev_replacing_ops) { 3225 if (vd->vdev_id != 0) 3226 return (spa_vdev_exit(spa, NULL, txg, ENOTSUP)); 3227 } else if (pvd->vdev_ops != &vdev_spare_ops) { 3228 return (spa_vdev_exit(spa, NULL, txg, ENOTSUP)); 3229 } 3230 } 3231 3232 ASSERT(pvd->vdev_ops != &vdev_spare_ops || 3233 spa_version(spa) >= SPA_VERSION_SPARES); 3234 3235 /* 3236 * Only mirror, replacing, and spare vdevs support detach. 3237 */ 3238 if (pvd->vdev_ops != &vdev_replacing_ops && 3239 pvd->vdev_ops != &vdev_mirror_ops && 3240 pvd->vdev_ops != &vdev_spare_ops) 3241 return (spa_vdev_exit(spa, NULL, txg, ENOTSUP)); 3242 3243 /* 3244 * If this device has the only valid copy of some data, 3245 * we cannot safely detach it. 3246 */ 3247 if (vdev_dtl_required(vd)) 3248 return (spa_vdev_exit(spa, NULL, txg, EBUSY)); 3249 3250 ASSERT(pvd->vdev_children >= 2); 3251 3252 /* 3253 * If we are detaching the second disk from a replacing vdev, then 3254 * check to see if we changed the original vdev's path to have "/old" 3255 * at the end in spa_vdev_attach(). If so, undo that change now. 3256 */ 3257 if (pvd->vdev_ops == &vdev_replacing_ops && vd->vdev_id == 1 && 3258 pvd->vdev_child[0]->vdev_path != NULL && 3259 pvd->vdev_child[1]->vdev_path != NULL) { 3260 ASSERT(pvd->vdev_child[1] == vd); 3261 cvd = pvd->vdev_child[0]; 3262 len = strlen(vd->vdev_path); 3263 if (strncmp(cvd->vdev_path, vd->vdev_path, len) == 0 && 3264 strcmp(cvd->vdev_path + len, "/old") == 0) { 3265 spa_strfree(cvd->vdev_path); 3266 cvd->vdev_path = spa_strdup(vd->vdev_path); 3267 } 3268 } 3269 3270 /* 3271 * If we are detaching the original disk from a spare, then it implies 3272 * that the spare should become a real disk, and be removed from the 3273 * active spare list for the pool. 3274 */ 3275 if (pvd->vdev_ops == &vdev_spare_ops && 3276 vd->vdev_id == 0 && pvd->vdev_child[1]->vdev_isspare) 3277 unspare = B_TRUE; 3278 3279 /* 3280 * Erase the disk labels so the disk can be used for other things. 3281 * This must be done after all other error cases are handled, 3282 * but before we disembowel vd (so we can still do I/O to it). 3283 * But if we can't do it, don't treat the error as fatal -- 3284 * it may be that the unwritability of the disk is the reason 3285 * it's being detached! 3286 */ 3287 error = vdev_label_init(vd, 0, VDEV_LABEL_REMOVE); 3288 3289 /* 3290 * Remove vd from its parent and compact the parent's children. 
3291 */ 3292 vdev_remove_child(pvd, vd); 3293 vdev_compact_children(pvd); 3294 3295 /* 3296 * Remember one of the remaining children so we can get tvd below. 3297 */ 3298 cvd = pvd->vdev_child[0]; 3299 3300 /* 3301 * If we need to remove the remaining child from the list of hot spares, 3302 * do it now, marking the vdev as no longer a spare in the process. 3303 * We must do this before vdev_remove_parent(), because that can 3304 * change the GUID if it creates a new toplevel GUID. For a similar 3305 * reason, we must remove the spare now, in the same txg as the detach; 3306 * otherwise someone could attach a new sibling, change the GUID, and 3307 * the subsequent attempt to spa_vdev_remove(unspare_guid) would fail. 3308 */ 3309 if (unspare) { 3310 ASSERT(cvd->vdev_isspare); 3311 spa_spare_remove(cvd); 3312 unspare_guid = cvd->vdev_guid; 3313 (void) spa_vdev_remove(spa, unspare_guid, B_TRUE); 3314 } 3315 3316 /* 3317 * If the parent mirror/replacing vdev only has one child, 3318 * the parent is no longer needed. Remove it from the tree. 3319 */ 3320 if (pvd->vdev_children == 1) 3321 vdev_remove_parent(cvd); 3322 3323 /* 3324 * We don't set tvd until now because the parent we just removed 3325 * may have been the previous top-level vdev. 3326 */ 3327 tvd = cvd->vdev_top; 3328 ASSERT(tvd->vdev_parent == rvd); 3329 3330 /* 3331 * Reevaluate the parent vdev state. 3332 */ 3333 vdev_propagate_state(cvd); 3334 3335 /* 3336 * If the device we just detached was smaller than the others, it may be 3337 * possible to add metaslabs (i.e. grow the pool). vdev_metaslab_init() 3338 * can't fail because the existing metaslabs are already in core, so 3339 * there's nothing to read from disk. 3340 */ 3341 VERIFY(vdev_metaslab_init(tvd, txg) == 0); 3342 3343 vdev_config_dirty(tvd); 3344 3345 /* 3346 * Mark vd's DTL as dirty in this txg. vdev_dtl_sync() will see that 3347 * vd->vdev_detached is set and free vd's DTL object in syncing context. 3348 * But first make sure we're not on any *other* txg's DTL list, to 3349 * prevent vd from being accessed after it's freed. 3350 */ 3351 for (int t = 0; t < TXG_SIZE; t++) 3352 (void) txg_list_remove_this(&tvd->vdev_dtl_list, vd, t); 3353 vd->vdev_detached = B_TRUE; 3354 vdev_dirty(tvd, VDD_DTL, vd, txg); 3355 3356 spa_event_notify(spa, vd, ESC_ZFS_VDEV_REMOVE); 3357 3358 error = spa_vdev_exit(spa, vd, txg, 0); 3359 3360 /* 3361 * If this was the removal of the original device in a hot spare vdev, 3362 * then we want to go through and remove the device from the hot spare 3363 * list of every other pool. 
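 *
 * (A hot spare may be configured in several pools at once. For example --
 * hypothetical pools -- if disk S is a spare in both 'tank' and 'dozer'
 * and has just become a full member of 'tank', it must be dropped from
 * the spare list of 'dozer' as well; that is what the loop below does.)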
3364 */ 3365 if (unspare) { 3366 spa_t *myspa = spa; 3367 spa = NULL; 3368 mutex_enter(&spa_namespace_lock); 3369 while ((spa = spa_next(spa)) != NULL) { 3370 if (spa->spa_state != POOL_STATE_ACTIVE) 3371 continue; 3372 if (spa == myspa) 3373 continue; 3374 spa_open_ref(spa, FTAG); 3375 mutex_exit(&spa_namespace_lock); 3376 (void) spa_vdev_remove(spa, unspare_guid, B_TRUE); 3377 mutex_enter(&spa_namespace_lock); 3378 spa_close(spa, FTAG); 3379 } 3380 mutex_exit(&spa_namespace_lock); 3381 } 3382 3383 return (error); 3384 } 3385 3386 static nvlist_t * 3387 spa_nvlist_lookup_by_guid(nvlist_t **nvpp, int count, uint64_t target_guid) 3388 { 3389 for (int i = 0; i < count; i++) { 3390 uint64_t guid; 3391 3392 VERIFY(nvlist_lookup_uint64(nvpp[i], ZPOOL_CONFIG_GUID, 3393 &guid) == 0); 3394 3395 if (guid == target_guid) 3396 return (nvpp[i]); 3397 } 3398 3399 return (NULL); 3400 } 3401 3402 static void 3403 spa_vdev_remove_aux(nvlist_t *config, char *name, nvlist_t **dev, int count, 3404 nvlist_t *dev_to_remove) 3405 { 3406 nvlist_t **newdev = NULL; 3407 3408 if (count > 1) 3409 newdev = kmem_alloc((count - 1) * sizeof (void *), KM_SLEEP); 3410 3411 for (int i = 0, j = 0; i < count; i++) { 3412 if (dev[i] == dev_to_remove) 3413 continue; 3414 VERIFY(nvlist_dup(dev[i], &newdev[j++], KM_SLEEP) == 0); 3415 } 3416 3417 VERIFY(nvlist_remove(config, name, DATA_TYPE_NVLIST_ARRAY) == 0); 3418 VERIFY(nvlist_add_nvlist_array(config, name, newdev, count - 1) == 0); 3419 3420 for (int i = 0; i < count - 1; i++) 3421 nvlist_free(newdev[i]); 3422 3423 if (count > 1) 3424 kmem_free(newdev, (count - 1) * sizeof (void *)); 3425 } 3426 3427 /* 3428 * Remove a device from the pool. Currently, this supports removing only hot 3429 * spares and level 2 ARC devices. 3430 */ 3431 int 3432 spa_vdev_remove(spa_t *spa, uint64_t guid, boolean_t unspare) 3433 { 3434 vdev_t *vd; 3435 nvlist_t **spares, **l2cache, *nv; 3436 uint_t nspares, nl2cache; 3437 uint64_t txg = 0; 3438 int error = 0; 3439 boolean_t locked = MUTEX_HELD(&spa_namespace_lock); 3440 3441 if (!locked) 3442 txg = spa_vdev_enter(spa); 3443 3444 vd = spa_lookup_by_guid(spa, guid, B_FALSE); 3445 3446 if (spa->spa_spares.sav_vdevs != NULL && 3447 nvlist_lookup_nvlist_array(spa->spa_spares.sav_config, 3448 ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0 && 3449 (nv = spa_nvlist_lookup_by_guid(spares, nspares, guid)) != NULL) { 3450 /* 3451 * Only remove the hot spare if it's not currently in use 3452 * in this pool. 3453 */ 3454 if (vd == NULL || unspare) { 3455 spa_vdev_remove_aux(spa->spa_spares.sav_config, 3456 ZPOOL_CONFIG_SPARES, spares, nspares, nv); 3457 spa_load_spares(spa); 3458 spa->spa_spares.sav_sync = B_TRUE; 3459 } else { 3460 error = EBUSY; 3461 } 3462 } else if (spa->spa_l2cache.sav_vdevs != NULL && 3463 nvlist_lookup_nvlist_array(spa->spa_l2cache.sav_config, 3464 ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0 && 3465 (nv = spa_nvlist_lookup_by_guid(l2cache, nl2cache, guid)) != NULL) { 3466 /* 3467 * Cache devices can always be removed. 3468 */ 3469 spa_vdev_remove_aux(spa->spa_l2cache.sav_config, 3470 ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache, nv); 3471 spa_load_l2cache(spa); 3472 spa->spa_l2cache.sav_sync = B_TRUE; 3473 } else if (vd != NULL) { 3474 /* 3475 * Normal vdevs cannot be removed (yet). 3476 */ 3477 error = ENOTSUP; 3478 } else { 3479 /* 3480 * There is no vdev of any kind with the specified guid. 
3481 */ 3482 error = ENOENT; 3483 } 3484 3485 if (!locked) 3486 return (spa_vdev_exit(spa, NULL, txg, error)); 3487 3488 return (error); 3489 } 3490 3491 /* 3492 * Find any device that's done replacing, or a vdev marked 'unspare' that's 3493 * currently spared, so we can detach it. 3494 */ 3495 static vdev_t * 3496 spa_vdev_resilver_done_hunt(vdev_t *vd) 3497 { 3498 vdev_t *newvd, *oldvd; 3499 int c; 3500 3501 for (c = 0; c < vd->vdev_children; c++) { 3502 oldvd = spa_vdev_resilver_done_hunt(vd->vdev_child[c]); 3503 if (oldvd != NULL) 3504 return (oldvd); 3505 } 3506 3507 /* 3508 * Check for a completed replacement. 3509 */ 3510 if (vd->vdev_ops == &vdev_replacing_ops && vd->vdev_children == 2) { 3511 oldvd = vd->vdev_child[0]; 3512 newvd = vd->vdev_child[1]; 3513 3514 if (vdev_dtl_empty(newvd, DTL_MISSING) && 3515 !vdev_dtl_required(oldvd)) 3516 return (oldvd); 3517 } 3518 3519 /* 3520 * Check for a completed resilver with the 'unspare' flag set. 3521 */ 3522 if (vd->vdev_ops == &vdev_spare_ops && vd->vdev_children == 2) { 3523 newvd = vd->vdev_child[0]; 3524 oldvd = vd->vdev_child[1]; 3525 3526 if (newvd->vdev_unspare && 3527 vdev_dtl_empty(newvd, DTL_MISSING) && 3528 !vdev_dtl_required(oldvd)) { 3529 newvd->vdev_unspare = 0; 3530 return (oldvd); 3531 } 3532 } 3533 3534 return (NULL); 3535 } 3536 3537 static void 3538 spa_vdev_resilver_done(spa_t *spa) 3539 { 3540 vdev_t *vd, *pvd, *ppvd; 3541 uint64_t guid, sguid, pguid, ppguid; 3542 3543 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 3544 3545 while ((vd = spa_vdev_resilver_done_hunt(spa->spa_root_vdev)) != NULL) { 3546 pvd = vd->vdev_parent; 3547 ppvd = pvd->vdev_parent; 3548 guid = vd->vdev_guid; 3549 pguid = pvd->vdev_guid; 3550 ppguid = ppvd->vdev_guid; 3551 sguid = 0; 3552 /* 3553 * If we have just finished replacing a hot spared device, then 3554 * we need to detach the parent's other child (the original hot 3555 * spare) as well. 3556 */ 3557 if (ppvd->vdev_ops == &vdev_spare_ops && pvd->vdev_id == 0) { 3558 ASSERT(pvd->vdev_ops == &vdev_replacing_ops); 3559 ASSERT(ppvd->vdev_children == 2); 3560 sguid = ppvd->vdev_child[1]->vdev_guid; 3561 } 3562 spa_config_exit(spa, SCL_ALL, FTAG); 3563 if (spa_vdev_detach(spa, guid, pguid, B_TRUE) != 0) 3564 return; 3565 if (sguid && spa_vdev_detach(spa, sguid, ppguid, B_TRUE) != 0) 3566 return; 3567 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 3568 } 3569 3570 spa_config_exit(spa, SCL_ALL, FTAG); 3571 } 3572 3573 /* 3574 * Update the stored path or FRU for this vdev. Dirty the vdev configuration, 3575 * relying on spa_vdev_enter/exit() to synchronize the labels and cache.
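 *
 * A usage sketch (the guid and path values here are hypothetical):
 *
 *	(void) spa_vdev_setpath(spa, guid, "/dev/dsk/c1t2d0s0");
 *
 * spa_vdev_setpath() and spa_vdev_setfru() below are thin wrappers that
 * pick the field to update via the 'ispath' flag.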
3576 */ 3577 int 3578 spa_vdev_set_common(spa_t *spa, uint64_t guid, const char *value, 3579 boolean_t ispath) 3580 { 3581 vdev_t *vd; 3582 uint64_t txg; 3583 3584 txg = spa_vdev_enter(spa); 3585 3586 if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL) 3587 return (spa_vdev_exit(spa, NULL, txg, ENOENT)); 3588 3589 if (!vd->vdev_ops->vdev_op_leaf) 3590 return (spa_vdev_exit(spa, NULL, txg, ENOTSUP)); 3591 3592 if (ispath) { 3593 spa_strfree(vd->vdev_path); 3594 vd->vdev_path = spa_strdup(value); 3595 } else { 3596 if (vd->vdev_fru != NULL) 3597 spa_strfree(vd->vdev_fru); 3598 vd->vdev_fru = spa_strdup(value); 3599 } 3600 3601 vdev_config_dirty(vd->vdev_top); 3602 3603 return (spa_vdev_exit(spa, NULL, txg, 0)); 3604 } 3605 3606 int 3607 spa_vdev_setpath(spa_t *spa, uint64_t guid, const char *newpath) 3608 { 3609 return (spa_vdev_set_common(spa, guid, newpath, B_TRUE)); 3610 } 3611 3612 int 3613 spa_vdev_setfru(spa_t *spa, uint64_t guid, const char *newfru) 3614 { 3615 return (spa_vdev_set_common(spa, guid, newfru, B_FALSE)); 3616 } 3617 3618 /* 3619 * ========================================================================== 3620 * SPA Scrubbing 3621 * ========================================================================== 3622 */ 3623 3624 int 3625 spa_scrub(spa_t *spa, pool_scrub_type_t type) 3626 { 3627 ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == 0); 3628 3629 if ((uint_t)type >= POOL_SCRUB_TYPES) 3630 return (ENOTSUP); 3631 3632 /* 3633 * If a resilver was requested, but there is no DTL on a 3634 * writeable leaf device, we have nothing to do. 3635 */ 3636 if (type == POOL_SCRUB_RESILVER && 3637 !vdev_resilver_needed(spa->spa_root_vdev, NULL, NULL)) { 3638 spa_async_request(spa, SPA_ASYNC_RESILVER_DONE); 3639 return (0); 3640 } 3641 3642 if (type == POOL_SCRUB_EVERYTHING && 3643 spa->spa_dsl_pool->dp_scrub_func != SCRUB_FUNC_NONE && 3644 spa->spa_dsl_pool->dp_scrub_isresilver) 3645 return (EBUSY); 3646 3647 if (type == POOL_SCRUB_EVERYTHING || type == POOL_SCRUB_RESILVER) { 3648 return (dsl_pool_scrub_clean(spa->spa_dsl_pool)); 3649 } else if (type == POOL_SCRUB_NONE) { 3650 return (dsl_pool_scrub_cancel(spa->spa_dsl_pool)); 3651 } else { 3652 return (EINVAL); 3653 } 3654 } 3655 3656 /* 3657 * ========================================================================== 3658 * SPA async task processing 3659 * ========================================================================== 3660 */ 3661 3662 static void 3663 spa_async_remove(spa_t *spa, vdev_t *vd) 3664 { 3665 if (vd->vdev_remove_wanted) { 3666 vd->vdev_remove_wanted = 0; 3667 vdev_set_state(vd, B_FALSE, VDEV_STATE_REMOVED, VDEV_AUX_NONE); 3668 vdev_clear(spa, vd); 3669 vdev_state_dirty(vd->vdev_top); 3670 } 3671 3672 for (int c = 0; c < vd->vdev_children; c++) 3673 spa_async_remove(spa, vd->vdev_child[c]); 3674 } 3675 3676 static void 3677 spa_async_probe(spa_t *spa, vdev_t *vd) 3678 { 3679 if (vd->vdev_probe_wanted) { 3680 vd->vdev_probe_wanted = 0; 3681 vdev_reopen(vd); /* vdev_open() does the actual probe */ 3682 } 3683 3684 for (int c = 0; c < vd->vdev_children; c++) 3685 spa_async_probe(spa, vd->vdev_child[c]); 3686 } 3687 3688 static void 3689 spa_async_thread(spa_t *spa) 3690 { 3691 int tasks; 3692 3693 ASSERT(spa->spa_sync_on); 3694 3695 mutex_enter(&spa->spa_async_lock); 3696 tasks = spa->spa_async_tasks; 3697 spa->spa_async_tasks = 0; 3698 mutex_exit(&spa->spa_async_lock); 3699 3700 /* 3701 * See if the config needs to be updated. 
3702 */ 3703 if (tasks & SPA_ASYNC_CONFIG_UPDATE) { 3704 mutex_enter(&spa_namespace_lock); 3705 spa_config_update(spa, SPA_CONFIG_UPDATE_POOL); 3706 mutex_exit(&spa_namespace_lock); 3707 } 3708 3709 /* 3710 * See if any devices need to be marked REMOVED. 3711 */ 3712 if (tasks & SPA_ASYNC_REMOVE) { 3713 spa_vdev_state_enter(spa); 3714 spa_async_remove(spa, spa->spa_root_vdev); 3715 for (int i = 0; i < spa->spa_l2cache.sav_count; i++) 3716 spa_async_remove(spa, spa->spa_l2cache.sav_vdevs[i]); 3717 for (int i = 0; i < spa->spa_spares.sav_count; i++) 3718 spa_async_remove(spa, spa->spa_spares.sav_vdevs[i]); 3719 (void) spa_vdev_state_exit(spa, NULL, 0); 3720 } 3721 3722 /* 3723 * See if any devices need to be probed. 3724 */ 3725 if (tasks & SPA_ASYNC_PROBE) { 3726 spa_vdev_state_enter(spa); 3727 spa_async_probe(spa, spa->spa_root_vdev); 3728 (void) spa_vdev_state_exit(spa, NULL, 0); 3729 } 3730 3731 /* 3732 * If any devices are done replacing, detach them. 3733 */ 3734 if (tasks & SPA_ASYNC_RESILVER_DONE) 3735 spa_vdev_resilver_done(spa); 3736 3737 /* 3738 * Kick off a resilver. 3739 */ 3740 if (tasks & SPA_ASYNC_RESILVER) 3741 VERIFY(spa_scrub(spa, POOL_SCRUB_RESILVER) == 0); 3742 3743 /* 3744 * Let the world know that we're done. 3745 */ 3746 mutex_enter(&spa->spa_async_lock); 3747 spa->spa_async_thread = NULL; 3748 cv_broadcast(&spa->spa_async_cv); 3749 mutex_exit(&spa->spa_async_lock); 3750 thread_exit(); 3751 } 3752 3753 void 3754 spa_async_suspend(spa_t *spa) 3755 { 3756 mutex_enter(&spa->spa_async_lock); 3757 spa->spa_async_suspended++; 3758 while (spa->spa_async_thread != NULL) 3759 cv_wait(&spa->spa_async_cv, &spa->spa_async_lock); 3760 mutex_exit(&spa->spa_async_lock); 3761 } 3762 3763 void 3764 spa_async_resume(spa_t *spa) 3765 { 3766 mutex_enter(&spa->spa_async_lock); 3767 ASSERT(spa->spa_async_suspended != 0); 3768 spa->spa_async_suspended--; 3769 mutex_exit(&spa->spa_async_lock); 3770 } 3771 3772 static void 3773 spa_async_dispatch(spa_t *spa) 3774 { 3775 mutex_enter(&spa->spa_async_lock); 3776 if (spa->spa_async_tasks && !spa->spa_async_suspended && 3777 spa->spa_async_thread == NULL && 3778 rootdir != NULL && !vn_is_readonly(rootdir)) 3779 spa->spa_async_thread = thread_create(NULL, 0, 3780 spa_async_thread, spa, 0, &p0, TS_RUN, maxclsyspri); 3781 mutex_exit(&spa->spa_async_lock); 3782 } 3783 3784 void 3785 spa_async_request(spa_t *spa, int task) 3786 { 3787 mutex_enter(&spa->spa_async_lock); 3788 spa->spa_async_tasks |= task; 3789 mutex_exit(&spa->spa_async_lock); 3790 } 3791 3792 /* 3793 * ========================================================================== 3794 * SPA syncing routines 3795 * ========================================================================== 3796 */ 3797 3798 static void 3799 spa_sync_deferred_frees(spa_t *spa, uint64_t txg) 3800 { 3801 bplist_t *bpl = &spa->spa_sync_bplist; 3802 dmu_tx_t *tx; 3803 blkptr_t blk; 3804 uint64_t itor = 0; 3805 zio_t *zio; 3806 int error; 3807 uint8_t c = 1; 3808 3809 zio = zio_root(spa, NULL, NULL, ZIO_FLAG_CANFAIL); 3810 3811 while (bplist_iterate(bpl, &itor, &blk) == 0) { 3812 ASSERT(blk.blk_birth < txg); 3813 zio_nowait(zio_free(zio, spa, txg, &blk, NULL, NULL, 3814 ZIO_FLAG_MUSTSUCCEED)); 3815 } 3816 3817 error = zio_wait(zio); 3818 ASSERT3U(error, ==, 0); 3819 3820 tx = dmu_tx_create_assigned(spa->spa_dsl_pool, txg); 3821 bplist_vacate(bpl, tx); 3822 3823 /* 3824 * Pre-dirty the first block so we sync to convergence faster. 3825 * (Usually only the first block is needed.) 
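 *
 * (The one-byte dmu_write() below is all it takes to dirty that first
 * block in this txg.)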
3826 */ 3827 dmu_write(spa->spa_meta_objset, spa->spa_sync_bplist_obj, 0, 1, &c, tx); 3828 dmu_tx_commit(tx); 3829 } 3830 3831 static void 3832 spa_sync_nvlist(spa_t *spa, uint64_t obj, nvlist_t *nv, dmu_tx_t *tx) 3833 { 3834 char *packed = NULL; 3835 size_t bufsize; 3836 size_t nvsize = 0; 3837 dmu_buf_t *db; 3838 3839 VERIFY(nvlist_size(nv, &nvsize, NV_ENCODE_XDR) == 0); 3840 3841 /* 3842 * Write full (SPA_CONFIG_BLOCKSIZE) blocks of configuration 3843 * information. This avoids the dbuf_will_dirty() path and 3844 * saves us a pre-read to get data we don't actually care about. 3845 */ 3846 bufsize = P2ROUNDUP(nvsize, SPA_CONFIG_BLOCKSIZE); 3847 packed = kmem_alloc(bufsize, KM_SLEEP); 3848 3849 VERIFY(nvlist_pack(nv, &packed, &nvsize, NV_ENCODE_XDR, 3850 KM_SLEEP) == 0); 3851 bzero(packed + nvsize, bufsize - nvsize); 3852 3853 dmu_write(spa->spa_meta_objset, obj, 0, bufsize, packed, tx); 3854 3855 kmem_free(packed, bufsize); 3856 3857 VERIFY(0 == dmu_bonus_hold(spa->spa_meta_objset, obj, FTAG, &db)); 3858 dmu_buf_will_dirty(db, tx); 3859 *(uint64_t *)db->db_data = nvsize; 3860 dmu_buf_rele(db, FTAG); 3861 } 3862 3863 static void 3864 spa_sync_aux_dev(spa_t *spa, spa_aux_vdev_t *sav, dmu_tx_t *tx, 3865 const char *config, const char *entry) 3866 { 3867 nvlist_t *nvroot; 3868 nvlist_t **list; 3869 int i; 3870 3871 if (!sav->sav_sync) 3872 return; 3873 3874 /* 3875 * Update the MOS nvlist describing the list of available devices. 3876 * spa_validate_aux() will have already made sure this nvlist is 3877 * valid and the vdevs are labeled appropriately. 3878 */ 3879 if (sav->sav_object == 0) { 3880 sav->sav_object = dmu_object_alloc(spa->spa_meta_objset, 3881 DMU_OT_PACKED_NVLIST, 1 << 14, DMU_OT_PACKED_NVLIST_SIZE, 3882 sizeof (uint64_t), tx); 3883 VERIFY(zap_update(spa->spa_meta_objset, 3884 DMU_POOL_DIRECTORY_OBJECT, entry, sizeof (uint64_t), 1, 3885 &sav->sav_object, tx) == 0); 3886 } 3887 3888 VERIFY(nvlist_alloc(&nvroot, NV_UNIQUE_NAME, KM_SLEEP) == 0); 3889 if (sav->sav_count == 0) { 3890 VERIFY(nvlist_add_nvlist_array(nvroot, config, NULL, 0) == 0); 3891 } else { 3892 list = kmem_alloc(sav->sav_count * sizeof (void *), KM_SLEEP); 3893 for (i = 0; i < sav->sav_count; i++) 3894 list[i] = vdev_config_generate(spa, sav->sav_vdevs[i], 3895 B_FALSE, B_FALSE, B_TRUE); 3896 VERIFY(nvlist_add_nvlist_array(nvroot, config, list, 3897 sav->sav_count) == 0); 3898 for (i = 0; i < sav->sav_count; i++) 3899 nvlist_free(list[i]); 3900 kmem_free(list, sav->sav_count * sizeof (void *)); 3901 } 3902 3903 spa_sync_nvlist(spa, sav->sav_object, nvroot, tx); 3904 nvlist_free(nvroot); 3905 3906 sav->sav_sync = B_FALSE; 3907 } 3908 3909 static void 3910 spa_sync_config_object(spa_t *spa, dmu_tx_t *tx) 3911 { 3912 nvlist_t *config; 3913 3914 if (list_is_empty(&spa->spa_config_dirty_list)) 3915 return; 3916 3917 spa_config_enter(spa, SCL_STATE, FTAG, RW_READER); 3918 3919 config = spa_config_generate(spa, spa->spa_root_vdev, 3920 dmu_tx_get_txg(tx), B_FALSE); 3921 3922 spa_config_exit(spa, SCL_STATE, FTAG); 3923 3924 if (spa->spa_config_syncing) 3925 nvlist_free(spa->spa_config_syncing); 3926 spa->spa_config_syncing = config; 3927 3928 spa_sync_nvlist(spa, spa->spa_config_object, config, tx); 3929 } 3930 3931 /* 3932 * Set zpool properties. 
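 *
 * This runs as a sync task: arg1 is the spa_t and arg2 an nvlist of
 * (name, value) pairs, e.g. { "delegation" -> 0 } to turn delegation
 * off (an illustrative pair; see zpool_prop_t for the real mappings).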
/*
 * Set zpool properties.
 */
static void
spa_sync_props(void *arg1, void *arg2, cred_t *cr, dmu_tx_t *tx)
{
        spa_t *spa = arg1;
        objset_t *mos = spa->spa_meta_objset;
        nvlist_t *nvp = arg2;
        nvpair_t *elem;
        uint64_t intval;
        char *strval;
        zpool_prop_t prop;
        const char *propname;
        zprop_type_t proptype;

        mutex_enter(&spa->spa_props_lock);

        elem = NULL;
        while ((elem = nvlist_next_nvpair(nvp, elem)) != NULL) {
                switch (prop = zpool_name_to_prop(nvpair_name(elem))) {
                case ZPOOL_PROP_VERSION:
                        /*
                         * Only set version for non-zpool-creation cases
                         * (set/import).  spa_create() needs special care
                         * for version setting.
                         */
                        if (tx->tx_txg != TXG_INITIAL) {
                                VERIFY(nvpair_value_uint64(elem,
                                    &intval) == 0);
                                ASSERT(intval <= SPA_VERSION);
                                ASSERT(intval >= spa_version(spa));
                                spa->spa_uberblock.ub_version = intval;
                                vdev_config_dirty(spa->spa_root_vdev);
                        }
                        break;

                case ZPOOL_PROP_ALTROOT:
                        /*
                         * 'altroot' is a non-persistent property.  It should
                         * have been set temporarily at creation or import time.
                         */
                        ASSERT(spa->spa_root != NULL);
                        break;

                case ZPOOL_PROP_CACHEFILE:
                        /*
                         * 'cachefile' is also a non-persistent property.
                         */
                        break;
                default:
                        /*
                         * Set pool property values in the poolprops mos object.
                         */
                        if (spa->spa_pool_props_object == 0) {
                                VERIFY((spa->spa_pool_props_object =
                                    zap_create(mos, DMU_OT_POOL_PROPS,
                                    DMU_OT_NONE, 0, tx)) > 0);

                                VERIFY(zap_update(mos,
                                    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_PROPS,
                                    8, 1, &spa->spa_pool_props_object, tx)
                                    == 0);
                        }

                        /* normalize the property name */
                        propname = zpool_prop_to_name(prop);
                        proptype = zpool_prop_get_type(prop);

                        if (nvpair_type(elem) == DATA_TYPE_STRING) {
                                ASSERT(proptype == PROP_TYPE_STRING);
                                VERIFY(nvpair_value_string(elem, &strval) == 0);
                                VERIFY(zap_update(mos,
                                    spa->spa_pool_props_object, propname,
                                    1, strlen(strval) + 1, strval, tx) == 0);

                        } else if (nvpair_type(elem) == DATA_TYPE_UINT64) {
                                VERIFY(nvpair_value_uint64(elem, &intval) == 0);

                                if (proptype == PROP_TYPE_INDEX) {
                                        const char *unused;
                                        VERIFY(zpool_prop_index_to_string(
                                            prop, intval, &unused) == 0);
                                }
                                VERIFY(zap_update(mos,
                                    spa->spa_pool_props_object, propname,
                                    8, 1, &intval, tx) == 0);
                        } else {
                                ASSERT(0); /* not allowed */
                        }

                        switch (prop) {
                        case ZPOOL_PROP_DELEGATION:
                                spa->spa_delegation = intval;
                                break;
                        case ZPOOL_PROP_BOOTFS:
                                spa->spa_bootfs = intval;
                                break;
                        case ZPOOL_PROP_FAILUREMODE:
                                spa->spa_failmode = intval;
                                break;
                        default:
                                break;
                        }
                }

                /* log internal history if this is not a zpool create */
                if (spa_version(spa) >= SPA_VERSION_ZPOOL_HISTORY &&
                    tx->tx_txg != TXG_INITIAL) {
                        spa_history_internal_log(LOG_POOL_PROPSET,
                            spa, tx, cr, "%s %lld %s",
                            nvpair_name(elem), intval, spa_name(spa));
                }
        }

        mutex_exit(&spa->spa_props_lock);
}
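/*
 * spa_sync_props() runs in syncing context, so callers never invoke it
 * directly; they dispatch it through the DSL sync task framework.  A
 * sketch of the dispatch, assuming the dsl_sync_task_do() interface
 * (illustrative only; this mirrors how property sets reach us from the
 * open context):
 *
 *      error = dsl_sync_task_do(spa_get_dsl(spa), NULL,
 *          spa_sync_props, spa, nvp, 3);
 */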
/*
 * Sync the specified transaction group.  New blocks may be dirtied as
 * part of the process, so we iterate until it converges.
 */
void
spa_sync(spa_t *spa, uint64_t txg)
{
        dsl_pool_t *dp = spa->spa_dsl_pool;
        objset_t *mos = spa->spa_meta_objset;
        bplist_t *bpl = &spa->spa_sync_bplist;
        vdev_t *rvd = spa->spa_root_vdev;
        vdev_t *vd;
        dmu_tx_t *tx;
        int dirty_vdevs;
        int error;

        /*
         * Lock out configuration changes.
         */
        spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);

        spa->spa_syncing_txg = txg;
        spa->spa_sync_pass = 0;

        /*
         * If there are any pending vdev state changes, convert them
         * into config changes that go out with this transaction group.
         */
        spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
        while (list_head(&spa->spa_state_dirty_list) != NULL) {
                /*
                 * We need the write lock here because, for aux vdevs,
                 * calling vdev_config_dirty() modifies sav_config.
                 * This is ugly and will become unnecessary when we
                 * eliminate the aux vdev wart by integrating all vdevs
                 * into the root vdev tree.
                 */
                spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
                spa_config_enter(spa, SCL_CONFIG | SCL_STATE, FTAG, RW_WRITER);
                while ((vd = list_head(&spa->spa_state_dirty_list)) != NULL) {
                        vdev_state_clean(vd);
                        vdev_config_dirty(vd);
                }
                spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
                spa_config_enter(spa, SCL_CONFIG | SCL_STATE, FTAG, RW_READER);
        }
        spa_config_exit(spa, SCL_STATE, FTAG);

        VERIFY(0 == bplist_open(bpl, mos, spa->spa_sync_bplist_obj));

        tx = dmu_tx_create_assigned(dp, txg);

        /*
         * If we are upgrading to SPA_VERSION_RAIDZ_DEFLATE this txg,
         * set spa_deflate if we have no raid-z vdevs.
         */
        if (spa->spa_ubsync.ub_version < SPA_VERSION_RAIDZ_DEFLATE &&
            spa->spa_uberblock.ub_version >= SPA_VERSION_RAIDZ_DEFLATE) {
                int i;

                for (i = 0; i < rvd->vdev_children; i++) {
                        vd = rvd->vdev_child[i];
                        if (vd->vdev_deflate_ratio != SPA_MINBLOCKSIZE)
                                break;
                }
                if (i == rvd->vdev_children) {
                        spa->spa_deflate = TRUE;
                        VERIFY(0 == zap_add(spa->spa_meta_objset,
                            DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_DEFLATE,
                            sizeof (uint64_t), 1, &spa->spa_deflate, tx));
                }
        }

        if (spa->spa_ubsync.ub_version < SPA_VERSION_ORIGIN &&
            spa->spa_uberblock.ub_version >= SPA_VERSION_ORIGIN) {
                dsl_pool_create_origin(dp, tx);

                /* Keeping the origin open increases spa_minref */
                spa->spa_minref += 3;
        }

        if (spa->spa_ubsync.ub_version < SPA_VERSION_NEXT_CLONES &&
            spa->spa_uberblock.ub_version >= SPA_VERSION_NEXT_CLONES) {
                dsl_pool_upgrade_clones(dp, tx);
        }

        /*
         * If anything has changed in this txg, push the deferred frees
         * from the previous txg.  If not, leave them alone so that we
         * don't generate work on an otherwise idle system.
         */
        if (!txg_list_empty(&dp->dp_dirty_datasets, txg) ||
            !txg_list_empty(&dp->dp_dirty_dirs, txg) ||
            !txg_list_empty(&dp->dp_sync_tasks, txg))
                spa_sync_deferred_frees(spa, txg);

        /*
         * Iterate to convergence.
         */
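        /*
         * Each pass below can create new dirty state: dsl_pool_sync()
         * allocates blocks, which dirties vdev space maps, which must
         * themselves be synced, and so on.  We exit only when a pass
         * completes without dirtying any vdevs; spa_sync_pass counts
         * the passes (see the dprintf() below).
         */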
        do {
                spa->spa_sync_pass++;

                spa_sync_config_object(spa, tx);
                spa_sync_aux_dev(spa, &spa->spa_spares, tx,
                    ZPOOL_CONFIG_SPARES, DMU_POOL_SPARES);
                spa_sync_aux_dev(spa, &spa->spa_l2cache, tx,
                    ZPOOL_CONFIG_L2CACHE, DMU_POOL_L2CACHE);
                spa_errlog_sync(spa, txg);
                dsl_pool_sync(dp, txg);

                dirty_vdevs = 0;
                while ((vd = txg_list_remove(&spa->spa_vdev_txg_list,
                    txg)) != NULL) {
                        vdev_sync(vd, txg);
                        dirty_vdevs++;
                }

                bplist_sync(bpl, tx);
        } while (dirty_vdevs);

        bplist_close(bpl);

        dprintf("txg %llu passes %d\n", txg, spa->spa_sync_pass);

        /*
         * Rewrite the vdev configuration (which includes the uberblock)
         * to commit the transaction group.
         *
         * If there are no dirty vdevs, we sync the uberblock to a few
         * random top-level vdevs that are known to be visible in the
         * config cache (see spa_vdev_add() for a complete description).
         * If there *are* dirty vdevs, sync the uberblock to all vdevs.
         */
        for (;;) {
                /*
                 * We hold SCL_STATE to prevent vdev open/close/etc.
                 * while we're attempting to write the vdev labels.
                 */
                spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);

                if (list_is_empty(&spa->spa_config_dirty_list)) {
                        vdev_t *svd[SPA_DVAS_PER_BP];
                        int svdcount = 0;
                        int children = rvd->vdev_children;
                        int c0 = spa_get_random(children);
                        int c;

                        for (c = 0; c < children; c++) {
                                vd = rvd->vdev_child[(c0 + c) % children];
                                if (vd->vdev_ms_array == 0 || vd->vdev_islog)
                                        continue;
                                svd[svdcount++] = vd;
                                if (svdcount == SPA_DVAS_PER_BP)
                                        break;
                        }
                        error = vdev_config_sync(svd, svdcount, txg, B_FALSE);
                        if (error != 0)
                                error = vdev_config_sync(svd, svdcount, txg,
                                    B_TRUE);
                } else {
                        error = vdev_config_sync(rvd->vdev_child,
                            rvd->vdev_children, txg, B_FALSE);
                        if (error != 0)
                                error = vdev_config_sync(rvd->vdev_child,
                                    rvd->vdev_children, txg, B_TRUE);
                }

                spa_config_exit(spa, SCL_STATE, FTAG);

                if (error == 0)
                        break;
                zio_suspend(spa, NULL);
                zio_resume_wait(spa);
        }
        dmu_tx_commit(tx);

        /*
         * Clear the dirty config list.
         */
        while ((vd = list_head(&spa->spa_config_dirty_list)) != NULL)
                vdev_config_clean(vd);

        /*
         * Now that the new config has synced transactionally,
         * let it become visible to the config cache.
         */
        if (spa->spa_config_syncing != NULL) {
                spa_config_set(spa, spa->spa_config_syncing);
                spa->spa_config_txg = txg;
                spa->spa_config_syncing = NULL;
        }

        spa->spa_ubsync = spa->spa_uberblock;

        /*
         * Clean up the ZIL records for the synced txg.
         */
        dsl_pool_zil_clean(dp);

        /*
         * Update usable space statistics.
         */
        while ((vd = txg_list_remove(&spa->spa_vdev_txg_list,
            TXG_CLEAN(txg))) != NULL)
                vdev_sync_done(vd, txg);

        /*
         * It had better be the case that we didn't dirty anything
         * since vdev_config_sync().
         */
        ASSERT(txg_list_empty(&dp->dp_dirty_datasets, txg));
        ASSERT(txg_list_empty(&dp->dp_dirty_dirs, txg));
        ASSERT(txg_list_empty(&spa->spa_vdev_txg_list, txg));
        ASSERT(bpl->bpl_queue == NULL);

        spa_config_exit(spa, SCL_CONFIG, FTAG);

        /*
         * If any async tasks have been requested, kick them off.
         */
        spa_async_dispatch(spa);
}
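/*
 * Consumers rarely drive spa_sync() themselves; they dirty state in an
 * open txg and, if they need it on disk, wait for the syncing thread.
 * A minimal sketch of the usual pattern (illustrative only; passing 0
 * waits for all currently pending txgs, as spa_sync_allpools() and
 * spa_upgrade() below do):
 *
 *      txg_wait_synced(spa_get_dsl(spa), 0);
 */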
/*
 * Sync all pools.  We don't want to hold the namespace lock across these
 * operations, so we take a reference on the spa_t and drop the lock during the
 * sync.
 */
void
spa_sync_allpools(void)
{
        spa_t *spa = NULL;
        mutex_enter(&spa_namespace_lock);
        while ((spa = spa_next(spa)) != NULL) {
                if (spa_state(spa) != POOL_STATE_ACTIVE || spa_suspended(spa))
                        continue;
                spa_open_ref(spa, FTAG);
                mutex_exit(&spa_namespace_lock);
                txg_wait_synced(spa_get_dsl(spa), 0);
                mutex_enter(&spa_namespace_lock);
                spa_close(spa, FTAG);
        }
        mutex_exit(&spa_namespace_lock);
}

/*
 * ==========================================================================
 * Miscellaneous routines
 * ==========================================================================
 */

/*
 * Remove all pools in the system.
 */
void
spa_evict_all(void)
{
        spa_t *spa;

        /*
         * Remove all cached state.  All pools should be closed now,
         * so every spa in the AVL tree should be unreferenced.
         */
        mutex_enter(&spa_namespace_lock);
        while ((spa = spa_next(NULL)) != NULL) {
                /*
                 * Stop async tasks.  The async thread may need to detach
                 * a device that's been replaced, which requires grabbing
                 * spa_namespace_lock, so we must drop it here.
                 */
                spa_open_ref(spa, FTAG);
                mutex_exit(&spa_namespace_lock);
                spa_async_suspend(spa);
                mutex_enter(&spa_namespace_lock);
                spa_close(spa, FTAG);

                if (spa->spa_state != POOL_STATE_UNINITIALIZED) {
                        spa_unload(spa);
                        spa_deactivate(spa);
                }
                spa_remove(spa);
        }
        mutex_exit(&spa_namespace_lock);
}

vdev_t *
spa_lookup_by_guid(spa_t *spa, uint64_t guid, boolean_t aux)
{
        vdev_t *vd;
        int i;

        if ((vd = vdev_lookup_by_guid(spa->spa_root_vdev, guid)) != NULL)
                return (vd);

        if (aux) {
                for (i = 0; i < spa->spa_l2cache.sav_count; i++) {
                        vd = spa->spa_l2cache.sav_vdevs[i];
                        if (vd->vdev_guid == guid)
                                return (vd);
                }

                for (i = 0; i < spa->spa_spares.sav_count; i++) {
                        vd = spa->spa_spares.sav_vdevs[i];
                        if (vd->vdev_guid == guid)
                                return (vd);
                }
        }

        return (NULL);
}

void
spa_upgrade(spa_t *spa, uint64_t version)
{
        spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);

        /*
         * This should only be called for a non-faulted pool, and since a
         * future version would result in an unopenable pool, this shouldn't be
         * possible.
         */
        ASSERT(spa->spa_uberblock.ub_version <= SPA_VERSION);
        ASSERT(version >= spa->spa_uberblock.ub_version);

        spa->spa_uberblock.ub_version = version;
        vdev_config_dirty(spa->spa_root_vdev);

        spa_config_exit(spa, SCL_ALL, FTAG);

        txg_wait_synced(spa_get_dsl(spa), 0);
}

boolean_t
spa_has_spare(spa_t *spa, uint64_t guid)
{
        int i;
        uint64_t spareguid;
        spa_aux_vdev_t *sav = &spa->spa_spares;

        for (i = 0; i < sav->sav_count; i++)
                if (sav->sav_vdevs[i]->vdev_guid == guid)
                        return (B_TRUE);

        for (i = 0; i < sav->sav_npending; i++) {
                if (nvlist_lookup_uint64(sav->sav_pending[i], ZPOOL_CONFIG_GUID,
                    &spareguid) == 0 && spareguid == guid)
                        return (B_TRUE);
        }

        return (B_FALSE);
}
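/*
 * The lookup helpers above are normally used under the vdev state lock
 * so the vdev can't disappear mid-lookup.  A minimal sketch, assuming
 * the spa_vdev_state_enter()/spa_vdev_state_exit() protocol used
 * elsewhere in this file (illustrative only):
 *
 *      spa_vdev_state_enter(spa);
 *      if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL)
 *              return (spa_vdev_state_exit(spa, NULL, ENODEV));
 *      ... operate on vd ...
 *      return (spa_vdev_state_exit(spa, vd, 0));
 */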
/*
 * Check if a pool has an active shared spare device.
 * Note: the reference count of an active spare is 2: once as a spare and
 * once as a replacing vdev.
 */
static boolean_t
spa_has_active_shared_spare(spa_t *spa)
{
        int i, refcnt;
        uint64_t pool;
        spa_aux_vdev_t *sav = &spa->spa_spares;

        for (i = 0; i < sav->sav_count; i++) {
                if (spa_spare_exists(sav->sav_vdevs[i]->vdev_guid, &pool,
                    &refcnt) && pool != 0ULL && pool == spa_guid(spa) &&
                    refcnt > 2)
                        return (B_TRUE);
        }

        return (B_FALSE);
}

/*
 * Post a sysevent corresponding to the given event.  The 'name' must be one of
 * the event definitions in sys/sysevent/eventdefs.h.  The payload will be
 * filled in from the spa and (optionally) the vdev.  This doesn't do anything
 * in the userland libzpool, as we don't want consumers to misinterpret ztest
 * or zdb as real changes.
 */
void
spa_event_notify(spa_t *spa, vdev_t *vd, const char *name)
{
#ifdef _KERNEL
        sysevent_t *ev;
        sysevent_attr_list_t *attr = NULL;
        sysevent_value_t value;
        sysevent_id_t eid;

        ev = sysevent_alloc(EC_ZFS, (char *)name, SUNW_KERN_PUB "zfs",
            SE_SLEEP);

        value.value_type = SE_DATA_TYPE_STRING;
        value.value.sv_string = spa_name(spa);
        if (sysevent_add_attr(&attr, ZFS_EV_POOL_NAME, &value, SE_SLEEP) != 0)
                goto done;

        value.value_type = SE_DATA_TYPE_UINT64;
        value.value.sv_uint64 = spa_guid(spa);
        if (sysevent_add_attr(&attr, ZFS_EV_POOL_GUID, &value, SE_SLEEP) != 0)
                goto done;

        if (vd) {
                value.value_type = SE_DATA_TYPE_UINT64;
                value.value.sv_uint64 = vd->vdev_guid;
                if (sysevent_add_attr(&attr, ZFS_EV_VDEV_GUID, &value,
                    SE_SLEEP) != 0)
                        goto done;

                if (vd->vdev_path) {
                        value.value_type = SE_DATA_TYPE_STRING;
                        value.value.sv_string = vd->vdev_path;
                        if (sysevent_add_attr(&attr, ZFS_EV_VDEV_PATH,
                            &value, SE_SLEEP) != 0)
                                goto done;
                }
        }

        if (sysevent_attach_attributes(ev, attr) != 0)
                goto done;
        attr = NULL;

        (void) log_sysevent(ev, SE_SLEEP, &eid);

done:
        if (attr)
                sysevent_free_attr(attr);
        sysevent_free(ev);
#endif
}
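/*
 * Example usage of spa_event_notify() (illustrative only; the class name
 * must be one of the ESC_ZFS_* definitions from
 * sys/sysevent/eventdefs.h):
 *
 *      spa_event_notify(spa, vd, ESC_ZFS_RESILVER_FINISH);
 *
 * Userland consumers then see an EC_ZFS event carrying ZFS_EV_POOL_NAME
 * and ZFS_EV_POOL_GUID, plus ZFS_EV_VDEV_GUID/ZFS_EV_VDEV_PATH when a
 * vdev was supplied.
 */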