/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * This file contains all the routines used when modifying on-disk SPA state.
 * This includes opening, importing, destroying, exporting a pool, and syncing
 * a pool.
 */

#include <sys/zfs_context.h>
#include <sys/fm/fs/zfs.h>
#include <sys/spa_impl.h>
#include <sys/zio.h>
#include <sys/zio_checksum.h>
#include <sys/zio_compress.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/zap.h>
#include <sys/zil.h>
#include <sys/vdev_impl.h>
#include <sys/metaslab.h>
#include <sys/uberblock_impl.h>
#include <sys/txg.h>
#include <sys/avl.h>
#include <sys/dmu_traverse.h>
#include <sys/dmu_objset.h>
#include <sys/unique.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_synctask.h>
#include <sys/fs/zfs.h>
#include <sys/arc.h>
#include <sys/callb.h>
#include <sys/systeminfo.h>
#include <sys/sunddi.h>
#include <sys/spa_boot.h>

#include "zfs_prop.h"
#include "zfs_comutil.h"

int zio_taskq_threads[ZIO_TYPES][ZIO_TASKQ_TYPES] = {
	/* ISSUE	INTR */
	{ 1,	1 },	/* ZIO_TYPE_NULL */
	{ 1,	8 },	/* ZIO_TYPE_READ */
	{ 8,	1 },	/* ZIO_TYPE_WRITE */
	{ 1,	1 },	/* ZIO_TYPE_FREE */
	{ 1,	1 },	/* ZIO_TYPE_CLAIM */
	{ 1,	1 },	/* ZIO_TYPE_IOCTL */
};

static void spa_sync_props(void *arg1, void *arg2, cred_t *cr, dmu_tx_t *tx);
static boolean_t spa_has_active_shared_spare(spa_t *spa);

/*
 * ==========================================================================
 * SPA properties routines
 * ==========================================================================
 */

/*
 * Add a (source=src, propname=propval) list to an nvlist.
 */
static void
spa_prop_add_list(nvlist_t *nvl, zpool_prop_t prop, char *strval,
    uint64_t intval, zprop_source_t src)
{
	const char *propname = zpool_prop_to_name(prop);
	nvlist_t *propval;

	VERIFY(nvlist_alloc(&propval, NV_UNIQUE_NAME, KM_SLEEP) == 0);
	VERIFY(nvlist_add_uint64(propval, ZPROP_SOURCE, src) == 0);

	if (strval != NULL)
		VERIFY(nvlist_add_string(propval, ZPROP_VALUE, strval) == 0);
	else
		VERIFY(nvlist_add_uint64(propval, ZPROP_VALUE, intval) == 0);

	VERIFY(nvlist_add_nvlist(nvl, propname, propval) == 0);
	nvlist_free(propval);
}

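/*
 * Illustrative sketch of the resulting layout (not a normative format
 * description): each call appends an entry of the form
 *
 *	zpool_prop_to_name(prop) -> { ZPROP_SOURCE = src,
 *	    ZPROP_VALUE = strval (or intval when strval is NULL) }
 *
 * to the caller-supplied nvlist 'nvl'.
 */
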
/*
 * Get property values from the spa configuration.
 */
static void
spa_prop_get_config(spa_t *spa, nvlist_t **nvp)
{
	uint64_t size = spa_get_space(spa);
	uint64_t used = spa_get_alloc(spa);
	uint64_t cap, version;
	zprop_source_t src = ZPROP_SRC_NONE;
	spa_config_dirent_t *dp;

	ASSERT(MUTEX_HELD(&spa->spa_props_lock));

	/*
	 * readonly properties
	 */
	spa_prop_add_list(*nvp, ZPOOL_PROP_NAME, spa_name(spa), 0, src);
	spa_prop_add_list(*nvp, ZPOOL_PROP_SIZE, NULL, size, src);
	spa_prop_add_list(*nvp, ZPOOL_PROP_USED, NULL, used, src);
	spa_prop_add_list(*nvp, ZPOOL_PROP_AVAILABLE, NULL, size - used, src);

	cap = (size == 0) ? 0 : (used * 100 / size);
	spa_prop_add_list(*nvp, ZPOOL_PROP_CAPACITY, NULL, cap, src);

	spa_prop_add_list(*nvp, ZPOOL_PROP_GUID, NULL, spa_guid(spa), src);
	spa_prop_add_list(*nvp, ZPOOL_PROP_HEALTH, NULL,
	    spa->spa_root_vdev->vdev_state, src);

	/*
	 * settable properties that are not stored in the pool property object.
	 */
	version = spa_version(spa);
	if (version == zpool_prop_default_numeric(ZPOOL_PROP_VERSION))
		src = ZPROP_SRC_DEFAULT;
	else
		src = ZPROP_SRC_LOCAL;
	spa_prop_add_list(*nvp, ZPOOL_PROP_VERSION, NULL, version, src);

	if (spa->spa_root != NULL)
		spa_prop_add_list(*nvp, ZPOOL_PROP_ALTROOT, spa->spa_root,
		    0, ZPROP_SRC_LOCAL);

	if ((dp = list_head(&spa->spa_config_list)) != NULL) {
		if (dp->scd_path == NULL) {
			spa_prop_add_list(*nvp, ZPOOL_PROP_CACHEFILE,
			    "none", 0, ZPROP_SRC_LOCAL);
		} else if (strcmp(dp->scd_path, spa_config_path) != 0) {
			spa_prop_add_list(*nvp, ZPOOL_PROP_CACHEFILE,
			    dp->scd_path, 0, ZPROP_SRC_LOCAL);
		}
	}
}

/*
 * Get zpool property values.
 */
int
spa_prop_get(spa_t *spa, nvlist_t **nvp)
{
	zap_cursor_t zc;
	zap_attribute_t za;
	objset_t *mos = spa->spa_meta_objset;
	int err;

	VERIFY(nvlist_alloc(nvp, NV_UNIQUE_NAME, KM_SLEEP) == 0);

	mutex_enter(&spa->spa_props_lock);

	/*
	 * Get properties from the spa config.
	 */
	spa_prop_get_config(spa, nvp);

	/* If no pool property object, no more props to get. */
	if (spa->spa_pool_props_object == 0) {
		mutex_exit(&spa->spa_props_lock);
		return (0);
	}

	/*
	 * Get properties from the MOS pool property object.
	 */
	for (zap_cursor_init(&zc, mos, spa->spa_pool_props_object);
	    (err = zap_cursor_retrieve(&zc, &za)) == 0;
	    zap_cursor_advance(&zc)) {
		uint64_t intval = 0;
		char *strval = NULL;
		zprop_source_t src = ZPROP_SRC_DEFAULT;
		zpool_prop_t prop;

		if ((prop = zpool_name_to_prop(za.za_name)) == ZPROP_INVAL)
			continue;

		switch (za.za_integer_length) {
		case 8:
			/* integer property */
			if (za.za_first_integer !=
			    zpool_prop_default_numeric(prop))
				src = ZPROP_SRC_LOCAL;

			if (prop == ZPOOL_PROP_BOOTFS) {
				dsl_pool_t *dp;
				dsl_dataset_t *ds = NULL;

				dp = spa_get_dsl(spa);
				rw_enter(&dp->dp_config_rwlock, RW_READER);
				if (err = dsl_dataset_hold_obj(dp,
				    za.za_first_integer, FTAG, &ds)) {
					rw_exit(&dp->dp_config_rwlock);
					break;
				}

				strval = kmem_alloc(
				    MAXNAMELEN + strlen(MOS_DIR_NAME) + 1,
				    KM_SLEEP);
				dsl_dataset_name(ds, strval);
				dsl_dataset_rele(ds, FTAG);
				rw_exit(&dp->dp_config_rwlock);
			} else {
				strval = NULL;
				intval = za.za_first_integer;
			}

			spa_prop_add_list(*nvp, prop, strval, intval, src);

			if (strval != NULL)
				kmem_free(strval,
				    MAXNAMELEN + strlen(MOS_DIR_NAME) + 1);

			break;

		case 1:
			/* string property */
			strval = kmem_alloc(za.za_num_integers, KM_SLEEP);
			err = zap_lookup(mos, spa->spa_pool_props_object,
			    za.za_name, 1, za.za_num_integers, strval);
			if (err) {
				kmem_free(strval, za.za_num_integers);
				break;
			}
			spa_prop_add_list(*nvp, prop, strval, 0, src);
			kmem_free(strval, za.za_num_integers);
			break;

		default:
			break;
		}
	}
	zap_cursor_fini(&zc);
	mutex_exit(&spa->spa_props_lock);
out:
	if (err && err != ENOENT) {
		nvlist_free(*nvp);
		*nvp = NULL;
		return (err);
	}

	return (0);
}

/*
 * Validate the given pool properties nvlist and modify the list
 * for the property values to be set.
 */
static int
spa_prop_validate(spa_t *spa, nvlist_t *props)
{
	nvpair_t *elem;
	int error = 0, reset_bootfs = 0;
	uint64_t objnum;

	elem = NULL;
	while ((elem = nvlist_next_nvpair(props, elem)) != NULL) {
		zpool_prop_t prop;
		char *propname, *strval;
		uint64_t intval;
		objset_t *os;
		char *slash;

		propname = nvpair_name(elem);

		if ((prop = zpool_name_to_prop(propname)) == ZPROP_INVAL)
			return (EINVAL);

		switch (prop) {
		case ZPOOL_PROP_VERSION:
			error = nvpair_value_uint64(elem, &intval);
			if (!error &&
			    (intval < spa_version(spa) || intval > SPA_VERSION))
				error = EINVAL;
			break;

		case ZPOOL_PROP_DELEGATION:
		case ZPOOL_PROP_AUTOREPLACE:
		case ZPOOL_PROP_LISTSNAPS:
			error = nvpair_value_uint64(elem, &intval);
			if (!error && intval > 1)
				error = EINVAL;
			break;

		case ZPOOL_PROP_BOOTFS:
			if (spa_version(spa) < SPA_VERSION_BOOTFS) {
				error = ENOTSUP;
				break;
			}

			/*
			 * Make sure the vdev config is bootable
			 */
			if (!vdev_is_bootable(spa->spa_root_vdev)) {
				error = ENOTSUP;
				break;
			}

			reset_bootfs = 1;

			error = nvpair_value_string(elem, &strval);

			if (!error) {
				uint64_t compress;

				if (strval == NULL || strval[0] == '\0') {
					objnum = zpool_prop_default_numeric(
					    ZPOOL_PROP_BOOTFS);
					break;
				}

				if (error = dmu_objset_open(strval, DMU_OST_ZFS,
				    DS_MODE_USER | DS_MODE_READONLY, &os))
					break;

				/* We don't support gzip bootable datasets */
				if ((error = dsl_prop_get_integer(strval,
				    zfs_prop_to_name(ZFS_PROP_COMPRESSION),
				    &compress, NULL)) == 0 &&
				    !BOOTFS_COMPRESS_VALID(compress)) {
					error = ENOTSUP;
				} else {
					objnum = dmu_objset_id(os);
				}
				dmu_objset_close(os);
			}
			break;

		case ZPOOL_PROP_FAILUREMODE:
			error = nvpair_value_uint64(elem, &intval);
			if (!error && (intval < ZIO_FAILURE_MODE_WAIT ||
			    intval > ZIO_FAILURE_MODE_PANIC))
				error = EINVAL;

			/*
			 * This is a special case which only occurs when
			 * the pool has completely failed.  This allows
			 * the user to change the in-core failmode property
			 * without syncing it out to disk (I/Os might
			 * currently be blocked).  We do this by returning
			 * EIO to the caller (spa_prop_set) to trick it
			 * into thinking we encountered a property validation
			 * error.
			 */
			if (!error && spa_suspended(spa)) {
				spa->spa_failmode = intval;
				error = EIO;
			}
			break;

		case ZPOOL_PROP_CACHEFILE:
			if ((error = nvpair_value_string(elem, &strval)) != 0)
				break;

			if (strval[0] == '\0')
				break;

			if (strcmp(strval, "none") == 0)
				break;

			if (strval[0] != '/') {
				error = EINVAL;
				break;
			}

			slash = strrchr(strval, '/');
			ASSERT(slash != NULL);

			if (slash[1] == '\0' || strcmp(slash, "/.") == 0 ||
			    strcmp(slash, "/..") == 0)
				error = EINVAL;
			break;
		}

		if (error)
			break;
	}

	if (!error && reset_bootfs) {
		error = nvlist_remove(props,
		    zpool_prop_to_name(ZPOOL_PROP_BOOTFS), DATA_TYPE_STRING);

		if (!error) {
			error = nvlist_add_uint64(props,
			    zpool_prop_to_name(ZPOOL_PROP_BOOTFS), objnum);
		}
	}

	return (error);
}

int
spa_prop_set(spa_t *spa, nvlist_t *nvp)
{
	int error;

	if ((error = spa_prop_validate(spa, nvp)) != 0)
		return (error);

	return (dsl_sync_task_do(spa_get_dsl(spa), NULL, spa_sync_props,
	    spa, nvp, 3));
}

/*
 * If the bootfs property value is dsobj, clear it.
 */
void
spa_prop_clear_bootfs(spa_t *spa, uint64_t dsobj, dmu_tx_t *tx)
{
	if (spa->spa_bootfs == dsobj && spa->spa_pool_props_object != 0) {
		VERIFY(zap_remove(spa->spa_meta_objset,
		    spa->spa_pool_props_object,
		    zpool_prop_to_name(ZPOOL_PROP_BOOTFS), tx) == 0);
		spa->spa_bootfs = 0;
	}
}

/*
 * ==========================================================================
 * SPA state manipulation (open/create/destroy/import/export)
 * ==========================================================================
 */

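/*
 * AVL comparison callback ordering error-log entries by a byte-wise
 * comparison of their bookmarks (used by the spa_errlist trees below).
 */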
static int
spa_error_entry_compare(const void *a, const void *b)
{
	spa_error_entry_t *sa = (spa_error_entry_t *)a;
	spa_error_entry_t *sb = (spa_error_entry_t *)b;
	int ret;

	ret = bcmp(&sa->se_bookmark, &sb->se_bookmark,
	    sizeof (zbookmark_t));

	if (ret < 0)
		return (-1);
	else if (ret > 0)
		return (1);
	else
		return (0);
}

/*
 * Utility function which retrieves copies of the current logs and
 * re-initializes them in the process.
 */
void
spa_get_errlists(spa_t *spa, avl_tree_t *last, avl_tree_t *scrub)
{
	ASSERT(MUTEX_HELD(&spa->spa_errlist_lock));

	bcopy(&spa->spa_errlist_last, last, sizeof (avl_tree_t));
	bcopy(&spa->spa_errlist_scrub, scrub, sizeof (avl_tree_t));

	avl_create(&spa->spa_errlist_scrub,
	    spa_error_entry_compare, sizeof (spa_error_entry_t),
	    offsetof(spa_error_entry_t, se_avl));
	avl_create(&spa->spa_errlist_last,
	    spa_error_entry_compare, sizeof (spa_error_entry_t),
	    offsetof(spa_error_entry_t, se_avl));
}

/*
 * Activate an uninitialized pool.
 */
static void
spa_activate(spa_t *spa)
{
	ASSERT(spa->spa_state == POOL_STATE_UNINITIALIZED);

	spa->spa_state = POOL_STATE_ACTIVE;

	spa->spa_normal_class = metaslab_class_create();
	spa->spa_log_class = metaslab_class_create();

	for (int t = 0; t < ZIO_TYPES; t++) {
		for (int q = 0; q < ZIO_TASKQ_TYPES; q++) {
			spa->spa_zio_taskq[t][q] = taskq_create("spa_zio",
			    zio_taskq_threads[t][q], maxclsyspri, 50,
			    INT_MAX, TASKQ_PREPOPULATE);
		}
	}

	list_create(&spa->spa_config_dirty_list, sizeof (vdev_t),
	    offsetof(vdev_t, vdev_config_dirty_node));
	list_create(&spa->spa_state_dirty_list, sizeof (vdev_t),
	    offsetof(vdev_t, vdev_state_dirty_node));

	txg_list_create(&spa->spa_vdev_txg_list,
	    offsetof(struct vdev, vdev_txg_node));

	avl_create(&spa->spa_errlist_scrub,
	    spa_error_entry_compare, sizeof (spa_error_entry_t),
	    offsetof(spa_error_entry_t, se_avl));
	avl_create(&spa->spa_errlist_last,
	    spa_error_entry_compare, sizeof (spa_error_entry_t),
	    offsetof(spa_error_entry_t, se_avl));
}

/*
 * Opposite of spa_activate().
 */
static void
spa_deactivate(spa_t *spa)
{
	ASSERT(spa->spa_sync_on == B_FALSE);
	ASSERT(spa->spa_dsl_pool == NULL);
	ASSERT(spa->spa_root_vdev == NULL);

	ASSERT(spa->spa_state != POOL_STATE_UNINITIALIZED);

	txg_list_destroy(&spa->spa_vdev_txg_list);

	list_destroy(&spa->spa_config_dirty_list);
	list_destroy(&spa->spa_state_dirty_list);

	for (int t = 0; t < ZIO_TYPES; t++) {
		for (int q = 0; q < ZIO_TASKQ_TYPES; q++) {
			taskq_destroy(spa->spa_zio_taskq[t][q]);
			spa->spa_zio_taskq[t][q] = NULL;
		}
	}

	metaslab_class_destroy(spa->spa_normal_class);
	spa->spa_normal_class = NULL;

	metaslab_class_destroy(spa->spa_log_class);
	spa->spa_log_class = NULL;

	/*
	 * If this was part of an import or the open otherwise failed, we may
	 * still have errors left in the queues.  Empty them just in case.
	 */
	spa_errlog_drain(spa);

	avl_destroy(&spa->spa_errlist_scrub);
	avl_destroy(&spa->spa_errlist_last);

	spa->spa_state = POOL_STATE_UNINITIALIZED;
}

/*
 * Verify a pool configuration, and construct the vdev tree appropriately.
 * This will create all the necessary vdevs in the appropriate layout, with
 * each vdev in the CLOSED state.  This will prep the pool before
 * open/creation/import.  All vdev validation is done by the vdev_alloc()
 * routine.
 */
static int
spa_config_parse(spa_t *spa, vdev_t **vdp, nvlist_t *nv, vdev_t *parent,
    uint_t id, int atype)
{
	nvlist_t **child;
	uint_t c, children;
	int error;

	if ((error = vdev_alloc(spa, vdp, nv, parent, id, atype)) != 0)
		return (error);

	if ((*vdp)->vdev_ops->vdev_op_leaf)
		return (0);

	error = nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
	    &child, &children);

	if (error == ENOENT)
		return (0);

	if (error) {
		vdev_free(*vdp);
		*vdp = NULL;
		return (EINVAL);
	}

	for (c = 0; c < children; c++) {
		vdev_t *vd;
		if ((error = spa_config_parse(spa, &vd, child[c], *vdp, c,
		    atype)) != 0) {
			vdev_free(*vdp);
			*vdp = NULL;
			return (error);
		}
	}

	ASSERT(*vdp != NULL);

	return (0);
}

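/*
 * Illustrative example (a sketch, not a normative description of the config
 * format): an nvroot describing a two-way mirror
 *
 *	root
 *	  mirror
 *	    disk
 *	    disk
 *
 * is parsed depth-first; the mirror vdev is allocated as child 0 of the root
 * vdev and each disk as a child of the mirror, with every vdev left CLOSED
 * until a later open, create, or import.
 */
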
/*
 * Opposite of spa_load().
 */
static void
spa_unload(spa_t *spa)
{
	int i;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	/*
	 * Stop async tasks.
	 */
	spa_async_suspend(spa);

	/*
	 * Stop syncing.
	 */
	if (spa->spa_sync_on) {
		txg_sync_stop(spa->spa_dsl_pool);
		spa->spa_sync_on = B_FALSE;
	}

	/*
	 * Wait for any outstanding async I/O to complete.
	 */
	mutex_enter(&spa->spa_async_root_lock);
	while (spa->spa_async_root_count != 0)
		cv_wait(&spa->spa_async_root_cv, &spa->spa_async_root_lock);
	mutex_exit(&spa->spa_async_root_lock);

	/*
	 * Drop and purge level 2 cache
	 */
	spa_l2cache_drop(spa);

	/*
	 * Close the dsl pool.
	 */
	if (spa->spa_dsl_pool) {
		dsl_pool_close(spa->spa_dsl_pool);
		spa->spa_dsl_pool = NULL;
	}

	/*
	 * Close all vdevs.
	 */
	if (spa->spa_root_vdev)
		vdev_free(spa->spa_root_vdev);
	ASSERT(spa->spa_root_vdev == NULL);

	for (i = 0; i < spa->spa_spares.sav_count; i++)
		vdev_free(spa->spa_spares.sav_vdevs[i]);
	if (spa->spa_spares.sav_vdevs) {
		kmem_free(spa->spa_spares.sav_vdevs,
		    spa->spa_spares.sav_count * sizeof (void *));
		spa->spa_spares.sav_vdevs = NULL;
	}
	if (spa->spa_spares.sav_config) {
		nvlist_free(spa->spa_spares.sav_config);
		spa->spa_spares.sav_config = NULL;
	}
	spa->spa_spares.sav_count = 0;

	for (i = 0; i < spa->spa_l2cache.sav_count; i++)
		vdev_free(spa->spa_l2cache.sav_vdevs[i]);
	if (spa->spa_l2cache.sav_vdevs) {
		kmem_free(spa->spa_l2cache.sav_vdevs,
		    spa->spa_l2cache.sav_count * sizeof (void *));
		spa->spa_l2cache.sav_vdevs = NULL;
	}
	if (spa->spa_l2cache.sav_config) {
		nvlist_free(spa->spa_l2cache.sav_config);
		spa->spa_l2cache.sav_config = NULL;
	}
	spa->spa_l2cache.sav_count = 0;

	spa->spa_async_suspended = 0;
}

/*
 * Load (or re-load) the current list of vdevs describing the active spares for
 * this pool.  When this is called, we have some form of basic information in
 * 'spa_spares.sav_config'.  We parse this into vdevs, try to open them, and
 * then re-generate a more complete list including status information.
 */
static void
spa_load_spares(spa_t *spa)
{
	nvlist_t **spares;
	uint_t nspares;
	int i;
	vdev_t *vd, *tvd;

	ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);

	/*
	 * First, close and free any existing spare vdevs.
	 */
	for (i = 0; i < spa->spa_spares.sav_count; i++) {
		vd = spa->spa_spares.sav_vdevs[i];

		/* Undo the call to spa_activate() below */
		if ((tvd = spa_lookup_by_guid(spa, vd->vdev_guid,
		    B_FALSE)) != NULL && tvd->vdev_isspare)
			spa_spare_remove(tvd);
		vdev_close(vd);
		vdev_free(vd);
	}

	if (spa->spa_spares.sav_vdevs)
		kmem_free(spa->spa_spares.sav_vdevs,
		    spa->spa_spares.sav_count * sizeof (void *));

	if (spa->spa_spares.sav_config == NULL)
		nspares = 0;
	else
		VERIFY(nvlist_lookup_nvlist_array(spa->spa_spares.sav_config,
		    ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0);

	spa->spa_spares.sav_count = (int)nspares;
	spa->spa_spares.sav_vdevs = NULL;

	if (nspares == 0)
		return;

	/*
	 * Construct the array of vdevs, opening them to get status in the
	 * process.  For each spare, there are potentially two different vdev_t
	 * structures associated with it: one in the list of spares (used only
	 * for basic validation purposes) and one in the active vdev
	 * configuration (if it's spared in).  During this phase we open and
	 * validate each vdev on the spare list.  If the vdev also exists in
	 * the active configuration, then we also mark this vdev as an active
	 * spare.
	 */
	spa->spa_spares.sav_vdevs = kmem_alloc(nspares * sizeof (void *),
	    KM_SLEEP);
	for (i = 0; i < spa->spa_spares.sav_count; i++) {
		VERIFY(spa_config_parse(spa, &vd, spares[i], NULL, 0,
		    VDEV_ALLOC_SPARE) == 0);
		ASSERT(vd != NULL);

		spa->spa_spares.sav_vdevs[i] = vd;

		if ((tvd = spa_lookup_by_guid(spa, vd->vdev_guid,
		    B_FALSE)) != NULL) {
			if (!tvd->vdev_isspare)
				spa_spare_add(tvd);

			/*
			 * We only mark the spare active if we were successfully
			 * able to load the vdev.  Otherwise, importing a pool
			 * with a bad active spare would result in strange
			 * behavior, because multiple pools would think the
			 * spare is actively in use.
			 *
			 * There is a vulnerability here to an equally bizarre
			 * circumstance, where a dead active spare is later
			 * brought back to life (onlined or otherwise).  Given
			 * the rarity of this scenario, and the extra complexity
			 * it adds, we ignore the possibility.
			 */
			if (!vdev_is_dead(tvd))
				spa_spare_activate(tvd);
		}

		vd->vdev_top = vd;

		if (vdev_open(vd) != 0)
			continue;

		if (vdev_validate_aux(vd) == 0)
			spa_spare_add(vd);
	}

	/*
	 * Recompute the stashed list of spares, with status information
	 * this time.
	 */
	VERIFY(nvlist_remove(spa->spa_spares.sav_config, ZPOOL_CONFIG_SPARES,
	    DATA_TYPE_NVLIST_ARRAY) == 0);

	spares = kmem_alloc(spa->spa_spares.sav_count * sizeof (void *),
	    KM_SLEEP);
	for (i = 0; i < spa->spa_spares.sav_count; i++)
		spares[i] = vdev_config_generate(spa,
		    spa->spa_spares.sav_vdevs[i], B_TRUE, B_TRUE, B_FALSE);
	VERIFY(nvlist_add_nvlist_array(spa->spa_spares.sav_config,
	    ZPOOL_CONFIG_SPARES, spares, spa->spa_spares.sav_count) == 0);
	for (i = 0; i < spa->spa_spares.sav_count; i++)
		nvlist_free(spares[i]);
	kmem_free(spares, spa->spa_spares.sav_count * sizeof (void *));
}

/*
 * Load (or re-load) the current list of vdevs describing the active l2cache
 * for this pool.  When this is called, we have some form of basic information
 * in 'spa_l2cache.sav_config'.  We parse this into vdevs, try to open them,
 * and then re-generate a more complete list including status information.
 * Devices which are already active have their details maintained, and are
 * not re-opened.
 */
static void
spa_load_l2cache(spa_t *spa)
{
	nvlist_t **l2cache;
	uint_t nl2cache;
	int i, j, oldnvdevs;
	uint64_t guid, size;
	vdev_t *vd, **oldvdevs, **newvdevs;
	spa_aux_vdev_t *sav = &spa->spa_l2cache;

	ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);

	if (sav->sav_config != NULL) {
		VERIFY(nvlist_lookup_nvlist_array(sav->sav_config,
		    ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0);
		newvdevs = kmem_alloc(nl2cache * sizeof (void *), KM_SLEEP);
	} else {
		nl2cache = 0;
	}

	oldvdevs = sav->sav_vdevs;
	oldnvdevs = sav->sav_count;
	sav->sav_vdevs = NULL;
	sav->sav_count = 0;

	/*
	 * Process new nvlist of vdevs.
	 */
	for (i = 0; i < nl2cache; i++) {
		VERIFY(nvlist_lookup_uint64(l2cache[i], ZPOOL_CONFIG_GUID,
		    &guid) == 0);

		newvdevs[i] = NULL;
		for (j = 0; j < oldnvdevs; j++) {
			vd = oldvdevs[j];
			if (vd != NULL && guid == vd->vdev_guid) {
				/*
				 * Retain previous vdev for add/remove ops.
				 */
				newvdevs[i] = vd;
				oldvdevs[j] = NULL;
				break;
			}
		}

		if (newvdevs[i] == NULL) {
			/*
			 * Create new vdev
			 */
			VERIFY(spa_config_parse(spa, &vd, l2cache[i], NULL, 0,
			    VDEV_ALLOC_L2CACHE) == 0);
			ASSERT(vd != NULL);
			newvdevs[i] = vd;

			/*
			 * Commit this vdev as an l2cache device,
			 * even if it fails to open.
			 */
			spa_l2cache_add(vd);

			vd->vdev_top = vd;
			vd->vdev_aux = sav;

			spa_l2cache_activate(vd);

			if (vdev_open(vd) != 0)
				continue;

			(void) vdev_validate_aux(vd);

			if (!vdev_is_dead(vd)) {
				size = vdev_get_rsize(vd);
				l2arc_add_vdev(spa, vd,
				    VDEV_LABEL_START_SIZE,
				    size - VDEV_LABEL_START_SIZE);
			}
		}
	}

	/*
	 * Purge vdevs that were dropped
	 */
	for (i = 0; i < oldnvdevs; i++) {
		uint64_t pool;

		vd = oldvdevs[i];
		if (vd != NULL) {
			if ((spa_mode & FWRITE) &&
			    spa_l2cache_exists(vd->vdev_guid, &pool) &&
			    pool != 0ULL &&
			    l2arc_vdev_present(vd)) {
				l2arc_remove_vdev(vd);
			}
			(void) vdev_close(vd);
			spa_l2cache_remove(vd);
		}
	}

	if (oldvdevs)
		kmem_free(oldvdevs, oldnvdevs * sizeof (void *));

	if (sav->sav_config == NULL)
		goto out;

	sav->sav_vdevs = newvdevs;
	sav->sav_count = (int)nl2cache;

	/*
	 * Recompute the stashed list of l2cache devices, with status
	 * information this time.
	 */
	VERIFY(nvlist_remove(sav->sav_config, ZPOOL_CONFIG_L2CACHE,
	    DATA_TYPE_NVLIST_ARRAY) == 0);

	l2cache = kmem_alloc(sav->sav_count * sizeof (void *), KM_SLEEP);
	for (i = 0; i < sav->sav_count; i++)
		l2cache[i] = vdev_config_generate(spa,
		    sav->sav_vdevs[i], B_TRUE, B_FALSE, B_TRUE);
	VERIFY(nvlist_add_nvlist_array(sav->sav_config,
	    ZPOOL_CONFIG_L2CACHE, l2cache, sav->sav_count) == 0);
out:
	for (i = 0; i < sav->sav_count; i++)
		nvlist_free(l2cache[i]);
	if (sav->sav_count)
		kmem_free(l2cache, sav->sav_count * sizeof (void *));
}

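/*
 * Read a packed nvlist out of the MOS: the object's bonus buffer holds the
 * packed size, and the object data holds the packed nvlist itself.
 */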
static int
load_nvlist(spa_t *spa, uint64_t obj, nvlist_t **value)
{
	dmu_buf_t *db;
	char *packed = NULL;
	size_t nvsize = 0;
	int error;
	*value = NULL;

	VERIFY(0 == dmu_bonus_hold(spa->spa_meta_objset, obj, FTAG, &db));
	nvsize = *(uint64_t *)db->db_data;
	dmu_buf_rele(db, FTAG);

	packed = kmem_alloc(nvsize, KM_SLEEP);
	error = dmu_read(spa->spa_meta_objset, obj, 0, nvsize, packed);
	if (error == 0)
		error = nvlist_unpack(packed, nvsize, value, 0);
	kmem_free(packed, nvsize);

	return (error);
}

/*
 * Checks to see if the given vdev could not be opened, in which case we post a
 * sysevent to notify the autoreplace code that the device has been removed.
 */
static void
spa_check_removed(vdev_t *vd)
{
	int c;

	for (c = 0; c < vd->vdev_children; c++)
		spa_check_removed(vd->vdev_child[c]);

	if (vd->vdev_ops->vdev_op_leaf && vdev_is_dead(vd)) {
		zfs_post_autoreplace(vd->vdev_spa, vd);
		spa_event_notify(vd->vdev_spa, vd, ESC_ZFS_VDEV_CHECK);
	}
}

/*
 * Check for missing log devices
 */
int
spa_check_logs(spa_t *spa)
{
	switch (spa->spa_log_state) {
	case SPA_LOG_MISSING:
		/* need to recheck in case slog has been restored */
	case SPA_LOG_UNKNOWN:
		if (dmu_objset_find(spa->spa_name, zil_check_log_chain, NULL,
		    DS_FIND_CHILDREN)) {
			spa->spa_log_state = SPA_LOG_MISSING;
			return (1);
		}
		break;

	case SPA_LOG_CLEAR:
		(void) dmu_objset_find(spa->spa_name, zil_clear_log_chain, NULL,
		    DS_FIND_CHILDREN);
		break;
	}
	spa->spa_log_state = SPA_LOG_GOOD;
	return (0);
}

/*
 * Load an existing storage pool, using the pool's builtin spa_config as a
 * source of configuration information.
 */
static int
spa_load(spa_t *spa, nvlist_t *config, spa_load_state_t state, int mosconfig)
{
	int error = 0;
	nvlist_t *nvroot = NULL;
	vdev_t *rvd;
	uberblock_t *ub = &spa->spa_uberblock;
	uint64_t config_cache_txg = spa->spa_config_txg;
	uint64_t pool_guid;
	uint64_t version;
	uint64_t autoreplace = 0;
	char *ereport = FM_EREPORT_ZFS_POOL;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	spa->spa_load_state = state;

	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &nvroot) ||
	    nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID, &pool_guid)) {
		error = EINVAL;
		goto out;
	}

	/*
	 * Versioning wasn't explicitly added to the label until later, so if
	 * it's not present treat it as the initial version.
	 */
	if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION, &version) != 0)
		version = SPA_VERSION_INITIAL;

	(void) nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_TXG,
	    &spa->spa_config_txg);

	if ((state == SPA_LOAD_IMPORT || state == SPA_LOAD_TRYIMPORT) &&
	    spa_guid_exists(pool_guid, 0)) {
		error = EEXIST;
		goto out;
	}

	spa->spa_load_guid = pool_guid;

	/*
	 * Parse the configuration into a vdev tree.  We explicitly set the
	 * value that will be returned by spa_version() since parsing the
	 * configuration requires knowing the version number.
	 */
	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
	spa->spa_ubsync.ub_version = version;
	error = spa_config_parse(spa, &rvd, nvroot, NULL, 0, VDEV_ALLOC_LOAD);
	spa_config_exit(spa, SCL_ALL, FTAG);

	if (error != 0)
		goto out;

	ASSERT(spa->spa_root_vdev == rvd);
	ASSERT(spa_guid(spa) == pool_guid);

	/*
	 * Try to open all vdevs, loading each label in the process.
	 */
	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
	error = vdev_open(rvd);
	spa_config_exit(spa, SCL_ALL, FTAG);
	if (error != 0)
		goto out;

	/*
	 * Validate the labels for all leaf vdevs.  We need to grab the config
	 * lock because all label I/O is done with ZIO_FLAG_CONFIG_WRITER.
	 */
	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
	error = vdev_validate(rvd);
	spa_config_exit(spa, SCL_ALL, FTAG);

	if (error != 0)
		goto out;

	if (rvd->vdev_state <= VDEV_STATE_CANT_OPEN) {
		error = ENXIO;
		goto out;
	}

	/*
	 * Find the best uberblock.
	 */
	vdev_uberblock_load(NULL, rvd, ub);

	/*
	 * If we weren't able to find a single valid uberblock, return failure.
	 */
	if (ub->ub_txg == 0) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_CORRUPT_DATA);
		error = ENXIO;
		goto out;
	}

	/*
	 * If the pool is newer than the code, we can't open it.
	 */
	if (ub->ub_version > SPA_VERSION) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_VERSION_NEWER);
		error = ENOTSUP;
		goto out;
	}

	/*
	 * If the vdev guid sum doesn't match the uberblock, we have an
	 * incomplete configuration.
	 */
	if (rvd->vdev_guid_sum != ub->ub_guid_sum && mosconfig) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_BAD_GUID_SUM);
		error = ENXIO;
		goto out;
	}

	/*
	 * Initialize internal SPA structures.
	 */
	spa->spa_state = POOL_STATE_ACTIVE;
	spa->spa_ubsync = spa->spa_uberblock;
	spa->spa_first_txg = spa_last_synced_txg(spa) + 1;
	error = dsl_pool_open(spa, spa->spa_first_txg, &spa->spa_dsl_pool);
	if (error) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_CORRUPT_DATA);
		goto out;
	}
	spa->spa_meta_objset = spa->spa_dsl_pool->dp_meta_objset;

	if (zap_lookup(spa->spa_meta_objset,
	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_CONFIG,
	    sizeof (uint64_t), 1, &spa->spa_config_object) != 0) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_CORRUPT_DATA);
		error = EIO;
		goto out;
	}

	if (!mosconfig) {
		nvlist_t *newconfig;
		uint64_t hostid;

		if (load_nvlist(spa, spa->spa_config_object, &newconfig) != 0) {
			vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
			    VDEV_AUX_CORRUPT_DATA);
			error = EIO;
			goto out;
		}

		if (!spa_is_root(spa) && nvlist_lookup_uint64(newconfig,
		    ZPOOL_CONFIG_HOSTID, &hostid) == 0) {
			char *hostname;
			unsigned long myhostid = 0;

			VERIFY(nvlist_lookup_string(newconfig,
			    ZPOOL_CONFIG_HOSTNAME, &hostname) == 0);

			(void) ddi_strtoul(hw_serial, NULL, 10, &myhostid);
			if (hostid != 0 && myhostid != 0 &&
			    (unsigned long)hostid != myhostid) {
				cmn_err(CE_WARN, "pool '%s' could not be "
				    "loaded as it was last accessed by "
				    "another system (host: %s hostid: 0x%lx). "
				    "See: http://www.sun.com/msg/ZFS-8000-EY",
				    spa_name(spa), hostname,
				    (unsigned long)hostid);
				error = EBADF;
				goto out;
			}
		}

		spa_config_set(spa, newconfig);
		spa_unload(spa);
		spa_deactivate(spa);
		spa_activate(spa);

		return (spa_load(spa, newconfig, state, B_TRUE));
	}

	if (zap_lookup(spa->spa_meta_objset,
	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_SYNC_BPLIST,
	    sizeof (uint64_t), 1, &spa->spa_sync_bplist_obj) != 0) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_CORRUPT_DATA);
		error = EIO;
		goto out;
	}

	/*
	 * Load the bit that tells us to use the new accounting function
	 * (raid-z deflation).  If we have an older pool, this will not
	 * be present.
	 */
	error = zap_lookup(spa->spa_meta_objset,
	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_DEFLATE,
	    sizeof (uint64_t), 1, &spa->spa_deflate);
	if (error != 0 && error != ENOENT) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_CORRUPT_DATA);
		error = EIO;
		goto out;
	}

	/*
	 * Load the persistent error log.  If we have an older pool, this will
	 * not be present.
	 */
	error = zap_lookup(spa->spa_meta_objset,
	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_ERRLOG_LAST,
	    sizeof (uint64_t), 1, &spa->spa_errlog_last);
	if (error != 0 && error != ENOENT) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_CORRUPT_DATA);
		error = EIO;
		goto out;
	}

	error = zap_lookup(spa->spa_meta_objset,
	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_ERRLOG_SCRUB,
	    sizeof (uint64_t), 1, &spa->spa_errlog_scrub);
	if (error != 0 && error != ENOENT) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_CORRUPT_DATA);
		error = EIO;
		goto out;
	}

	/*
	 * Load the history object.  If we have an older pool, this
	 * will not be present.
	 */
	error = zap_lookup(spa->spa_meta_objset,
	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_HISTORY,
	    sizeof (uint64_t), 1, &spa->spa_history);
	if (error != 0 && error != ENOENT) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_CORRUPT_DATA);
		error = EIO;
		goto out;
	}

	/*
	 * Load any hot spares for this pool.
	 */
	error = zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_SPARES, sizeof (uint64_t), 1, &spa->spa_spares.sav_object);
	if (error != 0 && error != ENOENT) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_CORRUPT_DATA);
		error = EIO;
		goto out;
	}
	if (error == 0) {
		ASSERT(spa_version(spa) >= SPA_VERSION_SPARES);
		if (load_nvlist(spa, spa->spa_spares.sav_object,
		    &spa->spa_spares.sav_config) != 0) {
			vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
			    VDEV_AUX_CORRUPT_DATA);
			error = EIO;
			goto out;
		}

		spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
		spa_load_spares(spa);
		spa_config_exit(spa, SCL_ALL, FTAG);
	}

	/*
	 * Load any level 2 ARC devices for this pool.
	 */
	error = zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_L2CACHE, sizeof (uint64_t), 1,
	    &spa->spa_l2cache.sav_object);
	if (error != 0 && error != ENOENT) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_CORRUPT_DATA);
		error = EIO;
		goto out;
	}
	if (error == 0) {
		ASSERT(spa_version(spa) >= SPA_VERSION_L2CACHE);
		if (load_nvlist(spa, spa->spa_l2cache.sav_object,
		    &spa->spa_l2cache.sav_config) != 0) {
			vdev_set_state(rvd, B_TRUE,
			    VDEV_STATE_CANT_OPEN,
			    VDEV_AUX_CORRUPT_DATA);
			error = EIO;
			goto out;
		}

		spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
		spa_load_l2cache(spa);
		spa_config_exit(spa, SCL_ALL, FTAG);
	}

	if (spa_check_logs(spa)) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_BAD_LOG);
		error = ENXIO;
		ereport = FM_EREPORT_ZFS_LOG_REPLAY;
		goto out;
	}

	spa->spa_delegation = zpool_prop_default_numeric(ZPOOL_PROP_DELEGATION);

	error = zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_PROPS, sizeof (uint64_t), 1, &spa->spa_pool_props_object);

	if (error && error != ENOENT) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_CORRUPT_DATA);
		error = EIO;
		goto out;
	}

	if (error == 0) {
		(void) zap_lookup(spa->spa_meta_objset,
		    spa->spa_pool_props_object,
		    zpool_prop_to_name(ZPOOL_PROP_BOOTFS),
		    sizeof (uint64_t), 1, &spa->spa_bootfs);
		(void) zap_lookup(spa->spa_meta_objset,
		    spa->spa_pool_props_object,
		    zpool_prop_to_name(ZPOOL_PROP_AUTOREPLACE),
		    sizeof (uint64_t), 1, &autoreplace);
		(void) zap_lookup(spa->spa_meta_objset,
		    spa->spa_pool_props_object,
		    zpool_prop_to_name(ZPOOL_PROP_DELEGATION),
		    sizeof (uint64_t), 1, &spa->spa_delegation);
		(void) zap_lookup(spa->spa_meta_objset,
		    spa->spa_pool_props_object,
		    zpool_prop_to_name(ZPOOL_PROP_FAILUREMODE),
		    sizeof (uint64_t), 1, &spa->spa_failmode);
	}

	/*
	 * If the 'autoreplace' property is set, then post a resource notifying
	 * the ZFS DE that it should not issue any faults for unopenable
	 * devices.  We also iterate over the vdevs, and post a sysevent for any
	 * unopenable vdevs so that the normal autoreplace handler can take
	 * over.
	 */
	if (autoreplace && state != SPA_LOAD_TRYIMPORT)
		spa_check_removed(spa->spa_root_vdev);

	/*
	 * Load the vdev state for all toplevel vdevs.
	 */
	vdev_load(rvd);

	/*
	 * Propagate the leaf DTLs we just loaded all the way up the tree.
	 */
	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
	vdev_dtl_reassess(rvd, 0, 0, B_FALSE);
	spa_config_exit(spa, SCL_ALL, FTAG);

	/*
	 * Check the state of the root vdev.  If it can't be opened, it
	 * indicates one or more toplevel vdevs are faulted.
	 */
	if (rvd->vdev_state <= VDEV_STATE_CANT_OPEN) {
		error = ENXIO;
		goto out;
	}

	if ((spa_mode & FWRITE) && state != SPA_LOAD_TRYIMPORT) {
		dmu_tx_t *tx;
		int need_update = B_FALSE;
		int c;

		/*
		 * Claim log blocks that haven't been committed yet.
		 * This must all happen in a single txg.
		 */
		tx = dmu_tx_create_assigned(spa_get_dsl(spa),
		    spa_first_txg(spa));
		(void) dmu_objset_find(spa_name(spa),
		    zil_claim, tx, DS_FIND_CHILDREN);
		dmu_tx_commit(tx);

		spa->spa_sync_on = B_TRUE;
		txg_sync_start(spa->spa_dsl_pool);

		/*
		 * Wait for all claims to sync.
		 */
		txg_wait_synced(spa->spa_dsl_pool, 0);

		/*
		 * If the config cache is stale, or we have uninitialized
		 * metaslabs (see spa_vdev_add()), then update the config.
		 */
		if (config_cache_txg != spa->spa_config_txg ||
		    state == SPA_LOAD_IMPORT)
			need_update = B_TRUE;

		for (c = 0; c < rvd->vdev_children; c++)
			if (rvd->vdev_child[c]->vdev_ms_array == 0)
				need_update = B_TRUE;

		/*
		 * Update the config cache asynchronously in case we're the
		 * root pool, in which case the config cache isn't writable yet.
		 */
		if (need_update)
			spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE);
	}

	error = 0;
out:
	spa->spa_minref = refcount_count(&spa->spa_refcount);
	if (error && error != EBADF)
		zfs_ereport_post(ereport, spa, NULL, NULL, 0, 0);
	spa->spa_load_state = SPA_LOAD_NONE;
	spa->spa_ena = 0;

	return (error);
}

/*
 * Pool Open/Import
 *
 * The import case is identical to an open except that the configuration is
 * sent down from userland, instead of grabbed from the configuration cache.
 * For the case of an open, the pool configuration will exist in the
 * POOL_STATE_UNINITIALIZED state.
 *
 * The stats information (gen/count/ustats) is used to gather vdev statistics
 * at the same time we open the pool, without having to keep around the spa_t
 * in some ambiguous state.
 */
static int
spa_open_common(const char *pool, spa_t **spapp, void *tag, nvlist_t **config)
{
	spa_t *spa;
	int error;
	int locked = B_FALSE;

	*spapp = NULL;

	/*
	 * As disgusting as this is, we need to support recursive calls to this
	 * function because dsl_dir_open() is called during spa_load(), and ends
	 * up calling spa_open() again.  The real fix is to figure out how to
	 * avoid dsl_dir_open() calling this in the first place.
	 */
	if (mutex_owner(&spa_namespace_lock) != curthread) {
		mutex_enter(&spa_namespace_lock);
		locked = B_TRUE;
	}

	if ((spa = spa_lookup(pool)) == NULL) {
		if (locked)
			mutex_exit(&spa_namespace_lock);
		return (ENOENT);
	}
	if (spa->spa_state == POOL_STATE_UNINITIALIZED) {

		spa_activate(spa);

		error = spa_load(spa, spa->spa_config, SPA_LOAD_OPEN, B_FALSE);

		if (error == EBADF) {
			/*
			 * If vdev_validate() returns failure (indicated by
			 * EBADF), it means that one of the vdevs indicates
			 * that the pool has been exported or destroyed.  If
			 * this is the case, the config cache is out of sync and
			 * we should remove the pool from the namespace.
			 */
			spa_unload(spa);
			spa_deactivate(spa);
			spa_config_sync(spa, B_TRUE, B_TRUE);
			spa_remove(spa);
			if (locked)
				mutex_exit(&spa_namespace_lock);
			return (ENOENT);
		}

		if (error) {
			/*
			 * We can't open the pool, but we still have useful
			 * information: the state of each vdev after the
			 * attempted vdev_open().  Return this to the user.
			 */
			if (config != NULL && spa->spa_root_vdev != NULL)
				*config = spa_config_generate(spa, NULL, -1ULL,
				    B_TRUE);
			spa_unload(spa);
			spa_deactivate(spa);
			spa->spa_last_open_failed = B_TRUE;
			if (locked)
				mutex_exit(&spa_namespace_lock);
			*spapp = NULL;
			return (error);
		} else {
			spa->spa_last_open_failed = B_FALSE;
		}
	}

	spa_open_ref(spa, tag);

	if (locked)
		mutex_exit(&spa_namespace_lock);

	*spapp = spa;

	if (config != NULL)
		*config = spa_config_generate(spa, NULL, -1ULL, B_TRUE);

	return (0);
}

int
spa_open(const char *name, spa_t **spapp, void *tag)
{
	return (spa_open_common(name, spapp, tag, NULL));
}

/*
 * Lookup the given spa_t, incrementing the inject count in the process,
 * preventing it from being exported or destroyed.
 */
spa_t *
spa_inject_addref(char *name)
{
	spa_t *spa;

	mutex_enter(&spa_namespace_lock);
	if ((spa = spa_lookup(name)) == NULL) {
		mutex_exit(&spa_namespace_lock);
		return (NULL);
	}
	spa->spa_inject_ref++;
	mutex_exit(&spa_namespace_lock);

	return (spa);
}

void
spa_inject_delref(spa_t *spa)
{
	mutex_enter(&spa_namespace_lock);
	spa->spa_inject_ref--;
	mutex_exit(&spa_namespace_lock);
}

/*
 * Add spares device information to the nvlist.
 */
static void
spa_add_spares(spa_t *spa, nvlist_t *config)
{
	nvlist_t **spares;
	uint_t i, nspares;
	nvlist_t *nvroot;
	uint64_t guid;
	vdev_stat_t *vs;
	uint_t vsc;
	uint64_t pool;

	if (spa->spa_spares.sav_count == 0)
		return;

	VERIFY(nvlist_lookup_nvlist(config,
	    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
	VERIFY(nvlist_lookup_nvlist_array(spa->spa_spares.sav_config,
	    ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0);
	if (nspares != 0) {
		VERIFY(nvlist_add_nvlist_array(nvroot,
		    ZPOOL_CONFIG_SPARES, spares, nspares) == 0);
		VERIFY(nvlist_lookup_nvlist_array(nvroot,
		    ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0);

		/*
		 * Go through and find any spares which have since been
		 * repurposed as an active spare.  If this is the case, update
		 * their status appropriately.
		 */
		for (i = 0; i < nspares; i++) {
			VERIFY(nvlist_lookup_uint64(spares[i],
			    ZPOOL_CONFIG_GUID, &guid) == 0);
			if (spa_spare_exists(guid, &pool, NULL) &&
			    pool != 0ULL) {
				VERIFY(nvlist_lookup_uint64_array(
				    spares[i], ZPOOL_CONFIG_STATS,
				    (uint64_t **)&vs, &vsc) == 0);
				vs->vs_state = VDEV_STATE_CANT_OPEN;
				vs->vs_aux = VDEV_AUX_SPARED;
			}
		}
	}
}

/*
 * Add l2cache device information to the nvlist, including vdev stats.
 */
static void
spa_add_l2cache(spa_t *spa, nvlist_t *config)
{
	nvlist_t **l2cache;
	uint_t i, j, nl2cache;
	nvlist_t *nvroot;
	uint64_t guid;
	vdev_t *vd;
	vdev_stat_t *vs;
	uint_t vsc;

	if (spa->spa_l2cache.sav_count == 0)
		return;

	spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);

	VERIFY(nvlist_lookup_nvlist(config,
	    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
	VERIFY(nvlist_lookup_nvlist_array(spa->spa_l2cache.sav_config,
	    ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0);
	if (nl2cache != 0) {
		VERIFY(nvlist_add_nvlist_array(nvroot,
		    ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache) == 0);
		VERIFY(nvlist_lookup_nvlist_array(nvroot,
		    ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0);

		/*
		 * Update level 2 cache device stats.
		 */
		for (i = 0; i < nl2cache; i++) {
			VERIFY(nvlist_lookup_uint64(l2cache[i],
			    ZPOOL_CONFIG_GUID, &guid) == 0);

			vd = NULL;
			for (j = 0; j < spa->spa_l2cache.sav_count; j++) {
				if (guid ==
				    spa->spa_l2cache.sav_vdevs[j]->vdev_guid) {
					vd = spa->spa_l2cache.sav_vdevs[j];
					break;
				}
			}
			ASSERT(vd != NULL);

			VERIFY(nvlist_lookup_uint64_array(l2cache[i],
			    ZPOOL_CONFIG_STATS, (uint64_t **)&vs, &vsc) == 0);
			vdev_get_stats(vd, vs);
		}
	}

	spa_config_exit(spa, SCL_CONFIG, FTAG);
}

int
spa_get_stats(const char *name, nvlist_t **config, char *altroot, size_t buflen)
{
	int error;
	spa_t *spa;

	*config = NULL;
	error = spa_open_common(name, &spa, FTAG, config);

	if (spa && *config != NULL) {
		VERIFY(nvlist_add_uint64(*config, ZPOOL_CONFIG_ERRCOUNT,
		    spa_get_errlog_size(spa)) == 0);

		if (spa_suspended(spa))
			VERIFY(nvlist_add_uint64(*config,
			    ZPOOL_CONFIG_SUSPENDED, spa->spa_failmode) == 0);

		spa_add_spares(spa, *config);
		spa_add_l2cache(spa, *config);
	}

	/*
	 * We want to get the alternate root even for faulted pools, so we cheat
	 * and call spa_lookup() directly.
	 */
	if (altroot) {
		if (spa == NULL) {
			mutex_enter(&spa_namespace_lock);
			spa = spa_lookup(name);
			if (spa)
				spa_altroot(spa, altroot, buflen);
			else
				altroot[0] = '\0';
			spa = NULL;
			mutex_exit(&spa_namespace_lock);
		} else {
			spa_altroot(spa, altroot, buflen);
		}
	}

	if (spa != NULL)
		spa_close(spa, FTAG);

	return (error);
}

/*
 * Validate that the auxiliary device array is well formed.  We must have an
 * array of nvlists, each of which describes a valid leaf vdev.  If this is an
 * import (mode is VDEV_ALLOC_SPARE), then we allow corrupted spares to be
 * specified, as long as they are well-formed.
 */
static int
spa_validate_aux_devs(spa_t *spa, nvlist_t *nvroot, uint64_t crtxg, int mode,
    spa_aux_vdev_t *sav, const char *config, uint64_t version,
    vdev_labeltype_t label)
{
	nvlist_t **dev;
	uint_t i, ndev;
	vdev_t *vd;
	int error;

	ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);

	/*
	 * It's acceptable to have no devs specified.
	 */
	if (nvlist_lookup_nvlist_array(nvroot, config, &dev, &ndev) != 0)
		return (0);

	if (ndev == 0)
		return (EINVAL);

	/*
	 * Make sure the pool is formatted with a version that supports this
	 * device type.
	 */
	if (spa_version(spa) < version)
		return (ENOTSUP);

	/*
	 * Set the pending device list so we correctly handle device in-use
	 * checking.
	 */
	sav->sav_pending = dev;
	sav->sav_npending = ndev;

	for (i = 0; i < ndev; i++) {
		if ((error = spa_config_parse(spa, &vd, dev[i], NULL, 0,
		    mode)) != 0)
			goto out;

		if (!vd->vdev_ops->vdev_op_leaf) {
			vdev_free(vd);
			error = EINVAL;
			goto out;
		}

		/*
		 * The L2ARC currently only supports disk devices in
		 * kernel context.  For user-level testing, we allow it.
		 */
#ifdef _KERNEL
		if ((strcmp(config, ZPOOL_CONFIG_L2CACHE) == 0) &&
		    strcmp(vd->vdev_ops->vdev_op_type, VDEV_TYPE_DISK) != 0) {
			error = ENOTBLK;
			goto out;
		}
#endif
		vd->vdev_top = vd;

		if ((error = vdev_open(vd)) == 0 &&
		    (error = vdev_label_init(vd, crtxg, label)) == 0) {
			VERIFY(nvlist_add_uint64(dev[i], ZPOOL_CONFIG_GUID,
			    vd->vdev_guid) == 0);
		}

		vdev_free(vd);

		if (error &&
		    (mode != VDEV_ALLOC_SPARE && mode != VDEV_ALLOC_L2CACHE))
			goto out;
		else
			error = 0;
	}

out:
	sav->sav_pending = NULL;
	sav->sav_npending = 0;
	return (error);
}

static int
spa_validate_aux(spa_t *spa, nvlist_t *nvroot, uint64_t crtxg, int mode)
{
	int error;

	ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);

	if ((error = spa_validate_aux_devs(spa, nvroot, crtxg, mode,
	    &spa->spa_spares, ZPOOL_CONFIG_SPARES, SPA_VERSION_SPARES,
	    VDEV_LABEL_SPARE)) != 0) {
		return (error);
	}

	return (spa_validate_aux_devs(spa, nvroot, crtxg, mode,
	    &spa->spa_l2cache, ZPOOL_CONFIG_L2CACHE, SPA_VERSION_L2CACHE,
	    VDEV_LABEL_L2CACHE));
}

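/*
 * Merge a caller-supplied list of auxiliary devices (spares or l2cache,
 * selected by 'config') into sav->sav_config, concatenating with any list
 * that is already present.
 */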
static void
spa_set_aux_vdevs(spa_aux_vdev_t *sav, nvlist_t **devs, int ndevs,
    const char *config)
{
	int i;

	if (sav->sav_config != NULL) {
		nvlist_t **olddevs;
		uint_t oldndevs;
		nvlist_t **newdevs;

		/*
		 * Generate new dev list by concatenating with the
		 * current dev list.
		 */
		VERIFY(nvlist_lookup_nvlist_array(sav->sav_config, config,
		    &olddevs, &oldndevs) == 0);

		newdevs = kmem_alloc(sizeof (void *) *
		    (ndevs + oldndevs), KM_SLEEP);
		for (i = 0; i < oldndevs; i++)
			VERIFY(nvlist_dup(olddevs[i], &newdevs[i],
			    KM_SLEEP) == 0);
		for (i = 0; i < ndevs; i++)
			VERIFY(nvlist_dup(devs[i], &newdevs[i + oldndevs],
			    KM_SLEEP) == 0);

		VERIFY(nvlist_remove(sav->sav_config, config,
		    DATA_TYPE_NVLIST_ARRAY) == 0);

		VERIFY(nvlist_add_nvlist_array(sav->sav_config,
		    config, newdevs, ndevs + oldndevs) == 0);
		for (i = 0; i < oldndevs + ndevs; i++)
			nvlist_free(newdevs[i]);
		kmem_free(newdevs, (oldndevs + ndevs) * sizeof (void *));
	} else {
		/*
		 * Generate a new dev list.
		 */
		VERIFY(nvlist_alloc(&sav->sav_config, NV_UNIQUE_NAME,
		    KM_SLEEP) == 0);
		VERIFY(nvlist_add_nvlist_array(sav->sav_config, config,
		    devs, ndevs) == 0);
	}
}

/*
 * Stop and drop level 2 ARC devices
 */
void
spa_l2cache_drop(spa_t *spa)
{
	vdev_t *vd;
	int i;
	spa_aux_vdev_t *sav = &spa->spa_l2cache;

	for (i = 0; i < sav->sav_count; i++) {
		uint64_t pool;

		vd = sav->sav_vdevs[i];
		ASSERT(vd != NULL);

		if ((spa_mode & FWRITE) &&
		    spa_l2cache_exists(vd->vdev_guid, &pool) && pool != 0ULL &&
		    l2arc_vdev_present(vd)) {
			l2arc_remove_vdev(vd);
		}
		if (vd->vdev_isl2cache)
			spa_l2cache_remove(vd);
		vdev_clear_stats(vd);
		(void) vdev_close(vd);
	}
}

/*
 * Pool Creation
 */
int
spa_create(const char *pool, nvlist_t *nvroot, nvlist_t *props,
    const char *history_str, nvlist_t *zplprops)
{
	spa_t *spa;
	char *altroot = NULL;
	vdev_t *rvd;
	dsl_pool_t *dp;
	dmu_tx_t *tx;
	int c, error = 0;
	uint64_t txg = TXG_INITIAL;
	nvlist_t **spares, **l2cache;
	uint_t nspares, nl2cache;
	uint64_t version;

	/*
	 * If this pool already exists, return failure.
	 */
	mutex_enter(&spa_namespace_lock);
	if (spa_lookup(pool) != NULL) {
		mutex_exit(&spa_namespace_lock);
		return (EEXIST);
	}

	/*
	 * Allocate a new spa_t structure.
	 */
	(void) nvlist_lookup_string(props,
	    zpool_prop_to_name(ZPOOL_PROP_ALTROOT), &altroot);
	spa = spa_add(pool, altroot);
	spa_activate(spa);

	spa->spa_uberblock.ub_txg = txg - 1;

	if (props && (error = spa_prop_validate(spa, props))) {
		spa_unload(spa);
		spa_deactivate(spa);
		spa_remove(spa);
		mutex_exit(&spa_namespace_lock);
		return (error);
	}

	if (nvlist_lookup_uint64(props, zpool_prop_to_name(ZPOOL_PROP_VERSION),
	    &version) != 0)
		version = SPA_VERSION;
	ASSERT(version <= SPA_VERSION);
	spa->spa_uberblock.ub_version = version;
	spa->spa_ubsync = spa->spa_uberblock;

	/*
	 * Create the root vdev.
	 */
	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);

	error = spa_config_parse(spa, &rvd, nvroot, NULL, 0, VDEV_ALLOC_ADD);

	ASSERT(error != 0 || rvd != NULL);
	ASSERT(error != 0 || spa->spa_root_vdev == rvd);

	if (error == 0 && !zfs_allocatable_devs(nvroot))
		error = EINVAL;

	if (error == 0 &&
	    (error = vdev_create(rvd, txg, B_FALSE)) == 0 &&
	    (error = spa_validate_aux(spa, nvroot, txg,
	    VDEV_ALLOC_ADD)) == 0) {
		for (c = 0; c < rvd->vdev_children; c++)
			vdev_init(rvd->vdev_child[c], txg);
		vdev_config_dirty(rvd);
	}

	spa_config_exit(spa, SCL_ALL, FTAG);

	if (error != 0) {
		spa_unload(spa);
		spa_deactivate(spa);
		spa_remove(spa);
		mutex_exit(&spa_namespace_lock);
		return (error);
	}

	/*
	 * Get the list of spares, if specified.
	 */
	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
	    &spares, &nspares) == 0) {
		VERIFY(nvlist_alloc(&spa->spa_spares.sav_config, NV_UNIQUE_NAME,
		    KM_SLEEP) == 0);
		VERIFY(nvlist_add_nvlist_array(spa->spa_spares.sav_config,
		    ZPOOL_CONFIG_SPARES, spares, nspares) == 0);
		spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
		spa_load_spares(spa);
		spa_config_exit(spa, SCL_ALL, FTAG);
		spa->spa_spares.sav_sync = B_TRUE;
	}

	/*
	 * Get the list of level 2 cache devices, if specified.
	 */
	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
	    &l2cache, &nl2cache) == 0) {
		VERIFY(nvlist_alloc(&spa->spa_l2cache.sav_config,
		    NV_UNIQUE_NAME, KM_SLEEP) == 0);
		VERIFY(nvlist_add_nvlist_array(spa->spa_l2cache.sav_config,
		    ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache) == 0);
		spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
		spa_load_l2cache(spa);
		spa_config_exit(spa, SCL_ALL, FTAG);
		spa->spa_l2cache.sav_sync = B_TRUE;
	}

	spa->spa_dsl_pool = dp = dsl_pool_create(spa, zplprops, txg);
	spa->spa_meta_objset = dp->dp_meta_objset;

	tx = dmu_tx_create_assigned(dp, txg);

	/*
	 * Create the pool config object.
	 */
	spa->spa_config_object = dmu_object_alloc(spa->spa_meta_objset,
	    DMU_OT_PACKED_NVLIST, SPA_CONFIG_BLOCKSIZE,
	    DMU_OT_PACKED_NVLIST_SIZE, sizeof (uint64_t), tx);

	if (zap_add(spa->spa_meta_objset,
	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_CONFIG,
	    sizeof (uint64_t), 1, &spa->spa_config_object, tx) != 0) {
		cmn_err(CE_PANIC, "failed to add pool config");
	}

	/* Newly created pools with the right version are always deflated. */
	if (version >= SPA_VERSION_RAIDZ_DEFLATE) {
		spa->spa_deflate = TRUE;
		if (zap_add(spa->spa_meta_objset,
		    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_DEFLATE,
		    sizeof (uint64_t), 1, &spa->spa_deflate, tx) != 0) {
			cmn_err(CE_PANIC, "failed to add deflate");
		}
	}

	/*
	 * Create the deferred-free bplist object.  Turn off compression
	 * because sync-to-convergence takes longer if the blocksize
	 * keeps changing.
	 */
	spa->spa_sync_bplist_obj = bplist_create(spa->spa_meta_objset,
	    1 << 14, tx);
	dmu_object_set_compress(spa->spa_meta_objset, spa->spa_sync_bplist_obj,
	    ZIO_COMPRESS_OFF, tx);

	if (zap_add(spa->spa_meta_objset,
	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_SYNC_BPLIST,
	    sizeof (uint64_t), 1, &spa->spa_sync_bplist_obj, tx) != 0) {
		cmn_err(CE_PANIC, "failed to add bplist");
	}

	/*
	 * Create the pool's history object.
	 */
	if (version >= SPA_VERSION_ZPOOL_HISTORY)
		spa_history_create_obj(spa, tx);

	/*
	 * Set pool properties.
	 */
	spa->spa_bootfs = zpool_prop_default_numeric(ZPOOL_PROP_BOOTFS);
	spa->spa_delegation = zpool_prop_default_numeric(ZPOOL_PROP_DELEGATION);
	spa->spa_failmode = zpool_prop_default_numeric(ZPOOL_PROP_FAILUREMODE);
	if (props)
		spa_sync_props(spa, props, CRED(), tx);

	dmu_tx_commit(tx);

	spa->spa_sync_on = B_TRUE;
	txg_sync_start(spa->spa_dsl_pool);

	/*
	 * We explicitly wait for the first transaction to complete so that our
	 * bean counters are appropriately updated.
2069 */ 2070 txg_wait_synced(spa->spa_dsl_pool, txg); 2071 2072 spa_config_sync(spa, B_FALSE, B_TRUE); 2073 2074 if (version >= SPA_VERSION_ZPOOL_HISTORY && history_str != NULL) 2075 (void) spa_history_log(spa, history_str, LOG_CMD_POOL_CREATE); 2076 2077 mutex_exit(&spa_namespace_lock); 2078 2079 spa->spa_minref = refcount_count(&spa->spa_refcount); 2080 2081 return (0); 2082 } 2083 2084 /* 2085 * Import the given pool into the system. We set up the necessary spa_t and 2086 * then call spa_load() to do the dirty work. 2087 */ 2088 static int 2089 spa_import_common(const char *pool, nvlist_t *config, nvlist_t *props, 2090 boolean_t isroot, boolean_t allowfaulted) 2091 { 2092 spa_t *spa; 2093 char *altroot = NULL; 2094 int error, loaderr; 2095 nvlist_t *nvroot; 2096 nvlist_t **spares, **l2cache; 2097 uint_t nspares, nl2cache; 2098 2099 /* 2100 * If a pool with this name exists, return failure. 2101 */ 2102 mutex_enter(&spa_namespace_lock); 2103 if ((spa = spa_lookup(pool)) != NULL) { 2104 if (isroot) { 2105 /* 2106 * Remove the existing root pool from the 2107 * namespace so that we can replace it with 2108 * the correct config we just read in. 2109 */ 2110 ASSERT(spa->spa_state == POOL_STATE_UNINITIALIZED); 2111 spa_remove(spa); 2112 } else { 2113 mutex_exit(&spa_namespace_lock); 2114 return (EEXIST); 2115 } 2116 } 2117 2118 /* 2119 * Create and initialize the spa structure. 2120 */ 2121 (void) nvlist_lookup_string(props, 2122 zpool_prop_to_name(ZPOOL_PROP_ALTROOT), &altroot); 2123 spa = spa_add(pool, altroot); 2124 spa_activate(spa); 2125 2126 if (allowfaulted) 2127 spa->spa_import_faulted = B_TRUE; 2128 spa->spa_is_root = isroot; 2129 2130 /* 2131 * Pass off the heavy lifting to spa_load(). 2132 * Pass TRUE for mosconfig (unless this is a root pool) because 2133 * the user-supplied config is actually the one to trust when 2134 * doing an import. 2135 */ 2136 loaderr = error = spa_load(spa, config, SPA_LOAD_IMPORT, !isroot); 2137 2138 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 2139 /* 2140 * Toss any existing sparelist, as it doesn't have any validity anymore, 2141 * and conflicts with spa_has_spare(). 2142 */ 2143 if (!isroot && spa->spa_spares.sav_config) { 2144 nvlist_free(spa->spa_spares.sav_config); 2145 spa->spa_spares.sav_config = NULL; 2146 spa_load_spares(spa); 2147 } 2148 if (!isroot && spa->spa_l2cache.sav_config) { 2149 nvlist_free(spa->spa_l2cache.sav_config); 2150 spa->spa_l2cache.sav_config = NULL; 2151 spa_load_l2cache(spa); 2152 } 2153 2154 VERIFY(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, 2155 &nvroot) == 0); 2156 if (error == 0) 2157 error = spa_validate_aux(spa, nvroot, -1ULL, VDEV_ALLOC_SPARE); 2158 if (error == 0) 2159 error = spa_validate_aux(spa, nvroot, -1ULL, 2160 VDEV_ALLOC_L2CACHE); 2161 spa_config_exit(spa, SCL_ALL, FTAG); 2162 2163 if (error != 0 || (props && (error = spa_prop_set(spa, props)))) { 2164 if (loaderr != 0 && loaderr != EINVAL && allowfaulted) { 2165 /* 2166 * If we failed to load the pool, but 'allowfaulted' is 2167 * set, then manually set the config as if the config 2168 * passed in was specified in the cache file. 
2169 */ 2170 error = 0; 2171 spa->spa_import_faulted = B_FALSE; 2172 if (spa->spa_config == NULL) 2173 spa->spa_config = spa_config_generate(spa, 2174 NULL, -1ULL, B_TRUE); 2175 spa_unload(spa); 2176 spa_deactivate(spa); 2177 spa_config_sync(spa, B_FALSE, B_TRUE); 2178 } else { 2179 spa_unload(spa); 2180 spa_deactivate(spa); 2181 spa_remove(spa); 2182 } 2183 mutex_exit(&spa_namespace_lock); 2184 return (error); 2185 } 2186 2187 /* 2188 * Override any spares and level 2 cache devices as specified by 2189 * the user, as these may have correct device names/devids, etc. 2190 */ 2191 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES, 2192 &spares, &nspares) == 0) { 2193 if (spa->spa_spares.sav_config) 2194 VERIFY(nvlist_remove(spa->spa_spares.sav_config, 2195 ZPOOL_CONFIG_SPARES, DATA_TYPE_NVLIST_ARRAY) == 0); 2196 else 2197 VERIFY(nvlist_alloc(&spa->spa_spares.sav_config, 2198 NV_UNIQUE_NAME, KM_SLEEP) == 0); 2199 VERIFY(nvlist_add_nvlist_array(spa->spa_spares.sav_config, 2200 ZPOOL_CONFIG_SPARES, spares, nspares) == 0); 2201 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 2202 spa_load_spares(spa); 2203 spa_config_exit(spa, SCL_ALL, FTAG); 2204 spa->spa_spares.sav_sync = B_TRUE; 2205 } 2206 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE, 2207 &l2cache, &nl2cache) == 0) { 2208 if (spa->spa_l2cache.sav_config) 2209 VERIFY(nvlist_remove(spa->spa_l2cache.sav_config, 2210 ZPOOL_CONFIG_L2CACHE, DATA_TYPE_NVLIST_ARRAY) == 0); 2211 else 2212 VERIFY(nvlist_alloc(&spa->spa_l2cache.sav_config, 2213 NV_UNIQUE_NAME, KM_SLEEP) == 0); 2214 VERIFY(nvlist_add_nvlist_array(spa->spa_l2cache.sav_config, 2215 ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache) == 0); 2216 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 2217 spa_load_l2cache(spa); 2218 spa_config_exit(spa, SCL_ALL, FTAG); 2219 spa->spa_l2cache.sav_sync = B_TRUE; 2220 } 2221 2222 if (spa_mode & FWRITE) { 2223 /* 2224 * Update the config cache to include the newly-imported pool. 2225 */ 2226 spa_config_update_common(spa, SPA_CONFIG_UPDATE_POOL, isroot); 2227 } 2228 2229 spa->spa_import_faulted = B_FALSE; 2230 mutex_exit(&spa_namespace_lock); 2231 2232 return (0); 2233 } 2234 2235 #ifdef _KERNEL 2236 /* 2237 * Build a "root" vdev for a top level vdev read in from a rootpool 2238 * device label. 2239 */ 2240 static void 2241 spa_build_rootpool_config(nvlist_t *config) 2242 { 2243 nvlist_t *nvtop, *nvroot; 2244 uint64_t pgid; 2245 2246 /* 2247 * Add this top-level vdev to the child array. 2248 */ 2249 VERIFY(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &nvtop) 2250 == 0); 2251 VERIFY(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID, &pgid) 2252 == 0); 2253 2254 /* 2255 * Put this pool's top-level vdevs into a root vdev. 2256 */ 2257 VERIFY(nvlist_alloc(&nvroot, NV_UNIQUE_NAME, KM_SLEEP) == 0); 2258 VERIFY(nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE, VDEV_TYPE_ROOT) 2259 == 0); 2260 VERIFY(nvlist_add_uint64(nvroot, ZPOOL_CONFIG_ID, 0ULL) == 0); 2261 VERIFY(nvlist_add_uint64(nvroot, ZPOOL_CONFIG_GUID, pgid) == 0); 2262 VERIFY(nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN, 2263 &nvtop, 1) == 0); 2264 2265 /* 2266 * Replace the existing vdev_tree with the new root vdev in 2267 * this pool's configuration (remove the old, add the new). 2268 */ 2269 VERIFY(nvlist_add_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, nvroot) == 0); 2270 nvlist_free(nvroot); 2271 } 2272 2273 /* 2274 * Get the root pool information from the root disk, then import the root pool 2275 * during the system boot up time. 
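 *
 * As a summary added for clarity (not in the original comment), the
 * boot-time call sequence implemented below is roughly:
 *
 *	vdev_disk_read_rootlabel()	read the label off the boot device
 *	spa_check_rootconf()		pull the config and its txg out
 *	spa_get_rootconf()		for a mirrored root, pick the side
 *					with the most recent txg
 *	spa_build_rootpool_config()	wrap the top-level vdev in a "root"
 *					vdev
 *	spa_import_rootpool()		import it via spa_import_common()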
2276 */ 2277 extern int vdev_disk_read_rootlabel(char *, char *, nvlist_t **); 2278 2279 int 2280 spa_check_rootconf(char *devpath, char *devid, nvlist_t **bestconf, 2281 uint64_t *besttxg) 2282 { 2283 nvlist_t *config; 2284 uint64_t txg; 2285 int error; 2286 2287 if (error = vdev_disk_read_rootlabel(devpath, devid, &config)) 2288 return (error); 2289 2290 VERIFY(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_TXG, &txg) == 0); 2291 2292 if (bestconf != NULL) 2293 *bestconf = config; 2294 else 2295 nvlist_free(config); 2296 *besttxg = txg; 2297 return (0); 2298 } 2299 2300 boolean_t 2301 spa_rootdev_validate(nvlist_t *nv) 2302 { 2303 uint64_t ival; 2304 2305 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_OFFLINE, &ival) == 0 || 2306 nvlist_lookup_uint64(nv, ZPOOL_CONFIG_FAULTED, &ival) == 0 || 2307 nvlist_lookup_uint64(nv, ZPOOL_CONFIG_REMOVED, &ival) == 0) 2308 return (B_FALSE); 2309 2310 return (B_TRUE); 2311 } 2312 2313 2314 /* 2315 * Given the boot device's physical path or devid, check if the device 2316 * is in a valid state. If so, return the configuration from the vdev 2317 * label. 2318 */ 2319 int 2320 spa_get_rootconf(char *devpath, char *devid, nvlist_t **bestconf) 2321 { 2322 nvlist_t *conf = NULL; 2323 uint64_t txg = 0; 2324 nvlist_t *nvtop, **child; 2325 char *type; 2326 char *bootpath = NULL; 2327 uint_t children, c; 2328 char *tmp; 2329 int error; 2330 2331 if (devpath && ((tmp = strchr(devpath, ' ')) != NULL)) 2332 *tmp = '\0'; 2333 if (error = spa_check_rootconf(devpath, devid, &conf, &txg)) { 2334 cmn_err(CE_NOTE, "error reading device label"); 2335 return (error); 2336 } 2337 if (txg == 0) { 2338 cmn_err(CE_NOTE, "this device is detached"); 2339 nvlist_free(conf); 2340 return (EINVAL); 2341 } 2342 2343 VERIFY(nvlist_lookup_nvlist(conf, ZPOOL_CONFIG_VDEV_TREE, 2344 &nvtop) == 0); 2345 VERIFY(nvlist_lookup_string(nvtop, ZPOOL_CONFIG_TYPE, &type) == 0); 2346 2347 if (strcmp(type, VDEV_TYPE_DISK) == 0) { 2348 if (spa_rootdev_validate(nvtop)) { 2349 goto out; 2350 } else { 2351 nvlist_free(conf); 2352 return (EINVAL); 2353 } 2354 } 2355 2356 ASSERT(strcmp(type, VDEV_TYPE_MIRROR) == 0); 2357 2358 VERIFY(nvlist_lookup_nvlist_array(nvtop, ZPOOL_CONFIG_CHILDREN, 2359 &child, &children) == 0); 2360 2361 /* 2362 * Go thru vdevs in the mirror to see if the given device 2363 * has the most recent txg. Only the device with the most 2364 * recent txg has valid information and should be booted. 2365 */ 2366 for (c = 0; c < children; c++) { 2367 char *cdevid, *cpath; 2368 uint64_t tmptxg; 2369 2370 if (nvlist_lookup_string(child[c], ZPOOL_CONFIG_PHYS_PATH, 2371 &cpath) != 0) 2372 return (EINVAL); 2373 if (nvlist_lookup_string(child[c], ZPOOL_CONFIG_DEVID, 2374 &cdevid) != 0) 2375 return (EINVAL); 2376 if ((spa_check_rootconf(cpath, cdevid, NULL, 2377 &tmptxg) == 0) && (tmptxg > txg)) { 2378 txg = tmptxg; 2379 VERIFY(nvlist_lookup_string(child[c], 2380 ZPOOL_CONFIG_PATH, &bootpath) == 0); 2381 } 2382 } 2383 2384 /* Does the best device match the one we've booted from? */ 2385 if (bootpath) { 2386 cmn_err(CE_NOTE, "try booting from '%s'", bootpath); 2387 return (EINVAL); 2388 } 2389 out: 2390 *bestconf = conf; 2391 return (0); 2392 } 2393 2394 /* 2395 * Import a root pool. 2396 * 2397 * For x86. devpath_list will consist of devid and/or physpath name of 2398 * the vdev (e.g. "id1,sd@SSEAGATE..." or "/pci@1f,0/ide@d/disk@0,0:a"). 2399 * The GRUB "findroot" command will return the vdev we should boot. 
2400 * 2401 * For Sparc, devpath_list consists the physpath name of the booting device 2402 * no matter the rootpool is a single device pool or a mirrored pool. 2403 * e.g. 2404 * "/pci@1f,0/ide@d/disk@0,0:a" 2405 */ 2406 int 2407 spa_import_rootpool(char *devpath, char *devid) 2408 { 2409 nvlist_t *conf = NULL; 2410 char *pname; 2411 int error; 2412 2413 /* 2414 * Get the vdev pathname and configuation from the most 2415 * recently updated vdev (highest txg). 2416 */ 2417 if (error = spa_get_rootconf(devpath, devid, &conf)) 2418 goto msg_out; 2419 2420 /* 2421 * Add type "root" vdev to the config. 2422 */ 2423 spa_build_rootpool_config(conf); 2424 2425 VERIFY(nvlist_lookup_string(conf, ZPOOL_CONFIG_POOL_NAME, &pname) == 0); 2426 2427 /* 2428 * We specify 'allowfaulted' for this to be treated like spa_open() 2429 * instead of spa_import(). This prevents us from marking vdevs as 2430 * persistently unavailable, and generates FMA ereports as if it were a 2431 * pool open, not import. 2432 */ 2433 error = spa_import_common(pname, conf, NULL, B_TRUE, B_TRUE); 2434 ASSERT(error != EEXIST); 2435 2436 nvlist_free(conf); 2437 return (error); 2438 2439 msg_out: 2440 cmn_err(CE_NOTE, "\n" 2441 " *************************************************** \n" 2442 " * This device is not bootable! * \n" 2443 " * It is either offlined or detached or faulted. * \n" 2444 " * Please try to boot from a different device. * \n" 2445 " *************************************************** "); 2446 2447 return (error); 2448 } 2449 #endif 2450 2451 /* 2452 * Import a non-root pool into the system. 2453 */ 2454 int 2455 spa_import(const char *pool, nvlist_t *config, nvlist_t *props) 2456 { 2457 return (spa_import_common(pool, config, props, B_FALSE, B_FALSE)); 2458 } 2459 2460 int 2461 spa_import_faulted(const char *pool, nvlist_t *config, nvlist_t *props) 2462 { 2463 return (spa_import_common(pool, config, props, B_FALSE, B_TRUE)); 2464 } 2465 2466 2467 /* 2468 * This (illegal) pool name is used when temporarily importing a spa_t in order 2469 * to get the vdev stats associated with the imported devices. 2470 */ 2471 #define TRYIMPORT_NAME "$import" 2472 2473 nvlist_t * 2474 spa_tryimport(nvlist_t *tryconfig) 2475 { 2476 nvlist_t *config = NULL; 2477 char *poolname; 2478 spa_t *spa; 2479 uint64_t state; 2480 2481 if (nvlist_lookup_string(tryconfig, ZPOOL_CONFIG_POOL_NAME, &poolname)) 2482 return (NULL); 2483 2484 if (nvlist_lookup_uint64(tryconfig, ZPOOL_CONFIG_POOL_STATE, &state)) 2485 return (NULL); 2486 2487 /* 2488 * Create and initialize the spa structure. 2489 */ 2490 mutex_enter(&spa_namespace_lock); 2491 spa = spa_add(TRYIMPORT_NAME, NULL); 2492 spa_activate(spa); 2493 2494 /* 2495 * Pass off the heavy lifting to spa_load(). 2496 * Pass TRUE for mosconfig because the user-supplied config 2497 * is actually the one to trust when doing an import. 2498 */ 2499 (void) spa_load(spa, tryconfig, SPA_LOAD_TRYIMPORT, B_TRUE); 2500 2501 /* 2502 * If 'tryconfig' was at least parsable, return the current config. 
2503 */ 2504 if (spa->spa_root_vdev != NULL) { 2505 config = spa_config_generate(spa, NULL, -1ULL, B_TRUE); 2506 VERIFY(nvlist_add_string(config, ZPOOL_CONFIG_POOL_NAME, 2507 poolname) == 0); 2508 VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_POOL_STATE, 2509 state) == 0); 2510 VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_TIMESTAMP, 2511 spa->spa_uberblock.ub_timestamp) == 0); 2512 2513 /* 2514 * If the bootfs property exists on this pool then we 2515 * copy it out so that external consumers can tell which 2516 * pools are bootable. 2517 */ 2518 if (spa->spa_bootfs) { 2519 char *tmpname = kmem_alloc(MAXPATHLEN, KM_SLEEP); 2520 2521 /* 2522 * We have to play games with the name since the 2523 * pool was opened as TRYIMPORT_NAME. 2524 */ 2525 if (dsl_dsobj_to_dsname(spa_name(spa), 2526 spa->spa_bootfs, tmpname) == 0) { 2527 char *cp; 2528 char *dsname = kmem_alloc(MAXPATHLEN, KM_SLEEP); 2529 2530 cp = strchr(tmpname, '/'); 2531 if (cp == NULL) { 2532 (void) strlcpy(dsname, tmpname, 2533 MAXPATHLEN); 2534 } else { 2535 (void) snprintf(dsname, MAXPATHLEN, 2536 "%s/%s", poolname, ++cp); 2537 } 2538 VERIFY(nvlist_add_string(config, 2539 ZPOOL_CONFIG_BOOTFS, dsname) == 0); 2540 kmem_free(dsname, MAXPATHLEN); 2541 } 2542 kmem_free(tmpname, MAXPATHLEN); 2543 } 2544 2545 /* 2546 * Add the list of hot spares and level 2 cache devices. 2547 */ 2548 spa_add_spares(spa, config); 2549 spa_add_l2cache(spa, config); 2550 } 2551 2552 spa_unload(spa); 2553 spa_deactivate(spa); 2554 spa_remove(spa); 2555 mutex_exit(&spa_namespace_lock); 2556 2557 return (config); 2558 } 2559 2560 /* 2561 * Pool export/destroy 2562 * 2563 * The act of destroying or exporting a pool is very simple. We make sure there 2564 * is no more pending I/O and any references to the pool are gone. Then, we 2565 * update the pool state and sync all the labels to disk, removing the 2566 * configuration from the cache afterwards. If the 'hardforce' flag is set, then 2567 * we don't sync the labels or remove the configuration cache. 2568 */ 2569 static int 2570 spa_export_common(char *pool, int new_state, nvlist_t **oldconfig, 2571 boolean_t force, boolean_t hardforce) 2572 { 2573 spa_t *spa; 2574 2575 if (oldconfig) 2576 *oldconfig = NULL; 2577 2578 if (!(spa_mode & FWRITE)) 2579 return (EROFS); 2580 2581 mutex_enter(&spa_namespace_lock); 2582 if ((spa = spa_lookup(pool)) == NULL) { 2583 mutex_exit(&spa_namespace_lock); 2584 return (ENOENT); 2585 } 2586 2587 /* 2588 * Put a hold on the pool, drop the namespace lock, stop async tasks, 2589 * reacquire the namespace lock, and see if we can export. 2590 */ 2591 spa_open_ref(spa, FTAG); 2592 mutex_exit(&spa_namespace_lock); 2593 spa_async_suspend(spa); 2594 mutex_enter(&spa_namespace_lock); 2595 spa_close(spa, FTAG); 2596 2597 /* 2598 * The pool will be in core if it's openable, 2599 * in which case we can modify its state. 2600 */ 2601 if (spa->spa_state != POOL_STATE_UNINITIALIZED && spa->spa_sync_on) { 2602 /* 2603 * Objsets may be open only because they're dirty, so we 2604 * have to force it to sync before checking spa_refcnt. 2605 */ 2606 txg_wait_synced(spa->spa_dsl_pool, 0); 2607 2608 /* 2609 * A pool cannot be exported or destroyed if there are active 2610 * references. If we are resetting a pool, allow references by 2611 * fault injection handlers. 
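 *
 * (Illustration added for clarity, not part of the original comment;
 * the pool name is hypothetical:) the public entry points below differ
 * only in the pool state they leave behind, e.g.
 *
 *	spa_export("tank", &oldconfig, B_FALSE, B_FALSE);	zpool export
 *	spa_destroy("tank");					zpool destroy
 *	spa_reset("tank");					fault injection
 *
 * all of which funnel into this common routine.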
2612 */ 2613 if (!spa_refcount_zero(spa) || 2614 (spa->spa_inject_ref != 0 && 2615 new_state != POOL_STATE_UNINITIALIZED)) { 2616 spa_async_resume(spa); 2617 mutex_exit(&spa_namespace_lock); 2618 return (EBUSY); 2619 } 2620 2621 /* 2622 * A pool cannot be exported if it has an active shared spare. 2623 * This is to prevent other pools stealing the active spare 2624 * from an exported pool. At user's own will, such pool can 2625 * be forcedly exported. 2626 */ 2627 if (!force && new_state == POOL_STATE_EXPORTED && 2628 spa_has_active_shared_spare(spa)) { 2629 spa_async_resume(spa); 2630 mutex_exit(&spa_namespace_lock); 2631 return (EXDEV); 2632 } 2633 2634 /* 2635 * We want this to be reflected on every label, 2636 * so mark them all dirty. spa_unload() will do the 2637 * final sync that pushes these changes out. 2638 */ 2639 if (new_state != POOL_STATE_UNINITIALIZED && !hardforce) { 2640 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 2641 spa->spa_state = new_state; 2642 spa->spa_final_txg = spa_last_synced_txg(spa) + 1; 2643 vdev_config_dirty(spa->spa_root_vdev); 2644 spa_config_exit(spa, SCL_ALL, FTAG); 2645 } 2646 } 2647 2648 spa_event_notify(spa, NULL, ESC_ZFS_POOL_DESTROY); 2649 2650 if (spa->spa_state != POOL_STATE_UNINITIALIZED) { 2651 spa_unload(spa); 2652 spa_deactivate(spa); 2653 } 2654 2655 if (oldconfig && spa->spa_config) 2656 VERIFY(nvlist_dup(spa->spa_config, oldconfig, 0) == 0); 2657 2658 if (new_state != POOL_STATE_UNINITIALIZED) { 2659 if (!hardforce) 2660 spa_config_sync(spa, B_TRUE, B_TRUE); 2661 spa_remove(spa); 2662 } 2663 mutex_exit(&spa_namespace_lock); 2664 2665 return (0); 2666 } 2667 2668 /* 2669 * Destroy a storage pool. 2670 */ 2671 int 2672 spa_destroy(char *pool) 2673 { 2674 return (spa_export_common(pool, POOL_STATE_DESTROYED, NULL, 2675 B_FALSE, B_FALSE)); 2676 } 2677 2678 /* 2679 * Export a storage pool. 2680 */ 2681 int 2682 spa_export(char *pool, nvlist_t **oldconfig, boolean_t force, 2683 boolean_t hardforce) 2684 { 2685 return (spa_export_common(pool, POOL_STATE_EXPORTED, oldconfig, 2686 force, hardforce)); 2687 } 2688 2689 /* 2690 * Similar to spa_export(), this unloads the spa_t without actually removing it 2691 * from the namespace in any way. 2692 */ 2693 int 2694 spa_reset(char *pool) 2695 { 2696 return (spa_export_common(pool, POOL_STATE_UNINITIALIZED, NULL, 2697 B_FALSE, B_FALSE)); 2698 } 2699 2700 /* 2701 * ========================================================================== 2702 * Device manipulation 2703 * ========================================================================== 2704 */ 2705 2706 /* 2707 * Add a device to a storage pool. 
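 *
 * For illustration only (hypothetical caller, not part of the original
 * file): 'nvroot' has the same shape as at pool-creation time, so a
 * consumer adding devices looks roughly like
 *
 *	spa_t *spa;
 *	int error;
 *
 *	if ((error = spa_open("tank", &spa, FTAG)) == 0) {
 *		error = spa_vdev_add(spa, nvroot);
 *		spa_close(spa, FTAG);
 *	}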
2708 */ 2709 int 2710 spa_vdev_add(spa_t *spa, nvlist_t *nvroot) 2711 { 2712 uint64_t txg; 2713 int c, error; 2714 vdev_t *rvd = spa->spa_root_vdev; 2715 vdev_t *vd, *tvd; 2716 nvlist_t **spares, **l2cache; 2717 uint_t nspares, nl2cache; 2718 2719 txg = spa_vdev_enter(spa); 2720 2721 if ((error = spa_config_parse(spa, &vd, nvroot, NULL, 0, 2722 VDEV_ALLOC_ADD)) != 0) 2723 return (spa_vdev_exit(spa, NULL, txg, error)); 2724 2725 spa->spa_pending_vdev = vd; /* spa_vdev_exit() will clear this */ 2726 2727 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES, &spares, 2728 &nspares) != 0) 2729 nspares = 0; 2730 2731 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE, &l2cache, 2732 &nl2cache) != 0) 2733 nl2cache = 0; 2734 2735 if (vd->vdev_children == 0 && nspares == 0 && nl2cache == 0) 2736 return (spa_vdev_exit(spa, vd, txg, EINVAL)); 2737 2738 if (vd->vdev_children != 0 && 2739 (error = vdev_create(vd, txg, B_FALSE)) != 0) 2740 return (spa_vdev_exit(spa, vd, txg, error)); 2741 2742 /* 2743 * We must validate the spares and l2cache devices after checking the 2744 * children. Otherwise, vdev_inuse() will blindly overwrite the spare. 2745 */ 2746 if ((error = spa_validate_aux(spa, nvroot, txg, VDEV_ALLOC_ADD)) != 0) 2747 return (spa_vdev_exit(spa, vd, txg, error)); 2748 2749 /* 2750 * Transfer each new top-level vdev from vd to rvd. 2751 */ 2752 for (c = 0; c < vd->vdev_children; c++) { 2753 tvd = vd->vdev_child[c]; 2754 vdev_remove_child(vd, tvd); 2755 tvd->vdev_id = rvd->vdev_children; 2756 vdev_add_child(rvd, tvd); 2757 vdev_config_dirty(tvd); 2758 } 2759 2760 if (nspares != 0) { 2761 spa_set_aux_vdevs(&spa->spa_spares, spares, nspares, 2762 ZPOOL_CONFIG_SPARES); 2763 spa_load_spares(spa); 2764 spa->spa_spares.sav_sync = B_TRUE; 2765 } 2766 2767 if (nl2cache != 0) { 2768 spa_set_aux_vdevs(&spa->spa_l2cache, l2cache, nl2cache, 2769 ZPOOL_CONFIG_L2CACHE); 2770 spa_load_l2cache(spa); 2771 spa->spa_l2cache.sav_sync = B_TRUE; 2772 } 2773 2774 /* 2775 * We have to be careful when adding new vdevs to an existing pool. 2776 * If other threads start allocating from these vdevs before we 2777 * sync the config cache, and we lose power, then upon reboot we may 2778 * fail to open the pool because there are DVAs that the config cache 2779 * can't translate. Therefore, we first add the vdevs without 2780 * initializing metaslabs; sync the config cache (via spa_vdev_exit()); 2781 * and then let spa_config_update() initialize the new metaslabs. 2782 * 2783 * spa_load() checks for added-but-not-initialized vdevs, so that 2784 * if we lose power at any point in this sequence, the remaining 2785 * steps will be completed the next time we load the pool. 2786 */ 2787 (void) spa_vdev_exit(spa, vd, txg, 0); 2788 2789 mutex_enter(&spa_namespace_lock); 2790 spa_config_update(spa, SPA_CONFIG_UPDATE_POOL); 2791 mutex_exit(&spa_namespace_lock); 2792 2793 return (0); 2794 } 2795 2796 /* 2797 * Attach a device to a mirror. The arguments are the path to any device 2798 * in the mirror, and the nvroot for the new device. If the path specifies 2799 * a device that is not mirrored, we automatically insert the mirror vdev. 
2800 * 2801 * If 'replacing' is specified, the new device is intended to replace the 2802 * existing device; in this case the two devices are made into their own 2803 * mirror using the 'replacing' vdev, which is functionally identical to 2804 * the mirror vdev (it actually reuses all the same ops) but has a few 2805 * extra rules: you can't attach to it after it's been created, and upon 2806 * completion of resilvering, the first disk (the one being replaced) 2807 * is automatically detached. 2808 */ 2809 int 2810 spa_vdev_attach(spa_t *spa, uint64_t guid, nvlist_t *nvroot, int replacing) 2811 { 2812 uint64_t txg, open_txg; 2813 vdev_t *rvd = spa->spa_root_vdev; 2814 vdev_t *oldvd, *newvd, *newrootvd, *pvd, *tvd; 2815 vdev_ops_t *pvops; 2816 dmu_tx_t *tx; 2817 char *oldvdpath, *newvdpath; 2818 int newvd_isspare; 2819 int error; 2820 2821 txg = spa_vdev_enter(spa); 2822 2823 oldvd = spa_lookup_by_guid(spa, guid, B_FALSE); 2824 2825 if (oldvd == NULL) 2826 return (spa_vdev_exit(spa, NULL, txg, ENODEV)); 2827 2828 if (!oldvd->vdev_ops->vdev_op_leaf) 2829 return (spa_vdev_exit(spa, NULL, txg, ENOTSUP)); 2830 2831 pvd = oldvd->vdev_parent; 2832 2833 if ((error = spa_config_parse(spa, &newrootvd, nvroot, NULL, 0, 2834 VDEV_ALLOC_ADD)) != 0) 2835 return (spa_vdev_exit(spa, NULL, txg, EINVAL)); 2836 2837 if (newrootvd->vdev_children != 1) 2838 return (spa_vdev_exit(spa, newrootvd, txg, EINVAL)); 2839 2840 newvd = newrootvd->vdev_child[0]; 2841 2842 if (!newvd->vdev_ops->vdev_op_leaf) 2843 return (spa_vdev_exit(spa, newrootvd, txg, EINVAL)); 2844 2845 if ((error = vdev_create(newrootvd, txg, replacing)) != 0) 2846 return (spa_vdev_exit(spa, newrootvd, txg, error)); 2847 2848 /* 2849 * Spares can't replace logs 2850 */ 2851 if (oldvd->vdev_top->vdev_islog && newvd->vdev_isspare) 2852 return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP)); 2853 2854 if (!replacing) { 2855 /* 2856 * For attach, the only allowable parent is a mirror or the root 2857 * vdev. 2858 */ 2859 if (pvd->vdev_ops != &vdev_mirror_ops && 2860 pvd->vdev_ops != &vdev_root_ops) 2861 return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP)); 2862 2863 pvops = &vdev_mirror_ops; 2864 } else { 2865 /* 2866 * Active hot spares can only be replaced by inactive hot 2867 * spares. 2868 */ 2869 if (pvd->vdev_ops == &vdev_spare_ops && 2870 pvd->vdev_child[1] == oldvd && 2871 !spa_has_spare(spa, newvd->vdev_guid)) 2872 return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP)); 2873 2874 /* 2875 * If the source is a hot spare, and the parent isn't already a 2876 * spare, then we want to create a new hot spare. Otherwise, we 2877 * want to create a replacing vdev. The user is not allowed to 2878 * attach to a spared vdev child unless the 'isspare' state is 2879 * the same (spare replaces spare, non-spare replaces 2880 * non-spare). 2881 */ 2882 if (pvd->vdev_ops == &vdev_replacing_ops) 2883 return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP)); 2884 else if (pvd->vdev_ops == &vdev_spare_ops && 2885 newvd->vdev_isspare != oldvd->vdev_isspare) 2886 return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP)); 2887 else if (pvd->vdev_ops != &vdev_spare_ops && 2888 newvd->vdev_isspare) 2889 pvops = &vdev_spare_ops; 2890 else 2891 pvops = &vdev_replacing_ops; 2892 } 2893 2894 /* 2895 * Compare the new device size with the replaceable/attachable 2896 * device size. 
2897 */ 2898 if (newvd->vdev_psize < vdev_get_rsize(oldvd)) 2899 return (spa_vdev_exit(spa, newrootvd, txg, EOVERFLOW)); 2900 2901 /* 2902 * The new device cannot have a higher alignment requirement 2903 * than the top-level vdev. 2904 */ 2905 if (newvd->vdev_ashift > oldvd->vdev_top->vdev_ashift) 2906 return (spa_vdev_exit(spa, newrootvd, txg, EDOM)); 2907 2908 /* 2909 * If this is an in-place replacement, update oldvd's path and devid 2910 * to make it distinguishable from newvd, and unopenable from now on. 2911 */ 2912 if (strcmp(oldvd->vdev_path, newvd->vdev_path) == 0) { 2913 spa_strfree(oldvd->vdev_path); 2914 oldvd->vdev_path = kmem_alloc(strlen(newvd->vdev_path) + 5, 2915 KM_SLEEP); 2916 (void) sprintf(oldvd->vdev_path, "%s/%s", 2917 newvd->vdev_path, "old"); 2918 if (oldvd->vdev_devid != NULL) { 2919 spa_strfree(oldvd->vdev_devid); 2920 oldvd->vdev_devid = NULL; 2921 } 2922 } 2923 2924 /* 2925 * If the parent is not a mirror, or if we're replacing, insert the new 2926 * mirror/replacing/spare vdev above oldvd. 2927 */ 2928 if (pvd->vdev_ops != pvops) 2929 pvd = vdev_add_parent(oldvd, pvops); 2930 2931 ASSERT(pvd->vdev_top->vdev_parent == rvd); 2932 ASSERT(pvd->vdev_ops == pvops); 2933 ASSERT(oldvd->vdev_parent == pvd); 2934 2935 /* 2936 * Extract the new device from its root and add it to pvd. 2937 */ 2938 vdev_remove_child(newrootvd, newvd); 2939 newvd->vdev_id = pvd->vdev_children; 2940 vdev_add_child(pvd, newvd); 2941 2942 /* 2943 * If newvd is smaller than oldvd, but larger than its rsize, 2944 * the addition of newvd may have decreased our parent's asize. 2945 */ 2946 pvd->vdev_asize = MIN(pvd->vdev_asize, newvd->vdev_asize); 2947 2948 tvd = newvd->vdev_top; 2949 ASSERT(pvd->vdev_top == tvd); 2950 ASSERT(tvd->vdev_parent == rvd); 2951 2952 vdev_config_dirty(tvd); 2953 2954 /* 2955 * Set newvd's DTL to [TXG_INITIAL, open_txg]. It will propagate 2956 * upward when spa_vdev_exit() calls vdev_dtl_reassess(). 2957 */ 2958 open_txg = txg + TXG_CONCURRENT_STATES - 1; 2959 2960 mutex_enter(&newvd->vdev_dtl_lock); 2961 space_map_add(&newvd->vdev_dtl_map, TXG_INITIAL, 2962 open_txg - TXG_INITIAL + 1); 2963 mutex_exit(&newvd->vdev_dtl_lock); 2964 2965 if (newvd->vdev_isspare) 2966 spa_spare_activate(newvd); 2967 oldvdpath = spa_strdup(oldvd->vdev_path); 2968 newvdpath = spa_strdup(newvd->vdev_path); 2969 newvd_isspare = newvd->vdev_isspare; 2970 2971 /* 2972 * Mark newvd's DTL dirty in this txg. 2973 */ 2974 vdev_dirty(tvd, VDD_DTL, newvd, txg); 2975 2976 (void) spa_vdev_exit(spa, newrootvd, open_txg, 0); 2977 2978 tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir); 2979 if (dmu_tx_assign(tx, TXG_WAIT) == 0) { 2980 spa_history_internal_log(LOG_POOL_VDEV_ATTACH, spa, tx, 2981 CRED(), "%s vdev=%s %s vdev=%s", 2982 replacing && newvd_isspare ? "spare in" : 2983 replacing ? "replace" : "attach", newvdpath, 2984 replacing ? "for" : "to", oldvdpath); 2985 dmu_tx_commit(tx); 2986 } else { 2987 dmu_tx_abort(tx); 2988 } 2989 2990 spa_strfree(oldvdpath); 2991 spa_strfree(newvdpath); 2992 2993 /* 2994 * Kick off a resilver to update newvd. 2995 */ 2996 VERIFY3U(spa_scrub(spa, POOL_SCRUB_RESILVER), ==, 0); 2997 2998 return (0); 2999 } 3000 3001 /* 3002 * Detach a device from a mirror or replacing vdev. 3003 * If 'replace_done' is specified, only detach if the parent 3004 * is a replacing vdev. 
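 *
 * To illustrate the relationship with spa_vdev_attach() (sketch added
 * for clarity, not in the original comment), a 'zpool replace' amounts
 * to roughly
 *
 *	spa_vdev_attach(spa, old_guid, nvroot, B_TRUE);	   replacing
 *	... resilver runs to completion ...
 *	spa_vdev_detach(spa, old_guid, B_TRUE);		   replace_done
 *
 * where the final detach is normally issued by spa_vdev_resilver_done()
 * from the async thread, not directly by the user.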
3005 */ 3006 int 3007 spa_vdev_detach(spa_t *spa, uint64_t guid, int replace_done) 3008 { 3009 uint64_t txg; 3010 int c, t, error; 3011 vdev_t *rvd = spa->spa_root_vdev; 3012 vdev_t *vd, *pvd, *cvd, *tvd; 3013 boolean_t unspare = B_FALSE; 3014 uint64_t unspare_guid; 3015 size_t len; 3016 3017 txg = spa_vdev_enter(spa); 3018 3019 vd = spa_lookup_by_guid(spa, guid, B_FALSE); 3020 3021 if (vd == NULL) 3022 return (spa_vdev_exit(spa, NULL, txg, ENODEV)); 3023 3024 if (!vd->vdev_ops->vdev_op_leaf) 3025 return (spa_vdev_exit(spa, NULL, txg, ENOTSUP)); 3026 3027 pvd = vd->vdev_parent; 3028 3029 /* 3030 * If replace_done is specified, only remove this device if it's 3031 * the first child of a replacing vdev. For the 'spare' vdev, either 3032 * disk can be removed. 3033 */ 3034 if (replace_done) { 3035 if (pvd->vdev_ops == &vdev_replacing_ops) { 3036 if (vd->vdev_id != 0) 3037 return (spa_vdev_exit(spa, NULL, txg, ENOTSUP)); 3038 } else if (pvd->vdev_ops != &vdev_spare_ops) { 3039 return (spa_vdev_exit(spa, NULL, txg, ENOTSUP)); 3040 } 3041 } 3042 3043 ASSERT(pvd->vdev_ops != &vdev_spare_ops || 3044 spa_version(spa) >= SPA_VERSION_SPARES); 3045 3046 /* 3047 * Only mirror, replacing, and spare vdevs support detach. 3048 */ 3049 if (pvd->vdev_ops != &vdev_replacing_ops && 3050 pvd->vdev_ops != &vdev_mirror_ops && 3051 pvd->vdev_ops != &vdev_spare_ops) 3052 return (spa_vdev_exit(spa, NULL, txg, ENOTSUP)); 3053 3054 /* 3055 * If there's only one replica, you can't detach it. 3056 */ 3057 if (pvd->vdev_children <= 1) 3058 return (spa_vdev_exit(spa, NULL, txg, EBUSY)); 3059 3060 /* 3061 * If all siblings have non-empty DTLs, this device may have the only 3062 * valid copy of the data, which means we cannot safely detach it. 3063 * 3064 * XXX -- as in the vdev_offline() case, we really want a more 3065 * precise DTL check. 3066 */ 3067 for (c = 0; c < pvd->vdev_children; c++) { 3068 uint64_t dirty; 3069 3070 cvd = pvd->vdev_child[c]; 3071 if (cvd == vd) 3072 continue; 3073 if (vdev_is_dead(cvd)) 3074 continue; 3075 mutex_enter(&cvd->vdev_dtl_lock); 3076 dirty = cvd->vdev_dtl_map.sm_space | 3077 cvd->vdev_dtl_scrub.sm_space; 3078 mutex_exit(&cvd->vdev_dtl_lock); 3079 if (!dirty) 3080 break; 3081 } 3082 3083 if (c == pvd->vdev_children) 3084 return (spa_vdev_exit(spa, NULL, txg, EBUSY)); 3085 3086 /* 3087 * If we are detaching the second disk from a replacing vdev, then 3088 * check to see if we changed the original vdev's path to have "/old" 3089 * at the end in spa_vdev_attach(). If so, undo that change now. 3090 */ 3091 if (pvd->vdev_ops == &vdev_replacing_ops && vd->vdev_id == 1 && 3092 pvd->vdev_child[0]->vdev_path != NULL && 3093 pvd->vdev_child[1]->vdev_path != NULL) { 3094 ASSERT(pvd->vdev_child[1] == vd); 3095 cvd = pvd->vdev_child[0]; 3096 len = strlen(vd->vdev_path); 3097 if (strncmp(cvd->vdev_path, vd->vdev_path, len) == 0 && 3098 strcmp(cvd->vdev_path + len, "/old") == 0) { 3099 spa_strfree(cvd->vdev_path); 3100 cvd->vdev_path = spa_strdup(vd->vdev_path); 3101 } 3102 } 3103 3104 /* 3105 * If we are detaching the original disk from a spare, then it implies 3106 * that the spare should become a real disk, and be removed from the 3107 * active spare list for the pool. 3108 */ 3109 if (pvd->vdev_ops == &vdev_spare_ops && 3110 vd->vdev_id == 0) 3111 unspare = B_TRUE; 3112 3113 /* 3114 * Erase the disk labels so the disk can be used for other things. 3115 * This must be done after all other error cases are handled, 3116 * but before we disembowel vd (so we can still do I/O to it). 
3117 * But if we can't do it, don't treat the error as fatal -- 3118 * it may be that the unwritability of the disk is the reason 3119 * it's being detached! 3120 */ 3121 error = vdev_label_init(vd, 0, VDEV_LABEL_REMOVE); 3122 3123 /* 3124 * Remove vd from its parent and compact the parent's children. 3125 */ 3126 vdev_remove_child(pvd, vd); 3127 vdev_compact_children(pvd); 3128 3129 /* 3130 * Remember one of the remaining children so we can get tvd below. 3131 */ 3132 cvd = pvd->vdev_child[0]; 3133 3134 /* 3135 * If we need to remove the remaining child from the list of hot spares, 3136 * do it now, marking the vdev as no longer a spare in the process. We 3137 * must do this before vdev_remove_parent(), because that can change the 3138 * GUID if it creates a new toplevel GUID. 3139 */ 3140 if (unspare) { 3141 ASSERT(cvd->vdev_isspare); 3142 spa_spare_remove(cvd); 3143 unspare_guid = cvd->vdev_guid; 3144 } 3145 3146 /* 3147 * If the parent mirror/replacing vdev only has one child, 3148 * the parent is no longer needed. Remove it from the tree. 3149 */ 3150 if (pvd->vdev_children == 1) 3151 vdev_remove_parent(cvd); 3152 3153 /* 3154 * We don't set tvd until now because the parent we just removed 3155 * may have been the previous top-level vdev. 3156 */ 3157 tvd = cvd->vdev_top; 3158 ASSERT(tvd->vdev_parent == rvd); 3159 3160 /* 3161 * Reevaluate the parent vdev state. 3162 */ 3163 vdev_propagate_state(cvd); 3164 3165 /* 3166 * If the device we just detached was smaller than the others, it may be 3167 * possible to add metaslabs (i.e. grow the pool). vdev_metaslab_init() 3168 * can't fail because the existing metaslabs are already in core, so 3169 * there's nothing to read from disk. 3170 */ 3171 VERIFY(vdev_metaslab_init(tvd, txg) == 0); 3172 3173 vdev_config_dirty(tvd); 3174 3175 /* 3176 * Mark vd's DTL as dirty in this txg. vdev_dtl_sync() will see that 3177 * vd->vdev_detached is set and free vd's DTL object in syncing context. 3178 * But first make sure we're not on any *other* txg's DTL list, to 3179 * prevent vd from being accessed after it's freed. 3180 */ 3181 for (t = 0; t < TXG_SIZE; t++) 3182 (void) txg_list_remove_this(&tvd->vdev_dtl_list, vd, t); 3183 vd->vdev_detached = B_TRUE; 3184 vdev_dirty(tvd, VDD_DTL, vd, txg); 3185 3186 spa_event_notify(spa, vd, ESC_ZFS_VDEV_REMOVE); 3187 3188 error = spa_vdev_exit(spa, vd, txg, 0); 3189 3190 /* 3191 * If this was the removal of the original device in a hot spare vdev, 3192 * then we want to go through and remove the device from the hot spare 3193 * list of every other pool. 
3194 */ 3195 if (unspare) { 3196 spa = NULL; 3197 mutex_enter(&spa_namespace_lock); 3198 while ((spa = spa_next(spa)) != NULL) { 3199 if (spa->spa_state != POOL_STATE_ACTIVE) 3200 continue; 3201 spa_open_ref(spa, FTAG); 3202 mutex_exit(&spa_namespace_lock); 3203 (void) spa_vdev_remove(spa, unspare_guid, B_TRUE); 3204 mutex_enter(&spa_namespace_lock); 3205 spa_close(spa, FTAG); 3206 } 3207 mutex_exit(&spa_namespace_lock); 3208 } 3209 3210 return (error); 3211 } 3212 3213 static nvlist_t * 3214 spa_nvlist_lookup_by_guid(nvlist_t **nvpp, int count, uint64_t target_guid) 3215 { 3216 for (int i = 0; i < count; i++) { 3217 uint64_t guid; 3218 3219 VERIFY(nvlist_lookup_uint64(nvpp[i], ZPOOL_CONFIG_GUID, 3220 &guid) == 0); 3221 3222 if (guid == target_guid) 3223 return (nvpp[i]); 3224 } 3225 3226 return (NULL); 3227 } 3228 3229 static void 3230 spa_vdev_remove_aux(nvlist_t *config, char *name, nvlist_t **dev, int count, 3231 nvlist_t *dev_to_remove) 3232 { 3233 nvlist_t **newdev = NULL; 3234 3235 if (count > 1) 3236 newdev = kmem_alloc((count - 1) * sizeof (void *), KM_SLEEP); 3237 3238 for (int i = 0, j = 0; i < count; i++) { 3239 if (dev[i] == dev_to_remove) 3240 continue; 3241 VERIFY(nvlist_dup(dev[i], &newdev[j++], KM_SLEEP) == 0); 3242 } 3243 3244 VERIFY(nvlist_remove(config, name, DATA_TYPE_NVLIST_ARRAY) == 0); 3245 VERIFY(nvlist_add_nvlist_array(config, name, newdev, count - 1) == 0); 3246 3247 for (int i = 0; i < count - 1; i++) 3248 nvlist_free(newdev[i]); 3249 3250 if (count > 1) 3251 kmem_free(newdev, (count - 1) * sizeof (void *)); 3252 } 3253 3254 /* 3255 * Remove a device from the pool. Currently, this supports removing only hot 3256 * spares and level 2 ARC devices. 3257 */ 3258 int 3259 spa_vdev_remove(spa_t *spa, uint64_t guid, boolean_t unspare) 3260 { 3261 vdev_t *vd; 3262 nvlist_t **spares, **l2cache, *nv; 3263 uint_t nspares, nl2cache; 3264 uint64_t txg; 3265 int error = 0; 3266 3267 txg = spa_vdev_enter(spa); 3268 3269 vd = spa_lookup_by_guid(spa, guid, B_FALSE); 3270 3271 if (spa->spa_spares.sav_vdevs != NULL && 3272 nvlist_lookup_nvlist_array(spa->spa_spares.sav_config, 3273 ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0 && 3274 (nv = spa_nvlist_lookup_by_guid(spares, nspares, guid)) != NULL) { 3275 /* 3276 * Only remove the hot spare if it's not currently in use 3277 * in this pool. 3278 */ 3279 if (vd == NULL || unspare) { 3280 spa_vdev_remove_aux(spa->spa_spares.sav_config, 3281 ZPOOL_CONFIG_SPARES, spares, nspares, nv); 3282 spa_load_spares(spa); 3283 spa->spa_spares.sav_sync = B_TRUE; 3284 } else { 3285 error = EBUSY; 3286 } 3287 } else if (spa->spa_l2cache.sav_vdevs != NULL && 3288 nvlist_lookup_nvlist_array(spa->spa_l2cache.sav_config, 3289 ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0 && 3290 (nv = spa_nvlist_lookup_by_guid(l2cache, nl2cache, guid)) != NULL) { 3291 /* 3292 * Cache devices can always be removed. 3293 */ 3294 spa_vdev_remove_aux(spa->spa_l2cache.sav_config, 3295 ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache, nv); 3296 spa_load_l2cache(spa); 3297 spa->spa_l2cache.sav_sync = B_TRUE; 3298 } else if (vd != NULL) { 3299 /* 3300 * Normal vdevs cannot be removed (yet). 3301 */ 3302 error = ENOTSUP; 3303 } else { 3304 /* 3305 * There is no vdev of any kind with the specified guid. 3306 */ 3307 error = ENOENT; 3308 } 3309 3310 return (spa_vdev_exit(spa, NULL, txg, error)); 3311 } 3312 3313 /* 3314 * Find any device that's done replacing, or a vdev marked 'unspare' that's 3315 * current spared, so we can detach it. 
3316 */ 3317 static vdev_t * 3318 spa_vdev_resilver_done_hunt(vdev_t *vd) 3319 { 3320 vdev_t *newvd, *oldvd; 3321 int c; 3322 3323 for (c = 0; c < vd->vdev_children; c++) { 3324 oldvd = spa_vdev_resilver_done_hunt(vd->vdev_child[c]); 3325 if (oldvd != NULL) 3326 return (oldvd); 3327 } 3328 3329 /* 3330 * Check for a completed replacement. 3331 */ 3332 if (vd->vdev_ops == &vdev_replacing_ops && vd->vdev_children == 2) { 3333 oldvd = vd->vdev_child[0]; 3334 newvd = vd->vdev_child[1]; 3335 3336 mutex_enter(&newvd->vdev_dtl_lock); 3337 if (newvd->vdev_dtl_map.sm_space == 0 && 3338 newvd->vdev_dtl_scrub.sm_space == 0) { 3339 mutex_exit(&newvd->vdev_dtl_lock); 3340 return (oldvd); 3341 } 3342 mutex_exit(&newvd->vdev_dtl_lock); 3343 } 3344 3345 /* 3346 * Check for a completed resilver with the 'unspare' flag set. 3347 */ 3348 if (vd->vdev_ops == &vdev_spare_ops && vd->vdev_children == 2) { 3349 newvd = vd->vdev_child[0]; 3350 oldvd = vd->vdev_child[1]; 3351 3352 mutex_enter(&newvd->vdev_dtl_lock); 3353 if (newvd->vdev_unspare && 3354 newvd->vdev_dtl_map.sm_space == 0 && 3355 newvd->vdev_dtl_scrub.sm_space == 0) { 3356 newvd->vdev_unspare = 0; 3357 mutex_exit(&newvd->vdev_dtl_lock); 3358 return (oldvd); 3359 } 3360 mutex_exit(&newvd->vdev_dtl_lock); 3361 } 3362 3363 return (NULL); 3364 } 3365 3366 static void 3367 spa_vdev_resilver_done(spa_t *spa) 3368 { 3369 vdev_t *vd; 3370 vdev_t *pvd; 3371 uint64_t guid; 3372 uint64_t pguid = 0; 3373 3374 spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER); 3375 3376 while ((vd = spa_vdev_resilver_done_hunt(spa->spa_root_vdev)) != NULL) { 3377 guid = vd->vdev_guid; 3378 /* 3379 * If we have just finished replacing a hot spared device, then 3380 * we need to detach the parent's first child (the original hot 3381 * spare) as well. 3382 */ 3383 pvd = vd->vdev_parent; 3384 if (pvd->vdev_parent->vdev_ops == &vdev_spare_ops && 3385 pvd->vdev_id == 0) { 3386 ASSERT(pvd->vdev_ops == &vdev_replacing_ops); 3387 ASSERT(pvd->vdev_parent->vdev_children == 2); 3388 pguid = pvd->vdev_parent->vdev_child[1]->vdev_guid; 3389 } 3390 spa_config_exit(spa, SCL_CONFIG, FTAG); 3391 if (spa_vdev_detach(spa, guid, B_TRUE) != 0) 3392 return; 3393 if (pguid != 0 && spa_vdev_detach(spa, pguid, B_TRUE) != 0) 3394 return; 3395 spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER); 3396 } 3397 3398 spa_config_exit(spa, SCL_CONFIG, FTAG); 3399 } 3400 3401 /* 3402 * Update the stored path for this vdev. Dirty the vdev configuration, relying 3403 * on spa_vdev_enter/exit() to synchronize the labels and cache. 3404 */ 3405 int 3406 spa_vdev_setpath(spa_t *spa, uint64_t guid, const char *newpath) 3407 { 3408 vdev_t *vd; 3409 uint64_t txg; 3410 3411 txg = spa_vdev_enter(spa); 3412 3413 if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL) { 3414 /* 3415 * Determine if this is a reference to a hot spare device. If 3416 * it is, update the path manually as there is no associated 3417 * vdev_t that can be synced to disk. 
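 *
 * (Illustration added for clarity; the path shown is hypothetical:)
 * callers pass the leaf vdev's guid together with the new userland
 * path, e.g.
 *
 *	error = spa_vdev_setpath(spa, vd->vdev_guid,
 *	    "/dev/dsk/c1t0d0s0");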
3418 */ 3419 nvlist_t **spares; 3420 uint_t i, nspares; 3421 3422 if (spa->spa_spares.sav_config != NULL) { 3423 VERIFY(nvlist_lookup_nvlist_array( 3424 spa->spa_spares.sav_config, ZPOOL_CONFIG_SPARES, 3425 &spares, &nspares) == 0); 3426 for (i = 0; i < nspares; i++) { 3427 uint64_t theguid; 3428 VERIFY(nvlist_lookup_uint64(spares[i], 3429 ZPOOL_CONFIG_GUID, &theguid) == 0); 3430 if (theguid == guid) { 3431 VERIFY(nvlist_add_string(spares[i], 3432 ZPOOL_CONFIG_PATH, newpath) == 0); 3433 spa_load_spares(spa); 3434 spa->spa_spares.sav_sync = B_TRUE; 3435 return (spa_vdev_exit(spa, NULL, txg, 3436 0)); 3437 } 3438 } 3439 } 3440 3441 return (spa_vdev_exit(spa, NULL, txg, ENOENT)); 3442 } 3443 3444 if (!vd->vdev_ops->vdev_op_leaf) 3445 return (spa_vdev_exit(spa, NULL, txg, ENOTSUP)); 3446 3447 spa_strfree(vd->vdev_path); 3448 vd->vdev_path = spa_strdup(newpath); 3449 3450 vdev_config_dirty(vd->vdev_top); 3451 3452 return (spa_vdev_exit(spa, NULL, txg, 0)); 3453 } 3454 3455 /* 3456 * ========================================================================== 3457 * SPA Scrubbing 3458 * ========================================================================== 3459 */ 3460 3461 int 3462 spa_scrub(spa_t *spa, pool_scrub_type_t type) 3463 { 3464 ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == 0); 3465 3466 if ((uint_t)type >= POOL_SCRUB_TYPES) 3467 return (ENOTSUP); 3468 3469 /* 3470 * If a resilver was requested, but there is no DTL on a 3471 * writeable leaf device, we have nothing to do. 3472 */ 3473 if (type == POOL_SCRUB_RESILVER && 3474 !vdev_resilver_needed(spa->spa_root_vdev, NULL, NULL)) { 3475 spa_async_request(spa, SPA_ASYNC_RESILVER_DONE); 3476 return (0); 3477 } 3478 3479 if (type == POOL_SCRUB_EVERYTHING && 3480 spa->spa_dsl_pool->dp_scrub_func != SCRUB_FUNC_NONE && 3481 spa->spa_dsl_pool->dp_scrub_isresilver) 3482 return (EBUSY); 3483 3484 if (type == POOL_SCRUB_EVERYTHING || type == POOL_SCRUB_RESILVER) { 3485 return (dsl_pool_scrub_clean(spa->spa_dsl_pool)); 3486 } else if (type == POOL_SCRUB_NONE) { 3487 return (dsl_pool_scrub_cancel(spa->spa_dsl_pool)); 3488 } else { 3489 return (EINVAL); 3490 } 3491 } 3492 3493 /* 3494 * ========================================================================== 3495 * SPA async task processing 3496 * ========================================================================== 3497 */ 3498 3499 static void 3500 spa_async_remove(spa_t *spa, vdev_t *vd) 3501 { 3502 if (vd->vdev_remove_wanted) { 3503 vd->vdev_remove_wanted = 0; 3504 vdev_set_state(vd, B_FALSE, VDEV_STATE_REMOVED, VDEV_AUX_NONE); 3505 vdev_clear(spa, vd); 3506 vdev_state_dirty(vd->vdev_top); 3507 } 3508 3509 for (int c = 0; c < vd->vdev_children; c++) 3510 spa_async_remove(spa, vd->vdev_child[c]); 3511 } 3512 3513 static void 3514 spa_async_probe(spa_t *spa, vdev_t *vd) 3515 { 3516 if (vd->vdev_probe_wanted) { 3517 vd->vdev_probe_wanted = 0; 3518 vdev_reopen(vd); /* vdev_open() does the actual probe */ 3519 } 3520 3521 for (int c = 0; c < vd->vdev_children; c++) 3522 spa_async_probe(spa, vd->vdev_child[c]); 3523 } 3524 3525 static void 3526 spa_async_thread(spa_t *spa) 3527 { 3528 int tasks; 3529 3530 ASSERT(spa->spa_sync_on); 3531 3532 mutex_enter(&spa->spa_async_lock); 3533 tasks = spa->spa_async_tasks; 3534 spa->spa_async_tasks = 0; 3535 mutex_exit(&spa->spa_async_lock); 3536 3537 /* 3538 * See if the config needs to be updated. 
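 *
 * (Note added for clarity, not in the original comment:) tasks arrive
 * via spa_async_request(), e.g.
 *
 *	spa_async_request(spa, SPA_ASYNC_RESILVER);
 *
 * and are only acted upon once spa_async_dispatch() -- called at the end
 * of spa_sync() -- creates this thread, which it does only when there is
 * pending work, async processing isn't suspended, and the system isn't
 * running off a read-only root.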
3539 */ 3540 if (tasks & SPA_ASYNC_CONFIG_UPDATE) { 3541 mutex_enter(&spa_namespace_lock); 3542 spa_config_update(spa, SPA_CONFIG_UPDATE_POOL); 3543 mutex_exit(&spa_namespace_lock); 3544 } 3545 3546 /* 3547 * See if any devices need to be marked REMOVED. 3548 */ 3549 if (tasks & SPA_ASYNC_REMOVE) { 3550 spa_vdev_state_enter(spa); 3551 spa_async_remove(spa, spa->spa_root_vdev); 3552 for (int i = 0; i < spa->spa_l2cache.sav_count; i++) 3553 spa_async_remove(spa, spa->spa_l2cache.sav_vdevs[i]); 3554 for (int i = 0; i < spa->spa_spares.sav_count; i++) 3555 spa_async_remove(spa, spa->spa_spares.sav_vdevs[i]); 3556 (void) spa_vdev_state_exit(spa, NULL, 0); 3557 } 3558 3559 /* 3560 * See if any devices need to be probed. 3561 */ 3562 if (tasks & SPA_ASYNC_PROBE) { 3563 spa_vdev_state_enter(spa); 3564 spa_async_probe(spa, spa->spa_root_vdev); 3565 (void) spa_vdev_state_exit(spa, NULL, 0); 3566 } 3567 3568 /* 3569 * If any devices are done replacing, detach them. 3570 */ 3571 if (tasks & SPA_ASYNC_RESILVER_DONE) 3572 spa_vdev_resilver_done(spa); 3573 3574 /* 3575 * Kick off a resilver. 3576 */ 3577 if (tasks & SPA_ASYNC_RESILVER) 3578 VERIFY(spa_scrub(spa, POOL_SCRUB_RESILVER) == 0); 3579 3580 /* 3581 * Let the world know that we're done. 3582 */ 3583 mutex_enter(&spa->spa_async_lock); 3584 spa->spa_async_thread = NULL; 3585 cv_broadcast(&spa->spa_async_cv); 3586 mutex_exit(&spa->spa_async_lock); 3587 thread_exit(); 3588 } 3589 3590 void 3591 spa_async_suspend(spa_t *spa) 3592 { 3593 mutex_enter(&spa->spa_async_lock); 3594 spa->spa_async_suspended++; 3595 while (spa->spa_async_thread != NULL) 3596 cv_wait(&spa->spa_async_cv, &spa->spa_async_lock); 3597 mutex_exit(&spa->spa_async_lock); 3598 } 3599 3600 void 3601 spa_async_resume(spa_t *spa) 3602 { 3603 mutex_enter(&spa->spa_async_lock); 3604 ASSERT(spa->spa_async_suspended != 0); 3605 spa->spa_async_suspended--; 3606 mutex_exit(&spa->spa_async_lock); 3607 } 3608 3609 static void 3610 spa_async_dispatch(spa_t *spa) 3611 { 3612 mutex_enter(&spa->spa_async_lock); 3613 if (spa->spa_async_tasks && !spa->spa_async_suspended && 3614 spa->spa_async_thread == NULL && 3615 rootdir != NULL && !vn_is_readonly(rootdir)) 3616 spa->spa_async_thread = thread_create(NULL, 0, 3617 spa_async_thread, spa, 0, &p0, TS_RUN, maxclsyspri); 3618 mutex_exit(&spa->spa_async_lock); 3619 } 3620 3621 void 3622 spa_async_request(spa_t *spa, int task) 3623 { 3624 mutex_enter(&spa->spa_async_lock); 3625 spa->spa_async_tasks |= task; 3626 mutex_exit(&spa->spa_async_lock); 3627 } 3628 3629 /* 3630 * ========================================================================== 3631 * SPA syncing routines 3632 * ========================================================================== 3633 */ 3634 3635 static void 3636 spa_sync_deferred_frees(spa_t *spa, uint64_t txg) 3637 { 3638 bplist_t *bpl = &spa->spa_sync_bplist; 3639 dmu_tx_t *tx; 3640 blkptr_t blk; 3641 uint64_t itor = 0; 3642 zio_t *zio; 3643 int error; 3644 uint8_t c = 1; 3645 3646 zio = zio_root(spa, NULL, NULL, ZIO_FLAG_CANFAIL); 3647 3648 while (bplist_iterate(bpl, &itor, &blk) == 0) { 3649 ASSERT(blk.blk_birth < txg); 3650 zio_nowait(zio_free(zio, spa, txg, &blk, NULL, NULL, 3651 ZIO_FLAG_MUSTSUCCEED)); 3652 } 3653 3654 error = zio_wait(zio); 3655 ASSERT3U(error, ==, 0); 3656 3657 tx = dmu_tx_create_assigned(spa->spa_dsl_pool, txg); 3658 bplist_vacate(bpl, tx); 3659 3660 /* 3661 * Pre-dirty the first block so we sync to convergence faster. 3662 * (Usually only the first block is needed.) 
3663 */ 3664 dmu_write(spa->spa_meta_objset, spa->spa_sync_bplist_obj, 0, 1, &c, tx); 3665 dmu_tx_commit(tx); 3666 } 3667 3668 static void 3669 spa_sync_nvlist(spa_t *spa, uint64_t obj, nvlist_t *nv, dmu_tx_t *tx) 3670 { 3671 char *packed = NULL; 3672 size_t bufsize; 3673 size_t nvsize = 0; 3674 dmu_buf_t *db; 3675 3676 VERIFY(nvlist_size(nv, &nvsize, NV_ENCODE_XDR) == 0); 3677 3678 /* 3679 * Write full (SPA_CONFIG_BLOCKSIZE) blocks of configuration 3680 * information. This avoids the dbuf_will_dirty() path and 3681 * saves us a pre-read to get data we don't actually care about. 3682 */ 3683 bufsize = P2ROUNDUP(nvsize, SPA_CONFIG_BLOCKSIZE); 3684 packed = kmem_alloc(bufsize, KM_SLEEP); 3685 3686 VERIFY(nvlist_pack(nv, &packed, &nvsize, NV_ENCODE_XDR, 3687 KM_SLEEP) == 0); 3688 bzero(packed + nvsize, bufsize - nvsize); 3689 3690 dmu_write(spa->spa_meta_objset, obj, 0, bufsize, packed, tx); 3691 3692 kmem_free(packed, bufsize); 3693 3694 VERIFY(0 == dmu_bonus_hold(spa->spa_meta_objset, obj, FTAG, &db)); 3695 dmu_buf_will_dirty(db, tx); 3696 *(uint64_t *)db->db_data = nvsize; 3697 dmu_buf_rele(db, FTAG); 3698 } 3699 3700 static void 3701 spa_sync_aux_dev(spa_t *spa, spa_aux_vdev_t *sav, dmu_tx_t *tx, 3702 const char *config, const char *entry) 3703 { 3704 nvlist_t *nvroot; 3705 nvlist_t **list; 3706 int i; 3707 3708 if (!sav->sav_sync) 3709 return; 3710 3711 /* 3712 * Update the MOS nvlist describing the list of available devices. 3713 * spa_validate_aux() will have already made sure this nvlist is 3714 * valid and the vdevs are labeled appropriately. 3715 */ 3716 if (sav->sav_object == 0) { 3717 sav->sav_object = dmu_object_alloc(spa->spa_meta_objset, 3718 DMU_OT_PACKED_NVLIST, 1 << 14, DMU_OT_PACKED_NVLIST_SIZE, 3719 sizeof (uint64_t), tx); 3720 VERIFY(zap_update(spa->spa_meta_objset, 3721 DMU_POOL_DIRECTORY_OBJECT, entry, sizeof (uint64_t), 1, 3722 &sav->sav_object, tx) == 0); 3723 } 3724 3725 VERIFY(nvlist_alloc(&nvroot, NV_UNIQUE_NAME, KM_SLEEP) == 0); 3726 if (sav->sav_count == 0) { 3727 VERIFY(nvlist_add_nvlist_array(nvroot, config, NULL, 0) == 0); 3728 } else { 3729 list = kmem_alloc(sav->sav_count * sizeof (void *), KM_SLEEP); 3730 for (i = 0; i < sav->sav_count; i++) 3731 list[i] = vdev_config_generate(spa, sav->sav_vdevs[i], 3732 B_FALSE, B_FALSE, B_TRUE); 3733 VERIFY(nvlist_add_nvlist_array(nvroot, config, list, 3734 sav->sav_count) == 0); 3735 for (i = 0; i < sav->sav_count; i++) 3736 nvlist_free(list[i]); 3737 kmem_free(list, sav->sav_count * sizeof (void *)); 3738 } 3739 3740 spa_sync_nvlist(spa, sav->sav_object, nvroot, tx); 3741 nvlist_free(nvroot); 3742 3743 sav->sav_sync = B_FALSE; 3744 } 3745 3746 static void 3747 spa_sync_config_object(spa_t *spa, dmu_tx_t *tx) 3748 { 3749 nvlist_t *config; 3750 3751 if (list_is_empty(&spa->spa_config_dirty_list)) 3752 return; 3753 3754 spa_config_enter(spa, SCL_STATE, FTAG, RW_READER); 3755 3756 config = spa_config_generate(spa, spa->spa_root_vdev, 3757 dmu_tx_get_txg(tx), B_FALSE); 3758 3759 spa_config_exit(spa, SCL_STATE, FTAG); 3760 3761 if (spa->spa_config_syncing) 3762 nvlist_free(spa->spa_config_syncing); 3763 spa->spa_config_syncing = config; 3764 3765 spa_sync_nvlist(spa, spa->spa_config_object, config, tx); 3766 } 3767 3768 /* 3769 * Set zpool properties. 
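 *
 * For illustration only (hypothetical caller, not part of the original
 * file): the nvlist handed to this sync task typically originates from
 * spa_prop_set(), e.g.
 *
 *	nvlist_t *props;
 *	int error;
 *
 *	VERIFY(nvlist_alloc(&props, NV_UNIQUE_NAME, KM_SLEEP) == 0);
 *	VERIFY(nvlist_add_uint64(props,
 *	    zpool_prop_to_name(ZPOOL_PROP_DELEGATION), 1) == 0);
 *	error = spa_prop_set(spa, props);
 *	nvlist_free(props);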
3770 */ 3771 static void 3772 spa_sync_props(void *arg1, void *arg2, cred_t *cr, dmu_tx_t *tx) 3773 { 3774 spa_t *spa = arg1; 3775 objset_t *mos = spa->spa_meta_objset; 3776 nvlist_t *nvp = arg2; 3777 nvpair_t *elem; 3778 uint64_t intval; 3779 char *strval; 3780 zpool_prop_t prop; 3781 const char *propname; 3782 zprop_type_t proptype; 3783 spa_config_dirent_t *dp; 3784 3785 mutex_enter(&spa->spa_props_lock); 3786 3787 elem = NULL; 3788 while ((elem = nvlist_next_nvpair(nvp, elem))) { 3789 switch (prop = zpool_name_to_prop(nvpair_name(elem))) { 3790 case ZPOOL_PROP_VERSION: 3791 /* 3792 * Only set version for non-zpool-creation cases 3793 * (set/import). spa_create() needs special care 3794 * for version setting. 3795 */ 3796 if (tx->tx_txg != TXG_INITIAL) { 3797 VERIFY(nvpair_value_uint64(elem, 3798 &intval) == 0); 3799 ASSERT(intval <= SPA_VERSION); 3800 ASSERT(intval >= spa_version(spa)); 3801 spa->spa_uberblock.ub_version = intval; 3802 vdev_config_dirty(spa->spa_root_vdev); 3803 } 3804 break; 3805 3806 case ZPOOL_PROP_ALTROOT: 3807 /* 3808 * 'altroot' is a non-persistent property. It should 3809 * have been set temporarily at creation or import time. 3810 */ 3811 ASSERT(spa->spa_root != NULL); 3812 break; 3813 3814 case ZPOOL_PROP_CACHEFILE: 3815 /* 3816 * 'cachefile' is a non-persistent property, but we note 3817 * an async request so that the config cache gets 3818 * updated. 3819 */ 3820 VERIFY(nvpair_value_string(elem, &strval) == 0); 3821 3822 dp = kmem_alloc(sizeof (spa_config_dirent_t), KM_SLEEP); 3823 3824 if (strval[0] == '\0') 3825 dp->scd_path = spa_strdup(spa_config_path); 3826 else if (strcmp(strval, "none") == 0) 3827 dp->scd_path = NULL; 3828 else 3829 dp->scd_path = spa_strdup(strval); 3830 3831 list_insert_head(&spa->spa_config_list, dp); 3832 spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE); 3833 break; 3834 default: 3835 /* 3836 * Set pool property values in the poolprops mos object.
3837 */ 3838 if (spa->spa_pool_props_object == 0) { 3839 objset_t *mos = spa->spa_meta_objset; 3840 3841 VERIFY((spa->spa_pool_props_object = 3842 zap_create(mos, DMU_OT_POOL_PROPS, 3843 DMU_OT_NONE, 0, tx)) > 0); 3844 3845 VERIFY(zap_update(mos, 3846 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_PROPS, 3847 8, 1, &spa->spa_pool_props_object, tx) 3848 == 0); 3849 } 3850 3851 /* normalize the property name */ 3852 propname = zpool_prop_to_name(prop); 3853 proptype = zpool_prop_get_type(prop); 3854 3855 if (nvpair_type(elem) == DATA_TYPE_STRING) { 3856 ASSERT(proptype == PROP_TYPE_STRING); 3857 VERIFY(nvpair_value_string(elem, &strval) == 0); 3858 VERIFY(zap_update(mos, 3859 spa->spa_pool_props_object, propname, 3860 1, strlen(strval) + 1, strval, tx) == 0); 3861 3862 } else if (nvpair_type(elem) == DATA_TYPE_UINT64) { 3863 VERIFY(nvpair_value_uint64(elem, &intval) == 0); 3864 3865 if (proptype == PROP_TYPE_INDEX) { 3866 const char *unused; 3867 VERIFY(zpool_prop_index_to_string( 3868 prop, intval, &unused) == 0); 3869 } 3870 VERIFY(zap_update(mos, 3871 spa->spa_pool_props_object, propname, 3872 8, 1, &intval, tx) == 0); 3873 } else { 3874 ASSERT(0); /* not allowed */ 3875 } 3876 3877 switch (prop) { 3878 case ZPOOL_PROP_DELEGATION: 3879 spa->spa_delegation = intval; 3880 break; 3881 case ZPOOL_PROP_BOOTFS: 3882 spa->spa_bootfs = intval; 3883 break; 3884 case ZPOOL_PROP_FAILUREMODE: 3885 spa->spa_failmode = intval; 3886 break; 3887 default: 3888 break; 3889 } 3890 } 3891 3892 /* log internal history if this is not a zpool create */ 3893 if (spa_version(spa) >= SPA_VERSION_ZPOOL_HISTORY && 3894 tx->tx_txg != TXG_INITIAL) { 3895 spa_history_internal_log(LOG_POOL_PROPSET, 3896 spa, tx, cr, "%s %lld %s", 3897 nvpair_name(elem), intval, spa_name(spa)); 3898 } 3899 } 3900 3901 mutex_exit(&spa->spa_props_lock); 3902 } 3903 3904 /* 3905 * Sync the specified transaction group. New blocks may be dirtied as 3906 * part of the process, so we iterate until it converges. 3907 */ 3908 void 3909 spa_sync(spa_t *spa, uint64_t txg) 3910 { 3911 dsl_pool_t *dp = spa->spa_dsl_pool; 3912 objset_t *mos = spa->spa_meta_objset; 3913 bplist_t *bpl = &spa->spa_sync_bplist; 3914 vdev_t *rvd = spa->spa_root_vdev; 3915 vdev_t *vd; 3916 dmu_tx_t *tx; 3917 int dirty_vdevs; 3918 int error; 3919 3920 /* 3921 * Lock out configuration changes. 3922 */ 3923 spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER); 3924 3925 spa->spa_syncing_txg = txg; 3926 spa->spa_sync_pass = 0; 3927 3928 /* 3929 * If there are any pending vdev state changes, convert them 3930 * into config changes that go out with this transaction group. 3931 */ 3932 spa_config_enter(spa, SCL_STATE, FTAG, RW_READER); 3933 while ((vd = list_head(&spa->spa_state_dirty_list)) != NULL) { 3934 vdev_state_clean(vd); 3935 vdev_config_dirty(vd); 3936 } 3937 spa_config_exit(spa, SCL_STATE, FTAG); 3938 3939 VERIFY(0 == bplist_open(bpl, mos, spa->spa_sync_bplist_obj)); 3940 3941 tx = dmu_tx_create_assigned(dp, txg); 3942 3943 /* 3944 * If we are upgrading to SPA_VERSION_RAIDZ_DEFLATE this txg, 3945 * set spa_deflate if we have no raid-z vdevs. 
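 *
 * (Note added for clarity, not in the original comment:) these one-shot
 * upgrade steps fire on the first sync after something raises
 * spa_uberblock.ub_version above spa_ubsync.ub_version, e.g. a
 * 'zpool upgrade' that lands in spa_upgrade(spa, SPA_VERSION).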
3946 */ 3947 if (spa->spa_ubsync.ub_version < SPA_VERSION_RAIDZ_DEFLATE && 3948 spa->spa_uberblock.ub_version >= SPA_VERSION_RAIDZ_DEFLATE) { 3949 int i; 3950 3951 for (i = 0; i < rvd->vdev_children; i++) { 3952 vd = rvd->vdev_child[i]; 3953 if (vd->vdev_deflate_ratio != SPA_MINBLOCKSIZE) 3954 break; 3955 } 3956 if (i == rvd->vdev_children) { 3957 spa->spa_deflate = TRUE; 3958 VERIFY(0 == zap_add(spa->spa_meta_objset, 3959 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_DEFLATE, 3960 sizeof (uint64_t), 1, &spa->spa_deflate, tx)); 3961 } 3962 } 3963 3964 if (spa->spa_ubsync.ub_version < SPA_VERSION_ORIGIN && 3965 spa->spa_uberblock.ub_version >= SPA_VERSION_ORIGIN) { 3966 dsl_pool_create_origin(dp, tx); 3967 3968 /* Keeping the origin open increases spa_minref */ 3969 spa->spa_minref += 3; 3970 } 3971 3972 if (spa->spa_ubsync.ub_version < SPA_VERSION_NEXT_CLONES && 3973 spa->spa_uberblock.ub_version >= SPA_VERSION_NEXT_CLONES) { 3974 dsl_pool_upgrade_clones(dp, tx); 3975 } 3976 3977 /* 3978 * If anything has changed in this txg, push the deferred frees 3979 * from the previous txg. If not, leave them alone so that we 3980 * don't generate work on an otherwise idle system. 3981 */ 3982 if (!txg_list_empty(&dp->dp_dirty_datasets, txg) || 3983 !txg_list_empty(&dp->dp_dirty_dirs, txg) || 3984 !txg_list_empty(&dp->dp_sync_tasks, txg)) 3985 spa_sync_deferred_frees(spa, txg); 3986 3987 /* 3988 * Iterate to convergence. 3989 */ 3990 do { 3991 spa->spa_sync_pass++; 3992 3993 spa_sync_config_object(spa, tx); 3994 spa_sync_aux_dev(spa, &spa->spa_spares, tx, 3995 ZPOOL_CONFIG_SPARES, DMU_POOL_SPARES); 3996 spa_sync_aux_dev(spa, &spa->spa_l2cache, tx, 3997 ZPOOL_CONFIG_L2CACHE, DMU_POOL_L2CACHE); 3998 spa_errlog_sync(spa, txg); 3999 dsl_pool_sync(dp, txg); 4000 4001 dirty_vdevs = 0; 4002 while (vd = txg_list_remove(&spa->spa_vdev_txg_list, txg)) { 4003 vdev_sync(vd, txg); 4004 dirty_vdevs++; 4005 } 4006 4007 bplist_sync(bpl, tx); 4008 } while (dirty_vdevs); 4009 4010 bplist_close(bpl); 4011 4012 dprintf("txg %llu passes %d\n", txg, spa->spa_sync_pass); 4013 4014 /* 4015 * Rewrite the vdev configuration (which includes the uberblock) 4016 * to commit the transaction group. 4017 * 4018 * If there are no dirty vdevs, we sync the uberblock to a few 4019 * random top-level vdevs that are known to be visible in the 4020 * config cache (see spa_vdev_add() for a complete description). 4021 * If there *are* dirty vdevs, sync the uberblock to all vdevs. 4022 */ 4023 for (;;) { 4024 /* 4025 * We hold SCL_STATE to prevent vdev open/close/etc. 4026 * while we're attempting to write the vdev labels. 4027 */ 4028 spa_config_enter(spa, SCL_STATE, FTAG, RW_READER); 4029 4030 if (list_is_empty(&spa->spa_config_dirty_list)) { 4031 vdev_t *svd[SPA_DVAS_PER_BP]; 4032 int svdcount = 0; 4033 int children = rvd->vdev_children; 4034 int c0 = spa_get_random(children); 4035 int c; 4036 4037 for (c = 0; c < children; c++) { 4038 vd = rvd->vdev_child[(c0 + c) % children]; 4039 if (vd->vdev_ms_array == 0 || vd->vdev_islog) 4040 continue; 4041 svd[svdcount++] = vd; 4042 if (svdcount == SPA_DVAS_PER_BP) 4043 break; 4044 } 4045 error = vdev_config_sync(svd, svdcount, txg); 4046 } else { 4047 error = vdev_config_sync(rvd->vdev_child, 4048 rvd->vdev_children, txg); 4049 } 4050 4051 spa_config_exit(spa, SCL_STATE, FTAG); 4052 4053 if (error == 0) 4054 break; 4055 zio_suspend(spa, NULL); 4056 zio_resume_wait(spa); 4057 } 4058 dmu_tx_commit(tx); 4059 4060 /* 4061 * Clear the dirty config list. 
	 */
	while ((vd = list_head(&spa->spa_config_dirty_list)) != NULL)
		vdev_config_clean(vd);

	/*
	 * Now that the new config has synced transactionally,
	 * let it become visible to the config cache.
	 */
	if (spa->spa_config_syncing != NULL) {
		spa_config_set(spa, spa->spa_config_syncing);
		spa->spa_config_txg = txg;
		spa->spa_config_syncing = NULL;
	}

	spa->spa_ubsync = spa->spa_uberblock;

	/*
	 * Clean up the ZIL records for the synced txg.
	 */
	dsl_pool_zil_clean(dp);

	/*
	 * Update usable space statistics.
	 */
	while ((vd = txg_list_remove(&spa->spa_vdev_txg_list, TXG_CLEAN(txg)))
	    != NULL)
		vdev_sync_done(vd, txg);

	/*
	 * It had better be the case that we didn't dirty anything
	 * since vdev_config_sync().
	 */
	ASSERT(txg_list_empty(&dp->dp_dirty_datasets, txg));
	ASSERT(txg_list_empty(&dp->dp_dirty_dirs, txg));
	ASSERT(txg_list_empty(&spa->spa_vdev_txg_list, txg));
	ASSERT(bpl->bpl_queue == NULL);

	spa_config_exit(spa, SCL_CONFIG, FTAG);

	/*
	 * If any async tasks have been requested, kick them off.
	 */
	spa_async_dispatch(spa);
}

/*
 * Sync all pools. We don't want to hold the namespace lock across these
 * operations, so we take a reference on the spa_t and drop the lock during the
 * sync.
 */
void
spa_sync_allpools(void)
{
	spa_t *spa = NULL;
	mutex_enter(&spa_namespace_lock);
	while ((spa = spa_next(spa)) != NULL) {
		if (spa_state(spa) != POOL_STATE_ACTIVE || spa_suspended(spa))
			continue;
		spa_open_ref(spa, FTAG);
		mutex_exit(&spa_namespace_lock);
		txg_wait_synced(spa_get_dsl(spa), 0);
		mutex_enter(&spa_namespace_lock);
		spa_close(spa, FTAG);
	}
	mutex_exit(&spa_namespace_lock);
}

/*
 * ==========================================================================
 * Miscellaneous routines
 * ==========================================================================
 */

/*
 * Remove all pools in the system.
 */
void
spa_evict_all(void)
{
	spa_t *spa;

	/*
	 * Remove all cached state. All pools should be closed now,
	 * so every spa in the AVL tree should be unreferenced.
	 */
	mutex_enter(&spa_namespace_lock);
	while ((spa = spa_next(NULL)) != NULL) {
		/*
		 * Stop async tasks. The async thread may need to detach
		 * a device that's been replaced, which requires grabbing
		 * spa_namespace_lock, so we must drop it here.
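		 * Holding a reference via spa_open_ref() keeps this spa_t
		 * from being removed while the namespace lock is dropped;
		 * the reference is released once the lock is reacquired.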
		 */
		spa_open_ref(spa, FTAG);
		mutex_exit(&spa_namespace_lock);
		spa_async_suspend(spa);
		mutex_enter(&spa_namespace_lock);
		spa_close(spa, FTAG);

		if (spa->spa_state != POOL_STATE_UNINITIALIZED) {
			spa_unload(spa);
			spa_deactivate(spa);
		}
		spa_remove(spa);
	}
	mutex_exit(&spa_namespace_lock);
}

vdev_t *
spa_lookup_by_guid(spa_t *spa, uint64_t guid, boolean_t l2cache)
{
	vdev_t *vd;
	int i;

	if ((vd = vdev_lookup_by_guid(spa->spa_root_vdev, guid)) != NULL)
		return (vd);

	if (l2cache) {
		for (i = 0; i < spa->spa_l2cache.sav_count; i++) {
			vd = spa->spa_l2cache.sav_vdevs[i];
			if (vd->vdev_guid == guid)
				return (vd);
		}
	}

	return (NULL);
}

void
spa_upgrade(spa_t *spa, uint64_t version)
{
	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);

	/*
	 * This should only be called for a non-faulted pool, and since a
	 * future version would result in an unopenable pool, this shouldn't be
	 * possible.
	 */
	ASSERT(spa->spa_uberblock.ub_version <= SPA_VERSION);
	ASSERT(version >= spa->spa_uberblock.ub_version);

	spa->spa_uberblock.ub_version = version;
	vdev_config_dirty(spa->spa_root_vdev);

	spa_config_exit(spa, SCL_ALL, FTAG);

	txg_wait_synced(spa_get_dsl(spa), 0);
}

boolean_t
spa_has_spare(spa_t *spa, uint64_t guid)
{
	int i;
	uint64_t spareguid;
	spa_aux_vdev_t *sav = &spa->spa_spares;

	for (i = 0; i < sav->sav_count; i++)
		if (sav->sav_vdevs[i]->vdev_guid == guid)
			return (B_TRUE);

	for (i = 0; i < sav->sav_npending; i++) {
		if (nvlist_lookup_uint64(sav->sav_pending[i], ZPOOL_CONFIG_GUID,
		    &spareguid) == 0 && spareguid == guid)
			return (B_TRUE);
	}

	return (B_FALSE);
}

/*
 * Check if a pool has an active shared spare device.
 * Note: the reference count of an active spare is 2, once as a spare and
 * once as a replacing device.
 */
static boolean_t
spa_has_active_shared_spare(spa_t *spa)
{
	int i, refcnt;
	uint64_t pool;
	spa_aux_vdev_t *sav = &spa->spa_spares;

	for (i = 0; i < sav->sav_count; i++) {
		if (spa_spare_exists(sav->sav_vdevs[i]->vdev_guid, &pool,
		    &refcnt) && pool != 0ULL && pool == spa_guid(spa) &&
		    refcnt > 2)
			return (B_TRUE);
	}

	return (B_FALSE);
}

/*
 * Post a sysevent corresponding to the given event. The 'name' must be one of
 * the event definitions in sys/sysevent/eventdefs.h. The payload will be
 * filled in from the spa and (optionally) the vdev. This doesn't do anything
 * in the userland libzpool, as we don't want consumers to misinterpret ztest
 * or zdb as real changes.
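 *
 * The payload always carries the pool name and pool GUID; when a vdev is
 * supplied, its GUID and (if set) its device path are attached as well.
 * For instance, a caller reacting to a device removal might post an event
 * such as ESC_ZFS_VDEV_REMOVE (one of the EC_ZFS definitions mentioned
 * above):
 *
 *	spa_event_notify(spa, vd, ESC_ZFS_VDEV_REMOVE);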
 */
void
spa_event_notify(spa_t *spa, vdev_t *vd, const char *name)
{
#ifdef _KERNEL
	sysevent_t *ev;
	sysevent_attr_list_t *attr = NULL;
	sysevent_value_t value;
	sysevent_id_t eid;

	ev = sysevent_alloc(EC_ZFS, (char *)name, SUNW_KERN_PUB "zfs",
	    SE_SLEEP);

	value.value_type = SE_DATA_TYPE_STRING;
	value.value.sv_string = spa_name(spa);
	if (sysevent_add_attr(&attr, ZFS_EV_POOL_NAME, &value, SE_SLEEP) != 0)
		goto done;

	value.value_type = SE_DATA_TYPE_UINT64;
	value.value.sv_uint64 = spa_guid(spa);
	if (sysevent_add_attr(&attr, ZFS_EV_POOL_GUID, &value, SE_SLEEP) != 0)
		goto done;

	if (vd) {
		value.value_type = SE_DATA_TYPE_UINT64;
		value.value.sv_uint64 = vd->vdev_guid;
		if (sysevent_add_attr(&attr, ZFS_EV_VDEV_GUID, &value,
		    SE_SLEEP) != 0)
			goto done;

		if (vd->vdev_path) {
			value.value_type = SE_DATA_TYPE_STRING;
			value.value.sv_string = vd->vdev_path;
			if (sysevent_add_attr(&attr, ZFS_EV_VDEV_PATH,
			    &value, SE_SLEEP) != 0)
				goto done;
		}
	}

	if (sysevent_attach_attributes(ev, attr) != 0)
		goto done;
	attr = NULL;

	(void) log_sysevent(ev, SE_SLEEP, &eid);

done:
	if (attr)
		sysevent_free_attr(attr);
	sysevent_free(ev);
#endif
}