/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

/*
 * This file contains all the routines used when modifying on-disk SPA state.
 * This includes opening, importing, destroying, exporting a pool, and syncing
 * a pool.
 */

#include <sys/zfs_context.h>
#include <sys/fm/fs/zfs.h>
#include <sys/spa_impl.h>
#include <sys/zio.h>
#include <sys/zio_checksum.h>
#include <sys/zio_compress.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/zap.h>
#include <sys/zil.h>
#include <sys/vdev_impl.h>
#include <sys/metaslab.h>
#include <sys/uberblock_impl.h>
#include <sys/txg.h>
#include <sys/avl.h>
#include <sys/dmu_traverse.h>
#include <sys/dmu_objset.h>
#include <sys/unique.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_synctask.h>
#include <sys/fs/zfs.h>
#include <sys/arc.h>
#include <sys/callb.h>
#include <sys/systeminfo.h>
#include <sys/sunddi.h>
#include <sys/spa_boot.h>

#include "zfs_prop.h"
#include "zfs_comutil.h"

int zio_taskq_threads[ZIO_TYPES][ZIO_TASKQ_TYPES] = {
	/*	ISSUE	INTR	*/
	{	1,	1	},	/* ZIO_TYPE_NULL	*/
	{	1,	8	},	/* ZIO_TYPE_READ	*/
	{	8,	1	},	/* ZIO_TYPE_WRITE	*/
	{	1,	1	},	/* ZIO_TYPE_FREE	*/
	{	1,	1	},	/* ZIO_TYPE_CLAIM	*/
	{	1,	1	},	/* ZIO_TYPE_IOCTL	*/
};

static void spa_sync_props(void *arg1, void *arg2, cred_t *cr, dmu_tx_t *tx);
static boolean_t spa_has_active_shared_spare(spa_t *spa);

/*
 * ==========================================================================
 * SPA properties routines
 * ==========================================================================
 */

/*
 * Add a (source=src, propname=propval) list to an nvlist.
 */
static void
spa_prop_add_list(nvlist_t *nvl, zpool_prop_t prop, char *strval,
    uint64_t intval, zprop_source_t src)
{
	const char *propname = zpool_prop_to_name(prop);
	nvlist_t *propval;

	VERIFY(nvlist_alloc(&propval, NV_UNIQUE_NAME, KM_SLEEP) == 0);
	VERIFY(nvlist_add_uint64(propval, ZPROP_SOURCE, src) == 0);

	if (strval != NULL)
		VERIFY(nvlist_add_string(propval, ZPROP_VALUE, strval) == 0);
	else
		VERIFY(nvlist_add_uint64(propval, ZPROP_VALUE, intval) == 0);

	VERIFY(nvlist_add_nvlist(nvl, propname, propval) == 0);
	nvlist_free(propval);
}

/*
 * Get property values from the spa configuration.
 */
static void
spa_prop_get_config(spa_t *spa, nvlist_t **nvp)
{
	uint64_t size = spa_get_space(spa);
	uint64_t used = spa_get_alloc(spa);
	uint64_t cap, version;
	zprop_source_t src = ZPROP_SRC_NONE;
	spa_config_dirent_t *dp;

	ASSERT(MUTEX_HELD(&spa->spa_props_lock));

	/*
	 * readonly properties
	 */
	spa_prop_add_list(*nvp, ZPOOL_PROP_NAME, spa_name(spa), 0, src);
	spa_prop_add_list(*nvp, ZPOOL_PROP_SIZE, NULL, size, src);
	spa_prop_add_list(*nvp, ZPOOL_PROP_USED, NULL, used, src);
	spa_prop_add_list(*nvp, ZPOOL_PROP_AVAILABLE, NULL, size - used, src);

	cap = (size == 0) ? 0 : (used * 100 / size);
	spa_prop_add_list(*nvp, ZPOOL_PROP_CAPACITY, NULL, cap, src);

	spa_prop_add_list(*nvp, ZPOOL_PROP_GUID, NULL, spa_guid(spa), src);
	spa_prop_add_list(*nvp, ZPOOL_PROP_HEALTH, NULL,
	    spa->spa_root_vdev->vdev_state, src);

	/*
	 * settable properties that are not stored in the pool property object.
	 */
	version = spa_version(spa);
	if (version == zpool_prop_default_numeric(ZPOOL_PROP_VERSION))
		src = ZPROP_SRC_DEFAULT;
	else
		src = ZPROP_SRC_LOCAL;
	spa_prop_add_list(*nvp, ZPOOL_PROP_VERSION, NULL, version, src);

	if (spa->spa_root != NULL)
		spa_prop_add_list(*nvp, ZPOOL_PROP_ALTROOT, spa->spa_root,
		    0, ZPROP_SRC_LOCAL);

	if ((dp = list_head(&spa->spa_config_list)) != NULL) {
		if (dp->scd_path == NULL) {
			spa_prop_add_list(*nvp, ZPOOL_PROP_CACHEFILE,
			    "none", 0, ZPROP_SRC_LOCAL);
		} else if (strcmp(dp->scd_path, spa_config_path) != 0) {
			spa_prop_add_list(*nvp, ZPOOL_PROP_CACHEFILE,
			    dp->scd_path, 0, ZPROP_SRC_LOCAL);
		}
	}
}

/*
 * Get zpool property values.
 */
int
spa_prop_get(spa_t *spa, nvlist_t **nvp)
{
	zap_cursor_t zc;
	zap_attribute_t za;
	objset_t *mos = spa->spa_meta_objset;
	int err;

	VERIFY(nvlist_alloc(nvp, NV_UNIQUE_NAME, KM_SLEEP) == 0);

	mutex_enter(&spa->spa_props_lock);

	/*
	 * Get properties from the spa config.
	 */
	spa_prop_get_config(spa, nvp);

	/* If no pool property object, no more prop to get. */
	if (spa->spa_pool_props_object == 0) {
		mutex_exit(&spa->spa_props_lock);
		return (0);
	}

	/*
	 * Get properties from the MOS pool property object.
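	 * Walk the ZAP with a cursor: 8-byte integer entries are added
	 * directly (bootfs is first translated from an object number to a
	 * dataset name), and string entries are fetched with a separate
	 * zap_lookup() before being added.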
	 */
	for (zap_cursor_init(&zc, mos, spa->spa_pool_props_object);
	    (err = zap_cursor_retrieve(&zc, &za)) == 0;
	    zap_cursor_advance(&zc)) {
		uint64_t intval = 0;
		char *strval = NULL;
		zprop_source_t src = ZPROP_SRC_DEFAULT;
		zpool_prop_t prop;

		if ((prop = zpool_name_to_prop(za.za_name)) == ZPROP_INVAL)
			continue;

		switch (za.za_integer_length) {
		case 8:
			/* integer property */
			if (za.za_first_integer !=
			    zpool_prop_default_numeric(prop))
				src = ZPROP_SRC_LOCAL;

			if (prop == ZPOOL_PROP_BOOTFS) {
				dsl_pool_t *dp;
				dsl_dataset_t *ds = NULL;

				dp = spa_get_dsl(spa);
				rw_enter(&dp->dp_config_rwlock, RW_READER);
				if (err = dsl_dataset_hold_obj(dp,
				    za.za_first_integer, FTAG, &ds)) {
					rw_exit(&dp->dp_config_rwlock);
					break;
				}

				strval = kmem_alloc(
				    MAXNAMELEN + strlen(MOS_DIR_NAME) + 1,
				    KM_SLEEP);
				dsl_dataset_name(ds, strval);
				dsl_dataset_rele(ds, FTAG);
				rw_exit(&dp->dp_config_rwlock);
			} else {
				strval = NULL;
				intval = za.za_first_integer;
			}

			spa_prop_add_list(*nvp, prop, strval, intval, src);

			if (strval != NULL)
				kmem_free(strval,
				    MAXNAMELEN + strlen(MOS_DIR_NAME) + 1);

			break;

		case 1:
			/* string property */
			strval = kmem_alloc(za.za_num_integers, KM_SLEEP);
			err = zap_lookup(mos, spa->spa_pool_props_object,
			    za.za_name, 1, za.za_num_integers, strval);
			if (err) {
				kmem_free(strval, za.za_num_integers);
				break;
			}
			spa_prop_add_list(*nvp, prop, strval, 0, src);
			kmem_free(strval, za.za_num_integers);
			break;

		default:
			break;
		}
	}
	zap_cursor_fini(&zc);
	mutex_exit(&spa->spa_props_lock);
out:
	if (err && err != ENOENT) {
		nvlist_free(*nvp);
		*nvp = NULL;
		return (err);
	}

	return (0);
}

/*
 * Validate the given pool properties nvlist and modify the list
 * for the property values to be set.
 */
static int
spa_prop_validate(spa_t *spa, nvlist_t *props)
{
	nvpair_t *elem;
	int error = 0, reset_bootfs = 0;
	uint64_t objnum;

	elem = NULL;
	while ((elem = nvlist_next_nvpair(props, elem)) != NULL) {
		zpool_prop_t prop;
		char *propname, *strval;
		uint64_t intval;
		objset_t *os;
		char *slash;

		propname = nvpair_name(elem);

		if ((prop = zpool_name_to_prop(propname)) == ZPROP_INVAL)
			return (EINVAL);

		switch (prop) {
		case ZPOOL_PROP_VERSION:
			error = nvpair_value_uint64(elem, &intval);
			if (!error &&
			    (intval < spa_version(spa) || intval > SPA_VERSION))
				error = EINVAL;
			break;

		case ZPOOL_PROP_DELEGATION:
		case ZPOOL_PROP_AUTOREPLACE:
		case ZPOOL_PROP_LISTSNAPS:
			error = nvpair_value_uint64(elem, &intval);
			if (!error && intval > 1)
				error = EINVAL;
			break;

		case ZPOOL_PROP_BOOTFS:
			if (spa_version(spa) < SPA_VERSION_BOOTFS) {
				error = ENOTSUP;
				break;
			}

			/*
			 * Make sure the vdev config is bootable
			 */
			if (!vdev_is_bootable(spa->spa_root_vdev)) {
				error = ENOTSUP;
				break;
			}

			reset_bootfs = 1;

			error = nvpair_value_string(elem, &strval);

			if (!error) {
				uint64_t compress;

				if (strval == NULL || strval[0] == '\0') {
					objnum = zpool_prop_default_numeric(
					    ZPOOL_PROP_BOOTFS);
					break;
				}

				if (error = dmu_objset_open(strval, DMU_OST_ZFS,
				    DS_MODE_USER | DS_MODE_READONLY, &os))
					break;

				/* We don't support gzip bootable datasets */
				if ((error = dsl_prop_get_integer(strval,
				    zfs_prop_to_name(ZFS_PROP_COMPRESSION),
				    &compress, NULL)) == 0 &&
				    !BOOTFS_COMPRESS_VALID(compress)) {
					error = ENOTSUP;
				} else {
					objnum = dmu_objset_id(os);
				}
				dmu_objset_close(os);
			}
			break;

		case ZPOOL_PROP_FAILUREMODE:
			error = nvpair_value_uint64(elem, &intval);
			if (!error && (intval < ZIO_FAILURE_MODE_WAIT ||
			    intval > ZIO_FAILURE_MODE_PANIC))
				error = EINVAL;

			/*
			 * This is a special case which only occurs when
			 * the pool has completely failed. This allows
			 * the user to change the in-core failmode property
			 * without syncing it out to disk (I/Os might
			 * currently be blocked). We do this by returning
			 * EIO to the caller (spa_prop_set) to trick it
			 * into thinking we encountered a property validation
			 * error.
			 */
			if (!error && spa_suspended(spa)) {
				spa->spa_failmode = intval;
				error = EIO;
			}
			break;

		case ZPOOL_PROP_CACHEFILE:
			if ((error = nvpair_value_string(elem, &strval)) != 0)
				break;

			if (strval[0] == '\0')
				break;

			if (strcmp(strval, "none") == 0)
				break;

			if (strval[0] != '/') {
				error = EINVAL;
				break;
			}

			slash = strrchr(strval, '/');
			ASSERT(slash != NULL);

			if (slash[1] == '\0' || strcmp(slash, "/.") == 0 ||
			    strcmp(slash, "/..") == 0)
				error = EINVAL;
			break;
		}

		if (error)
			break;
	}

	if (!error && reset_bootfs) {
		error = nvlist_remove(props,
		    zpool_prop_to_name(ZPOOL_PROP_BOOTFS), DATA_TYPE_STRING);

		if (!error) {
			error = nvlist_add_uint64(props,
			    zpool_prop_to_name(ZPOOL_PROP_BOOTFS), objnum);
		}
	}

	return (error);
}

int
spa_prop_set(spa_t *spa, nvlist_t *nvp)
{
	int error;

	if ((error = spa_prop_validate(spa, nvp)) != 0)
		return (error);

	return (dsl_sync_task_do(spa_get_dsl(spa), NULL, spa_sync_props,
	    spa, nvp, 3));
}

/*
 * If the bootfs property value is dsobj, clear it.
 */
void
spa_prop_clear_bootfs(spa_t *spa, uint64_t dsobj, dmu_tx_t *tx)
{
	if (spa->spa_bootfs == dsobj && spa->spa_pool_props_object != 0) {
		VERIFY(zap_remove(spa->spa_meta_objset,
		    spa->spa_pool_props_object,
		    zpool_prop_to_name(ZPOOL_PROP_BOOTFS), tx) == 0);
		spa->spa_bootfs = 0;
	}
}

/*
 * ==========================================================================
 * SPA state manipulation (open/create/destroy/import/export)
 * ==========================================================================
 */

static int
spa_error_entry_compare(const void *a, const void *b)
{
	spa_error_entry_t *sa = (spa_error_entry_t *)a;
	spa_error_entry_t *sb = (spa_error_entry_t *)b;
	int ret;

	ret = bcmp(&sa->se_bookmark, &sb->se_bookmark,
	    sizeof (zbookmark_t));

	if (ret < 0)
		return (-1);
	else if (ret > 0)
		return (1);
	else
		return (0);
}

/*
 * Utility function which retrieves copies of the current logs and
 * re-initializes them in the process.
 */
void
spa_get_errlists(spa_t *spa, avl_tree_t *last, avl_tree_t *scrub)
{
	ASSERT(MUTEX_HELD(&spa->spa_errlist_lock));

	bcopy(&spa->spa_errlist_last, last, sizeof (avl_tree_t));
	bcopy(&spa->spa_errlist_scrub, scrub, sizeof (avl_tree_t));

	avl_create(&spa->spa_errlist_scrub,
	    spa_error_entry_compare, sizeof (spa_error_entry_t),
	    offsetof(spa_error_entry_t, se_avl));
	avl_create(&spa->spa_errlist_last,
	    spa_error_entry_compare, sizeof (spa_error_entry_t),
	    offsetof(spa_error_entry_t, se_avl));
}

/*
 * Activate an uninitialized pool.
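 * Activation builds the in-core machinery the pool needs before any I/O:
 * the normal and log metaslab classes, the per-zio-type ISSUE/INTR task
 * queues sized by zio_taskq_threads[], the dirty vdev lists, and the
 * error-list AVL trees.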
 */
static void
spa_activate(spa_t *spa)
{
	ASSERT(spa->spa_state == POOL_STATE_UNINITIALIZED);

	spa->spa_state = POOL_STATE_ACTIVE;

	spa->spa_normal_class = metaslab_class_create();
	spa->spa_log_class = metaslab_class_create();

	for (int t = 0; t < ZIO_TYPES; t++) {
		for (int q = 0; q < ZIO_TASKQ_TYPES; q++) {
			spa->spa_zio_taskq[t][q] = taskq_create("spa_zio",
			    zio_taskq_threads[t][q], maxclsyspri, 50,
			    INT_MAX, TASKQ_PREPOPULATE);
		}
	}

	list_create(&spa->spa_config_dirty_list, sizeof (vdev_t),
	    offsetof(vdev_t, vdev_config_dirty_node));
	list_create(&spa->spa_state_dirty_list, sizeof (vdev_t),
	    offsetof(vdev_t, vdev_state_dirty_node));

	txg_list_create(&spa->spa_vdev_txg_list,
	    offsetof(struct vdev, vdev_txg_node));

	avl_create(&spa->spa_errlist_scrub,
	    spa_error_entry_compare, sizeof (spa_error_entry_t),
	    offsetof(spa_error_entry_t, se_avl));
	avl_create(&spa->spa_errlist_last,
	    spa_error_entry_compare, sizeof (spa_error_entry_t),
	    offsetof(spa_error_entry_t, se_avl));
}

/*
 * Opposite of spa_activate().
 */
static void
spa_deactivate(spa_t *spa)
{
	ASSERT(spa->spa_sync_on == B_FALSE);
	ASSERT(spa->spa_dsl_pool == NULL);
	ASSERT(spa->spa_root_vdev == NULL);

	ASSERT(spa->spa_state != POOL_STATE_UNINITIALIZED);

	txg_list_destroy(&spa->spa_vdev_txg_list);

	list_destroy(&spa->spa_config_dirty_list);
	list_destroy(&spa->spa_state_dirty_list);

	for (int t = 0; t < ZIO_TYPES; t++) {
		for (int q = 0; q < ZIO_TASKQ_TYPES; q++) {
			taskq_destroy(spa->spa_zio_taskq[t][q]);
			spa->spa_zio_taskq[t][q] = NULL;
		}
	}

	metaslab_class_destroy(spa->spa_normal_class);
	spa->spa_normal_class = NULL;

	metaslab_class_destroy(spa->spa_log_class);
	spa->spa_log_class = NULL;

	/*
	 * If this was part of an import or the open otherwise failed, we may
	 * still have errors left in the queues. Empty them just in case.
	 */
	spa_errlog_drain(spa);

	avl_destroy(&spa->spa_errlist_scrub);
	avl_destroy(&spa->spa_errlist_last);

	spa->spa_state = POOL_STATE_UNINITIALIZED;
}

/*
 * Verify a pool configuration, and construct the vdev tree appropriately. This
 * will create all the necessary vdevs in the appropriate layout, with each vdev
 * in the CLOSED state. This will prep the pool before open/creation/import.
 * All vdev validation is done by the vdev_alloc() routine.
 */
static int
spa_config_parse(spa_t *spa, vdev_t **vdp, nvlist_t *nv, vdev_t *parent,
    uint_t id, int atype)
{
	nvlist_t **child;
	uint_t c, children;
	int error;

	if ((error = vdev_alloc(spa, vdp, nv, parent, id, atype)) != 0)
		return (error);

	if ((*vdp)->vdev_ops->vdev_op_leaf)
		return (0);

	error = nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
	    &child, &children);

	if (error == ENOENT)
		return (0);

	if (error) {
		vdev_free(*vdp);
		*vdp = NULL;
		return (EINVAL);
	}

	for (c = 0; c < children; c++) {
		vdev_t *vd;
		if ((error = spa_config_parse(spa, &vd, child[c], *vdp, c,
		    atype)) != 0) {
			vdev_free(*vdp);
			*vdp = NULL;
			return (error);
		}
	}

	ASSERT(*vdp != NULL);

	return (0);
}

/*
 * Opposite of spa_load().
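 * Tears down the state built up by spa_load(): async tasks, the syncing
 * thread, the DSL pool, the vdev tree, and the spare and l2cache auxiliary
 * device lists.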
 */
static void
spa_unload(spa_t *spa)
{
	int i;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	/*
	 * Stop async tasks.
	 */
	spa_async_suspend(spa);

	/*
	 * Stop syncing.
	 */
	if (spa->spa_sync_on) {
		txg_sync_stop(spa->spa_dsl_pool);
		spa->spa_sync_on = B_FALSE;
	}

	/*
	 * Wait for any outstanding async I/O to complete.
	 */
	mutex_enter(&spa->spa_async_root_lock);
	while (spa->spa_async_root_count != 0)
		cv_wait(&spa->spa_async_root_cv, &spa->spa_async_root_lock);
	mutex_exit(&spa->spa_async_root_lock);

	/*
	 * Drop and purge level 2 cache
	 */
	spa_l2cache_drop(spa);

	/*
	 * Close the dsl pool.
	 */
	if (spa->spa_dsl_pool) {
		dsl_pool_close(spa->spa_dsl_pool);
		spa->spa_dsl_pool = NULL;
	}

	/*
	 * Close all vdevs.
	 */
	if (spa->spa_root_vdev)
		vdev_free(spa->spa_root_vdev);
	ASSERT(spa->spa_root_vdev == NULL);

	for (i = 0; i < spa->spa_spares.sav_count; i++)
		vdev_free(spa->spa_spares.sav_vdevs[i]);
	if (spa->spa_spares.sav_vdevs) {
		kmem_free(spa->spa_spares.sav_vdevs,
		    spa->spa_spares.sav_count * sizeof (void *));
		spa->spa_spares.sav_vdevs = NULL;
	}
	if (spa->spa_spares.sav_config) {
		nvlist_free(spa->spa_spares.sav_config);
		spa->spa_spares.sav_config = NULL;
	}
	spa->spa_spares.sav_count = 0;

	for (i = 0; i < spa->spa_l2cache.sav_count; i++)
		vdev_free(spa->spa_l2cache.sav_vdevs[i]);
	if (spa->spa_l2cache.sav_vdevs) {
		kmem_free(spa->spa_l2cache.sav_vdevs,
		    spa->spa_l2cache.sav_count * sizeof (void *));
		spa->spa_l2cache.sav_vdevs = NULL;
	}
	if (spa->spa_l2cache.sav_config) {
		nvlist_free(spa->spa_l2cache.sav_config);
		spa->spa_l2cache.sav_config = NULL;
	}
	spa->spa_l2cache.sav_count = 0;

	spa->spa_async_suspended = 0;
}

/*
 * Load (or re-load) the current list of vdevs describing the active spares for
 * this pool. When this is called, we have some form of basic information in
 * 'spa_spares.sav_config'. We parse this into vdevs, try to open them, and
 * then re-generate a more complete list including status information.
 */
static void
spa_load_spares(spa_t *spa)
{
	nvlist_t **spares;
	uint_t nspares;
	int i;
	vdev_t *vd, *tvd;

	ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);

	/*
	 * First, close and free any existing spare vdevs.
	 */
	for (i = 0; i < spa->spa_spares.sav_count; i++) {
		vd = spa->spa_spares.sav_vdevs[i];

		/* Undo the call to spa_activate() below */
		if ((tvd = spa_lookup_by_guid(spa, vd->vdev_guid,
		    B_FALSE)) != NULL && tvd->vdev_isspare)
			spa_spare_remove(tvd);
		vdev_close(vd);
		vdev_free(vd);
	}

	if (spa->spa_spares.sav_vdevs)
		kmem_free(spa->spa_spares.sav_vdevs,
		    spa->spa_spares.sav_count * sizeof (void *));

	if (spa->spa_spares.sav_config == NULL)
		nspares = 0;
	else
		VERIFY(nvlist_lookup_nvlist_array(spa->spa_spares.sav_config,
		    ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0);

	spa->spa_spares.sav_count = (int)nspares;
	spa->spa_spares.sav_vdevs = NULL;

	if (nspares == 0)
		return;

	/*
	 * Construct the array of vdevs, opening them to get status in the
	 * process.
	 * For each spare, there are potentially two different vdev_t
	 * structures associated with it: one in the list of spares (used only
	 * for basic validation purposes) and one in the active vdev
	 * configuration (if it's spared in). During this phase we open and
	 * validate each vdev on the spare list. If the vdev also exists in the
	 * active configuration, then we also mark this vdev as an active spare.
	 */
	spa->spa_spares.sav_vdevs = kmem_alloc(nspares * sizeof (void *),
	    KM_SLEEP);
	for (i = 0; i < spa->spa_spares.sav_count; i++) {
		VERIFY(spa_config_parse(spa, &vd, spares[i], NULL, 0,
		    VDEV_ALLOC_SPARE) == 0);
		ASSERT(vd != NULL);

		spa->spa_spares.sav_vdevs[i] = vd;

		if ((tvd = spa_lookup_by_guid(spa, vd->vdev_guid,
		    B_FALSE)) != NULL) {
			if (!tvd->vdev_isspare)
				spa_spare_add(tvd);

			/*
			 * We only mark the spare active if we were successfully
			 * able to load the vdev. Otherwise, importing a pool
			 * with a bad active spare would result in strange
			 * behavior, because multiple pools would think the
			 * spare is actively in use.
			 *
			 * There is a vulnerability here to an equally bizarre
			 * circumstance, where a dead active spare is later
			 * brought back to life (onlined or otherwise). Given
			 * the rarity of this scenario, and the extra complexity
			 * it adds, we ignore the possibility.
			 */
			if (!vdev_is_dead(tvd))
				spa_spare_activate(tvd);
		}

		vd->vdev_top = vd;

		if (vdev_open(vd) != 0)
			continue;

		if (vdev_validate_aux(vd) == 0)
			spa_spare_add(vd);
	}

	/*
	 * Recompute the stashed list of spares, with status information
	 * this time.
	 */
	VERIFY(nvlist_remove(spa->spa_spares.sav_config, ZPOOL_CONFIG_SPARES,
	    DATA_TYPE_NVLIST_ARRAY) == 0);

	spares = kmem_alloc(spa->spa_spares.sav_count * sizeof (void *),
	    KM_SLEEP);
	for (i = 0; i < spa->spa_spares.sav_count; i++)
		spares[i] = vdev_config_generate(spa,
		    spa->spa_spares.sav_vdevs[i], B_TRUE, B_TRUE, B_FALSE);
	VERIFY(nvlist_add_nvlist_array(spa->spa_spares.sav_config,
	    ZPOOL_CONFIG_SPARES, spares, spa->spa_spares.sav_count) == 0);
	for (i = 0; i < spa->spa_spares.sav_count; i++)
		nvlist_free(spares[i]);
	kmem_free(spares, spa->spa_spares.sav_count * sizeof (void *));
}

/*
 * Load (or re-load) the current list of vdevs describing the active l2cache for
 * this pool. When this is called, we have some form of basic information in
 * 'spa_l2cache.sav_config'. We parse this into vdevs, try to open them, and
 * then re-generate a more complete list including status information.
 * Devices which are already active have their details maintained, and are
 * not re-opened.
 */
static void
spa_load_l2cache(spa_t *spa)
{
	nvlist_t **l2cache;
	uint_t nl2cache;
	int i, j, oldnvdevs;
	uint64_t guid, size;
	vdev_t *vd, **oldvdevs, **newvdevs;
	spa_aux_vdev_t *sav = &spa->spa_l2cache;

	ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);

	if (sav->sav_config != NULL) {
		VERIFY(nvlist_lookup_nvlist_array(sav->sav_config,
		    ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0);
		newvdevs = kmem_alloc(nl2cache * sizeof (void *), KM_SLEEP);
	} else {
		nl2cache = 0;
	}

	oldvdevs = sav->sav_vdevs;
	oldnvdevs = sav->sav_count;
	sav->sav_vdevs = NULL;
	sav->sav_count = 0;

	/*
	 * Process new nvlist of vdevs.
	 */
	for (i = 0; i < nl2cache; i++) {
		VERIFY(nvlist_lookup_uint64(l2cache[i], ZPOOL_CONFIG_GUID,
		    &guid) == 0);

		newvdevs[i] = NULL;
		for (j = 0; j < oldnvdevs; j++) {
			vd = oldvdevs[j];
			if (vd != NULL && guid == vd->vdev_guid) {
				/*
				 * Retain previous vdev for add/remove ops.
				 */
				newvdevs[i] = vd;
				oldvdevs[j] = NULL;
				break;
			}
		}

		if (newvdevs[i] == NULL) {
			/*
			 * Create new vdev
			 */
			VERIFY(spa_config_parse(spa, &vd, l2cache[i], NULL, 0,
			    VDEV_ALLOC_L2CACHE) == 0);
			ASSERT(vd != NULL);
			newvdevs[i] = vd;

			/*
			 * Commit this vdev as an l2cache device,
			 * even if it fails to open.
			 */
			spa_l2cache_add(vd);

			vd->vdev_top = vd;
			vd->vdev_aux = sav;

			spa_l2cache_activate(vd);

			if (vdev_open(vd) != 0)
				continue;

			(void) vdev_validate_aux(vd);

			if (!vdev_is_dead(vd)) {
				size = vdev_get_rsize(vd);
				l2arc_add_vdev(spa, vd,
				    VDEV_LABEL_START_SIZE,
				    size - VDEV_LABEL_START_SIZE);
			}
		}
	}

	/*
	 * Purge vdevs that were dropped
	 */
	for (i = 0; i < oldnvdevs; i++) {
		uint64_t pool;

		vd = oldvdevs[i];
		if (vd != NULL) {
			if ((spa_mode & FWRITE) &&
			    spa_l2cache_exists(vd->vdev_guid, &pool) &&
			    pool != 0ULL &&
			    l2arc_vdev_present(vd)) {
				l2arc_remove_vdev(vd);
			}
			(void) vdev_close(vd);
			spa_l2cache_remove(vd);
		}
	}

	if (oldvdevs)
		kmem_free(oldvdevs, oldnvdevs * sizeof (void *));

	if (sav->sav_config == NULL)
		goto out;

	sav->sav_vdevs = newvdevs;
	sav->sav_count = (int)nl2cache;

	/*
	 * Recompute the stashed list of l2cache devices, with status
	 * information this time.
	 */
	VERIFY(nvlist_remove(sav->sav_config, ZPOOL_CONFIG_L2CACHE,
	    DATA_TYPE_NVLIST_ARRAY) == 0);

	l2cache = kmem_alloc(sav->sav_count * sizeof (void *), KM_SLEEP);
	for (i = 0; i < sav->sav_count; i++)
		l2cache[i] = vdev_config_generate(spa,
		    sav->sav_vdevs[i], B_TRUE, B_FALSE, B_TRUE);
	VERIFY(nvlist_add_nvlist_array(sav->sav_config,
	    ZPOOL_CONFIG_L2CACHE, l2cache, sav->sav_count) == 0);
out:
	for (i = 0; i < sav->sav_count; i++)
		nvlist_free(l2cache[i]);
	if (sav->sav_count)
		kmem_free(l2cache, sav->sav_count * sizeof (void *));
}

static int
load_nvlist(spa_t *spa, uint64_t obj, nvlist_t **value)
{
	dmu_buf_t *db;
	char *packed = NULL;
	size_t nvsize = 0;
	int error;
	*value = NULL;

	VERIFY(0 == dmu_bonus_hold(spa->spa_meta_objset, obj, FTAG, &db));
	nvsize = *(uint64_t *)db->db_data;
	dmu_buf_rele(db, FTAG);

	packed = kmem_alloc(nvsize, KM_SLEEP);
	error = dmu_read(spa->spa_meta_objset, obj, 0, nvsize, packed);
	if (error == 0)
		error = nvlist_unpack(packed, nvsize, value, 0);
	kmem_free(packed, nvsize);

	return (error);
}

/*
 * Checks to see if the given vdev could not be opened, in which case we post a
 * sysevent to notify the autoreplace code that the device has been removed.
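 * The check recurses over all children, so passing the root vdev scans the
 * entire tree.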
 */
static void
spa_check_removed(vdev_t *vd)
{
	int c;

	for (c = 0; c < vd->vdev_children; c++)
		spa_check_removed(vd->vdev_child[c]);

	if (vd->vdev_ops->vdev_op_leaf && vdev_is_dead(vd)) {
		zfs_post_autoreplace(vd->vdev_spa, vd);
		spa_event_notify(vd->vdev_spa, vd, ESC_ZFS_VDEV_CHECK);
	}
}

/*
 * Check for missing log devices
 */
int
spa_check_logs(spa_t *spa)
{
	switch (spa->spa_log_state) {
	case SPA_LOG_MISSING:
		/* need to recheck in case slog has been restored */
	case SPA_LOG_UNKNOWN:
		if (dmu_objset_find(spa->spa_name, zil_check_log_chain, NULL,
		    DS_FIND_CHILDREN)) {
			spa->spa_log_state = SPA_LOG_MISSING;
			return (1);
		}
		break;

	case SPA_LOG_CLEAR:
		(void) dmu_objset_find(spa->spa_name, zil_clear_log_chain, NULL,
		    DS_FIND_CHILDREN);
		break;
	}
	spa->spa_log_state = SPA_LOG_GOOD;
	return (0);
}

/*
 * Load an existing storage pool, using the pool's builtin spa_config as a
 * source of configuration information.
 */
static int
spa_load(spa_t *spa, nvlist_t *config, spa_load_state_t state, int mosconfig)
{
	int error = 0;
	nvlist_t *nvroot = NULL;
	vdev_t *rvd;
	uberblock_t *ub = &spa->spa_uberblock;
	uint64_t config_cache_txg = spa->spa_config_txg;
	uint64_t pool_guid;
	uint64_t version;
	uint64_t autoreplace = 0;
	char *ereport = FM_EREPORT_ZFS_POOL;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	spa->spa_load_state = state;

	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &nvroot) ||
	    nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID, &pool_guid)) {
		error = EINVAL;
		goto out;
	}

	/*
	 * Versioning wasn't explicitly added to the label until later, so if
	 * it's not present treat it as the initial version.
	 */
	if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION, &version) != 0)
		version = SPA_VERSION_INITIAL;

	(void) nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_TXG,
	    &spa->spa_config_txg);

	if ((state == SPA_LOAD_IMPORT || state == SPA_LOAD_TRYIMPORT) &&
	    spa_guid_exists(pool_guid, 0)) {
		error = EEXIST;
		goto out;
	}

	spa->spa_load_guid = pool_guid;

	/*
	 * Parse the configuration into a vdev tree. We explicitly set the
	 * value that will be returned by spa_version() since parsing the
	 * configuration requires knowing the version number.
	 */
	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
	spa->spa_ubsync.ub_version = version;
	error = spa_config_parse(spa, &rvd, nvroot, NULL, 0, VDEV_ALLOC_LOAD);
	spa_config_exit(spa, SCL_ALL, FTAG);

	if (error != 0)
		goto out;

	ASSERT(spa->spa_root_vdev == rvd);
	ASSERT(spa_guid(spa) == pool_guid);

	/*
	 * Try to open all vdevs, loading each label in the process.
	 */
	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
	error = vdev_open(rvd);
	spa_config_exit(spa, SCL_ALL, FTAG);
	if (error != 0)
		goto out;

	/*
	 * Validate the labels for all leaf vdevs. We need to grab the config
	 * lock because all label I/O is done with ZIO_FLAG_CONFIG_WRITER.
	 */
	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
	error = vdev_validate(rvd);
	spa_config_exit(spa, SCL_ALL, FTAG);

	if (error != 0)
		goto out;

	if (rvd->vdev_state <= VDEV_STATE_CANT_OPEN) {
		error = ENXIO;
		goto out;
	}

	/*
	 * Find the best uberblock.
	 */
	vdev_uberblock_load(NULL, rvd, ub);

	/*
	 * If we weren't able to find a single valid uberblock, return failure.
	 */
	if (ub->ub_txg == 0) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_CORRUPT_DATA);
		error = ENXIO;
		goto out;
	}

	/*
	 * If the pool is newer than the code, we can't open it.
	 */
	if (ub->ub_version > SPA_VERSION) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_VERSION_NEWER);
		error = ENOTSUP;
		goto out;
	}

	/*
	 * If the vdev guid sum doesn't match the uberblock, we have an
	 * incomplete configuration.
	 */
	if (rvd->vdev_guid_sum != ub->ub_guid_sum && mosconfig) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_BAD_GUID_SUM);
		error = ENXIO;
		goto out;
	}

	/*
	 * Initialize internal SPA structures.
	 */
	spa->spa_state = POOL_STATE_ACTIVE;
	spa->spa_ubsync = spa->spa_uberblock;
	spa->spa_first_txg = spa_last_synced_txg(spa) + 1;
	error = dsl_pool_open(spa, spa->spa_first_txg, &spa->spa_dsl_pool);
	if (error) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_CORRUPT_DATA);
		goto out;
	}
	spa->spa_meta_objset = spa->spa_dsl_pool->dp_meta_objset;

	if (zap_lookup(spa->spa_meta_objset,
	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_CONFIG,
	    sizeof (uint64_t), 1, &spa->spa_config_object) != 0) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_CORRUPT_DATA);
		error = EIO;
		goto out;
	}

	if (!mosconfig) {
		nvlist_t *newconfig;
		uint64_t hostid;

		if (load_nvlist(spa, spa->spa_config_object, &newconfig) != 0) {
			vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
			    VDEV_AUX_CORRUPT_DATA);
			error = EIO;
			goto out;
		}

		if (!spa_is_root(spa) && nvlist_lookup_uint64(newconfig,
		    ZPOOL_CONFIG_HOSTID, &hostid) == 0) {
			char *hostname;
			unsigned long myhostid = 0;

			VERIFY(nvlist_lookup_string(newconfig,
			    ZPOOL_CONFIG_HOSTNAME, &hostname) == 0);

			(void) ddi_strtoul(hw_serial, NULL, 10, &myhostid);
			if (hostid != 0 && myhostid != 0 &&
			    (unsigned long)hostid != myhostid) {
				cmn_err(CE_WARN, "pool '%s' could not be "
				    "loaded as it was last accessed by "
				    "another system (host: %s hostid: 0x%lx). "
				    "See: http://www.sun.com/msg/ZFS-8000-EY",
				    spa_name(spa), hostname,
				    (unsigned long)hostid);
				error = EBADF;
				goto out;
			}
		}

		spa_config_set(spa, newconfig);
		spa_unload(spa);
		spa_deactivate(spa);
		spa_activate(spa);

		return (spa_load(spa, newconfig, state, B_TRUE));
	}

	if (zap_lookup(spa->spa_meta_objset,
	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_SYNC_BPLIST,
	    sizeof (uint64_t), 1, &spa->spa_sync_bplist_obj) != 0) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_CORRUPT_DATA);
		error = EIO;
		goto out;
	}

	/*
	 * Load the bit that tells us to use the new accounting function
	 * (raid-z deflation). If we have an older pool, this will not
	 * be present.
	 */
	error = zap_lookup(spa->spa_meta_objset,
	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_DEFLATE,
	    sizeof (uint64_t), 1, &spa->spa_deflate);
	if (error != 0 && error != ENOENT) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_CORRUPT_DATA);
		error = EIO;
		goto out;
	}

	/*
	 * Load the persistent error log. If we have an older pool, this will
	 * not be present.
	 */
	error = zap_lookup(spa->spa_meta_objset,
	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_ERRLOG_LAST,
	    sizeof (uint64_t), 1, &spa->spa_errlog_last);
	if (error != 0 && error != ENOENT) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_CORRUPT_DATA);
		error = EIO;
		goto out;
	}

	error = zap_lookup(spa->spa_meta_objset,
	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_ERRLOG_SCRUB,
	    sizeof (uint64_t), 1, &spa->spa_errlog_scrub);
	if (error != 0 && error != ENOENT) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_CORRUPT_DATA);
		error = EIO;
		goto out;
	}

	/*
	 * Load the history object. If we have an older pool, this
	 * will not be present.
	 */
	error = zap_lookup(spa->spa_meta_objset,
	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_HISTORY,
	    sizeof (uint64_t), 1, &spa->spa_history);
	if (error != 0 && error != ENOENT) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_CORRUPT_DATA);
		error = EIO;
		goto out;
	}

	/*
	 * Load any hot spares for this pool.
	 */
	error = zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_SPARES, sizeof (uint64_t), 1, &spa->spa_spares.sav_object);
	if (error != 0 && error != ENOENT) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_CORRUPT_DATA);
		error = EIO;
		goto out;
	}
	if (error == 0) {
		ASSERT(spa_version(spa) >= SPA_VERSION_SPARES);
		if (load_nvlist(spa, spa->spa_spares.sav_object,
		    &spa->spa_spares.sav_config) != 0) {
			vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
			    VDEV_AUX_CORRUPT_DATA);
			error = EIO;
			goto out;
		}

		spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
		spa_load_spares(spa);
		spa_config_exit(spa, SCL_ALL, FTAG);
	}

	/*
	 * Load any level 2 ARC devices for this pool.
	 */
	error = zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_L2CACHE, sizeof (uint64_t), 1,
	    &spa->spa_l2cache.sav_object);
	if (error != 0 && error != ENOENT) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_CORRUPT_DATA);
		error = EIO;
		goto out;
	}
	if (error == 0) {
		ASSERT(spa_version(spa) >= SPA_VERSION_L2CACHE);
		if (load_nvlist(spa, spa->spa_l2cache.sav_object,
		    &spa->spa_l2cache.sav_config) != 0) {
			vdev_set_state(rvd, B_TRUE,
			    VDEV_STATE_CANT_OPEN,
			    VDEV_AUX_CORRUPT_DATA);
			error = EIO;
			goto out;
		}

		spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
		spa_load_l2cache(spa);
		spa_config_exit(spa, SCL_ALL, FTAG);
	}

	if (spa_check_logs(spa)) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_BAD_LOG);
		error = ENXIO;
		ereport = FM_EREPORT_ZFS_LOG_REPLAY;
		goto out;
	}

	spa->spa_delegation = zpool_prop_default_numeric(ZPOOL_PROP_DELEGATION);

	error = zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_PROPS, sizeof (uint64_t), 1, &spa->spa_pool_props_object);

	if (error && error != ENOENT) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_CORRUPT_DATA);
		error = EIO;
		goto out;
	}

	if (error == 0) {
		(void) zap_lookup(spa->spa_meta_objset,
		    spa->spa_pool_props_object,
		    zpool_prop_to_name(ZPOOL_PROP_BOOTFS),
		    sizeof (uint64_t), 1, &spa->spa_bootfs);
		(void) zap_lookup(spa->spa_meta_objset,
		    spa->spa_pool_props_object,
		    zpool_prop_to_name(ZPOOL_PROP_AUTOREPLACE),
		    sizeof (uint64_t), 1, &autoreplace);
		(void) zap_lookup(spa->spa_meta_objset,
		    spa->spa_pool_props_object,
		    zpool_prop_to_name(ZPOOL_PROP_DELEGATION),
		    sizeof (uint64_t), 1, &spa->spa_delegation);
		(void) zap_lookup(spa->spa_meta_objset,
		    spa->spa_pool_props_object,
		    zpool_prop_to_name(ZPOOL_PROP_FAILUREMODE),
		    sizeof (uint64_t), 1, &spa->spa_failmode);
	}

	/*
	 * If the 'autoreplace' property is set, then post a resource notifying
	 * the ZFS DE that it should not issue any faults for unopenable
	 * devices. We also iterate over the vdevs, and post a sysevent for any
	 * unopenable vdevs so that the normal autoreplace handler can take
	 * over.
	 */
	if (autoreplace && state != SPA_LOAD_TRYIMPORT)
		spa_check_removed(spa->spa_root_vdev);

	/*
	 * Load the vdev state for all toplevel vdevs.
	 */
	vdev_load(rvd);

	/*
	 * Propagate the leaf DTLs we just loaded all the way up the tree.
	 */
	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
	vdev_dtl_reassess(rvd, 0, 0, B_FALSE);
	spa_config_exit(spa, SCL_ALL, FTAG);

	/*
	 * Check the state of the root vdev. If it can't be opened, it
	 * indicates one or more toplevel vdevs are faulted.
	 */
	if (rvd->vdev_state <= VDEV_STATE_CANT_OPEN) {
		error = ENXIO;
		goto out;
	}

	if ((spa_mode & FWRITE) && state != SPA_LOAD_TRYIMPORT) {
		dmu_tx_t *tx;
		int need_update = B_FALSE;
		int c;

		/*
		 * Claim log blocks that haven't been committed yet.
		 * This must all happen in a single txg.
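		 * The claim runs through dmu_objset_find() over every dataset
		 * in the pool, using a tx assigned to the pool's first txg
		 * (below), and we then wait for that txg to sync.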
		 */
		tx = dmu_tx_create_assigned(spa_get_dsl(spa),
		    spa_first_txg(spa));
		(void) dmu_objset_find(spa_name(spa),
		    zil_claim, tx, DS_FIND_CHILDREN);
		dmu_tx_commit(tx);

		spa->spa_sync_on = B_TRUE;
		txg_sync_start(spa->spa_dsl_pool);

		/*
		 * Wait for all claims to sync.
		 */
		txg_wait_synced(spa->spa_dsl_pool, 0);

		/*
		 * If the config cache is stale, or we have uninitialized
		 * metaslabs (see spa_vdev_add()), then update the config.
		 */
		if (config_cache_txg != spa->spa_config_txg ||
		    state == SPA_LOAD_IMPORT)
			need_update = B_TRUE;

		for (c = 0; c < rvd->vdev_children; c++)
			if (rvd->vdev_child[c]->vdev_ms_array == 0)
				need_update = B_TRUE;

		/*
		 * Update the config cache asynchronously in case we're the
		 * root pool, in which case the config cache isn't writable yet.
		 */
		if (need_update)
			spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE);
	}

	error = 0;
out:
	spa->spa_minref = refcount_count(&spa->spa_refcount);
	if (error && error != EBADF)
		zfs_ereport_post(ereport, spa, NULL, NULL, 0, 0);
	spa->spa_load_state = SPA_LOAD_NONE;
	spa->spa_ena = 0;

	return (error);
}

/*
 * Pool Open/Import
 *
 * The import case is identical to an open except that the configuration is sent
 * down from userland, instead of grabbed from the configuration cache. For the
 * case of an open, the pool configuration will exist in the
 * POOL_STATE_UNINITIALIZED state.
 *
 * The stats information (gen/count/ustats) is used to gather vdev statistics at
 * the same time we open the pool, without having to keep around the spa_t in
 * some ambiguous state.
 */
static int
spa_open_common(const char *pool, spa_t **spapp, void *tag, nvlist_t **config)
{
	spa_t *spa;
	int error;
	int locked = B_FALSE;

	*spapp = NULL;

	/*
	 * As disgusting as this is, we need to support recursive calls to this
	 * function because dsl_dir_open() is called during spa_load(), and ends
	 * up calling spa_open() again. The real fix is to figure out how to
	 * avoid dsl_dir_open() calling this in the first place.
	 */
	if (mutex_owner(&spa_namespace_lock) != curthread) {
		mutex_enter(&spa_namespace_lock);
		locked = B_TRUE;
	}

	if ((spa = spa_lookup(pool)) == NULL) {
		if (locked)
			mutex_exit(&spa_namespace_lock);
		return (ENOENT);
	}
	if (spa->spa_state == POOL_STATE_UNINITIALIZED) {

		spa_activate(spa);

		error = spa_load(spa, spa->spa_config, SPA_LOAD_OPEN, B_FALSE);

		if (error == EBADF) {
			/*
			 * If vdev_validate() returns failure (indicated by
			 * EBADF), it means that one of the vdevs indicates
			 * that the pool has been exported or destroyed. If
			 * this is the case, the config cache is out of sync and
			 * we should remove the pool from the namespace.
			 */
			spa_unload(spa);
			spa_deactivate(spa);
			spa_config_sync(spa, B_TRUE, B_TRUE);
			spa_remove(spa);
			if (locked)
				mutex_exit(&spa_namespace_lock);
			return (ENOENT);
		}

		if (error) {
			/*
			 * We can't open the pool, but we still have useful
			 * information: the state of each vdev after the
			 * attempted vdev_open(). Return this to the user.
			 */
			if (config != NULL && spa->spa_root_vdev != NULL)
				*config = spa_config_generate(spa, NULL, -1ULL,
				    B_TRUE);
			spa_unload(spa);
			spa_deactivate(spa);
			spa->spa_last_open_failed = B_TRUE;
			if (locked)
				mutex_exit(&spa_namespace_lock);
			*spapp = NULL;
			return (error);
		} else {
			spa->spa_last_open_failed = B_FALSE;
		}
	}

	spa_open_ref(spa, tag);

	if (locked)
		mutex_exit(&spa_namespace_lock);

	*spapp = spa;

	if (config != NULL)
		*config = spa_config_generate(spa, NULL, -1ULL, B_TRUE);

	return (0);
}

int
spa_open(const char *name, spa_t **spapp, void *tag)
{
	return (spa_open_common(name, spapp, tag, NULL));
}

/*
 * Lookup the given spa_t, incrementing the inject count in the process,
 * preventing it from being exported or destroyed.
 */
spa_t *
spa_inject_addref(char *name)
{
	spa_t *spa;

	mutex_enter(&spa_namespace_lock);
	if ((spa = spa_lookup(name)) == NULL) {
		mutex_exit(&spa_namespace_lock);
		return (NULL);
	}
	spa->spa_inject_ref++;
	mutex_exit(&spa_namespace_lock);

	return (spa);
}

void
spa_inject_delref(spa_t *spa)
{
	mutex_enter(&spa_namespace_lock);
	spa->spa_inject_ref--;
	mutex_exit(&spa_namespace_lock);
}

/*
 * Add spares device information to the nvlist.
 */
static void
spa_add_spares(spa_t *spa, nvlist_t *config)
{
	nvlist_t **spares;
	uint_t i, nspares;
	nvlist_t *nvroot;
	uint64_t guid;
	vdev_stat_t *vs;
	uint_t vsc;
	uint64_t pool;

	if (spa->spa_spares.sav_count == 0)
		return;

	VERIFY(nvlist_lookup_nvlist(config,
	    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
	VERIFY(nvlist_lookup_nvlist_array(spa->spa_spares.sav_config,
	    ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0);
	if (nspares != 0) {
		VERIFY(nvlist_add_nvlist_array(nvroot,
		    ZPOOL_CONFIG_SPARES, spares, nspares) == 0);
		VERIFY(nvlist_lookup_nvlist_array(nvroot,
		    ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0);

		/*
		 * Go through and find any spares which have since been
		 * repurposed as an active spare. If this is the case, update
		 * their status appropriately.
		 */
		for (i = 0; i < nspares; i++) {
			VERIFY(nvlist_lookup_uint64(spares[i],
			    ZPOOL_CONFIG_GUID, &guid) == 0);
			if (spa_spare_exists(guid, &pool, NULL) &&
			    pool != 0ULL) {
				VERIFY(nvlist_lookup_uint64_array(
				    spares[i], ZPOOL_CONFIG_STATS,
				    (uint64_t **)&vs, &vsc) == 0);
				vs->vs_state = VDEV_STATE_CANT_OPEN;
				vs->vs_aux = VDEV_AUX_SPARED;
			}
		}
	}
}

/*
 * Add l2cache device information to the nvlist, including vdev stats.
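 * The per-device stats are refreshed from the in-core cache vdevs while
 * holding SCL_CONFIG as reader.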
 */
static void
spa_add_l2cache(spa_t *spa, nvlist_t *config)
{
	nvlist_t **l2cache;
	uint_t i, j, nl2cache;
	nvlist_t *nvroot;
	uint64_t guid;
	vdev_t *vd;
	vdev_stat_t *vs;
	uint_t vsc;

	if (spa->spa_l2cache.sav_count == 0)
		return;

	spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);

	VERIFY(nvlist_lookup_nvlist(config,
	    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
	VERIFY(nvlist_lookup_nvlist_array(spa->spa_l2cache.sav_config,
	    ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0);
	if (nl2cache != 0) {
		VERIFY(nvlist_add_nvlist_array(nvroot,
		    ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache) == 0);
		VERIFY(nvlist_lookup_nvlist_array(nvroot,
		    ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0);

		/*
		 * Update level 2 cache device stats.
		 */
		for (i = 0; i < nl2cache; i++) {
			VERIFY(nvlist_lookup_uint64(l2cache[i],
			    ZPOOL_CONFIG_GUID, &guid) == 0);

			vd = NULL;
			for (j = 0; j < spa->spa_l2cache.sav_count; j++) {
				if (guid ==
				    spa->spa_l2cache.sav_vdevs[j]->vdev_guid) {
					vd = spa->spa_l2cache.sav_vdevs[j];
					break;
				}
			}
			ASSERT(vd != NULL);

			VERIFY(nvlist_lookup_uint64_array(l2cache[i],
			    ZPOOL_CONFIG_STATS, (uint64_t **)&vs, &vsc) == 0);
			vdev_get_stats(vd, vs);
		}
	}

	spa_config_exit(spa, SCL_CONFIG, FTAG);
}

int
spa_get_stats(const char *name, nvlist_t **config, char *altroot,
    size_t buflen)
{
	int error;
	spa_t *spa;

	*config = NULL;
	error = spa_open_common(name, &spa, FTAG, config);

	if (spa && *config != NULL) {
		VERIFY(nvlist_add_uint64(*config, ZPOOL_CONFIG_ERRCOUNT,
		    spa_get_errlog_size(spa)) == 0);

		if (spa_suspended(spa))
			VERIFY(nvlist_add_uint64(*config,
			    ZPOOL_CONFIG_SUSPENDED, spa->spa_failmode) == 0);

		spa_add_spares(spa, *config);
		spa_add_l2cache(spa, *config);
	}

	/*
	 * We want to get the alternate root even for faulted pools, so we cheat
	 * and call spa_lookup() directly.
	 */
	if (altroot) {
		if (spa == NULL) {
			mutex_enter(&spa_namespace_lock);
			spa = spa_lookup(name);
			if (spa)
				spa_altroot(spa, altroot, buflen);
			else
				altroot[0] = '\0';
			spa = NULL;
			mutex_exit(&spa_namespace_lock);
		} else {
			spa_altroot(spa, altroot, buflen);
		}
	}

	if (spa != NULL)
		spa_close(spa, FTAG);

	return (error);
}

/*
 * Validate that the auxiliary device array is well formed. We must have an
 * array of nvlists, each of which describes a valid leaf vdev. If this is an
 * import (mode is VDEV_ALLOC_SPARE), then we allow corrupted spares to be
 * specified, as long as they are well-formed.
 */
static int
spa_validate_aux_devs(spa_t *spa, nvlist_t *nvroot, uint64_t crtxg, int mode,
    spa_aux_vdev_t *sav, const char *config, uint64_t version,
    vdev_labeltype_t label)
{
	nvlist_t **dev;
	uint_t i, ndev;
	vdev_t *vd;
	int error;

	ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);

	/*
	 * It's acceptable to have no devs specified.
	 */
	if (nvlist_lookup_nvlist_array(nvroot, config, &dev, &ndev) != 0)
		return (0);

	if (ndev == 0)
		return (EINVAL);

	/*
	 * Make sure the pool is formatted with a version that supports this
	 * device type.
	 */
	if (spa_version(spa) < version)
		return (ENOTSUP);

	/*
	 * Set the pending device list so we correctly handle device in-use
	 * checking.
	 */
	sav->sav_pending = dev;
	sav->sav_npending = ndev;

	for (i = 0; i < ndev; i++) {
		if ((error = spa_config_parse(spa, &vd, dev[i], NULL, 0,
		    mode)) != 0)
			goto out;

		if (!vd->vdev_ops->vdev_op_leaf) {
			vdev_free(vd);
			error = EINVAL;
			goto out;
		}

		/*
		 * The L2ARC currently only supports disk devices in
		 * kernel context. For user-level testing, we allow it.
		 */
#ifdef _KERNEL
		if ((strcmp(config, ZPOOL_CONFIG_L2CACHE) == 0) &&
		    strcmp(vd->vdev_ops->vdev_op_type, VDEV_TYPE_DISK) != 0) {
			error = ENOTBLK;
			goto out;
		}
#endif
		vd->vdev_top = vd;

		if ((error = vdev_open(vd)) == 0 &&
		    (error = vdev_label_init(vd, crtxg, label)) == 0) {
			VERIFY(nvlist_add_uint64(dev[i], ZPOOL_CONFIG_GUID,
			    vd->vdev_guid) == 0);
		}

		vdev_free(vd);

		if (error &&
		    (mode != VDEV_ALLOC_SPARE && mode != VDEV_ALLOC_L2CACHE))
			goto out;
		else
			error = 0;
	}

out:
	sav->sav_pending = NULL;
	sav->sav_npending = 0;
	return (error);
}

static int
spa_validate_aux(spa_t *spa, nvlist_t *nvroot, uint64_t crtxg, int mode)
{
	int error;

	ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);

	if ((error = spa_validate_aux_devs(spa, nvroot, crtxg, mode,
	    &spa->spa_spares, ZPOOL_CONFIG_SPARES, SPA_VERSION_SPARES,
	    VDEV_LABEL_SPARE)) != 0) {
		return (error);
	}

	return (spa_validate_aux_devs(spa, nvroot, crtxg, mode,
	    &spa->spa_l2cache, ZPOOL_CONFIG_L2CACHE, SPA_VERSION_L2CACHE,
	    VDEV_LABEL_L2CACHE));
}

static void
spa_set_aux_vdevs(spa_aux_vdev_t *sav, nvlist_t **devs, int ndevs,
    const char *config)
{
	int i;

	if (sav->sav_config != NULL) {
		nvlist_t **olddevs;
		uint_t oldndevs;
		nvlist_t **newdevs;

		/*
		 * Generate new dev list by concatenating with the
		 * current dev list.
		 */
		VERIFY(nvlist_lookup_nvlist_array(sav->sav_config, config,
		    &olddevs, &oldndevs) == 0);

		newdevs = kmem_alloc(sizeof (void *) *
		    (ndevs + oldndevs), KM_SLEEP);
		for (i = 0; i < oldndevs; i++)
			VERIFY(nvlist_dup(olddevs[i], &newdevs[i],
			    KM_SLEEP) == 0);
		for (i = 0; i < ndevs; i++)
			VERIFY(nvlist_dup(devs[i], &newdevs[i + oldndevs],
			    KM_SLEEP) == 0);

		VERIFY(nvlist_remove(sav->sav_config, config,
		    DATA_TYPE_NVLIST_ARRAY) == 0);

		VERIFY(nvlist_add_nvlist_array(sav->sav_config,
		    config, newdevs, ndevs + oldndevs) == 0);
		for (i = 0; i < oldndevs + ndevs; i++)
			nvlist_free(newdevs[i]);
		kmem_free(newdevs, (oldndevs + ndevs) * sizeof (void *));
	} else {
		/*
		 * Generate a new dev list.
		 */
		VERIFY(nvlist_alloc(&sav->sav_config, NV_UNIQUE_NAME,
		    KM_SLEEP) == 0);
		VERIFY(nvlist_add_nvlist_array(sav->sav_config, config,
		    devs, ndevs) == 0);
	}
}

/*
 * Stop and drop level 2 ARC devices
 */
void
spa_l2cache_drop(spa_t *spa)
{
	vdev_t *vd;
	int i;
	spa_aux_vdev_t *sav = &spa->spa_l2cache;

	for (i = 0; i < sav->sav_count; i++) {
		uint64_t pool;

		vd = sav->sav_vdevs[i];
		ASSERT(vd != NULL);

		if ((spa_mode & FWRITE) &&
		    spa_l2cache_exists(vd->vdev_guid, &pool) && pool != 0ULL &&
		    l2arc_vdev_present(vd)) {
			l2arc_remove_vdev(vd);
		}
		if (vd->vdev_isl2cache)
			spa_l2cache_remove(vd);
		vdev_clear_stats(vd);
		(void) vdev_close(vd);
	}
}

/*
 * Pool Creation
 */
int
spa_create(const char *pool, nvlist_t *nvroot, nvlist_t *props,
    const char *history_str, nvlist_t *zplprops)
{
	spa_t *spa;
	char *altroot = NULL;
	vdev_t *rvd;
	dsl_pool_t *dp;
	dmu_tx_t *tx;
	int c, error = 0;
	uint64_t txg = TXG_INITIAL;
	nvlist_t **spares, **l2cache;
	uint_t nspares, nl2cache;
	uint64_t version;

	/*
	 * If this pool already exists, return failure.
	 */
	mutex_enter(&spa_namespace_lock);
	if (spa_lookup(pool) != NULL) {
		mutex_exit(&spa_namespace_lock);
		return (EEXIST);
	}

	/*
	 * Allocate a new spa_t structure.
	 */
	(void) nvlist_lookup_string(props,
	    zpool_prop_to_name(ZPOOL_PROP_ALTROOT), &altroot);
	spa = spa_add(pool, altroot);
	spa_activate(spa);

	spa->spa_uberblock.ub_txg = txg - 1;

	if (props && (error = spa_prop_validate(spa, props))) {
		spa_unload(spa);
		spa_deactivate(spa);
		spa_remove(spa);
		mutex_exit(&spa_namespace_lock);
		return (error);
	}

	if (nvlist_lookup_uint64(props, zpool_prop_to_name(ZPOOL_PROP_VERSION),
	    &version) != 0)
		version = SPA_VERSION;
	ASSERT(version <= SPA_VERSION);
	spa->spa_uberblock.ub_version = version;
	spa->spa_ubsync = spa->spa_uberblock;

	/*
	 * Create the root vdev.
	 */
	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);

	error = spa_config_parse(spa, &rvd, nvroot, NULL, 0, VDEV_ALLOC_ADD);

	ASSERT(error != 0 || rvd != NULL);
	ASSERT(error != 0 || spa->spa_root_vdev == rvd);

	if (error == 0 && !zfs_allocatable_devs(nvroot))
		error = EINVAL;

	if (error == 0 &&
	    (error = vdev_create(rvd, txg, B_FALSE)) == 0 &&
	    (error = spa_validate_aux(spa, nvroot, txg,
	    VDEV_ALLOC_ADD)) == 0) {
		for (c = 0; c < rvd->vdev_children; c++)
			vdev_init(rvd->vdev_child[c], txg);
		vdev_config_dirty(rvd);
	}

	spa_config_exit(spa, SCL_ALL, FTAG);

	if (error != 0) {
		spa_unload(spa);
		spa_deactivate(spa);
		spa_remove(spa);
		mutex_exit(&spa_namespace_lock);
		return (error);
	}

	/*
	 * Get the list of spares, if specified.
	 */
	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
	    &spares, &nspares) == 0) {
		VERIFY(nvlist_alloc(&spa->spa_spares.sav_config, NV_UNIQUE_NAME,
		    KM_SLEEP) == 0);
		VERIFY(nvlist_add_nvlist_array(spa->spa_spares.sav_config,
		    ZPOOL_CONFIG_SPARES, spares, nspares) == 0);
		spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
		spa_load_spares(spa);
		spa_config_exit(spa, SCL_ALL, FTAG);
		spa->spa_spares.sav_sync = B_TRUE;
	}

	/*
	 * Get the list of level 2 cache devices, if specified.
	 */
	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
	    &l2cache, &nl2cache) == 0) {
		VERIFY(nvlist_alloc(&spa->spa_l2cache.sav_config,
		    NV_UNIQUE_NAME, KM_SLEEP) == 0);
		VERIFY(nvlist_add_nvlist_array(spa->spa_l2cache.sav_config,
		    ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache) == 0);
		spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
		spa_load_l2cache(spa);
		spa_config_exit(spa, SCL_ALL, FTAG);
		spa->spa_l2cache.sav_sync = B_TRUE;
	}

	spa->spa_dsl_pool = dp = dsl_pool_create(spa, zplprops, txg);
	spa->spa_meta_objset = dp->dp_meta_objset;

	tx = dmu_tx_create_assigned(dp, txg);

	/*
	 * Create the pool config object.
	 */
	spa->spa_config_object = dmu_object_alloc(spa->spa_meta_objset,
	    DMU_OT_PACKED_NVLIST, SPA_CONFIG_BLOCKSIZE,
	    DMU_OT_PACKED_NVLIST_SIZE, sizeof (uint64_t), tx);

	if (zap_add(spa->spa_meta_objset,
	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_CONFIG,
	    sizeof (uint64_t), 1, &spa->spa_config_object, tx) != 0) {
		cmn_err(CE_PANIC, "failed to add pool config");
	}

	/* Newly created pools with the right version are always deflated. */
	if (version >= SPA_VERSION_RAIDZ_DEFLATE) {
		spa->spa_deflate = TRUE;
		if (zap_add(spa->spa_meta_objset,
		    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_DEFLATE,
		    sizeof (uint64_t), 1, &spa->spa_deflate, tx) != 0) {
			cmn_err(CE_PANIC, "failed to add deflate");
		}
	}

	/*
	 * Create the deferred-free bplist object.  Turn off compression
	 * because sync-to-convergence takes longer if the blocksize
	 * keeps changing.
	 */
	spa->spa_sync_bplist_obj = bplist_create(spa->spa_meta_objset,
	    1 << 14, tx);
	dmu_object_set_compress(spa->spa_meta_objset, spa->spa_sync_bplist_obj,
	    ZIO_COMPRESS_OFF, tx);

	if (zap_add(spa->spa_meta_objset,
	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_SYNC_BPLIST,
	    sizeof (uint64_t), 1, &spa->spa_sync_bplist_obj, tx) != 0) {
		cmn_err(CE_PANIC, "failed to add bplist");
	}

	/*
	 * Create the pool's history object.
	 */
	if (version >= SPA_VERSION_ZPOOL_HISTORY)
		spa_history_create_obj(spa, tx);

	/*
	 * Set pool properties.
	 */
	spa->spa_bootfs = zpool_prop_default_numeric(ZPOOL_PROP_BOOTFS);
	spa->spa_delegation = zpool_prop_default_numeric(ZPOOL_PROP_DELEGATION);
	spa->spa_failmode = zpool_prop_default_numeric(ZPOOL_PROP_FAILUREMODE);
	if (props)
		spa_sync_props(spa, props, CRED(), tx);

	dmu_tx_commit(tx);

	spa->spa_sync_on = B_TRUE;
	txg_sync_start(spa->spa_dsl_pool);

	/*
	 * We explicitly wait for the first transaction to complete so that our
	 * bean counters are appropriately updated.
2069 */ 2070 txg_wait_synced(spa->spa_dsl_pool, txg); 2071 2072 spa_config_sync(spa, B_FALSE, B_TRUE); 2073 2074 if (version >= SPA_VERSION_ZPOOL_HISTORY && history_str != NULL) 2075 (void) spa_history_log(spa, history_str, LOG_CMD_POOL_CREATE); 2076 2077 mutex_exit(&spa_namespace_lock); 2078 2079 spa->spa_minref = refcount_count(&spa->spa_refcount); 2080 2081 return (0); 2082 } 2083 2084 /* 2085 * Import the given pool into the system. We set up the necessary spa_t and 2086 * then call spa_load() to do the dirty work. 2087 */ 2088 static int 2089 spa_import_common(const char *pool, nvlist_t *config, nvlist_t *props, 2090 boolean_t isroot, boolean_t allowfaulted) 2091 { 2092 spa_t *spa; 2093 char *altroot = NULL; 2094 int error, loaderr; 2095 nvlist_t *nvroot; 2096 nvlist_t **spares, **l2cache; 2097 uint_t nspares, nl2cache; 2098 2099 /* 2100 * If a pool with this name exists, return failure. 2101 */ 2102 mutex_enter(&spa_namespace_lock); 2103 if (spa_lookup(pool) != NULL) { 2104 mutex_exit(&spa_namespace_lock); 2105 return (EEXIST); 2106 } 2107 2108 /* 2109 * Create and initialize the spa structure. 2110 */ 2111 (void) nvlist_lookup_string(props, 2112 zpool_prop_to_name(ZPOOL_PROP_ALTROOT), &altroot); 2113 spa = spa_add(pool, altroot); 2114 spa_activate(spa); 2115 2116 if (allowfaulted) 2117 spa->spa_import_faulted = B_TRUE; 2118 spa->spa_is_root = isroot; 2119 2120 /* 2121 * Pass off the heavy lifting to spa_load(). 2122 * Pass TRUE for mosconfig (unless this is a root pool) because 2123 * the user-supplied config is actually the one to trust when 2124 * doing an import. 2125 */ 2126 loaderr = error = spa_load(spa, config, SPA_LOAD_IMPORT, !isroot); 2127 2128 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 2129 /* 2130 * Toss any existing sparelist, as it doesn't have any validity anymore, 2131 * and conflicts with spa_has_spare(). 2132 */ 2133 if (!isroot && spa->spa_spares.sav_config) { 2134 nvlist_free(spa->spa_spares.sav_config); 2135 spa->spa_spares.sav_config = NULL; 2136 spa_load_spares(spa); 2137 } 2138 if (!isroot && spa->spa_l2cache.sav_config) { 2139 nvlist_free(spa->spa_l2cache.sav_config); 2140 spa->spa_l2cache.sav_config = NULL; 2141 spa_load_l2cache(spa); 2142 } 2143 2144 VERIFY(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, 2145 &nvroot) == 0); 2146 if (error == 0) 2147 error = spa_validate_aux(spa, nvroot, -1ULL, VDEV_ALLOC_SPARE); 2148 if (error == 0) 2149 error = spa_validate_aux(spa, nvroot, -1ULL, 2150 VDEV_ALLOC_L2CACHE); 2151 spa_config_exit(spa, SCL_ALL, FTAG); 2152 2153 if (error != 0 || (props && (error = spa_prop_set(spa, props)))) { 2154 if (loaderr != 0 && loaderr != EINVAL && allowfaulted) { 2155 /* 2156 * If we failed to load the pool, but 'allowfaulted' is 2157 * set, then manually set the config as if the config 2158 * passed in was specified in the cache file. 2159 */ 2160 error = 0; 2161 spa->spa_import_faulted = B_FALSE; 2162 if (spa->spa_config == NULL) 2163 spa->spa_config = spa_config_generate(spa, 2164 NULL, -1ULL, B_TRUE); 2165 spa_unload(spa); 2166 spa_deactivate(spa); 2167 spa_config_sync(spa, B_FALSE, B_TRUE); 2168 } else { 2169 spa_unload(spa); 2170 spa_deactivate(spa); 2171 spa_remove(spa); 2172 } 2173 mutex_exit(&spa_namespace_lock); 2174 return (error); 2175 } 2176 2177 /* 2178 * Override any spares and level 2 cache devices as specified by 2179 * the user, as these may have correct device names/devids, etc. 
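* (The copies of these lists stored in the pool's own labels may still describe the devices as they appeared on the host the pool was exported from, so the names and devids supplied with the import request are generally the more current ones.)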
2180 */ 2181 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES, 2182 &spares, &nspares) == 0) { 2183 if (spa->spa_spares.sav_config) 2184 VERIFY(nvlist_remove(spa->spa_spares.sav_config, 2185 ZPOOL_CONFIG_SPARES, DATA_TYPE_NVLIST_ARRAY) == 0); 2186 else 2187 VERIFY(nvlist_alloc(&spa->spa_spares.sav_config, 2188 NV_UNIQUE_NAME, KM_SLEEP) == 0); 2189 VERIFY(nvlist_add_nvlist_array(spa->spa_spares.sav_config, 2190 ZPOOL_CONFIG_SPARES, spares, nspares) == 0); 2191 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 2192 spa_load_spares(spa); 2193 spa_config_exit(spa, SCL_ALL, FTAG); 2194 spa->spa_spares.sav_sync = B_TRUE; 2195 } 2196 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE, 2197 &l2cache, &nl2cache) == 0) { 2198 if (spa->spa_l2cache.sav_config) 2199 VERIFY(nvlist_remove(spa->spa_l2cache.sav_config, 2200 ZPOOL_CONFIG_L2CACHE, DATA_TYPE_NVLIST_ARRAY) == 0); 2201 else 2202 VERIFY(nvlist_alloc(&spa->spa_l2cache.sav_config, 2203 NV_UNIQUE_NAME, KM_SLEEP) == 0); 2204 VERIFY(nvlist_add_nvlist_array(spa->spa_l2cache.sav_config, 2205 ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache) == 0); 2206 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 2207 spa_load_l2cache(spa); 2208 spa_config_exit(spa, SCL_ALL, FTAG); 2209 spa->spa_l2cache.sav_sync = B_TRUE; 2210 } 2211 2212 if (spa_mode & FWRITE) { 2213 /* 2214 * Update the config cache to include the newly-imported pool. 2215 */ 2216 spa_config_update_common(spa, SPA_CONFIG_UPDATE_POOL, isroot); 2217 } 2218 2219 spa->spa_import_faulted = B_FALSE; 2220 mutex_exit(&spa_namespace_lock); 2221 2222 return (0); 2223 } 2224 2225 #ifdef _KERNEL 2226 /* 2227 * Build a "root" vdev for a top level vdev read in from a rootpool 2228 * device label. 2229 */ 2230 static void 2231 spa_build_rootpool_config(nvlist_t *config) 2232 { 2233 nvlist_t *nvtop, *nvroot; 2234 uint64_t pgid; 2235 2236 /* 2237 * Add this top-level vdev to the child array. 2238 */ 2239 VERIFY(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &nvtop) 2240 == 0); 2241 VERIFY(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID, &pgid) 2242 == 0); 2243 2244 /* 2245 * Put this pool's top-level vdevs into a root vdev. 2246 */ 2247 VERIFY(nvlist_alloc(&nvroot, NV_UNIQUE_NAME, KM_SLEEP) == 0); 2248 VERIFY(nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE, VDEV_TYPE_ROOT) 2249 == 0); 2250 VERIFY(nvlist_add_uint64(nvroot, ZPOOL_CONFIG_ID, 0ULL) == 0); 2251 VERIFY(nvlist_add_uint64(nvroot, ZPOOL_CONFIG_GUID, pgid) == 0); 2252 VERIFY(nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN, 2253 &nvtop, 1) == 0); 2254 2255 /* 2256 * Replace the existing vdev_tree with the new root vdev in 2257 * this pool's configuration (remove the old, add the new). 2258 */ 2259 VERIFY(nvlist_add_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, nvroot) == 0); 2260 nvlist_free(nvroot); 2261 } 2262 2263 /* 2264 * Get the root pool information from the root disk, then import the root pool 2265 * during the system boot up time. 
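* Roughly, the helpers below read a vdev label from the boot device (vdev_disk_read_rootlabel), pick the label with the highest txg when the root is a mirror, wrap the chosen top-level vdev in a synthetic 'root' vdev nvlist via spa_build_rootpool_config(), and finally hand the result to spa_import_common().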
2266 */ 2267 extern int vdev_disk_read_rootlabel(char *, char *, nvlist_t **); 2268 2269 int 2270 spa_check_rootconf(char *devpath, char *devid, nvlist_t **bestconf, 2271 uint64_t *besttxg) 2272 { 2273 nvlist_t *config; 2274 uint64_t txg; 2275 int error; 2276 2277 if (error = vdev_disk_read_rootlabel(devpath, devid, &config)) 2278 return (error); 2279 2280 VERIFY(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_TXG, &txg) == 0); 2281 2282 if (bestconf != NULL) 2283 *bestconf = config; 2284 else 2285 nvlist_free(config); 2286 *besttxg = txg; 2287 return (0); 2288 } 2289 2290 boolean_t 2291 spa_rootdev_validate(nvlist_t *nv) 2292 { 2293 uint64_t ival; 2294 2295 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_OFFLINE, &ival) == 0 || 2296 nvlist_lookup_uint64(nv, ZPOOL_CONFIG_FAULTED, &ival) == 0 || 2297 nvlist_lookup_uint64(nv, ZPOOL_CONFIG_REMOVED, &ival) == 0) 2298 return (B_FALSE); 2299 2300 return (B_TRUE); 2301 } 2302 2303 2304 /* 2305 * Given the boot device's physical path or devid, check if the device 2306 * is in a valid state. If so, return the configuration from the vdev 2307 * label. 2308 */ 2309 int 2310 spa_get_rootconf(char *devpath, char *devid, nvlist_t **bestconf) 2311 { 2312 nvlist_t *conf = NULL; 2313 uint64_t txg = 0; 2314 nvlist_t *nvtop, **child; 2315 char *type; 2316 char *bootpath = NULL; 2317 uint_t children, c; 2318 char *tmp; 2319 int error; 2320 2321 if (devpath && ((tmp = strchr(devpath, ' ')) != NULL)) 2322 *tmp = '\0'; 2323 if (error = spa_check_rootconf(devpath, devid, &conf, &txg)) { 2324 cmn_err(CE_NOTE, "error reading device label"); 2325 return (error); 2326 } 2327 if (txg == 0) { 2328 cmn_err(CE_NOTE, "this device is detached"); 2329 nvlist_free(conf); 2330 return (EINVAL); 2331 } 2332 2333 VERIFY(nvlist_lookup_nvlist(conf, ZPOOL_CONFIG_VDEV_TREE, 2334 &nvtop) == 0); 2335 VERIFY(nvlist_lookup_string(nvtop, ZPOOL_CONFIG_TYPE, &type) == 0); 2336 2337 if (strcmp(type, VDEV_TYPE_DISK) == 0) { 2338 if (spa_rootdev_validate(nvtop)) { 2339 goto out; 2340 } else { 2341 nvlist_free(conf); 2342 return (EINVAL); 2343 } 2344 } 2345 2346 ASSERT(strcmp(type, VDEV_TYPE_MIRROR) == 0); 2347 2348 VERIFY(nvlist_lookup_nvlist_array(nvtop, ZPOOL_CONFIG_CHILDREN, 2349 &child, &children) == 0); 2350 2351 /* 2352 * Go thru vdevs in the mirror to see if the given device 2353 * has the most recent txg. Only the device with the most 2354 * recent txg has valid information and should be booted. 2355 */ 2356 for (c = 0; c < children; c++) { 2357 char *cdevid, *cpath; 2358 uint64_t tmptxg; 2359 2360 if (nvlist_lookup_string(child[c], ZPOOL_CONFIG_PHYS_PATH, 2361 &cpath) != 0) 2362 return (EINVAL); 2363 if (nvlist_lookup_string(child[c], ZPOOL_CONFIG_DEVID, 2364 &cdevid) != 0) 2365 return (EINVAL); 2366 if ((spa_check_rootconf(cpath, cdevid, NULL, 2367 &tmptxg) == 0) && (tmptxg > txg)) { 2368 txg = tmptxg; 2369 VERIFY(nvlist_lookup_string(child[c], 2370 ZPOOL_CONFIG_PATH, &bootpath) == 0); 2371 } 2372 } 2373 2374 /* Does the best device match the one we've booted from? */ 2375 if (bootpath) { 2376 cmn_err(CE_NOTE, "try booting from '%s'", bootpath); 2377 return (EINVAL); 2378 } 2379 out: 2380 *bestconf = conf; 2381 return (0); 2382 } 2383 2384 /* 2385 * Import a root pool. 2386 * 2387 * For x86. devpath_list will consist of devid and/or physpath name of 2388 * the vdev (e.g. "id1,sd@SSEAGATE..." or "/pci@1f,0/ide@d/disk@0,0:a"). 2389 * The GRUB "findroot" command will return the vdev we should boot. 
2390 * 2391 * For Sparc, devpath_list consists of the physpath name of the booting device, 2392 * no matter whether the root pool is a single-device pool or a mirrored pool. 2393 * e.g. 2394 * "/pci@1f,0/ide@d/disk@0,0:a" 2395 */ 2396 int 2397 spa_import_rootpool(char *devpath, char *devid) 2398 { 2399 nvlist_t *conf = NULL; 2400 char *pname; 2401 int error; 2402 2403 /* 2404 * Get the vdev pathname and configuration from the most 2405 * recently updated vdev (highest txg). 2406 */ 2407 if (error = spa_get_rootconf(devpath, devid, &conf)) 2408 goto msg_out; 2409 2410 /* 2411 * Add type "root" vdev to the config. 2412 */ 2413 spa_build_rootpool_config(conf); 2414 2415 VERIFY(nvlist_lookup_string(conf, ZPOOL_CONFIG_POOL_NAME, &pname) == 0); 2416 2417 /* 2418 * We specify 'allowfaulted' for this to be treated like spa_open() 2419 * instead of spa_import(). This prevents us from marking vdevs as 2420 * persistently unavailable, and generates FMA ereports as if it were a 2421 * pool open, not import. 2422 */ 2423 error = spa_import_common(pname, conf, NULL, B_TRUE, B_TRUE); 2424 if (error == EEXIST) 2425 error = 0; 2426 2427 nvlist_free(conf); 2428 return (error); 2429 2430 msg_out: 2431 cmn_err(CE_NOTE, "\n" 2432 " *************************************************** \n" 2433 " * This device is not bootable! * \n" 2434 " * It is either offlined or detached or faulted. * \n" 2435 " * Please try to boot from a different device. * \n" 2436 " *************************************************** "); 2437 2438 return (error); 2439 } 2440 #endif 2441 2442 /* 2443 * Import a non-root pool into the system. 2444 */ 2445 int 2446 spa_import(const char *pool, nvlist_t *config, nvlist_t *props) 2447 { 2448 return (spa_import_common(pool, config, props, B_FALSE, B_FALSE)); 2449 } 2450 2451 int 2452 spa_import_faulted(const char *pool, nvlist_t *config, nvlist_t *props) 2453 { 2454 return (spa_import_common(pool, config, props, B_FALSE, B_TRUE)); 2455 } 2456 2457 2458 /* 2459 * This (illegal) pool name is used when temporarily importing a spa_t in order 2460 * to get the vdev stats associated with the imported devices. 2461 */ 2462 #define TRYIMPORT_NAME "$import" 2463 2464 nvlist_t * 2465 spa_tryimport(nvlist_t *tryconfig) 2466 { 2467 nvlist_t *config = NULL; 2468 char *poolname; 2469 spa_t *spa; 2470 uint64_t state; 2471 2472 if (nvlist_lookup_string(tryconfig, ZPOOL_CONFIG_POOL_NAME, &poolname)) 2473 return (NULL); 2474 2475 if (nvlist_lookup_uint64(tryconfig, ZPOOL_CONFIG_POOL_STATE, &state)) 2476 return (NULL); 2477 2478 /* 2479 * Create and initialize the spa structure. 2480 */ 2481 mutex_enter(&spa_namespace_lock); 2482 spa = spa_add(TRYIMPORT_NAME, NULL); 2483 spa_activate(spa); 2484 2485 /* 2486 * Pass off the heavy lifting to spa_load(). 2487 * Pass TRUE for mosconfig because the user-supplied config 2488 * is actually the one to trust when doing an import. 2489 */ 2490 (void) spa_load(spa, tryconfig, SPA_LOAD_TRYIMPORT, B_TRUE); 2491 2492 /* 2493 * If 'tryconfig' was at least parsable, return the current config. 
2494 */ 2495 if (spa->spa_root_vdev != NULL) { 2496 config = spa_config_generate(spa, NULL, -1ULL, B_TRUE); 2497 VERIFY(nvlist_add_string(config, ZPOOL_CONFIG_POOL_NAME, 2498 poolname) == 0); 2499 VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_POOL_STATE, 2500 state) == 0); 2501 VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_TIMESTAMP, 2502 spa->spa_uberblock.ub_timestamp) == 0); 2503 2504 /* 2505 * If the bootfs property exists on this pool then we 2506 * copy it out so that external consumers can tell which 2507 * pools are bootable. 2508 */ 2509 if (spa->spa_bootfs) { 2510 char *tmpname = kmem_alloc(MAXPATHLEN, KM_SLEEP); 2511 2512 /* 2513 * We have to play games with the name since the 2514 * pool was opened as TRYIMPORT_NAME. 2515 */ 2516 if (dsl_dsobj_to_dsname(spa_name(spa), 2517 spa->spa_bootfs, tmpname) == 0) { 2518 char *cp; 2519 char *dsname = kmem_alloc(MAXPATHLEN, KM_SLEEP); 2520 2521 cp = strchr(tmpname, '/'); 2522 if (cp == NULL) { 2523 (void) strlcpy(dsname, tmpname, 2524 MAXPATHLEN); 2525 } else { 2526 (void) snprintf(dsname, MAXPATHLEN, 2527 "%s/%s", poolname, ++cp); 2528 } 2529 VERIFY(nvlist_add_string(config, 2530 ZPOOL_CONFIG_BOOTFS, dsname) == 0); 2531 kmem_free(dsname, MAXPATHLEN); 2532 } 2533 kmem_free(tmpname, MAXPATHLEN); 2534 } 2535 2536 /* 2537 * Add the list of hot spares and level 2 cache devices. 2538 */ 2539 spa_add_spares(spa, config); 2540 spa_add_l2cache(spa, config); 2541 } 2542 2543 spa_unload(spa); 2544 spa_deactivate(spa); 2545 spa_remove(spa); 2546 mutex_exit(&spa_namespace_lock); 2547 2548 return (config); 2549 } 2550 2551 /* 2552 * Pool export/destroy 2553 * 2554 * The act of destroying or exporting a pool is very simple. We make sure there 2555 * is no more pending I/O and any references to the pool are gone. Then, we 2556 * update the pool state and sync all the labels to disk, removing the 2557 * configuration from the cache afterwards. 2558 */ 2559 static int 2560 spa_export_common(char *pool, int new_state, nvlist_t **oldconfig, 2561 boolean_t force) 2562 { 2563 spa_t *spa; 2564 2565 if (oldconfig) 2566 *oldconfig = NULL; 2567 2568 if (!(spa_mode & FWRITE)) 2569 return (EROFS); 2570 2571 mutex_enter(&spa_namespace_lock); 2572 if ((spa = spa_lookup(pool)) == NULL) { 2573 mutex_exit(&spa_namespace_lock); 2574 return (ENOENT); 2575 } 2576 2577 /* 2578 * Put a hold on the pool, drop the namespace lock, stop async tasks, 2579 * reacquire the namespace lock, and see if we can export. 2580 */ 2581 spa_open_ref(spa, FTAG); 2582 mutex_exit(&spa_namespace_lock); 2583 spa_async_suspend(spa); 2584 mutex_enter(&spa_namespace_lock); 2585 spa_close(spa, FTAG); 2586 2587 /* 2588 * The pool will be in core if it's openable, 2589 * in which case we can modify its state. 2590 */ 2591 if (spa->spa_state != POOL_STATE_UNINITIALIZED && spa->spa_sync_on) { 2592 /* 2593 * Objsets may be open only because they're dirty, so we 2594 * have to force it to sync before checking spa_refcnt. 2595 */ 2596 txg_wait_synced(spa->spa_dsl_pool, 0); 2597 2598 /* 2599 * A pool cannot be exported or destroyed if there are active 2600 * references. If we are resetting a pool, allow references by 2601 * fault injection handlers. 2602 */ 2603 if (!spa_refcount_zero(spa) || 2604 (spa->spa_inject_ref != 0 && 2605 new_state != POOL_STATE_UNINITIALIZED)) { 2606 spa_async_resume(spa); 2607 mutex_exit(&spa_namespace_lock); 2608 return (EBUSY); 2609 } 2610 2611 /* 2612 * A pool cannot be exported if it has an active shared spare. 
2613 * This is to prevent other pools stealing the active spare 2614 * from an exported pool. If the user insists, such a pool can 2615 * still be forcibly exported. 2616 */ 2617 if (!force && new_state == POOL_STATE_EXPORTED && 2618 spa_has_active_shared_spare(spa)) { 2619 spa_async_resume(spa); 2620 mutex_exit(&spa_namespace_lock); 2621 return (EXDEV); 2622 } 2623 2624 /* 2625 * We want this to be reflected on every label, 2626 * so mark them all dirty. spa_unload() will do the 2627 * final sync that pushes these changes out. 2628 */ 2629 if (new_state != POOL_STATE_UNINITIALIZED) { 2630 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 2631 spa->spa_state = new_state; 2632 spa->spa_final_txg = spa_last_synced_txg(spa) + 1; 2633 vdev_config_dirty(spa->spa_root_vdev); 2634 spa_config_exit(spa, SCL_ALL, FTAG); 2635 } 2636 } 2637 2638 spa_event_notify(spa, NULL, ESC_ZFS_POOL_DESTROY); 2639 2640 if (spa->spa_state != POOL_STATE_UNINITIALIZED) { 2641 spa_unload(spa); 2642 spa_deactivate(spa); 2643 } 2644 2645 if (oldconfig && spa->spa_config) 2646 VERIFY(nvlist_dup(spa->spa_config, oldconfig, 0) == 0); 2647 2648 if (new_state != POOL_STATE_UNINITIALIZED) { 2649 spa_config_sync(spa, B_TRUE, B_TRUE); 2650 spa_remove(spa); 2651 } 2652 mutex_exit(&spa_namespace_lock); 2653 2654 return (0); 2655 } 2656 2657 /* 2658 * Destroy a storage pool. 2659 */ 2660 int 2661 spa_destroy(char *pool) 2662 { 2663 return (spa_export_common(pool, POOL_STATE_DESTROYED, NULL, B_FALSE)); 2664 } 2665 2666 /* 2667 * Export a storage pool. 2668 */ 2669 int 2670 spa_export(char *pool, nvlist_t **oldconfig, boolean_t force) 2671 { 2672 return (spa_export_common(pool, POOL_STATE_EXPORTED, oldconfig, force)); 2673 } 2674 2675 /* 2676 * Similar to spa_export(), this unloads the spa_t without actually removing it 2677 * from the namespace in any way. 2678 */ 2679 int 2680 spa_reset(char *pool) 2681 { 2682 return (spa_export_common(pool, POOL_STATE_UNINITIALIZED, NULL, 2683 B_FALSE)); 2684 } 2685 2686 /* 2687 * ========================================================================== 2688 * Device manipulation 2689 * ========================================================================== 2690 */ 2691 2692 /* 2693 * Add a device to a storage pool. 2694 */ 2695 int 2696 spa_vdev_add(spa_t *spa, nvlist_t *nvroot) 2697 { 2698 uint64_t txg; 2699 int c, error; 2700 vdev_t *rvd = spa->spa_root_vdev; 2701 vdev_t *vd, *tvd; 2702 nvlist_t **spares, **l2cache; 2703 uint_t nspares, nl2cache; 2704 2705 txg = spa_vdev_enter(spa); 2706 2707 if ((error = spa_config_parse(spa, &vd, nvroot, NULL, 0, 2708 VDEV_ALLOC_ADD)) != 0) 2709 return (spa_vdev_exit(spa, NULL, txg, error)); 2710 2711 spa->spa_pending_vdev = vd; /* spa_vdev_exit() will clear this */ 2712 2713 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES, &spares, 2714 &nspares) != 0) 2715 nspares = 0; 2716 2717 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE, &l2cache, 2718 &nl2cache) != 0) 2719 nl2cache = 0; 2720 2721 if (vd->vdev_children == 0 && nspares == 0 && nl2cache == 0) 2722 return (spa_vdev_exit(spa, vd, txg, EINVAL)); 2723 2724 if (vd->vdev_children != 0 && 2725 (error = vdev_create(vd, txg, B_FALSE)) != 0) 2726 return (spa_vdev_exit(spa, vd, txg, error)); 2727 2728 /* 2729 * We must validate the spares and l2cache devices after checking the 2730 * children. Otherwise, vdev_inuse() will blindly overwrite the spare. 
2731 */ 2732 if ((error = spa_validate_aux(spa, nvroot, txg, VDEV_ALLOC_ADD)) != 0) 2733 return (spa_vdev_exit(spa, vd, txg, error)); 2734 2735 /* 2736 * Transfer each new top-level vdev from vd to rvd. 2737 */ 2738 for (c = 0; c < vd->vdev_children; c++) { 2739 tvd = vd->vdev_child[c]; 2740 vdev_remove_child(vd, tvd); 2741 tvd->vdev_id = rvd->vdev_children; 2742 vdev_add_child(rvd, tvd); 2743 vdev_config_dirty(tvd); 2744 } 2745 2746 if (nspares != 0) { 2747 spa_set_aux_vdevs(&spa->spa_spares, spares, nspares, 2748 ZPOOL_CONFIG_SPARES); 2749 spa_load_spares(spa); 2750 spa->spa_spares.sav_sync = B_TRUE; 2751 } 2752 2753 if (nl2cache != 0) { 2754 spa_set_aux_vdevs(&spa->spa_l2cache, l2cache, nl2cache, 2755 ZPOOL_CONFIG_L2CACHE); 2756 spa_load_l2cache(spa); 2757 spa->spa_l2cache.sav_sync = B_TRUE; 2758 } 2759 2760 /* 2761 * We have to be careful when adding new vdevs to an existing pool. 2762 * If other threads start allocating from these vdevs before we 2763 * sync the config cache, and we lose power, then upon reboot we may 2764 * fail to open the pool because there are DVAs that the config cache 2765 * can't translate. Therefore, we first add the vdevs without 2766 * initializing metaslabs; sync the config cache (via spa_vdev_exit()); 2767 * and then let spa_config_update() initialize the new metaslabs. 2768 * 2769 * spa_load() checks for added-but-not-initialized vdevs, so that 2770 * if we lose power at any point in this sequence, the remaining 2771 * steps will be completed the next time we load the pool. 2772 */ 2773 (void) spa_vdev_exit(spa, vd, txg, 0); 2774 2775 mutex_enter(&spa_namespace_lock); 2776 spa_config_update(spa, SPA_CONFIG_UPDATE_POOL); 2777 mutex_exit(&spa_namespace_lock); 2778 2779 return (0); 2780 } 2781 2782 /* 2783 * Attach a device to a mirror. The arguments are the GUID of any device 2784 * in the mirror, and the nvroot for the new device. If the GUID refers to 2785 * a device that is not mirrored, we automatically insert the mirror vdev. 2786 * 2787 * If 'replacing' is specified, the new device is intended to replace the 2788 * existing device; in this case the two devices are made into their own 2789 * mirror using the 'replacing' vdev, which is functionally identical to 2790 * the mirror vdev (it actually reuses all the same ops) but has a few 2791 * extra rules: you can't attach to it after it's been created, and upon 2792 * completion of resilvering, the first disk (the one being replaced) 2793 * is automatically detached. 
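* As an illustrative sketch only (the ioctl plumbing that builds nvroot lives outside this file): turning a lone disk into a mirror amounts to parsing the new leaf into an nvroot and calling spa_vdev_attach(spa, guid_of_existing_disk, nvroot, B_FALSE); a 'zpool replace'-style operation uses the same entry point with replacing set to B_TRUE. Here guid_of_existing_disk is just a placeholder name for the GUID looked up from the existing device.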
2794 */ 2795 int 2796 spa_vdev_attach(spa_t *spa, uint64_t guid, nvlist_t *nvroot, int replacing) 2797 { 2798 uint64_t txg, open_txg; 2799 vdev_t *rvd = spa->spa_root_vdev; 2800 vdev_t *oldvd, *newvd, *newrootvd, *pvd, *tvd; 2801 vdev_ops_t *pvops; 2802 dmu_tx_t *tx; 2803 char *oldvdpath, *newvdpath; 2804 int newvd_isspare; 2805 int error; 2806 2807 txg = spa_vdev_enter(spa); 2808 2809 oldvd = spa_lookup_by_guid(spa, guid, B_FALSE); 2810 2811 if (oldvd == NULL) 2812 return (spa_vdev_exit(spa, NULL, txg, ENODEV)); 2813 2814 if (!oldvd->vdev_ops->vdev_op_leaf) 2815 return (spa_vdev_exit(spa, NULL, txg, ENOTSUP)); 2816 2817 pvd = oldvd->vdev_parent; 2818 2819 if ((error = spa_config_parse(spa, &newrootvd, nvroot, NULL, 0, 2820 VDEV_ALLOC_ADD)) != 0) 2821 return (spa_vdev_exit(spa, NULL, txg, EINVAL)); 2822 2823 if (newrootvd->vdev_children != 1) 2824 return (spa_vdev_exit(spa, newrootvd, txg, EINVAL)); 2825 2826 newvd = newrootvd->vdev_child[0]; 2827 2828 if (!newvd->vdev_ops->vdev_op_leaf) 2829 return (spa_vdev_exit(spa, newrootvd, txg, EINVAL)); 2830 2831 if ((error = vdev_create(newrootvd, txg, replacing)) != 0) 2832 return (spa_vdev_exit(spa, newrootvd, txg, error)); 2833 2834 /* 2835 * Spares can't replace logs 2836 */ 2837 if (oldvd->vdev_top->vdev_islog && newvd->vdev_isspare) 2838 return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP)); 2839 2840 if (!replacing) { 2841 /* 2842 * For attach, the only allowable parent is a mirror or the root 2843 * vdev. 2844 */ 2845 if (pvd->vdev_ops != &vdev_mirror_ops && 2846 pvd->vdev_ops != &vdev_root_ops) 2847 return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP)); 2848 2849 pvops = &vdev_mirror_ops; 2850 } else { 2851 /* 2852 * Active hot spares can only be replaced by inactive hot 2853 * spares. 2854 */ 2855 if (pvd->vdev_ops == &vdev_spare_ops && 2856 pvd->vdev_child[1] == oldvd && 2857 !spa_has_spare(spa, newvd->vdev_guid)) 2858 return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP)); 2859 2860 /* 2861 * If the source is a hot spare, and the parent isn't already a 2862 * spare, then we want to create a new hot spare. Otherwise, we 2863 * want to create a replacing vdev. The user is not allowed to 2864 * attach to a spared vdev child unless the 'isspare' state is 2865 * the same (spare replaces spare, non-spare replaces 2866 * non-spare). 2867 */ 2868 if (pvd->vdev_ops == &vdev_replacing_ops) 2869 return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP)); 2870 else if (pvd->vdev_ops == &vdev_spare_ops && 2871 newvd->vdev_isspare != oldvd->vdev_isspare) 2872 return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP)); 2873 else if (pvd->vdev_ops != &vdev_spare_ops && 2874 newvd->vdev_isspare) 2875 pvops = &vdev_spare_ops; 2876 else 2877 pvops = &vdev_replacing_ops; 2878 } 2879 2880 /* 2881 * Compare the new device size with the replaceable/attachable 2882 * device size. 2883 */ 2884 if (newvd->vdev_psize < vdev_get_rsize(oldvd)) 2885 return (spa_vdev_exit(spa, newrootvd, txg, EOVERFLOW)); 2886 2887 /* 2888 * The new device cannot have a higher alignment requirement 2889 * than the top-level vdev. 2890 */ 2891 if (newvd->vdev_ashift > oldvd->vdev_top->vdev_ashift) 2892 return (spa_vdev_exit(spa, newrootvd, txg, EDOM)); 2893 2894 /* 2895 * If this is an in-place replacement, update oldvd's path and devid 2896 * to make it distinguishable from newvd, and unopenable from now on. 
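* For example, the sprintf() below turns a path such as /dev/dsk/c0t0d0s0 into /dev/dsk/c0t0d0s0/old.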
2897 */ 2898 if (strcmp(oldvd->vdev_path, newvd->vdev_path) == 0) { 2899 spa_strfree(oldvd->vdev_path); 2900 oldvd->vdev_path = kmem_alloc(strlen(newvd->vdev_path) + 5, 2901 KM_SLEEP); 2902 (void) sprintf(oldvd->vdev_path, "%s/%s", 2903 newvd->vdev_path, "old"); 2904 if (oldvd->vdev_devid != NULL) { 2905 spa_strfree(oldvd->vdev_devid); 2906 oldvd->vdev_devid = NULL; 2907 } 2908 } 2909 2910 /* 2911 * If the parent is not a mirror, or if we're replacing, insert the new 2912 * mirror/replacing/spare vdev above oldvd. 2913 */ 2914 if (pvd->vdev_ops != pvops) 2915 pvd = vdev_add_parent(oldvd, pvops); 2916 2917 ASSERT(pvd->vdev_top->vdev_parent == rvd); 2918 ASSERT(pvd->vdev_ops == pvops); 2919 ASSERT(oldvd->vdev_parent == pvd); 2920 2921 /* 2922 * Extract the new device from its root and add it to pvd. 2923 */ 2924 vdev_remove_child(newrootvd, newvd); 2925 newvd->vdev_id = pvd->vdev_children; 2926 vdev_add_child(pvd, newvd); 2927 2928 /* 2929 * If newvd is smaller than oldvd, but larger than its rsize, 2930 * the addition of newvd may have decreased our parent's asize. 2931 */ 2932 pvd->vdev_asize = MIN(pvd->vdev_asize, newvd->vdev_asize); 2933 2934 tvd = newvd->vdev_top; 2935 ASSERT(pvd->vdev_top == tvd); 2936 ASSERT(tvd->vdev_parent == rvd); 2937 2938 vdev_config_dirty(tvd); 2939 2940 /* 2941 * Set newvd's DTL to [TXG_INITIAL, open_txg]. It will propagate 2942 * upward when spa_vdev_exit() calls vdev_dtl_reassess(). 2943 */ 2944 open_txg = txg + TXG_CONCURRENT_STATES - 1; 2945 2946 mutex_enter(&newvd->vdev_dtl_lock); 2947 space_map_add(&newvd->vdev_dtl_map, TXG_INITIAL, 2948 open_txg - TXG_INITIAL + 1); 2949 mutex_exit(&newvd->vdev_dtl_lock); 2950 2951 if (newvd->vdev_isspare) 2952 spa_spare_activate(newvd); 2953 oldvdpath = spa_strdup(oldvd->vdev_path); 2954 newvdpath = spa_strdup(newvd->vdev_path); 2955 newvd_isspare = newvd->vdev_isspare; 2956 2957 /* 2958 * Mark newvd's DTL dirty in this txg. 2959 */ 2960 vdev_dirty(tvd, VDD_DTL, newvd, txg); 2961 2962 (void) spa_vdev_exit(spa, newrootvd, open_txg, 0); 2963 2964 tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir); 2965 if (dmu_tx_assign(tx, TXG_WAIT) == 0) { 2966 spa_history_internal_log(LOG_POOL_VDEV_ATTACH, spa, tx, 2967 CRED(), "%s vdev=%s %s vdev=%s", 2968 replacing && newvd_isspare ? "spare in" : 2969 replacing ? "replace" : "attach", newvdpath, 2970 replacing ? "for" : "to", oldvdpath); 2971 dmu_tx_commit(tx); 2972 } else { 2973 dmu_tx_abort(tx); 2974 } 2975 2976 spa_strfree(oldvdpath); 2977 spa_strfree(newvdpath); 2978 2979 /* 2980 * Kick off a resilver to update newvd. 2981 */ 2982 VERIFY3U(spa_scrub(spa, POOL_SCRUB_RESILVER), ==, 0); 2983 2984 return (0); 2985 } 2986 2987 /* 2988 * Detach a device from a mirror or replacing vdev. 2989 * If 'replace_done' is specified, only detach if the parent 2990 * is a replacing vdev. 2991 */ 2992 int 2993 spa_vdev_detach(spa_t *spa, uint64_t guid, int replace_done) 2994 { 2995 uint64_t txg; 2996 int c, t, error; 2997 vdev_t *rvd = spa->spa_root_vdev; 2998 vdev_t *vd, *pvd, *cvd, *tvd; 2999 boolean_t unspare = B_FALSE; 3000 uint64_t unspare_guid; 3001 size_t len; 3002 3003 txg = spa_vdev_enter(spa); 3004 3005 vd = spa_lookup_by_guid(spa, guid, B_FALSE); 3006 3007 if (vd == NULL) 3008 return (spa_vdev_exit(spa, NULL, txg, ENODEV)); 3009 3010 if (!vd->vdev_ops->vdev_op_leaf) 3011 return (spa_vdev_exit(spa, NULL, txg, ENOTSUP)); 3012 3013 pvd = vd->vdev_parent; 3014 3015 /* 3016 * If replace_done is specified, only remove this device if it's 3017 * the first child of a replacing vdev. 
For the 'spare' vdev, either 3018 * disk can be removed. 3019 */ 3020 if (replace_done) { 3021 if (pvd->vdev_ops == &vdev_replacing_ops) { 3022 if (vd->vdev_id != 0) 3023 return (spa_vdev_exit(spa, NULL, txg, ENOTSUP)); 3024 } else if (pvd->vdev_ops != &vdev_spare_ops) { 3025 return (spa_vdev_exit(spa, NULL, txg, ENOTSUP)); 3026 } 3027 } 3028 3029 ASSERT(pvd->vdev_ops != &vdev_spare_ops || 3030 spa_version(spa) >= SPA_VERSION_SPARES); 3031 3032 /* 3033 * Only mirror, replacing, and spare vdevs support detach. 3034 */ 3035 if (pvd->vdev_ops != &vdev_replacing_ops && 3036 pvd->vdev_ops != &vdev_mirror_ops && 3037 pvd->vdev_ops != &vdev_spare_ops) 3038 return (spa_vdev_exit(spa, NULL, txg, ENOTSUP)); 3039 3040 /* 3041 * If there's only one replica, you can't detach it. 3042 */ 3043 if (pvd->vdev_children <= 1) 3044 return (spa_vdev_exit(spa, NULL, txg, EBUSY)); 3045 3046 /* 3047 * If all siblings have non-empty DTLs, this device may have the only 3048 * valid copy of the data, which means we cannot safely detach it. 3049 * 3050 * XXX -- as in the vdev_offline() case, we really want a more 3051 * precise DTL check. 3052 */ 3053 for (c = 0; c < pvd->vdev_children; c++) { 3054 uint64_t dirty; 3055 3056 cvd = pvd->vdev_child[c]; 3057 if (cvd == vd) 3058 continue; 3059 if (vdev_is_dead(cvd)) 3060 continue; 3061 mutex_enter(&cvd->vdev_dtl_lock); 3062 dirty = cvd->vdev_dtl_map.sm_space | 3063 cvd->vdev_dtl_scrub.sm_space; 3064 mutex_exit(&cvd->vdev_dtl_lock); 3065 if (!dirty) 3066 break; 3067 } 3068 3069 if (c == pvd->vdev_children) 3070 return (spa_vdev_exit(spa, NULL, txg, EBUSY)); 3071 3072 /* 3073 * If we are detaching the second disk from a replacing vdev, then 3074 * check to see if we changed the original vdev's path to have "/old" 3075 * at the end in spa_vdev_attach(). If so, undo that change now. 3076 */ 3077 if (pvd->vdev_ops == &vdev_replacing_ops && vd->vdev_id == 1 && 3078 pvd->vdev_child[0]->vdev_path != NULL && 3079 pvd->vdev_child[1]->vdev_path != NULL) { 3080 ASSERT(pvd->vdev_child[1] == vd); 3081 cvd = pvd->vdev_child[0]; 3082 len = strlen(vd->vdev_path); 3083 if (strncmp(cvd->vdev_path, vd->vdev_path, len) == 0 && 3084 strcmp(cvd->vdev_path + len, "/old") == 0) { 3085 spa_strfree(cvd->vdev_path); 3086 cvd->vdev_path = spa_strdup(vd->vdev_path); 3087 } 3088 } 3089 3090 /* 3091 * If we are detaching the original disk from a spare, then it implies 3092 * that the spare should become a real disk, and be removed from the 3093 * active spare list for the pool. 3094 */ 3095 if (pvd->vdev_ops == &vdev_spare_ops && 3096 vd->vdev_id == 0) 3097 unspare = B_TRUE; 3098 3099 /* 3100 * Erase the disk labels so the disk can be used for other things. 3101 * This must be done after all other error cases are handled, 3102 * but before we disembowel vd (so we can still do I/O to it). 3103 * But if we can't do it, don't treat the error as fatal -- 3104 * it may be that the unwritability of the disk is the reason 3105 * it's being detached! 3106 */ 3107 error = vdev_label_init(vd, 0, VDEV_LABEL_REMOVE); 3108 3109 /* 3110 * Remove vd from its parent and compact the parent's children. 3111 */ 3112 vdev_remove_child(pvd, vd); 3113 vdev_compact_children(pvd); 3114 3115 /* 3116 * Remember one of the remaining children so we can get tvd below. 3117 */ 3118 cvd = pvd->vdev_child[0]; 3119 3120 /* 3121 * If we need to remove the remaining child from the list of hot spares, 3122 * do it now, marking the vdev as no longer a spare in the process. 
We 3123 * must do this before vdev_remove_parent(), because that can change the 3124 * GUID if it creates a new toplevel GUID. 3125 */ 3126 if (unspare) { 3127 ASSERT(cvd->vdev_isspare); 3128 spa_spare_remove(cvd); 3129 unspare_guid = cvd->vdev_guid; 3130 } 3131 3132 /* 3133 * If the parent mirror/replacing vdev only has one child, 3134 * the parent is no longer needed. Remove it from the tree. 3135 */ 3136 if (pvd->vdev_children == 1) 3137 vdev_remove_parent(cvd); 3138 3139 /* 3140 * We don't set tvd until now because the parent we just removed 3141 * may have been the previous top-level vdev. 3142 */ 3143 tvd = cvd->vdev_top; 3144 ASSERT(tvd->vdev_parent == rvd); 3145 3146 /* 3147 * Reevaluate the parent vdev state. 3148 */ 3149 vdev_propagate_state(cvd); 3150 3151 /* 3152 * If the device we just detached was smaller than the others, it may be 3153 * possible to add metaslabs (i.e. grow the pool). vdev_metaslab_init() 3154 * can't fail because the existing metaslabs are already in core, so 3155 * there's nothing to read from disk. 3156 */ 3157 VERIFY(vdev_metaslab_init(tvd, txg) == 0); 3158 3159 vdev_config_dirty(tvd); 3160 3161 /* 3162 * Mark vd's DTL as dirty in this txg. vdev_dtl_sync() will see that 3163 * vd->vdev_detached is set and free vd's DTL object in syncing context. 3164 * But first make sure we're not on any *other* txg's DTL list, to 3165 * prevent vd from being accessed after it's freed. 3166 */ 3167 for (t = 0; t < TXG_SIZE; t++) 3168 (void) txg_list_remove_this(&tvd->vdev_dtl_list, vd, t); 3169 vd->vdev_detached = B_TRUE; 3170 vdev_dirty(tvd, VDD_DTL, vd, txg); 3171 3172 spa_event_notify(spa, vd, ESC_ZFS_VDEV_REMOVE); 3173 3174 error = spa_vdev_exit(spa, vd, txg, 0); 3175 3176 /* 3177 * If this was the removal of the original device in a hot spare vdev, 3178 * then we want to go through and remove the device from the hot spare 3179 * list of every other pool. 3180 */ 3181 if (unspare) { 3182 spa = NULL; 3183 mutex_enter(&spa_namespace_lock); 3184 while ((spa = spa_next(spa)) != NULL) { 3185 if (spa->spa_state != POOL_STATE_ACTIVE) 3186 continue; 3187 spa_open_ref(spa, FTAG); 3188 mutex_exit(&spa_namespace_lock); 3189 (void) spa_vdev_remove(spa, unspare_guid, B_TRUE); 3190 mutex_enter(&spa_namespace_lock); 3191 spa_close(spa, FTAG); 3192 } 3193 mutex_exit(&spa_namespace_lock); 3194 } 3195 3196 return (error); 3197 } 3198 3199 static nvlist_t * 3200 spa_nvlist_lookup_by_guid(nvlist_t **nvpp, int count, uint64_t target_guid) 3201 { 3202 for (int i = 0; i < count; i++) { 3203 uint64_t guid; 3204 3205 VERIFY(nvlist_lookup_uint64(nvpp[i], ZPOOL_CONFIG_GUID, 3206 &guid) == 0); 3207 3208 if (guid == target_guid) 3209 return (nvpp[i]); 3210 } 3211 3212 return (NULL); 3213 } 3214 3215 static void 3216 spa_vdev_remove_aux(nvlist_t *config, char *name, nvlist_t **dev, int count, 3217 nvlist_t *dev_to_remove) 3218 { 3219 nvlist_t **newdev = NULL; 3220 3221 if (count > 1) 3222 newdev = kmem_alloc((count - 1) * sizeof (void *), KM_SLEEP); 3223 3224 for (int i = 0, j = 0; i < count; i++) { 3225 if (dev[i] == dev_to_remove) 3226 continue; 3227 VERIFY(nvlist_dup(dev[i], &newdev[j++], KM_SLEEP) == 0); 3228 } 3229 3230 VERIFY(nvlist_remove(config, name, DATA_TYPE_NVLIST_ARRAY) == 0); 3231 VERIFY(nvlist_add_nvlist_array(config, name, newdev, count - 1) == 0); 3232 3233 for (int i = 0; i < count - 1; i++) 3234 nvlist_free(newdev[i]); 3235 3236 if (count > 1) 3237 kmem_free(newdev, (count - 1) * sizeof (void *)); 3238 } 3239 3240 /* 3241 * Remove a device from the pool. 
Currently, this supports removing only hot 3242 * spares and level 2 ARC devices. 3243 */ 3244 int 3245 spa_vdev_remove(spa_t *spa, uint64_t guid, boolean_t unspare) 3246 { 3247 vdev_t *vd; 3248 nvlist_t **spares, **l2cache, *nv; 3249 uint_t nspares, nl2cache; 3250 uint64_t txg; 3251 int error = 0; 3252 3253 txg = spa_vdev_enter(spa); 3254 3255 vd = spa_lookup_by_guid(spa, guid, B_FALSE); 3256 3257 if (spa->spa_spares.sav_vdevs != NULL && 3258 nvlist_lookup_nvlist_array(spa->spa_spares.sav_config, 3259 ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0 && 3260 (nv = spa_nvlist_lookup_by_guid(spares, nspares, guid)) != NULL) { 3261 /* 3262 * Only remove the hot spare if it's not currently in use 3263 * in this pool. 3264 */ 3265 if (vd == NULL || unspare) { 3266 spa_vdev_remove_aux(spa->spa_spares.sav_config, 3267 ZPOOL_CONFIG_SPARES, spares, nspares, nv); 3268 spa_load_spares(spa); 3269 spa->spa_spares.sav_sync = B_TRUE; 3270 } else { 3271 error = EBUSY; 3272 } 3273 } else if (spa->spa_l2cache.sav_vdevs != NULL && 3274 nvlist_lookup_nvlist_array(spa->spa_l2cache.sav_config, 3275 ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0 && 3276 (nv = spa_nvlist_lookup_by_guid(l2cache, nl2cache, guid)) != NULL) { 3277 /* 3278 * Cache devices can always be removed. 3279 */ 3280 spa_vdev_remove_aux(spa->spa_l2cache.sav_config, 3281 ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache, nv); 3282 spa_load_l2cache(spa); 3283 spa->spa_l2cache.sav_sync = B_TRUE; 3284 } else if (vd != NULL) { 3285 /* 3286 * Normal vdevs cannot be removed (yet). 3287 */ 3288 error = ENOTSUP; 3289 } else { 3290 /* 3291 * There is no vdev of any kind with the specified guid. 3292 */ 3293 error = ENOENT; 3294 } 3295 3296 return (spa_vdev_exit(spa, NULL, txg, error)); 3297 } 3298 3299 /* 3300 * Find any device that's done replacing, or a vdev marked 'unspare' that's 3301 * currently spared, so we can detach it. 3302 */ 3303 static vdev_t * 3304 spa_vdev_resilver_done_hunt(vdev_t *vd) 3305 { 3306 vdev_t *newvd, *oldvd; 3307 int c; 3308 3309 for (c = 0; c < vd->vdev_children; c++) { 3310 oldvd = spa_vdev_resilver_done_hunt(vd->vdev_child[c]); 3311 if (oldvd != NULL) 3312 return (oldvd); 3313 } 3314 3315 /* 3316 * Check for a completed replacement. 3317 */ 3318 if (vd->vdev_ops == &vdev_replacing_ops && vd->vdev_children == 2) { 3319 oldvd = vd->vdev_child[0]; 3320 newvd = vd->vdev_child[1]; 3321 3322 mutex_enter(&newvd->vdev_dtl_lock); 3323 if (newvd->vdev_dtl_map.sm_space == 0 && 3324 newvd->vdev_dtl_scrub.sm_space == 0) { 3325 mutex_exit(&newvd->vdev_dtl_lock); 3326 return (oldvd); 3327 } 3328 mutex_exit(&newvd->vdev_dtl_lock); 3329 } 3330 3331 /* 3332 * Check for a completed resilver with the 'unspare' flag set. 
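* In the spare case below, child[0] is the original device (the layout spa_vdev_attach() sets up) and child[1] is the hot spare; once child[0] has empty DTLs and carries the 'unspare' request, the spare is returned so the caller can detach it.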
3333 */ 3334 if (vd->vdev_ops == &vdev_spare_ops && vd->vdev_children == 2) { 3335 newvd = vd->vdev_child[0]; 3336 oldvd = vd->vdev_child[1]; 3337 3338 mutex_enter(&newvd->vdev_dtl_lock); 3339 if (newvd->vdev_unspare && 3340 newvd->vdev_dtl_map.sm_space == 0 && 3341 newvd->vdev_dtl_scrub.sm_space == 0) { 3342 newvd->vdev_unspare = 0; 3343 mutex_exit(&newvd->vdev_dtl_lock); 3344 return (oldvd); 3345 } 3346 mutex_exit(&newvd->vdev_dtl_lock); 3347 } 3348 3349 return (NULL); 3350 } 3351 3352 static void 3353 spa_vdev_resilver_done(spa_t *spa) 3354 { 3355 vdev_t *vd; 3356 vdev_t *pvd; 3357 uint64_t guid; 3358 uint64_t pguid = 0; 3359 3360 spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER); 3361 3362 while ((vd = spa_vdev_resilver_done_hunt(spa->spa_root_vdev)) != NULL) { 3363 guid = vd->vdev_guid; 3364 /* 3365 * If we have just finished replacing a hot spared device, then 3366 * we need to detach the parent's first child (the original hot 3367 * spare) as well. 3368 */ 3369 pvd = vd->vdev_parent; 3370 if (pvd->vdev_parent->vdev_ops == &vdev_spare_ops && 3371 pvd->vdev_id == 0) { 3372 ASSERT(pvd->vdev_ops == &vdev_replacing_ops); 3373 ASSERT(pvd->vdev_parent->vdev_children == 2); 3374 pguid = pvd->vdev_parent->vdev_child[1]->vdev_guid; 3375 } 3376 spa_config_exit(spa, SCL_CONFIG, FTAG); 3377 if (spa_vdev_detach(spa, guid, B_TRUE) != 0) 3378 return; 3379 if (pguid != 0 && spa_vdev_detach(spa, pguid, B_TRUE) != 0) 3380 return; 3381 spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER); 3382 } 3383 3384 spa_config_exit(spa, SCL_CONFIG, FTAG); 3385 } 3386 3387 /* 3388 * Update the stored path for this vdev. Dirty the vdev configuration, relying 3389 * on spa_vdev_enter/exit() to synchronize the labels and cache. 3390 */ 3391 int 3392 spa_vdev_setpath(spa_t *spa, uint64_t guid, const char *newpath) 3393 { 3394 vdev_t *vd; 3395 uint64_t txg; 3396 3397 txg = spa_vdev_enter(spa); 3398 3399 if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL) { 3400 /* 3401 * Determine if this is a reference to a hot spare device. If 3402 * it is, update the path manually as there is no associated 3403 * vdev_t that can be synced to disk. 
3404 */ 3405 nvlist_t **spares; 3406 uint_t i, nspares; 3407 3408 if (spa->spa_spares.sav_config != NULL) { 3409 VERIFY(nvlist_lookup_nvlist_array( 3410 spa->spa_spares.sav_config, ZPOOL_CONFIG_SPARES, 3411 &spares, &nspares) == 0); 3412 for (i = 0; i < nspares; i++) { 3413 uint64_t theguid; 3414 VERIFY(nvlist_lookup_uint64(spares[i], 3415 ZPOOL_CONFIG_GUID, &theguid) == 0); 3416 if (theguid == guid) { 3417 VERIFY(nvlist_add_string(spares[i], 3418 ZPOOL_CONFIG_PATH, newpath) == 0); 3419 spa_load_spares(spa); 3420 spa->spa_spares.sav_sync = B_TRUE; 3421 return (spa_vdev_exit(spa, NULL, txg, 3422 0)); 3423 } 3424 } 3425 } 3426 3427 return (spa_vdev_exit(spa, NULL, txg, ENOENT)); 3428 } 3429 3430 if (!vd->vdev_ops->vdev_op_leaf) 3431 return (spa_vdev_exit(spa, NULL, txg, ENOTSUP)); 3432 3433 spa_strfree(vd->vdev_path); 3434 vd->vdev_path = spa_strdup(newpath); 3435 3436 vdev_config_dirty(vd->vdev_top); 3437 3438 return (spa_vdev_exit(spa, NULL, txg, 0)); 3439 } 3440 3441 /* 3442 * ========================================================================== 3443 * SPA Scrubbing 3444 * ========================================================================== 3445 */ 3446 3447 int 3448 spa_scrub(spa_t *spa, pool_scrub_type_t type) 3449 { 3450 ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == 0); 3451 3452 if ((uint_t)type >= POOL_SCRUB_TYPES) 3453 return (ENOTSUP); 3454 3455 /* 3456 * If a resilver was requested, but there is no DTL on a 3457 * writeable leaf device, we have nothing to do. 3458 */ 3459 if (type == POOL_SCRUB_RESILVER && 3460 !vdev_resilver_needed(spa->spa_root_vdev, NULL, NULL)) { 3461 spa_async_request(spa, SPA_ASYNC_RESILVER_DONE); 3462 return (0); 3463 } 3464 3465 if (type == POOL_SCRUB_EVERYTHING && 3466 spa->spa_dsl_pool->dp_scrub_func != SCRUB_FUNC_NONE && 3467 spa->spa_dsl_pool->dp_scrub_isresilver) 3468 return (EBUSY); 3469 3470 if (type == POOL_SCRUB_EVERYTHING || type == POOL_SCRUB_RESILVER) { 3471 return (dsl_pool_scrub_clean(spa->spa_dsl_pool)); 3472 } else if (type == POOL_SCRUB_NONE) { 3473 return (dsl_pool_scrub_cancel(spa->spa_dsl_pool)); 3474 } else { 3475 return (EINVAL); 3476 } 3477 } 3478 3479 /* 3480 * ========================================================================== 3481 * SPA async task processing 3482 * ========================================================================== 3483 */ 3484 3485 static void 3486 spa_async_remove(spa_t *spa, vdev_t *vd) 3487 { 3488 if (vd->vdev_remove_wanted) { 3489 vd->vdev_remove_wanted = 0; 3490 vdev_set_state(vd, B_FALSE, VDEV_STATE_REMOVED, VDEV_AUX_NONE); 3491 vdev_clear(spa, vd); 3492 vdev_state_dirty(vd->vdev_top); 3493 } 3494 3495 for (int c = 0; c < vd->vdev_children; c++) 3496 spa_async_remove(spa, vd->vdev_child[c]); 3497 } 3498 3499 static void 3500 spa_async_probe(spa_t *spa, vdev_t *vd) 3501 { 3502 if (vd->vdev_probe_wanted) { 3503 vd->vdev_probe_wanted = 0; 3504 vdev_reopen(vd); /* vdev_open() does the actual probe */ 3505 } 3506 3507 for (int c = 0; c < vd->vdev_children; c++) 3508 spa_async_probe(spa, vd->vdev_child[c]); 3509 } 3510 3511 static void 3512 spa_async_thread(spa_t *spa) 3513 { 3514 int tasks; 3515 3516 ASSERT(spa->spa_sync_on); 3517 3518 mutex_enter(&spa->spa_async_lock); 3519 tasks = spa->spa_async_tasks; 3520 spa->spa_async_tasks = 0; 3521 mutex_exit(&spa->spa_async_lock); 3522 3523 /* 3524 * See if the config needs to be updated. 
3525 */ 3526 if (tasks & SPA_ASYNC_CONFIG_UPDATE) { 3527 mutex_enter(&spa_namespace_lock); 3528 spa_config_update(spa, SPA_CONFIG_UPDATE_POOL); 3529 mutex_exit(&spa_namespace_lock); 3530 } 3531 3532 /* 3533 * See if any devices need to be marked REMOVED. 3534 */ 3535 if (tasks & SPA_ASYNC_REMOVE) { 3536 spa_vdev_state_enter(spa); 3537 spa_async_remove(spa, spa->spa_root_vdev); 3538 for (int i = 0; i < spa->spa_l2cache.sav_count; i++) 3539 spa_async_remove(spa, spa->spa_l2cache.sav_vdevs[i]); 3540 for (int i = 0; i < spa->spa_spares.sav_count; i++) 3541 spa_async_remove(spa, spa->spa_spares.sav_vdevs[i]); 3542 (void) spa_vdev_state_exit(spa, NULL, 0); 3543 } 3544 3545 /* 3546 * See if any devices need to be probed. 3547 */ 3548 if (tasks & SPA_ASYNC_PROBE) { 3549 spa_vdev_state_enter(spa); 3550 spa_async_probe(spa, spa->spa_root_vdev); 3551 (void) spa_vdev_state_exit(spa, NULL, 0); 3552 } 3553 3554 /* 3555 * If any devices are done replacing, detach them. 3556 */ 3557 if (tasks & SPA_ASYNC_RESILVER_DONE) 3558 spa_vdev_resilver_done(spa); 3559 3560 /* 3561 * Kick off a resilver. 3562 */ 3563 if (tasks & SPA_ASYNC_RESILVER) 3564 VERIFY(spa_scrub(spa, POOL_SCRUB_RESILVER) == 0); 3565 3566 /* 3567 * Let the world know that we're done. 3568 */ 3569 mutex_enter(&spa->spa_async_lock); 3570 spa->spa_async_thread = NULL; 3571 cv_broadcast(&spa->spa_async_cv); 3572 mutex_exit(&spa->spa_async_lock); 3573 thread_exit(); 3574 } 3575 3576 void 3577 spa_async_suspend(spa_t *spa) 3578 { 3579 mutex_enter(&spa->spa_async_lock); 3580 spa->spa_async_suspended++; 3581 while (spa->spa_async_thread != NULL) 3582 cv_wait(&spa->spa_async_cv, &spa->spa_async_lock); 3583 mutex_exit(&spa->spa_async_lock); 3584 } 3585 3586 void 3587 spa_async_resume(spa_t *spa) 3588 { 3589 mutex_enter(&spa->spa_async_lock); 3590 ASSERT(spa->spa_async_suspended != 0); 3591 spa->spa_async_suspended--; 3592 mutex_exit(&spa->spa_async_lock); 3593 } 3594 3595 static void 3596 spa_async_dispatch(spa_t *spa) 3597 { 3598 mutex_enter(&spa->spa_async_lock); 3599 if (spa->spa_async_tasks && !spa->spa_async_suspended && 3600 spa->spa_async_thread == NULL && 3601 rootdir != NULL && !vn_is_readonly(rootdir)) 3602 spa->spa_async_thread = thread_create(NULL, 0, 3603 spa_async_thread, spa, 0, &p0, TS_RUN, maxclsyspri); 3604 mutex_exit(&spa->spa_async_lock); 3605 } 3606 3607 void 3608 spa_async_request(spa_t *spa, int task) 3609 { 3610 mutex_enter(&spa->spa_async_lock); 3611 spa->spa_async_tasks |= task; 3612 mutex_exit(&spa->spa_async_lock); 3613 } 3614 3615 /* 3616 * ========================================================================== 3617 * SPA syncing routines 3618 * ========================================================================== 3619 */ 3620 3621 static void 3622 spa_sync_deferred_frees(spa_t *spa, uint64_t txg) 3623 { 3624 bplist_t *bpl = &spa->spa_sync_bplist; 3625 dmu_tx_t *tx; 3626 blkptr_t blk; 3627 uint64_t itor = 0; 3628 zio_t *zio; 3629 int error; 3630 uint8_t c = 1; 3631 3632 zio = zio_root(spa, NULL, NULL, ZIO_FLAG_CANFAIL); 3633 3634 while (bplist_iterate(bpl, &itor, &blk) == 0) { 3635 ASSERT(blk.blk_birth < txg); 3636 zio_nowait(zio_free(zio, spa, txg, &blk, NULL, NULL, 3637 ZIO_FLAG_MUSTSUCCEED)); 3638 } 3639 3640 error = zio_wait(zio); 3641 ASSERT3U(error, ==, 0); 3642 3643 tx = dmu_tx_create_assigned(spa->spa_dsl_pool, txg); 3644 bplist_vacate(bpl, tx); 3645 3646 /* 3647 * Pre-dirty the first block so we sync to convergence faster. 3648 * (Usually only the first block is needed.) 
3649 */ 3650 dmu_write(spa->spa_meta_objset, spa->spa_sync_bplist_obj, 0, 1, &c, tx); 3651 dmu_tx_commit(tx); 3652 } 3653 3654 static void 3655 spa_sync_nvlist(spa_t *spa, uint64_t obj, nvlist_t *nv, dmu_tx_t *tx) 3656 { 3657 char *packed = NULL; 3658 size_t bufsize; 3659 size_t nvsize = 0; 3660 dmu_buf_t *db; 3661 3662 VERIFY(nvlist_size(nv, &nvsize, NV_ENCODE_XDR) == 0); 3663 3664 /* 3665 * Write full (SPA_CONFIG_BLOCKSIZE) blocks of configuration 3666 * information. This avoids the dbuf_will_dirty() path and 3667 * saves us a pre-read to get data we don't actually care about. 3668 */ 3669 bufsize = P2ROUNDUP(nvsize, SPA_CONFIG_BLOCKSIZE); 3670 packed = kmem_alloc(bufsize, KM_SLEEP); 3671 3672 VERIFY(nvlist_pack(nv, &packed, &nvsize, NV_ENCODE_XDR, 3673 KM_SLEEP) == 0); 3674 bzero(packed + nvsize, bufsize - nvsize); 3675 3676 dmu_write(spa->spa_meta_objset, obj, 0, bufsize, packed, tx); 3677 3678 kmem_free(packed, bufsize); 3679 3680 VERIFY(0 == dmu_bonus_hold(spa->spa_meta_objset, obj, FTAG, &db)); 3681 dmu_buf_will_dirty(db, tx); 3682 *(uint64_t *)db->db_data = nvsize; 3683 dmu_buf_rele(db, FTAG); 3684 } 3685 3686 static void 3687 spa_sync_aux_dev(spa_t *spa, spa_aux_vdev_t *sav, dmu_tx_t *tx, 3688 const char *config, const char *entry) 3689 { 3690 nvlist_t *nvroot; 3691 nvlist_t **list; 3692 int i; 3693 3694 if (!sav->sav_sync) 3695 return; 3696 3697 /* 3698 * Update the MOS nvlist describing the list of available devices. 3699 * spa_validate_aux() will have already made sure this nvlist is 3700 * valid and the vdevs are labeled appropriately. 3701 */ 3702 if (sav->sav_object == 0) { 3703 sav->sav_object = dmu_object_alloc(spa->spa_meta_objset, 3704 DMU_OT_PACKED_NVLIST, 1 << 14, DMU_OT_PACKED_NVLIST_SIZE, 3705 sizeof (uint64_t), tx); 3706 VERIFY(zap_update(spa->spa_meta_objset, 3707 DMU_POOL_DIRECTORY_OBJECT, entry, sizeof (uint64_t), 1, 3708 &sav->sav_object, tx) == 0); 3709 } 3710 3711 VERIFY(nvlist_alloc(&nvroot, NV_UNIQUE_NAME, KM_SLEEP) == 0); 3712 if (sav->sav_count == 0) { 3713 VERIFY(nvlist_add_nvlist_array(nvroot, config, NULL, 0) == 0); 3714 } else { 3715 list = kmem_alloc(sav->sav_count * sizeof (void *), KM_SLEEP); 3716 for (i = 0; i < sav->sav_count; i++) 3717 list[i] = vdev_config_generate(spa, sav->sav_vdevs[i], 3718 B_FALSE, B_FALSE, B_TRUE); 3719 VERIFY(nvlist_add_nvlist_array(nvroot, config, list, 3720 sav->sav_count) == 0); 3721 for (i = 0; i < sav->sav_count; i++) 3722 nvlist_free(list[i]); 3723 kmem_free(list, sav->sav_count * sizeof (void *)); 3724 } 3725 3726 spa_sync_nvlist(spa, sav->sav_object, nvroot, tx); 3727 nvlist_free(nvroot); 3728 3729 sav->sav_sync = B_FALSE; 3730 } 3731 3732 static void 3733 spa_sync_config_object(spa_t *spa, dmu_tx_t *tx) 3734 { 3735 nvlist_t *config; 3736 3737 if (list_is_empty(&spa->spa_config_dirty_list)) 3738 return; 3739 3740 spa_config_enter(spa, SCL_STATE, FTAG, RW_READER); 3741 3742 config = spa_config_generate(spa, spa->spa_root_vdev, 3743 dmu_tx_get_txg(tx), B_FALSE); 3744 3745 spa_config_exit(spa, SCL_STATE, FTAG); 3746 3747 if (spa->spa_config_syncing) 3748 nvlist_free(spa->spa_config_syncing); 3749 spa->spa_config_syncing = config; 3750 3751 spa_sync_nvlist(spa, spa->spa_config_object, config, tx); 3752 } 3753 3754 /* 3755 * Set zpool properties. 
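* spa_sync_props() always runs in syncing context: spa_create() calls it directly with the creation transaction (see above), and property updates on a live pool are expected to arrive through the DSL sync-task mechanism, which is why arg1 and arg2 are untyped pointers (arg1 is the spa_t, arg2 the nvlist of properties to apply).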
3756 */ 3757 static void 3758 spa_sync_props(void *arg1, void *arg2, cred_t *cr, dmu_tx_t *tx) 3759 { 3760 spa_t *spa = arg1; 3761 objset_t *mos = spa->spa_meta_objset; 3762 nvlist_t *nvp = arg2; 3763 nvpair_t *elem; 3764 uint64_t intval; 3765 char *strval; 3766 zpool_prop_t prop; 3767 const char *propname; 3768 zprop_type_t proptype; 3769 spa_config_dirent_t *dp; 3770 3771 mutex_enter(&spa->spa_props_lock); 3772 3773 elem = NULL; 3774 while ((elem = nvlist_next_nvpair(nvp, elem))) { 3775 switch (prop = zpool_name_to_prop(nvpair_name(elem))) { 3776 case ZPOOL_PROP_VERSION: 3777 /* 3778 * Only set version for non-zpool-creation cases 3779 * (set/import). spa_create() needs special care 3780 * for version setting. 3781 */ 3782 if (tx->tx_txg != TXG_INITIAL) { 3783 VERIFY(nvpair_value_uint64(elem, 3784 &intval) == 0); 3785 ASSERT(intval <= SPA_VERSION); 3786 ASSERT(intval >= spa_version(spa)); 3787 spa->spa_uberblock.ub_version = intval; 3788 vdev_config_dirty(spa->spa_root_vdev); 3789 } 3790 break; 3791 3792 case ZPOOL_PROP_ALTROOT: 3793 /* 3794 * 'altroot' is a non-persistent property. It should 3795 * have been set temporarily at creation or import time. 3796 */ 3797 ASSERT(spa->spa_root != NULL); 3798 break; 3799 3800 case ZPOOL_PROP_CACHEFILE: 3801 /* 3802 * 'cachefile' is a non-persistent property, but note 3803 * an async request that the config cache needs to be 3804 * updated. 3805 */ 3806 VERIFY(nvpair_value_string(elem, &strval) == 0); 3807 3808 dp = kmem_alloc(sizeof (spa_config_dirent_t), KM_SLEEP); 3809 3810 if (strval[0] == '\0') 3811 dp->scd_path = spa_strdup(spa_config_path); 3812 else if (strcmp(strval, "none") == 0) 3813 dp->scd_path = NULL; 3814 else 3815 dp->scd_path = spa_strdup(strval); 3816 3817 list_insert_head(&spa->spa_config_list, dp); 3818 spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE); 3819 break; 3820 default: 3821 /* 3822 * Set pool property values in the poolprops mos object. 
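* The pool properties ZAP object is created lazily here on the first persistent property update and is recorded in the pool directory under DMU_POOL_PROPS, as the zap_create()/zap_update() pair below shows.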
3823 */ 3824 if (spa->spa_pool_props_object == 0) { 3825 objset_t *mos = spa->spa_meta_objset; 3826 3827 VERIFY((spa->spa_pool_props_object = 3828 zap_create(mos, DMU_OT_POOL_PROPS, 3829 DMU_OT_NONE, 0, tx)) > 0); 3830 3831 VERIFY(zap_update(mos, 3832 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_PROPS, 3833 8, 1, &spa->spa_pool_props_object, tx) 3834 == 0); 3835 } 3836 3837 /* normalize the property name */ 3838 propname = zpool_prop_to_name(prop); 3839 proptype = zpool_prop_get_type(prop); 3840 3841 if (nvpair_type(elem) == DATA_TYPE_STRING) { 3842 ASSERT(proptype == PROP_TYPE_STRING); 3843 VERIFY(nvpair_value_string(elem, &strval) == 0); 3844 VERIFY(zap_update(mos, 3845 spa->spa_pool_props_object, propname, 3846 1, strlen(strval) + 1, strval, tx) == 0); 3847 3848 } else if (nvpair_type(elem) == DATA_TYPE_UINT64) { 3849 VERIFY(nvpair_value_uint64(elem, &intval) == 0); 3850 3851 if (proptype == PROP_TYPE_INDEX) { 3852 const char *unused; 3853 VERIFY(zpool_prop_index_to_string( 3854 prop, intval, &unused) == 0); 3855 } 3856 VERIFY(zap_update(mos, 3857 spa->spa_pool_props_object, propname, 3858 8, 1, &intval, tx) == 0); 3859 } else { 3860 ASSERT(0); /* not allowed */ 3861 } 3862 3863 switch (prop) { 3864 case ZPOOL_PROP_DELEGATION: 3865 spa->spa_delegation = intval; 3866 break; 3867 case ZPOOL_PROP_BOOTFS: 3868 spa->spa_bootfs = intval; 3869 break; 3870 case ZPOOL_PROP_FAILUREMODE: 3871 spa->spa_failmode = intval; 3872 break; 3873 default: 3874 break; 3875 } 3876 } 3877 3878 /* log internal history if this is not a zpool create */ 3879 if (spa_version(spa) >= SPA_VERSION_ZPOOL_HISTORY && 3880 tx->tx_txg != TXG_INITIAL) { 3881 spa_history_internal_log(LOG_POOL_PROPSET, 3882 spa, tx, cr, "%s %lld %s", 3883 nvpair_name(elem), intval, spa_name(spa)); 3884 } 3885 } 3886 3887 mutex_exit(&spa->spa_props_lock); 3888 } 3889 3890 /* 3891 * Sync the specified transaction group. New blocks may be dirtied as 3892 * part of the process, so we iterate until it converges. 3893 */ 3894 void 3895 spa_sync(spa_t *spa, uint64_t txg) 3896 { 3897 dsl_pool_t *dp = spa->spa_dsl_pool; 3898 objset_t *mos = spa->spa_meta_objset; 3899 bplist_t *bpl = &spa->spa_sync_bplist; 3900 vdev_t *rvd = spa->spa_root_vdev; 3901 vdev_t *vd; 3902 dmu_tx_t *tx; 3903 int dirty_vdevs; 3904 int error; 3905 3906 /* 3907 * Lock out configuration changes. 3908 */ 3909 spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER); 3910 3911 spa->spa_syncing_txg = txg; 3912 spa->spa_sync_pass = 0; 3913 3914 /* 3915 * If there are any pending vdev state changes, convert them 3916 * into config changes that go out with this transaction group. 3917 */ 3918 spa_config_enter(spa, SCL_STATE, FTAG, RW_READER); 3919 while ((vd = list_head(&spa->spa_state_dirty_list)) != NULL) { 3920 vdev_state_clean(vd); 3921 vdev_config_dirty(vd); 3922 } 3923 spa_config_exit(spa, SCL_STATE, FTAG); 3924 3925 VERIFY(0 == bplist_open(bpl, mos, spa->spa_sync_bplist_obj)); 3926 3927 tx = dmu_tx_create_assigned(dp, txg); 3928 3929 /* 3930 * If we are upgrading to SPA_VERSION_RAIDZ_DEFLATE this txg, 3931 * set spa_deflate if we have no raid-z vdevs. 
3932 */ 3933 if (spa->spa_ubsync.ub_version < SPA_VERSION_RAIDZ_DEFLATE && 3934 spa->spa_uberblock.ub_version >= SPA_VERSION_RAIDZ_DEFLATE) { 3935 int i; 3936 3937 for (i = 0; i < rvd->vdev_children; i++) { 3938 vd = rvd->vdev_child[i]; 3939 if (vd->vdev_deflate_ratio != SPA_MINBLOCKSIZE) 3940 break; 3941 } 3942 if (i == rvd->vdev_children) { 3943 spa->spa_deflate = TRUE; 3944 VERIFY(0 == zap_add(spa->spa_meta_objset, 3945 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_DEFLATE, 3946 sizeof (uint64_t), 1, &spa->spa_deflate, tx)); 3947 } 3948 } 3949 3950 if (spa->spa_ubsync.ub_version < SPA_VERSION_ORIGIN && 3951 spa->spa_uberblock.ub_version >= SPA_VERSION_ORIGIN) { 3952 dsl_pool_create_origin(dp, tx); 3953 3954 /* Keeping the origin open increases spa_minref */ 3955 spa->spa_minref += 3; 3956 } 3957 3958 if (spa->spa_ubsync.ub_version < SPA_VERSION_NEXT_CLONES && 3959 spa->spa_uberblock.ub_version >= SPA_VERSION_NEXT_CLONES) { 3960 dsl_pool_upgrade_clones(dp, tx); 3961 } 3962 3963 /* 3964 * If anything has changed in this txg, push the deferred frees 3965 * from the previous txg. If not, leave them alone so that we 3966 * don't generate work on an otherwise idle system. 3967 */ 3968 if (!txg_list_empty(&dp->dp_dirty_datasets, txg) || 3969 !txg_list_empty(&dp->dp_dirty_dirs, txg) || 3970 !txg_list_empty(&dp->dp_sync_tasks, txg)) 3971 spa_sync_deferred_frees(spa, txg); 3972 3973 /* 3974 * Iterate to convergence. 3975 */ 3976 do { 3977 spa->spa_sync_pass++; 3978 3979 spa_sync_config_object(spa, tx); 3980 spa_sync_aux_dev(spa, &spa->spa_spares, tx, 3981 ZPOOL_CONFIG_SPARES, DMU_POOL_SPARES); 3982 spa_sync_aux_dev(spa, &spa->spa_l2cache, tx, 3983 ZPOOL_CONFIG_L2CACHE, DMU_POOL_L2CACHE); 3984 spa_errlog_sync(spa, txg); 3985 dsl_pool_sync(dp, txg); 3986 3987 dirty_vdevs = 0; 3988 while (vd = txg_list_remove(&spa->spa_vdev_txg_list, txg)) { 3989 vdev_sync(vd, txg); 3990 dirty_vdevs++; 3991 } 3992 3993 bplist_sync(bpl, tx); 3994 } while (dirty_vdevs); 3995 3996 bplist_close(bpl); 3997 3998 dprintf("txg %llu passes %d\n", txg, spa->spa_sync_pass); 3999 4000 /* 4001 * Rewrite the vdev configuration (which includes the uberblock) 4002 * to commit the transaction group. 4003 * 4004 * If there are no dirty vdevs, we sync the uberblock to a few 4005 * random top-level vdevs that are known to be visible in the 4006 * config cache (see spa_vdev_add() for a complete description). 4007 * If there *are* dirty vdevs, sync the uberblock to all vdevs. 4008 */ 4009 for (;;) { 4010 /* 4011 * We hold SCL_STATE to prevent vdev open/close/etc. 4012 * while we're attempting to write the vdev labels. 4013 */ 4014 spa_config_enter(spa, SCL_STATE, FTAG, RW_READER); 4015 4016 if (list_is_empty(&spa->spa_config_dirty_list)) { 4017 vdev_t *svd[SPA_DVAS_PER_BP]; 4018 int svdcount = 0; 4019 int children = rvd->vdev_children; 4020 int c0 = spa_get_random(children); 4021 int c; 4022 4023 for (c = 0; c < children; c++) { 4024 vd = rvd->vdev_child[(c0 + c) % children]; 4025 if (vd->vdev_ms_array == 0 || vd->vdev_islog) 4026 continue; 4027 svd[svdcount++] = vd; 4028 if (svdcount == SPA_DVAS_PER_BP) 4029 break; 4030 } 4031 error = vdev_config_sync(svd, svdcount, txg); 4032 } else { 4033 error = vdev_config_sync(rvd->vdev_child, 4034 rvd->vdev_children, txg); 4035 } 4036 4037 spa_config_exit(spa, SCL_STATE, FTAG); 4038 4039 if (error == 0) 4040 break; 4041 zio_suspend(spa, NULL); 4042 zio_resume_wait(spa); 4043 } 4044 dmu_tx_commit(tx); 4045 4046 /* 4047 * Clear the dirty config list. 

	/*
	 * Clear the dirty config list.
	 */
	while ((vd = list_head(&spa->spa_config_dirty_list)) != NULL)
		vdev_config_clean(vd);

	/*
	 * Now that the new config has synced transactionally,
	 * let it become visible to the config cache.
	 */
	if (spa->spa_config_syncing != NULL) {
		spa_config_set(spa, spa->spa_config_syncing);
		spa->spa_config_txg = txg;
		spa->spa_config_syncing = NULL;
	}

	spa->spa_traverse_wanted = B_TRUE;
	rw_enter(&spa->spa_traverse_lock, RW_WRITER);
	spa->spa_traverse_wanted = B_FALSE;
	spa->spa_ubsync = spa->spa_uberblock;
	rw_exit(&spa->spa_traverse_lock);

	/*
	 * Clean up the ZIL records for the synced txg.
	 */
	dsl_pool_zil_clean(dp);

	/*
	 * Update usable space statistics.
	 */
	while ((vd = txg_list_remove(&spa->spa_vdev_txg_list,
	    TXG_CLEAN(txg))) != NULL)
		vdev_sync_done(vd, txg);

	/*
	 * It had better be the case that we didn't dirty anything
	 * since vdev_config_sync().
	 */
	ASSERT(txg_list_empty(&dp->dp_dirty_datasets, txg));
	ASSERT(txg_list_empty(&dp->dp_dirty_dirs, txg));
	ASSERT(txg_list_empty(&spa->spa_vdev_txg_list, txg));
	ASSERT(bpl->bpl_queue == NULL);

	spa_config_exit(spa, SCL_CONFIG, FTAG);

	/*
	 * If any async tasks have been requested, kick them off.
	 */
	spa_async_dispatch(spa);
}

/*
 * Sync all pools.  We don't want to hold the namespace lock across these
 * operations, so we take a reference on the spa_t and drop the lock during
 * the sync.
 */
void
spa_sync_allpools(void)
{
	spa_t *spa = NULL;
	mutex_enter(&spa_namespace_lock);
	while ((spa = spa_next(spa)) != NULL) {
		if (spa_state(spa) != POOL_STATE_ACTIVE || spa_suspended(spa))
			continue;
		spa_open_ref(spa, FTAG);
		mutex_exit(&spa_namespace_lock);
		txg_wait_synced(spa_get_dsl(spa), 0);
		mutex_enter(&spa_namespace_lock);
		spa_close(spa, FTAG);
	}
	mutex_exit(&spa_namespace_lock);
}
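
/*
 * A caller that only needs a single pool flushed can follow the same
 * pattern per pool: take a reference and wait for the currently open
 * transaction group to sync.  A minimal sketch, assuming the caller knows
 * the pool by name and with error handling omitted:
 *
 *	spa_t *spa;
 *
 *	if (spa_open(name, &spa, FTAG) == 0) {
 *		txg_wait_synced(spa_get_dsl(spa), 0);
 *		spa_close(spa, FTAG);
 *	}
 *
 * Passing txg 0 to txg_wait_synced() means "wait for whatever txg is
 * currently open to reach disk".
 */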

/*
 * ==========================================================================
 * Miscellaneous routines
 * ==========================================================================
 */

/*
 * Remove all pools in the system.
 */
void
spa_evict_all(void)
{
	spa_t *spa;

	/*
	 * Remove all cached state.  All pools should be closed now,
	 * so every spa in the AVL tree should be unreferenced.
	 */
	mutex_enter(&spa_namespace_lock);
	while ((spa = spa_next(NULL)) != NULL) {
		/*
		 * Stop async tasks.  The async thread may need to detach
		 * a device that's been replaced, which requires grabbing
		 * spa_namespace_lock, so we must drop it here.
		 */
		spa_open_ref(spa, FTAG);
		mutex_exit(&spa_namespace_lock);
		spa_async_suspend(spa);
		mutex_enter(&spa_namespace_lock);
		spa_close(spa, FTAG);

		if (spa->spa_state != POOL_STATE_UNINITIALIZED) {
			spa_unload(spa);
			spa_deactivate(spa);
		}
		spa_remove(spa);
	}
	mutex_exit(&spa_namespace_lock);
}

vdev_t *
spa_lookup_by_guid(spa_t *spa, uint64_t guid, boolean_t l2cache)
{
	vdev_t *vd;
	int i;

	if ((vd = vdev_lookup_by_guid(spa->spa_root_vdev, guid)) != NULL)
		return (vd);

	if (l2cache) {
		for (i = 0; i < spa->spa_l2cache.sav_count; i++) {
			vd = spa->spa_l2cache.sav_vdevs[i];
			if (vd->vdev_guid == guid)
				return (vd);
		}
	}

	return (NULL);
}

void
spa_upgrade(spa_t *spa, uint64_t version)
{
	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);

	/*
	 * This should only be called for a non-faulted pool, and since a
	 * pool from a future version could never have been opened in the
	 * first place, ub_version can never exceed SPA_VERSION here.
	 */
	ASSERT(spa->spa_uberblock.ub_version <= SPA_VERSION);
	ASSERT(version >= spa->spa_uberblock.ub_version);

	spa->spa_uberblock.ub_version = version;
	vdev_config_dirty(spa->spa_root_vdev);

	spa_config_exit(spa, SCL_ALL, FTAG);

	txg_wait_synced(spa_get_dsl(spa), 0);
}
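
/*
 * An illustrative use of spa_upgrade() -- typically reached from the
 * pool-upgrade ioctl path, which pushes an older pool forward, often to
 * the newest format the software understands (SPA_VERSION).  Sketch only;
 * the real caller validates the requested version first:
 *
 *	if (spa_version(spa) < SPA_VERSION)
 *		spa_upgrade(spa, SPA_VERSION);
 */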

boolean_t
spa_has_spare(spa_t *spa, uint64_t guid)
{
	int i;
	uint64_t spareguid;
	spa_aux_vdev_t *sav = &spa->spa_spares;

	for (i = 0; i < sav->sav_count; i++)
		if (sav->sav_vdevs[i]->vdev_guid == guid)
			return (B_TRUE);

	for (i = 0; i < sav->sav_npending; i++) {
		if (nvlist_lookup_uint64(sav->sav_pending[i], ZPOOL_CONFIG_GUID,
		    &spareguid) == 0 && spareguid == guid)
			return (B_TRUE);
	}

	return (B_FALSE);
}

/*
 * Check if a pool has an active shared spare device.
 * Note: the reference count of an active spare is 2, once as a spare and
 * once as a replacement.
 */
static boolean_t
spa_has_active_shared_spare(spa_t *spa)
{
	int i, refcnt;
	uint64_t pool;
	spa_aux_vdev_t *sav = &spa->spa_spares;

	for (i = 0; i < sav->sav_count; i++) {
		if (spa_spare_exists(sav->sav_vdevs[i]->vdev_guid, &pool,
		    &refcnt) && pool != 0ULL && pool == spa_guid(spa) &&
		    refcnt > 2)
			return (B_TRUE);
	}

	return (B_FALSE);
}

/*
 * Post a sysevent corresponding to the given event.  The 'name' must be one
 * of the event definitions in sys/sysevent/eventdefs.h.  The payload will be
 * filled in from the spa and (optionally) the vdev.  This doesn't do anything
 * in the userland libzpool, as we don't want consumers to misinterpret ztest
 * or zdb as real changes.
 */
void
spa_event_notify(spa_t *spa, vdev_t *vd, const char *name)
{
#ifdef _KERNEL
	sysevent_t *ev;
	sysevent_attr_list_t *attr = NULL;
	sysevent_value_t value;
	sysevent_id_t eid;

	ev = sysevent_alloc(EC_ZFS, (char *)name, SUNW_KERN_PUB "zfs",
	    SE_SLEEP);

	value.value_type = SE_DATA_TYPE_STRING;
	value.value.sv_string = spa_name(spa);
	if (sysevent_add_attr(&attr, ZFS_EV_POOL_NAME, &value, SE_SLEEP) != 0)
		goto done;

	value.value_type = SE_DATA_TYPE_UINT64;
	value.value.sv_uint64 = spa_guid(spa);
	if (sysevent_add_attr(&attr, ZFS_EV_POOL_GUID, &value, SE_SLEEP) != 0)
		goto done;

	if (vd) {
		value.value_type = SE_DATA_TYPE_UINT64;
		value.value.sv_uint64 = vd->vdev_guid;
		if (sysevent_add_attr(&attr, ZFS_EV_VDEV_GUID, &value,
		    SE_SLEEP) != 0)
			goto done;

		if (vd->vdev_path) {
			value.value_type = SE_DATA_TYPE_STRING;
			value.value.sv_string = vd->vdev_path;
			if (sysevent_add_attr(&attr, ZFS_EV_VDEV_PATH,
			    &value, SE_SLEEP) != 0)
				goto done;
		}
	}

	if (sysevent_attach_attributes(ev, attr) != 0)
		goto done;
	attr = NULL;

	(void) log_sysevent(ev, SE_SLEEP, &eid);

done:
	if (attr)
		sysevent_free_attr(attr);
	sysevent_free(ev);
#endif
}
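
/*
 * Illustrative calls (sketches, not definitions from this file): a device
 * removal path might post
 *
 *	spa_event_notify(spa, vd, ESC_ZFS_VDEV_REMOVE);
 *
 * while a pool-wide event that carries no vdev payload passes a NULL vdev:
 *
 *	spa_event_notify(spa, NULL, ESC_ZFS_POOL_DESTROY);
 *
 * Both ESC_ZFS_* names above are assumed to be among the EC_ZFS event
 * definitions in sys/sysevent/eventdefs.h.
 */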