/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * This file contains all the routines used when modifying on-disk SPA state.
 * This includes opening, importing, destroying, exporting a pool, and syncing a
 * pool.
 */

#include <sys/zfs_context.h>
#include <sys/fm/fs/zfs.h>
#include <sys/spa_impl.h>
#include <sys/zio.h>
#include <sys/zio_checksum.h>
#include <sys/zio_compress.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/zap.h>
#include <sys/zil.h>
#include <sys/vdev_impl.h>
#include <sys/metaslab.h>
#include <sys/uberblock_impl.h>
#include <sys/txg.h>
#include <sys/avl.h>
#include <sys/dmu_traverse.h>
#include <sys/dmu_objset.h>
#include <sys/unique.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_synctask.h>
#include <sys/fs/zfs.h>
#include <sys/arc.h>
#include <sys/callb.h>
#include <sys/systeminfo.h>
#include <sys/sunddi.h>
#include <sys/spa_boot.h>

#include "zfs_prop.h"
#include "zfs_comutil.h"

int zio_taskq_threads[ZIO_TYPES][ZIO_TASKQ_TYPES] = {
	/*	ISSUE	INTR				*/
	{	1,	1	},	/* ZIO_TYPE_NULL	*/
	{	1,	8	},	/* ZIO_TYPE_READ	*/
	{	8,	1	},	/* ZIO_TYPE_WRITE	*/
	{	1,	1	},	/* ZIO_TYPE_FREE	*/
	{	1,	1	},	/* ZIO_TYPE_CLAIM	*/
	{	1,	1	},	/* ZIO_TYPE_IOCTL	*/
};

static void spa_sync_props(void *arg1, void *arg2, cred_t *cr, dmu_tx_t *tx);
static boolean_t spa_has_active_shared_spare(spa_t *spa);

/*
 * ==========================================================================
 * SPA properties routines
 * ==========================================================================
 */

/*
 * Add a (source=src, propname=propval) list to an nvlist.
 */
static void
spa_prop_add_list(nvlist_t *nvl, zpool_prop_t prop, char *strval,
    uint64_t intval, zprop_source_t src)
{
	const char *propname = zpool_prop_to_name(prop);
	nvlist_t *propval;

	VERIFY(nvlist_alloc(&propval, NV_UNIQUE_NAME, KM_SLEEP) == 0);
	VERIFY(nvlist_add_uint64(propval, ZPROP_SOURCE, src) == 0);

	if (strval != NULL)
		VERIFY(nvlist_add_string(propval, ZPROP_VALUE, strval) == 0);
	else
		VERIFY(nvlist_add_uint64(propval, ZPROP_VALUE, intval) == 0);

	VERIFY(nvlist_add_nvlist(nvl, propname, propval) == 0);
	nvlist_free(propval);
}

/*
 * Get property values from the spa configuration.
 */
static void
spa_prop_get_config(spa_t *spa, nvlist_t **nvp)
{
	uint64_t size;
	uint64_t used;
	uint64_t cap, version;
	zprop_source_t src = ZPROP_SRC_NONE;
	spa_config_dirent_t *dp;

	ASSERT(MUTEX_HELD(&spa->spa_props_lock));

	if (spa->spa_root_vdev != NULL) {
		size = spa_get_space(spa);
		used = spa_get_alloc(spa);
		spa_prop_add_list(*nvp, ZPOOL_PROP_NAME, spa_name(spa), 0, src);
		spa_prop_add_list(*nvp, ZPOOL_PROP_SIZE, NULL, size, src);
		spa_prop_add_list(*nvp, ZPOOL_PROP_USED, NULL, used, src);
		spa_prop_add_list(*nvp, ZPOOL_PROP_AVAILABLE, NULL,
		    size - used, src);

		cap = (size == 0) ? 0 : (used * 100 / size);
		spa_prop_add_list(*nvp, ZPOOL_PROP_CAPACITY, NULL, cap, src);

		spa_prop_add_list(*nvp, ZPOOL_PROP_HEALTH, NULL,
		    spa->spa_root_vdev->vdev_state, src);

		version = spa_version(spa);
		if (version == zpool_prop_default_numeric(ZPOOL_PROP_VERSION))
			src = ZPROP_SRC_DEFAULT;
		else
			src = ZPROP_SRC_LOCAL;
		spa_prop_add_list(*nvp, ZPOOL_PROP_VERSION, NULL, version, src);
	}

	spa_prop_add_list(*nvp, ZPOOL_PROP_GUID, NULL, spa_guid(spa), src);

	if (spa->spa_root != NULL)
		spa_prop_add_list(*nvp, ZPOOL_PROP_ALTROOT, spa->spa_root,
		    0, ZPROP_SRC_LOCAL);

	if ((dp = list_head(&spa->spa_config_list)) != NULL) {
		if (dp->scd_path == NULL) {
			spa_prop_add_list(*nvp, ZPOOL_PROP_CACHEFILE,
			    "none", 0, ZPROP_SRC_LOCAL);
		} else if (strcmp(dp->scd_path, spa_config_path) != 0) {
			spa_prop_add_list(*nvp, ZPOOL_PROP_CACHEFILE,
			    dp->scd_path, 0, ZPROP_SRC_LOCAL);
		}
	}
}

/*
 * Get zpool property values.
 */
int
spa_prop_get(spa_t *spa, nvlist_t **nvp)
{
	zap_cursor_t zc;
	zap_attribute_t za;
	objset_t *mos = spa->spa_meta_objset;
	int err;

	VERIFY(nvlist_alloc(nvp, NV_UNIQUE_NAME, KM_SLEEP) == 0);

	mutex_enter(&spa->spa_props_lock);

	/*
	 * Get properties from the spa config.
	 */
	spa_prop_get_config(spa, nvp);

	/* If no pool property object, no more prop to get. */
	if (spa->spa_pool_props_object == 0) {
		mutex_exit(&spa->spa_props_lock);
		return (0);
	}

	/*
	 * Get properties from the MOS pool property object.
 */
	for (zap_cursor_init(&zc, mos, spa->spa_pool_props_object);
	    (err = zap_cursor_retrieve(&zc, &za)) == 0;
	    zap_cursor_advance(&zc)) {
		uint64_t intval = 0;
		char *strval = NULL;
		zprop_source_t src = ZPROP_SRC_DEFAULT;
		zpool_prop_t prop;

		if ((prop = zpool_name_to_prop(za.za_name)) == ZPROP_INVAL)
			continue;

		switch (za.za_integer_length) {
		case 8:
			/* integer property */
			if (za.za_first_integer !=
			    zpool_prop_default_numeric(prop))
				src = ZPROP_SRC_LOCAL;

			if (prop == ZPOOL_PROP_BOOTFS) {
				dsl_pool_t *dp;
				dsl_dataset_t *ds = NULL;

				dp = spa_get_dsl(spa);
				rw_enter(&dp->dp_config_rwlock, RW_READER);
				if (err = dsl_dataset_hold_obj(dp,
				    za.za_first_integer, FTAG, &ds)) {
					rw_exit(&dp->dp_config_rwlock);
					break;
				}

				strval = kmem_alloc(
				    MAXNAMELEN + strlen(MOS_DIR_NAME) + 1,
				    KM_SLEEP);
				dsl_dataset_name(ds, strval);
				dsl_dataset_rele(ds, FTAG);
				rw_exit(&dp->dp_config_rwlock);
			} else {
				strval = NULL;
				intval = za.za_first_integer;
			}

			spa_prop_add_list(*nvp, prop, strval, intval, src);

			if (strval != NULL)
				kmem_free(strval,
				    MAXNAMELEN + strlen(MOS_DIR_NAME) + 1);

			break;

		case 1:
			/* string property */
			strval = kmem_alloc(za.za_num_integers, KM_SLEEP);
			err = zap_lookup(mos, spa->spa_pool_props_object,
			    za.za_name, 1, za.za_num_integers, strval);
			if (err) {
				kmem_free(strval, za.za_num_integers);
				break;
			}
			spa_prop_add_list(*nvp, prop, strval, 0, src);
			kmem_free(strval, za.za_num_integers);
			break;

		default:
			break;
		}
	}
	zap_cursor_fini(&zc);
	mutex_exit(&spa->spa_props_lock);
out:
	if (err && err != ENOENT) {
		nvlist_free(*nvp);
		*nvp = NULL;
		return (err);
	}

	return (0);
}

/*
 * Validate the given pool properties nvlist and modify the list
 * for the property values to be set.
 */
static int
spa_prop_validate(spa_t *spa, nvlist_t *props)
{
	nvpair_t *elem;
	int error = 0, reset_bootfs = 0;
	uint64_t objnum;

	elem = NULL;
	while ((elem = nvlist_next_nvpair(props, elem)) != NULL) {
		zpool_prop_t prop;
		char *propname, *strval;
		uint64_t intval;
		objset_t *os;
		char *slash;

		propname = nvpair_name(elem);

		if ((prop = zpool_name_to_prop(propname)) == ZPROP_INVAL)
			return (EINVAL);

		switch (prop) {
		case ZPOOL_PROP_VERSION:
			error = nvpair_value_uint64(elem, &intval);
			if (!error &&
			    (intval < spa_version(spa) || intval > SPA_VERSION))
				error = EINVAL;
			break;

		case ZPOOL_PROP_DELEGATION:
		case ZPOOL_PROP_AUTOREPLACE:
		case ZPOOL_PROP_LISTSNAPS:
			error = nvpair_value_uint64(elem, &intval);
			if (!error && intval > 1)
				error = EINVAL;
			break;

		case ZPOOL_PROP_BOOTFS:
			if (spa_version(spa) < SPA_VERSION_BOOTFS) {
				error = ENOTSUP;
				break;
			}

			/*
			 * Make sure the vdev config is bootable
			 */
			if (!vdev_is_bootable(spa->spa_root_vdev)) {
				error = ENOTSUP;
				break;
			}

			reset_bootfs = 1;

			error = nvpair_value_string(elem, &strval);

			if (!error) {
				uint64_t compress;

				if (strval == NULL || strval[0] == '\0') {
					objnum = zpool_prop_default_numeric(
					    ZPOOL_PROP_BOOTFS);
					break;
				}

				if (error = dmu_objset_open(strval, DMU_OST_ZFS,
				    DS_MODE_USER | DS_MODE_READONLY, &os))
					break;

				/* We don't support gzip bootable datasets */
				if ((error = dsl_prop_get_integer(strval,
				    zfs_prop_to_name(ZFS_PROP_COMPRESSION),
				    &compress, NULL)) == 0 &&
				    !BOOTFS_COMPRESS_VALID(compress)) {
					error = ENOTSUP;
				} else {
					objnum = dmu_objset_id(os);
				}
				dmu_objset_close(os);
			}
			break;

		case ZPOOL_PROP_FAILUREMODE:
			error = nvpair_value_uint64(elem, &intval);
			if (!error && (intval < ZIO_FAILURE_MODE_WAIT ||
			    intval > ZIO_FAILURE_MODE_PANIC))
				error = EINVAL;

			/*
			 * This is a special case which only occurs when
			 * the pool has completely failed. This allows
			 * the user to change the in-core failmode property
			 * without syncing it out to disk (I/Os might
			 * currently be blocked). We do this by returning
			 * EIO to the caller (spa_prop_set) to trick it
			 * into thinking we encountered a property validation
			 * error.
			 */
			if (!error && spa_suspended(spa)) {
				spa->spa_failmode = intval;
				error = EIO;
			}
			break;

		case ZPOOL_PROP_CACHEFILE:
			if ((error = nvpair_value_string(elem, &strval)) != 0)
				break;

			if (strval[0] == '\0')
				break;

			if (strcmp(strval, "none") == 0)
				break;

			if (strval[0] != '/') {
				error = EINVAL;
				break;
			}

			slash = strrchr(strval, '/');
			ASSERT(slash != NULL);

			if (slash[1] == '\0' || strcmp(slash, "/.") == 0 ||
			    strcmp(slash, "/..") == 0)
				error = EINVAL;
			break;
		}

		if (error)
			break;
	}

	if (!error && reset_bootfs) {
		error = nvlist_remove(props,
		    zpool_prop_to_name(ZPOOL_PROP_BOOTFS), DATA_TYPE_STRING);

		if (!error) {
			error = nvlist_add_uint64(props,
			    zpool_prop_to_name(ZPOOL_PROP_BOOTFS), objnum);
		}
	}

	return (error);
}

/*
 * Record the cachefile setting from the given property list and, if
 * requested, schedule an asynchronous config cache update.
 */
void
spa_configfile_set(spa_t *spa, nvlist_t *nvp, boolean_t need_sync)
{
	char *cachefile;
	spa_config_dirent_t *dp;

	if (nvlist_lookup_string(nvp, zpool_prop_to_name(ZPOOL_PROP_CACHEFILE),
	    &cachefile) != 0)
		return;

	dp = kmem_alloc(sizeof (spa_config_dirent_t),
	    KM_SLEEP);

	if (cachefile[0] == '\0')
		dp->scd_path = spa_strdup(spa_config_path);
	else if (strcmp(cachefile, "none") == 0)
		dp->scd_path = NULL;
	else
		dp->scd_path = spa_strdup(cachefile);

	list_insert_head(&spa->spa_config_list, dp);
	if (need_sync)
		spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE);
}

/*
 * Set zpool properties.
 */
int
spa_prop_set(spa_t *spa, nvlist_t *nvp)
{
	int error;
	nvpair_t *elem;
	boolean_t need_sync = B_FALSE;
	zpool_prop_t prop;

	if ((error = spa_prop_validate(spa, nvp)) != 0)
		return (error);

	elem = NULL;
	while ((elem = nvlist_next_nvpair(nvp, elem)) != NULL) {
		if ((prop = zpool_name_to_prop(
		    nvpair_name(elem))) == ZPROP_INVAL)
			return (EINVAL);

		if (prop == ZPOOL_PROP_CACHEFILE || prop == ZPOOL_PROP_ALTROOT)
			continue;

		need_sync = B_TRUE;
		break;
	}

	if (need_sync)
		return (dsl_sync_task_do(spa_get_dsl(spa), NULL, spa_sync_props,
		    spa, nvp, 3));
	else
		return (0);
}

/*
 * If the bootfs property value is dsobj, clear it.
 */
void
spa_prop_clear_bootfs(spa_t *spa, uint64_t dsobj, dmu_tx_t *tx)
{
	if (spa->spa_bootfs == dsobj && spa->spa_pool_props_object != 0) {
		VERIFY(zap_remove(spa->spa_meta_objset,
		    spa->spa_pool_props_object,
		    zpool_prop_to_name(ZPOOL_PROP_BOOTFS), tx) == 0);
		spa->spa_bootfs = 0;
	}
}

/*
 * ==========================================================================
 * SPA state manipulation (open/create/destroy/import/export)
 * ==========================================================================
 */

static int
spa_error_entry_compare(const void *a, const void *b)
{
	spa_error_entry_t *sa = (spa_error_entry_t *)a;
	spa_error_entry_t *sb = (spa_error_entry_t *)b;
	int ret;

	ret = bcmp(&sa->se_bookmark, &sb->se_bookmark,
	    sizeof (zbookmark_t));

	if (ret < 0)
		return (-1);
	else if (ret > 0)
		return (1);
	else
		return (0);
}

/*
 * Utility function which retrieves copies of the current logs and
 * re-initializes them in the process.
 */
void
spa_get_errlists(spa_t *spa, avl_tree_t *last, avl_tree_t *scrub)
{
	ASSERT(MUTEX_HELD(&spa->spa_errlist_lock));

	bcopy(&spa->spa_errlist_last, last, sizeof (avl_tree_t));
	bcopy(&spa->spa_errlist_scrub, scrub, sizeof (avl_tree_t));

	avl_create(&spa->spa_errlist_scrub,
	    spa_error_entry_compare, sizeof (spa_error_entry_t),
	    offsetof(spa_error_entry_t, se_avl));
	avl_create(&spa->spa_errlist_last,
	    spa_error_entry_compare, sizeof (spa_error_entry_t),
	    offsetof(spa_error_entry_t, se_avl));
}

/*
 * Activate an uninitialized pool.
 */
static void
spa_activate(spa_t *spa, int mode)
{
	ASSERT(spa->spa_state == POOL_STATE_UNINITIALIZED);

	spa->spa_state = POOL_STATE_ACTIVE;
	spa->spa_mode = mode;

	spa->spa_normal_class = metaslab_class_create();
	spa->spa_log_class = metaslab_class_create();

	for (int t = 0; t < ZIO_TYPES; t++) {
		for (int q = 0; q < ZIO_TASKQ_TYPES; q++) {
			spa->spa_zio_taskq[t][q] = taskq_create("spa_zio",
			    zio_taskq_threads[t][q], maxclsyspri, 50,
			    INT_MAX, TASKQ_PREPOPULATE);
		}
	}

	list_create(&spa->spa_config_dirty_list, sizeof (vdev_t),
	    offsetof(vdev_t, vdev_config_dirty_node));
	list_create(&spa->spa_state_dirty_list, sizeof (vdev_t),
	    offsetof(vdev_t, vdev_state_dirty_node));

	txg_list_create(&spa->spa_vdev_txg_list,
	    offsetof(struct vdev, vdev_txg_node));

	avl_create(&spa->spa_errlist_scrub,
	    spa_error_entry_compare, sizeof (spa_error_entry_t),
	    offsetof(spa_error_entry_t, se_avl));
	avl_create(&spa->spa_errlist_last,
	    spa_error_entry_compare, sizeof (spa_error_entry_t),
	    offsetof(spa_error_entry_t, se_avl));
}

/*
 * Opposite of spa_activate().
 */
static void
spa_deactivate(spa_t *spa)
{
	ASSERT(spa->spa_sync_on == B_FALSE);
	ASSERT(spa->spa_dsl_pool == NULL);
	ASSERT(spa->spa_root_vdev == NULL);

	ASSERT(spa->spa_state != POOL_STATE_UNINITIALIZED);

	txg_list_destroy(&spa->spa_vdev_txg_list);

	list_destroy(&spa->spa_config_dirty_list);
	list_destroy(&spa->spa_state_dirty_list);

	for (int t = 0; t < ZIO_TYPES; t++) {
		for (int q = 0; q < ZIO_TASKQ_TYPES; q++) {
			taskq_destroy(spa->spa_zio_taskq[t][q]);
			spa->spa_zio_taskq[t][q] = NULL;
		}
	}

	metaslab_class_destroy(spa->spa_normal_class);
	spa->spa_normal_class = NULL;

	metaslab_class_destroy(spa->spa_log_class);
	spa->spa_log_class = NULL;

	/*
	 * If this was part of an import or the open otherwise failed, we may
	 * still have errors left in the queues. Empty them just in case.
	 */
	spa_errlog_drain(spa);

	avl_destroy(&spa->spa_errlist_scrub);
	avl_destroy(&spa->spa_errlist_last);

	spa->spa_state = POOL_STATE_UNINITIALIZED;
}

/*
 * Verify a pool configuration, and construct the vdev tree appropriately. This
 * will create all the necessary vdevs in the appropriate layout, with each vdev
 * in the CLOSED state. This will prep the pool before open/creation/import.
 * All vdev validation is done by the vdev_alloc() routine.
 */
static int
spa_config_parse(spa_t *spa, vdev_t **vdp, nvlist_t *nv, vdev_t *parent,
    uint_t id, int atype)
{
	nvlist_t **child;
	uint_t c, children;
	int error;

	if ((error = vdev_alloc(spa, vdp, nv, parent, id, atype)) != 0)
		return (error);

	if ((*vdp)->vdev_ops->vdev_op_leaf)
		return (0);

	error = nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
	    &child, &children);

	if (error == ENOENT)
		return (0);

	if (error) {
		vdev_free(*vdp);
		*vdp = NULL;
		return (EINVAL);
	}

	for (c = 0; c < children; c++) {
		vdev_t *vd;
		if ((error = spa_config_parse(spa, &vd, child[c], *vdp, c,
		    atype)) != 0) {
			vdev_free(*vdp);
			*vdp = NULL;
			return (error);
		}
	}

	ASSERT(*vdp != NULL);

	return (0);
}

/*
 * Opposite of spa_load().
 */
static void
spa_unload(spa_t *spa)
{
	int i;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	/*
	 * Stop async tasks.
	 */
	spa_async_suspend(spa);

	/*
	 * Stop syncing.
	 */
	if (spa->spa_sync_on) {
		txg_sync_stop(spa->spa_dsl_pool);
		spa->spa_sync_on = B_FALSE;
	}

	/*
	 * Wait for any outstanding async I/O to complete.
	 */
	mutex_enter(&spa->spa_async_root_lock);
	while (spa->spa_async_root_count != 0)
		cv_wait(&spa->spa_async_root_cv, &spa->spa_async_root_lock);
	mutex_exit(&spa->spa_async_root_lock);

	/*
	 * Close the dsl pool.
	 */
	if (spa->spa_dsl_pool) {
		dsl_pool_close(spa->spa_dsl_pool);
		spa->spa_dsl_pool = NULL;
	}

	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);

	/*
	 * Drop and purge level 2 cache
	 */
	spa_l2cache_drop(spa);

	/*
	 * Close all vdevs.
	 */
	if (spa->spa_root_vdev)
		vdev_free(spa->spa_root_vdev);
	ASSERT(spa->spa_root_vdev == NULL);

	for (i = 0; i < spa->spa_spares.sav_count; i++)
		vdev_free(spa->spa_spares.sav_vdevs[i]);
	if (spa->spa_spares.sav_vdevs) {
		kmem_free(spa->spa_spares.sav_vdevs,
		    spa->spa_spares.sav_count * sizeof (void *));
		spa->spa_spares.sav_vdevs = NULL;
	}
	if (spa->spa_spares.sav_config) {
		nvlist_free(spa->spa_spares.sav_config);
		spa->spa_spares.sav_config = NULL;
	}
	spa->spa_spares.sav_count = 0;

	for (i = 0; i < spa->spa_l2cache.sav_count; i++)
		vdev_free(spa->spa_l2cache.sav_vdevs[i]);
	if (spa->spa_l2cache.sav_vdevs) {
		kmem_free(spa->spa_l2cache.sav_vdevs,
		    spa->spa_l2cache.sav_count * sizeof (void *));
		spa->spa_l2cache.sav_vdevs = NULL;
	}
	if (spa->spa_l2cache.sav_config) {
		nvlist_free(spa->spa_l2cache.sav_config);
		spa->spa_l2cache.sav_config = NULL;
	}
	spa->spa_l2cache.sav_count = 0;

	spa->spa_async_suspended = 0;

	spa_config_exit(spa, SCL_ALL, FTAG);
}

/*
 * Load (or re-load) the current list of vdevs describing the active spares for
 * this pool. When this is called, we have some form of basic information in
 * 'spa_spares.sav_config'. We parse this into vdevs, try to open them, and
 * then re-generate a more complete list including status information.
 */
static void
spa_load_spares(spa_t *spa)
{
	nvlist_t **spares;
	uint_t nspares;
	int i;
	vdev_t *vd, *tvd;

	ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);

	/*
	 * First, close and free any existing spare vdevs.
	 */
	for (i = 0; i < spa->spa_spares.sav_count; i++) {
		vd = spa->spa_spares.sav_vdevs[i];

		/* Undo the call to spa_activate() below */
		if ((tvd = spa_lookup_by_guid(spa, vd->vdev_guid,
		    B_FALSE)) != NULL && tvd->vdev_isspare)
			spa_spare_remove(tvd);
		vdev_close(vd);
		vdev_free(vd);
	}

	if (spa->spa_spares.sav_vdevs)
		kmem_free(spa->spa_spares.sav_vdevs,
		    spa->spa_spares.sav_count * sizeof (void *));

	if (spa->spa_spares.sav_config == NULL)
		nspares = 0;
	else
		VERIFY(nvlist_lookup_nvlist_array(spa->spa_spares.sav_config,
		    ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0);

	spa->spa_spares.sav_count = (int)nspares;
	spa->spa_spares.sav_vdevs = NULL;

	if (nspares == 0)
		return;

	/*
	 * Construct the array of vdevs, opening them to get status in the
	 * process. For each spare, there are potentially two different vdev_t
	 * structures associated with it: one in the list of spares (used only
	 * for basic validation purposes) and one in the active vdev
	 * configuration (if it's spared in). During this phase we open and
	 * validate each vdev on the spare list. If the vdev also exists in the
	 * active configuration, then we also mark this vdev as an active spare.
	 */
	spa->spa_spares.sav_vdevs = kmem_alloc(nspares * sizeof (void *),
	    KM_SLEEP);
	for (i = 0; i < spa->spa_spares.sav_count; i++) {
		VERIFY(spa_config_parse(spa, &vd, spares[i], NULL, 0,
		    VDEV_ALLOC_SPARE) == 0);
		ASSERT(vd != NULL);

		spa->spa_spares.sav_vdevs[i] = vd;

		if ((tvd = spa_lookup_by_guid(spa, vd->vdev_guid,
		    B_FALSE)) != NULL) {
			if (!tvd->vdev_isspare)
				spa_spare_add(tvd);

			/*
			 * We only mark the spare active if we were successfully
			 * able to load the vdev. Otherwise, importing a pool
			 * with a bad active spare would result in strange
			 * behavior, because multiple pools would think the
			 * spare is actively in use.
			 *
			 * There is a vulnerability here to an equally bizarre
			 * circumstance, where a dead active spare is later
			 * brought back to life (onlined or otherwise). Given
			 * the rarity of this scenario, and the extra complexity
			 * it adds, we ignore the possibility.
			 */
			if (!vdev_is_dead(tvd))
				spa_spare_activate(tvd);
		}

		vd->vdev_top = vd;

		if (vdev_open(vd) != 0)
			continue;

		if (vdev_validate_aux(vd) == 0)
			spa_spare_add(vd);
	}

	/*
	 * Recompute the stashed list of spares, with status information
	 * this time.
	 */
	VERIFY(nvlist_remove(spa->spa_spares.sav_config, ZPOOL_CONFIG_SPARES,
	    DATA_TYPE_NVLIST_ARRAY) == 0);

	spares = kmem_alloc(spa->spa_spares.sav_count * sizeof (void *),
	    KM_SLEEP);
	for (i = 0; i < spa->spa_spares.sav_count; i++)
		spares[i] = vdev_config_generate(spa,
		    spa->spa_spares.sav_vdevs[i], B_TRUE, B_TRUE, B_FALSE);
	VERIFY(nvlist_add_nvlist_array(spa->spa_spares.sav_config,
	    ZPOOL_CONFIG_SPARES, spares, spa->spa_spares.sav_count) == 0);
	for (i = 0; i < spa->spa_spares.sav_count; i++)
		nvlist_free(spares[i]);
	kmem_free(spares, spa->spa_spares.sav_count * sizeof (void *));
}

/*
 * Load (or re-load) the current list of vdevs describing the active l2cache for
 * this pool. When this is called, we have some form of basic information in
 * 'spa_l2cache.sav_config'. We parse this into vdevs, try to open them, and
 * then re-generate a more complete list including status information.
 * Devices which are already active have their details maintained, and are
 * not re-opened.
 */
static void
spa_load_l2cache(spa_t *spa)
{
	nvlist_t **l2cache;
	uint_t nl2cache;
	int i, j, oldnvdevs;
	uint64_t guid, size;
	vdev_t *vd, **oldvdevs, **newvdevs;
	spa_aux_vdev_t *sav = &spa->spa_l2cache;

	ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);

	if (sav->sav_config != NULL) {
		VERIFY(nvlist_lookup_nvlist_array(sav->sav_config,
		    ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0);
		newvdevs = kmem_alloc(nl2cache * sizeof (void *), KM_SLEEP);
	} else {
		nl2cache = 0;
	}

	oldvdevs = sav->sav_vdevs;
	oldnvdevs = sav->sav_count;
	sav->sav_vdevs = NULL;
	sav->sav_count = 0;

	/*
	 * Process new nvlist of vdevs.
	 */
	for (i = 0; i < nl2cache; i++) {
		VERIFY(nvlist_lookup_uint64(l2cache[i], ZPOOL_CONFIG_GUID,
		    &guid) == 0);

		newvdevs[i] = NULL;
		for (j = 0; j < oldnvdevs; j++) {
			vd = oldvdevs[j];
			if (vd != NULL && guid == vd->vdev_guid) {
				/*
				 * Retain previous vdev for add/remove ops.
				 */
				newvdevs[i] = vd;
				oldvdevs[j] = NULL;
				break;
			}
		}

		if (newvdevs[i] == NULL) {
			/*
			 * Create new vdev
			 */
			VERIFY(spa_config_parse(spa, &vd, l2cache[i], NULL, 0,
			    VDEV_ALLOC_L2CACHE) == 0);
			ASSERT(vd != NULL);
			newvdevs[i] = vd;

			/*
			 * Commit this vdev as an l2cache device,
			 * even if it fails to open.
			 */
			spa_l2cache_add(vd);

			vd->vdev_top = vd;
			vd->vdev_aux = sav;

			spa_l2cache_activate(vd);

			if (vdev_open(vd) != 0)
				continue;

			(void) vdev_validate_aux(vd);

			if (!vdev_is_dead(vd)) {
				size = vdev_get_rsize(vd);
				l2arc_add_vdev(spa, vd,
				    VDEV_LABEL_START_SIZE,
				    size - VDEV_LABEL_START_SIZE);
			}
		}
	}

	/*
	 * Purge vdevs that were dropped
	 */
	for (i = 0; i < oldnvdevs; i++) {
		uint64_t pool;

		vd = oldvdevs[i];
		if (vd != NULL) {
			if (spa_l2cache_exists(vd->vdev_guid, &pool) &&
			    pool != 0ULL && l2arc_vdev_present(vd))
				l2arc_remove_vdev(vd);
			(void) vdev_close(vd);
			spa_l2cache_remove(vd);
		}
	}

	if (oldvdevs)
		kmem_free(oldvdevs, oldnvdevs * sizeof (void *));

	if (sav->sav_config == NULL)
		goto out;

	sav->sav_vdevs = newvdevs;
	sav->sav_count = (int)nl2cache;

	/*
	 * Recompute the stashed list of l2cache devices, with status
	 * information this time.
	 */
	VERIFY(nvlist_remove(sav->sav_config, ZPOOL_CONFIG_L2CACHE,
	    DATA_TYPE_NVLIST_ARRAY) == 0);

	l2cache = kmem_alloc(sav->sav_count * sizeof (void *), KM_SLEEP);
	for (i = 0; i < sav->sav_count; i++)
		l2cache[i] = vdev_config_generate(spa,
		    sav->sav_vdevs[i], B_TRUE, B_FALSE, B_TRUE);
	VERIFY(nvlist_add_nvlist_array(sav->sav_config,
	    ZPOOL_CONFIG_L2CACHE, l2cache, sav->sav_count) == 0);
out:
	for (i = 0; i < sav->sav_count; i++)
		nvlist_free(l2cache[i]);
	if (sav->sav_count)
		kmem_free(l2cache, sav->sav_count * sizeof (void *));
}

/*
 * Read the packed nvlist stored in the given MOS object (its size is kept
 * in the object's bonus buffer) and unpack it into '*value'.
 */
static int
load_nvlist(spa_t *spa, uint64_t obj, nvlist_t **value)
{
	dmu_buf_t *db;
	char *packed = NULL;
	size_t nvsize = 0;
	int error;
	*value = NULL;

	VERIFY(0 == dmu_bonus_hold(spa->spa_meta_objset, obj, FTAG, &db));
	nvsize = *(uint64_t *)db->db_data;
	dmu_buf_rele(db, FTAG);

	packed = kmem_alloc(nvsize, KM_SLEEP);
	error = dmu_read(spa->spa_meta_objset, obj, 0, nvsize, packed);
	if (error == 0)
		error = nvlist_unpack(packed, nvsize, value, 0);
	kmem_free(packed, nvsize);

	return (error);
}

/*
 * Checks to see if the given vdev could not be opened, in which case we post a
 * sysevent to notify the autoreplace code that the device has been removed.
 */
static void
spa_check_removed(vdev_t *vd)
{
	int c;

	for (c = 0; c < vd->vdev_children; c++)
		spa_check_removed(vd->vdev_child[c]);

	if (vd->vdev_ops->vdev_op_leaf && vdev_is_dead(vd)) {
		zfs_post_autoreplace(vd->vdev_spa, vd);
		spa_event_notify(vd->vdev_spa, vd, ESC_ZFS_VDEV_CHECK);
	}
}

/*
 * Check for missing log devices
 */
int
spa_check_logs(spa_t *spa)
{
	switch (spa->spa_log_state) {
	case SPA_LOG_MISSING:
		/* need to recheck in case slog has been restored */
	case SPA_LOG_UNKNOWN:
		if (dmu_objset_find(spa->spa_name, zil_check_log_chain, NULL,
		    DS_FIND_CHILDREN)) {
			spa->spa_log_state = SPA_LOG_MISSING;
			return (1);
		}
		break;

	case SPA_LOG_CLEAR:
		(void) dmu_objset_find(spa->spa_name, zil_clear_log_chain, NULL,
		    DS_FIND_CHILDREN);
		break;
	}
	spa->spa_log_state = SPA_LOG_GOOD;
	return (0);
}

/*
 * Load an existing storage pool, using the pool's builtin spa_config as a
 * source of configuration information.
 */
static int
spa_load(spa_t *spa, nvlist_t *config, spa_load_state_t state, int mosconfig)
{
	int error = 0;
	nvlist_t *nvroot = NULL;
	vdev_t *rvd;
	uberblock_t *ub = &spa->spa_uberblock;
	uint64_t config_cache_txg = spa->spa_config_txg;
	uint64_t pool_guid;
	uint64_t version;
	uint64_t autoreplace = 0;
	int orig_mode = spa->spa_mode;
	char *ereport = FM_EREPORT_ZFS_POOL;

	/*
	 * If this is an untrusted config, access the pool in read-only mode.
	 * This prevents things like resilvering recently removed devices.
	 */
	if (!mosconfig)
		spa->spa_mode = FREAD;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	spa->spa_load_state = state;

	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &nvroot) ||
	    nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID, &pool_guid)) {
		error = EINVAL;
		goto out;
	}

	/*
	 * Versioning wasn't explicitly added to the label until later, so if
	 * it's not present treat it as the initial version.
	 */
	if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION, &version) != 0)
		version = SPA_VERSION_INITIAL;

	(void) nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_TXG,
	    &spa->spa_config_txg);

	if ((state == SPA_LOAD_IMPORT || state == SPA_LOAD_TRYIMPORT) &&
	    spa_guid_exists(pool_guid, 0)) {
		error = EEXIST;
		goto out;
	}

	spa->spa_load_guid = pool_guid;

	/*
	 * Parse the configuration into a vdev tree. We explicitly set the
	 * value that will be returned by spa_version() since parsing the
	 * configuration requires knowing the version number.
	 */
	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
	spa->spa_ubsync.ub_version = version;
	error = spa_config_parse(spa, &rvd, nvroot, NULL, 0, VDEV_ALLOC_LOAD);
	spa_config_exit(spa, SCL_ALL, FTAG);

	if (error != 0)
		goto out;

	ASSERT(spa->spa_root_vdev == rvd);
	ASSERT(spa_guid(spa) == pool_guid);

	/*
	 * Try to open all vdevs, loading each label in the process.
	 */
	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
	error = vdev_open(rvd);
	spa_config_exit(spa, SCL_ALL, FTAG);
	if (error != 0)
		goto out;

	/*
	 * Validate the labels for all leaf vdevs. We need to grab the config
	 * lock because all label I/O is done with ZIO_FLAG_CONFIG_WRITER.
	 */
	if (mosconfig) {
		spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
		error = vdev_validate(rvd);
		spa_config_exit(spa, SCL_ALL, FTAG);
		if (error != 0)
			goto out;
	}

	if (rvd->vdev_state <= VDEV_STATE_CANT_OPEN) {
		error = ENXIO;
		goto out;
	}

	/*
	 * Find the best uberblock.
	 */
	vdev_uberblock_load(NULL, rvd, ub);

	/*
	 * If we weren't able to find a single valid uberblock, return failure.
	 */
	if (ub->ub_txg == 0) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_CORRUPT_DATA);
		error = ENXIO;
		goto out;
	}

	/*
	 * If the pool is newer than the code, we can't open it.
	 */
	if (ub->ub_version > SPA_VERSION) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_VERSION_NEWER);
		error = ENOTSUP;
		goto out;
	}

	/*
	 * If the vdev guid sum doesn't match the uberblock, we have an
	 * incomplete configuration.
	 */
	if (rvd->vdev_guid_sum != ub->ub_guid_sum && mosconfig) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_BAD_GUID_SUM);
		error = ENXIO;
		goto out;
	}

	/*
	 * Initialize internal SPA structures.
	 */
	spa->spa_state = POOL_STATE_ACTIVE;
	spa->spa_ubsync = spa->spa_uberblock;
	spa->spa_first_txg = spa_last_synced_txg(spa) + 1;
	error = dsl_pool_open(spa, spa->spa_first_txg, &spa->spa_dsl_pool);
	if (error) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_CORRUPT_DATA);
		goto out;
	}
	spa->spa_meta_objset = spa->spa_dsl_pool->dp_meta_objset;

	if (zap_lookup(spa->spa_meta_objset,
	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_CONFIG,
	    sizeof (uint64_t), 1, &spa->spa_config_object) != 0) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_CORRUPT_DATA);
		error = EIO;
		goto out;
	}

	if (!mosconfig) {
		nvlist_t *newconfig;
		uint64_t hostid;

		if (load_nvlist(spa, spa->spa_config_object, &newconfig) != 0) {
			vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
			    VDEV_AUX_CORRUPT_DATA);
			error = EIO;
			goto out;
		}

		if (!spa_is_root(spa) && nvlist_lookup_uint64(newconfig,
		    ZPOOL_CONFIG_HOSTID, &hostid) == 0) {
			char *hostname;
			unsigned long myhostid = 0;

			VERIFY(nvlist_lookup_string(newconfig,
			    ZPOOL_CONFIG_HOSTNAME, &hostname) == 0);

			(void) ddi_strtoul(hw_serial, NULL, 10, &myhostid);
			if (hostid != 0 && myhostid != 0 &&
			    (unsigned long)hostid != myhostid) {
				cmn_err(CE_WARN, "pool '%s' could not be "
				    "loaded as it was last accessed by "
				    "another system (host: %s hostid: 0x%lx). "
				    "See: http://www.sun.com/msg/ZFS-8000-EY",
				    spa_name(spa), hostname,
				    (unsigned long)hostid);
				error = EBADF;
				goto out;
			}
		}

		spa_config_set(spa, newconfig);
		spa_unload(spa);
		spa_deactivate(spa);
		spa_activate(spa, orig_mode);

		return (spa_load(spa, newconfig, state, B_TRUE));
	}

	if (zap_lookup(spa->spa_meta_objset,
	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_SYNC_BPLIST,
	    sizeof (uint64_t), 1, &spa->spa_sync_bplist_obj) != 0) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_CORRUPT_DATA);
		error = EIO;
		goto out;
	}

	/*
	 * Load the bit that tells us to use the new accounting function
	 * (raid-z deflation). If we have an older pool, this will not
	 * be present.
	 */
	error = zap_lookup(spa->spa_meta_objset,
	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_DEFLATE,
	    sizeof (uint64_t), 1, &spa->spa_deflate);
	if (error != 0 && error != ENOENT) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_CORRUPT_DATA);
		error = EIO;
		goto out;
	}

	/*
	 * Load the persistent error log. If we have an older pool, this will
	 * not be present.
	 */
	error = zap_lookup(spa->spa_meta_objset,
	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_ERRLOG_LAST,
	    sizeof (uint64_t), 1, &spa->spa_errlog_last);
	if (error != 0 && error != ENOENT) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_CORRUPT_DATA);
		error = EIO;
		goto out;
	}

	error = zap_lookup(spa->spa_meta_objset,
	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_ERRLOG_SCRUB,
	    sizeof (uint64_t), 1, &spa->spa_errlog_scrub);
	if (error != 0 && error != ENOENT) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_CORRUPT_DATA);
		error = EIO;
		goto out;
	}

	/*
	 * Load the history object. If we have an older pool, this
	 * will not be present.
	 */
	error = zap_lookup(spa->spa_meta_objset,
	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_HISTORY,
	    sizeof (uint64_t), 1, &spa->spa_history);
	if (error != 0 && error != ENOENT) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_CORRUPT_DATA);
		error = EIO;
		goto out;
	}

	/*
	 * Load any hot spares for this pool.
	 */
	error = zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_SPARES, sizeof (uint64_t), 1, &spa->spa_spares.sav_object);
	if (error != 0 && error != ENOENT) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_CORRUPT_DATA);
		error = EIO;
		goto out;
	}
	if (error == 0) {
		ASSERT(spa_version(spa) >= SPA_VERSION_SPARES);
		if (load_nvlist(spa, spa->spa_spares.sav_object,
		    &spa->spa_spares.sav_config) != 0) {
			vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
			    VDEV_AUX_CORRUPT_DATA);
			error = EIO;
			goto out;
		}

		spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
		spa_load_spares(spa);
		spa_config_exit(spa, SCL_ALL, FTAG);
	}

	/*
	 * Load any level 2 ARC devices for this pool.
	 */
	error = zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_L2CACHE, sizeof (uint64_t), 1,
	    &spa->spa_l2cache.sav_object);
	if (error != 0 && error != ENOENT) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_CORRUPT_DATA);
		error = EIO;
		goto out;
	}
	if (error == 0) {
		ASSERT(spa_version(spa) >= SPA_VERSION_L2CACHE);
		if (load_nvlist(spa, spa->spa_l2cache.sav_object,
		    &spa->spa_l2cache.sav_config) != 0) {
			vdev_set_state(rvd, B_TRUE,
			    VDEV_STATE_CANT_OPEN,
			    VDEV_AUX_CORRUPT_DATA);
			error = EIO;
			goto out;
		}

		spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
		spa_load_l2cache(spa);
		spa_config_exit(spa, SCL_ALL, FTAG);
	}

	if (spa_check_logs(spa)) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_BAD_LOG);
		error = ENXIO;
		ereport = FM_EREPORT_ZFS_LOG_REPLAY;
		goto out;
	}

	spa->spa_delegation = zpool_prop_default_numeric(ZPOOL_PROP_DELEGATION);

	error = zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_PROPS, sizeof (uint64_t), 1, &spa->spa_pool_props_object);

	if (error && error != ENOENT) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_CORRUPT_DATA);
		error = EIO;
		goto out;
	}

	if (error == 0) {
		(void) zap_lookup(spa->spa_meta_objset,
		    spa->spa_pool_props_object,
		    zpool_prop_to_name(ZPOOL_PROP_BOOTFS),
		    sizeof (uint64_t), 1, &spa->spa_bootfs);
		(void) zap_lookup(spa->spa_meta_objset,
		    spa->spa_pool_props_object,
		    zpool_prop_to_name(ZPOOL_PROP_AUTOREPLACE),
		    sizeof (uint64_t), 1, &autoreplace);
		(void) zap_lookup(spa->spa_meta_objset,
		    spa->spa_pool_props_object,
		    zpool_prop_to_name(ZPOOL_PROP_DELEGATION),
		    sizeof (uint64_t), 1, &spa->spa_delegation);
		(void) zap_lookup(spa->spa_meta_objset,
		    spa->spa_pool_props_object,
		    zpool_prop_to_name(ZPOOL_PROP_FAILUREMODE),
		    sizeof (uint64_t), 1, &spa->spa_failmode);
	}

	/*
	 * If the 'autoreplace' property is set, then post a resource notifying
	 * the ZFS DE that it should not issue any faults for unopenable
	 * devices.
	 * We also iterate over the vdevs, and post a sysevent for any
	 * unopenable vdevs so that the normal autoreplace handler can take
	 * over.
	 */
	if (autoreplace && state != SPA_LOAD_TRYIMPORT)
		spa_check_removed(spa->spa_root_vdev);

	/*
	 * Load the vdev state for all toplevel vdevs.
	 */
	vdev_load(rvd);

	/*
	 * Propagate the leaf DTLs we just loaded all the way up the tree.
	 */
	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
	vdev_dtl_reassess(rvd, 0, 0, B_FALSE);
	spa_config_exit(spa, SCL_ALL, FTAG);

	/*
	 * Check the state of the root vdev. If it can't be opened, it
	 * indicates one or more toplevel vdevs are faulted.
	 */
	if (rvd->vdev_state <= VDEV_STATE_CANT_OPEN) {
		error = ENXIO;
		goto out;
	}

	if (spa_writeable(spa)) {
		dmu_tx_t *tx;
		int need_update = B_FALSE;

		ASSERT(state != SPA_LOAD_TRYIMPORT);

		/*
		 * Claim log blocks that haven't been committed yet.
		 * This must all happen in a single txg.
		 */
		tx = dmu_tx_create_assigned(spa_get_dsl(spa),
		    spa_first_txg(spa));
		(void) dmu_objset_find(spa_name(spa),
		    zil_claim, tx, DS_FIND_CHILDREN);
		dmu_tx_commit(tx);

		spa->spa_sync_on = B_TRUE;
		txg_sync_start(spa->spa_dsl_pool);

		/*
		 * Wait for all claims to sync.
		 */
		txg_wait_synced(spa->spa_dsl_pool, 0);

		/*
		 * If the config cache is stale, or we have uninitialized
		 * metaslabs (see spa_vdev_add()), then update the config.
		 */
		if (config_cache_txg != spa->spa_config_txg ||
		    state == SPA_LOAD_IMPORT)
			need_update = B_TRUE;

		for (int c = 0; c < rvd->vdev_children; c++)
			if (rvd->vdev_child[c]->vdev_ms_array == 0)
				need_update = B_TRUE;

		/*
		 * Update the config cache asynchronously in case we're the
		 * root pool, in which case the config cache isn't writable yet.
		 */
		if (need_update)
			spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE);

		/*
		 * Check all DTLs to see if anything needs resilvering.
		 */
		if (vdev_resilver_needed(rvd, NULL, NULL))
			spa_async_request(spa, SPA_ASYNC_RESILVER);
	}

	error = 0;
out:
	spa->spa_minref = refcount_count(&spa->spa_refcount);
	if (error && error != EBADF)
		zfs_ereport_post(ereport, spa, NULL, NULL, 0, 0);
	spa->spa_load_state = SPA_LOAD_NONE;
	spa->spa_ena = 0;

	return (error);
}

/*
 * Pool Open/Import
 *
 * The import case is identical to an open except that the configuration is sent
 * down from userland, instead of grabbed from the configuration cache. For the
 * case of an open, the pool configuration will exist in the
 * POOL_STATE_UNINITIALIZED state.
 *
 * The stats information (gen/count/ustats) is used to gather vdev statistics at
 * the same time we open the pool, without having to keep around the spa_t in
 * some ambiguous state.
 */
static int
spa_open_common(const char *pool, spa_t **spapp, void *tag, nvlist_t **config)
{
	spa_t *spa;
	int error;
	int locked = B_FALSE;

	*spapp = NULL;

	/*
	 * As disgusting as this is, we need to support recursive calls to this
	 * function because dsl_dir_open() is called during spa_load(), and ends
	 * up calling spa_open() again. The real fix is to figure out how to
	 * avoid dsl_dir_open() calling this in the first place.
	 */
	if (mutex_owner(&spa_namespace_lock) != curthread) {
		mutex_enter(&spa_namespace_lock);
		locked = B_TRUE;
	}

	if ((spa = spa_lookup(pool)) == NULL) {
		if (locked)
			mutex_exit(&spa_namespace_lock);
		return (ENOENT);
	}
	if (spa->spa_state == POOL_STATE_UNINITIALIZED) {

		spa_activate(spa, spa_mode_global);

		error = spa_load(spa, spa->spa_config, SPA_LOAD_OPEN, B_FALSE);

		if (error == EBADF) {
			/*
			 * If vdev_validate() returns failure (indicated by
			 * EBADF), it means that one of the vdevs indicates
			 * that the pool has been exported or destroyed. If
			 * this is the case, the config cache is out of sync and
			 * we should remove the pool from the namespace.
			 */
			spa_unload(spa);
			spa_deactivate(spa);
			spa_config_sync(spa, B_TRUE, B_TRUE);
			spa_remove(spa);
			if (locked)
				mutex_exit(&spa_namespace_lock);
			return (ENOENT);
		}

		if (error) {
			/*
			 * We can't open the pool, but we still have useful
			 * information: the state of each vdev after the
			 * attempted vdev_open(). Return this to the user.
			 */
			if (config != NULL && spa->spa_root_vdev != NULL)
				*config = spa_config_generate(spa, NULL, -1ULL,
				    B_TRUE);
			spa_unload(spa);
			spa_deactivate(spa);
			spa->spa_last_open_failed = B_TRUE;
			if (locked)
				mutex_exit(&spa_namespace_lock);
			*spapp = NULL;
			return (error);
		} else {
			spa->spa_last_open_failed = B_FALSE;
		}
	}

	spa_open_ref(spa, tag);

	if (locked)
		mutex_exit(&spa_namespace_lock);

	*spapp = spa;

	if (config != NULL)
		*config = spa_config_generate(spa, NULL, -1ULL, B_TRUE);

	return (0);
}

int
spa_open(const char *name, spa_t **spapp, void *tag)
{
	return (spa_open_common(name, spapp, tag, NULL));
}

/*
 * Lookup the given spa_t, incrementing the inject count in the process,
 * preventing it from being exported or destroyed.
 */
spa_t *
spa_inject_addref(char *name)
{
	spa_t *spa;

	mutex_enter(&spa_namespace_lock);
	if ((spa = spa_lookup(name)) == NULL) {
		mutex_exit(&spa_namespace_lock);
		return (NULL);
	}
	spa->spa_inject_ref++;
	mutex_exit(&spa_namespace_lock);

	return (spa);
}

void
spa_inject_delref(spa_t *spa)
{
	mutex_enter(&spa_namespace_lock);
	spa->spa_inject_ref--;
	mutex_exit(&spa_namespace_lock);
}

/*
 * Add spares device information to the nvlist.
 */
static void
spa_add_spares(spa_t *spa, nvlist_t *config)
{
	nvlist_t **spares;
	uint_t i, nspares;
	nvlist_t *nvroot;
	uint64_t guid;
	vdev_stat_t *vs;
	uint_t vsc;
	uint64_t pool;

	if (spa->spa_spares.sav_count == 0)
		return;

	VERIFY(nvlist_lookup_nvlist(config,
	    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
	VERIFY(nvlist_lookup_nvlist_array(spa->spa_spares.sav_config,
	    ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0);
	if (nspares != 0) {
		VERIFY(nvlist_add_nvlist_array(nvroot,
		    ZPOOL_CONFIG_SPARES, spares, nspares) == 0);
		VERIFY(nvlist_lookup_nvlist_array(nvroot,
		    ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0);

		/*
		 * Go through and find any spares which have since been
		 * repurposed as an active spare. If this is the case, update
		 * their status appropriately.
		 */
		for (i = 0; i < nspares; i++) {
			VERIFY(nvlist_lookup_uint64(spares[i],
			    ZPOOL_CONFIG_GUID, &guid) == 0);
			if (spa_spare_exists(guid, &pool, NULL) &&
			    pool != 0ULL) {
				VERIFY(nvlist_lookup_uint64_array(
				    spares[i], ZPOOL_CONFIG_STATS,
				    (uint64_t **)&vs, &vsc) == 0);
				vs->vs_state = VDEV_STATE_CANT_OPEN;
				vs->vs_aux = VDEV_AUX_SPARED;
			}
		}
	}
}

/*
 * Add l2cache device information to the nvlist, including vdev stats.
 */
static void
spa_add_l2cache(spa_t *spa, nvlist_t *config)
{
	nvlist_t **l2cache;
	uint_t i, j, nl2cache;
	nvlist_t *nvroot;
	uint64_t guid;
	vdev_t *vd;
	vdev_stat_t *vs;
	uint_t vsc;

	if (spa->spa_l2cache.sav_count == 0)
		return;

	spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);

	VERIFY(nvlist_lookup_nvlist(config,
	    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
	VERIFY(nvlist_lookup_nvlist_array(spa->spa_l2cache.sav_config,
	    ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0);
	if (nl2cache != 0) {
		VERIFY(nvlist_add_nvlist_array(nvroot,
		    ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache) == 0);
		VERIFY(nvlist_lookup_nvlist_array(nvroot,
		    ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0);

		/*
		 * Update level 2 cache device stats.
		 */

		for (i = 0; i < nl2cache; i++) {
			VERIFY(nvlist_lookup_uint64(l2cache[i],
			    ZPOOL_CONFIG_GUID, &guid) == 0);

			vd = NULL;
			for (j = 0; j < spa->spa_l2cache.sav_count; j++) {
				if (guid ==
				    spa->spa_l2cache.sav_vdevs[j]->vdev_guid) {
					vd = spa->spa_l2cache.sav_vdevs[j];
					break;
				}
			}
			ASSERT(vd != NULL);

			VERIFY(nvlist_lookup_uint64_array(l2cache[i],
			    ZPOOL_CONFIG_STATS, (uint64_t **)&vs, &vsc) == 0);
			vdev_get_stats(vd, vs);
		}
	}

	spa_config_exit(spa, SCL_CONFIG, FTAG);
}

int
spa_get_stats(const char *name, nvlist_t **config, char *altroot, size_t buflen)
{
	int error;
	spa_t *spa;

	*config = NULL;
	error = spa_open_common(name, &spa, FTAG, config);

	if (spa && *config != NULL) {
		VERIFY(nvlist_add_uint64(*config, ZPOOL_CONFIG_ERRCOUNT,
		    spa_get_errlog_size(spa)) == 0);

		if (spa_suspended(spa))
			VERIFY(nvlist_add_uint64(*config,
			    ZPOOL_CONFIG_SUSPENDED, spa->spa_failmode) == 0);

		spa_add_spares(spa, *config);
		spa_add_l2cache(spa, *config);
	}

	/*
	 * We want to get the alternate root even for faulted pools, so we cheat
	 * and call spa_lookup() directly.
	 */
	if (altroot) {
		if (spa == NULL) {
			mutex_enter(&spa_namespace_lock);
			spa = spa_lookup(name);
			if (spa)
				spa_altroot(spa, altroot, buflen);
			else
				altroot[0] = '\0';
			spa = NULL;
			mutex_exit(&spa_namespace_lock);
		} else {
			spa_altroot(spa, altroot, buflen);
		}
	}

	if (spa != NULL)
		spa_close(spa, FTAG);

	return (error);
}

/*
 * Validate that the auxiliary device array is well formed. We must have an
 * array of nvlists, each of which describes a valid leaf vdev. If this is an
 * import (mode is VDEV_ALLOC_SPARE), then we allow corrupted spares to be
 * specified, as long as they are well-formed.
 */
static int
spa_validate_aux_devs(spa_t *spa, nvlist_t *nvroot, uint64_t crtxg, int mode,
    spa_aux_vdev_t *sav, const char *config, uint64_t version,
    vdev_labeltype_t label)
{
	nvlist_t **dev;
	uint_t i, ndev;
	vdev_t *vd;
	int error;

	ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);

	/*
	 * It's acceptable to have no devs specified.
	 */
	if (nvlist_lookup_nvlist_array(nvroot, config, &dev, &ndev) != 0)
		return (0);

	if (ndev == 0)
		return (EINVAL);

	/*
	 * Make sure the pool is formatted with a version that supports this
	 * device type.
	 */
	if (spa_version(spa) < version)
		return (ENOTSUP);

	/*
	 * Set the pending device list so we correctly handle device in-use
	 * checking.
	 */
	sav->sav_pending = dev;
	sav->sav_npending = ndev;

	for (i = 0; i < ndev; i++) {
		if ((error = spa_config_parse(spa, &vd, dev[i], NULL, 0,
		    mode)) != 0)
			goto out;

		if (!vd->vdev_ops->vdev_op_leaf) {
			vdev_free(vd);
			error = EINVAL;
			goto out;
		}

		/*
		 * The L2ARC currently only supports disk devices in
		 * kernel context. For user-level testing, we allow it.
		 */
#ifdef _KERNEL
		if ((strcmp(config, ZPOOL_CONFIG_L2CACHE) == 0) &&
		    strcmp(vd->vdev_ops->vdev_op_type, VDEV_TYPE_DISK) != 0) {
			error = ENOTBLK;
			goto out;
		}
#endif
		vd->vdev_top = vd;

		if ((error = vdev_open(vd)) == 0 &&
		    (error = vdev_label_init(vd, crtxg, label)) == 0) {
			VERIFY(nvlist_add_uint64(dev[i], ZPOOL_CONFIG_GUID,
			    vd->vdev_guid) == 0);
		}

		vdev_free(vd);

		if (error &&
		    (mode != VDEV_ALLOC_SPARE && mode != VDEV_ALLOC_L2CACHE))
			goto out;
		else
			error = 0;
	}

out:
	sav->sav_pending = NULL;
	sav->sav_npending = 0;
	return (error);
}

static int
spa_validate_aux(spa_t *spa, nvlist_t *nvroot, uint64_t crtxg, int mode)
{
	int error;

	ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);

	if ((error = spa_validate_aux_devs(spa, nvroot, crtxg, mode,
	    &spa->spa_spares, ZPOOL_CONFIG_SPARES, SPA_VERSION_SPARES,
	    VDEV_LABEL_SPARE)) != 0) {
		return (error);
	}

	return (spa_validate_aux_devs(spa, nvroot, crtxg, mode,
	    &spa->spa_l2cache, ZPOOL_CONFIG_L2CACHE, SPA_VERSION_L2CACHE,
	    VDEV_LABEL_L2CACHE));
}

static void
spa_set_aux_vdevs(spa_aux_vdev_t *sav, nvlist_t **devs, int ndevs,
    const char *config)
{
	int i;

	if (sav->sav_config != NULL) {
		nvlist_t **olddevs;
		uint_t oldndevs;
		nvlist_t **newdevs;

		/*
		 * Generate new dev list by concatenating with the
		 * current dev list.
		 */
		VERIFY(nvlist_lookup_nvlist_array(sav->sav_config, config,
		    &olddevs, &oldndevs) == 0);

		newdevs = kmem_alloc(sizeof (void *) *
		    (ndevs + oldndevs), KM_SLEEP);
		for (i = 0; i < oldndevs; i++)
			VERIFY(nvlist_dup(olddevs[i], &newdevs[i],
			    KM_SLEEP) == 0);
		for (i = 0; i < ndevs; i++)
			VERIFY(nvlist_dup(devs[i], &newdevs[i + oldndevs],
			    KM_SLEEP) == 0);

		VERIFY(nvlist_remove(sav->sav_config, config,
		    DATA_TYPE_NVLIST_ARRAY) == 0);

		VERIFY(nvlist_add_nvlist_array(sav->sav_config,
		    config, newdevs, ndevs + oldndevs) == 0);
		for (i = 0; i < oldndevs + ndevs; i++)
			nvlist_free(newdevs[i]);
		kmem_free(newdevs, (oldndevs + ndevs) * sizeof (void *));
	} else {
		/*
		 * Generate a new dev list.
		 */
		VERIFY(nvlist_alloc(&sav->sav_config, NV_UNIQUE_NAME,
		    KM_SLEEP) == 0);
		VERIFY(nvlist_add_nvlist_array(sav->sav_config, config,
		    devs, ndevs) == 0);
	}
}

/*
 * Stop and drop level 2 ARC devices
 */
void
spa_l2cache_drop(spa_t *spa)
{
	vdev_t *vd;
	int i;
	spa_aux_vdev_t *sav = &spa->spa_l2cache;

	for (i = 0; i < sav->sav_count; i++) {
		uint64_t pool;

		vd = sav->sav_vdevs[i];
		ASSERT(vd != NULL);

		if (spa_l2cache_exists(vd->vdev_guid, &pool) &&
		    pool != 0ULL && l2arc_vdev_present(vd))
			l2arc_remove_vdev(vd);
		if (vd->vdev_isl2cache)
			spa_l2cache_remove(vd);
		vdev_clear_stats(vd);
		(void) vdev_close(vd);
	}
}

/*
 * Pool Creation
 */
int
spa_create(const char *pool, nvlist_t *nvroot, nvlist_t *props,
    const char *history_str, nvlist_t *zplprops)
{
	spa_t *spa;
	char *altroot = NULL;
	vdev_t *rvd;
	dsl_pool_t *dp;
	dmu_tx_t *tx;
	int c, error = 0;
	uint64_t txg = TXG_INITIAL;
	nvlist_t **spares, **l2cache;
	uint_t nspares, nl2cache;
	uint64_t version;

	/*
	 * If this pool already exists, return failure.
	 */
	mutex_enter(&spa_namespace_lock);
	if (spa_lookup(pool) != NULL) {
		mutex_exit(&spa_namespace_lock);
		return (EEXIST);
	}

	/*
	 * Allocate a new spa_t structure.
	 */
	(void) nvlist_lookup_string(props,
	    zpool_prop_to_name(ZPOOL_PROP_ALTROOT), &altroot);
	spa = spa_add(pool, altroot);
	spa_activate(spa, spa_mode_global);

	spa->spa_uberblock.ub_txg = txg - 1;

	if (props && (error = spa_prop_validate(spa, props))) {
		spa_unload(spa);
		spa_deactivate(spa);
		spa_remove(spa);
		mutex_exit(&spa_namespace_lock);
		return (error);
	}

	if (nvlist_lookup_uint64(props, zpool_prop_to_name(ZPOOL_PROP_VERSION),
	    &version) != 0)
		version = SPA_VERSION;
	ASSERT(version <= SPA_VERSION);
	spa->spa_uberblock.ub_version = version;
	spa->spa_ubsync = spa->spa_uberblock;

	/*
	 * Create the root vdev.
	 */
	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);

	error = spa_config_parse(spa, &rvd, nvroot, NULL, 0, VDEV_ALLOC_ADD);

	ASSERT(error != 0 || rvd != NULL);
	ASSERT(error != 0 || spa->spa_root_vdev == rvd);

	if (error == 0 && !zfs_allocatable_devs(nvroot))
		error = EINVAL;

	if (error == 0 &&
	    (error = vdev_create(rvd, txg, B_FALSE)) == 0 &&
	    (error = spa_validate_aux(spa, nvroot, txg,
	    VDEV_ALLOC_ADD)) == 0) {
		for (c = 0; c < rvd->vdev_children; c++)
			vdev_init(rvd->vdev_child[c], txg);
		vdev_config_dirty(rvd);
	}

	spa_config_exit(spa, SCL_ALL, FTAG);

	if (error != 0) {
		spa_unload(spa);
		spa_deactivate(spa);
		spa_remove(spa);
		mutex_exit(&spa_namespace_lock);
		return (error);
	}

	/*
	 * Get the list of spares, if specified.
	 */
	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
	    &spares, &nspares) == 0) {
		VERIFY(nvlist_alloc(&spa->spa_spares.sav_config, NV_UNIQUE_NAME,
		    KM_SLEEP) == 0);
		VERIFY(nvlist_add_nvlist_array(spa->spa_spares.sav_config,
		    ZPOOL_CONFIG_SPARES, spares, nspares) == 0);
		spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
		spa_load_spares(spa);
		spa_config_exit(spa, SCL_ALL, FTAG);
		spa->spa_spares.sav_sync = B_TRUE;
	}

	/*
	 * Get the list of level 2 cache devices, if specified.
	 */
	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
	    &l2cache, &nl2cache) == 0) {
		VERIFY(nvlist_alloc(&spa->spa_l2cache.sav_config,
		    NV_UNIQUE_NAME, KM_SLEEP) == 0);
		VERIFY(nvlist_add_nvlist_array(spa->spa_l2cache.sav_config,
		    ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache) == 0);
		spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
		spa_load_l2cache(spa);
		spa_config_exit(spa, SCL_ALL, FTAG);
		spa->spa_l2cache.sav_sync = B_TRUE;
	}

	spa->spa_dsl_pool = dp = dsl_pool_create(spa, zplprops, txg);
	spa->spa_meta_objset = dp->dp_meta_objset;

	tx = dmu_tx_create_assigned(dp, txg);

	/*
	 * Create the pool config object.
	 */
	spa->spa_config_object = dmu_object_alloc(spa->spa_meta_objset,
	    DMU_OT_PACKED_NVLIST, SPA_CONFIG_BLOCKSIZE,
	    DMU_OT_PACKED_NVLIST_SIZE, sizeof (uint64_t), tx);

	if (zap_add(spa->spa_meta_objset,
	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_CONFIG,
	    sizeof (uint64_t), 1, &spa->spa_config_object, tx) != 0) {
		cmn_err(CE_PANIC, "failed to add pool config");
	}

	/* Newly created pools with the right version are always deflated. */
	if (version >= SPA_VERSION_RAIDZ_DEFLATE) {
		spa->spa_deflate = TRUE;
		if (zap_add(spa->spa_meta_objset,
		    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_DEFLATE,
		    sizeof (uint64_t), 1, &spa->spa_deflate, tx) != 0) {
			cmn_err(CE_PANIC, "failed to add deflate");
		}
	}

	/*
	 * Create the deferred-free bplist object. Turn off compression
	 * because sync-to-convergence takes longer if the blocksize
	 * keeps changing.
	 */
	spa->spa_sync_bplist_obj = bplist_create(spa->spa_meta_objset,
	    1 << 14, tx);
	dmu_object_set_compress(spa->spa_meta_objset, spa->spa_sync_bplist_obj,
	    ZIO_COMPRESS_OFF, tx);

	if (zap_add(spa->spa_meta_objset,
	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_SYNC_BPLIST,
	    sizeof (uint64_t), 1, &spa->spa_sync_bplist_obj, tx) != 0) {
		cmn_err(CE_PANIC, "failed to add bplist");
	}

	/*
	 * Create the pool's history object.
2108 */ 2109 if (version >= SPA_VERSION_ZPOOL_HISTORY) 2110 spa_history_create_obj(spa, tx); 2111 2112 /* 2113 * Set pool properties. 2114 */ 2115 spa->spa_bootfs = zpool_prop_default_numeric(ZPOOL_PROP_BOOTFS); 2116 spa->spa_delegation = zpool_prop_default_numeric(ZPOOL_PROP_DELEGATION); 2117 spa->spa_failmode = zpool_prop_default_numeric(ZPOOL_PROP_FAILUREMODE); 2118 if (props != NULL) { 2119 spa_configfile_set(spa, props, B_FALSE); 2120 spa_sync_props(spa, props, CRED(), tx); 2121 } 2122 2123 dmu_tx_commit(tx); 2124 2125 spa->spa_sync_on = B_TRUE; 2126 txg_sync_start(spa->spa_dsl_pool); 2127 2128 /* 2129 * We explicitly wait for the first transaction to complete so that our 2130 * bean counters are appropriately updated. 2131 */ 2132 txg_wait_synced(spa->spa_dsl_pool, txg); 2133 2134 spa_config_sync(spa, B_FALSE, B_TRUE); 2135 2136 if (version >= SPA_VERSION_ZPOOL_HISTORY && history_str != NULL) 2137 (void) spa_history_log(spa, history_str, LOG_CMD_POOL_CREATE); 2138 2139 mutex_exit(&spa_namespace_lock); 2140 2141 spa->spa_minref = refcount_count(&spa->spa_refcount); 2142 2143 return (0); 2144 } 2145 2146 /* 2147 * Import the given pool into the system. We set up the necessary spa_t and 2148 * then call spa_load() to do the dirty work. 2149 */ 2150 static int 2151 spa_import_common(const char *pool, nvlist_t *config, nvlist_t *props, 2152 boolean_t isroot, boolean_t allowfaulted) 2153 { 2154 spa_t *spa; 2155 char *altroot = NULL; 2156 int error, loaderr; 2157 nvlist_t *nvroot; 2158 nvlist_t **spares, **l2cache; 2159 uint_t nspares, nl2cache; 2160 2161 /* 2162 * If a pool with this name exists, return failure. 2163 */ 2164 mutex_enter(&spa_namespace_lock); 2165 if ((spa = spa_lookup(pool)) != NULL) { 2166 if (isroot) { 2167 /* 2168 * Remove the existing root pool from the 2169 * namespace so that we can replace it with 2170 * the correct config we just read in. 2171 */ 2172 ASSERT(spa->spa_state == POOL_STATE_UNINITIALIZED); 2173 spa_remove(spa); 2174 } else { 2175 mutex_exit(&spa_namespace_lock); 2176 return (EEXIST); 2177 } 2178 } 2179 2180 /* 2181 * Create and initialize the spa structure. 2182 */ 2183 (void) nvlist_lookup_string(props, 2184 zpool_prop_to_name(ZPOOL_PROP_ALTROOT), &altroot); 2185 spa = spa_add(pool, altroot); 2186 spa_activate(spa, spa_mode_global); 2187 2188 if (allowfaulted) 2189 spa->spa_import_faulted = B_TRUE; 2190 spa->spa_is_root = isroot; 2191 2192 /* 2193 * Pass off the heavy lifting to spa_load(). 2194 * Pass TRUE for mosconfig (unless this is a root pool) because 2195 * the user-supplied config is actually the one to trust when 2196 * doing an import. 2197 */ 2198 loaderr = error = spa_load(spa, config, SPA_LOAD_IMPORT, !isroot); 2199 2200 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 2201 /* 2202 * Toss any existing sparelist, as it doesn't have any validity anymore, 2203 * and conflicts with spa_has_spare(). 
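	 *
	 * (The spare and l2cache configs that spa_load() read back describe
	 * whatever the pool last had on disk.  For a normal import the
	 * user-supplied nvroot looked up below is authoritative, so both
	 * lists are rebuilt from it after the devices are validated.)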
2204 */ 2205 if (!isroot && spa->spa_spares.sav_config) { 2206 nvlist_free(spa->spa_spares.sav_config); 2207 spa->spa_spares.sav_config = NULL; 2208 spa_load_spares(spa); 2209 } 2210 if (!isroot && spa->spa_l2cache.sav_config) { 2211 nvlist_free(spa->spa_l2cache.sav_config); 2212 spa->spa_l2cache.sav_config = NULL; 2213 spa_load_l2cache(spa); 2214 } 2215 2216 VERIFY(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, 2217 &nvroot) == 0); 2218 if (error == 0) 2219 error = spa_validate_aux(spa, nvroot, -1ULL, VDEV_ALLOC_SPARE); 2220 if (error == 0) 2221 error = spa_validate_aux(spa, nvroot, -1ULL, 2222 VDEV_ALLOC_L2CACHE); 2223 spa_config_exit(spa, SCL_ALL, FTAG); 2224 2225 if (props != NULL) 2226 spa_configfile_set(spa, props, B_FALSE); 2227 2228 if (error != 0 || (props && spa_writeable(spa) && 2229 (error = spa_prop_set(spa, props)))) { 2230 if (loaderr != 0 && loaderr != EINVAL && allowfaulted) { 2231 /* 2232 * If we failed to load the pool, but 'allowfaulted' is 2233 * set, then manually set the config as if the config 2234 * passed in was specified in the cache file. 2235 */ 2236 error = 0; 2237 spa->spa_import_faulted = B_FALSE; 2238 if (spa->spa_config == NULL) 2239 spa->spa_config = spa_config_generate(spa, 2240 NULL, -1ULL, B_TRUE); 2241 spa_unload(spa); 2242 spa_deactivate(spa); 2243 spa_config_sync(spa, B_FALSE, B_TRUE); 2244 } else { 2245 spa_unload(spa); 2246 spa_deactivate(spa); 2247 spa_remove(spa); 2248 } 2249 mutex_exit(&spa_namespace_lock); 2250 return (error); 2251 } 2252 2253 /* 2254 * Override any spares and level 2 cache devices as specified by 2255 * the user, as these may have correct device names/devids, etc. 2256 */ 2257 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES, 2258 &spares, &nspares) == 0) { 2259 if (spa->spa_spares.sav_config) 2260 VERIFY(nvlist_remove(spa->spa_spares.sav_config, 2261 ZPOOL_CONFIG_SPARES, DATA_TYPE_NVLIST_ARRAY) == 0); 2262 else 2263 VERIFY(nvlist_alloc(&spa->spa_spares.sav_config, 2264 NV_UNIQUE_NAME, KM_SLEEP) == 0); 2265 VERIFY(nvlist_add_nvlist_array(spa->spa_spares.sav_config, 2266 ZPOOL_CONFIG_SPARES, spares, nspares) == 0); 2267 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 2268 spa_load_spares(spa); 2269 spa_config_exit(spa, SCL_ALL, FTAG); 2270 spa->spa_spares.sav_sync = B_TRUE; 2271 } 2272 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE, 2273 &l2cache, &nl2cache) == 0) { 2274 if (spa->spa_l2cache.sav_config) 2275 VERIFY(nvlist_remove(spa->spa_l2cache.sav_config, 2276 ZPOOL_CONFIG_L2CACHE, DATA_TYPE_NVLIST_ARRAY) == 0); 2277 else 2278 VERIFY(nvlist_alloc(&spa->spa_l2cache.sav_config, 2279 NV_UNIQUE_NAME, KM_SLEEP) == 0); 2280 VERIFY(nvlist_add_nvlist_array(spa->spa_l2cache.sav_config, 2281 ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache) == 0); 2282 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 2283 spa_load_l2cache(spa); 2284 spa_config_exit(spa, SCL_ALL, FTAG); 2285 spa->spa_l2cache.sav_sync = B_TRUE; 2286 } 2287 2288 if (spa_writeable(spa)) { 2289 /* 2290 * Update the config cache to include the newly-imported pool. 2291 */ 2292 spa_config_update_common(spa, SPA_CONFIG_UPDATE_POOL, isroot); 2293 } 2294 2295 spa->spa_import_faulted = B_FALSE; 2296 mutex_exit(&spa_namespace_lock); 2297 2298 return (0); 2299 } 2300 2301 #ifdef _KERNEL 2302 /* 2303 * Build a "root" vdev for a top level vdev read in from a rootpool 2304 * device label. 
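 *
 * A device label only records the config of its own top-level vdev, not the
 * whole pool, so before handing it to spa_load() we wrap that subtree in a
 * minimal synthetic root: a "root" nvlist carrying the pool guid whose only
 * child is the labeled top-level vdev.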
2305 */ 2306 static void 2307 spa_build_rootpool_config(nvlist_t *config) 2308 { 2309 nvlist_t *nvtop, *nvroot; 2310 uint64_t pgid; 2311 2312 /* 2313 * Add this top-level vdev to the child array. 2314 */ 2315 VERIFY(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &nvtop) 2316 == 0); 2317 VERIFY(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID, &pgid) 2318 == 0); 2319 2320 /* 2321 * Put this pool's top-level vdevs into a root vdev. 2322 */ 2323 VERIFY(nvlist_alloc(&nvroot, NV_UNIQUE_NAME, KM_SLEEP) == 0); 2324 VERIFY(nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE, VDEV_TYPE_ROOT) 2325 == 0); 2326 VERIFY(nvlist_add_uint64(nvroot, ZPOOL_CONFIG_ID, 0ULL) == 0); 2327 VERIFY(nvlist_add_uint64(nvroot, ZPOOL_CONFIG_GUID, pgid) == 0); 2328 VERIFY(nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN, 2329 &nvtop, 1) == 0); 2330 2331 /* 2332 * Replace the existing vdev_tree with the new root vdev in 2333 * this pool's configuration (remove the old, add the new). 2334 */ 2335 VERIFY(nvlist_add_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, nvroot) == 0); 2336 nvlist_free(nvroot); 2337 } 2338 2339 /* 2340 * Get the root pool information from the root disk, then import the root pool 2341 * during the system boot up time. 2342 */ 2343 extern int vdev_disk_read_rootlabel(char *, char *, nvlist_t **); 2344 2345 int 2346 spa_check_rootconf(char *devpath, char *devid, nvlist_t **bestconf, 2347 uint64_t *besttxg) 2348 { 2349 nvlist_t *config; 2350 uint64_t txg; 2351 int error; 2352 2353 if (error = vdev_disk_read_rootlabel(devpath, devid, &config)) 2354 return (error); 2355 2356 VERIFY(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_TXG, &txg) == 0); 2357 2358 if (bestconf != NULL) 2359 *bestconf = config; 2360 else 2361 nvlist_free(config); 2362 *besttxg = txg; 2363 return (0); 2364 } 2365 2366 boolean_t 2367 spa_rootdev_validate(nvlist_t *nv) 2368 { 2369 uint64_t ival; 2370 2371 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_OFFLINE, &ival) == 0 || 2372 nvlist_lookup_uint64(nv, ZPOOL_CONFIG_FAULTED, &ival) == 0 || 2373 nvlist_lookup_uint64(nv, ZPOOL_CONFIG_REMOVED, &ival) == 0) 2374 return (B_FALSE); 2375 2376 return (B_TRUE); 2377 } 2378 2379 2380 /* 2381 * Given the boot device's physical path or devid, check if the device 2382 * is in a valid state. If so, return the configuration from the vdev 2383 * label. 
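 *
 * For a mirrored root pool every side of the mirror carries its own label;
 * the loop below compares their txgs and refuses to proceed unless we are
 * booting from the side with the most recent txg, since only that side is
 * guaranteed to have current data.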
2384  */
2385 int
2386 spa_get_rootconf(char *devpath, char *devid, nvlist_t **bestconf)
2387 {
2388 	nvlist_t *conf = NULL;
2389 	uint64_t txg = 0;
2390 	nvlist_t *nvtop, **child;
2391 	char *type;
2392 	char *bootpath = NULL;
2393 	uint_t children, c;
2394 	char *tmp;
2395 	int error;
2396 
2397 	if (devpath && ((tmp = strchr(devpath, ' ')) != NULL))
2398 		*tmp = '\0';
2399 	if (error = spa_check_rootconf(devpath, devid, &conf, &txg)) {
2400 		cmn_err(CE_NOTE, "error reading device label");
2401 		return (error);
2402 	}
2403 	if (txg == 0) {
2404 		cmn_err(CE_NOTE, "this device is detached");
2405 		nvlist_free(conf);
2406 		return (EINVAL);
2407 	}
2408 
2409 	VERIFY(nvlist_lookup_nvlist(conf, ZPOOL_CONFIG_VDEV_TREE,
2410 	    &nvtop) == 0);
2411 	VERIFY(nvlist_lookup_string(nvtop, ZPOOL_CONFIG_TYPE, &type) == 0);
2412 
2413 	if (strcmp(type, VDEV_TYPE_DISK) == 0) {
2414 		if (spa_rootdev_validate(nvtop)) {
2415 			goto out;
2416 		} else {
2417 			nvlist_free(conf);
2418 			return (EINVAL);
2419 		}
2420 	}
2421 
2422 	ASSERT(strcmp(type, VDEV_TYPE_MIRROR) == 0);
2423 
2424 	VERIFY(nvlist_lookup_nvlist_array(nvtop, ZPOOL_CONFIG_CHILDREN,
2425 	    &child, &children) == 0);
2426 
2427 	/*
2428 	 * Go through the vdevs in the mirror to see if the given device
2429 	 * has the most recent txg. Only the device with the most
2430 	 * recent txg has valid information and should be booted.
2431 	 */
2432 	for (c = 0; c < children; c++) {
2433 		char *cdevid, *cpath;
2434 		uint64_t tmptxg;
2435 
2436 		cpath = NULL;
2437 		cdevid = NULL;
2438 		if (nvlist_lookup_string(child[c], ZPOOL_CONFIG_PHYS_PATH,
2439 		    &cpath) != 0 && nvlist_lookup_string(child[c],
2440 		    ZPOOL_CONFIG_DEVID, &cdevid) != 0)
2441 			return (EINVAL);
2442 		if ((spa_check_rootconf(cpath, cdevid, NULL,
2443 		    &tmptxg) == 0) && (tmptxg > txg)) {
2444 			txg = tmptxg;
2445 			VERIFY(nvlist_lookup_string(child[c],
2446 			    ZPOOL_CONFIG_PATH, &bootpath) == 0);
2447 		}
2448 	}
2449 
2450 	/* Does the best device match the one we've booted from? */
2451 	if (bootpath) {
2452 		cmn_err(CE_NOTE, "try booting from '%s'", bootpath);
2453 		return (EINVAL);
2454 	}
2455 out:
2456 	*bestconf = conf;
2457 	return (0);
2458 }
2459 
2460 /*
2461  * Import a root pool.
2462  *
2463  * For x86, devpath_list will consist of the devid and/or physpath name of
2464  * the vdev (e.g. "id1,sd@SSEAGATE..." or "/pci@1f,0/ide@d/disk@0,0:a").
2465  * The GRUB "findroot" command will return the vdev we should boot.
2466  *
2467  * For Sparc, devpath_list consists of the physpath name of the booting
2468  * device, whether the root pool is a single-device pool or a mirrored pool.
2469  * e.g.
2470  *	"/pci@1f,0/ide@d/disk@0,0:a"
2471  */
2472 int
2473 spa_import_rootpool(char *devpath, char *devid)
2474 {
2475 	nvlist_t *conf = NULL;
2476 	char *pname;
2477 	int error;
2478 
2479 	/*
2480 	 * Get the vdev pathname and configuration from the most
2481 	 * recently updated vdev (highest txg).
2482 	 */
2483 	if (error = spa_get_rootconf(devpath, devid, &conf))
2484 		goto msg_out;
2485 
2486 	/*
2487 	 * Add type "root" vdev to the config.
2488 	 */
2489 	spa_build_rootpool_config(conf);
2490 
2491 	VERIFY(nvlist_lookup_string(conf, ZPOOL_CONFIG_POOL_NAME, &pname) == 0);
2492 
2493 	/*
2494 	 * We specify 'allowfaulted' for this to be treated like spa_open()
2495 	 * instead of spa_import(). This prevents us from marking vdevs as
2496 	 * persistently unavailable, and generates FMA ereports as if it were a
2497 	 * pool open, not import.
2498 */ 2499 error = spa_import_common(pname, conf, NULL, B_TRUE, B_TRUE); 2500 ASSERT(error != EEXIST); 2501 2502 nvlist_free(conf); 2503 return (error); 2504 2505 msg_out: 2506 cmn_err(CE_NOTE, "\n" 2507 " *************************************************** \n" 2508 " * This device is not bootable! * \n" 2509 " * It is either offlined or detached or faulted. * \n" 2510 " * Please try to boot from a different device. * \n" 2511 " *************************************************** "); 2512 2513 return (error); 2514 } 2515 #endif 2516 2517 /* 2518 * Import a non-root pool into the system. 2519 */ 2520 int 2521 spa_import(const char *pool, nvlist_t *config, nvlist_t *props) 2522 { 2523 return (spa_import_common(pool, config, props, B_FALSE, B_FALSE)); 2524 } 2525 2526 int 2527 spa_import_faulted(const char *pool, nvlist_t *config, nvlist_t *props) 2528 { 2529 return (spa_import_common(pool, config, props, B_FALSE, B_TRUE)); 2530 } 2531 2532 2533 /* 2534 * This (illegal) pool name is used when temporarily importing a spa_t in order 2535 * to get the vdev stats associated with the imported devices. 2536 */ 2537 #define TRYIMPORT_NAME "$import" 2538 2539 nvlist_t * 2540 spa_tryimport(nvlist_t *tryconfig) 2541 { 2542 nvlist_t *config = NULL; 2543 char *poolname; 2544 spa_t *spa; 2545 uint64_t state; 2546 2547 if (nvlist_lookup_string(tryconfig, ZPOOL_CONFIG_POOL_NAME, &poolname)) 2548 return (NULL); 2549 2550 if (nvlist_lookup_uint64(tryconfig, ZPOOL_CONFIG_POOL_STATE, &state)) 2551 return (NULL); 2552 2553 /* 2554 * Create and initialize the spa structure. 2555 */ 2556 mutex_enter(&spa_namespace_lock); 2557 spa = spa_add(TRYIMPORT_NAME, NULL); 2558 spa_activate(spa, FREAD); 2559 2560 /* 2561 * Pass off the heavy lifting to spa_load(). 2562 * Pass TRUE for mosconfig because the user-supplied config 2563 * is actually the one to trust when doing an import. 2564 */ 2565 (void) spa_load(spa, tryconfig, SPA_LOAD_TRYIMPORT, B_TRUE); 2566 2567 /* 2568 * If 'tryconfig' was at least parsable, return the current config. 2569 */ 2570 if (spa->spa_root_vdev != NULL) { 2571 config = spa_config_generate(spa, NULL, -1ULL, B_TRUE); 2572 VERIFY(nvlist_add_string(config, ZPOOL_CONFIG_POOL_NAME, 2573 poolname) == 0); 2574 VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_POOL_STATE, 2575 state) == 0); 2576 VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_TIMESTAMP, 2577 spa->spa_uberblock.ub_timestamp) == 0); 2578 2579 /* 2580 * If the bootfs property exists on this pool then we 2581 * copy it out so that external consumers can tell which 2582 * pools are bootable. 2583 */ 2584 if (spa->spa_bootfs) { 2585 char *tmpname = kmem_alloc(MAXPATHLEN, KM_SLEEP); 2586 2587 /* 2588 * We have to play games with the name since the 2589 * pool was opened as TRYIMPORT_NAME. 2590 */ 2591 if (dsl_dsobj_to_dsname(spa_name(spa), 2592 spa->spa_bootfs, tmpname) == 0) { 2593 char *cp; 2594 char *dsname = kmem_alloc(MAXPATHLEN, KM_SLEEP); 2595 2596 cp = strchr(tmpname, '/'); 2597 if (cp == NULL) { 2598 (void) strlcpy(dsname, tmpname, 2599 MAXPATHLEN); 2600 } else { 2601 (void) snprintf(dsname, MAXPATHLEN, 2602 "%s/%s", poolname, ++cp); 2603 } 2604 VERIFY(nvlist_add_string(config, 2605 ZPOOL_CONFIG_BOOTFS, dsname) == 0); 2606 kmem_free(dsname, MAXPATHLEN); 2607 } 2608 kmem_free(tmpname, MAXPATHLEN); 2609 } 2610 2611 /* 2612 * Add the list of hot spares and level 2 cache devices. 
2613 		 */
2614 		spa_add_spares(spa, config);
2615 		spa_add_l2cache(spa, config);
2616 	}
2617 
2618 	spa_unload(spa);
2619 	spa_deactivate(spa);
2620 	spa_remove(spa);
2621 	mutex_exit(&spa_namespace_lock);
2622 
2623 	return (config);
2624 }
2625 
2626 /*
2627  * Pool export/destroy
2628  *
2629  * The act of destroying or exporting a pool is very simple. We make sure there
2630  * is no more pending I/O and any references to the pool are gone. Then, we
2631  * update the pool state and sync all the labels to disk, removing the
2632  * configuration from the cache afterwards. If the 'hardforce' flag is set, then
2633  * we don't sync the labels or remove the configuration cache.
2634  */
2635 static int
2636 spa_export_common(char *pool, int new_state, nvlist_t **oldconfig,
2637     boolean_t force, boolean_t hardforce)
2638 {
2639 	spa_t *spa;
2640 
2641 	if (oldconfig)
2642 		*oldconfig = NULL;
2643 
2644 	if (!(spa_mode_global & FWRITE))
2645 		return (EROFS);
2646 
2647 	mutex_enter(&spa_namespace_lock);
2648 	if ((spa = spa_lookup(pool)) == NULL) {
2649 		mutex_exit(&spa_namespace_lock);
2650 		return (ENOENT);
2651 	}
2652 
2653 	/*
2654 	 * Put a hold on the pool, drop the namespace lock, stop async tasks,
2655 	 * reacquire the namespace lock, and see if we can export.
2656 	 */
2657 	spa_open_ref(spa, FTAG);
2658 	mutex_exit(&spa_namespace_lock);
2659 	spa_async_suspend(spa);
2660 	mutex_enter(&spa_namespace_lock);
2661 	spa_close(spa, FTAG);
2662 
2663 	/*
2664 	 * The pool will be in core if it's openable,
2665 	 * in which case we can modify its state.
2666 	 */
2667 	if (spa->spa_state != POOL_STATE_UNINITIALIZED && spa->spa_sync_on) {
2668 		/*
2669 		 * Objsets may be open only because they're dirty, so we
2670 		 * have to force it to sync before checking spa_refcnt.
2671 		 */
2672 		txg_wait_synced(spa->spa_dsl_pool, 0);
2673 
2674 		/*
2675 		 * A pool cannot be exported or destroyed if there are active
2676 		 * references. If we are resetting a pool, allow references by
2677 		 * fault injection handlers.
2678 		 */
2679 		if (!spa_refcount_zero(spa) ||
2680 		    (spa->spa_inject_ref != 0 &&
2681 		    new_state != POOL_STATE_UNINITIALIZED)) {
2682 			spa_async_resume(spa);
2683 			mutex_exit(&spa_namespace_lock);
2684 			return (EBUSY);
2685 		}
2686 
2687 		/*
2688 		 * A pool cannot be exported if it has an active shared spare.
2689 		 * This is to prevent other pools from stealing the active spare
2690 		 * from an exported pool. At the user's explicit request, such
2691 		 * a pool can still be forcibly exported.
2692 		 */
2693 		if (!force && new_state == POOL_STATE_EXPORTED &&
2694 		    spa_has_active_shared_spare(spa)) {
2695 			spa_async_resume(spa);
2696 			mutex_exit(&spa_namespace_lock);
2697 			return (EXDEV);
2698 		}
2699 
2700 		/*
2701 		 * We want this to be reflected on every label,
2702 		 * so mark them all dirty. spa_unload() will do the
2703 		 * final sync that pushes these changes out.
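		 *
		 * The state recorded here (EXPORTED or DESTROYED) is what a
		 * later import reads back from the labels to tell a cleanly
		 * exported pool from one that may still be in use by another
		 * host.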
2704 */ 2705 if (new_state != POOL_STATE_UNINITIALIZED && !hardforce) { 2706 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 2707 spa->spa_state = new_state; 2708 spa->spa_final_txg = spa_last_synced_txg(spa) + 1; 2709 vdev_config_dirty(spa->spa_root_vdev); 2710 spa_config_exit(spa, SCL_ALL, FTAG); 2711 } 2712 } 2713 2714 spa_event_notify(spa, NULL, ESC_ZFS_POOL_DESTROY); 2715 2716 if (spa->spa_state != POOL_STATE_UNINITIALIZED) { 2717 spa_unload(spa); 2718 spa_deactivate(spa); 2719 } 2720 2721 if (oldconfig && spa->spa_config) 2722 VERIFY(nvlist_dup(spa->spa_config, oldconfig, 0) == 0); 2723 2724 if (new_state != POOL_STATE_UNINITIALIZED) { 2725 if (!hardforce) 2726 spa_config_sync(spa, B_TRUE, B_TRUE); 2727 spa_remove(spa); 2728 } 2729 mutex_exit(&spa_namespace_lock); 2730 2731 return (0); 2732 } 2733 2734 /* 2735 * Destroy a storage pool. 2736 */ 2737 int 2738 spa_destroy(char *pool) 2739 { 2740 return (spa_export_common(pool, POOL_STATE_DESTROYED, NULL, 2741 B_FALSE, B_FALSE)); 2742 } 2743 2744 /* 2745 * Export a storage pool. 2746 */ 2747 int 2748 spa_export(char *pool, nvlist_t **oldconfig, boolean_t force, 2749 boolean_t hardforce) 2750 { 2751 return (spa_export_common(pool, POOL_STATE_EXPORTED, oldconfig, 2752 force, hardforce)); 2753 } 2754 2755 /* 2756 * Similar to spa_export(), this unloads the spa_t without actually removing it 2757 * from the namespace in any way. 2758 */ 2759 int 2760 spa_reset(char *pool) 2761 { 2762 return (spa_export_common(pool, POOL_STATE_UNINITIALIZED, NULL, 2763 B_FALSE, B_FALSE)); 2764 } 2765 2766 /* 2767 * ========================================================================== 2768 * Device manipulation 2769 * ========================================================================== 2770 */ 2771 2772 /* 2773 * Add a device to a storage pool. 2774 */ 2775 int 2776 spa_vdev_add(spa_t *spa, nvlist_t *nvroot) 2777 { 2778 uint64_t txg; 2779 int error; 2780 vdev_t *rvd = spa->spa_root_vdev; 2781 vdev_t *vd, *tvd; 2782 nvlist_t **spares, **l2cache; 2783 uint_t nspares, nl2cache; 2784 2785 txg = spa_vdev_enter(spa); 2786 2787 if ((error = spa_config_parse(spa, &vd, nvroot, NULL, 0, 2788 VDEV_ALLOC_ADD)) != 0) 2789 return (spa_vdev_exit(spa, NULL, txg, error)); 2790 2791 spa->spa_pending_vdev = vd; /* spa_vdev_exit() will clear this */ 2792 2793 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES, &spares, 2794 &nspares) != 0) 2795 nspares = 0; 2796 2797 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE, &l2cache, 2798 &nl2cache) != 0) 2799 nl2cache = 0; 2800 2801 if (vd->vdev_children == 0 && nspares == 0 && nl2cache == 0) 2802 return (spa_vdev_exit(spa, vd, txg, EINVAL)); 2803 2804 if (vd->vdev_children != 0 && 2805 (error = vdev_create(vd, txg, B_FALSE)) != 0) 2806 return (spa_vdev_exit(spa, vd, txg, error)); 2807 2808 /* 2809 * We must validate the spares and l2cache devices after checking the 2810 * children. Otherwise, vdev_inuse() will blindly overwrite the spare. 2811 */ 2812 if ((error = spa_validate_aux(spa, nvroot, txg, VDEV_ALLOC_ADD)) != 0) 2813 return (spa_vdev_exit(spa, vd, txg, error)); 2814 2815 /* 2816 * Transfer each new top-level vdev from vd to rvd. 
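	 *
	 * The parsed nvroot arrives as its own small tree (a "root" vdev
	 * whose children are the new top-level vdevs).  Each child is
	 * re-parented under the pool's real root vdev and takes the next
	 * available vdev id; e.g. adding to a pool that already has two
	 * top-level vdevs gives the new one vdev_id 2.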
2817 */ 2818 for (int c = 0; c < vd->vdev_children; c++) { 2819 tvd = vd->vdev_child[c]; 2820 vdev_remove_child(vd, tvd); 2821 tvd->vdev_id = rvd->vdev_children; 2822 vdev_add_child(rvd, tvd); 2823 vdev_config_dirty(tvd); 2824 } 2825 2826 if (nspares != 0) { 2827 spa_set_aux_vdevs(&spa->spa_spares, spares, nspares, 2828 ZPOOL_CONFIG_SPARES); 2829 spa_load_spares(spa); 2830 spa->spa_spares.sav_sync = B_TRUE; 2831 } 2832 2833 if (nl2cache != 0) { 2834 spa_set_aux_vdevs(&spa->spa_l2cache, l2cache, nl2cache, 2835 ZPOOL_CONFIG_L2CACHE); 2836 spa_load_l2cache(spa); 2837 spa->spa_l2cache.sav_sync = B_TRUE; 2838 } 2839 2840 /* 2841 * We have to be careful when adding new vdevs to an existing pool. 2842 * If other threads start allocating from these vdevs before we 2843 * sync the config cache, and we lose power, then upon reboot we may 2844 * fail to open the pool because there are DVAs that the config cache 2845 * can't translate. Therefore, we first add the vdevs without 2846 * initializing metaslabs; sync the config cache (via spa_vdev_exit()); 2847 * and then let spa_config_update() initialize the new metaslabs. 2848 * 2849 * spa_load() checks for added-but-not-initialized vdevs, so that 2850 * if we lose power at any point in this sequence, the remaining 2851 * steps will be completed the next time we load the pool. 2852 */ 2853 (void) spa_vdev_exit(spa, vd, txg, 0); 2854 2855 mutex_enter(&spa_namespace_lock); 2856 spa_config_update(spa, SPA_CONFIG_UPDATE_POOL); 2857 mutex_exit(&spa_namespace_lock); 2858 2859 return (0); 2860 } 2861 2862 /* 2863 * Attach a device to a mirror. The arguments are the path to any device 2864 * in the mirror, and the nvroot for the new device. If the path specifies 2865 * a device that is not mirrored, we automatically insert the mirror vdev. 2866 * 2867 * If 'replacing' is specified, the new device is intended to replace the 2868 * existing device; in this case the two devices are made into their own 2869 * mirror using the 'replacing' vdev, which is functionally identical to 2870 * the mirror vdev (it actually reuses all the same ops) but has a few 2871 * extra rules: you can't attach to it after it's been created, and upon 2872 * completion of resilvering, the first disk (the one being replaced) 2873 * is automatically detached. 
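 *
 * In zpool(1M) terms (device names below are examples only),
 * "zpool attach tank c1t0d0 c2t0d0" reaches this function with 'replacing'
 * set to 0 and ends up with a mirror of the two disks, while
 * "zpool replace tank c1t0d0 c2t0d0" arrives with 'replacing' set to 1 and
 * builds the temporary replacing vdev described above.  In both cases
 * 'guid' names the existing device and 'nvroot' describes the single new
 * leaf being attached.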
2874 */ 2875 int 2876 spa_vdev_attach(spa_t *spa, uint64_t guid, nvlist_t *nvroot, int replacing) 2877 { 2878 uint64_t txg, open_txg; 2879 vdev_t *rvd = spa->spa_root_vdev; 2880 vdev_t *oldvd, *newvd, *newrootvd, *pvd, *tvd; 2881 vdev_ops_t *pvops; 2882 dmu_tx_t *tx; 2883 char *oldvdpath, *newvdpath; 2884 int newvd_isspare; 2885 int error; 2886 2887 txg = spa_vdev_enter(spa); 2888 2889 oldvd = spa_lookup_by_guid(spa, guid, B_FALSE); 2890 2891 if (oldvd == NULL) 2892 return (spa_vdev_exit(spa, NULL, txg, ENODEV)); 2893 2894 if (!oldvd->vdev_ops->vdev_op_leaf) 2895 return (spa_vdev_exit(spa, NULL, txg, ENOTSUP)); 2896 2897 pvd = oldvd->vdev_parent; 2898 2899 if ((error = spa_config_parse(spa, &newrootvd, nvroot, NULL, 0, 2900 VDEV_ALLOC_ADD)) != 0) 2901 return (spa_vdev_exit(spa, NULL, txg, EINVAL)); 2902 2903 if (newrootvd->vdev_children != 1) 2904 return (spa_vdev_exit(spa, newrootvd, txg, EINVAL)); 2905 2906 newvd = newrootvd->vdev_child[0]; 2907 2908 if (!newvd->vdev_ops->vdev_op_leaf) 2909 return (spa_vdev_exit(spa, newrootvd, txg, EINVAL)); 2910 2911 if ((error = vdev_create(newrootvd, txg, replacing)) != 0) 2912 return (spa_vdev_exit(spa, newrootvd, txg, error)); 2913 2914 /* 2915 * Spares can't replace logs 2916 */ 2917 if (oldvd->vdev_top->vdev_islog && newvd->vdev_isspare) 2918 return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP)); 2919 2920 if (!replacing) { 2921 /* 2922 * For attach, the only allowable parent is a mirror or the root 2923 * vdev. 2924 */ 2925 if (pvd->vdev_ops != &vdev_mirror_ops && 2926 pvd->vdev_ops != &vdev_root_ops) 2927 return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP)); 2928 2929 pvops = &vdev_mirror_ops; 2930 } else { 2931 /* 2932 * Active hot spares can only be replaced by inactive hot 2933 * spares. 2934 */ 2935 if (pvd->vdev_ops == &vdev_spare_ops && 2936 pvd->vdev_child[1] == oldvd && 2937 !spa_has_spare(spa, newvd->vdev_guid)) 2938 return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP)); 2939 2940 /* 2941 * If the source is a hot spare, and the parent isn't already a 2942 * spare, then we want to create a new hot spare. Otherwise, we 2943 * want to create a replacing vdev. The user is not allowed to 2944 * attach to a spared vdev child unless the 'isspare' state is 2945 * the same (spare replaces spare, non-spare replaces 2946 * non-spare). 2947 */ 2948 if (pvd->vdev_ops == &vdev_replacing_ops) 2949 return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP)); 2950 else if (pvd->vdev_ops == &vdev_spare_ops && 2951 newvd->vdev_isspare != oldvd->vdev_isspare) 2952 return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP)); 2953 else if (pvd->vdev_ops != &vdev_spare_ops && 2954 newvd->vdev_isspare) 2955 pvops = &vdev_spare_ops; 2956 else 2957 pvops = &vdev_replacing_ops; 2958 } 2959 2960 /* 2961 * Compare the new device size with the replaceable/attachable 2962 * device size. 2963 */ 2964 if (newvd->vdev_psize < vdev_get_rsize(oldvd)) 2965 return (spa_vdev_exit(spa, newrootvd, txg, EOVERFLOW)); 2966 2967 /* 2968 * The new device cannot have a higher alignment requirement 2969 * than the top-level vdev. 2970 */ 2971 if (newvd->vdev_ashift > oldvd->vdev_top->vdev_ashift) 2972 return (spa_vdev_exit(spa, newrootvd, txg, EDOM)); 2973 2974 /* 2975 * If this is an in-place replacement, update oldvd's path and devid 2976 * to make it distinguishable from newvd, and unopenable from now on. 
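	 *
	 * ("In-place" means the new device reuses the old device's path,
	 * e.g. a failed disk replaced in the same slot.  The old path gets
	 * an "/old" suffix, as in /dev/dsk/c1t0d0s0/old, so the two vdevs
	 * cannot be confused while the replacement resilvers.)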
2977 */ 2978 if (strcmp(oldvd->vdev_path, newvd->vdev_path) == 0) { 2979 spa_strfree(oldvd->vdev_path); 2980 oldvd->vdev_path = kmem_alloc(strlen(newvd->vdev_path) + 5, 2981 KM_SLEEP); 2982 (void) sprintf(oldvd->vdev_path, "%s/%s", 2983 newvd->vdev_path, "old"); 2984 if (oldvd->vdev_devid != NULL) { 2985 spa_strfree(oldvd->vdev_devid); 2986 oldvd->vdev_devid = NULL; 2987 } 2988 } 2989 2990 /* 2991 * If the parent is not a mirror, or if we're replacing, insert the new 2992 * mirror/replacing/spare vdev above oldvd. 2993 */ 2994 if (pvd->vdev_ops != pvops) 2995 pvd = vdev_add_parent(oldvd, pvops); 2996 2997 ASSERT(pvd->vdev_top->vdev_parent == rvd); 2998 ASSERT(pvd->vdev_ops == pvops); 2999 ASSERT(oldvd->vdev_parent == pvd); 3000 3001 /* 3002 * Extract the new device from its root and add it to pvd. 3003 */ 3004 vdev_remove_child(newrootvd, newvd); 3005 newvd->vdev_id = pvd->vdev_children; 3006 vdev_add_child(pvd, newvd); 3007 3008 /* 3009 * If newvd is smaller than oldvd, but larger than its rsize, 3010 * the addition of newvd may have decreased our parent's asize. 3011 */ 3012 pvd->vdev_asize = MIN(pvd->vdev_asize, newvd->vdev_asize); 3013 3014 tvd = newvd->vdev_top; 3015 ASSERT(pvd->vdev_top == tvd); 3016 ASSERT(tvd->vdev_parent == rvd); 3017 3018 vdev_config_dirty(tvd); 3019 3020 /* 3021 * Set newvd's DTL to [TXG_INITIAL, open_txg]. It will propagate 3022 * upward when spa_vdev_exit() calls vdev_dtl_reassess(). 3023 */ 3024 open_txg = txg + TXG_CONCURRENT_STATES - 1; 3025 3026 vdev_dtl_dirty(newvd, DTL_MISSING, 3027 TXG_INITIAL, open_txg - TXG_INITIAL + 1); 3028 3029 if (newvd->vdev_isspare) 3030 spa_spare_activate(newvd); 3031 oldvdpath = spa_strdup(oldvd->vdev_path); 3032 newvdpath = spa_strdup(newvd->vdev_path); 3033 newvd_isspare = newvd->vdev_isspare; 3034 3035 /* 3036 * Mark newvd's DTL dirty in this txg. 3037 */ 3038 vdev_dirty(tvd, VDD_DTL, newvd, txg); 3039 3040 (void) spa_vdev_exit(spa, newrootvd, open_txg, 0); 3041 3042 tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir); 3043 if (dmu_tx_assign(tx, TXG_WAIT) == 0) { 3044 spa_history_internal_log(LOG_POOL_VDEV_ATTACH, spa, tx, 3045 CRED(), "%s vdev=%s %s vdev=%s", 3046 replacing && newvd_isspare ? "spare in" : 3047 replacing ? "replace" : "attach", newvdpath, 3048 replacing ? "for" : "to", oldvdpath); 3049 dmu_tx_commit(tx); 3050 } else { 3051 dmu_tx_abort(tx); 3052 } 3053 3054 spa_strfree(oldvdpath); 3055 spa_strfree(newvdpath); 3056 3057 /* 3058 * Kick off a resilver to update newvd. 3059 */ 3060 VERIFY3U(spa_scrub(spa, POOL_SCRUB_RESILVER), ==, 0); 3061 3062 return (0); 3063 } 3064 3065 /* 3066 * Detach a device from a mirror or replacing vdev. 3067 * If 'replace_done' is specified, only detach if the parent 3068 * is a replacing vdev. 3069 */ 3070 int 3071 spa_vdev_detach(spa_t *spa, uint64_t guid, uint64_t pguid, int replace_done) 3072 { 3073 uint64_t txg; 3074 int error; 3075 vdev_t *rvd = spa->spa_root_vdev; 3076 vdev_t *vd, *pvd, *cvd, *tvd; 3077 boolean_t unspare = B_FALSE; 3078 uint64_t unspare_guid; 3079 size_t len; 3080 3081 txg = spa_vdev_enter(spa); 3082 3083 vd = spa_lookup_by_guid(spa, guid, B_FALSE); 3084 3085 if (vd == NULL) 3086 return (spa_vdev_exit(spa, NULL, txg, ENODEV)); 3087 3088 if (!vd->vdev_ops->vdev_op_leaf) 3089 return (spa_vdev_exit(spa, NULL, txg, ENOTSUP)); 3090 3091 pvd = vd->vdev_parent; 3092 3093 /* 3094 * If the parent/child relationship is not as expected, don't do it. 3095 * Consider M(A,R(B,C)) -- that is, a mirror of A with a replacing 3096 * vdev that's replacing B with C. 
The user's intent in replacing 3097 * is to go from M(A,B) to M(A,C). If the user decides to cancel 3098 * the replace by detaching C, the expected behavior is to end up 3099 * M(A,B). But suppose that right after deciding to detach C, 3100 * the replacement of B completes. We would have M(A,C), and then 3101 * ask to detach C, which would leave us with just A -- not what 3102 * the user wanted. To prevent this, we make sure that the 3103 * parent/child relationship hasn't changed -- in this example, 3104 * that C's parent is still the replacing vdev R. 3105 */ 3106 if (pvd->vdev_guid != pguid && pguid != 0) 3107 return (spa_vdev_exit(spa, NULL, txg, EBUSY)); 3108 3109 /* 3110 * If replace_done is specified, only remove this device if it's 3111 * the first child of a replacing vdev. For the 'spare' vdev, either 3112 * disk can be removed. 3113 */ 3114 if (replace_done) { 3115 if (pvd->vdev_ops == &vdev_replacing_ops) { 3116 if (vd->vdev_id != 0) 3117 return (spa_vdev_exit(spa, NULL, txg, ENOTSUP)); 3118 } else if (pvd->vdev_ops != &vdev_spare_ops) { 3119 return (spa_vdev_exit(spa, NULL, txg, ENOTSUP)); 3120 } 3121 } 3122 3123 ASSERT(pvd->vdev_ops != &vdev_spare_ops || 3124 spa_version(spa) >= SPA_VERSION_SPARES); 3125 3126 /* 3127 * Only mirror, replacing, and spare vdevs support detach. 3128 */ 3129 if (pvd->vdev_ops != &vdev_replacing_ops && 3130 pvd->vdev_ops != &vdev_mirror_ops && 3131 pvd->vdev_ops != &vdev_spare_ops) 3132 return (spa_vdev_exit(spa, NULL, txg, ENOTSUP)); 3133 3134 /* 3135 * If this device has the only valid copy of some data, 3136 * we cannot safely detach it. 3137 */ 3138 if (vdev_dtl_required(vd)) 3139 return (spa_vdev_exit(spa, NULL, txg, EBUSY)); 3140 3141 ASSERT(pvd->vdev_children >= 2); 3142 3143 /* 3144 * If we are detaching the second disk from a replacing vdev, then 3145 * check to see if we changed the original vdev's path to have "/old" 3146 * at the end in spa_vdev_attach(). If so, undo that change now. 3147 */ 3148 if (pvd->vdev_ops == &vdev_replacing_ops && vd->vdev_id == 1 && 3149 pvd->vdev_child[0]->vdev_path != NULL && 3150 pvd->vdev_child[1]->vdev_path != NULL) { 3151 ASSERT(pvd->vdev_child[1] == vd); 3152 cvd = pvd->vdev_child[0]; 3153 len = strlen(vd->vdev_path); 3154 if (strncmp(cvd->vdev_path, vd->vdev_path, len) == 0 && 3155 strcmp(cvd->vdev_path + len, "/old") == 0) { 3156 spa_strfree(cvd->vdev_path); 3157 cvd->vdev_path = spa_strdup(vd->vdev_path); 3158 } 3159 } 3160 3161 /* 3162 * If we are detaching the original disk from a spare, then it implies 3163 * that the spare should become a real disk, and be removed from the 3164 * active spare list for the pool. 3165 */ 3166 if (pvd->vdev_ops == &vdev_spare_ops && 3167 vd->vdev_id == 0 && pvd->vdev_child[1]->vdev_isspare) 3168 unspare = B_TRUE; 3169 3170 /* 3171 * Erase the disk labels so the disk can be used for other things. 3172 * This must be done after all other error cases are handled, 3173 * but before we disembowel vd (so we can still do I/O to it). 3174 * But if we can't do it, don't treat the error as fatal -- 3175 * it may be that the unwritability of the disk is the reason 3176 * it's being detached! 3177 */ 3178 error = vdev_label_init(vd, 0, VDEV_LABEL_REMOVE); 3179 3180 /* 3181 * Remove vd from its parent and compact the parent's children. 3182 */ 3183 vdev_remove_child(pvd, vd); 3184 vdev_compact_children(pvd); 3185 3186 /* 3187 * Remember one of the remaining children so we can get tvd below. 
3188 */ 3189 cvd = pvd->vdev_child[0]; 3190 3191 /* 3192 * If we need to remove the remaining child from the list of hot spares, 3193 * do it now, marking the vdev as no longer a spare in the process. 3194 * We must do this before vdev_remove_parent(), because that can 3195 * change the GUID if it creates a new toplevel GUID. For a similar 3196 * reason, we must remove the spare now, in the same txg as the detach; 3197 * otherwise someone could attach a new sibling, change the GUID, and 3198 * the subsequent attempt to spa_vdev_remove(unspare_guid) would fail. 3199 */ 3200 if (unspare) { 3201 ASSERT(cvd->vdev_isspare); 3202 spa_spare_remove(cvd); 3203 unspare_guid = cvd->vdev_guid; 3204 (void) spa_vdev_remove(spa, unspare_guid, B_TRUE); 3205 } 3206 3207 /* 3208 * If the parent mirror/replacing vdev only has one child, 3209 * the parent is no longer needed. Remove it from the tree. 3210 */ 3211 if (pvd->vdev_children == 1) 3212 vdev_remove_parent(cvd); 3213 3214 /* 3215 * We don't set tvd until now because the parent we just removed 3216 * may have been the previous top-level vdev. 3217 */ 3218 tvd = cvd->vdev_top; 3219 ASSERT(tvd->vdev_parent == rvd); 3220 3221 /* 3222 * Reevaluate the parent vdev state. 3223 */ 3224 vdev_propagate_state(cvd); 3225 3226 /* 3227 * If the device we just detached was smaller than the others, it may be 3228 * possible to add metaslabs (i.e. grow the pool). vdev_metaslab_init() 3229 * can't fail because the existing metaslabs are already in core, so 3230 * there's nothing to read from disk. 3231 */ 3232 VERIFY(vdev_metaslab_init(tvd, txg) == 0); 3233 3234 vdev_config_dirty(tvd); 3235 3236 /* 3237 * Mark vd's DTL as dirty in this txg. vdev_dtl_sync() will see that 3238 * vd->vdev_detached is set and free vd's DTL object in syncing context. 3239 * But first make sure we're not on any *other* txg's DTL list, to 3240 * prevent vd from being accessed after it's freed. 3241 */ 3242 for (int t = 0; t < TXG_SIZE; t++) 3243 (void) txg_list_remove_this(&tvd->vdev_dtl_list, vd, t); 3244 vd->vdev_detached = B_TRUE; 3245 vdev_dirty(tvd, VDD_DTL, vd, txg); 3246 3247 spa_event_notify(spa, vd, ESC_ZFS_VDEV_REMOVE); 3248 3249 error = spa_vdev_exit(spa, vd, txg, 0); 3250 3251 /* 3252 * If this was the removal of the original device in a hot spare vdev, 3253 * then we want to go through and remove the device from the hot spare 3254 * list of every other pool. 
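	 *
	 * (A hot spare may be listed as a spare by several pools at once.
	 * Since this disk has just been promoted to a full member of this
	 * pool, it can no longer serve anyone else as a spare, so we walk
	 * the namespace below and pull it from every other pool's spare
	 * list.)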
3255 */ 3256 if (unspare) { 3257 spa_t *myspa = spa; 3258 spa = NULL; 3259 mutex_enter(&spa_namespace_lock); 3260 while ((spa = spa_next(spa)) != NULL) { 3261 if (spa->spa_state != POOL_STATE_ACTIVE) 3262 continue; 3263 if (spa == myspa) 3264 continue; 3265 spa_open_ref(spa, FTAG); 3266 mutex_exit(&spa_namespace_lock); 3267 (void) spa_vdev_remove(spa, unspare_guid, B_TRUE); 3268 mutex_enter(&spa_namespace_lock); 3269 spa_close(spa, FTAG); 3270 } 3271 mutex_exit(&spa_namespace_lock); 3272 } 3273 3274 return (error); 3275 } 3276 3277 static nvlist_t * 3278 spa_nvlist_lookup_by_guid(nvlist_t **nvpp, int count, uint64_t target_guid) 3279 { 3280 for (int i = 0; i < count; i++) { 3281 uint64_t guid; 3282 3283 VERIFY(nvlist_lookup_uint64(nvpp[i], ZPOOL_CONFIG_GUID, 3284 &guid) == 0); 3285 3286 if (guid == target_guid) 3287 return (nvpp[i]); 3288 } 3289 3290 return (NULL); 3291 } 3292 3293 static void 3294 spa_vdev_remove_aux(nvlist_t *config, char *name, nvlist_t **dev, int count, 3295 nvlist_t *dev_to_remove) 3296 { 3297 nvlist_t **newdev = NULL; 3298 3299 if (count > 1) 3300 newdev = kmem_alloc((count - 1) * sizeof (void *), KM_SLEEP); 3301 3302 for (int i = 0, j = 0; i < count; i++) { 3303 if (dev[i] == dev_to_remove) 3304 continue; 3305 VERIFY(nvlist_dup(dev[i], &newdev[j++], KM_SLEEP) == 0); 3306 } 3307 3308 VERIFY(nvlist_remove(config, name, DATA_TYPE_NVLIST_ARRAY) == 0); 3309 VERIFY(nvlist_add_nvlist_array(config, name, newdev, count - 1) == 0); 3310 3311 for (int i = 0; i < count - 1; i++) 3312 nvlist_free(newdev[i]); 3313 3314 if (count > 1) 3315 kmem_free(newdev, (count - 1) * sizeof (void *)); 3316 } 3317 3318 /* 3319 * Remove a device from the pool. Currently, this supports removing only hot 3320 * spares and level 2 ARC devices. 3321 */ 3322 int 3323 spa_vdev_remove(spa_t *spa, uint64_t guid, boolean_t unspare) 3324 { 3325 vdev_t *vd; 3326 nvlist_t **spares, **l2cache, *nv; 3327 uint_t nspares, nl2cache; 3328 uint64_t txg = 0; 3329 int error = 0; 3330 boolean_t locked = MUTEX_HELD(&spa_namespace_lock); 3331 3332 if (!locked) 3333 txg = spa_vdev_enter(spa); 3334 3335 vd = spa_lookup_by_guid(spa, guid, B_FALSE); 3336 3337 if (spa->spa_spares.sav_vdevs != NULL && 3338 nvlist_lookup_nvlist_array(spa->spa_spares.sav_config, 3339 ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0 && 3340 (nv = spa_nvlist_lookup_by_guid(spares, nspares, guid)) != NULL) { 3341 /* 3342 * Only remove the hot spare if it's not currently in use 3343 * in this pool. 3344 */ 3345 if (vd == NULL || unspare) { 3346 spa_vdev_remove_aux(spa->spa_spares.sav_config, 3347 ZPOOL_CONFIG_SPARES, spares, nspares, nv); 3348 spa_load_spares(spa); 3349 spa->spa_spares.sav_sync = B_TRUE; 3350 } else { 3351 error = EBUSY; 3352 } 3353 } else if (spa->spa_l2cache.sav_vdevs != NULL && 3354 nvlist_lookup_nvlist_array(spa->spa_l2cache.sav_config, 3355 ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0 && 3356 (nv = spa_nvlist_lookup_by_guid(l2cache, nl2cache, guid)) != NULL) { 3357 /* 3358 * Cache devices can always be removed. 3359 */ 3360 spa_vdev_remove_aux(spa->spa_l2cache.sav_config, 3361 ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache, nv); 3362 spa_load_l2cache(spa); 3363 spa->spa_l2cache.sav_sync = B_TRUE; 3364 } else if (vd != NULL) { 3365 /* 3366 * Normal vdevs cannot be removed (yet). 3367 */ 3368 error = ENOTSUP; 3369 } else { 3370 /* 3371 * There is no vdev of any kind with the specified guid. 
3372 		 */
3373 		error = ENOENT;
3374 	}
3375 
3376 	if (!locked)
3377 		return (spa_vdev_exit(spa, NULL, txg, error));
3378 
3379 	return (error);
3380 }
3381 
3382 /*
3383  * Find any device that's done replacing, or a vdev marked 'unspare' that's
3384  * currently spared, so we can detach it.
3385  */
3386 static vdev_t *
3387 spa_vdev_resilver_done_hunt(vdev_t *vd)
3388 {
3389 	vdev_t *newvd, *oldvd;
3390 	int c;
3391 
3392 	for (c = 0; c < vd->vdev_children; c++) {
3393 		oldvd = spa_vdev_resilver_done_hunt(vd->vdev_child[c]);
3394 		if (oldvd != NULL)
3395 			return (oldvd);
3396 	}
3397 
3398 	/*
3399 	 * Check for a completed replacement.
3400 	 */
3401 	if (vd->vdev_ops == &vdev_replacing_ops && vd->vdev_children == 2) {
3402 		oldvd = vd->vdev_child[0];
3403 		newvd = vd->vdev_child[1];
3404 
3405 		if (vdev_dtl_empty(newvd, DTL_MISSING) &&
3406 		    !vdev_dtl_required(oldvd))
3407 			return (oldvd);
3408 	}
3409 
3410 	/*
3411 	 * Check for a completed resilver with the 'unspare' flag set.
3412 	 */
3413 	if (vd->vdev_ops == &vdev_spare_ops && vd->vdev_children == 2) {
3414 		newvd = vd->vdev_child[0];
3415 		oldvd = vd->vdev_child[1];
3416 
3417 		if (newvd->vdev_unspare &&
3418 		    vdev_dtl_empty(newvd, DTL_MISSING) &&
3419 		    !vdev_dtl_required(oldvd)) {
3420 			newvd->vdev_unspare = 0;
3421 			return (oldvd);
3422 		}
3423 	}
3424 
3425 	return (NULL);
3426 }
3427 
3428 static void
3429 spa_vdev_resilver_done(spa_t *spa)
3430 {
3431 	vdev_t *vd, *pvd, *ppvd;
3432 	uint64_t guid, sguid, pguid, ppguid;
3433 
3434 	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
3435 
3436 	while ((vd = spa_vdev_resilver_done_hunt(spa->spa_root_vdev)) != NULL) {
3437 		pvd = vd->vdev_parent;
3438 		ppvd = pvd->vdev_parent;
3439 		guid = vd->vdev_guid;
3440 		pguid = pvd->vdev_guid;
3441 		ppguid = ppvd->vdev_guid;
3442 		sguid = 0;
3443 		/*
3444 		 * If we have just finished replacing a hot spared device, then
3445 		 * we need to detach the parent's first child (the original hot
3446 		 * spare) as well.
3447 		 */
3448 		if (ppvd->vdev_ops == &vdev_spare_ops && pvd->vdev_id == 0) {
3449 			ASSERT(pvd->vdev_ops == &vdev_replacing_ops);
3450 			ASSERT(ppvd->vdev_children == 2);
3451 			sguid = ppvd->vdev_child[1]->vdev_guid;
3452 		}
3453 		spa_config_exit(spa, SCL_ALL, FTAG);
3454 		if (spa_vdev_detach(spa, guid, pguid, B_TRUE) != 0)
3455 			return;
3456 		if (sguid && spa_vdev_detach(spa, sguid, ppguid, B_TRUE) != 0)
3457 			return;
3458 		spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
3459 	}
3460 
3461 	spa_config_exit(spa, SCL_ALL, FTAG);
3462 }
3463 
3464 /*
3465  * Update the stored path for this vdev. Dirty the vdev configuration, relying
3466  * on spa_vdev_enter/exit() to synchronize the labels and cache.
3467  */
3468 int
3469 spa_vdev_setpath(spa_t *spa, uint64_t guid, const char *newpath)
3470 {
3471 	vdev_t *vd;
3472 	uint64_t txg;
3473 
3474 	txg = spa_vdev_enter(spa);
3475 
3476 	if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL) {
3477 		/*
3478 		 * Determine if this is a reference to a hot spare device. If
3479 		 * it is, update the path manually as there is no associated
3480 		 * vdev_t that can be synced to disk.
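		 *
		 * (The on-disk record for such a spare comes from the nvlists
		 * in spa_spares.sav_config rather than from a vdev_t in the
		 * pool's vdev tree, so we patch ZPOOL_CONFIG_PATH in that
		 * nvlist, reload the spares from it, and set sav_sync so the
		 * next sync rewrites the MOS spare list.)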
3481 */ 3482 nvlist_t **spares; 3483 uint_t i, nspares; 3484 3485 if (spa->spa_spares.sav_config != NULL) { 3486 VERIFY(nvlist_lookup_nvlist_array( 3487 spa->spa_spares.sav_config, ZPOOL_CONFIG_SPARES, 3488 &spares, &nspares) == 0); 3489 for (i = 0; i < nspares; i++) { 3490 uint64_t theguid; 3491 VERIFY(nvlist_lookup_uint64(spares[i], 3492 ZPOOL_CONFIG_GUID, &theguid) == 0); 3493 if (theguid == guid) { 3494 VERIFY(nvlist_add_string(spares[i], 3495 ZPOOL_CONFIG_PATH, newpath) == 0); 3496 spa_load_spares(spa); 3497 spa->spa_spares.sav_sync = B_TRUE; 3498 return (spa_vdev_exit(spa, NULL, txg, 3499 0)); 3500 } 3501 } 3502 } 3503 3504 return (spa_vdev_exit(spa, NULL, txg, ENOENT)); 3505 } 3506 3507 if (!vd->vdev_ops->vdev_op_leaf) 3508 return (spa_vdev_exit(spa, NULL, txg, ENOTSUP)); 3509 3510 spa_strfree(vd->vdev_path); 3511 vd->vdev_path = spa_strdup(newpath); 3512 3513 vdev_config_dirty(vd->vdev_top); 3514 3515 return (spa_vdev_exit(spa, NULL, txg, 0)); 3516 } 3517 3518 /* 3519 * ========================================================================== 3520 * SPA Scrubbing 3521 * ========================================================================== 3522 */ 3523 3524 int 3525 spa_scrub(spa_t *spa, pool_scrub_type_t type) 3526 { 3527 ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == 0); 3528 3529 if ((uint_t)type >= POOL_SCRUB_TYPES) 3530 return (ENOTSUP); 3531 3532 /* 3533 * If a resilver was requested, but there is no DTL on a 3534 * writeable leaf device, we have nothing to do. 3535 */ 3536 if (type == POOL_SCRUB_RESILVER && 3537 !vdev_resilver_needed(spa->spa_root_vdev, NULL, NULL)) { 3538 spa_async_request(spa, SPA_ASYNC_RESILVER_DONE); 3539 return (0); 3540 } 3541 3542 if (type == POOL_SCRUB_EVERYTHING && 3543 spa->spa_dsl_pool->dp_scrub_func != SCRUB_FUNC_NONE && 3544 spa->spa_dsl_pool->dp_scrub_isresilver) 3545 return (EBUSY); 3546 3547 if (type == POOL_SCRUB_EVERYTHING || type == POOL_SCRUB_RESILVER) { 3548 return (dsl_pool_scrub_clean(spa->spa_dsl_pool)); 3549 } else if (type == POOL_SCRUB_NONE) { 3550 return (dsl_pool_scrub_cancel(spa->spa_dsl_pool)); 3551 } else { 3552 return (EINVAL); 3553 } 3554 } 3555 3556 /* 3557 * ========================================================================== 3558 * SPA async task processing 3559 * ========================================================================== 3560 */ 3561 3562 static void 3563 spa_async_remove(spa_t *spa, vdev_t *vd) 3564 { 3565 if (vd->vdev_remove_wanted) { 3566 vd->vdev_remove_wanted = 0; 3567 vdev_set_state(vd, B_FALSE, VDEV_STATE_REMOVED, VDEV_AUX_NONE); 3568 vdev_clear(spa, vd); 3569 vdev_state_dirty(vd->vdev_top); 3570 } 3571 3572 for (int c = 0; c < vd->vdev_children; c++) 3573 spa_async_remove(spa, vd->vdev_child[c]); 3574 } 3575 3576 static void 3577 spa_async_probe(spa_t *spa, vdev_t *vd) 3578 { 3579 if (vd->vdev_probe_wanted) { 3580 vd->vdev_probe_wanted = 0; 3581 vdev_reopen(vd); /* vdev_open() does the actual probe */ 3582 } 3583 3584 for (int c = 0; c < vd->vdev_children; c++) 3585 spa_async_probe(spa, vd->vdev_child[c]); 3586 } 3587 3588 static void 3589 spa_async_thread(spa_t *spa) 3590 { 3591 int tasks; 3592 3593 ASSERT(spa->spa_sync_on); 3594 3595 mutex_enter(&spa->spa_async_lock); 3596 tasks = spa->spa_async_tasks; 3597 spa->spa_async_tasks = 0; 3598 mutex_exit(&spa->spa_async_lock); 3599 3600 /* 3601 * See if the config needs to be updated. 
3602 */ 3603 if (tasks & SPA_ASYNC_CONFIG_UPDATE) { 3604 mutex_enter(&spa_namespace_lock); 3605 spa_config_update(spa, SPA_CONFIG_UPDATE_POOL); 3606 mutex_exit(&spa_namespace_lock); 3607 } 3608 3609 /* 3610 * See if any devices need to be marked REMOVED. 3611 */ 3612 if (tasks & SPA_ASYNC_REMOVE) { 3613 spa_vdev_state_enter(spa); 3614 spa_async_remove(spa, spa->spa_root_vdev); 3615 for (int i = 0; i < spa->spa_l2cache.sav_count; i++) 3616 spa_async_remove(spa, spa->spa_l2cache.sav_vdevs[i]); 3617 for (int i = 0; i < spa->spa_spares.sav_count; i++) 3618 spa_async_remove(spa, spa->spa_spares.sav_vdevs[i]); 3619 (void) spa_vdev_state_exit(spa, NULL, 0); 3620 } 3621 3622 /* 3623 * See if any devices need to be probed. 3624 */ 3625 if (tasks & SPA_ASYNC_PROBE) { 3626 spa_vdev_state_enter(spa); 3627 spa_async_probe(spa, spa->spa_root_vdev); 3628 (void) spa_vdev_state_exit(spa, NULL, 0); 3629 } 3630 3631 /* 3632 * If any devices are done replacing, detach them. 3633 */ 3634 if (tasks & SPA_ASYNC_RESILVER_DONE) 3635 spa_vdev_resilver_done(spa); 3636 3637 /* 3638 * Kick off a resilver. 3639 */ 3640 if (tasks & SPA_ASYNC_RESILVER) 3641 VERIFY(spa_scrub(spa, POOL_SCRUB_RESILVER) == 0); 3642 3643 /* 3644 * Let the world know that we're done. 3645 */ 3646 mutex_enter(&spa->spa_async_lock); 3647 spa->spa_async_thread = NULL; 3648 cv_broadcast(&spa->spa_async_cv); 3649 mutex_exit(&spa->spa_async_lock); 3650 thread_exit(); 3651 } 3652 3653 void 3654 spa_async_suspend(spa_t *spa) 3655 { 3656 mutex_enter(&spa->spa_async_lock); 3657 spa->spa_async_suspended++; 3658 while (spa->spa_async_thread != NULL) 3659 cv_wait(&spa->spa_async_cv, &spa->spa_async_lock); 3660 mutex_exit(&spa->spa_async_lock); 3661 } 3662 3663 void 3664 spa_async_resume(spa_t *spa) 3665 { 3666 mutex_enter(&spa->spa_async_lock); 3667 ASSERT(spa->spa_async_suspended != 0); 3668 spa->spa_async_suspended--; 3669 mutex_exit(&spa->spa_async_lock); 3670 } 3671 3672 static void 3673 spa_async_dispatch(spa_t *spa) 3674 { 3675 mutex_enter(&spa->spa_async_lock); 3676 if (spa->spa_async_tasks && !spa->spa_async_suspended && 3677 spa->spa_async_thread == NULL && 3678 rootdir != NULL && !vn_is_readonly(rootdir)) 3679 spa->spa_async_thread = thread_create(NULL, 0, 3680 spa_async_thread, spa, 0, &p0, TS_RUN, maxclsyspri); 3681 mutex_exit(&spa->spa_async_lock); 3682 } 3683 3684 void 3685 spa_async_request(spa_t *spa, int task) 3686 { 3687 mutex_enter(&spa->spa_async_lock); 3688 spa->spa_async_tasks |= task; 3689 mutex_exit(&spa->spa_async_lock); 3690 } 3691 3692 /* 3693 * ========================================================================== 3694 * SPA syncing routines 3695 * ========================================================================== 3696 */ 3697 3698 static void 3699 spa_sync_deferred_frees(spa_t *spa, uint64_t txg) 3700 { 3701 bplist_t *bpl = &spa->spa_sync_bplist; 3702 dmu_tx_t *tx; 3703 blkptr_t blk; 3704 uint64_t itor = 0; 3705 zio_t *zio; 3706 int error; 3707 uint8_t c = 1; 3708 3709 zio = zio_root(spa, NULL, NULL, ZIO_FLAG_CANFAIL); 3710 3711 while (bplist_iterate(bpl, &itor, &blk) == 0) { 3712 ASSERT(blk.blk_birth < txg); 3713 zio_nowait(zio_free(zio, spa, txg, &blk, NULL, NULL, 3714 ZIO_FLAG_MUSTSUCCEED)); 3715 } 3716 3717 error = zio_wait(zio); 3718 ASSERT3U(error, ==, 0); 3719 3720 tx = dmu_tx_create_assigned(spa->spa_dsl_pool, txg); 3721 bplist_vacate(bpl, tx); 3722 3723 /* 3724 * Pre-dirty the first block so we sync to convergence faster. 3725 * (Usually only the first block is needed.) 
3726 */ 3727 dmu_write(spa->spa_meta_objset, spa->spa_sync_bplist_obj, 0, 1, &c, tx); 3728 dmu_tx_commit(tx); 3729 } 3730 3731 static void 3732 spa_sync_nvlist(spa_t *spa, uint64_t obj, nvlist_t *nv, dmu_tx_t *tx) 3733 { 3734 char *packed = NULL; 3735 size_t bufsize; 3736 size_t nvsize = 0; 3737 dmu_buf_t *db; 3738 3739 VERIFY(nvlist_size(nv, &nvsize, NV_ENCODE_XDR) == 0); 3740 3741 /* 3742 * Write full (SPA_CONFIG_BLOCKSIZE) blocks of configuration 3743 * information. This avoids the dbuf_will_dirty() path and 3744 * saves us a pre-read to get data we don't actually care about. 3745 */ 3746 bufsize = P2ROUNDUP(nvsize, SPA_CONFIG_BLOCKSIZE); 3747 packed = kmem_alloc(bufsize, KM_SLEEP); 3748 3749 VERIFY(nvlist_pack(nv, &packed, &nvsize, NV_ENCODE_XDR, 3750 KM_SLEEP) == 0); 3751 bzero(packed + nvsize, bufsize - nvsize); 3752 3753 dmu_write(spa->spa_meta_objset, obj, 0, bufsize, packed, tx); 3754 3755 kmem_free(packed, bufsize); 3756 3757 VERIFY(0 == dmu_bonus_hold(spa->spa_meta_objset, obj, FTAG, &db)); 3758 dmu_buf_will_dirty(db, tx); 3759 *(uint64_t *)db->db_data = nvsize; 3760 dmu_buf_rele(db, FTAG); 3761 } 3762 3763 static void 3764 spa_sync_aux_dev(spa_t *spa, spa_aux_vdev_t *sav, dmu_tx_t *tx, 3765 const char *config, const char *entry) 3766 { 3767 nvlist_t *nvroot; 3768 nvlist_t **list; 3769 int i; 3770 3771 if (!sav->sav_sync) 3772 return; 3773 3774 /* 3775 * Update the MOS nvlist describing the list of available devices. 3776 * spa_validate_aux() will have already made sure this nvlist is 3777 * valid and the vdevs are labeled appropriately. 3778 */ 3779 if (sav->sav_object == 0) { 3780 sav->sav_object = dmu_object_alloc(spa->spa_meta_objset, 3781 DMU_OT_PACKED_NVLIST, 1 << 14, DMU_OT_PACKED_NVLIST_SIZE, 3782 sizeof (uint64_t), tx); 3783 VERIFY(zap_update(spa->spa_meta_objset, 3784 DMU_POOL_DIRECTORY_OBJECT, entry, sizeof (uint64_t), 1, 3785 &sav->sav_object, tx) == 0); 3786 } 3787 3788 VERIFY(nvlist_alloc(&nvroot, NV_UNIQUE_NAME, KM_SLEEP) == 0); 3789 if (sav->sav_count == 0) { 3790 VERIFY(nvlist_add_nvlist_array(nvroot, config, NULL, 0) == 0); 3791 } else { 3792 list = kmem_alloc(sav->sav_count * sizeof (void *), KM_SLEEP); 3793 for (i = 0; i < sav->sav_count; i++) 3794 list[i] = vdev_config_generate(spa, sav->sav_vdevs[i], 3795 B_FALSE, B_FALSE, B_TRUE); 3796 VERIFY(nvlist_add_nvlist_array(nvroot, config, list, 3797 sav->sav_count) == 0); 3798 for (i = 0; i < sav->sav_count; i++) 3799 nvlist_free(list[i]); 3800 kmem_free(list, sav->sav_count * sizeof (void *)); 3801 } 3802 3803 spa_sync_nvlist(spa, sav->sav_object, nvroot, tx); 3804 nvlist_free(nvroot); 3805 3806 sav->sav_sync = B_FALSE; 3807 } 3808 3809 static void 3810 spa_sync_config_object(spa_t *spa, dmu_tx_t *tx) 3811 { 3812 nvlist_t *config; 3813 3814 if (list_is_empty(&spa->spa_config_dirty_list)) 3815 return; 3816 3817 spa_config_enter(spa, SCL_STATE, FTAG, RW_READER); 3818 3819 config = spa_config_generate(spa, spa->spa_root_vdev, 3820 dmu_tx_get_txg(tx), B_FALSE); 3821 3822 spa_config_exit(spa, SCL_STATE, FTAG); 3823 3824 if (spa->spa_config_syncing) 3825 nvlist_free(spa->spa_config_syncing); 3826 spa->spa_config_syncing = config; 3827 3828 spa_sync_nvlist(spa, spa->spa_config_object, config, tx); 3829 } 3830 3831 /* 3832 * Set zpool properties. 
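 *
 * This runs in syncing context: spa_create() calls it directly with the
 * pool-creation tx, and spa_prop_set() reaches it as a DSL sync task, which
 * is why it has the (arg1, arg2, cr, tx) callback signature.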
3833  */
3834 static void
3835 spa_sync_props(void *arg1, void *arg2, cred_t *cr, dmu_tx_t *tx)
3836 {
3837 	spa_t *spa = arg1;
3838 	objset_t *mos = spa->spa_meta_objset;
3839 	nvlist_t *nvp = arg2;
3840 	nvpair_t *elem;
3841 	uint64_t intval;
3842 	char *strval;
3843 	zpool_prop_t prop;
3844 	const char *propname;
3845 	zprop_type_t proptype;
3846 
3847 	mutex_enter(&spa->spa_props_lock);
3848 
3849 	elem = NULL;
3850 	while ((elem = nvlist_next_nvpair(nvp, elem))) {
3851 		switch (prop = zpool_name_to_prop(nvpair_name(elem))) {
3852 		case ZPOOL_PROP_VERSION:
3853 			/*
3854 			 * Only set version for non-zpool-creation cases
3855 			 * (set/import). spa_create() needs special care
3856 			 * for version setting.
3857 			 */
3858 			if (tx->tx_txg != TXG_INITIAL) {
3859 				VERIFY(nvpair_value_uint64(elem,
3860 				    &intval) == 0);
3861 				ASSERT(intval <= SPA_VERSION);
3862 				ASSERT(intval >= spa_version(spa));
3863 				spa->spa_uberblock.ub_version = intval;
3864 				vdev_config_dirty(spa->spa_root_vdev);
3865 			}
3866 			break;
3867 
3868 		case ZPOOL_PROP_ALTROOT:
3869 			/*
3870 			 * 'altroot' is a non-persistent property. It should
3871 			 * have been set temporarily at creation or import time.
3872 			 */
3873 			ASSERT(spa->spa_root != NULL);
3874 			break;
3875 
3876 		case ZPOOL_PROP_CACHEFILE:
3877 			/*
3878 			 * 'cachefile' is also a non-persistent property.
3879 			 */
3880 			break;
3881 		default:
3882 			/*
3883 			 * Set pool property values in the poolprops mos object.
3884 			 */
3885 			if (spa->spa_pool_props_object == 0) {
3886 				objset_t *mos = spa->spa_meta_objset;
3887 
3888 				VERIFY((spa->spa_pool_props_object =
3889 				    zap_create(mos, DMU_OT_POOL_PROPS,
3890 				    DMU_OT_NONE, 0, tx)) > 0);
3891 
3892 				VERIFY(zap_update(mos,
3893 				    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_PROPS,
3894 				    8, 1, &spa->spa_pool_props_object, tx)
3895 				    == 0);
3896 			}
3897 
3898 			/* normalize the property name */
3899 			propname = zpool_prop_to_name(prop);
3900 			proptype = zpool_prop_get_type(prop);
3901 
3902 			if (nvpair_type(elem) == DATA_TYPE_STRING) {
3903 				ASSERT(proptype == PROP_TYPE_STRING);
3904 				VERIFY(nvpair_value_string(elem, &strval) == 0);
3905 				VERIFY(zap_update(mos,
3906 				    spa->spa_pool_props_object, propname,
3907 				    1, strlen(strval) + 1, strval, tx) == 0);
3908 
3909 			} else if (nvpair_type(elem) == DATA_TYPE_UINT64) {
3910 				VERIFY(nvpair_value_uint64(elem, &intval) == 0);
3911 
3912 				if (proptype == PROP_TYPE_INDEX) {
3913 					const char *unused;
3914 					VERIFY(zpool_prop_index_to_string(
3915 					    prop, intval, &unused) == 0);
3916 				}
3917 				VERIFY(zap_update(mos,
3918 				    spa->spa_pool_props_object, propname,
3919 				    8, 1, &intval, tx) == 0);
3920 			} else {
3921 				ASSERT(0); /* not allowed */
3922 			}
3923 
3924 			switch (prop) {
3925 			case ZPOOL_PROP_DELEGATION:
3926 				spa->spa_delegation = intval;
3927 				break;
3928 			case ZPOOL_PROP_BOOTFS:
3929 				spa->spa_bootfs = intval;
3930 				break;
3931 			case ZPOOL_PROP_FAILUREMODE:
3932 				spa->spa_failmode = intval;
3933 				break;
3934 			default:
3935 				break;
3936 			}
3937 		}
3938 
3939 		/* log internal history if this is not a zpool create */
3940 		if (spa_version(spa) >= SPA_VERSION_ZPOOL_HISTORY &&
3941 		    tx->tx_txg != TXG_INITIAL) {
3942 			spa_history_internal_log(LOG_POOL_PROPSET,
3943 			    spa, tx, cr, "%s %lld %s",
3944 			    nvpair_name(elem), intval, spa_name(spa));
3945 		}
3946 	}
3947 
3948 	mutex_exit(&spa->spa_props_lock);
3949 }
3950 
3951 /*
3952  * Sync the specified transaction group. New blocks may be dirtied as
3953  * part of the process, so we iterate until it converges.
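 *
 * Roughly, each pass writes out whatever is dirty in this txg (datasets,
 * the MOS config, space maps), and writing those blocks dirties a smaller
 * amount of new metadata; we keep passing until a pass dirties no vdevs,
 * then rewrite the labels and uberblock to commit the txg.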
3954 */ 3955 void 3956 spa_sync(spa_t *spa, uint64_t txg) 3957 { 3958 dsl_pool_t *dp = spa->spa_dsl_pool; 3959 objset_t *mos = spa->spa_meta_objset; 3960 bplist_t *bpl = &spa->spa_sync_bplist; 3961 vdev_t *rvd = spa->spa_root_vdev; 3962 vdev_t *vd; 3963 dmu_tx_t *tx; 3964 int dirty_vdevs; 3965 int error; 3966 3967 /* 3968 * Lock out configuration changes. 3969 */ 3970 spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER); 3971 3972 spa->spa_syncing_txg = txg; 3973 spa->spa_sync_pass = 0; 3974 3975 /* 3976 * If there are any pending vdev state changes, convert them 3977 * into config changes that go out with this transaction group. 3978 */ 3979 spa_config_enter(spa, SCL_STATE, FTAG, RW_READER); 3980 while (list_head(&spa->spa_state_dirty_list) != NULL) { 3981 /* 3982 * We need the write lock here because, for aux vdevs, 3983 * calling vdev_config_dirty() modifies sav_config. 3984 * This is ugly and will become unnecessary when we 3985 * eliminate the aux vdev wart by integrating all vdevs 3986 * into the root vdev tree. 3987 */ 3988 spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG); 3989 spa_config_enter(spa, SCL_CONFIG | SCL_STATE, FTAG, RW_WRITER); 3990 while ((vd = list_head(&spa->spa_state_dirty_list)) != NULL) { 3991 vdev_state_clean(vd); 3992 vdev_config_dirty(vd); 3993 } 3994 spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG); 3995 spa_config_enter(spa, SCL_CONFIG | SCL_STATE, FTAG, RW_READER); 3996 } 3997 spa_config_exit(spa, SCL_STATE, FTAG); 3998 3999 VERIFY(0 == bplist_open(bpl, mos, spa->spa_sync_bplist_obj)); 4000 4001 tx = dmu_tx_create_assigned(dp, txg); 4002 4003 /* 4004 * If we are upgrading to SPA_VERSION_RAIDZ_DEFLATE this txg, 4005 * set spa_deflate if we have no raid-z vdevs. 4006 */ 4007 if (spa->spa_ubsync.ub_version < SPA_VERSION_RAIDZ_DEFLATE && 4008 spa->spa_uberblock.ub_version >= SPA_VERSION_RAIDZ_DEFLATE) { 4009 int i; 4010 4011 for (i = 0; i < rvd->vdev_children; i++) { 4012 vd = rvd->vdev_child[i]; 4013 if (vd->vdev_deflate_ratio != SPA_MINBLOCKSIZE) 4014 break; 4015 } 4016 if (i == rvd->vdev_children) { 4017 spa->spa_deflate = TRUE; 4018 VERIFY(0 == zap_add(spa->spa_meta_objset, 4019 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_DEFLATE, 4020 sizeof (uint64_t), 1, &spa->spa_deflate, tx)); 4021 } 4022 } 4023 4024 if (spa->spa_ubsync.ub_version < SPA_VERSION_ORIGIN && 4025 spa->spa_uberblock.ub_version >= SPA_VERSION_ORIGIN) { 4026 dsl_pool_create_origin(dp, tx); 4027 4028 /* Keeping the origin open increases spa_minref */ 4029 spa->spa_minref += 3; 4030 } 4031 4032 if (spa->spa_ubsync.ub_version < SPA_VERSION_NEXT_CLONES && 4033 spa->spa_uberblock.ub_version >= SPA_VERSION_NEXT_CLONES) { 4034 dsl_pool_upgrade_clones(dp, tx); 4035 } 4036 4037 /* 4038 * If anything has changed in this txg, push the deferred frees 4039 * from the previous txg. If not, leave them alone so that we 4040 * don't generate work on an otherwise idle system. 4041 */ 4042 if (!txg_list_empty(&dp->dp_dirty_datasets, txg) || 4043 !txg_list_empty(&dp->dp_dirty_dirs, txg) || 4044 !txg_list_empty(&dp->dp_sync_tasks, txg)) 4045 spa_sync_deferred_frees(spa, txg); 4046 4047 /* 4048 * Iterate to convergence. 
4049 */ 4050 do { 4051 spa->spa_sync_pass++; 4052 4053 spa_sync_config_object(spa, tx); 4054 spa_sync_aux_dev(spa, &spa->spa_spares, tx, 4055 ZPOOL_CONFIG_SPARES, DMU_POOL_SPARES); 4056 spa_sync_aux_dev(spa, &spa->spa_l2cache, tx, 4057 ZPOOL_CONFIG_L2CACHE, DMU_POOL_L2CACHE); 4058 spa_errlog_sync(spa, txg); 4059 dsl_pool_sync(dp, txg); 4060 4061 dirty_vdevs = 0; 4062 while (vd = txg_list_remove(&spa->spa_vdev_txg_list, txg)) { 4063 vdev_sync(vd, txg); 4064 dirty_vdevs++; 4065 } 4066 4067 bplist_sync(bpl, tx); 4068 } while (dirty_vdevs); 4069 4070 bplist_close(bpl); 4071 4072 dprintf("txg %llu passes %d\n", txg, spa->spa_sync_pass); 4073 4074 /* 4075 * Rewrite the vdev configuration (which includes the uberblock) 4076 * to commit the transaction group. 4077 * 4078 * If there are no dirty vdevs, we sync the uberblock to a few 4079 * random top-level vdevs that are known to be visible in the 4080 * config cache (see spa_vdev_add() for a complete description). 4081 * If there *are* dirty vdevs, sync the uberblock to all vdevs. 4082 */ 4083 for (;;) { 4084 /* 4085 * We hold SCL_STATE to prevent vdev open/close/etc. 4086 * while we're attempting to write the vdev labels. 4087 */ 4088 spa_config_enter(spa, SCL_STATE, FTAG, RW_READER); 4089 4090 if (list_is_empty(&spa->spa_config_dirty_list)) { 4091 vdev_t *svd[SPA_DVAS_PER_BP]; 4092 int svdcount = 0; 4093 int children = rvd->vdev_children; 4094 int c0 = spa_get_random(children); 4095 int c; 4096 4097 for (c = 0; c < children; c++) { 4098 vd = rvd->vdev_child[(c0 + c) % children]; 4099 if (vd->vdev_ms_array == 0 || vd->vdev_islog) 4100 continue; 4101 svd[svdcount++] = vd; 4102 if (svdcount == SPA_DVAS_PER_BP) 4103 break; 4104 } 4105 error = vdev_config_sync(svd, svdcount, txg); 4106 } else { 4107 error = vdev_config_sync(rvd->vdev_child, 4108 rvd->vdev_children, txg); 4109 } 4110 4111 spa_config_exit(spa, SCL_STATE, FTAG); 4112 4113 if (error == 0) 4114 break; 4115 zio_suspend(spa, NULL); 4116 zio_resume_wait(spa); 4117 } 4118 dmu_tx_commit(tx); 4119 4120 /* 4121 * Clear the dirty config list. 4122 */ 4123 while ((vd = list_head(&spa->spa_config_dirty_list)) != NULL) 4124 vdev_config_clean(vd); 4125 4126 /* 4127 * Now that the new config has synced transactionally, 4128 * let it become visible to the config cache. 4129 */ 4130 if (spa->spa_config_syncing != NULL) { 4131 spa_config_set(spa, spa->spa_config_syncing); 4132 spa->spa_config_txg = txg; 4133 spa->spa_config_syncing = NULL; 4134 } 4135 4136 spa->spa_ubsync = spa->spa_uberblock; 4137 4138 /* 4139 * Clean up the ZIL records for the synced txg. 4140 */ 4141 dsl_pool_zil_clean(dp); 4142 4143 /* 4144 * Update usable space statistics. 4145 */ 4146 while (vd = txg_list_remove(&spa->spa_vdev_txg_list, TXG_CLEAN(txg))) 4147 vdev_sync_done(vd, txg); 4148 4149 /* 4150 * It had better be the case that we didn't dirty anything 4151 * since vdev_config_sync(). 4152 */ 4153 ASSERT(txg_list_empty(&dp->dp_dirty_datasets, txg)); 4154 ASSERT(txg_list_empty(&dp->dp_dirty_dirs, txg)); 4155 ASSERT(txg_list_empty(&spa->spa_vdev_txg_list, txg)); 4156 ASSERT(bpl->bpl_queue == NULL); 4157 4158 spa_config_exit(spa, SCL_CONFIG, FTAG); 4159 4160 /* 4161 * If any async tasks have been requested, kick them off. 4162 */ 4163 spa_async_dispatch(spa); 4164 } 4165 4166 /* 4167 * Sync all pools. We don't want to hold the namespace lock across these 4168 * operations, so we take a reference on the spa_t and drop the lock during the 4169 * sync. 
4170 */ 4171 void 4172 spa_sync_allpools(void) 4173 { 4174 spa_t *spa = NULL; 4175 mutex_enter(&spa_namespace_lock); 4176 while ((spa = spa_next(spa)) != NULL) { 4177 if (spa_state(spa) != POOL_STATE_ACTIVE || spa_suspended(spa)) 4178 continue; 4179 spa_open_ref(spa, FTAG); 4180 mutex_exit(&spa_namespace_lock); 4181 txg_wait_synced(spa_get_dsl(spa), 0); 4182 mutex_enter(&spa_namespace_lock); 4183 spa_close(spa, FTAG); 4184 } 4185 mutex_exit(&spa_namespace_lock); 4186 } 4187 4188 /* 4189 * ========================================================================== 4190 * Miscellaneous routines 4191 * ========================================================================== 4192 */ 4193 4194 /* 4195 * Remove all pools in the system. 4196 */ 4197 void 4198 spa_evict_all(void) 4199 { 4200 spa_t *spa; 4201 4202 /* 4203 * Remove all cached state. All pools should be closed now, 4204 * so every spa in the AVL tree should be unreferenced. 4205 */ 4206 mutex_enter(&spa_namespace_lock); 4207 while ((spa = spa_next(NULL)) != NULL) { 4208 /* 4209 * Stop async tasks. The async thread may need to detach 4210 * a device that's been replaced, which requires grabbing 4211 * spa_namespace_lock, so we must drop it here. 4212 */ 4213 spa_open_ref(spa, FTAG); 4214 mutex_exit(&spa_namespace_lock); 4215 spa_async_suspend(spa); 4216 mutex_enter(&spa_namespace_lock); 4217 spa_close(spa, FTAG); 4218 4219 if (spa->spa_state != POOL_STATE_UNINITIALIZED) { 4220 spa_unload(spa); 4221 spa_deactivate(spa); 4222 } 4223 spa_remove(spa); 4224 } 4225 mutex_exit(&spa_namespace_lock); 4226 } 4227 4228 vdev_t * 4229 spa_lookup_by_guid(spa_t *spa, uint64_t guid, boolean_t l2cache) 4230 { 4231 vdev_t *vd; 4232 int i; 4233 4234 if ((vd = vdev_lookup_by_guid(spa->spa_root_vdev, guid)) != NULL) 4235 return (vd); 4236 4237 if (l2cache) { 4238 for (i = 0; i < spa->spa_l2cache.sav_count; i++) { 4239 vd = spa->spa_l2cache.sav_vdevs[i]; 4240 if (vd->vdev_guid == guid) 4241 return (vd); 4242 } 4243 } 4244 4245 return (NULL); 4246 } 4247 4248 void 4249 spa_upgrade(spa_t *spa, uint64_t version) 4250 { 4251 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 4252 4253 /* 4254 * This should only be called for a non-faulted pool, and since a 4255 * future version would result in an unopenable pool, this shouldn't be 4256 * possible. 4257 */ 4258 ASSERT(spa->spa_uberblock.ub_version <= SPA_VERSION); 4259 ASSERT(version >= spa->spa_uberblock.ub_version); 4260 4261 spa->spa_uberblock.ub_version = version; 4262 vdev_config_dirty(spa->spa_root_vdev); 4263 4264 spa_config_exit(spa, SCL_ALL, FTAG); 4265 4266 txg_wait_synced(spa_get_dsl(spa), 0); 4267 } 4268 4269 boolean_t 4270 spa_has_spare(spa_t *spa, uint64_t guid) 4271 { 4272 int i; 4273 uint64_t spareguid; 4274 spa_aux_vdev_t *sav = &spa->spa_spares; 4275 4276 for (i = 0; i < sav->sav_count; i++) 4277 if (sav->sav_vdevs[i]->vdev_guid == guid) 4278 return (B_TRUE); 4279 4280 for (i = 0; i < sav->sav_npending; i++) { 4281 if (nvlist_lookup_uint64(sav->sav_pending[i], ZPOOL_CONFIG_GUID, 4282 &spareguid) == 0 && spareguid == guid) 4283 return (B_TRUE); 4284 } 4285 4286 return (B_FALSE); 4287 } 4288 4289 /* 4290 * Check if a pool has an active shared spare device. 
* Note: reference count of an active spare is 2, as a spare and as a replacement 4292 */ 4293 static boolean_t 4294 spa_has_active_shared_spare(spa_t *spa) 4295 { 4296 int i, refcnt; 4297 uint64_t pool; 4298 spa_aux_vdev_t *sav = &spa->spa_spares; 4299 4300 for (i = 0; i < sav->sav_count; i++) { 4301 if (spa_spare_exists(sav->sav_vdevs[i]->vdev_guid, &pool, 4302 &refcnt) && pool != 0ULL && pool == spa_guid(spa) && 4303 refcnt > 2) 4304 return (B_TRUE); 4305 } 4306 4307 return (B_FALSE); 4308 } 4309 4310 /* 4311 * Post a sysevent corresponding to the given event. The 'name' must be one of 4312 * the event definitions in sys/sysevent/eventdefs.h. The payload will be 4313 * filled in from the spa and (optionally) the vdev. This doesn't do anything 4314 * in the userland libzpool, as we don't want consumers to misinterpret ztest 4315 * or zdb as real changes. 4316 */ 4317 void 4318 spa_event_notify(spa_t *spa, vdev_t *vd, const char *name) 4319 { 4320 #ifdef _KERNEL 4321 sysevent_t *ev; 4322 sysevent_attr_list_t *attr = NULL; 4323 sysevent_value_t value; 4324 sysevent_id_t eid; 4325 4326 ev = sysevent_alloc(EC_ZFS, (char *)name, SUNW_KERN_PUB "zfs", 4327 SE_SLEEP); 4328 4329 value.value_type = SE_DATA_TYPE_STRING; 4330 value.value.sv_string = spa_name(spa); 4331 if (sysevent_add_attr(&attr, ZFS_EV_POOL_NAME, &value, SE_SLEEP) != 0) 4332 goto done; 4333 4334 value.value_type = SE_DATA_TYPE_UINT64; 4335 value.value.sv_uint64 = spa_guid(spa); 4336 if (sysevent_add_attr(&attr, ZFS_EV_POOL_GUID, &value, SE_SLEEP) != 0) 4337 goto done; 4338 4339 if (vd) { 4340 value.value_type = SE_DATA_TYPE_UINT64; 4341 value.value.sv_uint64 = vd->vdev_guid; 4342 if (sysevent_add_attr(&attr, ZFS_EV_VDEV_GUID, &value, 4343 SE_SLEEP) != 0) 4344 goto done; 4345 4346 if (vd->vdev_path) { 4347 value.value_type = SE_DATA_TYPE_STRING; 4348 value.value.sv_string = vd->vdev_path; 4349 if (sysevent_add_attr(&attr, ZFS_EV_VDEV_PATH, 4350 &value, SE_SLEEP) != 0) 4351 goto done; 4352 } 4353 } 4354 4355 if (sysevent_attach_attributes(ev, attr) != 0) 4356 goto done; 4357 attr = NULL; 4358 4359 (void) log_sysevent(ev, SE_SLEEP, &eid); 4360 4361 done: 4362 if (attr) 4363 sysevent_free_attr(attr); 4364 sysevent_free(ev); 4365 #endif 4366 } 4367
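/* * For example, a caller that has just removed a device might post the matching EC_ZFS event defined in sys/sysevent/eventdefs.h: * * spa_event_notify(spa, vd, ESC_ZFS_VDEV_REMOVE); * * The event name above is illustrative only; any of the EC_ZFS event definitions from eventdefs.h may be passed as 'name'. */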