1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 22 /* 23 * Copyright 2008 Sun Microsystems, Inc. All rights reserved. 24 * Use is subject to license terms. 25 */ 26 27 /* 28 * This file contains all the routines used when modifying on-disk SPA state. 29 * This includes opening, importing, destroying, exporting a pool, and syncing a 30 * pool. 31 */ 32 33 #include <sys/zfs_context.h> 34 #include <sys/fm/fs/zfs.h> 35 #include <sys/spa_impl.h> 36 #include <sys/zio.h> 37 #include <sys/zio_checksum.h> 38 #include <sys/zio_compress.h> 39 #include <sys/dmu.h> 40 #include <sys/dmu_tx.h> 41 #include <sys/zap.h> 42 #include <sys/zil.h> 43 #include <sys/vdev_impl.h> 44 #include <sys/metaslab.h> 45 #include <sys/uberblock_impl.h> 46 #include <sys/txg.h> 47 #include <sys/avl.h> 48 #include <sys/dmu_traverse.h> 49 #include <sys/dmu_objset.h> 50 #include <sys/unique.h> 51 #include <sys/dsl_pool.h> 52 #include <sys/dsl_dataset.h> 53 #include <sys/dsl_dir.h> 54 #include <sys/dsl_prop.h> 55 #include <sys/dsl_synctask.h> 56 #include <sys/fs/zfs.h> 57 #include <sys/arc.h> 58 #include <sys/callb.h> 59 #include <sys/systeminfo.h> 60 #include <sys/sunddi.h> 61 #include <sys/spa_boot.h> 62 63 #include "zfs_prop.h" 64 #include "zfs_comutil.h" 65 66 int zio_taskq_threads = 8; 67 68 static void spa_sync_props(void *arg1, void *arg2, cred_t *cr, dmu_tx_t *tx); 69 static boolean_t spa_has_active_shared_spare(spa_t *spa); 70 71 /* 72 * ========================================================================== 73 * SPA properties routines 74 * ========================================================================== 75 */ 76 77 /* 78 * Add a (source=src, propname=propval) list to an nvlist. 79 */ 80 static void 81 spa_prop_add_list(nvlist_t *nvl, zpool_prop_t prop, char *strval, 82 uint64_t intval, zprop_source_t src) 83 { 84 const char *propname = zpool_prop_to_name(prop); 85 nvlist_t *propval; 86 87 VERIFY(nvlist_alloc(&propval, NV_UNIQUE_NAME, KM_SLEEP) == 0); 88 VERIFY(nvlist_add_uint64(propval, ZPROP_SOURCE, src) == 0); 89 90 if (strval != NULL) 91 VERIFY(nvlist_add_string(propval, ZPROP_VALUE, strval) == 0); 92 else 93 VERIFY(nvlist_add_uint64(propval, ZPROP_VALUE, intval) == 0); 94 95 VERIFY(nvlist_add_nvlist(nvl, propname, propval) == 0); 96 nvlist_free(propval); 97 } 98 99 /* 100 * Get property values from the spa configuration. 
101 */ 102 static void 103 spa_prop_get_config(spa_t *spa, nvlist_t **nvp) 104 { 105 uint64_t size = spa_get_space(spa); 106 uint64_t used = spa_get_alloc(spa); 107 uint64_t cap, version; 108 zprop_source_t src = ZPROP_SRC_NONE; 109 spa_config_dirent_t *dp; 110 111 /* 112 * readonly properties 113 */ 114 spa_prop_add_list(*nvp, ZPOOL_PROP_NAME, spa->spa_name, 0, src); 115 spa_prop_add_list(*nvp, ZPOOL_PROP_SIZE, NULL, size, src); 116 spa_prop_add_list(*nvp, ZPOOL_PROP_USED, NULL, used, src); 117 spa_prop_add_list(*nvp, ZPOOL_PROP_AVAILABLE, NULL, size - used, src); 118 119 cap = (size == 0) ? 0 : (used * 100 / size); 120 spa_prop_add_list(*nvp, ZPOOL_PROP_CAPACITY, NULL, cap, src); 121 122 spa_prop_add_list(*nvp, ZPOOL_PROP_GUID, NULL, spa_guid(spa), src); 123 spa_prop_add_list(*nvp, ZPOOL_PROP_HEALTH, NULL, 124 spa->spa_root_vdev->vdev_state, src); 125 126 /* 127 * settable properties that are not stored in the pool property object. 128 */ 129 version = spa_version(spa); 130 if (version == zpool_prop_default_numeric(ZPOOL_PROP_VERSION)) 131 src = ZPROP_SRC_DEFAULT; 132 else 133 src = ZPROP_SRC_LOCAL; 134 spa_prop_add_list(*nvp, ZPOOL_PROP_VERSION, NULL, version, src); 135 136 if (spa->spa_root != NULL) 137 spa_prop_add_list(*nvp, ZPOOL_PROP_ALTROOT, spa->spa_root, 138 0, ZPROP_SRC_LOCAL); 139 140 if ((dp = list_head(&spa->spa_config_list)) != NULL) { 141 if (dp->scd_path == NULL) { 142 spa_prop_add_list(*nvp, ZPOOL_PROP_CACHEFILE, 143 "none", 0, ZPROP_SRC_LOCAL); 144 } else if (strcmp(dp->scd_path, spa_config_path) != 0) { 145 spa_prop_add_list(*nvp, ZPOOL_PROP_CACHEFILE, 146 dp->scd_path, 0, ZPROP_SRC_LOCAL); 147 } 148 } 149 } 150 151 /* 152 * Get zpool property values. 153 */ 154 int 155 spa_prop_get(spa_t *spa, nvlist_t **nvp) 156 { 157 zap_cursor_t zc; 158 zap_attribute_t za; 159 objset_t *mos = spa->spa_meta_objset; 160 int err; 161 162 VERIFY(nvlist_alloc(nvp, NV_UNIQUE_NAME, KM_SLEEP) == 0); 163 164 /* 165 * Get properties from the spa config. 166 */ 167 spa_prop_get_config(spa, nvp); 168 169 mutex_enter(&spa->spa_props_lock); 170 /* If no pool property object, no more prop to get. */ 171 if (spa->spa_pool_props_object == 0) { 172 mutex_exit(&spa->spa_props_lock); 173 return (0); 174 } 175 176 /* 177 * Get properties from the MOS pool property object. 
178 */ 179 for (zap_cursor_init(&zc, mos, spa->spa_pool_props_object); 180 (err = zap_cursor_retrieve(&zc, &za)) == 0; 181 zap_cursor_advance(&zc)) { 182 uint64_t intval = 0; 183 char *strval = NULL; 184 zprop_source_t src = ZPROP_SRC_DEFAULT; 185 zpool_prop_t prop; 186 187 if ((prop = zpool_name_to_prop(za.za_name)) == ZPROP_INVAL) 188 continue; 189 190 switch (za.za_integer_length) { 191 case 8: 192 /* integer property */ 193 if (za.za_first_integer != 194 zpool_prop_default_numeric(prop)) 195 src = ZPROP_SRC_LOCAL; 196 197 if (prop == ZPOOL_PROP_BOOTFS) { 198 dsl_pool_t *dp; 199 dsl_dataset_t *ds = NULL; 200 201 dp = spa_get_dsl(spa); 202 rw_enter(&dp->dp_config_rwlock, RW_READER); 203 if (err = dsl_dataset_hold_obj(dp, 204 za.za_first_integer, FTAG, &ds)) { 205 rw_exit(&dp->dp_config_rwlock); 206 break; 207 } 208 209 strval = kmem_alloc( 210 MAXNAMELEN + strlen(MOS_DIR_NAME) + 1, 211 KM_SLEEP); 212 dsl_dataset_name(ds, strval); 213 dsl_dataset_rele(ds, FTAG); 214 rw_exit(&dp->dp_config_rwlock); 215 } else { 216 strval = NULL; 217 intval = za.za_first_integer; 218 } 219 220 spa_prop_add_list(*nvp, prop, strval, intval, src); 221 222 if (strval != NULL) 223 kmem_free(strval, 224 MAXNAMELEN + strlen(MOS_DIR_NAME) + 1); 225 226 break; 227 228 case 1: 229 /* string property */ 230 strval = kmem_alloc(za.za_num_integers, KM_SLEEP); 231 err = zap_lookup(mos, spa->spa_pool_props_object, 232 za.za_name, 1, za.za_num_integers, strval); 233 if (err) { 234 kmem_free(strval, za.za_num_integers); 235 break; 236 } 237 spa_prop_add_list(*nvp, prop, strval, 0, src); 238 kmem_free(strval, za.za_num_integers); 239 break; 240 241 default: 242 break; 243 } 244 } 245 zap_cursor_fini(&zc); 246 mutex_exit(&spa->spa_props_lock); 247 out: 248 if (err && err != ENOENT) { 249 nvlist_free(*nvp); 250 *nvp = NULL; 251 return (err); 252 } 253 254 return (0); 255 } 256 257 /* 258 * Validate the given pool properties nvlist and modify the list 259 * for the property values to be set. 
260 */ 261 static int 262 spa_prop_validate(spa_t *spa, nvlist_t *props) 263 { 264 nvpair_t *elem; 265 int error = 0, reset_bootfs = 0; 266 uint64_t objnum; 267 268 elem = NULL; 269 while ((elem = nvlist_next_nvpair(props, elem)) != NULL) { 270 zpool_prop_t prop; 271 char *propname, *strval; 272 uint64_t intval; 273 objset_t *os; 274 char *slash; 275 276 propname = nvpair_name(elem); 277 278 if ((prop = zpool_name_to_prop(propname)) == ZPROP_INVAL) 279 return (EINVAL); 280 281 switch (prop) { 282 case ZPOOL_PROP_VERSION: 283 error = nvpair_value_uint64(elem, &intval); 284 if (!error && 285 (intval < spa_version(spa) || intval > SPA_VERSION)) 286 error = EINVAL; 287 break; 288 289 case ZPOOL_PROP_DELEGATION: 290 case ZPOOL_PROP_AUTOREPLACE: 291 error = nvpair_value_uint64(elem, &intval); 292 if (!error && intval > 1) 293 error = EINVAL; 294 break; 295 296 case ZPOOL_PROP_BOOTFS: 297 if (spa_version(spa) < SPA_VERSION_BOOTFS) { 298 error = ENOTSUP; 299 break; 300 } 301 302 /* 303 * Make sure the vdev config is bootable 304 */ 305 if (!vdev_is_bootable(spa->spa_root_vdev)) { 306 error = ENOTSUP; 307 break; 308 } 309 310 reset_bootfs = 1; 311 312 error = nvpair_value_string(elem, &strval); 313 314 if (!error) { 315 uint64_t compress; 316 317 if (strval == NULL || strval[0] == '\0') { 318 objnum = zpool_prop_default_numeric( 319 ZPOOL_PROP_BOOTFS); 320 break; 321 } 322 323 if (error = dmu_objset_open(strval, DMU_OST_ZFS, 324 DS_MODE_USER | DS_MODE_READONLY, &os)) 325 break; 326 327 /* We don't support gzip bootable datasets */ 328 if ((error = dsl_prop_get_integer(strval, 329 zfs_prop_to_name(ZFS_PROP_COMPRESSION), 330 &compress, NULL)) == 0 && 331 !BOOTFS_COMPRESS_VALID(compress)) { 332 error = ENOTSUP; 333 } else { 334 objnum = dmu_objset_id(os); 335 } 336 dmu_objset_close(os); 337 } 338 break; 339 case ZPOOL_PROP_FAILUREMODE: 340 error = nvpair_value_uint64(elem, &intval); 341 if (!error && (intval < ZIO_FAILURE_MODE_WAIT || 342 intval > ZIO_FAILURE_MODE_PANIC)) 343 error = EINVAL; 344 345 /* 346 * This is a special case which only occurs when 347 * the pool has completely failed. This allows 348 * the user to change the in-core failmode property 349 * without syncing it out to disk (I/Os might 350 * currently be blocked). We do this by returning 351 * EIO to the caller (spa_prop_set) to trick it 352 * into thinking we encountered a property validation 353 * error. 
354 */ 355 if (!error && spa_state(spa) == POOL_STATE_IO_FAILURE) { 356 spa->spa_failmode = intval; 357 error = EIO; 358 } 359 break; 360 361 case ZPOOL_PROP_CACHEFILE: 362 if ((error = nvpair_value_string(elem, &strval)) != 0) 363 break; 364 365 if (strval[0] == '\0') 366 break; 367 368 if (strcmp(strval, "none") == 0) 369 break; 370 371 if (strval[0] != '/') { 372 error = EINVAL; 373 break; 374 } 375 376 slash = strrchr(strval, '/'); 377 ASSERT(slash != NULL); 378 379 if (slash[1] == '\0' || strcmp(slash, "/.") == 0 || 380 strcmp(slash, "/..") == 0) 381 error = EINVAL; 382 break; 383 } 384 385 if (error) 386 break; 387 } 388 389 if (!error && reset_bootfs) { 390 error = nvlist_remove(props, 391 zpool_prop_to_name(ZPOOL_PROP_BOOTFS), DATA_TYPE_STRING); 392 393 if (!error) { 394 error = nvlist_add_uint64(props, 395 zpool_prop_to_name(ZPOOL_PROP_BOOTFS), objnum); 396 } 397 } 398 399 return (error); 400 } 401 402 int 403 spa_prop_set(spa_t *spa, nvlist_t *nvp) 404 { 405 int error; 406 407 if ((error = spa_prop_validate(spa, nvp)) != 0) 408 return (error); 409 410 return (dsl_sync_task_do(spa_get_dsl(spa), NULL, spa_sync_props, 411 spa, nvp, 3)); 412 } 413 414 /* 415 * If the bootfs property value is dsobj, clear it. 416 */ 417 void 418 spa_prop_clear_bootfs(spa_t *spa, uint64_t dsobj, dmu_tx_t *tx) 419 { 420 if (spa->spa_bootfs == dsobj && spa->spa_pool_props_object != 0) { 421 VERIFY(zap_remove(spa->spa_meta_objset, 422 spa->spa_pool_props_object, 423 zpool_prop_to_name(ZPOOL_PROP_BOOTFS), tx) == 0); 424 spa->spa_bootfs = 0; 425 } 426 } 427 428 /* 429 * ========================================================================== 430 * SPA state manipulation (open/create/destroy/import/export) 431 * ========================================================================== 432 */ 433 434 static int 435 spa_error_entry_compare(const void *a, const void *b) 436 { 437 spa_error_entry_t *sa = (spa_error_entry_t *)a; 438 spa_error_entry_t *sb = (spa_error_entry_t *)b; 439 int ret; 440 441 ret = bcmp(&sa->se_bookmark, &sb->se_bookmark, 442 sizeof (zbookmark_t)); 443 444 if (ret < 0) 445 return (-1); 446 else if (ret > 0) 447 return (1); 448 else 449 return (0); 450 } 451 452 /* 453 * Utility function which retrieves copies of the current logs and 454 * re-initializes them in the process. 455 */ 456 void 457 spa_get_errlists(spa_t *spa, avl_tree_t *last, avl_tree_t *scrub) 458 { 459 ASSERT(MUTEX_HELD(&spa->spa_errlist_lock)); 460 461 bcopy(&spa->spa_errlist_last, last, sizeof (avl_tree_t)); 462 bcopy(&spa->spa_errlist_scrub, scrub, sizeof (avl_tree_t)); 463 464 avl_create(&spa->spa_errlist_scrub, 465 spa_error_entry_compare, sizeof (spa_error_entry_t), 466 offsetof(spa_error_entry_t, se_avl)); 467 avl_create(&spa->spa_errlist_last, 468 spa_error_entry_compare, sizeof (spa_error_entry_t), 469 offsetof(spa_error_entry_t, se_avl)); 470 } 471 472 /* 473 * Activate an uninitialized pool. 
474 */ 475 static void 476 spa_activate(spa_t *spa) 477 { 478 int t; 479 480 ASSERT(spa->spa_state == POOL_STATE_UNINITIALIZED); 481 482 spa->spa_state = POOL_STATE_ACTIVE; 483 484 spa->spa_normal_class = metaslab_class_create(); 485 spa->spa_log_class = metaslab_class_create(); 486 487 for (t = 0; t < ZIO_TYPES; t++) { 488 spa->spa_zio_issue_taskq[t] = taskq_create("spa_zio_issue", 489 zio_taskq_threads, maxclsyspri, 50, INT_MAX, 490 TASKQ_PREPOPULATE); 491 spa->spa_zio_intr_taskq[t] = taskq_create("spa_zio_intr", 492 zio_taskq_threads, maxclsyspri, 50, INT_MAX, 493 TASKQ_PREPOPULATE); 494 } 495 496 list_create(&spa->spa_dirty_list, sizeof (vdev_t), 497 offsetof(vdev_t, vdev_dirty_node)); 498 list_create(&spa->spa_zio_list, sizeof (zio_t), 499 offsetof(zio_t, zio_link_node)); 500 501 txg_list_create(&spa->spa_vdev_txg_list, 502 offsetof(struct vdev, vdev_txg_node)); 503 504 avl_create(&spa->spa_errlist_scrub, 505 spa_error_entry_compare, sizeof (spa_error_entry_t), 506 offsetof(spa_error_entry_t, se_avl)); 507 avl_create(&spa->spa_errlist_last, 508 spa_error_entry_compare, sizeof (spa_error_entry_t), 509 offsetof(spa_error_entry_t, se_avl)); 510 } 511 512 /* 513 * Opposite of spa_activate(). 514 */ 515 static void 516 spa_deactivate(spa_t *spa) 517 { 518 int t; 519 520 ASSERT(spa->spa_sync_on == B_FALSE); 521 ASSERT(spa->spa_dsl_pool == NULL); 522 ASSERT(spa->spa_root_vdev == NULL); 523 524 ASSERT(spa->spa_state != POOL_STATE_UNINITIALIZED); 525 526 txg_list_destroy(&spa->spa_vdev_txg_list); 527 528 list_destroy(&spa->spa_dirty_list); 529 list_destroy(&spa->spa_zio_list); 530 531 for (t = 0; t < ZIO_TYPES; t++) { 532 taskq_destroy(spa->spa_zio_issue_taskq[t]); 533 taskq_destroy(spa->spa_zio_intr_taskq[t]); 534 spa->spa_zio_issue_taskq[t] = NULL; 535 spa->spa_zio_intr_taskq[t] = NULL; 536 } 537 538 metaslab_class_destroy(spa->spa_normal_class); 539 spa->spa_normal_class = NULL; 540 541 metaslab_class_destroy(spa->spa_log_class); 542 spa->spa_log_class = NULL; 543 544 /* 545 * If this was part of an import or the open otherwise failed, we may 546 * still have errors left in the queues. Empty them just in case. 547 */ 548 spa_errlog_drain(spa); 549 550 avl_destroy(&spa->spa_errlist_scrub); 551 avl_destroy(&spa->spa_errlist_last); 552 553 spa->spa_state = POOL_STATE_UNINITIALIZED; 554 } 555 556 /* 557 * Verify a pool configuration, and construct the vdev tree appropriately. This 558 * will create all the necessary vdevs in the appropriate layout, with each vdev 559 * in the CLOSED state. This will prep the pool before open/creation/import. 560 * All vdev validation is done by the vdev_alloc() routine. 561 */ 562 static int 563 spa_config_parse(spa_t *spa, vdev_t **vdp, nvlist_t *nv, vdev_t *parent, 564 uint_t id, int atype) 565 { 566 nvlist_t **child; 567 uint_t c, children; 568 int error; 569 570 if ((error = vdev_alloc(spa, vdp, nv, parent, id, atype)) != 0) 571 return (error); 572 573 if ((*vdp)->vdev_ops->vdev_op_leaf) 574 return (0); 575 576 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN, 577 &child, &children) != 0) { 578 vdev_free(*vdp); 579 *vdp = NULL; 580 return (EINVAL); 581 } 582 583 for (c = 0; c < children; c++) { 584 vdev_t *vd; 585 if ((error = spa_config_parse(spa, &vd, child[c], *vdp, c, 586 atype)) != 0) { 587 vdev_free(*vdp); 588 *vdp = NULL; 589 return (error); 590 } 591 } 592 593 ASSERT(*vdp != NULL); 594 595 return (0); 596 } 597 598 /* 599 * Opposite of spa_load(). 
600 */ 601 static void 602 spa_unload(spa_t *spa) 603 { 604 int i; 605 606 /* 607 * Stop async tasks. 608 */ 609 spa_async_suspend(spa); 610 611 /* 612 * Stop syncing. 613 */ 614 if (spa->spa_sync_on) { 615 txg_sync_stop(spa->spa_dsl_pool); 616 spa->spa_sync_on = B_FALSE; 617 } 618 619 /* 620 * Wait for any outstanding prefetch I/O to complete. 621 */ 622 spa_config_enter(spa, RW_WRITER, FTAG); 623 spa_config_exit(spa, FTAG); 624 625 /* 626 * Drop and purge level 2 cache 627 */ 628 spa_l2cache_drop(spa); 629 630 /* 631 * Close the dsl pool. 632 */ 633 if (spa->spa_dsl_pool) { 634 dsl_pool_close(spa->spa_dsl_pool); 635 spa->spa_dsl_pool = NULL; 636 } 637 638 /* 639 * Close all vdevs. 640 */ 641 if (spa->spa_root_vdev) 642 vdev_free(spa->spa_root_vdev); 643 ASSERT(spa->spa_root_vdev == NULL); 644 645 for (i = 0; i < spa->spa_spares.sav_count; i++) 646 vdev_free(spa->spa_spares.sav_vdevs[i]); 647 if (spa->spa_spares.sav_vdevs) { 648 kmem_free(spa->spa_spares.sav_vdevs, 649 spa->spa_spares.sav_count * sizeof (void *)); 650 spa->spa_spares.sav_vdevs = NULL; 651 } 652 if (spa->spa_spares.sav_config) { 653 nvlist_free(spa->spa_spares.sav_config); 654 spa->spa_spares.sav_config = NULL; 655 } 656 spa->spa_spares.sav_count = 0; 657 658 for (i = 0; i < spa->spa_l2cache.sav_count; i++) 659 vdev_free(spa->spa_l2cache.sav_vdevs[i]); 660 if (spa->spa_l2cache.sav_vdevs) { 661 kmem_free(spa->spa_l2cache.sav_vdevs, 662 spa->spa_l2cache.sav_count * sizeof (void *)); 663 spa->spa_l2cache.sav_vdevs = NULL; 664 } 665 if (spa->spa_l2cache.sav_config) { 666 nvlist_free(spa->spa_l2cache.sav_config); 667 spa->spa_l2cache.sav_config = NULL; 668 } 669 spa->spa_l2cache.sav_count = 0; 670 671 spa->spa_async_suspended = 0; 672 } 673 674 /* 675 * Load (or re-load) the current list of vdevs describing the active spares for 676 * this pool. When this is called, we have some form of basic information in 677 * 'spa_spares.sav_config'. We parse this into vdevs, try to open them, and 678 * then re-generate a more complete list including status information. 679 */ 680 static void 681 spa_load_spares(spa_t *spa) 682 { 683 nvlist_t **spares; 684 uint_t nspares; 685 int i; 686 vdev_t *vd, *tvd; 687 688 /* 689 * First, close and free any existing spare vdevs. 690 */ 691 for (i = 0; i < spa->spa_spares.sav_count; i++) { 692 vd = spa->spa_spares.sav_vdevs[i]; 693 694 /* Undo the call to spa_activate() below */ 695 if ((tvd = spa_lookup_by_guid(spa, vd->vdev_guid, 696 B_FALSE)) != NULL && tvd->vdev_isspare) 697 spa_spare_remove(tvd); 698 vdev_close(vd); 699 vdev_free(vd); 700 } 701 702 if (spa->spa_spares.sav_vdevs) 703 kmem_free(spa->spa_spares.sav_vdevs, 704 spa->spa_spares.sav_count * sizeof (void *)); 705 706 if (spa->spa_spares.sav_config == NULL) 707 nspares = 0; 708 else 709 VERIFY(nvlist_lookup_nvlist_array(spa->spa_spares.sav_config, 710 ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0); 711 712 spa->spa_spares.sav_count = (int)nspares; 713 spa->spa_spares.sav_vdevs = NULL; 714 715 if (nspares == 0) 716 return; 717 718 /* 719 * Construct the array of vdevs, opening them to get status in the 720 * process. For each spare, there is potentially two different vdev_t 721 * structures associated with it: one in the list of spares (used only 722 * for basic validation purposes) and one in the active vdev 723 * configuration (if it's spared in). During this phase we open and 724 * validate each vdev on the spare list. If the vdev also exists in the 725 * active configuration, then we also mark this vdev as an active spare. 
726 */ 727 spa->spa_spares.sav_vdevs = kmem_alloc(nspares * sizeof (void *), 728 KM_SLEEP); 729 for (i = 0; i < spa->spa_spares.sav_count; i++) { 730 VERIFY(spa_config_parse(spa, &vd, spares[i], NULL, 0, 731 VDEV_ALLOC_SPARE) == 0); 732 ASSERT(vd != NULL); 733 734 spa->spa_spares.sav_vdevs[i] = vd; 735 736 if ((tvd = spa_lookup_by_guid(spa, vd->vdev_guid, 737 B_FALSE)) != NULL) { 738 if (!tvd->vdev_isspare) 739 spa_spare_add(tvd); 740 741 /* 742 * We only mark the spare active if we were successfully 743 * able to load the vdev. Otherwise, importing a pool 744 * with a bad active spare would result in strange 745 * behavior, because multiple pool would think the spare 746 * is actively in use. 747 * 748 * There is a vulnerability here to an equally bizarre 749 * circumstance, where a dead active spare is later 750 * brought back to life (onlined or otherwise). Given 751 * the rarity of this scenario, and the extra complexity 752 * it adds, we ignore the possibility. 753 */ 754 if (!vdev_is_dead(tvd)) 755 spa_spare_activate(tvd); 756 } 757 758 if (vdev_open(vd) != 0) 759 continue; 760 761 vd->vdev_top = vd; 762 if (vdev_validate_aux(vd) == 0) 763 spa_spare_add(vd); 764 } 765 766 /* 767 * Recompute the stashed list of spares, with status information 768 * this time. 769 */ 770 VERIFY(nvlist_remove(spa->spa_spares.sav_config, ZPOOL_CONFIG_SPARES, 771 DATA_TYPE_NVLIST_ARRAY) == 0); 772 773 spares = kmem_alloc(spa->spa_spares.sav_count * sizeof (void *), 774 KM_SLEEP); 775 for (i = 0; i < spa->spa_spares.sav_count; i++) 776 spares[i] = vdev_config_generate(spa, 777 spa->spa_spares.sav_vdevs[i], B_TRUE, B_TRUE, B_FALSE); 778 VERIFY(nvlist_add_nvlist_array(spa->spa_spares.sav_config, 779 ZPOOL_CONFIG_SPARES, spares, spa->spa_spares.sav_count) == 0); 780 for (i = 0; i < spa->spa_spares.sav_count; i++) 781 nvlist_free(spares[i]); 782 kmem_free(spares, spa->spa_spares.sav_count * sizeof (void *)); 783 } 784 785 /* 786 * Load (or re-load) the current list of vdevs describing the active l2cache for 787 * this pool. When this is called, we have some form of basic information in 788 * 'spa_l2cache.sav_config'. We parse this into vdevs, try to open them, and 789 * then re-generate a more complete list including status information. 790 * Devices which are already active have their details maintained, and are 791 * not re-opened. 792 */ 793 static void 794 spa_load_l2cache(spa_t *spa) 795 { 796 nvlist_t **l2cache; 797 uint_t nl2cache; 798 int i, j, oldnvdevs; 799 uint64_t guid, size; 800 vdev_t *vd, **oldvdevs, **newvdevs; 801 spa_aux_vdev_t *sav = &spa->spa_l2cache; 802 803 if (sav->sav_config != NULL) { 804 VERIFY(nvlist_lookup_nvlist_array(sav->sav_config, 805 ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0); 806 newvdevs = kmem_alloc(nl2cache * sizeof (void *), KM_SLEEP); 807 } else { 808 nl2cache = 0; 809 } 810 811 oldvdevs = sav->sav_vdevs; 812 oldnvdevs = sav->sav_count; 813 sav->sav_vdevs = NULL; 814 sav->sav_count = 0; 815 816 /* 817 * Process new nvlist of vdevs. 818 */ 819 for (i = 0; i < nl2cache; i++) { 820 VERIFY(nvlist_lookup_uint64(l2cache[i], ZPOOL_CONFIG_GUID, 821 &guid) == 0); 822 823 newvdevs[i] = NULL; 824 for (j = 0; j < oldnvdevs; j++) { 825 vd = oldvdevs[j]; 826 if (vd != NULL && guid == vd->vdev_guid) { 827 /* 828 * Retain previous vdev for add/remove ops. 
829 */ 830 newvdevs[i] = vd; 831 oldvdevs[j] = NULL; 832 break; 833 } 834 } 835 836 if (newvdevs[i] == NULL) { 837 /* 838 * Create new vdev 839 */ 840 VERIFY(spa_config_parse(spa, &vd, l2cache[i], NULL, 0, 841 VDEV_ALLOC_L2CACHE) == 0); 842 ASSERT(vd != NULL); 843 newvdevs[i] = vd; 844 845 /* 846 * Commit this vdev as an l2cache device, 847 * even if it fails to open. 848 */ 849 spa_l2cache_add(vd); 850 851 vd->vdev_top = vd; 852 vd->vdev_aux = sav; 853 854 spa_l2cache_activate(vd); 855 856 if (vdev_open(vd) != 0) 857 continue; 858 859 (void) vdev_validate_aux(vd); 860 861 if (!vdev_is_dead(vd)) { 862 size = vdev_get_rsize(vd); 863 l2arc_add_vdev(spa, vd, 864 VDEV_LABEL_START_SIZE, 865 size - VDEV_LABEL_START_SIZE); 866 } 867 } 868 } 869 870 /* 871 * Purge vdevs that were dropped 872 */ 873 for (i = 0; i < oldnvdevs; i++) { 874 uint64_t pool; 875 876 vd = oldvdevs[i]; 877 if (vd != NULL) { 878 if (spa_mode & FWRITE && 879 spa_l2cache_exists(vd->vdev_guid, &pool) && 880 pool != 0ULL && 881 l2arc_vdev_present(vd)) { 882 l2arc_remove_vdev(vd); 883 } 884 (void) vdev_close(vd); 885 spa_l2cache_remove(vd); 886 } 887 } 888 889 if (oldvdevs) 890 kmem_free(oldvdevs, oldnvdevs * sizeof (void *)); 891 892 if (sav->sav_config == NULL) 893 goto out; 894 895 sav->sav_vdevs = newvdevs; 896 sav->sav_count = (int)nl2cache; 897 898 /* 899 * Recompute the stashed list of l2cache devices, with status 900 * information this time. 901 */ 902 VERIFY(nvlist_remove(sav->sav_config, ZPOOL_CONFIG_L2CACHE, 903 DATA_TYPE_NVLIST_ARRAY) == 0); 904 905 l2cache = kmem_alloc(sav->sav_count * sizeof (void *), KM_SLEEP); 906 for (i = 0; i < sav->sav_count; i++) 907 l2cache[i] = vdev_config_generate(spa, 908 sav->sav_vdevs[i], B_TRUE, B_FALSE, B_TRUE); 909 VERIFY(nvlist_add_nvlist_array(sav->sav_config, 910 ZPOOL_CONFIG_L2CACHE, l2cache, sav->sav_count) == 0); 911 out: 912 for (i = 0; i < sav->sav_count; i++) 913 nvlist_free(l2cache[i]); 914 if (sav->sav_count) 915 kmem_free(l2cache, sav->sav_count * sizeof (void *)); 916 } 917 918 static int 919 load_nvlist(spa_t *spa, uint64_t obj, nvlist_t **value) 920 { 921 dmu_buf_t *db; 922 char *packed = NULL; 923 size_t nvsize = 0; 924 int error; 925 *value = NULL; 926 927 VERIFY(0 == dmu_bonus_hold(spa->spa_meta_objset, obj, FTAG, &db)); 928 nvsize = *(uint64_t *)db->db_data; 929 dmu_buf_rele(db, FTAG); 930 931 packed = kmem_alloc(nvsize, KM_SLEEP); 932 error = dmu_read(spa->spa_meta_objset, obj, 0, nvsize, packed); 933 if (error == 0) 934 error = nvlist_unpack(packed, nvsize, value, 0); 935 kmem_free(packed, nvsize); 936 937 return (error); 938 } 939 940 /* 941 * Checks to see if the given vdev could not be opened, in which case we post a 942 * sysevent to notify the autoreplace code that the device has been removed. 
943 */ 944 static void 945 spa_check_removed(vdev_t *vd) 946 { 947 int c; 948 949 for (c = 0; c < vd->vdev_children; c++) 950 spa_check_removed(vd->vdev_child[c]); 951 952 if (vd->vdev_ops->vdev_op_leaf && vdev_is_dead(vd)) { 953 zfs_post_autoreplace(vd->vdev_spa, vd); 954 spa_event_notify(vd->vdev_spa, vd, ESC_ZFS_VDEV_CHECK); 955 } 956 } 957 958 /* 959 * Check for missing log devices 960 */ 961 int 962 spa_check_logs(spa_t *spa) 963 { 964 switch (spa->spa_log_state) { 965 case SPA_LOG_MISSING: 966 /* need to recheck in case slog has been restored */ 967 case SPA_LOG_UNKNOWN: 968 if (dmu_objset_find(spa->spa_name, zil_check_log_chain, NULL, 969 DS_FIND_CHILDREN)) { 970 spa->spa_log_state = SPA_LOG_MISSING; 971 return (1); 972 } 973 break; 974 975 case SPA_LOG_CLEAR: 976 (void) dmu_objset_find(spa->spa_name, zil_clear_log_chain, NULL, 977 DS_FIND_CHILDREN); 978 break; 979 } 980 spa->spa_log_state = SPA_LOG_GOOD; 981 return (0); 982 } 983 984 /* 985 * Load an existing storage pool, using the pool's builtin spa_config as a 986 * source of configuration information. 987 */ 988 static int 989 spa_load(spa_t *spa, nvlist_t *config, spa_load_state_t state, int mosconfig) 990 { 991 int error = 0; 992 nvlist_t *nvroot = NULL; 993 vdev_t *rvd; 994 uberblock_t *ub = &spa->spa_uberblock; 995 uint64_t config_cache_txg = spa->spa_config_txg; 996 uint64_t pool_guid; 997 uint64_t version; 998 zio_t *zio; 999 uint64_t autoreplace = 0; 1000 char *ereport = FM_EREPORT_ZFS_POOL; 1001 1002 spa->spa_load_state = state; 1003 1004 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &nvroot) || 1005 nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID, &pool_guid)) { 1006 error = EINVAL; 1007 goto out; 1008 } 1009 1010 /* 1011 * Versioning wasn't explicitly added to the label until later, so if 1012 * it's not present treat it as the initial version. 1013 */ 1014 if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION, &version) != 0) 1015 version = SPA_VERSION_INITIAL; 1016 1017 (void) nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_TXG, 1018 &spa->spa_config_txg); 1019 1020 if ((state == SPA_LOAD_IMPORT || state == SPA_LOAD_TRYIMPORT) && 1021 spa_guid_exists(pool_guid, 0)) { 1022 error = EEXIST; 1023 goto out; 1024 } 1025 1026 spa->spa_load_guid = pool_guid; 1027 1028 /* 1029 * Parse the configuration into a vdev tree. We explicitly set the 1030 * value that will be returned by spa_version() since parsing the 1031 * configuration requires knowing the version number. 1032 */ 1033 spa_config_enter(spa, RW_WRITER, FTAG); 1034 spa->spa_ubsync.ub_version = version; 1035 error = spa_config_parse(spa, &rvd, nvroot, NULL, 0, VDEV_ALLOC_LOAD); 1036 spa_config_exit(spa, FTAG); 1037 1038 if (error != 0) 1039 goto out; 1040 1041 ASSERT(spa->spa_root_vdev == rvd); 1042 ASSERT(spa_guid(spa) == pool_guid); 1043 1044 /* 1045 * Try to open all vdevs, loading each label in the process. 1046 */ 1047 error = vdev_open(rvd); 1048 if (error != 0) 1049 goto out; 1050 1051 /* 1052 * Validate the labels for all leaf vdevs. We need to grab the config 1053 * lock because all label I/O is done with the ZIO_FLAG_CONFIG_HELD 1054 * flag. 1055 */ 1056 spa_config_enter(spa, RW_READER, FTAG); 1057 error = vdev_validate(rvd); 1058 spa_config_exit(spa, FTAG); 1059 1060 if (error != 0) 1061 goto out; 1062 1063 if (rvd->vdev_state <= VDEV_STATE_CANT_OPEN) { 1064 error = ENXIO; 1065 goto out; 1066 } 1067 1068 /* 1069 * Find the best uberblock. 
1070 */ 1071 bzero(ub, sizeof (uberblock_t)); 1072 1073 zio = zio_root(spa, NULL, NULL, 1074 ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE); 1075 vdev_uberblock_load(zio, rvd, ub); 1076 error = zio_wait(zio); 1077 1078 /* 1079 * If we weren't able to find a single valid uberblock, return failure. 1080 */ 1081 if (ub->ub_txg == 0) { 1082 vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN, 1083 VDEV_AUX_CORRUPT_DATA); 1084 error = ENXIO; 1085 goto out; 1086 } 1087 1088 /* 1089 * If the pool is newer than the code, we can't open it. 1090 */ 1091 if (ub->ub_version > SPA_VERSION) { 1092 vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN, 1093 VDEV_AUX_VERSION_NEWER); 1094 error = ENOTSUP; 1095 goto out; 1096 } 1097 1098 /* 1099 * If the vdev guid sum doesn't match the uberblock, we have an 1100 * incomplete configuration. 1101 */ 1102 if (rvd->vdev_guid_sum != ub->ub_guid_sum && mosconfig) { 1103 vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN, 1104 VDEV_AUX_BAD_GUID_SUM); 1105 error = ENXIO; 1106 goto out; 1107 } 1108 1109 /* 1110 * Initialize internal SPA structures. 1111 */ 1112 spa->spa_state = POOL_STATE_ACTIVE; 1113 spa->spa_ubsync = spa->spa_uberblock; 1114 spa->spa_first_txg = spa_last_synced_txg(spa) + 1; 1115 error = dsl_pool_open(spa, spa->spa_first_txg, &spa->spa_dsl_pool); 1116 if (error) { 1117 vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN, 1118 VDEV_AUX_CORRUPT_DATA); 1119 goto out; 1120 } 1121 spa->spa_meta_objset = spa->spa_dsl_pool->dp_meta_objset; 1122 1123 if (zap_lookup(spa->spa_meta_objset, 1124 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_CONFIG, 1125 sizeof (uint64_t), 1, &spa->spa_config_object) != 0) { 1126 vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN, 1127 VDEV_AUX_CORRUPT_DATA); 1128 error = EIO; 1129 goto out; 1130 } 1131 1132 if (!mosconfig) { 1133 nvlist_t *newconfig; 1134 uint64_t hostid; 1135 1136 if (load_nvlist(spa, spa->spa_config_object, &newconfig) != 0) { 1137 vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN, 1138 VDEV_AUX_CORRUPT_DATA); 1139 error = EIO; 1140 goto out; 1141 } 1142 1143 if (nvlist_lookup_uint64(newconfig, ZPOOL_CONFIG_HOSTID, 1144 &hostid) == 0) { 1145 char *hostname; 1146 unsigned long myhostid = 0; 1147 1148 VERIFY(nvlist_lookup_string(newconfig, 1149 ZPOOL_CONFIG_HOSTNAME, &hostname) == 0); 1150 1151 (void) ddi_strtoul(hw_serial, NULL, 10, &myhostid); 1152 if (hostid != 0 && myhostid != 0 && 1153 (unsigned long)hostid != myhostid) { 1154 cmn_err(CE_WARN, "pool '%s' could not be " 1155 "loaded as it was last accessed by " 1156 "another system (host: %s hostid: 0x%lx). " 1157 "See: http://www.sun.com/msg/ZFS-8000-EY", 1158 spa->spa_name, hostname, 1159 (unsigned long)hostid); 1160 error = EBADF; 1161 goto out; 1162 } 1163 } 1164 1165 spa_config_set(spa, newconfig); 1166 spa_unload(spa); 1167 spa_deactivate(spa); 1168 spa_activate(spa); 1169 1170 return (spa_load(spa, newconfig, state, B_TRUE)); 1171 } 1172 1173 if (zap_lookup(spa->spa_meta_objset, 1174 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_SYNC_BPLIST, 1175 sizeof (uint64_t), 1, &spa->spa_sync_bplist_obj) != 0) { 1176 vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN, 1177 VDEV_AUX_CORRUPT_DATA); 1178 error = EIO; 1179 goto out; 1180 } 1181 1182 /* 1183 * Load the bit that tells us to use the new accounting function 1184 * (raid-z deflation). If we have an older pool, this will not 1185 * be present. 
1186 */ 1187 error = zap_lookup(spa->spa_meta_objset, 1188 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_DEFLATE, 1189 sizeof (uint64_t), 1, &spa->spa_deflate); 1190 if (error != 0 && error != ENOENT) { 1191 vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN, 1192 VDEV_AUX_CORRUPT_DATA); 1193 error = EIO; 1194 goto out; 1195 } 1196 1197 /* 1198 * Load the persistent error log. If we have an older pool, this will 1199 * not be present. 1200 */ 1201 error = zap_lookup(spa->spa_meta_objset, 1202 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_ERRLOG_LAST, 1203 sizeof (uint64_t), 1, &spa->spa_errlog_last); 1204 if (error != 0 && error != ENOENT) { 1205 vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN, 1206 VDEV_AUX_CORRUPT_DATA); 1207 error = EIO; 1208 goto out; 1209 } 1210 1211 error = zap_lookup(spa->spa_meta_objset, 1212 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_ERRLOG_SCRUB, 1213 sizeof (uint64_t), 1, &spa->spa_errlog_scrub); 1214 if (error != 0 && error != ENOENT) { 1215 vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN, 1216 VDEV_AUX_CORRUPT_DATA); 1217 error = EIO; 1218 goto out; 1219 } 1220 1221 /* 1222 * Load the history object. If we have an older pool, this 1223 * will not be present. 1224 */ 1225 error = zap_lookup(spa->spa_meta_objset, 1226 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_HISTORY, 1227 sizeof (uint64_t), 1, &spa->spa_history); 1228 if (error != 0 && error != ENOENT) { 1229 vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN, 1230 VDEV_AUX_CORRUPT_DATA); 1231 error = EIO; 1232 goto out; 1233 } 1234 1235 /* 1236 * Load any hot spares for this pool. 1237 */ 1238 error = zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT, 1239 DMU_POOL_SPARES, sizeof (uint64_t), 1, &spa->spa_spares.sav_object); 1240 if (error != 0 && error != ENOENT) { 1241 vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN, 1242 VDEV_AUX_CORRUPT_DATA); 1243 error = EIO; 1244 goto out; 1245 } 1246 if (error == 0) { 1247 ASSERT(spa_version(spa) >= SPA_VERSION_SPARES); 1248 if (load_nvlist(spa, spa->spa_spares.sav_object, 1249 &spa->spa_spares.sav_config) != 0) { 1250 vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN, 1251 VDEV_AUX_CORRUPT_DATA); 1252 error = EIO; 1253 goto out; 1254 } 1255 1256 spa_config_enter(spa, RW_WRITER, FTAG); 1257 spa_load_spares(spa); 1258 spa_config_exit(spa, FTAG); 1259 } 1260 1261 /* 1262 * Load any level 2 ARC devices for this pool. 
1263 */ 1264 error = zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT, 1265 DMU_POOL_L2CACHE, sizeof (uint64_t), 1, 1266 &spa->spa_l2cache.sav_object); 1267 if (error != 0 && error != ENOENT) { 1268 vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN, 1269 VDEV_AUX_CORRUPT_DATA); 1270 error = EIO; 1271 goto out; 1272 } 1273 if (error == 0) { 1274 ASSERT(spa_version(spa) >= SPA_VERSION_L2CACHE); 1275 if (load_nvlist(spa, spa->spa_l2cache.sav_object, 1276 &spa->spa_l2cache.sav_config) != 0) { 1277 vdev_set_state(rvd, B_TRUE, 1278 VDEV_STATE_CANT_OPEN, 1279 VDEV_AUX_CORRUPT_DATA); 1280 error = EIO; 1281 goto out; 1282 } 1283 1284 spa_config_enter(spa, RW_WRITER, FTAG); 1285 spa_load_l2cache(spa); 1286 spa_config_exit(spa, FTAG); 1287 } 1288 1289 if (spa_check_logs(spa)) { 1290 vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN, 1291 VDEV_AUX_BAD_LOG); 1292 error = ENXIO; 1293 ereport = FM_EREPORT_ZFS_LOG_REPLAY; 1294 goto out; 1295 } 1296 1297 1298 spa->spa_delegation = zpool_prop_default_numeric(ZPOOL_PROP_DELEGATION); 1299 1300 error = zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT, 1301 DMU_POOL_PROPS, sizeof (uint64_t), 1, &spa->spa_pool_props_object); 1302 1303 if (error && error != ENOENT) { 1304 vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN, 1305 VDEV_AUX_CORRUPT_DATA); 1306 error = EIO; 1307 goto out; 1308 } 1309 1310 if (error == 0) { 1311 (void) zap_lookup(spa->spa_meta_objset, 1312 spa->spa_pool_props_object, 1313 zpool_prop_to_name(ZPOOL_PROP_BOOTFS), 1314 sizeof (uint64_t), 1, &spa->spa_bootfs); 1315 (void) zap_lookup(spa->spa_meta_objset, 1316 spa->spa_pool_props_object, 1317 zpool_prop_to_name(ZPOOL_PROP_AUTOREPLACE), 1318 sizeof (uint64_t), 1, &autoreplace); 1319 (void) zap_lookup(spa->spa_meta_objset, 1320 spa->spa_pool_props_object, 1321 zpool_prop_to_name(ZPOOL_PROP_DELEGATION), 1322 sizeof (uint64_t), 1, &spa->spa_delegation); 1323 (void) zap_lookup(spa->spa_meta_objset, 1324 spa->spa_pool_props_object, 1325 zpool_prop_to_name(ZPOOL_PROP_FAILUREMODE), 1326 sizeof (uint64_t), 1, &spa->spa_failmode); 1327 } 1328 1329 /* 1330 * If the 'autoreplace' property is set, then post a resource notifying 1331 * the ZFS DE that it should not issue any faults for unopenable 1332 * devices. We also iterate over the vdevs, and post a sysevent for any 1333 * unopenable vdevs so that the normal autoreplace handler can take 1334 * over. 1335 */ 1336 if (autoreplace && state != SPA_LOAD_TRYIMPORT) 1337 spa_check_removed(spa->spa_root_vdev); 1338 1339 /* 1340 * Load the vdev state for all toplevel vdevs. 1341 */ 1342 vdev_load(rvd); 1343 1344 /* 1345 * Propagate the leaf DTLs we just loaded all the way up the tree. 1346 */ 1347 spa_config_enter(spa, RW_WRITER, FTAG); 1348 vdev_dtl_reassess(rvd, 0, 0, B_FALSE); 1349 spa_config_exit(spa, FTAG); 1350 1351 /* 1352 * Check the state of the root vdev. If it can't be opened, it 1353 * indicates one or more toplevel vdevs are faulted. 1354 */ 1355 if (rvd->vdev_state <= VDEV_STATE_CANT_OPEN) { 1356 error = ENXIO; 1357 goto out; 1358 } 1359 1360 if ((spa_mode & FWRITE) && state != SPA_LOAD_TRYIMPORT) { 1361 dmu_tx_t *tx; 1362 int need_update = B_FALSE; 1363 int c; 1364 1365 /* 1366 * Claim log blocks that haven't been committed yet. 1367 * This must all happen in a single txg. 
1368 */ 1369 tx = dmu_tx_create_assigned(spa_get_dsl(spa), 1370 spa_first_txg(spa)); 1371 (void) dmu_objset_find(spa->spa_name, 1372 zil_claim, tx, DS_FIND_CHILDREN); 1373 dmu_tx_commit(tx); 1374 1375 spa->spa_sync_on = B_TRUE; 1376 txg_sync_start(spa->spa_dsl_pool); 1377 1378 /* 1379 * Wait for all claims to sync. 1380 */ 1381 txg_wait_synced(spa->spa_dsl_pool, 0); 1382 1383 /* 1384 * If the config cache is stale, or we have uninitialized 1385 * metaslabs (see spa_vdev_add()), then update the config. 1386 */ 1387 if (config_cache_txg != spa->spa_config_txg || 1388 state == SPA_LOAD_IMPORT) 1389 need_update = B_TRUE; 1390 1391 for (c = 0; c < rvd->vdev_children; c++) 1392 if (rvd->vdev_child[c]->vdev_ms_array == 0) 1393 need_update = B_TRUE; 1394 1395 /* 1396 * Update the config cache asychronously in case we're the 1397 * root pool, in which case the config cache isn't writable yet. 1398 */ 1399 if (need_update) 1400 spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE); 1401 } 1402 1403 error = 0; 1404 out: 1405 spa->spa_minref = refcount_count(&spa->spa_refcount); 1406 if (error && error != EBADF) 1407 zfs_ereport_post(ereport, spa, NULL, NULL, 0, 0); 1408 spa->spa_load_state = SPA_LOAD_NONE; 1409 spa->spa_ena = 0; 1410 1411 return (error); 1412 } 1413 1414 /* 1415 * Pool Open/Import 1416 * 1417 * The import case is identical to an open except that the configuration is sent 1418 * down from userland, instead of grabbed from the configuration cache. For the 1419 * case of an open, the pool configuration will exist in the 1420 * POOL_STATE_UNINITIALIZED state. 1421 * 1422 * The stats information (gen/count/ustats) is used to gather vdev statistics at 1423 * the same time open the pool, without having to keep around the spa_t in some 1424 * ambiguous state. 1425 */ 1426 static int 1427 spa_open_common(const char *pool, spa_t **spapp, void *tag, nvlist_t **config) 1428 { 1429 spa_t *spa; 1430 int error; 1431 int locked = B_FALSE; 1432 1433 *spapp = NULL; 1434 1435 /* 1436 * As disgusting as this is, we need to support recursive calls to this 1437 * function because dsl_dir_open() is called during spa_load(), and ends 1438 * up calling spa_open() again. The real fix is to figure out how to 1439 * avoid dsl_dir_open() calling this in the first place. 1440 */ 1441 if (mutex_owner(&spa_namespace_lock) != curthread) { 1442 mutex_enter(&spa_namespace_lock); 1443 locked = B_TRUE; 1444 } 1445 1446 if ((spa = spa_lookup(pool)) == NULL) { 1447 if (locked) 1448 mutex_exit(&spa_namespace_lock); 1449 return (ENOENT); 1450 } 1451 if (spa->spa_state == POOL_STATE_UNINITIALIZED) { 1452 1453 spa_activate(spa); 1454 1455 error = spa_load(spa, spa->spa_config, SPA_LOAD_OPEN, B_FALSE); 1456 1457 if (error == EBADF) { 1458 /* 1459 * If vdev_validate() returns failure (indicated by 1460 * EBADF), it indicates that one of the vdevs indicates 1461 * that the pool has been exported or destroyed. If 1462 * this is the case, the config cache is out of sync and 1463 * we should remove the pool from the namespace. 1464 */ 1465 spa_unload(spa); 1466 spa_deactivate(spa); 1467 spa_config_sync(spa, B_TRUE, B_TRUE); 1468 spa_remove(spa); 1469 if (locked) 1470 mutex_exit(&spa_namespace_lock); 1471 return (ENOENT); 1472 } 1473 1474 if (error) { 1475 /* 1476 * We can't open the pool, but we still have useful 1477 * information: the state of each vdev after the 1478 * attempted vdev_open(). Return this to the user. 
1479 */ 1480 if (config != NULL && spa->spa_root_vdev != NULL) { 1481 spa_config_enter(spa, RW_READER, FTAG); 1482 *config = spa_config_generate(spa, NULL, -1ULL, 1483 B_TRUE); 1484 spa_config_exit(spa, FTAG); 1485 } 1486 spa_unload(spa); 1487 spa_deactivate(spa); 1488 spa->spa_last_open_failed = B_TRUE; 1489 if (locked) 1490 mutex_exit(&spa_namespace_lock); 1491 *spapp = NULL; 1492 return (error); 1493 } else { 1494 spa->spa_last_open_failed = B_FALSE; 1495 } 1496 } 1497 1498 spa_open_ref(spa, tag); 1499 1500 if (locked) 1501 mutex_exit(&spa_namespace_lock); 1502 1503 *spapp = spa; 1504 1505 if (config != NULL) { 1506 spa_config_enter(spa, RW_READER, FTAG); 1507 *config = spa_config_generate(spa, NULL, -1ULL, B_TRUE); 1508 spa_config_exit(spa, FTAG); 1509 } 1510 1511 return (0); 1512 } 1513 1514 int 1515 spa_open(const char *name, spa_t **spapp, void *tag) 1516 { 1517 return (spa_open_common(name, spapp, tag, NULL)); 1518 } 1519 1520 /* 1521 * Lookup the given spa_t, incrementing the inject count in the process, 1522 * preventing it from being exported or destroyed. 1523 */ 1524 spa_t * 1525 spa_inject_addref(char *name) 1526 { 1527 spa_t *spa; 1528 1529 mutex_enter(&spa_namespace_lock); 1530 if ((spa = spa_lookup(name)) == NULL) { 1531 mutex_exit(&spa_namespace_lock); 1532 return (NULL); 1533 } 1534 spa->spa_inject_ref++; 1535 mutex_exit(&spa_namespace_lock); 1536 1537 return (spa); 1538 } 1539 1540 void 1541 spa_inject_delref(spa_t *spa) 1542 { 1543 mutex_enter(&spa_namespace_lock); 1544 spa->spa_inject_ref--; 1545 mutex_exit(&spa_namespace_lock); 1546 } 1547 1548 /* 1549 * Add spares device information to the nvlist. 1550 */ 1551 static void 1552 spa_add_spares(spa_t *spa, nvlist_t *config) 1553 { 1554 nvlist_t **spares; 1555 uint_t i, nspares; 1556 nvlist_t *nvroot; 1557 uint64_t guid; 1558 vdev_stat_t *vs; 1559 uint_t vsc; 1560 uint64_t pool; 1561 1562 if (spa->spa_spares.sav_count == 0) 1563 return; 1564 1565 VERIFY(nvlist_lookup_nvlist(config, 1566 ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0); 1567 VERIFY(nvlist_lookup_nvlist_array(spa->spa_spares.sav_config, 1568 ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0); 1569 if (nspares != 0) { 1570 VERIFY(nvlist_add_nvlist_array(nvroot, 1571 ZPOOL_CONFIG_SPARES, spares, nspares) == 0); 1572 VERIFY(nvlist_lookup_nvlist_array(nvroot, 1573 ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0); 1574 1575 /* 1576 * Go through and find any spares which have since been 1577 * repurposed as an active spare. If this is the case, update 1578 * their status appropriately. 1579 */ 1580 for (i = 0; i < nspares; i++) { 1581 VERIFY(nvlist_lookup_uint64(spares[i], 1582 ZPOOL_CONFIG_GUID, &guid) == 0); 1583 if (spa_spare_exists(guid, &pool, NULL) && 1584 pool != 0ULL) { 1585 VERIFY(nvlist_lookup_uint64_array( 1586 spares[i], ZPOOL_CONFIG_STATS, 1587 (uint64_t **)&vs, &vsc) == 0); 1588 vs->vs_state = VDEV_STATE_CANT_OPEN; 1589 vs->vs_aux = VDEV_AUX_SPARED; 1590 } 1591 } 1592 } 1593 } 1594 1595 /* 1596 * Add l2cache device information to the nvlist, including vdev stats. 
1597 */ 1598 static void 1599 spa_add_l2cache(spa_t *spa, nvlist_t *config) 1600 { 1601 nvlist_t **l2cache; 1602 uint_t i, j, nl2cache; 1603 nvlist_t *nvroot; 1604 uint64_t guid; 1605 vdev_t *vd; 1606 vdev_stat_t *vs; 1607 uint_t vsc; 1608 1609 if (spa->spa_l2cache.sav_count == 0) 1610 return; 1611 1612 spa_config_enter(spa, RW_READER, FTAG); 1613 1614 VERIFY(nvlist_lookup_nvlist(config, 1615 ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0); 1616 VERIFY(nvlist_lookup_nvlist_array(spa->spa_l2cache.sav_config, 1617 ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0); 1618 if (nl2cache != 0) { 1619 VERIFY(nvlist_add_nvlist_array(nvroot, 1620 ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache) == 0); 1621 VERIFY(nvlist_lookup_nvlist_array(nvroot, 1622 ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0); 1623 1624 /* 1625 * Update level 2 cache device stats. 1626 */ 1627 1628 for (i = 0; i < nl2cache; i++) { 1629 VERIFY(nvlist_lookup_uint64(l2cache[i], 1630 ZPOOL_CONFIG_GUID, &guid) == 0); 1631 1632 vd = NULL; 1633 for (j = 0; j < spa->spa_l2cache.sav_count; j++) { 1634 if (guid == 1635 spa->spa_l2cache.sav_vdevs[j]->vdev_guid) { 1636 vd = spa->spa_l2cache.sav_vdevs[j]; 1637 break; 1638 } 1639 } 1640 ASSERT(vd != NULL); 1641 1642 VERIFY(nvlist_lookup_uint64_array(l2cache[i], 1643 ZPOOL_CONFIG_STATS, (uint64_t **)&vs, &vsc) == 0); 1644 vdev_get_stats(vd, vs); 1645 } 1646 } 1647 1648 spa_config_exit(spa, FTAG); 1649 } 1650 1651 int 1652 spa_get_stats(const char *name, nvlist_t **config, char *altroot, size_t buflen) 1653 { 1654 int error; 1655 spa_t *spa; 1656 1657 *config = NULL; 1658 error = spa_open_common(name, &spa, FTAG, config); 1659 1660 if (spa && *config != NULL) { 1661 VERIFY(nvlist_add_uint64(*config, ZPOOL_CONFIG_ERRCOUNT, 1662 spa_get_errlog_size(spa)) == 0); 1663 1664 spa_add_spares(spa, *config); 1665 spa_add_l2cache(spa, *config); 1666 } 1667 1668 /* 1669 * We want to get the alternate root even for faulted pools, so we cheat 1670 * and call spa_lookup() directly. 1671 */ 1672 if (altroot) { 1673 if (spa == NULL) { 1674 mutex_enter(&spa_namespace_lock); 1675 spa = spa_lookup(name); 1676 if (spa) 1677 spa_altroot(spa, altroot, buflen); 1678 else 1679 altroot[0] = '\0'; 1680 spa = NULL; 1681 mutex_exit(&spa_namespace_lock); 1682 } else { 1683 spa_altroot(spa, altroot, buflen); 1684 } 1685 } 1686 1687 if (spa != NULL) 1688 spa_close(spa, FTAG); 1689 1690 return (error); 1691 } 1692 1693 /* 1694 * Validate that the auxiliary device array is well formed. We must have an 1695 * array of nvlists, each which describes a valid leaf vdev. If this is an 1696 * import (mode is VDEV_ALLOC_SPARE), then we allow corrupted spares to be 1697 * specified, as long as they are well-formed. 1698 */ 1699 static int 1700 spa_validate_aux_devs(spa_t *spa, nvlist_t *nvroot, uint64_t crtxg, int mode, 1701 spa_aux_vdev_t *sav, const char *config, uint64_t version, 1702 vdev_labeltype_t label) 1703 { 1704 nvlist_t **dev; 1705 uint_t i, ndev; 1706 vdev_t *vd; 1707 int error; 1708 1709 /* 1710 * It's acceptable to have no devs specified. 1711 */ 1712 if (nvlist_lookup_nvlist_array(nvroot, config, &dev, &ndev) != 0) 1713 return (0); 1714 1715 if (ndev == 0) 1716 return (EINVAL); 1717 1718 /* 1719 * Make sure the pool is formatted with a version that supports this 1720 * device type. 1721 */ 1722 if (spa_version(spa) < version) 1723 return (ENOTSUP); 1724 1725 /* 1726 * Set the pending device list so we correctly handle device in-use 1727 * checking. 
1728 */ 1729 sav->sav_pending = dev; 1730 sav->sav_npending = ndev; 1731 1732 for (i = 0; i < ndev; i++) { 1733 if ((error = spa_config_parse(spa, &vd, dev[i], NULL, 0, 1734 mode)) != 0) 1735 goto out; 1736 1737 if (!vd->vdev_ops->vdev_op_leaf) { 1738 vdev_free(vd); 1739 error = EINVAL; 1740 goto out; 1741 } 1742 1743 /* 1744 * The L2ARC currently only supports disk devices. 1745 */ 1746 if ((strcmp(config, ZPOOL_CONFIG_L2CACHE) == 0) && 1747 strcmp(vd->vdev_ops->vdev_op_type, VDEV_TYPE_DISK) != 0) { 1748 error = ENOTBLK; 1749 goto out; 1750 } 1751 1752 vd->vdev_top = vd; 1753 1754 if ((error = vdev_open(vd)) == 0 && 1755 (error = vdev_label_init(vd, crtxg, label)) == 0) { 1756 VERIFY(nvlist_add_uint64(dev[i], ZPOOL_CONFIG_GUID, 1757 vd->vdev_guid) == 0); 1758 } 1759 1760 vdev_free(vd); 1761 1762 if (error && 1763 (mode != VDEV_ALLOC_SPARE && mode != VDEV_ALLOC_L2CACHE)) 1764 goto out; 1765 else 1766 error = 0; 1767 } 1768 1769 out: 1770 sav->sav_pending = NULL; 1771 sav->sav_npending = 0; 1772 return (error); 1773 } 1774 1775 static int 1776 spa_validate_aux(spa_t *spa, nvlist_t *nvroot, uint64_t crtxg, int mode) 1777 { 1778 int error; 1779 1780 if ((error = spa_validate_aux_devs(spa, nvroot, crtxg, mode, 1781 &spa->spa_spares, ZPOOL_CONFIG_SPARES, SPA_VERSION_SPARES, 1782 VDEV_LABEL_SPARE)) != 0) { 1783 return (error); 1784 } 1785 1786 return (spa_validate_aux_devs(spa, nvroot, crtxg, mode, 1787 &spa->spa_l2cache, ZPOOL_CONFIG_L2CACHE, SPA_VERSION_L2CACHE, 1788 VDEV_LABEL_L2CACHE)); 1789 } 1790 1791 static void 1792 spa_set_aux_vdevs(spa_aux_vdev_t *sav, nvlist_t **devs, int ndevs, 1793 const char *config) 1794 { 1795 int i; 1796 1797 if (sav->sav_config != NULL) { 1798 nvlist_t **olddevs; 1799 uint_t oldndevs; 1800 nvlist_t **newdevs; 1801 1802 /* 1803 * Generate new dev list by concatentating with the 1804 * current dev list. 1805 */ 1806 VERIFY(nvlist_lookup_nvlist_array(sav->sav_config, config, 1807 &olddevs, &oldndevs) == 0); 1808 1809 newdevs = kmem_alloc(sizeof (void *) * 1810 (ndevs + oldndevs), KM_SLEEP); 1811 for (i = 0; i < oldndevs; i++) 1812 VERIFY(nvlist_dup(olddevs[i], &newdevs[i], 1813 KM_SLEEP) == 0); 1814 for (i = 0; i < ndevs; i++) 1815 VERIFY(nvlist_dup(devs[i], &newdevs[i + oldndevs], 1816 KM_SLEEP) == 0); 1817 1818 VERIFY(nvlist_remove(sav->sav_config, config, 1819 DATA_TYPE_NVLIST_ARRAY) == 0); 1820 1821 VERIFY(nvlist_add_nvlist_array(sav->sav_config, 1822 config, newdevs, ndevs + oldndevs) == 0); 1823 for (i = 0; i < oldndevs + ndevs; i++) 1824 nvlist_free(newdevs[i]); 1825 kmem_free(newdevs, (oldndevs + ndevs) * sizeof (void *)); 1826 } else { 1827 /* 1828 * Generate a new dev list. 
1829 */ 1830 VERIFY(nvlist_alloc(&sav->sav_config, NV_UNIQUE_NAME, 1831 KM_SLEEP) == 0); 1832 VERIFY(nvlist_add_nvlist_array(sav->sav_config, config, 1833 devs, ndevs) == 0); 1834 } 1835 } 1836 1837 /* 1838 * Stop and drop level 2 ARC devices 1839 */ 1840 void 1841 spa_l2cache_drop(spa_t *spa) 1842 { 1843 vdev_t *vd; 1844 int i; 1845 spa_aux_vdev_t *sav = &spa->spa_l2cache; 1846 1847 for (i = 0; i < sav->sav_count; i++) { 1848 uint64_t pool; 1849 1850 vd = sav->sav_vdevs[i]; 1851 ASSERT(vd != NULL); 1852 1853 if (spa_mode & FWRITE && 1854 spa_l2cache_exists(vd->vdev_guid, &pool) && pool != 0ULL && 1855 l2arc_vdev_present(vd)) { 1856 l2arc_remove_vdev(vd); 1857 } 1858 if (vd->vdev_isl2cache) 1859 spa_l2cache_remove(vd); 1860 vdev_clear_stats(vd); 1861 (void) vdev_close(vd); 1862 } 1863 } 1864 1865 /* 1866 * Pool Creation 1867 */ 1868 int 1869 spa_create(const char *pool, nvlist_t *nvroot, nvlist_t *props, 1870 const char *history_str, nvlist_t *zplprops) 1871 { 1872 spa_t *spa; 1873 char *altroot = NULL; 1874 vdev_t *rvd; 1875 dsl_pool_t *dp; 1876 dmu_tx_t *tx; 1877 int c, error = 0; 1878 uint64_t txg = TXG_INITIAL; 1879 nvlist_t **spares, **l2cache; 1880 uint_t nspares, nl2cache; 1881 uint64_t version; 1882 1883 /* 1884 * If this pool already exists, return failure. 1885 */ 1886 mutex_enter(&spa_namespace_lock); 1887 if (spa_lookup(pool) != NULL) { 1888 mutex_exit(&spa_namespace_lock); 1889 return (EEXIST); 1890 } 1891 1892 /* 1893 * Allocate a new spa_t structure. 1894 */ 1895 (void) nvlist_lookup_string(props, 1896 zpool_prop_to_name(ZPOOL_PROP_ALTROOT), &altroot); 1897 spa = spa_add(pool, altroot); 1898 spa_activate(spa); 1899 1900 spa->spa_uberblock.ub_txg = txg - 1; 1901 1902 if (props && (error = spa_prop_validate(spa, props))) { 1903 spa_unload(spa); 1904 spa_deactivate(spa); 1905 spa_remove(spa); 1906 mutex_exit(&spa_namespace_lock); 1907 return (error); 1908 } 1909 1910 if (nvlist_lookup_uint64(props, zpool_prop_to_name(ZPOOL_PROP_VERSION), 1911 &version) != 0) 1912 version = SPA_VERSION; 1913 ASSERT(version <= SPA_VERSION); 1914 spa->spa_uberblock.ub_version = version; 1915 spa->spa_ubsync = spa->spa_uberblock; 1916 1917 /* 1918 * Create the root vdev. 1919 */ 1920 spa_config_enter(spa, RW_WRITER, FTAG); 1921 1922 error = spa_config_parse(spa, &rvd, nvroot, NULL, 0, VDEV_ALLOC_ADD); 1923 1924 ASSERT(error != 0 || rvd != NULL); 1925 ASSERT(error != 0 || spa->spa_root_vdev == rvd); 1926 1927 if (error == 0 && !zfs_allocatable_devs(nvroot)) 1928 error = EINVAL; 1929 1930 if (error == 0 && 1931 (error = vdev_create(rvd, txg, B_FALSE)) == 0 && 1932 (error = spa_validate_aux(spa, nvroot, txg, 1933 VDEV_ALLOC_ADD)) == 0) { 1934 for (c = 0; c < rvd->vdev_children; c++) 1935 vdev_init(rvd->vdev_child[c], txg); 1936 vdev_config_dirty(rvd); 1937 } 1938 1939 spa_config_exit(spa, FTAG); 1940 1941 if (error != 0) { 1942 spa_unload(spa); 1943 spa_deactivate(spa); 1944 spa_remove(spa); 1945 mutex_exit(&spa_namespace_lock); 1946 return (error); 1947 } 1948 1949 /* 1950 * Get the list of spares, if specified. 
1951 */ 1952 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES, 1953 &spares, &nspares) == 0) { 1954 VERIFY(nvlist_alloc(&spa->spa_spares.sav_config, NV_UNIQUE_NAME, 1955 KM_SLEEP) == 0); 1956 VERIFY(nvlist_add_nvlist_array(spa->spa_spares.sav_config, 1957 ZPOOL_CONFIG_SPARES, spares, nspares) == 0); 1958 spa_config_enter(spa, RW_WRITER, FTAG); 1959 spa_load_spares(spa); 1960 spa_config_exit(spa, FTAG); 1961 spa->spa_spares.sav_sync = B_TRUE; 1962 } 1963 1964 /* 1965 * Get the list of level 2 cache devices, if specified. 1966 */ 1967 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE, 1968 &l2cache, &nl2cache) == 0) { 1969 VERIFY(nvlist_alloc(&spa->spa_l2cache.sav_config, 1970 NV_UNIQUE_NAME, KM_SLEEP) == 0); 1971 VERIFY(nvlist_add_nvlist_array(spa->spa_l2cache.sav_config, 1972 ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache) == 0); 1973 spa_config_enter(spa, RW_WRITER, FTAG); 1974 spa_load_l2cache(spa); 1975 spa_config_exit(spa, FTAG); 1976 spa->spa_l2cache.sav_sync = B_TRUE; 1977 } 1978 1979 spa->spa_dsl_pool = dp = dsl_pool_create(spa, zplprops, txg); 1980 spa->spa_meta_objset = dp->dp_meta_objset; 1981 1982 tx = dmu_tx_create_assigned(dp, txg); 1983 1984 /* 1985 * Create the pool config object. 1986 */ 1987 spa->spa_config_object = dmu_object_alloc(spa->spa_meta_objset, 1988 DMU_OT_PACKED_NVLIST, 1 << 14, 1989 DMU_OT_PACKED_NVLIST_SIZE, sizeof (uint64_t), tx); 1990 1991 if (zap_add(spa->spa_meta_objset, 1992 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_CONFIG, 1993 sizeof (uint64_t), 1, &spa->spa_config_object, tx) != 0) { 1994 cmn_err(CE_PANIC, "failed to add pool config"); 1995 } 1996 1997 /* Newly created pools with the right version are always deflated. */ 1998 if (version >= SPA_VERSION_RAIDZ_DEFLATE) { 1999 spa->spa_deflate = TRUE; 2000 if (zap_add(spa->spa_meta_objset, 2001 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_DEFLATE, 2002 sizeof (uint64_t), 1, &spa->spa_deflate, tx) != 0) { 2003 cmn_err(CE_PANIC, "failed to add deflate"); 2004 } 2005 } 2006 2007 /* 2008 * Create the deferred-free bplist object. Turn off compression 2009 * because sync-to-convergence takes longer if the blocksize 2010 * keeps changing. 2011 */ 2012 spa->spa_sync_bplist_obj = bplist_create(spa->spa_meta_objset, 2013 1 << 14, tx); 2014 dmu_object_set_compress(spa->spa_meta_objset, spa->spa_sync_bplist_obj, 2015 ZIO_COMPRESS_OFF, tx); 2016 2017 if (zap_add(spa->spa_meta_objset, 2018 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_SYNC_BPLIST, 2019 sizeof (uint64_t), 1, &spa->spa_sync_bplist_obj, tx) != 0) { 2020 cmn_err(CE_PANIC, "failed to add bplist"); 2021 } 2022 2023 /* 2024 * Create the pool's history object. 2025 */ 2026 if (version >= SPA_VERSION_ZPOOL_HISTORY) 2027 spa_history_create_obj(spa, tx); 2028 2029 /* 2030 * Set pool properties. 2031 */ 2032 spa->spa_bootfs = zpool_prop_default_numeric(ZPOOL_PROP_BOOTFS); 2033 spa->spa_delegation = zpool_prop_default_numeric(ZPOOL_PROP_DELEGATION); 2034 spa->spa_failmode = zpool_prop_default_numeric(ZPOOL_PROP_FAILUREMODE); 2035 if (props) 2036 spa_sync_props(spa, props, CRED(), tx); 2037 2038 dmu_tx_commit(tx); 2039 2040 spa->spa_sync_on = B_TRUE; 2041 txg_sync_start(spa->spa_dsl_pool); 2042 2043 /* 2044 * We explicitly wait for the first transaction to complete so that our 2045 * bean counters are appropriately updated. 
2046 */ 2047 txg_wait_synced(spa->spa_dsl_pool, txg); 2048 2049 spa_config_sync(spa, B_FALSE, B_TRUE); 2050 2051 if (version >= SPA_VERSION_ZPOOL_HISTORY && history_str != NULL) 2052 (void) spa_history_log(spa, history_str, LOG_CMD_POOL_CREATE); 2053 2054 mutex_exit(&spa_namespace_lock); 2055 2056 spa->spa_minref = refcount_count(&spa->spa_refcount); 2057 2058 return (0); 2059 } 2060 2061 /* 2062 * Import the given pool into the system. We set up the necessary spa_t and 2063 * then call spa_load() to do the dirty work. 2064 */ 2065 static int 2066 spa_import_common(const char *pool, nvlist_t *config, nvlist_t *props, 2067 boolean_t isroot, boolean_t allowfaulted) 2068 { 2069 spa_t *spa; 2070 char *altroot = NULL; 2071 int error, loaderr; 2072 nvlist_t *nvroot; 2073 nvlist_t **spares, **l2cache; 2074 uint_t nspares, nl2cache; 2075 2076 /* 2077 * If a pool with this name exists, return failure. 2078 */ 2079 mutex_enter(&spa_namespace_lock); 2080 if (spa_lookup(pool) != NULL) { 2081 mutex_exit(&spa_namespace_lock); 2082 return (EEXIST); 2083 } 2084 2085 /* 2086 * Create and initialize the spa structure. 2087 */ 2088 (void) nvlist_lookup_string(props, 2089 zpool_prop_to_name(ZPOOL_PROP_ALTROOT), &altroot); 2090 spa = spa_add(pool, altroot); 2091 spa_activate(spa); 2092 2093 if (allowfaulted) 2094 spa->spa_import_faulted = B_TRUE; 2095 spa->spa_is_root = isroot; 2096 2097 /* 2098 * Pass off the heavy lifting to spa_load(). 2099 * Pass TRUE for mosconfig (unless this is a root pool) because 2100 * the user-supplied config is actually the one to trust when 2101 * doing an import. 2102 */ 2103 loaderr = error = spa_load(spa, config, SPA_LOAD_IMPORT, !isroot); 2104 2105 spa_config_enter(spa, RW_WRITER, FTAG); 2106 /* 2107 * Toss any existing sparelist, as it doesn't have any validity anymore, 2108 * and conflicts with spa_has_spare(). 2109 */ 2110 if (!isroot && spa->spa_spares.sav_config) { 2111 nvlist_free(spa->spa_spares.sav_config); 2112 spa->spa_spares.sav_config = NULL; 2113 spa_load_spares(spa); 2114 } 2115 if (!isroot && spa->spa_l2cache.sav_config) { 2116 nvlist_free(spa->spa_l2cache.sav_config); 2117 spa->spa_l2cache.sav_config = NULL; 2118 spa_load_l2cache(spa); 2119 } 2120 2121 VERIFY(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, 2122 &nvroot) == 0); 2123 if (error == 0) 2124 error = spa_validate_aux(spa, nvroot, -1ULL, VDEV_ALLOC_SPARE); 2125 if (error == 0) 2126 error = spa_validate_aux(spa, nvroot, -1ULL, 2127 VDEV_ALLOC_L2CACHE); 2128 spa_config_exit(spa, FTAG); 2129 2130 if (error != 0 || (props && (error = spa_prop_set(spa, props)))) { 2131 if (loaderr != 0 && loaderr != EINVAL && allowfaulted) { 2132 /* 2133 * If we failed to load the pool, but 'allowfaulted' is 2134 * set, then manually set the config as if the config 2135 * passed in was specified in the cache file. 2136 */ 2137 error = 0; 2138 spa->spa_import_faulted = B_FALSE; 2139 if (spa->spa_config == NULL) { 2140 spa_config_enter(spa, RW_READER, FTAG); 2141 spa->spa_config = spa_config_generate(spa, 2142 NULL, -1ULL, B_TRUE); 2143 spa_config_exit(spa, FTAG); 2144 } 2145 spa_unload(spa); 2146 spa_deactivate(spa); 2147 spa_config_sync(spa, B_FALSE, B_TRUE); 2148 } else { 2149 spa_unload(spa); 2150 spa_deactivate(spa); 2151 spa_remove(spa); 2152 } 2153 mutex_exit(&spa_namespace_lock); 2154 return (error); 2155 } 2156 2157 /* 2158 * Override any spares and level 2 cache devices as specified by 2159 * the user, as these may have correct device names/devids, etc. 
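* Any sparelist or l2cache list already in sav_config is replaced wholesale with the arrays from the supplied nvroot and reloaded under the config lock, with sav_sync set so the change is persisted.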
2160 */ 2161 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES, 2162 &spares, &nspares) == 0) { 2163 if (spa->spa_spares.sav_config) 2164 VERIFY(nvlist_remove(spa->spa_spares.sav_config, 2165 ZPOOL_CONFIG_SPARES, DATA_TYPE_NVLIST_ARRAY) == 0); 2166 else 2167 VERIFY(nvlist_alloc(&spa->spa_spares.sav_config, 2168 NV_UNIQUE_NAME, KM_SLEEP) == 0); 2169 VERIFY(nvlist_add_nvlist_array(spa->spa_spares.sav_config, 2170 ZPOOL_CONFIG_SPARES, spares, nspares) == 0); 2171 spa_config_enter(spa, RW_WRITER, FTAG); 2172 spa_load_spares(spa); 2173 spa_config_exit(spa, FTAG); 2174 spa->spa_spares.sav_sync = B_TRUE; 2175 } 2176 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE, 2177 &l2cache, &nl2cache) == 0) { 2178 if (spa->spa_l2cache.sav_config) 2179 VERIFY(nvlist_remove(spa->spa_l2cache.sav_config, 2180 ZPOOL_CONFIG_L2CACHE, DATA_TYPE_NVLIST_ARRAY) == 0); 2181 else 2182 VERIFY(nvlist_alloc(&spa->spa_l2cache.sav_config, 2183 NV_UNIQUE_NAME, KM_SLEEP) == 0); 2184 VERIFY(nvlist_add_nvlist_array(spa->spa_l2cache.sav_config, 2185 ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache) == 0); 2186 spa_config_enter(spa, RW_WRITER, FTAG); 2187 spa_load_l2cache(spa); 2188 spa_config_exit(spa, FTAG); 2189 spa->spa_l2cache.sav_sync = B_TRUE; 2190 } 2191 2192 if (spa_mode & FWRITE) { 2193 /* 2194 * Update the config cache to include the newly-imported pool. 2195 */ 2196 spa_config_update_common(spa, SPA_CONFIG_UPDATE_POOL, isroot); 2197 } 2198 2199 spa->spa_import_faulted = B_FALSE; 2200 mutex_exit(&spa_namespace_lock); 2201 2202 return (0); 2203 } 2204 2205 #ifdef _KERNEL 2206 /* 2207 * Build a "root" vdev for a top level vdev read in from a rootpool 2208 * device label. 2209 */ 2210 static void 2211 spa_build_rootpool_config(nvlist_t *config) 2212 { 2213 nvlist_t *nvtop, *nvroot; 2214 uint64_t pgid; 2215 2216 /* 2217 * Add this top-level vdev to the child array. 2218 */ 2219 VERIFY(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &nvtop) 2220 == 0); 2221 VERIFY(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID, &pgid) 2222 == 0); 2223 2224 /* 2225 * Put this pool's top-level vdevs into a root vdev. 2226 */ 2227 VERIFY(nvlist_alloc(&nvroot, NV_UNIQUE_NAME, KM_SLEEP) == 0); 2228 VERIFY(nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE, VDEV_TYPE_ROOT) 2229 == 0); 2230 VERIFY(nvlist_add_uint64(nvroot, ZPOOL_CONFIG_ID, 0ULL) == 0); 2231 VERIFY(nvlist_add_uint64(nvroot, ZPOOL_CONFIG_GUID, pgid) == 0); 2232 VERIFY(nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN, 2233 &nvtop, 1) == 0); 2234 2235 /* 2236 * Replace the existing vdev_tree with the new root vdev in 2237 * this pool's configuration (remove the old, add the new). 2238 */ 2239 VERIFY(nvlist_add_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, nvroot) == 0); 2240 nvlist_free(nvroot); 2241 } 2242 2243 /* 2244 * Get the root pool information from the root disk, then import the root pool 2245 * during the system boot up time. 
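* spa_check_rootconf() below reads the device's label via vdev_disk_read_rootlabel() and hands back that label's config and pool txg.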
2246 */ 2247 extern nvlist_t *vdev_disk_read_rootlabel(char *, char *); 2248 2249 int 2250 spa_check_rootconf(char *devpath, char *devid, nvlist_t **bestconf, 2251 uint64_t *besttxg) 2252 { 2253 nvlist_t *config; 2254 uint64_t txg; 2255 2256 if ((config = vdev_disk_read_rootlabel(devpath, devid)) == NULL) 2257 return (-1); 2258 2259 VERIFY(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_TXG, &txg) == 0); 2260 2261 if (bestconf != NULL) 2262 *bestconf = config; 2263 *besttxg = txg; 2264 return (0); 2265 } 2266 2267 boolean_t 2268 spa_rootdev_validate(nvlist_t *nv) 2269 { 2270 uint64_t ival; 2271 2272 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_OFFLINE, &ival) == 0 || 2273 nvlist_lookup_uint64(nv, ZPOOL_CONFIG_FAULTED, &ival) == 0 || 2274 nvlist_lookup_uint64(nv, ZPOOL_CONFIG_REMOVED, &ival) == 0) 2275 return (B_FALSE); 2276 2277 return (B_TRUE); 2278 } 2279 2280 2281 /* 2282 * Given the boot device's physical path or devid, check if the device 2283 * is in a valid state. If so, return the configuration from the vdev 2284 * label. 2285 */ 2286 int 2287 spa_get_rootconf(char *devpath, char *devid, nvlist_t **bestconf) 2288 { 2289 nvlist_t *conf = NULL; 2290 uint64_t txg = 0; 2291 nvlist_t *nvtop, **child; 2292 char *type; 2293 char *bootpath = NULL; 2294 uint_t children, c; 2295 char *tmp; 2296 2297 if (devpath && ((tmp = strchr(devpath, ' ')) != NULL)) 2298 *tmp = '\0'; 2299 if (spa_check_rootconf(devpath, devid, &conf, &txg) < 0) { 2300 cmn_err(CE_NOTE, "error reading device label"); 2301 nvlist_free(conf); 2302 return (EINVAL); 2303 } 2304 if (txg == 0) { 2305 cmn_err(CE_NOTE, "this device is detached"); 2306 nvlist_free(conf); 2307 return (EINVAL); 2308 } 2309 2310 VERIFY(nvlist_lookup_nvlist(conf, ZPOOL_CONFIG_VDEV_TREE, 2311 &nvtop) == 0); 2312 VERIFY(nvlist_lookup_string(nvtop, ZPOOL_CONFIG_TYPE, &type) == 0); 2313 2314 if (strcmp(type, VDEV_TYPE_DISK) == 0) { 2315 if (spa_rootdev_validate(nvtop)) { 2316 goto out; 2317 } else { 2318 nvlist_free(conf); 2319 return (EINVAL); 2320 } 2321 } 2322 2323 ASSERT(strcmp(type, VDEV_TYPE_MIRROR) == 0); 2324 2325 VERIFY(nvlist_lookup_nvlist_array(nvtop, ZPOOL_CONFIG_CHILDREN, 2326 &child, &children) == 0); 2327 2328 /* 2329 * Go thru vdevs in the mirror to see if the given device 2330 * has the most recent txg. Only the device with the most 2331 * recent txg has valid information and should be booted. 2332 */ 2333 for (c = 0; c < children; c++) { 2334 char *cdevid, *cpath; 2335 uint64_t tmptxg; 2336 2337 if (nvlist_lookup_string(child[c], ZPOOL_CONFIG_PHYS_PATH, 2338 &cpath) != 0) 2339 return (EINVAL); 2340 if (nvlist_lookup_string(child[c], ZPOOL_CONFIG_DEVID, 2341 &cdevid) != 0) 2342 return (EINVAL); 2343 if ((spa_check_rootconf(cpath, cdevid, NULL, 2344 &tmptxg) == 0) && (tmptxg > txg)) { 2345 txg = tmptxg; 2346 VERIFY(nvlist_lookup_string(child[c], 2347 ZPOOL_CONFIG_PATH, &bootpath) == 0); 2348 } 2349 } 2350 2351 /* Does the best device match the one we've booted from? */ 2352 if (bootpath) { 2353 cmn_err(CE_NOTE, "try booting from '%s'", bootpath); 2354 return (EINVAL); 2355 } 2356 out: 2357 *bestconf = conf; 2358 return (0); 2359 } 2360 2361 /* 2362 * Import a root pool. 2363 * 2364 * For x86. devpath_list will consist of devid and/or physpath name of 2365 * the vdev (e.g. "id1,sd@SSEAGATE..." or "/pci@1f,0/ide@d/disk@0,0:a"). 2366 * The GRUB "findroot" command will return the vdev we should boot. 
2367 * 2368 * For Sparc, devpath_list consists the physpath name of the booting device 2369 * no matter the rootpool is a single device pool or a mirrored pool. 2370 * e.g. 2371 * "/pci@1f,0/ide@d/disk@0,0:a" 2372 */ 2373 int 2374 spa_import_rootpool(char *devpath, char *devid) 2375 { 2376 nvlist_t *conf = NULL; 2377 char *pname; 2378 int error; 2379 2380 /* 2381 * Get the vdev pathname and configuation from the most 2382 * recently updated vdev (highest txg). 2383 */ 2384 if (error = spa_get_rootconf(devpath, devid, &conf)) 2385 goto msg_out; 2386 2387 /* 2388 * Add type "root" vdev to the config. 2389 */ 2390 spa_build_rootpool_config(conf); 2391 2392 VERIFY(nvlist_lookup_string(conf, ZPOOL_CONFIG_POOL_NAME, &pname) == 0); 2393 2394 /* 2395 * We specify 'allowfaulted' for this to be treated like spa_open() 2396 * instead of spa_import(). This prevents us from marking vdevs as 2397 * persistently unavailable, and generates FMA ereports as if it were a 2398 * pool open, not import. 2399 */ 2400 error = spa_import_common(pname, conf, NULL, B_TRUE, B_TRUE); 2401 if (error == EEXIST) 2402 error = 0; 2403 2404 nvlist_free(conf); 2405 return (error); 2406 2407 msg_out: 2408 cmn_err(CE_NOTE, "\n" 2409 " *************************************************** \n" 2410 " * This device is not bootable! * \n" 2411 " * It is either offlined or detached or faulted. * \n" 2412 " * Please try to boot from a different device. * \n" 2413 " *************************************************** "); 2414 2415 return (error); 2416 } 2417 #endif 2418 2419 /* 2420 * Import a non-root pool into the system. 2421 */ 2422 int 2423 spa_import(const char *pool, nvlist_t *config, nvlist_t *props) 2424 { 2425 return (spa_import_common(pool, config, props, B_FALSE, B_FALSE)); 2426 } 2427 2428 int 2429 spa_import_faulted(const char *pool, nvlist_t *config, nvlist_t *props) 2430 { 2431 return (spa_import_common(pool, config, props, B_FALSE, B_TRUE)); 2432 } 2433 2434 2435 /* 2436 * This (illegal) pool name is used when temporarily importing a spa_t in order 2437 * to get the vdev stats associated with the imported devices. 2438 */ 2439 #define TRYIMPORT_NAME "$import" 2440 2441 nvlist_t * 2442 spa_tryimport(nvlist_t *tryconfig) 2443 { 2444 nvlist_t *config = NULL; 2445 char *poolname; 2446 spa_t *spa; 2447 uint64_t state; 2448 2449 if (nvlist_lookup_string(tryconfig, ZPOOL_CONFIG_POOL_NAME, &poolname)) 2450 return (NULL); 2451 2452 if (nvlist_lookup_uint64(tryconfig, ZPOOL_CONFIG_POOL_STATE, &state)) 2453 return (NULL); 2454 2455 /* 2456 * Create and initialize the spa structure. 2457 */ 2458 mutex_enter(&spa_namespace_lock); 2459 spa = spa_add(TRYIMPORT_NAME, NULL); 2460 spa_activate(spa); 2461 2462 /* 2463 * Pass off the heavy lifting to spa_load(). 2464 * Pass TRUE for mosconfig because the user-supplied config 2465 * is actually the one to trust when doing an import. 2466 */ 2467 (void) spa_load(spa, tryconfig, SPA_LOAD_TRYIMPORT, B_TRUE); 2468 2469 /* 2470 * If 'tryconfig' was at least parsable, return the current config. 
2471 */ 2472 if (spa->spa_root_vdev != NULL) { 2473 spa_config_enter(spa, RW_READER, FTAG); 2474 config = spa_config_generate(spa, NULL, -1ULL, B_TRUE); 2475 spa_config_exit(spa, FTAG); 2476 VERIFY(nvlist_add_string(config, ZPOOL_CONFIG_POOL_NAME, 2477 poolname) == 0); 2478 VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_POOL_STATE, 2479 state) == 0); 2480 VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_TIMESTAMP, 2481 spa->spa_uberblock.ub_timestamp) == 0); 2482 2483 /* 2484 * If the bootfs property exists on this pool then we 2485 * copy it out so that external consumers can tell which 2486 * pools are bootable. 2487 */ 2488 if (spa->spa_bootfs) { 2489 char *tmpname = kmem_alloc(MAXPATHLEN, KM_SLEEP); 2490 2491 /* 2492 * We have to play games with the name since the 2493 * pool was opened as TRYIMPORT_NAME. 2494 */ 2495 if (dsl_dsobj_to_dsname(spa->spa_name, 2496 spa->spa_bootfs, tmpname) == 0) { 2497 char *cp; 2498 char *dsname = kmem_alloc(MAXPATHLEN, KM_SLEEP); 2499 2500 cp = strchr(tmpname, '/'); 2501 if (cp == NULL) { 2502 (void) strlcpy(dsname, tmpname, 2503 MAXPATHLEN); 2504 } else { 2505 (void) snprintf(dsname, MAXPATHLEN, 2506 "%s/%s", poolname, ++cp); 2507 } 2508 VERIFY(nvlist_add_string(config, 2509 ZPOOL_CONFIG_BOOTFS, dsname) == 0); 2510 kmem_free(dsname, MAXPATHLEN); 2511 } 2512 kmem_free(tmpname, MAXPATHLEN); 2513 } 2514 2515 /* 2516 * Add the list of hot spares and level 2 cache devices. 2517 */ 2518 spa_add_spares(spa, config); 2519 spa_add_l2cache(spa, config); 2520 } 2521 2522 spa_unload(spa); 2523 spa_deactivate(spa); 2524 spa_remove(spa); 2525 mutex_exit(&spa_namespace_lock); 2526 2527 return (config); 2528 } 2529 2530 /* 2531 * Pool export/destroy 2532 * 2533 * The act of destroying or exporting a pool is very simple. We make sure there 2534 * is no more pending I/O and any references to the pool are gone. Then, we 2535 * update the pool state and sync all the labels to disk, removing the 2536 * configuration from the cache afterwards. 2537 */ 2538 static int 2539 spa_export_common(char *pool, int new_state, nvlist_t **oldconfig, 2540 boolean_t force) 2541 { 2542 spa_t *spa; 2543 2544 if (oldconfig) 2545 *oldconfig = NULL; 2546 2547 if (!(spa_mode & FWRITE)) 2548 return (EROFS); 2549 2550 mutex_enter(&spa_namespace_lock); 2551 if ((spa = spa_lookup(pool)) == NULL) { 2552 mutex_exit(&spa_namespace_lock); 2553 return (ENOENT); 2554 } 2555 2556 /* 2557 * Put a hold on the pool, drop the namespace lock, stop async tasks, 2558 * reacquire the namespace lock, and see if we can export. 2559 */ 2560 spa_open_ref(spa, FTAG); 2561 mutex_exit(&spa_namespace_lock); 2562 spa_async_suspend(spa); 2563 mutex_enter(&spa_namespace_lock); 2564 spa_close(spa, FTAG); 2565 2566 /* 2567 * The pool will be in core if it's openable, 2568 * in which case we can modify its state. 2569 */ 2570 if (spa->spa_state != POOL_STATE_UNINITIALIZED && spa->spa_sync_on) { 2571 /* 2572 * Objsets may be open only because they're dirty, so we 2573 * have to force it to sync before checking spa_refcnt. 2574 */ 2575 txg_wait_synced(spa->spa_dsl_pool, 0); 2576 2577 /* 2578 * A pool cannot be exported or destroyed if there are active 2579 * references. If we are resetting a pool, allow references by 2580 * fault injection handlers. 
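* (A reset passes new_state == POOL_STATE_UNINITIALIZED, so an outstanding spa_inject_ref does not fail the check below.)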
2581 */ 2582 if (!spa_refcount_zero(spa) || 2583 (spa->spa_inject_ref != 0 && 2584 new_state != POOL_STATE_UNINITIALIZED)) { 2585 spa_async_resume(spa); 2586 mutex_exit(&spa_namespace_lock); 2587 return (EBUSY); 2588 } 2589 2590 /* 2591 * A pool cannot be exported if it has an active shared spare. 2592 * This is to prevent other pools from stealing the active spare 2593 * from an exported pool. The user can still force the export 2594 * of such a pool. 2595 */ 2596 if (!force && new_state == POOL_STATE_EXPORTED && 2597 spa_has_active_shared_spare(spa)) { 2598 spa_async_resume(spa); 2599 mutex_exit(&spa_namespace_lock); 2600 return (EXDEV); 2601 } 2602 2603 /* 2604 * We want this to be reflected on every label, 2605 * so mark them all dirty. spa_unload() will do the 2606 * final sync that pushes these changes out. 2607 */ 2608 if (new_state != POOL_STATE_UNINITIALIZED) { 2609 spa_config_enter(spa, RW_WRITER, FTAG); 2610 spa->spa_state = new_state; 2611 spa->spa_final_txg = spa_last_synced_txg(spa) + 1; 2612 vdev_config_dirty(spa->spa_root_vdev); 2613 spa_config_exit(spa, FTAG); 2614 } 2615 } 2616 2617 spa_event_notify(spa, NULL, ESC_ZFS_POOL_DESTROY); 2618 2619 if (spa->spa_state != POOL_STATE_UNINITIALIZED) { 2620 spa_unload(spa); 2621 spa_deactivate(spa); 2622 } 2623 2624 if (oldconfig && spa->spa_config) 2625 VERIFY(nvlist_dup(spa->spa_config, oldconfig, 0) == 0); 2626 2627 if (new_state != POOL_STATE_UNINITIALIZED) { 2628 spa_config_sync(spa, B_TRUE, B_TRUE); 2629 spa_remove(spa); 2630 } 2631 mutex_exit(&spa_namespace_lock); 2632 2633 return (0); 2634 } 2635 2636 /* 2637 * Destroy a storage pool. 2638 */ 2639 int 2640 spa_destroy(char *pool) 2641 { 2642 return (spa_export_common(pool, POOL_STATE_DESTROYED, NULL, B_FALSE)); 2643 } 2644 2645 /* 2646 * Export a storage pool. 2647 */ 2648 int 2649 spa_export(char *pool, nvlist_t **oldconfig, boolean_t force) 2650 { 2651 return (spa_export_common(pool, POOL_STATE_EXPORTED, oldconfig, force)); 2652 } 2653 2654 /* 2655 * Similar to spa_export(), this unloads the spa_t without actually removing it 2656 * from the namespace in any way. 2657 */ 2658 int 2659 spa_reset(char *pool) 2660 { 2661 return (spa_export_common(pool, POOL_STATE_UNINITIALIZED, NULL, 2662 B_FALSE)); 2663 } 2664 2665 /* 2666 * ========================================================================== 2667 * Device manipulation 2668 * ========================================================================== 2669 */ 2670 2671 /* 2672 * Add a device to a storage pool.
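* The new top-level vdevs described by nvroot are created and transferred under the root vdev; metaslab initialization is deferred until spa_config_update() runs after the config has been synced (see the comment at the end of this function).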
2673 */ 2674 int 2675 spa_vdev_add(spa_t *spa, nvlist_t *nvroot) 2676 { 2677 uint64_t txg; 2678 int c, error; 2679 vdev_t *rvd = spa->spa_root_vdev; 2680 vdev_t *vd, *tvd; 2681 nvlist_t **spares, **l2cache; 2682 uint_t nspares, nl2cache; 2683 2684 txg = spa_vdev_enter(spa); 2685 2686 if ((error = spa_config_parse(spa, &vd, nvroot, NULL, 0, 2687 VDEV_ALLOC_ADD)) != 0) 2688 return (spa_vdev_exit(spa, NULL, txg, error)); 2689 2690 spa->spa_pending_vdev = vd; 2691 2692 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES, &spares, 2693 &nspares) != 0) 2694 nspares = 0; 2695 2696 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE, &l2cache, 2697 &nl2cache) != 0) 2698 nl2cache = 0; 2699 2700 if (vd->vdev_children == 0 && nspares == 0 && nl2cache == 0) { 2701 spa->spa_pending_vdev = NULL; 2702 return (spa_vdev_exit(spa, vd, txg, EINVAL)); 2703 } 2704 2705 if (vd->vdev_children != 0) { 2706 if ((error = vdev_create(vd, txg, B_FALSE)) != 0) { 2707 spa->spa_pending_vdev = NULL; 2708 return (spa_vdev_exit(spa, vd, txg, error)); 2709 } 2710 } 2711 2712 /* 2713 * We must validate the spares and l2cache devices after checking the 2714 * children. Otherwise, vdev_inuse() will blindly overwrite the spare. 2715 */ 2716 if ((error = spa_validate_aux(spa, nvroot, txg, VDEV_ALLOC_ADD)) != 0) { 2717 spa->spa_pending_vdev = NULL; 2718 return (spa_vdev_exit(spa, vd, txg, error)); 2719 } 2720 2721 spa->spa_pending_vdev = NULL; 2722 2723 /* 2724 * Transfer each new top-level vdev from vd to rvd. 2725 */ 2726 for (c = 0; c < vd->vdev_children; c++) { 2727 tvd = vd->vdev_child[c]; 2728 vdev_remove_child(vd, tvd); 2729 tvd->vdev_id = rvd->vdev_children; 2730 vdev_add_child(rvd, tvd); 2731 vdev_config_dirty(tvd); 2732 } 2733 2734 if (nspares != 0) { 2735 spa_set_aux_vdevs(&spa->spa_spares, spares, nspares, 2736 ZPOOL_CONFIG_SPARES); 2737 spa_load_spares(spa); 2738 spa->spa_spares.sav_sync = B_TRUE; 2739 } 2740 2741 if (nl2cache != 0) { 2742 spa_set_aux_vdevs(&spa->spa_l2cache, l2cache, nl2cache, 2743 ZPOOL_CONFIG_L2CACHE); 2744 spa_load_l2cache(spa); 2745 spa->spa_l2cache.sav_sync = B_TRUE; 2746 } 2747 2748 /* 2749 * We have to be careful when adding new vdevs to an existing pool. 2750 * If other threads start allocating from these vdevs before we 2751 * sync the config cache, and we lose power, then upon reboot we may 2752 * fail to open the pool because there are DVAs that the config cache 2753 * can't translate. Therefore, we first add the vdevs without 2754 * initializing metaslabs; sync the config cache (via spa_vdev_exit()); 2755 * and then let spa_config_update() initialize the new metaslabs. 2756 * 2757 * spa_load() checks for added-but-not-initialized vdevs, so that 2758 * if we lose power at any point in this sequence, the remaining 2759 * steps will be completed the next time we load the pool. 2760 */ 2761 (void) spa_vdev_exit(spa, vd, txg, 0); 2762 2763 mutex_enter(&spa_namespace_lock); 2764 spa_config_update(spa, SPA_CONFIG_UPDATE_POOL); 2765 mutex_exit(&spa_namespace_lock); 2766 2767 return (0); 2768 } 2769 2770 /* 2771 * Attach a device to a mirror. The arguments are the path to any device 2772 * in the mirror, and the nvroot for the new device. If the path specifies 2773 * a device that is not mirrored, we automatically insert the mirror vdev. 
2774 * 2775 * If 'replacing' is specified, the new device is intended to replace the 2776 * existing device; in this case the two devices are made into their own 2777 * mirror using the 'replacing' vdev, which is functionally identical to 2778 * the mirror vdev (it actually reuses all the same ops) but has a few 2779 * extra rules: you can't attach to it after it's been created, and upon 2780 * completion of resilvering, the first disk (the one being replaced) 2781 * is automatically detached. 2782 */ 2783 int 2784 spa_vdev_attach(spa_t *spa, uint64_t guid, nvlist_t *nvroot, int replacing) 2785 { 2786 uint64_t txg, open_txg; 2787 vdev_t *rvd = spa->spa_root_vdev; 2788 vdev_t *oldvd, *newvd, *newrootvd, *pvd, *tvd; 2789 vdev_ops_t *pvops; 2790 dmu_tx_t *tx; 2791 char *oldvdpath, *newvdpath; 2792 int newvd_isspare; 2793 int error; 2794 2795 txg = spa_vdev_enter(spa); 2796 2797 oldvd = spa_lookup_by_guid(spa, guid, B_FALSE); 2798 2799 if (oldvd == NULL) 2800 return (spa_vdev_exit(spa, NULL, txg, ENODEV)); 2801 2802 if (!oldvd->vdev_ops->vdev_op_leaf) 2803 return (spa_vdev_exit(spa, NULL, txg, ENOTSUP)); 2804 2805 pvd = oldvd->vdev_parent; 2806 2807 if ((error = spa_config_parse(spa, &newrootvd, nvroot, NULL, 0, 2808 VDEV_ALLOC_ADD)) != 0) 2809 return (spa_vdev_exit(spa, NULL, txg, EINVAL)); 2810 2811 if (newrootvd->vdev_children != 1) 2812 return (spa_vdev_exit(spa, newrootvd, txg, EINVAL)); 2813 2814 newvd = newrootvd->vdev_child[0]; 2815 2816 if (!newvd->vdev_ops->vdev_op_leaf) 2817 return (spa_vdev_exit(spa, newrootvd, txg, EINVAL)); 2818 2819 if ((error = vdev_create(newrootvd, txg, replacing)) != 0) 2820 return (spa_vdev_exit(spa, newrootvd, txg, error)); 2821 2822 /* 2823 * Spares can't replace logs 2824 */ 2825 if (oldvd->vdev_top->vdev_islog && newvd->vdev_isspare) 2826 return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP)); 2827 2828 if (!replacing) { 2829 /* 2830 * For attach, the only allowable parent is a mirror or the root 2831 * vdev. 2832 */ 2833 if (pvd->vdev_ops != &vdev_mirror_ops && 2834 pvd->vdev_ops != &vdev_root_ops) 2835 return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP)); 2836 2837 pvops = &vdev_mirror_ops; 2838 } else { 2839 /* 2840 * Active hot spares can only be replaced by inactive hot 2841 * spares. 2842 */ 2843 if (pvd->vdev_ops == &vdev_spare_ops && 2844 pvd->vdev_child[1] == oldvd && 2845 !spa_has_spare(spa, newvd->vdev_guid)) 2846 return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP)); 2847 2848 /* 2849 * If the source is a hot spare, and the parent isn't already a 2850 * spare, then we want to create a new hot spare. Otherwise, we 2851 * want to create a replacing vdev. The user is not allowed to 2852 * attach to a spared vdev child unless the 'isspare' state is 2853 * the same (spare replaces spare, non-spare replaces 2854 * non-spare). 2855 */ 2856 if (pvd->vdev_ops == &vdev_replacing_ops) 2857 return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP)); 2858 else if (pvd->vdev_ops == &vdev_spare_ops && 2859 newvd->vdev_isspare != oldvd->vdev_isspare) 2860 return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP)); 2861 else if (pvd->vdev_ops != &vdev_spare_ops && 2862 newvd->vdev_isspare) 2863 pvops = &vdev_spare_ops; 2864 else 2865 pvops = &vdev_replacing_ops; 2866 } 2867 2868 /* 2869 * Compare the new device size with the replaceable/attachable 2870 * device size. 
2871 */ 2872 if (newvd->vdev_psize < vdev_get_rsize(oldvd)) 2873 return (spa_vdev_exit(spa, newrootvd, txg, EOVERFLOW)); 2874 2875 /* 2876 * The new device cannot have a higher alignment requirement 2877 * than the top-level vdev. 2878 */ 2879 if (newvd->vdev_ashift > oldvd->vdev_top->vdev_ashift) 2880 return (spa_vdev_exit(spa, newrootvd, txg, EDOM)); 2881 2882 /* 2883 * If this is an in-place replacement, update oldvd's path and devid 2884 * to make it distinguishable from newvd, and unopenable from now on. 2885 */ 2886 if (strcmp(oldvd->vdev_path, newvd->vdev_path) == 0) { 2887 spa_strfree(oldvd->vdev_path); 2888 oldvd->vdev_path = kmem_alloc(strlen(newvd->vdev_path) + 5, 2889 KM_SLEEP); 2890 (void) sprintf(oldvd->vdev_path, "%s/%s", 2891 newvd->vdev_path, "old"); 2892 if (oldvd->vdev_devid != NULL) { 2893 spa_strfree(oldvd->vdev_devid); 2894 oldvd->vdev_devid = NULL; 2895 } 2896 } 2897 2898 /* 2899 * If the parent is not a mirror, or if we're replacing, insert the new 2900 * mirror/replacing/spare vdev above oldvd. 2901 */ 2902 if (pvd->vdev_ops != pvops) 2903 pvd = vdev_add_parent(oldvd, pvops); 2904 2905 ASSERT(pvd->vdev_top->vdev_parent == rvd); 2906 ASSERT(pvd->vdev_ops == pvops); 2907 ASSERT(oldvd->vdev_parent == pvd); 2908 2909 /* 2910 * Extract the new device from its root and add it to pvd. 2911 */ 2912 vdev_remove_child(newrootvd, newvd); 2913 newvd->vdev_id = pvd->vdev_children; 2914 vdev_add_child(pvd, newvd); 2915 2916 /* 2917 * If newvd is smaller than oldvd, but larger than its rsize, 2918 * the addition of newvd may have decreased our parent's asize. 2919 */ 2920 pvd->vdev_asize = MIN(pvd->vdev_asize, newvd->vdev_asize); 2921 2922 tvd = newvd->vdev_top; 2923 ASSERT(pvd->vdev_top == tvd); 2924 ASSERT(tvd->vdev_parent == rvd); 2925 2926 vdev_config_dirty(tvd); 2927 2928 /* 2929 * Set newvd's DTL to [TXG_INITIAL, open_txg]. It will propagate 2930 * upward when spa_vdev_exit() calls vdev_dtl_reassess(). 2931 */ 2932 open_txg = txg + TXG_CONCURRENT_STATES - 1; 2933 2934 mutex_enter(&newvd->vdev_dtl_lock); 2935 space_map_add(&newvd->vdev_dtl_map, TXG_INITIAL, 2936 open_txg - TXG_INITIAL + 1); 2937 mutex_exit(&newvd->vdev_dtl_lock); 2938 2939 if (newvd->vdev_isspare) 2940 spa_spare_activate(newvd); 2941 oldvdpath = spa_strdup(vdev_description(oldvd)); 2942 newvdpath = spa_strdup(vdev_description(newvd)); 2943 newvd_isspare = newvd->vdev_isspare; 2944 2945 /* 2946 * Mark newvd's DTL dirty in this txg. 2947 */ 2948 vdev_dirty(tvd, VDD_DTL, newvd, txg); 2949 2950 (void) spa_vdev_exit(spa, newrootvd, open_txg, 0); 2951 2952 tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir); 2953 if (dmu_tx_assign(tx, TXG_WAIT) == 0) { 2954 spa_history_internal_log(LOG_POOL_VDEV_ATTACH, spa, tx, 2955 CRED(), "%s vdev=%s %s vdev=%s", 2956 replacing && newvd_isspare ? "spare in" : 2957 replacing ? "replace" : "attach", newvdpath, 2958 replacing ? "for" : "to", oldvdpath); 2959 dmu_tx_commit(tx); 2960 } else { 2961 dmu_tx_abort(tx); 2962 } 2963 2964 spa_strfree(oldvdpath); 2965 spa_strfree(newvdpath); 2966 2967 /* 2968 * Kick off a resilver to update newvd. 2969 */ 2970 VERIFY3U(spa_scrub(spa, POOL_SCRUB_RESILVER), ==, 0); 2971 2972 return (0); 2973 } 2974 2975 /* 2976 * Detach a device from a mirror or replacing vdev. 2977 * If 'replace_done' is specified, only detach if the parent 2978 * is a replacing vdev. 
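* On success the detached vdev's labels are erased, it is pulled out of its parent, and its DTL object is freed in syncing context once vdev_detached is set.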
2979 */ 2980 int 2981 spa_vdev_detach(spa_t *spa, uint64_t guid, int replace_done) 2982 { 2983 uint64_t txg; 2984 int c, t, error; 2985 vdev_t *rvd = spa->spa_root_vdev; 2986 vdev_t *vd, *pvd, *cvd, *tvd; 2987 boolean_t unspare = B_FALSE; 2988 uint64_t unspare_guid; 2989 size_t len; 2990 2991 txg = spa_vdev_enter(spa); 2992 2993 vd = spa_lookup_by_guid(spa, guid, B_FALSE); 2994 2995 if (vd == NULL) 2996 return (spa_vdev_exit(spa, NULL, txg, ENODEV)); 2997 2998 if (!vd->vdev_ops->vdev_op_leaf) 2999 return (spa_vdev_exit(spa, NULL, txg, ENOTSUP)); 3000 3001 pvd = vd->vdev_parent; 3002 3003 /* 3004 * If replace_done is specified, only remove this device if it's 3005 * the first child of a replacing vdev. For the 'spare' vdev, either 3006 * disk can be removed. 3007 */ 3008 if (replace_done) { 3009 if (pvd->vdev_ops == &vdev_replacing_ops) { 3010 if (vd->vdev_id != 0) 3011 return (spa_vdev_exit(spa, NULL, txg, ENOTSUP)); 3012 } else if (pvd->vdev_ops != &vdev_spare_ops) { 3013 return (spa_vdev_exit(spa, NULL, txg, ENOTSUP)); 3014 } 3015 } 3016 3017 ASSERT(pvd->vdev_ops != &vdev_spare_ops || 3018 spa_version(spa) >= SPA_VERSION_SPARES); 3019 3020 /* 3021 * Only mirror, replacing, and spare vdevs support detach. 3022 */ 3023 if (pvd->vdev_ops != &vdev_replacing_ops && 3024 pvd->vdev_ops != &vdev_mirror_ops && 3025 pvd->vdev_ops != &vdev_spare_ops) 3026 return (spa_vdev_exit(spa, NULL, txg, ENOTSUP)); 3027 3028 /* 3029 * If there's only one replica, you can't detach it. 3030 */ 3031 if (pvd->vdev_children <= 1) 3032 return (spa_vdev_exit(spa, NULL, txg, EBUSY)); 3033 3034 /* 3035 * If all siblings have non-empty DTLs, this device may have the only 3036 * valid copy of the data, which means we cannot safely detach it. 3037 * 3038 * XXX -- as in the vdev_offline() case, we really want a more 3039 * precise DTL check. 3040 */ 3041 for (c = 0; c < pvd->vdev_children; c++) { 3042 uint64_t dirty; 3043 3044 cvd = pvd->vdev_child[c]; 3045 if (cvd == vd) 3046 continue; 3047 if (vdev_is_dead(cvd)) 3048 continue; 3049 mutex_enter(&cvd->vdev_dtl_lock); 3050 dirty = cvd->vdev_dtl_map.sm_space | 3051 cvd->vdev_dtl_scrub.sm_space; 3052 mutex_exit(&cvd->vdev_dtl_lock); 3053 if (!dirty) 3054 break; 3055 } 3056 3057 /* 3058 * If we are a replacing or spare vdev, then we can always detach the 3059 * latter child, as that is how one cancels the operation. 3060 */ 3061 if ((pvd->vdev_ops == &vdev_mirror_ops || vd->vdev_id != 1) && 3062 c == pvd->vdev_children) 3063 return (spa_vdev_exit(spa, NULL, txg, EBUSY)); 3064 3065 /* 3066 * If we are detaching the second disk from a replacing vdev, then 3067 * check to see if we changed the original vdev's path to have "/old" 3068 * at the end in spa_vdev_attach(). If so, undo that change now. 3069 */ 3070 if (pvd->vdev_ops == &vdev_replacing_ops && vd->vdev_id == 1 && 3071 pvd->vdev_child[0]->vdev_path != NULL && 3072 pvd->vdev_child[1]->vdev_path != NULL) { 3073 ASSERT(pvd->vdev_child[1] == vd); 3074 cvd = pvd->vdev_child[0]; 3075 len = strlen(vd->vdev_path); 3076 if (strncmp(cvd->vdev_path, vd->vdev_path, len) == 0 && 3077 strcmp(cvd->vdev_path + len, "/old") == 0) { 3078 spa_strfree(cvd->vdev_path); 3079 cvd->vdev_path = spa_strdup(vd->vdev_path); 3080 } 3081 } 3082 3083 /* 3084 * If we are detaching the original disk from a spare, then it implies 3085 * that the spare should become a real disk, and be removed from the 3086 * active spare list for the pool. 
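* The 'unspare' flag set below makes us call spa_spare_remove() on the remaining child and, once the config change commits, remove the spare from every other active pool as well.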
3087 */ 3088 if (pvd->vdev_ops == &vdev_spare_ops && 3089 vd->vdev_id == 0) 3090 unspare = B_TRUE; 3091 3092 /* 3093 * Erase the disk labels so the disk can be used for other things. 3094 * This must be done after all other error cases are handled, 3095 * but before we disembowel vd (so we can still do I/O to it). 3096 * But if we can't do it, don't treat the error as fatal -- 3097 * it may be that the unwritability of the disk is the reason 3098 * it's being detached! 3099 */ 3100 error = vdev_label_init(vd, 0, VDEV_LABEL_REMOVE); 3101 3102 /* 3103 * Remove vd from its parent and compact the parent's children. 3104 */ 3105 vdev_remove_child(pvd, vd); 3106 vdev_compact_children(pvd); 3107 3108 /* 3109 * Remember one of the remaining children so we can get tvd below. 3110 */ 3111 cvd = pvd->vdev_child[0]; 3112 3113 /* 3114 * If we need to remove the remaining child from the list of hot spares, 3115 * do it now, marking the vdev as no longer a spare in the process. We 3116 * must do this before vdev_remove_parent(), because that can change the 3117 * GUID if it creates a new toplevel GUID. 3118 */ 3119 if (unspare) { 3120 ASSERT(cvd->vdev_isspare); 3121 spa_spare_remove(cvd); 3122 unspare_guid = cvd->vdev_guid; 3123 } 3124 3125 /* 3126 * If the parent mirror/replacing vdev only has one child, 3127 * the parent is no longer needed. Remove it from the tree. 3128 */ 3129 if (pvd->vdev_children == 1) 3130 vdev_remove_parent(cvd); 3131 3132 /* 3133 * We don't set tvd until now because the parent we just removed 3134 * may have been the previous top-level vdev. 3135 */ 3136 tvd = cvd->vdev_top; 3137 ASSERT(tvd->vdev_parent == rvd); 3138 3139 /* 3140 * Reevaluate the parent vdev state. 3141 */ 3142 vdev_propagate_state(cvd); 3143 3144 /* 3145 * If the device we just detached was smaller than the others, it may be 3146 * possible to add metaslabs (i.e. grow the pool). vdev_metaslab_init() 3147 * can't fail because the existing metaslabs are already in core, so 3148 * there's nothing to read from disk. 3149 */ 3150 VERIFY(vdev_metaslab_init(tvd, txg) == 0); 3151 3152 vdev_config_dirty(tvd); 3153 3154 /* 3155 * Mark vd's DTL as dirty in this txg. vdev_dtl_sync() will see that 3156 * vd->vdev_detached is set and free vd's DTL object in syncing context. 3157 * But first make sure we're not on any *other* txg's DTL list, to 3158 * prevent vd from being accessed after it's freed. 3159 */ 3160 for (t = 0; t < TXG_SIZE; t++) 3161 (void) txg_list_remove_this(&tvd->vdev_dtl_list, vd, t); 3162 vd->vdev_detached = B_TRUE; 3163 vdev_dirty(tvd, VDD_DTL, vd, txg); 3164 3165 spa_event_notify(spa, vd, ESC_ZFS_VDEV_REMOVE); 3166 3167 error = spa_vdev_exit(spa, vd, txg, 0); 3168 3169 /* 3170 * If this was the removal of the original device in a hot spare vdev, 3171 * then we want to go through and remove the device from the hot spare 3172 * list of every other pool. 3173 */ 3174 if (unspare) { 3175 spa = NULL; 3176 mutex_enter(&spa_namespace_lock); 3177 while ((spa = spa_next(spa)) != NULL) { 3178 if (spa->spa_state != POOL_STATE_ACTIVE) 3179 continue; 3180 3181 (void) spa_vdev_remove(spa, unspare_guid, B_TRUE); 3182 } 3183 mutex_exit(&spa_namespace_lock); 3184 } 3185 3186 return (error); 3187 } 3188 3189 /* 3190 * Remove a spares vdev from the nvlist config. 
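* Returns ENOENT if the guid is not in the spares config, ENOTSUP if it is only present as an active device, and EBUSY if it is in use and 'unspare' was not requested; otherwise the entry is dropped from the ZPOOL_CONFIG_SPARES array.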
3191 */ 3192 static int 3193 spa_remove_spares(spa_aux_vdev_t *sav, uint64_t guid, boolean_t unspare, 3194 nvlist_t **spares, int nspares, vdev_t *vd) 3195 { 3196 nvlist_t *nv, **newspares; 3197 int i, j; 3198 3199 nv = NULL; 3200 for (i = 0; i < nspares; i++) { 3201 uint64_t theguid; 3202 3203 VERIFY(nvlist_lookup_uint64(spares[i], 3204 ZPOOL_CONFIG_GUID, &theguid) == 0); 3205 if (theguid == guid) { 3206 nv = spares[i]; 3207 break; 3208 } 3209 } 3210 3211 /* 3212 * Only remove the hot spare if it's not currently in use in this pool. 3213 */ 3214 if (nv == NULL && vd == NULL) 3215 return (ENOENT); 3216 3217 if (nv == NULL && vd != NULL) 3218 return (ENOTSUP); 3219 3220 if (!unspare && nv != NULL && vd != NULL) 3221 return (EBUSY); 3222 3223 if (nspares == 1) { 3224 newspares = NULL; 3225 } else { 3226 newspares = kmem_alloc((nspares - 1) * sizeof (void *), 3227 KM_SLEEP); 3228 for (i = 0, j = 0; i < nspares; i++) { 3229 if (spares[i] != nv) 3230 VERIFY(nvlist_dup(spares[i], 3231 &newspares[j++], KM_SLEEP) == 0); 3232 } 3233 } 3234 3235 VERIFY(nvlist_remove(sav->sav_config, ZPOOL_CONFIG_SPARES, 3236 DATA_TYPE_NVLIST_ARRAY) == 0); 3237 VERIFY(nvlist_add_nvlist_array(sav->sav_config, 3238 ZPOOL_CONFIG_SPARES, newspares, nspares - 1) == 0); 3239 for (i = 0; i < nspares - 1; i++) 3240 nvlist_free(newspares[i]); 3241 kmem_free(newspares, (nspares - 1) * sizeof (void *)); 3242 3243 return (0); 3244 } 3245 3246 /* 3247 * Remove an l2cache vdev from the nvlist config. 3248 */ 3249 static int 3250 spa_remove_l2cache(spa_aux_vdev_t *sav, uint64_t guid, nvlist_t **l2cache, 3251 int nl2cache, vdev_t *vd) 3252 { 3253 nvlist_t *nv, **newl2cache; 3254 int i, j; 3255 3256 nv = NULL; 3257 for (i = 0; i < nl2cache; i++) { 3258 uint64_t theguid; 3259 3260 VERIFY(nvlist_lookup_uint64(l2cache[i], 3261 ZPOOL_CONFIG_GUID, &theguid) == 0); 3262 if (theguid == guid) { 3263 nv = l2cache[i]; 3264 break; 3265 } 3266 } 3267 3268 if (vd == NULL) { 3269 for (i = 0; i < nl2cache; i++) { 3270 if (sav->sav_vdevs[i]->vdev_guid == guid) { 3271 vd = sav->sav_vdevs[i]; 3272 break; 3273 } 3274 } 3275 } 3276 3277 if (nv == NULL && vd == NULL) 3278 return (ENOENT); 3279 3280 if (nv == NULL && vd != NULL) 3281 return (ENOTSUP); 3282 3283 if (nl2cache == 1) { 3284 newl2cache = NULL; 3285 } else { 3286 newl2cache = kmem_alloc((nl2cache - 1) * sizeof (void *), 3287 KM_SLEEP); 3288 for (i = 0, j = 0; i < nl2cache; i++) { 3289 if (l2cache[i] != nv) 3290 VERIFY(nvlist_dup(l2cache[i], 3291 &newl2cache[j++], KM_SLEEP) == 0); 3292 } 3293 } 3294 3295 VERIFY(nvlist_remove(sav->sav_config, ZPOOL_CONFIG_L2CACHE, 3296 DATA_TYPE_NVLIST_ARRAY) == 0); 3297 VERIFY(nvlist_add_nvlist_array(sav->sav_config, 3298 ZPOOL_CONFIG_L2CACHE, newl2cache, nl2cache - 1) == 0); 3299 for (i = 0; i < nl2cache - 1; i++) 3300 nvlist_free(newl2cache[i]); 3301 kmem_free(newl2cache, (nl2cache - 1) * sizeof (void *)); 3302 3303 return (0); 3304 } 3305 3306 /* 3307 * Remove a device from the pool. Currently, this supports removing only hot 3308 * spares and level 2 ARC devices. 
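* The guid is looked up in the spares list first; if it is not found there, the l2cache list is tried (the 'cache' label below).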
3309 */ 3310 int 3311 spa_vdev_remove(spa_t *spa, uint64_t guid, boolean_t unspare) 3312 { 3313 vdev_t *vd; 3314 nvlist_t **spares, **l2cache; 3315 uint_t nspares, nl2cache; 3316 int error = 0; 3317 3318 spa_config_enter(spa, RW_WRITER, FTAG); 3319 3320 vd = spa_lookup_by_guid(spa, guid, B_FALSE); 3321 3322 if (spa->spa_spares.sav_vdevs != NULL && 3323 nvlist_lookup_nvlist_array(spa->spa_spares.sav_config, 3324 ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0) { 3325 if ((error = spa_remove_spares(&spa->spa_spares, guid, unspare, 3326 spares, nspares, vd)) != 0) 3327 goto cache; 3328 spa_load_spares(spa); 3329 spa->spa_spares.sav_sync = B_TRUE; 3330 goto out; 3331 } 3332 3333 cache: 3334 if (spa->spa_l2cache.sav_vdevs != NULL && 3335 nvlist_lookup_nvlist_array(spa->spa_l2cache.sav_config, 3336 ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0) { 3337 if ((error = spa_remove_l2cache(&spa->spa_l2cache, guid, 3338 l2cache, nl2cache, vd)) != 0) 3339 goto out; 3340 spa_load_l2cache(spa); 3341 spa->spa_l2cache.sav_sync = B_TRUE; 3342 } 3343 3344 out: 3345 spa_config_exit(spa, FTAG); 3346 return (error); 3347 } 3348 3349 /* 3350 * Find any device that's done replacing, or a vdev marked 'unspare' that's 3351 * currently spared, so we can detach it. 3352 */ 3353 static vdev_t * 3354 spa_vdev_resilver_done_hunt(vdev_t *vd) 3355 { 3356 vdev_t *newvd, *oldvd; 3357 int c; 3358 3359 for (c = 0; c < vd->vdev_children; c++) { 3360 oldvd = spa_vdev_resilver_done_hunt(vd->vdev_child[c]); 3361 if (oldvd != NULL) 3362 return (oldvd); 3363 } 3364 3365 /* 3366 * Check for a completed replacement. 3367 */ 3368 if (vd->vdev_ops == &vdev_replacing_ops && vd->vdev_children == 2) { 3369 oldvd = vd->vdev_child[0]; 3370 newvd = vd->vdev_child[1]; 3371 3372 mutex_enter(&newvd->vdev_dtl_lock); 3373 if (newvd->vdev_dtl_map.sm_space == 0 && 3374 newvd->vdev_dtl_scrub.sm_space == 0) { 3375 mutex_exit(&newvd->vdev_dtl_lock); 3376 return (oldvd); 3377 } 3378 mutex_exit(&newvd->vdev_dtl_lock); 3379 } 3380 3381 /* 3382 * Check for a completed resilver with the 'unspare' flag set. 3383 */ 3384 if (vd->vdev_ops == &vdev_spare_ops && vd->vdev_children == 2) { 3385 newvd = vd->vdev_child[0]; 3386 oldvd = vd->vdev_child[1]; 3387 3388 mutex_enter(&newvd->vdev_dtl_lock); 3389 if (newvd->vdev_unspare && 3390 newvd->vdev_dtl_map.sm_space == 0 && 3391 newvd->vdev_dtl_scrub.sm_space == 0) { 3392 newvd->vdev_unspare = 0; 3393 mutex_exit(&newvd->vdev_dtl_lock); 3394 return (oldvd); 3395 } 3396 mutex_exit(&newvd->vdev_dtl_lock); 3397 } 3398 3399 return (NULL); 3400 } 3401 3402 static void 3403 spa_vdev_resilver_done(spa_t *spa) 3404 { 3405 vdev_t *vd; 3406 vdev_t *pvd; 3407 uint64_t guid; 3408 uint64_t pguid = 0; 3409 3410 spa_config_enter(spa, RW_READER, FTAG); 3411 3412 while ((vd = spa_vdev_resilver_done_hunt(spa->spa_root_vdev)) != NULL) { 3413 guid = vd->vdev_guid; 3414 /* 3415 * If we have just finished replacing a hot spared device, then 3416 * we need to detach the parent's first child (the original hot 3417 * spare) as well.
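* In that case vd's parent is a 'replacing' vdev sitting as child 0 of a 'spare' vdev; pguid below is taken from the spare vdev's other child so it can be detached once vd itself has been.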
3418 */ 3419 pvd = vd->vdev_parent; 3420 if (pvd->vdev_parent->vdev_ops == &vdev_spare_ops && 3421 pvd->vdev_id == 0) { 3422 ASSERT(pvd->vdev_ops == &vdev_replacing_ops); 3423 ASSERT(pvd->vdev_parent->vdev_children == 2); 3424 pguid = pvd->vdev_parent->vdev_child[1]->vdev_guid; 3425 } 3426 spa_config_exit(spa, FTAG); 3427 if (spa_vdev_detach(spa, guid, B_TRUE) != 0) 3428 return; 3429 if (pguid != 0 && spa_vdev_detach(spa, pguid, B_TRUE) != 0) 3430 return; 3431 spa_config_enter(spa, RW_READER, FTAG); 3432 } 3433 3434 spa_config_exit(spa, FTAG); 3435 } 3436 3437 /* 3438 * Update the stored path for this vdev. Dirty the vdev configuration, relying 3439 * on spa_vdev_enter/exit() to synchronize the labels and cache. 3440 */ 3441 int 3442 spa_vdev_setpath(spa_t *spa, uint64_t guid, const char *newpath) 3443 { 3444 vdev_t *vd; 3445 uint64_t txg; 3446 3447 txg = spa_vdev_enter(spa); 3448 3449 if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL) { 3450 /* 3451 * Determine if this is a reference to a hot spare device. If 3452 * it is, update the path manually as there is no associated 3453 * vdev_t that can be synced to disk. 3454 */ 3455 nvlist_t **spares; 3456 uint_t i, nspares; 3457 3458 if (spa->spa_spares.sav_config != NULL) { 3459 VERIFY(nvlist_lookup_nvlist_array( 3460 spa->spa_spares.sav_config, ZPOOL_CONFIG_SPARES, 3461 &spares, &nspares) == 0); 3462 for (i = 0; i < nspares; i++) { 3463 uint64_t theguid; 3464 VERIFY(nvlist_lookup_uint64(spares[i], 3465 ZPOOL_CONFIG_GUID, &theguid) == 0); 3466 if (theguid == guid) { 3467 VERIFY(nvlist_add_string(spares[i], 3468 ZPOOL_CONFIG_PATH, newpath) == 0); 3469 spa_load_spares(spa); 3470 spa->spa_spares.sav_sync = B_TRUE; 3471 return (spa_vdev_exit(spa, NULL, txg, 3472 0)); 3473 } 3474 } 3475 } 3476 3477 return (spa_vdev_exit(spa, NULL, txg, ENOENT)); 3478 } 3479 3480 if (!vd->vdev_ops->vdev_op_leaf) 3481 return (spa_vdev_exit(spa, NULL, txg, ENOTSUP)); 3482 3483 spa_strfree(vd->vdev_path); 3484 vd->vdev_path = spa_strdup(newpath); 3485 3486 vdev_config_dirty(vd->vdev_top); 3487 3488 return (spa_vdev_exit(spa, NULL, txg, 0)); 3489 } 3490 3491 /* 3492 * ========================================================================== 3493 * SPA Scrubbing 3494 * ========================================================================== 3495 */ 3496 3497 int 3498 spa_scrub(spa_t *spa, pool_scrub_type_t type) 3499 { 3500 ASSERT(!spa_config_held(spa, RW_WRITER)); 3501 3502 if ((uint_t)type >= POOL_SCRUB_TYPES) 3503 return (ENOTSUP); 3504 3505 /* 3506 * If a resilver was requested, but there is no DTL on a 3507 * writeable leaf device, we have nothing to do. 
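* We still post SPA_ASYNC_RESILVER_DONE so that any completed replacement or unspare gets detached.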
3508 */ 3509 if (type == POOL_SCRUB_RESILVER && 3510 !vdev_resilver_needed(spa->spa_root_vdev, NULL, NULL)) { 3511 spa_async_request(spa, SPA_ASYNC_RESILVER_DONE); 3512 return (0); 3513 } 3514 3515 if (type == POOL_SCRUB_EVERYTHING && 3516 spa->spa_dsl_pool->dp_scrub_func != SCRUB_FUNC_NONE && 3517 spa->spa_dsl_pool->dp_scrub_isresilver) 3518 return (EBUSY); 3519 3520 if (type == POOL_SCRUB_EVERYTHING || type == POOL_SCRUB_RESILVER) { 3521 return (dsl_pool_scrub_clean(spa->spa_dsl_pool)); 3522 } else if (type == POOL_SCRUB_NONE) { 3523 return (dsl_pool_scrub_cancel(spa->spa_dsl_pool)); 3524 } else { 3525 return (EINVAL); 3526 } 3527 } 3528 3529 /* 3530 * ========================================================================== 3531 * SPA async task processing 3532 * ========================================================================== 3533 */ 3534 3535 static void 3536 spa_async_remove(spa_t *spa, vdev_t *vd) 3537 { 3538 int c; 3539 3540 if (vd->vdev_remove_wanted) { 3541 vd->vdev_remove_wanted = 0; 3542 vdev_set_state(vd, B_FALSE, VDEV_STATE_REMOVED, VDEV_AUX_NONE); 3543 vdev_clear(spa, vd, B_TRUE); 3544 vdev_config_dirty(vd->vdev_top); 3545 } 3546 3547 for (c = 0; c < vd->vdev_children; c++) 3548 spa_async_remove(spa, vd->vdev_child[c]); 3549 } 3550 3551 static void 3552 spa_async_thread(spa_t *spa) 3553 { 3554 int tasks, i; 3555 uint64_t txg; 3556 3557 ASSERT(spa->spa_sync_on); 3558 3559 mutex_enter(&spa->spa_async_lock); 3560 tasks = spa->spa_async_tasks; 3561 spa->spa_async_tasks = 0; 3562 mutex_exit(&spa->spa_async_lock); 3563 3564 /* 3565 * See if the config needs to be updated. 3566 */ 3567 if (tasks & SPA_ASYNC_CONFIG_UPDATE) { 3568 mutex_enter(&spa_namespace_lock); 3569 spa_config_update(spa, SPA_CONFIG_UPDATE_POOL); 3570 mutex_exit(&spa_namespace_lock); 3571 } 3572 3573 /* 3574 * See if any devices need to be marked REMOVED. 3575 * 3576 * XXX - We avoid doing this when we are in 3577 * I/O failure state since spa_vdev_enter() grabs 3578 * the namespace lock and would not be able to obtain 3579 * the writer config lock. 3580 */ 3581 if (tasks & SPA_ASYNC_REMOVE && 3582 spa_state(spa) != POOL_STATE_IO_FAILURE) { 3583 txg = spa_vdev_enter(spa); 3584 spa_async_remove(spa, spa->spa_root_vdev); 3585 for (i = 0; i < spa->spa_l2cache.sav_count; i++) 3586 spa_async_remove(spa, spa->spa_l2cache.sav_vdevs[i]); 3587 for (i = 0; i < spa->spa_spares.sav_count; i++) 3588 spa_async_remove(spa, spa->spa_spares.sav_vdevs[i]); 3589 (void) spa_vdev_exit(spa, NULL, txg, 0); 3590 } 3591 3592 /* 3593 * If any devices are done replacing, detach them. 3594 */ 3595 if (tasks & SPA_ASYNC_RESILVER_DONE) 3596 spa_vdev_resilver_done(spa); 3597 3598 /* 3599 * Kick off a resilver. 3600 */ 3601 if (tasks & SPA_ASYNC_RESILVER) 3602 VERIFY(spa_scrub(spa, POOL_SCRUB_RESILVER) == 0); 3603 3604 /* 3605 * Let the world know that we're done. 
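* Clearing spa_async_thread and broadcasting spa_async_cv is what lets spa_async_suspend() return.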
3606 */ 3607 mutex_enter(&spa->spa_async_lock); 3608 spa->spa_async_thread = NULL; 3609 cv_broadcast(&spa->spa_async_cv); 3610 mutex_exit(&spa->spa_async_lock); 3611 thread_exit(); 3612 } 3613 3614 void 3615 spa_async_suspend(spa_t *spa) 3616 { 3617 mutex_enter(&spa->spa_async_lock); 3618 spa->spa_async_suspended++; 3619 while (spa->spa_async_thread != NULL) 3620 cv_wait(&spa->spa_async_cv, &spa->spa_async_lock); 3621 mutex_exit(&spa->spa_async_lock); 3622 } 3623 3624 void 3625 spa_async_resume(spa_t *spa) 3626 { 3627 mutex_enter(&spa->spa_async_lock); 3628 ASSERT(spa->spa_async_suspended != 0); 3629 spa->spa_async_suspended--; 3630 mutex_exit(&spa->spa_async_lock); 3631 } 3632 3633 static void 3634 spa_async_dispatch(spa_t *spa) 3635 { 3636 mutex_enter(&spa->spa_async_lock); 3637 if (spa->spa_async_tasks && !spa->spa_async_suspended && 3638 spa->spa_async_thread == NULL && 3639 rootdir != NULL && !vn_is_readonly(rootdir)) 3640 spa->spa_async_thread = thread_create(NULL, 0, 3641 spa_async_thread, spa, 0, &p0, TS_RUN, maxclsyspri); 3642 mutex_exit(&spa->spa_async_lock); 3643 } 3644 3645 void 3646 spa_async_request(spa_t *spa, int task) 3647 { 3648 mutex_enter(&spa->spa_async_lock); 3649 spa->spa_async_tasks |= task; 3650 mutex_exit(&spa->spa_async_lock); 3651 } 3652 3653 /* 3654 * ========================================================================== 3655 * SPA syncing routines 3656 * ========================================================================== 3657 */ 3658 3659 static void 3660 spa_sync_deferred_frees(spa_t *spa, uint64_t txg) 3661 { 3662 bplist_t *bpl = &spa->spa_sync_bplist; 3663 dmu_tx_t *tx; 3664 blkptr_t blk; 3665 uint64_t itor = 0; 3666 zio_t *zio; 3667 int error; 3668 uint8_t c = 1; 3669 3670 zio = zio_root(spa, NULL, NULL, ZIO_FLAG_CONFIG_HELD); 3671 3672 while (bplist_iterate(bpl, &itor, &blk) == 0) 3673 zio_nowait(zio_free(zio, spa, txg, &blk, NULL, NULL)); 3674 3675 error = zio_wait(zio); 3676 ASSERT3U(error, ==, 0); 3677 3678 tx = dmu_tx_create_assigned(spa->spa_dsl_pool, txg); 3679 bplist_vacate(bpl, tx); 3680 3681 /* 3682 * Pre-dirty the first block so we sync to convergence faster. 3683 * (Usually only the first block is needed.) 3684 */ 3685 dmu_write(spa->spa_meta_objset, spa->spa_sync_bplist_obj, 0, 1, &c, tx); 3686 dmu_tx_commit(tx); 3687 } 3688 3689 static void 3690 spa_sync_nvlist(spa_t *spa, uint64_t obj, nvlist_t *nv, dmu_tx_t *tx) 3691 { 3692 char *packed = NULL; 3693 size_t nvsize = 0; 3694 dmu_buf_t *db; 3695 3696 VERIFY(nvlist_size(nv, &nvsize, NV_ENCODE_XDR) == 0); 3697 3698 packed = kmem_alloc(nvsize, KM_SLEEP); 3699 3700 VERIFY(nvlist_pack(nv, &packed, &nvsize, NV_ENCODE_XDR, 3701 KM_SLEEP) == 0); 3702 3703 dmu_write(spa->spa_meta_objset, obj, 0, nvsize, packed, tx); 3704 3705 kmem_free(packed, nvsize); 3706 3707 VERIFY(0 == dmu_bonus_hold(spa->spa_meta_objset, obj, FTAG, &db)); 3708 dmu_buf_will_dirty(db, tx); 3709 *(uint64_t *)db->db_data = nvsize; 3710 dmu_buf_rele(db, FTAG); 3711 } 3712 3713 static void 3714 spa_sync_aux_dev(spa_t *spa, spa_aux_vdev_t *sav, dmu_tx_t *tx, 3715 const char *config, const char *entry) 3716 { 3717 nvlist_t *nvroot; 3718 nvlist_t **list; 3719 int i; 3720 3721 if (!sav->sav_sync) 3722 return; 3723 3724 /* 3725 * Update the MOS nvlist describing the list of available devices. 3726 * spa_validate_aux() will have already made sure this nvlist is 3727 * valid and the vdevs are labeled appropriately. 
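* The packed-nvlist object is allocated on first use and recorded in the MOS directory under 'entry'; an empty device list is written as a zero-length array.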
3728 */ 3729 if (sav->sav_object == 0) { 3730 sav->sav_object = dmu_object_alloc(spa->spa_meta_objset, 3731 DMU_OT_PACKED_NVLIST, 1 << 14, DMU_OT_PACKED_NVLIST_SIZE, 3732 sizeof (uint64_t), tx); 3733 VERIFY(zap_update(spa->spa_meta_objset, 3734 DMU_POOL_DIRECTORY_OBJECT, entry, sizeof (uint64_t), 1, 3735 &sav->sav_object, tx) == 0); 3736 } 3737 3738 VERIFY(nvlist_alloc(&nvroot, NV_UNIQUE_NAME, KM_SLEEP) == 0); 3739 if (sav->sav_count == 0) { 3740 VERIFY(nvlist_add_nvlist_array(nvroot, config, NULL, 0) == 0); 3741 } else { 3742 list = kmem_alloc(sav->sav_count * sizeof (void *), KM_SLEEP); 3743 for (i = 0; i < sav->sav_count; i++) 3744 list[i] = vdev_config_generate(spa, sav->sav_vdevs[i], 3745 B_FALSE, B_FALSE, B_TRUE); 3746 VERIFY(nvlist_add_nvlist_array(nvroot, config, list, 3747 sav->sav_count) == 0); 3748 for (i = 0; i < sav->sav_count; i++) 3749 nvlist_free(list[i]); 3750 kmem_free(list, sav->sav_count * sizeof (void *)); 3751 } 3752 3753 spa_sync_nvlist(spa, sav->sav_object, nvroot, tx); 3754 nvlist_free(nvroot); 3755 3756 sav->sav_sync = B_FALSE; 3757 } 3758 3759 static void 3760 spa_sync_config_object(spa_t *spa, dmu_tx_t *tx) 3761 { 3762 nvlist_t *config; 3763 3764 if (list_is_empty(&spa->spa_dirty_list)) 3765 return; 3766 3767 config = spa_config_generate(spa, NULL, dmu_tx_get_txg(tx), B_FALSE); 3768 3769 if (spa->spa_config_syncing) 3770 nvlist_free(spa->spa_config_syncing); 3771 spa->spa_config_syncing = config; 3772 3773 spa_sync_nvlist(spa, spa->spa_config_object, config, tx); 3774 } 3775 3776 /* 3777 * Set zpool properties. 3778 */ 3779 static void 3780 spa_sync_props(void *arg1, void *arg2, cred_t *cr, dmu_tx_t *tx) 3781 { 3782 spa_t *spa = arg1; 3783 objset_t *mos = spa->spa_meta_objset; 3784 nvlist_t *nvp = arg2; 3785 nvpair_t *elem; 3786 uint64_t intval; 3787 char *strval; 3788 zpool_prop_t prop; 3789 const char *propname; 3790 zprop_type_t proptype; 3791 spa_config_dirent_t *dp; 3792 3793 elem = NULL; 3794 while ((elem = nvlist_next_nvpair(nvp, elem))) { 3795 switch (prop = zpool_name_to_prop(nvpair_name(elem))) { 3796 case ZPOOL_PROP_VERSION: 3797 /* 3798 * Only set version for non-zpool-creation cases 3799 * (set/import). spa_create() needs special care 3800 * for version setting. 3801 */ 3802 if (tx->tx_txg != TXG_INITIAL) { 3803 VERIFY(nvpair_value_uint64(elem, 3804 &intval) == 0); 3805 ASSERT(intval <= SPA_VERSION); 3806 ASSERT(intval >= spa_version(spa)); 3807 spa->spa_uberblock.ub_version = intval; 3808 vdev_config_dirty(spa->spa_root_vdev); 3809 } 3810 break; 3811 3812 case ZPOOL_PROP_ALTROOT: 3813 /* 3814 * 'altroot' is a non-persistent property. It should 3815 * have been set temporarily at creation or import time. 3816 */ 3817 ASSERT(spa->spa_root != NULL); 3818 break; 3819 3820 case ZPOOL_PROP_CACHEFILE: 3821 /* 3822 * 'cachefile' is a non-persistent property, but we post 3823 * an async request so that the config cache gets 3824 * updated. 3825 */ 3826 VERIFY(nvpair_value_string(elem, &strval) == 0); 3827 3828 dp = kmem_alloc(sizeof (spa_config_dirent_t), 3829 KM_SLEEP); 3830 3831 if (strval[0] == '\0') 3832 dp->scd_path = spa_strdup(spa_config_path); 3833 else if (strcmp(strval, "none") == 0) 3834 dp->scd_path = NULL; 3835 else 3836 dp->scd_path = spa_strdup(strval); 3837 3838 list_insert_head(&spa->spa_config_list, dp); 3839 spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE); 3840 break; 3841 default: 3842 /* 3843 * Set pool property values in the poolprops mos object.
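* The ZAP object holding the properties is created lazily and linked into the MOS directory under DMU_POOL_PROPS the first time a persistent property is set.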
3844 */ 3845 mutex_enter(&spa->spa_props_lock); 3846 if (spa->spa_pool_props_object == 0) { 3847 objset_t *mos = spa->spa_meta_objset; 3848 3849 VERIFY((spa->spa_pool_props_object = 3850 zap_create(mos, DMU_OT_POOL_PROPS, 3851 DMU_OT_NONE, 0, tx)) > 0); 3852 3853 VERIFY(zap_update(mos, 3854 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_PROPS, 3855 8, 1, &spa->spa_pool_props_object, tx) 3856 == 0); 3857 } 3858 mutex_exit(&spa->spa_props_lock); 3859 3860 /* normalize the property name */ 3861 propname = zpool_prop_to_name(prop); 3862 proptype = zpool_prop_get_type(prop); 3863 3864 if (nvpair_type(elem) == DATA_TYPE_STRING) { 3865 ASSERT(proptype == PROP_TYPE_STRING); 3866 VERIFY(nvpair_value_string(elem, &strval) == 0); 3867 VERIFY(zap_update(mos, 3868 spa->spa_pool_props_object, propname, 3869 1, strlen(strval) + 1, strval, tx) == 0); 3870 3871 } else if (nvpair_type(elem) == DATA_TYPE_UINT64) { 3872 VERIFY(nvpair_value_uint64(elem, &intval) == 0); 3873 3874 if (proptype == PROP_TYPE_INDEX) { 3875 const char *unused; 3876 VERIFY(zpool_prop_index_to_string( 3877 prop, intval, &unused) == 0); 3878 } 3879 VERIFY(zap_update(mos, 3880 spa->spa_pool_props_object, propname, 3881 8, 1, &intval, tx) == 0); 3882 } else { 3883 ASSERT(0); /* not allowed */ 3884 } 3885 3886 switch (prop) { 3887 case ZPOOL_PROP_DELEGATION: 3888 spa->spa_delegation = intval; 3889 break; 3890 case ZPOOL_PROP_BOOTFS: 3891 spa->spa_bootfs = intval; 3892 break; 3893 case ZPOOL_PROP_FAILUREMODE: 3894 spa->spa_failmode = intval; 3895 break; 3896 default: 3897 break; 3898 } 3899 } 3900 3901 /* log internal history if this is not a zpool create */ 3902 if (spa_version(spa) >= SPA_VERSION_ZPOOL_HISTORY && 3903 tx->tx_txg != TXG_INITIAL) { 3904 spa_history_internal_log(LOG_POOL_PROPSET, 3905 spa, tx, cr, "%s %lld %s", 3906 nvpair_name(elem), intval, spa->spa_name); 3907 } 3908 } 3909 } 3910 3911 /* 3912 * Sync the specified transaction group. New blocks may be dirtied as 3913 * part of the process, so we iterate until it converges. 3914 */ 3915 void 3916 spa_sync(spa_t *spa, uint64_t txg) 3917 { 3918 dsl_pool_t *dp = spa->spa_dsl_pool; 3919 objset_t *mos = spa->spa_meta_objset; 3920 bplist_t *bpl = &spa->spa_sync_bplist; 3921 vdev_t *rvd = spa->spa_root_vdev; 3922 vdev_t *vd; 3923 dmu_tx_t *tx; 3924 int dirty_vdevs; 3925 3926 /* 3927 * Lock out configuration changes. 3928 */ 3929 spa_config_enter(spa, RW_READER, FTAG); 3930 3931 spa->spa_syncing_txg = txg; 3932 spa->spa_sync_pass = 0; 3933 3934 VERIFY(0 == bplist_open(bpl, mos, spa->spa_sync_bplist_obj)); 3935 3936 tx = dmu_tx_create_assigned(dp, txg); 3937 3938 /* 3939 * If we are upgrading to SPA_VERSION_RAIDZ_DEFLATE this txg, 3940 * set spa_deflate if we have no raid-z vdevs. 
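* Any top-level vdev whose vdev_deflate_ratio differs from SPA_MINBLOCKSIZE keeps spa_deflate off; otherwise the DMU_POOL_DEFLATE entry is added to the MOS directory.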
3941 */ 3942 if (spa->spa_ubsync.ub_version < SPA_VERSION_RAIDZ_DEFLATE && 3943 spa->spa_uberblock.ub_version >= SPA_VERSION_RAIDZ_DEFLATE) { 3944 int i; 3945 3946 for (i = 0; i < rvd->vdev_children; i++) { 3947 vd = rvd->vdev_child[i]; 3948 if (vd->vdev_deflate_ratio != SPA_MINBLOCKSIZE) 3949 break; 3950 } 3951 if (i == rvd->vdev_children) { 3952 spa->spa_deflate = TRUE; 3953 VERIFY(0 == zap_add(spa->spa_meta_objset, 3954 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_DEFLATE, 3955 sizeof (uint64_t), 1, &spa->spa_deflate, tx)); 3956 } 3957 } 3958 3959 if (spa->spa_ubsync.ub_version < SPA_VERSION_ORIGIN && 3960 spa->spa_uberblock.ub_version >= SPA_VERSION_ORIGIN) { 3961 dsl_pool_create_origin(dp, tx); 3962 3963 /* Keeping the origin open increases spa_minref */ 3964 spa->spa_minref += 3; 3965 } 3966 3967 if (spa->spa_ubsync.ub_version < SPA_VERSION_NEXT_CLONES && 3968 spa->spa_uberblock.ub_version >= SPA_VERSION_NEXT_CLONES) { 3969 dsl_pool_upgrade_clones(dp, tx); 3970 } 3971 3972 /* 3973 * If anything has changed in this txg, push the deferred frees 3974 * from the previous txg. If not, leave them alone so that we 3975 * don't generate work on an otherwise idle system. 3976 */ 3977 if (!txg_list_empty(&dp->dp_dirty_datasets, txg) || 3978 !txg_list_empty(&dp->dp_dirty_dirs, txg) || 3979 !txg_list_empty(&dp->dp_sync_tasks, txg)) 3980 spa_sync_deferred_frees(spa, txg); 3981 3982 /* 3983 * Iterate to convergence. 3984 */ 3985 do { 3986 spa->spa_sync_pass++; 3987 3988 spa_sync_config_object(spa, tx); 3989 spa_sync_aux_dev(spa, &spa->spa_spares, tx, 3990 ZPOOL_CONFIG_SPARES, DMU_POOL_SPARES); 3991 spa_sync_aux_dev(spa, &spa->spa_l2cache, tx, 3992 ZPOOL_CONFIG_L2CACHE, DMU_POOL_L2CACHE); 3993 spa_errlog_sync(spa, txg); 3994 dsl_pool_sync(dp, txg); 3995 3996 dirty_vdevs = 0; 3997 while (vd = txg_list_remove(&spa->spa_vdev_txg_list, txg)) { 3998 vdev_sync(vd, txg); 3999 dirty_vdevs++; 4000 } 4001 4002 bplist_sync(bpl, tx); 4003 } while (dirty_vdevs); 4004 4005 bplist_close(bpl); 4006 4007 dprintf("txg %llu passes %d\n", txg, spa->spa_sync_pass); 4008 4009 /* 4010 * Rewrite the vdev configuration (which includes the uberblock) 4011 * to commit the transaction group. 4012 * 4013 * If there are no dirty vdevs, we sync the uberblock to a few 4014 * random top-level vdevs that are known to be visible in the 4015 * config cache (see spa_vdev_add() for details). If there *are* 4016 * dirty vdevs -- or if the sync to our random subset fails -- 4017 * then sync the uberblock to all vdevs. 4018 */ 4019 if (list_is_empty(&spa->spa_dirty_list)) { 4020 vdev_t *svd[SPA_DVAS_PER_BP]; 4021 int svdcount = 0; 4022 int children = rvd->vdev_children; 4023 int c0 = spa_get_random(children); 4024 int c; 4025 4026 for (c = 0; c < children; c++) { 4027 vd = rvd->vdev_child[(c0 + c) % children]; 4028 if (vd->vdev_ms_array == 0 || vd->vdev_islog) 4029 continue; 4030 svd[svdcount++] = vd; 4031 if (svdcount == SPA_DVAS_PER_BP) 4032 break; 4033 } 4034 vdev_config_sync(svd, svdcount, txg); 4035 } else { 4036 vdev_config_sync(rvd->vdev_child, rvd->vdev_children, txg); 4037 } 4038 dmu_tx_commit(tx); 4039 4040 /* 4041 * Clear the dirty config list. 4042 */ 4043 while ((vd = list_head(&spa->spa_dirty_list)) != NULL) 4044 vdev_config_clean(vd); 4045 4046 /* 4047 * Now that the new config has synced transactionally, 4048 * let it become visible to the config cache. 
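* spa_config_syncing was generated by spa_sync_config_object(); spa_config_set() installs it as the live config and spa_config_txg records when it took effect.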

	/*
	 * Rewrite the vdev configuration (which includes the uberblock)
	 * to commit the transaction group.
	 *
	 * If there are no dirty vdevs, we sync the uberblock to a few
	 * random top-level vdevs that are known to be visible in the
	 * config cache (see spa_vdev_add() for details).  If there *are*
	 * dirty vdevs -- or if the sync to our random subset fails --
	 * then sync the uberblock to all vdevs.
	 */
	if (list_is_empty(&spa->spa_dirty_list)) {
		vdev_t *svd[SPA_DVAS_PER_BP];
		int svdcount = 0;
		int children = rvd->vdev_children;
		int c0 = spa_get_random(children);
		int c;

		for (c = 0; c < children; c++) {
			vd = rvd->vdev_child[(c0 + c) % children];
			if (vd->vdev_ms_array == 0 || vd->vdev_islog)
				continue;
			svd[svdcount++] = vd;
			if (svdcount == SPA_DVAS_PER_BP)
				break;
		}
		vdev_config_sync(svd, svdcount, txg);
	} else {
		vdev_config_sync(rvd->vdev_child, rvd->vdev_children, txg);
	}
	dmu_tx_commit(tx);

	/*
	 * Clear the dirty config list.
	 */
	while ((vd = list_head(&spa->spa_dirty_list)) != NULL)
		vdev_config_clean(vd);

	/*
	 * Now that the new config has synced transactionally,
	 * let it become visible to the config cache.
	 */
	if (spa->spa_config_syncing != NULL) {
		spa_config_set(spa, spa->spa_config_syncing);
		spa->spa_config_txg = txg;
		spa->spa_config_syncing = NULL;
	}

	spa->spa_traverse_wanted = B_TRUE;
	rw_enter(&spa->spa_traverse_lock, RW_WRITER);
	spa->spa_traverse_wanted = B_FALSE;
	spa->spa_ubsync = spa->spa_uberblock;
	rw_exit(&spa->spa_traverse_lock);

	/*
	 * Clean up the ZIL records for the synced txg.
	 */
	dsl_pool_zil_clean(dp);

	/*
	 * Update usable space statistics.
	 */
	while ((vd = txg_list_remove(&spa->spa_vdev_txg_list, TXG_CLEAN(txg)))
	    != NULL)
		vdev_sync_done(vd, txg);

	/*
	 * It had better be the case that we didn't dirty anything
	 * since vdev_config_sync().
	 */
	ASSERT(txg_list_empty(&dp->dp_dirty_datasets, txg));
	ASSERT(txg_list_empty(&dp->dp_dirty_dirs, txg));
	ASSERT(txg_list_empty(&spa->spa_vdev_txg_list, txg));
	ASSERT(bpl->bpl_queue == NULL);

	spa_config_exit(spa, FTAG);

	/*
	 * If any async tasks have been requested, kick them off.
	 */
	spa_async_dispatch(spa);
}

/*
 * Sync all pools.  We don't want to hold the namespace lock across these
 * operations, so we take a reference on the spa_t and drop the lock during
 * the sync.
 */
void
spa_sync_allpools(void)
{
	spa_t *spa = NULL;
	mutex_enter(&spa_namespace_lock);
	while ((spa = spa_next(spa)) != NULL) {
		if (spa_state(spa) != POOL_STATE_ACTIVE)
			continue;
		spa_open_ref(spa, FTAG);
		mutex_exit(&spa_namespace_lock);
		txg_wait_synced(spa_get_dsl(spa), 0);
		mutex_enter(&spa_namespace_lock);
		spa_close(spa, FTAG);
	}
	mutex_exit(&spa_namespace_lock);
}
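
/*
 * A minimal sketch, for illustration: forcing a single named pool out to
 * disk follows the same pattern as the loop above -- hold the pool, drop
 * the namespace lock around the wait, then release the hold.  The function
 * name is hypothetical.
 */
static void
spa_sync_one(const char *name)
{
	spa_t *spa;

	mutex_enter(&spa_namespace_lock);
	if ((spa = spa_lookup(name)) == NULL ||
	    spa_state(spa) != POOL_STATE_ACTIVE) {
		mutex_exit(&spa_namespace_lock);
		return;
	}
	spa_open_ref(spa, FTAG);
	mutex_exit(&spa_namespace_lock);

	/* Wait for everything dirty as of now to reach stable storage. */
	txg_wait_synced(spa_get_dsl(spa), 0);

	mutex_enter(&spa_namespace_lock);
	spa_close(spa, FTAG);
	mutex_exit(&spa_namespace_lock);
}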

/*
 * ==========================================================================
 * Miscellaneous routines
 * ==========================================================================
 */

/*
 * Remove all pools in the system.
 */
void
spa_evict_all(void)
{
	spa_t *spa;

	/*
	 * Remove all cached state.  All pools should be closed now,
	 * so every spa in the AVL tree should be unreferenced.
	 */
	mutex_enter(&spa_namespace_lock);
	while ((spa = spa_next(NULL)) != NULL) {
		/*
		 * Stop async tasks.  The async thread may need to detach
		 * a device that's been replaced, which requires grabbing
		 * spa_namespace_lock, so we must drop it here.
		 */
		spa_open_ref(spa, FTAG);
		mutex_exit(&spa_namespace_lock);
		spa_async_suspend(spa);
		mutex_enter(&spa_namespace_lock);
		spa_close(spa, FTAG);

		if (spa->spa_state != POOL_STATE_UNINITIALIZED) {
			spa_unload(spa);
			spa_deactivate(spa);
		}
		spa_remove(spa);
	}
	mutex_exit(&spa_namespace_lock);
}

/*
 * Look up a vdev in the pool by guid; if 'l2cache' is set, also search
 * the pool's level 2 cache devices.
 */
vdev_t *
spa_lookup_by_guid(spa_t *spa, uint64_t guid, boolean_t l2cache)
{
	vdev_t *vd;
	int i;

	if ((vd = vdev_lookup_by_guid(spa->spa_root_vdev, guid)) != NULL)
		return (vd);

	if (l2cache) {
		for (i = 0; i < spa->spa_l2cache.sav_count; i++) {
			vd = spa->spa_l2cache.sav_vdevs[i];
			if (vd->vdev_guid == guid)
				return (vd);
		}
	}

	return (NULL);
}

void
spa_upgrade(spa_t *spa, uint64_t version)
{
	spa_config_enter(spa, RW_WRITER, FTAG);

	/*
	 * This should only be called for a non-faulted pool, and since a
	 * pool written by a future version would not have been openable,
	 * the on-disk version can never exceed SPA_VERSION here.
	 */
	ASSERT(spa->spa_uberblock.ub_version <= SPA_VERSION);
	ASSERT(version >= spa->spa_uberblock.ub_version);

	spa->spa_uberblock.ub_version = version;
	vdev_config_dirty(spa->spa_root_vdev);

	spa_config_exit(spa, FTAG);

	txg_wait_synced(spa_get_dsl(spa), 0);
}
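
/*
 * For illustration (hypothetical snippet): the upgrade path validates the
 * requested version and then simply asks for it, roughly:
 *
 *	if (version > spa_version(spa) && version <= SPA_VERSION)
 *		spa_upgrade(spa, version);
 *
 * Because spa_upgrade() ends with txg_wait_synced(), the new version is
 * on disk by the time the call returns.
 */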

boolean_t
spa_has_spare(spa_t *spa, uint64_t guid)
{
	int i;
	uint64_t spareguid;
	spa_aux_vdev_t *sav = &spa->spa_spares;

	for (i = 0; i < sav->sav_count; i++)
		if (sav->sav_vdevs[i]->vdev_guid == guid)
			return (B_TRUE);

	for (i = 0; i < sav->sav_npending; i++) {
		if (nvlist_lookup_uint64(sav->sav_pending[i], ZPOOL_CONFIG_GUID,
		    &spareguid) == 0 && spareguid == guid)
			return (B_TRUE);
	}

	return (B_FALSE);
}

/*
 * Check if a pool has an active shared spare device.
 * Note: the reference count of an active spare is 2 -- once as a spare and
 * once as a replacing vdev -- so anything greater means the spare is also
 * referenced by another pool.
 */
static boolean_t
spa_has_active_shared_spare(spa_t *spa)
{
	int i, refcnt;
	uint64_t pool;
	spa_aux_vdev_t *sav = &spa->spa_spares;

	for (i = 0; i < sav->sav_count; i++) {
		if (spa_spare_exists(sav->sav_vdevs[i]->vdev_guid, &pool,
		    &refcnt) && pool != 0ULL && pool == spa_guid(spa) &&
		    refcnt > 2)
			return (B_TRUE);
	}

	return (B_FALSE);
}

/*
 * Post a sysevent corresponding to the given event.  The 'name' must be one
 * of the event definitions in sys/sysevent/eventdefs.h.  The payload will be
 * filled in from the spa and (optionally) the vdev.  This doesn't do anything
 * in the userland libzpool, as we don't want consumers to misinterpret ztest
 * or zdb as real changes.
 */
void
spa_event_notify(spa_t *spa, vdev_t *vd, const char *name)
{
#ifdef _KERNEL
	sysevent_t *ev;
	sysevent_attr_list_t *attr = NULL;
	sysevent_value_t value;
	sysevent_id_t eid;

	ev = sysevent_alloc(EC_ZFS, (char *)name, SUNW_KERN_PUB "zfs",
	    SE_SLEEP);

	value.value_type = SE_DATA_TYPE_STRING;
	value.value.sv_string = spa_name(spa);
	if (sysevent_add_attr(&attr, ZFS_EV_POOL_NAME, &value, SE_SLEEP) != 0)
		goto done;

	value.value_type = SE_DATA_TYPE_UINT64;
	value.value.sv_uint64 = spa_guid(spa);
	if (sysevent_add_attr(&attr, ZFS_EV_POOL_GUID, &value, SE_SLEEP) != 0)
		goto done;

	if (vd) {
		value.value_type = SE_DATA_TYPE_UINT64;
		value.value.sv_uint64 = vd->vdev_guid;
		if (sysevent_add_attr(&attr, ZFS_EV_VDEV_GUID, &value,
		    SE_SLEEP) != 0)
			goto done;

		if (vd->vdev_path) {
			value.value_type = SE_DATA_TYPE_STRING;
			value.value.sv_string = vd->vdev_path;
			if (sysevent_add_attr(&attr, ZFS_EV_VDEV_PATH,
			    &value, SE_SLEEP) != 0)
				goto done;
		}
	}

	if (sysevent_attach_attributes(ev, attr) != 0)
		goto done;
	attr = NULL;

	(void) log_sysevent(ev, SE_SLEEP, &eid);

done:
	if (attr)
		sysevent_free_attr(attr);
	sysevent_free(ev);
#endif
}
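
/*
 * For illustration (hypothetical snippets): callers pass one of the EC_ZFS
 * subclass names from sys/sysevent/eventdefs.h, e.g. when a device is
 * removed from the pool:
 *
 *	spa_event_notify(spa, vd, ESC_ZFS_VDEV_REMOVE);
 *
 * or, with no vdev payload, for pool-wide events such as the end of a
 * resilver:
 *
 *	spa_event_notify(spa, NULL, ESC_ZFS_RESILVER_FINISH);
 */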