1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 22 /* 23 * Copyright 2008 Sun Microsystems, Inc. All rights reserved. 24 * Use is subject to license terms. 25 */ 26 27 /* 28 * This file contains all the routines used when modifying on-disk SPA state. 29 * This includes opening, importing, destroying, exporting a pool, and syncing a 30 * pool. 31 */ 32 33 #include <sys/zfs_context.h> 34 #include <sys/fm/fs/zfs.h> 35 #include <sys/spa_impl.h> 36 #include <sys/zio.h> 37 #include <sys/zio_checksum.h> 38 #include <sys/zio_compress.h> 39 #include <sys/dmu.h> 40 #include <sys/dmu_tx.h> 41 #include <sys/zap.h> 42 #include <sys/zil.h> 43 #include <sys/vdev_impl.h> 44 #include <sys/metaslab.h> 45 #include <sys/uberblock_impl.h> 46 #include <sys/txg.h> 47 #include <sys/avl.h> 48 #include <sys/dmu_traverse.h> 49 #include <sys/dmu_objset.h> 50 #include <sys/unique.h> 51 #include <sys/dsl_pool.h> 52 #include <sys/dsl_dataset.h> 53 #include <sys/dsl_dir.h> 54 #include <sys/dsl_prop.h> 55 #include <sys/dsl_synctask.h> 56 #include <sys/fs/zfs.h> 57 #include <sys/arc.h> 58 #include <sys/callb.h> 59 #include <sys/systeminfo.h> 60 #include <sys/sunddi.h> 61 #include <sys/spa_boot.h> 62 63 #include "zfs_prop.h" 64 #include "zfs_comutil.h" 65 66 int zio_taskq_threads = 8; 67 68 static void spa_sync_props(void *arg1, void *arg2, cred_t *cr, dmu_tx_t *tx); 69 static boolean_t spa_has_active_shared_spare(spa_t *spa); 70 71 /* 72 * ========================================================================== 73 * SPA properties routines 74 * ========================================================================== 75 */ 76 77 /* 78 * Add a (source=src, propname=propval) list to an nvlist. 79 */ 80 static void 81 spa_prop_add_list(nvlist_t *nvl, zpool_prop_t prop, char *strval, 82 uint64_t intval, zprop_source_t src) 83 { 84 const char *propname = zpool_prop_to_name(prop); 85 nvlist_t *propval; 86 87 VERIFY(nvlist_alloc(&propval, NV_UNIQUE_NAME, KM_SLEEP) == 0); 88 VERIFY(nvlist_add_uint64(propval, ZPROP_SOURCE, src) == 0); 89 90 if (strval != NULL) 91 VERIFY(nvlist_add_string(propval, ZPROP_VALUE, strval) == 0); 92 else 93 VERIFY(nvlist_add_uint64(propval, ZPROP_VALUE, intval) == 0); 94 95 VERIFY(nvlist_add_nvlist(nvl, propname, propval) == 0); 96 nvlist_free(propval); 97 } 98 99 /* 100 * Get property values from the spa configuration. 
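 *
 * Each property is added through spa_prop_add_list() above, so the resulting
 * nvlist maps a property name to a nested nvlist holding the value's source
 * and the value itself. A rough sketch of the layout (illustrative values
 * only, not taken from a real pool):
 *
 *	"size"    -> { ZPROP_SOURCE = ZPROP_SRC_NONE,  ZPROP_VALUE = 68719476736 }
 *	"altroot" -> { ZPROP_SOURCE = ZPROP_SRC_LOCAL, ZPROP_VALUE = "/a" }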
101 */ 102 static void 103 spa_prop_get_config(spa_t *spa, nvlist_t **nvp) 104 { 105 uint64_t size = spa_get_space(spa); 106 uint64_t used = spa_get_alloc(spa); 107 uint64_t cap, version; 108 zprop_source_t src = ZPROP_SRC_NONE; 109 spa_config_dirent_t *dp; 110 111 /* 112 * readonly properties 113 */ 114 spa_prop_add_list(*nvp, ZPOOL_PROP_NAME, spa->spa_name, 0, src); 115 spa_prop_add_list(*nvp, ZPOOL_PROP_SIZE, NULL, size, src); 116 spa_prop_add_list(*nvp, ZPOOL_PROP_USED, NULL, used, src); 117 spa_prop_add_list(*nvp, ZPOOL_PROP_AVAILABLE, NULL, size - used, src); 118 119 cap = (size == 0) ? 0 : (used * 100 / size); 120 spa_prop_add_list(*nvp, ZPOOL_PROP_CAPACITY, NULL, cap, src); 121 122 spa_prop_add_list(*nvp, ZPOOL_PROP_GUID, NULL, spa_guid(spa), src); 123 spa_prop_add_list(*nvp, ZPOOL_PROP_HEALTH, NULL, 124 spa->spa_root_vdev->vdev_state, src); 125 126 /* 127 * settable properties that are not stored in the pool property object. 128 */ 129 version = spa_version(spa); 130 if (version == zpool_prop_default_numeric(ZPOOL_PROP_VERSION)) 131 src = ZPROP_SRC_DEFAULT; 132 else 133 src = ZPROP_SRC_LOCAL; 134 spa_prop_add_list(*nvp, ZPOOL_PROP_VERSION, NULL, version, src); 135 136 if (spa->spa_root != NULL) 137 spa_prop_add_list(*nvp, ZPOOL_PROP_ALTROOT, spa->spa_root, 138 0, ZPROP_SRC_LOCAL); 139 140 if ((dp = list_head(&spa->spa_config_list)) != NULL) { 141 if (dp->scd_path == NULL) { 142 spa_prop_add_list(*nvp, ZPOOL_PROP_CACHEFILE, 143 "none", 0, ZPROP_SRC_LOCAL); 144 } else if (strcmp(dp->scd_path, spa_config_path) != 0) { 145 spa_prop_add_list(*nvp, ZPOOL_PROP_CACHEFILE, 146 dp->scd_path, 0, ZPROP_SRC_LOCAL); 147 } 148 } 149 } 150 151 /* 152 * Get zpool property values. 153 */ 154 int 155 spa_prop_get(spa_t *spa, nvlist_t **nvp) 156 { 157 zap_cursor_t zc; 158 zap_attribute_t za; 159 objset_t *mos = spa->spa_meta_objset; 160 int err; 161 162 VERIFY(nvlist_alloc(nvp, NV_UNIQUE_NAME, KM_SLEEP) == 0); 163 164 /* 165 * Get properties from the spa config. 166 */ 167 spa_prop_get_config(spa, nvp); 168 169 mutex_enter(&spa->spa_props_lock); 170 /* If no pool property object, no more prop to get. */ 171 if (spa->spa_pool_props_object == 0) { 172 mutex_exit(&spa->spa_props_lock); 173 return (0); 174 } 175 176 /* 177 * Get properties from the MOS pool property object. 
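	 * Each ZAP entry is keyed by the property name. Integer properties are
	 * stored as a single uint64 (za_integer_length == 8) and string
	 * properties as an array of bytes (za_integer_length == 1); the switch
	 * below distinguishes the two cases on exactly that field.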
178 */ 179 for (zap_cursor_init(&zc, mos, spa->spa_pool_props_object); 180 (err = zap_cursor_retrieve(&zc, &za)) == 0; 181 zap_cursor_advance(&zc)) { 182 uint64_t intval = 0; 183 char *strval = NULL; 184 zprop_source_t src = ZPROP_SRC_DEFAULT; 185 zpool_prop_t prop; 186 187 if ((prop = zpool_name_to_prop(za.za_name)) == ZPROP_INVAL) 188 continue; 189 190 switch (za.za_integer_length) { 191 case 8: 192 /* integer property */ 193 if (za.za_first_integer != 194 zpool_prop_default_numeric(prop)) 195 src = ZPROP_SRC_LOCAL; 196 197 if (prop == ZPOOL_PROP_BOOTFS) { 198 dsl_pool_t *dp; 199 dsl_dataset_t *ds = NULL; 200 201 dp = spa_get_dsl(spa); 202 rw_enter(&dp->dp_config_rwlock, RW_READER); 203 if (err = dsl_dataset_hold_obj(dp, 204 za.za_first_integer, FTAG, &ds)) { 205 rw_exit(&dp->dp_config_rwlock); 206 break; 207 } 208 209 strval = kmem_alloc( 210 MAXNAMELEN + strlen(MOS_DIR_NAME) + 1, 211 KM_SLEEP); 212 dsl_dataset_name(ds, strval); 213 dsl_dataset_rele(ds, FTAG); 214 rw_exit(&dp->dp_config_rwlock); 215 } else { 216 strval = NULL; 217 intval = za.za_first_integer; 218 } 219 220 spa_prop_add_list(*nvp, prop, strval, intval, src); 221 222 if (strval != NULL) 223 kmem_free(strval, 224 MAXNAMELEN + strlen(MOS_DIR_NAME) + 1); 225 226 break; 227 228 case 1: 229 /* string property */ 230 strval = kmem_alloc(za.za_num_integers, KM_SLEEP); 231 err = zap_lookup(mos, spa->spa_pool_props_object, 232 za.za_name, 1, za.za_num_integers, strval); 233 if (err) { 234 kmem_free(strval, za.za_num_integers); 235 break; 236 } 237 spa_prop_add_list(*nvp, prop, strval, 0, src); 238 kmem_free(strval, za.za_num_integers); 239 break; 240 241 default: 242 break; 243 } 244 } 245 zap_cursor_fini(&zc); 246 mutex_exit(&spa->spa_props_lock); 247 out: 248 if (err && err != ENOENT) { 249 nvlist_free(*nvp); 250 *nvp = NULL; 251 return (err); 252 } 253 254 return (0); 255 } 256 257 /* 258 * Validate the given pool properties nvlist and modify the list 259 * for the property values to be set. 
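 *
 * For example, a caller setting bootfs passes a dataset name string; if it
 * validates, the string pair is replaced in 'props' with the dataset's object
 * number so that spa_sync_props() can store it as a uint64.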
260 */ 261 static int 262 spa_prop_validate(spa_t *spa, nvlist_t *props) 263 { 264 nvpair_t *elem; 265 int error = 0, reset_bootfs = 0; 266 uint64_t objnum; 267 268 elem = NULL; 269 while ((elem = nvlist_next_nvpair(props, elem)) != NULL) { 270 zpool_prop_t prop; 271 char *propname, *strval; 272 uint64_t intval; 273 objset_t *os; 274 char *slash; 275 276 propname = nvpair_name(elem); 277 278 if ((prop = zpool_name_to_prop(propname)) == ZPROP_INVAL) 279 return (EINVAL); 280 281 switch (prop) { 282 case ZPOOL_PROP_VERSION: 283 error = nvpair_value_uint64(elem, &intval); 284 if (!error && 285 (intval < spa_version(spa) || intval > SPA_VERSION)) 286 error = EINVAL; 287 break; 288 289 case ZPOOL_PROP_DELEGATION: 290 case ZPOOL_PROP_AUTOREPLACE: 291 error = nvpair_value_uint64(elem, &intval); 292 if (!error && intval > 1) 293 error = EINVAL; 294 break; 295 296 case ZPOOL_PROP_BOOTFS: 297 if (spa_version(spa) < SPA_VERSION_BOOTFS) { 298 error = ENOTSUP; 299 break; 300 } 301 302 /* 303 * Make sure the vdev config is bootable 304 */ 305 if (!vdev_is_bootable(spa->spa_root_vdev)) { 306 error = ENOTSUP; 307 break; 308 } 309 310 reset_bootfs = 1; 311 312 error = nvpair_value_string(elem, &strval); 313 314 if (!error) { 315 uint64_t compress; 316 317 if (strval == NULL || strval[0] == '\0') { 318 objnum = zpool_prop_default_numeric( 319 ZPOOL_PROP_BOOTFS); 320 break; 321 } 322 323 if (error = dmu_objset_open(strval, DMU_OST_ZFS, 324 DS_MODE_USER | DS_MODE_READONLY, &os)) 325 break; 326 327 /* We don't support gzip bootable datasets */ 328 if ((error = dsl_prop_get_integer(strval, 329 zfs_prop_to_name(ZFS_PROP_COMPRESSION), 330 &compress, NULL)) == 0 && 331 !BOOTFS_COMPRESS_VALID(compress)) { 332 error = ENOTSUP; 333 } else { 334 objnum = dmu_objset_id(os); 335 } 336 dmu_objset_close(os); 337 } 338 break; 339 case ZPOOL_PROP_FAILUREMODE: 340 error = nvpair_value_uint64(elem, &intval); 341 if (!error && (intval < ZIO_FAILURE_MODE_WAIT || 342 intval > ZIO_FAILURE_MODE_PANIC)) 343 error = EINVAL; 344 345 /* 346 * This is a special case which only occurs when 347 * the pool has completely failed. This allows 348 * the user to change the in-core failmode property 349 * without syncing it out to disk (I/Os might 350 * currently be blocked). We do this by returning 351 * EIO to the caller (spa_prop_set) to trick it 352 * into thinking we encountered a property validation 353 * error. 
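			 * (Because spa_prop_set() bails out before calling
			 * dsl_sync_task_do() whenever validation returns
			 * nonzero, the returned EIO guarantees the new
			 * failmode value is never written to disk.)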
354 */ 355 if (!error && spa_state(spa) == POOL_STATE_IO_FAILURE) { 356 spa->spa_failmode = intval; 357 error = EIO; 358 } 359 break; 360 361 case ZPOOL_PROP_CACHEFILE: 362 if ((error = nvpair_value_string(elem, &strval)) != 0) 363 break; 364 365 if (strval[0] == '\0') 366 break; 367 368 if (strcmp(strval, "none") == 0) 369 break; 370 371 if (strval[0] != '/') { 372 error = EINVAL; 373 break; 374 } 375 376 slash = strrchr(strval, '/'); 377 ASSERT(slash != NULL); 378 379 if (slash[1] == '\0' || strcmp(slash, "/.") == 0 || 380 strcmp(slash, "/..") == 0) 381 error = EINVAL; 382 break; 383 } 384 385 if (error) 386 break; 387 } 388 389 if (!error && reset_bootfs) { 390 error = nvlist_remove(props, 391 zpool_prop_to_name(ZPOOL_PROP_BOOTFS), DATA_TYPE_STRING); 392 393 if (!error) { 394 error = nvlist_add_uint64(props, 395 zpool_prop_to_name(ZPOOL_PROP_BOOTFS), objnum); 396 } 397 } 398 399 return (error); 400 } 401 402 int 403 spa_prop_set(spa_t *spa, nvlist_t *nvp) 404 { 405 int error; 406 407 if ((error = spa_prop_validate(spa, nvp)) != 0) 408 return (error); 409 410 return (dsl_sync_task_do(spa_get_dsl(spa), NULL, spa_sync_props, 411 spa, nvp, 3)); 412 } 413 414 /* 415 * If the bootfs property value is dsobj, clear it. 416 */ 417 void 418 spa_prop_clear_bootfs(spa_t *spa, uint64_t dsobj, dmu_tx_t *tx) 419 { 420 if (spa->spa_bootfs == dsobj && spa->spa_pool_props_object != 0) { 421 VERIFY(zap_remove(spa->spa_meta_objset, 422 spa->spa_pool_props_object, 423 zpool_prop_to_name(ZPOOL_PROP_BOOTFS), tx) == 0); 424 spa->spa_bootfs = 0; 425 } 426 } 427 428 /* 429 * ========================================================================== 430 * SPA state manipulation (open/create/destroy/import/export) 431 * ========================================================================== 432 */ 433 434 static int 435 spa_error_entry_compare(const void *a, const void *b) 436 { 437 spa_error_entry_t *sa = (spa_error_entry_t *)a; 438 spa_error_entry_t *sb = (spa_error_entry_t *)b; 439 int ret; 440 441 ret = bcmp(&sa->se_bookmark, &sb->se_bookmark, 442 sizeof (zbookmark_t)); 443 444 if (ret < 0) 445 return (-1); 446 else if (ret > 0) 447 return (1); 448 else 449 return (0); 450 } 451 452 /* 453 * Utility function which retrieves copies of the current logs and 454 * re-initializes them in the process. 455 */ 456 void 457 spa_get_errlists(spa_t *spa, avl_tree_t *last, avl_tree_t *scrub) 458 { 459 ASSERT(MUTEX_HELD(&spa->spa_errlist_lock)); 460 461 bcopy(&spa->spa_errlist_last, last, sizeof (avl_tree_t)); 462 bcopy(&spa->spa_errlist_scrub, scrub, sizeof (avl_tree_t)); 463 464 avl_create(&spa->spa_errlist_scrub, 465 spa_error_entry_compare, sizeof (spa_error_entry_t), 466 offsetof(spa_error_entry_t, se_avl)); 467 avl_create(&spa->spa_errlist_last, 468 spa_error_entry_compare, sizeof (spa_error_entry_t), 469 offsetof(spa_error_entry_t, se_avl)); 470 } 471 472 /* 473 * Activate an uninitialized pool. 
474 */ 475 static void 476 spa_activate(spa_t *spa) 477 { 478 int t; 479 480 ASSERT(spa->spa_state == POOL_STATE_UNINITIALIZED); 481 482 spa->spa_state = POOL_STATE_ACTIVE; 483 484 spa->spa_normal_class = metaslab_class_create(); 485 spa->spa_log_class = metaslab_class_create(); 486 487 for (t = 0; t < ZIO_TYPES; t++) { 488 spa->spa_zio_issue_taskq[t] = taskq_create("spa_zio_issue", 489 zio_taskq_threads, maxclsyspri, 50, INT_MAX, 490 TASKQ_PREPOPULATE); 491 spa->spa_zio_intr_taskq[t] = taskq_create("spa_zio_intr", 492 zio_taskq_threads, maxclsyspri, 50, INT_MAX, 493 TASKQ_PREPOPULATE); 494 } 495 496 list_create(&spa->spa_dirty_list, sizeof (vdev_t), 497 offsetof(vdev_t, vdev_dirty_node)); 498 list_create(&spa->spa_zio_list, sizeof (zio_t), 499 offsetof(zio_t, zio_link_node)); 500 501 txg_list_create(&spa->spa_vdev_txg_list, 502 offsetof(struct vdev, vdev_txg_node)); 503 504 avl_create(&spa->spa_errlist_scrub, 505 spa_error_entry_compare, sizeof (spa_error_entry_t), 506 offsetof(spa_error_entry_t, se_avl)); 507 avl_create(&spa->spa_errlist_last, 508 spa_error_entry_compare, sizeof (spa_error_entry_t), 509 offsetof(spa_error_entry_t, se_avl)); 510 } 511 512 /* 513 * Opposite of spa_activate(). 514 */ 515 static void 516 spa_deactivate(spa_t *spa) 517 { 518 int t; 519 520 ASSERT(spa->spa_sync_on == B_FALSE); 521 ASSERT(spa->spa_dsl_pool == NULL); 522 ASSERT(spa->spa_root_vdev == NULL); 523 524 ASSERT(spa->spa_state != POOL_STATE_UNINITIALIZED); 525 526 txg_list_destroy(&spa->spa_vdev_txg_list); 527 528 list_destroy(&spa->spa_dirty_list); 529 list_destroy(&spa->spa_zio_list); 530 531 for (t = 0; t < ZIO_TYPES; t++) { 532 taskq_destroy(spa->spa_zio_issue_taskq[t]); 533 taskq_destroy(spa->spa_zio_intr_taskq[t]); 534 spa->spa_zio_issue_taskq[t] = NULL; 535 spa->spa_zio_intr_taskq[t] = NULL; 536 } 537 538 metaslab_class_destroy(spa->spa_normal_class); 539 spa->spa_normal_class = NULL; 540 541 metaslab_class_destroy(spa->spa_log_class); 542 spa->spa_log_class = NULL; 543 544 /* 545 * If this was part of an import or the open otherwise failed, we may 546 * still have errors left in the queues. Empty them just in case. 547 */ 548 spa_errlog_drain(spa); 549 550 avl_destroy(&spa->spa_errlist_scrub); 551 avl_destroy(&spa->spa_errlist_last); 552 553 spa->spa_state = POOL_STATE_UNINITIALIZED; 554 } 555 556 /* 557 * Verify a pool configuration, and construct the vdev tree appropriately. This 558 * will create all the necessary vdevs in the appropriate layout, with each vdev 559 * in the CLOSED state. This will prep the pool before open/creation/import. 560 * All vdev validation is done by the vdev_alloc() routine. 561 */ 562 static int 563 spa_config_parse(spa_t *spa, vdev_t **vdp, nvlist_t *nv, vdev_t *parent, 564 uint_t id, int atype) 565 { 566 nvlist_t **child; 567 uint_t c, children; 568 int error; 569 570 if ((error = vdev_alloc(spa, vdp, nv, parent, id, atype)) != 0) 571 return (error); 572 573 if ((*vdp)->vdev_ops->vdev_op_leaf) 574 return (0); 575 576 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN, 577 &child, &children) != 0) { 578 vdev_free(*vdp); 579 *vdp = NULL; 580 return (EINVAL); 581 } 582 583 for (c = 0; c < children; c++) { 584 vdev_t *vd; 585 if ((error = spa_config_parse(spa, &vd, child[c], *vdp, c, 586 atype)) != 0) { 587 vdev_free(*vdp); 588 *vdp = NULL; 589 return (error); 590 } 591 } 592 593 ASSERT(*vdp != NULL); 594 595 return (0); 596 } 597 598 /* 599 * Opposite of spa_load(). 
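 *
 * Teardown happens in roughly the reverse order of setup: suspend async
 * tasks, stop the txg sync thread, cycle the config lock as writer to wait
 * out any in-flight prefetch I/O, drop the L2ARC, close the DSL pool, and
 * finally free the vdev trees (root, spares, and l2cache).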
 */
static void
spa_unload(spa_t *spa)
{
	int i;

	/*
	 * Stop async tasks.
	 */
	spa_async_suspend(spa);

	/*
	 * Stop syncing.
	 */
	if (spa->spa_sync_on) {
		txg_sync_stop(spa->spa_dsl_pool);
		spa->spa_sync_on = B_FALSE;
	}

	/*
	 * Wait for any outstanding prefetch I/O to complete.
	 */
	spa_config_enter(spa, RW_WRITER, FTAG);
	spa_config_exit(spa, FTAG);

	/*
	 * Drop and purge level 2 cache
	 */
	spa_l2cache_drop(spa);

	/*
	 * Close the dsl pool.
	 */
	if (spa->spa_dsl_pool) {
		dsl_pool_close(spa->spa_dsl_pool);
		spa->spa_dsl_pool = NULL;
	}

	/*
	 * Close all vdevs.
	 */
	if (spa->spa_root_vdev)
		vdev_free(spa->spa_root_vdev);
	ASSERT(spa->spa_root_vdev == NULL);

	for (i = 0; i < spa->spa_spares.sav_count; i++)
		vdev_free(spa->spa_spares.sav_vdevs[i]);
	if (spa->spa_spares.sav_vdevs) {
		kmem_free(spa->spa_spares.sav_vdevs,
		    spa->spa_spares.sav_count * sizeof (void *));
		spa->spa_spares.sav_vdevs = NULL;
	}
	if (spa->spa_spares.sav_config) {
		nvlist_free(spa->spa_spares.sav_config);
		spa->spa_spares.sav_config = NULL;
	}

	for (i = 0; i < spa->spa_l2cache.sav_count; i++)
		vdev_free(spa->spa_l2cache.sav_vdevs[i]);
	if (spa->spa_l2cache.sav_vdevs) {
		kmem_free(spa->spa_l2cache.sav_vdevs,
		    spa->spa_l2cache.sav_count * sizeof (void *));
		spa->spa_l2cache.sav_vdevs = NULL;
	}
	if (spa->spa_l2cache.sav_config) {
		nvlist_free(spa->spa_l2cache.sav_config);
		spa->spa_l2cache.sav_config = NULL;
	}

	spa->spa_async_suspended = 0;
}

/*
 * Load (or re-load) the current list of vdevs describing the active spares for
 * this pool. When this is called, we have some form of basic information in
 * 'spa_spares.sav_config'. We parse this into vdevs, try to open them, and
 * then re-generate a more complete list including status information.
 */
static void
spa_load_spares(spa_t *spa)
{
	nvlist_t **spares;
	uint_t nspares;
	int i;
	vdev_t *vd, *tvd;

	/*
	 * First, close and free any existing spare vdevs.
	 */
	for (i = 0; i < spa->spa_spares.sav_count; i++) {
		vd = spa->spa_spares.sav_vdevs[i];

		/* Undo the call to spa_activate() below */
		if ((tvd = spa_lookup_by_guid(spa, vd->vdev_guid,
		    B_FALSE)) != NULL && tvd->vdev_isspare)
			spa_spare_remove(tvd);
		vdev_close(vd);
		vdev_free(vd);
	}

	if (spa->spa_spares.sav_vdevs)
		kmem_free(spa->spa_spares.sav_vdevs,
		    spa->spa_spares.sav_count * sizeof (void *));

	if (spa->spa_spares.sav_config == NULL)
		nspares = 0;
	else
		VERIFY(nvlist_lookup_nvlist_array(spa->spa_spares.sav_config,
		    ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0);

	spa->spa_spares.sav_count = (int)nspares;
	spa->spa_spares.sav_vdevs = NULL;

	if (nspares == 0)
		return;

	/*
	 * Construct the array of vdevs, opening them to get status in the
	 * process. For each spare, there are potentially two different vdev_t
	 * structures associated with it: one in the list of spares (used only
	 * for basic validation purposes) and one in the active vdev
	 * configuration (if it's spared in). During this phase we open and
	 * validate each vdev on the spare list. If the vdev also exists in the
	 * active configuration, then we also mark this vdev as an active spare.
 */
	spa->spa_spares.sav_vdevs = kmem_alloc(nspares * sizeof (void *),
	    KM_SLEEP);
	for (i = 0; i < spa->spa_spares.sav_count; i++) {
		VERIFY(spa_config_parse(spa, &vd, spares[i], NULL, 0,
		    VDEV_ALLOC_SPARE) == 0);
		ASSERT(vd != NULL);

		spa->spa_spares.sav_vdevs[i] = vd;

		if ((tvd = spa_lookup_by_guid(spa, vd->vdev_guid,
		    B_FALSE)) != NULL) {
			if (!tvd->vdev_isspare)
				spa_spare_add(tvd);

			/*
			 * We only mark the spare active if we were successfully
			 * able to load the vdev. Otherwise, importing a pool
			 * with a bad active spare would result in strange
			 * behavior, because multiple pools would think the
			 * spare is actively in use.
			 *
			 * There is a vulnerability here to an equally bizarre
			 * circumstance, where a dead active spare is later
			 * brought back to life (onlined or otherwise). Given
			 * the rarity of this scenario, and the extra complexity
			 * it adds, we ignore the possibility.
			 */
			if (!vdev_is_dead(tvd))
				spa_spare_activate(tvd);
		}

		if (vdev_open(vd) != 0)
			continue;

		vd->vdev_top = vd;
		if (vdev_validate_aux(vd) == 0)
			spa_spare_add(vd);
	}

	/*
	 * Recompute the stashed list of spares, with status information
	 * this time.
	 */
	VERIFY(nvlist_remove(spa->spa_spares.sav_config, ZPOOL_CONFIG_SPARES,
	    DATA_TYPE_NVLIST_ARRAY) == 0);

	spares = kmem_alloc(spa->spa_spares.sav_count * sizeof (void *),
	    KM_SLEEP);
	for (i = 0; i < spa->spa_spares.sav_count; i++)
		spares[i] = vdev_config_generate(spa,
		    spa->spa_spares.sav_vdevs[i], B_TRUE, B_TRUE, B_FALSE);
	VERIFY(nvlist_add_nvlist_array(spa->spa_spares.sav_config,
	    ZPOOL_CONFIG_SPARES, spares, spa->spa_spares.sav_count) == 0);
	for (i = 0; i < spa->spa_spares.sav_count; i++)
		nvlist_free(spares[i]);
	kmem_free(spares, spa->spa_spares.sav_count * sizeof (void *));
}

/*
 * Load (or re-load) the current list of vdevs describing the active l2cache for
 * this pool. When this is called, we have some form of basic information in
 * 'spa_l2cache.sav_config'. We parse this into vdevs, try to open them, and
 * then re-generate a more complete list including status information.
 * Devices which are already active have their details maintained, and are
 * not re-opened.
 */
static void
spa_load_l2cache(spa_t *spa)
{
	nvlist_t **l2cache;
	uint_t nl2cache;
	int i, j, oldnvdevs;
	uint64_t guid, size;
	vdev_t *vd, **oldvdevs, **newvdevs;
	spa_aux_vdev_t *sav = &spa->spa_l2cache;

	if (sav->sav_config != NULL) {
		VERIFY(nvlist_lookup_nvlist_array(sav->sav_config,
		    ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0);
		newvdevs = kmem_alloc(nl2cache * sizeof (void *), KM_SLEEP);
	} else {
		nl2cache = 0;
	}

	oldvdevs = sav->sav_vdevs;
	oldnvdevs = sav->sav_count;
	sav->sav_vdevs = NULL;
	sav->sav_count = 0;

	/*
	 * Process new nvlist of vdevs.
	 */
	for (i = 0; i < nl2cache; i++) {
		VERIFY(nvlist_lookup_uint64(l2cache[i], ZPOOL_CONFIG_GUID,
		    &guid) == 0);

		newvdevs[i] = NULL;
		for (j = 0; j < oldnvdevs; j++) {
			vd = oldvdevs[j];
			if (vd != NULL && guid == vd->vdev_guid) {
				/*
				 * Retain previous vdev for add/remove ops.
827 */ 828 newvdevs[i] = vd; 829 oldvdevs[j] = NULL; 830 break; 831 } 832 } 833 834 if (newvdevs[i] == NULL) { 835 /* 836 * Create new vdev 837 */ 838 VERIFY(spa_config_parse(spa, &vd, l2cache[i], NULL, 0, 839 VDEV_ALLOC_L2CACHE) == 0); 840 ASSERT(vd != NULL); 841 newvdevs[i] = vd; 842 843 /* 844 * Commit this vdev as an l2cache device, 845 * even if it fails to open. 846 */ 847 spa_l2cache_add(vd); 848 849 vd->vdev_top = vd; 850 vd->vdev_aux = sav; 851 852 spa_l2cache_activate(vd); 853 854 if (vdev_open(vd) != 0) 855 continue; 856 857 (void) vdev_validate_aux(vd); 858 859 if (!vdev_is_dead(vd)) { 860 size = vdev_get_rsize(vd); 861 l2arc_add_vdev(spa, vd, 862 VDEV_LABEL_START_SIZE, 863 size - VDEV_LABEL_START_SIZE); 864 } 865 } 866 } 867 868 /* 869 * Purge vdevs that were dropped 870 */ 871 for (i = 0; i < oldnvdevs; i++) { 872 uint64_t pool; 873 874 vd = oldvdevs[i]; 875 if (vd != NULL) { 876 if (spa_mode & FWRITE && 877 spa_l2cache_exists(vd->vdev_guid, &pool) && 878 pool != 0ULL && 879 l2arc_vdev_present(vd)) { 880 l2arc_remove_vdev(vd); 881 } 882 (void) vdev_close(vd); 883 spa_l2cache_remove(vd); 884 } 885 } 886 887 if (oldvdevs) 888 kmem_free(oldvdevs, oldnvdevs * sizeof (void *)); 889 890 if (sav->sav_config == NULL) 891 goto out; 892 893 sav->sav_vdevs = newvdevs; 894 sav->sav_count = (int)nl2cache; 895 896 /* 897 * Recompute the stashed list of l2cache devices, with status 898 * information this time. 899 */ 900 VERIFY(nvlist_remove(sav->sav_config, ZPOOL_CONFIG_L2CACHE, 901 DATA_TYPE_NVLIST_ARRAY) == 0); 902 903 l2cache = kmem_alloc(sav->sav_count * sizeof (void *), KM_SLEEP); 904 for (i = 0; i < sav->sav_count; i++) 905 l2cache[i] = vdev_config_generate(spa, 906 sav->sav_vdevs[i], B_TRUE, B_FALSE, B_TRUE); 907 VERIFY(nvlist_add_nvlist_array(sav->sav_config, 908 ZPOOL_CONFIG_L2CACHE, l2cache, sav->sav_count) == 0); 909 out: 910 for (i = 0; i < sav->sav_count; i++) 911 nvlist_free(l2cache[i]); 912 if (sav->sav_count) 913 kmem_free(l2cache, sav->sav_count * sizeof (void *)); 914 } 915 916 static int 917 load_nvlist(spa_t *spa, uint64_t obj, nvlist_t **value) 918 { 919 dmu_buf_t *db; 920 char *packed = NULL; 921 size_t nvsize = 0; 922 int error; 923 *value = NULL; 924 925 VERIFY(0 == dmu_bonus_hold(spa->spa_meta_objset, obj, FTAG, &db)); 926 nvsize = *(uint64_t *)db->db_data; 927 dmu_buf_rele(db, FTAG); 928 929 packed = kmem_alloc(nvsize, KM_SLEEP); 930 error = dmu_read(spa->spa_meta_objset, obj, 0, nvsize, packed); 931 if (error == 0) 932 error = nvlist_unpack(packed, nvsize, value, 0); 933 kmem_free(packed, nvsize); 934 935 return (error); 936 } 937 938 /* 939 * Checks to see if the given vdev could not be opened, in which case we post a 940 * sysevent to notify the autoreplace code that the device has been removed. 
941 */ 942 static void 943 spa_check_removed(vdev_t *vd) 944 { 945 int c; 946 947 for (c = 0; c < vd->vdev_children; c++) 948 spa_check_removed(vd->vdev_child[c]); 949 950 if (vd->vdev_ops->vdev_op_leaf && vdev_is_dead(vd)) { 951 zfs_post_autoreplace(vd->vdev_spa, vd); 952 spa_event_notify(vd->vdev_spa, vd, ESC_ZFS_VDEV_CHECK); 953 } 954 } 955 956 /* 957 * Check for missing log devices 958 */ 959 int 960 spa_check_logs(spa_t *spa) 961 { 962 switch (spa->spa_log_state) { 963 case SPA_LOG_MISSING: 964 /* need to recheck in case slog has been restored */ 965 case SPA_LOG_UNKNOWN: 966 if (dmu_objset_find(spa->spa_name, zil_check_log_chain, NULL, 967 DS_FIND_CHILDREN)) { 968 spa->spa_log_state = SPA_LOG_MISSING; 969 return (1); 970 } 971 break; 972 973 case SPA_LOG_CLEAR: 974 (void) dmu_objset_find(spa->spa_name, zil_clear_log_chain, NULL, 975 DS_FIND_CHILDREN); 976 break; 977 } 978 spa->spa_log_state = SPA_LOG_GOOD; 979 return (0); 980 } 981 982 /* 983 * Load an existing storage pool, using the pool's builtin spa_config as a 984 * source of configuration information. 985 */ 986 static int 987 spa_load(spa_t *spa, nvlist_t *config, spa_load_state_t state, int mosconfig) 988 { 989 int error = 0; 990 nvlist_t *nvroot = NULL; 991 vdev_t *rvd; 992 uberblock_t *ub = &spa->spa_uberblock; 993 uint64_t config_cache_txg = spa->spa_config_txg; 994 uint64_t pool_guid; 995 uint64_t version; 996 zio_t *zio; 997 uint64_t autoreplace = 0; 998 char *ereport = FM_EREPORT_ZFS_POOL; 999 1000 spa->spa_load_state = state; 1001 1002 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &nvroot) || 1003 nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID, &pool_guid)) { 1004 error = EINVAL; 1005 goto out; 1006 } 1007 1008 /* 1009 * Versioning wasn't explicitly added to the label until later, so if 1010 * it's not present treat it as the initial version. 1011 */ 1012 if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION, &version) != 0) 1013 version = SPA_VERSION_INITIAL; 1014 1015 (void) nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_TXG, 1016 &spa->spa_config_txg); 1017 1018 if ((state == SPA_LOAD_IMPORT || state == SPA_LOAD_TRYIMPORT) && 1019 spa_guid_exists(pool_guid, 0)) { 1020 error = EEXIST; 1021 goto out; 1022 } 1023 1024 spa->spa_load_guid = pool_guid; 1025 1026 /* 1027 * Parse the configuration into a vdev tree. We explicitly set the 1028 * value that will be returned by spa_version() since parsing the 1029 * configuration requires knowing the version number. 1030 */ 1031 spa_config_enter(spa, RW_WRITER, FTAG); 1032 spa->spa_ubsync.ub_version = version; 1033 error = spa_config_parse(spa, &rvd, nvroot, NULL, 0, VDEV_ALLOC_LOAD); 1034 spa_config_exit(spa, FTAG); 1035 1036 if (error != 0) 1037 goto out; 1038 1039 ASSERT(spa->spa_root_vdev == rvd); 1040 ASSERT(spa_guid(spa) == pool_guid); 1041 1042 /* 1043 * Try to open all vdevs, loading each label in the process. 1044 */ 1045 error = vdev_open(rvd); 1046 if (error != 0) 1047 goto out; 1048 1049 /* 1050 * Validate the labels for all leaf vdevs. We need to grab the config 1051 * lock because all label I/O is done with the ZIO_FLAG_CONFIG_HELD 1052 * flag. 1053 */ 1054 spa_config_enter(spa, RW_READER, FTAG); 1055 error = vdev_validate(rvd); 1056 spa_config_exit(spa, FTAG); 1057 1058 if (error != 0) 1059 goto out; 1060 1061 if (rvd->vdev_state <= VDEV_STATE_CANT_OPEN) { 1062 error = ENXIO; 1063 goto out; 1064 } 1065 1066 /* 1067 * Find the best uberblock. 
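	 * vdev_uberblock_load() reads every label on every readable vdev and
	 * keeps the uberblock with the highest txg (ties broken by timestamp),
	 * so "best" here means, roughly, the newest uberblock that passed its
	 * checksum.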
1068 */ 1069 bzero(ub, sizeof (uberblock_t)); 1070 1071 zio = zio_root(spa, NULL, NULL, 1072 ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE); 1073 vdev_uberblock_load(zio, rvd, ub); 1074 error = zio_wait(zio); 1075 1076 /* 1077 * If we weren't able to find a single valid uberblock, return failure. 1078 */ 1079 if (ub->ub_txg == 0) { 1080 vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN, 1081 VDEV_AUX_CORRUPT_DATA); 1082 error = ENXIO; 1083 goto out; 1084 } 1085 1086 /* 1087 * If the pool is newer than the code, we can't open it. 1088 */ 1089 if (ub->ub_version > SPA_VERSION) { 1090 vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN, 1091 VDEV_AUX_VERSION_NEWER); 1092 error = ENOTSUP; 1093 goto out; 1094 } 1095 1096 /* 1097 * If the vdev guid sum doesn't match the uberblock, we have an 1098 * incomplete configuration. 1099 */ 1100 if (rvd->vdev_guid_sum != ub->ub_guid_sum && mosconfig) { 1101 vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN, 1102 VDEV_AUX_BAD_GUID_SUM); 1103 error = ENXIO; 1104 goto out; 1105 } 1106 1107 /* 1108 * Initialize internal SPA structures. 1109 */ 1110 spa->spa_state = POOL_STATE_ACTIVE; 1111 spa->spa_ubsync = spa->spa_uberblock; 1112 spa->spa_first_txg = spa_last_synced_txg(spa) + 1; 1113 error = dsl_pool_open(spa, spa->spa_first_txg, &spa->spa_dsl_pool); 1114 if (error) { 1115 vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN, 1116 VDEV_AUX_CORRUPT_DATA); 1117 goto out; 1118 } 1119 spa->spa_meta_objset = spa->spa_dsl_pool->dp_meta_objset; 1120 1121 if (zap_lookup(spa->spa_meta_objset, 1122 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_CONFIG, 1123 sizeof (uint64_t), 1, &spa->spa_config_object) != 0) { 1124 vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN, 1125 VDEV_AUX_CORRUPT_DATA); 1126 error = EIO; 1127 goto out; 1128 } 1129 1130 if (!mosconfig) { 1131 nvlist_t *newconfig; 1132 uint64_t hostid; 1133 1134 if (load_nvlist(spa, spa->spa_config_object, &newconfig) != 0) { 1135 vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN, 1136 VDEV_AUX_CORRUPT_DATA); 1137 error = EIO; 1138 goto out; 1139 } 1140 1141 if (nvlist_lookup_uint64(newconfig, ZPOOL_CONFIG_HOSTID, 1142 &hostid) == 0) { 1143 char *hostname; 1144 unsigned long myhostid = 0; 1145 1146 VERIFY(nvlist_lookup_string(newconfig, 1147 ZPOOL_CONFIG_HOSTNAME, &hostname) == 0); 1148 1149 (void) ddi_strtoul(hw_serial, NULL, 10, &myhostid); 1150 if (hostid != 0 && myhostid != 0 && 1151 (unsigned long)hostid != myhostid) { 1152 cmn_err(CE_WARN, "pool '%s' could not be " 1153 "loaded as it was last accessed by " 1154 "another system (host: %s hostid: 0x%lx). " 1155 "See: http://www.sun.com/msg/ZFS-8000-EY", 1156 spa->spa_name, hostname, 1157 (unsigned long)hostid); 1158 error = EBADF; 1159 goto out; 1160 } 1161 } 1162 1163 spa_config_set(spa, newconfig); 1164 spa_unload(spa); 1165 spa_deactivate(spa); 1166 spa_activate(spa); 1167 1168 return (spa_load(spa, newconfig, state, B_TRUE)); 1169 } 1170 1171 if (zap_lookup(spa->spa_meta_objset, 1172 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_SYNC_BPLIST, 1173 sizeof (uint64_t), 1, &spa->spa_sync_bplist_obj) != 0) { 1174 vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN, 1175 VDEV_AUX_CORRUPT_DATA); 1176 error = EIO; 1177 goto out; 1178 } 1179 1180 /* 1181 * Load the bit that tells us to use the new accounting function 1182 * (raid-z deflation). If we have an older pool, this will not 1183 * be present. 
1184 */ 1185 error = zap_lookup(spa->spa_meta_objset, 1186 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_DEFLATE, 1187 sizeof (uint64_t), 1, &spa->spa_deflate); 1188 if (error != 0 && error != ENOENT) { 1189 vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN, 1190 VDEV_AUX_CORRUPT_DATA); 1191 error = EIO; 1192 goto out; 1193 } 1194 1195 /* 1196 * Load the persistent error log. If we have an older pool, this will 1197 * not be present. 1198 */ 1199 error = zap_lookup(spa->spa_meta_objset, 1200 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_ERRLOG_LAST, 1201 sizeof (uint64_t), 1, &spa->spa_errlog_last); 1202 if (error != 0 && error != ENOENT) { 1203 vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN, 1204 VDEV_AUX_CORRUPT_DATA); 1205 error = EIO; 1206 goto out; 1207 } 1208 1209 error = zap_lookup(spa->spa_meta_objset, 1210 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_ERRLOG_SCRUB, 1211 sizeof (uint64_t), 1, &spa->spa_errlog_scrub); 1212 if (error != 0 && error != ENOENT) { 1213 vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN, 1214 VDEV_AUX_CORRUPT_DATA); 1215 error = EIO; 1216 goto out; 1217 } 1218 1219 /* 1220 * Load the history object. If we have an older pool, this 1221 * will not be present. 1222 */ 1223 error = zap_lookup(spa->spa_meta_objset, 1224 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_HISTORY, 1225 sizeof (uint64_t), 1, &spa->spa_history); 1226 if (error != 0 && error != ENOENT) { 1227 vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN, 1228 VDEV_AUX_CORRUPT_DATA); 1229 error = EIO; 1230 goto out; 1231 } 1232 1233 /* 1234 * Load any hot spares for this pool. 1235 */ 1236 error = zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT, 1237 DMU_POOL_SPARES, sizeof (uint64_t), 1, &spa->spa_spares.sav_object); 1238 if (error != 0 && error != ENOENT) { 1239 vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN, 1240 VDEV_AUX_CORRUPT_DATA); 1241 error = EIO; 1242 goto out; 1243 } 1244 if (error == 0) { 1245 ASSERT(spa_version(spa) >= SPA_VERSION_SPARES); 1246 if (load_nvlist(spa, spa->spa_spares.sav_object, 1247 &spa->spa_spares.sav_config) != 0) { 1248 vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN, 1249 VDEV_AUX_CORRUPT_DATA); 1250 error = EIO; 1251 goto out; 1252 } 1253 1254 spa_config_enter(spa, RW_WRITER, FTAG); 1255 spa_load_spares(spa); 1256 spa_config_exit(spa, FTAG); 1257 } 1258 1259 /* 1260 * Load any level 2 ARC devices for this pool. 
1261 */ 1262 error = zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT, 1263 DMU_POOL_L2CACHE, sizeof (uint64_t), 1, 1264 &spa->spa_l2cache.sav_object); 1265 if (error != 0 && error != ENOENT) { 1266 vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN, 1267 VDEV_AUX_CORRUPT_DATA); 1268 error = EIO; 1269 goto out; 1270 } 1271 if (error == 0) { 1272 ASSERT(spa_version(spa) >= SPA_VERSION_L2CACHE); 1273 if (load_nvlist(spa, spa->spa_l2cache.sav_object, 1274 &spa->spa_l2cache.sav_config) != 0) { 1275 vdev_set_state(rvd, B_TRUE, 1276 VDEV_STATE_CANT_OPEN, 1277 VDEV_AUX_CORRUPT_DATA); 1278 error = EIO; 1279 goto out; 1280 } 1281 1282 spa_config_enter(spa, RW_WRITER, FTAG); 1283 spa_load_l2cache(spa); 1284 spa_config_exit(spa, FTAG); 1285 } 1286 1287 if (spa_check_logs(spa)) { 1288 vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN, 1289 VDEV_AUX_BAD_LOG); 1290 error = ENXIO; 1291 ereport = FM_EREPORT_ZFS_LOG_REPLAY; 1292 goto out; 1293 } 1294 1295 1296 spa->spa_delegation = zpool_prop_default_numeric(ZPOOL_PROP_DELEGATION); 1297 1298 error = zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT, 1299 DMU_POOL_PROPS, sizeof (uint64_t), 1, &spa->spa_pool_props_object); 1300 1301 if (error && error != ENOENT) { 1302 vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN, 1303 VDEV_AUX_CORRUPT_DATA); 1304 error = EIO; 1305 goto out; 1306 } 1307 1308 if (error == 0) { 1309 (void) zap_lookup(spa->spa_meta_objset, 1310 spa->spa_pool_props_object, 1311 zpool_prop_to_name(ZPOOL_PROP_BOOTFS), 1312 sizeof (uint64_t), 1, &spa->spa_bootfs); 1313 (void) zap_lookup(spa->spa_meta_objset, 1314 spa->spa_pool_props_object, 1315 zpool_prop_to_name(ZPOOL_PROP_AUTOREPLACE), 1316 sizeof (uint64_t), 1, &autoreplace); 1317 (void) zap_lookup(spa->spa_meta_objset, 1318 spa->spa_pool_props_object, 1319 zpool_prop_to_name(ZPOOL_PROP_DELEGATION), 1320 sizeof (uint64_t), 1, &spa->spa_delegation); 1321 (void) zap_lookup(spa->spa_meta_objset, 1322 spa->spa_pool_props_object, 1323 zpool_prop_to_name(ZPOOL_PROP_FAILUREMODE), 1324 sizeof (uint64_t), 1, &spa->spa_failmode); 1325 } 1326 1327 /* 1328 * If the 'autoreplace' property is set, then post a resource notifying 1329 * the ZFS DE that it should not issue any faults for unopenable 1330 * devices. We also iterate over the vdevs, and post a sysevent for any 1331 * unopenable vdevs so that the normal autoreplace handler can take 1332 * over. 1333 */ 1334 if (autoreplace && state != SPA_LOAD_TRYIMPORT) 1335 spa_check_removed(spa->spa_root_vdev); 1336 1337 /* 1338 * Load the vdev state for all toplevel vdevs. 1339 */ 1340 vdev_load(rvd); 1341 1342 /* 1343 * Propagate the leaf DTLs we just loaded all the way up the tree. 1344 */ 1345 spa_config_enter(spa, RW_WRITER, FTAG); 1346 vdev_dtl_reassess(rvd, 0, 0, B_FALSE); 1347 spa_config_exit(spa, FTAG); 1348 1349 /* 1350 * Check the state of the root vdev. If it can't be opened, it 1351 * indicates one or more toplevel vdevs are faulted. 1352 */ 1353 if (rvd->vdev_state <= VDEV_STATE_CANT_OPEN) { 1354 error = ENXIO; 1355 goto out; 1356 } 1357 1358 if ((spa_mode & FWRITE) && state != SPA_LOAD_TRYIMPORT) { 1359 dmu_tx_t *tx; 1360 int need_update = B_FALSE; 1361 int c; 1362 1363 /* 1364 * Claim log blocks that haven't been committed yet. 1365 * This must all happen in a single txg. 
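		 * (zil_claim() walks each dataset's intent log and claims its
		 * blocks in spa_first_txg(spa), so that nothing can free or
		 * reallocate them before the log is replayed or destroyed.)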
		 */
		tx = dmu_tx_create_assigned(spa_get_dsl(spa),
		    spa_first_txg(spa));
		(void) dmu_objset_find(spa->spa_name,
		    zil_claim, tx, DS_FIND_CHILDREN);
		dmu_tx_commit(tx);

		spa->spa_sync_on = B_TRUE;
		txg_sync_start(spa->spa_dsl_pool);

		/*
		 * Wait for all claims to sync.
		 */
		txg_wait_synced(spa->spa_dsl_pool, 0);

		/*
		 * If the config cache is stale, or we have uninitialized
		 * metaslabs (see spa_vdev_add()), then update the config.
		 */
		if (config_cache_txg != spa->spa_config_txg ||
		    state == SPA_LOAD_IMPORT)
			need_update = B_TRUE;

		for (c = 0; c < rvd->vdev_children; c++)
			if (rvd->vdev_child[c]->vdev_ms_array == 0)
				need_update = B_TRUE;

		/*
		 * Update the config cache asynchronously in case we're the
		 * root pool, in which case the config cache isn't writable yet.
		 */
		if (need_update)
			spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE);
	}

	error = 0;
out:
	spa->spa_minref = refcount_count(&spa->spa_refcount);
	if (error && error != EBADF)
		zfs_ereport_post(ereport, spa, NULL, NULL, 0, 0);
	spa->spa_load_state = SPA_LOAD_NONE;
	spa->spa_ena = 0;

	return (error);
}

/*
 * Pool Open/Import
 *
 * The import case is identical to an open except that the configuration is sent
 * down from userland, instead of grabbed from the configuration cache. For the
 * case of an open, the pool configuration will exist in the
 * POOL_STATE_UNINITIALIZED state.
 *
 * The stats information (gen/count/ustats) is used to gather vdev statistics at
 * the same time we open the pool, without having to keep around the spa_t in
 * some ambiguous state.
 */
static int
spa_open_common(const char *pool, spa_t **spapp, void *tag, nvlist_t **config)
{
	spa_t *spa;
	int error;
	int locked = B_FALSE;

	*spapp = NULL;

	/*
	 * As disgusting as this is, we need to support recursive calls to this
	 * function because dsl_dir_open() is called during spa_load(), and ends
	 * up calling spa_open() again. The real fix is to figure out how to
	 * avoid dsl_dir_open() calling this in the first place.
	 */
	if (mutex_owner(&spa_namespace_lock) != curthread) {
		mutex_enter(&spa_namespace_lock);
		locked = B_TRUE;
	}

	if ((spa = spa_lookup(pool)) == NULL) {
		if (locked)
			mutex_exit(&spa_namespace_lock);
		return (ENOENT);
	}
	if (spa->spa_state == POOL_STATE_UNINITIALIZED) {

		spa_activate(spa);

		error = spa_load(spa, spa->spa_config, SPA_LOAD_OPEN, B_FALSE);

		if (error == EBADF) {
			/*
			 * If vdev_validate() returns failure (indicated by
			 * EBADF), it means that one of the vdevs indicates
			 * that the pool has been exported or destroyed. If
			 * this is the case, the config cache is out of sync and
			 * we should remove the pool from the namespace.
			 */
			spa_unload(spa);
			spa_deactivate(spa);
			spa_config_sync(spa, B_TRUE, B_TRUE);
			spa_remove(spa);
			if (locked)
				mutex_exit(&spa_namespace_lock);
			return (ENOENT);
		}

		if (error) {
			/*
			 * We can't open the pool, but we still have useful
			 * information: the state of each vdev after the
			 * attempted vdev_open(). Return this to the user.
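			 * (spa_get_stats() below passes a config pointer for
			 * exactly this reason, so that userland can still
			 * report on a pool that failed to open.)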
1477 */ 1478 if (config != NULL && spa->spa_root_vdev != NULL) { 1479 spa_config_enter(spa, RW_READER, FTAG); 1480 *config = spa_config_generate(spa, NULL, -1ULL, 1481 B_TRUE); 1482 spa_config_exit(spa, FTAG); 1483 } 1484 spa_unload(spa); 1485 spa_deactivate(spa); 1486 spa->spa_last_open_failed = B_TRUE; 1487 if (locked) 1488 mutex_exit(&spa_namespace_lock); 1489 *spapp = NULL; 1490 return (error); 1491 } else { 1492 spa->spa_last_open_failed = B_FALSE; 1493 } 1494 } 1495 1496 spa_open_ref(spa, tag); 1497 1498 if (locked) 1499 mutex_exit(&spa_namespace_lock); 1500 1501 *spapp = spa; 1502 1503 if (config != NULL) { 1504 spa_config_enter(spa, RW_READER, FTAG); 1505 *config = spa_config_generate(spa, NULL, -1ULL, B_TRUE); 1506 spa_config_exit(spa, FTAG); 1507 } 1508 1509 return (0); 1510 } 1511 1512 int 1513 spa_open(const char *name, spa_t **spapp, void *tag) 1514 { 1515 return (spa_open_common(name, spapp, tag, NULL)); 1516 } 1517 1518 /* 1519 * Lookup the given spa_t, incrementing the inject count in the process, 1520 * preventing it from being exported or destroyed. 1521 */ 1522 spa_t * 1523 spa_inject_addref(char *name) 1524 { 1525 spa_t *spa; 1526 1527 mutex_enter(&spa_namespace_lock); 1528 if ((spa = spa_lookup(name)) == NULL) { 1529 mutex_exit(&spa_namespace_lock); 1530 return (NULL); 1531 } 1532 spa->spa_inject_ref++; 1533 mutex_exit(&spa_namespace_lock); 1534 1535 return (spa); 1536 } 1537 1538 void 1539 spa_inject_delref(spa_t *spa) 1540 { 1541 mutex_enter(&spa_namespace_lock); 1542 spa->spa_inject_ref--; 1543 mutex_exit(&spa_namespace_lock); 1544 } 1545 1546 /* 1547 * Add spares device information to the nvlist. 1548 */ 1549 static void 1550 spa_add_spares(spa_t *spa, nvlist_t *config) 1551 { 1552 nvlist_t **spares; 1553 uint_t i, nspares; 1554 nvlist_t *nvroot; 1555 uint64_t guid; 1556 vdev_stat_t *vs; 1557 uint_t vsc; 1558 uint64_t pool; 1559 1560 if (spa->spa_spares.sav_count == 0) 1561 return; 1562 1563 VERIFY(nvlist_lookup_nvlist(config, 1564 ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0); 1565 VERIFY(nvlist_lookup_nvlist_array(spa->spa_spares.sav_config, 1566 ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0); 1567 if (nspares != 0) { 1568 VERIFY(nvlist_add_nvlist_array(nvroot, 1569 ZPOOL_CONFIG_SPARES, spares, nspares) == 0); 1570 VERIFY(nvlist_lookup_nvlist_array(nvroot, 1571 ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0); 1572 1573 /* 1574 * Go through and find any spares which have since been 1575 * repurposed as an active spare. If this is the case, update 1576 * their status appropriately. 1577 */ 1578 for (i = 0; i < nspares; i++) { 1579 VERIFY(nvlist_lookup_uint64(spares[i], 1580 ZPOOL_CONFIG_GUID, &guid) == 0); 1581 if (spa_spare_exists(guid, &pool, NULL) && 1582 pool != 0ULL) { 1583 VERIFY(nvlist_lookup_uint64_array( 1584 spares[i], ZPOOL_CONFIG_STATS, 1585 (uint64_t **)&vs, &vsc) == 0); 1586 vs->vs_state = VDEV_STATE_CANT_OPEN; 1587 vs->vs_aux = VDEV_AUX_SPARED; 1588 } 1589 } 1590 } 1591 } 1592 1593 /* 1594 * Add l2cache device information to the nvlist, including vdev stats. 
1595 */ 1596 static void 1597 spa_add_l2cache(spa_t *spa, nvlist_t *config) 1598 { 1599 nvlist_t **l2cache; 1600 uint_t i, j, nl2cache; 1601 nvlist_t *nvroot; 1602 uint64_t guid; 1603 vdev_t *vd; 1604 vdev_stat_t *vs; 1605 uint_t vsc; 1606 1607 if (spa->spa_l2cache.sav_count == 0) 1608 return; 1609 1610 spa_config_enter(spa, RW_READER, FTAG); 1611 1612 VERIFY(nvlist_lookup_nvlist(config, 1613 ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0); 1614 VERIFY(nvlist_lookup_nvlist_array(spa->spa_l2cache.sav_config, 1615 ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0); 1616 if (nl2cache != 0) { 1617 VERIFY(nvlist_add_nvlist_array(nvroot, 1618 ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache) == 0); 1619 VERIFY(nvlist_lookup_nvlist_array(nvroot, 1620 ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0); 1621 1622 /* 1623 * Update level 2 cache device stats. 1624 */ 1625 1626 for (i = 0; i < nl2cache; i++) { 1627 VERIFY(nvlist_lookup_uint64(l2cache[i], 1628 ZPOOL_CONFIG_GUID, &guid) == 0); 1629 1630 vd = NULL; 1631 for (j = 0; j < spa->spa_l2cache.sav_count; j++) { 1632 if (guid == 1633 spa->spa_l2cache.sav_vdevs[j]->vdev_guid) { 1634 vd = spa->spa_l2cache.sav_vdevs[j]; 1635 break; 1636 } 1637 } 1638 ASSERT(vd != NULL); 1639 1640 VERIFY(nvlist_lookup_uint64_array(l2cache[i], 1641 ZPOOL_CONFIG_STATS, (uint64_t **)&vs, &vsc) == 0); 1642 vdev_get_stats(vd, vs); 1643 } 1644 } 1645 1646 spa_config_exit(spa, FTAG); 1647 } 1648 1649 int 1650 spa_get_stats(const char *name, nvlist_t **config, char *altroot, size_t buflen) 1651 { 1652 int error; 1653 spa_t *spa; 1654 1655 *config = NULL; 1656 error = spa_open_common(name, &spa, FTAG, config); 1657 1658 if (spa && *config != NULL) { 1659 VERIFY(nvlist_add_uint64(*config, ZPOOL_CONFIG_ERRCOUNT, 1660 spa_get_errlog_size(spa)) == 0); 1661 1662 spa_add_spares(spa, *config); 1663 spa_add_l2cache(spa, *config); 1664 } 1665 1666 /* 1667 * We want to get the alternate root even for faulted pools, so we cheat 1668 * and call spa_lookup() directly. 1669 */ 1670 if (altroot) { 1671 if (spa == NULL) { 1672 mutex_enter(&spa_namespace_lock); 1673 spa = spa_lookup(name); 1674 if (spa) 1675 spa_altroot(spa, altroot, buflen); 1676 else 1677 altroot[0] = '\0'; 1678 spa = NULL; 1679 mutex_exit(&spa_namespace_lock); 1680 } else { 1681 spa_altroot(spa, altroot, buflen); 1682 } 1683 } 1684 1685 if (spa != NULL) 1686 spa_close(spa, FTAG); 1687 1688 return (error); 1689 } 1690 1691 /* 1692 * Validate that the auxiliary device array is well formed. We must have an 1693 * array of nvlists, each which describes a valid leaf vdev. If this is an 1694 * import (mode is VDEV_ALLOC_SPARE), then we allow corrupted spares to be 1695 * specified, as long as they are well-formed. 1696 */ 1697 static int 1698 spa_validate_aux_devs(spa_t *spa, nvlist_t *nvroot, uint64_t crtxg, int mode, 1699 spa_aux_vdev_t *sav, const char *config, uint64_t version, 1700 vdev_labeltype_t label) 1701 { 1702 nvlist_t **dev; 1703 uint_t i, ndev; 1704 vdev_t *vd; 1705 int error; 1706 1707 /* 1708 * It's acceptable to have no devs specified. 1709 */ 1710 if (nvlist_lookup_nvlist_array(nvroot, config, &dev, &ndev) != 0) 1711 return (0); 1712 1713 if (ndev == 0) 1714 return (EINVAL); 1715 1716 /* 1717 * Make sure the pool is formatted with a version that supports this 1718 * device type. 1719 */ 1720 if (spa_version(spa) < version) 1721 return (ENOTSUP); 1722 1723 /* 1724 * Set the pending device list so we correctly handle device in-use 1725 * checking. 
 */
	sav->sav_pending = dev;
	sav->sav_npending = ndev;

	for (i = 0; i < ndev; i++) {
		if ((error = spa_config_parse(spa, &vd, dev[i], NULL, 0,
		    mode)) != 0)
			goto out;

		if (!vd->vdev_ops->vdev_op_leaf) {
			vdev_free(vd);
			error = EINVAL;
			goto out;
		}

		/*
		 * The L2ARC currently only supports disk devices.
		 */
		if ((strcmp(config, ZPOOL_CONFIG_L2CACHE) == 0) &&
		    strcmp(vd->vdev_ops->vdev_op_type, VDEV_TYPE_DISK) != 0) {
			error = ENOTBLK;
			goto out;
		}

		vd->vdev_top = vd;

		if ((error = vdev_open(vd)) == 0 &&
		    (error = vdev_label_init(vd, crtxg, label)) == 0) {
			VERIFY(nvlist_add_uint64(dev[i], ZPOOL_CONFIG_GUID,
			    vd->vdev_guid) == 0);
		}

		vdev_free(vd);

		if (error &&
		    (mode != VDEV_ALLOC_SPARE && mode != VDEV_ALLOC_L2CACHE))
			goto out;
		else
			error = 0;
	}

out:
	sav->sav_pending = NULL;
	sav->sav_npending = 0;
	return (error);
}

static int
spa_validate_aux(spa_t *spa, nvlist_t *nvroot, uint64_t crtxg, int mode)
{
	int error;

	if ((error = spa_validate_aux_devs(spa, nvroot, crtxg, mode,
	    &spa->spa_spares, ZPOOL_CONFIG_SPARES, SPA_VERSION_SPARES,
	    VDEV_LABEL_SPARE)) != 0) {
		return (error);
	}

	return (spa_validate_aux_devs(spa, nvroot, crtxg, mode,
	    &spa->spa_l2cache, ZPOOL_CONFIG_L2CACHE, SPA_VERSION_L2CACHE,
	    VDEV_LABEL_L2CACHE));
}

static void
spa_set_aux_vdevs(spa_aux_vdev_t *sav, nvlist_t **devs, int ndevs,
    const char *config)
{
	int i;

	if (sav->sav_config != NULL) {
		nvlist_t **olddevs;
		uint_t oldndevs;
		nvlist_t **newdevs;

		/*
		 * Generate new dev list by concatenating with the
		 * current dev list.
		 */
		VERIFY(nvlist_lookup_nvlist_array(sav->sav_config, config,
		    &olddevs, &oldndevs) == 0);

		newdevs = kmem_alloc(sizeof (void *) *
		    (ndevs + oldndevs), KM_SLEEP);
		for (i = 0; i < oldndevs; i++)
			VERIFY(nvlist_dup(olddevs[i], &newdevs[i],
			    KM_SLEEP) == 0);
		for (i = 0; i < ndevs; i++)
			VERIFY(nvlist_dup(devs[i], &newdevs[i + oldndevs],
			    KM_SLEEP) == 0);

		VERIFY(nvlist_remove(sav->sav_config, config,
		    DATA_TYPE_NVLIST_ARRAY) == 0);

		VERIFY(nvlist_add_nvlist_array(sav->sav_config,
		    config, newdevs, ndevs + oldndevs) == 0);
		for (i = 0; i < oldndevs + ndevs; i++)
			nvlist_free(newdevs[i]);
		kmem_free(newdevs, (oldndevs + ndevs) * sizeof (void *));
	} else {
		/*
		 * Generate a new dev list.
1827 */ 1828 VERIFY(nvlist_alloc(&sav->sav_config, NV_UNIQUE_NAME, 1829 KM_SLEEP) == 0); 1830 VERIFY(nvlist_add_nvlist_array(sav->sav_config, config, 1831 devs, ndevs) == 0); 1832 } 1833 } 1834 1835 /* 1836 * Stop and drop level 2 ARC devices 1837 */ 1838 void 1839 spa_l2cache_drop(spa_t *spa) 1840 { 1841 vdev_t *vd; 1842 int i; 1843 spa_aux_vdev_t *sav = &spa->spa_l2cache; 1844 1845 for (i = 0; i < sav->sav_count; i++) { 1846 uint64_t pool; 1847 1848 vd = sav->sav_vdevs[i]; 1849 ASSERT(vd != NULL); 1850 1851 if (spa_mode & FWRITE && 1852 spa_l2cache_exists(vd->vdev_guid, &pool) && pool != 0ULL && 1853 l2arc_vdev_present(vd)) { 1854 l2arc_remove_vdev(vd); 1855 } 1856 if (vd->vdev_isl2cache) 1857 spa_l2cache_remove(vd); 1858 vdev_clear_stats(vd); 1859 (void) vdev_close(vd); 1860 } 1861 } 1862 1863 /* 1864 * Pool Creation 1865 */ 1866 int 1867 spa_create(const char *pool, nvlist_t *nvroot, nvlist_t *props, 1868 const char *history_str, nvlist_t *zplprops) 1869 { 1870 spa_t *spa; 1871 char *altroot = NULL; 1872 vdev_t *rvd; 1873 dsl_pool_t *dp; 1874 dmu_tx_t *tx; 1875 int c, error = 0; 1876 uint64_t txg = TXG_INITIAL; 1877 nvlist_t **spares, **l2cache; 1878 uint_t nspares, nl2cache; 1879 uint64_t version; 1880 1881 /* 1882 * If this pool already exists, return failure. 1883 */ 1884 mutex_enter(&spa_namespace_lock); 1885 if (spa_lookup(pool) != NULL) { 1886 mutex_exit(&spa_namespace_lock); 1887 return (EEXIST); 1888 } 1889 1890 /* 1891 * Allocate a new spa_t structure. 1892 */ 1893 (void) nvlist_lookup_string(props, 1894 zpool_prop_to_name(ZPOOL_PROP_ALTROOT), &altroot); 1895 spa = spa_add(pool, altroot); 1896 spa_activate(spa); 1897 1898 spa->spa_uberblock.ub_txg = txg - 1; 1899 1900 if (props && (error = spa_prop_validate(spa, props))) { 1901 spa_unload(spa); 1902 spa_deactivate(spa); 1903 spa_remove(spa); 1904 mutex_exit(&spa_namespace_lock); 1905 return (error); 1906 } 1907 1908 if (nvlist_lookup_uint64(props, zpool_prop_to_name(ZPOOL_PROP_VERSION), 1909 &version) != 0) 1910 version = SPA_VERSION; 1911 ASSERT(version <= SPA_VERSION); 1912 spa->spa_uberblock.ub_version = version; 1913 spa->spa_ubsync = spa->spa_uberblock; 1914 1915 /* 1916 * Create the root vdev. 1917 */ 1918 spa_config_enter(spa, RW_WRITER, FTAG); 1919 1920 error = spa_config_parse(spa, &rvd, nvroot, NULL, 0, VDEV_ALLOC_ADD); 1921 1922 ASSERT(error != 0 || rvd != NULL); 1923 ASSERT(error != 0 || spa->spa_root_vdev == rvd); 1924 1925 if (error == 0 && !zfs_allocatable_devs(nvroot)) 1926 error = EINVAL; 1927 1928 if (error == 0 && 1929 (error = vdev_create(rvd, txg, B_FALSE)) == 0 && 1930 (error = spa_validate_aux(spa, nvroot, txg, 1931 VDEV_ALLOC_ADD)) == 0) { 1932 for (c = 0; c < rvd->vdev_children; c++) 1933 vdev_init(rvd->vdev_child[c], txg); 1934 vdev_config_dirty(rvd); 1935 } 1936 1937 spa_config_exit(spa, FTAG); 1938 1939 if (error != 0) { 1940 spa_unload(spa); 1941 spa_deactivate(spa); 1942 spa_remove(spa); 1943 mutex_exit(&spa_namespace_lock); 1944 return (error); 1945 } 1946 1947 /* 1948 * Get the list of spares, if specified. 
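	 *
	 * The caller supplies spares as an nvlist array inside nvroot,
	 * alongside the root vdev's children; a rough sketch of that layout
	 * (illustrative paths, not a real config):
	 *
	 *	vdev_tree:
	 *		children = [ { type = "mirror", children = [ ... ] }, ... ]
	 *		spares   = [ { type = "disk", path = "/dev/dsk/c1t0d0s0" }, ... ]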
1949 */ 1950 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES, 1951 &spares, &nspares) == 0) { 1952 VERIFY(nvlist_alloc(&spa->spa_spares.sav_config, NV_UNIQUE_NAME, 1953 KM_SLEEP) == 0); 1954 VERIFY(nvlist_add_nvlist_array(spa->spa_spares.sav_config, 1955 ZPOOL_CONFIG_SPARES, spares, nspares) == 0); 1956 spa_config_enter(spa, RW_WRITER, FTAG); 1957 spa_load_spares(spa); 1958 spa_config_exit(spa, FTAG); 1959 spa->spa_spares.sav_sync = B_TRUE; 1960 } 1961 1962 /* 1963 * Get the list of level 2 cache devices, if specified. 1964 */ 1965 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE, 1966 &l2cache, &nl2cache) == 0) { 1967 VERIFY(nvlist_alloc(&spa->spa_l2cache.sav_config, 1968 NV_UNIQUE_NAME, KM_SLEEP) == 0); 1969 VERIFY(nvlist_add_nvlist_array(spa->spa_l2cache.sav_config, 1970 ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache) == 0); 1971 spa_config_enter(spa, RW_WRITER, FTAG); 1972 spa_load_l2cache(spa); 1973 spa_config_exit(spa, FTAG); 1974 spa->spa_l2cache.sav_sync = B_TRUE; 1975 } 1976 1977 spa->spa_dsl_pool = dp = dsl_pool_create(spa, zplprops, txg); 1978 spa->spa_meta_objset = dp->dp_meta_objset; 1979 1980 tx = dmu_tx_create_assigned(dp, txg); 1981 1982 /* 1983 * Create the pool config object. 1984 */ 1985 spa->spa_config_object = dmu_object_alloc(spa->spa_meta_objset, 1986 DMU_OT_PACKED_NVLIST, 1 << 14, 1987 DMU_OT_PACKED_NVLIST_SIZE, sizeof (uint64_t), tx); 1988 1989 if (zap_add(spa->spa_meta_objset, 1990 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_CONFIG, 1991 sizeof (uint64_t), 1, &spa->spa_config_object, tx) != 0) { 1992 cmn_err(CE_PANIC, "failed to add pool config"); 1993 } 1994 1995 /* Newly created pools with the right version are always deflated. */ 1996 if (version >= SPA_VERSION_RAIDZ_DEFLATE) { 1997 spa->spa_deflate = TRUE; 1998 if (zap_add(spa->spa_meta_objset, 1999 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_DEFLATE, 2000 sizeof (uint64_t), 1, &spa->spa_deflate, tx) != 0) { 2001 cmn_err(CE_PANIC, "failed to add deflate"); 2002 } 2003 } 2004 2005 /* 2006 * Create the deferred-free bplist object. Turn off compression 2007 * because sync-to-convergence takes longer if the blocksize 2008 * keeps changing. 2009 */ 2010 spa->spa_sync_bplist_obj = bplist_create(spa->spa_meta_objset, 2011 1 << 14, tx); 2012 dmu_object_set_compress(spa->spa_meta_objset, spa->spa_sync_bplist_obj, 2013 ZIO_COMPRESS_OFF, tx); 2014 2015 if (zap_add(spa->spa_meta_objset, 2016 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_SYNC_BPLIST, 2017 sizeof (uint64_t), 1, &spa->spa_sync_bplist_obj, tx) != 0) { 2018 cmn_err(CE_PANIC, "failed to add bplist"); 2019 } 2020 2021 /* 2022 * Create the pool's history object. 2023 */ 2024 if (version >= SPA_VERSION_ZPOOL_HISTORY) 2025 spa_history_create_obj(spa, tx); 2026 2027 /* 2028 * Set pool properties. 2029 */ 2030 spa->spa_bootfs = zpool_prop_default_numeric(ZPOOL_PROP_BOOTFS); 2031 spa->spa_delegation = zpool_prop_default_numeric(ZPOOL_PROP_DELEGATION); 2032 spa->spa_failmode = zpool_prop_default_numeric(ZPOOL_PROP_FAILUREMODE); 2033 if (props) 2034 spa_sync_props(spa, props, CRED(), tx); 2035 2036 dmu_tx_commit(tx); 2037 2038 spa->spa_sync_on = B_TRUE; 2039 txg_sync_start(spa->spa_dsl_pool); 2040 2041 /* 2042 * We explicitly wait for the first transaction to complete so that our 2043 * bean counters are appropriately updated. 
2044 */ 2045 txg_wait_synced(spa->spa_dsl_pool, txg); 2046 2047 spa_config_sync(spa, B_FALSE, B_TRUE); 2048 2049 if (version >= SPA_VERSION_ZPOOL_HISTORY && history_str != NULL) 2050 (void) spa_history_log(spa, history_str, LOG_CMD_POOL_CREATE); 2051 2052 mutex_exit(&spa_namespace_lock); 2053 2054 spa->spa_minref = refcount_count(&spa->spa_refcount); 2055 2056 return (0); 2057 } 2058 2059 /* 2060 * Import the given pool into the system. We set up the necessary spa_t and 2061 * then call spa_load() to do the dirty work. 2062 */ 2063 static int 2064 spa_import_common(const char *pool, nvlist_t *config, nvlist_t *props, 2065 boolean_t isroot, boolean_t allowfaulted) 2066 { 2067 spa_t *spa; 2068 char *altroot = NULL; 2069 int error, loaderr; 2070 nvlist_t *nvroot; 2071 nvlist_t **spares, **l2cache; 2072 uint_t nspares, nl2cache; 2073 2074 /* 2075 * If a pool with this name exists, return failure. 2076 */ 2077 mutex_enter(&spa_namespace_lock); 2078 if (spa_lookup(pool) != NULL) { 2079 mutex_exit(&spa_namespace_lock); 2080 return (EEXIST); 2081 } 2082 2083 /* 2084 * Create and initialize the spa structure. 2085 */ 2086 (void) nvlist_lookup_string(props, 2087 zpool_prop_to_name(ZPOOL_PROP_ALTROOT), &altroot); 2088 spa = spa_add(pool, altroot); 2089 spa_activate(spa); 2090 2091 if (allowfaulted) 2092 spa->spa_import_faulted = B_TRUE; 2093 spa->spa_is_root = isroot; 2094 2095 /* 2096 * Pass off the heavy lifting to spa_load(). 2097 * Pass TRUE for mosconfig (unless this is a root pool) because 2098 * the user-supplied config is actually the one to trust when 2099 * doing an import. 2100 */ 2101 loaderr = error = spa_load(spa, config, SPA_LOAD_IMPORT, !isroot); 2102 2103 spa_config_enter(spa, RW_WRITER, FTAG); 2104 /* 2105 * Toss any existing sparelist, as it doesn't have any validity anymore, 2106 * and conflicts with spa_has_spare(). 2107 */ 2108 if (!isroot && spa->spa_spares.sav_config) { 2109 nvlist_free(spa->spa_spares.sav_config); 2110 spa->spa_spares.sav_config = NULL; 2111 spa_load_spares(spa); 2112 } 2113 if (!isroot && spa->spa_l2cache.sav_config) { 2114 nvlist_free(spa->spa_l2cache.sav_config); 2115 spa->spa_l2cache.sav_config = NULL; 2116 spa_load_l2cache(spa); 2117 } 2118 2119 VERIFY(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, 2120 &nvroot) == 0); 2121 if (error == 0) 2122 error = spa_validate_aux(spa, nvroot, -1ULL, VDEV_ALLOC_SPARE); 2123 if (error == 0) 2124 error = spa_validate_aux(spa, nvroot, -1ULL, 2125 VDEV_ALLOC_L2CACHE); 2126 spa_config_exit(spa, FTAG); 2127 2128 if (error != 0 || (props && (error = spa_prop_set(spa, props)))) { 2129 if (loaderr != 0 && loaderr != EINVAL && allowfaulted) { 2130 /* 2131 * If we failed to load the pool, but 'allowfaulted' is 2132 * set, then manually set the config as if the config 2133 * passed in was specified in the cache file. 2134 */ 2135 error = 0; 2136 spa->spa_import_faulted = B_FALSE; 2137 if (spa->spa_config == NULL) { 2138 spa_config_enter(spa, RW_READER, FTAG); 2139 spa->spa_config = spa_config_generate(spa, 2140 NULL, -1ULL, B_TRUE); 2141 spa_config_exit(spa, FTAG); 2142 } 2143 spa_unload(spa); 2144 spa_deactivate(spa); 2145 spa_config_sync(spa, B_FALSE, B_TRUE); 2146 } else { 2147 spa_unload(spa); 2148 spa_deactivate(spa); 2149 spa_remove(spa); 2150 } 2151 mutex_exit(&spa_namespace_lock); 2152 return (error); 2153 } 2154 2155 /* 2156 * Override any spares and level 2 cache devices as specified by 2157 * the user, as these may have correct device names/devids, etc. 
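 *
 * Any sparelist or l2cache list inherited from the labels was tossed
 * above; here we rebuild sav_config from the caller's nvroot and set
 * sav_sync so the corrected lists reach the MOS on the next sync.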
2158 */ 2159 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES, 2160 &spares, &nspares) == 0) { 2161 if (spa->spa_spares.sav_config) 2162 VERIFY(nvlist_remove(spa->spa_spares.sav_config, 2163 ZPOOL_CONFIG_SPARES, DATA_TYPE_NVLIST_ARRAY) == 0); 2164 else 2165 VERIFY(nvlist_alloc(&spa->spa_spares.sav_config, 2166 NV_UNIQUE_NAME, KM_SLEEP) == 0); 2167 VERIFY(nvlist_add_nvlist_array(spa->spa_spares.sav_config, 2168 ZPOOL_CONFIG_SPARES, spares, nspares) == 0); 2169 spa_config_enter(spa, RW_WRITER, FTAG); 2170 spa_load_spares(spa); 2171 spa_config_exit(spa, FTAG); 2172 spa->spa_spares.sav_sync = B_TRUE; 2173 } 2174 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE, 2175 &l2cache, &nl2cache) == 0) { 2176 if (spa->spa_l2cache.sav_config) 2177 VERIFY(nvlist_remove(spa->spa_l2cache.sav_config, 2178 ZPOOL_CONFIG_L2CACHE, DATA_TYPE_NVLIST_ARRAY) == 0); 2179 else 2180 VERIFY(nvlist_alloc(&spa->spa_l2cache.sav_config, 2181 NV_UNIQUE_NAME, KM_SLEEP) == 0); 2182 VERIFY(nvlist_add_nvlist_array(spa->spa_l2cache.sav_config, 2183 ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache) == 0); 2184 spa_config_enter(spa, RW_WRITER, FTAG); 2185 spa_load_l2cache(spa); 2186 spa_config_exit(spa, FTAG); 2187 spa->spa_l2cache.sav_sync = B_TRUE; 2188 } 2189 2190 if (spa_mode & FWRITE) { 2191 /* 2192 * Update the config cache to include the newly-imported pool. 2193 */ 2194 spa_config_update_common(spa, SPA_CONFIG_UPDATE_POOL, isroot); 2195 } 2196 2197 spa->spa_import_faulted = B_FALSE; 2198 mutex_exit(&spa_namespace_lock); 2199 2200 return (0); 2201 } 2202 2203 #ifdef _KERNEL 2204 /* 2205 * Build a "root" vdev for a top level vdev read in from a rootpool 2206 * device label. 2207 */ 2208 static void 2209 spa_build_rootpool_config(nvlist_t *config) 2210 { 2211 nvlist_t *nvtop, *nvroot; 2212 uint64_t pgid; 2213 2214 /* 2215 * Add this top-level vdev to the child array. 2216 */ 2217 VERIFY(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &nvtop) 2218 == 0); 2219 VERIFY(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID, &pgid) 2220 == 0); 2221 2222 /* 2223 * Put this pool's top-level vdevs into a root vdev. 2224 */ 2225 VERIFY(nvlist_alloc(&nvroot, NV_UNIQUE_NAME, KM_SLEEP) == 0); 2226 VERIFY(nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE, VDEV_TYPE_ROOT) 2227 == 0); 2228 VERIFY(nvlist_add_uint64(nvroot, ZPOOL_CONFIG_ID, 0ULL) == 0); 2229 VERIFY(nvlist_add_uint64(nvroot, ZPOOL_CONFIG_GUID, pgid) == 0); 2230 VERIFY(nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN, 2231 &nvtop, 1) == 0); 2232 2233 /* 2234 * Replace the existing vdev_tree with the new root vdev in 2235 * this pool's configuration (remove the old, add the new). 2236 */ 2237 VERIFY(nvlist_add_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, nvroot) == 0); 2238 nvlist_free(nvroot); 2239 } 2240 2241 /* 2242 * Get the root pool information from the root disk, then import the root pool 2243 * during the system boot up time. 
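 *
 * Roughly, the boot-time flow implemented below is:
 *
 *	vdev_disk_read_rootlabel()	read the label off the boot device
 *	spa_check_rootconf()		extract the config and its pool txg
 *	spa_get_rootconf()		pick the most up-to-date config,
 *					walking the mirror children if any
 *	spa_build_rootpool_config()	wrap it in a "root" vdev
 *	spa_import_common()		perform the actual import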
2244 */ 2245 extern nvlist_t *vdev_disk_read_rootlabel(char *, char *); 2246 2247 int 2248 spa_check_rootconf(char *devpath, char *devid, nvlist_t **bestconf, 2249 uint64_t *besttxg) 2250 { 2251 nvlist_t *config; 2252 uint64_t txg; 2253 2254 if ((config = vdev_disk_read_rootlabel(devpath, devid)) == NULL) 2255 return (-1); 2256 2257 VERIFY(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_TXG, &txg) == 0); 2258 2259 if (bestconf != NULL) 2260 *bestconf = config; 2261 *besttxg = txg; 2262 return (0); 2263 } 2264 2265 boolean_t 2266 spa_rootdev_validate(nvlist_t *nv) 2267 { 2268 uint64_t ival; 2269 2270 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_OFFLINE, &ival) == 0 || 2271 nvlist_lookup_uint64(nv, ZPOOL_CONFIG_FAULTED, &ival) == 0 || 2272 nvlist_lookup_uint64(nv, ZPOOL_CONFIG_REMOVED, &ival) == 0) 2273 return (B_FALSE); 2274 2275 return (B_TRUE); 2276 } 2277 2278 2279 /* 2280 * Given the boot device's physical path or devid, check if the device 2281 * is in a valid state. If so, return the configuration from the vdev 2282 * label. 2283 */ 2284 int 2285 spa_get_rootconf(char *devpath, char *devid, nvlist_t **bestconf) 2286 { 2287 nvlist_t *conf = NULL; 2288 uint64_t txg = 0; 2289 nvlist_t *nvtop, **child; 2290 char *type; 2291 char *bootpath = NULL; 2292 uint_t children, c; 2293 char *tmp; 2294 2295 if (devpath && ((tmp = strchr(devpath, ' ')) != NULL)) 2296 *tmp = '\0'; 2297 if (spa_check_rootconf(devpath, devid, &conf, &txg) < 0) { 2298 cmn_err(CE_NOTE, "error reading device label"); 2299 nvlist_free(conf); 2300 return (EINVAL); 2301 } 2302 if (txg == 0) { 2303 cmn_err(CE_NOTE, "this device is detached"); 2304 nvlist_free(conf); 2305 return (EINVAL); 2306 } 2307 2308 VERIFY(nvlist_lookup_nvlist(conf, ZPOOL_CONFIG_VDEV_TREE, 2309 &nvtop) == 0); 2310 VERIFY(nvlist_lookup_string(nvtop, ZPOOL_CONFIG_TYPE, &type) == 0); 2311 2312 if (strcmp(type, VDEV_TYPE_DISK) == 0) { 2313 if (spa_rootdev_validate(nvtop)) { 2314 goto out; 2315 } else { 2316 nvlist_free(conf); 2317 return (EINVAL); 2318 } 2319 } 2320 2321 ASSERT(strcmp(type, VDEV_TYPE_MIRROR) == 0); 2322 2323 VERIFY(nvlist_lookup_nvlist_array(nvtop, ZPOOL_CONFIG_CHILDREN, 2324 &child, &children) == 0); 2325 2326 /* 2327 * Go thru vdevs in the mirror to see if the given device 2328 * has the most recent txg. Only the device with the most 2329 * recent txg has valid information and should be booted. 2330 */ 2331 for (c = 0; c < children; c++) { 2332 char *cdevid, *cpath; 2333 uint64_t tmptxg; 2334 2335 if (nvlist_lookup_string(child[c], ZPOOL_CONFIG_PHYS_PATH, 2336 &cpath) != 0) 2337 return (EINVAL); 2338 if (nvlist_lookup_string(child[c], ZPOOL_CONFIG_DEVID, 2339 &cdevid) != 0) 2340 return (EINVAL); 2341 if ((spa_check_rootconf(cpath, cdevid, NULL, 2342 &tmptxg) == 0) && (tmptxg > txg)) { 2343 txg = tmptxg; 2344 VERIFY(nvlist_lookup_string(child[c], 2345 ZPOOL_CONFIG_PATH, &bootpath) == 0); 2346 } 2347 } 2348 2349 /* Does the best device match the one we've booted from? */ 2350 if (bootpath) { 2351 cmn_err(CE_NOTE, "try booting from '%s'", bootpath); 2352 return (EINVAL); 2353 } 2354 out: 2355 *bestconf = conf; 2356 return (0); 2357 } 2358 2359 /* 2360 * Import a root pool. 2361 * 2362 * For x86. devpath_list will consist of devid and/or physpath name of 2363 * the vdev (e.g. "id1,sd@SSEAGATE..." or "/pci@1f,0/ide@d/disk@0,0:a"). 2364 * The GRUB "findroot" command will return the vdev we should boot. 
2365 * 2366 * For Sparc, devpath_list consists the physpath name of the booting device 2367 * no matter the rootpool is a single device pool or a mirrored pool. 2368 * e.g. 2369 * "/pci@1f,0/ide@d/disk@0,0:a" 2370 */ 2371 int 2372 spa_import_rootpool(char *devpath, char *devid) 2373 { 2374 nvlist_t *conf = NULL; 2375 char *pname; 2376 int error; 2377 2378 /* 2379 * Get the vdev pathname and configuation from the most 2380 * recently updated vdev (highest txg). 2381 */ 2382 if (error = spa_get_rootconf(devpath, devid, &conf)) 2383 goto msg_out; 2384 2385 /* 2386 * Add type "root" vdev to the config. 2387 */ 2388 spa_build_rootpool_config(conf); 2389 2390 VERIFY(nvlist_lookup_string(conf, ZPOOL_CONFIG_POOL_NAME, &pname) == 0); 2391 2392 /* 2393 * We specify 'allowfaulted' for this to be treated like spa_open() 2394 * instead of spa_import(). This prevents us from marking vdevs as 2395 * persistently unavailable, and generates FMA ereports as if it were a 2396 * pool open, not import. 2397 */ 2398 error = spa_import_common(pname, conf, NULL, B_TRUE, B_TRUE); 2399 if (error == EEXIST) 2400 error = 0; 2401 2402 nvlist_free(conf); 2403 return (error); 2404 2405 msg_out: 2406 cmn_err(CE_NOTE, "\n" 2407 " *************************************************** \n" 2408 " * This device is not bootable! * \n" 2409 " * It is either offlined or detached or faulted. * \n" 2410 " * Please try to boot from a different device. * \n" 2411 " *************************************************** "); 2412 2413 return (error); 2414 } 2415 #endif 2416 2417 /* 2418 * Import a non-root pool into the system. 2419 */ 2420 int 2421 spa_import(const char *pool, nvlist_t *config, nvlist_t *props) 2422 { 2423 return (spa_import_common(pool, config, props, B_FALSE, B_FALSE)); 2424 } 2425 2426 int 2427 spa_import_faulted(const char *pool, nvlist_t *config, nvlist_t *props) 2428 { 2429 return (spa_import_common(pool, config, props, B_FALSE, B_TRUE)); 2430 } 2431 2432 2433 /* 2434 * This (illegal) pool name is used when temporarily importing a spa_t in order 2435 * to get the vdev stats associated with the imported devices. 2436 */ 2437 #define TRYIMPORT_NAME "$import" 2438 2439 nvlist_t * 2440 spa_tryimport(nvlist_t *tryconfig) 2441 { 2442 nvlist_t *config = NULL; 2443 char *poolname; 2444 spa_t *spa; 2445 uint64_t state; 2446 2447 if (nvlist_lookup_string(tryconfig, ZPOOL_CONFIG_POOL_NAME, &poolname)) 2448 return (NULL); 2449 2450 if (nvlist_lookup_uint64(tryconfig, ZPOOL_CONFIG_POOL_STATE, &state)) 2451 return (NULL); 2452 2453 /* 2454 * Create and initialize the spa structure. 2455 */ 2456 mutex_enter(&spa_namespace_lock); 2457 spa = spa_add(TRYIMPORT_NAME, NULL); 2458 spa_activate(spa); 2459 2460 /* 2461 * Pass off the heavy lifting to spa_load(). 2462 * Pass TRUE for mosconfig because the user-supplied config 2463 * is actually the one to trust when doing an import. 2464 */ 2465 (void) spa_load(spa, tryconfig, SPA_LOAD_TRYIMPORT, B_TRUE); 2466 2467 /* 2468 * If 'tryconfig' was at least parsable, return the current config. 
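 *
 * For example (purely illustrative), a caller that has read a config
 * nvlist off a device label might do roughly:
 *
 *	if ((config = spa_tryimport(label_config)) != NULL) {
 *		... inspect pool name, state, spares, bootfs ...
 *		nvlist_free(config);
 *	}
 *
 * The temporary "$import" spa_t is torn down before we return.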
2469 */ 2470 if (spa->spa_root_vdev != NULL) { 2471 spa_config_enter(spa, RW_READER, FTAG); 2472 config = spa_config_generate(spa, NULL, -1ULL, B_TRUE); 2473 spa_config_exit(spa, FTAG); 2474 VERIFY(nvlist_add_string(config, ZPOOL_CONFIG_POOL_NAME, 2475 poolname) == 0); 2476 VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_POOL_STATE, 2477 state) == 0); 2478 VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_TIMESTAMP, 2479 spa->spa_uberblock.ub_timestamp) == 0); 2480 2481 /* 2482 * If the bootfs property exists on this pool then we 2483 * copy it out so that external consumers can tell which 2484 * pools are bootable. 2485 */ 2486 if (spa->spa_bootfs) { 2487 char *tmpname = kmem_alloc(MAXPATHLEN, KM_SLEEP); 2488 2489 /* 2490 * We have to play games with the name since the 2491 * pool was opened as TRYIMPORT_NAME. 2492 */ 2493 if (dsl_dsobj_to_dsname(spa->spa_name, 2494 spa->spa_bootfs, tmpname) == 0) { 2495 char *cp; 2496 char *dsname = kmem_alloc(MAXPATHLEN, KM_SLEEP); 2497 2498 cp = strchr(tmpname, '/'); 2499 if (cp == NULL) { 2500 (void) strlcpy(dsname, tmpname, 2501 MAXPATHLEN); 2502 } else { 2503 (void) snprintf(dsname, MAXPATHLEN, 2504 "%s/%s", poolname, ++cp); 2505 } 2506 VERIFY(nvlist_add_string(config, 2507 ZPOOL_CONFIG_BOOTFS, dsname) == 0); 2508 kmem_free(dsname, MAXPATHLEN); 2509 } 2510 kmem_free(tmpname, MAXPATHLEN); 2511 } 2512 2513 /* 2514 * Add the list of hot spares and level 2 cache devices. 2515 */ 2516 spa_add_spares(spa, config); 2517 spa_add_l2cache(spa, config); 2518 } 2519 2520 spa_unload(spa); 2521 spa_deactivate(spa); 2522 spa_remove(spa); 2523 mutex_exit(&spa_namespace_lock); 2524 2525 return (config); 2526 } 2527 2528 /* 2529 * Pool export/destroy 2530 * 2531 * The act of destroying or exporting a pool is very simple. We make sure there 2532 * is no more pending I/O and any references to the pool are gone. Then, we 2533 * update the pool state and sync all the labels to disk, removing the 2534 * configuration from the cache afterwards. 2535 */ 2536 static int 2537 spa_export_common(char *pool, int new_state, nvlist_t **oldconfig, 2538 boolean_t force) 2539 { 2540 spa_t *spa; 2541 2542 if (oldconfig) 2543 *oldconfig = NULL; 2544 2545 if (!(spa_mode & FWRITE)) 2546 return (EROFS); 2547 2548 mutex_enter(&spa_namespace_lock); 2549 if ((spa = spa_lookup(pool)) == NULL) { 2550 mutex_exit(&spa_namespace_lock); 2551 return (ENOENT); 2552 } 2553 2554 /* 2555 * Put a hold on the pool, drop the namespace lock, stop async tasks, 2556 * reacquire the namespace lock, and see if we can export. 2557 */ 2558 spa_open_ref(spa, FTAG); 2559 mutex_exit(&spa_namespace_lock); 2560 spa_async_suspend(spa); 2561 mutex_enter(&spa_namespace_lock); 2562 spa_close(spa, FTAG); 2563 2564 /* 2565 * The pool will be in core if it's openable, 2566 * in which case we can modify its state. 2567 */ 2568 if (spa->spa_state != POOL_STATE_UNINITIALIZED && spa->spa_sync_on) { 2569 /* 2570 * Objsets may be open only because they're dirty, so we 2571 * have to force it to sync before checking spa_refcnt. 2572 */ 2573 txg_wait_synced(spa->spa_dsl_pool, 0); 2574 2575 /* 2576 * A pool cannot be exported or destroyed if there are active 2577 * references. If we are resetting a pool, allow references by 2578 * fault injection handlers. 
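 *
 * "Active" here means holds beyond the pool's own baseline: in effect
 * spa_refcount_zero() compares the current hold count against the
 * spa_minref value recorded when the pool was created or loaded.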
2579 */ 2580 if (!spa_refcount_zero(spa) || 2581 (spa->spa_inject_ref != 0 && 2582 new_state != POOL_STATE_UNINITIALIZED)) { 2583 spa_async_resume(spa); 2584 mutex_exit(&spa_namespace_lock); 2585 return (EBUSY); 2586 } 2587 2588 /* 2589 * A pool cannot be exported if it has an active shared spare. 2590 * This is to prevent other pools stealing the active spare 2591 * from an exported pool. At user's own will, such pool can 2592 * be forcedly exported. 2593 */ 2594 if (!force && new_state == POOL_STATE_EXPORTED && 2595 spa_has_active_shared_spare(spa)) { 2596 spa_async_resume(spa); 2597 mutex_exit(&spa_namespace_lock); 2598 return (EXDEV); 2599 } 2600 2601 /* 2602 * We want this to be reflected on every label, 2603 * so mark them all dirty. spa_unload() will do the 2604 * final sync that pushes these changes out. 2605 */ 2606 if (new_state != POOL_STATE_UNINITIALIZED) { 2607 spa_config_enter(spa, RW_WRITER, FTAG); 2608 spa->spa_state = new_state; 2609 spa->spa_final_txg = spa_last_synced_txg(spa) + 1; 2610 vdev_config_dirty(spa->spa_root_vdev); 2611 spa_config_exit(spa, FTAG); 2612 } 2613 } 2614 2615 spa_event_notify(spa, NULL, ESC_ZFS_POOL_DESTROY); 2616 2617 if (spa->spa_state != POOL_STATE_UNINITIALIZED) { 2618 spa_unload(spa); 2619 spa_deactivate(spa); 2620 } 2621 2622 if (oldconfig && spa->spa_config) 2623 VERIFY(nvlist_dup(spa->spa_config, oldconfig, 0) == 0); 2624 2625 if (new_state != POOL_STATE_UNINITIALIZED) { 2626 spa_config_sync(spa, B_TRUE, B_TRUE); 2627 spa_remove(spa); 2628 } 2629 mutex_exit(&spa_namespace_lock); 2630 2631 return (0); 2632 } 2633 2634 /* 2635 * Destroy a storage pool. 2636 */ 2637 int 2638 spa_destroy(char *pool) 2639 { 2640 return (spa_export_common(pool, POOL_STATE_DESTROYED, NULL, B_FALSE)); 2641 } 2642 2643 /* 2644 * Export a storage pool. 2645 */ 2646 int 2647 spa_export(char *pool, nvlist_t **oldconfig, boolean_t force) 2648 { 2649 return (spa_export_common(pool, POOL_STATE_EXPORTED, oldconfig, force)); 2650 } 2651 2652 /* 2653 * Similar to spa_export(), this unloads the spa_t without actually removing it 2654 * from the namespace in any way. 2655 */ 2656 int 2657 spa_reset(char *pool) 2658 { 2659 return (spa_export_common(pool, POOL_STATE_UNINITIALIZED, NULL, 2660 B_FALSE)); 2661 } 2662 2663 /* 2664 * ========================================================================== 2665 * Device manipulation 2666 * ========================================================================== 2667 */ 2668 2669 /* 2670 * Add a device to a storage pool. 
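 *
 * The caller passes an nvroot describing only the new top-level vdevs
 * (and/or new spares and l2cache devices), in the same form parsed at
 * pool creation.  A minimal sketch of a caller, assuming nvroot has
 * already been built:
 *
 *	if ((error = spa_vdev_add(spa, nvroot)) != 0)
 *		return (error);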
2671 */ 2672 int 2673 spa_vdev_add(spa_t *spa, nvlist_t *nvroot) 2674 { 2675 uint64_t txg; 2676 int c, error; 2677 vdev_t *rvd = spa->spa_root_vdev; 2678 vdev_t *vd, *tvd; 2679 nvlist_t **spares, **l2cache; 2680 uint_t nspares, nl2cache; 2681 2682 txg = spa_vdev_enter(spa); 2683 2684 if ((error = spa_config_parse(spa, &vd, nvroot, NULL, 0, 2685 VDEV_ALLOC_ADD)) != 0) 2686 return (spa_vdev_exit(spa, NULL, txg, error)); 2687 2688 spa->spa_pending_vdev = vd; 2689 2690 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES, &spares, 2691 &nspares) != 0) 2692 nspares = 0; 2693 2694 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE, &l2cache, 2695 &nl2cache) != 0) 2696 nl2cache = 0; 2697 2698 if (vd->vdev_children == 0 && nspares == 0 && nl2cache == 0) { 2699 spa->spa_pending_vdev = NULL; 2700 return (spa_vdev_exit(spa, vd, txg, EINVAL)); 2701 } 2702 2703 if (vd->vdev_children != 0) { 2704 if ((error = vdev_create(vd, txg, B_FALSE)) != 0) { 2705 spa->spa_pending_vdev = NULL; 2706 return (spa_vdev_exit(spa, vd, txg, error)); 2707 } 2708 } 2709 2710 /* 2711 * We must validate the spares and l2cache devices after checking the 2712 * children. Otherwise, vdev_inuse() will blindly overwrite the spare. 2713 */ 2714 if ((error = spa_validate_aux(spa, nvroot, txg, VDEV_ALLOC_ADD)) != 0) { 2715 spa->spa_pending_vdev = NULL; 2716 return (spa_vdev_exit(spa, vd, txg, error)); 2717 } 2718 2719 spa->spa_pending_vdev = NULL; 2720 2721 /* 2722 * Transfer each new top-level vdev from vd to rvd. 2723 */ 2724 for (c = 0; c < vd->vdev_children; c++) { 2725 tvd = vd->vdev_child[c]; 2726 vdev_remove_child(vd, tvd); 2727 tvd->vdev_id = rvd->vdev_children; 2728 vdev_add_child(rvd, tvd); 2729 vdev_config_dirty(tvd); 2730 } 2731 2732 if (nspares != 0) { 2733 spa_set_aux_vdevs(&spa->spa_spares, spares, nspares, 2734 ZPOOL_CONFIG_SPARES); 2735 spa_load_spares(spa); 2736 spa->spa_spares.sav_sync = B_TRUE; 2737 } 2738 2739 if (nl2cache != 0) { 2740 spa_set_aux_vdevs(&spa->spa_l2cache, l2cache, nl2cache, 2741 ZPOOL_CONFIG_L2CACHE); 2742 spa_load_l2cache(spa); 2743 spa->spa_l2cache.sav_sync = B_TRUE; 2744 } 2745 2746 /* 2747 * We have to be careful when adding new vdevs to an existing pool. 2748 * If other threads start allocating from these vdevs before we 2749 * sync the config cache, and we lose power, then upon reboot we may 2750 * fail to open the pool because there are DVAs that the config cache 2751 * can't translate. Therefore, we first add the vdevs without 2752 * initializing metaslabs; sync the config cache (via spa_vdev_exit()); 2753 * and then let spa_config_update() initialize the new metaslabs. 2754 * 2755 * spa_load() checks for added-but-not-initialized vdevs, so that 2756 * if we lose power at any point in this sequence, the remaining 2757 * steps will be completed the next time we load the pool. 2758 */ 2759 (void) spa_vdev_exit(spa, vd, txg, 0); 2760 2761 mutex_enter(&spa_namespace_lock); 2762 spa_config_update(spa, SPA_CONFIG_UPDATE_POOL); 2763 mutex_exit(&spa_namespace_lock); 2764 2765 return (0); 2766 } 2767 2768 /* 2769 * Attach a device to a mirror. The arguments are the path to any device 2770 * in the mirror, and the nvroot for the new device. If the path specifies 2771 * a device that is not mirrored, we automatically insert the mirror vdev. 
2772 * 2773 * If 'replacing' is specified, the new device is intended to replace the 2774 * existing device; in this case the two devices are made into their own 2775 * mirror using the 'replacing' vdev, which is functionally identical to 2776 * the mirror vdev (it actually reuses all the same ops) but has a few 2777 * extra rules: you can't attach to it after it's been created, and upon 2778 * completion of resilvering, the first disk (the one being replaced) 2779 * is automatically detached. 2780 */ 2781 int 2782 spa_vdev_attach(spa_t *spa, uint64_t guid, nvlist_t *nvroot, int replacing) 2783 { 2784 uint64_t txg, open_txg; 2785 vdev_t *rvd = spa->spa_root_vdev; 2786 vdev_t *oldvd, *newvd, *newrootvd, *pvd, *tvd; 2787 vdev_ops_t *pvops; 2788 dmu_tx_t *tx; 2789 char *oldvdpath, *newvdpath; 2790 int newvd_isspare; 2791 int error; 2792 2793 txg = spa_vdev_enter(spa); 2794 2795 oldvd = spa_lookup_by_guid(spa, guid, B_FALSE); 2796 2797 if (oldvd == NULL) 2798 return (spa_vdev_exit(spa, NULL, txg, ENODEV)); 2799 2800 if (!oldvd->vdev_ops->vdev_op_leaf) 2801 return (spa_vdev_exit(spa, NULL, txg, ENOTSUP)); 2802 2803 pvd = oldvd->vdev_parent; 2804 2805 if ((error = spa_config_parse(spa, &newrootvd, nvroot, NULL, 0, 2806 VDEV_ALLOC_ADD)) != 0) 2807 return (spa_vdev_exit(spa, NULL, txg, EINVAL)); 2808 2809 if (newrootvd->vdev_children != 1) 2810 return (spa_vdev_exit(spa, newrootvd, txg, EINVAL)); 2811 2812 newvd = newrootvd->vdev_child[0]; 2813 2814 if (!newvd->vdev_ops->vdev_op_leaf) 2815 return (spa_vdev_exit(spa, newrootvd, txg, EINVAL)); 2816 2817 if ((error = vdev_create(newrootvd, txg, replacing)) != 0) 2818 return (spa_vdev_exit(spa, newrootvd, txg, error)); 2819 2820 /* 2821 * Spares can't replace logs 2822 */ 2823 if (oldvd->vdev_top->vdev_islog && newvd->vdev_isspare) 2824 return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP)); 2825 2826 if (!replacing) { 2827 /* 2828 * For attach, the only allowable parent is a mirror or the root 2829 * vdev. 2830 */ 2831 if (pvd->vdev_ops != &vdev_mirror_ops && 2832 pvd->vdev_ops != &vdev_root_ops) 2833 return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP)); 2834 2835 pvops = &vdev_mirror_ops; 2836 } else { 2837 /* 2838 * Active hot spares can only be replaced by inactive hot 2839 * spares. 2840 */ 2841 if (pvd->vdev_ops == &vdev_spare_ops && 2842 pvd->vdev_child[1] == oldvd && 2843 !spa_has_spare(spa, newvd->vdev_guid)) 2844 return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP)); 2845 2846 /* 2847 * If the source is a hot spare, and the parent isn't already a 2848 * spare, then we want to create a new hot spare. Otherwise, we 2849 * want to create a replacing vdev. The user is not allowed to 2850 * attach to a spared vdev child unless the 'isspare' state is 2851 * the same (spare replaces spare, non-spare replaces 2852 * non-spare). 2853 */ 2854 if (pvd->vdev_ops == &vdev_replacing_ops) 2855 return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP)); 2856 else if (pvd->vdev_ops == &vdev_spare_ops && 2857 newvd->vdev_isspare != oldvd->vdev_isspare) 2858 return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP)); 2859 else if (pvd->vdev_ops != &vdev_spare_ops && 2860 newvd->vdev_isspare) 2861 pvops = &vdev_spare_ops; 2862 else 2863 pvops = &vdev_replacing_ops; 2864 } 2865 2866 /* 2867 * Compare the new device size with the replaceable/attachable 2868 * device size. 
2869 */ 2870 if (newvd->vdev_psize < vdev_get_rsize(oldvd)) 2871 return (spa_vdev_exit(spa, newrootvd, txg, EOVERFLOW)); 2872 2873 /* 2874 * The new device cannot have a higher alignment requirement 2875 * than the top-level vdev. 2876 */ 2877 if (newvd->vdev_ashift > oldvd->vdev_top->vdev_ashift) 2878 return (spa_vdev_exit(spa, newrootvd, txg, EDOM)); 2879 2880 /* 2881 * If this is an in-place replacement, update oldvd's path and devid 2882 * to make it distinguishable from newvd, and unopenable from now on. 2883 */ 2884 if (strcmp(oldvd->vdev_path, newvd->vdev_path) == 0) { 2885 spa_strfree(oldvd->vdev_path); 2886 oldvd->vdev_path = kmem_alloc(strlen(newvd->vdev_path) + 5, 2887 KM_SLEEP); 2888 (void) sprintf(oldvd->vdev_path, "%s/%s", 2889 newvd->vdev_path, "old"); 2890 if (oldvd->vdev_devid != NULL) { 2891 spa_strfree(oldvd->vdev_devid); 2892 oldvd->vdev_devid = NULL; 2893 } 2894 } 2895 2896 /* 2897 * If the parent is not a mirror, or if we're replacing, insert the new 2898 * mirror/replacing/spare vdev above oldvd. 2899 */ 2900 if (pvd->vdev_ops != pvops) 2901 pvd = vdev_add_parent(oldvd, pvops); 2902 2903 ASSERT(pvd->vdev_top->vdev_parent == rvd); 2904 ASSERT(pvd->vdev_ops == pvops); 2905 ASSERT(oldvd->vdev_parent == pvd); 2906 2907 /* 2908 * Extract the new device from its root and add it to pvd. 2909 */ 2910 vdev_remove_child(newrootvd, newvd); 2911 newvd->vdev_id = pvd->vdev_children; 2912 vdev_add_child(pvd, newvd); 2913 2914 /* 2915 * If newvd is smaller than oldvd, but larger than its rsize, 2916 * the addition of newvd may have decreased our parent's asize. 2917 */ 2918 pvd->vdev_asize = MIN(pvd->vdev_asize, newvd->vdev_asize); 2919 2920 tvd = newvd->vdev_top; 2921 ASSERT(pvd->vdev_top == tvd); 2922 ASSERT(tvd->vdev_parent == rvd); 2923 2924 vdev_config_dirty(tvd); 2925 2926 /* 2927 * Set newvd's DTL to [TXG_INITIAL, open_txg]. It will propagate 2928 * upward when spa_vdev_exit() calls vdev_dtl_reassess(). 2929 */ 2930 open_txg = txg + TXG_CONCURRENT_STATES - 1; 2931 2932 mutex_enter(&newvd->vdev_dtl_lock); 2933 space_map_add(&newvd->vdev_dtl_map, TXG_INITIAL, 2934 open_txg - TXG_INITIAL + 1); 2935 mutex_exit(&newvd->vdev_dtl_lock); 2936 2937 if (newvd->vdev_isspare) 2938 spa_spare_activate(newvd); 2939 oldvdpath = spa_strdup(vdev_description(oldvd)); 2940 newvdpath = spa_strdup(vdev_description(newvd)); 2941 newvd_isspare = newvd->vdev_isspare; 2942 2943 /* 2944 * Mark newvd's DTL dirty in this txg. 2945 */ 2946 vdev_dirty(tvd, VDD_DTL, newvd, txg); 2947 2948 (void) spa_vdev_exit(spa, newrootvd, open_txg, 0); 2949 2950 tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir); 2951 if (dmu_tx_assign(tx, TXG_WAIT) == 0) { 2952 spa_history_internal_log(LOG_POOL_VDEV_ATTACH, spa, tx, 2953 CRED(), "%s vdev=%s %s vdev=%s", 2954 replacing && newvd_isspare ? "spare in" : 2955 replacing ? "replace" : "attach", newvdpath, 2956 replacing ? "for" : "to", oldvdpath); 2957 dmu_tx_commit(tx); 2958 } else { 2959 dmu_tx_abort(tx); 2960 } 2961 2962 spa_strfree(oldvdpath); 2963 spa_strfree(newvdpath); 2964 2965 /* 2966 * Kick off a resilver to update newvd. 2967 */ 2968 VERIFY3U(spa_scrub(spa, POOL_SCRUB_RESILVER), ==, 0); 2969 2970 return (0); 2971 } 2972 2973 /* 2974 * Detach a device from a mirror or replacing vdev. 2975 * If 'replace_done' is specified, only detach if the parent 2976 * is a replacing vdev. 
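 *
 * replace_done is how spa_vdev_resilver_done(), below, detaches the
 * old half of a completed replacement (or a spare flagged 'unspare')
 * without risking removal of an arbitrary mirror child.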
2977 */ 2978 int 2979 spa_vdev_detach(spa_t *spa, uint64_t guid, int replace_done) 2980 { 2981 uint64_t txg; 2982 int c, t, error; 2983 vdev_t *rvd = spa->spa_root_vdev; 2984 vdev_t *vd, *pvd, *cvd, *tvd; 2985 boolean_t unspare = B_FALSE; 2986 uint64_t unspare_guid; 2987 size_t len; 2988 2989 txg = spa_vdev_enter(spa); 2990 2991 vd = spa_lookup_by_guid(spa, guid, B_FALSE); 2992 2993 if (vd == NULL) 2994 return (spa_vdev_exit(spa, NULL, txg, ENODEV)); 2995 2996 if (!vd->vdev_ops->vdev_op_leaf) 2997 return (spa_vdev_exit(spa, NULL, txg, ENOTSUP)); 2998 2999 pvd = vd->vdev_parent; 3000 3001 /* 3002 * If replace_done is specified, only remove this device if it's 3003 * the first child of a replacing vdev. For the 'spare' vdev, either 3004 * disk can be removed. 3005 */ 3006 if (replace_done) { 3007 if (pvd->vdev_ops == &vdev_replacing_ops) { 3008 if (vd->vdev_id != 0) 3009 return (spa_vdev_exit(spa, NULL, txg, ENOTSUP)); 3010 } else if (pvd->vdev_ops != &vdev_spare_ops) { 3011 return (spa_vdev_exit(spa, NULL, txg, ENOTSUP)); 3012 } 3013 } 3014 3015 ASSERT(pvd->vdev_ops != &vdev_spare_ops || 3016 spa_version(spa) >= SPA_VERSION_SPARES); 3017 3018 /* 3019 * Only mirror, replacing, and spare vdevs support detach. 3020 */ 3021 if (pvd->vdev_ops != &vdev_replacing_ops && 3022 pvd->vdev_ops != &vdev_mirror_ops && 3023 pvd->vdev_ops != &vdev_spare_ops) 3024 return (spa_vdev_exit(spa, NULL, txg, ENOTSUP)); 3025 3026 /* 3027 * If there's only one replica, you can't detach it. 3028 */ 3029 if (pvd->vdev_children <= 1) 3030 return (spa_vdev_exit(spa, NULL, txg, EBUSY)); 3031 3032 /* 3033 * If all siblings have non-empty DTLs, this device may have the only 3034 * valid copy of the data, which means we cannot safely detach it. 3035 * 3036 * XXX -- as in the vdev_offline() case, we really want a more 3037 * precise DTL check. 3038 */ 3039 for (c = 0; c < pvd->vdev_children; c++) { 3040 uint64_t dirty; 3041 3042 cvd = pvd->vdev_child[c]; 3043 if (cvd == vd) 3044 continue; 3045 if (vdev_is_dead(cvd)) 3046 continue; 3047 mutex_enter(&cvd->vdev_dtl_lock); 3048 dirty = cvd->vdev_dtl_map.sm_space | 3049 cvd->vdev_dtl_scrub.sm_space; 3050 mutex_exit(&cvd->vdev_dtl_lock); 3051 if (!dirty) 3052 break; 3053 } 3054 3055 /* 3056 * If we are a replacing or spare vdev, then we can always detach the 3057 * latter child, as that is how one cancels the operation. 3058 */ 3059 if ((pvd->vdev_ops == &vdev_mirror_ops || vd->vdev_id != 1) && 3060 c == pvd->vdev_children) 3061 return (spa_vdev_exit(spa, NULL, txg, EBUSY)); 3062 3063 /* 3064 * If we are detaching the second disk from a replacing vdev, then 3065 * check to see if we changed the original vdev's path to have "/old" 3066 * at the end in spa_vdev_attach(). If so, undo that change now. 3067 */ 3068 if (pvd->vdev_ops == &vdev_replacing_ops && vd->vdev_id == 1 && 3069 pvd->vdev_child[0]->vdev_path != NULL && 3070 pvd->vdev_child[1]->vdev_path != NULL) { 3071 ASSERT(pvd->vdev_child[1] == vd); 3072 cvd = pvd->vdev_child[0]; 3073 len = strlen(vd->vdev_path); 3074 if (strncmp(cvd->vdev_path, vd->vdev_path, len) == 0 && 3075 strcmp(cvd->vdev_path + len, "/old") == 0) { 3076 spa_strfree(cvd->vdev_path); 3077 cvd->vdev_path = spa_strdup(vd->vdev_path); 3078 } 3079 } 3080 3081 /* 3082 * If we are detaching the original disk from a spare, then it implies 3083 * that the spare should become a real disk, and be removed from the 3084 * active spare list for the pool. 
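 *
 * When 'unspare' is set we do two things further down: mark the
 * remaining child as no longer a spare before its parent is collapsed
 * away, and, once the detach commits, remove the same guid from the
 * spare list of every other active pool that shares it.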
3085 */ 3086 if (pvd->vdev_ops == &vdev_spare_ops && 3087 vd->vdev_id == 0) 3088 unspare = B_TRUE; 3089 3090 /* 3091 * Erase the disk labels so the disk can be used for other things. 3092 * This must be done after all other error cases are handled, 3093 * but before we disembowel vd (so we can still do I/O to it). 3094 * But if we can't do it, don't treat the error as fatal -- 3095 * it may be that the unwritability of the disk is the reason 3096 * it's being detached! 3097 */ 3098 error = vdev_label_init(vd, 0, VDEV_LABEL_REMOVE); 3099 3100 /* 3101 * Remove vd from its parent and compact the parent's children. 3102 */ 3103 vdev_remove_child(pvd, vd); 3104 vdev_compact_children(pvd); 3105 3106 /* 3107 * Remember one of the remaining children so we can get tvd below. 3108 */ 3109 cvd = pvd->vdev_child[0]; 3110 3111 /* 3112 * If we need to remove the remaining child from the list of hot spares, 3113 * do it now, marking the vdev as no longer a spare in the process. We 3114 * must do this before vdev_remove_parent(), because that can change the 3115 * GUID if it creates a new toplevel GUID. 3116 */ 3117 if (unspare) { 3118 ASSERT(cvd->vdev_isspare); 3119 spa_spare_remove(cvd); 3120 unspare_guid = cvd->vdev_guid; 3121 } 3122 3123 /* 3124 * If the parent mirror/replacing vdev only has one child, 3125 * the parent is no longer needed. Remove it from the tree. 3126 */ 3127 if (pvd->vdev_children == 1) 3128 vdev_remove_parent(cvd); 3129 3130 /* 3131 * We don't set tvd until now because the parent we just removed 3132 * may have been the previous top-level vdev. 3133 */ 3134 tvd = cvd->vdev_top; 3135 ASSERT(tvd->vdev_parent == rvd); 3136 3137 /* 3138 * Reevaluate the parent vdev state. 3139 */ 3140 vdev_propagate_state(cvd); 3141 3142 /* 3143 * If the device we just detached was smaller than the others, it may be 3144 * possible to add metaslabs (i.e. grow the pool). vdev_metaslab_init() 3145 * can't fail because the existing metaslabs are already in core, so 3146 * there's nothing to read from disk. 3147 */ 3148 VERIFY(vdev_metaslab_init(tvd, txg) == 0); 3149 3150 vdev_config_dirty(tvd); 3151 3152 /* 3153 * Mark vd's DTL as dirty in this txg. vdev_dtl_sync() will see that 3154 * vd->vdev_detached is set and free vd's DTL object in syncing context. 3155 * But first make sure we're not on any *other* txg's DTL list, to 3156 * prevent vd from being accessed after it's freed. 3157 */ 3158 for (t = 0; t < TXG_SIZE; t++) 3159 (void) txg_list_remove_this(&tvd->vdev_dtl_list, vd, t); 3160 vd->vdev_detached = B_TRUE; 3161 vdev_dirty(tvd, VDD_DTL, vd, txg); 3162 3163 spa_event_notify(spa, vd, ESC_ZFS_VDEV_REMOVE); 3164 3165 error = spa_vdev_exit(spa, vd, txg, 0); 3166 3167 /* 3168 * If this was the removal of the original device in a hot spare vdev, 3169 * then we want to go through and remove the device from the hot spare 3170 * list of every other pool. 3171 */ 3172 if (unspare) { 3173 spa = NULL; 3174 mutex_enter(&spa_namespace_lock); 3175 while ((spa = spa_next(spa)) != NULL) { 3176 if (spa->spa_state != POOL_STATE_ACTIVE) 3177 continue; 3178 3179 (void) spa_vdev_remove(spa, unspare_guid, B_TRUE); 3180 } 3181 mutex_exit(&spa_namespace_lock); 3182 } 3183 3184 return (error); 3185 } 3186 3187 /* 3188 * Remove a spares vdev from the nvlist config. 
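 *
 * This operates purely on the nvlist form of the spare list: find the
 * entry whose ZPOOL_CONFIG_GUID matches, then rebuild the
 * ZPOOL_CONFIG_SPARES array without it.  A spare that is currently in
 * use in this pool (vd != NULL) is only removed when 'unspare' is set.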
3189 */ 3190 static int 3191 spa_remove_spares(spa_aux_vdev_t *sav, uint64_t guid, boolean_t unspare, 3192 nvlist_t **spares, int nspares, vdev_t *vd) 3193 { 3194 nvlist_t *nv, **newspares; 3195 int i, j; 3196 3197 nv = NULL; 3198 for (i = 0; i < nspares; i++) { 3199 uint64_t theguid; 3200 3201 VERIFY(nvlist_lookup_uint64(spares[i], 3202 ZPOOL_CONFIG_GUID, &theguid) == 0); 3203 if (theguid == guid) { 3204 nv = spares[i]; 3205 break; 3206 } 3207 } 3208 3209 /* 3210 * Only remove the hot spare if it's not currently in use in this pool. 3211 */ 3212 if (nv == NULL && vd == NULL) 3213 return (ENOENT); 3214 3215 if (nv == NULL && vd != NULL) 3216 return (ENOTSUP); 3217 3218 if (!unspare && nv != NULL && vd != NULL) 3219 return (EBUSY); 3220 3221 if (nspares == 1) { 3222 newspares = NULL; 3223 } else { 3224 newspares = kmem_alloc((nspares - 1) * sizeof (void *), 3225 KM_SLEEP); 3226 for (i = 0, j = 0; i < nspares; i++) { 3227 if (spares[i] != nv) 3228 VERIFY(nvlist_dup(spares[i], 3229 &newspares[j++], KM_SLEEP) == 0); 3230 } 3231 } 3232 3233 VERIFY(nvlist_remove(sav->sav_config, ZPOOL_CONFIG_SPARES, 3234 DATA_TYPE_NVLIST_ARRAY) == 0); 3235 VERIFY(nvlist_add_nvlist_array(sav->sav_config, 3236 ZPOOL_CONFIG_SPARES, newspares, nspares - 1) == 0); 3237 for (i = 0; i < nspares - 1; i++) 3238 nvlist_free(newspares[i]); 3239 kmem_free(newspares, (nspares - 1) * sizeof (void *)); 3240 3241 return (0); 3242 } 3243 3244 /* 3245 * Remove an l2cache vdev from the nvlist config. 3246 */ 3247 static int 3248 spa_remove_l2cache(spa_aux_vdev_t *sav, uint64_t guid, nvlist_t **l2cache, 3249 int nl2cache, vdev_t *vd) 3250 { 3251 nvlist_t *nv, **newl2cache; 3252 int i, j; 3253 3254 nv = NULL; 3255 for (i = 0; i < nl2cache; i++) { 3256 uint64_t theguid; 3257 3258 VERIFY(nvlist_lookup_uint64(l2cache[i], 3259 ZPOOL_CONFIG_GUID, &theguid) == 0); 3260 if (theguid == guid) { 3261 nv = l2cache[i]; 3262 break; 3263 } 3264 } 3265 3266 if (vd == NULL) { 3267 for (i = 0; i < nl2cache; i++) { 3268 if (sav->sav_vdevs[i]->vdev_guid == guid) { 3269 vd = sav->sav_vdevs[i]; 3270 break; 3271 } 3272 } 3273 } 3274 3275 if (nv == NULL && vd == NULL) 3276 return (ENOENT); 3277 3278 if (nv == NULL && vd != NULL) 3279 return (ENOTSUP); 3280 3281 if (nl2cache == 1) { 3282 newl2cache = NULL; 3283 } else { 3284 newl2cache = kmem_alloc((nl2cache - 1) * sizeof (void *), 3285 KM_SLEEP); 3286 for (i = 0, j = 0; i < nl2cache; i++) { 3287 if (l2cache[i] != nv) 3288 VERIFY(nvlist_dup(l2cache[i], 3289 &newl2cache[j++], KM_SLEEP) == 0); 3290 } 3291 } 3292 3293 VERIFY(nvlist_remove(sav->sav_config, ZPOOL_CONFIG_L2CACHE, 3294 DATA_TYPE_NVLIST_ARRAY) == 0); 3295 VERIFY(nvlist_add_nvlist_array(sav->sav_config, 3296 ZPOOL_CONFIG_L2CACHE, newl2cache, nl2cache - 1) == 0); 3297 for (i = 0; i < nl2cache - 1; i++) 3298 nvlist_free(newl2cache[i]); 3299 kmem_free(newl2cache, (nl2cache - 1) * sizeof (void *)); 3300 3301 return (0); 3302 } 3303 3304 /* 3305 * Remove a device from the pool. Currently, this supports removing only hot 3306 * spares and level 2 ARC devices. 
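 *
 * The guid is looked up in the spare list first and then in the
 * l2cache list; whichever list contains it is rewritten without the
 * entry, reloaded, and flagged with sav_sync so the change reaches
 * the MOS.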
3307 */ 3308 int 3309 spa_vdev_remove(spa_t *spa, uint64_t guid, boolean_t unspare) 3310 { 3311 vdev_t *vd; 3312 nvlist_t **spares, **l2cache; 3313 uint_t nspares, nl2cache; 3314 int error = 0; 3315 3316 spa_config_enter(spa, RW_WRITER, FTAG); 3317 3318 vd = spa_lookup_by_guid(spa, guid, B_FALSE); 3319 3320 if (spa->spa_spares.sav_vdevs != NULL && 3321 nvlist_lookup_nvlist_array(spa->spa_spares.sav_config, 3322 ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0) { 3323 if ((error = spa_remove_spares(&spa->spa_spares, guid, unspare, 3324 spares, nspares, vd)) != 0) 3325 goto out; 3326 spa_load_spares(spa); 3327 spa->spa_spares.sav_sync = B_TRUE; 3328 goto out; 3329 } 3330 3331 if (spa->spa_l2cache.sav_vdevs != NULL && 3332 nvlist_lookup_nvlist_array(spa->spa_l2cache.sav_config, 3333 ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0) { 3334 if ((error = spa_remove_l2cache(&spa->spa_l2cache, guid, 3335 l2cache, nl2cache, vd)) != 0) 3336 goto out; 3337 spa_load_l2cache(spa); 3338 spa->spa_l2cache.sav_sync = B_TRUE; 3339 } 3340 3341 out: 3342 spa_config_exit(spa, FTAG); 3343 return (error); 3344 } 3345 3346 /* 3347 * Find any device that's done replacing, or a vdev marked 'unspare' that's 3348 * current spared, so we can detach it. 3349 */ 3350 static vdev_t * 3351 spa_vdev_resilver_done_hunt(vdev_t *vd) 3352 { 3353 vdev_t *newvd, *oldvd; 3354 int c; 3355 3356 for (c = 0; c < vd->vdev_children; c++) { 3357 oldvd = spa_vdev_resilver_done_hunt(vd->vdev_child[c]); 3358 if (oldvd != NULL) 3359 return (oldvd); 3360 } 3361 3362 /* 3363 * Check for a completed replacement. 3364 */ 3365 if (vd->vdev_ops == &vdev_replacing_ops && vd->vdev_children == 2) { 3366 oldvd = vd->vdev_child[0]; 3367 newvd = vd->vdev_child[1]; 3368 3369 mutex_enter(&newvd->vdev_dtl_lock); 3370 if (newvd->vdev_dtl_map.sm_space == 0 && 3371 newvd->vdev_dtl_scrub.sm_space == 0) { 3372 mutex_exit(&newvd->vdev_dtl_lock); 3373 return (oldvd); 3374 } 3375 mutex_exit(&newvd->vdev_dtl_lock); 3376 } 3377 3378 /* 3379 * Check for a completed resilver with the 'unspare' flag set. 3380 */ 3381 if (vd->vdev_ops == &vdev_spare_ops && vd->vdev_children == 2) { 3382 newvd = vd->vdev_child[0]; 3383 oldvd = vd->vdev_child[1]; 3384 3385 mutex_enter(&newvd->vdev_dtl_lock); 3386 if (newvd->vdev_unspare && 3387 newvd->vdev_dtl_map.sm_space == 0 && 3388 newvd->vdev_dtl_scrub.sm_space == 0) { 3389 newvd->vdev_unspare = 0; 3390 mutex_exit(&newvd->vdev_dtl_lock); 3391 return (oldvd); 3392 } 3393 mutex_exit(&newvd->vdev_dtl_lock); 3394 } 3395 3396 return (NULL); 3397 } 3398 3399 static void 3400 spa_vdev_resilver_done(spa_t *spa) 3401 { 3402 vdev_t *vd; 3403 vdev_t *pvd; 3404 uint64_t guid; 3405 uint64_t pguid = 0; 3406 3407 spa_config_enter(spa, RW_READER, FTAG); 3408 3409 while ((vd = spa_vdev_resilver_done_hunt(spa->spa_root_vdev)) != NULL) { 3410 guid = vd->vdev_guid; 3411 /* 3412 * If we have just finished replacing a hot spared device, then 3413 * we need to detach the parent's first child (the original hot 3414 * spare) as well. 
3415 */ 3416 pvd = vd->vdev_parent; 3417 if (pvd->vdev_parent->vdev_ops == &vdev_spare_ops && 3418 pvd->vdev_id == 0) { 3419 ASSERT(pvd->vdev_ops == &vdev_replacing_ops); 3420 ASSERT(pvd->vdev_parent->vdev_children == 2); 3421 pguid = pvd->vdev_parent->vdev_child[1]->vdev_guid; 3422 } 3423 spa_config_exit(spa, FTAG); 3424 if (spa_vdev_detach(spa, guid, B_TRUE) != 0) 3425 return; 3426 if (pguid != 0 && spa_vdev_detach(spa, pguid, B_TRUE) != 0) 3427 return; 3428 spa_config_enter(spa, RW_READER, FTAG); 3429 } 3430 3431 spa_config_exit(spa, FTAG); 3432 } 3433 3434 /* 3435 * Update the stored path for this vdev. Dirty the vdev configuration, relying 3436 * on spa_vdev_enter/exit() to synchronize the labels and cache. 3437 */ 3438 int 3439 spa_vdev_setpath(spa_t *spa, uint64_t guid, const char *newpath) 3440 { 3441 vdev_t *vd; 3442 uint64_t txg; 3443 3444 txg = spa_vdev_enter(spa); 3445 3446 if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL) { 3447 /* 3448 * Determine if this is a reference to a hot spare device. If 3449 * it is, update the path manually as there is no associated 3450 * vdev_t that can be synced to disk. 3451 */ 3452 nvlist_t **spares; 3453 uint_t i, nspares; 3454 3455 if (spa->spa_spares.sav_config != NULL) { 3456 VERIFY(nvlist_lookup_nvlist_array( 3457 spa->spa_spares.sav_config, ZPOOL_CONFIG_SPARES, 3458 &spares, &nspares) == 0); 3459 for (i = 0; i < nspares; i++) { 3460 uint64_t theguid; 3461 VERIFY(nvlist_lookup_uint64(spares[i], 3462 ZPOOL_CONFIG_GUID, &theguid) == 0); 3463 if (theguid == guid) { 3464 VERIFY(nvlist_add_string(spares[i], 3465 ZPOOL_CONFIG_PATH, newpath) == 0); 3466 spa_load_spares(spa); 3467 spa->spa_spares.sav_sync = B_TRUE; 3468 return (spa_vdev_exit(spa, NULL, txg, 3469 0)); 3470 } 3471 } 3472 } 3473 3474 return (spa_vdev_exit(spa, NULL, txg, ENOENT)); 3475 } 3476 3477 if (!vd->vdev_ops->vdev_op_leaf) 3478 return (spa_vdev_exit(spa, NULL, txg, ENOTSUP)); 3479 3480 spa_strfree(vd->vdev_path); 3481 vd->vdev_path = spa_strdup(newpath); 3482 3483 vdev_config_dirty(vd->vdev_top); 3484 3485 return (spa_vdev_exit(spa, NULL, txg, 0)); 3486 } 3487 3488 /* 3489 * ========================================================================== 3490 * SPA Scrubbing 3491 * ========================================================================== 3492 */ 3493 3494 int 3495 spa_scrub(spa_t *spa, pool_scrub_type_t type) 3496 { 3497 ASSERT(!spa_config_held(spa, RW_WRITER)); 3498 3499 if ((uint_t)type >= POOL_SCRUB_TYPES) 3500 return (ENOTSUP); 3501 3502 /* 3503 * If a resilver was requested, but there is no DTL on a 3504 * writeable leaf device, we have nothing to do. 
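 *
 * Even so, we post SPA_ASYNC_RESILVER_DONE so that any replacing or
 * spare vdev whose resilver has already completed gets detached.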
3505 */ 3506 if (type == POOL_SCRUB_RESILVER && 3507 !vdev_resilver_needed(spa->spa_root_vdev, NULL, NULL)) { 3508 spa_async_request(spa, SPA_ASYNC_RESILVER_DONE); 3509 return (0); 3510 } 3511 3512 if (type == POOL_SCRUB_EVERYTHING && 3513 spa->spa_dsl_pool->dp_scrub_func != SCRUB_FUNC_NONE && 3514 spa->spa_dsl_pool->dp_scrub_isresilver) 3515 return (EBUSY); 3516 3517 if (type == POOL_SCRUB_EVERYTHING || type == POOL_SCRUB_RESILVER) { 3518 return (dsl_pool_scrub_clean(spa->spa_dsl_pool)); 3519 } else if (type == POOL_SCRUB_NONE) { 3520 return (dsl_pool_scrub_cancel(spa->spa_dsl_pool)); 3521 } else { 3522 return (EINVAL); 3523 } 3524 } 3525 3526 /* 3527 * ========================================================================== 3528 * SPA async task processing 3529 * ========================================================================== 3530 */ 3531 3532 static void 3533 spa_async_remove(spa_t *spa, vdev_t *vd) 3534 { 3535 vdev_t *tvd; 3536 int c; 3537 3538 for (c = 0; c < vd->vdev_children; c++) { 3539 tvd = vd->vdev_child[c]; 3540 if (tvd->vdev_remove_wanted) { 3541 tvd->vdev_remove_wanted = 0; 3542 vdev_set_state(tvd, B_FALSE, VDEV_STATE_REMOVED, 3543 VDEV_AUX_NONE); 3544 vdev_clear(spa, tvd, B_TRUE); 3545 vdev_config_dirty(tvd->vdev_top); 3546 } 3547 spa_async_remove(spa, tvd); 3548 } 3549 } 3550 3551 static void 3552 spa_async_thread(spa_t *spa) 3553 { 3554 int tasks; 3555 uint64_t txg; 3556 3557 ASSERT(spa->spa_sync_on); 3558 3559 mutex_enter(&spa->spa_async_lock); 3560 tasks = spa->spa_async_tasks; 3561 spa->spa_async_tasks = 0; 3562 mutex_exit(&spa->spa_async_lock); 3563 3564 /* 3565 * See if the config needs to be updated. 3566 */ 3567 if (tasks & SPA_ASYNC_CONFIG_UPDATE) { 3568 mutex_enter(&spa_namespace_lock); 3569 spa_config_update(spa, SPA_CONFIG_UPDATE_POOL); 3570 mutex_exit(&spa_namespace_lock); 3571 } 3572 3573 /* 3574 * See if any devices need to be marked REMOVED. 3575 * 3576 * XXX - We avoid doing this when we are in 3577 * I/O failure state since spa_vdev_enter() grabs 3578 * the namespace lock and would not be able to obtain 3579 * the writer config lock. 3580 */ 3581 if (tasks & SPA_ASYNC_REMOVE && 3582 spa_state(spa) != POOL_STATE_IO_FAILURE) { 3583 txg = spa_vdev_enter(spa); 3584 spa_async_remove(spa, spa->spa_root_vdev); 3585 (void) spa_vdev_exit(spa, NULL, txg, 0); 3586 } 3587 3588 /* 3589 * If any devices are done replacing, detach them. 3590 */ 3591 if (tasks & SPA_ASYNC_RESILVER_DONE) 3592 spa_vdev_resilver_done(spa); 3593 3594 /* 3595 * Kick off a resilver. 3596 */ 3597 if (tasks & SPA_ASYNC_RESILVER) 3598 VERIFY(spa_scrub(spa, POOL_SCRUB_RESILVER) == 0); 3599 3600 /* 3601 * Let the world know that we're done. 
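 *
 * Clearing spa_async_thread and broadcasting spa_async_cv is what
 * spa_async_suspend() sleeps on, so suspension can proceed as soon as
 * this thread exits.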
3602 */ 3603 mutex_enter(&spa->spa_async_lock); 3604 spa->spa_async_thread = NULL; 3605 cv_broadcast(&spa->spa_async_cv); 3606 mutex_exit(&spa->spa_async_lock); 3607 thread_exit(); 3608 } 3609 3610 void 3611 spa_async_suspend(spa_t *spa) 3612 { 3613 mutex_enter(&spa->spa_async_lock); 3614 spa->spa_async_suspended++; 3615 while (spa->spa_async_thread != NULL) 3616 cv_wait(&spa->spa_async_cv, &spa->spa_async_lock); 3617 mutex_exit(&spa->spa_async_lock); 3618 } 3619 3620 void 3621 spa_async_resume(spa_t *spa) 3622 { 3623 mutex_enter(&spa->spa_async_lock); 3624 ASSERT(spa->spa_async_suspended != 0); 3625 spa->spa_async_suspended--; 3626 mutex_exit(&spa->spa_async_lock); 3627 } 3628 3629 static void 3630 spa_async_dispatch(spa_t *spa) 3631 { 3632 mutex_enter(&spa->spa_async_lock); 3633 if (spa->spa_async_tasks && !spa->spa_async_suspended && 3634 spa->spa_async_thread == NULL && 3635 rootdir != NULL && !vn_is_readonly(rootdir)) 3636 spa->spa_async_thread = thread_create(NULL, 0, 3637 spa_async_thread, spa, 0, &p0, TS_RUN, maxclsyspri); 3638 mutex_exit(&spa->spa_async_lock); 3639 } 3640 3641 void 3642 spa_async_request(spa_t *spa, int task) 3643 { 3644 mutex_enter(&spa->spa_async_lock); 3645 spa->spa_async_tasks |= task; 3646 mutex_exit(&spa->spa_async_lock); 3647 } 3648 3649 /* 3650 * ========================================================================== 3651 * SPA syncing routines 3652 * ========================================================================== 3653 */ 3654 3655 static void 3656 spa_sync_deferred_frees(spa_t *spa, uint64_t txg) 3657 { 3658 bplist_t *bpl = &spa->spa_sync_bplist; 3659 dmu_tx_t *tx; 3660 blkptr_t blk; 3661 uint64_t itor = 0; 3662 zio_t *zio; 3663 int error; 3664 uint8_t c = 1; 3665 3666 zio = zio_root(spa, NULL, NULL, ZIO_FLAG_CONFIG_HELD); 3667 3668 while (bplist_iterate(bpl, &itor, &blk) == 0) 3669 zio_nowait(zio_free(zio, spa, txg, &blk, NULL, NULL)); 3670 3671 error = zio_wait(zio); 3672 ASSERT3U(error, ==, 0); 3673 3674 tx = dmu_tx_create_assigned(spa->spa_dsl_pool, txg); 3675 bplist_vacate(bpl, tx); 3676 3677 /* 3678 * Pre-dirty the first block so we sync to convergence faster. 3679 * (Usually only the first block is needed.) 3680 */ 3681 dmu_write(spa->spa_meta_objset, spa->spa_sync_bplist_obj, 0, 1, &c, tx); 3682 dmu_tx_commit(tx); 3683 } 3684 3685 static void 3686 spa_sync_nvlist(spa_t *spa, uint64_t obj, nvlist_t *nv, dmu_tx_t *tx) 3687 { 3688 char *packed = NULL; 3689 size_t nvsize = 0; 3690 dmu_buf_t *db; 3691 3692 VERIFY(nvlist_size(nv, &nvsize, NV_ENCODE_XDR) == 0); 3693 3694 packed = kmem_alloc(nvsize, KM_SLEEP); 3695 3696 VERIFY(nvlist_pack(nv, &packed, &nvsize, NV_ENCODE_XDR, 3697 KM_SLEEP) == 0); 3698 3699 dmu_write(spa->spa_meta_objset, obj, 0, nvsize, packed, tx); 3700 3701 kmem_free(packed, nvsize); 3702 3703 VERIFY(0 == dmu_bonus_hold(spa->spa_meta_objset, obj, FTAG, &db)); 3704 dmu_buf_will_dirty(db, tx); 3705 *(uint64_t *)db->db_data = nvsize; 3706 dmu_buf_rele(db, FTAG); 3707 } 3708 3709 static void 3710 spa_sync_aux_dev(spa_t *spa, spa_aux_vdev_t *sav, dmu_tx_t *tx, 3711 const char *config, const char *entry) 3712 { 3713 nvlist_t *nvroot; 3714 nvlist_t **list; 3715 int i; 3716 3717 if (!sav->sav_sync) 3718 return; 3719 3720 /* 3721 * Update the MOS nvlist describing the list of available devices. 3722 * spa_validate_aux() will have already made sure this nvlist is 3723 * valid and the vdevs are labeled appropriately. 
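 *
 * The list is stored as a packed nvlist: the first time through we
 * allocate a DMU_OT_PACKED_NVLIST object and record it in the pool
 * directory under 'entry'; after that, spa_sync_nvlist() simply
 * rewrites the packed contents whenever sav_sync is set.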
3724 */ 3725 if (sav->sav_object == 0) { 3726 sav->sav_object = dmu_object_alloc(spa->spa_meta_objset, 3727 DMU_OT_PACKED_NVLIST, 1 << 14, DMU_OT_PACKED_NVLIST_SIZE, 3728 sizeof (uint64_t), tx); 3729 VERIFY(zap_update(spa->spa_meta_objset, 3730 DMU_POOL_DIRECTORY_OBJECT, entry, sizeof (uint64_t), 1, 3731 &sav->sav_object, tx) == 0); 3732 } 3733 3734 VERIFY(nvlist_alloc(&nvroot, NV_UNIQUE_NAME, KM_SLEEP) == 0); 3735 if (sav->sav_count == 0) { 3736 VERIFY(nvlist_add_nvlist_array(nvroot, config, NULL, 0) == 0); 3737 } else { 3738 list = kmem_alloc(sav->sav_count * sizeof (void *), KM_SLEEP); 3739 for (i = 0; i < sav->sav_count; i++) 3740 list[i] = vdev_config_generate(spa, sav->sav_vdevs[i], 3741 B_FALSE, B_FALSE, B_TRUE); 3742 VERIFY(nvlist_add_nvlist_array(nvroot, config, list, 3743 sav->sav_count) == 0); 3744 for (i = 0; i < sav->sav_count; i++) 3745 nvlist_free(list[i]); 3746 kmem_free(list, sav->sav_count * sizeof (void *)); 3747 } 3748 3749 spa_sync_nvlist(spa, sav->sav_object, nvroot, tx); 3750 nvlist_free(nvroot); 3751 3752 sav->sav_sync = B_FALSE; 3753 } 3754 3755 static void 3756 spa_sync_config_object(spa_t *spa, dmu_tx_t *tx) 3757 { 3758 nvlist_t *config; 3759 3760 if (list_is_empty(&spa->spa_dirty_list)) 3761 return; 3762 3763 config = spa_config_generate(spa, NULL, dmu_tx_get_txg(tx), B_FALSE); 3764 3765 if (spa->spa_config_syncing) 3766 nvlist_free(spa->spa_config_syncing); 3767 spa->spa_config_syncing = config; 3768 3769 spa_sync_nvlist(spa, spa->spa_config_object, config, tx); 3770 } 3771 3772 /* 3773 * Set zpool properties. 3774 */ 3775 static void 3776 spa_sync_props(void *arg1, void *arg2, cred_t *cr, dmu_tx_t *tx) 3777 { 3778 spa_t *spa = arg1; 3779 objset_t *mos = spa->spa_meta_objset; 3780 nvlist_t *nvp = arg2; 3781 nvpair_t *elem; 3782 uint64_t intval; 3783 char *strval; 3784 zpool_prop_t prop; 3785 const char *propname; 3786 zprop_type_t proptype; 3787 spa_config_dirent_t *dp; 3788 3789 elem = NULL; 3790 while ((elem = nvlist_next_nvpair(nvp, elem))) { 3791 switch (prop = zpool_name_to_prop(nvpair_name(elem))) { 3792 case ZPOOL_PROP_VERSION: 3793 /* 3794 * Only set version for non-zpool-creation cases 3795 * (set/import). spa_create() needs special care 3796 * for version setting. 3797 */ 3798 if (tx->tx_txg != TXG_INITIAL) { 3799 VERIFY(nvpair_value_uint64(elem, 3800 &intval) == 0); 3801 ASSERT(intval <= SPA_VERSION); 3802 ASSERT(intval >= spa_version(spa)); 3803 spa->spa_uberblock.ub_version = intval; 3804 vdev_config_dirty(spa->spa_root_vdev); 3805 } 3806 break; 3807 3808 case ZPOOL_PROP_ALTROOT: 3809 /* 3810 * 'altroot' is a non-persistent property. It should 3811 * have been set temporarily at creation or import time. 3812 */ 3813 ASSERT(spa->spa_root != NULL); 3814 break; 3815 3816 case ZPOOL_PROP_CACHEFILE: 3817 /* 3818 * 'cachefile' is a non-persistent property, but note 3819 * an async request that the config cache needs to be 3820 * udpated. 3821 */ 3822 VERIFY(nvpair_value_string(elem, &strval) == 0); 3823 3824 dp = kmem_alloc(sizeof (spa_config_dirent_t), 3825 KM_SLEEP); 3826 3827 if (strval[0] == '\0') 3828 dp->scd_path = spa_strdup(spa_config_path); 3829 else if (strcmp(strval, "none") == 0) 3830 dp->scd_path = NULL; 3831 else 3832 dp->scd_path = spa_strdup(strval); 3833 3834 list_insert_head(&spa->spa_config_list, dp); 3835 spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE); 3836 break; 3837 default: 3838 /* 3839 * Set pool property values in the poolprops mos object. 
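 *
 * Persistent properties live in a ZAP object (spa_pool_props_object),
 * created on first use and hung off the pool directory as
 * DMU_POOL_PROPS.  String properties are stored as strings; numeric
 * and index properties are stored as a single uint64.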
3840 */ 3841 mutex_enter(&spa->spa_props_lock); 3842 if (spa->spa_pool_props_object == 0) { 3843 objset_t *mos = spa->spa_meta_objset; 3844 3845 VERIFY((spa->spa_pool_props_object = 3846 zap_create(mos, DMU_OT_POOL_PROPS, 3847 DMU_OT_NONE, 0, tx)) > 0); 3848 3849 VERIFY(zap_update(mos, 3850 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_PROPS, 3851 8, 1, &spa->spa_pool_props_object, tx) 3852 == 0); 3853 } 3854 mutex_exit(&spa->spa_props_lock); 3855 3856 /* normalize the property name */ 3857 propname = zpool_prop_to_name(prop); 3858 proptype = zpool_prop_get_type(prop); 3859 3860 if (nvpair_type(elem) == DATA_TYPE_STRING) { 3861 ASSERT(proptype == PROP_TYPE_STRING); 3862 VERIFY(nvpair_value_string(elem, &strval) == 0); 3863 VERIFY(zap_update(mos, 3864 spa->spa_pool_props_object, propname, 3865 1, strlen(strval) + 1, strval, tx) == 0); 3866 3867 } else if (nvpair_type(elem) == DATA_TYPE_UINT64) { 3868 VERIFY(nvpair_value_uint64(elem, &intval) == 0); 3869 3870 if (proptype == PROP_TYPE_INDEX) { 3871 const char *unused; 3872 VERIFY(zpool_prop_index_to_string( 3873 prop, intval, &unused) == 0); 3874 } 3875 VERIFY(zap_update(mos, 3876 spa->spa_pool_props_object, propname, 3877 8, 1, &intval, tx) == 0); 3878 } else { 3879 ASSERT(0); /* not allowed */ 3880 } 3881 3882 switch (prop) { 3883 case ZPOOL_PROP_DELEGATION: 3884 spa->spa_delegation = intval; 3885 break; 3886 case ZPOOL_PROP_BOOTFS: 3887 spa->spa_bootfs = intval; 3888 break; 3889 case ZPOOL_PROP_FAILUREMODE: 3890 spa->spa_failmode = intval; 3891 break; 3892 default: 3893 break; 3894 } 3895 } 3896 3897 /* log internal history if this is not a zpool create */ 3898 if (spa_version(spa) >= SPA_VERSION_ZPOOL_HISTORY && 3899 tx->tx_txg != TXG_INITIAL) { 3900 spa_history_internal_log(LOG_POOL_PROPSET, 3901 spa, tx, cr, "%s %lld %s", 3902 nvpair_name(elem), intval, spa->spa_name); 3903 } 3904 } 3905 } 3906 3907 /* 3908 * Sync the specified transaction group. New blocks may be dirtied as 3909 * part of the process, so we iterate until it converges. 3910 */ 3911 void 3912 spa_sync(spa_t *spa, uint64_t txg) 3913 { 3914 dsl_pool_t *dp = spa->spa_dsl_pool; 3915 objset_t *mos = spa->spa_meta_objset; 3916 bplist_t *bpl = &spa->spa_sync_bplist; 3917 vdev_t *rvd = spa->spa_root_vdev; 3918 vdev_t *vd; 3919 dmu_tx_t *tx; 3920 int dirty_vdevs; 3921 3922 /* 3923 * Lock out configuration changes. 3924 */ 3925 spa_config_enter(spa, RW_READER, FTAG); 3926 3927 spa->spa_syncing_txg = txg; 3928 spa->spa_sync_pass = 0; 3929 3930 VERIFY(0 == bplist_open(bpl, mos, spa->spa_sync_bplist_obj)); 3931 3932 tx = dmu_tx_create_assigned(dp, txg); 3933 3934 /* 3935 * If we are upgrading to SPA_VERSION_RAIDZ_DEFLATE this txg, 3936 * set spa_deflate if we have no raid-z vdevs. 
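 *
 * Roughly: deflated accounting can only be enabled once every
 * top-level vdev reports the trivial deflate ratio, since space
 * already accounted for a pre-existing raid-z vdev would otherwise
 * be misreported.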
3937 */ 3938 if (spa->spa_ubsync.ub_version < SPA_VERSION_RAIDZ_DEFLATE && 3939 spa->spa_uberblock.ub_version >= SPA_VERSION_RAIDZ_DEFLATE) { 3940 int i; 3941 3942 for (i = 0; i < rvd->vdev_children; i++) { 3943 vd = rvd->vdev_child[i]; 3944 if (vd->vdev_deflate_ratio != SPA_MINBLOCKSIZE) 3945 break; 3946 } 3947 if (i == rvd->vdev_children) { 3948 spa->spa_deflate = TRUE; 3949 VERIFY(0 == zap_add(spa->spa_meta_objset, 3950 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_DEFLATE, 3951 sizeof (uint64_t), 1, &spa->spa_deflate, tx)); 3952 } 3953 } 3954 3955 if (spa->spa_ubsync.ub_version < SPA_VERSION_ORIGIN && 3956 spa->spa_uberblock.ub_version >= SPA_VERSION_ORIGIN) { 3957 dsl_pool_create_origin(dp, tx); 3958 3959 /* Keeping the origin open increases spa_minref */ 3960 spa->spa_minref += 3; 3961 } 3962 3963 if (spa->spa_ubsync.ub_version < SPA_VERSION_NEXT_CLONES && 3964 spa->spa_uberblock.ub_version >= SPA_VERSION_NEXT_CLONES) { 3965 dsl_pool_upgrade_clones(dp, tx); 3966 } 3967 3968 /* 3969 * If anything has changed in this txg, push the deferred frees 3970 * from the previous txg. If not, leave them alone so that we 3971 * don't generate work on an otherwise idle system. 3972 */ 3973 if (!txg_list_empty(&dp->dp_dirty_datasets, txg) || 3974 !txg_list_empty(&dp->dp_dirty_dirs, txg) || 3975 !txg_list_empty(&dp->dp_sync_tasks, txg)) 3976 spa_sync_deferred_frees(spa, txg); 3977 3978 /* 3979 * Iterate to convergence. 3980 */ 3981 do { 3982 spa->spa_sync_pass++; 3983 3984 spa_sync_config_object(spa, tx); 3985 spa_sync_aux_dev(spa, &spa->spa_spares, tx, 3986 ZPOOL_CONFIG_SPARES, DMU_POOL_SPARES); 3987 spa_sync_aux_dev(spa, &spa->spa_l2cache, tx, 3988 ZPOOL_CONFIG_L2CACHE, DMU_POOL_L2CACHE); 3989 spa_errlog_sync(spa, txg); 3990 dsl_pool_sync(dp, txg); 3991 3992 dirty_vdevs = 0; 3993 while (vd = txg_list_remove(&spa->spa_vdev_txg_list, txg)) { 3994 vdev_sync(vd, txg); 3995 dirty_vdevs++; 3996 } 3997 3998 bplist_sync(bpl, tx); 3999 } while (dirty_vdevs); 4000 4001 bplist_close(bpl); 4002 4003 dprintf("txg %llu passes %d\n", txg, spa->spa_sync_pass); 4004 4005 /* 4006 * Rewrite the vdev configuration (which includes the uberblock) 4007 * to commit the transaction group. 4008 * 4009 * If there are no dirty vdevs, we sync the uberblock to a few 4010 * random top-level vdevs that are known to be visible in the 4011 * config cache (see spa_vdev_add() for details). If there *are* 4012 * dirty vdevs -- or if the sync to our random subset fails -- 4013 * then sync the uberblock to all vdevs. 4014 */ 4015 if (list_is_empty(&spa->spa_dirty_list)) { 4016 vdev_t *svd[SPA_DVAS_PER_BP]; 4017 int svdcount = 0; 4018 int children = rvd->vdev_children; 4019 int c0 = spa_get_random(children); 4020 int c; 4021 4022 for (c = 0; c < children; c++) { 4023 vd = rvd->vdev_child[(c0 + c) % children]; 4024 if (vd->vdev_ms_array == 0 || vd->vdev_islog) 4025 continue; 4026 svd[svdcount++] = vd; 4027 if (svdcount == SPA_DVAS_PER_BP) 4028 break; 4029 } 4030 vdev_config_sync(svd, svdcount, txg); 4031 } else { 4032 vdev_config_sync(rvd->vdev_child, rvd->vdev_children, txg); 4033 } 4034 dmu_tx_commit(tx); 4035 4036 /* 4037 * Clear the dirty config list. 4038 */ 4039 while ((vd = list_head(&spa->spa_dirty_list)) != NULL) 4040 vdev_config_clean(vd); 4041 4042 /* 4043 * Now that the new config has synced transactionally, 4044 * let it become visible to the config cache. 
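 *
 * spa_config_syncing was staged by spa_sync_config_object() earlier in
 * this sync; spa_config_set() publishes it only now that
 * vdev_config_sync() has committed the matching uberblock and labels.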

	/*
	 * Clear the dirty config list.
	 */
	while ((vd = list_head(&spa->spa_dirty_list)) != NULL)
		vdev_config_clean(vd);

	/*
	 * Now that the new config has synced transactionally,
	 * let it become visible to the config cache.
	 */
	if (spa->spa_config_syncing != NULL) {
		spa_config_set(spa, spa->spa_config_syncing);
		spa->spa_config_txg = txg;
		spa->spa_config_syncing = NULL;
	}

	spa->spa_traverse_wanted = B_TRUE;
	rw_enter(&spa->spa_traverse_lock, RW_WRITER);
	spa->spa_traverse_wanted = B_FALSE;
	spa->spa_ubsync = spa->spa_uberblock;
	rw_exit(&spa->spa_traverse_lock);

	/*
	 * Clean up the ZIL records for the synced txg.
	 */
	dsl_pool_zil_clean(dp);

	/*
	 * Update usable space statistics.
	 */
	while (vd = txg_list_remove(&spa->spa_vdev_txg_list, TXG_CLEAN(txg)))
		vdev_sync_done(vd, txg);

	/*
	 * It had better be the case that we didn't dirty anything
	 * since vdev_config_sync().
	 */
	ASSERT(txg_list_empty(&dp->dp_dirty_datasets, txg));
	ASSERT(txg_list_empty(&dp->dp_dirty_dirs, txg));
	ASSERT(txg_list_empty(&spa->spa_vdev_txg_list, txg));
	ASSERT(bpl->bpl_queue == NULL);

	spa_config_exit(spa, FTAG);

	/*
	 * If any async tasks have been requested, kick them off.
	 */
	spa_async_dispatch(spa);
}

/*
 * Sync all pools. We don't want to hold the namespace lock across these
 * operations, so we take a reference on the spa_t and drop the lock during the
 * sync.
 */
void
spa_sync_allpools(void)
{
	spa_t *spa = NULL;
	mutex_enter(&spa_namespace_lock);
	while ((spa = spa_next(spa)) != NULL) {
		if (spa_state(spa) != POOL_STATE_ACTIVE)
			continue;
		spa_open_ref(spa, FTAG);
		mutex_exit(&spa_namespace_lock);
		txg_wait_synced(spa_get_dsl(spa), 0);
		mutex_enter(&spa_namespace_lock);
		spa_close(spa, FTAG);
	}
	mutex_exit(&spa_namespace_lock);
}

/*
 * ==========================================================================
 * Miscellaneous routines
 * ==========================================================================
 */

/*
 * Remove all pools in the system.
 */
void
spa_evict_all(void)
{
	spa_t *spa;

	/*
	 * Remove all cached state. All pools should be closed now,
	 * so every spa in the AVL tree should be unreferenced.
	 */
	mutex_enter(&spa_namespace_lock);
	while ((spa = spa_next(NULL)) != NULL) {
		/*
		 * Stop async tasks. The async thread may need to detach
		 * a device that's been replaced, which requires grabbing
		 * spa_namespace_lock, so we must drop it here.
		 */
		spa_open_ref(spa, FTAG);
		mutex_exit(&spa_namespace_lock);
		spa_async_suspend(spa);
		mutex_enter(&spa_namespace_lock);
		spa_close(spa, FTAG);

		if (spa->spa_state != POOL_STATE_UNINITIALIZED) {
			spa_unload(spa);
			spa_deactivate(spa);
		}
		spa_remove(spa);
	}
	mutex_exit(&spa_namespace_lock);
}

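/*
 * Find a vdev in the pool by guid. The primary vdev tree is searched
 * first; if 'l2cache' is set, the level 2 cache devices are searched as
 * well. Spares are not covered by this lookup.
 */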
vdev_t *
spa_lookup_by_guid(spa_t *spa, uint64_t guid, boolean_t l2cache)
{
	vdev_t *vd;
	int i;

	if ((vd = vdev_lookup_by_guid(spa->spa_root_vdev, guid)) != NULL)
		return (vd);

	if (l2cache) {
		for (i = 0; i < spa->spa_l2cache.sav_count; i++) {
			vd = spa->spa_l2cache.sav_vdevs[i];
			if (vd->vdev_guid == guid)
				return (vd);
		}
	}

	return (NULL);
}

void
spa_upgrade(spa_t *spa, uint64_t version)
{
	spa_config_enter(spa, RW_WRITER, FTAG);

	/*
	 * This should only be called for a non-faulted pool, and since a
	 * future version would result in an unopenable pool, this shouldn't be
	 * possible.
	 */
	ASSERT(spa->spa_uberblock.ub_version <= SPA_VERSION);
	ASSERT(version >= spa->spa_uberblock.ub_version);

	spa->spa_uberblock.ub_version = version;
	vdev_config_dirty(spa->spa_root_vdev);

	spa_config_exit(spa, FTAG);

	txg_wait_synced(spa_get_dsl(spa), 0);
}

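/*
 * Check whether the given guid is one of this pool's hot spares,
 * including any spares that are still in the process of being added
 * (the sav_pending list).
 */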
boolean_t
spa_has_spare(spa_t *spa, uint64_t guid)
{
	int i;
	uint64_t spareguid;
	spa_aux_vdev_t *sav = &spa->spa_spares;

	for (i = 0; i < sav->sav_count; i++)
		if (sav->sav_vdevs[i]->vdev_guid == guid)
			return (B_TRUE);

	for (i = 0; i < sav->sav_npending; i++) {
		if (nvlist_lookup_uint64(sav->sav_pending[i], ZPOOL_CONFIG_GUID,
		    &spareguid) == 0 && spareguid == guid)
			return (B_TRUE);
	}

	return (B_FALSE);
}

/*
 * Check if a pool has an active shared spare device.
 * Note: the reference count of an active spare is 2, once as a spare and
 * once as a replacement.
 */
static boolean_t
spa_has_active_shared_spare(spa_t *spa)
{
	int i, refcnt;
	uint64_t pool;
	spa_aux_vdev_t *sav = &spa->spa_spares;

	for (i = 0; i < sav->sav_count; i++) {
		if (spa_spare_exists(sav->sav_vdevs[i]->vdev_guid, &pool,
		    &refcnt) && pool != 0ULL && pool == spa_guid(spa) &&
		    refcnt > 2)
			return (B_TRUE);
	}

	return (B_FALSE);
}

/*
 * Post a sysevent corresponding to the given event. The 'name' must be one of
 * the event definitions in sys/sysevent/eventdefs.h. The payload will be
 * filled in from the spa and (optionally) the vdev. This doesn't do anything
 * in the userland libzpool, as we don't want consumers to misinterpret ztest
 * or zdb as real changes.
 */
void
spa_event_notify(spa_t *spa, vdev_t *vd, const char *name)
{
#ifdef _KERNEL
	sysevent_t *ev;
	sysevent_attr_list_t *attr = NULL;
	sysevent_value_t value;
	sysevent_id_t eid;

	ev = sysevent_alloc(EC_ZFS, (char *)name, SUNW_KERN_PUB "zfs",
	    SE_SLEEP);

	value.value_type = SE_DATA_TYPE_STRING;
	value.value.sv_string = spa_name(spa);
	if (sysevent_add_attr(&attr, ZFS_EV_POOL_NAME, &value, SE_SLEEP) != 0)
		goto done;

	value.value_type = SE_DATA_TYPE_UINT64;
	value.value.sv_uint64 = spa_guid(spa);
	if (sysevent_add_attr(&attr, ZFS_EV_POOL_GUID, &value, SE_SLEEP) != 0)
		goto done;

	if (vd) {
		value.value_type = SE_DATA_TYPE_UINT64;
		value.value.sv_uint64 = vd->vdev_guid;
		if (sysevent_add_attr(&attr, ZFS_EV_VDEV_GUID, &value,
		    SE_SLEEP) != 0)
			goto done;

		if (vd->vdev_path) {
			value.value_type = SE_DATA_TYPE_STRING;
			value.value.sv_string = vd->vdev_path;
			if (sysevent_add_attr(&attr, ZFS_EV_VDEV_PATH,
			    &value, SE_SLEEP) != 0)
				goto done;
		}
	}

	if (sysevent_attach_attributes(ev, attr) != 0)
		goto done;
	attr = NULL;

	(void) log_sysevent(ev, SE_SLEEP, &eid);

done:
	if (attr)
		sysevent_free_attr(attr);
	sysevent_free(ev);
#endif
}