/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * This file contains all the routines used when modifying on-disk SPA state.
 * This includes opening, importing, destroying, exporting a pool, and syncing a
 * pool.
 */

#include <sys/zfs_context.h>
#include <sys/fm/fs/zfs.h>
#include <sys/spa_impl.h>
#include <sys/zio.h>
#include <sys/zio_checksum.h>
#include <sys/zio_compress.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/zap.h>
#include <sys/zil.h>
#include <sys/vdev_impl.h>
#include <sys/metaslab.h>
#include <sys/uberblock_impl.h>
#include <sys/txg.h>
#include <sys/avl.h>
#include <sys/dmu_traverse.h>
#include <sys/dmu_objset.h>
#include <sys/unique.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_synctask.h>
#include <sys/fs/zfs.h>
#include <sys/arc.h>
#include <sys/callb.h>
#include <sys/systeminfo.h>
#include <sys/sunddi.h>
#include <sys/spa_boot.h>

#include "zfs_prop.h"
#include "zfs_comutil.h"

int zio_taskq_threads = 8;

static void spa_sync_props(void *arg1, void *arg2, cred_t *cr, dmu_tx_t *tx);
static boolean_t spa_has_active_shared_spare(spa_t *spa);

/*
 * ==========================================================================
 * SPA properties routines
 * ==========================================================================
 */

/*
 * Add a (source=src, propname=propval) list to an nvlist.
 */
static void
spa_prop_add_list(nvlist_t *nvl, zpool_prop_t prop, char *strval,
    uint64_t intval, zprop_source_t src)
{
	const char *propname = zpool_prop_to_name(prop);
	nvlist_t *propval;

	VERIFY(nvlist_alloc(&propval, NV_UNIQUE_NAME, KM_SLEEP) == 0);
	VERIFY(nvlist_add_uint64(propval, ZPROP_SOURCE, src) == 0);

	if (strval != NULL)
		VERIFY(nvlist_add_string(propval, ZPROP_VALUE, strval) == 0);
	else
		VERIFY(nvlist_add_uint64(propval, ZPROP_VALUE, intval) == 0);

	VERIFY(nvlist_add_nvlist(nvl, propname, propval) == 0);
	nvlist_free(propval);
}

/*
 * Get property values from the spa configuration.
 */
static void
spa_prop_get_config(spa_t *spa, nvlist_t **nvp)
{
	uint64_t size = spa_get_space(spa);
	uint64_t used = spa_get_alloc(spa);
	uint64_t cap, version;
	zprop_source_t src = ZPROP_SRC_NONE;
	spa_config_dirent_t *dp;

	/*
	 * readonly properties
	 */
	spa_prop_add_list(*nvp, ZPOOL_PROP_NAME, spa->spa_name, 0, src);
	spa_prop_add_list(*nvp, ZPOOL_PROP_SIZE, NULL, size, src);
	spa_prop_add_list(*nvp, ZPOOL_PROP_USED, NULL, used, src);
	spa_prop_add_list(*nvp, ZPOOL_PROP_AVAILABLE, NULL, size - used, src);

	cap = (size == 0) ? 0 : (used * 100 / size);
	spa_prop_add_list(*nvp, ZPOOL_PROP_CAPACITY, NULL, cap, src);

	spa_prop_add_list(*nvp, ZPOOL_PROP_GUID, NULL, spa_guid(spa), src);
	spa_prop_add_list(*nvp, ZPOOL_PROP_HEALTH, NULL,
	    spa->spa_root_vdev->vdev_state, src);

	/*
	 * settable properties that are not stored in the pool property object.
	 */
	version = spa_version(spa);
	if (version == zpool_prop_default_numeric(ZPOOL_PROP_VERSION))
		src = ZPROP_SRC_DEFAULT;
	else
		src = ZPROP_SRC_LOCAL;
	spa_prop_add_list(*nvp, ZPOOL_PROP_VERSION, NULL, version, src);

	if (spa->spa_root != NULL)
		spa_prop_add_list(*nvp, ZPOOL_PROP_ALTROOT, spa->spa_root,
		    0, ZPROP_SRC_LOCAL);

	if ((dp = list_head(&spa->spa_config_list)) != NULL) {
		if (dp->scd_path == NULL) {
			spa_prop_add_list(*nvp, ZPOOL_PROP_CACHEFILE,
			    "none", 0, ZPROP_SRC_LOCAL);
		} else if (strcmp(dp->scd_path, spa_config_path) != 0) {
			spa_prop_add_list(*nvp, ZPOOL_PROP_CACHEFILE,
			    dp->scd_path, 0, ZPROP_SRC_LOCAL);
		}
	}
}

/*
 * Get zpool property values.
 */
int
spa_prop_get(spa_t *spa, nvlist_t **nvp)
{
	zap_cursor_t zc;
	zap_attribute_t za;
	objset_t *mos = spa->spa_meta_objset;
	int err;

	VERIFY(nvlist_alloc(nvp, NV_UNIQUE_NAME, KM_SLEEP) == 0);

	/*
	 * Get properties from the spa config.
	 */
	spa_prop_get_config(spa, nvp);

	mutex_enter(&spa->spa_props_lock);
	/* If no pool property object, no more prop to get. */
	if (spa->spa_pool_props_object == 0) {
		mutex_exit(&spa->spa_props_lock);
		return (0);
	}

	/*
	 * Get properties from the MOS pool property object.
	 */
	for (zap_cursor_init(&zc, mos, spa->spa_pool_props_object);
	    (err = zap_cursor_retrieve(&zc, &za)) == 0;
	    zap_cursor_advance(&zc)) {
		uint64_t intval = 0;
		char *strval = NULL;
		zprop_source_t src = ZPROP_SRC_DEFAULT;
		zpool_prop_t prop;

		if ((prop = zpool_name_to_prop(za.za_name)) == ZPROP_INVAL)
			continue;

		switch (za.za_integer_length) {
		case 8:
			/* integer property */
			if (za.za_first_integer !=
			    zpool_prop_default_numeric(prop))
				src = ZPROP_SRC_LOCAL;

			if (prop == ZPOOL_PROP_BOOTFS) {
				dsl_pool_t *dp;
				dsl_dataset_t *ds = NULL;

				dp = spa_get_dsl(spa);
				rw_enter(&dp->dp_config_rwlock, RW_READER);
				if (err = dsl_dataset_hold_obj(dp,
				    za.za_first_integer, FTAG, &ds)) {
					rw_exit(&dp->dp_config_rwlock);
					break;
				}

				strval = kmem_alloc(
				    MAXNAMELEN + strlen(MOS_DIR_NAME) + 1,
				    KM_SLEEP);
				dsl_dataset_name(ds, strval);
				dsl_dataset_rele(ds, FTAG);
				rw_exit(&dp->dp_config_rwlock);
			} else {
				strval = NULL;
				intval = za.za_first_integer;
			}

			spa_prop_add_list(*nvp, prop, strval, intval, src);

			if (strval != NULL)
				kmem_free(strval,
				    MAXNAMELEN + strlen(MOS_DIR_NAME) + 1);

			break;

		case 1:
			/* string property */
			strval = kmem_alloc(za.za_num_integers, KM_SLEEP);
			err = zap_lookup(mos, spa->spa_pool_props_object,
			    za.za_name, 1, za.za_num_integers, strval);
			if (err) {
				kmem_free(strval, za.za_num_integers);
				break;
			}
			spa_prop_add_list(*nvp, prop, strval, 0, src);
			kmem_free(strval, za.za_num_integers);
			break;

		default:
			break;
		}
	}
	zap_cursor_fini(&zc);
	mutex_exit(&spa->spa_props_lock);
out:
	if (err && err != ENOENT) {
		nvlist_free(*nvp);
		*nvp = NULL;
		return (err);
	}

	return (0);
}

/*
 * Validate the given pool properties nvlist and modify the list
 * for the property values to be set.
 */
static int
spa_prop_validate(spa_t *spa, nvlist_t *props)
{
	nvpair_t *elem;
	int error = 0, reset_bootfs = 0;
	uint64_t objnum;

	elem = NULL;
	while ((elem = nvlist_next_nvpair(props, elem)) != NULL) {
		zpool_prop_t prop;
		char *propname, *strval;
		uint64_t intval;
		objset_t *os;
		char *slash;

		propname = nvpair_name(elem);

		if ((prop = zpool_name_to_prop(propname)) == ZPROP_INVAL)
			return (EINVAL);

		switch (prop) {
		case ZPOOL_PROP_VERSION:
			error = nvpair_value_uint64(elem, &intval);
			if (!error &&
			    (intval < spa_version(spa) || intval > SPA_VERSION))
				error = EINVAL;
			break;

		case ZPOOL_PROP_DELEGATION:
		case ZPOOL_PROP_AUTOREPLACE:
			error = nvpair_value_uint64(elem, &intval);
			if (!error && intval > 1)
				error = EINVAL;
			break;

		case ZPOOL_PROP_BOOTFS:
			if (spa_version(spa) < SPA_VERSION_BOOTFS) {
				error = ENOTSUP;
				break;
			}

			/*
			 * Make sure the vdev config is bootable
			 */
			if (!vdev_is_bootable(spa->spa_root_vdev)) {
				error = ENOTSUP;
				break;
			}

			reset_bootfs = 1;

			error = nvpair_value_string(elem, &strval);

			if (!error) {
				uint64_t compress;

				if (strval == NULL || strval[0] == '\0') {
					objnum = zpool_prop_default_numeric(
					    ZPOOL_PROP_BOOTFS);
					break;
				}

				if (error = dmu_objset_open(strval, DMU_OST_ZFS,
				    DS_MODE_USER | DS_MODE_READONLY, &os))
					break;

				/* We don't support gzip bootable datasets */
				if ((error = dsl_prop_get_integer(strval,
				    zfs_prop_to_name(ZFS_PROP_COMPRESSION),
				    &compress, NULL)) == 0 &&
				    !BOOTFS_COMPRESS_VALID(compress)) {
					error = ENOTSUP;
				} else {
					objnum = dmu_objset_id(os);
				}
				dmu_objset_close(os);
			}
			break;

		case ZPOOL_PROP_FAILUREMODE:
			error = nvpair_value_uint64(elem, &intval);
			if (!error && (intval < ZIO_FAILURE_MODE_WAIT ||
			    intval > ZIO_FAILURE_MODE_PANIC))
				error = EINVAL;

			/*
			 * This is a special case which only occurs when
			 * the pool has completely failed. This allows
			 * the user to change the in-core failmode property
			 * without syncing it out to disk (I/Os might
			 * currently be blocked). We do this by returning
			 * EIO to the caller (spa_prop_set) to trick it
			 * into thinking we encountered a property validation
			 * error.
			 */
			if (!error && spa_state(spa) == POOL_STATE_IO_FAILURE) {
				spa->spa_failmode = intval;
				error = EIO;
			}
			break;

		case ZPOOL_PROP_CACHEFILE:
			if ((error = nvpair_value_string(elem, &strval)) != 0)
				break;

			if (strval[0] == '\0')
				break;

			if (strcmp(strval, "none") == 0)
				break;

			if (strval[0] != '/') {
				error = EINVAL;
				break;
			}

			slash = strrchr(strval, '/');
			ASSERT(slash != NULL);

			if (slash[1] == '\0' || strcmp(slash, "/.") == 0 ||
			    strcmp(slash, "/..") == 0)
				error = EINVAL;
			break;
		}

		if (error)
			break;
	}

	if (!error && reset_bootfs) {
		error = nvlist_remove(props,
		    zpool_prop_to_name(ZPOOL_PROP_BOOTFS), DATA_TYPE_STRING);

		if (!error) {
			error = nvlist_add_uint64(props,
			    zpool_prop_to_name(ZPOOL_PROP_BOOTFS), objnum);
		}
	}

	return (error);
}

int
spa_prop_set(spa_t *spa, nvlist_t *nvp)
{
	int error;

	if ((error = spa_prop_validate(spa, nvp)) != 0)
		return (error);

	return (dsl_sync_task_do(spa_get_dsl(spa), NULL, spa_sync_props,
	    spa, nvp, 3));
}

/*
 * If the bootfs property value is dsobj, clear it.
 */
void
spa_prop_clear_bootfs(spa_t *spa, uint64_t dsobj, dmu_tx_t *tx)
{
	if (spa->spa_bootfs == dsobj && spa->spa_pool_props_object != 0) {
		VERIFY(zap_remove(spa->spa_meta_objset,
		    spa->spa_pool_props_object,
		    zpool_prop_to_name(ZPOOL_PROP_BOOTFS), tx) == 0);
		spa->spa_bootfs = 0;
	}
}

/*
 * ==========================================================================
 * SPA state manipulation (open/create/destroy/import/export)
 * ==========================================================================
 */

static int
spa_error_entry_compare(const void *a, const void *b)
{
	spa_error_entry_t *sa = (spa_error_entry_t *)a;
	spa_error_entry_t *sb = (spa_error_entry_t *)b;
	int ret;

	ret = bcmp(&sa->se_bookmark, &sb->se_bookmark,
	    sizeof (zbookmark_t));

	if (ret < 0)
		return (-1);
	else if (ret > 0)
		return (1);
	else
		return (0);
}

/*
 * Utility function which retrieves copies of the current logs and
 * re-initializes them in the process.
 */
void
spa_get_errlists(spa_t *spa, avl_tree_t *last, avl_tree_t *scrub)
{
	ASSERT(MUTEX_HELD(&spa->spa_errlist_lock));

	bcopy(&spa->spa_errlist_last, last, sizeof (avl_tree_t));
	bcopy(&spa->spa_errlist_scrub, scrub, sizeof (avl_tree_t));

	avl_create(&spa->spa_errlist_scrub,
	    spa_error_entry_compare, sizeof (spa_error_entry_t),
	    offsetof(spa_error_entry_t, se_avl));
	avl_create(&spa->spa_errlist_last,
	    spa_error_entry_compare, sizeof (spa_error_entry_t),
	    offsetof(spa_error_entry_t, se_avl));
}

/*
 * Activate an uninitialized pool.
 */
static void
spa_activate(spa_t *spa)
{
	int t;

	ASSERT(spa->spa_state == POOL_STATE_UNINITIALIZED);

	spa->spa_state = POOL_STATE_ACTIVE;

	spa->spa_normal_class = metaslab_class_create();
	spa->spa_log_class = metaslab_class_create();

	for (t = 0; t < ZIO_TYPES; t++) {
		spa->spa_zio_issue_taskq[t] = taskq_create("spa_zio_issue",
		    zio_taskq_threads, maxclsyspri, 50, INT_MAX,
		    TASKQ_PREPOPULATE);
		spa->spa_zio_intr_taskq[t] = taskq_create("spa_zio_intr",
		    zio_taskq_threads, maxclsyspri, 50, INT_MAX,
		    TASKQ_PREPOPULATE);
	}

	list_create(&spa->spa_dirty_list, sizeof (vdev_t),
	    offsetof(vdev_t, vdev_dirty_node));
	list_create(&spa->spa_zio_list, sizeof (zio_t),
	    offsetof(zio_t, zio_link_node));

	txg_list_create(&spa->spa_vdev_txg_list,
	    offsetof(struct vdev, vdev_txg_node));

	avl_create(&spa->spa_errlist_scrub,
	    spa_error_entry_compare, sizeof (spa_error_entry_t),
	    offsetof(spa_error_entry_t, se_avl));
	avl_create(&spa->spa_errlist_last,
	    spa_error_entry_compare, sizeof (spa_error_entry_t),
	    offsetof(spa_error_entry_t, se_avl));
}

/*
 * Opposite of spa_activate().
 */
static void
spa_deactivate(spa_t *spa)
{
	int t;

	ASSERT(spa->spa_sync_on == B_FALSE);
	ASSERT(spa->spa_dsl_pool == NULL);
	ASSERT(spa->spa_root_vdev == NULL);

	ASSERT(spa->spa_state != POOL_STATE_UNINITIALIZED);

	txg_list_destroy(&spa->spa_vdev_txg_list);

	list_destroy(&spa->spa_dirty_list);
	list_destroy(&spa->spa_zio_list);

	for (t = 0; t < ZIO_TYPES; t++) {
		taskq_destroy(spa->spa_zio_issue_taskq[t]);
		taskq_destroy(spa->spa_zio_intr_taskq[t]);
		spa->spa_zio_issue_taskq[t] = NULL;
		spa->spa_zio_intr_taskq[t] = NULL;
	}

	metaslab_class_destroy(spa->spa_normal_class);
	spa->spa_normal_class = NULL;

	metaslab_class_destroy(spa->spa_log_class);
	spa->spa_log_class = NULL;

	/*
	 * If this was part of an import or the open otherwise failed, we may
	 * still have errors left in the queues.  Empty them just in case.
	 */
	spa_errlog_drain(spa);

	avl_destroy(&spa->spa_errlist_scrub);
	avl_destroy(&spa->spa_errlist_last);

	spa->spa_state = POOL_STATE_UNINITIALIZED;
}

/*
 * Verify a pool configuration, and construct the vdev tree appropriately.  This
 * will create all the necessary vdevs in the appropriate layout, with each vdev
 * in the CLOSED state.  This will prep the pool before open/creation/import.
 * All vdev validation is done by the vdev_alloc() routine.
 */
static int
spa_config_parse(spa_t *spa, vdev_t **vdp, nvlist_t *nv, vdev_t *parent,
    uint_t id, int atype)
{
	nvlist_t **child;
	uint_t c, children;
	int error;

	if ((error = vdev_alloc(spa, vdp, nv, parent, id, atype)) != 0)
		return (error);

	if ((*vdp)->vdev_ops->vdev_op_leaf)
		return (0);

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) != 0) {
		vdev_free(*vdp);
		*vdp = NULL;
		return (EINVAL);
	}

	for (c = 0; c < children; c++) {
		vdev_t *vd;
		if ((error = spa_config_parse(spa, &vd, child[c], *vdp, c,
		    atype)) != 0) {
			vdev_free(*vdp);
			*vdp = NULL;
			return (error);
		}
	}

	ASSERT(*vdp != NULL);

	return (0);
}

/*
 * Opposite of spa_load().
 */
static void
spa_unload(spa_t *spa)
{
	int i;

	/*
	 * Stop async tasks.
	 */
	spa_async_suspend(spa);

	/*
	 * Stop syncing.
	 */
	if (spa->spa_sync_on) {
		txg_sync_stop(spa->spa_dsl_pool);
		spa->spa_sync_on = B_FALSE;
	}

	/*
	 * Wait for any outstanding prefetch I/O to complete.
	 */
	spa_config_enter(spa, RW_WRITER, FTAG);
	spa_config_exit(spa, FTAG);

	/*
	 * Drop and purge level 2 cache
	 */
	spa_l2cache_drop(spa);

	/*
	 * Close the dsl pool.
	 */
	if (spa->spa_dsl_pool) {
		dsl_pool_close(spa->spa_dsl_pool);
		spa->spa_dsl_pool = NULL;
	}

	/*
	 * Close all vdevs.
	 */
	if (spa->spa_root_vdev)
		vdev_free(spa->spa_root_vdev);
	ASSERT(spa->spa_root_vdev == NULL);

	for (i = 0; i < spa->spa_spares.sav_count; i++)
		vdev_free(spa->spa_spares.sav_vdevs[i]);
	if (spa->spa_spares.sav_vdevs) {
		kmem_free(spa->spa_spares.sav_vdevs,
		    spa->spa_spares.sav_count * sizeof (void *));
		spa->spa_spares.sav_vdevs = NULL;
	}
	if (spa->spa_spares.sav_config) {
		nvlist_free(spa->spa_spares.sav_config);
		spa->spa_spares.sav_config = NULL;
	}

	for (i = 0; i < spa->spa_l2cache.sav_count; i++)
		vdev_free(spa->spa_l2cache.sav_vdevs[i]);
	if (spa->spa_l2cache.sav_vdevs) {
		kmem_free(spa->spa_l2cache.sav_vdevs,
		    spa->spa_l2cache.sav_count * sizeof (void *));
		spa->spa_l2cache.sav_vdevs = NULL;
	}
	if (spa->spa_l2cache.sav_config) {
		nvlist_free(spa->spa_l2cache.sav_config);
		spa->spa_l2cache.sav_config = NULL;
	}

	spa->spa_async_suspended = 0;
}

/*
 * Load (or re-load) the current list of vdevs describing the active spares for
 * this pool.  When this is called, we have some form of basic information in
 * 'spa_spares.sav_config'.  We parse this into vdevs, try to open them, and
 * then re-generate a more complete list including status information.
 */
static void
spa_load_spares(spa_t *spa)
{
	nvlist_t **spares;
	uint_t nspares;
	int i;
	vdev_t *vd, *tvd;

	/*
	 * First, close and free any existing spare vdevs.
	 */
	for (i = 0; i < spa->spa_spares.sav_count; i++) {
		vd = spa->spa_spares.sav_vdevs[i];

		/* Undo the call to spa_activate() below */
		if ((tvd = spa_lookup_by_guid(spa, vd->vdev_guid,
		    B_FALSE)) != NULL && tvd->vdev_isspare)
			spa_spare_remove(tvd);
		vdev_close(vd);
		vdev_free(vd);
	}

	if (spa->spa_spares.sav_vdevs)
		kmem_free(spa->spa_spares.sav_vdevs,
		    spa->spa_spares.sav_count * sizeof (void *));

	if (spa->spa_spares.sav_config == NULL)
		nspares = 0;
	else
		VERIFY(nvlist_lookup_nvlist_array(spa->spa_spares.sav_config,
		    ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0);

	spa->spa_spares.sav_count = (int)nspares;
	spa->spa_spares.sav_vdevs = NULL;

	if (nspares == 0)
		return;

	/*
	 * Construct the array of vdevs, opening them to get status in the
	 * process.  For each spare, there are potentially two different vdev_t
	 * structures associated with it: one in the list of spares (used only
	 * for basic validation purposes) and one in the active vdev
	 * configuration (if it's spared in).  During this phase we open and
	 * validate each vdev on the spare list.  If the vdev also exists in the
	 * active configuration, then we also mark this vdev as an active spare.
	 */
	spa->spa_spares.sav_vdevs = kmem_alloc(nspares * sizeof (void *),
	    KM_SLEEP);
	for (i = 0; i < spa->spa_spares.sav_count; i++) {
		VERIFY(spa_config_parse(spa, &vd, spares[i], NULL, 0,
		    VDEV_ALLOC_SPARE) == 0);
		ASSERT(vd != NULL);

		spa->spa_spares.sav_vdevs[i] = vd;

		if ((tvd = spa_lookup_by_guid(spa, vd->vdev_guid,
		    B_FALSE)) != NULL) {
			if (!tvd->vdev_isspare)
				spa_spare_add(tvd);

			/*
			 * We only mark the spare active if we were successfully
			 * able to load the vdev.  Otherwise, importing a pool
			 * with a bad active spare would result in strange
			 * behavior, because multiple pools would think the
			 * spare is actively in use.
			 *
			 * There is a vulnerability here to an equally bizarre
			 * circumstance, where a dead active spare is later
			 * brought back to life (onlined or otherwise).  Given
			 * the rarity of this scenario, and the extra complexity
			 * it adds, we ignore the possibility.
			 */
			if (!vdev_is_dead(tvd))
				spa_spare_activate(tvd);
		}

		if (vdev_open(vd) != 0)
			continue;

		vd->vdev_top = vd;
		if (vdev_validate_aux(vd) == 0)
			spa_spare_add(vd);
	}

	/*
	 * Recompute the stashed list of spares, with status information
	 * this time.
	 */
	VERIFY(nvlist_remove(spa->spa_spares.sav_config, ZPOOL_CONFIG_SPARES,
	    DATA_TYPE_NVLIST_ARRAY) == 0);

	spares = kmem_alloc(spa->spa_spares.sav_count * sizeof (void *),
	    KM_SLEEP);
	for (i = 0; i < spa->spa_spares.sav_count; i++)
		spares[i] = vdev_config_generate(spa,
		    spa->spa_spares.sav_vdevs[i], B_TRUE, B_TRUE, B_FALSE);
	VERIFY(nvlist_add_nvlist_array(spa->spa_spares.sav_config,
	    ZPOOL_CONFIG_SPARES, spares, spa->spa_spares.sav_count) == 0);
	for (i = 0; i < spa->spa_spares.sav_count; i++)
		nvlist_free(spares[i]);
	kmem_free(spares, spa->spa_spares.sav_count * sizeof (void *));
}

/*
 * Load (or re-load) the current list of vdevs describing the active l2cache for
 * this pool.  When this is called, we have some form of basic information in
 * 'spa_l2cache.sav_config'.  We parse this into vdevs, try to open them, and
 * then re-generate a more complete list including status information.
 * Devices which are already active have their details maintained, and are
 * not re-opened.
 */
static void
spa_load_l2cache(spa_t *spa)
{
	nvlist_t **l2cache;
	uint_t nl2cache;
	int i, j, oldnvdevs;
	uint64_t guid, size;
	vdev_t *vd, **oldvdevs, **newvdevs;
	spa_aux_vdev_t *sav = &spa->spa_l2cache;

	if (sav->sav_config != NULL) {
		VERIFY(nvlist_lookup_nvlist_array(sav->sav_config,
		    ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0);
		newvdevs = kmem_alloc(nl2cache * sizeof (void *), KM_SLEEP);
	} else {
		nl2cache = 0;
	}

	oldvdevs = sav->sav_vdevs;
	oldnvdevs = sav->sav_count;
	sav->sav_vdevs = NULL;
	sav->sav_count = 0;

	/*
	 * Process new nvlist of vdevs.
	 */
	for (i = 0; i < nl2cache; i++) {
		VERIFY(nvlist_lookup_uint64(l2cache[i], ZPOOL_CONFIG_GUID,
		    &guid) == 0);

		newvdevs[i] = NULL;
		for (j = 0; j < oldnvdevs; j++) {
			vd = oldvdevs[j];
			if (vd != NULL && guid == vd->vdev_guid) {
				/*
				 * Retain previous vdev for add/remove ops.
				 */
				newvdevs[i] = vd;
				oldvdevs[j] = NULL;
				break;
			}
		}

		if (newvdevs[i] == NULL) {
			/*
			 * Create new vdev
			 */
			VERIFY(spa_config_parse(spa, &vd, l2cache[i], NULL, 0,
			    VDEV_ALLOC_L2CACHE) == 0);
			ASSERT(vd != NULL);
			newvdevs[i] = vd;

			/*
			 * Commit this vdev as an l2cache device,
			 * even if it fails to open.
			 */
			spa_l2cache_add(vd);

			vd->vdev_top = vd;
			vd->vdev_aux = sav;

			spa_l2cache_activate(vd);

			if (vdev_open(vd) != 0)
				continue;

			(void) vdev_validate_aux(vd);

			if (!vdev_is_dead(vd)) {
				size = vdev_get_rsize(vd);
				l2arc_add_vdev(spa, vd,
				    VDEV_LABEL_START_SIZE,
				    size - VDEV_LABEL_START_SIZE);
			}
		}
	}

	/*
	 * Purge vdevs that were dropped
	 */
	for (i = 0; i < oldnvdevs; i++) {
		uint64_t pool;

		vd = oldvdevs[i];
		if (vd != NULL) {
			if (spa_mode & FWRITE &&
			    spa_l2cache_exists(vd->vdev_guid, &pool) &&
			    pool != 0ULL &&
			    l2arc_vdev_present(vd)) {
				l2arc_remove_vdev(vd);
			}
			(void) vdev_close(vd);
			spa_l2cache_remove(vd);
		}
	}

	if (oldvdevs)
		kmem_free(oldvdevs, oldnvdevs * sizeof (void *));

	if (sav->sav_config == NULL)
		goto out;

	sav->sav_vdevs = newvdevs;
	sav->sav_count = (int)nl2cache;

	/*
	 * Recompute the stashed list of l2cache devices, with status
	 * information this time.
	 */
	VERIFY(nvlist_remove(sav->sav_config, ZPOOL_CONFIG_L2CACHE,
	    DATA_TYPE_NVLIST_ARRAY) == 0);

	l2cache = kmem_alloc(sav->sav_count * sizeof (void *), KM_SLEEP);
	for (i = 0; i < sav->sav_count; i++)
		l2cache[i] = vdev_config_generate(spa,
		    sav->sav_vdevs[i], B_TRUE, B_FALSE, B_TRUE);
	VERIFY(nvlist_add_nvlist_array(sav->sav_config,
	    ZPOOL_CONFIG_L2CACHE, l2cache, sav->sav_count) == 0);
out:
	for (i = 0; i < sav->sav_count; i++)
		nvlist_free(l2cache[i]);
	if (sav->sav_count)
		kmem_free(l2cache, sav->sav_count * sizeof (void *));
}

static int
load_nvlist(spa_t *spa, uint64_t obj, nvlist_t **value)
{
	dmu_buf_t *db;
	char *packed = NULL;
	size_t nvsize = 0;
	int error;
	*value = NULL;

	VERIFY(0 == dmu_bonus_hold(spa->spa_meta_objset, obj, FTAG, &db));
	nvsize = *(uint64_t *)db->db_data;
	dmu_buf_rele(db, FTAG);

	packed = kmem_alloc(nvsize, KM_SLEEP);
	error = dmu_read(spa->spa_meta_objset, obj, 0, nvsize, packed);
	if (error == 0)
		error = nvlist_unpack(packed, nvsize, value, 0);
	kmem_free(packed, nvsize);

	return (error);
}

/*
 * Checks to see if the given vdev could not be opened, in which case we post a
 * sysevent to notify the autoreplace code that the device has been removed.
 */
static void
spa_check_removed(vdev_t *vd)
{
	int c;

	for (c = 0; c < vd->vdev_children; c++)
		spa_check_removed(vd->vdev_child[c]);

	if (vd->vdev_ops->vdev_op_leaf && vdev_is_dead(vd)) {
		zfs_post_autoreplace(vd->vdev_spa, vd);
		spa_event_notify(vd->vdev_spa, vd, ESC_ZFS_VDEV_CHECK);
	}
}

/*
 * Load an existing storage pool, using the pool's builtin spa_config as a
 * source of configuration information.
 */
static int
spa_load(spa_t *spa, nvlist_t *config, spa_load_state_t state, int mosconfig)
{
	int error = 0;
	nvlist_t *nvroot = NULL;
	vdev_t *rvd;
	uberblock_t *ub = &spa->spa_uberblock;
	uint64_t config_cache_txg = spa->spa_config_txg;
	uint64_t pool_guid;
	uint64_t version;
	zio_t *zio;
	uint64_t autoreplace = 0;

	spa->spa_load_state = state;

	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &nvroot) ||
	    nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID, &pool_guid)) {
		error = EINVAL;
		goto out;
	}

	/*
	 * Versioning wasn't explicitly added to the label until later, so if
	 * it's not present treat it as the initial version.
	 */
	if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION, &version) != 0)
		version = SPA_VERSION_INITIAL;

	(void) nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_TXG,
	    &spa->spa_config_txg);

	if ((state == SPA_LOAD_IMPORT || state == SPA_LOAD_TRYIMPORT) &&
	    spa_guid_exists(pool_guid, 0)) {
		error = EEXIST;
		goto out;
	}

	spa->spa_load_guid = pool_guid;

	/*
	 * Parse the configuration into a vdev tree.  We explicitly set the
	 * value that will be returned by spa_version() since parsing the
	 * configuration requires knowing the version number.
	 */
	spa_config_enter(spa, RW_WRITER, FTAG);
	spa->spa_ubsync.ub_version = version;
	error = spa_config_parse(spa, &rvd, nvroot, NULL, 0, VDEV_ALLOC_LOAD);
	spa_config_exit(spa, FTAG);

	if (error != 0)
		goto out;

	ASSERT(spa->spa_root_vdev == rvd);
	ASSERT(spa_guid(spa) == pool_guid);

	/*
	 * Try to open all vdevs, loading each label in the process.
	 */
	error = vdev_open(rvd);
	if (error != 0)
		goto out;

	/*
	 * Validate the labels for all leaf vdevs.  We need to grab the config
	 * lock because all label I/O is done with the ZIO_FLAG_CONFIG_HELD
	 * flag.
	 */
	spa_config_enter(spa, RW_READER, FTAG);
	error = vdev_validate(rvd);
	spa_config_exit(spa, FTAG);

	if (error != 0)
		goto out;

	if (rvd->vdev_state <= VDEV_STATE_CANT_OPEN) {
		error = ENXIO;
		goto out;
	}

	/*
	 * Find the best uberblock.
	 */
	bzero(ub, sizeof (uberblock_t));

	zio = zio_root(spa, NULL, NULL,
	    ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE);
	vdev_uberblock_load(zio, rvd, ub);
	error = zio_wait(zio);

	/*
	 * If we weren't able to find a single valid uberblock, return failure.
	 */
	if (ub->ub_txg == 0) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_CORRUPT_DATA);
		error = ENXIO;
		goto out;
	}

	/*
	 * If the pool is newer than the code, we can't open it.
	 */
	if (ub->ub_version > SPA_VERSION) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_VERSION_NEWER);
		error = ENOTSUP;
		goto out;
	}

	/*
	 * If the vdev guid sum doesn't match the uberblock, we have an
	 * incomplete configuration.
	 */
	if (rvd->vdev_guid_sum != ub->ub_guid_sum && mosconfig) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_BAD_GUID_SUM);
		error = ENXIO;
		goto out;
	}

	/*
	 * Initialize internal SPA structures.
	 */
	spa->spa_state = POOL_STATE_ACTIVE;
	spa->spa_ubsync = spa->spa_uberblock;
	spa->spa_first_txg = spa_last_synced_txg(spa) + 1;
	error = dsl_pool_open(spa, spa->spa_first_txg, &spa->spa_dsl_pool);
	if (error) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_CORRUPT_DATA);
		goto out;
	}
	spa->spa_meta_objset = spa->spa_dsl_pool->dp_meta_objset;

	if (zap_lookup(spa->spa_meta_objset,
	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_CONFIG,
	    sizeof (uint64_t), 1, &spa->spa_config_object) != 0) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_CORRUPT_DATA);
		error = EIO;
		goto out;
	}

	if (!mosconfig) {
		nvlist_t *newconfig;
		uint64_t hostid;

		if (load_nvlist(spa, spa->spa_config_object, &newconfig) != 0) {
			vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
			    VDEV_AUX_CORRUPT_DATA);
			error = EIO;
			goto out;
		}

		if (nvlist_lookup_uint64(newconfig, ZPOOL_CONFIG_HOSTID,
		    &hostid) == 0) {
			char *hostname;
			unsigned long myhostid = 0;

			VERIFY(nvlist_lookup_string(newconfig,
			    ZPOOL_CONFIG_HOSTNAME, &hostname) == 0);

			(void) ddi_strtoul(hw_serial, NULL, 10, &myhostid);
			if (hostid != 0 && myhostid != 0 &&
			    (unsigned long)hostid != myhostid) {
				cmn_err(CE_WARN, "pool '%s' could not be "
				    "loaded as it was last accessed by "
				    "another system (host: %s hostid: 0x%lx). "
				    "See: http://www.sun.com/msg/ZFS-8000-EY",
				    spa->spa_name, hostname,
				    (unsigned long)hostid);
				error = EBADF;
				goto out;
			}
		}

		spa_config_set(spa, newconfig);
		spa_unload(spa);
		spa_deactivate(spa);
		spa_activate(spa);

		return (spa_load(spa, newconfig, state, B_TRUE));
	}

	if (zap_lookup(spa->spa_meta_objset,
	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_SYNC_BPLIST,
	    sizeof (uint64_t), 1, &spa->spa_sync_bplist_obj) != 0) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_CORRUPT_DATA);
		error = EIO;
		goto out;
	}

	/*
	 * Load the bit that tells us to use the new accounting function
	 * (raid-z deflation).  If we have an older pool, this will not
	 * be present.
	 */
	error = zap_lookup(spa->spa_meta_objset,
	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_DEFLATE,
	    sizeof (uint64_t), 1, &spa->spa_deflate);
	if (error != 0 && error != ENOENT) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_CORRUPT_DATA);
		error = EIO;
		goto out;
	}

	/*
	 * Load the persistent error log.  If we have an older pool, this will
	 * not be present.
	 */
	error = zap_lookup(spa->spa_meta_objset,
	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_ERRLOG_LAST,
	    sizeof (uint64_t), 1, &spa->spa_errlog_last);
	if (error != 0 && error != ENOENT) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_CORRUPT_DATA);
		error = EIO;
		goto out;
	}

	error = zap_lookup(spa->spa_meta_objset,
	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_ERRLOG_SCRUB,
	    sizeof (uint64_t), 1, &spa->spa_errlog_scrub);
	if (error != 0 && error != ENOENT) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_CORRUPT_DATA);
		error = EIO;
		goto out;
	}

	/*
	 * Load the history object.  If we have an older pool, this
	 * will not be present.
	 */
	error = zap_lookup(spa->spa_meta_objset,
	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_HISTORY,
	    sizeof (uint64_t), 1, &spa->spa_history);
	if (error != 0 && error != ENOENT) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_CORRUPT_DATA);
		error = EIO;
		goto out;
	}

	/*
	 * Load any hot spares for this pool.
	 */
	error = zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_SPARES, sizeof (uint64_t), 1, &spa->spa_spares.sav_object);
	if (error != 0 && error != ENOENT) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_CORRUPT_DATA);
		error = EIO;
		goto out;
	}
	if (error == 0) {
		ASSERT(spa_version(spa) >= SPA_VERSION_SPARES);
		if (load_nvlist(spa, spa->spa_spares.sav_object,
		    &spa->spa_spares.sav_config) != 0) {
			vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
			    VDEV_AUX_CORRUPT_DATA);
			error = EIO;
			goto out;
		}

		spa_config_enter(spa, RW_WRITER, FTAG);
		spa_load_spares(spa);
		spa_config_exit(spa, FTAG);
	}

	/*
	 * Load any level 2 ARC devices for this pool.
	 */
	error = zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_L2CACHE, sizeof (uint64_t), 1,
	    &spa->spa_l2cache.sav_object);
	if (error != 0 && error != ENOENT) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_CORRUPT_DATA);
		error = EIO;
		goto out;
	}
	if (error == 0) {
		ASSERT(spa_version(spa) >= SPA_VERSION_L2CACHE);
		if (load_nvlist(spa, spa->spa_l2cache.sav_object,
		    &spa->spa_l2cache.sav_config) != 0) {
			vdev_set_state(rvd, B_TRUE,
			    VDEV_STATE_CANT_OPEN,
			    VDEV_AUX_CORRUPT_DATA);
			error = EIO;
			goto out;
		}

		spa_config_enter(spa, RW_WRITER, FTAG);
		spa_load_l2cache(spa);
		spa_config_exit(spa, FTAG);
	}

	spa->spa_delegation = zpool_prop_default_numeric(ZPOOL_PROP_DELEGATION);

	error = zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_PROPS, sizeof (uint64_t), 1, &spa->spa_pool_props_object);

	if (error && error != ENOENT) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_CORRUPT_DATA);
		error = EIO;
		goto out;
	}

	if (error == 0) {
		(void) zap_lookup(spa->spa_meta_objset,
		    spa->spa_pool_props_object,
		    zpool_prop_to_name(ZPOOL_PROP_BOOTFS),
		    sizeof (uint64_t), 1, &spa->spa_bootfs);
		(void) zap_lookup(spa->spa_meta_objset,
		    spa->spa_pool_props_object,
		    zpool_prop_to_name(ZPOOL_PROP_AUTOREPLACE),
		    sizeof (uint64_t), 1, &autoreplace);
		(void) zap_lookup(spa->spa_meta_objset,
		    spa->spa_pool_props_object,
		    zpool_prop_to_name(ZPOOL_PROP_DELEGATION),
		    sizeof (uint64_t), 1, &spa->spa_delegation);
		(void) zap_lookup(spa->spa_meta_objset,
		    spa->spa_pool_props_object,
		    zpool_prop_to_name(ZPOOL_PROP_FAILUREMODE),
		    sizeof (uint64_t), 1, &spa->spa_failmode);
	}

	/*
	 * If the 'autoreplace' property is set, then post a resource notifying
	 * the ZFS DE that it should not issue any faults for unopenable
	 * devices.  We also iterate over the vdevs, and post a sysevent for any
	 * unopenable vdevs so that the normal autoreplace handler can take
	 * over.
	 */
	if (autoreplace && state != SPA_LOAD_TRYIMPORT)
		spa_check_removed(spa->spa_root_vdev);

	/*
	 * Load the vdev state for all toplevel vdevs.
	 */
	vdev_load(rvd);

	/*
	 * Propagate the leaf DTLs we just loaded all the way up the tree.
	 */
	spa_config_enter(spa, RW_WRITER, FTAG);
	vdev_dtl_reassess(rvd, 0, 0, B_FALSE);
	spa_config_exit(spa, FTAG);

	/*
	 * Check the state of the root vdev.  If it can't be opened, it
	 * indicates one or more toplevel vdevs are faulted.
	 */
	if (rvd->vdev_state <= VDEV_STATE_CANT_OPEN) {
		error = ENXIO;
		goto out;
	}

	if ((spa_mode & FWRITE) && state != SPA_LOAD_TRYIMPORT) {
		dmu_tx_t *tx;
		int need_update = B_FALSE;
		int c;

		/*
		 * Claim log blocks that haven't been committed yet.
		 * This must all happen in a single txg.
		 */
		tx = dmu_tx_create_assigned(spa_get_dsl(spa),
		    spa_first_txg(spa));
		(void) dmu_objset_find(spa->spa_name,
		    zil_claim, tx, DS_FIND_CHILDREN);
		dmu_tx_commit(tx);

		spa->spa_sync_on = B_TRUE;
		txg_sync_start(spa->spa_dsl_pool);

		/*
		 * Wait for all claims to sync.
		 */
		txg_wait_synced(spa->spa_dsl_pool, 0);

		/*
		 * If the config cache is stale, or we have uninitialized
		 * metaslabs (see spa_vdev_add()), then update the config.
		 */
		if (config_cache_txg != spa->spa_config_txg ||
		    state == SPA_LOAD_IMPORT)
			need_update = B_TRUE;

		for (c = 0; c < rvd->vdev_children; c++)
			if (rvd->vdev_child[c]->vdev_ms_array == 0)
				need_update = B_TRUE;

		/*
		 * Update the config cache asynchronously in case we're the
		 * root pool, in which case the config cache isn't writable yet.
		 */
		if (need_update)
			spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE);
	}

	error = 0;
out:
	spa->spa_minref = refcount_count(&spa->spa_refcount);
	if (error && error != EBADF)
		zfs_ereport_post(FM_EREPORT_ZFS_POOL, spa, NULL, NULL, 0, 0);
	spa->spa_load_state = SPA_LOAD_NONE;
	spa->spa_ena = 0;

	return (error);
}

/*
 * Pool Open/Import
 *
 * The import case is identical to an open except that the configuration is sent
 * down from userland, instead of grabbed from the configuration cache.  For the
 * case of an open, the pool configuration will exist in the
 * POOL_STATE_UNINITIALIZED state.
 *
 * The stats information (gen/count/ustats) is used to gather vdev statistics at
 * the same time as opening the pool, without having to keep around the spa_t in
 * some ambiguous state.
 */
static int
spa_open_common(const char *pool, spa_t **spapp, void *tag, nvlist_t **config)
{
	spa_t *spa;
	int error;
	int locked = B_FALSE;

	*spapp = NULL;

	/*
	 * As disgusting as this is, we need to support recursive calls to this
	 * function because dsl_dir_open() is called during spa_load(), and ends
	 * up calling spa_open() again.  The real fix is to figure out how to
	 * avoid dsl_dir_open() calling this in the first place.
	 */
	if (mutex_owner(&spa_namespace_lock) != curthread) {
		mutex_enter(&spa_namespace_lock);
		locked = B_TRUE;
	}

	if ((spa = spa_lookup(pool)) == NULL) {
		if (locked)
			mutex_exit(&spa_namespace_lock);
		return (ENOENT);
	}
	if (spa->spa_state == POOL_STATE_UNINITIALIZED) {

		spa_activate(spa);

		error = spa_load(spa, spa->spa_config, SPA_LOAD_OPEN, B_FALSE);

		if (error == EBADF) {
			/*
			 * If vdev_validate() returns failure (indicated by
			 * EBADF), it means that one of the vdevs indicates
			 * that the pool has been exported or destroyed.  If
			 * this is the case, the config cache is out of sync and
			 * we should remove the pool from the namespace.
			 */
			spa_unload(spa);
			spa_deactivate(spa);
			spa_config_sync(spa, B_TRUE, B_TRUE);
			spa_remove(spa);
			if (locked)
				mutex_exit(&spa_namespace_lock);
			return (ENOENT);
		}

		if (error) {
			/*
			 * We can't open the pool, but we still have useful
			 * information: the state of each vdev after the
			 * attempted vdev_open().  Return this to the user.
			 */
			if (config != NULL && spa->spa_root_vdev != NULL) {
				spa_config_enter(spa, RW_READER, FTAG);
				*config = spa_config_generate(spa, NULL, -1ULL,
				    B_TRUE);
				spa_config_exit(spa, FTAG);
			}
			spa_unload(spa);
			spa_deactivate(spa);
			spa->spa_last_open_failed = B_TRUE;
			if (locked)
				mutex_exit(&spa_namespace_lock);
			*spapp = NULL;
			return (error);
		} else {
			spa->spa_last_open_failed = B_FALSE;
		}
	}

	spa_open_ref(spa, tag);

	if (locked)
		mutex_exit(&spa_namespace_lock);

	*spapp = spa;

	if (config != NULL) {
		spa_config_enter(spa, RW_READER, FTAG);
		*config = spa_config_generate(spa, NULL, -1ULL, B_TRUE);
		spa_config_exit(spa, FTAG);
	}

	return (0);
}

int
spa_open(const char *name, spa_t **spapp, void *tag)
{
	return (spa_open_common(name, spapp, tag, NULL));
}

/*
 * Lookup the given spa_t, incrementing the inject count in the process,
 * preventing it from being exported or destroyed.
 */
spa_t *
spa_inject_addref(char *name)
{
	spa_t *spa;

	mutex_enter(&spa_namespace_lock);
	if ((spa = spa_lookup(name)) == NULL) {
		mutex_exit(&spa_namespace_lock);
		return (NULL);
	}
	spa->spa_inject_ref++;
	mutex_exit(&spa_namespace_lock);

	return (spa);
}

void
spa_inject_delref(spa_t *spa)
{
	mutex_enter(&spa_namespace_lock);
	spa->spa_inject_ref--;
	mutex_exit(&spa_namespace_lock);
}

/*
 * Add spares device information to the nvlist.
 */
static void
spa_add_spares(spa_t *spa, nvlist_t *config)
{
	nvlist_t **spares;
	uint_t i, nspares;
	nvlist_t *nvroot;
	uint64_t guid;
	vdev_stat_t *vs;
	uint_t vsc;
	uint64_t pool;

	if (spa->spa_spares.sav_count == 0)
		return;

	VERIFY(nvlist_lookup_nvlist(config,
	    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
	VERIFY(nvlist_lookup_nvlist_array(spa->spa_spares.sav_config,
	    ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0);
	if (nspares != 0) {
		VERIFY(nvlist_add_nvlist_array(nvroot,
		    ZPOOL_CONFIG_SPARES, spares, nspares) == 0);
		VERIFY(nvlist_lookup_nvlist_array(nvroot,
		    ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0);

		/*
		 * Go through and find any spares which have since been
		 * repurposed as an active spare.  If this is the case, update
		 * their status appropriately.
		 */
		for (i = 0; i < nspares; i++) {
			VERIFY(nvlist_lookup_uint64(spares[i],
			    ZPOOL_CONFIG_GUID, &guid) == 0);
			if (spa_spare_exists(guid, &pool, NULL) &&
			    pool != 0ULL) {
				VERIFY(nvlist_lookup_uint64_array(
				    spares[i], ZPOOL_CONFIG_STATS,
				    (uint64_t **)&vs, &vsc) == 0);
				vs->vs_state = VDEV_STATE_CANT_OPEN;
				vs->vs_aux = VDEV_AUX_SPARED;
			}
		}
	}
}

/*
 * Add l2cache device information to the nvlist, including vdev stats.
 */
static void
spa_add_l2cache(spa_t *spa, nvlist_t *config)
{
	nvlist_t **l2cache;
	uint_t i, j, nl2cache;
	nvlist_t *nvroot;
	uint64_t guid;
	vdev_t *vd;
	vdev_stat_t *vs;
	uint_t vsc;

	if (spa->spa_l2cache.sav_count == 0)
		return;

	spa_config_enter(spa, RW_READER, FTAG);

	VERIFY(nvlist_lookup_nvlist(config,
	    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
	VERIFY(nvlist_lookup_nvlist_array(spa->spa_l2cache.sav_config,
	    ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0);
	if (nl2cache != 0) {
		VERIFY(nvlist_add_nvlist_array(nvroot,
		    ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache) == 0);
		VERIFY(nvlist_lookup_nvlist_array(nvroot,
		    ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0);

		/*
		 * Update level 2 cache device stats.
		 */

		for (i = 0; i < nl2cache; i++) {
			VERIFY(nvlist_lookup_uint64(l2cache[i],
			    ZPOOL_CONFIG_GUID, &guid) == 0);

			vd = NULL;
			for (j = 0; j < spa->spa_l2cache.sav_count; j++) {
				if (guid ==
				    spa->spa_l2cache.sav_vdevs[j]->vdev_guid) {
					vd = spa->spa_l2cache.sav_vdevs[j];
					break;
				}
			}
			ASSERT(vd != NULL);

			VERIFY(nvlist_lookup_uint64_array(l2cache[i],
			    ZPOOL_CONFIG_STATS, (uint64_t **)&vs, &vsc) == 0);
			vdev_get_stats(vd, vs);
		}
	}

	spa_config_exit(spa, FTAG);
}

int
spa_get_stats(const char *name, nvlist_t **config, char *altroot, size_t buflen)
{
	int error;
	spa_t *spa;

	*config = NULL;
	error = spa_open_common(name, &spa, FTAG, config);

	if (spa && *config != NULL) {
		VERIFY(nvlist_add_uint64(*config, ZPOOL_CONFIG_ERRCOUNT,
		    spa_get_errlog_size(spa)) == 0);

		spa_add_spares(spa, *config);
		spa_add_l2cache(spa, *config);
	}

	/*
	 * We want to get the alternate root even for faulted pools, so we cheat
	 * and call spa_lookup() directly.
	 */
	if (altroot) {
		if (spa == NULL) {
			mutex_enter(&spa_namespace_lock);
			spa = spa_lookup(name);
			if (spa)
				spa_altroot(spa, altroot, buflen);
			else
				altroot[0] = '\0';
			spa = NULL;
			mutex_exit(&spa_namespace_lock);
		} else {
			spa_altroot(spa, altroot, buflen);
		}
	}

	if (spa != NULL)
		spa_close(spa, FTAG);

	return (error);
}

/*
 * Validate that the auxiliary device array is well formed.  We must have an
 * array of nvlists, each of which describes a valid leaf vdev.  If this is an
 * import (mode is VDEV_ALLOC_SPARE), then we allow corrupted spares to be
 * specified, as long as they are well-formed.
 */
static int
spa_validate_aux_devs(spa_t *spa, nvlist_t *nvroot, uint64_t crtxg, int mode,
    spa_aux_vdev_t *sav, const char *config, uint64_t version,
    vdev_labeltype_t label)
{
	nvlist_t **dev;
	uint_t i, ndev;
	vdev_t *vd;
	int error;

	/*
	 * It's acceptable to have no devs specified.
	 */
	if (nvlist_lookup_nvlist_array(nvroot, config, &dev, &ndev) != 0)
		return (0);

	if (ndev == 0)
		return (EINVAL);

	/*
	 * Make sure the pool is formatted with a version that supports this
	 * device type.
	 */
	if (spa_version(spa) < version)
		return (ENOTSUP);

	/*
	 * Set the pending device list so we correctly handle device in-use
	 * checking.
	 */
	sav->sav_pending = dev;
	sav->sav_npending = ndev;

	for (i = 0; i < ndev; i++) {
		if ((error = spa_config_parse(spa, &vd, dev[i], NULL, 0,
		    mode)) != 0)
			goto out;

		if (!vd->vdev_ops->vdev_op_leaf) {
			vdev_free(vd);
			error = EINVAL;
			goto out;
		}

		/*
		 * The L2ARC currently only supports disk devices.
		 */
		if ((strcmp(config, ZPOOL_CONFIG_L2CACHE) == 0) &&
		    strcmp(vd->vdev_ops->vdev_op_type, VDEV_TYPE_DISK) != 0) {
			error = ENOTBLK;
			goto out;
		}

		vd->vdev_top = vd;

		if ((error = vdev_open(vd)) == 0 &&
		    (error = vdev_label_init(vd, crtxg, label)) == 0) {
			VERIFY(nvlist_add_uint64(dev[i], ZPOOL_CONFIG_GUID,
			    vd->vdev_guid) == 0);
		}

		vdev_free(vd);

		if (error &&
		    (mode != VDEV_ALLOC_SPARE && mode != VDEV_ALLOC_L2CACHE))
			goto out;
		else
			error = 0;
	}

out:
	sav->sav_pending = NULL;
	sav->sav_npending = 0;
	return (error);
}

static int
spa_validate_aux(spa_t *spa, nvlist_t *nvroot, uint64_t crtxg, int mode)
{
	int error;

	if ((error = spa_validate_aux_devs(spa, nvroot, crtxg, mode,
	    &spa->spa_spares, ZPOOL_CONFIG_SPARES, SPA_VERSION_SPARES,
	    VDEV_LABEL_SPARE)) != 0) {
		return (error);
	}

	return (spa_validate_aux_devs(spa, nvroot, crtxg, mode,
	    &spa->spa_l2cache, ZPOOL_CONFIG_L2CACHE, SPA_VERSION_L2CACHE,
	    VDEV_LABEL_L2CACHE));
}

static void
spa_set_aux_vdevs(spa_aux_vdev_t *sav, nvlist_t **devs, int ndevs,
    const char *config)
{
	int i;

	if (sav->sav_config != NULL) {
		nvlist_t **olddevs;
		uint_t oldndevs;
		nvlist_t **newdevs;

		/*
		 * Generate new dev list by concatenating with the
		 * current dev list.
		 */
		VERIFY(nvlist_lookup_nvlist_array(sav->sav_config, config,
		    &olddevs, &oldndevs) == 0);

		newdevs = kmem_alloc(sizeof (void *) *
		    (ndevs + oldndevs), KM_SLEEP);
		for (i = 0; i < oldndevs; i++)
			VERIFY(nvlist_dup(olddevs[i], &newdevs[i],
			    KM_SLEEP) == 0);
		for (i = 0; i < ndevs; i++)
			VERIFY(nvlist_dup(devs[i], &newdevs[i + oldndevs],
			    KM_SLEEP) == 0);

		VERIFY(nvlist_remove(sav->sav_config, config,
		    DATA_TYPE_NVLIST_ARRAY) == 0);

		VERIFY(nvlist_add_nvlist_array(sav->sav_config,
		    config, newdevs, ndevs + oldndevs) == 0);
		for (i = 0; i < oldndevs + ndevs; i++)
			nvlist_free(newdevs[i]);
		kmem_free(newdevs, (oldndevs + ndevs) * sizeof (void *));
	} else {
		/*
		 * Generate a new dev list.
		 */
		VERIFY(nvlist_alloc(&sav->sav_config, NV_UNIQUE_NAME,
		    KM_SLEEP) == 0);
		VERIFY(nvlist_add_nvlist_array(sav->sav_config, config,
		    devs, ndevs) == 0);
	}
}

/*
 * Stop and drop level 2 ARC devices
 */
void
spa_l2cache_drop(spa_t *spa)
{
	vdev_t *vd;
	int i;
	spa_aux_vdev_t *sav = &spa->spa_l2cache;

	for (i = 0; i < sav->sav_count; i++) {
		uint64_t pool;

		vd = sav->sav_vdevs[i];
		ASSERT(vd != NULL);

		if (spa_mode & FWRITE &&
		    spa_l2cache_exists(vd->vdev_guid, &pool) && pool != 0ULL &&
		    l2arc_vdev_present(vd)) {
			l2arc_remove_vdev(vd);
		}
		if (vd->vdev_isl2cache)
			spa_l2cache_remove(vd);
		vdev_clear_stats(vd);
		(void) vdev_close(vd);
	}
}

/*
 * Pool Creation
 */
int
spa_create(const char *pool, nvlist_t *nvroot, nvlist_t *props,
    const char *history_str, nvlist_t *zplprops)
{
	spa_t *spa;
	char *altroot = NULL;
	vdev_t *rvd;
	dsl_pool_t *dp;
	dmu_tx_t *tx;
	int c, error = 0;
	uint64_t txg = TXG_INITIAL;
	nvlist_t **spares, **l2cache;
	uint_t nspares, nl2cache;
	uint64_t version;

	/*
	 * If this pool already exists, return failure.
	 */
	mutex_enter(&spa_namespace_lock);
	if (spa_lookup(pool) != NULL) {
		mutex_exit(&spa_namespace_lock);
		return (EEXIST);
	}

	/*
	 * Allocate a new spa_t structure.
	 */
	(void) nvlist_lookup_string(props,
	    zpool_prop_to_name(ZPOOL_PROP_ALTROOT), &altroot);
	spa = spa_add(pool, altroot);
	spa_activate(spa);

	spa->spa_uberblock.ub_txg = txg - 1;

	if (props && (error = spa_prop_validate(spa, props))) {
		spa_unload(spa);
		spa_deactivate(spa);
		spa_remove(spa);
		mutex_exit(&spa_namespace_lock);
		return (error);
	}

	if (nvlist_lookup_uint64(props, zpool_prop_to_name(ZPOOL_PROP_VERSION),
	    &version) != 0)
		version = SPA_VERSION;
	ASSERT(version <= SPA_VERSION);
	spa->spa_uberblock.ub_version = version;
	spa->spa_ubsync = spa->spa_uberblock;

	/*
	 * Create the root vdev.
	 */
	spa_config_enter(spa, RW_WRITER, FTAG);

	error = spa_config_parse(spa, &rvd, nvroot, NULL, 0, VDEV_ALLOC_ADD);

	ASSERT(error != 0 || rvd != NULL);
	ASSERT(error != 0 || spa->spa_root_vdev == rvd);

	if (error == 0 && !zfs_allocatable_devs(nvroot))
		error = EINVAL;

	if (error == 0 &&
	    (error = vdev_create(rvd, txg, B_FALSE)) == 0 &&
	    (error = spa_validate_aux(spa, nvroot, txg,
	    VDEV_ALLOC_ADD)) == 0) {
		for (c = 0; c < rvd->vdev_children; c++)
			vdev_init(rvd->vdev_child[c], txg);
		vdev_config_dirty(rvd);
	}

	spa_config_exit(spa, FTAG);

	if (error != 0) {
		spa_unload(spa);
		spa_deactivate(spa);
		spa_remove(spa);
		mutex_exit(&spa_namespace_lock);
		return (error);
	}

	/*
	 * Get the list of spares, if specified.
	 */
	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
	    &spares, &nspares) == 0) {
		VERIFY(nvlist_alloc(&spa->spa_spares.sav_config, NV_UNIQUE_NAME,
		    KM_SLEEP) == 0);
		VERIFY(nvlist_add_nvlist_array(spa->spa_spares.sav_config,
		    ZPOOL_CONFIG_SPARES, spares, nspares) == 0);
		spa_config_enter(spa, RW_WRITER, FTAG);
		spa_load_spares(spa);
		spa_config_exit(spa, FTAG);
		spa->spa_spares.sav_sync = B_TRUE;
	}

	/*
	 * Get the list of level 2 cache devices, if specified.
	 */
	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
	    &l2cache, &nl2cache) == 0) {
		VERIFY(nvlist_alloc(&spa->spa_l2cache.sav_config,
		    NV_UNIQUE_NAME, KM_SLEEP) == 0);
		VERIFY(nvlist_add_nvlist_array(spa->spa_l2cache.sav_config,
		    ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache) == 0);
		spa_config_enter(spa, RW_WRITER, FTAG);
		spa_load_l2cache(spa);
		spa_config_exit(spa, FTAG);
		spa->spa_l2cache.sav_sync = B_TRUE;
	}

	spa->spa_dsl_pool = dp = dsl_pool_create(spa, zplprops, txg);
	spa->spa_meta_objset = dp->dp_meta_objset;

	tx = dmu_tx_create_assigned(dp, txg);

	/*
	 * Create the pool config object.
	 */
	spa->spa_config_object = dmu_object_alloc(spa->spa_meta_objset,
	    DMU_OT_PACKED_NVLIST, 1 << 14,
	    DMU_OT_PACKED_NVLIST_SIZE, sizeof (uint64_t), tx);

	if (zap_add(spa->spa_meta_objset,
	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_CONFIG,
	    sizeof (uint64_t), 1, &spa->spa_config_object, tx) != 0) {
		cmn_err(CE_PANIC, "failed to add pool config");
	}

	/* Newly created pools with the right version are always deflated. */
	if (version >= SPA_VERSION_RAIDZ_DEFLATE) {
		spa->spa_deflate = TRUE;
		if (zap_add(spa->spa_meta_objset,
		    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_DEFLATE,
		    sizeof (uint64_t), 1, &spa->spa_deflate, tx) != 0) {
			cmn_err(CE_PANIC, "failed to add deflate");
		}
	}

	/*
	 * Create the deferred-free bplist object.  Turn off compression
	 * because sync-to-convergence takes longer if the blocksize
	 * keeps changing.
	 */
	spa->spa_sync_bplist_obj = bplist_create(spa->spa_meta_objset,
	    1 << 14, tx);
	dmu_object_set_compress(spa->spa_meta_objset, spa->spa_sync_bplist_obj,
	    ZIO_COMPRESS_OFF, tx);

	if (zap_add(spa->spa_meta_objset,
	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_SYNC_BPLIST,
	    sizeof (uint64_t), 1, &spa->spa_sync_bplist_obj, tx) != 0) {
		cmn_err(CE_PANIC, "failed to add bplist");
	}

	/*
	 * Create the pool's history object.
	 */
	if (version >= SPA_VERSION_ZPOOL_HISTORY)
		spa_history_create_obj(spa, tx);

	/*
	 * Set pool properties.
	 */
	spa->spa_bootfs = zpool_prop_default_numeric(ZPOOL_PROP_BOOTFS);
	spa->spa_delegation = zpool_prop_default_numeric(ZPOOL_PROP_DELEGATION);
	spa->spa_failmode = zpool_prop_default_numeric(ZPOOL_PROP_FAILUREMODE);
	if (props)
		spa_sync_props(spa, props, CRED(), tx);

	dmu_tx_commit(tx);

	spa->spa_sync_on = B_TRUE;
	txg_sync_start(spa->spa_dsl_pool);

	/*
	 * We explicitly wait for the first transaction to complete so that our
	 * bean counters are appropriately updated.
	 */
	txg_wait_synced(spa->spa_dsl_pool, txg);

	spa_config_sync(spa, B_FALSE, B_TRUE);

	if (version >= SPA_VERSION_ZPOOL_HISTORY && history_str != NULL)
		(void) spa_history_log(spa, history_str, LOG_CMD_POOL_CREATE);

	mutex_exit(&spa_namespace_lock);

	spa->spa_minref = refcount_count(&spa->spa_refcount);

	return (0);
}

/*
 * Import the given pool into the system.  We set up the necessary spa_t and
 * then call spa_load() to do the dirty work.
 */
static int
spa_import_common(const char *pool, nvlist_t *config, nvlist_t *props,
    boolean_t isroot, boolean_t allowfaulted)
{
	spa_t *spa;
	char *altroot = NULL;
	int error, loaderr;
	nvlist_t *nvroot;
	nvlist_t **spares, **l2cache;
	uint_t nspares, nl2cache;

	/*
	 * If a pool with this name exists, return failure.
	 */
	mutex_enter(&spa_namespace_lock);
	if (spa_lookup(pool) != NULL) {
		mutex_exit(&spa_namespace_lock);
		return (EEXIST);
	}

	/*
	 * Create and initialize the spa structure.
	 */
	(void) nvlist_lookup_string(props,
	    zpool_prop_to_name(ZPOOL_PROP_ALTROOT), &altroot);
	spa = spa_add(pool, altroot);
	spa_activate(spa);

	if (allowfaulted)
		spa->spa_import_faulted = B_TRUE;
	spa->spa_is_root = isroot;

	/*
	 * Pass off the heavy lifting to spa_load().
	 * Pass TRUE for mosconfig (unless this is a root pool) because
	 * the user-supplied config is actually the one to trust when
	 * doing an import.
	 */
	loaderr = error = spa_load(spa, config, SPA_LOAD_IMPORT, !isroot);

	spa_config_enter(spa, RW_WRITER, FTAG);
	/*
	 * Toss any existing sparelist, as it doesn't have any validity anymore,
	 * and conflicts with spa_has_spare().
	 */
	if (!isroot && spa->spa_spares.sav_config) {
		nvlist_free(spa->spa_spares.sav_config);
		spa->spa_spares.sav_config = NULL;
		spa_load_spares(spa);
	}
	if (!isroot && spa->spa_l2cache.sav_config) {
		nvlist_free(spa->spa_l2cache.sav_config);
		spa->spa_l2cache.sav_config = NULL;
		spa_load_l2cache(spa);
	}

	VERIFY(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
	    &nvroot) == 0);
	if (error == 0)
		error = spa_validate_aux(spa, nvroot, -1ULL, VDEV_ALLOC_SPARE);
	if (error == 0)
		error = spa_validate_aux(spa, nvroot, -1ULL,
		    VDEV_ALLOC_L2CACHE);
	spa_config_exit(spa, FTAG);

	if (error != 0 || (props && (error = spa_prop_set(spa, props)))) {
		if (loaderr != 0 && loaderr != EINVAL && allowfaulted) {
			/*
			 * If we failed to load the pool, but 'allowfaulted' is
			 * set, then manually set the config as if the config
			 * passed in was specified in the cache file.
2100 */ 2101 error = 0; 2102 spa->spa_import_faulted = B_FALSE; 2103 if (spa->spa_config == NULL) { 2104 spa_config_enter(spa, RW_READER, FTAG); 2105 spa->spa_config = spa_config_generate(spa, 2106 NULL, -1ULL, B_TRUE); 2107 spa_config_exit(spa, FTAG); 2108 } 2109 spa_unload(spa); 2110 spa_deactivate(spa); 2111 spa_config_sync(spa, B_FALSE, B_TRUE); 2112 } else { 2113 spa_unload(spa); 2114 spa_deactivate(spa); 2115 spa_remove(spa); 2116 } 2117 mutex_exit(&spa_namespace_lock); 2118 return (error); 2119 } 2120 2121 /* 2122 * Override any spares and level 2 cache devices as specified by 2123 * the user, as these may have correct device names/devids, etc. 2124 */ 2125 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES, 2126 &spares, &nspares) == 0) { 2127 if (spa->spa_spares.sav_config) 2128 VERIFY(nvlist_remove(spa->spa_spares.sav_config, 2129 ZPOOL_CONFIG_SPARES, DATA_TYPE_NVLIST_ARRAY) == 0); 2130 else 2131 VERIFY(nvlist_alloc(&spa->spa_spares.sav_config, 2132 NV_UNIQUE_NAME, KM_SLEEP) == 0); 2133 VERIFY(nvlist_add_nvlist_array(spa->spa_spares.sav_config, 2134 ZPOOL_CONFIG_SPARES, spares, nspares) == 0); 2135 spa_config_enter(spa, RW_WRITER, FTAG); 2136 spa_load_spares(spa); 2137 spa_config_exit(spa, FTAG); 2138 spa->spa_spares.sav_sync = B_TRUE; 2139 } 2140 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE, 2141 &l2cache, &nl2cache) == 0) { 2142 if (spa->spa_l2cache.sav_config) 2143 VERIFY(nvlist_remove(spa->spa_l2cache.sav_config, 2144 ZPOOL_CONFIG_L2CACHE, DATA_TYPE_NVLIST_ARRAY) == 0); 2145 else 2146 VERIFY(nvlist_alloc(&spa->spa_l2cache.sav_config, 2147 NV_UNIQUE_NAME, KM_SLEEP) == 0); 2148 VERIFY(nvlist_add_nvlist_array(spa->spa_l2cache.sav_config, 2149 ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache) == 0); 2150 spa_config_enter(spa, RW_WRITER, FTAG); 2151 spa_load_l2cache(spa); 2152 spa_config_exit(spa, FTAG); 2153 spa->spa_l2cache.sav_sync = B_TRUE; 2154 } 2155 2156 if (spa_mode & FWRITE) { 2157 /* 2158 * Update the config cache to include the newly-imported pool. 2159 */ 2160 spa_config_update_common(spa, SPA_CONFIG_UPDATE_POOL, isroot); 2161 } 2162 2163 spa->spa_import_faulted = B_FALSE; 2164 mutex_exit(&spa_namespace_lock); 2165 2166 return (0); 2167 } 2168 2169 #ifdef _KERNEL 2170 /* 2171 * Build a "root" vdev for a top level vdev read in from a rootpool 2172 * device label. 2173 */ 2174 static void 2175 spa_build_rootpool_config(nvlist_t *config) 2176 { 2177 nvlist_t *nvtop, *nvroot; 2178 uint64_t pgid; 2179 2180 /* 2181 * Add this top-level vdev to the child array. 2182 */ 2183 VERIFY(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &nvtop) 2184 == 0); 2185 VERIFY(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID, &pgid) 2186 == 0); 2187 2188 /* 2189 * Put this pool's top-level vdevs into a root vdev. 2190 */ 2191 VERIFY(nvlist_alloc(&nvroot, NV_UNIQUE_NAME, KM_SLEEP) == 0); 2192 VERIFY(nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE, VDEV_TYPE_ROOT) 2193 == 0); 2194 VERIFY(nvlist_add_uint64(nvroot, ZPOOL_CONFIG_ID, 0ULL) == 0); 2195 VERIFY(nvlist_add_uint64(nvroot, ZPOOL_CONFIG_GUID, pgid) == 0); 2196 VERIFY(nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN, 2197 &nvtop, 1) == 0); 2198 2199 /* 2200 * Replace the existing vdev_tree with the new root vdev in 2201 * this pool's configuration (remove the old, add the new). 
2202 */ 2203 VERIFY(nvlist_add_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, nvroot) == 0); 2204 nvlist_free(nvroot); 2205 } 2206 2207 /* 2208 * Get the root pool information from the root disk, then import the root pool 2209 * at system boot time. 2210 */ 2211 extern nvlist_t *vdev_disk_read_rootlabel(char *, char *); 2212 2213 int 2214 spa_check_rootconf(char *devpath, char *devid, nvlist_t **bestconf, 2215 uint64_t *besttxg) 2216 { 2217 nvlist_t *config; 2218 uint64_t txg; 2219 2220 if ((config = vdev_disk_read_rootlabel(devpath, devid)) == NULL) 2221 return (-1); 2222 2223 VERIFY(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_TXG, &txg) == 0); 2224 2225 if (bestconf != NULL) 2226 *bestconf = config; 2227 *besttxg = txg; 2228 return (0); 2229 } 2230 2231 boolean_t 2232 spa_rootdev_validate(nvlist_t *nv) 2233 { 2234 uint64_t ival; 2235 2236 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_OFFLINE, &ival) == 0 || 2237 nvlist_lookup_uint64(nv, ZPOOL_CONFIG_FAULTED, &ival) == 0 || 2238 nvlist_lookup_uint64(nv, ZPOOL_CONFIG_REMOVED, &ival) == 0) 2239 return (B_FALSE); 2240 2241 return (B_TRUE); 2242 } 2243 2244 2245 /* 2246 * Given the boot device's physical path or devid, check if the device 2247 * is in a valid state. If so, return the configuration from the vdev 2248 * label. 2249 */ 2250 int 2251 spa_get_rootconf(char *devpath, char *devid, nvlist_t **bestconf) 2252 { 2253 nvlist_t *conf = NULL; 2254 uint64_t txg = 0; 2255 nvlist_t *nvtop, **child; 2256 char *type; 2257 char *bootpath = NULL; 2258 uint_t children, c; 2259 char *tmp; 2260 2261 if (devpath && ((tmp = strchr(devpath, ' ')) != NULL)) 2262 *tmp = '\0'; 2263 if (spa_check_rootconf(devpath, devid, &conf, &txg) < 0) { 2264 cmn_err(CE_NOTE, "error reading device label"); 2265 nvlist_free(conf); 2266 return (EINVAL); 2267 } 2268 if (txg == 0) { 2269 cmn_err(CE_NOTE, "this device is detached"); 2270 nvlist_free(conf); 2271 return (EINVAL); 2272 } 2273 2274 VERIFY(nvlist_lookup_nvlist(conf, ZPOOL_CONFIG_VDEV_TREE, 2275 &nvtop) == 0); 2276 VERIFY(nvlist_lookup_string(nvtop, ZPOOL_CONFIG_TYPE, &type) == 0); 2277 2278 if (strcmp(type, VDEV_TYPE_DISK) == 0) { 2279 if (spa_rootdev_validate(nvtop)) { 2280 goto out; 2281 } else { 2282 nvlist_free(conf); 2283 return (EINVAL); 2284 } 2285 } 2286 2287 ASSERT(strcmp(type, VDEV_TYPE_MIRROR) == 0); 2288 2289 VERIFY(nvlist_lookup_nvlist_array(nvtop, ZPOOL_CONFIG_CHILDREN, 2290 &child, &children) == 0); 2291 2292 /* 2293 * Go through the vdevs in the mirror to see if the given device 2294 * has the most recent txg. Only the device with the most 2295 * recent txg has valid information and should be booted. 2296 */ 2297 for (c = 0; c < children; c++) { 2298 char *cdevid, *cpath; 2299 uint64_t tmptxg; 2300 2301 if (nvlist_lookup_string(child[c], ZPOOL_CONFIG_PHYS_PATH, 2302 &cpath) != 0) 2303 return (EINVAL); 2304 if (nvlist_lookup_string(child[c], ZPOOL_CONFIG_DEVID, 2305 &cdevid) != 0) 2306 return (EINVAL); 2307 if ((spa_check_rootconf(cpath, cdevid, NULL, 2308 &tmptxg) == 0) && (tmptxg > txg)) { 2309 txg = tmptxg; 2310 VERIFY(nvlist_lookup_string(child[c], 2311 ZPOOL_CONFIG_PATH, &bootpath) == 0); 2312 } 2313 } 2314 2315 /* Does the best device match the one we've booted from? */ 2316 if (bootpath) { 2317 cmn_err(CE_NOTE, "try booting from '%s'", bootpath); 2318 return (EINVAL); 2319 } 2320 out: 2321 *bestconf = conf; 2322 return (0); 2323 } 2324 2325 /* 2326 * Import a root pool. 2327 * 2328 * For x86, devpath_list will consist of the devid and/or physpath name of 2329 * the vdev (e.g. "id1,sd@SSEAGATE..."
or "/pci@1f,0/ide@d/disk@0,0:a"). 2330 * The GRUB "findroot" command will return the vdev we should boot. 2331 * 2332 * For Sparc, devpath_list consists the physpath name of the booting device 2333 * no matter the rootpool is a single device pool or a mirrored pool. 2334 * e.g. 2335 * "/pci@1f,0/ide@d/disk@0,0:a" 2336 */ 2337 int 2338 spa_import_rootpool(char *devpath, char *devid) 2339 { 2340 nvlist_t *conf = NULL; 2341 char *pname; 2342 int error; 2343 2344 /* 2345 * Get the vdev pathname and configuation from the most 2346 * recently updated vdev (highest txg). 2347 */ 2348 if (error = spa_get_rootconf(devpath, devid, &conf)) 2349 goto msg_out; 2350 2351 /* 2352 * Add type "root" vdev to the config. 2353 */ 2354 spa_build_rootpool_config(conf); 2355 2356 VERIFY(nvlist_lookup_string(conf, ZPOOL_CONFIG_POOL_NAME, &pname) == 0); 2357 2358 /* 2359 * We specify 'allowfaulted' for this to be treated like spa_open() 2360 * instead of spa_import(). This prevents us from marking vdevs as 2361 * persistently unavailable, and generates FMA ereports as if it were a 2362 * pool open, not import. 2363 */ 2364 error = spa_import_common(pname, conf, NULL, B_TRUE, B_TRUE); 2365 if (error == EEXIST) 2366 error = 0; 2367 2368 nvlist_free(conf); 2369 return (error); 2370 2371 msg_out: 2372 cmn_err(CE_NOTE, "\n" 2373 " *************************************************** \n" 2374 " * This device is not bootable! * \n" 2375 " * It is either offlined or detached or faulted. * \n" 2376 " * Please try to boot from a different device. * \n" 2377 " *************************************************** "); 2378 2379 return (error); 2380 } 2381 #endif 2382 2383 /* 2384 * Import a non-root pool into the system. 2385 */ 2386 int 2387 spa_import(const char *pool, nvlist_t *config, nvlist_t *props) 2388 { 2389 return (spa_import_common(pool, config, props, B_FALSE, B_FALSE)); 2390 } 2391 2392 int 2393 spa_import_faulted(const char *pool, nvlist_t *config, nvlist_t *props) 2394 { 2395 return (spa_import_common(pool, config, props, B_FALSE, B_TRUE)); 2396 } 2397 2398 2399 /* 2400 * This (illegal) pool name is used when temporarily importing a spa_t in order 2401 * to get the vdev stats associated with the imported devices. 2402 */ 2403 #define TRYIMPORT_NAME "$import" 2404 2405 nvlist_t * 2406 spa_tryimport(nvlist_t *tryconfig) 2407 { 2408 nvlist_t *config = NULL; 2409 char *poolname; 2410 spa_t *spa; 2411 uint64_t state; 2412 2413 if (nvlist_lookup_string(tryconfig, ZPOOL_CONFIG_POOL_NAME, &poolname)) 2414 return (NULL); 2415 2416 if (nvlist_lookup_uint64(tryconfig, ZPOOL_CONFIG_POOL_STATE, &state)) 2417 return (NULL); 2418 2419 /* 2420 * Create and initialize the spa structure. 2421 */ 2422 mutex_enter(&spa_namespace_lock); 2423 spa = spa_add(TRYIMPORT_NAME, NULL); 2424 spa_activate(spa); 2425 2426 /* 2427 * Pass off the heavy lifting to spa_load(). 2428 * Pass TRUE for mosconfig because the user-supplied config 2429 * is actually the one to trust when doing an import. 2430 */ 2431 (void) spa_load(spa, tryconfig, SPA_LOAD_TRYIMPORT, B_TRUE); 2432 2433 /* 2434 * If 'tryconfig' was at least parsable, return the current config. 
2435 */ 2436 if (spa->spa_root_vdev != NULL) { 2437 spa_config_enter(spa, RW_READER, FTAG); 2438 config = spa_config_generate(spa, NULL, -1ULL, B_TRUE); 2439 spa_config_exit(spa, FTAG); 2440 VERIFY(nvlist_add_string(config, ZPOOL_CONFIG_POOL_NAME, 2441 poolname) == 0); 2442 VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_POOL_STATE, 2443 state) == 0); 2444 VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_TIMESTAMP, 2445 spa->spa_uberblock.ub_timestamp) == 0); 2446 2447 /* 2448 * If the bootfs property exists on this pool then we 2449 * copy it out so that external consumers can tell which 2450 * pools are bootable. 2451 */ 2452 if (spa->spa_bootfs) { 2453 char *tmpname = kmem_alloc(MAXPATHLEN, KM_SLEEP); 2454 2455 /* 2456 * We have to play games with the name since the 2457 * pool was opened as TRYIMPORT_NAME. 2458 */ 2459 if (dsl_dsobj_to_dsname(spa->spa_name, 2460 spa->spa_bootfs, tmpname) == 0) { 2461 char *cp; 2462 char *dsname = kmem_alloc(MAXPATHLEN, KM_SLEEP); 2463 2464 cp = strchr(tmpname, '/'); 2465 if (cp == NULL) { 2466 (void) strlcpy(dsname, tmpname, 2467 MAXPATHLEN); 2468 } else { 2469 (void) snprintf(dsname, MAXPATHLEN, 2470 "%s/%s", poolname, ++cp); 2471 } 2472 VERIFY(nvlist_add_string(config, 2473 ZPOOL_CONFIG_BOOTFS, dsname) == 0); 2474 kmem_free(dsname, MAXPATHLEN); 2475 } 2476 kmem_free(tmpname, MAXPATHLEN); 2477 } 2478 2479 /* 2480 * Add the list of hot spares and level 2 cache devices. 2481 */ 2482 spa_add_spares(spa, config); 2483 spa_add_l2cache(spa, config); 2484 } 2485 2486 spa_unload(spa); 2487 spa_deactivate(spa); 2488 spa_remove(spa); 2489 mutex_exit(&spa_namespace_lock); 2490 2491 return (config); 2492 } 2493 2494 /* 2495 * Pool export/destroy 2496 * 2497 * The act of destroying or exporting a pool is very simple. We make sure there 2498 * is no more pending I/O and any references to the pool are gone. Then, we 2499 * update the pool state and sync all the labels to disk, removing the 2500 * configuration from the cache afterwards. 2501 */ 2502 static int 2503 spa_export_common(char *pool, int new_state, nvlist_t **oldconfig, 2504 boolean_t force) 2505 { 2506 spa_t *spa; 2507 2508 if (oldconfig) 2509 *oldconfig = NULL; 2510 2511 if (!(spa_mode & FWRITE)) 2512 return (EROFS); 2513 2514 mutex_enter(&spa_namespace_lock); 2515 if ((spa = spa_lookup(pool)) == NULL) { 2516 mutex_exit(&spa_namespace_lock); 2517 return (ENOENT); 2518 } 2519 2520 /* 2521 * Put a hold on the pool, drop the namespace lock, stop async tasks, 2522 * reacquire the namespace lock, and see if we can export. 2523 */ 2524 spa_open_ref(spa, FTAG); 2525 mutex_exit(&spa_namespace_lock); 2526 spa_async_suspend(spa); 2527 mutex_enter(&spa_namespace_lock); 2528 spa_close(spa, FTAG); 2529 2530 /* 2531 * The pool will be in core if it's openable, 2532 * in which case we can modify its state. 2533 */ 2534 if (spa->spa_state != POOL_STATE_UNINITIALIZED && spa->spa_sync_on) { 2535 /* 2536 * Objsets may be open only because they're dirty, so we 2537 * have to force it to sync before checking spa_refcnt. 2538 */ 2539 txg_wait_synced(spa->spa_dsl_pool, 0); 2540 2541 /* 2542 * A pool cannot be exported or destroyed if there are active 2543 * references. If we are resetting a pool, allow references by 2544 * fault injection handlers. 
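 * (Editorial note, not in the original source: the "resetting" case is the
 * new_state == POOL_STATE_UNINITIALIZED path used by spa_reset() below;
 * that is why the spa_inject_ref test that follows only applies when the
 * pool is actually being exported or destroyed.)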
2545 */ 2546 if (!spa_refcount_zero(spa) || 2547 (spa->spa_inject_ref != 0 && 2548 new_state != POOL_STATE_UNINITIALIZED)) { 2549 spa_async_resume(spa); 2550 mutex_exit(&spa_namespace_lock); 2551 return (EBUSY); 2552 } 2553 2554 /* 2555 * A pool cannot be exported if it has an active shared spare. 2556 * This is to prevent other pools from stealing the active spare 2557 * from an exported pool. The user can still force the export 2558 * if desired. 2559 */ 2560 if (!force && new_state == POOL_STATE_EXPORTED && 2561 spa_has_active_shared_spare(spa)) { 2562 spa_async_resume(spa); 2563 mutex_exit(&spa_namespace_lock); 2564 return (EXDEV); 2565 } 2566 2567 /* 2568 * We want this to be reflected on every label, 2569 * so mark them all dirty. spa_unload() will do the 2570 * final sync that pushes these changes out. 2571 */ 2572 if (new_state != POOL_STATE_UNINITIALIZED) { 2573 spa_config_enter(spa, RW_WRITER, FTAG); 2574 spa->spa_state = new_state; 2575 spa->spa_final_txg = spa_last_synced_txg(spa) + 1; 2576 vdev_config_dirty(spa->spa_root_vdev); 2577 spa_config_exit(spa, FTAG); 2578 } 2579 } 2580 2581 spa_event_notify(spa, NULL, ESC_ZFS_POOL_DESTROY); 2582 2583 if (spa->spa_state != POOL_STATE_UNINITIALIZED) { 2584 spa_unload(spa); 2585 spa_deactivate(spa); 2586 } 2587 2588 if (oldconfig && spa->spa_config) 2589 VERIFY(nvlist_dup(spa->spa_config, oldconfig, 0) == 0); 2590 2591 if (new_state != POOL_STATE_UNINITIALIZED) { 2592 spa_config_sync(spa, B_TRUE, B_TRUE); 2593 spa_remove(spa); 2594 } 2595 mutex_exit(&spa_namespace_lock); 2596 2597 return (0); 2598 } 2599 2600 /* 2601 * Destroy a storage pool. 2602 */ 2603 int 2604 spa_destroy(char *pool) 2605 { 2606 return (spa_export_common(pool, POOL_STATE_DESTROYED, NULL, B_FALSE)); 2607 } 2608 2609 /* 2610 * Export a storage pool. 2611 */ 2612 int 2613 spa_export(char *pool, nvlist_t **oldconfig, boolean_t force) 2614 { 2615 return (spa_export_common(pool, POOL_STATE_EXPORTED, oldconfig, force)); 2616 } 2617 2618 /* 2619 * Similar to spa_export(), this unloads the spa_t without actually removing it 2620 * from the namespace in any way. 2621 */ 2622 int 2623 spa_reset(char *pool) 2624 { 2625 return (spa_export_common(pool, POOL_STATE_UNINITIALIZED, NULL, 2626 B_FALSE)); 2627 } 2628 2629 /* 2630 * ========================================================================== 2631 * Device manipulation 2632 * ========================================================================== 2633 */ 2634 2635 /* 2636 * Add a device to a storage pool.
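 *
 * Editorial sketch (not part of the original source): spa_vdev_add() and the
 * other device-manipulation routines below share the same bracketing idiom,
 * using only calls that already appear in this file:
 *
 *	txg = spa_vdev_enter(spa);		grab the config lock, pick a txg
 *	... parse or modify the vdev tree, vdev_config_dirty() as needed ...
 *	return (spa_vdev_exit(spa, vd, txg, error));
 *
 * Note how the error paths below hand the partially built 'vd' tree to
 * spa_vdev_exit() for cleanup rather than freeing it directly.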
2637 */ 2638 int 2639 spa_vdev_add(spa_t *spa, nvlist_t *nvroot) 2640 { 2641 uint64_t txg; 2642 int c, error; 2643 vdev_t *rvd = spa->spa_root_vdev; 2644 vdev_t *vd, *tvd; 2645 nvlist_t **spares, **l2cache; 2646 uint_t nspares, nl2cache; 2647 2648 txg = spa_vdev_enter(spa); 2649 2650 if ((error = spa_config_parse(spa, &vd, nvroot, NULL, 0, 2651 VDEV_ALLOC_ADD)) != 0) 2652 return (spa_vdev_exit(spa, NULL, txg, error)); 2653 2654 spa->spa_pending_vdev = vd; 2655 2656 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES, &spares, 2657 &nspares) != 0) 2658 nspares = 0; 2659 2660 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE, &l2cache, 2661 &nl2cache) != 0) 2662 nl2cache = 0; 2663 2664 if (vd->vdev_children == 0 && nspares == 0 && nl2cache == 0) { 2665 spa->spa_pending_vdev = NULL; 2666 return (spa_vdev_exit(spa, vd, txg, EINVAL)); 2667 } 2668 2669 if (vd->vdev_children != 0) { 2670 if ((error = vdev_create(vd, txg, B_FALSE)) != 0) { 2671 spa->spa_pending_vdev = NULL; 2672 return (spa_vdev_exit(spa, vd, txg, error)); 2673 } 2674 } 2675 2676 /* 2677 * We must validate the spares and l2cache devices after checking the 2678 * children. Otherwise, vdev_inuse() will blindly overwrite the spare. 2679 */ 2680 if ((error = spa_validate_aux(spa, nvroot, txg, VDEV_ALLOC_ADD)) != 0) { 2681 spa->spa_pending_vdev = NULL; 2682 return (spa_vdev_exit(spa, vd, txg, error)); 2683 } 2684 2685 spa->spa_pending_vdev = NULL; 2686 2687 /* 2688 * Transfer each new top-level vdev from vd to rvd. 2689 */ 2690 for (c = 0; c < vd->vdev_children; c++) { 2691 tvd = vd->vdev_child[c]; 2692 vdev_remove_child(vd, tvd); 2693 tvd->vdev_id = rvd->vdev_children; 2694 vdev_add_child(rvd, tvd); 2695 vdev_config_dirty(tvd); 2696 } 2697 2698 if (nspares != 0) { 2699 spa_set_aux_vdevs(&spa->spa_spares, spares, nspares, 2700 ZPOOL_CONFIG_SPARES); 2701 spa_load_spares(spa); 2702 spa->spa_spares.sav_sync = B_TRUE; 2703 } 2704 2705 if (nl2cache != 0) { 2706 spa_set_aux_vdevs(&spa->spa_l2cache, l2cache, nl2cache, 2707 ZPOOL_CONFIG_L2CACHE); 2708 spa_load_l2cache(spa); 2709 spa->spa_l2cache.sav_sync = B_TRUE; 2710 } 2711 2712 /* 2713 * We have to be careful when adding new vdevs to an existing pool. 2714 * If other threads start allocating from these vdevs before we 2715 * sync the config cache, and we lose power, then upon reboot we may 2716 * fail to open the pool because there are DVAs that the config cache 2717 * can't translate. Therefore, we first add the vdevs without 2718 * initializing metaslabs; sync the config cache (via spa_vdev_exit()); 2719 * and then let spa_config_update() initialize the new metaslabs. 2720 * 2721 * spa_load() checks for added-but-not-initialized vdevs, so that 2722 * if we lose power at any point in this sequence, the remaining 2723 * steps will be completed the next time we load the pool. 2724 */ 2725 (void) spa_vdev_exit(spa, vd, txg, 0); 2726 2727 mutex_enter(&spa_namespace_lock); 2728 spa_config_update(spa, SPA_CONFIG_UPDATE_POOL); 2729 mutex_exit(&spa_namespace_lock); 2730 2731 return (0); 2732 } 2733 2734 /* 2735 * Attach a device to a mirror. The arguments are the path to any device 2736 * in the mirror, and the nvroot for the new device. If the path specifies 2737 * a device that is not mirrored, we automatically insert the mirror vdev. 
2738 * 2739 * If 'replacing' is specified, the new device is intended to replace the 2740 * existing device; in this case the two devices are made into their own 2741 * mirror using the 'replacing' vdev, which is functionally identical to 2742 * the mirror vdev (it actually reuses all the same ops) but has a few 2743 * extra rules: you can't attach to it after it's been created, and upon 2744 * completion of resilvering, the first disk (the one being replaced) 2745 * is automatically detached. 2746 */ 2747 int 2748 spa_vdev_attach(spa_t *spa, uint64_t guid, nvlist_t *nvroot, int replacing) 2749 { 2750 uint64_t txg, open_txg; 2751 int error; 2752 vdev_t *rvd = spa->spa_root_vdev; 2753 vdev_t *oldvd, *newvd, *newrootvd, *pvd, *tvd; 2754 vdev_ops_t *pvops; 2755 int is_log; 2756 2757 txg = spa_vdev_enter(spa); 2758 2759 oldvd = spa_lookup_by_guid(spa, guid, B_FALSE); 2760 2761 if (oldvd == NULL) 2762 return (spa_vdev_exit(spa, NULL, txg, ENODEV)); 2763 2764 if (!oldvd->vdev_ops->vdev_op_leaf) 2765 return (spa_vdev_exit(spa, NULL, txg, ENOTSUP)); 2766 2767 pvd = oldvd->vdev_parent; 2768 2769 if ((error = spa_config_parse(spa, &newrootvd, nvroot, NULL, 0, 2770 VDEV_ALLOC_ADD)) != 0) 2771 return (spa_vdev_exit(spa, NULL, txg, EINVAL)); 2772 2773 if (newrootvd->vdev_children != 1) 2774 return (spa_vdev_exit(spa, newrootvd, txg, EINVAL)); 2775 2776 newvd = newrootvd->vdev_child[0]; 2777 2778 if (!newvd->vdev_ops->vdev_op_leaf) 2779 return (spa_vdev_exit(spa, newrootvd, txg, EINVAL)); 2780 2781 if ((error = vdev_create(newrootvd, txg, replacing)) != 0) 2782 return (spa_vdev_exit(spa, newrootvd, txg, error)); 2783 2784 /* 2785 * Spares can't replace logs 2786 */ 2787 is_log = oldvd->vdev_islog; 2788 if (is_log && newvd->vdev_isspare) 2789 return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP)); 2790 2791 if (!replacing) { 2792 /* 2793 * For attach, the only allowable parent is a mirror or the root 2794 * vdev. 2795 */ 2796 if (pvd->vdev_ops != &vdev_mirror_ops && 2797 pvd->vdev_ops != &vdev_root_ops) 2798 return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP)); 2799 2800 pvops = &vdev_mirror_ops; 2801 } else { 2802 /* 2803 * Active hot spares can only be replaced by inactive hot 2804 * spares. 2805 */ 2806 if (pvd->vdev_ops == &vdev_spare_ops && 2807 pvd->vdev_child[1] == oldvd && 2808 !spa_has_spare(spa, newvd->vdev_guid)) 2809 return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP)); 2810 2811 /* 2812 * If the source is a hot spare, and the parent isn't already a 2813 * spare, then we want to create a new hot spare. Otherwise, we 2814 * want to create a replacing vdev. The user is not allowed to 2815 * attach to a spared vdev child unless the 'isspare' state is 2816 * the same (spare replaces spare, non-spare replaces 2817 * non-spare). 2818 */ 2819 if (pvd->vdev_ops == &vdev_replacing_ops) 2820 return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP)); 2821 else if (pvd->vdev_ops == &vdev_spare_ops && 2822 newvd->vdev_isspare != oldvd->vdev_isspare) 2823 return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP)); 2824 else if (pvd->vdev_ops != &vdev_spare_ops && 2825 newvd->vdev_isspare) 2826 pvops = &vdev_spare_ops; 2827 else 2828 pvops = &vdev_replacing_ops; 2829 } 2830 2831 /* 2832 * Compare the new device size with the replaceable/attachable 2833 * device size. 2834 */ 2835 if (newvd->vdev_psize < vdev_get_rsize(oldvd)) 2836 return (spa_vdev_exit(spa, newrootvd, txg, EOVERFLOW)); 2837 2838 /* 2839 * The new device cannot have a higher alignment requirement 2840 * than the top-level vdev. 
2841 */ 2842 if (newvd->vdev_ashift > oldvd->vdev_top->vdev_ashift) 2843 return (spa_vdev_exit(spa, newrootvd, txg, EDOM)); 2844 2845 /* 2846 * If this is an in-place replacement, update oldvd's path and devid 2847 * to make it distinguishable from newvd, and unopenable from now on. 2848 */ 2849 if (strcmp(oldvd->vdev_path, newvd->vdev_path) == 0) { 2850 spa_strfree(oldvd->vdev_path); 2851 oldvd->vdev_path = kmem_alloc(strlen(newvd->vdev_path) + 5, 2852 KM_SLEEP); 2853 (void) sprintf(oldvd->vdev_path, "%s/%s", 2854 newvd->vdev_path, "old"); 2855 if (oldvd->vdev_devid != NULL) { 2856 spa_strfree(oldvd->vdev_devid); 2857 oldvd->vdev_devid = NULL; 2858 } 2859 } 2860 2861 /* 2862 * If the parent is not a mirror, or if we're replacing, insert the new 2863 * mirror/replacing/spare vdev above oldvd. 2864 */ 2865 if (pvd->vdev_ops != pvops) 2866 pvd = vdev_add_parent(oldvd, pvops); 2867 2868 ASSERT(pvd->vdev_top->vdev_parent == rvd); 2869 ASSERT(pvd->vdev_ops == pvops); 2870 ASSERT(oldvd->vdev_parent == pvd); 2871 2872 /* 2873 * Extract the new device from its root and add it to pvd. 2874 */ 2875 vdev_remove_child(newrootvd, newvd); 2876 newvd->vdev_id = pvd->vdev_children; 2877 vdev_add_child(pvd, newvd); 2878 2879 /* 2880 * If newvd is smaller than oldvd, but larger than its rsize, 2881 * the addition of newvd may have decreased our parent's asize. 2882 */ 2883 pvd->vdev_asize = MIN(pvd->vdev_asize, newvd->vdev_asize); 2884 2885 tvd = newvd->vdev_top; 2886 ASSERT(pvd->vdev_top == tvd); 2887 ASSERT(tvd->vdev_parent == rvd); 2888 2889 vdev_config_dirty(tvd); 2890 2891 /* 2892 * Set newvd's DTL to [TXG_INITIAL, open_txg]. It will propagate 2893 * upward when spa_vdev_exit() calls vdev_dtl_reassess(). 2894 */ 2895 open_txg = txg + TXG_CONCURRENT_STATES - 1; 2896 2897 mutex_enter(&newvd->vdev_dtl_lock); 2898 space_map_add(&newvd->vdev_dtl_map, TXG_INITIAL, 2899 open_txg - TXG_INITIAL + 1); 2900 mutex_exit(&newvd->vdev_dtl_lock); 2901 2902 if (newvd->vdev_isspare) 2903 spa_spare_activate(newvd); 2904 2905 /* 2906 * Mark newvd's DTL dirty in this txg. 2907 */ 2908 vdev_dirty(tvd, VDD_DTL, newvd, txg); 2909 2910 (void) spa_vdev_exit(spa, newrootvd, open_txg, 0); 2911 2912 /* 2913 * Kick off a resilver to update newvd. 2914 */ 2915 VERIFY3U(spa_scrub(spa, POOL_SCRUB_RESILVER), ==, 0); 2916 2917 return (0); 2918 } 2919 2920 /* 2921 * Detach a device from a mirror or replacing vdev. 2922 * If 'replace_done' is specified, only detach if the parent 2923 * is a replacing vdev. 2924 */ 2925 int 2926 spa_vdev_detach(spa_t *spa, uint64_t guid, int replace_done) 2927 { 2928 uint64_t txg; 2929 int c, t, error; 2930 vdev_t *rvd = spa->spa_root_vdev; 2931 vdev_t *vd, *pvd, *cvd, *tvd; 2932 boolean_t unspare = B_FALSE; 2933 uint64_t unspare_guid; 2934 size_t len; 2935 2936 txg = spa_vdev_enter(spa); 2937 2938 vd = spa_lookup_by_guid(spa, guid, B_FALSE); 2939 2940 if (vd == NULL) 2941 return (spa_vdev_exit(spa, NULL, txg, ENODEV)); 2942 2943 if (!vd->vdev_ops->vdev_op_leaf) 2944 return (spa_vdev_exit(spa, NULL, txg, ENOTSUP)); 2945 2946 pvd = vd->vdev_parent; 2947 2948 /* 2949 * If replace_done is specified, only remove this device if it's 2950 * the first child of a replacing vdev. For the 'spare' vdev, either 2951 * disk can be removed. 
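 * (Editorial note, not in the original source: for a 'spare' vdev the
 * convention in this file is that child 0 is the original device and
 * child 1 is the hot spare; see the vd->vdev_id == 0 test later in this
 * function and the child handling in spa_vdev_resilver_done_hunt().)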
2952 */ 2953 if (replace_done) { 2954 if (pvd->vdev_ops == &vdev_replacing_ops) { 2955 if (vd->vdev_id != 0) 2956 return (spa_vdev_exit(spa, NULL, txg, ENOTSUP)); 2957 } else if (pvd->vdev_ops != &vdev_spare_ops) { 2958 return (spa_vdev_exit(spa, NULL, txg, ENOTSUP)); 2959 } 2960 } 2961 2962 ASSERT(pvd->vdev_ops != &vdev_spare_ops || 2963 spa_version(spa) >= SPA_VERSION_SPARES); 2964 2965 /* 2966 * Only mirror, replacing, and spare vdevs support detach. 2967 */ 2968 if (pvd->vdev_ops != &vdev_replacing_ops && 2969 pvd->vdev_ops != &vdev_mirror_ops && 2970 pvd->vdev_ops != &vdev_spare_ops) 2971 return (spa_vdev_exit(spa, NULL, txg, ENOTSUP)); 2972 2973 /* 2974 * If there's only one replica, you can't detach it. 2975 */ 2976 if (pvd->vdev_children <= 1) 2977 return (spa_vdev_exit(spa, NULL, txg, EBUSY)); 2978 2979 /* 2980 * If all siblings have non-empty DTLs, this device may have the only 2981 * valid copy of the data, which means we cannot safely detach it. 2982 * 2983 * XXX -- as in the vdev_offline() case, we really want a more 2984 * precise DTL check. 2985 */ 2986 for (c = 0; c < pvd->vdev_children; c++) { 2987 uint64_t dirty; 2988 2989 cvd = pvd->vdev_child[c]; 2990 if (cvd == vd) 2991 continue; 2992 if (vdev_is_dead(cvd)) 2993 continue; 2994 mutex_enter(&cvd->vdev_dtl_lock); 2995 dirty = cvd->vdev_dtl_map.sm_space | 2996 cvd->vdev_dtl_scrub.sm_space; 2997 mutex_exit(&cvd->vdev_dtl_lock); 2998 if (!dirty) 2999 break; 3000 } 3001 3002 /* 3003 * If we are a replacing or spare vdev, then we can always detach the 3004 * latter child, as that is how one cancels the operation. 3005 */ 3006 if ((pvd->vdev_ops == &vdev_mirror_ops || vd->vdev_id != 1) && 3007 c == pvd->vdev_children) 3008 return (spa_vdev_exit(spa, NULL, txg, EBUSY)); 3009 3010 /* 3011 * If we are detaching the second disk from a replacing vdev, then 3012 * check to see if we changed the original vdev's path to have "/old" 3013 * at the end in spa_vdev_attach(). If so, undo that change now. 3014 */ 3015 if (pvd->vdev_ops == &vdev_replacing_ops && vd->vdev_id == 1 && 3016 pvd->vdev_child[0]->vdev_path != NULL && 3017 pvd->vdev_child[1]->vdev_path != NULL) { 3018 ASSERT(pvd->vdev_child[1] == vd); 3019 cvd = pvd->vdev_child[0]; 3020 len = strlen(vd->vdev_path); 3021 if (strncmp(cvd->vdev_path, vd->vdev_path, len) == 0 && 3022 strcmp(cvd->vdev_path + len, "/old") == 0) { 3023 spa_strfree(cvd->vdev_path); 3024 cvd->vdev_path = spa_strdup(vd->vdev_path); 3025 } 3026 } 3027 3028 /* 3029 * If we are detaching the original disk from a spare, then it implies 3030 * that the spare should become a real disk, and be removed from the 3031 * active spare list for the pool. 3032 */ 3033 if (pvd->vdev_ops == &vdev_spare_ops && 3034 vd->vdev_id == 0) 3035 unspare = B_TRUE; 3036 3037 /* 3038 * Erase the disk labels so the disk can be used for other things. 3039 * This must be done after all other error cases are handled, 3040 * but before we disembowel vd (so we can still do I/O to it). 3041 * But if we can't do it, don't treat the error as fatal -- 3042 * it may be that the unwritability of the disk is the reason 3043 * it's being detached! 3044 */ 3045 error = vdev_label_init(vd, 0, VDEV_LABEL_REMOVE); 3046 3047 /* 3048 * Remove vd from its parent and compact the parent's children. 3049 */ 3050 vdev_remove_child(pvd, vd); 3051 vdev_compact_children(pvd); 3052 3053 /* 3054 * Remember one of the remaining children so we can get tvd below. 
3055 */ 3056 cvd = pvd->vdev_child[0]; 3057 3058 /* 3059 * If we need to remove the remaining child from the list of hot spares, 3060 * do it now, marking the vdev as no longer a spare in the process. We 3061 * must do this before vdev_remove_parent(), because that can change the 3062 * GUID if it creates a new toplevel GUID. 3063 */ 3064 if (unspare) { 3065 ASSERT(cvd->vdev_isspare); 3066 spa_spare_remove(cvd); 3067 unspare_guid = cvd->vdev_guid; 3068 } 3069 3070 /* 3071 * If the parent mirror/replacing vdev only has one child, 3072 * the parent is no longer needed. Remove it from the tree. 3073 */ 3074 if (pvd->vdev_children == 1) 3075 vdev_remove_parent(cvd); 3076 3077 /* 3078 * We don't set tvd until now because the parent we just removed 3079 * may have been the previous top-level vdev. 3080 */ 3081 tvd = cvd->vdev_top; 3082 ASSERT(tvd->vdev_parent == rvd); 3083 3084 /* 3085 * Reevaluate the parent vdev state. 3086 */ 3087 vdev_propagate_state(cvd); 3088 3089 /* 3090 * If the device we just detached was smaller than the others, it may be 3091 * possible to add metaslabs (i.e. grow the pool). vdev_metaslab_init() 3092 * can't fail because the existing metaslabs are already in core, so 3093 * there's nothing to read from disk. 3094 */ 3095 VERIFY(vdev_metaslab_init(tvd, txg) == 0); 3096 3097 vdev_config_dirty(tvd); 3098 3099 /* 3100 * Mark vd's DTL as dirty in this txg. vdev_dtl_sync() will see that 3101 * vd->vdev_detached is set and free vd's DTL object in syncing context. 3102 * But first make sure we're not on any *other* txg's DTL list, to 3103 * prevent vd from being accessed after it's freed. 3104 */ 3105 for (t = 0; t < TXG_SIZE; t++) 3106 (void) txg_list_remove_this(&tvd->vdev_dtl_list, vd, t); 3107 vd->vdev_detached = B_TRUE; 3108 vdev_dirty(tvd, VDD_DTL, vd, txg); 3109 3110 spa_event_notify(spa, vd, ESC_ZFS_VDEV_REMOVE); 3111 3112 error = spa_vdev_exit(spa, vd, txg, 0); 3113 3114 /* 3115 * If this was the removal of the original device in a hot spare vdev, 3116 * then we want to go through and remove the device from the hot spare 3117 * list of every other pool. 3118 */ 3119 if (unspare) { 3120 spa = NULL; 3121 mutex_enter(&spa_namespace_lock); 3122 while ((spa = spa_next(spa)) != NULL) { 3123 if (spa->spa_state != POOL_STATE_ACTIVE) 3124 continue; 3125 3126 (void) spa_vdev_remove(spa, unspare_guid, B_TRUE); 3127 } 3128 mutex_exit(&spa_namespace_lock); 3129 } 3130 3131 return (error); 3132 } 3133 3134 /* 3135 * Remove a spares vdev from the nvlist config. 3136 */ 3137 static int 3138 spa_remove_spares(spa_aux_vdev_t *sav, uint64_t guid, boolean_t unspare, 3139 nvlist_t **spares, int nspares, vdev_t *vd) 3140 { 3141 nvlist_t *nv, **newspares; 3142 int i, j; 3143 3144 nv = NULL; 3145 for (i = 0; i < nspares; i++) { 3146 uint64_t theguid; 3147 3148 VERIFY(nvlist_lookup_uint64(spares[i], 3149 ZPOOL_CONFIG_GUID, &theguid) == 0); 3150 if (theguid == guid) { 3151 nv = spares[i]; 3152 break; 3153 } 3154 } 3155 3156 /* 3157 * Only remove the hot spare if it's not currently in use in this pool. 
3158 */ 3159 if (nv == NULL && vd == NULL) 3160 return (ENOENT); 3161 3162 if (nv == NULL && vd != NULL) 3163 return (ENOTSUP); 3164 3165 if (!unspare && nv != NULL && vd != NULL) 3166 return (EBUSY); 3167 3168 if (nspares == 1) { 3169 newspares = NULL; 3170 } else { 3171 newspares = kmem_alloc((nspares - 1) * sizeof (void *), 3172 KM_SLEEP); 3173 for (i = 0, j = 0; i < nspares; i++) { 3174 if (spares[i] != nv) 3175 VERIFY(nvlist_dup(spares[i], 3176 &newspares[j++], KM_SLEEP) == 0); 3177 } 3178 } 3179 3180 VERIFY(nvlist_remove(sav->sav_config, ZPOOL_CONFIG_SPARES, 3181 DATA_TYPE_NVLIST_ARRAY) == 0); 3182 VERIFY(nvlist_add_nvlist_array(sav->sav_config, 3183 ZPOOL_CONFIG_SPARES, newspares, nspares - 1) == 0); 3184 for (i = 0; i < nspares - 1; i++) 3185 nvlist_free(newspares[i]); 3186 kmem_free(newspares, (nspares - 1) * sizeof (void *)); 3187 3188 return (0); 3189 } 3190 3191 /* 3192 * Remove an l2cache vdev from the nvlist config. 3193 */ 3194 static int 3195 spa_remove_l2cache(spa_aux_vdev_t *sav, uint64_t guid, nvlist_t **l2cache, 3196 int nl2cache, vdev_t *vd) 3197 { 3198 nvlist_t *nv, **newl2cache; 3199 int i, j; 3200 3201 nv = NULL; 3202 for (i = 0; i < nl2cache; i++) { 3203 uint64_t theguid; 3204 3205 VERIFY(nvlist_lookup_uint64(l2cache[i], 3206 ZPOOL_CONFIG_GUID, &theguid) == 0); 3207 if (theguid == guid) { 3208 nv = l2cache[i]; 3209 break; 3210 } 3211 } 3212 3213 if (vd == NULL) { 3214 for (i = 0; i < nl2cache; i++) { 3215 if (sav->sav_vdevs[i]->vdev_guid == guid) { 3216 vd = sav->sav_vdevs[i]; 3217 break; 3218 } 3219 } 3220 } 3221 3222 if (nv == NULL && vd == NULL) 3223 return (ENOENT); 3224 3225 if (nv == NULL && vd != NULL) 3226 return (ENOTSUP); 3227 3228 if (nl2cache == 1) { 3229 newl2cache = NULL; 3230 } else { 3231 newl2cache = kmem_alloc((nl2cache - 1) * sizeof (void *), 3232 KM_SLEEP); 3233 for (i = 0, j = 0; i < nl2cache; i++) { 3234 if (l2cache[i] != nv) 3235 VERIFY(nvlist_dup(l2cache[i], 3236 &newl2cache[j++], KM_SLEEP) == 0); 3237 } 3238 } 3239 3240 VERIFY(nvlist_remove(sav->sav_config, ZPOOL_CONFIG_L2CACHE, 3241 DATA_TYPE_NVLIST_ARRAY) == 0); 3242 VERIFY(nvlist_add_nvlist_array(sav->sav_config, 3243 ZPOOL_CONFIG_L2CACHE, newl2cache, nl2cache - 1) == 0); 3244 for (i = 0; i < nl2cache - 1; i++) 3245 nvlist_free(newl2cache[i]); 3246 kmem_free(newl2cache, (nl2cache - 1) * sizeof (void *)); 3247 3248 return (0); 3249 } 3250 3251 /* 3252 * Remove a device from the pool. Currently, this supports removing only hot 3253 * spares and level 2 ARC devices. 
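 *
 * (Editorial note, not in the original source: the error codes come from the
 * helpers above -- ENOENT when 'guid' is in neither the aux config nor the
 * vdev tree, ENOTSUP when it is only present as an active vdev, and EBUSY
 * when an in-use hot spare is removed without 'unspare' being set.)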
3254 */ 3255 int 3256 spa_vdev_remove(spa_t *spa, uint64_t guid, boolean_t unspare) 3257 { 3258 vdev_t *vd; 3259 nvlist_t **spares, **l2cache; 3260 uint_t nspares, nl2cache; 3261 int error = 0; 3262 3263 spa_config_enter(spa, RW_WRITER, FTAG); 3264 3265 vd = spa_lookup_by_guid(spa, guid, B_FALSE); 3266 3267 if (spa->spa_spares.sav_vdevs != NULL && 3268 nvlist_lookup_nvlist_array(spa->spa_spares.sav_config, 3269 ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0) { 3270 if ((error = spa_remove_spares(&spa->spa_spares, guid, unspare, 3271 spares, nspares, vd)) != 0) 3272 goto out; 3273 spa_load_spares(spa); 3274 spa->spa_spares.sav_sync = B_TRUE; 3275 goto out; 3276 } 3277 3278 if (spa->spa_l2cache.sav_vdevs != NULL && 3279 nvlist_lookup_nvlist_array(spa->spa_l2cache.sav_config, 3280 ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0) { 3281 if ((error = spa_remove_l2cache(&spa->spa_l2cache, guid, 3282 l2cache, nl2cache, vd)) != 0) 3283 goto out; 3284 spa_load_l2cache(spa); 3285 spa->spa_l2cache.sav_sync = B_TRUE; 3286 } 3287 3288 out: 3289 spa_config_exit(spa, FTAG); 3290 return (error); 3291 } 3292 3293 /* 3294 * Find any device that's done replacing, or a vdev marked 'unspare' that's 3295 * currently spared, so we can detach it. 3296 */ 3297 static vdev_t * 3298 spa_vdev_resilver_done_hunt(vdev_t *vd) 3299 { 3300 vdev_t *newvd, *oldvd; 3301 int c; 3302 3303 for (c = 0; c < vd->vdev_children; c++) { 3304 oldvd = spa_vdev_resilver_done_hunt(vd->vdev_child[c]); 3305 if (oldvd != NULL) 3306 return (oldvd); 3307 } 3308 3309 /* 3310 * Check for a completed replacement. 3311 */ 3312 if (vd->vdev_ops == &vdev_replacing_ops && vd->vdev_children == 2) { 3313 oldvd = vd->vdev_child[0]; 3314 newvd = vd->vdev_child[1]; 3315 3316 mutex_enter(&newvd->vdev_dtl_lock); 3317 if (newvd->vdev_dtl_map.sm_space == 0 && 3318 newvd->vdev_dtl_scrub.sm_space == 0) { 3319 mutex_exit(&newvd->vdev_dtl_lock); 3320 return (oldvd); 3321 } 3322 mutex_exit(&newvd->vdev_dtl_lock); 3323 } 3324 3325 /* 3326 * Check for a completed resilver with the 'unspare' flag set. 3327 */ 3328 if (vd->vdev_ops == &vdev_spare_ops && vd->vdev_children == 2) { 3329 newvd = vd->vdev_child[0]; 3330 oldvd = vd->vdev_child[1]; 3331 3332 mutex_enter(&newvd->vdev_dtl_lock); 3333 if (newvd->vdev_unspare && 3334 newvd->vdev_dtl_map.sm_space == 0 && 3335 newvd->vdev_dtl_scrub.sm_space == 0) { 3336 newvd->vdev_unspare = 0; 3337 mutex_exit(&newvd->vdev_dtl_lock); 3338 return (oldvd); 3339 } 3340 mutex_exit(&newvd->vdev_dtl_lock); 3341 } 3342 3343 return (NULL); 3344 } 3345 3346 static void 3347 spa_vdev_resilver_done(spa_t *spa) 3348 { 3349 vdev_t *vd; 3350 vdev_t *pvd; 3351 uint64_t guid; 3352 uint64_t pguid = 0; 3353 3354 spa_config_enter(spa, RW_READER, FTAG); 3355 3356 while ((vd = spa_vdev_resilver_done_hunt(spa->spa_root_vdev)) != NULL) { 3357 guid = vd->vdev_guid; 3358 /* 3359 * If we have just finished replacing a hot spared device, then 3360 * we need to detach the parent's first child (the original hot 3361 * spare) as well.
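 *
 * Editorial sketch (not part of the original source) of the vdev layout
 * this branch handles, i.e. a hot spare kicked in and the faulted disk
 * was then replaced:
 *
 *	        spare
 *	       /     \
 *	  replacing   hot spare  <- pguid, detached second
 *	   /      \
 *	  vd    new disk
 *	 (detached first)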
3362 */ 3363 pvd = vd->vdev_parent; 3364 if (pvd->vdev_parent->vdev_ops == &vdev_spare_ops && 3365 pvd->vdev_id == 0) { 3366 ASSERT(pvd->vdev_ops == &vdev_replacing_ops); 3367 ASSERT(pvd->vdev_parent->vdev_children == 2); 3368 pguid = pvd->vdev_parent->vdev_child[1]->vdev_guid; 3369 } 3370 spa_config_exit(spa, FTAG); 3371 if (spa_vdev_detach(spa, guid, B_TRUE) != 0) 3372 return; 3373 if (pguid != 0 && spa_vdev_detach(spa, pguid, B_TRUE) != 0) 3374 return; 3375 spa_config_enter(spa, RW_READER, FTAG); 3376 } 3377 3378 spa_config_exit(spa, FTAG); 3379 } 3380 3381 /* 3382 * Update the stored path for this vdev. Dirty the vdev configuration, relying 3383 * on spa_vdev_enter/exit() to synchronize the labels and cache. 3384 */ 3385 int 3386 spa_vdev_setpath(spa_t *spa, uint64_t guid, const char *newpath) 3387 { 3388 vdev_t *vd; 3389 uint64_t txg; 3390 3391 txg = spa_vdev_enter(spa); 3392 3393 if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL) { 3394 /* 3395 * Determine if this is a reference to a hot spare device. If 3396 * it is, update the path manually as there is no associated 3397 * vdev_t that can be synced to disk. 3398 */ 3399 nvlist_t **spares; 3400 uint_t i, nspares; 3401 3402 if (spa->spa_spares.sav_config != NULL) { 3403 VERIFY(nvlist_lookup_nvlist_array( 3404 spa->spa_spares.sav_config, ZPOOL_CONFIG_SPARES, 3405 &spares, &nspares) == 0); 3406 for (i = 0; i < nspares; i++) { 3407 uint64_t theguid; 3408 VERIFY(nvlist_lookup_uint64(spares[i], 3409 ZPOOL_CONFIG_GUID, &theguid) == 0); 3410 if (theguid == guid) { 3411 VERIFY(nvlist_add_string(spares[i], 3412 ZPOOL_CONFIG_PATH, newpath) == 0); 3413 spa_load_spares(spa); 3414 spa->spa_spares.sav_sync = B_TRUE; 3415 return (spa_vdev_exit(spa, NULL, txg, 3416 0)); 3417 } 3418 } 3419 } 3420 3421 return (spa_vdev_exit(spa, NULL, txg, ENOENT)); 3422 } 3423 3424 if (!vd->vdev_ops->vdev_op_leaf) 3425 return (spa_vdev_exit(spa, NULL, txg, ENOTSUP)); 3426 3427 spa_strfree(vd->vdev_path); 3428 vd->vdev_path = spa_strdup(newpath); 3429 3430 vdev_config_dirty(vd->vdev_top); 3431 3432 return (spa_vdev_exit(spa, NULL, txg, 0)); 3433 } 3434 3435 /* 3436 * ========================================================================== 3437 * SPA Scrubbing 3438 * ========================================================================== 3439 */ 3440 3441 int 3442 spa_scrub(spa_t *spa, pool_scrub_type_t type) 3443 { 3444 ASSERT(!spa_config_held(spa, RW_WRITER)); 3445 3446 if ((uint_t)type >= POOL_SCRUB_TYPES) 3447 return (ENOTSUP); 3448 3449 /* 3450 * If a resilver was requested, but there is no DTL on a 3451 * writeable leaf device, we have nothing to do. 
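 * (Editorial note, not in the original source: in that case we still post
 * SPA_ASYNC_RESILVER_DONE below so that any completed hot-spare replacement
 * is cleaned up by spa_vdev_resilver_done().)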
3452 */ 3453 if (type == POOL_SCRUB_RESILVER && 3454 !vdev_resilver_needed(spa->spa_root_vdev, NULL, NULL)) { 3455 spa_async_request(spa, SPA_ASYNC_RESILVER_DONE); 3456 return (0); 3457 } 3458 3459 if (type == POOL_SCRUB_EVERYTHING && 3460 spa->spa_dsl_pool->dp_scrub_func != SCRUB_FUNC_NONE && 3461 spa->spa_dsl_pool->dp_scrub_isresilver) 3462 return (EBUSY); 3463 3464 if (type == POOL_SCRUB_EVERYTHING || type == POOL_SCRUB_RESILVER) { 3465 return (dsl_pool_scrub_clean(spa->spa_dsl_pool)); 3466 } else if (type == POOL_SCRUB_NONE) { 3467 return (dsl_pool_scrub_cancel(spa->spa_dsl_pool)); 3468 } else { 3469 return (EINVAL); 3470 } 3471 } 3472 3473 /* 3474 * ========================================================================== 3475 * SPA async task processing 3476 * ========================================================================== 3477 */ 3478 3479 static void 3480 spa_async_remove(spa_t *spa, vdev_t *vd) 3481 { 3482 vdev_t *tvd; 3483 int c; 3484 3485 for (c = 0; c < vd->vdev_children; c++) { 3486 tvd = vd->vdev_child[c]; 3487 if (tvd->vdev_remove_wanted) { 3488 tvd->vdev_remove_wanted = 0; 3489 vdev_set_state(tvd, B_FALSE, VDEV_STATE_REMOVED, 3490 VDEV_AUX_NONE); 3491 vdev_clear(spa, tvd, B_TRUE); 3492 vdev_config_dirty(tvd->vdev_top); 3493 } 3494 spa_async_remove(spa, tvd); 3495 } 3496 } 3497 3498 static void 3499 spa_async_thread(spa_t *spa) 3500 { 3501 int tasks; 3502 uint64_t txg; 3503 3504 ASSERT(spa->spa_sync_on); 3505 3506 mutex_enter(&spa->spa_async_lock); 3507 tasks = spa->spa_async_tasks; 3508 spa->spa_async_tasks = 0; 3509 mutex_exit(&spa->spa_async_lock); 3510 3511 /* 3512 * See if the config needs to be updated. 3513 */ 3514 if (tasks & SPA_ASYNC_CONFIG_UPDATE) { 3515 mutex_enter(&spa_namespace_lock); 3516 spa_config_update(spa, SPA_CONFIG_UPDATE_POOL); 3517 mutex_exit(&spa_namespace_lock); 3518 } 3519 3520 /* 3521 * See if any devices need to be marked REMOVED. 3522 * 3523 * XXX - We avoid doing this when we are in 3524 * I/O failure state since spa_vdev_enter() grabs 3525 * the namespace lock and would not be able to obtain 3526 * the writer config lock. 3527 */ 3528 if (tasks & SPA_ASYNC_REMOVE && 3529 spa_state(spa) != POOL_STATE_IO_FAILURE) { 3530 txg = spa_vdev_enter(spa); 3531 spa_async_remove(spa, spa->spa_root_vdev); 3532 (void) spa_vdev_exit(spa, NULL, txg, 0); 3533 } 3534 3535 /* 3536 * If any devices are done replacing, detach them. 3537 */ 3538 if (tasks & SPA_ASYNC_RESILVER_DONE) 3539 spa_vdev_resilver_done(spa); 3540 3541 /* 3542 * Kick off a resilver. 3543 */ 3544 if (tasks & SPA_ASYNC_RESILVER) 3545 VERIFY(spa_scrub(spa, POOL_SCRUB_RESILVER) == 0); 3546 3547 /* 3548 * Let the world know that we're done. 
3549 */ 3550 mutex_enter(&spa->spa_async_lock); 3551 spa->spa_async_thread = NULL; 3552 cv_broadcast(&spa->spa_async_cv); 3553 mutex_exit(&spa->spa_async_lock); 3554 thread_exit(); 3555 } 3556 3557 void 3558 spa_async_suspend(spa_t *spa) 3559 { 3560 mutex_enter(&spa->spa_async_lock); 3561 spa->spa_async_suspended++; 3562 while (spa->spa_async_thread != NULL) 3563 cv_wait(&spa->spa_async_cv, &spa->spa_async_lock); 3564 mutex_exit(&spa->spa_async_lock); 3565 } 3566 3567 void 3568 spa_async_resume(spa_t *spa) 3569 { 3570 mutex_enter(&spa->spa_async_lock); 3571 ASSERT(spa->spa_async_suspended != 0); 3572 spa->spa_async_suspended--; 3573 mutex_exit(&spa->spa_async_lock); 3574 } 3575 3576 static void 3577 spa_async_dispatch(spa_t *spa) 3578 { 3579 mutex_enter(&spa->spa_async_lock); 3580 if (spa->spa_async_tasks && !spa->spa_async_suspended && 3581 spa->spa_async_thread == NULL && 3582 rootdir != NULL && !vn_is_readonly(rootdir)) 3583 spa->spa_async_thread = thread_create(NULL, 0, 3584 spa_async_thread, spa, 0, &p0, TS_RUN, maxclsyspri); 3585 mutex_exit(&spa->spa_async_lock); 3586 } 3587 3588 void 3589 spa_async_request(spa_t *spa, int task) 3590 { 3591 mutex_enter(&spa->spa_async_lock); 3592 spa->spa_async_tasks |= task; 3593 mutex_exit(&spa->spa_async_lock); 3594 } 3595 3596 /* 3597 * ========================================================================== 3598 * SPA syncing routines 3599 * ========================================================================== 3600 */ 3601 3602 static void 3603 spa_sync_deferred_frees(spa_t *spa, uint64_t txg) 3604 { 3605 bplist_t *bpl = &spa->spa_sync_bplist; 3606 dmu_tx_t *tx; 3607 blkptr_t blk; 3608 uint64_t itor = 0; 3609 zio_t *zio; 3610 int error; 3611 uint8_t c = 1; 3612 3613 zio = zio_root(spa, NULL, NULL, ZIO_FLAG_CONFIG_HELD); 3614 3615 while (bplist_iterate(bpl, &itor, &blk) == 0) 3616 zio_nowait(zio_free(zio, spa, txg, &blk, NULL, NULL)); 3617 3618 error = zio_wait(zio); 3619 ASSERT3U(error, ==, 0); 3620 3621 tx = dmu_tx_create_assigned(spa->spa_dsl_pool, txg); 3622 bplist_vacate(bpl, tx); 3623 3624 /* 3625 * Pre-dirty the first block so we sync to convergence faster. 3626 * (Usually only the first block is needed.) 3627 */ 3628 dmu_write(spa->spa_meta_objset, spa->spa_sync_bplist_obj, 0, 1, &c, tx); 3629 dmu_tx_commit(tx); 3630 } 3631 3632 static void 3633 spa_sync_nvlist(spa_t *spa, uint64_t obj, nvlist_t *nv, dmu_tx_t *tx) 3634 { 3635 char *packed = NULL; 3636 size_t nvsize = 0; 3637 dmu_buf_t *db; 3638 3639 VERIFY(nvlist_size(nv, &nvsize, NV_ENCODE_XDR) == 0); 3640 3641 packed = kmem_alloc(nvsize, KM_SLEEP); 3642 3643 VERIFY(nvlist_pack(nv, &packed, &nvsize, NV_ENCODE_XDR, 3644 KM_SLEEP) == 0); 3645 3646 dmu_write(spa->spa_meta_objset, obj, 0, nvsize, packed, tx); 3647 3648 kmem_free(packed, nvsize); 3649 3650 VERIFY(0 == dmu_bonus_hold(spa->spa_meta_objset, obj, FTAG, &db)); 3651 dmu_buf_will_dirty(db, tx); 3652 *(uint64_t *)db->db_data = nvsize; 3653 dmu_buf_rele(db, FTAG); 3654 } 3655 3656 static void 3657 spa_sync_aux_dev(spa_t *spa, spa_aux_vdev_t *sav, dmu_tx_t *tx, 3658 const char *config, const char *entry) 3659 { 3660 nvlist_t *nvroot; 3661 nvlist_t **list; 3662 int i; 3663 3664 if (!sav->sav_sync) 3665 return; 3666 3667 /* 3668 * Update the MOS nvlist describing the list of available devices. 3669 * spa_validate_aux() will have already made sure this nvlist is 3670 * valid and the vdevs are labeled appropriately. 
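 * (Editorial note, not in the original source: the array built below is
 * written out through spa_sync_nvlist() above, which packs it with
 * NV_ENCODE_XDR, dmu_write()s it into sav_object, and stores the packed
 * size in the object's bonus buffer.)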
3671 */ 3672 if (sav->sav_object == 0) { 3673 sav->sav_object = dmu_object_alloc(spa->spa_meta_objset, 3674 DMU_OT_PACKED_NVLIST, 1 << 14, DMU_OT_PACKED_NVLIST_SIZE, 3675 sizeof (uint64_t), tx); 3676 VERIFY(zap_update(spa->spa_meta_objset, 3677 DMU_POOL_DIRECTORY_OBJECT, entry, sizeof (uint64_t), 1, 3678 &sav->sav_object, tx) == 0); 3679 } 3680 3681 VERIFY(nvlist_alloc(&nvroot, NV_UNIQUE_NAME, KM_SLEEP) == 0); 3682 if (sav->sav_count == 0) { 3683 VERIFY(nvlist_add_nvlist_array(nvroot, config, NULL, 0) == 0); 3684 } else { 3685 list = kmem_alloc(sav->sav_count * sizeof (void *), KM_SLEEP); 3686 for (i = 0; i < sav->sav_count; i++) 3687 list[i] = vdev_config_generate(spa, sav->sav_vdevs[i], 3688 B_FALSE, B_FALSE, B_TRUE); 3689 VERIFY(nvlist_add_nvlist_array(nvroot, config, list, 3690 sav->sav_count) == 0); 3691 for (i = 0; i < sav->sav_count; i++) 3692 nvlist_free(list[i]); 3693 kmem_free(list, sav->sav_count * sizeof (void *)); 3694 } 3695 3696 spa_sync_nvlist(spa, sav->sav_object, nvroot, tx); 3697 nvlist_free(nvroot); 3698 3699 sav->sav_sync = B_FALSE; 3700 } 3701 3702 static void 3703 spa_sync_config_object(spa_t *spa, dmu_tx_t *tx) 3704 { 3705 nvlist_t *config; 3706 3707 if (list_is_empty(&spa->spa_dirty_list)) 3708 return; 3709 3710 config = spa_config_generate(spa, NULL, dmu_tx_get_txg(tx), B_FALSE); 3711 3712 if (spa->spa_config_syncing) 3713 nvlist_free(spa->spa_config_syncing); 3714 spa->spa_config_syncing = config; 3715 3716 spa_sync_nvlist(spa, spa->spa_config_object, config, tx); 3717 } 3718 3719 /* 3720 * Set zpool properties. 3721 */ 3722 static void 3723 spa_sync_props(void *arg1, void *arg2, cred_t *cr, dmu_tx_t *tx) 3724 { 3725 spa_t *spa = arg1; 3726 objset_t *mos = spa->spa_meta_objset; 3727 nvlist_t *nvp = arg2; 3728 nvpair_t *elem; 3729 uint64_t intval; 3730 char *strval; 3731 zpool_prop_t prop; 3732 const char *propname; 3733 zprop_type_t proptype; 3734 spa_config_dirent_t *dp; 3735 3736 elem = NULL; 3737 while ((elem = nvlist_next_nvpair(nvp, elem))) { 3738 switch (prop = zpool_name_to_prop(nvpair_name(elem))) { 3739 case ZPOOL_PROP_VERSION: 3740 /* 3741 * Only set version for non-zpool-creation cases 3742 * (set/import). spa_create() needs special care 3743 * for version setting. 3744 */ 3745 if (tx->tx_txg != TXG_INITIAL) { 3746 VERIFY(nvpair_value_uint64(elem, 3747 &intval) == 0); 3748 ASSERT(intval <= SPA_VERSION); 3749 ASSERT(intval >= spa_version(spa)); 3750 spa->spa_uberblock.ub_version = intval; 3751 vdev_config_dirty(spa->spa_root_vdev); 3752 } 3753 break; 3754 3755 case ZPOOL_PROP_ALTROOT: 3756 /* 3757 * 'altroot' is a non-persistent property. It should 3758 * have been set temporarily at creation or import time. 3759 */ 3760 ASSERT(spa->spa_root != NULL); 3761 break; 3762 3763 case ZPOOL_PROP_CACHEFILE: 3764 /* 3765 * 'cachefile' is a non-persistent property, but we post 3766 * an async request so that the config cache is 3767 * updated. 3768 */ 3769 VERIFY(nvpair_value_string(elem, &strval) == 0); 3770 3771 dp = kmem_alloc(sizeof (spa_config_dirent_t), 3772 KM_SLEEP); 3773 3774 if (strval[0] == '\0') 3775 dp->scd_path = spa_strdup(spa_config_path); 3776 else if (strcmp(strval, "none") == 0) 3777 dp->scd_path = NULL; 3778 else 3779 dp->scd_path = spa_strdup(strval); 3780 3781 list_insert_head(&spa->spa_config_list, dp); 3782 spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE); 3783 break; 3784 default: 3785 /* 3786 * Set pool property values in the poolprops mos object.
3787 */ 3788 mutex_enter(&spa->spa_props_lock); 3789 if (spa->spa_pool_props_object == 0) { 3790 objset_t *mos = spa->spa_meta_objset; 3791 3792 VERIFY((spa->spa_pool_props_object = 3793 zap_create(mos, DMU_OT_POOL_PROPS, 3794 DMU_OT_NONE, 0, tx)) > 0); 3795 3796 VERIFY(zap_update(mos, 3797 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_PROPS, 3798 8, 1, &spa->spa_pool_props_object, tx) 3799 == 0); 3800 } 3801 mutex_exit(&spa->spa_props_lock); 3802 3803 /* normalize the property name */ 3804 propname = zpool_prop_to_name(prop); 3805 proptype = zpool_prop_get_type(prop); 3806 3807 if (nvpair_type(elem) == DATA_TYPE_STRING) { 3808 ASSERT(proptype == PROP_TYPE_STRING); 3809 VERIFY(nvpair_value_string(elem, &strval) == 0); 3810 VERIFY(zap_update(mos, 3811 spa->spa_pool_props_object, propname, 3812 1, strlen(strval) + 1, strval, tx) == 0); 3813 3814 } else if (nvpair_type(elem) == DATA_TYPE_UINT64) { 3815 VERIFY(nvpair_value_uint64(elem, &intval) == 0); 3816 3817 if (proptype == PROP_TYPE_INDEX) { 3818 const char *unused; 3819 VERIFY(zpool_prop_index_to_string( 3820 prop, intval, &unused) == 0); 3821 } 3822 VERIFY(zap_update(mos, 3823 spa->spa_pool_props_object, propname, 3824 8, 1, &intval, tx) == 0); 3825 } else { 3826 ASSERT(0); /* not allowed */ 3827 } 3828 3829 switch (prop) { 3830 case ZPOOL_PROP_DELEGATION: 3831 spa->spa_delegation = intval; 3832 break; 3833 case ZPOOL_PROP_BOOTFS: 3834 spa->spa_bootfs = intval; 3835 break; 3836 case ZPOOL_PROP_FAILUREMODE: 3837 spa->spa_failmode = intval; 3838 break; 3839 default: 3840 break; 3841 } 3842 } 3843 3844 /* log internal history if this is not a zpool create */ 3845 if (spa_version(spa) >= SPA_VERSION_ZPOOL_HISTORY && 3846 tx->tx_txg != TXG_INITIAL) { 3847 spa_history_internal_log(LOG_POOL_PROPSET, 3848 spa, tx, cr, "%s %lld %s", 3849 nvpair_name(elem), intval, spa->spa_name); 3850 } 3851 } 3852 } 3853 3854 /* 3855 * Sync the specified transaction group. New blocks may be dirtied as 3856 * part of the process, so we iterate until it converges. 3857 */ 3858 void 3859 spa_sync(spa_t *spa, uint64_t txg) 3860 { 3861 dsl_pool_t *dp = spa->spa_dsl_pool; 3862 objset_t *mos = spa->spa_meta_objset; 3863 bplist_t *bpl = &spa->spa_sync_bplist; 3864 vdev_t *rvd = spa->spa_root_vdev; 3865 vdev_t *vd; 3866 dmu_tx_t *tx; 3867 int dirty_vdevs; 3868 3869 /* 3870 * Lock out configuration changes. 3871 */ 3872 spa_config_enter(spa, RW_READER, FTAG); 3873 3874 spa->spa_syncing_txg = txg; 3875 spa->spa_sync_pass = 0; 3876 3877 VERIFY(0 == bplist_open(bpl, mos, spa->spa_sync_bplist_obj)); 3878 3879 tx = dmu_tx_create_assigned(dp, txg); 3880 3881 /* 3882 * If we are upgrading to SPA_VERSION_RAIDZ_DEFLATE this txg, 3883 * set spa_deflate if we have no raid-z vdevs. 
3884 */ 3885 if (spa->spa_ubsync.ub_version < SPA_VERSION_RAIDZ_DEFLATE && 3886 spa->spa_uberblock.ub_version >= SPA_VERSION_RAIDZ_DEFLATE) { 3887 int i; 3888 3889 for (i = 0; i < rvd->vdev_children; i++) { 3890 vd = rvd->vdev_child[i]; 3891 if (vd->vdev_deflate_ratio != SPA_MINBLOCKSIZE) 3892 break; 3893 } 3894 if (i == rvd->vdev_children) { 3895 spa->spa_deflate = TRUE; 3896 VERIFY(0 == zap_add(spa->spa_meta_objset, 3897 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_DEFLATE, 3898 sizeof (uint64_t), 1, &spa->spa_deflate, tx)); 3899 } 3900 } 3901 3902 if (spa->spa_ubsync.ub_version < SPA_VERSION_ORIGIN && 3903 spa->spa_uberblock.ub_version >= SPA_VERSION_ORIGIN) { 3904 dsl_pool_create_origin(dp, tx); 3905 3906 /* Keeping the origin open increases spa_minref */ 3907 spa->spa_minref += 3; 3908 } 3909 3910 if (spa->spa_ubsync.ub_version < SPA_VERSION_NEXT_CLONES && 3911 spa->spa_uberblock.ub_version >= SPA_VERSION_NEXT_CLONES) { 3912 dsl_pool_upgrade_clones(dp, tx); 3913 } 3914 3915 /* 3916 * If anything has changed in this txg, push the deferred frees 3917 * from the previous txg. If not, leave them alone so that we 3918 * don't generate work on an otherwise idle system. 3919 */ 3920 if (!txg_list_empty(&dp->dp_dirty_datasets, txg) || 3921 !txg_list_empty(&dp->dp_dirty_dirs, txg) || 3922 !txg_list_empty(&dp->dp_sync_tasks, txg)) 3923 spa_sync_deferred_frees(spa, txg); 3924 3925 /* 3926 * Iterate to convergence. 3927 */ 3928 do { 3929 spa->spa_sync_pass++; 3930 3931 spa_sync_config_object(spa, tx); 3932 spa_sync_aux_dev(spa, &spa->spa_spares, tx, 3933 ZPOOL_CONFIG_SPARES, DMU_POOL_SPARES); 3934 spa_sync_aux_dev(spa, &spa->spa_l2cache, tx, 3935 ZPOOL_CONFIG_L2CACHE, DMU_POOL_L2CACHE); 3936 spa_errlog_sync(spa, txg); 3937 dsl_pool_sync(dp, txg); 3938 3939 dirty_vdevs = 0; 3940 while (vd = txg_list_remove(&spa->spa_vdev_txg_list, txg)) { 3941 vdev_sync(vd, txg); 3942 dirty_vdevs++; 3943 } 3944 3945 bplist_sync(bpl, tx); 3946 } while (dirty_vdevs); 3947 3948 bplist_close(bpl); 3949 3950 dprintf("txg %llu passes %d\n", txg, spa->spa_sync_pass); 3951 3952 /* 3953 * Rewrite the vdev configuration (which includes the uberblock) 3954 * to commit the transaction group. 3955 * 3956 * If there are no dirty vdevs, we sync the uberblock to a few 3957 * random top-level vdevs that are known to be visible in the 3958 * config cache (see spa_vdev_add() for details). If there *are* 3959 * dirty vdevs -- or if the sync to our random subset fails -- 3960 * then sync the uberblock to all vdevs. 3961 */ 3962 if (list_is_empty(&spa->spa_dirty_list)) { 3963 vdev_t *svd[SPA_DVAS_PER_BP]; 3964 int svdcount = 0; 3965 int children = rvd->vdev_children; 3966 int c0 = spa_get_random(children); 3967 int c; 3968 3969 for (c = 0; c < children; c++) { 3970 vd = rvd->vdev_child[(c0 + c) % children]; 3971 if (vd->vdev_ms_array == 0 || vd->vdev_islog) 3972 continue; 3973 svd[svdcount++] = vd; 3974 if (svdcount == SPA_DVAS_PER_BP) 3975 break; 3976 } 3977 vdev_config_sync(svd, svdcount, txg); 3978 } else { 3979 vdev_config_sync(rvd->vdev_child, rvd->vdev_children, txg); 3980 } 3981 dmu_tx_commit(tx); 3982 3983 /* 3984 * Clear the dirty config list. 3985 */ 3986 while ((vd = list_head(&spa->spa_dirty_list)) != NULL) 3987 vdev_config_clean(vd); 3988 3989 /* 3990 * Now that the new config has synced transactionally, 3991 * let it become visible to the config cache. 
3992 */ 3993 if (spa->spa_config_syncing != NULL) { 3994 spa_config_set(spa, spa->spa_config_syncing); 3995 spa->spa_config_txg = txg; 3996 spa->spa_config_syncing = NULL; 3997 } 3998 3999 spa->spa_traverse_wanted = B_TRUE; 4000 rw_enter(&spa->spa_traverse_lock, RW_WRITER); 4001 spa->spa_traverse_wanted = B_FALSE; 4002 spa->spa_ubsync = spa->spa_uberblock; 4003 rw_exit(&spa->spa_traverse_lock); 4004 4005 /* 4006 * Clean up the ZIL records for the synced txg. 4007 */ 4008 dsl_pool_zil_clean(dp); 4009 4010 /* 4011 * Update usable space statistics. 4012 */ 4013 while (vd = txg_list_remove(&spa->spa_vdev_txg_list, TXG_CLEAN(txg))) 4014 vdev_sync_done(vd, txg); 4015 4016 /* 4017 * It had better be the case that we didn't dirty anything 4018 * since vdev_config_sync(). 4019 */ 4020 ASSERT(txg_list_empty(&dp->dp_dirty_datasets, txg)); 4021 ASSERT(txg_list_empty(&dp->dp_dirty_dirs, txg)); 4022 ASSERT(txg_list_empty(&spa->spa_vdev_txg_list, txg)); 4023 ASSERT(bpl->bpl_queue == NULL); 4024 4025 spa_config_exit(spa, FTAG); 4026 4027 /* 4028 * If any async tasks have been requested, kick them off. 4029 */ 4030 spa_async_dispatch(spa); 4031 } 4032 4033 /* 4034 * Sync all pools. We don't want to hold the namespace lock across these 4035 * operations, so we take a reference on the spa_t and drop the lock during the 4036 * sync. 4037 */ 4038 void 4039 spa_sync_allpools(void) 4040 { 4041 spa_t *spa = NULL; 4042 mutex_enter(&spa_namespace_lock); 4043 while ((spa = spa_next(spa)) != NULL) { 4044 if (spa_state(spa) != POOL_STATE_ACTIVE) 4045 continue; 4046 spa_open_ref(spa, FTAG); 4047 mutex_exit(&spa_namespace_lock); 4048 txg_wait_synced(spa_get_dsl(spa), 0); 4049 mutex_enter(&spa_namespace_lock); 4050 spa_close(spa, FTAG); 4051 } 4052 mutex_exit(&spa_namespace_lock); 4053 } 4054 4055 /* 4056 * ========================================================================== 4057 * Miscellaneous routines 4058 * ========================================================================== 4059 */ 4060 4061 /* 4062 * Remove all pools in the system. 4063 */ 4064 void 4065 spa_evict_all(void) 4066 { 4067 spa_t *spa; 4068 4069 /* 4070 * Remove all cached state. All pools should be closed now, 4071 * so every spa in the AVL tree should be unreferenced. 4072 */ 4073 mutex_enter(&spa_namespace_lock); 4074 while ((spa = spa_next(NULL)) != NULL) { 4075 /* 4076 * Stop async tasks. The async thread may need to detach 4077 * a device that's been replaced, which requires grabbing 4078 * spa_namespace_lock, so we must drop it here. 
	spa->spa_traverse_wanted = B_TRUE;
	rw_enter(&spa->spa_traverse_lock, RW_WRITER);
	spa->spa_traverse_wanted = B_FALSE;
	spa->spa_ubsync = spa->spa_uberblock;
	rw_exit(&spa->spa_traverse_lock);

	/*
	 * Clean up the ZIL records for the synced txg.
	 */
	dsl_pool_zil_clean(dp);

	/*
	 * Update usable space statistics.
	 */
	while ((vd = txg_list_remove(&spa->spa_vdev_txg_list,
	    TXG_CLEAN(txg))) != NULL)
		vdev_sync_done(vd, txg);

	/*
	 * It had better be the case that we didn't dirty anything
	 * since vdev_config_sync().
	 */
	ASSERT(txg_list_empty(&dp->dp_dirty_datasets, txg));
	ASSERT(txg_list_empty(&dp->dp_dirty_dirs, txg));
	ASSERT(txg_list_empty(&spa->spa_vdev_txg_list, txg));
	ASSERT(bpl->bpl_queue == NULL);

	spa_config_exit(spa, FTAG);

	/*
	 * If any async tasks have been requested, kick them off.
	 */
	spa_async_dispatch(spa);
}

/*
 * Sync all pools.  We don't want to hold the namespace lock across these
 * operations, so we take a reference on the spa_t and drop the lock during the
 * sync.
 */
void
spa_sync_allpools(void)
{
	spa_t *spa = NULL;
	mutex_enter(&spa_namespace_lock);
	while ((spa = spa_next(spa)) != NULL) {
		if (spa_state(spa) != POOL_STATE_ACTIVE)
			continue;
		spa_open_ref(spa, FTAG);
		mutex_exit(&spa_namespace_lock);
		txg_wait_synced(spa_get_dsl(spa), 0);
		mutex_enter(&spa_namespace_lock);
		spa_close(spa, FTAG);
	}
	mutex_exit(&spa_namespace_lock);
}

/*
 * ==========================================================================
 * Miscellaneous routines
 * ==========================================================================
 */

/*
 * Remove all pools in the system.
 */
void
spa_evict_all(void)
{
	spa_t *spa;

	/*
	 * Remove all cached state.  All pools should be closed now,
	 * so every spa in the AVL tree should be unreferenced.
	 */
	mutex_enter(&spa_namespace_lock);
	while ((spa = spa_next(NULL)) != NULL) {
		/*
		 * Stop async tasks.  The async thread may need to detach
		 * a device that's been replaced, which requires grabbing
		 * spa_namespace_lock, so we must drop it here.
		 */
		spa_open_ref(spa, FTAG);
		mutex_exit(&spa_namespace_lock);
		spa_async_suspend(spa);
		mutex_enter(&spa_namespace_lock);
		spa_close(spa, FTAG);

		if (spa->spa_state != POOL_STATE_UNINITIALIZED) {
			spa_unload(spa);
			spa_deactivate(spa);
		}
		spa_remove(spa);
	}
	mutex_exit(&spa_namespace_lock);
}

vdev_t *
spa_lookup_by_guid(spa_t *spa, uint64_t guid, boolean_t l2cache)
{
	vdev_t *vd;
	int i;

	if ((vd = vdev_lookup_by_guid(spa->spa_root_vdev, guid)) != NULL)
		return (vd);

	if (l2cache) {
		for (i = 0; i < spa->spa_l2cache.sav_count; i++) {
			vd = spa->spa_l2cache.sav_vdevs[i];
			if (vd->vdev_guid == guid)
				return (vd);
		}
	}

	return (NULL);
}

void
spa_upgrade(spa_t *spa, uint64_t version)
{
	spa_config_enter(spa, RW_WRITER, FTAG);

	/*
	 * This should only be called for a non-faulted pool, and since a
	 * pool with a future (unsupported) version could never have been
	 * opened, the current version should never exceed SPA_VERSION.
	 */
	ASSERT(spa->spa_uberblock.ub_version <= SPA_VERSION);
	ASSERT(version >= spa->spa_uberblock.ub_version);

	spa->spa_uberblock.ub_version = version;
	vdev_config_dirty(spa->spa_root_vdev);

	spa_config_exit(spa, FTAG);
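
	/*
	 * vdev_config_dirty() above ensures that the new version number
	 * is written out with the next vdev configuration sync; wait
	 * for that txg so the upgrade is on stable storage before we
	 * return.
	 */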
	txg_wait_synced(spa_get_dsl(spa), 0);
}

boolean_t
spa_has_spare(spa_t *spa, uint64_t guid)
{
	int i;
	uint64_t spareguid;
	spa_aux_vdev_t *sav = &spa->spa_spares;

	for (i = 0; i < sav->sav_count; i++)
		if (sav->sav_vdevs[i]->vdev_guid == guid)
			return (B_TRUE);

	for (i = 0; i < sav->sav_npending; i++) {
		if (nvlist_lookup_uint64(sav->sav_pending[i], ZPOOL_CONFIG_GUID,
		    &spareguid) == 0 && spareguid == guid)
			return (B_TRUE);
	}

	return (B_FALSE);
}

/*
 * Check if a pool has an active shared spare device.
 * Note: the reference count of an active spare is 2: once as a spare and
 * once as a replacing vdev.
 */
static boolean_t
spa_has_active_shared_spare(spa_t *spa)
{
	int i, refcnt;
	uint64_t pool;
	spa_aux_vdev_t *sav = &spa->spa_spares;

	for (i = 0; i < sav->sav_count; i++) {
		if (spa_spare_exists(sav->sav_vdevs[i]->vdev_guid, &pool,
		    &refcnt) && pool != 0ULL && pool == spa_guid(spa) &&
		    refcnt > 2)
			return (B_TRUE);
	}

	return (B_FALSE);
}

/*
 * Post a sysevent corresponding to the given event.  The 'name' must be one of
 * the event definitions in sys/sysevent/eventdefs.h.  The payload will be
 * filled in from the spa and (optionally) the vdev.  This doesn't do anything
 * in the userland libzpool, as we don't want consumers to misinterpret ztest
 * or zdb as real changes.
 */
void
spa_event_notify(spa_t *spa, vdev_t *vd, const char *name)
{
#ifdef _KERNEL
	sysevent_t		*ev;
	sysevent_attr_list_t	*attr = NULL;
	sysevent_value_t	value;
	sysevent_id_t		eid;

	ev = sysevent_alloc(EC_ZFS, (char *)name, SUNW_KERN_PUB "zfs",
	    SE_SLEEP);

	value.value_type = SE_DATA_TYPE_STRING;
	value.value.sv_string = spa_name(spa);
	if (sysevent_add_attr(&attr, ZFS_EV_POOL_NAME, &value, SE_SLEEP) != 0)
		goto done;

	value.value_type = SE_DATA_TYPE_UINT64;
	value.value.sv_uint64 = spa_guid(spa);
	if (sysevent_add_attr(&attr, ZFS_EV_POOL_GUID, &value, SE_SLEEP) != 0)
		goto done;

	if (vd) {
		value.value_type = SE_DATA_TYPE_UINT64;
		value.value.sv_uint64 = vd->vdev_guid;
		if (sysevent_add_attr(&attr, ZFS_EV_VDEV_GUID, &value,
		    SE_SLEEP) != 0)
			goto done;

		if (vd->vdev_path) {
			value.value_type = SE_DATA_TYPE_STRING;
			value.value.sv_string = vd->vdev_path;
			if (sysevent_add_attr(&attr, ZFS_EV_VDEV_PATH,
			    &value, SE_SLEEP) != 0)
				goto done;
		}
	}

	if (sysevent_attach_attributes(ev, attr) != 0)
		goto done;
	attr = NULL;

	(void) log_sysevent(ev, SE_SLEEP, &eid);

done:
	if (attr)
		sysevent_free_attr(attr);
	sysevent_free(ev);
#endif
}
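
/*
 * Usage sketch (not a caller in this file): to announce that a resilver
 * has completed, a caller might post
 *
 *	spa_event_notify(spa, NULL, ESC_ZFS_RESILVER_FINISH);
 *
 * ESC_ZFS_RESILVER_FINISH is assumed here to be one of the EC_ZFS event
 * definitions in sys/sysevent/eventdefs.h; userland consumers such as
 * syseventd plugins or FMA agents can subscribe to the resulting sysevent.
 */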