/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M% %I% %E% SMI"

/*
 * This file contains all the routines used when modifying on-disk SPA state.
 * This includes opening, importing, destroying, exporting a pool, and syncing a
 * pool.
 */

#include <sys/zfs_context.h>
#include <sys/fm/fs/zfs.h>
#include <sys/spa_impl.h>
#include <sys/zio.h>
#include <sys/zio_checksum.h>
#include <sys/zio_compress.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/zap.h>
#include <sys/zil.h>
#include <sys/vdev_impl.h>
#include <sys/metaslab.h>
#include <sys/uberblock_impl.h>
#include <sys/txg.h>
#include <sys/avl.h>
#include <sys/dmu_traverse.h>
#include <sys/dmu_objset.h>
#include <sys/unique.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_synctask.h>
#include <sys/fs/zfs.h>
#include <sys/arc.h>
#include <sys/callb.h>
#include <sys/systeminfo.h>
#include <sys/sunddi.h>
#include <sys/spa_boot.h>

#include "zfs_prop.h"
#include "zfs_comutil.h"

int zio_taskq_threads = 8;

static void spa_sync_props(void *arg1, void *arg2, cred_t *cr, dmu_tx_t *tx);

/*
 * ==========================================================================
 * SPA properties routines
 * ==========================================================================
 */

/*
 * Add a (source=src, propname=propval) list to an nvlist.
 */
static void
spa_prop_add_list(nvlist_t *nvl, zpool_prop_t prop, char *strval,
    uint64_t intval, zprop_source_t src)
{
	const char *propname = zpool_prop_to_name(prop);
	nvlist_t *propval;

	VERIFY(nvlist_alloc(&propval, NV_UNIQUE_NAME, KM_SLEEP) == 0);
	VERIFY(nvlist_add_uint64(propval, ZPROP_SOURCE, src) == 0);

	if (strval != NULL)
		VERIFY(nvlist_add_string(propval, ZPROP_VALUE, strval) == 0);
	else
		VERIFY(nvlist_add_uint64(propval, ZPROP_VALUE, intval) == 0);

	VERIFY(nvlist_add_nvlist(nvl, propname, propval) == 0);
	nvlist_free(propval);
}

/*
 * Get property values from the spa configuration.
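 * These are the read-only properties (name, size, used, available,
 * capacity, guid, health) plus the settable properties that are not
 * stored in the pool property object (version, altroot, cachefile).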
 */
static void
spa_prop_get_config(spa_t *spa, nvlist_t **nvp)
{
	uint64_t size = spa_get_space(spa);
	uint64_t used = spa_get_alloc(spa);
	uint64_t cap, version;
	zprop_source_t src = ZPROP_SRC_NONE;
	spa_config_dirent_t *dp;

	/*
	 * readonly properties
	 */
	spa_prop_add_list(*nvp, ZPOOL_PROP_NAME, spa->spa_name, 0, src);
	spa_prop_add_list(*nvp, ZPOOL_PROP_SIZE, NULL, size, src);
	spa_prop_add_list(*nvp, ZPOOL_PROP_USED, NULL, used, src);
	spa_prop_add_list(*nvp, ZPOOL_PROP_AVAILABLE, NULL, size - used, src);

	cap = (size == 0) ? 0 : (used * 100 / size);
	spa_prop_add_list(*nvp, ZPOOL_PROP_CAPACITY, NULL, cap, src);

	spa_prop_add_list(*nvp, ZPOOL_PROP_GUID, NULL, spa_guid(spa), src);
	spa_prop_add_list(*nvp, ZPOOL_PROP_HEALTH, NULL,
	    spa->spa_root_vdev->vdev_state, src);

	/*
	 * settable properties that are not stored in the pool property object.
	 */
	version = spa_version(spa);
	if (version == zpool_prop_default_numeric(ZPOOL_PROP_VERSION))
		src = ZPROP_SRC_DEFAULT;
	else
		src = ZPROP_SRC_LOCAL;
	spa_prop_add_list(*nvp, ZPOOL_PROP_VERSION, NULL, version, src);

	if (spa->spa_root != NULL)
		spa_prop_add_list(*nvp, ZPOOL_PROP_ALTROOT, spa->spa_root,
		    0, ZPROP_SRC_LOCAL);

	if ((dp = list_head(&spa->spa_config_list)) != NULL) {
		if (dp->scd_path == NULL) {
			spa_prop_add_list(*nvp, ZPOOL_PROP_CACHEFILE,
			    "none", 0, ZPROP_SRC_LOCAL);
		} else if (strcmp(dp->scd_path, spa_config_path) != 0) {
			spa_prop_add_list(*nvp, ZPOOL_PROP_CACHEFILE,
			    dp->scd_path, 0, ZPROP_SRC_LOCAL);
		}
	}
}

/*
 * Get zpool property values.
 */
int
spa_prop_get(spa_t *spa, nvlist_t **nvp)
{
	zap_cursor_t zc;
	zap_attribute_t za;
	objset_t *mos = spa->spa_meta_objset;
	int err;

	VERIFY(nvlist_alloc(nvp, NV_UNIQUE_NAME, KM_SLEEP) == 0);

	/*
	 * Get properties from the spa config.
	 */
	spa_prop_get_config(spa, nvp);

	mutex_enter(&spa->spa_props_lock);
	/* If there is no pool property object, there are no more props to get. */
	if (spa->spa_pool_props_object == 0) {
		mutex_exit(&spa->spa_props_lock);
		return (0);
	}

	/*
	 * Get properties from the MOS pool property object.
 */
	for (zap_cursor_init(&zc, mos, spa->spa_pool_props_object);
	    (err = zap_cursor_retrieve(&zc, &za)) == 0;
	    zap_cursor_advance(&zc)) {
		uint64_t intval = 0;
		char *strval = NULL;
		zprop_source_t src = ZPROP_SRC_DEFAULT;
		zpool_prop_t prop;

		if ((prop = zpool_name_to_prop(za.za_name)) == ZPROP_INVAL)
			continue;

		switch (za.za_integer_length) {
		case 8:
			/* integer property */
			if (za.za_first_integer !=
			    zpool_prop_default_numeric(prop))
				src = ZPROP_SRC_LOCAL;

			if (prop == ZPOOL_PROP_BOOTFS) {
				dsl_pool_t *dp;
				dsl_dataset_t *ds = NULL;

				dp = spa_get_dsl(spa);
				rw_enter(&dp->dp_config_rwlock, RW_READER);
				if (err = dsl_dataset_hold_obj(dp,
				    za.za_first_integer, FTAG, &ds)) {
					rw_exit(&dp->dp_config_rwlock);
					break;
				}

				strval = kmem_alloc(
				    MAXNAMELEN + strlen(MOS_DIR_NAME) + 1,
				    KM_SLEEP);
				dsl_dataset_name(ds, strval);
				dsl_dataset_rele(ds, FTAG);
				rw_exit(&dp->dp_config_rwlock);
			} else {
				strval = NULL;
				intval = za.za_first_integer;
			}

			spa_prop_add_list(*nvp, prop, strval, intval, src);

			if (strval != NULL)
				kmem_free(strval,
				    MAXNAMELEN + strlen(MOS_DIR_NAME) + 1);

			break;

		case 1:
			/* string property */
			strval = kmem_alloc(za.za_num_integers, KM_SLEEP);
			err = zap_lookup(mos, spa->spa_pool_props_object,
			    za.za_name, 1, za.za_num_integers, strval);
			if (err) {
				kmem_free(strval, za.za_num_integers);
				break;
			}
			spa_prop_add_list(*nvp, prop, strval, 0, src);
			kmem_free(strval, za.za_num_integers);
			break;

		default:
			break;
		}
	}
	zap_cursor_fini(&zc);
	mutex_exit(&spa->spa_props_lock);
out:
	if (err && err != ENOENT) {
		nvlist_free(*nvp);
		*nvp = NULL;
		return (err);
	}

	return (0);
}

/*
 * Validate the given pool properties nvlist and modify the list
 * for the property values to be set.
 */
static int
spa_prop_validate(spa_t *spa, nvlist_t *props)
{
	nvpair_t *elem;
	int error = 0, reset_bootfs = 0;
	uint64_t objnum;

	elem = NULL;
	while ((elem = nvlist_next_nvpair(props, elem)) != NULL) {
		zpool_prop_t prop;
		char *propname, *strval;
		uint64_t intval;
		vdev_t *rvdev;
		char *vdev_type;
		objset_t *os;
		char *slash;

		propname = nvpair_name(elem);

		if ((prop = zpool_name_to_prop(propname)) == ZPROP_INVAL)
			return (EINVAL);

		switch (prop) {
		case ZPOOL_PROP_VERSION:
			error = nvpair_value_uint64(elem, &intval);
			if (!error &&
			    (intval < spa_version(spa) || intval > SPA_VERSION))
				error = EINVAL;
			break;

		case ZPOOL_PROP_DELEGATION:
		case ZPOOL_PROP_AUTOREPLACE:
			error = nvpair_value_uint64(elem, &intval);
			if (!error && intval > 1)
				error = EINVAL;
			break;

		case ZPOOL_PROP_BOOTFS:
			if (spa_version(spa) < SPA_VERSION_BOOTFS) {
				error = ENOTSUP;
				break;
			}

			/*
			 * A bootable filesystem cannot be on a RAIDZ pool
			 * nor a striped pool with more than 1 device.
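			 * For example, a pool whose single top-level vdev is a
			 * mirror is acceptable, while any raidz pool, or a pool
			 * striped across two or more top-level vdevs, is
			 * rejected with ENOTSUP.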
			 */
			rvdev = spa->spa_root_vdev;
			vdev_type =
			    rvdev->vdev_child[0]->vdev_ops->vdev_op_type;
			if (rvdev->vdev_children > 1 ||
			    strcmp(vdev_type, VDEV_TYPE_RAIDZ) == 0 ||
			    strcmp(vdev_type, VDEV_TYPE_MISSING) == 0) {
				error = ENOTSUP;
				break;
			}

			reset_bootfs = 1;

			error = nvpair_value_string(elem, &strval);

			if (!error) {
				if (strval == NULL || strval[0] == '\0') {
					objnum = zpool_prop_default_numeric(
					    ZPOOL_PROP_BOOTFS);
					break;
				}

				if (error = dmu_objset_open(strval, DMU_OST_ZFS,
				    DS_MODE_USER | DS_MODE_READONLY, &os))
					break;
				objnum = dmu_objset_id(os);
				dmu_objset_close(os);
			}
			break;
		case ZPOOL_PROP_FAILUREMODE:
			error = nvpair_value_uint64(elem, &intval);
			if (!error && (intval < ZIO_FAILURE_MODE_WAIT ||
			    intval > ZIO_FAILURE_MODE_PANIC))
				error = EINVAL;

			/*
			 * This is a special case which only occurs when
			 * the pool has completely failed. This allows
			 * the user to change the in-core failmode property
			 * without syncing it out to disk (I/Os might
			 * currently be blocked). We do this by returning
			 * EIO to the caller (spa_prop_set) to trick it
			 * into thinking we encountered a property validation
			 * error.
			 */
			if (!error && spa_state(spa) == POOL_STATE_IO_FAILURE) {
				spa->spa_failmode = intval;
				error = EIO;
			}
			break;

		case ZPOOL_PROP_CACHEFILE:
			if ((error = nvpair_value_string(elem, &strval)) != 0)
				break;

			if (strval[0] == '\0')
				break;

			if (strcmp(strval, "none") == 0)
				break;

			if (strval[0] != '/') {
				error = EINVAL;
				break;
			}

			slash = strrchr(strval, '/');
			ASSERT(slash != NULL);

			if (slash[1] == '\0' || strcmp(slash, "/.") == 0 ||
			    strcmp(slash, "/..") == 0)
				error = EINVAL;
			break;
		}

		if (error)
			break;
	}

	if (!error && reset_bootfs) {
		error = nvlist_remove(props,
		    zpool_prop_to_name(ZPOOL_PROP_BOOTFS), DATA_TYPE_STRING);

		if (!error) {
			error = nvlist_add_uint64(props,
			    zpool_prop_to_name(ZPOOL_PROP_BOOTFS), objnum);
		}
	}

	return (error);
}

int
spa_prop_set(spa_t *spa, nvlist_t *nvp)
{
	int error;

	if ((error = spa_prop_validate(spa, nvp)) != 0)
		return (error);

	return (dsl_sync_task_do(spa_get_dsl(spa), NULL, spa_sync_props,
	    spa, nvp, 3));
}

/*
 * If the bootfs property value is dsobj, clear it.
 */
void
spa_prop_clear_bootfs(spa_t *spa, uint64_t dsobj, dmu_tx_t *tx)
{
	if (spa->spa_bootfs == dsobj && spa->spa_pool_props_object != 0) {
		VERIFY(zap_remove(spa->spa_meta_objset,
		    spa->spa_pool_props_object,
		    zpool_prop_to_name(ZPOOL_PROP_BOOTFS), tx) == 0);
		spa->spa_bootfs = 0;
	}
}

/*
 * ==========================================================================
 * SPA state manipulation (open/create/destroy/import/export)
 * ==========================================================================
 */

static int
spa_error_entry_compare(const void *a, const void *b)
{
	spa_error_entry_t *sa = (spa_error_entry_t *)a;
	spa_error_entry_t *sb = (spa_error_entry_t *)b;
	int ret;

	ret = bcmp(&sa->se_bookmark, &sb->se_bookmark,
	    sizeof (zbookmark_t));

	if (ret < 0)
		return (-1);
	else if (ret > 0)
		return (1);
	else
		return (0);
}

/*
 * Utility function which retrieves copies of the current logs and
 * re-initializes them in the process.
 */
void
spa_get_errlists(spa_t *spa, avl_tree_t *last, avl_tree_t *scrub)
{
	ASSERT(MUTEX_HELD(&spa->spa_errlist_lock));

	bcopy(&spa->spa_errlist_last, last, sizeof (avl_tree_t));
	bcopy(&spa->spa_errlist_scrub, scrub, sizeof (avl_tree_t));

	avl_create(&spa->spa_errlist_scrub,
	    spa_error_entry_compare, sizeof (spa_error_entry_t),
	    offsetof(spa_error_entry_t, se_avl));
	avl_create(&spa->spa_errlist_last,
	    spa_error_entry_compare, sizeof (spa_error_entry_t),
	    offsetof(spa_error_entry_t, se_avl));
}

/*
 * Activate an uninitialized pool.
 */
static void
spa_activate(spa_t *spa)
{
	int t;

	ASSERT(spa->spa_state == POOL_STATE_UNINITIALIZED);

	spa->spa_state = POOL_STATE_ACTIVE;

	spa->spa_normal_class = metaslab_class_create();
	spa->spa_log_class = metaslab_class_create();

	for (t = 0; t < ZIO_TYPES; t++) {
		spa->spa_zio_issue_taskq[t] = taskq_create("spa_zio_issue",
		    zio_taskq_threads, maxclsyspri, 50, INT_MAX,
		    TASKQ_PREPOPULATE);
		spa->spa_zio_intr_taskq[t] = taskq_create("spa_zio_intr",
		    zio_taskq_threads, maxclsyspri, 50, INT_MAX,
		    TASKQ_PREPOPULATE);
	}

	list_create(&spa->spa_dirty_list, sizeof (vdev_t),
	    offsetof(vdev_t, vdev_dirty_node));
	list_create(&spa->spa_zio_list, sizeof (zio_t),
	    offsetof(zio_t, zio_link_node));

	txg_list_create(&spa->spa_vdev_txg_list,
	    offsetof(struct vdev, vdev_txg_node));

	avl_create(&spa->spa_errlist_scrub,
	    spa_error_entry_compare, sizeof (spa_error_entry_t),
	    offsetof(spa_error_entry_t, se_avl));
	avl_create(&spa->spa_errlist_last,
	    spa_error_entry_compare, sizeof (spa_error_entry_t),
	    offsetof(spa_error_entry_t, se_avl));
}

/*
 * Opposite of spa_activate().
 */
static void
spa_deactivate(spa_t *spa)
{
	int t;

	ASSERT(spa->spa_sync_on == B_FALSE);
	ASSERT(spa->spa_dsl_pool == NULL);
	ASSERT(spa->spa_root_vdev == NULL);

	ASSERT(spa->spa_state != POOL_STATE_UNINITIALIZED);

	txg_list_destroy(&spa->spa_vdev_txg_list);

	list_destroy(&spa->spa_dirty_list);
	list_destroy(&spa->spa_zio_list);

	for (t = 0; t < ZIO_TYPES; t++) {
		taskq_destroy(spa->spa_zio_issue_taskq[t]);
		taskq_destroy(spa->spa_zio_intr_taskq[t]);
		spa->spa_zio_issue_taskq[t] = NULL;
		spa->spa_zio_intr_taskq[t] = NULL;
	}

	metaslab_class_destroy(spa->spa_normal_class);
	spa->spa_normal_class = NULL;

	metaslab_class_destroy(spa->spa_log_class);
	spa->spa_log_class = NULL;

	/*
	 * If this was part of an import or the open otherwise failed, we may
	 * still have errors left in the queues. Empty them just in case.
	 */
	spa_errlog_drain(spa);

	avl_destroy(&spa->spa_errlist_scrub);
	avl_destroy(&spa->spa_errlist_last);

	spa->spa_state = POOL_STATE_UNINITIALIZED;
}

/*
 * Verify a pool configuration, and construct the vdev tree appropriately. This
 * will create all the necessary vdevs in the appropriate layout, with each vdev
 * in the CLOSED state. This will prep the pool before open/creation/import.
 * All vdev validation is done by the vdev_alloc() routine.
 */
static int
spa_config_parse(spa_t *spa, vdev_t **vdp, nvlist_t *nv, vdev_t *parent,
    uint_t id, int atype)
{
	nvlist_t **child;
	uint_t c, children;
	int error;

	if ((error = vdev_alloc(spa, vdp, nv, parent, id, atype)) != 0)
		return (error);

	if ((*vdp)->vdev_ops->vdev_op_leaf)
		return (0);

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) != 0) {
		vdev_free(*vdp);
		*vdp = NULL;
		return (EINVAL);
	}

	for (c = 0; c < children; c++) {
		vdev_t *vd;
		if ((error = spa_config_parse(spa, &vd, child[c], *vdp, c,
		    atype)) != 0) {
			vdev_free(*vdp);
			*vdp = NULL;
			return (error);
		}
	}

	ASSERT(*vdp != NULL);

	return (0);
}

/*
 * Opposite of spa_load().
 */
static void
spa_unload(spa_t *spa)
{
	int i;

	/*
	 * Stop async tasks.
	 */
	spa_async_suspend(spa);

	/*
	 * Stop syncing.
	 */
	if (spa->spa_sync_on) {
		txg_sync_stop(spa->spa_dsl_pool);
		spa->spa_sync_on = B_FALSE;
	}

	/*
	 * Wait for any outstanding prefetch I/O to complete.
	 */
	spa_config_enter(spa, RW_WRITER, FTAG);
	spa_config_exit(spa, FTAG);

	/*
	 * Drop and purge level 2 cache
	 */
	spa_l2cache_drop(spa);

	/*
	 * Close the dsl pool.
	 */
	if (spa->spa_dsl_pool) {
		dsl_pool_close(spa->spa_dsl_pool);
		spa->spa_dsl_pool = NULL;
	}

	/*
	 * Close all vdevs.
 */
	if (spa->spa_root_vdev)
		vdev_free(spa->spa_root_vdev);
	ASSERT(spa->spa_root_vdev == NULL);

	for (i = 0; i < spa->spa_spares.sav_count; i++)
		vdev_free(spa->spa_spares.sav_vdevs[i]);
	if (spa->spa_spares.sav_vdevs) {
		kmem_free(spa->spa_spares.sav_vdevs,
		    spa->spa_spares.sav_count * sizeof (void *));
		spa->spa_spares.sav_vdevs = NULL;
	}
	if (spa->spa_spares.sav_config) {
		nvlist_free(spa->spa_spares.sav_config);
		spa->spa_spares.sav_config = NULL;
	}

	for (i = 0; i < spa->spa_l2cache.sav_count; i++)
		vdev_free(spa->spa_l2cache.sav_vdevs[i]);
	if (spa->spa_l2cache.sav_vdevs) {
		kmem_free(spa->spa_l2cache.sav_vdevs,
		    spa->spa_l2cache.sav_count * sizeof (void *));
		spa->spa_l2cache.sav_vdevs = NULL;
	}
	if (spa->spa_l2cache.sav_config) {
		nvlist_free(spa->spa_l2cache.sav_config);
		spa->spa_l2cache.sav_config = NULL;
	}

	spa->spa_async_suspended = 0;
}

/*
 * Load (or re-load) the current list of vdevs describing the active spares for
 * this pool. When this is called, we have some form of basic information in
 * 'spa_spares.sav_config'. We parse this into vdevs, try to open them, and
 * then re-generate a more complete list including status information.
 */
static void
spa_load_spares(spa_t *spa)
{
	nvlist_t **spares;
	uint_t nspares;
	int i;
	vdev_t *vd, *tvd;

	/*
	 * First, close and free any existing spare vdevs.
	 */
	for (i = 0; i < spa->spa_spares.sav_count; i++) {
		vd = spa->spa_spares.sav_vdevs[i];

		/* Undo the call to spa_activate() below */
		if ((tvd = spa_lookup_by_guid(spa, vd->vdev_guid,
		    B_FALSE)) != NULL && tvd->vdev_isspare)
			spa_spare_remove(tvd);
		vdev_close(vd);
		vdev_free(vd);
	}

	if (spa->spa_spares.sav_vdevs)
		kmem_free(spa->spa_spares.sav_vdevs,
		    spa->spa_spares.sav_count * sizeof (void *));

	if (spa->spa_spares.sav_config == NULL)
		nspares = 0;
	else
		VERIFY(nvlist_lookup_nvlist_array(spa->spa_spares.sav_config,
		    ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0);

	spa->spa_spares.sav_count = (int)nspares;
	spa->spa_spares.sav_vdevs = NULL;

	if (nspares == 0)
		return;

	/*
	 * Construct the array of vdevs, opening them to get status in the
	 * process. For each spare, there are potentially two different vdev_t
	 * structures associated with it: one in the list of spares (used only
	 * for basic validation purposes) and one in the active vdev
	 * configuration (if it's spared in). During this phase we open and
	 * validate each vdev on the spare list. If the vdev also exists in the
	 * active configuration, then we also mark this vdev as an active spare.
	 */
	spa->spa_spares.sav_vdevs = kmem_alloc(nspares * sizeof (void *),
	    KM_SLEEP);
	for (i = 0; i < spa->spa_spares.sav_count; i++) {
		VERIFY(spa_config_parse(spa, &vd, spares[i], NULL, 0,
		    VDEV_ALLOC_SPARE) == 0);
		ASSERT(vd != NULL);

		spa->spa_spares.sav_vdevs[i] = vd;

		if ((tvd = spa_lookup_by_guid(spa, vd->vdev_guid,
		    B_FALSE)) != NULL) {
			if (!tvd->vdev_isspare)
				spa_spare_add(tvd);

			/*
			 * We only mark the spare active if we were successfully
			 * able to load the vdev. Otherwise, importing a pool
			 * with a bad active spare would result in strange
			 * behavior, because multiple pools would think the
			 * spare is actively in use.
			 *
			 * There is a vulnerability here to an equally bizarre
			 * circumstance, where a dead active spare is later
			 * brought back to life (onlined or otherwise). Given
			 * the rarity of this scenario, and the extra complexity
			 * it adds, we ignore the possibility.
			 */
			if (!vdev_is_dead(tvd))
				spa_spare_activate(tvd);
		}

		if (vdev_open(vd) != 0)
			continue;

		vd->vdev_top = vd;
		if (vdev_validate_aux(vd) == 0)
			spa_spare_add(vd);
	}

	/*
	 * Recompute the stashed list of spares, with status information
	 * this time.
	 */
	VERIFY(nvlist_remove(spa->spa_spares.sav_config, ZPOOL_CONFIG_SPARES,
	    DATA_TYPE_NVLIST_ARRAY) == 0);

	spares = kmem_alloc(spa->spa_spares.sav_count * sizeof (void *),
	    KM_SLEEP);
	for (i = 0; i < spa->spa_spares.sav_count; i++)
		spares[i] = vdev_config_generate(spa,
		    spa->spa_spares.sav_vdevs[i], B_TRUE, B_TRUE, B_FALSE);
	VERIFY(nvlist_add_nvlist_array(spa->spa_spares.sav_config,
	    ZPOOL_CONFIG_SPARES, spares, spa->spa_spares.sav_count) == 0);
	for (i = 0; i < spa->spa_spares.sav_count; i++)
		nvlist_free(spares[i]);
	kmem_free(spares, spa->spa_spares.sav_count * sizeof (void *));
}

/*
 * Load (or re-load) the current list of vdevs describing the active l2cache for
 * this pool. When this is called, we have some form of basic information in
 * 'spa_l2cache.sav_config'. We parse this into vdevs, try to open them, and
 * then re-generate a more complete list including status information.
 * Devices which are already active have their details maintained, and are
 * not re-opened.
 */
static void
spa_load_l2cache(spa_t *spa)
{
	nvlist_t **l2cache;
	uint_t nl2cache;
	int i, j, oldnvdevs;
	uint64_t guid, size;
	vdev_t *vd, **oldvdevs, **newvdevs;
	spa_aux_vdev_t *sav = &spa->spa_l2cache;

	if (sav->sav_config != NULL) {
		VERIFY(nvlist_lookup_nvlist_array(sav->sav_config,
		    ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0);
		newvdevs = kmem_alloc(nl2cache * sizeof (void *), KM_SLEEP);
	} else {
		nl2cache = 0;
	}

	oldvdevs = sav->sav_vdevs;
	oldnvdevs = sav->sav_count;
	sav->sav_vdevs = NULL;
	sav->sav_count = 0;

	/*
	 * Process new nvlist of vdevs.
	 */
	for (i = 0; i < nl2cache; i++) {
		VERIFY(nvlist_lookup_uint64(l2cache[i], ZPOOL_CONFIG_GUID,
		    &guid) == 0);

		newvdevs[i] = NULL;
		for (j = 0; j < oldnvdevs; j++) {
			vd = oldvdevs[j];
			if (vd != NULL && guid == vd->vdev_guid) {
				/*
				 * Retain previous vdev for add/remove ops.
				 */
				newvdevs[i] = vd;
				oldvdevs[j] = NULL;
				break;
			}
		}

		if (newvdevs[i] == NULL) {
			/*
			 * Create new vdev
			 */
			VERIFY(spa_config_parse(spa, &vd, l2cache[i], NULL, 0,
			    VDEV_ALLOC_L2CACHE) == 0);
			ASSERT(vd != NULL);
			newvdevs[i] = vd;

			/*
			 * Commit this vdev as an l2cache device,
			 * even if it fails to open.
			 */
			spa_l2cache_add(vd);

			vd->vdev_top = vd;
			vd->vdev_aux = sav;

			spa_l2cache_activate(vd);

			if (vdev_open(vd) != 0)
				continue;

			(void) vdev_validate_aux(vd);

			if (!vdev_is_dead(vd)) {
				size = vdev_get_rsize(vd);
				l2arc_add_vdev(spa, vd,
				    VDEV_LABEL_START_SIZE,
				    size - VDEV_LABEL_START_SIZE);
			}
		}
	}

	/*
	 * Purge vdevs that were dropped
	 */
	for (i = 0; i < oldnvdevs; i++) {
		uint64_t pool;

		vd = oldvdevs[i];
		if (vd != NULL) {
			if (spa_mode & FWRITE &&
			    spa_l2cache_exists(vd->vdev_guid, &pool) &&
			    pool != 0ULL &&
			    l2arc_vdev_present(vd)) {
				l2arc_remove_vdev(vd);
			}
			(void) vdev_close(vd);
			spa_l2cache_remove(vd);
		}
	}

	if (oldvdevs)
		kmem_free(oldvdevs, oldnvdevs * sizeof (void *));

	if (sav->sav_config == NULL)
		goto out;

	sav->sav_vdevs = newvdevs;
	sav->sav_count = (int)nl2cache;

	/*
	 * Recompute the stashed list of l2cache devices, with status
	 * information this time.
	 */
	VERIFY(nvlist_remove(sav->sav_config, ZPOOL_CONFIG_L2CACHE,
	    DATA_TYPE_NVLIST_ARRAY) == 0);

	l2cache = kmem_alloc(sav->sav_count * sizeof (void *), KM_SLEEP);
	for (i = 0; i < sav->sav_count; i++)
		l2cache[i] = vdev_config_generate(spa,
		    sav->sav_vdevs[i], B_TRUE, B_FALSE, B_TRUE);
	VERIFY(nvlist_add_nvlist_array(sav->sav_config,
	    ZPOOL_CONFIG_L2CACHE, l2cache, sav->sav_count) == 0);
out:
	for (i = 0; i < sav->sav_count; i++)
		nvlist_free(l2cache[i]);
	if (sav->sav_count)
		kmem_free(l2cache, sav->sav_count * sizeof (void *));
}

static int
load_nvlist(spa_t *spa, uint64_t obj, nvlist_t **value)
{
	dmu_buf_t *db;
	char *packed = NULL;
	size_t nvsize = 0;
	int error;
	*value = NULL;

	VERIFY(0 == dmu_bonus_hold(spa->spa_meta_objset, obj, FTAG, &db));
	nvsize = *(uint64_t *)db->db_data;
	dmu_buf_rele(db, FTAG);

	packed = kmem_alloc(nvsize, KM_SLEEP);
	error = dmu_read(spa->spa_meta_objset, obj, 0, nvsize, packed);
	if (error == 0)
		error = nvlist_unpack(packed, nvsize, value, 0);
	kmem_free(packed, nvsize);

	return (error);
}

/*
 * Checks to see if the given vdev could not be opened, in which case we post a
 * sysevent to notify the autoreplace code that the device has been removed.
 */
static void
spa_check_removed(vdev_t *vd)
{
	int c;

	for (c = 0; c < vd->vdev_children; c++)
		spa_check_removed(vd->vdev_child[c]);

	if (vd->vdev_ops->vdev_op_leaf && vdev_is_dead(vd)) {
		zfs_post_autoreplace(vd->vdev_spa, vd);
		spa_event_notify(vd->vdev_spa, vd, ESC_ZFS_VDEV_CHECK);
	}
}

/*
 * Load an existing storage pool, using the pool's builtin spa_config as a
 * source of configuration information.
 */
static int
spa_load(spa_t *spa, nvlist_t *config, spa_load_state_t state, int mosconfig)
{
	int error = 0;
	nvlist_t *nvroot = NULL;
	vdev_t *rvd;
	uberblock_t *ub = &spa->spa_uberblock;
	uint64_t config_cache_txg = spa->spa_config_txg;
	uint64_t pool_guid;
	uint64_t version;
	zio_t *zio;
	uint64_t autoreplace = 0;

	spa->spa_load_state = state;

	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &nvroot) ||
	    nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID, &pool_guid)) {
		error = EINVAL;
		goto out;
	}

	/*
	 * Versioning wasn't explicitly added to the label until later, so if
	 * it's not present treat it as the initial version.
	 */
	if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION, &version) != 0)
		version = SPA_VERSION_INITIAL;

	(void) nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_TXG,
	    &spa->spa_config_txg);

	if ((state == SPA_LOAD_IMPORT || state == SPA_LOAD_TRYIMPORT) &&
	    spa_guid_exists(pool_guid, 0)) {
		error = EEXIST;
		goto out;
	}

	spa->spa_load_guid = pool_guid;

	/*
	 * Parse the configuration into a vdev tree. We explicitly set the
	 * value that will be returned by spa_version() since parsing the
	 * configuration requires knowing the version number.
	 */
	spa_config_enter(spa, RW_WRITER, FTAG);
	spa->spa_ubsync.ub_version = version;
	error = spa_config_parse(spa, &rvd, nvroot, NULL, 0, VDEV_ALLOC_LOAD);
	spa_config_exit(spa, FTAG);

	if (error != 0)
		goto out;

	ASSERT(spa->spa_root_vdev == rvd);
	ASSERT(spa_guid(spa) == pool_guid);

	/*
	 * Try to open all vdevs, loading each label in the process.
	 */
	error = vdev_open(rvd);
	if (error != 0)
		goto out;

	/*
	 * Validate the labels for all leaf vdevs. We need to grab the config
	 * lock because all label I/O is done with the ZIO_FLAG_CONFIG_HELD
	 * flag.
	 */
	spa_config_enter(spa, RW_READER, FTAG);
	error = vdev_validate(rvd);
	spa_config_exit(spa, FTAG);

	if (error != 0)
		goto out;

	if (rvd->vdev_state <= VDEV_STATE_CANT_OPEN) {
		error = ENXIO;
		goto out;
	}

	/*
	 * Find the best uberblock.
	 */
	bzero(ub, sizeof (uberblock_t));

	zio = zio_root(spa, NULL, NULL,
	    ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE);
	vdev_uberblock_load(zio, rvd, ub);
	error = zio_wait(zio);

	/*
	 * If we weren't able to find a single valid uberblock, return failure.
	 */
	if (ub->ub_txg == 0) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_CORRUPT_DATA);
		error = ENXIO;
		goto out;
	}

	/*
	 * If the pool is newer than the code, we can't open it.
	 */
	if (ub->ub_version > SPA_VERSION) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_VERSION_NEWER);
		error = ENOTSUP;
		goto out;
	}

	/*
	 * If the vdev guid sum doesn't match the uberblock, we have an
	 * incomplete configuration.
	 */
	if (rvd->vdev_guid_sum != ub->ub_guid_sum && mosconfig) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_BAD_GUID_SUM);
		error = ENXIO;
		goto out;
	}

	/*
	 * Initialize internal SPA structures.
 */
	spa->spa_state = POOL_STATE_ACTIVE;
	spa->spa_ubsync = spa->spa_uberblock;
	spa->spa_first_txg = spa_last_synced_txg(spa) + 1;
	error = dsl_pool_open(spa, spa->spa_first_txg, &spa->spa_dsl_pool);
	if (error) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_CORRUPT_DATA);
		goto out;
	}
	spa->spa_meta_objset = spa->spa_dsl_pool->dp_meta_objset;

	if (zap_lookup(spa->spa_meta_objset,
	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_CONFIG,
	    sizeof (uint64_t), 1, &spa->spa_config_object) != 0) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_CORRUPT_DATA);
		error = EIO;
		goto out;
	}

	if (!mosconfig) {
		nvlist_t *newconfig;
		uint64_t hostid;

		if (load_nvlist(spa, spa->spa_config_object, &newconfig) != 0) {
			vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
			    VDEV_AUX_CORRUPT_DATA);
			error = EIO;
			goto out;
		}

		if (nvlist_lookup_uint64(newconfig, ZPOOL_CONFIG_HOSTID,
		    &hostid) == 0) {
			char *hostname;
			unsigned long myhostid = 0;

			VERIFY(nvlist_lookup_string(newconfig,
			    ZPOOL_CONFIG_HOSTNAME, &hostname) == 0);

			(void) ddi_strtoul(hw_serial, NULL, 10, &myhostid);
			if (hostid != 0 && myhostid != 0 &&
			    (unsigned long)hostid != myhostid) {
				cmn_err(CE_WARN, "pool '%s' could not be "
				    "loaded as it was last accessed by "
				    "another system (host: %s hostid: 0x%lx). "
				    "See: http://www.sun.com/msg/ZFS-8000-EY",
				    spa->spa_name, hostname,
				    (unsigned long)hostid);
				error = EBADF;
				goto out;
			}
		}

		spa_config_set(spa, newconfig);
		spa_unload(spa);
		spa_deactivate(spa);
		spa_activate(spa);

		return (spa_load(spa, newconfig, state, B_TRUE));
	}

	if (zap_lookup(spa->spa_meta_objset,
	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_SYNC_BPLIST,
	    sizeof (uint64_t), 1, &spa->spa_sync_bplist_obj) != 0) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_CORRUPT_DATA);
		error = EIO;
		goto out;
	}

	/*
	 * Load the bit that tells us to use the new accounting function
	 * (raid-z deflation). If we have an older pool, this will not
	 * be present.
	 */
	error = zap_lookup(spa->spa_meta_objset,
	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_DEFLATE,
	    sizeof (uint64_t), 1, &spa->spa_deflate);
	if (error != 0 && error != ENOENT) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_CORRUPT_DATA);
		error = EIO;
		goto out;
	}

	/*
	 * Load the persistent error log. If we have an older pool, this will
	 * not be present.
	 */
	error = zap_lookup(spa->spa_meta_objset,
	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_ERRLOG_LAST,
	    sizeof (uint64_t), 1, &spa->spa_errlog_last);
	if (error != 0 && error != ENOENT) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_CORRUPT_DATA);
		error = EIO;
		goto out;
	}

	error = zap_lookup(spa->spa_meta_objset,
	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_ERRLOG_SCRUB,
	    sizeof (uint64_t), 1, &spa->spa_errlog_scrub);
	if (error != 0 && error != ENOENT) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_CORRUPT_DATA);
		error = EIO;
		goto out;
	}

	/*
	 * Load the history object. If we have an older pool, this
	 * will not be present.
 */
	error = zap_lookup(spa->spa_meta_objset,
	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_HISTORY,
	    sizeof (uint64_t), 1, &spa->spa_history);
	if (error != 0 && error != ENOENT) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_CORRUPT_DATA);
		error = EIO;
		goto out;
	}

	/*
	 * Load any hot spares for this pool.
	 */
	error = zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_SPARES, sizeof (uint64_t), 1, &spa->spa_spares.sav_object);
	if (error != 0 && error != ENOENT) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_CORRUPT_DATA);
		error = EIO;
		goto out;
	}
	if (error == 0) {
		ASSERT(spa_version(spa) >= SPA_VERSION_SPARES);
		if (load_nvlist(spa, spa->spa_spares.sav_object,
		    &spa->spa_spares.sav_config) != 0) {
			vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
			    VDEV_AUX_CORRUPT_DATA);
			error = EIO;
			goto out;
		}

		spa_config_enter(spa, RW_WRITER, FTAG);
		spa_load_spares(spa);
		spa_config_exit(spa, FTAG);
	}

	/*
	 * Load any level 2 ARC devices for this pool.
	 */
	error = zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_L2CACHE, sizeof (uint64_t), 1,
	    &spa->spa_l2cache.sav_object);
	if (error != 0 && error != ENOENT) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_CORRUPT_DATA);
		error = EIO;
		goto out;
	}
	if (error == 0) {
		ASSERT(spa_version(spa) >= SPA_VERSION_L2CACHE);
		if (load_nvlist(spa, spa->spa_l2cache.sav_object,
		    &spa->spa_l2cache.sav_config) != 0) {
			vdev_set_state(rvd, B_TRUE,
			    VDEV_STATE_CANT_OPEN,
			    VDEV_AUX_CORRUPT_DATA);
			error = EIO;
			goto out;
		}

		spa_config_enter(spa, RW_WRITER, FTAG);
		spa_load_l2cache(spa);
		spa_config_exit(spa, FTAG);
	}

	spa->spa_delegation = zpool_prop_default_numeric(ZPOOL_PROP_DELEGATION);

	error = zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_PROPS, sizeof (uint64_t), 1, &spa->spa_pool_props_object);

	if (error && error != ENOENT) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_CORRUPT_DATA);
		error = EIO;
		goto out;
	}

	if (error == 0) {
		(void) zap_lookup(spa->spa_meta_objset,
		    spa->spa_pool_props_object,
		    zpool_prop_to_name(ZPOOL_PROP_BOOTFS),
		    sizeof (uint64_t), 1, &spa->spa_bootfs);
		(void) zap_lookup(spa->spa_meta_objset,
		    spa->spa_pool_props_object,
		    zpool_prop_to_name(ZPOOL_PROP_AUTOREPLACE),
		    sizeof (uint64_t), 1, &autoreplace);
		(void) zap_lookup(spa->spa_meta_objset,
		    spa->spa_pool_props_object,
		    zpool_prop_to_name(ZPOOL_PROP_DELEGATION),
		    sizeof (uint64_t), 1, &spa->spa_delegation);
		(void) zap_lookup(spa->spa_meta_objset,
		    spa->spa_pool_props_object,
		    zpool_prop_to_name(ZPOOL_PROP_FAILUREMODE),
		    sizeof (uint64_t), 1, &spa->spa_failmode);
	}

	/*
	 * If the 'autoreplace' property is set, then post a resource notifying
	 * the ZFS DE that it should not issue any faults for unopenable
	 * devices. We also iterate over the vdevs, and post a sysevent for any
	 * unopenable vdevs so that the normal autoreplace handler can take
	 * over.
	 */
	if (autoreplace && state != SPA_LOAD_TRYIMPORT)
		spa_check_removed(spa->spa_root_vdev);

	/*
	 * Load the vdev state for all toplevel vdevs.
 */
	vdev_load(rvd);

	/*
	 * Propagate the leaf DTLs we just loaded all the way up the tree.
	 */
	spa_config_enter(spa, RW_WRITER, FTAG);
	vdev_dtl_reassess(rvd, 0, 0, B_FALSE);
	spa_config_exit(spa, FTAG);

	/*
	 * Check the state of the root vdev. If it can't be opened, it
	 * indicates one or more toplevel vdevs are faulted.
	 */
	if (rvd->vdev_state <= VDEV_STATE_CANT_OPEN) {
		error = ENXIO;
		goto out;
	}

	if ((spa_mode & FWRITE) && state != SPA_LOAD_TRYIMPORT) {
		dmu_tx_t *tx;
		int need_update = B_FALSE;
		int c;

		/*
		 * Claim log blocks that haven't been committed yet.
		 * This must all happen in a single txg.
		 */
		tx = dmu_tx_create_assigned(spa_get_dsl(spa),
		    spa_first_txg(spa));
		(void) dmu_objset_find(spa->spa_name,
		    zil_claim, tx, DS_FIND_CHILDREN);
		dmu_tx_commit(tx);

		spa->spa_sync_on = B_TRUE;
		txg_sync_start(spa->spa_dsl_pool);

		/*
		 * Wait for all claims to sync.
		 */
		txg_wait_synced(spa->spa_dsl_pool, 0);

		/*
		 * If the config cache is stale, or we have uninitialized
		 * metaslabs (see spa_vdev_add()), then update the config.
		 */
		if (config_cache_txg != spa->spa_config_txg ||
		    state == SPA_LOAD_IMPORT)
			need_update = B_TRUE;

		for (c = 0; c < rvd->vdev_children; c++)
			if (rvd->vdev_child[c]->vdev_ms_array == 0)
				need_update = B_TRUE;

		/*
		 * Update the config cache asynchronously in case we're the
		 * root pool, in which case the config cache isn't writable yet.
		 */
		if (need_update)
			spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE);
	}

	error = 0;
out:
	if (error && error != EBADF)
		zfs_ereport_post(FM_EREPORT_ZFS_POOL, spa, NULL, NULL, 0, 0);
	spa->spa_load_state = SPA_LOAD_NONE;
	spa->spa_ena = 0;

	return (error);
}

/*
 * Pool Open/Import
 *
 * The import case is identical to an open except that the configuration is sent
 * down from userland, instead of grabbed from the configuration cache. For the
 * case of an open, the pool configuration will exist in the
 * POOL_STATE_UNINITIALIZED state.
 *
 * The stats information (gen/count/ustats) is used to gather vdev statistics at
 * the same time we open the pool, without having to keep around the spa_t in
 * some ambiguous state.
 */
static int
spa_open_common(const char *pool, spa_t **spapp, void *tag, nvlist_t **config)
{
	spa_t *spa;
	int error;
	int loaded = B_FALSE;
	int locked = B_FALSE;

	*spapp = NULL;

	/*
	 * As disgusting as this is, we need to support recursive calls to this
	 * function because dsl_dir_open() is called during spa_load(), and ends
	 * up calling spa_open() again. The real fix is to figure out how to
	 * avoid dsl_dir_open() calling this in the first place.
 */
	if (mutex_owner(&spa_namespace_lock) != curthread) {
		mutex_enter(&spa_namespace_lock);
		locked = B_TRUE;
	}

	if ((spa = spa_lookup(pool)) == NULL) {
		if (locked)
			mutex_exit(&spa_namespace_lock);
		return (ENOENT);
	}
	if (spa->spa_state == POOL_STATE_UNINITIALIZED) {

		spa_activate(spa);

		error = spa_load(spa, spa->spa_config, SPA_LOAD_OPEN, B_FALSE);

		if (error == EBADF) {
			/*
			 * If vdev_validate() returns failure (indicated by
			 * EBADF), it means that one of the vdevs indicates
			 * that the pool has been exported or destroyed. If
			 * this is the case, the config cache is out of sync and
			 * we should remove the pool from the namespace.
			 */
			spa_unload(spa);
			spa_deactivate(spa);
			spa_config_sync(spa, B_TRUE, B_TRUE);
			spa_remove(spa);
			if (locked)
				mutex_exit(&spa_namespace_lock);
			return (ENOENT);
		}

		if (error) {
			/*
			 * We can't open the pool, but we still have useful
			 * information: the state of each vdev after the
			 * attempted vdev_open(). Return this to the user.
			 */
			if (config != NULL && spa->spa_root_vdev != NULL) {
				spa_config_enter(spa, RW_READER, FTAG);
				*config = spa_config_generate(spa, NULL, -1ULL,
				    B_TRUE);
				spa_config_exit(spa, FTAG);
			}
			spa_unload(spa);
			spa_deactivate(spa);
			spa->spa_last_open_failed = B_TRUE;
			if (locked)
				mutex_exit(&spa_namespace_lock);
			*spapp = NULL;
			return (error);
		} else {
			spa->spa_last_open_failed = B_FALSE;
		}

		loaded = B_TRUE;
	}

	spa_open_ref(spa, tag);

	/*
	 * If we just loaded the pool, resilver anything that's out of date.
	 */
	if (loaded && (spa_mode & FWRITE))
		VERIFY(spa_scrub(spa, POOL_SCRUB_RESILVER, B_TRUE) == 0);

	if (locked)
		mutex_exit(&spa_namespace_lock);

	*spapp = spa;

	if (config != NULL) {
		spa_config_enter(spa, RW_READER, FTAG);
		*config = spa_config_generate(spa, NULL, -1ULL, B_TRUE);
		spa_config_exit(spa, FTAG);
	}

	return (0);
}

int
spa_open(const char *name, spa_t **spapp, void *tag)
{
	return (spa_open_common(name, spapp, tag, NULL));
}

/*
 * Lookup the given spa_t, incrementing the inject count in the process,
 * preventing it from being exported or destroyed.
 */
spa_t *
spa_inject_addref(char *name)
{
	spa_t *spa;

	mutex_enter(&spa_namespace_lock);
	if ((spa = spa_lookup(name)) == NULL) {
		mutex_exit(&spa_namespace_lock);
		return (NULL);
	}
	spa->spa_inject_ref++;
	mutex_exit(&spa_namespace_lock);

	return (spa);
}

void
spa_inject_delref(spa_t *spa)
{
	mutex_enter(&spa_namespace_lock);
	spa->spa_inject_ref--;
	mutex_exit(&spa_namespace_lock);
}

/*
 * Add spares device information to the nvlist.
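 * The spare list is taken from spa_spares.sav_config and attached to the
 * supplied config's vdev tree; any spare that is currently in use as an
 * active spare is marked VDEV_STATE_CANT_OPEN / VDEV_AUX_SPARED.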
 */
static void
spa_add_spares(spa_t *spa, nvlist_t *config)
{
	nvlist_t **spares;
	uint_t i, nspares;
	nvlist_t *nvroot;
	uint64_t guid;
	vdev_stat_t *vs;
	uint_t vsc;
	uint64_t pool;

	if (spa->spa_spares.sav_count == 0)
		return;

	VERIFY(nvlist_lookup_nvlist(config,
	    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
	VERIFY(nvlist_lookup_nvlist_array(spa->spa_spares.sav_config,
	    ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0);
	if (nspares != 0) {
		VERIFY(nvlist_add_nvlist_array(nvroot,
		    ZPOOL_CONFIG_SPARES, spares, nspares) == 0);
		VERIFY(nvlist_lookup_nvlist_array(nvroot,
		    ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0);

		/*
		 * Go through and find any spares which have since been
		 * repurposed as an active spare. If this is the case, update
		 * their status appropriately.
		 */
		for (i = 0; i < nspares; i++) {
			VERIFY(nvlist_lookup_uint64(spares[i],
			    ZPOOL_CONFIG_GUID, &guid) == 0);
			if (spa_spare_exists(guid, &pool) && pool != 0ULL) {
				VERIFY(nvlist_lookup_uint64_array(
				    spares[i], ZPOOL_CONFIG_STATS,
				    (uint64_t **)&vs, &vsc) == 0);
				vs->vs_state = VDEV_STATE_CANT_OPEN;
				vs->vs_aux = VDEV_AUX_SPARED;
			}
		}
	}
}

/*
 * Add l2cache device information to the nvlist, including vdev stats.
 */
static void
spa_add_l2cache(spa_t *spa, nvlist_t *config)
{
	nvlist_t **l2cache;
	uint_t i, j, nl2cache;
	nvlist_t *nvroot;
	uint64_t guid;
	vdev_t *vd;
	vdev_stat_t *vs;
	uint_t vsc;

	if (spa->spa_l2cache.sav_count == 0)
		return;

	spa_config_enter(spa, RW_READER, FTAG);

	VERIFY(nvlist_lookup_nvlist(config,
	    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
	VERIFY(nvlist_lookup_nvlist_array(spa->spa_l2cache.sav_config,
	    ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0);
	if (nl2cache != 0) {
		VERIFY(nvlist_add_nvlist_array(nvroot,
		    ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache) == 0);
		VERIFY(nvlist_lookup_nvlist_array(nvroot,
		    ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0);

		/*
		 * Update level 2 cache device stats.
		 */

		for (i = 0; i < nl2cache; i++) {
			VERIFY(nvlist_lookup_uint64(l2cache[i],
			    ZPOOL_CONFIG_GUID, &guid) == 0);

			vd = NULL;
			for (j = 0; j < spa->spa_l2cache.sav_count; j++) {
				if (guid ==
				    spa->spa_l2cache.sav_vdevs[j]->vdev_guid) {
					vd = spa->spa_l2cache.sav_vdevs[j];
					break;
				}
			}
			ASSERT(vd != NULL);

			VERIFY(nvlist_lookup_uint64_array(l2cache[i],
			    ZPOOL_CONFIG_STATS, (uint64_t **)&vs, &vsc) == 0);
			vdev_get_stats(vd, vs);
		}
	}

	spa_config_exit(spa, FTAG);
}

int
spa_get_stats(const char *name, nvlist_t **config, char *altroot, size_t buflen)
{
	int error;
	spa_t *spa;

	*config = NULL;
	error = spa_open_common(name, &spa, FTAG, config);

	if (spa && *config != NULL) {
		VERIFY(nvlist_add_uint64(*config, ZPOOL_CONFIG_ERRCOUNT,
		    spa_get_errlog_size(spa)) == 0);

		spa_add_spares(spa, *config);
		spa_add_l2cache(spa, *config);
	}

	/*
	 * We want to get the alternate root even for faulted pools, so we cheat
	 * and call spa_lookup() directly.
 */
	if (altroot) {
		if (spa == NULL) {
			mutex_enter(&spa_namespace_lock);
			spa = spa_lookup(name);
			if (spa)
				spa_altroot(spa, altroot, buflen);
			else
				altroot[0] = '\0';
			spa = NULL;
			mutex_exit(&spa_namespace_lock);
		} else {
			spa_altroot(spa, altroot, buflen);
		}
	}

	if (spa != NULL)
		spa_close(spa, FTAG);

	return (error);
}

/*
 * Validate that the auxiliary device array is well formed. We must have an
 * array of nvlists, each of which describes a valid leaf vdev. If this is an
 * import (mode is VDEV_ALLOC_SPARE), then we allow corrupted spares to be
 * specified, as long as they are well-formed.
 */
static int
spa_validate_aux_devs(spa_t *spa, nvlist_t *nvroot, uint64_t crtxg, int mode,
    spa_aux_vdev_t *sav, const char *config, uint64_t version,
    vdev_labeltype_t label)
{
	nvlist_t **dev;
	uint_t i, ndev;
	vdev_t *vd;
	int error;

	/*
	 * It's acceptable to have no devs specified.
	 */
	if (nvlist_lookup_nvlist_array(nvroot, config, &dev, &ndev) != 0)
		return (0);

	if (ndev == 0)
		return (EINVAL);

	/*
	 * Make sure the pool is formatted with a version that supports this
	 * device type.
	 */
	if (spa_version(spa) < version)
		return (ENOTSUP);

	/*
	 * Set the pending device list so we correctly handle device in-use
	 * checking.
	 */
	sav->sav_pending = dev;
	sav->sav_npending = ndev;

	for (i = 0; i < ndev; i++) {
		if ((error = spa_config_parse(spa, &vd, dev[i], NULL, 0,
		    mode)) != 0)
			goto out;

		if (!vd->vdev_ops->vdev_op_leaf) {
			vdev_free(vd);
			error = EINVAL;
			goto out;
		}

		/*
		 * The L2ARC currently only supports disk devices.
		 */
		if ((strcmp(config, ZPOOL_CONFIG_L2CACHE) == 0) &&
		    strcmp(vd->vdev_ops->vdev_op_type, VDEV_TYPE_DISK) != 0) {
			error = ENOTBLK;
			goto out;
		}

		vd->vdev_top = vd;

		if ((error = vdev_open(vd)) == 0 &&
		    (error = vdev_label_init(vd, crtxg, label)) == 0) {
			VERIFY(nvlist_add_uint64(dev[i], ZPOOL_CONFIG_GUID,
			    vd->vdev_guid) == 0);
		}

		vdev_free(vd);

		if (error &&
		    (mode != VDEV_ALLOC_SPARE && mode != VDEV_ALLOC_L2CACHE))
			goto out;
		else
			error = 0;
	}

out:
	sav->sav_pending = NULL;
	sav->sav_npending = 0;
	return (error);
}

static int
spa_validate_aux(spa_t *spa, nvlist_t *nvroot, uint64_t crtxg, int mode)
{
	int error;

	if ((error = spa_validate_aux_devs(spa, nvroot, crtxg, mode,
	    &spa->spa_spares, ZPOOL_CONFIG_SPARES, SPA_VERSION_SPARES,
	    VDEV_LABEL_SPARE)) != 0) {
		return (error);
	}

	return (spa_validate_aux_devs(spa, nvroot, crtxg, mode,
	    &spa->spa_l2cache, ZPOOL_CONFIG_L2CACHE, SPA_VERSION_L2CACHE,
	    VDEV_LABEL_L2CACHE));
}

static void
spa_set_aux_vdevs(spa_aux_vdev_t *sav, nvlist_t **devs, int ndevs,
    const char *config)
{
	int i;

	if (sav->sav_config != NULL) {
		nvlist_t **olddevs;
		uint_t oldndevs;
		nvlist_t **newdevs;

		/*
		 * Generate new dev list by concatenating with the
		 * current dev list.
 */
		VERIFY(nvlist_lookup_nvlist_array(sav->sav_config, config,
		    &olddevs, &oldndevs) == 0);

		newdevs = kmem_alloc(sizeof (void *) *
		    (ndevs + oldndevs), KM_SLEEP);
		for (i = 0; i < oldndevs; i++)
			VERIFY(nvlist_dup(olddevs[i], &newdevs[i],
			    KM_SLEEP) == 0);
		for (i = 0; i < ndevs; i++)
			VERIFY(nvlist_dup(devs[i], &newdevs[i + oldndevs],
			    KM_SLEEP) == 0);

		VERIFY(nvlist_remove(sav->sav_config, config,
		    DATA_TYPE_NVLIST_ARRAY) == 0);

		VERIFY(nvlist_add_nvlist_array(sav->sav_config,
		    config, newdevs, ndevs + oldndevs) == 0);
		for (i = 0; i < oldndevs + ndevs; i++)
			nvlist_free(newdevs[i]);
		kmem_free(newdevs, (oldndevs + ndevs) * sizeof (void *));
	} else {
		/*
		 * Generate a new dev list.
		 */
		VERIFY(nvlist_alloc(&sav->sav_config, NV_UNIQUE_NAME,
		    KM_SLEEP) == 0);
		VERIFY(nvlist_add_nvlist_array(sav->sav_config, config,
		    devs, ndevs) == 0);
	}
}

/*
 * Stop and drop level 2 ARC devices
 */
void
spa_l2cache_drop(spa_t *spa)
{
	vdev_t *vd;
	int i;
	spa_aux_vdev_t *sav = &spa->spa_l2cache;

	for (i = 0; i < sav->sav_count; i++) {
		uint64_t pool;

		vd = sav->sav_vdevs[i];
		ASSERT(vd != NULL);

		if (spa_mode & FWRITE &&
		    spa_l2cache_exists(vd->vdev_guid, &pool) && pool != 0ULL &&
		    l2arc_vdev_present(vd)) {
			l2arc_remove_vdev(vd);
		}
		if (vd->vdev_isl2cache)
			spa_l2cache_remove(vd);
		vdev_clear_stats(vd);
		(void) vdev_close(vd);
	}
}

/*
 * Pool Creation
 */
int
spa_create(const char *pool, nvlist_t *nvroot, nvlist_t *props,
    const char *history_str)
{
	spa_t *spa;
	char *altroot = NULL;
	vdev_t *rvd;
	dsl_pool_t *dp;
	dmu_tx_t *tx;
	int c, error = 0;
	uint64_t txg = TXG_INITIAL;
	nvlist_t **spares, **l2cache;
	uint_t nspares, nl2cache;
	uint64_t version;

	/*
	 * If this pool already exists, return failure.
	 */
	mutex_enter(&spa_namespace_lock);
	if (spa_lookup(pool) != NULL) {
		mutex_exit(&spa_namespace_lock);
		return (EEXIST);
	}

	/*
	 * Allocate a new spa_t structure.
	 */
	(void) nvlist_lookup_string(props,
	    zpool_prop_to_name(ZPOOL_PROP_ALTROOT), &altroot);
	spa = spa_add(pool, altroot);
	spa_activate(spa);

	spa->spa_uberblock.ub_txg = txg - 1;

	if (props && (error = spa_prop_validate(spa, props))) {
		spa_unload(spa);
		spa_deactivate(spa);
		spa_remove(spa);
		mutex_exit(&spa_namespace_lock);
		return (error);
	}

	if (nvlist_lookup_uint64(props, zpool_prop_to_name(ZPOOL_PROP_VERSION),
	    &version) != 0)
		version = SPA_VERSION;
	ASSERT(version <= SPA_VERSION);
	spa->spa_uberblock.ub_version = version;
	spa->spa_ubsync = spa->spa_uberblock;

	/*
	 * Create the root vdev.
 */
	spa_config_enter(spa, RW_WRITER, FTAG);

	error = spa_config_parse(spa, &rvd, nvroot, NULL, 0, VDEV_ALLOC_ADD);

	ASSERT(error != 0 || rvd != NULL);
	ASSERT(error != 0 || spa->spa_root_vdev == rvd);

	if (error == 0 && !zfs_allocatable_devs(nvroot))
		error = EINVAL;

	if (error == 0 &&
	    (error = vdev_create(rvd, txg, B_FALSE)) == 0 &&
	    (error = spa_validate_aux(spa, nvroot, txg,
	    VDEV_ALLOC_ADD)) == 0) {
		for (c = 0; c < rvd->vdev_children; c++)
			vdev_init(rvd->vdev_child[c], txg);
		vdev_config_dirty(rvd);
	}

	spa_config_exit(spa, FTAG);

	if (error != 0) {
		spa_unload(spa);
		spa_deactivate(spa);
		spa_remove(spa);
		mutex_exit(&spa_namespace_lock);
		return (error);
	}

	/*
	 * Get the list of spares, if specified.
	 */
	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
	    &spares, &nspares) == 0) {
		VERIFY(nvlist_alloc(&spa->spa_spares.sav_config, NV_UNIQUE_NAME,
		    KM_SLEEP) == 0);
		VERIFY(nvlist_add_nvlist_array(spa->spa_spares.sav_config,
		    ZPOOL_CONFIG_SPARES, spares, nspares) == 0);
		spa_config_enter(spa, RW_WRITER, FTAG);
		spa_load_spares(spa);
		spa_config_exit(spa, FTAG);
		spa->spa_spares.sav_sync = B_TRUE;
	}

	/*
	 * Get the list of level 2 cache devices, if specified.
	 */
	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
	    &l2cache, &nl2cache) == 0) {
		VERIFY(nvlist_alloc(&spa->spa_l2cache.sav_config,
		    NV_UNIQUE_NAME, KM_SLEEP) == 0);
		VERIFY(nvlist_add_nvlist_array(spa->spa_l2cache.sav_config,
		    ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache) == 0);
		spa_config_enter(spa, RW_WRITER, FTAG);
		spa_load_l2cache(spa);
		spa_config_exit(spa, FTAG);
		spa->spa_l2cache.sav_sync = B_TRUE;
	}

	spa->spa_dsl_pool = dp = dsl_pool_create(spa, txg);
	spa->spa_meta_objset = dp->dp_meta_objset;

	tx = dmu_tx_create_assigned(dp, txg);

	/*
	 * Create the pool config object.
	 */
	spa->spa_config_object = dmu_object_alloc(spa->spa_meta_objset,
	    DMU_OT_PACKED_NVLIST, 1 << 14,
	    DMU_OT_PACKED_NVLIST_SIZE, sizeof (uint64_t), tx);

	if (zap_add(spa->spa_meta_objset,
	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_CONFIG,
	    sizeof (uint64_t), 1, &spa->spa_config_object, tx) != 0) {
		cmn_err(CE_PANIC, "failed to add pool config");
	}

	/* Newly created pools with the right version are always deflated. */
	if (version >= SPA_VERSION_RAIDZ_DEFLATE) {
		spa->spa_deflate = TRUE;
		if (zap_add(spa->spa_meta_objset,
		    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_DEFLATE,
		    sizeof (uint64_t), 1, &spa->spa_deflate, tx) != 0) {
			cmn_err(CE_PANIC, "failed to add deflate");
		}
	}

	/*
	 * Create the deferred-free bplist object. Turn off compression
	 * because sync-to-convergence takes longer if the blocksize
	 * keeps changing.
	 */
	spa->spa_sync_bplist_obj = bplist_create(spa->spa_meta_objset,
	    1 << 14, tx);
	dmu_object_set_compress(spa->spa_meta_objset, spa->spa_sync_bplist_obj,
	    ZIO_COMPRESS_OFF, tx);

	if (zap_add(spa->spa_meta_objset,
	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_SYNC_BPLIST,
	    sizeof (uint64_t), 1, &spa->spa_sync_bplist_obj, tx) != 0) {
		cmn_err(CE_PANIC, "failed to add bplist");
	}

	/*
	 * Create the pool's history object.
 */
	if (version >= SPA_VERSION_ZPOOL_HISTORY)
		spa_history_create_obj(spa, tx);

	/*
	 * Set pool properties.
	 */
	spa->spa_bootfs = zpool_prop_default_numeric(ZPOOL_PROP_BOOTFS);
	spa->spa_delegation = zpool_prop_default_numeric(ZPOOL_PROP_DELEGATION);
	spa->spa_failmode = zpool_prop_default_numeric(ZPOOL_PROP_FAILUREMODE);
	if (props)
		spa_sync_props(spa, props, CRED(), tx);

	dmu_tx_commit(tx);

	spa->spa_sync_on = B_TRUE;
	txg_sync_start(spa->spa_dsl_pool);

	/*
	 * We explicitly wait for the first transaction to complete so that our
	 * bean counters are appropriately updated.
	 */
	txg_wait_synced(spa->spa_dsl_pool, txg);

	spa_config_sync(spa, B_FALSE, B_TRUE);

	if (version >= SPA_VERSION_ZPOOL_HISTORY && history_str != NULL)
		(void) spa_history_log(spa, history_str, LOG_CMD_POOL_CREATE);

	mutex_exit(&spa_namespace_lock);

	return (0);
}

/*
 * Import the given pool into the system. We set up the necessary spa_t and
 * then call spa_load() to do the dirty work.
 */
static int
spa_import_common(const char *pool, nvlist_t *config, nvlist_t *props,
    boolean_t isroot, boolean_t allowfaulted)
{
	spa_t *spa;
	char *altroot = NULL;
	int error, loaderr;
	nvlist_t *nvroot;
	nvlist_t **spares, **l2cache;
	uint_t nspares, nl2cache;
	int mosconfig = isroot ? B_FALSE : B_TRUE;

	/*
	 * If a pool with this name exists, return failure.
	 */
	mutex_enter(&spa_namespace_lock);
	if (spa_lookup(pool) != NULL) {
		mutex_exit(&spa_namespace_lock);
		return (EEXIST);
	}

	/*
	 * Create and initialize the spa structure.
	 */
	(void) nvlist_lookup_string(props,
	    zpool_prop_to_name(ZPOOL_PROP_ALTROOT), &altroot);
	spa = spa_add(pool, altroot);
	spa_activate(spa);

	if (allowfaulted)
		spa->spa_import_faulted = B_TRUE;
	spa->spa_is_root = isroot;

	/*
	 * Pass off the heavy lifting to spa_load().
	 * Pass TRUE for mosconfig because the user-supplied config
	 * is actually the one to trust when doing an import.
	 */
	loaderr = error = spa_load(spa, config, SPA_LOAD_IMPORT, mosconfig);

	spa_config_enter(spa, RW_WRITER, FTAG);
	/*
	 * Toss any existing sparelist, as it doesn't have any validity anymore,
	 * and conflicts with spa_has_spare().
	 */
	if (!isroot && spa->spa_spares.sav_config) {
		nvlist_free(spa->spa_spares.sav_config);
		spa->spa_spares.sav_config = NULL;
		spa_load_spares(spa);
	}
	if (!isroot && spa->spa_l2cache.sav_config) {
		nvlist_free(spa->spa_l2cache.sav_config);
		spa->spa_l2cache.sav_config = NULL;
		spa_load_l2cache(spa);
	}

	VERIFY(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
	    &nvroot) == 0);
	if (error == 0)
		error = spa_validate_aux(spa, nvroot, -1ULL, VDEV_ALLOC_SPARE);
	if (error == 0)
		error = spa_validate_aux(spa, nvroot, -1ULL,
		    VDEV_ALLOC_L2CACHE);
	spa_config_exit(spa, FTAG);

	if (error != 0 || (props && (error = spa_prop_set(spa, props)))) {
		if (loaderr != 0 && loaderr != EINVAL && allowfaulted) {
			/*
			 * If we failed to load the pool, but 'allowfaulted' is
			 * set, then manually set the config as if the config
			 * passed in was specified in the cache file.
2101 */ 2102 error = 0; 2103 spa->spa_import_faulted = B_FALSE; 2104 if (spa->spa_config == NULL) { 2105 spa_config_enter(spa, RW_READER, FTAG); 2106 spa->spa_config = spa_config_generate(spa, 2107 NULL, -1ULL, B_TRUE); 2108 spa_config_exit(spa, FTAG); 2109 } 2110 spa_unload(spa); 2111 spa_deactivate(spa); 2112 spa_config_sync(spa, B_FALSE, B_TRUE); 2113 } else { 2114 spa_unload(spa); 2115 spa_deactivate(spa); 2116 spa_remove(spa); 2117 } 2118 mutex_exit(&spa_namespace_lock); 2119 return (error); 2120 } 2121 2122 /* 2123 * Override any spares and level 2 cache devices as specified by 2124 * the user, as these may have correct device names/devids, etc. 2125 */ 2126 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES, 2127 &spares, &nspares) == 0) { 2128 if (spa->spa_spares.sav_config) 2129 VERIFY(nvlist_remove(spa->spa_spares.sav_config, 2130 ZPOOL_CONFIG_SPARES, DATA_TYPE_NVLIST_ARRAY) == 0); 2131 else 2132 VERIFY(nvlist_alloc(&spa->spa_spares.sav_config, 2133 NV_UNIQUE_NAME, KM_SLEEP) == 0); 2134 VERIFY(nvlist_add_nvlist_array(spa->spa_spares.sav_config, 2135 ZPOOL_CONFIG_SPARES, spares, nspares) == 0); 2136 spa_config_enter(spa, RW_WRITER, FTAG); 2137 spa_load_spares(spa); 2138 spa_config_exit(spa, FTAG); 2139 spa->spa_spares.sav_sync = B_TRUE; 2140 } 2141 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE, 2142 &l2cache, &nl2cache) == 0) { 2143 if (spa->spa_l2cache.sav_config) 2144 VERIFY(nvlist_remove(spa->spa_l2cache.sav_config, 2145 ZPOOL_CONFIG_L2CACHE, DATA_TYPE_NVLIST_ARRAY) == 0); 2146 else 2147 VERIFY(nvlist_alloc(&spa->spa_l2cache.sav_config, 2148 NV_UNIQUE_NAME, KM_SLEEP) == 0); 2149 VERIFY(nvlist_add_nvlist_array(spa->spa_l2cache.sav_config, 2150 ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache) == 0); 2151 spa_config_enter(spa, RW_WRITER, FTAG); 2152 spa_load_l2cache(spa); 2153 spa_config_exit(spa, FTAG); 2154 spa->spa_l2cache.sav_sync = B_TRUE; 2155 } 2156 2157 if (spa_mode & FWRITE) { 2158 /* 2159 * Update the config cache to include the newly-imported pool. 2160 */ 2161 spa_config_update_common(spa, SPA_CONFIG_UPDATE_POOL, isroot); 2162 2163 /* 2164 * Resilver anything that's out of date. 2165 */ 2166 if (!isroot) 2167 VERIFY(spa_scrub(spa, POOL_SCRUB_RESILVER, 2168 B_TRUE) == 0); 2169 } 2170 2171 spa->spa_import_faulted = B_FALSE; 2172 mutex_exit(&spa_namespace_lock); 2173 2174 return (0); 2175 } 2176 2177 #ifdef _KERNEL 2178 /* 2179 * Build a "root" vdev for a top level vdev read in from a rootpool 2180 * device label. 2181 */ 2182 static void 2183 spa_build_rootpool_config(nvlist_t *config) 2184 { 2185 nvlist_t *nvtop, *nvroot; 2186 uint64_t pgid; 2187 2188 /* 2189 * Add this top-level vdev to the child array. 2190 */ 2191 VERIFY(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &nvtop) 2192 == 0); 2193 VERIFY(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID, &pgid) 2194 == 0); 2195 2196 /* 2197 * Put this pool's top-level vdevs into a root vdev. 2198 */ 2199 VERIFY(nvlist_alloc(&nvroot, NV_UNIQUE_NAME, KM_SLEEP) == 0); 2200 VERIFY(nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE, VDEV_TYPE_ROOT) 2201 == 0); 2202 VERIFY(nvlist_add_uint64(nvroot, ZPOOL_CONFIG_ID, 0ULL) == 0); 2203 VERIFY(nvlist_add_uint64(nvroot, ZPOOL_CONFIG_GUID, pgid) == 0); 2204 VERIFY(nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN, 2205 &nvtop, 1) == 0); 2206 2207 /* 2208 * Replace the existing vdev_tree with the new root vdev in 2209 * this pool's configuration (remove the old, add the new). 
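 * The nvlist_add_nvlist() below installs the new ZPOOL_CONFIG_VDEV_TREE
 * entry, replacing the tree that was read from the label.  The resulting
 * config is roughly:
 *
 *	vdev_tree:
 *		type:		'root'
 *		id:		0
 *		guid:		<pool guid>
 *		children[0]:	<the label's original top-level vdev>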
2210 */
2211 VERIFY(nvlist_add_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, nvroot) == 0);
2212 nvlist_free(nvroot);
2213 }
2214
2215 /*
2216 * Get the root pool information from the root disk, then import the root pool
2217 * during the system boot up time.
2218 */
2219 extern nvlist_t *vdev_disk_read_rootlabel(char *);
2220
2221 void
2222 spa_check_rootconf(char *devpath, char **bestdev, nvlist_t **bestconf,
2223 uint64_t *besttxg)
2224 {
2225 nvlist_t *config;
2226 uint64_t txg;
2227
2228 if ((config = vdev_disk_read_rootlabel(devpath)) == NULL)
2229 return;
2230
2231 VERIFY(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_TXG, &txg) == 0);
2232
2233 if (txg > *besttxg) {
2234 *besttxg = txg;
2235 if (*bestconf != NULL)
2236 nvlist_free(*bestconf);
2237 *bestconf = config;
2238 *bestdev = devpath;
2239 }
2240 }
2241
2242 boolean_t
2243 spa_rootdev_validate(nvlist_t *nv)
2244 {
2245 uint64_t ival;
2246
2247 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_OFFLINE, &ival) == 0 ||
2248 nvlist_lookup_uint64(nv, ZPOOL_CONFIG_FAULTED, &ival) == 0 ||
2249 nvlist_lookup_uint64(nv, ZPOOL_CONFIG_DEGRADED, &ival) == 0 ||
2250 nvlist_lookup_uint64(nv, ZPOOL_CONFIG_REMOVED, &ival) == 0)
2251 return (B_FALSE);
2252
2253 return (B_TRUE);
2254 }
2255
2256 /*
2257 * Import a root pool.
2258 *
2259 * For x86, devpath_list will consist of the physpath name of the vdev in a single
2260 * disk root pool, or a list of physpath names for the vdevs in a mirrored rootpool.
2261 * e.g.
2262 * "/pci@1f,0/ide@d/disk@0,0:a /pci@1f,0/ide@d/disk@2,0:a"
2263 *
2264 * For Sparc, devpath_list consists of the physpath name of the booting device,
2265 * no matter whether the rootpool is a single-device pool or a mirrored pool.
2266 * e.g.
2267 * "/pci@1f,0/ide@d/disk@0,0:a"
2268 */
2269 int
2270 spa_import_rootpool(char *devpath_list)
2271 {
2272 nvlist_t *conf = NULL;
2273 char *dev = NULL;
2274 char *pname;
2275 int error;
2276
2277 /*
2278 * Get the vdev pathname and configuration from the most
2279 * recently updated vdev (highest txg).
2280 */
2281 if (error = spa_get_rootconf(devpath_list, &dev, &conf))
2282 goto msg_out;
2283
2284 /*
2285 * Add type "root" vdev to the config.
2286 */
2287 spa_build_rootpool_config(conf);
2288
2289 VERIFY(nvlist_lookup_string(conf, ZPOOL_CONFIG_POOL_NAME, &pname) == 0);
2290
2291 /*
2292 * We specify 'allowfaulted' for this to be treated like spa_open()
2293 * instead of spa_import(). This prevents us from marking vdevs as
2294 * persistently unavailable, and generates FMA ereports as if it were a
2295 * pool open, not import.
2296 */
2297 error = spa_import_common(pname, conf, NULL, B_TRUE, B_TRUE);
2298 if (error == EEXIST)
2299 error = 0;
2300
2301 nvlist_free(conf);
2302 return (error);
2303
2304 msg_out:
2305 cmn_err(CE_NOTE, "\n\n"
2306 " *************************************************** \n"
2307 " * This device is not bootable! * \n"
2308 " * It is either offlined or detached or faulted. * \n"
2309 " * Please try to boot from a different device. * \n"
2310 " *************************************************** \n\n");
2311
2312 return (error);
2313 }
2314 #endif
2315
2316 /*
2317 * Import a non-root pool into the system.
2318 */ 2319 int 2320 spa_import(const char *pool, nvlist_t *config, nvlist_t *props) 2321 { 2322 return (spa_import_common(pool, config, props, B_FALSE, B_FALSE)); 2323 } 2324 2325 int 2326 spa_import_faulted(const char *pool, nvlist_t *config, nvlist_t *props) 2327 { 2328 return (spa_import_common(pool, config, props, B_FALSE, B_TRUE)); 2329 } 2330 2331 2332 /* 2333 * This (illegal) pool name is used when temporarily importing a spa_t in order 2334 * to get the vdev stats associated with the imported devices. 2335 */ 2336 #define TRYIMPORT_NAME "$import" 2337 2338 nvlist_t * 2339 spa_tryimport(nvlist_t *tryconfig) 2340 { 2341 nvlist_t *config = NULL; 2342 char *poolname; 2343 spa_t *spa; 2344 uint64_t state; 2345 2346 if (nvlist_lookup_string(tryconfig, ZPOOL_CONFIG_POOL_NAME, &poolname)) 2347 return (NULL); 2348 2349 if (nvlist_lookup_uint64(tryconfig, ZPOOL_CONFIG_POOL_STATE, &state)) 2350 return (NULL); 2351 2352 /* 2353 * Create and initialize the spa structure. 2354 */ 2355 mutex_enter(&spa_namespace_lock); 2356 spa = spa_add(TRYIMPORT_NAME, NULL); 2357 spa_activate(spa); 2358 2359 /* 2360 * Pass off the heavy lifting to spa_load(). 2361 * Pass TRUE for mosconfig because the user-supplied config 2362 * is actually the one to trust when doing an import. 2363 */ 2364 (void) spa_load(spa, tryconfig, SPA_LOAD_TRYIMPORT, B_TRUE); 2365 2366 /* 2367 * If 'tryconfig' was at least parsable, return the current config. 2368 */ 2369 if (spa->spa_root_vdev != NULL) { 2370 spa_config_enter(spa, RW_READER, FTAG); 2371 config = spa_config_generate(spa, NULL, -1ULL, B_TRUE); 2372 spa_config_exit(spa, FTAG); 2373 VERIFY(nvlist_add_string(config, ZPOOL_CONFIG_POOL_NAME, 2374 poolname) == 0); 2375 VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_POOL_STATE, 2376 state) == 0); 2377 VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_TIMESTAMP, 2378 spa->spa_uberblock.ub_timestamp) == 0); 2379 2380 /* 2381 * If the bootfs property exists on this pool then we 2382 * copy it out so that external consumers can tell which 2383 * pools are bootable. 2384 */ 2385 if (spa->spa_bootfs) { 2386 char *tmpname = kmem_alloc(MAXPATHLEN, KM_SLEEP); 2387 2388 /* 2389 * We have to play games with the name since the 2390 * pool was opened as TRYIMPORT_NAME. 2391 */ 2392 if (dsl_dsobj_to_dsname(spa->spa_name, 2393 spa->spa_bootfs, tmpname) == 0) { 2394 char *cp; 2395 char *dsname = kmem_alloc(MAXPATHLEN, KM_SLEEP); 2396 2397 cp = strchr(tmpname, '/'); 2398 if (cp == NULL) { 2399 (void) strlcpy(dsname, tmpname, 2400 MAXPATHLEN); 2401 } else { 2402 (void) snprintf(dsname, MAXPATHLEN, 2403 "%s/%s", poolname, ++cp); 2404 } 2405 VERIFY(nvlist_add_string(config, 2406 ZPOOL_CONFIG_BOOTFS, dsname) == 0); 2407 kmem_free(dsname, MAXPATHLEN); 2408 } 2409 kmem_free(tmpname, MAXPATHLEN); 2410 } 2411 2412 /* 2413 * Add the list of hot spares and level 2 cache devices. 2414 */ 2415 spa_add_spares(spa, config); 2416 spa_add_l2cache(spa, config); 2417 } 2418 2419 spa_unload(spa); 2420 spa_deactivate(spa); 2421 spa_remove(spa); 2422 mutex_exit(&spa_namespace_lock); 2423 2424 return (config); 2425 } 2426 2427 /* 2428 * Pool export/destroy 2429 * 2430 * The act of destroying or exporting a pool is very simple. We make sure there 2431 * is no more pending I/O and any references to the pool are gone. Then, we 2432 * update the pool state and sync all the labels to disk, removing the 2433 * configuration from the cache afterwards. 
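 * Note the locking dance below: we take a reference on the spa and drop
 * the namespace lock before suspending async tasks, since the async
 * thread may itself need the namespace lock, and then reacquire the lock
 * before deciding whether the export or destroy can proceed.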
2434 */ 2435 static int 2436 spa_export_common(char *pool, int new_state, nvlist_t **oldconfig) 2437 { 2438 spa_t *spa; 2439 2440 if (oldconfig) 2441 *oldconfig = NULL; 2442 2443 if (!(spa_mode & FWRITE)) 2444 return (EROFS); 2445 2446 mutex_enter(&spa_namespace_lock); 2447 if ((spa = spa_lookup(pool)) == NULL) { 2448 mutex_exit(&spa_namespace_lock); 2449 return (ENOENT); 2450 } 2451 2452 /* 2453 * Put a hold on the pool, drop the namespace lock, stop async tasks, 2454 * reacquire the namespace lock, and see if we can export. 2455 */ 2456 spa_open_ref(spa, FTAG); 2457 mutex_exit(&spa_namespace_lock); 2458 spa_async_suspend(spa); 2459 mutex_enter(&spa_namespace_lock); 2460 spa_close(spa, FTAG); 2461 2462 /* 2463 * The pool will be in core if it's openable, 2464 * in which case we can modify its state. 2465 */ 2466 if (spa->spa_state != POOL_STATE_UNINITIALIZED && spa->spa_sync_on) { 2467 /* 2468 * Objsets may be open only because they're dirty, so we 2469 * have to force it to sync before checking spa_refcnt. 2470 */ 2471 spa_scrub_suspend(spa); 2472 txg_wait_synced(spa->spa_dsl_pool, 0); 2473 2474 /* 2475 * A pool cannot be exported or destroyed if there are active 2476 * references. If we are resetting a pool, allow references by 2477 * fault injection handlers. 2478 */ 2479 if (!spa_refcount_zero(spa) || 2480 (spa->spa_inject_ref != 0 && 2481 new_state != POOL_STATE_UNINITIALIZED)) { 2482 spa_scrub_resume(spa); 2483 spa_async_resume(spa); 2484 mutex_exit(&spa_namespace_lock); 2485 return (EBUSY); 2486 } 2487 2488 spa_scrub_resume(spa); 2489 VERIFY(spa_scrub(spa, POOL_SCRUB_NONE, B_TRUE) == 0); 2490 2491 /* 2492 * We want this to be reflected on every label, 2493 * so mark them all dirty. spa_unload() will do the 2494 * final sync that pushes these changes out. 2495 */ 2496 if (new_state != POOL_STATE_UNINITIALIZED) { 2497 spa_config_enter(spa, RW_WRITER, FTAG); 2498 spa->spa_state = new_state; 2499 spa->spa_final_txg = spa_last_synced_txg(spa) + 1; 2500 vdev_config_dirty(spa->spa_root_vdev); 2501 spa_config_exit(spa, FTAG); 2502 } 2503 } 2504 2505 spa_event_notify(spa, NULL, ESC_ZFS_POOL_DESTROY); 2506 2507 if (spa->spa_state != POOL_STATE_UNINITIALIZED) { 2508 spa_unload(spa); 2509 spa_deactivate(spa); 2510 } 2511 2512 if (oldconfig && spa->spa_config) 2513 VERIFY(nvlist_dup(spa->spa_config, oldconfig, 0) == 0); 2514 2515 if (new_state != POOL_STATE_UNINITIALIZED) { 2516 spa_config_sync(spa, B_TRUE, B_TRUE); 2517 spa_remove(spa); 2518 } 2519 mutex_exit(&spa_namespace_lock); 2520 2521 return (0); 2522 } 2523 2524 /* 2525 * Destroy a storage pool. 2526 */ 2527 int 2528 spa_destroy(char *pool) 2529 { 2530 return (spa_export_common(pool, POOL_STATE_DESTROYED, NULL)); 2531 } 2532 2533 /* 2534 * Export a storage pool. 2535 */ 2536 int 2537 spa_export(char *pool, nvlist_t **oldconfig) 2538 { 2539 return (spa_export_common(pool, POOL_STATE_EXPORTED, oldconfig)); 2540 } 2541 2542 /* 2543 * Similar to spa_export(), this unloads the spa_t without actually removing it 2544 * from the namespace in any way. 2545 */ 2546 int 2547 spa_reset(char *pool) 2548 { 2549 return (spa_export_common(pool, POOL_STATE_UNINITIALIZED, NULL)); 2550 } 2551 2552 2553 /* 2554 * ========================================================================== 2555 * Device manipulation 2556 * ========================================================================== 2557 */ 2558 2559 /* 2560 * Add a device to a storage pool. 
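 * The caller-supplied nvroot describes only what is being added: zero or
 * more new top-level vdevs as children of a root vdev, plus optional
 * ZPOOL_CONFIG_SPARES and ZPOOL_CONFIG_L2CACHE arrays.  Everything below
 * happens between spa_vdev_enter() and spa_vdev_exit(), which serialize
 * against other configuration changes and sync out the updated config.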
2561 */ 2562 int 2563 spa_vdev_add(spa_t *spa, nvlist_t *nvroot) 2564 { 2565 uint64_t txg; 2566 int c, error; 2567 vdev_t *rvd = spa->spa_root_vdev; 2568 vdev_t *vd, *tvd; 2569 nvlist_t **spares, **l2cache; 2570 uint_t nspares, nl2cache; 2571 2572 txg = spa_vdev_enter(spa); 2573 2574 if ((error = spa_config_parse(spa, &vd, nvroot, NULL, 0, 2575 VDEV_ALLOC_ADD)) != 0) 2576 return (spa_vdev_exit(spa, NULL, txg, error)); 2577 2578 spa->spa_pending_vdev = vd; 2579 2580 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES, &spares, 2581 &nspares) != 0) 2582 nspares = 0; 2583 2584 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE, &l2cache, 2585 &nl2cache) != 0) 2586 nl2cache = 0; 2587 2588 if (vd->vdev_children == 0 && nspares == 0 && nl2cache == 0) { 2589 spa->spa_pending_vdev = NULL; 2590 return (spa_vdev_exit(spa, vd, txg, EINVAL)); 2591 } 2592 2593 if (vd->vdev_children != 0) { 2594 if ((error = vdev_create(vd, txg, B_FALSE)) != 0) { 2595 spa->spa_pending_vdev = NULL; 2596 return (spa_vdev_exit(spa, vd, txg, error)); 2597 } 2598 } 2599 2600 /* 2601 * We must validate the spares and l2cache devices after checking the 2602 * children. Otherwise, vdev_inuse() will blindly overwrite the spare. 2603 */ 2604 if ((error = spa_validate_aux(spa, nvroot, txg, VDEV_ALLOC_ADD)) != 0) { 2605 spa->spa_pending_vdev = NULL; 2606 return (spa_vdev_exit(spa, vd, txg, error)); 2607 } 2608 2609 spa->spa_pending_vdev = NULL; 2610 2611 /* 2612 * Transfer each new top-level vdev from vd to rvd. 2613 */ 2614 for (c = 0; c < vd->vdev_children; c++) { 2615 tvd = vd->vdev_child[c]; 2616 vdev_remove_child(vd, tvd); 2617 tvd->vdev_id = rvd->vdev_children; 2618 vdev_add_child(rvd, tvd); 2619 vdev_config_dirty(tvd); 2620 } 2621 2622 if (nspares != 0) { 2623 spa_set_aux_vdevs(&spa->spa_spares, spares, nspares, 2624 ZPOOL_CONFIG_SPARES); 2625 spa_load_spares(spa); 2626 spa->spa_spares.sav_sync = B_TRUE; 2627 } 2628 2629 if (nl2cache != 0) { 2630 spa_set_aux_vdevs(&spa->spa_l2cache, l2cache, nl2cache, 2631 ZPOOL_CONFIG_L2CACHE); 2632 spa_load_l2cache(spa); 2633 spa->spa_l2cache.sav_sync = B_TRUE; 2634 } 2635 2636 /* 2637 * We have to be careful when adding new vdevs to an existing pool. 2638 * If other threads start allocating from these vdevs before we 2639 * sync the config cache, and we lose power, then upon reboot we may 2640 * fail to open the pool because there are DVAs that the config cache 2641 * can't translate. Therefore, we first add the vdevs without 2642 * initializing metaslabs; sync the config cache (via spa_vdev_exit()); 2643 * and then let spa_config_update() initialize the new metaslabs. 2644 * 2645 * spa_load() checks for added-but-not-initialized vdevs, so that 2646 * if we lose power at any point in this sequence, the remaining 2647 * steps will be completed the next time we load the pool. 2648 */ 2649 (void) spa_vdev_exit(spa, vd, txg, 0); 2650 2651 mutex_enter(&spa_namespace_lock); 2652 spa_config_update(spa, SPA_CONFIG_UPDATE_POOL); 2653 mutex_exit(&spa_namespace_lock); 2654 2655 return (0); 2656 } 2657 2658 /* 2659 * Attach a device to a mirror. The arguments are the path to any device 2660 * in the mirror, and the nvroot for the new device. If the path specifies 2661 * a device that is not mirrored, we automatically insert the mirror vdev. 
2662 * 2663 * If 'replacing' is specified, the new device is intended to replace the 2664 * existing device; in this case the two devices are made into their own 2665 * mirror using the 'replacing' vdev, which is functionally identical to 2666 * the mirror vdev (it actually reuses all the same ops) but has a few 2667 * extra rules: you can't attach to it after it's been created, and upon 2668 * completion of resilvering, the first disk (the one being replaced) 2669 * is automatically detached. 2670 */ 2671 int 2672 spa_vdev_attach(spa_t *spa, uint64_t guid, nvlist_t *nvroot, int replacing) 2673 { 2674 uint64_t txg, open_txg; 2675 int error; 2676 vdev_t *rvd = spa->spa_root_vdev; 2677 vdev_t *oldvd, *newvd, *newrootvd, *pvd, *tvd; 2678 vdev_ops_t *pvops; 2679 int is_log; 2680 2681 txg = spa_vdev_enter(spa); 2682 2683 oldvd = spa_lookup_by_guid(spa, guid, B_FALSE); 2684 2685 if (oldvd == NULL) 2686 return (spa_vdev_exit(spa, NULL, txg, ENODEV)); 2687 2688 if (!oldvd->vdev_ops->vdev_op_leaf) 2689 return (spa_vdev_exit(spa, NULL, txg, ENOTSUP)); 2690 2691 pvd = oldvd->vdev_parent; 2692 2693 if ((error = spa_config_parse(spa, &newrootvd, nvroot, NULL, 0, 2694 VDEV_ALLOC_ADD)) != 0) 2695 return (spa_vdev_exit(spa, NULL, txg, EINVAL)); 2696 2697 if (newrootvd->vdev_children != 1) 2698 return (spa_vdev_exit(spa, newrootvd, txg, EINVAL)); 2699 2700 newvd = newrootvd->vdev_child[0]; 2701 2702 if (!newvd->vdev_ops->vdev_op_leaf) 2703 return (spa_vdev_exit(spa, newrootvd, txg, EINVAL)); 2704 2705 if ((error = vdev_create(newrootvd, txg, replacing)) != 0) 2706 return (spa_vdev_exit(spa, newrootvd, txg, error)); 2707 2708 /* 2709 * Spares can't replace logs 2710 */ 2711 is_log = oldvd->vdev_islog; 2712 if (is_log && newvd->vdev_isspare) 2713 return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP)); 2714 2715 if (!replacing) { 2716 /* 2717 * For attach, the only allowable parent is a mirror or the root 2718 * vdev. 2719 */ 2720 if (pvd->vdev_ops != &vdev_mirror_ops && 2721 pvd->vdev_ops != &vdev_root_ops) 2722 return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP)); 2723 2724 pvops = &vdev_mirror_ops; 2725 } else { 2726 /* 2727 * Active hot spares can only be replaced by inactive hot 2728 * spares. 2729 */ 2730 if (pvd->vdev_ops == &vdev_spare_ops && 2731 pvd->vdev_child[1] == oldvd && 2732 !spa_has_spare(spa, newvd->vdev_guid)) 2733 return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP)); 2734 2735 /* 2736 * If the source is a hot spare, and the parent isn't already a 2737 * spare, then we want to create a new hot spare. Otherwise, we 2738 * want to create a replacing vdev. The user is not allowed to 2739 * attach to a spared vdev child unless the 'isspare' state is 2740 * the same (spare replaces spare, non-spare replaces 2741 * non-spare). 2742 */ 2743 if (pvd->vdev_ops == &vdev_replacing_ops) 2744 return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP)); 2745 else if (pvd->vdev_ops == &vdev_spare_ops && 2746 newvd->vdev_isspare != oldvd->vdev_isspare) 2747 return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP)); 2748 else if (pvd->vdev_ops != &vdev_spare_ops && 2749 newvd->vdev_isspare) 2750 pvops = &vdev_spare_ops; 2751 else 2752 pvops = &vdev_replacing_ops; 2753 } 2754 2755 /* 2756 * Compare the new device size with the replaceable/attachable 2757 * device size. 2758 */ 2759 if (newvd->vdev_psize < vdev_get_rsize(oldvd)) 2760 return (spa_vdev_exit(spa, newrootvd, txg, EOVERFLOW)); 2761 2762 /* 2763 * The new device cannot have a higher alignment requirement 2764 * than the top-level vdev. 
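 * (vdev_ashift is the log2 of a device's minimum allocation size; blocks
 * already allocated in this top-level vdev assume the old alignment, so
 * a replacement with a coarser ashift could not service them, hence the
 * EDOM below.)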
2765 */ 2766 if (newvd->vdev_ashift > oldvd->vdev_top->vdev_ashift) 2767 return (spa_vdev_exit(spa, newrootvd, txg, EDOM)); 2768 2769 /* 2770 * If this is an in-place replacement, update oldvd's path and devid 2771 * to make it distinguishable from newvd, and unopenable from now on. 2772 */ 2773 if (strcmp(oldvd->vdev_path, newvd->vdev_path) == 0) { 2774 spa_strfree(oldvd->vdev_path); 2775 oldvd->vdev_path = kmem_alloc(strlen(newvd->vdev_path) + 5, 2776 KM_SLEEP); 2777 (void) sprintf(oldvd->vdev_path, "%s/%s", 2778 newvd->vdev_path, "old"); 2779 if (oldvd->vdev_devid != NULL) { 2780 spa_strfree(oldvd->vdev_devid); 2781 oldvd->vdev_devid = NULL; 2782 } 2783 } 2784 2785 /* 2786 * If the parent is not a mirror, or if we're replacing, insert the new 2787 * mirror/replacing/spare vdev above oldvd. 2788 */ 2789 if (pvd->vdev_ops != pvops) 2790 pvd = vdev_add_parent(oldvd, pvops); 2791 2792 ASSERT(pvd->vdev_top->vdev_parent == rvd); 2793 ASSERT(pvd->vdev_ops == pvops); 2794 ASSERT(oldvd->vdev_parent == pvd); 2795 2796 /* 2797 * Extract the new device from its root and add it to pvd. 2798 */ 2799 vdev_remove_child(newrootvd, newvd); 2800 newvd->vdev_id = pvd->vdev_children; 2801 vdev_add_child(pvd, newvd); 2802 2803 /* 2804 * If newvd is smaller than oldvd, but larger than its rsize, 2805 * the addition of newvd may have decreased our parent's asize. 2806 */ 2807 pvd->vdev_asize = MIN(pvd->vdev_asize, newvd->vdev_asize); 2808 2809 tvd = newvd->vdev_top; 2810 ASSERT(pvd->vdev_top == tvd); 2811 ASSERT(tvd->vdev_parent == rvd); 2812 2813 vdev_config_dirty(tvd); 2814 2815 /* 2816 * Set newvd's DTL to [TXG_INITIAL, open_txg]. It will propagate 2817 * upward when spa_vdev_exit() calls vdev_dtl_reassess(). 2818 */ 2819 open_txg = txg + TXG_CONCURRENT_STATES - 1; 2820 2821 mutex_enter(&newvd->vdev_dtl_lock); 2822 space_map_add(&newvd->vdev_dtl_map, TXG_INITIAL, 2823 open_txg - TXG_INITIAL + 1); 2824 mutex_exit(&newvd->vdev_dtl_lock); 2825 2826 if (newvd->vdev_isspare) 2827 spa_spare_activate(newvd); 2828 2829 /* 2830 * Mark newvd's DTL dirty in this txg. 2831 */ 2832 vdev_dirty(tvd, VDD_DTL, newvd, txg); 2833 2834 (void) spa_vdev_exit(spa, newrootvd, open_txg, 0); 2835 2836 /* 2837 * Kick off a resilver to update newvd. We need to grab the namespace 2838 * lock because spa_scrub() needs to post a sysevent with the pool name. 2839 */ 2840 mutex_enter(&spa_namespace_lock); 2841 VERIFY(spa_scrub(spa, POOL_SCRUB_RESILVER, B_TRUE) == 0); 2842 mutex_exit(&spa_namespace_lock); 2843 2844 return (0); 2845 } 2846 2847 /* 2848 * Detach a device from a mirror or replacing vdev. 2849 * If 'replace_done' is specified, only detach if the parent 2850 * is a replacing vdev. 2851 */ 2852 int 2853 spa_vdev_detach(spa_t *spa, uint64_t guid, int replace_done) 2854 { 2855 uint64_t txg; 2856 int c, t, error; 2857 vdev_t *rvd = spa->spa_root_vdev; 2858 vdev_t *vd, *pvd, *cvd, *tvd; 2859 boolean_t unspare = B_FALSE; 2860 uint64_t unspare_guid; 2861 size_t len; 2862 2863 txg = spa_vdev_enter(spa); 2864 2865 vd = spa_lookup_by_guid(spa, guid, B_FALSE); 2866 2867 if (vd == NULL) 2868 return (spa_vdev_exit(spa, NULL, txg, ENODEV)); 2869 2870 if (!vd->vdev_ops->vdev_op_leaf) 2871 return (spa_vdev_exit(spa, NULL, txg, ENOTSUP)); 2872 2873 pvd = vd->vdev_parent; 2874 2875 /* 2876 * If replace_done is specified, only remove this device if it's 2877 * the first child of a replacing vdev. For the 'spare' vdev, either 2878 * disk can be removed. 
2879 */ 2880 if (replace_done) { 2881 if (pvd->vdev_ops == &vdev_replacing_ops) { 2882 if (vd->vdev_id != 0) 2883 return (spa_vdev_exit(spa, NULL, txg, ENOTSUP)); 2884 } else if (pvd->vdev_ops != &vdev_spare_ops) { 2885 return (spa_vdev_exit(spa, NULL, txg, ENOTSUP)); 2886 } 2887 } 2888 2889 ASSERT(pvd->vdev_ops != &vdev_spare_ops || 2890 spa_version(spa) >= SPA_VERSION_SPARES); 2891 2892 /* 2893 * Only mirror, replacing, and spare vdevs support detach. 2894 */ 2895 if (pvd->vdev_ops != &vdev_replacing_ops && 2896 pvd->vdev_ops != &vdev_mirror_ops && 2897 pvd->vdev_ops != &vdev_spare_ops) 2898 return (spa_vdev_exit(spa, NULL, txg, ENOTSUP)); 2899 2900 /* 2901 * If there's only one replica, you can't detach it. 2902 */ 2903 if (pvd->vdev_children <= 1) 2904 return (spa_vdev_exit(spa, NULL, txg, EBUSY)); 2905 2906 /* 2907 * If all siblings have non-empty DTLs, this device may have the only 2908 * valid copy of the data, which means we cannot safely detach it. 2909 * 2910 * XXX -- as in the vdev_offline() case, we really want a more 2911 * precise DTL check. 2912 */ 2913 for (c = 0; c < pvd->vdev_children; c++) { 2914 uint64_t dirty; 2915 2916 cvd = pvd->vdev_child[c]; 2917 if (cvd == vd) 2918 continue; 2919 if (vdev_is_dead(cvd)) 2920 continue; 2921 mutex_enter(&cvd->vdev_dtl_lock); 2922 dirty = cvd->vdev_dtl_map.sm_space | 2923 cvd->vdev_dtl_scrub.sm_space; 2924 mutex_exit(&cvd->vdev_dtl_lock); 2925 if (!dirty) 2926 break; 2927 } 2928 2929 /* 2930 * If we are a replacing or spare vdev, then we can always detach the 2931 * latter child, as that is how one cancels the operation. 2932 */ 2933 if ((pvd->vdev_ops == &vdev_mirror_ops || vd->vdev_id != 1) && 2934 c == pvd->vdev_children) 2935 return (spa_vdev_exit(spa, NULL, txg, EBUSY)); 2936 2937 /* 2938 * If we are detaching the second disk from a replacing vdev, then 2939 * check to see if we changed the original vdev's path to have "/old" 2940 * at the end in spa_vdev_attach(). If so, undo that change now. 2941 */ 2942 if (pvd->vdev_ops == &vdev_replacing_ops && vd->vdev_id == 1 && 2943 pvd->vdev_child[0]->vdev_path != NULL && 2944 pvd->vdev_child[1]->vdev_path != NULL) { 2945 ASSERT(pvd->vdev_child[1] == vd); 2946 cvd = pvd->vdev_child[0]; 2947 len = strlen(vd->vdev_path); 2948 if (strncmp(cvd->vdev_path, vd->vdev_path, len) == 0 && 2949 strcmp(cvd->vdev_path + len, "/old") == 0) { 2950 spa_strfree(cvd->vdev_path); 2951 cvd->vdev_path = spa_strdup(vd->vdev_path); 2952 } 2953 } 2954 2955 /* 2956 * If we are detaching the original disk from a spare, then it implies 2957 * that the spare should become a real disk, and be removed from the 2958 * active spare list for the pool. 2959 */ 2960 if (pvd->vdev_ops == &vdev_spare_ops && 2961 vd->vdev_id == 0) 2962 unspare = B_TRUE; 2963 2964 /* 2965 * Erase the disk labels so the disk can be used for other things. 2966 * This must be done after all other error cases are handled, 2967 * but before we disembowel vd (so we can still do I/O to it). 2968 * But if we can't do it, don't treat the error as fatal -- 2969 * it may be that the unwritability of the disk is the reason 2970 * it's being detached! 2971 */ 2972 error = vdev_label_init(vd, 0, VDEV_LABEL_REMOVE); 2973 2974 /* 2975 * Remove vd from its parent and compact the parent's children. 2976 */ 2977 vdev_remove_child(pvd, vd); 2978 vdev_compact_children(pvd); 2979 2980 /* 2981 * Remember one of the remaining children so we can get tvd below. 
2982 */ 2983 cvd = pvd->vdev_child[0]; 2984 2985 /* 2986 * If we need to remove the remaining child from the list of hot spares, 2987 * do it now, marking the vdev as no longer a spare in the process. We 2988 * must do this before vdev_remove_parent(), because that can change the 2989 * GUID if it creates a new toplevel GUID. 2990 */ 2991 if (unspare) { 2992 ASSERT(cvd->vdev_isspare); 2993 spa_spare_remove(cvd); 2994 unspare_guid = cvd->vdev_guid; 2995 } 2996 2997 /* 2998 * If the parent mirror/replacing vdev only has one child, 2999 * the parent is no longer needed. Remove it from the tree. 3000 */ 3001 if (pvd->vdev_children == 1) 3002 vdev_remove_parent(cvd); 3003 3004 /* 3005 * We don't set tvd until now because the parent we just removed 3006 * may have been the previous top-level vdev. 3007 */ 3008 tvd = cvd->vdev_top; 3009 ASSERT(tvd->vdev_parent == rvd); 3010 3011 /* 3012 * Reevaluate the parent vdev state. 3013 */ 3014 vdev_propagate_state(cvd); 3015 3016 /* 3017 * If the device we just detached was smaller than the others, it may be 3018 * possible to add metaslabs (i.e. grow the pool). vdev_metaslab_init() 3019 * can't fail because the existing metaslabs are already in core, so 3020 * there's nothing to read from disk. 3021 */ 3022 VERIFY(vdev_metaslab_init(tvd, txg) == 0); 3023 3024 vdev_config_dirty(tvd); 3025 3026 /* 3027 * Mark vd's DTL as dirty in this txg. vdev_dtl_sync() will see that 3028 * vd->vdev_detached is set and free vd's DTL object in syncing context. 3029 * But first make sure we're not on any *other* txg's DTL list, to 3030 * prevent vd from being accessed after it's freed. 3031 */ 3032 for (t = 0; t < TXG_SIZE; t++) 3033 (void) txg_list_remove_this(&tvd->vdev_dtl_list, vd, t); 3034 vd->vdev_detached = B_TRUE; 3035 vdev_dirty(tvd, VDD_DTL, vd, txg); 3036 3037 spa_event_notify(spa, vd, ESC_ZFS_VDEV_REMOVE); 3038 3039 error = spa_vdev_exit(spa, vd, txg, 0); 3040 3041 /* 3042 * If this was the removal of the original device in a hot spare vdev, 3043 * then we want to go through and remove the device from the hot spare 3044 * list of every other pool. 3045 */ 3046 if (unspare) { 3047 spa = NULL; 3048 mutex_enter(&spa_namespace_lock); 3049 while ((spa = spa_next(spa)) != NULL) { 3050 if (spa->spa_state != POOL_STATE_ACTIVE) 3051 continue; 3052 3053 (void) spa_vdev_remove(spa, unspare_guid, B_TRUE); 3054 } 3055 mutex_exit(&spa_namespace_lock); 3056 } 3057 3058 return (error); 3059 } 3060 3061 /* 3062 * Remove a spares vdev from the nvlist config. 3063 */ 3064 static int 3065 spa_remove_spares(spa_aux_vdev_t *sav, uint64_t guid, boolean_t unspare, 3066 nvlist_t **spares, int nspares, vdev_t *vd) 3067 { 3068 nvlist_t *nv, **newspares; 3069 int i, j; 3070 3071 nv = NULL; 3072 for (i = 0; i < nspares; i++) { 3073 uint64_t theguid; 3074 3075 VERIFY(nvlist_lookup_uint64(spares[i], 3076 ZPOOL_CONFIG_GUID, &theguid) == 0); 3077 if (theguid == guid) { 3078 nv = spares[i]; 3079 break; 3080 } 3081 } 3082 3083 /* 3084 * Only remove the hot spare if it's not currently in use in this pool. 
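 * Here 'nv' is the matching entry in this pool's spare list, if any, and
 * 'vd' is the in-core vdev if the device is currently active in this
 * pool.  Roughly: an unknown guid gets ENOENT; a spare that is active
 * here but not on our list (e.g. one shared from another pool) gets
 * ENOTSUP; a spare on our list that is in use gets EBUSY unless the
 * caller asked to unspare it.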
3085 */ 3086 if (nv == NULL && vd == NULL) 3087 return (ENOENT); 3088 3089 if (nv == NULL && vd != NULL) 3090 return (ENOTSUP); 3091 3092 if (!unspare && nv != NULL && vd != NULL) 3093 return (EBUSY); 3094 3095 if (nspares == 1) { 3096 newspares = NULL; 3097 } else { 3098 newspares = kmem_alloc((nspares - 1) * sizeof (void *), 3099 KM_SLEEP); 3100 for (i = 0, j = 0; i < nspares; i++) { 3101 if (spares[i] != nv) 3102 VERIFY(nvlist_dup(spares[i], 3103 &newspares[j++], KM_SLEEP) == 0); 3104 } 3105 } 3106 3107 VERIFY(nvlist_remove(sav->sav_config, ZPOOL_CONFIG_SPARES, 3108 DATA_TYPE_NVLIST_ARRAY) == 0); 3109 VERIFY(nvlist_add_nvlist_array(sav->sav_config, 3110 ZPOOL_CONFIG_SPARES, newspares, nspares - 1) == 0); 3111 for (i = 0; i < nspares - 1; i++) 3112 nvlist_free(newspares[i]); 3113 kmem_free(newspares, (nspares - 1) * sizeof (void *)); 3114 3115 return (0); 3116 } 3117 3118 /* 3119 * Remove an l2cache vdev from the nvlist config. 3120 */ 3121 static int 3122 spa_remove_l2cache(spa_aux_vdev_t *sav, uint64_t guid, nvlist_t **l2cache, 3123 int nl2cache, vdev_t *vd) 3124 { 3125 nvlist_t *nv, **newl2cache; 3126 int i, j; 3127 3128 nv = NULL; 3129 for (i = 0; i < nl2cache; i++) { 3130 uint64_t theguid; 3131 3132 VERIFY(nvlist_lookup_uint64(l2cache[i], 3133 ZPOOL_CONFIG_GUID, &theguid) == 0); 3134 if (theguid == guid) { 3135 nv = l2cache[i]; 3136 break; 3137 } 3138 } 3139 3140 if (vd == NULL) { 3141 for (i = 0; i < nl2cache; i++) { 3142 if (sav->sav_vdevs[i]->vdev_guid == guid) { 3143 vd = sav->sav_vdevs[i]; 3144 break; 3145 } 3146 } 3147 } 3148 3149 if (nv == NULL && vd == NULL) 3150 return (ENOENT); 3151 3152 if (nv == NULL && vd != NULL) 3153 return (ENOTSUP); 3154 3155 if (nl2cache == 1) { 3156 newl2cache = NULL; 3157 } else { 3158 newl2cache = kmem_alloc((nl2cache - 1) * sizeof (void *), 3159 KM_SLEEP); 3160 for (i = 0, j = 0; i < nl2cache; i++) { 3161 if (l2cache[i] != nv) 3162 VERIFY(nvlist_dup(l2cache[i], 3163 &newl2cache[j++], KM_SLEEP) == 0); 3164 } 3165 } 3166 3167 VERIFY(nvlist_remove(sav->sav_config, ZPOOL_CONFIG_L2CACHE, 3168 DATA_TYPE_NVLIST_ARRAY) == 0); 3169 VERIFY(nvlist_add_nvlist_array(sav->sav_config, 3170 ZPOOL_CONFIG_L2CACHE, newl2cache, nl2cache - 1) == 0); 3171 for (i = 0; i < nl2cache - 1; i++) 3172 nvlist_free(newl2cache[i]); 3173 kmem_free(newl2cache, (nl2cache - 1) * sizeof (void *)); 3174 3175 return (0); 3176 } 3177 3178 /* 3179 * Remove a device from the pool. Currently, this supports removing only hot 3180 * spares and level 2 ARC devices. 
3181 */
3182 int
3183 spa_vdev_remove(spa_t *spa, uint64_t guid, boolean_t unspare)
3184 {
3185 vdev_t *vd;
3186 nvlist_t **spares, **l2cache;
3187 uint_t nspares, nl2cache;
3188 int error = 0;
3189
3190 spa_config_enter(spa, RW_WRITER, FTAG);
3191
3192 vd = spa_lookup_by_guid(spa, guid, B_FALSE);
3193
3194 if (spa->spa_spares.sav_vdevs != NULL &&
3195 spa_spare_exists(guid, NULL) &&
3196 nvlist_lookup_nvlist_array(spa->spa_spares.sav_config,
3197 ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0) {
3198 if ((error = spa_remove_spares(&spa->spa_spares, guid, unspare,
3199 spares, nspares, vd)) != 0)
3200 goto out;
3201 spa_load_spares(spa);
3202 spa->spa_spares.sav_sync = B_TRUE;
3203 goto out;
3204 }
3205
3206 if (spa->spa_l2cache.sav_vdevs != NULL &&
3207 spa_l2cache_exists(guid, NULL) &&
3208 nvlist_lookup_nvlist_array(spa->spa_l2cache.sav_config,
3209 ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0) {
3210 if ((error = spa_remove_l2cache(&spa->spa_l2cache, guid,
3211 l2cache, nl2cache, vd)) != 0)
3212 goto out;
3213 spa_load_l2cache(spa);
3214 spa->spa_l2cache.sav_sync = B_TRUE;
3215 }
3216
3217 out:
3218 spa_config_exit(spa, FTAG);
3219 return (error);
3220 }
3221
3222 /*
3223 * Find any device that's done replacing, or a vdev marked 'unspare' that's
3224 * currently spared, so we can detach it.
3225 */
3226 static vdev_t *
3227 spa_vdev_resilver_done_hunt(vdev_t *vd)
3228 {
3229 vdev_t *newvd, *oldvd;
3230 int c;
3231
3232 for (c = 0; c < vd->vdev_children; c++) {
3233 oldvd = spa_vdev_resilver_done_hunt(vd->vdev_child[c]);
3234 if (oldvd != NULL)
3235 return (oldvd);
3236 }
3237
3238 /*
3239 * Check for a completed replacement.
3240 */
3241 if (vd->vdev_ops == &vdev_replacing_ops && vd->vdev_children == 2) {
3242 oldvd = vd->vdev_child[0];
3243 newvd = vd->vdev_child[1];
3244
3245 mutex_enter(&newvd->vdev_dtl_lock);
3246 if (newvd->vdev_dtl_map.sm_space == 0 &&
3247 newvd->vdev_dtl_scrub.sm_space == 0) {
3248 mutex_exit(&newvd->vdev_dtl_lock);
3249 return (oldvd);
3250 }
3251 mutex_exit(&newvd->vdev_dtl_lock);
3252 }
3253
3254 /*
3255 * Check for a completed resilver with the 'unspare' flag set.
3256 */
3257 if (vd->vdev_ops == &vdev_spare_ops && vd->vdev_children == 2) {
3258 newvd = vd->vdev_child[0];
3259 oldvd = vd->vdev_child[1];
3260
3261 mutex_enter(&newvd->vdev_dtl_lock);
3262 if (newvd->vdev_unspare &&
3263 newvd->vdev_dtl_map.sm_space == 0 &&
3264 newvd->vdev_dtl_scrub.sm_space == 0) {
3265 newvd->vdev_unspare = 0;
3266 mutex_exit(&newvd->vdev_dtl_lock);
3267 return (oldvd);
3268 }
3269 mutex_exit(&newvd->vdev_dtl_lock);
3270 }
3271
3272 return (NULL);
3273 }
3274
3275 static void
3276 spa_vdev_resilver_done(spa_t *spa)
3277 {
3278 vdev_t *vd;
3279 vdev_t *pvd;
3280 uint64_t guid;
3281 uint64_t pguid = 0;
3282
3283 spa_config_enter(spa, RW_READER, FTAG);
3284
3285 while ((vd = spa_vdev_resilver_done_hunt(spa->spa_root_vdev)) != NULL) {
3286 guid = vd->vdev_guid;
3287 /*
3288 * If we have just finished replacing a hot spared device, then
3289 * we need to detach the parent's first child (the original hot
3290 * spare) as well.
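 * In that case the vdev tree at this point looks roughly like
 *
 *	spare
 *	    replacing			<- pvd
 *		old disk		<- vd, found by the hunt above
 *		new disk
 *	    original hot spare disk	<- its guid is saved in pguid
 *
 * and we detach the old disk first, then the hot spare as well.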
3291 */ 3292 pvd = vd->vdev_parent; 3293 if (pvd->vdev_parent->vdev_ops == &vdev_spare_ops && 3294 pvd->vdev_id == 0) { 3295 ASSERT(pvd->vdev_ops == &vdev_replacing_ops); 3296 ASSERT(pvd->vdev_parent->vdev_children == 2); 3297 pguid = pvd->vdev_parent->vdev_child[1]->vdev_guid; 3298 } 3299 spa_config_exit(spa, FTAG); 3300 if (spa_vdev_detach(spa, guid, B_TRUE) != 0) 3301 return; 3302 if (pguid != 0 && spa_vdev_detach(spa, pguid, B_TRUE) != 0) 3303 return; 3304 spa_config_enter(spa, RW_READER, FTAG); 3305 } 3306 3307 spa_config_exit(spa, FTAG); 3308 } 3309 3310 /* 3311 * Update the stored path for this vdev. Dirty the vdev configuration, relying 3312 * on spa_vdev_enter/exit() to synchronize the labels and cache. 3313 */ 3314 int 3315 spa_vdev_setpath(spa_t *spa, uint64_t guid, const char *newpath) 3316 { 3317 vdev_t *vd; 3318 uint64_t txg; 3319 3320 txg = spa_vdev_enter(spa); 3321 3322 if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL) { 3323 /* 3324 * Determine if this is a reference to a hot spare device. If 3325 * it is, update the path manually as there is no associated 3326 * vdev_t that can be synced to disk. 3327 */ 3328 nvlist_t **spares; 3329 uint_t i, nspares; 3330 3331 if (spa->spa_spares.sav_config != NULL) { 3332 VERIFY(nvlist_lookup_nvlist_array( 3333 spa->spa_spares.sav_config, ZPOOL_CONFIG_SPARES, 3334 &spares, &nspares) == 0); 3335 for (i = 0; i < nspares; i++) { 3336 uint64_t theguid; 3337 VERIFY(nvlist_lookup_uint64(spares[i], 3338 ZPOOL_CONFIG_GUID, &theguid) == 0); 3339 if (theguid == guid) { 3340 VERIFY(nvlist_add_string(spares[i], 3341 ZPOOL_CONFIG_PATH, newpath) == 0); 3342 spa_load_spares(spa); 3343 spa->spa_spares.sav_sync = B_TRUE; 3344 return (spa_vdev_exit(spa, NULL, txg, 3345 0)); 3346 } 3347 } 3348 } 3349 3350 return (spa_vdev_exit(spa, NULL, txg, ENOENT)); 3351 } 3352 3353 if (!vd->vdev_ops->vdev_op_leaf) 3354 return (spa_vdev_exit(spa, NULL, txg, ENOTSUP)); 3355 3356 spa_strfree(vd->vdev_path); 3357 vd->vdev_path = spa_strdup(newpath); 3358 3359 vdev_config_dirty(vd->vdev_top); 3360 3361 return (spa_vdev_exit(spa, NULL, txg, 0)); 3362 } 3363 3364 /* 3365 * ========================================================================== 3366 * SPA Scrubbing 3367 * ========================================================================== 3368 */ 3369 3370 static void 3371 spa_scrub_io_done(zio_t *zio) 3372 { 3373 spa_t *spa = zio->io_spa; 3374 3375 arc_data_buf_free(zio->io_data, zio->io_size); 3376 3377 mutex_enter(&spa->spa_scrub_lock); 3378 if (zio->io_error && !(zio->io_flags & ZIO_FLAG_SPECULATIVE)) { 3379 vdev_t *vd = zio->io_vd ? zio->io_vd : spa->spa_root_vdev; 3380 spa->spa_scrub_errors++; 3381 mutex_enter(&vd->vdev_stat_lock); 3382 vd->vdev_stat.vs_scrub_errors++; 3383 mutex_exit(&vd->vdev_stat_lock); 3384 } 3385 3386 if (--spa->spa_scrub_inflight < spa->spa_scrub_maxinflight) 3387 cv_broadcast(&spa->spa_scrub_io_cv); 3388 3389 ASSERT(spa->spa_scrub_inflight >= 0); 3390 3391 mutex_exit(&spa->spa_scrub_lock); 3392 } 3393 3394 static void 3395 spa_scrub_io_start(spa_t *spa, blkptr_t *bp, int priority, int flags, 3396 zbookmark_t *zb) 3397 { 3398 size_t size = BP_GET_LSIZE(bp); 3399 void *data; 3400 3401 mutex_enter(&spa->spa_scrub_lock); 3402 /* 3403 * Do not give too much work to vdev(s). 
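 * spa_scrub_maxinflight bounds the number of scrub/resilver reads that
 * may be outstanding at once; we block here until spa_scrub_io_done()
 * signals that the in-flight count has dropped back below the limit.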
3404 */ 3405 while (spa->spa_scrub_inflight >= spa->spa_scrub_maxinflight) { 3406 cv_wait(&spa->spa_scrub_io_cv, &spa->spa_scrub_lock); 3407 } 3408 spa->spa_scrub_inflight++; 3409 mutex_exit(&spa->spa_scrub_lock); 3410 3411 data = arc_data_buf_alloc(size); 3412 3413 if (zb->zb_level == -1 && BP_GET_TYPE(bp) != DMU_OT_OBJSET) 3414 flags |= ZIO_FLAG_SPECULATIVE; /* intent log block */ 3415 3416 flags |= ZIO_FLAG_SCRUB_THREAD | ZIO_FLAG_CANFAIL; 3417 3418 zio_nowait(zio_read(NULL, spa, bp, data, size, 3419 spa_scrub_io_done, NULL, priority, flags, zb)); 3420 } 3421 3422 /* ARGSUSED */ 3423 static int 3424 spa_scrub_cb(traverse_blk_cache_t *bc, spa_t *spa, void *a) 3425 { 3426 blkptr_t *bp = &bc->bc_blkptr; 3427 vdev_t *vd = spa->spa_root_vdev; 3428 dva_t *dva = bp->blk_dva; 3429 int needs_resilver = B_FALSE; 3430 int d; 3431 3432 if (bc->bc_errno) { 3433 /* 3434 * We can't scrub this block, but we can continue to scrub 3435 * the rest of the pool. Note the error and move along. 3436 */ 3437 mutex_enter(&spa->spa_scrub_lock); 3438 spa->spa_scrub_errors++; 3439 mutex_exit(&spa->spa_scrub_lock); 3440 3441 mutex_enter(&vd->vdev_stat_lock); 3442 vd->vdev_stat.vs_scrub_errors++; 3443 mutex_exit(&vd->vdev_stat_lock); 3444 3445 return (ERESTART); 3446 } 3447 3448 ASSERT(bp->blk_birth < spa->spa_scrub_maxtxg); 3449 3450 for (d = 0; d < BP_GET_NDVAS(bp); d++) { 3451 vd = vdev_lookup_top(spa, DVA_GET_VDEV(&dva[d])); 3452 3453 ASSERT(vd != NULL); 3454 3455 /* 3456 * Keep track of how much data we've examined so that 3457 * zpool(1M) status can make useful progress reports. 3458 */ 3459 mutex_enter(&vd->vdev_stat_lock); 3460 vd->vdev_stat.vs_scrub_examined += DVA_GET_ASIZE(&dva[d]); 3461 mutex_exit(&vd->vdev_stat_lock); 3462 3463 if (spa->spa_scrub_type == POOL_SCRUB_RESILVER) { 3464 if (DVA_GET_GANG(&dva[d])) { 3465 /* 3466 * Gang members may be spread across multiple 3467 * vdevs, so the best we can do is look at the 3468 * pool-wide DTL. 3469 * XXX -- it would be better to change our 3470 * allocation policy to ensure that this can't 3471 * happen. 3472 */ 3473 vd = spa->spa_root_vdev; 3474 } 3475 if (vdev_dtl_contains(&vd->vdev_dtl_map, 3476 bp->blk_birth, 1)) 3477 needs_resilver = B_TRUE; 3478 } 3479 } 3480 3481 if (spa->spa_scrub_type == POOL_SCRUB_EVERYTHING) 3482 spa_scrub_io_start(spa, bp, ZIO_PRIORITY_SCRUB, 3483 ZIO_FLAG_SCRUB, &bc->bc_bookmark); 3484 else if (needs_resilver) 3485 spa_scrub_io_start(spa, bp, ZIO_PRIORITY_RESILVER, 3486 ZIO_FLAG_RESILVER, &bc->bc_bookmark); 3487 3488 return (0); 3489 } 3490 3491 static void 3492 spa_scrub_thread(spa_t *spa) 3493 { 3494 callb_cpr_t cprinfo; 3495 traverse_handle_t *th = spa->spa_scrub_th; 3496 vdev_t *rvd = spa->spa_root_vdev; 3497 pool_scrub_type_t scrub_type = spa->spa_scrub_type; 3498 int error = 0; 3499 boolean_t complete; 3500 3501 CALLB_CPR_INIT(&cprinfo, &spa->spa_scrub_lock, callb_generic_cpr, FTAG); 3502 3503 /* 3504 * If we're restarting due to a snapshot create/delete, 3505 * wait for that to complete. 3506 */ 3507 txg_wait_synced(spa_get_dsl(spa), 0); 3508 3509 dprintf("start %s mintxg=%llu maxtxg=%llu\n", 3510 scrub_type == POOL_SCRUB_RESILVER ? 
"resilver" : "scrub", 3511 spa->spa_scrub_mintxg, spa->spa_scrub_maxtxg); 3512 3513 spa_config_enter(spa, RW_WRITER, FTAG); 3514 vdev_reopen(rvd); /* purge all vdev caches */ 3515 vdev_config_dirty(rvd); /* rewrite all disk labels */ 3516 vdev_scrub_stat_update(rvd, scrub_type, B_FALSE); 3517 spa_config_exit(spa, FTAG); 3518 3519 mutex_enter(&spa->spa_scrub_lock); 3520 spa->spa_scrub_errors = 0; 3521 spa->spa_scrub_active = 1; 3522 ASSERT(spa->spa_scrub_inflight == 0); 3523 3524 while (!spa->spa_scrub_stop) { 3525 CALLB_CPR_SAFE_BEGIN(&cprinfo); 3526 while (spa->spa_scrub_suspended) { 3527 spa->spa_scrub_active = 0; 3528 cv_broadcast(&spa->spa_scrub_cv); 3529 cv_wait(&spa->spa_scrub_cv, &spa->spa_scrub_lock); 3530 spa->spa_scrub_active = 1; 3531 } 3532 CALLB_CPR_SAFE_END(&cprinfo, &spa->spa_scrub_lock); 3533 3534 if (spa->spa_scrub_restart_txg != 0) 3535 break; 3536 3537 mutex_exit(&spa->spa_scrub_lock); 3538 error = traverse_more(th); 3539 mutex_enter(&spa->spa_scrub_lock); 3540 if (error != EAGAIN) 3541 break; 3542 } 3543 3544 while (spa->spa_scrub_inflight) 3545 cv_wait(&spa->spa_scrub_io_cv, &spa->spa_scrub_lock); 3546 3547 spa->spa_scrub_active = 0; 3548 cv_broadcast(&spa->spa_scrub_cv); 3549 3550 mutex_exit(&spa->spa_scrub_lock); 3551 3552 spa_config_enter(spa, RW_WRITER, FTAG); 3553 3554 mutex_enter(&spa->spa_scrub_lock); 3555 3556 /* 3557 * Note: we check spa_scrub_restart_txg under both spa_scrub_lock 3558 * AND the spa config lock to synchronize with any config changes 3559 * that revise the DTLs under spa_vdev_enter() / spa_vdev_exit(). 3560 */ 3561 if (spa->spa_scrub_restart_txg != 0) 3562 error = ERESTART; 3563 3564 if (spa->spa_scrub_stop) 3565 error = EINTR; 3566 3567 /* 3568 * Even if there were uncorrectable errors, we consider the scrub 3569 * completed. The downside is that if there is a transient error during 3570 * a resilver, we won't resilver the data properly to the target. But 3571 * if the damage is permanent (more likely) we will resilver forever, 3572 * which isn't really acceptable. Since there is enough information for 3573 * the user to know what has failed and why, this seems like a more 3574 * tractable approach. 3575 */ 3576 complete = (error == 0); 3577 3578 dprintf("end %s to maxtxg=%llu %s, traverse=%d, %llu errors, stop=%u\n", 3579 scrub_type == POOL_SCRUB_RESILVER ? "resilver" : "scrub", 3580 spa->spa_scrub_maxtxg, complete ? "done" : "FAILED", 3581 error, spa->spa_scrub_errors, spa->spa_scrub_stop); 3582 3583 mutex_exit(&spa->spa_scrub_lock); 3584 3585 /* 3586 * If the scrub/resilver completed, update all DTLs to reflect this. 3587 * Whether it succeeded or not, vacate all temporary scrub DTLs. 3588 */ 3589 vdev_dtl_reassess(rvd, spa_last_synced_txg(spa) + 1, 3590 complete ? spa->spa_scrub_maxtxg : 0, B_TRUE); 3591 vdev_scrub_stat_update(rvd, POOL_SCRUB_NONE, complete); 3592 spa_errlog_rotate(spa); 3593 3594 if (scrub_type == POOL_SCRUB_RESILVER && complete) 3595 spa_event_notify(spa, NULL, ESC_ZFS_RESILVER_FINISH); 3596 3597 spa_config_exit(spa, FTAG); 3598 3599 mutex_enter(&spa->spa_scrub_lock); 3600 3601 /* 3602 * We may have finished replacing a device. 3603 * Let the async thread assess this and handle the detach. 3604 */ 3605 spa_async_request(spa, SPA_ASYNC_RESILVER_DONE); 3606 3607 /* 3608 * If we were told to restart, our final act is to start a new scrub. 3609 */ 3610 if (error == ERESTART) 3611 spa_async_request(spa, scrub_type == POOL_SCRUB_RESILVER ? 
3612 SPA_ASYNC_RESILVER : SPA_ASYNC_SCRUB); 3613 3614 spa->spa_scrub_type = POOL_SCRUB_NONE; 3615 spa->spa_scrub_active = 0; 3616 spa->spa_scrub_thread = NULL; 3617 cv_broadcast(&spa->spa_scrub_cv); 3618 CALLB_CPR_EXIT(&cprinfo); /* drops &spa->spa_scrub_lock */ 3619 thread_exit(); 3620 } 3621 3622 void 3623 spa_scrub_suspend(spa_t *spa) 3624 { 3625 mutex_enter(&spa->spa_scrub_lock); 3626 spa->spa_scrub_suspended++; 3627 while (spa->spa_scrub_active) { 3628 cv_broadcast(&spa->spa_scrub_cv); 3629 cv_wait(&spa->spa_scrub_cv, &spa->spa_scrub_lock); 3630 } 3631 while (spa->spa_scrub_inflight) 3632 cv_wait(&spa->spa_scrub_io_cv, &spa->spa_scrub_lock); 3633 mutex_exit(&spa->spa_scrub_lock); 3634 } 3635 3636 void 3637 spa_scrub_resume(spa_t *spa) 3638 { 3639 mutex_enter(&spa->spa_scrub_lock); 3640 ASSERT(spa->spa_scrub_suspended != 0); 3641 if (--spa->spa_scrub_suspended == 0) 3642 cv_broadcast(&spa->spa_scrub_cv); 3643 mutex_exit(&spa->spa_scrub_lock); 3644 } 3645 3646 void 3647 spa_scrub_restart(spa_t *spa, uint64_t txg) 3648 { 3649 /* 3650 * Something happened (e.g. snapshot create/delete) that means 3651 * we must restart any in-progress scrubs. The itinerary will 3652 * fix this properly. 3653 */ 3654 mutex_enter(&spa->spa_scrub_lock); 3655 spa->spa_scrub_restart_txg = txg; 3656 mutex_exit(&spa->spa_scrub_lock); 3657 } 3658 3659 int 3660 spa_scrub(spa_t *spa, pool_scrub_type_t type, boolean_t force) 3661 { 3662 space_seg_t *ss; 3663 uint64_t mintxg, maxtxg; 3664 vdev_t *rvd = spa->spa_root_vdev; 3665 3666 ASSERT(MUTEX_HELD(&spa_namespace_lock)); 3667 ASSERT(!spa_config_held(spa, RW_WRITER)); 3668 3669 if ((uint_t)type >= POOL_SCRUB_TYPES) 3670 return (ENOTSUP); 3671 3672 mutex_enter(&spa->spa_scrub_lock); 3673 3674 /* 3675 * If there's a scrub or resilver already in progress, stop it. 3676 */ 3677 while (spa->spa_scrub_thread != NULL) { 3678 /* 3679 * Don't stop a resilver unless forced. 3680 */ 3681 if (spa->spa_scrub_type == POOL_SCRUB_RESILVER && !force) { 3682 mutex_exit(&spa->spa_scrub_lock); 3683 return (EBUSY); 3684 } 3685 spa->spa_scrub_stop = 1; 3686 cv_broadcast(&spa->spa_scrub_cv); 3687 cv_wait(&spa->spa_scrub_cv, &spa->spa_scrub_lock); 3688 } 3689 3690 /* 3691 * Terminate the previous traverse. 3692 */ 3693 if (spa->spa_scrub_th != NULL) { 3694 traverse_fini(spa->spa_scrub_th); 3695 spa->spa_scrub_th = NULL; 3696 } 3697 3698 if (rvd == NULL) { 3699 ASSERT(spa->spa_scrub_stop == 0); 3700 ASSERT(spa->spa_scrub_type == type); 3701 ASSERT(spa->spa_scrub_restart_txg == 0); 3702 mutex_exit(&spa->spa_scrub_lock); 3703 return (0); 3704 } 3705 3706 mintxg = TXG_INITIAL - 1; 3707 maxtxg = spa_last_synced_txg(spa) + 1; 3708 3709 mutex_enter(&rvd->vdev_dtl_lock); 3710 3711 if (rvd->vdev_dtl_map.sm_space == 0) { 3712 /* 3713 * The pool-wide DTL is empty. 3714 * If this is a resilver, there's nothing to do except 3715 * check whether any in-progress replacements have completed. 3716 */ 3717 if (type == POOL_SCRUB_RESILVER) { 3718 type = POOL_SCRUB_NONE; 3719 spa_async_request(spa, SPA_ASYNC_RESILVER_DONE); 3720 } 3721 } else { 3722 /* 3723 * The pool-wide DTL is non-empty. 3724 * If this is a normal scrub, upgrade to a resilver instead. 3725 */ 3726 if (type == POOL_SCRUB_EVERYTHING) 3727 type = POOL_SCRUB_RESILVER; 3728 } 3729 3730 if (type == POOL_SCRUB_RESILVER) { 3731 /* 3732 * Determine the resilvering boundaries. 3733 * 3734 * Note: (mintxg, maxtxg) is an open interval, 3735 * i.e. mintxg and maxtxg themselves are not included. 
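 *
 * That is why mintxg is set below to one less than the first txg
 * recorded in the pool-wide DTL: the DTL entries themselves must fall
 * strictly inside the interval handed to the traverse code.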
3736 * 3737 * Note: for maxtxg, we MIN with spa_last_synced_txg(spa) + 1 3738 * so we don't claim to resilver a txg that's still changing. 3739 */ 3740 ss = avl_first(&rvd->vdev_dtl_map.sm_root); 3741 mintxg = ss->ss_start - 1; 3742 ss = avl_last(&rvd->vdev_dtl_map.sm_root); 3743 maxtxg = MIN(ss->ss_end, maxtxg); 3744 3745 spa_event_notify(spa, NULL, ESC_ZFS_RESILVER_START); 3746 } 3747 3748 mutex_exit(&rvd->vdev_dtl_lock); 3749 3750 spa->spa_scrub_stop = 0; 3751 spa->spa_scrub_type = type; 3752 spa->spa_scrub_restart_txg = 0; 3753 3754 if (type != POOL_SCRUB_NONE) { 3755 spa->spa_scrub_mintxg = mintxg; 3756 spa->spa_scrub_maxtxg = maxtxg; 3757 spa->spa_scrub_th = traverse_init(spa, spa_scrub_cb, NULL, 3758 ADVANCE_PRE | ADVANCE_PRUNE | ADVANCE_ZIL, 3759 ZIO_FLAG_CANFAIL); 3760 traverse_add_pool(spa->spa_scrub_th, mintxg, maxtxg); 3761 spa->spa_scrub_thread = thread_create(NULL, 0, 3762 spa_scrub_thread, spa, 0, &p0, TS_RUN, minclsyspri); 3763 } 3764 3765 mutex_exit(&spa->spa_scrub_lock); 3766 3767 return (0); 3768 } 3769 3770 /* 3771 * ========================================================================== 3772 * SPA async task processing 3773 * ========================================================================== 3774 */ 3775 3776 static void 3777 spa_async_remove(spa_t *spa, vdev_t *vd) 3778 { 3779 vdev_t *tvd; 3780 int c; 3781 3782 for (c = 0; c < vd->vdev_children; c++) { 3783 tvd = vd->vdev_child[c]; 3784 if (tvd->vdev_remove_wanted) { 3785 tvd->vdev_remove_wanted = 0; 3786 vdev_set_state(tvd, B_FALSE, VDEV_STATE_REMOVED, 3787 VDEV_AUX_NONE); 3788 vdev_clear(spa, tvd, B_TRUE); 3789 vdev_config_dirty(tvd->vdev_top); 3790 } 3791 spa_async_remove(spa, tvd); 3792 } 3793 } 3794 3795 static void 3796 spa_async_thread(spa_t *spa) 3797 { 3798 int tasks; 3799 uint64_t txg; 3800 3801 ASSERT(spa->spa_sync_on); 3802 3803 mutex_enter(&spa->spa_async_lock); 3804 tasks = spa->spa_async_tasks; 3805 spa->spa_async_tasks = 0; 3806 mutex_exit(&spa->spa_async_lock); 3807 3808 /* 3809 * See if the config needs to be updated. 3810 */ 3811 if (tasks & SPA_ASYNC_CONFIG_UPDATE) { 3812 mutex_enter(&spa_namespace_lock); 3813 spa_config_update(spa, SPA_CONFIG_UPDATE_POOL); 3814 mutex_exit(&spa_namespace_lock); 3815 } 3816 3817 /* 3818 * See if any devices need to be marked REMOVED. 3819 * 3820 * XXX - We avoid doing this when we are in 3821 * I/O failure state since spa_vdev_enter() grabs 3822 * the namespace lock and would not be able to obtain 3823 * the writer config lock. 3824 */ 3825 if (tasks & SPA_ASYNC_REMOVE && 3826 spa_state(spa) != POOL_STATE_IO_FAILURE) { 3827 txg = spa_vdev_enter(spa); 3828 spa_async_remove(spa, spa->spa_root_vdev); 3829 (void) spa_vdev_exit(spa, NULL, txg, 0); 3830 } 3831 3832 /* 3833 * If any devices are done replacing, detach them. 3834 */ 3835 if (tasks & SPA_ASYNC_RESILVER_DONE) 3836 spa_vdev_resilver_done(spa); 3837 3838 /* 3839 * Kick off a scrub. When starting a RESILVER scrub (or an EVERYTHING 3840 * scrub which can become a resilver), we need to hold 3841 * spa_namespace_lock() because the sysevent we post via 3842 * spa_event_notify() needs to get the name of the pool. 3843 */ 3844 if (tasks & SPA_ASYNC_SCRUB) { 3845 mutex_enter(&spa_namespace_lock); 3846 VERIFY(spa_scrub(spa, POOL_SCRUB_EVERYTHING, B_TRUE) == 0); 3847 mutex_exit(&spa_namespace_lock); 3848 } 3849 3850 /* 3851 * Kick off a resilver. 
3852 */ 3853 if (tasks & SPA_ASYNC_RESILVER) { 3854 mutex_enter(&spa_namespace_lock); 3855 VERIFY(spa_scrub(spa, POOL_SCRUB_RESILVER, B_TRUE) == 0); 3856 mutex_exit(&spa_namespace_lock); 3857 } 3858 3859 /* 3860 * Let the world know that we're done. 3861 */ 3862 mutex_enter(&spa->spa_async_lock); 3863 spa->spa_async_thread = NULL; 3864 cv_broadcast(&spa->spa_async_cv); 3865 mutex_exit(&spa->spa_async_lock); 3866 thread_exit(); 3867 } 3868 3869 void 3870 spa_async_suspend(spa_t *spa) 3871 { 3872 mutex_enter(&spa->spa_async_lock); 3873 spa->spa_async_suspended++; 3874 while (spa->spa_async_thread != NULL) 3875 cv_wait(&spa->spa_async_cv, &spa->spa_async_lock); 3876 mutex_exit(&spa->spa_async_lock); 3877 } 3878 3879 void 3880 spa_async_resume(spa_t *spa) 3881 { 3882 mutex_enter(&spa->spa_async_lock); 3883 ASSERT(spa->spa_async_suspended != 0); 3884 spa->spa_async_suspended--; 3885 mutex_exit(&spa->spa_async_lock); 3886 } 3887 3888 static void 3889 spa_async_dispatch(spa_t *spa) 3890 { 3891 mutex_enter(&spa->spa_async_lock); 3892 if (spa->spa_async_tasks && !spa->spa_async_suspended && 3893 spa->spa_async_thread == NULL && 3894 rootdir != NULL && !vn_is_readonly(rootdir)) 3895 spa->spa_async_thread = thread_create(NULL, 0, 3896 spa_async_thread, spa, 0, &p0, TS_RUN, maxclsyspri); 3897 mutex_exit(&spa->spa_async_lock); 3898 } 3899 3900 void 3901 spa_async_request(spa_t *spa, int task) 3902 { 3903 mutex_enter(&spa->spa_async_lock); 3904 spa->spa_async_tasks |= task; 3905 mutex_exit(&spa->spa_async_lock); 3906 } 3907 3908 /* 3909 * ========================================================================== 3910 * SPA syncing routines 3911 * ========================================================================== 3912 */ 3913 3914 static void 3915 spa_sync_deferred_frees(spa_t *spa, uint64_t txg) 3916 { 3917 bplist_t *bpl = &spa->spa_sync_bplist; 3918 dmu_tx_t *tx; 3919 blkptr_t blk; 3920 uint64_t itor = 0; 3921 zio_t *zio; 3922 int error; 3923 uint8_t c = 1; 3924 3925 zio = zio_root(spa, NULL, NULL, ZIO_FLAG_CONFIG_HELD); 3926 3927 while (bplist_iterate(bpl, &itor, &blk) == 0) 3928 zio_nowait(zio_free(zio, spa, txg, &blk, NULL, NULL)); 3929 3930 error = zio_wait(zio); 3931 ASSERT3U(error, ==, 0); 3932 3933 tx = dmu_tx_create_assigned(spa->spa_dsl_pool, txg); 3934 bplist_vacate(bpl, tx); 3935 3936 /* 3937 * Pre-dirty the first block so we sync to convergence faster. 3938 * (Usually only the first block is needed.) 
3939 */ 3940 dmu_write(spa->spa_meta_objset, spa->spa_sync_bplist_obj, 0, 1, &c, tx); 3941 dmu_tx_commit(tx); 3942 } 3943 3944 static void 3945 spa_sync_nvlist(spa_t *spa, uint64_t obj, nvlist_t *nv, dmu_tx_t *tx) 3946 { 3947 char *packed = NULL; 3948 size_t nvsize = 0; 3949 dmu_buf_t *db; 3950 3951 VERIFY(nvlist_size(nv, &nvsize, NV_ENCODE_XDR) == 0); 3952 3953 packed = kmem_alloc(nvsize, KM_SLEEP); 3954 3955 VERIFY(nvlist_pack(nv, &packed, &nvsize, NV_ENCODE_XDR, 3956 KM_SLEEP) == 0); 3957 3958 dmu_write(spa->spa_meta_objset, obj, 0, nvsize, packed, tx); 3959 3960 kmem_free(packed, nvsize); 3961 3962 VERIFY(0 == dmu_bonus_hold(spa->spa_meta_objset, obj, FTAG, &db)); 3963 dmu_buf_will_dirty(db, tx); 3964 *(uint64_t *)db->db_data = nvsize; 3965 dmu_buf_rele(db, FTAG); 3966 } 3967 3968 static void 3969 spa_sync_aux_dev(spa_t *spa, spa_aux_vdev_t *sav, dmu_tx_t *tx, 3970 const char *config, const char *entry) 3971 { 3972 nvlist_t *nvroot; 3973 nvlist_t **list; 3974 int i; 3975 3976 if (!sav->sav_sync) 3977 return; 3978 3979 /* 3980 * Update the MOS nvlist describing the list of available devices. 3981 * spa_validate_aux() will have already made sure this nvlist is 3982 * valid and the vdevs are labeled appropriately. 3983 */ 3984 if (sav->sav_object == 0) { 3985 sav->sav_object = dmu_object_alloc(spa->spa_meta_objset, 3986 DMU_OT_PACKED_NVLIST, 1 << 14, DMU_OT_PACKED_NVLIST_SIZE, 3987 sizeof (uint64_t), tx); 3988 VERIFY(zap_update(spa->spa_meta_objset, 3989 DMU_POOL_DIRECTORY_OBJECT, entry, sizeof (uint64_t), 1, 3990 &sav->sav_object, tx) == 0); 3991 } 3992 3993 VERIFY(nvlist_alloc(&nvroot, NV_UNIQUE_NAME, KM_SLEEP) == 0); 3994 if (sav->sav_count == 0) { 3995 VERIFY(nvlist_add_nvlist_array(nvroot, config, NULL, 0) == 0); 3996 } else { 3997 list = kmem_alloc(sav->sav_count * sizeof (void *), KM_SLEEP); 3998 for (i = 0; i < sav->sav_count; i++) 3999 list[i] = vdev_config_generate(spa, sav->sav_vdevs[i], 4000 B_FALSE, B_FALSE, B_TRUE); 4001 VERIFY(nvlist_add_nvlist_array(nvroot, config, list, 4002 sav->sav_count) == 0); 4003 for (i = 0; i < sav->sav_count; i++) 4004 nvlist_free(list[i]); 4005 kmem_free(list, sav->sav_count * sizeof (void *)); 4006 } 4007 4008 spa_sync_nvlist(spa, sav->sav_object, nvroot, tx); 4009 nvlist_free(nvroot); 4010 4011 sav->sav_sync = B_FALSE; 4012 } 4013 4014 static void 4015 spa_sync_config_object(spa_t *spa, dmu_tx_t *tx) 4016 { 4017 nvlist_t *config; 4018 4019 if (list_is_empty(&spa->spa_dirty_list)) 4020 return; 4021 4022 config = spa_config_generate(spa, NULL, dmu_tx_get_txg(tx), B_FALSE); 4023 4024 if (spa->spa_config_syncing) 4025 nvlist_free(spa->spa_config_syncing); 4026 spa->spa_config_syncing = config; 4027 4028 spa_sync_nvlist(spa, spa->spa_config_object, config, tx); 4029 } 4030 4031 /* 4032 * Set zpool properties. 4033 */ 4034 static void 4035 spa_sync_props(void *arg1, void *arg2, cred_t *cr, dmu_tx_t *tx) 4036 { 4037 spa_t *spa = arg1; 4038 objset_t *mos = spa->spa_meta_objset; 4039 nvlist_t *nvp = arg2; 4040 nvpair_t *elem; 4041 uint64_t intval; 4042 char *strval; 4043 zpool_prop_t prop; 4044 const char *propname; 4045 zprop_type_t proptype; 4046 spa_config_dirent_t *dp; 4047 4048 elem = NULL; 4049 while ((elem = nvlist_next_nvpair(nvp, elem))) { 4050 switch (prop = zpool_name_to_prop(nvpair_name(elem))) { 4051 case ZPOOL_PROP_VERSION: 4052 /* 4053 * Only set version for non-zpool-creation cases 4054 * (set/import). spa_create() needs special care 4055 * for version setting. 
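 * (Pool creation syncs its properties in txg TXG_INITIAL; both the guard
 * below and the internal history logging at the bottom of this function
 * deliberately skip that case.)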
			 */
			if (tx->tx_txg != TXG_INITIAL) {
				VERIFY(nvpair_value_uint64(elem,
				    &intval) == 0);
				ASSERT(intval <= SPA_VERSION);
				ASSERT(intval >= spa_version(spa));
				spa->spa_uberblock.ub_version = intval;
				vdev_config_dirty(spa->spa_root_vdev);
			}
			break;

		case ZPOOL_PROP_ALTROOT:
			/*
			 * 'altroot' is a non-persistent property. It should
			 * have been set temporarily at creation or import time.
			 */
			ASSERT(spa->spa_root != NULL);
			break;

		case ZPOOL_PROP_CACHEFILE:
			/*
			 * 'cachefile' is a non-persistent property, but note
			 * an async request that the config cache needs to be
			 * updated.
			 */
			VERIFY(nvpair_value_string(elem, &strval) == 0);

			dp = kmem_alloc(sizeof (spa_config_dirent_t),
			    KM_SLEEP);

			if (strval[0] == '\0')
				dp->scd_path = spa_strdup(spa_config_path);
			else if (strcmp(strval, "none") == 0)
				dp->scd_path = NULL;
			else
				dp->scd_path = spa_strdup(strval);

			list_insert_head(&spa->spa_config_list, dp);
			spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE);
			break;
		default:
			/*
			 * Set pool property values in the poolprops mos object.
			 */
			mutex_enter(&spa->spa_props_lock);
			if (spa->spa_pool_props_object == 0) {
				objset_t *mos = spa->spa_meta_objset;

				VERIFY((spa->spa_pool_props_object =
				    zap_create(mos, DMU_OT_POOL_PROPS,
				    DMU_OT_NONE, 0, tx)) > 0);

				VERIFY(zap_update(mos,
				    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_PROPS,
				    8, 1, &spa->spa_pool_props_object, tx)
				    == 0);
			}
			mutex_exit(&spa->spa_props_lock);

			/* normalize the property name */
			propname = zpool_prop_to_name(prop);
			proptype = zpool_prop_get_type(prop);

			if (nvpair_type(elem) == DATA_TYPE_STRING) {
				ASSERT(proptype == PROP_TYPE_STRING);
				VERIFY(nvpair_value_string(elem, &strval) == 0);
				VERIFY(zap_update(mos,
				    spa->spa_pool_props_object, propname,
				    1, strlen(strval) + 1, strval, tx) == 0);

			} else if (nvpair_type(elem) == DATA_TYPE_UINT64) {
				VERIFY(nvpair_value_uint64(elem, &intval) == 0);

				if (proptype == PROP_TYPE_INDEX) {
					const char *unused;
					VERIFY(zpool_prop_index_to_string(
					    prop, intval, &unused) == 0);
				}
				VERIFY(zap_update(mos,
				    spa->spa_pool_props_object, propname,
				    8, 1, &intval, tx) == 0);
			} else {
				ASSERT(0); /* not allowed */
			}

			switch (prop) {
			case ZPOOL_PROP_DELEGATION:
				spa->spa_delegation = intval;
				break;
			case ZPOOL_PROP_BOOTFS:
				spa->spa_bootfs = intval;
				break;
			case ZPOOL_PROP_FAILUREMODE:
				spa->spa_failmode = intval;
				break;
			default:
				break;
			}
		}

		/* log internal history if this is not a zpool create */
		if (spa_version(spa) >= SPA_VERSION_ZPOOL_HISTORY &&
		    tx->tx_txg != TXG_INITIAL) {
			spa_history_internal_log(LOG_POOL_PROPSET,
			    spa, tx, cr, "%s %lld %s",
			    nvpair_name(elem), intval, spa->spa_name);
		}
	}
}

/*
 * Sync the specified transaction group. New blocks may be dirtied as
 * part of the process, so we iterate until it converges.
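 *
 * Each pass pushes out the pool config object, the spare and l2cache
 * lists, the persistent error log, the DSL pool, and any vdevs dirtied
 * in this txg; the loop ends once a pass completes without dirtying any
 * vdevs, after which the vdev labels and uberblock are rewritten to
 * commit the txg.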
 */
void
spa_sync(spa_t *spa, uint64_t txg)
{
	dsl_pool_t *dp = spa->spa_dsl_pool;
	objset_t *mos = spa->spa_meta_objset;
	bplist_t *bpl = &spa->spa_sync_bplist;
	vdev_t *rvd = spa->spa_root_vdev;
	vdev_t *vd;
	dmu_tx_t *tx;
	int dirty_vdevs;

	/*
	 * Lock out configuration changes.
	 */
	spa_config_enter(spa, RW_READER, FTAG);

	spa->spa_syncing_txg = txg;
	spa->spa_sync_pass = 0;

	VERIFY(0 == bplist_open(bpl, mos, spa->spa_sync_bplist_obj));

	tx = dmu_tx_create_assigned(dp, txg);

	/*
	 * If we are upgrading to SPA_VERSION_RAIDZ_DEFLATE this txg,
	 * set spa_deflate if we have no raid-z vdevs.
	 */
	if (spa->spa_ubsync.ub_version < SPA_VERSION_RAIDZ_DEFLATE &&
	    spa->spa_uberblock.ub_version >= SPA_VERSION_RAIDZ_DEFLATE) {
		int i;

		for (i = 0; i < rvd->vdev_children; i++) {
			vd = rvd->vdev_child[i];
			if (vd->vdev_deflate_ratio != SPA_MINBLOCKSIZE)
				break;
		}
		if (i == rvd->vdev_children) {
			spa->spa_deflate = TRUE;
			VERIFY(0 == zap_add(spa->spa_meta_objset,
			    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_DEFLATE,
			    sizeof (uint64_t), 1, &spa->spa_deflate, tx));
		}
	}

	/*
	 * If anything has changed in this txg, push the deferred frees
	 * from the previous txg. If not, leave them alone so that we
	 * don't generate work on an otherwise idle system.
	 */
	if (!txg_list_empty(&dp->dp_dirty_datasets, txg) ||
	    !txg_list_empty(&dp->dp_dirty_dirs, txg) ||
	    !txg_list_empty(&dp->dp_sync_tasks, txg))
		spa_sync_deferred_frees(spa, txg);

	/*
	 * Iterate to convergence.
	 */
	do {
		spa->spa_sync_pass++;

		spa_sync_config_object(spa, tx);
		spa_sync_aux_dev(spa, &spa->spa_spares, tx,
		    ZPOOL_CONFIG_SPARES, DMU_POOL_SPARES);
		spa_sync_aux_dev(spa, &spa->spa_l2cache, tx,
		    ZPOOL_CONFIG_L2CACHE, DMU_POOL_L2CACHE);
		spa_errlog_sync(spa, txg);
		dsl_pool_sync(dp, txg);

		dirty_vdevs = 0;
		while (vd = txg_list_remove(&spa->spa_vdev_txg_list, txg)) {
			vdev_sync(vd, txg);
			dirty_vdevs++;
		}

		bplist_sync(bpl, tx);
	} while (dirty_vdevs);

	bplist_close(bpl);

	dprintf("txg %llu passes %d\n", txg, spa->spa_sync_pass);

	/*
	 * Rewrite the vdev configuration (which includes the uberblock)
	 * to commit the transaction group.
	 *
	 * If there are no dirty vdevs, we sync the uberblock to a few
	 * random top-level vdevs that are known to be visible in the
	 * config cache (see spa_vdev_add() for details). If there *are*
	 * dirty vdevs -- or if the sync to our random subset fails --
	 * then sync the uberblock to all vdevs.
	 */
	if (list_is_empty(&spa->spa_dirty_list)) {
		vdev_t *svd[SPA_DVAS_PER_BP];
		int svdcount = 0;
		int children = rvd->vdev_children;
		int c0 = spa_get_random(children);
		int c;

		for (c = 0; c < children; c++) {
			vd = rvd->vdev_child[(c0 + c) % children];
			if (vd->vdev_ms_array == 0 || vd->vdev_islog)
				continue;
			svd[svdcount++] = vd;
			if (svdcount == SPA_DVAS_PER_BP)
				break;
		}
		vdev_config_sync(svd, svdcount, txg);
	} else {
		vdev_config_sync(rvd->vdev_child, rvd->vdev_children, txg);
	}
	dmu_tx_commit(tx);

	/*
	 * Clear the dirty config list.
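	 * These vdevs had their configuration written out as part of
	 * vdev_config_sync() above, so they can be marked clean for
	 * this txg.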
	 */
	while ((vd = list_head(&spa->spa_dirty_list)) != NULL)
		vdev_config_clean(vd);

	/*
	 * Now that the new config has synced transactionally,
	 * let it become visible to the config cache.
	 */
	if (spa->spa_config_syncing != NULL) {
		spa_config_set(spa, spa->spa_config_syncing);
		spa->spa_config_txg = txg;
		spa->spa_config_syncing = NULL;
	}

	/*
	 * Make a stable copy of the fully synced uberblock.
	 * We use this as the root for pool traversals.
	 */
	spa->spa_traverse_wanted = 1;	/* tells traverse_more() to stop */

	spa_scrub_suspend(spa);		/* stop scrubbing and finish I/Os */

	rw_enter(&spa->spa_traverse_lock, RW_WRITER);
	spa->spa_traverse_wanted = 0;
	spa->spa_ubsync = spa->spa_uberblock;
	rw_exit(&spa->spa_traverse_lock);

	spa_scrub_resume(spa);		/* resume scrub with new ubsync */

	/*
	 * Clean up the ZIL records for the synced txg.
	 */
	dsl_pool_zil_clean(dp);

	/*
	 * Update usable space statistics.
	 */
	while (vd = txg_list_remove(&spa->spa_vdev_txg_list, TXG_CLEAN(txg)))
		vdev_sync_done(vd, txg);

	/*
	 * It had better be the case that we didn't dirty anything
	 * since vdev_config_sync().
	 */
	ASSERT(txg_list_empty(&dp->dp_dirty_datasets, txg));
	ASSERT(txg_list_empty(&dp->dp_dirty_dirs, txg));
	ASSERT(txg_list_empty(&spa->spa_vdev_txg_list, txg));
	ASSERT(bpl->bpl_queue == NULL);

	spa_config_exit(spa, FTAG);

	/*
	 * If any async tasks have been requested, kick them off.
	 */
	spa_async_dispatch(spa);
}

/*
 * Sync all pools. We don't want to hold the namespace lock across these
 * operations, so we take a reference on the spa_t and drop the lock during the
 * sync.
 */
void
spa_sync_allpools(void)
{
	spa_t *spa = NULL;
	mutex_enter(&spa_namespace_lock);
	while ((spa = spa_next(spa)) != NULL) {
		if (spa_state(spa) != POOL_STATE_ACTIVE)
			continue;
		spa_open_ref(spa, FTAG);
		mutex_exit(&spa_namespace_lock);
		txg_wait_synced(spa_get_dsl(spa), 0);
		mutex_enter(&spa_namespace_lock);
		spa_close(spa, FTAG);
	}
	mutex_exit(&spa_namespace_lock);
}

/*
 * ==========================================================================
 * Miscellaneous routines
 * ==========================================================================
 */

/*
 * Remove all pools in the system.
 */
void
spa_evict_all(void)
{
	spa_t *spa;

	/*
	 * Remove all cached state. All pools should be closed now,
	 * so every spa in the AVL tree should be unreferenced.
	 */
	mutex_enter(&spa_namespace_lock);
	while ((spa = spa_next(NULL)) != NULL) {
		/*
		 * Stop async tasks. The async thread may need to detach
		 * a device that's been replaced, which requires grabbing
		 * spa_namespace_lock, so we must drop it here.
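		 * The reference taken below keeps the spa_t from being
		 * removed out from under us while the lock is dropped.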
		 */
		spa_open_ref(spa, FTAG);
		mutex_exit(&spa_namespace_lock);
		spa_async_suspend(spa);
		mutex_enter(&spa_namespace_lock);
		VERIFY(spa_scrub(spa, POOL_SCRUB_NONE, B_TRUE) == 0);
		spa_close(spa, FTAG);

		if (spa->spa_state != POOL_STATE_UNINITIALIZED) {
			spa_unload(spa);
			spa_deactivate(spa);
		}
		spa_remove(spa);
	}
	mutex_exit(&spa_namespace_lock);
}

vdev_t *
spa_lookup_by_guid(spa_t *spa, uint64_t guid, boolean_t l2cache)
{
	vdev_t *vd;
	int i;

	if ((vd = vdev_lookup_by_guid(spa->spa_root_vdev, guid)) != NULL)
		return (vd);

	if (l2cache) {
		for (i = 0; i < spa->spa_l2cache.sav_count; i++) {
			vd = spa->spa_l2cache.sav_vdevs[i];
			if (vd->vdev_guid == guid)
				return (vd);
		}
	}

	return (NULL);
}

void
spa_upgrade(spa_t *spa, uint64_t version)
{
	spa_config_enter(spa, RW_WRITER, FTAG);

	/*
	 * This should only be called for a non-faulted pool, and since a
	 * pool with a future version could not have been opened in the
	 * first place, the current version can never exceed SPA_VERSION.
	 */
	ASSERT(spa->spa_uberblock.ub_version <= SPA_VERSION);
	ASSERT(version >= spa->spa_uberblock.ub_version);

	spa->spa_uberblock.ub_version = version;
	vdev_config_dirty(spa->spa_root_vdev);

	spa_config_exit(spa, FTAG);

	txg_wait_synced(spa_get_dsl(spa), 0);
}

boolean_t
spa_has_spare(spa_t *spa, uint64_t guid)
{
	int i;
	uint64_t spareguid;
	spa_aux_vdev_t *sav = &spa->spa_spares;

	for (i = 0; i < sav->sav_count; i++)
		if (sav->sav_vdevs[i]->vdev_guid == guid)
			return (B_TRUE);

	for (i = 0; i < sav->sav_npending; i++) {
		if (nvlist_lookup_uint64(sav->sav_pending[i], ZPOOL_CONFIG_GUID,
		    &spareguid) == 0 && spareguid == guid)
			return (B_TRUE);
	}

	return (B_FALSE);
}

/*
 * Post a sysevent corresponding to the given event. The 'name' must be one of
 * the event definitions in sys/sysevent/eventdefs.h. The payload will be
 * filled in from the spa and (optionally) the vdev. This doesn't do anything
 * in the userland libzpool, as we don't want consumers to misinterpret ztest
 * or zdb as real changes.
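 *
 * An illustrative call (the subclass name is just an example taken from
 * eventdefs.h) would look like:
 *
 *	spa_event_notify(spa, vd, ESC_ZFS_VDEV_REMOVE);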
 */
void
spa_event_notify(spa_t *spa, vdev_t *vd, const char *name)
{
#ifdef _KERNEL
	sysevent_t *ev;
	sysevent_attr_list_t *attr = NULL;
	sysevent_value_t value;
	sysevent_id_t eid;

	ev = sysevent_alloc(EC_ZFS, (char *)name, SUNW_KERN_PUB "zfs",
	    SE_SLEEP);

	value.value_type = SE_DATA_TYPE_STRING;
	value.value.sv_string = spa_name(spa);
	if (sysevent_add_attr(&attr, ZFS_EV_POOL_NAME, &value, SE_SLEEP) != 0)
		goto done;

	value.value_type = SE_DATA_TYPE_UINT64;
	value.value.sv_uint64 = spa_guid(spa);
	if (sysevent_add_attr(&attr, ZFS_EV_POOL_GUID, &value, SE_SLEEP) != 0)
		goto done;

	if (vd) {
		value.value_type = SE_DATA_TYPE_UINT64;
		value.value.sv_uint64 = vd->vdev_guid;
		if (sysevent_add_attr(&attr, ZFS_EV_VDEV_GUID, &value,
		    SE_SLEEP) != 0)
			goto done;

		if (vd->vdev_path) {
			value.value_type = SE_DATA_TYPE_STRING;
			value.value.sv_string = vd->vdev_path;
			if (sysevent_add_attr(&attr, ZFS_EV_VDEV_PATH,
			    &value, SE_SLEEP) != 0)
				goto done;
		}
	}

	if (sysevent_attach_attributes(ev, attr) != 0)
		goto done;
	attr = NULL;

	(void) log_sysevent(ev, SE_SLEEP, &eid);

done:
	if (attr)
		sysevent_free_attr(attr);
	sysevent_free(ev);
#endif
}