/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M% %I% %E% SMI"

/*
 * This file contains all the routines used when modifying on-disk SPA state.
 * This includes opening, importing, destroying, exporting a pool, and syncing a
 * pool.
 */

#include <sys/zfs_context.h>
#include <sys/fm/fs/zfs.h>
#include <sys/spa_impl.h>
#include <sys/zio.h>
#include <sys/zio_checksum.h>
#include <sys/zio_compress.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/zap.h>
#include <sys/zil.h>
#include <sys/vdev_impl.h>
#include <sys/metaslab.h>
#include <sys/uberblock_impl.h>
#include <sys/txg.h>
#include <sys/avl.h>
#include <sys/dmu_traverse.h>
#include <sys/dmu_objset.h>
#include <sys/unique.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_synctask.h>
#include <sys/fs/zfs.h>
#include <sys/arc.h>
#include <sys/callb.h>
#include <sys/systeminfo.h>
#include <sys/sunddi.h>
#include <sys/spa_boot.h>

#include "zfs_prop.h"
#include "zfs_comutil.h"

int zio_taskq_threads = 8;

static void spa_sync_props(void *arg1, void *arg2, cred_t *cr, dmu_tx_t *tx);

/*
 * ==========================================================================
 * SPA properties routines
 * ==========================================================================
 */

/*
 * Add a (source=src, propname=propval) list to an nvlist.
 */
static void
spa_prop_add_list(nvlist_t *nvl, zpool_prop_t prop, char *strval,
    uint64_t intval, zprop_source_t src)
{
	const char *propname = zpool_prop_to_name(prop);
	nvlist_t *propval;

	VERIFY(nvlist_alloc(&propval, NV_UNIQUE_NAME, KM_SLEEP) == 0);
	VERIFY(nvlist_add_uint64(propval, ZPROP_SOURCE, src) == 0);

	if (strval != NULL)
		VERIFY(nvlist_add_string(propval, ZPROP_VALUE, strval) == 0);
	else
		VERIFY(nvlist_add_uint64(propval, ZPROP_VALUE, intval) == 0);

	VERIFY(nvlist_add_nvlist(nvl, propname, propval) == 0);
	nvlist_free(propval);
}

/*
 * Get property values from the spa configuration.
102 */ 103 static void 104 spa_prop_get_config(spa_t *spa, nvlist_t **nvp) 105 { 106 uint64_t size = spa_get_space(spa); 107 uint64_t used = spa_get_alloc(spa); 108 uint64_t cap, version; 109 zprop_source_t src = ZPROP_SRC_NONE; 110 spa_config_dirent_t *dp; 111 112 /* 113 * readonly properties 114 */ 115 spa_prop_add_list(*nvp, ZPOOL_PROP_NAME, spa->spa_name, 0, src); 116 spa_prop_add_list(*nvp, ZPOOL_PROP_SIZE, NULL, size, src); 117 spa_prop_add_list(*nvp, ZPOOL_PROP_USED, NULL, used, src); 118 spa_prop_add_list(*nvp, ZPOOL_PROP_AVAILABLE, NULL, size - used, src); 119 120 cap = (size == 0) ? 0 : (used * 100 / size); 121 spa_prop_add_list(*nvp, ZPOOL_PROP_CAPACITY, NULL, cap, src); 122 123 spa_prop_add_list(*nvp, ZPOOL_PROP_GUID, NULL, spa_guid(spa), src); 124 spa_prop_add_list(*nvp, ZPOOL_PROP_HEALTH, NULL, 125 spa->spa_root_vdev->vdev_state, src); 126 127 /* 128 * settable properties that are not stored in the pool property object. 129 */ 130 version = spa_version(spa); 131 if (version == zpool_prop_default_numeric(ZPOOL_PROP_VERSION)) 132 src = ZPROP_SRC_DEFAULT; 133 else 134 src = ZPROP_SRC_LOCAL; 135 spa_prop_add_list(*nvp, ZPOOL_PROP_VERSION, NULL, version, src); 136 137 if (spa->spa_root != NULL) 138 spa_prop_add_list(*nvp, ZPOOL_PROP_ALTROOT, spa->spa_root, 139 0, ZPROP_SRC_LOCAL); 140 141 if ((dp = list_head(&spa->spa_config_list)) != NULL) { 142 if (dp->scd_path == NULL) { 143 spa_prop_add_list(*nvp, ZPOOL_PROP_CACHEFILE, 144 "none", 0, ZPROP_SRC_LOCAL); 145 } else if (strcmp(dp->scd_path, spa_config_path) != 0) { 146 spa_prop_add_list(*nvp, ZPOOL_PROP_CACHEFILE, 147 dp->scd_path, 0, ZPROP_SRC_LOCAL); 148 } 149 } 150 } 151 152 /* 153 * Get zpool property values. 154 */ 155 int 156 spa_prop_get(spa_t *spa, nvlist_t **nvp) 157 { 158 zap_cursor_t zc; 159 zap_attribute_t za; 160 objset_t *mos = spa->spa_meta_objset; 161 int err; 162 163 VERIFY(nvlist_alloc(nvp, NV_UNIQUE_NAME, KM_SLEEP) == 0); 164 165 /* 166 * Get properties from the spa config. 167 */ 168 spa_prop_get_config(spa, nvp); 169 170 mutex_enter(&spa->spa_props_lock); 171 /* If no pool property object, no more prop to get. */ 172 if (spa->spa_pool_props_object == 0) { 173 mutex_exit(&spa->spa_props_lock); 174 return (0); 175 } 176 177 /* 178 * Get properties from the MOS pool property object. 
179 */ 180 for (zap_cursor_init(&zc, mos, spa->spa_pool_props_object); 181 (err = zap_cursor_retrieve(&zc, &za)) == 0; 182 zap_cursor_advance(&zc)) { 183 uint64_t intval = 0; 184 char *strval = NULL; 185 zprop_source_t src = ZPROP_SRC_DEFAULT; 186 zpool_prop_t prop; 187 188 if ((prop = zpool_name_to_prop(za.za_name)) == ZPROP_INVAL) 189 continue; 190 191 switch (za.za_integer_length) { 192 case 8: 193 /* integer property */ 194 if (za.za_first_integer != 195 zpool_prop_default_numeric(prop)) 196 src = ZPROP_SRC_LOCAL; 197 198 if (prop == ZPOOL_PROP_BOOTFS) { 199 dsl_pool_t *dp; 200 dsl_dataset_t *ds = NULL; 201 202 dp = spa_get_dsl(spa); 203 rw_enter(&dp->dp_config_rwlock, RW_READER); 204 if (err = dsl_dataset_hold_obj(dp, 205 za.za_first_integer, FTAG, &ds)) { 206 rw_exit(&dp->dp_config_rwlock); 207 break; 208 } 209 210 strval = kmem_alloc( 211 MAXNAMELEN + strlen(MOS_DIR_NAME) + 1, 212 KM_SLEEP); 213 dsl_dataset_name(ds, strval); 214 dsl_dataset_rele(ds, FTAG); 215 rw_exit(&dp->dp_config_rwlock); 216 } else { 217 strval = NULL; 218 intval = za.za_first_integer; 219 } 220 221 spa_prop_add_list(*nvp, prop, strval, intval, src); 222 223 if (strval != NULL) 224 kmem_free(strval, 225 MAXNAMELEN + strlen(MOS_DIR_NAME) + 1); 226 227 break; 228 229 case 1: 230 /* string property */ 231 strval = kmem_alloc(za.za_num_integers, KM_SLEEP); 232 err = zap_lookup(mos, spa->spa_pool_props_object, 233 za.za_name, 1, za.za_num_integers, strval); 234 if (err) { 235 kmem_free(strval, za.za_num_integers); 236 break; 237 } 238 spa_prop_add_list(*nvp, prop, strval, 0, src); 239 kmem_free(strval, za.za_num_integers); 240 break; 241 242 default: 243 break; 244 } 245 } 246 zap_cursor_fini(&zc); 247 mutex_exit(&spa->spa_props_lock); 248 out: 249 if (err && err != ENOENT) { 250 nvlist_free(*nvp); 251 *nvp = NULL; 252 return (err); 253 } 254 255 return (0); 256 } 257 258 /* 259 * Validate the given pool properties nvlist and modify the list 260 * for the property values to be set. 
261 */ 262 static int 263 spa_prop_validate(spa_t *spa, nvlist_t *props) 264 { 265 nvpair_t *elem; 266 int error = 0, reset_bootfs = 0; 267 uint64_t objnum; 268 269 elem = NULL; 270 while ((elem = nvlist_next_nvpair(props, elem)) != NULL) { 271 zpool_prop_t prop; 272 char *propname, *strval; 273 uint64_t intval; 274 objset_t *os; 275 char *slash; 276 277 propname = nvpair_name(elem); 278 279 if ((prop = zpool_name_to_prop(propname)) == ZPROP_INVAL) 280 return (EINVAL); 281 282 switch (prop) { 283 case ZPOOL_PROP_VERSION: 284 error = nvpair_value_uint64(elem, &intval); 285 if (!error && 286 (intval < spa_version(spa) || intval > SPA_VERSION)) 287 error = EINVAL; 288 break; 289 290 case ZPOOL_PROP_DELEGATION: 291 case ZPOOL_PROP_AUTOREPLACE: 292 error = nvpair_value_uint64(elem, &intval); 293 if (!error && intval > 1) 294 error = EINVAL; 295 break; 296 297 case ZPOOL_PROP_BOOTFS: 298 if (spa_version(spa) < SPA_VERSION_BOOTFS) { 299 error = ENOTSUP; 300 break; 301 } 302 303 /* 304 * Make sure the vdev config is bootable 305 */ 306 if (!vdev_is_bootable(spa->spa_root_vdev)) { 307 error = ENOTSUP; 308 break; 309 } 310 311 reset_bootfs = 1; 312 313 error = nvpair_value_string(elem, &strval); 314 315 if (!error) { 316 uint64_t compress; 317 318 if (strval == NULL || strval[0] == '\0') { 319 objnum = zpool_prop_default_numeric( 320 ZPOOL_PROP_BOOTFS); 321 break; 322 } 323 324 if (error = dmu_objset_open(strval, DMU_OST_ZFS, 325 DS_MODE_USER | DS_MODE_READONLY, &os)) 326 break; 327 328 /* We don't support gzip bootable datasets */ 329 if ((error = dsl_prop_get_integer(strval, 330 zfs_prop_to_name(ZFS_PROP_COMPRESSION), 331 &compress, NULL)) == 0 && 332 !BOOTFS_COMPRESS_VALID(compress)) { 333 error = ENOTSUP; 334 } else { 335 objnum = dmu_objset_id(os); 336 } 337 dmu_objset_close(os); 338 } 339 break; 340 case ZPOOL_PROP_FAILUREMODE: 341 error = nvpair_value_uint64(elem, &intval); 342 if (!error && (intval < ZIO_FAILURE_MODE_WAIT || 343 intval > ZIO_FAILURE_MODE_PANIC)) 344 error = EINVAL; 345 346 /* 347 * This is a special case which only occurs when 348 * the pool has completely failed. This allows 349 * the user to change the in-core failmode property 350 * without syncing it out to disk (I/Os might 351 * currently be blocked). We do this by returning 352 * EIO to the caller (spa_prop_set) to trick it 353 * into thinking we encountered a property validation 354 * error. 
355 */ 356 if (!error && spa_state(spa) == POOL_STATE_IO_FAILURE) { 357 spa->spa_failmode = intval; 358 error = EIO; 359 } 360 break; 361 362 case ZPOOL_PROP_CACHEFILE: 363 if ((error = nvpair_value_string(elem, &strval)) != 0) 364 break; 365 366 if (strval[0] == '\0') 367 break; 368 369 if (strcmp(strval, "none") == 0) 370 break; 371 372 if (strval[0] != '/') { 373 error = EINVAL; 374 break; 375 } 376 377 slash = strrchr(strval, '/'); 378 ASSERT(slash != NULL); 379 380 if (slash[1] == '\0' || strcmp(slash, "/.") == 0 || 381 strcmp(slash, "/..") == 0) 382 error = EINVAL; 383 break; 384 } 385 386 if (error) 387 break; 388 } 389 390 if (!error && reset_bootfs) { 391 error = nvlist_remove(props, 392 zpool_prop_to_name(ZPOOL_PROP_BOOTFS), DATA_TYPE_STRING); 393 394 if (!error) { 395 error = nvlist_add_uint64(props, 396 zpool_prop_to_name(ZPOOL_PROP_BOOTFS), objnum); 397 } 398 } 399 400 return (error); 401 } 402 403 int 404 spa_prop_set(spa_t *spa, nvlist_t *nvp) 405 { 406 int error; 407 408 if ((error = spa_prop_validate(spa, nvp)) != 0) 409 return (error); 410 411 return (dsl_sync_task_do(spa_get_dsl(spa), NULL, spa_sync_props, 412 spa, nvp, 3)); 413 } 414 415 /* 416 * If the bootfs property value is dsobj, clear it. 417 */ 418 void 419 spa_prop_clear_bootfs(spa_t *spa, uint64_t dsobj, dmu_tx_t *tx) 420 { 421 if (spa->spa_bootfs == dsobj && spa->spa_pool_props_object != 0) { 422 VERIFY(zap_remove(spa->spa_meta_objset, 423 spa->spa_pool_props_object, 424 zpool_prop_to_name(ZPOOL_PROP_BOOTFS), tx) == 0); 425 spa->spa_bootfs = 0; 426 } 427 } 428 429 /* 430 * ========================================================================== 431 * SPA state manipulation (open/create/destroy/import/export) 432 * ========================================================================== 433 */ 434 435 static int 436 spa_error_entry_compare(const void *a, const void *b) 437 { 438 spa_error_entry_t *sa = (spa_error_entry_t *)a; 439 spa_error_entry_t *sb = (spa_error_entry_t *)b; 440 int ret; 441 442 ret = bcmp(&sa->se_bookmark, &sb->se_bookmark, 443 sizeof (zbookmark_t)); 444 445 if (ret < 0) 446 return (-1); 447 else if (ret > 0) 448 return (1); 449 else 450 return (0); 451 } 452 453 /* 454 * Utility function which retrieves copies of the current logs and 455 * re-initializes them in the process. 456 */ 457 void 458 spa_get_errlists(spa_t *spa, avl_tree_t *last, avl_tree_t *scrub) 459 { 460 ASSERT(MUTEX_HELD(&spa->spa_errlist_lock)); 461 462 bcopy(&spa->spa_errlist_last, last, sizeof (avl_tree_t)); 463 bcopy(&spa->spa_errlist_scrub, scrub, sizeof (avl_tree_t)); 464 465 avl_create(&spa->spa_errlist_scrub, 466 spa_error_entry_compare, sizeof (spa_error_entry_t), 467 offsetof(spa_error_entry_t, se_avl)); 468 avl_create(&spa->spa_errlist_last, 469 spa_error_entry_compare, sizeof (spa_error_entry_t), 470 offsetof(spa_error_entry_t, se_avl)); 471 } 472 473 /* 474 * Activate an uninitialized pool. 
475 */ 476 static void 477 spa_activate(spa_t *spa) 478 { 479 int t; 480 481 ASSERT(spa->spa_state == POOL_STATE_UNINITIALIZED); 482 483 spa->spa_state = POOL_STATE_ACTIVE; 484 485 spa->spa_normal_class = metaslab_class_create(); 486 spa->spa_log_class = metaslab_class_create(); 487 488 for (t = 0; t < ZIO_TYPES; t++) { 489 spa->spa_zio_issue_taskq[t] = taskq_create("spa_zio_issue", 490 zio_taskq_threads, maxclsyspri, 50, INT_MAX, 491 TASKQ_PREPOPULATE); 492 spa->spa_zio_intr_taskq[t] = taskq_create("spa_zio_intr", 493 zio_taskq_threads, maxclsyspri, 50, INT_MAX, 494 TASKQ_PREPOPULATE); 495 } 496 497 list_create(&spa->spa_dirty_list, sizeof (vdev_t), 498 offsetof(vdev_t, vdev_dirty_node)); 499 list_create(&spa->spa_zio_list, sizeof (zio_t), 500 offsetof(zio_t, zio_link_node)); 501 502 txg_list_create(&spa->spa_vdev_txg_list, 503 offsetof(struct vdev, vdev_txg_node)); 504 505 avl_create(&spa->spa_errlist_scrub, 506 spa_error_entry_compare, sizeof (spa_error_entry_t), 507 offsetof(spa_error_entry_t, se_avl)); 508 avl_create(&spa->spa_errlist_last, 509 spa_error_entry_compare, sizeof (spa_error_entry_t), 510 offsetof(spa_error_entry_t, se_avl)); 511 } 512 513 /* 514 * Opposite of spa_activate(). 515 */ 516 static void 517 spa_deactivate(spa_t *spa) 518 { 519 int t; 520 521 ASSERT(spa->spa_sync_on == B_FALSE); 522 ASSERT(spa->spa_dsl_pool == NULL); 523 ASSERT(spa->spa_root_vdev == NULL); 524 525 ASSERT(spa->spa_state != POOL_STATE_UNINITIALIZED); 526 527 txg_list_destroy(&spa->spa_vdev_txg_list); 528 529 list_destroy(&spa->spa_dirty_list); 530 list_destroy(&spa->spa_zio_list); 531 532 for (t = 0; t < ZIO_TYPES; t++) { 533 taskq_destroy(spa->spa_zio_issue_taskq[t]); 534 taskq_destroy(spa->spa_zio_intr_taskq[t]); 535 spa->spa_zio_issue_taskq[t] = NULL; 536 spa->spa_zio_intr_taskq[t] = NULL; 537 } 538 539 metaslab_class_destroy(spa->spa_normal_class); 540 spa->spa_normal_class = NULL; 541 542 metaslab_class_destroy(spa->spa_log_class); 543 spa->spa_log_class = NULL; 544 545 /* 546 * If this was part of an import or the open otherwise failed, we may 547 * still have errors left in the queues. Empty them just in case. 548 */ 549 spa_errlog_drain(spa); 550 551 avl_destroy(&spa->spa_errlist_scrub); 552 avl_destroy(&spa->spa_errlist_last); 553 554 spa->spa_state = POOL_STATE_UNINITIALIZED; 555 } 556 557 /* 558 * Verify a pool configuration, and construct the vdev tree appropriately. This 559 * will create all the necessary vdevs in the appropriate layout, with each vdev 560 * in the CLOSED state. This will prep the pool before open/creation/import. 561 * All vdev validation is done by the vdev_alloc() routine. 562 */ 563 static int 564 spa_config_parse(spa_t *spa, vdev_t **vdp, nvlist_t *nv, vdev_t *parent, 565 uint_t id, int atype) 566 { 567 nvlist_t **child; 568 uint_t c, children; 569 int error; 570 571 if ((error = vdev_alloc(spa, vdp, nv, parent, id, atype)) != 0) 572 return (error); 573 574 if ((*vdp)->vdev_ops->vdev_op_leaf) 575 return (0); 576 577 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN, 578 &child, &children) != 0) { 579 vdev_free(*vdp); 580 *vdp = NULL; 581 return (EINVAL); 582 } 583 584 for (c = 0; c < children; c++) { 585 vdev_t *vd; 586 if ((error = spa_config_parse(spa, &vd, child[c], *vdp, c, 587 atype)) != 0) { 588 vdev_free(*vdp); 589 *vdp = NULL; 590 return (error); 591 } 592 } 593 594 ASSERT(*vdp != NULL); 595 596 return (0); 597 } 598 599 /* 600 * Opposite of spa_load(). 
 */
static void
spa_unload(spa_t *spa)
{
	int i;

	/*
	 * Stop async tasks.
	 */
	spa_async_suspend(spa);

	/*
	 * Stop syncing.
	 */
	if (spa->spa_sync_on) {
		txg_sync_stop(spa->spa_dsl_pool);
		spa->spa_sync_on = B_FALSE;
	}

	/*
	 * Wait for any outstanding prefetch I/O to complete.
	 */
	spa_config_enter(spa, RW_WRITER, FTAG);
	spa_config_exit(spa, FTAG);

	/*
	 * Drop and purge level 2 cache
	 */
	spa_l2cache_drop(spa);

	/*
	 * Close the dsl pool.
	 */
	if (spa->spa_dsl_pool) {
		dsl_pool_close(spa->spa_dsl_pool);
		spa->spa_dsl_pool = NULL;
	}

	/*
	 * Close all vdevs.
	 */
	if (spa->spa_root_vdev)
		vdev_free(spa->spa_root_vdev);
	ASSERT(spa->spa_root_vdev == NULL);

	for (i = 0; i < spa->spa_spares.sav_count; i++)
		vdev_free(spa->spa_spares.sav_vdevs[i]);
	if (spa->spa_spares.sav_vdevs) {
		kmem_free(spa->spa_spares.sav_vdevs,
		    spa->spa_spares.sav_count * sizeof (void *));
		spa->spa_spares.sav_vdevs = NULL;
	}
	if (spa->spa_spares.sav_config) {
		nvlist_free(spa->spa_spares.sav_config);
		spa->spa_spares.sav_config = NULL;
	}

	for (i = 0; i < spa->spa_l2cache.sav_count; i++)
		vdev_free(spa->spa_l2cache.sav_vdevs[i]);
	if (spa->spa_l2cache.sav_vdevs) {
		kmem_free(spa->spa_l2cache.sav_vdevs,
		    spa->spa_l2cache.sav_count * sizeof (void *));
		spa->spa_l2cache.sav_vdevs = NULL;
	}
	if (spa->spa_l2cache.sav_config) {
		nvlist_free(spa->spa_l2cache.sav_config);
		spa->spa_l2cache.sav_config = NULL;
	}

	spa->spa_async_suspended = 0;
}

/*
 * Load (or re-load) the current list of vdevs describing the active spares for
 * this pool. When this is called, we have some form of basic information in
 * 'spa_spares.sav_config'. We parse this into vdevs, try to open them, and
 * then re-generate a more complete list including status information.
 */
static void
spa_load_spares(spa_t *spa)
{
	nvlist_t **spares;
	uint_t nspares;
	int i;
	vdev_t *vd, *tvd;

	/*
	 * First, close and free any existing spare vdevs.
	 */
	for (i = 0; i < spa->spa_spares.sav_count; i++) {
		vd = spa->spa_spares.sav_vdevs[i];

		/* Undo the call to spa_activate() below */
		if ((tvd = spa_lookup_by_guid(spa, vd->vdev_guid,
		    B_FALSE)) != NULL && tvd->vdev_isspare)
			spa_spare_remove(tvd);
		vdev_close(vd);
		vdev_free(vd);
	}

	if (spa->spa_spares.sav_vdevs)
		kmem_free(spa->spa_spares.sav_vdevs,
		    spa->spa_spares.sav_count * sizeof (void *));

	if (spa->spa_spares.sav_config == NULL)
		nspares = 0;
	else
		VERIFY(nvlist_lookup_nvlist_array(spa->spa_spares.sav_config,
		    ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0);

	spa->spa_spares.sav_count = (int)nspares;
	spa->spa_spares.sav_vdevs = NULL;

	if (nspares == 0)
		return;

	/*
	 * Construct the array of vdevs, opening them to get status in the
	 * process. For each spare, there are potentially two different vdev_t
	 * structures associated with it: one in the list of spares (used only
	 * for basic validation purposes) and one in the active vdev
	 * configuration (if it's spared in). During this phase we open and
	 * validate each vdev on the spare list. If the vdev also exists in the
	 * active configuration, then we also mark this vdev as an active spare.
	 */
	spa->spa_spares.sav_vdevs = kmem_alloc(nspares * sizeof (void *),
	    KM_SLEEP);
	for (i = 0; i < spa->spa_spares.sav_count; i++) {
		VERIFY(spa_config_parse(spa, &vd, spares[i], NULL, 0,
		    VDEV_ALLOC_SPARE) == 0);
		ASSERT(vd != NULL);

		spa->spa_spares.sav_vdevs[i] = vd;

		if ((tvd = spa_lookup_by_guid(spa, vd->vdev_guid,
		    B_FALSE)) != NULL) {
			if (!tvd->vdev_isspare)
				spa_spare_add(tvd);

			/*
			 * We only mark the spare active if we were successfully
			 * able to load the vdev. Otherwise, importing a pool
			 * with a bad active spare would result in strange
			 * behavior, because multiple pools would think the
			 * spare is actively in use.
			 *
			 * There is a vulnerability here to an equally bizarre
			 * circumstance, where a dead active spare is later
			 * brought back to life (onlined or otherwise). Given
			 * the rarity of this scenario, and the extra complexity
			 * it adds, we ignore the possibility.
			 */
			if (!vdev_is_dead(tvd))
				spa_spare_activate(tvd);
		}

		if (vdev_open(vd) != 0)
			continue;

		vd->vdev_top = vd;
		if (vdev_validate_aux(vd) == 0)
			spa_spare_add(vd);
	}

	/*
	 * Recompute the stashed list of spares, with status information
	 * this time.
	 */
	VERIFY(nvlist_remove(spa->spa_spares.sav_config, ZPOOL_CONFIG_SPARES,
	    DATA_TYPE_NVLIST_ARRAY) == 0);

	spares = kmem_alloc(spa->spa_spares.sav_count * sizeof (void *),
	    KM_SLEEP);
	for (i = 0; i < spa->spa_spares.sav_count; i++)
		spares[i] = vdev_config_generate(spa,
		    spa->spa_spares.sav_vdevs[i], B_TRUE, B_TRUE, B_FALSE);
	VERIFY(nvlist_add_nvlist_array(spa->spa_spares.sav_config,
	    ZPOOL_CONFIG_SPARES, spares, spa->spa_spares.sav_count) == 0);
	for (i = 0; i < spa->spa_spares.sav_count; i++)
		nvlist_free(spares[i]);
	kmem_free(spares, spa->spa_spares.sav_count * sizeof (void *));
}

/*
 * Load (or re-load) the current list of vdevs describing the active l2cache
 * for this pool. When this is called, we have some form of basic information
 * in 'spa_l2cache.sav_config'. We parse this into vdevs, try to open them, and
 * then re-generate a more complete list including status information.
 * Devices which are already active have their details maintained, and are
 * not re-opened.
 */
static void
spa_load_l2cache(spa_t *spa)
{
	nvlist_t **l2cache;
	uint_t nl2cache;
	int i, j, oldnvdevs;
	uint64_t guid, size;
	vdev_t *vd, **oldvdevs, **newvdevs;
	spa_aux_vdev_t *sav = &spa->spa_l2cache;

	if (sav->sav_config != NULL) {
		VERIFY(nvlist_lookup_nvlist_array(sav->sav_config,
		    ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0);
		newvdevs = kmem_alloc(nl2cache * sizeof (void *), KM_SLEEP);
	} else {
		nl2cache = 0;
	}

	oldvdevs = sav->sav_vdevs;
	oldnvdevs = sav->sav_count;
	sav->sav_vdevs = NULL;
	sav->sav_count = 0;

	/*
	 * Process new nvlist of vdevs.
	 */
	for (i = 0; i < nl2cache; i++) {
		VERIFY(nvlist_lookup_uint64(l2cache[i], ZPOOL_CONFIG_GUID,
		    &guid) == 0);

		newvdevs[i] = NULL;
		for (j = 0; j < oldnvdevs; j++) {
			vd = oldvdevs[j];
			if (vd != NULL && guid == vd->vdev_guid) {
				/*
				 * Retain previous vdev for add/remove ops.
828 */ 829 newvdevs[i] = vd; 830 oldvdevs[j] = NULL; 831 break; 832 } 833 } 834 835 if (newvdevs[i] == NULL) { 836 /* 837 * Create new vdev 838 */ 839 VERIFY(spa_config_parse(spa, &vd, l2cache[i], NULL, 0, 840 VDEV_ALLOC_L2CACHE) == 0); 841 ASSERT(vd != NULL); 842 newvdevs[i] = vd; 843 844 /* 845 * Commit this vdev as an l2cache device, 846 * even if it fails to open. 847 */ 848 spa_l2cache_add(vd); 849 850 vd->vdev_top = vd; 851 vd->vdev_aux = sav; 852 853 spa_l2cache_activate(vd); 854 855 if (vdev_open(vd) != 0) 856 continue; 857 858 (void) vdev_validate_aux(vd); 859 860 if (!vdev_is_dead(vd)) { 861 size = vdev_get_rsize(vd); 862 l2arc_add_vdev(spa, vd, 863 VDEV_LABEL_START_SIZE, 864 size - VDEV_LABEL_START_SIZE); 865 } 866 } 867 } 868 869 /* 870 * Purge vdevs that were dropped 871 */ 872 for (i = 0; i < oldnvdevs; i++) { 873 uint64_t pool; 874 875 vd = oldvdevs[i]; 876 if (vd != NULL) { 877 if (spa_mode & FWRITE && 878 spa_l2cache_exists(vd->vdev_guid, &pool) && 879 pool != 0ULL && 880 l2arc_vdev_present(vd)) { 881 l2arc_remove_vdev(vd); 882 } 883 (void) vdev_close(vd); 884 spa_l2cache_remove(vd); 885 } 886 } 887 888 if (oldvdevs) 889 kmem_free(oldvdevs, oldnvdevs * sizeof (void *)); 890 891 if (sav->sav_config == NULL) 892 goto out; 893 894 sav->sav_vdevs = newvdevs; 895 sav->sav_count = (int)nl2cache; 896 897 /* 898 * Recompute the stashed list of l2cache devices, with status 899 * information this time. 900 */ 901 VERIFY(nvlist_remove(sav->sav_config, ZPOOL_CONFIG_L2CACHE, 902 DATA_TYPE_NVLIST_ARRAY) == 0); 903 904 l2cache = kmem_alloc(sav->sav_count * sizeof (void *), KM_SLEEP); 905 for (i = 0; i < sav->sav_count; i++) 906 l2cache[i] = vdev_config_generate(spa, 907 sav->sav_vdevs[i], B_TRUE, B_FALSE, B_TRUE); 908 VERIFY(nvlist_add_nvlist_array(sav->sav_config, 909 ZPOOL_CONFIG_L2CACHE, l2cache, sav->sav_count) == 0); 910 out: 911 for (i = 0; i < sav->sav_count; i++) 912 nvlist_free(l2cache[i]); 913 if (sav->sav_count) 914 kmem_free(l2cache, sav->sav_count * sizeof (void *)); 915 } 916 917 static int 918 load_nvlist(spa_t *spa, uint64_t obj, nvlist_t **value) 919 { 920 dmu_buf_t *db; 921 char *packed = NULL; 922 size_t nvsize = 0; 923 int error; 924 *value = NULL; 925 926 VERIFY(0 == dmu_bonus_hold(spa->spa_meta_objset, obj, FTAG, &db)); 927 nvsize = *(uint64_t *)db->db_data; 928 dmu_buf_rele(db, FTAG); 929 930 packed = kmem_alloc(nvsize, KM_SLEEP); 931 error = dmu_read(spa->spa_meta_objset, obj, 0, nvsize, packed); 932 if (error == 0) 933 error = nvlist_unpack(packed, nvsize, value, 0); 934 kmem_free(packed, nvsize); 935 936 return (error); 937 } 938 939 /* 940 * Checks to see if the given vdev could not be opened, in which case we post a 941 * sysevent to notify the autoreplace code that the device has been removed. 942 */ 943 static void 944 spa_check_removed(vdev_t *vd) 945 { 946 int c; 947 948 for (c = 0; c < vd->vdev_children; c++) 949 spa_check_removed(vd->vdev_child[c]); 950 951 if (vd->vdev_ops->vdev_op_leaf && vdev_is_dead(vd)) { 952 zfs_post_autoreplace(vd->vdev_spa, vd); 953 spa_event_notify(vd->vdev_spa, vd, ESC_ZFS_VDEV_CHECK); 954 } 955 } 956 957 /* 958 * Load an existing storage pool, using the pool's builtin spa_config as a 959 * source of configuration information. 
960 */ 961 static int 962 spa_load(spa_t *spa, nvlist_t *config, spa_load_state_t state, int mosconfig) 963 { 964 int error = 0; 965 nvlist_t *nvroot = NULL; 966 vdev_t *rvd; 967 uberblock_t *ub = &spa->spa_uberblock; 968 uint64_t config_cache_txg = spa->spa_config_txg; 969 uint64_t pool_guid; 970 uint64_t version; 971 zio_t *zio; 972 uint64_t autoreplace = 0; 973 974 spa->spa_load_state = state; 975 976 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &nvroot) || 977 nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID, &pool_guid)) { 978 error = EINVAL; 979 goto out; 980 } 981 982 /* 983 * Versioning wasn't explicitly added to the label until later, so if 984 * it's not present treat it as the initial version. 985 */ 986 if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION, &version) != 0) 987 version = SPA_VERSION_INITIAL; 988 989 (void) nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_TXG, 990 &spa->spa_config_txg); 991 992 if ((state == SPA_LOAD_IMPORT || state == SPA_LOAD_TRYIMPORT) && 993 spa_guid_exists(pool_guid, 0)) { 994 error = EEXIST; 995 goto out; 996 } 997 998 spa->spa_load_guid = pool_guid; 999 1000 /* 1001 * Parse the configuration into a vdev tree. We explicitly set the 1002 * value that will be returned by spa_version() since parsing the 1003 * configuration requires knowing the version number. 1004 */ 1005 spa_config_enter(spa, RW_WRITER, FTAG); 1006 spa->spa_ubsync.ub_version = version; 1007 error = spa_config_parse(spa, &rvd, nvroot, NULL, 0, VDEV_ALLOC_LOAD); 1008 spa_config_exit(spa, FTAG); 1009 1010 if (error != 0) 1011 goto out; 1012 1013 ASSERT(spa->spa_root_vdev == rvd); 1014 ASSERT(spa_guid(spa) == pool_guid); 1015 1016 /* 1017 * Try to open all vdevs, loading each label in the process. 1018 */ 1019 error = vdev_open(rvd); 1020 if (error != 0) 1021 goto out; 1022 1023 /* 1024 * Validate the labels for all leaf vdevs. We need to grab the config 1025 * lock because all label I/O is done with the ZIO_FLAG_CONFIG_HELD 1026 * flag. 1027 */ 1028 spa_config_enter(spa, RW_READER, FTAG); 1029 error = vdev_validate(rvd); 1030 spa_config_exit(spa, FTAG); 1031 1032 if (error != 0) 1033 goto out; 1034 1035 if (rvd->vdev_state <= VDEV_STATE_CANT_OPEN) { 1036 error = ENXIO; 1037 goto out; 1038 } 1039 1040 /* 1041 * Find the best uberblock. 1042 */ 1043 bzero(ub, sizeof (uberblock_t)); 1044 1045 zio = zio_root(spa, NULL, NULL, 1046 ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE); 1047 vdev_uberblock_load(zio, rvd, ub); 1048 error = zio_wait(zio); 1049 1050 /* 1051 * If we weren't able to find a single valid uberblock, return failure. 1052 */ 1053 if (ub->ub_txg == 0) { 1054 vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN, 1055 VDEV_AUX_CORRUPT_DATA); 1056 error = ENXIO; 1057 goto out; 1058 } 1059 1060 /* 1061 * If the pool is newer than the code, we can't open it. 1062 */ 1063 if (ub->ub_version > SPA_VERSION) { 1064 vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN, 1065 VDEV_AUX_VERSION_NEWER); 1066 error = ENOTSUP; 1067 goto out; 1068 } 1069 1070 /* 1071 * If the vdev guid sum doesn't match the uberblock, we have an 1072 * incomplete configuration. 1073 */ 1074 if (rvd->vdev_guid_sum != ub->ub_guid_sum && mosconfig) { 1075 vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN, 1076 VDEV_AUX_BAD_GUID_SUM); 1077 error = ENXIO; 1078 goto out; 1079 } 1080 1081 /* 1082 * Initialize internal SPA structures. 
1083 */ 1084 spa->spa_state = POOL_STATE_ACTIVE; 1085 spa->spa_ubsync = spa->spa_uberblock; 1086 spa->spa_first_txg = spa_last_synced_txg(spa) + 1; 1087 error = dsl_pool_open(spa, spa->spa_first_txg, &spa->spa_dsl_pool); 1088 if (error) { 1089 vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN, 1090 VDEV_AUX_CORRUPT_DATA); 1091 goto out; 1092 } 1093 spa->spa_meta_objset = spa->spa_dsl_pool->dp_meta_objset; 1094 1095 if (zap_lookup(spa->spa_meta_objset, 1096 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_CONFIG, 1097 sizeof (uint64_t), 1, &spa->spa_config_object) != 0) { 1098 vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN, 1099 VDEV_AUX_CORRUPT_DATA); 1100 error = EIO; 1101 goto out; 1102 } 1103 1104 if (!mosconfig) { 1105 nvlist_t *newconfig; 1106 uint64_t hostid; 1107 1108 if (load_nvlist(spa, spa->spa_config_object, &newconfig) != 0) { 1109 vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN, 1110 VDEV_AUX_CORRUPT_DATA); 1111 error = EIO; 1112 goto out; 1113 } 1114 1115 if (nvlist_lookup_uint64(newconfig, ZPOOL_CONFIG_HOSTID, 1116 &hostid) == 0) { 1117 char *hostname; 1118 unsigned long myhostid = 0; 1119 1120 VERIFY(nvlist_lookup_string(newconfig, 1121 ZPOOL_CONFIG_HOSTNAME, &hostname) == 0); 1122 1123 (void) ddi_strtoul(hw_serial, NULL, 10, &myhostid); 1124 if (hostid != 0 && myhostid != 0 && 1125 (unsigned long)hostid != myhostid) { 1126 cmn_err(CE_WARN, "pool '%s' could not be " 1127 "loaded as it was last accessed by " 1128 "another system (host: %s hostid: 0x%lx). " 1129 "See: http://www.sun.com/msg/ZFS-8000-EY", 1130 spa->spa_name, hostname, 1131 (unsigned long)hostid); 1132 error = EBADF; 1133 goto out; 1134 } 1135 } 1136 1137 spa_config_set(spa, newconfig); 1138 spa_unload(spa); 1139 spa_deactivate(spa); 1140 spa_activate(spa); 1141 1142 return (spa_load(spa, newconfig, state, B_TRUE)); 1143 } 1144 1145 if (zap_lookup(spa->spa_meta_objset, 1146 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_SYNC_BPLIST, 1147 sizeof (uint64_t), 1, &spa->spa_sync_bplist_obj) != 0) { 1148 vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN, 1149 VDEV_AUX_CORRUPT_DATA); 1150 error = EIO; 1151 goto out; 1152 } 1153 1154 /* 1155 * Load the bit that tells us to use the new accounting function 1156 * (raid-z deflation). If we have an older pool, this will not 1157 * be present. 1158 */ 1159 error = zap_lookup(spa->spa_meta_objset, 1160 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_DEFLATE, 1161 sizeof (uint64_t), 1, &spa->spa_deflate); 1162 if (error != 0 && error != ENOENT) { 1163 vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN, 1164 VDEV_AUX_CORRUPT_DATA); 1165 error = EIO; 1166 goto out; 1167 } 1168 1169 /* 1170 * Load the persistent error log. If we have an older pool, this will 1171 * not be present. 1172 */ 1173 error = zap_lookup(spa->spa_meta_objset, 1174 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_ERRLOG_LAST, 1175 sizeof (uint64_t), 1, &spa->spa_errlog_last); 1176 if (error != 0 && error != ENOENT) { 1177 vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN, 1178 VDEV_AUX_CORRUPT_DATA); 1179 error = EIO; 1180 goto out; 1181 } 1182 1183 error = zap_lookup(spa->spa_meta_objset, 1184 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_ERRLOG_SCRUB, 1185 sizeof (uint64_t), 1, &spa->spa_errlog_scrub); 1186 if (error != 0 && error != ENOENT) { 1187 vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN, 1188 VDEV_AUX_CORRUPT_DATA); 1189 error = EIO; 1190 goto out; 1191 } 1192 1193 /* 1194 * Load the history object. If we have an older pool, this 1195 * will not be present. 
1196 */ 1197 error = zap_lookup(spa->spa_meta_objset, 1198 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_HISTORY, 1199 sizeof (uint64_t), 1, &spa->spa_history); 1200 if (error != 0 && error != ENOENT) { 1201 vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN, 1202 VDEV_AUX_CORRUPT_DATA); 1203 error = EIO; 1204 goto out; 1205 } 1206 1207 /* 1208 * Load any hot spares for this pool. 1209 */ 1210 error = zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT, 1211 DMU_POOL_SPARES, sizeof (uint64_t), 1, &spa->spa_spares.sav_object); 1212 if (error != 0 && error != ENOENT) { 1213 vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN, 1214 VDEV_AUX_CORRUPT_DATA); 1215 error = EIO; 1216 goto out; 1217 } 1218 if (error == 0) { 1219 ASSERT(spa_version(spa) >= SPA_VERSION_SPARES); 1220 if (load_nvlist(spa, spa->spa_spares.sav_object, 1221 &spa->spa_spares.sav_config) != 0) { 1222 vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN, 1223 VDEV_AUX_CORRUPT_DATA); 1224 error = EIO; 1225 goto out; 1226 } 1227 1228 spa_config_enter(spa, RW_WRITER, FTAG); 1229 spa_load_spares(spa); 1230 spa_config_exit(spa, FTAG); 1231 } 1232 1233 /* 1234 * Load any level 2 ARC devices for this pool. 1235 */ 1236 error = zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT, 1237 DMU_POOL_L2CACHE, sizeof (uint64_t), 1, 1238 &spa->spa_l2cache.sav_object); 1239 if (error != 0 && error != ENOENT) { 1240 vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN, 1241 VDEV_AUX_CORRUPT_DATA); 1242 error = EIO; 1243 goto out; 1244 } 1245 if (error == 0) { 1246 ASSERT(spa_version(spa) >= SPA_VERSION_L2CACHE); 1247 if (load_nvlist(spa, spa->spa_l2cache.sav_object, 1248 &spa->spa_l2cache.sav_config) != 0) { 1249 vdev_set_state(rvd, B_TRUE, 1250 VDEV_STATE_CANT_OPEN, 1251 VDEV_AUX_CORRUPT_DATA); 1252 error = EIO; 1253 goto out; 1254 } 1255 1256 spa_config_enter(spa, RW_WRITER, FTAG); 1257 spa_load_l2cache(spa); 1258 spa_config_exit(spa, FTAG); 1259 } 1260 1261 spa->spa_delegation = zpool_prop_default_numeric(ZPOOL_PROP_DELEGATION); 1262 1263 error = zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT, 1264 DMU_POOL_PROPS, sizeof (uint64_t), 1, &spa->spa_pool_props_object); 1265 1266 if (error && error != ENOENT) { 1267 vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN, 1268 VDEV_AUX_CORRUPT_DATA); 1269 error = EIO; 1270 goto out; 1271 } 1272 1273 if (error == 0) { 1274 (void) zap_lookup(spa->spa_meta_objset, 1275 spa->spa_pool_props_object, 1276 zpool_prop_to_name(ZPOOL_PROP_BOOTFS), 1277 sizeof (uint64_t), 1, &spa->spa_bootfs); 1278 (void) zap_lookup(spa->spa_meta_objset, 1279 spa->spa_pool_props_object, 1280 zpool_prop_to_name(ZPOOL_PROP_AUTOREPLACE), 1281 sizeof (uint64_t), 1, &autoreplace); 1282 (void) zap_lookup(spa->spa_meta_objset, 1283 spa->spa_pool_props_object, 1284 zpool_prop_to_name(ZPOOL_PROP_DELEGATION), 1285 sizeof (uint64_t), 1, &spa->spa_delegation); 1286 (void) zap_lookup(spa->spa_meta_objset, 1287 spa->spa_pool_props_object, 1288 zpool_prop_to_name(ZPOOL_PROP_FAILUREMODE), 1289 sizeof (uint64_t), 1, &spa->spa_failmode); 1290 } 1291 1292 /* 1293 * If the 'autoreplace' property is set, then post a resource notifying 1294 * the ZFS DE that it should not issue any faults for unopenable 1295 * devices. We also iterate over the vdevs, and post a sysevent for any 1296 * unopenable vdevs so that the normal autoreplace handler can take 1297 * over. 1298 */ 1299 if (autoreplace && state != SPA_LOAD_TRYIMPORT) 1300 spa_check_removed(spa->spa_root_vdev); 1301 1302 /* 1303 * Load the vdev state for all toplevel vdevs. 
	 */
	vdev_load(rvd);

	/*
	 * Propagate the leaf DTLs we just loaded all the way up the tree.
	 */
	spa_config_enter(spa, RW_WRITER, FTAG);
	vdev_dtl_reassess(rvd, 0, 0, B_FALSE);
	spa_config_exit(spa, FTAG);

	/*
	 * Check the state of the root vdev. If it can't be opened, it
	 * indicates one or more toplevel vdevs are faulted.
	 */
	if (rvd->vdev_state <= VDEV_STATE_CANT_OPEN) {
		error = ENXIO;
		goto out;
	}

	if ((spa_mode & FWRITE) && state != SPA_LOAD_TRYIMPORT) {
		dmu_tx_t *tx;
		int need_update = B_FALSE;
		int c;

		/*
		 * Claim log blocks that haven't been committed yet.
		 * This must all happen in a single txg.
		 */
		tx = dmu_tx_create_assigned(spa_get_dsl(spa),
		    spa_first_txg(spa));
		(void) dmu_objset_find(spa->spa_name,
		    zil_claim, tx, DS_FIND_CHILDREN);
		dmu_tx_commit(tx);

		spa->spa_sync_on = B_TRUE;
		txg_sync_start(spa->spa_dsl_pool);

		/*
		 * Wait for all claims to sync.
		 */
		txg_wait_synced(spa->spa_dsl_pool, 0);

		/*
		 * If the config cache is stale, or we have uninitialized
		 * metaslabs (see spa_vdev_add()), then update the config.
		 */
		if (config_cache_txg != spa->spa_config_txg ||
		    state == SPA_LOAD_IMPORT)
			need_update = B_TRUE;

		for (c = 0; c < rvd->vdev_children; c++)
			if (rvd->vdev_child[c]->vdev_ms_array == 0)
				need_update = B_TRUE;

		/*
		 * Update the config cache asynchronously in case we're the
		 * root pool, in which case the config cache isn't writable yet.
		 */
		if (need_update)
			spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE);
	}

	error = 0;
out:
	if (error && error != EBADF)
		zfs_ereport_post(FM_EREPORT_ZFS_POOL, spa, NULL, NULL, 0, 0);
	spa->spa_load_state = SPA_LOAD_NONE;
	spa->spa_ena = 0;

	return (error);
}

/*
 * Pool Open/Import
 *
 * The import case is identical to an open except that the configuration is sent
 * down from userland, instead of grabbed from the configuration cache. For the
 * case of an open, the pool configuration will exist in the
 * POOL_STATE_UNINITIALIZED state.
 *
 * The stats information (gen/count/ustats) is used to gather vdev statistics at
 * the same time we open the pool, without having to keep around the spa_t in
 * some ambiguous state.
 */
static int
spa_open_common(const char *pool, spa_t **spapp, void *tag, nvlist_t **config)
{
	spa_t *spa;
	int error;
	int loaded = B_FALSE;
	int locked = B_FALSE;

	*spapp = NULL;

	/*
	 * As disgusting as this is, we need to support recursive calls to this
	 * function because dsl_dir_open() is called during spa_load(), and ends
	 * up calling spa_open() again. The real fix is to figure out how to
	 * avoid dsl_dir_open() calling this in the first place.
1403 */ 1404 if (mutex_owner(&spa_namespace_lock) != curthread) { 1405 mutex_enter(&spa_namespace_lock); 1406 locked = B_TRUE; 1407 } 1408 1409 if ((spa = spa_lookup(pool)) == NULL) { 1410 if (locked) 1411 mutex_exit(&spa_namespace_lock); 1412 return (ENOENT); 1413 } 1414 if (spa->spa_state == POOL_STATE_UNINITIALIZED) { 1415 1416 spa_activate(spa); 1417 1418 error = spa_load(spa, spa->spa_config, SPA_LOAD_OPEN, B_FALSE); 1419 1420 if (error == EBADF) { 1421 /* 1422 * If vdev_validate() returns failure (indicated by 1423 * EBADF), it indicates that one of the vdevs indicates 1424 * that the pool has been exported or destroyed. If 1425 * this is the case, the config cache is out of sync and 1426 * we should remove the pool from the namespace. 1427 */ 1428 spa_unload(spa); 1429 spa_deactivate(spa); 1430 spa_config_sync(spa, B_TRUE, B_TRUE); 1431 spa_remove(spa); 1432 if (locked) 1433 mutex_exit(&spa_namespace_lock); 1434 return (ENOENT); 1435 } 1436 1437 if (error) { 1438 /* 1439 * We can't open the pool, but we still have useful 1440 * information: the state of each vdev after the 1441 * attempted vdev_open(). Return this to the user. 1442 */ 1443 if (config != NULL && spa->spa_root_vdev != NULL) { 1444 spa_config_enter(spa, RW_READER, FTAG); 1445 *config = spa_config_generate(spa, NULL, -1ULL, 1446 B_TRUE); 1447 spa_config_exit(spa, FTAG); 1448 } 1449 spa_unload(spa); 1450 spa_deactivate(spa); 1451 spa->spa_last_open_failed = B_TRUE; 1452 if (locked) 1453 mutex_exit(&spa_namespace_lock); 1454 *spapp = NULL; 1455 return (error); 1456 } else { 1457 spa->spa_last_open_failed = B_FALSE; 1458 } 1459 1460 loaded = B_TRUE; 1461 } 1462 1463 spa_open_ref(spa, tag); 1464 1465 /* 1466 * If we just loaded the pool, resilver anything that's out of date. 1467 */ 1468 if (loaded && (spa_mode & FWRITE)) 1469 VERIFY(spa_scrub(spa, POOL_SCRUB_RESILVER, B_TRUE) == 0); 1470 1471 if (locked) 1472 mutex_exit(&spa_namespace_lock); 1473 1474 *spapp = spa; 1475 1476 if (config != NULL) { 1477 spa_config_enter(spa, RW_READER, FTAG); 1478 *config = spa_config_generate(spa, NULL, -1ULL, B_TRUE); 1479 spa_config_exit(spa, FTAG); 1480 } 1481 1482 return (0); 1483 } 1484 1485 int 1486 spa_open(const char *name, spa_t **spapp, void *tag) 1487 { 1488 return (spa_open_common(name, spapp, tag, NULL)); 1489 } 1490 1491 /* 1492 * Lookup the given spa_t, incrementing the inject count in the process, 1493 * preventing it from being exported or destroyed. 1494 */ 1495 spa_t * 1496 spa_inject_addref(char *name) 1497 { 1498 spa_t *spa; 1499 1500 mutex_enter(&spa_namespace_lock); 1501 if ((spa = spa_lookup(name)) == NULL) { 1502 mutex_exit(&spa_namespace_lock); 1503 return (NULL); 1504 } 1505 spa->spa_inject_ref++; 1506 mutex_exit(&spa_namespace_lock); 1507 1508 return (spa); 1509 } 1510 1511 void 1512 spa_inject_delref(spa_t *spa) 1513 { 1514 mutex_enter(&spa_namespace_lock); 1515 spa->spa_inject_ref--; 1516 mutex_exit(&spa_namespace_lock); 1517 } 1518 1519 /* 1520 * Add spares device information to the nvlist. 
1521 */ 1522 static void 1523 spa_add_spares(spa_t *spa, nvlist_t *config) 1524 { 1525 nvlist_t **spares; 1526 uint_t i, nspares; 1527 nvlist_t *nvroot; 1528 uint64_t guid; 1529 vdev_stat_t *vs; 1530 uint_t vsc; 1531 uint64_t pool; 1532 1533 if (spa->spa_spares.sav_count == 0) 1534 return; 1535 1536 VERIFY(nvlist_lookup_nvlist(config, 1537 ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0); 1538 VERIFY(nvlist_lookup_nvlist_array(spa->spa_spares.sav_config, 1539 ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0); 1540 if (nspares != 0) { 1541 VERIFY(nvlist_add_nvlist_array(nvroot, 1542 ZPOOL_CONFIG_SPARES, spares, nspares) == 0); 1543 VERIFY(nvlist_lookup_nvlist_array(nvroot, 1544 ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0); 1545 1546 /* 1547 * Go through and find any spares which have since been 1548 * repurposed as an active spare. If this is the case, update 1549 * their status appropriately. 1550 */ 1551 for (i = 0; i < nspares; i++) { 1552 VERIFY(nvlist_lookup_uint64(spares[i], 1553 ZPOOL_CONFIG_GUID, &guid) == 0); 1554 if (spa_spare_exists(guid, &pool) && pool != 0ULL) { 1555 VERIFY(nvlist_lookup_uint64_array( 1556 spares[i], ZPOOL_CONFIG_STATS, 1557 (uint64_t **)&vs, &vsc) == 0); 1558 vs->vs_state = VDEV_STATE_CANT_OPEN; 1559 vs->vs_aux = VDEV_AUX_SPARED; 1560 } 1561 } 1562 } 1563 } 1564 1565 /* 1566 * Add l2cache device information to the nvlist, including vdev stats. 1567 */ 1568 static void 1569 spa_add_l2cache(spa_t *spa, nvlist_t *config) 1570 { 1571 nvlist_t **l2cache; 1572 uint_t i, j, nl2cache; 1573 nvlist_t *nvroot; 1574 uint64_t guid; 1575 vdev_t *vd; 1576 vdev_stat_t *vs; 1577 uint_t vsc; 1578 1579 if (spa->spa_l2cache.sav_count == 0) 1580 return; 1581 1582 spa_config_enter(spa, RW_READER, FTAG); 1583 1584 VERIFY(nvlist_lookup_nvlist(config, 1585 ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0); 1586 VERIFY(nvlist_lookup_nvlist_array(spa->spa_l2cache.sav_config, 1587 ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0); 1588 if (nl2cache != 0) { 1589 VERIFY(nvlist_add_nvlist_array(nvroot, 1590 ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache) == 0); 1591 VERIFY(nvlist_lookup_nvlist_array(nvroot, 1592 ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0); 1593 1594 /* 1595 * Update level 2 cache device stats. 1596 */ 1597 1598 for (i = 0; i < nl2cache; i++) { 1599 VERIFY(nvlist_lookup_uint64(l2cache[i], 1600 ZPOOL_CONFIG_GUID, &guid) == 0); 1601 1602 vd = NULL; 1603 for (j = 0; j < spa->spa_l2cache.sav_count; j++) { 1604 if (guid == 1605 spa->spa_l2cache.sav_vdevs[j]->vdev_guid) { 1606 vd = spa->spa_l2cache.sav_vdevs[j]; 1607 break; 1608 } 1609 } 1610 ASSERT(vd != NULL); 1611 1612 VERIFY(nvlist_lookup_uint64_array(l2cache[i], 1613 ZPOOL_CONFIG_STATS, (uint64_t **)&vs, &vsc) == 0); 1614 vdev_get_stats(vd, vs); 1615 } 1616 } 1617 1618 spa_config_exit(spa, FTAG); 1619 } 1620 1621 int 1622 spa_get_stats(const char *name, nvlist_t **config, char *altroot, size_t buflen) 1623 { 1624 int error; 1625 spa_t *spa; 1626 1627 *config = NULL; 1628 error = spa_open_common(name, &spa, FTAG, config); 1629 1630 if (spa && *config != NULL) { 1631 VERIFY(nvlist_add_uint64(*config, ZPOOL_CONFIG_ERRCOUNT, 1632 spa_get_errlog_size(spa)) == 0); 1633 1634 spa_add_spares(spa, *config); 1635 spa_add_l2cache(spa, *config); 1636 } 1637 1638 /* 1639 * We want to get the alternate root even for faulted pools, so we cheat 1640 * and call spa_lookup() directly. 
	 */
	if (altroot) {
		if (spa == NULL) {
			mutex_enter(&spa_namespace_lock);
			spa = spa_lookup(name);
			if (spa)
				spa_altroot(spa, altroot, buflen);
			else
				altroot[0] = '\0';
			spa = NULL;
			mutex_exit(&spa_namespace_lock);
		} else {
			spa_altroot(spa, altroot, buflen);
		}
	}

	if (spa != NULL)
		spa_close(spa, FTAG);

	return (error);
}

/*
 * Validate that the auxiliary device array is well formed. We must have an
 * array of nvlists, each of which describes a valid leaf vdev. If this is an
 * import (mode is VDEV_ALLOC_SPARE), then we allow corrupted spares to be
 * specified, as long as they are well-formed.
 */
static int
spa_validate_aux_devs(spa_t *spa, nvlist_t *nvroot, uint64_t crtxg, int mode,
    spa_aux_vdev_t *sav, const char *config, uint64_t version,
    vdev_labeltype_t label)
{
	nvlist_t **dev;
	uint_t i, ndev;
	vdev_t *vd;
	int error;

	/*
	 * It's acceptable to have no devs specified.
	 */
	if (nvlist_lookup_nvlist_array(nvroot, config, &dev, &ndev) != 0)
		return (0);

	if (ndev == 0)
		return (EINVAL);

	/*
	 * Make sure the pool is formatted with a version that supports this
	 * device type.
	 */
	if (spa_version(spa) < version)
		return (ENOTSUP);

	/*
	 * Set the pending device list so we correctly handle device in-use
	 * checking.
	 */
	sav->sav_pending = dev;
	sav->sav_npending = ndev;

	for (i = 0; i < ndev; i++) {
		if ((error = spa_config_parse(spa, &vd, dev[i], NULL, 0,
		    mode)) != 0)
			goto out;

		if (!vd->vdev_ops->vdev_op_leaf) {
			vdev_free(vd);
			error = EINVAL;
			goto out;
		}

		/*
		 * The L2ARC currently only supports disk devices.
		 */
		if ((strcmp(config, ZPOOL_CONFIG_L2CACHE) == 0) &&
		    strcmp(vd->vdev_ops->vdev_op_type, VDEV_TYPE_DISK) != 0) {
			error = ENOTBLK;
			goto out;
		}

		vd->vdev_top = vd;

		if ((error = vdev_open(vd)) == 0 &&
		    (error = vdev_label_init(vd, crtxg, label)) == 0) {
			VERIFY(nvlist_add_uint64(dev[i], ZPOOL_CONFIG_GUID,
			    vd->vdev_guid) == 0);
		}

		vdev_free(vd);

		if (error &&
		    (mode != VDEV_ALLOC_SPARE && mode != VDEV_ALLOC_L2CACHE))
			goto out;
		else
			error = 0;
	}

out:
	sav->sav_pending = NULL;
	sav->sav_npending = 0;
	return (error);
}

static int
spa_validate_aux(spa_t *spa, nvlist_t *nvroot, uint64_t crtxg, int mode)
{
	int error;

	if ((error = spa_validate_aux_devs(spa, nvroot, crtxg, mode,
	    &spa->spa_spares, ZPOOL_CONFIG_SPARES, SPA_VERSION_SPARES,
	    VDEV_LABEL_SPARE)) != 0) {
		return (error);
	}

	return (spa_validate_aux_devs(spa, nvroot, crtxg, mode,
	    &spa->spa_l2cache, ZPOOL_CONFIG_L2CACHE, SPA_VERSION_L2CACHE,
	    VDEV_LABEL_L2CACHE));
}

static void
spa_set_aux_vdevs(spa_aux_vdev_t *sav, nvlist_t **devs, int ndevs,
    const char *config)
{
	int i;

	if (sav->sav_config != NULL) {
		nvlist_t **olddevs;
		uint_t oldndevs;
		nvlist_t **newdevs;

		/*
		 * Generate new dev list by concatenating with the
		 * current dev list.
1775 */ 1776 VERIFY(nvlist_lookup_nvlist_array(sav->sav_config, config, 1777 &olddevs, &oldndevs) == 0); 1778 1779 newdevs = kmem_alloc(sizeof (void *) * 1780 (ndevs + oldndevs), KM_SLEEP); 1781 for (i = 0; i < oldndevs; i++) 1782 VERIFY(nvlist_dup(olddevs[i], &newdevs[i], 1783 KM_SLEEP) == 0); 1784 for (i = 0; i < ndevs; i++) 1785 VERIFY(nvlist_dup(devs[i], &newdevs[i + oldndevs], 1786 KM_SLEEP) == 0); 1787 1788 VERIFY(nvlist_remove(sav->sav_config, config, 1789 DATA_TYPE_NVLIST_ARRAY) == 0); 1790 1791 VERIFY(nvlist_add_nvlist_array(sav->sav_config, 1792 config, newdevs, ndevs + oldndevs) == 0); 1793 for (i = 0; i < oldndevs + ndevs; i++) 1794 nvlist_free(newdevs[i]); 1795 kmem_free(newdevs, (oldndevs + ndevs) * sizeof (void *)); 1796 } else { 1797 /* 1798 * Generate a new dev list. 1799 */ 1800 VERIFY(nvlist_alloc(&sav->sav_config, NV_UNIQUE_NAME, 1801 KM_SLEEP) == 0); 1802 VERIFY(nvlist_add_nvlist_array(sav->sav_config, config, 1803 devs, ndevs) == 0); 1804 } 1805 } 1806 1807 /* 1808 * Stop and drop level 2 ARC devices 1809 */ 1810 void 1811 spa_l2cache_drop(spa_t *spa) 1812 { 1813 vdev_t *vd; 1814 int i; 1815 spa_aux_vdev_t *sav = &spa->spa_l2cache; 1816 1817 for (i = 0; i < sav->sav_count; i++) { 1818 uint64_t pool; 1819 1820 vd = sav->sav_vdevs[i]; 1821 ASSERT(vd != NULL); 1822 1823 if (spa_mode & FWRITE && 1824 spa_l2cache_exists(vd->vdev_guid, &pool) && pool != 0ULL && 1825 l2arc_vdev_present(vd)) { 1826 l2arc_remove_vdev(vd); 1827 } 1828 if (vd->vdev_isl2cache) 1829 spa_l2cache_remove(vd); 1830 vdev_clear_stats(vd); 1831 (void) vdev_close(vd); 1832 } 1833 } 1834 1835 /* 1836 * Pool Creation 1837 */ 1838 int 1839 spa_create(const char *pool, nvlist_t *nvroot, nvlist_t *props, 1840 const char *history_str) 1841 { 1842 spa_t *spa; 1843 char *altroot = NULL; 1844 vdev_t *rvd; 1845 dsl_pool_t *dp; 1846 dmu_tx_t *tx; 1847 int c, error = 0; 1848 uint64_t txg = TXG_INITIAL; 1849 nvlist_t **spares, **l2cache; 1850 uint_t nspares, nl2cache; 1851 uint64_t version; 1852 1853 /* 1854 * If this pool already exists, return failure. 1855 */ 1856 mutex_enter(&spa_namespace_lock); 1857 if (spa_lookup(pool) != NULL) { 1858 mutex_exit(&spa_namespace_lock); 1859 return (EEXIST); 1860 } 1861 1862 /* 1863 * Allocate a new spa_t structure. 1864 */ 1865 (void) nvlist_lookup_string(props, 1866 zpool_prop_to_name(ZPOOL_PROP_ALTROOT), &altroot); 1867 spa = spa_add(pool, altroot); 1868 spa_activate(spa); 1869 1870 spa->spa_uberblock.ub_txg = txg - 1; 1871 1872 if (props && (error = spa_prop_validate(spa, props))) { 1873 spa_unload(spa); 1874 spa_deactivate(spa); 1875 spa_remove(spa); 1876 mutex_exit(&spa_namespace_lock); 1877 return (error); 1878 } 1879 1880 if (nvlist_lookup_uint64(props, zpool_prop_to_name(ZPOOL_PROP_VERSION), 1881 &version) != 0) 1882 version = SPA_VERSION; 1883 ASSERT(version <= SPA_VERSION); 1884 spa->spa_uberblock.ub_version = version; 1885 spa->spa_ubsync = spa->spa_uberblock; 1886 1887 /* 1888 * Create the root vdev. 
1889 */ 1890 spa_config_enter(spa, RW_WRITER, FTAG); 1891 1892 error = spa_config_parse(spa, &rvd, nvroot, NULL, 0, VDEV_ALLOC_ADD); 1893 1894 ASSERT(error != 0 || rvd != NULL); 1895 ASSERT(error != 0 || spa->spa_root_vdev == rvd); 1896 1897 if (error == 0 && !zfs_allocatable_devs(nvroot)) 1898 error = EINVAL; 1899 1900 if (error == 0 && 1901 (error = vdev_create(rvd, txg, B_FALSE)) == 0 && 1902 (error = spa_validate_aux(spa, nvroot, txg, 1903 VDEV_ALLOC_ADD)) == 0) { 1904 for (c = 0; c < rvd->vdev_children; c++) 1905 vdev_init(rvd->vdev_child[c], txg); 1906 vdev_config_dirty(rvd); 1907 } 1908 1909 spa_config_exit(spa, FTAG); 1910 1911 if (error != 0) { 1912 spa_unload(spa); 1913 spa_deactivate(spa); 1914 spa_remove(spa); 1915 mutex_exit(&spa_namespace_lock); 1916 return (error); 1917 } 1918 1919 /* 1920 * Get the list of spares, if specified. 1921 */ 1922 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES, 1923 &spares, &nspares) == 0) { 1924 VERIFY(nvlist_alloc(&spa->spa_spares.sav_config, NV_UNIQUE_NAME, 1925 KM_SLEEP) == 0); 1926 VERIFY(nvlist_add_nvlist_array(spa->spa_spares.sav_config, 1927 ZPOOL_CONFIG_SPARES, spares, nspares) == 0); 1928 spa_config_enter(spa, RW_WRITER, FTAG); 1929 spa_load_spares(spa); 1930 spa_config_exit(spa, FTAG); 1931 spa->spa_spares.sav_sync = B_TRUE; 1932 } 1933 1934 /* 1935 * Get the list of level 2 cache devices, if specified. 1936 */ 1937 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE, 1938 &l2cache, &nl2cache) == 0) { 1939 VERIFY(nvlist_alloc(&spa->spa_l2cache.sav_config, 1940 NV_UNIQUE_NAME, KM_SLEEP) == 0); 1941 VERIFY(nvlist_add_nvlist_array(spa->spa_l2cache.sav_config, 1942 ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache) == 0); 1943 spa_config_enter(spa, RW_WRITER, FTAG); 1944 spa_load_l2cache(spa); 1945 spa_config_exit(spa, FTAG); 1946 spa->spa_l2cache.sav_sync = B_TRUE; 1947 } 1948 1949 spa->spa_dsl_pool = dp = dsl_pool_create(spa, txg); 1950 spa->spa_meta_objset = dp->dp_meta_objset; 1951 1952 tx = dmu_tx_create_assigned(dp, txg); 1953 1954 /* 1955 * Create the pool config object. 1956 */ 1957 spa->spa_config_object = dmu_object_alloc(spa->spa_meta_objset, 1958 DMU_OT_PACKED_NVLIST, 1 << 14, 1959 DMU_OT_PACKED_NVLIST_SIZE, sizeof (uint64_t), tx); 1960 1961 if (zap_add(spa->spa_meta_objset, 1962 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_CONFIG, 1963 sizeof (uint64_t), 1, &spa->spa_config_object, tx) != 0) { 1964 cmn_err(CE_PANIC, "failed to add pool config"); 1965 } 1966 1967 /* Newly created pools with the right version are always deflated. */ 1968 if (version >= SPA_VERSION_RAIDZ_DEFLATE) { 1969 spa->spa_deflate = TRUE; 1970 if (zap_add(spa->spa_meta_objset, 1971 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_DEFLATE, 1972 sizeof (uint64_t), 1, &spa->spa_deflate, tx) != 0) { 1973 cmn_err(CE_PANIC, "failed to add deflate"); 1974 } 1975 } 1976 1977 /* 1978 * Create the deferred-free bplist object. Turn off compression 1979 * because sync-to-convergence takes longer if the blocksize 1980 * keeps changing. 1981 */ 1982 spa->spa_sync_bplist_obj = bplist_create(spa->spa_meta_objset, 1983 1 << 14, tx); 1984 dmu_object_set_compress(spa->spa_meta_objset, spa->spa_sync_bplist_obj, 1985 ZIO_COMPRESS_OFF, tx); 1986 1987 if (zap_add(spa->spa_meta_objset, 1988 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_SYNC_BPLIST, 1989 sizeof (uint64_t), 1, &spa->spa_sync_bplist_obj, tx) != 0) { 1990 cmn_err(CE_PANIC, "failed to add bplist"); 1991 } 1992 1993 /* 1994 * Create the pool's history object. 
1995 */ 1996 if (version >= SPA_VERSION_ZPOOL_HISTORY) 1997 spa_history_create_obj(spa, tx); 1998 1999 /* 2000 * Set pool properties. 2001 */ 2002 spa->spa_bootfs = zpool_prop_default_numeric(ZPOOL_PROP_BOOTFS); 2003 spa->spa_delegation = zpool_prop_default_numeric(ZPOOL_PROP_DELEGATION); 2004 spa->spa_failmode = zpool_prop_default_numeric(ZPOOL_PROP_FAILUREMODE); 2005 if (props) 2006 spa_sync_props(spa, props, CRED(), tx); 2007 2008 dmu_tx_commit(tx); 2009 2010 spa->spa_sync_on = B_TRUE; 2011 txg_sync_start(spa->spa_dsl_pool); 2012 2013 /* 2014 * We explicitly wait for the first transaction to complete so that our 2015 * bean counters are appropriately updated. 2016 */ 2017 txg_wait_synced(spa->spa_dsl_pool, txg); 2018 2019 spa_config_sync(spa, B_FALSE, B_TRUE); 2020 2021 if (version >= SPA_VERSION_ZPOOL_HISTORY && history_str != NULL) 2022 (void) spa_history_log(spa, history_str, LOG_CMD_POOL_CREATE); 2023 2024 mutex_exit(&spa_namespace_lock); 2025 2026 return (0); 2027 } 2028 2029 /* 2030 * Import the given pool into the system. We set up the necessary spa_t and 2031 * then call spa_load() to do the dirty work. 2032 */ 2033 static int 2034 spa_import_common(const char *pool, nvlist_t *config, nvlist_t *props, 2035 boolean_t isroot, boolean_t allowfaulted) 2036 { 2037 spa_t *spa; 2038 char *altroot = NULL; 2039 int error, loaderr; 2040 nvlist_t *nvroot; 2041 nvlist_t **spares, **l2cache; 2042 uint_t nspares, nl2cache; 2043 int mosconfig = isroot? B_FALSE : B_TRUE; 2044 2045 /* 2046 * If a pool with this name exists, return failure. 2047 */ 2048 mutex_enter(&spa_namespace_lock); 2049 if (spa_lookup(pool) != NULL) { 2050 mutex_exit(&spa_namespace_lock); 2051 return (EEXIST); 2052 } 2053 2054 /* 2055 * Create and initialize the spa structure. 2056 */ 2057 (void) nvlist_lookup_string(props, 2058 zpool_prop_to_name(ZPOOL_PROP_ALTROOT), &altroot); 2059 spa = spa_add(pool, altroot); 2060 spa_activate(spa); 2061 2062 if (allowfaulted) 2063 spa->spa_import_faulted = B_TRUE; 2064 spa->spa_is_root = isroot; 2065 2066 /* 2067 * Pass off the heavy lifting to spa_load(). 2068 * Pass TRUE for mosconfig because the user-supplied config 2069 * is actually the one to trust when doing an import. 2070 */ 2071 loaderr = error = spa_load(spa, config, SPA_LOAD_IMPORT, mosconfig); 2072 2073 spa_config_enter(spa, RW_WRITER, FTAG); 2074 /* 2075 * Toss any existing sparelist, as it doesn't have any validity anymore, 2076 * and conflicts with spa_has_spare(). 2077 */ 2078 if (!isroot && spa->spa_spares.sav_config) { 2079 nvlist_free(spa->spa_spares.sav_config); 2080 spa->spa_spares.sav_config = NULL; 2081 spa_load_spares(spa); 2082 } 2083 if (!isroot && spa->spa_l2cache.sav_config) { 2084 nvlist_free(spa->spa_l2cache.sav_config); 2085 spa->spa_l2cache.sav_config = NULL; 2086 spa_load_l2cache(spa); 2087 } 2088 2089 VERIFY(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, 2090 &nvroot) == 0); 2091 if (error == 0) 2092 error = spa_validate_aux(spa, nvroot, -1ULL, VDEV_ALLOC_SPARE); 2093 if (error == 0) 2094 error = spa_validate_aux(spa, nvroot, -1ULL, 2095 VDEV_ALLOC_L2CACHE); 2096 spa_config_exit(spa, FTAG); 2097 2098 if (error != 0 || (props && (error = spa_prop_set(spa, props)))) { 2099 if (loaderr != 0 && loaderr != EINVAL && allowfaulted) { 2100 /* 2101 * If we failed to load the pool, but 'allowfaulted' is 2102 * set, then manually set the config as if the config 2103 * passed in was specified in the cache file. 
2104 */ 2105 error = 0; 2106 spa->spa_import_faulted = B_FALSE; 2107 if (spa->spa_config == NULL) { 2108 spa_config_enter(spa, RW_READER, FTAG); 2109 spa->spa_config = spa_config_generate(spa, 2110 NULL, -1ULL, B_TRUE); 2111 spa_config_exit(spa, FTAG); 2112 } 2113 spa_unload(spa); 2114 spa_deactivate(spa); 2115 spa_config_sync(spa, B_FALSE, B_TRUE); 2116 } else { 2117 spa_unload(spa); 2118 spa_deactivate(spa); 2119 spa_remove(spa); 2120 } 2121 mutex_exit(&spa_namespace_lock); 2122 return (error); 2123 } 2124 2125 /* 2126 * Override any spares and level 2 cache devices as specified by 2127 * the user, as these may have correct device names/devids, etc. 2128 */ 2129 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES, 2130 &spares, &nspares) == 0) { 2131 if (spa->spa_spares.sav_config) 2132 VERIFY(nvlist_remove(spa->spa_spares.sav_config, 2133 ZPOOL_CONFIG_SPARES, DATA_TYPE_NVLIST_ARRAY) == 0); 2134 else 2135 VERIFY(nvlist_alloc(&spa->spa_spares.sav_config, 2136 NV_UNIQUE_NAME, KM_SLEEP) == 0); 2137 VERIFY(nvlist_add_nvlist_array(spa->spa_spares.sav_config, 2138 ZPOOL_CONFIG_SPARES, spares, nspares) == 0); 2139 spa_config_enter(spa, RW_WRITER, FTAG); 2140 spa_load_spares(spa); 2141 spa_config_exit(spa, FTAG); 2142 spa->spa_spares.sav_sync = B_TRUE; 2143 } 2144 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE, 2145 &l2cache, &nl2cache) == 0) { 2146 if (spa->spa_l2cache.sav_config) 2147 VERIFY(nvlist_remove(spa->spa_l2cache.sav_config, 2148 ZPOOL_CONFIG_L2CACHE, DATA_TYPE_NVLIST_ARRAY) == 0); 2149 else 2150 VERIFY(nvlist_alloc(&spa->spa_l2cache.sav_config, 2151 NV_UNIQUE_NAME, KM_SLEEP) == 0); 2152 VERIFY(nvlist_add_nvlist_array(spa->spa_l2cache.sav_config, 2153 ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache) == 0); 2154 spa_config_enter(spa, RW_WRITER, FTAG); 2155 spa_load_l2cache(spa); 2156 spa_config_exit(spa, FTAG); 2157 spa->spa_l2cache.sav_sync = B_TRUE; 2158 } 2159 2160 if (spa_mode & FWRITE) { 2161 /* 2162 * Update the config cache to include the newly-imported pool. 2163 */ 2164 spa_config_update_common(spa, SPA_CONFIG_UPDATE_POOL, isroot); 2165 2166 /* 2167 * Resilver anything that's out of date. 2168 */ 2169 if (!isroot) 2170 VERIFY(spa_scrub(spa, POOL_SCRUB_RESILVER, 2171 B_TRUE) == 0); 2172 } 2173 2174 spa->spa_import_faulted = B_FALSE; 2175 mutex_exit(&spa_namespace_lock); 2176 2177 return (0); 2178 } 2179 2180 #ifdef _KERNEL 2181 /* 2182 * Build a "root" vdev for a top level vdev read in from a rootpool 2183 * device label. 2184 */ 2185 static void 2186 spa_build_rootpool_config(nvlist_t *config) 2187 { 2188 nvlist_t *nvtop, *nvroot; 2189 uint64_t pgid; 2190 2191 /* 2192 * Add this top-level vdev to the child array. 2193 */ 2194 VERIFY(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &nvtop) 2195 == 0); 2196 VERIFY(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID, &pgid) 2197 == 0); 2198 2199 /* 2200 * Put this pool's top-level vdevs into a root vdev. 2201 */ 2202 VERIFY(nvlist_alloc(&nvroot, NV_UNIQUE_NAME, KM_SLEEP) == 0); 2203 VERIFY(nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE, VDEV_TYPE_ROOT) 2204 == 0); 2205 VERIFY(nvlist_add_uint64(nvroot, ZPOOL_CONFIG_ID, 0ULL) == 0); 2206 VERIFY(nvlist_add_uint64(nvroot, ZPOOL_CONFIG_GUID, pgid) == 0); 2207 VERIFY(nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN, 2208 &nvtop, 1) == 0); 2209 2210 /* 2211 * Replace the existing vdev_tree with the new root vdev in 2212 * this pool's configuration (remove the old, add the new). 
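 * (The nvlist_add_nvlist() below replaces any existing
 * ZPOOL_CONFIG_VDEV_TREE pair, since the config nvlist carries unique
 * names, so no separate nvlist_remove() is needed.)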
2213 */
2214 VERIFY(nvlist_add_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, nvroot) == 0);
2215 nvlist_free(nvroot);
2216 }
2217
2218 /*
2219 * Get the root pool information from the root disk, then import the root pool
2220 * during the system boot up time.
2221 */
2222 extern nvlist_t *vdev_disk_read_rootlabel(char *);
2223
2224 void
2225 spa_check_rootconf(char *devpath, char **bestdev, nvlist_t **bestconf,
2226 uint64_t *besttxg)
2227 {
2228 nvlist_t *config;
2229 uint64_t txg;
2230
2231 if ((config = vdev_disk_read_rootlabel(devpath)) == NULL)
2232 return;
2233
2234 VERIFY(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_TXG, &txg) == 0);
2235
2236 if (txg > *besttxg) {
2237 *besttxg = txg;
2238 if (*bestconf != NULL)
2239 nvlist_free(*bestconf);
2240 *bestconf = config;
2241 *bestdev = devpath;
2242 }
2243 }
2244
2245 boolean_t
2246 spa_rootdev_validate(nvlist_t *nv)
2247 {
2248 uint64_t ival;
2249
2250 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_OFFLINE, &ival) == 0 ||
2251 nvlist_lookup_uint64(nv, ZPOOL_CONFIG_FAULTED, &ival) == 0 ||
2252 nvlist_lookup_uint64(nv, ZPOOL_CONFIG_DEGRADED, &ival) == 0 ||
2253 nvlist_lookup_uint64(nv, ZPOOL_CONFIG_REMOVED, &ival) == 0)
2254 return (B_FALSE);
2255
2256 return (B_TRUE);
2257 }
2258
2259 /*
2260 * Import a root pool.
2261 *
2262 * For x86, devpath_list will consist of the physpath name of the vdev in a single
2263 * disk root pool or a list of physnames for the vdevs in a mirrored rootpool.
2264 * e.g.
2265 * "/pci@1f,0/ide@d/disk@0,0:a /pci@1f,0/ide@d/disk@2,0:a"
2266 *
2267 * For Sparc, devpath_list consists of the physpath name of the booting device
2268 * no matter whether the rootpool is a single device pool or a mirrored pool.
2269 * e.g.
2270 * "/pci@1f,0/ide@d/disk@0,0:a"
2271 */
2272 int
2273 spa_import_rootpool(char *devpath_list)
2274 {
2275 nvlist_t *conf = NULL;
2276 char *dev = NULL;
2277 char *pname;
2278 int error;
2279
2280 /*
2281 * Get the vdev pathname and configuration from the most
2282 * recently updated vdev (highest txg).
2283 */
2284 if (error = spa_get_rootconf(devpath_list, &dev, &conf))
2285 goto msg_out;
2286
2287 /*
2288 * Add type "root" vdev to the config.
2289 */
2290 spa_build_rootpool_config(conf);
2291
2292 VERIFY(nvlist_lookup_string(conf, ZPOOL_CONFIG_POOL_NAME, &pname) == 0);
2293
2294 /*
2295 * We specify 'allowfaulted' for this to be treated like spa_open()
2296 * instead of spa_import(). This prevents us from marking vdevs as
2297 * persistently unavailable, and generates FMA ereports as if it were a
2298 * pool open, not import.
2299 */
2300 error = spa_import_common(pname, conf, NULL, B_TRUE, B_TRUE);
2301 if (error == EEXIST)
2302 error = 0;
2303
2304 nvlist_free(conf);
2305 return (error);
2306
2307 msg_out:
2308 cmn_err(CE_NOTE, "\n\n"
2309 " *************************************************** \n"
2310 " * This device is not bootable! * \n"
2311 " * It is either offlined or detached or faulted. * \n"
2312 " * Please try to boot from a different device. * \n"
2313 " *************************************************** \n\n");
2314
2315 return (error);
2316 }
2317 #endif
2318
2319 /*
2320 * Import a non-root pool into the system.
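 * spa_import() and spa_import_faulted() below are thin wrappers around
 * spa_import_common(); both pass isroot = B_FALSE, and they differ only in
 * whether a faulted pool may be imported (allowfaulted).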
2321 */ 2322 int 2323 spa_import(const char *pool, nvlist_t *config, nvlist_t *props) 2324 { 2325 return (spa_import_common(pool, config, props, B_FALSE, B_FALSE)); 2326 } 2327 2328 int 2329 spa_import_faulted(const char *pool, nvlist_t *config, nvlist_t *props) 2330 { 2331 return (spa_import_common(pool, config, props, B_FALSE, B_TRUE)); 2332 } 2333 2334 2335 /* 2336 * This (illegal) pool name is used when temporarily importing a spa_t in order 2337 * to get the vdev stats associated with the imported devices. 2338 */ 2339 #define TRYIMPORT_NAME "$import" 2340 2341 nvlist_t * 2342 spa_tryimport(nvlist_t *tryconfig) 2343 { 2344 nvlist_t *config = NULL; 2345 char *poolname; 2346 spa_t *spa; 2347 uint64_t state; 2348 2349 if (nvlist_lookup_string(tryconfig, ZPOOL_CONFIG_POOL_NAME, &poolname)) 2350 return (NULL); 2351 2352 if (nvlist_lookup_uint64(tryconfig, ZPOOL_CONFIG_POOL_STATE, &state)) 2353 return (NULL); 2354 2355 /* 2356 * Create and initialize the spa structure. 2357 */ 2358 mutex_enter(&spa_namespace_lock); 2359 spa = spa_add(TRYIMPORT_NAME, NULL); 2360 spa_activate(spa); 2361 2362 /* 2363 * Pass off the heavy lifting to spa_load(). 2364 * Pass TRUE for mosconfig because the user-supplied config 2365 * is actually the one to trust when doing an import. 2366 */ 2367 (void) spa_load(spa, tryconfig, SPA_LOAD_TRYIMPORT, B_TRUE); 2368 2369 /* 2370 * If 'tryconfig' was at least parsable, return the current config. 2371 */ 2372 if (spa->spa_root_vdev != NULL) { 2373 spa_config_enter(spa, RW_READER, FTAG); 2374 config = spa_config_generate(spa, NULL, -1ULL, B_TRUE); 2375 spa_config_exit(spa, FTAG); 2376 VERIFY(nvlist_add_string(config, ZPOOL_CONFIG_POOL_NAME, 2377 poolname) == 0); 2378 VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_POOL_STATE, 2379 state) == 0); 2380 VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_TIMESTAMP, 2381 spa->spa_uberblock.ub_timestamp) == 0); 2382 2383 /* 2384 * If the bootfs property exists on this pool then we 2385 * copy it out so that external consumers can tell which 2386 * pools are bootable. 2387 */ 2388 if (spa->spa_bootfs) { 2389 char *tmpname = kmem_alloc(MAXPATHLEN, KM_SLEEP); 2390 2391 /* 2392 * We have to play games with the name since the 2393 * pool was opened as TRYIMPORT_NAME. 2394 */ 2395 if (dsl_dsobj_to_dsname(spa->spa_name, 2396 spa->spa_bootfs, tmpname) == 0) { 2397 char *cp; 2398 char *dsname = kmem_alloc(MAXPATHLEN, KM_SLEEP); 2399 2400 cp = strchr(tmpname, '/'); 2401 if (cp == NULL) { 2402 (void) strlcpy(dsname, tmpname, 2403 MAXPATHLEN); 2404 } else { 2405 (void) snprintf(dsname, MAXPATHLEN, 2406 "%s/%s", poolname, ++cp); 2407 } 2408 VERIFY(nvlist_add_string(config, 2409 ZPOOL_CONFIG_BOOTFS, dsname) == 0); 2410 kmem_free(dsname, MAXPATHLEN); 2411 } 2412 kmem_free(tmpname, MAXPATHLEN); 2413 } 2414 2415 /* 2416 * Add the list of hot spares and level 2 cache devices. 2417 */ 2418 spa_add_spares(spa, config); 2419 spa_add_l2cache(spa, config); 2420 } 2421 2422 spa_unload(spa); 2423 spa_deactivate(spa); 2424 spa_remove(spa); 2425 mutex_exit(&spa_namespace_lock); 2426 2427 return (config); 2428 } 2429 2430 /* 2431 * Pool export/destroy 2432 * 2433 * The act of destroying or exporting a pool is very simple. We make sure there 2434 * is no more pending I/O and any references to the pool are gone. Then, we 2435 * update the pool state and sync all the labels to disk, removing the 2436 * configuration from the cache afterwards. 
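 * spa_destroy(), spa_export() and spa_reset() below are all wrappers around
 * spa_export_common(); they differ only in the new_state they request
 * (POOL_STATE_DESTROYED, POOL_STATE_EXPORTED or POOL_STATE_UNINITIALIZED)
 * and in whether the old config is handed back to the caller.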
2437 */ 2438 static int 2439 spa_export_common(char *pool, int new_state, nvlist_t **oldconfig) 2440 { 2441 spa_t *spa; 2442 2443 if (oldconfig) 2444 *oldconfig = NULL; 2445 2446 if (!(spa_mode & FWRITE)) 2447 return (EROFS); 2448 2449 mutex_enter(&spa_namespace_lock); 2450 if ((spa = spa_lookup(pool)) == NULL) { 2451 mutex_exit(&spa_namespace_lock); 2452 return (ENOENT); 2453 } 2454 2455 /* 2456 * Put a hold on the pool, drop the namespace lock, stop async tasks, 2457 * reacquire the namespace lock, and see if we can export. 2458 */ 2459 spa_open_ref(spa, FTAG); 2460 mutex_exit(&spa_namespace_lock); 2461 spa_async_suspend(spa); 2462 mutex_enter(&spa_namespace_lock); 2463 spa_close(spa, FTAG); 2464 2465 /* 2466 * The pool will be in core if it's openable, 2467 * in which case we can modify its state. 2468 */ 2469 if (spa->spa_state != POOL_STATE_UNINITIALIZED && spa->spa_sync_on) { 2470 /* 2471 * Objsets may be open only because they're dirty, so we 2472 * have to force it to sync before checking spa_refcnt. 2473 */ 2474 spa_scrub_suspend(spa); 2475 txg_wait_synced(spa->spa_dsl_pool, 0); 2476 2477 /* 2478 * A pool cannot be exported or destroyed if there are active 2479 * references. If we are resetting a pool, allow references by 2480 * fault injection handlers. 2481 */ 2482 if (!spa_refcount_zero(spa) || 2483 (spa->spa_inject_ref != 0 && 2484 new_state != POOL_STATE_UNINITIALIZED)) { 2485 spa_scrub_resume(spa); 2486 spa_async_resume(spa); 2487 mutex_exit(&spa_namespace_lock); 2488 return (EBUSY); 2489 } 2490 2491 spa_scrub_resume(spa); 2492 VERIFY(spa_scrub(spa, POOL_SCRUB_NONE, B_TRUE) == 0); 2493 2494 /* 2495 * We want this to be reflected on every label, 2496 * so mark them all dirty. spa_unload() will do the 2497 * final sync that pushes these changes out. 2498 */ 2499 if (new_state != POOL_STATE_UNINITIALIZED) { 2500 spa_config_enter(spa, RW_WRITER, FTAG); 2501 spa->spa_state = new_state; 2502 spa->spa_final_txg = spa_last_synced_txg(spa) + 1; 2503 vdev_config_dirty(spa->spa_root_vdev); 2504 spa_config_exit(spa, FTAG); 2505 } 2506 } 2507 2508 spa_event_notify(spa, NULL, ESC_ZFS_POOL_DESTROY); 2509 2510 if (spa->spa_state != POOL_STATE_UNINITIALIZED) { 2511 spa_unload(spa); 2512 spa_deactivate(spa); 2513 } 2514 2515 if (oldconfig && spa->spa_config) 2516 VERIFY(nvlist_dup(spa->spa_config, oldconfig, 0) == 0); 2517 2518 if (new_state != POOL_STATE_UNINITIALIZED) { 2519 spa_config_sync(spa, B_TRUE, B_TRUE); 2520 spa_remove(spa); 2521 } 2522 mutex_exit(&spa_namespace_lock); 2523 2524 return (0); 2525 } 2526 2527 /* 2528 * Destroy a storage pool. 2529 */ 2530 int 2531 spa_destroy(char *pool) 2532 { 2533 return (spa_export_common(pool, POOL_STATE_DESTROYED, NULL)); 2534 } 2535 2536 /* 2537 * Export a storage pool. 2538 */ 2539 int 2540 spa_export(char *pool, nvlist_t **oldconfig) 2541 { 2542 return (spa_export_common(pool, POOL_STATE_EXPORTED, oldconfig)); 2543 } 2544 2545 /* 2546 * Similar to spa_export(), this unloads the spa_t without actually removing it 2547 * from the namespace in any way. 2548 */ 2549 int 2550 spa_reset(char *pool) 2551 { 2552 return (spa_export_common(pool, POOL_STATE_UNINITIALIZED, NULL)); 2553 } 2554 2555 2556 /* 2557 * ========================================================================== 2558 * Device manipulation 2559 * ========================================================================== 2560 */ 2561 2562 /* 2563 * Add a device to a storage pool. 
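 * The nvroot passed in may describe new top-level vdevs as well as hot
 * spares and level 2 cache devices; each class is validated and attached
 * separately below.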
2564 */ 2565 int 2566 spa_vdev_add(spa_t *spa, nvlist_t *nvroot) 2567 { 2568 uint64_t txg; 2569 int c, error; 2570 vdev_t *rvd = spa->spa_root_vdev; 2571 vdev_t *vd, *tvd; 2572 nvlist_t **spares, **l2cache; 2573 uint_t nspares, nl2cache; 2574 2575 txg = spa_vdev_enter(spa); 2576 2577 if ((error = spa_config_parse(spa, &vd, nvroot, NULL, 0, 2578 VDEV_ALLOC_ADD)) != 0) 2579 return (spa_vdev_exit(spa, NULL, txg, error)); 2580 2581 spa->spa_pending_vdev = vd; 2582 2583 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES, &spares, 2584 &nspares) != 0) 2585 nspares = 0; 2586 2587 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE, &l2cache, 2588 &nl2cache) != 0) 2589 nl2cache = 0; 2590 2591 if (vd->vdev_children == 0 && nspares == 0 && nl2cache == 0) { 2592 spa->spa_pending_vdev = NULL; 2593 return (spa_vdev_exit(spa, vd, txg, EINVAL)); 2594 } 2595 2596 if (vd->vdev_children != 0) { 2597 if ((error = vdev_create(vd, txg, B_FALSE)) != 0) { 2598 spa->spa_pending_vdev = NULL; 2599 return (spa_vdev_exit(spa, vd, txg, error)); 2600 } 2601 } 2602 2603 /* 2604 * We must validate the spares and l2cache devices after checking the 2605 * children. Otherwise, vdev_inuse() will blindly overwrite the spare. 2606 */ 2607 if ((error = spa_validate_aux(spa, nvroot, txg, VDEV_ALLOC_ADD)) != 0) { 2608 spa->spa_pending_vdev = NULL; 2609 return (spa_vdev_exit(spa, vd, txg, error)); 2610 } 2611 2612 spa->spa_pending_vdev = NULL; 2613 2614 /* 2615 * Transfer each new top-level vdev from vd to rvd. 2616 */ 2617 for (c = 0; c < vd->vdev_children; c++) { 2618 tvd = vd->vdev_child[c]; 2619 vdev_remove_child(vd, tvd); 2620 tvd->vdev_id = rvd->vdev_children; 2621 vdev_add_child(rvd, tvd); 2622 vdev_config_dirty(tvd); 2623 } 2624 2625 if (nspares != 0) { 2626 spa_set_aux_vdevs(&spa->spa_spares, spares, nspares, 2627 ZPOOL_CONFIG_SPARES); 2628 spa_load_spares(spa); 2629 spa->spa_spares.sav_sync = B_TRUE; 2630 } 2631 2632 if (nl2cache != 0) { 2633 spa_set_aux_vdevs(&spa->spa_l2cache, l2cache, nl2cache, 2634 ZPOOL_CONFIG_L2CACHE); 2635 spa_load_l2cache(spa); 2636 spa->spa_l2cache.sav_sync = B_TRUE; 2637 } 2638 2639 /* 2640 * We have to be careful when adding new vdevs to an existing pool. 2641 * If other threads start allocating from these vdevs before we 2642 * sync the config cache, and we lose power, then upon reboot we may 2643 * fail to open the pool because there are DVAs that the config cache 2644 * can't translate. Therefore, we first add the vdevs without 2645 * initializing metaslabs; sync the config cache (via spa_vdev_exit()); 2646 * and then let spa_config_update() initialize the new metaslabs. 2647 * 2648 * spa_load() checks for added-but-not-initialized vdevs, so that 2649 * if we lose power at any point in this sequence, the remaining 2650 * steps will be completed the next time we load the pool. 2651 */ 2652 (void) spa_vdev_exit(spa, vd, txg, 0); 2653 2654 mutex_enter(&spa_namespace_lock); 2655 spa_config_update(spa, SPA_CONFIG_UPDATE_POOL); 2656 mutex_exit(&spa_namespace_lock); 2657 2658 return (0); 2659 } 2660 2661 /* 2662 * Attach a device to a mirror. The arguments are the path to any device 2663 * in the mirror, and the nvroot for the new device. If the path specifies 2664 * a device that is not mirrored, we automatically insert the mirror vdev. 
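 * (That is, attaching to a plain, unmirrored disk converts it into a
 * two-way mirror.)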
2665 * 2666 * If 'replacing' is specified, the new device is intended to replace the 2667 * existing device; in this case the two devices are made into their own 2668 * mirror using the 'replacing' vdev, which is functionally identical to 2669 * the mirror vdev (it actually reuses all the same ops) but has a few 2670 * extra rules: you can't attach to it after it's been created, and upon 2671 * completion of resilvering, the first disk (the one being replaced) 2672 * is automatically detached. 2673 */ 2674 int 2675 spa_vdev_attach(spa_t *spa, uint64_t guid, nvlist_t *nvroot, int replacing) 2676 { 2677 uint64_t txg, open_txg; 2678 int error; 2679 vdev_t *rvd = spa->spa_root_vdev; 2680 vdev_t *oldvd, *newvd, *newrootvd, *pvd, *tvd; 2681 vdev_ops_t *pvops; 2682 int is_log; 2683 2684 txg = spa_vdev_enter(spa); 2685 2686 oldvd = spa_lookup_by_guid(spa, guid, B_FALSE); 2687 2688 if (oldvd == NULL) 2689 return (spa_vdev_exit(spa, NULL, txg, ENODEV)); 2690 2691 if (!oldvd->vdev_ops->vdev_op_leaf) 2692 return (spa_vdev_exit(spa, NULL, txg, ENOTSUP)); 2693 2694 pvd = oldvd->vdev_parent; 2695 2696 if ((error = spa_config_parse(spa, &newrootvd, nvroot, NULL, 0, 2697 VDEV_ALLOC_ADD)) != 0) 2698 return (spa_vdev_exit(spa, NULL, txg, EINVAL)); 2699 2700 if (newrootvd->vdev_children != 1) 2701 return (spa_vdev_exit(spa, newrootvd, txg, EINVAL)); 2702 2703 newvd = newrootvd->vdev_child[0]; 2704 2705 if (!newvd->vdev_ops->vdev_op_leaf) 2706 return (spa_vdev_exit(spa, newrootvd, txg, EINVAL)); 2707 2708 if ((error = vdev_create(newrootvd, txg, replacing)) != 0) 2709 return (spa_vdev_exit(spa, newrootvd, txg, error)); 2710 2711 /* 2712 * Spares can't replace logs 2713 */ 2714 is_log = oldvd->vdev_islog; 2715 if (is_log && newvd->vdev_isspare) 2716 return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP)); 2717 2718 if (!replacing) { 2719 /* 2720 * For attach, the only allowable parent is a mirror or the root 2721 * vdev. 2722 */ 2723 if (pvd->vdev_ops != &vdev_mirror_ops && 2724 pvd->vdev_ops != &vdev_root_ops) 2725 return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP)); 2726 2727 pvops = &vdev_mirror_ops; 2728 } else { 2729 /* 2730 * Active hot spares can only be replaced by inactive hot 2731 * spares. 2732 */ 2733 if (pvd->vdev_ops == &vdev_spare_ops && 2734 pvd->vdev_child[1] == oldvd && 2735 !spa_has_spare(spa, newvd->vdev_guid)) 2736 return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP)); 2737 2738 /* 2739 * If the source is a hot spare, and the parent isn't already a 2740 * spare, then we want to create a new hot spare. Otherwise, we 2741 * want to create a replacing vdev. The user is not allowed to 2742 * attach to a spared vdev child unless the 'isspare' state is 2743 * the same (spare replaces spare, non-spare replaces 2744 * non-spare). 2745 */ 2746 if (pvd->vdev_ops == &vdev_replacing_ops) 2747 return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP)); 2748 else if (pvd->vdev_ops == &vdev_spare_ops && 2749 newvd->vdev_isspare != oldvd->vdev_isspare) 2750 return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP)); 2751 else if (pvd->vdev_ops != &vdev_spare_ops && 2752 newvd->vdev_isspare) 2753 pvops = &vdev_spare_ops; 2754 else 2755 pvops = &vdev_replacing_ops; 2756 } 2757 2758 /* 2759 * Compare the new device size with the replaceable/attachable 2760 * device size. 2761 */ 2762 if (newvd->vdev_psize < vdev_get_rsize(oldvd)) 2763 return (spa_vdev_exit(spa, newrootvd, txg, EOVERFLOW)); 2764 2765 /* 2766 * The new device cannot have a higher alignment requirement 2767 * than the top-level vdev. 
2768 */ 2769 if (newvd->vdev_ashift > oldvd->vdev_top->vdev_ashift) 2770 return (spa_vdev_exit(spa, newrootvd, txg, EDOM)); 2771 2772 /* 2773 * If this is an in-place replacement, update oldvd's path and devid 2774 * to make it distinguishable from newvd, and unopenable from now on. 2775 */ 2776 if (strcmp(oldvd->vdev_path, newvd->vdev_path) == 0) { 2777 spa_strfree(oldvd->vdev_path); 2778 oldvd->vdev_path = kmem_alloc(strlen(newvd->vdev_path) + 5, 2779 KM_SLEEP); 2780 (void) sprintf(oldvd->vdev_path, "%s/%s", 2781 newvd->vdev_path, "old"); 2782 if (oldvd->vdev_devid != NULL) { 2783 spa_strfree(oldvd->vdev_devid); 2784 oldvd->vdev_devid = NULL; 2785 } 2786 } 2787 2788 /* 2789 * If the parent is not a mirror, or if we're replacing, insert the new 2790 * mirror/replacing/spare vdev above oldvd. 2791 */ 2792 if (pvd->vdev_ops != pvops) 2793 pvd = vdev_add_parent(oldvd, pvops); 2794 2795 ASSERT(pvd->vdev_top->vdev_parent == rvd); 2796 ASSERT(pvd->vdev_ops == pvops); 2797 ASSERT(oldvd->vdev_parent == pvd); 2798 2799 /* 2800 * Extract the new device from its root and add it to pvd. 2801 */ 2802 vdev_remove_child(newrootvd, newvd); 2803 newvd->vdev_id = pvd->vdev_children; 2804 vdev_add_child(pvd, newvd); 2805 2806 /* 2807 * If newvd is smaller than oldvd, but larger than its rsize, 2808 * the addition of newvd may have decreased our parent's asize. 2809 */ 2810 pvd->vdev_asize = MIN(pvd->vdev_asize, newvd->vdev_asize); 2811 2812 tvd = newvd->vdev_top; 2813 ASSERT(pvd->vdev_top == tvd); 2814 ASSERT(tvd->vdev_parent == rvd); 2815 2816 vdev_config_dirty(tvd); 2817 2818 /* 2819 * Set newvd's DTL to [TXG_INITIAL, open_txg]. It will propagate 2820 * upward when spa_vdev_exit() calls vdev_dtl_reassess(). 2821 */ 2822 open_txg = txg + TXG_CONCURRENT_STATES - 1; 2823 2824 mutex_enter(&newvd->vdev_dtl_lock); 2825 space_map_add(&newvd->vdev_dtl_map, TXG_INITIAL, 2826 open_txg - TXG_INITIAL + 1); 2827 mutex_exit(&newvd->vdev_dtl_lock); 2828 2829 if (newvd->vdev_isspare) 2830 spa_spare_activate(newvd); 2831 2832 /* 2833 * Mark newvd's DTL dirty in this txg. 2834 */ 2835 vdev_dirty(tvd, VDD_DTL, newvd, txg); 2836 2837 (void) spa_vdev_exit(spa, newrootvd, open_txg, 0); 2838 2839 /* 2840 * Kick off a resilver to update newvd. We need to grab the namespace 2841 * lock because spa_scrub() needs to post a sysevent with the pool name. 2842 */ 2843 mutex_enter(&spa_namespace_lock); 2844 VERIFY(spa_scrub(spa, POOL_SCRUB_RESILVER, B_TRUE) == 0); 2845 mutex_exit(&spa_namespace_lock); 2846 2847 return (0); 2848 } 2849 2850 /* 2851 * Detach a device from a mirror or replacing vdev. 2852 * If 'replace_done' is specified, only detach if the parent 2853 * is a replacing vdev. 2854 */ 2855 int 2856 spa_vdev_detach(spa_t *spa, uint64_t guid, int replace_done) 2857 { 2858 uint64_t txg; 2859 int c, t, error; 2860 vdev_t *rvd = spa->spa_root_vdev; 2861 vdev_t *vd, *pvd, *cvd, *tvd; 2862 boolean_t unspare = B_FALSE; 2863 uint64_t unspare_guid; 2864 size_t len; 2865 2866 txg = spa_vdev_enter(spa); 2867 2868 vd = spa_lookup_by_guid(spa, guid, B_FALSE); 2869 2870 if (vd == NULL) 2871 return (spa_vdev_exit(spa, NULL, txg, ENODEV)); 2872 2873 if (!vd->vdev_ops->vdev_op_leaf) 2874 return (spa_vdev_exit(spa, NULL, txg, ENOTSUP)); 2875 2876 pvd = vd->vdev_parent; 2877 2878 /* 2879 * If replace_done is specified, only remove this device if it's 2880 * the first child of a replacing vdev. For the 'spare' vdev, either 2881 * disk can be removed. 
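 * (Detaching the spare cancels the spare-in, while detaching the original
 * disk promotes the spare to a permanent replacement; see the 'unspare'
 * handling below.)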
2882 */ 2883 if (replace_done) { 2884 if (pvd->vdev_ops == &vdev_replacing_ops) { 2885 if (vd->vdev_id != 0) 2886 return (spa_vdev_exit(spa, NULL, txg, ENOTSUP)); 2887 } else if (pvd->vdev_ops != &vdev_spare_ops) { 2888 return (spa_vdev_exit(spa, NULL, txg, ENOTSUP)); 2889 } 2890 } 2891 2892 ASSERT(pvd->vdev_ops != &vdev_spare_ops || 2893 spa_version(spa) >= SPA_VERSION_SPARES); 2894 2895 /* 2896 * Only mirror, replacing, and spare vdevs support detach. 2897 */ 2898 if (pvd->vdev_ops != &vdev_replacing_ops && 2899 pvd->vdev_ops != &vdev_mirror_ops && 2900 pvd->vdev_ops != &vdev_spare_ops) 2901 return (spa_vdev_exit(spa, NULL, txg, ENOTSUP)); 2902 2903 /* 2904 * If there's only one replica, you can't detach it. 2905 */ 2906 if (pvd->vdev_children <= 1) 2907 return (spa_vdev_exit(spa, NULL, txg, EBUSY)); 2908 2909 /* 2910 * If all siblings have non-empty DTLs, this device may have the only 2911 * valid copy of the data, which means we cannot safely detach it. 2912 * 2913 * XXX -- as in the vdev_offline() case, we really want a more 2914 * precise DTL check. 2915 */ 2916 for (c = 0; c < pvd->vdev_children; c++) { 2917 uint64_t dirty; 2918 2919 cvd = pvd->vdev_child[c]; 2920 if (cvd == vd) 2921 continue; 2922 if (vdev_is_dead(cvd)) 2923 continue; 2924 mutex_enter(&cvd->vdev_dtl_lock); 2925 dirty = cvd->vdev_dtl_map.sm_space | 2926 cvd->vdev_dtl_scrub.sm_space; 2927 mutex_exit(&cvd->vdev_dtl_lock); 2928 if (!dirty) 2929 break; 2930 } 2931 2932 /* 2933 * If we are a replacing or spare vdev, then we can always detach the 2934 * latter child, as that is how one cancels the operation. 2935 */ 2936 if ((pvd->vdev_ops == &vdev_mirror_ops || vd->vdev_id != 1) && 2937 c == pvd->vdev_children) 2938 return (spa_vdev_exit(spa, NULL, txg, EBUSY)); 2939 2940 /* 2941 * If we are detaching the second disk from a replacing vdev, then 2942 * check to see if we changed the original vdev's path to have "/old" 2943 * at the end in spa_vdev_attach(). If so, undo that change now. 2944 */ 2945 if (pvd->vdev_ops == &vdev_replacing_ops && vd->vdev_id == 1 && 2946 pvd->vdev_child[0]->vdev_path != NULL && 2947 pvd->vdev_child[1]->vdev_path != NULL) { 2948 ASSERT(pvd->vdev_child[1] == vd); 2949 cvd = pvd->vdev_child[0]; 2950 len = strlen(vd->vdev_path); 2951 if (strncmp(cvd->vdev_path, vd->vdev_path, len) == 0 && 2952 strcmp(cvd->vdev_path + len, "/old") == 0) { 2953 spa_strfree(cvd->vdev_path); 2954 cvd->vdev_path = spa_strdup(vd->vdev_path); 2955 } 2956 } 2957 2958 /* 2959 * If we are detaching the original disk from a spare, then it implies 2960 * that the spare should become a real disk, and be removed from the 2961 * active spare list for the pool. 2962 */ 2963 if (pvd->vdev_ops == &vdev_spare_ops && 2964 vd->vdev_id == 0) 2965 unspare = B_TRUE; 2966 2967 /* 2968 * Erase the disk labels so the disk can be used for other things. 2969 * This must be done after all other error cases are handled, 2970 * but before we disembowel vd (so we can still do I/O to it). 2971 * But if we can't do it, don't treat the error as fatal -- 2972 * it may be that the unwritability of the disk is the reason 2973 * it's being detached! 2974 */ 2975 error = vdev_label_init(vd, 0, VDEV_LABEL_REMOVE); 2976 2977 /* 2978 * Remove vd from its parent and compact the parent's children. 2979 */ 2980 vdev_remove_child(pvd, vd); 2981 vdev_compact_children(pvd); 2982 2983 /* 2984 * Remember one of the remaining children so we can get tvd below. 
2985 */ 2986 cvd = pvd->vdev_child[0]; 2987 2988 /* 2989 * If we need to remove the remaining child from the list of hot spares, 2990 * do it now, marking the vdev as no longer a spare in the process. We 2991 * must do this before vdev_remove_parent(), because that can change the 2992 * GUID if it creates a new toplevel GUID. 2993 */ 2994 if (unspare) { 2995 ASSERT(cvd->vdev_isspare); 2996 spa_spare_remove(cvd); 2997 unspare_guid = cvd->vdev_guid; 2998 } 2999 3000 /* 3001 * If the parent mirror/replacing vdev only has one child, 3002 * the parent is no longer needed. Remove it from the tree. 3003 */ 3004 if (pvd->vdev_children == 1) 3005 vdev_remove_parent(cvd); 3006 3007 /* 3008 * We don't set tvd until now because the parent we just removed 3009 * may have been the previous top-level vdev. 3010 */ 3011 tvd = cvd->vdev_top; 3012 ASSERT(tvd->vdev_parent == rvd); 3013 3014 /* 3015 * Reevaluate the parent vdev state. 3016 */ 3017 vdev_propagate_state(cvd); 3018 3019 /* 3020 * If the device we just detached was smaller than the others, it may be 3021 * possible to add metaslabs (i.e. grow the pool). vdev_metaslab_init() 3022 * can't fail because the existing metaslabs are already in core, so 3023 * there's nothing to read from disk. 3024 */ 3025 VERIFY(vdev_metaslab_init(tvd, txg) == 0); 3026 3027 vdev_config_dirty(tvd); 3028 3029 /* 3030 * Mark vd's DTL as dirty in this txg. vdev_dtl_sync() will see that 3031 * vd->vdev_detached is set and free vd's DTL object in syncing context. 3032 * But first make sure we're not on any *other* txg's DTL list, to 3033 * prevent vd from being accessed after it's freed. 3034 */ 3035 for (t = 0; t < TXG_SIZE; t++) 3036 (void) txg_list_remove_this(&tvd->vdev_dtl_list, vd, t); 3037 vd->vdev_detached = B_TRUE; 3038 vdev_dirty(tvd, VDD_DTL, vd, txg); 3039 3040 spa_event_notify(spa, vd, ESC_ZFS_VDEV_REMOVE); 3041 3042 error = spa_vdev_exit(spa, vd, txg, 0); 3043 3044 /* 3045 * If this was the removal of the original device in a hot spare vdev, 3046 * then we want to go through and remove the device from the hot spare 3047 * list of every other pool. 3048 */ 3049 if (unspare) { 3050 spa = NULL; 3051 mutex_enter(&spa_namespace_lock); 3052 while ((spa = spa_next(spa)) != NULL) { 3053 if (spa->spa_state != POOL_STATE_ACTIVE) 3054 continue; 3055 3056 (void) spa_vdev_remove(spa, unspare_guid, B_TRUE); 3057 } 3058 mutex_exit(&spa_namespace_lock); 3059 } 3060 3061 return (error); 3062 } 3063 3064 /* 3065 * Remove a spares vdev from the nvlist config. 3066 */ 3067 static int 3068 spa_remove_spares(spa_aux_vdev_t *sav, uint64_t guid, boolean_t unspare, 3069 nvlist_t **spares, int nspares, vdev_t *vd) 3070 { 3071 nvlist_t *nv, **newspares; 3072 int i, j; 3073 3074 nv = NULL; 3075 for (i = 0; i < nspares; i++) { 3076 uint64_t theguid; 3077 3078 VERIFY(nvlist_lookup_uint64(spares[i], 3079 ZPOOL_CONFIG_GUID, &theguid) == 0); 3080 if (theguid == guid) { 3081 nv = spares[i]; 3082 break; 3083 } 3084 } 3085 3086 /* 3087 * Only remove the hot spare if it's not currently in use in this pool. 
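 * The checks below return ENOENT if the guid is not known as a spare at
 * all, ENOTSUP if it exists only as an active vdev, and EBUSY if the spare
 * is currently in use and 'unspare' was not requested.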
3088 */ 3089 if (nv == NULL && vd == NULL) 3090 return (ENOENT); 3091 3092 if (nv == NULL && vd != NULL) 3093 return (ENOTSUP); 3094 3095 if (!unspare && nv != NULL && vd != NULL) 3096 return (EBUSY); 3097 3098 if (nspares == 1) { 3099 newspares = NULL; 3100 } else { 3101 newspares = kmem_alloc((nspares - 1) * sizeof (void *), 3102 KM_SLEEP); 3103 for (i = 0, j = 0; i < nspares; i++) { 3104 if (spares[i] != nv) 3105 VERIFY(nvlist_dup(spares[i], 3106 &newspares[j++], KM_SLEEP) == 0); 3107 } 3108 } 3109 3110 VERIFY(nvlist_remove(sav->sav_config, ZPOOL_CONFIG_SPARES, 3111 DATA_TYPE_NVLIST_ARRAY) == 0); 3112 VERIFY(nvlist_add_nvlist_array(sav->sav_config, 3113 ZPOOL_CONFIG_SPARES, newspares, nspares - 1) == 0); 3114 for (i = 0; i < nspares - 1; i++) 3115 nvlist_free(newspares[i]); 3116 kmem_free(newspares, (nspares - 1) * sizeof (void *)); 3117 3118 return (0); 3119 } 3120 3121 /* 3122 * Remove an l2cache vdev from the nvlist config. 3123 */ 3124 static int 3125 spa_remove_l2cache(spa_aux_vdev_t *sav, uint64_t guid, nvlist_t **l2cache, 3126 int nl2cache, vdev_t *vd) 3127 { 3128 nvlist_t *nv, **newl2cache; 3129 int i, j; 3130 3131 nv = NULL; 3132 for (i = 0; i < nl2cache; i++) { 3133 uint64_t theguid; 3134 3135 VERIFY(nvlist_lookup_uint64(l2cache[i], 3136 ZPOOL_CONFIG_GUID, &theguid) == 0); 3137 if (theguid == guid) { 3138 nv = l2cache[i]; 3139 break; 3140 } 3141 } 3142 3143 if (vd == NULL) { 3144 for (i = 0; i < nl2cache; i++) { 3145 if (sav->sav_vdevs[i]->vdev_guid == guid) { 3146 vd = sav->sav_vdevs[i]; 3147 break; 3148 } 3149 } 3150 } 3151 3152 if (nv == NULL && vd == NULL) 3153 return (ENOENT); 3154 3155 if (nv == NULL && vd != NULL) 3156 return (ENOTSUP); 3157 3158 if (nl2cache == 1) { 3159 newl2cache = NULL; 3160 } else { 3161 newl2cache = kmem_alloc((nl2cache - 1) * sizeof (void *), 3162 KM_SLEEP); 3163 for (i = 0, j = 0; i < nl2cache; i++) { 3164 if (l2cache[i] != nv) 3165 VERIFY(nvlist_dup(l2cache[i], 3166 &newl2cache[j++], KM_SLEEP) == 0); 3167 } 3168 } 3169 3170 VERIFY(nvlist_remove(sav->sav_config, ZPOOL_CONFIG_L2CACHE, 3171 DATA_TYPE_NVLIST_ARRAY) == 0); 3172 VERIFY(nvlist_add_nvlist_array(sav->sav_config, 3173 ZPOOL_CONFIG_L2CACHE, newl2cache, nl2cache - 1) == 0); 3174 for (i = 0; i < nl2cache - 1; i++) 3175 nvlist_free(newl2cache[i]); 3176 kmem_free(newl2cache, (nl2cache - 1) * sizeof (void *)); 3177 3178 return (0); 3179 } 3180 3181 /* 3182 * Remove a device from the pool. Currently, this supports removing only hot 3183 * spares and level 2 ARC devices. 
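 * (spa_vdev_detach() also calls this for every other pool when a hot spare
 * is promoted to a permanent replacement; see the 'unspare' loop above.)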
3184 */ 3185 int 3186 spa_vdev_remove(spa_t *spa, uint64_t guid, boolean_t unspare) 3187 { 3188 vdev_t *vd; 3189 nvlist_t **spares, **l2cache; 3190 uint_t nspares, nl2cache; 3191 int error = 0; 3192 3193 spa_config_enter(spa, RW_WRITER, FTAG); 3194 3195 vd = spa_lookup_by_guid(spa, guid, B_FALSE); 3196 3197 if (spa->spa_spares.sav_vdevs != NULL && 3198 nvlist_lookup_nvlist_array(spa->spa_spares.sav_config, 3199 ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0) { 3200 if ((error = spa_remove_spares(&spa->spa_spares, guid, unspare, 3201 spares, nspares, vd)) != 0) 3202 goto out; 3203 spa_load_spares(spa); 3204 spa->spa_spares.sav_sync = B_TRUE; 3205 goto out; 3206 } 3207 3208 if (spa->spa_l2cache.sav_vdevs != NULL && 3209 nvlist_lookup_nvlist_array(spa->spa_l2cache.sav_config, 3210 ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0) { 3211 if ((error = spa_remove_l2cache(&spa->spa_l2cache, guid, 3212 l2cache, nl2cache, vd)) != 0) 3213 goto out; 3214 spa_load_l2cache(spa); 3215 spa->spa_l2cache.sav_sync = B_TRUE; 3216 } 3217 3218 out: 3219 spa_config_exit(spa, FTAG); 3220 return (error); 3221 } 3222 3223 /* 3224 * Find any device that's done replacing, or a vdev marked 'unspare' that's 3225 * current spared, so we can detach it. 3226 */ 3227 static vdev_t * 3228 spa_vdev_resilver_done_hunt(vdev_t *vd) 3229 { 3230 vdev_t *newvd, *oldvd; 3231 int c; 3232 3233 for (c = 0; c < vd->vdev_children; c++) { 3234 oldvd = spa_vdev_resilver_done_hunt(vd->vdev_child[c]); 3235 if (oldvd != NULL) 3236 return (oldvd); 3237 } 3238 3239 /* 3240 * Check for a completed replacement. 3241 */ 3242 if (vd->vdev_ops == &vdev_replacing_ops && vd->vdev_children == 2) { 3243 oldvd = vd->vdev_child[0]; 3244 newvd = vd->vdev_child[1]; 3245 3246 mutex_enter(&newvd->vdev_dtl_lock); 3247 if (newvd->vdev_dtl_map.sm_space == 0 && 3248 newvd->vdev_dtl_scrub.sm_space == 0) { 3249 mutex_exit(&newvd->vdev_dtl_lock); 3250 return (oldvd); 3251 } 3252 mutex_exit(&newvd->vdev_dtl_lock); 3253 } 3254 3255 /* 3256 * Check for a completed resilver with the 'unspare' flag set. 3257 */ 3258 if (vd->vdev_ops == &vdev_spare_ops && vd->vdev_children == 2) { 3259 newvd = vd->vdev_child[0]; 3260 oldvd = vd->vdev_child[1]; 3261 3262 mutex_enter(&newvd->vdev_dtl_lock); 3263 if (newvd->vdev_unspare && 3264 newvd->vdev_dtl_map.sm_space == 0 && 3265 newvd->vdev_dtl_scrub.sm_space == 0) { 3266 newvd->vdev_unspare = 0; 3267 mutex_exit(&newvd->vdev_dtl_lock); 3268 return (oldvd); 3269 } 3270 mutex_exit(&newvd->vdev_dtl_lock); 3271 } 3272 3273 return (NULL); 3274 } 3275 3276 static void 3277 spa_vdev_resilver_done(spa_t *spa) 3278 { 3279 vdev_t *vd; 3280 vdev_t *pvd; 3281 uint64_t guid; 3282 uint64_t pguid = 0; 3283 3284 spa_config_enter(spa, RW_READER, FTAG); 3285 3286 while ((vd = spa_vdev_resilver_done_hunt(spa->spa_root_vdev)) != NULL) { 3287 guid = vd->vdev_guid; 3288 /* 3289 * If we have just finished replacing a hot spared device, then 3290 * we need to detach the parent's first child (the original hot 3291 * spare) as well. 
3292 */ 3293 pvd = vd->vdev_parent; 3294 if (pvd->vdev_parent->vdev_ops == &vdev_spare_ops && 3295 pvd->vdev_id == 0) { 3296 ASSERT(pvd->vdev_ops == &vdev_replacing_ops); 3297 ASSERT(pvd->vdev_parent->vdev_children == 2); 3298 pguid = pvd->vdev_parent->vdev_child[1]->vdev_guid; 3299 } 3300 spa_config_exit(spa, FTAG); 3301 if (spa_vdev_detach(spa, guid, B_TRUE) != 0) 3302 return; 3303 if (pguid != 0 && spa_vdev_detach(spa, pguid, B_TRUE) != 0) 3304 return; 3305 spa_config_enter(spa, RW_READER, FTAG); 3306 } 3307 3308 spa_config_exit(spa, FTAG); 3309 } 3310 3311 /* 3312 * Update the stored path for this vdev. Dirty the vdev configuration, relying 3313 * on spa_vdev_enter/exit() to synchronize the labels and cache. 3314 */ 3315 int 3316 spa_vdev_setpath(spa_t *spa, uint64_t guid, const char *newpath) 3317 { 3318 vdev_t *vd; 3319 uint64_t txg; 3320 3321 txg = spa_vdev_enter(spa); 3322 3323 if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL) { 3324 /* 3325 * Determine if this is a reference to a hot spare device. If 3326 * it is, update the path manually as there is no associated 3327 * vdev_t that can be synced to disk. 3328 */ 3329 nvlist_t **spares; 3330 uint_t i, nspares; 3331 3332 if (spa->spa_spares.sav_config != NULL) { 3333 VERIFY(nvlist_lookup_nvlist_array( 3334 spa->spa_spares.sav_config, ZPOOL_CONFIG_SPARES, 3335 &spares, &nspares) == 0); 3336 for (i = 0; i < nspares; i++) { 3337 uint64_t theguid; 3338 VERIFY(nvlist_lookup_uint64(spares[i], 3339 ZPOOL_CONFIG_GUID, &theguid) == 0); 3340 if (theguid == guid) { 3341 VERIFY(nvlist_add_string(spares[i], 3342 ZPOOL_CONFIG_PATH, newpath) == 0); 3343 spa_load_spares(spa); 3344 spa->spa_spares.sav_sync = B_TRUE; 3345 return (spa_vdev_exit(spa, NULL, txg, 3346 0)); 3347 } 3348 } 3349 } 3350 3351 return (spa_vdev_exit(spa, NULL, txg, ENOENT)); 3352 } 3353 3354 if (!vd->vdev_ops->vdev_op_leaf) 3355 return (spa_vdev_exit(spa, NULL, txg, ENOTSUP)); 3356 3357 spa_strfree(vd->vdev_path); 3358 vd->vdev_path = spa_strdup(newpath); 3359 3360 vdev_config_dirty(vd->vdev_top); 3361 3362 return (spa_vdev_exit(spa, NULL, txg, 0)); 3363 } 3364 3365 /* 3366 * ========================================================================== 3367 * SPA Scrubbing 3368 * ========================================================================== 3369 */ 3370 3371 static void 3372 spa_scrub_io_done(zio_t *zio) 3373 { 3374 spa_t *spa = zio->io_spa; 3375 3376 arc_data_buf_free(zio->io_data, zio->io_size); 3377 3378 mutex_enter(&spa->spa_scrub_lock); 3379 if (zio->io_error && !(zio->io_flags & ZIO_FLAG_SPECULATIVE)) { 3380 vdev_t *vd = zio->io_vd ? zio->io_vd : spa->spa_root_vdev; 3381 spa->spa_scrub_errors++; 3382 mutex_enter(&vd->vdev_stat_lock); 3383 vd->vdev_stat.vs_scrub_errors++; 3384 mutex_exit(&vd->vdev_stat_lock); 3385 } 3386 3387 if (--spa->spa_scrub_inflight < spa->spa_scrub_maxinflight) 3388 cv_broadcast(&spa->spa_scrub_io_cv); 3389 3390 ASSERT(spa->spa_scrub_inflight >= 0); 3391 3392 mutex_exit(&spa->spa_scrub_lock); 3393 } 3394 3395 static void 3396 spa_scrub_io_start(spa_t *spa, blkptr_t *bp, int priority, int flags, 3397 zbookmark_t *zb) 3398 { 3399 size_t size = BP_GET_LSIZE(bp); 3400 void *data; 3401 3402 mutex_enter(&spa->spa_scrub_lock); 3403 /* 3404 * Do not give too much work to vdev(s). 
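 * i.e. never keep more than spa_scrub_maxinflight scrub I/Os outstanding;
 * spa_scrub_io_done() signals spa_scrub_io_cv as they drain.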
3405 */ 3406 while (spa->spa_scrub_inflight >= spa->spa_scrub_maxinflight) { 3407 cv_wait(&spa->spa_scrub_io_cv, &spa->spa_scrub_lock); 3408 } 3409 spa->spa_scrub_inflight++; 3410 mutex_exit(&spa->spa_scrub_lock); 3411 3412 data = arc_data_buf_alloc(size); 3413 3414 if (zb->zb_level == -1 && BP_GET_TYPE(bp) != DMU_OT_OBJSET) 3415 flags |= ZIO_FLAG_SPECULATIVE; /* intent log block */ 3416 3417 flags |= ZIO_FLAG_SCRUB_THREAD | ZIO_FLAG_CANFAIL; 3418 3419 zio_nowait(zio_read(NULL, spa, bp, data, size, 3420 spa_scrub_io_done, NULL, priority, flags, zb)); 3421 } 3422 3423 /* ARGSUSED */ 3424 static int 3425 spa_scrub_cb(traverse_blk_cache_t *bc, spa_t *spa, void *a) 3426 { 3427 blkptr_t *bp = &bc->bc_blkptr; 3428 vdev_t *vd = spa->spa_root_vdev; 3429 dva_t *dva = bp->blk_dva; 3430 int needs_resilver = B_FALSE; 3431 int d; 3432 3433 if (bc->bc_errno) { 3434 /* 3435 * We can't scrub this block, but we can continue to scrub 3436 * the rest of the pool. Note the error and move along. 3437 */ 3438 mutex_enter(&spa->spa_scrub_lock); 3439 spa->spa_scrub_errors++; 3440 mutex_exit(&spa->spa_scrub_lock); 3441 3442 mutex_enter(&vd->vdev_stat_lock); 3443 vd->vdev_stat.vs_scrub_errors++; 3444 mutex_exit(&vd->vdev_stat_lock); 3445 3446 return (ERESTART); 3447 } 3448 3449 ASSERT(bp->blk_birth < spa->spa_scrub_maxtxg); 3450 3451 for (d = 0; d < BP_GET_NDVAS(bp); d++) { 3452 vd = vdev_lookup_top(spa, DVA_GET_VDEV(&dva[d])); 3453 3454 ASSERT(vd != NULL); 3455 3456 /* 3457 * Keep track of how much data we've examined so that 3458 * zpool(1M) status can make useful progress reports. 3459 */ 3460 mutex_enter(&vd->vdev_stat_lock); 3461 vd->vdev_stat.vs_scrub_examined += DVA_GET_ASIZE(&dva[d]); 3462 mutex_exit(&vd->vdev_stat_lock); 3463 3464 if (spa->spa_scrub_type == POOL_SCRUB_RESILVER) { 3465 if (DVA_GET_GANG(&dva[d])) { 3466 /* 3467 * Gang members may be spread across multiple 3468 * vdevs, so the best we can do is look at the 3469 * pool-wide DTL. 3470 * XXX -- it would be better to change our 3471 * allocation policy to ensure that this can't 3472 * happen. 3473 */ 3474 vd = spa->spa_root_vdev; 3475 } 3476 if (vdev_dtl_contains(&vd->vdev_dtl_map, 3477 bp->blk_birth, 1)) 3478 needs_resilver = B_TRUE; 3479 } 3480 } 3481 3482 if (spa->spa_scrub_type == POOL_SCRUB_EVERYTHING) 3483 spa_scrub_io_start(spa, bp, ZIO_PRIORITY_SCRUB, 3484 ZIO_FLAG_SCRUB, &bc->bc_bookmark); 3485 else if (needs_resilver) 3486 spa_scrub_io_start(spa, bp, ZIO_PRIORITY_RESILVER, 3487 ZIO_FLAG_RESILVER, &bc->bc_bookmark); 3488 3489 return (0); 3490 } 3491 3492 static void 3493 spa_scrub_thread(spa_t *spa) 3494 { 3495 callb_cpr_t cprinfo; 3496 traverse_handle_t *th = spa->spa_scrub_th; 3497 vdev_t *rvd = spa->spa_root_vdev; 3498 pool_scrub_type_t scrub_type = spa->spa_scrub_type; 3499 int error = 0; 3500 boolean_t complete; 3501 3502 CALLB_CPR_INIT(&cprinfo, &spa->spa_scrub_lock, callb_generic_cpr, FTAG); 3503 3504 /* 3505 * If we're restarting due to a snapshot create/delete, 3506 * wait for that to complete. 3507 */ 3508 txg_wait_synced(spa_get_dsl(spa), 0); 3509 3510 dprintf("start %s mintxg=%llu maxtxg=%llu\n", 3511 scrub_type == POOL_SCRUB_RESILVER ? 
"resilver" : "scrub", 3512 spa->spa_scrub_mintxg, spa->spa_scrub_maxtxg); 3513 3514 spa_config_enter(spa, RW_WRITER, FTAG); 3515 vdev_reopen(rvd); /* purge all vdev caches */ 3516 vdev_config_dirty(rvd); /* rewrite all disk labels */ 3517 vdev_scrub_stat_update(rvd, scrub_type, B_FALSE); 3518 spa_config_exit(spa, FTAG); 3519 3520 mutex_enter(&spa->spa_scrub_lock); 3521 spa->spa_scrub_errors = 0; 3522 spa->spa_scrub_active = 1; 3523 ASSERT(spa->spa_scrub_inflight == 0); 3524 3525 while (!spa->spa_scrub_stop) { 3526 CALLB_CPR_SAFE_BEGIN(&cprinfo); 3527 while (spa->spa_scrub_suspended) { 3528 spa->spa_scrub_active = 0; 3529 cv_broadcast(&spa->spa_scrub_cv); 3530 cv_wait(&spa->spa_scrub_cv, &spa->spa_scrub_lock); 3531 spa->spa_scrub_active = 1; 3532 } 3533 CALLB_CPR_SAFE_END(&cprinfo, &spa->spa_scrub_lock); 3534 3535 if (spa->spa_scrub_restart_txg != 0) 3536 break; 3537 3538 mutex_exit(&spa->spa_scrub_lock); 3539 error = traverse_more(th); 3540 mutex_enter(&spa->spa_scrub_lock); 3541 if (error != EAGAIN) 3542 break; 3543 } 3544 3545 while (spa->spa_scrub_inflight) 3546 cv_wait(&spa->spa_scrub_io_cv, &spa->spa_scrub_lock); 3547 3548 spa->spa_scrub_active = 0; 3549 cv_broadcast(&spa->spa_scrub_cv); 3550 3551 mutex_exit(&spa->spa_scrub_lock); 3552 3553 spa_config_enter(spa, RW_WRITER, FTAG); 3554 3555 mutex_enter(&spa->spa_scrub_lock); 3556 3557 /* 3558 * Note: we check spa_scrub_restart_txg under both spa_scrub_lock 3559 * AND the spa config lock to synchronize with any config changes 3560 * that revise the DTLs under spa_vdev_enter() / spa_vdev_exit(). 3561 */ 3562 if (spa->spa_scrub_restart_txg != 0) 3563 error = ERESTART; 3564 3565 if (spa->spa_scrub_stop) 3566 error = EINTR; 3567 3568 /* 3569 * Even if there were uncorrectable errors, we consider the scrub 3570 * completed. The downside is that if there is a transient error during 3571 * a resilver, we won't resilver the data properly to the target. But 3572 * if the damage is permanent (more likely) we will resilver forever, 3573 * which isn't really acceptable. Since there is enough information for 3574 * the user to know what has failed and why, this seems like a more 3575 * tractable approach. 3576 */ 3577 complete = (error == 0); 3578 3579 dprintf("end %s to maxtxg=%llu %s, traverse=%d, %llu errors, stop=%u\n", 3580 scrub_type == POOL_SCRUB_RESILVER ? "resilver" : "scrub", 3581 spa->spa_scrub_maxtxg, complete ? "done" : "FAILED", 3582 error, spa->spa_scrub_errors, spa->spa_scrub_stop); 3583 3584 mutex_exit(&spa->spa_scrub_lock); 3585 3586 /* 3587 * If the scrub/resilver completed, update all DTLs to reflect this. 3588 * Whether it succeeded or not, vacate all temporary scrub DTLs. 3589 */ 3590 vdev_dtl_reassess(rvd, spa_last_synced_txg(spa) + 1, 3591 complete ? spa->spa_scrub_maxtxg : 0, B_TRUE); 3592 vdev_scrub_stat_update(rvd, POOL_SCRUB_NONE, complete); 3593 spa_errlog_rotate(spa); 3594 3595 if (scrub_type == POOL_SCRUB_RESILVER && complete) 3596 spa_event_notify(spa, NULL, ESC_ZFS_RESILVER_FINISH); 3597 3598 spa_config_exit(spa, FTAG); 3599 3600 mutex_enter(&spa->spa_scrub_lock); 3601 3602 /* 3603 * We may have finished replacing a device. 3604 * Let the async thread assess this and handle the detach. 3605 */ 3606 spa_async_request(spa, SPA_ASYNC_RESILVER_DONE); 3607 3608 /* 3609 * If we were told to restart, our final act is to start a new scrub. 3610 */ 3611 if (error == ERESTART) 3612 spa_async_request(spa, scrub_type == POOL_SCRUB_RESILVER ? 
3613 SPA_ASYNC_RESILVER : SPA_ASYNC_SCRUB); 3614 3615 spa->spa_scrub_type = POOL_SCRUB_NONE; 3616 spa->spa_scrub_active = 0; 3617 spa->spa_scrub_thread = NULL; 3618 cv_broadcast(&spa->spa_scrub_cv); 3619 CALLB_CPR_EXIT(&cprinfo); /* drops &spa->spa_scrub_lock */ 3620 thread_exit(); 3621 } 3622 3623 void 3624 spa_scrub_suspend(spa_t *spa) 3625 { 3626 mutex_enter(&spa->spa_scrub_lock); 3627 spa->spa_scrub_suspended++; 3628 while (spa->spa_scrub_active) { 3629 cv_broadcast(&spa->spa_scrub_cv); 3630 cv_wait(&spa->spa_scrub_cv, &spa->spa_scrub_lock); 3631 } 3632 while (spa->spa_scrub_inflight) 3633 cv_wait(&spa->spa_scrub_io_cv, &spa->spa_scrub_lock); 3634 mutex_exit(&spa->spa_scrub_lock); 3635 } 3636 3637 void 3638 spa_scrub_resume(spa_t *spa) 3639 { 3640 mutex_enter(&spa->spa_scrub_lock); 3641 ASSERT(spa->spa_scrub_suspended != 0); 3642 if (--spa->spa_scrub_suspended == 0) 3643 cv_broadcast(&spa->spa_scrub_cv); 3644 mutex_exit(&spa->spa_scrub_lock); 3645 } 3646 3647 void 3648 spa_scrub_restart(spa_t *spa, uint64_t txg) 3649 { 3650 /* 3651 * Something happened (e.g. snapshot create/delete) that means 3652 * we must restart any in-progress scrubs. The itinerary will 3653 * fix this properly. 3654 */ 3655 mutex_enter(&spa->spa_scrub_lock); 3656 spa->spa_scrub_restart_txg = txg; 3657 mutex_exit(&spa->spa_scrub_lock); 3658 } 3659 3660 int 3661 spa_scrub(spa_t *spa, pool_scrub_type_t type, boolean_t force) 3662 { 3663 space_seg_t *ss; 3664 uint64_t mintxg, maxtxg; 3665 vdev_t *rvd = spa->spa_root_vdev; 3666 3667 ASSERT(MUTEX_HELD(&spa_namespace_lock)); 3668 ASSERT(!spa_config_held(spa, RW_WRITER)); 3669 3670 if ((uint_t)type >= POOL_SCRUB_TYPES) 3671 return (ENOTSUP); 3672 3673 mutex_enter(&spa->spa_scrub_lock); 3674 3675 /* 3676 * If there's a scrub or resilver already in progress, stop it. 3677 */ 3678 while (spa->spa_scrub_thread != NULL) { 3679 /* 3680 * Don't stop a resilver unless forced. 3681 */ 3682 if (spa->spa_scrub_type == POOL_SCRUB_RESILVER && !force) { 3683 mutex_exit(&spa->spa_scrub_lock); 3684 return (EBUSY); 3685 } 3686 spa->spa_scrub_stop = 1; 3687 cv_broadcast(&spa->spa_scrub_cv); 3688 cv_wait(&spa->spa_scrub_cv, &spa->spa_scrub_lock); 3689 } 3690 3691 /* 3692 * Terminate the previous traverse. 3693 */ 3694 if (spa->spa_scrub_th != NULL) { 3695 traverse_fini(spa->spa_scrub_th); 3696 spa->spa_scrub_th = NULL; 3697 } 3698 3699 if (rvd == NULL) { 3700 ASSERT(spa->spa_scrub_stop == 0); 3701 ASSERT(spa->spa_scrub_type == type); 3702 ASSERT(spa->spa_scrub_restart_txg == 0); 3703 mutex_exit(&spa->spa_scrub_lock); 3704 return (0); 3705 } 3706 3707 mintxg = TXG_INITIAL - 1; 3708 maxtxg = spa_last_synced_txg(spa) + 1; 3709 3710 mutex_enter(&rvd->vdev_dtl_lock); 3711 3712 if (rvd->vdev_dtl_map.sm_space == 0) { 3713 /* 3714 * The pool-wide DTL is empty. 3715 * If this is a resilver, there's nothing to do except 3716 * check whether any in-progress replacements have completed. 3717 */ 3718 if (type == POOL_SCRUB_RESILVER) { 3719 type = POOL_SCRUB_NONE; 3720 spa_async_request(spa, SPA_ASYNC_RESILVER_DONE); 3721 } 3722 } else { 3723 /* 3724 * The pool-wide DTL is non-empty. 3725 * If this is a normal scrub, upgrade to a resilver instead. 3726 */ 3727 if (type == POOL_SCRUB_EVERYTHING) 3728 type = POOL_SCRUB_RESILVER; 3729 } 3730 3731 if (type == POOL_SCRUB_RESILVER) { 3732 /* 3733 * Determine the resilvering boundaries. 3734 * 3735 * Note: (mintxg, maxtxg) is an open interval, 3736 * i.e. mintxg and maxtxg themselves are not included. 
3737 * 3738 * Note: for maxtxg, we MIN with spa_last_synced_txg(spa) + 1 3739 * so we don't claim to resilver a txg that's still changing. 3740 */ 3741 ss = avl_first(&rvd->vdev_dtl_map.sm_root); 3742 mintxg = ss->ss_start - 1; 3743 ss = avl_last(&rvd->vdev_dtl_map.sm_root); 3744 maxtxg = MIN(ss->ss_end, maxtxg); 3745 3746 spa_event_notify(spa, NULL, ESC_ZFS_RESILVER_START); 3747 } 3748 3749 mutex_exit(&rvd->vdev_dtl_lock); 3750 3751 spa->spa_scrub_stop = 0; 3752 spa->spa_scrub_type = type; 3753 spa->spa_scrub_restart_txg = 0; 3754 3755 if (type != POOL_SCRUB_NONE) { 3756 spa->spa_scrub_mintxg = mintxg; 3757 spa->spa_scrub_maxtxg = maxtxg; 3758 spa->spa_scrub_th = traverse_init(spa, spa_scrub_cb, NULL, 3759 ADVANCE_PRE | ADVANCE_PRUNE | ADVANCE_ZIL, 3760 ZIO_FLAG_CANFAIL); 3761 traverse_add_pool(spa->spa_scrub_th, mintxg, maxtxg); 3762 spa->spa_scrub_thread = thread_create(NULL, 0, 3763 spa_scrub_thread, spa, 0, &p0, TS_RUN, minclsyspri); 3764 } 3765 3766 mutex_exit(&spa->spa_scrub_lock); 3767 3768 return (0); 3769 } 3770 3771 /* 3772 * ========================================================================== 3773 * SPA async task processing 3774 * ========================================================================== 3775 */ 3776 3777 static void 3778 spa_async_remove(spa_t *spa, vdev_t *vd) 3779 { 3780 vdev_t *tvd; 3781 int c; 3782 3783 for (c = 0; c < vd->vdev_children; c++) { 3784 tvd = vd->vdev_child[c]; 3785 if (tvd->vdev_remove_wanted) { 3786 tvd->vdev_remove_wanted = 0; 3787 vdev_set_state(tvd, B_FALSE, VDEV_STATE_REMOVED, 3788 VDEV_AUX_NONE); 3789 vdev_clear(spa, tvd, B_TRUE); 3790 vdev_config_dirty(tvd->vdev_top); 3791 } 3792 spa_async_remove(spa, tvd); 3793 } 3794 } 3795 3796 static void 3797 spa_async_thread(spa_t *spa) 3798 { 3799 int tasks; 3800 uint64_t txg; 3801 3802 ASSERT(spa->spa_sync_on); 3803 3804 mutex_enter(&spa->spa_async_lock); 3805 tasks = spa->spa_async_tasks; 3806 spa->spa_async_tasks = 0; 3807 mutex_exit(&spa->spa_async_lock); 3808 3809 /* 3810 * See if the config needs to be updated. 3811 */ 3812 if (tasks & SPA_ASYNC_CONFIG_UPDATE) { 3813 mutex_enter(&spa_namespace_lock); 3814 spa_config_update(spa, SPA_CONFIG_UPDATE_POOL); 3815 mutex_exit(&spa_namespace_lock); 3816 } 3817 3818 /* 3819 * See if any devices need to be marked REMOVED. 3820 * 3821 * XXX - We avoid doing this when we are in 3822 * I/O failure state since spa_vdev_enter() grabs 3823 * the namespace lock and would not be able to obtain 3824 * the writer config lock. 3825 */ 3826 if (tasks & SPA_ASYNC_REMOVE && 3827 spa_state(spa) != POOL_STATE_IO_FAILURE) { 3828 txg = spa_vdev_enter(spa); 3829 spa_async_remove(spa, spa->spa_root_vdev); 3830 (void) spa_vdev_exit(spa, NULL, txg, 0); 3831 } 3832 3833 /* 3834 * If any devices are done replacing, detach them. 3835 */ 3836 if (tasks & SPA_ASYNC_RESILVER_DONE) 3837 spa_vdev_resilver_done(spa); 3838 3839 /* 3840 * Kick off a scrub. When starting a RESILVER scrub (or an EVERYTHING 3841 * scrub which can become a resilver), we need to hold 3842 * spa_namespace_lock() because the sysevent we post via 3843 * spa_event_notify() needs to get the name of the pool. 3844 */ 3845 if (tasks & SPA_ASYNC_SCRUB) { 3846 mutex_enter(&spa_namespace_lock); 3847 VERIFY(spa_scrub(spa, POOL_SCRUB_EVERYTHING, B_TRUE) == 0); 3848 mutex_exit(&spa_namespace_lock); 3849 } 3850 3851 /* 3852 * Kick off a resilver. 
3853 */ 3854 if (tasks & SPA_ASYNC_RESILVER) { 3855 mutex_enter(&spa_namespace_lock); 3856 VERIFY(spa_scrub(spa, POOL_SCRUB_RESILVER, B_TRUE) == 0); 3857 mutex_exit(&spa_namespace_lock); 3858 } 3859 3860 /* 3861 * Let the world know that we're done. 3862 */ 3863 mutex_enter(&spa->spa_async_lock); 3864 spa->spa_async_thread = NULL; 3865 cv_broadcast(&spa->spa_async_cv); 3866 mutex_exit(&spa->spa_async_lock); 3867 thread_exit(); 3868 } 3869 3870 void 3871 spa_async_suspend(spa_t *spa) 3872 { 3873 mutex_enter(&spa->spa_async_lock); 3874 spa->spa_async_suspended++; 3875 while (spa->spa_async_thread != NULL) 3876 cv_wait(&spa->spa_async_cv, &spa->spa_async_lock); 3877 mutex_exit(&spa->spa_async_lock); 3878 } 3879 3880 void 3881 spa_async_resume(spa_t *spa) 3882 { 3883 mutex_enter(&spa->spa_async_lock); 3884 ASSERT(spa->spa_async_suspended != 0); 3885 spa->spa_async_suspended--; 3886 mutex_exit(&spa->spa_async_lock); 3887 } 3888 3889 static void 3890 spa_async_dispatch(spa_t *spa) 3891 { 3892 mutex_enter(&spa->spa_async_lock); 3893 if (spa->spa_async_tasks && !spa->spa_async_suspended && 3894 spa->spa_async_thread == NULL && 3895 rootdir != NULL && !vn_is_readonly(rootdir)) 3896 spa->spa_async_thread = thread_create(NULL, 0, 3897 spa_async_thread, spa, 0, &p0, TS_RUN, maxclsyspri); 3898 mutex_exit(&spa->spa_async_lock); 3899 } 3900 3901 void 3902 spa_async_request(spa_t *spa, int task) 3903 { 3904 mutex_enter(&spa->spa_async_lock); 3905 spa->spa_async_tasks |= task; 3906 mutex_exit(&spa->spa_async_lock); 3907 } 3908 3909 /* 3910 * ========================================================================== 3911 * SPA syncing routines 3912 * ========================================================================== 3913 */ 3914 3915 static void 3916 spa_sync_deferred_frees(spa_t *spa, uint64_t txg) 3917 { 3918 bplist_t *bpl = &spa->spa_sync_bplist; 3919 dmu_tx_t *tx; 3920 blkptr_t blk; 3921 uint64_t itor = 0; 3922 zio_t *zio; 3923 int error; 3924 uint8_t c = 1; 3925 3926 zio = zio_root(spa, NULL, NULL, ZIO_FLAG_CONFIG_HELD); 3927 3928 while (bplist_iterate(bpl, &itor, &blk) == 0) 3929 zio_nowait(zio_free(zio, spa, txg, &blk, NULL, NULL)); 3930 3931 error = zio_wait(zio); 3932 ASSERT3U(error, ==, 0); 3933 3934 tx = dmu_tx_create_assigned(spa->spa_dsl_pool, txg); 3935 bplist_vacate(bpl, tx); 3936 3937 /* 3938 * Pre-dirty the first block so we sync to convergence faster. 3939 * (Usually only the first block is needed.) 
3940 */ 3941 dmu_write(spa->spa_meta_objset, spa->spa_sync_bplist_obj, 0, 1, &c, tx); 3942 dmu_tx_commit(tx); 3943 } 3944 3945 static void 3946 spa_sync_nvlist(spa_t *spa, uint64_t obj, nvlist_t *nv, dmu_tx_t *tx) 3947 { 3948 char *packed = NULL; 3949 size_t nvsize = 0; 3950 dmu_buf_t *db; 3951 3952 VERIFY(nvlist_size(nv, &nvsize, NV_ENCODE_XDR) == 0); 3953 3954 packed = kmem_alloc(nvsize, KM_SLEEP); 3955 3956 VERIFY(nvlist_pack(nv, &packed, &nvsize, NV_ENCODE_XDR, 3957 KM_SLEEP) == 0); 3958 3959 dmu_write(spa->spa_meta_objset, obj, 0, nvsize, packed, tx); 3960 3961 kmem_free(packed, nvsize); 3962 3963 VERIFY(0 == dmu_bonus_hold(spa->spa_meta_objset, obj, FTAG, &db)); 3964 dmu_buf_will_dirty(db, tx); 3965 *(uint64_t *)db->db_data = nvsize; 3966 dmu_buf_rele(db, FTAG); 3967 } 3968 3969 static void 3970 spa_sync_aux_dev(spa_t *spa, spa_aux_vdev_t *sav, dmu_tx_t *tx, 3971 const char *config, const char *entry) 3972 { 3973 nvlist_t *nvroot; 3974 nvlist_t **list; 3975 int i; 3976 3977 if (!sav->sav_sync) 3978 return; 3979 3980 /* 3981 * Update the MOS nvlist describing the list of available devices. 3982 * spa_validate_aux() will have already made sure this nvlist is 3983 * valid and the vdevs are labeled appropriately. 3984 */ 3985 if (sav->sav_object == 0) { 3986 sav->sav_object = dmu_object_alloc(spa->spa_meta_objset, 3987 DMU_OT_PACKED_NVLIST, 1 << 14, DMU_OT_PACKED_NVLIST_SIZE, 3988 sizeof (uint64_t), tx); 3989 VERIFY(zap_update(spa->spa_meta_objset, 3990 DMU_POOL_DIRECTORY_OBJECT, entry, sizeof (uint64_t), 1, 3991 &sav->sav_object, tx) == 0); 3992 } 3993 3994 VERIFY(nvlist_alloc(&nvroot, NV_UNIQUE_NAME, KM_SLEEP) == 0); 3995 if (sav->sav_count == 0) { 3996 VERIFY(nvlist_add_nvlist_array(nvroot, config, NULL, 0) == 0); 3997 } else { 3998 list = kmem_alloc(sav->sav_count * sizeof (void *), KM_SLEEP); 3999 for (i = 0; i < sav->sav_count; i++) 4000 list[i] = vdev_config_generate(spa, sav->sav_vdevs[i], 4001 B_FALSE, B_FALSE, B_TRUE); 4002 VERIFY(nvlist_add_nvlist_array(nvroot, config, list, 4003 sav->sav_count) == 0); 4004 for (i = 0; i < sav->sav_count; i++) 4005 nvlist_free(list[i]); 4006 kmem_free(list, sav->sav_count * sizeof (void *)); 4007 } 4008 4009 spa_sync_nvlist(spa, sav->sav_object, nvroot, tx); 4010 nvlist_free(nvroot); 4011 4012 sav->sav_sync = B_FALSE; 4013 } 4014 4015 static void 4016 spa_sync_config_object(spa_t *spa, dmu_tx_t *tx) 4017 { 4018 nvlist_t *config; 4019 4020 if (list_is_empty(&spa->spa_dirty_list)) 4021 return; 4022 4023 config = spa_config_generate(spa, NULL, dmu_tx_get_txg(tx), B_FALSE); 4024 4025 if (spa->spa_config_syncing) 4026 nvlist_free(spa->spa_config_syncing); 4027 spa->spa_config_syncing = config; 4028 4029 spa_sync_nvlist(spa, spa->spa_config_object, config, tx); 4030 } 4031 4032 /* 4033 * Set zpool properties. 4034 */ 4035 static void 4036 spa_sync_props(void *arg1, void *arg2, cred_t *cr, dmu_tx_t *tx) 4037 { 4038 spa_t *spa = arg1; 4039 objset_t *mos = spa->spa_meta_objset; 4040 nvlist_t *nvp = arg2; 4041 nvpair_t *elem; 4042 uint64_t intval; 4043 char *strval; 4044 zpool_prop_t prop; 4045 const char *propname; 4046 zprop_type_t proptype; 4047 spa_config_dirent_t *dp; 4048 4049 elem = NULL; 4050 while ((elem = nvlist_next_nvpair(nvp, elem))) { 4051 switch (prop = zpool_name_to_prop(nvpair_name(elem))) { 4052 case ZPOOL_PROP_VERSION: 4053 /* 4054 * Only set version for non-zpool-creation cases 4055 * (set/import). spa_create() needs special care 4056 * for version setting. 

static void
spa_sync_aux_dev(spa_t *spa, spa_aux_vdev_t *sav, dmu_tx_t *tx,
    const char *config, const char *entry)
{
	nvlist_t *nvroot;
	nvlist_t **list;
	int i;

	if (!sav->sav_sync)
		return;

	/*
	 * Update the MOS nvlist describing the list of available devices.
	 * spa_validate_aux() will have already made sure this nvlist is
	 * valid and the vdevs are labeled appropriately.
	 */
	if (sav->sav_object == 0) {
		sav->sav_object = dmu_object_alloc(spa->spa_meta_objset,
		    DMU_OT_PACKED_NVLIST, 1 << 14, DMU_OT_PACKED_NVLIST_SIZE,
		    sizeof (uint64_t), tx);
		VERIFY(zap_update(spa->spa_meta_objset,
		    DMU_POOL_DIRECTORY_OBJECT, entry, sizeof (uint64_t), 1,
		    &sav->sav_object, tx) == 0);
	}

	VERIFY(nvlist_alloc(&nvroot, NV_UNIQUE_NAME, KM_SLEEP) == 0);
	if (sav->sav_count == 0) {
		VERIFY(nvlist_add_nvlist_array(nvroot, config, NULL, 0) == 0);
	} else {
		list = kmem_alloc(sav->sav_count * sizeof (void *), KM_SLEEP);
		for (i = 0; i < sav->sav_count; i++)
			list[i] = vdev_config_generate(spa, sav->sav_vdevs[i],
			    B_FALSE, B_FALSE, B_TRUE);
		VERIFY(nvlist_add_nvlist_array(nvroot, config, list,
		    sav->sav_count) == 0);
		for (i = 0; i < sav->sav_count; i++)
			nvlist_free(list[i]);
		kmem_free(list, sav->sav_count * sizeof (void *));
	}

	spa_sync_nvlist(spa, sav->sav_object, nvroot, tx);
	nvlist_free(nvroot);

	sav->sav_sync = B_FALSE;
}

static void
spa_sync_config_object(spa_t *spa, dmu_tx_t *tx)
{
	nvlist_t *config;

	if (list_is_empty(&spa->spa_dirty_list))
		return;

	config = spa_config_generate(spa, NULL, dmu_tx_get_txg(tx), B_FALSE);

	if (spa->spa_config_syncing)
		nvlist_free(spa->spa_config_syncing);
	spa->spa_config_syncing = config;

	spa_sync_nvlist(spa, spa->spa_config_object, config, tx);
}
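
/*
 * Note on ordering: spa_sync_config_object() only stashes the freshly
 * generated config in spa_config_syncing and writes it into the MOS; the
 * in-core config cache is not updated until spa_sync() has committed the
 * uberblock, at which point it calls spa_config_set() and clears
 * spa_config_syncing.  This keeps the cached config from getting ahead of
 * what is actually on disk.
 */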

/*
 * Set zpool properties.
 */
static void
spa_sync_props(void *arg1, void *arg2, cred_t *cr, dmu_tx_t *tx)
{
	spa_t *spa = arg1;
	objset_t *mos = spa->spa_meta_objset;
	nvlist_t *nvp = arg2;
	nvpair_t *elem;
	uint64_t intval;
	char *strval;
	zpool_prop_t prop;
	const char *propname;
	zprop_type_t proptype;
	spa_config_dirent_t *dp;

	elem = NULL;
	while ((elem = nvlist_next_nvpair(nvp, elem))) {
		switch (prop = zpool_name_to_prop(nvpair_name(elem))) {
		case ZPOOL_PROP_VERSION:
			/*
			 * Only set version for non-zpool-creation cases
			 * (set/import).  spa_create() needs special care
			 * for version setting.
			 */
			if (tx->tx_txg != TXG_INITIAL) {
				VERIFY(nvpair_value_uint64(elem,
				    &intval) == 0);
				ASSERT(intval <= SPA_VERSION);
				ASSERT(intval >= spa_version(spa));
				spa->spa_uberblock.ub_version = intval;
				vdev_config_dirty(spa->spa_root_vdev);
			}
			break;

		case ZPOOL_PROP_ALTROOT:
			/*
			 * 'altroot' is a non-persistent property.  It should
			 * have been set temporarily at creation or import time.
			 */
			ASSERT(spa->spa_root != NULL);
			break;

		case ZPOOL_PROP_CACHEFILE:
			/*
			 * 'cachefile' is a non-persistent property, but we
			 * note an async request so that the config cache
			 * gets updated.
			 */
			VERIFY(nvpair_value_string(elem, &strval) == 0);

			dp = kmem_alloc(sizeof (spa_config_dirent_t),
			    KM_SLEEP);

			if (strval[0] == '\0')
				dp->scd_path = spa_strdup(spa_config_path);
			else if (strcmp(strval, "none") == 0)
				dp->scd_path = NULL;
			else
				dp->scd_path = spa_strdup(strval);

			list_insert_head(&spa->spa_config_list, dp);
			spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE);
			break;
		default:
			/*
			 * Set pool property values in the poolprops mos object.
			 */
			mutex_enter(&spa->spa_props_lock);
			if (spa->spa_pool_props_object == 0) {
				objset_t *mos = spa->spa_meta_objset;

				VERIFY((spa->spa_pool_props_object =
				    zap_create(mos, DMU_OT_POOL_PROPS,
				    DMU_OT_NONE, 0, tx)) > 0);

				VERIFY(zap_update(mos,
				    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_PROPS,
				    8, 1, &spa->spa_pool_props_object, tx)
				    == 0);
			}
			mutex_exit(&spa->spa_props_lock);

			/* normalize the property name */
			propname = zpool_prop_to_name(prop);
			proptype = zpool_prop_get_type(prop);

			if (nvpair_type(elem) == DATA_TYPE_STRING) {
				ASSERT(proptype == PROP_TYPE_STRING);
				VERIFY(nvpair_value_string(elem, &strval) == 0);
				VERIFY(zap_update(mos,
				    spa->spa_pool_props_object, propname,
				    1, strlen(strval) + 1, strval, tx) == 0);

			} else if (nvpair_type(elem) == DATA_TYPE_UINT64) {
				VERIFY(nvpair_value_uint64(elem, &intval) == 0);

				if (proptype == PROP_TYPE_INDEX) {
					const char *unused;
					VERIFY(zpool_prop_index_to_string(
					    prop, intval, &unused) == 0);
				}
				VERIFY(zap_update(mos,
				    spa->spa_pool_props_object, propname,
				    8, 1, &intval, tx) == 0);
			} else {
				ASSERT(0); /* not allowed */
			}

			switch (prop) {
			case ZPOOL_PROP_DELEGATION:
				spa->spa_delegation = intval;
				break;
			case ZPOOL_PROP_BOOTFS:
				spa->spa_bootfs = intval;
				break;
			case ZPOOL_PROP_FAILUREMODE:
				spa->spa_failmode = intval;
				break;
			default:
				break;
			}
		}

		/* log internal history if this is not a zpool create */
		if (spa_version(spa) >= SPA_VERSION_ZPOOL_HISTORY &&
		    tx->tx_txg != TXG_INITIAL) {
			spa_history_internal_log(LOG_POOL_PROPSET,
			    spa, tx, cr, "%s %lld %s",
			    nvpair_name(elem), intval, spa->spa_name);
		}
	}
}
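
/*
 * For reference, the nvlist handed to spa_sync_props() as arg2 is a flat list
 * of (property name, value) pairs; a caller building one would do something
 * like the following (illustrative sketch, not taken from a specific caller):
 *
 *	VERIFY(nvlist_alloc(&props, NV_UNIQUE_NAME, KM_SLEEP) == 0);
 *	VERIFY(nvlist_add_uint64(props,
 *	    zpool_prop_to_name(ZPOOL_PROP_VERSION), SPA_VERSION) == 0);
 *	VERIFY(nvlist_add_string(props,
 *	    zpool_prop_to_name(ZPOOL_PROP_CACHEFILE), "none") == 0);
 *
 * String-valued and index-valued properties arrive as DATA_TYPE_STRING and
 * DATA_TYPE_UINT64 pairs respectively, which is what the type checks in the
 * default case above rely on.
 */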

/*
 * Sync the specified transaction group.  New blocks may be dirtied as
 * part of the process, so we iterate until it converges.
 */
void
spa_sync(spa_t *spa, uint64_t txg)
{
	dsl_pool_t *dp = spa->spa_dsl_pool;
	objset_t *mos = spa->spa_meta_objset;
	bplist_t *bpl = &spa->spa_sync_bplist;
	vdev_t *rvd = spa->spa_root_vdev;
	vdev_t *vd;
	dmu_tx_t *tx;
	int dirty_vdevs;

	/*
	 * Lock out configuration changes.
	 */
	spa_config_enter(spa, RW_READER, FTAG);

	spa->spa_syncing_txg = txg;
	spa->spa_sync_pass = 0;

	VERIFY(0 == bplist_open(bpl, mos, spa->spa_sync_bplist_obj));

	tx = dmu_tx_create_assigned(dp, txg);

	/*
	 * If we are upgrading to SPA_VERSION_RAIDZ_DEFLATE this txg,
	 * set spa_deflate if we have no raid-z vdevs.
	 */
	if (spa->spa_ubsync.ub_version < SPA_VERSION_RAIDZ_DEFLATE &&
	    spa->spa_uberblock.ub_version >= SPA_VERSION_RAIDZ_DEFLATE) {
		int i;

		for (i = 0; i < rvd->vdev_children; i++) {
			vd = rvd->vdev_child[i];
			if (vd->vdev_deflate_ratio != SPA_MINBLOCKSIZE)
				break;
		}
		if (i == rvd->vdev_children) {
			spa->spa_deflate = TRUE;
			VERIFY(0 == zap_add(spa->spa_meta_objset,
			    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_DEFLATE,
			    sizeof (uint64_t), 1, &spa->spa_deflate, tx));
		}
	}

	/*
	 * If anything has changed in this txg, push the deferred frees
	 * from the previous txg.  If not, leave them alone so that we
	 * don't generate work on an otherwise idle system.
	 */
	if (!txg_list_empty(&dp->dp_dirty_datasets, txg) ||
	    !txg_list_empty(&dp->dp_dirty_dirs, txg) ||
	    !txg_list_empty(&dp->dp_sync_tasks, txg))
		spa_sync_deferred_frees(spa, txg);

	/*
	 * Iterate to convergence.
	 */
	do {
		spa->spa_sync_pass++;

		spa_sync_config_object(spa, tx);
		spa_sync_aux_dev(spa, &spa->spa_spares, tx,
		    ZPOOL_CONFIG_SPARES, DMU_POOL_SPARES);
		spa_sync_aux_dev(spa, &spa->spa_l2cache, tx,
		    ZPOOL_CONFIG_L2CACHE, DMU_POOL_L2CACHE);
		spa_errlog_sync(spa, txg);
		dsl_pool_sync(dp, txg);

		dirty_vdevs = 0;
		while (vd = txg_list_remove(&spa->spa_vdev_txg_list, txg)) {
			vdev_sync(vd, txg);
			dirty_vdevs++;
		}

		bplist_sync(bpl, tx);
	} while (dirty_vdevs);

	bplist_close(bpl);

	dprintf("txg %llu passes %d\n", txg, spa->spa_sync_pass);

	/*
	 * Rewrite the vdev configuration (which includes the uberblock)
	 * to commit the transaction group.
	 *
	 * If there are no dirty vdevs, we sync the uberblock to a few
	 * random top-level vdevs that are known to be visible in the
	 * config cache (see spa_vdev_add() for details).  If there *are*
	 * dirty vdevs -- or if the sync to our random subset fails --
	 * then sync the uberblock to all vdevs.
	 */
	if (list_is_empty(&spa->spa_dirty_list)) {
		vdev_t *svd[SPA_DVAS_PER_BP];
		int svdcount = 0;
		int children = rvd->vdev_children;
		int c0 = spa_get_random(children);
		int c;

		for (c = 0; c < children; c++) {
			vd = rvd->vdev_child[(c0 + c) % children];
			if (vd->vdev_ms_array == 0 || vd->vdev_islog)
				continue;
			svd[svdcount++] = vd;
			if (svdcount == SPA_DVAS_PER_BP)
				break;
		}
		vdev_config_sync(svd, svdcount, txg);
	} else {
		vdev_config_sync(rvd->vdev_child, rvd->vdev_children, txg);
	}
	dmu_tx_commit(tx);

	/*
	 * Clear the dirty config list.
	 */
	while ((vd = list_head(&spa->spa_dirty_list)) != NULL)
		vdev_config_clean(vd);

	/*
	 * Now that the new config has synced transactionally,
	 * let it become visible to the config cache.
	 */
	if (spa->spa_config_syncing != NULL) {
		spa_config_set(spa, spa->spa_config_syncing);
		spa->spa_config_txg = txg;
		spa->spa_config_syncing = NULL;
	}

	/*
	 * Make a stable copy of the fully synced uberblock.
	 * We use this as the root for pool traversals.
	 */
	spa->spa_traverse_wanted = 1;	/* tells traverse_more() to stop */

	spa_scrub_suspend(spa);		/* stop scrubbing and finish I/Os */

	rw_enter(&spa->spa_traverse_lock, RW_WRITER);
	spa->spa_traverse_wanted = 0;
	spa->spa_ubsync = spa->spa_uberblock;
	rw_exit(&spa->spa_traverse_lock);

	spa_scrub_resume(spa);		/* resume scrub with new ubsync */

	/*
	 * Clean up the ZIL records for the synced txg.
	 */
	dsl_pool_zil_clean(dp);

	/*
	 * Update usable space statistics.
	 */
	while (vd = txg_list_remove(&spa->spa_vdev_txg_list, TXG_CLEAN(txg)))
		vdev_sync_done(vd, txg);

	/*
	 * It had better be the case that we didn't dirty anything
	 * since vdev_config_sync().
	 */
	ASSERT(txg_list_empty(&dp->dp_dirty_datasets, txg));
	ASSERT(txg_list_empty(&dp->dp_dirty_dirs, txg));
	ASSERT(txg_list_empty(&spa->spa_vdev_txg_list, txg));
	ASSERT(bpl->bpl_queue == NULL);

	spa_config_exit(spa, FTAG);

	/*
	 * If any async tasks have been requested, kick them off.
	 */
	spa_async_dispatch(spa);
}
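
/*
 * In outline, each call to spa_sync() above does the following, in order:
 *
 *	1. push the deferred frees from the previous txg, but only if this
 *	   txg actually has work to do;
 *	2. iterate to convergence, syncing the config object, the spare and
 *	   l2cache lists, the error log, the DSL pool and any dirty vdevs;
 *	3. rewrite the vdev labels and uberblock (vdev_config_sync()) to
 *	   commit the txg;
 *	4. publish the new config, take a stable copy of the uberblock in
 *	   spa_ubsync, clean up the ZIL, and dispatch pending async tasks.
 *
 * spa_sync() is driven once per transaction group by the pool's txg sync
 * machinery; other code normally waits on it indirectly through
 * txg_wait_synced(), as spa_sync_allpools() below does.
 */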

/*
 * Sync all pools.  We don't want to hold the namespace lock across these
 * operations, so we take a reference on the spa_t and drop the lock during the
 * sync.
 */
void
spa_sync_allpools(void)
{
	spa_t *spa = NULL;
	mutex_enter(&spa_namespace_lock);
	while ((spa = spa_next(spa)) != NULL) {
		if (spa_state(spa) != POOL_STATE_ACTIVE)
			continue;
		spa_open_ref(spa, FTAG);
		mutex_exit(&spa_namespace_lock);
		txg_wait_synced(spa_get_dsl(spa), 0);
		mutex_enter(&spa_namespace_lock);
		spa_close(spa, FTAG);
	}
	mutex_exit(&spa_namespace_lock);
}

/*
 * ==========================================================================
 * Miscellaneous routines
 * ==========================================================================
 */

/*
 * Remove all pools in the system.
 */
void
spa_evict_all(void)
{
	spa_t *spa;

	/*
	 * Remove all cached state.  All pools should be closed now,
	 * so every spa in the AVL tree should be unreferenced.
	 */
	mutex_enter(&spa_namespace_lock);
	while ((spa = spa_next(NULL)) != NULL) {
		/*
		 * Stop async tasks.  The async thread may need to detach
		 * a device that's been replaced, which requires grabbing
		 * spa_namespace_lock, so we must drop it here.
		 */
		spa_open_ref(spa, FTAG);
		mutex_exit(&spa_namespace_lock);
		spa_async_suspend(spa);
		mutex_enter(&spa_namespace_lock);
		VERIFY(spa_scrub(spa, POOL_SCRUB_NONE, B_TRUE) == 0);
		spa_close(spa, FTAG);

		if (spa->spa_state != POOL_STATE_UNINITIALIZED) {
			spa_unload(spa);
			spa_deactivate(spa);
		}
		spa_remove(spa);
	}
	mutex_exit(&spa_namespace_lock);
}

vdev_t *
spa_lookup_by_guid(spa_t *spa, uint64_t guid, boolean_t l2cache)
{
	vdev_t *vd;
	int i;

	if ((vd = vdev_lookup_by_guid(spa->spa_root_vdev, guid)) != NULL)
		return (vd);

	if (l2cache) {
		for (i = 0; i < spa->spa_l2cache.sav_count; i++) {
			vd = spa->spa_l2cache.sav_vdevs[i];
			if (vd->vdev_guid == guid)
				return (vd);
		}
	}

	return (NULL);
}

void
spa_upgrade(spa_t *spa, uint64_t version)
{
	spa_config_enter(spa, RW_WRITER, FTAG);

	/*
	 * This should only be called for a non-faulted pool, and since a
	 * future version would result in an unopenable pool, this shouldn't be
	 * possible.
	 */
	ASSERT(spa->spa_uberblock.ub_version <= SPA_VERSION);
	ASSERT(version >= spa->spa_uberblock.ub_version);

	spa->spa_uberblock.ub_version = version;
	vdev_config_dirty(spa->spa_root_vdev);

	spa_config_exit(spa, FTAG);

	txg_wait_synced(spa_get_dsl(spa), 0);
}
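
/*
 * Callers of spa_upgrade() are expected to validate the requested version
 * before getting here; a sketch of the usual check (illustrative, not lifted
 * from a specific caller):
 *
 *	if (version < spa_version(spa) || version > SPA_VERSION)
 *		return (EINVAL);
 *	spa_upgrade(spa, version);
 *
 * spa_upgrade() itself only asserts these invariants, bumps the in-core
 * uberblock version, dirties the config, and waits for the change to be
 * pushed out with the next txg.
 */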

boolean_t
spa_has_spare(spa_t *spa, uint64_t guid)
{
	int i;
	uint64_t spareguid;
	spa_aux_vdev_t *sav = &spa->spa_spares;

	for (i = 0; i < sav->sav_count; i++)
		if (sav->sav_vdevs[i]->vdev_guid == guid)
			return (B_TRUE);

	for (i = 0; i < sav->sav_npending; i++) {
		if (nvlist_lookup_uint64(sav->sav_pending[i], ZPOOL_CONFIG_GUID,
		    &spareguid) == 0 && spareguid == guid)
			return (B_TRUE);
	}

	return (B_FALSE);
}

/*
 * Post a sysevent corresponding to the given event.  The 'name' must be one of
 * the event definitions in sys/sysevent/eventdefs.h.  The payload will be
 * filled in from the spa and (optionally) the vdev.  This doesn't do anything
 * in the userland libzpool, as we don't want consumers to misinterpret ztest
 * or zdb as real changes.
 */
void
spa_event_notify(spa_t *spa, vdev_t *vd, const char *name)
{
#ifdef _KERNEL
	sysevent_t *ev;
	sysevent_attr_list_t *attr = NULL;
	sysevent_value_t value;
	sysevent_id_t eid;

	ev = sysevent_alloc(EC_ZFS, (char *)name, SUNW_KERN_PUB "zfs",
	    SE_SLEEP);

	value.value_type = SE_DATA_TYPE_STRING;
	value.value.sv_string = spa_name(spa);
	if (sysevent_add_attr(&attr, ZFS_EV_POOL_NAME, &value, SE_SLEEP) != 0)
		goto done;

	value.value_type = SE_DATA_TYPE_UINT64;
	value.value.sv_uint64 = spa_guid(spa);
	if (sysevent_add_attr(&attr, ZFS_EV_POOL_GUID, &value, SE_SLEEP) != 0)
		goto done;

	if (vd) {
		value.value_type = SE_DATA_TYPE_UINT64;
		value.value.sv_uint64 = vd->vdev_guid;
		if (sysevent_add_attr(&attr, ZFS_EV_VDEV_GUID, &value,
		    SE_SLEEP) != 0)
			goto done;

		if (vd->vdev_path) {
			value.value_type = SE_DATA_TYPE_STRING;
			value.value.sv_string = vd->vdev_path;
			if (sysevent_add_attr(&attr, ZFS_EV_VDEV_PATH,
			    &value, SE_SLEEP) != 0)
				goto done;
		}
	}

	if (sysevent_attach_attributes(ev, attr) != 0)
		goto done;
	attr = NULL;

	(void) log_sysevent(ev, SE_SLEEP, &eid);

done:
	if (attr)
		sysevent_free_attr(attr);
	sysevent_free(ev);
#endif
}
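
/*
 * Usage sketch (illustrative): code that changes vdev or pool state posts an
 * event by passing one of the ESC_ZFS_* names from sys/sysevent/eventdefs.h,
 * for example:
 *
 *	spa_event_notify(spa, vd, ESC_ZFS_VDEV_REMOVE);
 *
 * The vdev argument may be NULL for pool-wide events, in which case only the
 * pool name and guid attributes are attached to the sysevent.
 */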