/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * This file contains all the routines used when modifying on-disk SPA state.
 * This includes opening, importing, destroying, exporting a pool, and syncing
 * a pool.
 */

#include <sys/zfs_context.h>
#include <sys/fm/fs/zfs.h>
#include <sys/spa_impl.h>
#include <sys/zio.h>
#include <sys/zio_checksum.h>
#include <sys/zio_compress.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/zap.h>
#include <sys/zil.h>
#include <sys/vdev_impl.h>
#include <sys/metaslab.h>
#include <sys/uberblock_impl.h>
#include <sys/txg.h>
#include <sys/avl.h>
#include <sys/dmu_traverse.h>
#include <sys/unique.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_prop.h>
#include <sys/fs/zfs.h>
#include <sys/callb.h>

int zio_taskq_threads = 8;

/*
 * ==========================================================================
 * SPA state manipulation (open/create/destroy/import/export)
 * ==========================================================================
 */

static int
spa_error_entry_compare(const void *a, const void *b)
{
	spa_error_entry_t *sa = (spa_error_entry_t *)a;
	spa_error_entry_t *sb = (spa_error_entry_t *)b;
	int ret;

	ret = bcmp(&sa->se_bookmark, &sb->se_bookmark,
	    sizeof (zbookmark_t));

	if (ret < 0)
		return (-1);
	else if (ret > 0)
		return (1);
	else
		return (0);
}

/*
 * Utility function which retrieves copies of the current logs and
 * re-initializes them in the process.
 */
void
spa_get_errlists(spa_t *spa, avl_tree_t *last, avl_tree_t *scrub)
{
	ASSERT(MUTEX_HELD(&spa->spa_errlist_lock));

	bcopy(&spa->spa_errlist_last, last, sizeof (avl_tree_t));
	bcopy(&spa->spa_errlist_scrub, scrub, sizeof (avl_tree_t));

	avl_create(&spa->spa_errlist_scrub,
	    spa_error_entry_compare, sizeof (spa_error_entry_t),
	    offsetof(spa_error_entry_t, se_avl));
	avl_create(&spa->spa_errlist_last,
	    spa_error_entry_compare, sizeof (spa_error_entry_t),
	    offsetof(spa_error_entry_t, se_avl));
}

/*
 * Activate an uninitialized pool.
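 * Activation allocates the normal metaslab class, creates the per-I/O-type
 * issue and interrupt taskqs, and initializes the locks, lists, and error
 * AVL trees that the rest of the SPA code expects to find in place.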
 */
static void
spa_activate(spa_t *spa)
{
	int t;

	ASSERT(spa->spa_state == POOL_STATE_UNINITIALIZED);

	spa->spa_state = POOL_STATE_ACTIVE;

	spa->spa_normal_class = metaslab_class_create();

	for (t = 0; t < ZIO_TYPES; t++) {
		spa->spa_zio_issue_taskq[t] = taskq_create("spa_zio_issue",
		    zio_taskq_threads, maxclsyspri, 50, INT_MAX,
		    TASKQ_PREPOPULATE);
		spa->spa_zio_intr_taskq[t] = taskq_create("spa_zio_intr",
		    zio_taskq_threads, maxclsyspri, 50, INT_MAX,
		    TASKQ_PREPOPULATE);
	}

	rw_init(&spa->spa_traverse_lock, NULL, RW_DEFAULT, NULL);

	mutex_init(&spa->spa_async_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_config_cache_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_scrub_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_errlog_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_errlist_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_config_lock.scl_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_sync_bplist.bpl_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_history_lock, NULL, MUTEX_DEFAULT, NULL);

	list_create(&spa->spa_dirty_list, sizeof (vdev_t),
	    offsetof(vdev_t, vdev_dirty_node));

	txg_list_create(&spa->spa_vdev_txg_list,
	    offsetof(struct vdev, vdev_txg_node));

	avl_create(&spa->spa_errlist_scrub,
	    spa_error_entry_compare, sizeof (spa_error_entry_t),
	    offsetof(spa_error_entry_t, se_avl));
	avl_create(&spa->spa_errlist_last,
	    spa_error_entry_compare, sizeof (spa_error_entry_t),
	    offsetof(spa_error_entry_t, se_avl));
}

/*
 * Opposite of spa_activate().
 */
static void
spa_deactivate(spa_t *spa)
{
	int t;

	ASSERT(spa->spa_sync_on == B_FALSE);
	ASSERT(spa->spa_dsl_pool == NULL);
	ASSERT(spa->spa_root_vdev == NULL);

	ASSERT(spa->spa_state != POOL_STATE_UNINITIALIZED);

	txg_list_destroy(&spa->spa_vdev_txg_list);

	list_destroy(&spa->spa_dirty_list);

	rw_destroy(&spa->spa_traverse_lock);

	for (t = 0; t < ZIO_TYPES; t++) {
		taskq_destroy(spa->spa_zio_issue_taskq[t]);
		taskq_destroy(spa->spa_zio_intr_taskq[t]);
		spa->spa_zio_issue_taskq[t] = NULL;
		spa->spa_zio_intr_taskq[t] = NULL;
	}

	metaslab_class_destroy(spa->spa_normal_class);
	spa->spa_normal_class = NULL;

	/*
	 * If this was part of an import or the open otherwise failed, we may
	 * still have errors left in the queues.  Empty them just in case.
	 */
	spa_errlog_drain(spa);

	avl_destroy(&spa->spa_errlist_scrub);
	avl_destroy(&spa->spa_errlist_last);

	spa->spa_state = POOL_STATE_UNINITIALIZED;
}

/*
 * Verify a pool configuration, and construct the vdev tree appropriately.  This
 * will create all the necessary vdevs in the appropriate layout, with each vdev
 * in the CLOSED state.  This will prep the pool before open/creation/import.
 * All vdev validation is done by the vdev_alloc() routine.
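 * For interior vdevs we recurse over the ZPOOL_CONFIG_CHILDREN array, so a
 * single call builds (or, on error, frees) the entire subtree rooted at 'nv'.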
 */
static int
spa_config_parse(spa_t *spa, vdev_t **vdp, nvlist_t *nv, vdev_t *parent,
    uint_t id, int atype)
{
	nvlist_t **child;
	uint_t c, children;
	int error;

	if ((error = vdev_alloc(spa, vdp, nv, parent, id, atype)) != 0)
		return (error);

	if ((*vdp)->vdev_ops->vdev_op_leaf)
		return (0);

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) != 0) {
		vdev_free(*vdp);
		*vdp = NULL;
		return (EINVAL);
	}

	for (c = 0; c < children; c++) {
		vdev_t *vd;
		if ((error = spa_config_parse(spa, &vd, child[c], *vdp, c,
		    atype)) != 0) {
			vdev_free(*vdp);
			*vdp = NULL;
			return (error);
		}
	}

	ASSERT(*vdp != NULL);

	return (0);
}

/*
 * Opposite of spa_load().
 */
static void
spa_unload(spa_t *spa)
{
	int i;

	/*
	 * Stop async tasks.
	 */
	spa_async_suspend(spa);

	/*
	 * Stop syncing.
	 */
	if (spa->spa_sync_on) {
		txg_sync_stop(spa->spa_dsl_pool);
		spa->spa_sync_on = B_FALSE;
	}

	/*
	 * Wait for any outstanding prefetch I/O to complete.
	 */
	spa_config_enter(spa, RW_WRITER, FTAG);
	spa_config_exit(spa, FTAG);

	/*
	 * Close the dsl pool.
	 */
	if (spa->spa_dsl_pool) {
		dsl_pool_close(spa->spa_dsl_pool);
		spa->spa_dsl_pool = NULL;
	}

	/*
	 * Close all vdevs.
	 */
	if (spa->spa_root_vdev)
		vdev_free(spa->spa_root_vdev);
	ASSERT(spa->spa_root_vdev == NULL);

	for (i = 0; i < spa->spa_nspares; i++)
		vdev_free(spa->spa_spares[i]);
	if (spa->spa_spares) {
		kmem_free(spa->spa_spares, spa->spa_nspares * sizeof (void *));
		spa->spa_spares = NULL;
	}
	if (spa->spa_sparelist) {
		nvlist_free(spa->spa_sparelist);
		spa->spa_sparelist = NULL;
	}

	spa->spa_async_suspended = 0;
}

/*
 * Load (or re-load) the current list of vdevs describing the active spares for
 * this pool.  When this is called, we have some form of basic information in
 * 'spa_sparelist'.  We parse this into vdevs, try to open them, and then
 * re-generate a more complete list including status information.
 */
static void
spa_load_spares(spa_t *spa)
{
	nvlist_t **spares;
	uint_t nspares;
	int i;

	/*
	 * First, close and free any existing spare vdevs.
	 */
	for (i = 0; i < spa->spa_nspares; i++) {
		vdev_close(spa->spa_spares[i]);
		vdev_free(spa->spa_spares[i]);
	}
	if (spa->spa_spares)
		kmem_free(spa->spa_spares, spa->spa_nspares * sizeof (void *));

	if (spa->spa_sparelist == NULL)
		nspares = 0;
	else
		VERIFY(nvlist_lookup_nvlist_array(spa->spa_sparelist,
		    ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0);

	spa->spa_nspares = (int)nspares;
	spa->spa_spares = NULL;

	if (nspares == 0)
		return;

	/*
	 * Construct the array of vdevs, opening them to get status in the
	 * process.
	 */
	spa->spa_spares = kmem_alloc(nspares * sizeof (void *), KM_SLEEP);
	for (i = 0; i < spa->spa_nspares; i++) {
		vdev_t *vd;

		VERIFY(spa_config_parse(spa, &vd, spares[i], NULL, 0,
		    VDEV_ALLOC_SPARE) == 0);
		ASSERT(vd != NULL);

		spa->spa_spares[i] = vd;

		if (vdev_open(vd) != 0)
			continue;

		vd->vdev_top = vd;
		(void) vdev_validate_spare(vd);
	}

	/*
	 * Recompute the stashed list of spares, with status information
	 * this time.
	 */
	VERIFY(nvlist_remove(spa->spa_sparelist, ZPOOL_CONFIG_SPARES,
	    DATA_TYPE_NVLIST_ARRAY) == 0);

	spares = kmem_alloc(spa->spa_nspares * sizeof (void *), KM_SLEEP);
	for (i = 0; i < spa->spa_nspares; i++)
		spares[i] = vdev_config_generate(spa, spa->spa_spares[i],
		    B_TRUE, B_TRUE);
	VERIFY(nvlist_add_nvlist_array(spa->spa_sparelist, ZPOOL_CONFIG_SPARES,
	    spares, spa->spa_nspares) == 0);
	for (i = 0; i < spa->spa_nspares; i++)
		nvlist_free(spares[i]);
	kmem_free(spares, spa->spa_nspares * sizeof (void *));
}

static int
load_nvlist(spa_t *spa, uint64_t obj, nvlist_t **value)
{
	dmu_buf_t *db;
	char *packed = NULL;
	size_t nvsize = 0;
	int error;
	*value = NULL;

	VERIFY(0 == dmu_bonus_hold(spa->spa_meta_objset, obj, FTAG, &db));
	nvsize = *(uint64_t *)db->db_data;
	dmu_buf_rele(db, FTAG);

	packed = kmem_alloc(nvsize, KM_SLEEP);
	error = dmu_read(spa->spa_meta_objset, obj, 0, nvsize, packed);
	if (error == 0)
		error = nvlist_unpack(packed, nvsize, value, 0);
	kmem_free(packed, nvsize);

	return (error);
}

/*
 * Load an existing storage pool, using the pool's builtin spa_config as a
 * source of configuration information.
 */
static int
spa_load(spa_t *spa, nvlist_t *config, spa_load_state_t state, int mosconfig)
{
	int error = 0;
	nvlist_t *nvroot = NULL;
	vdev_t *rvd;
	uberblock_t *ub = &spa->spa_uberblock;
	uint64_t config_cache_txg = spa->spa_config_txg;
	uint64_t pool_guid;
	uint64_t version;
	zio_t *zio;

	spa->spa_load_state = state;

	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &nvroot) ||
	    nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID, &pool_guid)) {
		error = EINVAL;
		goto out;
	}

	/*
	 * Versioning wasn't explicitly added to the label until later, so if
	 * it's not present treat it as the initial version.
	 */
	if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION, &version) != 0)
		version = ZFS_VERSION_INITIAL;

	(void) nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_TXG,
	    &spa->spa_config_txg);

	if ((state == SPA_LOAD_IMPORT || state == SPA_LOAD_TRYIMPORT) &&
	    spa_guid_exists(pool_guid, 0)) {
		error = EEXIST;
		goto out;
	}

	spa->spa_load_guid = pool_guid;

	/*
	 * Parse the configuration into a vdev tree.  We explicitly set the
	 * value that will be returned by spa_version() since parsing the
	 * configuration requires knowing the version number.
	 */
	spa_config_enter(spa, RW_WRITER, FTAG);
	spa->spa_ubsync.ub_version = version;
	error = spa_config_parse(spa, &rvd, nvroot, NULL, 0, VDEV_ALLOC_LOAD);
	spa_config_exit(spa, FTAG);

	if (error != 0)
		goto out;

	ASSERT(spa->spa_root_vdev == rvd);
	ASSERT(spa_guid(spa) == pool_guid);

	/*
	 * Try to open all vdevs, loading each label in the process.
	 */
	if (vdev_open(rvd) != 0) {
		error = ENXIO;
		goto out;
	}

	/*
	 * Validate the labels for all leaf vdevs.  We need to grab the config
	 * lock because all label I/O is done with the ZIO_FLAG_CONFIG_HELD
	 * flag.
	 */
	spa_config_enter(spa, RW_READER, FTAG);
	error = vdev_validate(rvd);
	spa_config_exit(spa, FTAG);

	if (error != 0) {
		error = EBADF;
		goto out;
	}

	if (rvd->vdev_state <= VDEV_STATE_CANT_OPEN) {
		error = ENXIO;
		goto out;
	}

	/*
	 * Find the best uberblock.
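	 * vdev_uberblock_load() examines the labels on every leaf vdev and
	 * keeps the best (newest) uberblock it finds; if no valid uberblock
	 * exists, ub_txg is left at zero and we fail the load below.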
	 */
	bzero(ub, sizeof (uberblock_t));

	zio = zio_root(spa, NULL, NULL,
	    ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE);
	vdev_uberblock_load(zio, rvd, ub);
	error = zio_wait(zio);

	/*
	 * If we weren't able to find a single valid uberblock, return failure.
	 */
	if (ub->ub_txg == 0) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_CORRUPT_DATA);
		error = ENXIO;
		goto out;
	}

	/*
	 * If the pool is newer than the code, we can't open it.
	 */
	if (ub->ub_version > ZFS_VERSION) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_VERSION_NEWER);
		error = ENOTSUP;
		goto out;
	}

	/*
	 * If the vdev guid sum doesn't match the uberblock, we have an
	 * incomplete configuration.
	 */
	if (rvd->vdev_guid_sum != ub->ub_guid_sum && mosconfig) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_BAD_GUID_SUM);
		error = ENXIO;
		goto out;
	}

	/*
	 * Initialize internal SPA structures.
	 */
	spa->spa_state = POOL_STATE_ACTIVE;
	spa->spa_ubsync = spa->spa_uberblock;
	spa->spa_first_txg = spa_last_synced_txg(spa) + 1;
	error = dsl_pool_open(spa, spa->spa_first_txg, &spa->spa_dsl_pool);
	if (error) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_CORRUPT_DATA);
		goto out;
	}
	spa->spa_meta_objset = spa->spa_dsl_pool->dp_meta_objset;

	if (zap_lookup(spa->spa_meta_objset,
	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_CONFIG,
	    sizeof (uint64_t), 1, &spa->spa_config_object) != 0) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_CORRUPT_DATA);
		error = EIO;
		goto out;
	}

	if (!mosconfig) {
		nvlist_t *newconfig;

		if (load_nvlist(spa, spa->spa_config_object, &newconfig) != 0) {
			vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
			    VDEV_AUX_CORRUPT_DATA);
			error = EIO;
			goto out;
		}

		spa_config_set(spa, newconfig);
		spa_unload(spa);
		spa_deactivate(spa);
		spa_activate(spa);

		return (spa_load(spa, newconfig, state, B_TRUE));
	}

	if (zap_lookup(spa->spa_meta_objset,
	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_SYNC_BPLIST,
	    sizeof (uint64_t), 1, &spa->spa_sync_bplist_obj) != 0) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_CORRUPT_DATA);
		error = EIO;
		goto out;
	}

	/*
	 * Load the bit that tells us to use the new accounting function
	 * (raid-z deflation).  If we have an older pool, this will not
	 * be present.
	 */
	error = zap_lookup(spa->spa_meta_objset,
	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_DEFLATE,
	    sizeof (uint64_t), 1, &spa->spa_deflate);
	if (error != 0 && error != ENOENT) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_CORRUPT_DATA);
		error = EIO;
		goto out;
	}

	/*
	 * Load the persistent error log.  If we have an older pool, this will
	 * not be present.
	 */
	error = zap_lookup(spa->spa_meta_objset,
	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_ERRLOG_LAST,
	    sizeof (uint64_t), 1, &spa->spa_errlog_last);
	if (error != 0 && error != ENOENT) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_CORRUPT_DATA);
		error = EIO;
		goto out;
	}

	error = zap_lookup(spa->spa_meta_objset,
	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_ERRLOG_SCRUB,
	    sizeof (uint64_t), 1, &spa->spa_errlog_scrub);
	if (error != 0 && error != ENOENT) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_CORRUPT_DATA);
		error = EIO;
		goto out;
	}

	/*
	 * Load the history object.  If we have an older pool, this
	 * will not be present.
	 */
	error = zap_lookup(spa->spa_meta_objset,
	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_HISTORY,
	    sizeof (uint64_t), 1, &spa->spa_history);
	if (error != 0 && error != ENOENT) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_CORRUPT_DATA);
		error = EIO;
		goto out;
	}

	/*
	 * Load any hot spares for this pool.
	 */
	error = zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_SPARES, sizeof (uint64_t), 1, &spa->spa_spares_object);
	if (error != 0 && error != ENOENT) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_CORRUPT_DATA);
		error = EIO;
		goto out;
	}
	if (error == 0) {
		ASSERT(spa_version(spa) >= ZFS_VERSION_SPARES);
		if (load_nvlist(spa, spa->spa_spares_object,
		    &spa->spa_sparelist) != 0) {
			vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
			    VDEV_AUX_CORRUPT_DATA);
			error = EIO;
			goto out;
		}

		spa_config_enter(spa, RW_WRITER, FTAG);
		spa_load_spares(spa);
		spa_config_exit(spa, FTAG);
	}

	/*
	 * Load the vdev state for all toplevel vdevs.
	 */
	vdev_load(rvd);

	/*
	 * Propagate the leaf DTLs we just loaded all the way up the tree.
	 */
	spa_config_enter(spa, RW_WRITER, FTAG);
	vdev_dtl_reassess(rvd, 0, 0, B_FALSE);
	spa_config_exit(spa, FTAG);

	/*
	 * Check the state of the root vdev.  If it can't be opened, it
	 * indicates one or more toplevel vdevs are faulted.
	 */
	if (rvd->vdev_state <= VDEV_STATE_CANT_OPEN) {
		error = ENXIO;
		goto out;
	}

	if ((spa_mode & FWRITE) && state != SPA_LOAD_TRYIMPORT) {
		dmu_tx_t *tx;
		int need_update = B_FALSE;
		int c;

		/*
		 * Claim log blocks that haven't been committed yet.
		 * This must all happen in a single txg.
		 */
		tx = dmu_tx_create_assigned(spa_get_dsl(spa),
		    spa_first_txg(spa));
		(void) dmu_objset_find(spa->spa_name,
		    zil_claim, tx, DS_FIND_CHILDREN);
		dmu_tx_commit(tx);

		spa->spa_sync_on = B_TRUE;
		txg_sync_start(spa->spa_dsl_pool);

		/*
		 * Wait for all claims to sync.
		 */
		txg_wait_synced(spa->spa_dsl_pool, 0);

		/*
		 * If the config cache is stale, or we have uninitialized
		 * metaslabs (see spa_vdev_add()), then update the config.
		 */
		if (config_cache_txg != spa->spa_config_txg ||
		    state == SPA_LOAD_IMPORT)
			need_update = B_TRUE;

		for (c = 0; c < rvd->vdev_children; c++)
			if (rvd->vdev_child[c]->vdev_ms_array == 0)
				need_update = B_TRUE;

		/*
		 * Update the config cache asynchronously in case we're the
		 * root pool, in which case the config cache isn't writable yet.
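		 * SPA_ASYNC_CONFIG_UPDATE hands the rewrite off to the async
		 * task thread rather than doing it here.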
		 */
		if (need_update)
			spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE);
	}

	error = 0;
out:
	if (error && error != EBADF)
		zfs_ereport_post(FM_EREPORT_ZFS_POOL, spa, NULL, NULL, 0, 0);
	spa->spa_load_state = SPA_LOAD_NONE;
	spa->spa_ena = 0;

	return (error);
}

/*
 * Pool Open/Import
 *
 * The import case is identical to an open except that the configuration is sent
 * down from userland, instead of grabbed from the configuration cache.  For the
 * case of an open, the pool configuration will exist in the
 * POOL_STATE_UNINITIALIZED state.
 *
 * The stats information (gen/count/ustats) is used to gather vdev statistics at
 * the same time as opening the pool, without having to keep around the spa_t in
 * some ambiguous state.
 */
static int
spa_open_common(const char *pool, spa_t **spapp, void *tag, nvlist_t **config)
{
	spa_t *spa;
	int error;
	int loaded = B_FALSE;
	int locked = B_FALSE;

	*spapp = NULL;

	/*
	 * As disgusting as this is, we need to support recursive calls to this
	 * function because dsl_dir_open() is called during spa_load(), and ends
	 * up calling spa_open() again.  The real fix is to figure out how to
	 * avoid dsl_dir_open() calling this in the first place.
	 */
	if (mutex_owner(&spa_namespace_lock) != curthread) {
		mutex_enter(&spa_namespace_lock);
		locked = B_TRUE;
	}

	if ((spa = spa_lookup(pool)) == NULL) {
		if (locked)
			mutex_exit(&spa_namespace_lock);
		return (ENOENT);
	}
	if (spa->spa_state == POOL_STATE_UNINITIALIZED) {

		spa_activate(spa);

		error = spa_load(spa, spa->spa_config, SPA_LOAD_OPEN, B_FALSE);

		if (error == EBADF) {
			/*
			 * If vdev_validate() returns failure (indicated by
			 * EBADF), it means that one of the vdevs indicates
			 * that the pool has been exported or destroyed.  If
			 * this is the case, the config cache is out of sync and
			 * we should remove the pool from the namespace.
			 */
			zfs_post_ok(spa, NULL);
			spa_unload(spa);
			spa_deactivate(spa);
			spa_remove(spa);
			spa_config_sync();
			if (locked)
				mutex_exit(&spa_namespace_lock);
			return (ENOENT);
		}

		if (error) {
			/*
			 * We can't open the pool, but we still have useful
			 * information: the state of each vdev after the
			 * attempted vdev_open().  Return this to the user.
			 */
			if (config != NULL && spa->spa_root_vdev != NULL) {
				spa_config_enter(spa, RW_READER, FTAG);
				*config = spa_config_generate(spa, NULL, -1ULL,
				    B_TRUE);
				spa_config_exit(spa, FTAG);
			}
			spa_unload(spa);
			spa_deactivate(spa);
			spa->spa_last_open_failed = B_TRUE;
			if (locked)
				mutex_exit(&spa_namespace_lock);
			*spapp = NULL;
			return (error);
		} else {
			zfs_post_ok(spa, NULL);
			spa->spa_last_open_failed = B_FALSE;
		}

		loaded = B_TRUE;
	}

	spa_open_ref(spa, tag);
	if (locked)
		mutex_exit(&spa_namespace_lock);

	*spapp = spa;

	if (config != NULL) {
		spa_config_enter(spa, RW_READER, FTAG);
		*config = spa_config_generate(spa, NULL, -1ULL, B_TRUE);
		spa_config_exit(spa, FTAG);
	}

	/*
	 * If we just loaded the pool, resilver anything that's out of date.
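	 * This only applies when the pool is writable; read-only opens skip
	 * the resilver entirely.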
	 */
	if (loaded && (spa_mode & FWRITE))
		VERIFY(spa_scrub(spa, POOL_SCRUB_RESILVER, B_TRUE) == 0);

	return (0);
}

int
spa_open(const char *name, spa_t **spapp, void *tag)
{
	return (spa_open_common(name, spapp, tag, NULL));
}

/*
 * Lookup the given spa_t, incrementing the inject count in the process,
 * preventing it from being exported or destroyed.
 */
spa_t *
spa_inject_addref(char *name)
{
	spa_t *spa;

	mutex_enter(&spa_namespace_lock);
	if ((spa = spa_lookup(name)) == NULL) {
		mutex_exit(&spa_namespace_lock);
		return (NULL);
	}
	spa->spa_inject_ref++;
	mutex_exit(&spa_namespace_lock);

	return (spa);
}

void
spa_inject_delref(spa_t *spa)
{
	mutex_enter(&spa_namespace_lock);
	spa->spa_inject_ref--;
	mutex_exit(&spa_namespace_lock);
}

static void
spa_add_spares(spa_t *spa, nvlist_t *config)
{
	nvlist_t **spares;
	uint_t i, nspares;
	nvlist_t *nvroot;
	uint64_t guid;
	vdev_stat_t *vs;
	uint_t vsc;

	if (spa->spa_nspares == 0)
		return;

	VERIFY(nvlist_lookup_nvlist(config,
	    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
	VERIFY(nvlist_lookup_nvlist_array(spa->spa_sparelist,
	    ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0);
	if (nspares != 0) {
		VERIFY(nvlist_add_nvlist_array(nvroot,
		    ZPOOL_CONFIG_SPARES, spares, nspares) == 0);
		VERIFY(nvlist_lookup_nvlist_array(nvroot,
		    ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0);

		/*
		 * Go through and find any spares which have since been
		 * repurposed as an active spare.  If this is the case, update
		 * their status appropriately.
		 */
		for (i = 0; i < nspares; i++) {
			VERIFY(nvlist_lookup_uint64(spares[i],
			    ZPOOL_CONFIG_GUID, &guid) == 0);
			if (spa_spare_inuse(guid)) {
				VERIFY(nvlist_lookup_uint64_array(
				    spares[i], ZPOOL_CONFIG_STATS,
				    (uint64_t **)&vs, &vsc) == 0);
				vs->vs_state = VDEV_STATE_CANT_OPEN;
				vs->vs_aux = VDEV_AUX_SPARED;
			}
		}
	}
}

int
spa_get_stats(const char *name, nvlist_t **config, char *altroot, size_t buflen)
{
	int error;
	spa_t *spa;

	*config = NULL;
	error = spa_open_common(name, &spa, FTAG, config);

	if (spa && *config != NULL) {
		VERIFY(nvlist_add_uint64(*config, ZPOOL_CONFIG_ERRCOUNT,
		    spa_get_errlog_size(spa)) == 0);

		spa_add_spares(spa, *config);
	}

	/*
	 * We want to get the alternate root even for faulted pools, so we cheat
	 * and call spa_lookup() directly.
	 */
	if (altroot) {
		if (spa == NULL) {
			mutex_enter(&spa_namespace_lock);
			spa = spa_lookup(name);
			if (spa)
				spa_altroot(spa, altroot, buflen);
			else
				altroot[0] = '\0';
			spa = NULL;
			mutex_exit(&spa_namespace_lock);
		} else {
			spa_altroot(spa, altroot, buflen);
		}
	}

	if (spa != NULL)
		spa_close(spa, FTAG);

	return (error);
}

/*
 * Validate that the 'spares' array is well formed.  We must have an array of
 * nvlists, each of which describes a valid leaf vdev.
 */
static int
spa_validate_spares(spa_t *spa, nvlist_t *nvroot, uint64_t crtxg, int mode)
{
	nvlist_t **spares;
	uint_t i, nspares;
	vdev_t *vd;
	int error;

	/*
	 * It's acceptable to have no spares specified.
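	 * A missing ZPOOL_CONFIG_SPARES entry simply means no spares; an
	 * entry that is present but empty is rejected below.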
	 */
	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
	    &spares, &nspares) != 0)
		return (0);

	if (nspares == 0)
		return (EINVAL);

	/*
	 * Make sure the pool is formatted with a version that supports hot
	 * spares.
	 */
	if (spa_version(spa) < ZFS_VERSION_SPARES)
		return (ENOTSUP);

	for (i = 0; i < nspares; i++) {
		if ((error = spa_config_parse(spa, &vd, spares[i], NULL, 0,
		    mode)) != 0)
			return (error);

		if (!vd->vdev_ops->vdev_op_leaf) {
			vdev_free(vd);
			return (EINVAL);
		}

		if ((error = vdev_open(vd)) != 0) {
			vdev_free(vd);
			return (error);
		}

		vd->vdev_top = vd;
		if ((error = vdev_label_spare(vd, crtxg)) != 0) {
			vdev_free(vd);
			return (error);
		}

		VERIFY(nvlist_add_uint64(spares[i], ZPOOL_CONFIG_GUID,
		    vd->vdev_guid) == 0);

		vdev_free(vd);
	}

	return (0);
}

/*
 * Pool Creation
 */
int
spa_create(const char *pool, nvlist_t *nvroot, const char *altroot)
{
	spa_t *spa;
	vdev_t *rvd;
	dsl_pool_t *dp;
	dmu_tx_t *tx;
	int c, error = 0;
	uint64_t txg = TXG_INITIAL;
	nvlist_t **spares;
	uint_t nspares;

	/*
	 * If this pool already exists, return failure.
	 */
	mutex_enter(&spa_namespace_lock);
	if (spa_lookup(pool) != NULL) {
		mutex_exit(&spa_namespace_lock);
		return (EEXIST);
	}

	/*
	 * Allocate a new spa_t structure.
	 */
	spa = spa_add(pool, altroot);
	spa_activate(spa);

	spa->spa_uberblock.ub_txg = txg - 1;
	spa->spa_uberblock.ub_version = ZFS_VERSION;
	spa->spa_ubsync = spa->spa_uberblock;

	/*
	 * Create the root vdev.
	 */
	spa_config_enter(spa, RW_WRITER, FTAG);

	error = spa_config_parse(spa, &rvd, nvroot, NULL, 0, VDEV_ALLOC_ADD);

	ASSERT(error != 0 || rvd != NULL);
	ASSERT(error != 0 || spa->spa_root_vdev == rvd);

	if (error == 0 && rvd->vdev_children == 0)
		error = EINVAL;

	if (error == 0 &&
	    (error = vdev_create(rvd, txg, B_FALSE)) == 0 &&
	    (error = spa_validate_spares(spa, nvroot, txg,
	    VDEV_ALLOC_ADD)) == 0) {
		for (c = 0; c < rvd->vdev_children; c++)
			vdev_init(rvd->vdev_child[c], txg);
		vdev_config_dirty(rvd);
	}

	spa_config_exit(spa, FTAG);

	if (error != 0) {
		spa_unload(spa);
		spa_deactivate(spa);
		spa_remove(spa);
		mutex_exit(&spa_namespace_lock);
		return (error);
	}

	/*
	 * Get the list of spares, if specified.
	 */
	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
	    &spares, &nspares) == 0) {
		VERIFY(nvlist_alloc(&spa->spa_sparelist, NV_UNIQUE_NAME,
		    KM_SLEEP) == 0);
		VERIFY(nvlist_add_nvlist_array(spa->spa_sparelist,
		    ZPOOL_CONFIG_SPARES, spares, nspares) == 0);
		spa_config_enter(spa, RW_WRITER, FTAG);
		spa_load_spares(spa);
		spa_config_exit(spa, FTAG);
		spa->spa_sync_spares = B_TRUE;
	}

	spa->spa_dsl_pool = dp = dsl_pool_create(spa, txg);
	spa->spa_meta_objset = dp->dp_meta_objset;

	tx = dmu_tx_create_assigned(dp, txg);

	/*
	 * Create the pool config object.
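	 * The config is stored as a packed nvlist in the meta-objset and
	 * referenced from the pool directory, so it can be found again at
	 * import time.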
	 */
	spa->spa_config_object = dmu_object_alloc(spa->spa_meta_objset,
	    DMU_OT_PACKED_NVLIST, 1 << 14,
	    DMU_OT_PACKED_NVLIST_SIZE, sizeof (uint64_t), tx);

	if (zap_add(spa->spa_meta_objset,
	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_CONFIG,
	    sizeof (uint64_t), 1, &spa->spa_config_object, tx) != 0) {
		cmn_err(CE_PANIC, "failed to add pool config");
	}

	/* Newly created pools are always deflated. */
	spa->spa_deflate = TRUE;
	if (zap_add(spa->spa_meta_objset,
	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_DEFLATE,
	    sizeof (uint64_t), 1, &spa->spa_deflate, tx) != 0) {
		cmn_err(CE_PANIC, "failed to add deflate");
	}

	/*
	 * Create the deferred-free bplist object.  Turn off compression
	 * because sync-to-convergence takes longer if the blocksize
	 * keeps changing.
	 */
	spa->spa_sync_bplist_obj = bplist_create(spa->spa_meta_objset,
	    1 << 14, tx);
	dmu_object_set_compress(spa->spa_meta_objset, spa->spa_sync_bplist_obj,
	    ZIO_COMPRESS_OFF, tx);

	if (zap_add(spa->spa_meta_objset,
	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_SYNC_BPLIST,
	    sizeof (uint64_t), 1, &spa->spa_sync_bplist_obj, tx) != 0) {
		cmn_err(CE_PANIC, "failed to add bplist");
	}

	/*
	 * Create the pool's history object.
	 */
	spa_history_create_obj(spa, tx);

	dmu_tx_commit(tx);

	spa->spa_sync_on = B_TRUE;
	txg_sync_start(spa->spa_dsl_pool);

	/*
	 * We explicitly wait for the first transaction to complete so that our
	 * bean counters are appropriately updated.
	 */
	txg_wait_synced(spa->spa_dsl_pool, txg);

	spa_config_sync();

	mutex_exit(&spa_namespace_lock);

	return (0);
}

/*
 * Import the given pool into the system.  We set up the necessary spa_t and
 * then call spa_load() to do the dirty work.
 */
int
spa_import(const char *pool, nvlist_t *config, const char *altroot)
{
	spa_t *spa;
	int error;
	nvlist_t *nvroot;
	nvlist_t **spares;
	uint_t nspares;

	if (!(spa_mode & FWRITE))
		return (EROFS);

	/*
	 * If a pool with this name exists, return failure.
	 */
	mutex_enter(&spa_namespace_lock);
	if (spa_lookup(pool) != NULL) {
		mutex_exit(&spa_namespace_lock);
		return (EEXIST);
	}

	/*
	 * Create and initialize the spa structure.
	 */
	spa = spa_add(pool, altroot);
	spa_activate(spa);

	/*
	 * Pass off the heavy lifting to spa_load().
	 * Pass TRUE for mosconfig because the user-supplied config
	 * is actually the one to trust when doing an import.
	 */
	error = spa_load(spa, config, SPA_LOAD_IMPORT, B_TRUE);

	spa_config_enter(spa, RW_WRITER, FTAG);
	/*
	 * Toss any existing sparelist, as it doesn't have any validity anymore,
	 * and conflicts with spa_has_spare().
	 */
	if (spa->spa_sparelist) {
		nvlist_free(spa->spa_sparelist);
		spa->spa_sparelist = NULL;
		spa_load_spares(spa);
	}

	VERIFY(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
	    &nvroot) == 0);
	if (error == 0)
		error = spa_validate_spares(spa, nvroot, -1ULL,
		    VDEV_ALLOC_SPARE);
	spa_config_exit(spa, FTAG);

	if (error != 0) {
		spa_unload(spa);
		spa_deactivate(spa);
		spa_remove(spa);
		mutex_exit(&spa_namespace_lock);
		return (error);
	}

	/*
	 * Override any spares as specified by the user, as these may have
	 * correct device names/devids, etc.
	 */
	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
	    &spares, &nspares) == 0) {
		if (spa->spa_sparelist)
			VERIFY(nvlist_remove(spa->spa_sparelist,
			    ZPOOL_CONFIG_SPARES, DATA_TYPE_NVLIST_ARRAY) == 0);
		else
			VERIFY(nvlist_alloc(&spa->spa_sparelist,
			    NV_UNIQUE_NAME, KM_SLEEP) == 0);
		VERIFY(nvlist_add_nvlist_array(spa->spa_sparelist,
		    ZPOOL_CONFIG_SPARES, spares, nspares) == 0);
		spa_config_enter(spa, RW_WRITER, FTAG);
		spa_load_spares(spa);
		spa_config_exit(spa, FTAG);
		spa->spa_sync_spares = B_TRUE;
	}

	/*
	 * Update the config cache to include the newly-imported pool.
	 */
	spa_config_update(spa, SPA_CONFIG_UPDATE_POOL);

	mutex_exit(&spa_namespace_lock);

	/*
	 * Resilver anything that's out of date.
	 */
	if (spa_mode & FWRITE)
		VERIFY(spa_scrub(spa, POOL_SCRUB_RESILVER, B_TRUE) == 0);

	return (0);
}

/*
 * This (illegal) pool name is used when temporarily importing a spa_t in order
 * to get the vdev stats associated with the imported devices.
 */
#define	TRYIMPORT_NAME	"$import"

nvlist_t *
spa_tryimport(nvlist_t *tryconfig)
{
	nvlist_t *config = NULL;
	char *poolname;
	spa_t *spa;
	uint64_t state;

	if (nvlist_lookup_string(tryconfig, ZPOOL_CONFIG_POOL_NAME, &poolname))
		return (NULL);

	if (nvlist_lookup_uint64(tryconfig, ZPOOL_CONFIG_POOL_STATE, &state))
		return (NULL);

	/*
	 * Create and initialize the spa structure.
	 */
	mutex_enter(&spa_namespace_lock);
	spa = spa_add(TRYIMPORT_NAME, NULL);
	spa_activate(spa);

	/*
	 * Pass off the heavy lifting to spa_load().
	 * Pass TRUE for mosconfig because the user-supplied config
	 * is actually the one to trust when doing an import.
	 */
	(void) spa_load(spa, tryconfig, SPA_LOAD_TRYIMPORT, B_TRUE);

	/*
	 * If 'tryconfig' was at least parsable, return the current config.
	 */
	if (spa->spa_root_vdev != NULL) {
		spa_config_enter(spa, RW_READER, FTAG);
		config = spa_config_generate(spa, NULL, -1ULL, B_TRUE);
		spa_config_exit(spa, FTAG);
		VERIFY(nvlist_add_string(config, ZPOOL_CONFIG_POOL_NAME,
		    poolname) == 0);
		VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_POOL_STATE,
		    state) == 0);

		/*
		 * Add the list of hot spares.
		 */
		spa_add_spares(spa, config);
	}

	spa_unload(spa);
	spa_deactivate(spa);
	spa_remove(spa);
	mutex_exit(&spa_namespace_lock);

	return (config);
}

/*
 * Pool export/destroy
 *
 * The act of destroying or exporting a pool is very simple.  We make sure there
 * is no more pending I/O and any references to the pool are gone.  Then, we
 * update the pool state and sync all the labels to disk, removing the
 * configuration from the cache afterwards.
 */
static int
spa_export_common(char *pool, int new_state, nvlist_t **oldconfig)
{
	spa_t *spa;

	if (oldconfig)
		*oldconfig = NULL;

	if (!(spa_mode & FWRITE))
		return (EROFS);

	mutex_enter(&spa_namespace_lock);
	if ((spa = spa_lookup(pool)) == NULL) {
		mutex_exit(&spa_namespace_lock);
		return (ENOENT);
	}

	/*
	 * Put a hold on the pool, drop the namespace lock, stop async tasks,
	 * reacquire the namespace lock, and see if we can export.
	 */
	spa_open_ref(spa, FTAG);
	mutex_exit(&spa_namespace_lock);
	spa_async_suspend(spa);
	mutex_enter(&spa_namespace_lock);
	spa_close(spa, FTAG);

	/*
	 * The pool will be in core if it's openable,
	 * in which case we can modify its state.
	 */
	if (spa->spa_state != POOL_STATE_UNINITIALIZED && spa->spa_sync_on) {
		/*
		 * Objsets may be open only because they're dirty, so we
		 * have to force it to sync before checking spa_refcnt.
		 */
		spa_scrub_suspend(spa);
		txg_wait_synced(spa->spa_dsl_pool, 0);

		/*
		 * A pool cannot be exported or destroyed if there are active
		 * references.  If we are resetting a pool, allow references by
		 * fault injection handlers.
		 */
		if (!spa_refcount_zero(spa) ||
		    (spa->spa_inject_ref != 0 &&
		    new_state != POOL_STATE_UNINITIALIZED)) {
			spa_scrub_resume(spa);
			spa_async_resume(spa);
			mutex_exit(&spa_namespace_lock);
			return (EBUSY);
		}

		spa_scrub_resume(spa);
		VERIFY(spa_scrub(spa, POOL_SCRUB_NONE, B_TRUE) == 0);

		/*
		 * We want this to be reflected on every label,
		 * so mark them all dirty.  spa_unload() will do the
		 * final sync that pushes these changes out.
		 */
		if (new_state != POOL_STATE_UNINITIALIZED) {
			spa_config_enter(spa, RW_WRITER, FTAG);
			spa->spa_state = new_state;
			spa->spa_final_txg = spa_last_synced_txg(spa) + 1;
			vdev_config_dirty(spa->spa_root_vdev);
			spa_config_exit(spa, FTAG);
		}
	}

	if (spa->spa_state != POOL_STATE_UNINITIALIZED) {
		spa_unload(spa);
		spa_deactivate(spa);
	}

	if (oldconfig && spa->spa_config)
		VERIFY(nvlist_dup(spa->spa_config, oldconfig, 0) == 0);

	if (new_state != POOL_STATE_UNINITIALIZED) {
		spa_remove(spa);
		spa_config_sync();
	}
	mutex_exit(&spa_namespace_lock);

	return (0);
}

/*
 * Destroy a storage pool.
 */
int
spa_destroy(char *pool)
{
	return (spa_export_common(pool, POOL_STATE_DESTROYED, NULL));
}

/*
 * Export a storage pool.
 */
int
spa_export(char *pool, nvlist_t **oldconfig)
{
	return (spa_export_common(pool, POOL_STATE_EXPORTED, oldconfig));
}

/*
 * Similar to spa_export(), this unloads the spa_t without actually removing it
 * from the namespace in any way.
 */
int
spa_reset(char *pool)
{
	return (spa_export_common(pool, POOL_STATE_UNINITIALIZED, NULL));
}


/*
 * ==========================================================================
 * Device manipulation
 * ==========================================================================
 */

/*
 * Add capacity to a storage pool.
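 * The supplied nvroot may contain new top-level vdevs, new hot spares, or
 * both; either set may be empty, but not both at once.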
 */
int
spa_vdev_add(spa_t *spa, nvlist_t *nvroot)
{
	uint64_t txg;
	int c, error;
	vdev_t *rvd = spa->spa_root_vdev;
	vdev_t *vd, *tvd;
	nvlist_t **spares;
	uint_t i, nspares;

	txg = spa_vdev_enter(spa);

	if ((error = spa_config_parse(spa, &vd, nvroot, NULL, 0,
	    VDEV_ALLOC_ADD)) != 0)
		return (spa_vdev_exit(spa, NULL, txg, error));

	if ((error = spa_validate_spares(spa, nvroot, txg,
	    VDEV_ALLOC_ADD)) != 0)
		return (spa_vdev_exit(spa, vd, txg, error));

	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
	    &spares, &nspares) != 0)
		nspares = 0;

	if (vd->vdev_children == 0 && nspares == 0)
		return (spa_vdev_exit(spa, vd, txg, EINVAL));

	if (vd->vdev_children != 0) {
		if ((error = vdev_create(vd, txg, B_FALSE)) != 0)
			return (spa_vdev_exit(spa, vd, txg, error));

		/*
		 * Transfer each new top-level vdev from vd to rvd.
		 */
		for (c = 0; c < vd->vdev_children; c++) {
			tvd = vd->vdev_child[c];
			vdev_remove_child(vd, tvd);
			tvd->vdev_id = rvd->vdev_children;
			vdev_add_child(rvd, tvd);
			vdev_config_dirty(tvd);
		}
	}

	if (nspares != 0) {
		if (spa->spa_sparelist != NULL) {
			nvlist_t **oldspares;
			uint_t oldnspares;
			nvlist_t **newspares;

			VERIFY(nvlist_lookup_nvlist_array(spa->spa_sparelist,
			    ZPOOL_CONFIG_SPARES, &oldspares, &oldnspares) == 0);

			newspares = kmem_alloc(sizeof (void *) *
			    (nspares + oldnspares), KM_SLEEP);
			for (i = 0; i < oldnspares; i++)
				VERIFY(nvlist_dup(oldspares[i],
				    &newspares[i], KM_SLEEP) == 0);
			for (i = 0; i < nspares; i++)
				VERIFY(nvlist_dup(spares[i],
				    &newspares[i + oldnspares],
				    KM_SLEEP) == 0);

			VERIFY(nvlist_remove(spa->spa_sparelist,
			    ZPOOL_CONFIG_SPARES, DATA_TYPE_NVLIST_ARRAY) == 0);

			VERIFY(nvlist_add_nvlist_array(spa->spa_sparelist,
			    ZPOOL_CONFIG_SPARES, newspares,
			    nspares + oldnspares) == 0);
			for (i = 0; i < oldnspares + nspares; i++)
				nvlist_free(newspares[i]);
			kmem_free(newspares, (oldnspares + nspares) *
			    sizeof (void *));
		} else {
			VERIFY(nvlist_alloc(&spa->spa_sparelist,
			    NV_UNIQUE_NAME, KM_SLEEP) == 0);
			VERIFY(nvlist_add_nvlist_array(spa->spa_sparelist,
			    ZPOOL_CONFIG_SPARES, spares, nspares) == 0);
		}

		spa_load_spares(spa);
		spa->spa_sync_spares = B_TRUE;
	}

	/*
	 * We have to be careful when adding new vdevs to an existing pool.
	 * If other threads start allocating from these vdevs before we
	 * sync the config cache, and we lose power, then upon reboot we may
	 * fail to open the pool because there are DVAs that the config cache
	 * can't translate.  Therefore, we first add the vdevs without
	 * initializing metaslabs; sync the config cache (via spa_vdev_exit());
	 * and then let spa_config_update() initialize the new metaslabs.
	 *
	 * spa_load() checks for added-but-not-initialized vdevs, so that
	 * if we lose power at any point in this sequence, the remaining
	 * steps will be completed the next time we load the pool.
	 */
	(void) spa_vdev_exit(spa, vd, txg, 0);

	mutex_enter(&spa_namespace_lock);
	spa_config_update(spa, SPA_CONFIG_UPDATE_POOL);
	mutex_exit(&spa_namespace_lock);

	return (0);
}

/*
 * Attach a device to a mirror.  The arguments are the path to any device
 * in the mirror, and the nvroot for the new device.  If the path specifies
 * a device that is not mirrored, we automatically insert the mirror vdev.
 *
 * If 'replacing' is specified, the new device is intended to replace the
 * existing device; in this case the two devices are made into their own
 * mirror using the 'replacing' vdev, which is functionally identical to
 * the mirror vdev (it actually reuses all the same ops) but has a few
 * extra rules: you can't attach to it after it's been created, and upon
 * completion of resilvering, the first disk (the one being replaced)
 * is automatically detached.
 */
int
spa_vdev_attach(spa_t *spa, uint64_t guid, nvlist_t *nvroot, int replacing)
{
	uint64_t txg, open_txg;
	int error;
	vdev_t *rvd = spa->spa_root_vdev;
	vdev_t *oldvd, *newvd, *newrootvd, *pvd, *tvd;
	vdev_ops_t *pvops;

	txg = spa_vdev_enter(spa);

	oldvd = vdev_lookup_by_guid(rvd, guid);

	if (oldvd == NULL)
		return (spa_vdev_exit(spa, NULL, txg, ENODEV));

	if (!oldvd->vdev_ops->vdev_op_leaf)
		return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));

	pvd = oldvd->vdev_parent;

	if ((error = spa_config_parse(spa, &newrootvd, nvroot, NULL, 0,
	    VDEV_ALLOC_ADD)) != 0 || newrootvd->vdev_children != 1)
		return (spa_vdev_exit(spa, newrootvd, txg, EINVAL));

	newvd = newrootvd->vdev_child[0];

	if (!newvd->vdev_ops->vdev_op_leaf)
		return (spa_vdev_exit(spa, newrootvd, txg, EINVAL));

	if ((error = vdev_create(newrootvd, txg, replacing)) != 0)
		return (spa_vdev_exit(spa, newrootvd, txg, error));

	if (!replacing) {
		/*
		 * For attach, the only allowable parent is a mirror or the root
		 * vdev.
		 */
		if (pvd->vdev_ops != &vdev_mirror_ops &&
		    pvd->vdev_ops != &vdev_root_ops)
			return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));

		pvops = &vdev_mirror_ops;
	} else {
		/*
		 * Active hot spares can only be replaced by inactive hot
		 * spares.
		 */
		if (pvd->vdev_ops == &vdev_spare_ops &&
		    pvd->vdev_child[1] == oldvd &&
		    !spa_has_spare(spa, newvd->vdev_guid))
			return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));

		/*
		 * If the source is a hot spare, and the parent isn't already a
		 * spare, then we want to create a new hot spare.  Otherwise, we
		 * want to create a replacing vdev.
		 */
		if (pvd->vdev_ops == &vdev_replacing_ops)
			return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
		else if (pvd->vdev_ops != &vdev_spare_ops &&
		    newvd->vdev_isspare)
			pvops = &vdev_spare_ops;
		else
			pvops = &vdev_replacing_ops;
	}

	/*
	 * Compare the new device size with the replaceable/attachable
	 * device size.
	 */
	if (newvd->vdev_psize < vdev_get_rsize(oldvd))
		return (spa_vdev_exit(spa, newrootvd, txg, EOVERFLOW));

	/*
	 * The new device cannot have a higher alignment requirement
	 * than the top-level vdev.
	 */
	if (newvd->vdev_ashift > oldvd->vdev_top->vdev_ashift)
		return (spa_vdev_exit(spa, newrootvd, txg, EDOM));

	/*
	 * If this is an in-place replacement, update oldvd's path and devid
	 * to make it distinguishable from newvd, and unopenable from now on.
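	 * The old path simply gets a "/old" suffix appended to it.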
	 */
	if (strcmp(oldvd->vdev_path, newvd->vdev_path) == 0) {
		spa_strfree(oldvd->vdev_path);
		oldvd->vdev_path = kmem_alloc(strlen(newvd->vdev_path) + 5,
		    KM_SLEEP);
		(void) sprintf(oldvd->vdev_path, "%s/%s",
		    newvd->vdev_path, "old");
		if (oldvd->vdev_devid != NULL) {
			spa_strfree(oldvd->vdev_devid);
			oldvd->vdev_devid = NULL;
		}
	}

	/*
	 * If the parent is not a mirror, or if we're replacing, insert the new
	 * mirror/replacing/spare vdev above oldvd.
	 */
	if (pvd->vdev_ops != pvops)
		pvd = vdev_add_parent(oldvd, pvops);

	ASSERT(pvd->vdev_top->vdev_parent == rvd);
	ASSERT(pvd->vdev_ops == pvops);
	ASSERT(oldvd->vdev_parent == pvd);

	/*
	 * Extract the new device from its root and add it to pvd.
	 */
	vdev_remove_child(newrootvd, newvd);
	newvd->vdev_id = pvd->vdev_children;
	vdev_add_child(pvd, newvd);

	/*
	 * If newvd is smaller than oldvd, but larger than its rsize,
	 * the addition of newvd may have decreased our parent's asize.
	 */
	pvd->vdev_asize = MIN(pvd->vdev_asize, newvd->vdev_asize);

	tvd = newvd->vdev_top;
	ASSERT(pvd->vdev_top == tvd);
	ASSERT(tvd->vdev_parent == rvd);

	vdev_config_dirty(tvd);

	/*
	 * Set newvd's DTL to [TXG_INITIAL, open_txg].  It will propagate
	 * upward when spa_vdev_exit() calls vdev_dtl_reassess().
	 */
	open_txg = txg + TXG_CONCURRENT_STATES - 1;

	mutex_enter(&newvd->vdev_dtl_lock);
	space_map_add(&newvd->vdev_dtl_map, TXG_INITIAL,
	    open_txg - TXG_INITIAL + 1);
	mutex_exit(&newvd->vdev_dtl_lock);

	dprintf("attached %s in txg %llu\n", newvd->vdev_path, txg);

	/*
	 * Mark newvd's DTL dirty in this txg.
	 */
	vdev_dirty(tvd, VDD_DTL, newvd, txg);

	(void) spa_vdev_exit(spa, newrootvd, open_txg, 0);

	/*
	 * Kick off a resilver to update newvd.
	 */
	VERIFY(spa_scrub(spa, POOL_SCRUB_RESILVER, B_TRUE) == 0);

	return (0);
}

/*
 * Detach a device from a mirror or replacing vdev.
 * If 'replace_done' is specified, only detach if the parent
 * is a replacing vdev.
 */
int
spa_vdev_detach(spa_t *spa, uint64_t guid, int replace_done)
{
	uint64_t txg;
	int c, t, error;
	vdev_t *rvd = spa->spa_root_vdev;
	vdev_t *vd, *pvd, *cvd, *tvd;
	boolean_t unspare = B_FALSE;
	uint64_t unspare_guid;

	txg = spa_vdev_enter(spa);

	vd = vdev_lookup_by_guid(rvd, guid);

	if (vd == NULL)
		return (spa_vdev_exit(spa, NULL, txg, ENODEV));

	if (!vd->vdev_ops->vdev_op_leaf)
		return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));

	pvd = vd->vdev_parent;

	/*
	 * If replace_done is specified, only remove this device if it's
	 * the first child of a replacing vdev.  For the 'spare' vdev, either
	 * disk can be removed.
	 */
	if (replace_done) {
		if (pvd->vdev_ops == &vdev_replacing_ops) {
			if (vd->vdev_id != 0)
				return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
		} else if (pvd->vdev_ops != &vdev_spare_ops) {
			return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
		}
	}

	ASSERT(pvd->vdev_ops != &vdev_spare_ops ||
	    spa_version(spa) >= ZFS_VERSION_SPARES);

	/*
	 * Only mirror, replacing, and spare vdevs support detach.
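	 * Detaching a child of any other vdev type (raidz, for example) is
	 * not supported and fails with ENOTSUP.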
	 */
	if (pvd->vdev_ops != &vdev_replacing_ops &&
	    pvd->vdev_ops != &vdev_mirror_ops &&
	    pvd->vdev_ops != &vdev_spare_ops)
		return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));

	/*
	 * If there's only one replica, you can't detach it.
	 */
	if (pvd->vdev_children <= 1)
		return (spa_vdev_exit(spa, NULL, txg, EBUSY));

	/*
	 * If all siblings have non-empty DTLs, this device may have the only
	 * valid copy of the data, which means we cannot safely detach it.
	 *
	 * XXX -- as in the vdev_offline() case, we really want a more
	 * precise DTL check.
	 */
	for (c = 0; c < pvd->vdev_children; c++) {
		uint64_t dirty;

		cvd = pvd->vdev_child[c];
		if (cvd == vd)
			continue;
		if (vdev_is_dead(cvd))
			continue;
		mutex_enter(&cvd->vdev_dtl_lock);
		dirty = cvd->vdev_dtl_map.sm_space |
		    cvd->vdev_dtl_scrub.sm_space;
		mutex_exit(&cvd->vdev_dtl_lock);
		if (!dirty)
			break;
	}

	/*
	 * If we are a replacing or spare vdev, then we can always detach the
	 * latter child, as that is how one cancels the operation.
	 */
	if ((pvd->vdev_ops == &vdev_mirror_ops || vd->vdev_id != 1) &&
	    c == pvd->vdev_children)
		return (spa_vdev_exit(spa, NULL, txg, EBUSY));

	/*
	 * If we are detaching the original disk from a spare, then it implies
	 * that the spare should become a real disk, and be removed from the
	 * active spare list for the pool.
	 */
	if (pvd->vdev_ops == &vdev_spare_ops &&
	    vd->vdev_id == 0)
		unspare = B_TRUE;

	/*
	 * Erase the disk labels so the disk can be used for other things.
	 * This must be done after all other error cases are handled,
	 * but before we disembowel vd (so we can still do I/O to it).
	 * But if we can't do it, don't treat the error as fatal --
	 * it may be that the unwritability of the disk is the reason
	 * it's being detached!
	 */
	error = vdev_label_init(vd, 0, B_FALSE);
	if (error)
		dprintf("unable to erase labels on %s\n", vdev_description(vd));

	/*
	 * Remove vd from its parent and compact the parent's children.
	 */
	vdev_remove_child(pvd, vd);
	vdev_compact_children(pvd);

	/*
	 * Remember one of the remaining children so we can get tvd below.
	 */
	cvd = pvd->vdev_child[0];

	/*
	 * If we need to remove the remaining child from the list of hot spares,
	 * do it now, marking the vdev as no longer a spare in the process.  We
	 * must do this before vdev_remove_parent(), because that can change the
	 * GUID if it creates a new toplevel GUID.
	 */
	if (unspare) {
		ASSERT(cvd->vdev_isspare);
		spa_spare_remove(cvd->vdev_guid);
		cvd->vdev_isspare = B_FALSE;
		unspare_guid = cvd->vdev_guid;
	}

	/*
	 * If the parent mirror/replacing vdev only has one child,
	 * the parent is no longer needed.  Remove it from the tree.
	 */
	if (pvd->vdev_children == 1)
		vdev_remove_parent(cvd);

	/*
	 * We don't set tvd until now because the parent we just removed
	 * may have been the previous top-level vdev.
	 */
	tvd = cvd->vdev_top;
	ASSERT(tvd->vdev_parent == rvd);

	/*
	 * Reopen this top-level vdev to reassess health after detach.
	 */
	vdev_reopen(tvd);

	/*
	 * If the device we just detached was smaller than the others,
	 * it may be possible to add metaslabs (i.e. grow the pool).
	 * vdev_metaslab_init() can't fail because the existing metaslabs
	 * are already in core, so there's nothing to read from disk.
	 */
	VERIFY(vdev_metaslab_init(tvd, txg) == 0);

	vdev_config_dirty(tvd);

	/*
	 * Mark vd's DTL as dirty in this txg.
	 * vdev_dtl_sync() will see that vd->vdev_detached is set
	 * and free vd's DTL object in syncing context.
	 * But first make sure we're not on any *other* txg's DTL list,
	 * to prevent vd from being accessed after it's freed.
	 */
	for (t = 0; t < TXG_SIZE; t++)
		(void) txg_list_remove_this(&tvd->vdev_dtl_list, vd, t);
	vd->vdev_detached = B_TRUE;
	vdev_dirty(tvd, VDD_DTL, vd, txg);

	dprintf("detached %s in txg %llu\n", vd->vdev_path, txg);

	error = spa_vdev_exit(spa, vd, txg, 0);

	/*
	 * If we are supposed to remove the given vdev from the list of spares,
	 * iterate over all pools in the system and replace it if it's present.
	 */
	if (unspare) {
		spa = NULL;
		mutex_enter(&spa_namespace_lock);
		while ((spa = spa_next(spa)) != NULL) {
			if (spa->spa_state != POOL_STATE_ACTIVE)
				continue;

			(void) spa_vdev_remove(spa, unspare_guid, B_TRUE);
		}
		mutex_exit(&spa_namespace_lock);
	}

	return (error);
}

/*
 * Remove a device from the pool.  Currently, this supports removing only hot
 * spares.
 */
int
spa_vdev_remove(spa_t *spa, uint64_t guid, boolean_t unspare)
{
	vdev_t *vd;
	nvlist_t **spares, *nv, **newspares;
	uint_t i, j, nspares;
	int ret = 0;

	spa_config_enter(spa, RW_WRITER, FTAG);

	vd = spa_lookup_by_guid(spa, guid);

	nv = NULL;
	if (spa->spa_spares != NULL &&
	    nvlist_lookup_nvlist_array(spa->spa_sparelist, ZPOOL_CONFIG_SPARES,
	    &spares, &nspares) == 0) {
		for (i = 0; i < nspares; i++) {
			uint64_t theguid;

			VERIFY(nvlist_lookup_uint64(spares[i],
			    ZPOOL_CONFIG_GUID, &theguid) == 0);
			if (theguid == guid) {
				nv = spares[i];
				break;
			}
		}
	}

	/*
	 * We only support removing a hot spare, and only if it's not currently
	 * in use in this pool.
	 */
	if (nv == NULL && vd == NULL) {
		ret = ENOENT;
		goto out;
	}

	if (nv == NULL && vd != NULL) {
		ret = ENOTSUP;
		goto out;
	}

	if (!unspare && nv != NULL && vd != NULL) {
		ret = EBUSY;
		goto out;
	}

	if (nspares == 1) {
		newspares = NULL;
	} else {
		newspares = kmem_alloc((nspares - 1) * sizeof (void *),
		    KM_SLEEP);
		for (i = 0, j = 0; i < nspares; i++) {
			if (spares[i] != nv)
				VERIFY(nvlist_dup(spares[i],
				    &newspares[j++], KM_SLEEP) == 0);
		}
	}

	VERIFY(nvlist_remove(spa->spa_sparelist, ZPOOL_CONFIG_SPARES,
	    DATA_TYPE_NVLIST_ARRAY) == 0);
	VERIFY(nvlist_add_nvlist_array(spa->spa_sparelist, ZPOOL_CONFIG_SPARES,
	    newspares, nspares - 1) == 0);
	for (i = 0; i < nspares - 1; i++)
		nvlist_free(newspares[i]);
	kmem_free(newspares, (nspares - 1) * sizeof (void *));
	spa_load_spares(spa);
	spa->spa_sync_spares = B_TRUE;

out:
	spa_config_exit(spa, FTAG);

	return (ret);
}

/*
 * Find any device that's done replacing, so we can detach it.
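 * A replacing vdev is considered done once the new child's DTLs are empty,
 * meaning the resilver has caught it up completely.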
 */
static vdev_t *
spa_vdev_replace_done_hunt(vdev_t *vd)
{
	vdev_t *newvd, *oldvd;
	int c;

	for (c = 0; c < vd->vdev_children; c++) {
		oldvd = spa_vdev_replace_done_hunt(vd->vdev_child[c]);
		if (oldvd != NULL)
			return (oldvd);
	}

	if (vd->vdev_ops == &vdev_replacing_ops && vd->vdev_children == 2) {
		oldvd = vd->vdev_child[0];
		newvd = vd->vdev_child[1];

		mutex_enter(&newvd->vdev_dtl_lock);
		if (newvd->vdev_dtl_map.sm_space == 0 &&
		    newvd->vdev_dtl_scrub.sm_space == 0) {
			mutex_exit(&newvd->vdev_dtl_lock);
			return (oldvd);
		}
		mutex_exit(&newvd->vdev_dtl_lock);
	}

	return (NULL);
}

static void
spa_vdev_replace_done(spa_t *spa)
{
	vdev_t *vd;
	vdev_t *pvd;
	uint64_t guid;
	uint64_t pguid = 0;

	spa_config_enter(spa, RW_READER, FTAG);

	while ((vd = spa_vdev_replace_done_hunt(spa->spa_root_vdev)) != NULL) {
		guid = vd->vdev_guid;
		/*
		 * If we have just finished replacing a hot spared device, then
		 * we need to detach the parent's first child (the original hot
		 * spare) as well.
		 */
		pvd = vd->vdev_parent;
		if (pvd->vdev_parent->vdev_ops == &vdev_spare_ops &&
		    pvd->vdev_id == 0) {
			ASSERT(pvd->vdev_ops == &vdev_replacing_ops);
			ASSERT(pvd->vdev_parent->vdev_children == 2);
			pguid = pvd->vdev_parent->vdev_child[1]->vdev_guid;
		}
		spa_config_exit(spa, FTAG);
		if (spa_vdev_detach(spa, guid, B_TRUE) != 0)
			return;
		if (pguid != 0 && spa_vdev_detach(spa, pguid, B_TRUE) != 0)
			return;
		spa_config_enter(spa, RW_READER, FTAG);
	}

	spa_config_exit(spa, FTAG);
}

/*
 * Update the stored path for this vdev.  Dirty the vdev configuration, relying
 * on spa_vdev_enter/exit() to synchronize the labels and cache.
 */
int
spa_vdev_setpath(spa_t *spa, uint64_t guid, const char *newpath)
{
	vdev_t *rvd, *vd;
	uint64_t txg;

	rvd = spa->spa_root_vdev;

	txg = spa_vdev_enter(spa);

	if ((vd = vdev_lookup_by_guid(rvd, guid)) == NULL) {
		/*
		 * Determine if this is a reference to a hot spare.  In that
		 * case, update the path as stored in the spare list.
2076 */ 2077 nvlist_t **spares; 2078 uint_t i, nspares; 2079 if (spa->spa_sparelist != NULL) { 2080 VERIFY(nvlist_lookup_nvlist_array(spa->spa_sparelist, 2081 ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0); 2082 for (i = 0; i < nspares; i++) { 2083 uint64_t theguid; 2084 VERIFY(nvlist_lookup_uint64(spares[i], 2085 ZPOOL_CONFIG_GUID, &theguid) == 0); 2086 if (theguid == guid) 2087 break; 2088 } 2089 2090 if (i == nspares) 2091 return (spa_vdev_exit(spa, NULL, txg, ENOENT)); 2092 2093 VERIFY(nvlist_add_string(spares[i], 2094 ZPOOL_CONFIG_PATH, newpath) == 0); 2095 spa_load_spares(spa); 2096 spa->spa_sync_spares = B_TRUE; 2097 return (spa_vdev_exit(spa, NULL, txg, 0)); 2098 } else { 2099 return (spa_vdev_exit(spa, NULL, txg, ENOENT)); 2100 } 2101 } 2102 2103 if (!vd->vdev_ops->vdev_op_leaf) 2104 return (spa_vdev_exit(spa, NULL, txg, ENOTSUP)); 2105 2106 spa_strfree(vd->vdev_path); 2107 vd->vdev_path = spa_strdup(newpath); 2108 2109 vdev_config_dirty(vd->vdev_top); 2110 2111 return (spa_vdev_exit(spa, NULL, txg, 0)); 2112 } 2113 2114 /* 2115 * ========================================================================== 2116 * SPA Scrubbing 2117 * ========================================================================== 2118 */ 2119 2120 void 2121 spa_scrub_throttle(spa_t *spa, int direction) 2122 { 2123 mutex_enter(&spa->spa_scrub_lock); 2124 spa->spa_scrub_throttled += direction; 2125 ASSERT(spa->spa_scrub_throttled >= 0); 2126 if (spa->spa_scrub_throttled == 0) 2127 cv_broadcast(&spa->spa_scrub_io_cv); 2128 mutex_exit(&spa->spa_scrub_lock); 2129 } 2130 2131 static void 2132 spa_scrub_io_done(zio_t *zio) 2133 { 2134 spa_t *spa = zio->io_spa; 2135 2136 zio_data_buf_free(zio->io_data, zio->io_size); 2137 2138 mutex_enter(&spa->spa_scrub_lock); 2139 if (zio->io_error && !(zio->io_flags & ZIO_FLAG_SPECULATIVE)) { 2140 vdev_t *vd = zio->io_vd ? zio->io_vd : spa->spa_root_vdev; 2141 spa->spa_scrub_errors++; 2142 mutex_enter(&vd->vdev_stat_lock); 2143 vd->vdev_stat.vs_scrub_errors++; 2144 mutex_exit(&vd->vdev_stat_lock); 2145 } 2146 if (--spa->spa_scrub_inflight == 0) { 2147 cv_broadcast(&spa->spa_scrub_io_cv); 2148 ASSERT(spa->spa_scrub_throttled == 0); 2149 } 2150 mutex_exit(&spa->spa_scrub_lock); 2151 } 2152 2153 static void 2154 spa_scrub_io_start(spa_t *spa, blkptr_t *bp, int priority, int flags, 2155 zbookmark_t *zb) 2156 { 2157 size_t size = BP_GET_LSIZE(bp); 2158 void *data = zio_data_buf_alloc(size); 2159 2160 mutex_enter(&spa->spa_scrub_lock); 2161 spa->spa_scrub_inflight++; 2162 mutex_exit(&spa->spa_scrub_lock); 2163 2164 if (zb->zb_level == -1 && BP_GET_TYPE(bp) != DMU_OT_OBJSET) 2165 flags |= ZIO_FLAG_SPECULATIVE; /* intent log block */ 2166 2167 flags |= ZIO_FLAG_SCRUB_THREAD | ZIO_FLAG_CANFAIL; 2168 2169 zio_nowait(zio_read(NULL, spa, bp, data, size, 2170 spa_scrub_io_done, NULL, priority, flags, zb)); 2171 } 2172 2173 /* ARGSUSED */ 2174 static int 2175 spa_scrub_cb(traverse_blk_cache_t *bc, spa_t *spa, void *a) 2176 { 2177 blkptr_t *bp = &bc->bc_blkptr; 2178 vdev_t *vd = spa->spa_root_vdev; 2179 dva_t *dva = bp->blk_dva; 2180 int needs_resilver = B_FALSE; 2181 int d; 2182 2183 if (bc->bc_errno) { 2184 /* 2185 * We can't scrub this block, but we can continue to scrub 2186 * the rest of the pool. Note the error and move along. 
2187 */ 2188 mutex_enter(&spa->spa_scrub_lock); 2189 spa->spa_scrub_errors++; 2190 mutex_exit(&spa->spa_scrub_lock); 2191 2192 mutex_enter(&vd->vdev_stat_lock); 2193 vd->vdev_stat.vs_scrub_errors++; 2194 mutex_exit(&vd->vdev_stat_lock); 2195 2196 return (ERESTART); 2197 } 2198 2199 ASSERT(bp->blk_birth < spa->spa_scrub_maxtxg); 2200 2201 for (d = 0; d < BP_GET_NDVAS(bp); d++) { 2202 vd = vdev_lookup_top(spa, DVA_GET_VDEV(&dva[d])); 2203 2204 ASSERT(vd != NULL); 2205 2206 /* 2207 * Keep track of how much data we've examined so that 2208 * zpool(1M) status can make useful progress reports. 2209 */ 2210 mutex_enter(&vd->vdev_stat_lock); 2211 vd->vdev_stat.vs_scrub_examined += DVA_GET_ASIZE(&dva[d]); 2212 mutex_exit(&vd->vdev_stat_lock); 2213 2214 if (spa->spa_scrub_type == POOL_SCRUB_RESILVER) { 2215 if (DVA_GET_GANG(&dva[d])) { 2216 /* 2217 * Gang members may be spread across multiple 2218 * vdevs, so the best we can do is look at the 2219 * pool-wide DTL. 2220 * XXX -- it would be better to change our 2221 * allocation policy to ensure that this can't 2222 * happen. 2223 */ 2224 vd = spa->spa_root_vdev; 2225 } 2226 if (vdev_dtl_contains(&vd->vdev_dtl_map, 2227 bp->blk_birth, 1)) 2228 needs_resilver = B_TRUE; 2229 } 2230 } 2231 2232 if (spa->spa_scrub_type == POOL_SCRUB_EVERYTHING) 2233 spa_scrub_io_start(spa, bp, ZIO_PRIORITY_SCRUB, 2234 ZIO_FLAG_SCRUB, &bc->bc_bookmark); 2235 else if (needs_resilver) 2236 spa_scrub_io_start(spa, bp, ZIO_PRIORITY_RESILVER, 2237 ZIO_FLAG_RESILVER, &bc->bc_bookmark); 2238 2239 return (0); 2240 } 2241 2242 static void 2243 spa_scrub_thread(spa_t *spa) 2244 { 2245 callb_cpr_t cprinfo; 2246 traverse_handle_t *th = spa->spa_scrub_th; 2247 vdev_t *rvd = spa->spa_root_vdev; 2248 pool_scrub_type_t scrub_type = spa->spa_scrub_type; 2249 int error = 0; 2250 boolean_t complete; 2251 2252 CALLB_CPR_INIT(&cprinfo, &spa->spa_scrub_lock, callb_generic_cpr, FTAG); 2253 2254 /* 2255 * If we're restarting due to a snapshot create/delete, 2256 * wait for that to complete. 2257 */ 2258 txg_wait_synced(spa_get_dsl(spa), 0); 2259 2260 dprintf("start %s mintxg=%llu maxtxg=%llu\n", 2261 scrub_type == POOL_SCRUB_RESILVER ? 
"resilver" : "scrub", 2262 spa->spa_scrub_mintxg, spa->spa_scrub_maxtxg); 2263 2264 spa_config_enter(spa, RW_WRITER, FTAG); 2265 vdev_reopen(rvd); /* purge all vdev caches */ 2266 vdev_config_dirty(rvd); /* rewrite all disk labels */ 2267 vdev_scrub_stat_update(rvd, scrub_type, B_FALSE); 2268 spa_config_exit(spa, FTAG); 2269 2270 mutex_enter(&spa->spa_scrub_lock); 2271 spa->spa_scrub_errors = 0; 2272 spa->spa_scrub_active = 1; 2273 ASSERT(spa->spa_scrub_inflight == 0); 2274 ASSERT(spa->spa_scrub_throttled == 0); 2275 2276 while (!spa->spa_scrub_stop) { 2277 CALLB_CPR_SAFE_BEGIN(&cprinfo); 2278 while (spa->spa_scrub_suspended) { 2279 spa->spa_scrub_active = 0; 2280 cv_broadcast(&spa->spa_scrub_cv); 2281 cv_wait(&spa->spa_scrub_cv, &spa->spa_scrub_lock); 2282 spa->spa_scrub_active = 1; 2283 } 2284 CALLB_CPR_SAFE_END(&cprinfo, &spa->spa_scrub_lock); 2285 2286 if (spa->spa_scrub_restart_txg != 0) 2287 break; 2288 2289 mutex_exit(&spa->spa_scrub_lock); 2290 error = traverse_more(th); 2291 mutex_enter(&spa->spa_scrub_lock); 2292 if (error != EAGAIN) 2293 break; 2294 2295 while (spa->spa_scrub_throttled > 0) 2296 cv_wait(&spa->spa_scrub_io_cv, &spa->spa_scrub_lock); 2297 } 2298 2299 while (spa->spa_scrub_inflight) 2300 cv_wait(&spa->spa_scrub_io_cv, &spa->spa_scrub_lock); 2301 2302 spa->spa_scrub_active = 0; 2303 cv_broadcast(&spa->spa_scrub_cv); 2304 2305 mutex_exit(&spa->spa_scrub_lock); 2306 2307 spa_config_enter(spa, RW_WRITER, FTAG); 2308 2309 mutex_enter(&spa->spa_scrub_lock); 2310 2311 /* 2312 * Note: we check spa_scrub_restart_txg under both spa_scrub_lock 2313 * AND the spa config lock to synchronize with any config changes 2314 * that revise the DTLs under spa_vdev_enter() / spa_vdev_exit(). 2315 */ 2316 if (spa->spa_scrub_restart_txg != 0) 2317 error = ERESTART; 2318 2319 if (spa->spa_scrub_stop) 2320 error = EINTR; 2321 2322 /* 2323 * Even if there were uncorrectable errors, we consider the scrub 2324 * completed. The downside is that if there is a transient error during 2325 * a resilver, we won't resilver the data properly to the target. But 2326 * if the damage is permanent (more likely) we will resilver forever, 2327 * which isn't really acceptable. Since there is enough information for 2328 * the user to know what has failed and why, this seems like a more 2329 * tractable approach. 2330 */ 2331 complete = (error == 0); 2332 2333 dprintf("end %s to maxtxg=%llu %s, traverse=%d, %llu errors, stop=%u\n", 2334 scrub_type == POOL_SCRUB_RESILVER ? "resilver" : "scrub", 2335 spa->spa_scrub_maxtxg, complete ? "done" : "FAILED", 2336 error, spa->spa_scrub_errors, spa->spa_scrub_stop); 2337 2338 mutex_exit(&spa->spa_scrub_lock); 2339 2340 /* 2341 * If the scrub/resilver completed, update all DTLs to reflect this. 2342 * Whether it succeeded or not, vacate all temporary scrub DTLs. 2343 */ 2344 vdev_dtl_reassess(rvd, spa_last_synced_txg(spa) + 1, 2345 complete ? spa->spa_scrub_maxtxg : 0, B_TRUE); 2346 vdev_scrub_stat_update(rvd, POOL_SCRUB_NONE, complete); 2347 spa_errlog_rotate(spa); 2348 2349 spa_config_exit(spa, FTAG); 2350 2351 mutex_enter(&spa->spa_scrub_lock); 2352 2353 /* 2354 * We may have finished replacing a device. 2355 * Let the async thread assess this and handle the detach. 2356 */ 2357 spa_async_request(spa, SPA_ASYNC_REPLACE_DONE); 2358 2359 /* 2360 * If we were told to restart, our final act is to start a new scrub. 2361 */ 2362 if (error == ERESTART) 2363 spa_async_request(spa, scrub_type == POOL_SCRUB_RESILVER ? 
2364 SPA_ASYNC_RESILVER : SPA_ASYNC_SCRUB); 2365 2366 spa->spa_scrub_type = POOL_SCRUB_NONE; 2367 spa->spa_scrub_active = 0; 2368 spa->spa_scrub_thread = NULL; 2369 cv_broadcast(&spa->spa_scrub_cv); 2370 CALLB_CPR_EXIT(&cprinfo); /* drops &spa->spa_scrub_lock */ 2371 thread_exit(); 2372 } 2373 2374 void 2375 spa_scrub_suspend(spa_t *spa) 2376 { 2377 mutex_enter(&spa->spa_scrub_lock); 2378 spa->spa_scrub_suspended++; 2379 while (spa->spa_scrub_active) { 2380 cv_broadcast(&spa->spa_scrub_cv); 2381 cv_wait(&spa->spa_scrub_cv, &spa->spa_scrub_lock); 2382 } 2383 while (spa->spa_scrub_inflight) 2384 cv_wait(&spa->spa_scrub_io_cv, &spa->spa_scrub_lock); 2385 mutex_exit(&spa->spa_scrub_lock); 2386 } 2387 2388 void 2389 spa_scrub_resume(spa_t *spa) 2390 { 2391 mutex_enter(&spa->spa_scrub_lock); 2392 ASSERT(spa->spa_scrub_suspended != 0); 2393 if (--spa->spa_scrub_suspended == 0) 2394 cv_broadcast(&spa->spa_scrub_cv); 2395 mutex_exit(&spa->spa_scrub_lock); 2396 } 2397 2398 void 2399 spa_scrub_restart(spa_t *spa, uint64_t txg) 2400 { 2401 /* 2402 * Something happened (e.g. snapshot create/delete) that means 2403 * we must restart any in-progress scrubs. The itinerary will 2404 * fix this properly. 2405 */ 2406 mutex_enter(&spa->spa_scrub_lock); 2407 spa->spa_scrub_restart_txg = txg; 2408 mutex_exit(&spa->spa_scrub_lock); 2409 } 2410 2411 int 2412 spa_scrub(spa_t *spa, pool_scrub_type_t type, boolean_t force) 2413 { 2414 space_seg_t *ss; 2415 uint64_t mintxg, maxtxg; 2416 vdev_t *rvd = spa->spa_root_vdev; 2417 2418 if ((uint_t)type >= POOL_SCRUB_TYPES) 2419 return (ENOTSUP); 2420 2421 mutex_enter(&spa->spa_scrub_lock); 2422 2423 /* 2424 * If there's a scrub or resilver already in progress, stop it. 2425 */ 2426 while (spa->spa_scrub_thread != NULL) { 2427 /* 2428 * Don't stop a resilver unless forced. 2429 */ 2430 if (spa->spa_scrub_type == POOL_SCRUB_RESILVER && !force) { 2431 mutex_exit(&spa->spa_scrub_lock); 2432 return (EBUSY); 2433 } 2434 spa->spa_scrub_stop = 1; 2435 cv_broadcast(&spa->spa_scrub_cv); 2436 cv_wait(&spa->spa_scrub_cv, &spa->spa_scrub_lock); 2437 } 2438 2439 /* 2440 * Terminate the previous traverse. 2441 */ 2442 if (spa->spa_scrub_th != NULL) { 2443 traverse_fini(spa->spa_scrub_th); 2444 spa->spa_scrub_th = NULL; 2445 } 2446 2447 if (rvd == NULL) { 2448 ASSERT(spa->spa_scrub_stop == 0); 2449 ASSERT(spa->spa_scrub_type == type); 2450 ASSERT(spa->spa_scrub_restart_txg == 0); 2451 mutex_exit(&spa->spa_scrub_lock); 2452 return (0); 2453 } 2454 2455 mintxg = TXG_INITIAL - 1; 2456 maxtxg = spa_last_synced_txg(spa) + 1; 2457 2458 mutex_enter(&rvd->vdev_dtl_lock); 2459 2460 if (rvd->vdev_dtl_map.sm_space == 0) { 2461 /* 2462 * The pool-wide DTL is empty. 2463 * If this is a resilver, there's nothing to do except 2464 * check whether any in-progress replacements have completed. 2465 */ 2466 if (type == POOL_SCRUB_RESILVER) { 2467 type = POOL_SCRUB_NONE; 2468 spa_async_request(spa, SPA_ASYNC_REPLACE_DONE); 2469 } 2470 } else { 2471 /* 2472 * The pool-wide DTL is non-empty. 2473 * If this is a normal scrub, upgrade to a resilver instead. 2474 */ 2475 if (type == POOL_SCRUB_EVERYTHING) 2476 type = POOL_SCRUB_RESILVER; 2477 } 2478 2479 if (type == POOL_SCRUB_RESILVER) { 2480 /* 2481 * Determine the resilvering boundaries. 2482 * 2483 * Note: (mintxg, maxtxg) is an open interval, 2484 * i.e. mintxg and maxtxg themselves are not included. 2485 * 2486 * Note: for maxtxg, we MIN with spa_last_synced_txg(spa) + 1 2487 * so we don't claim to resilver a txg that's still changing. 
2488 */ 2489 ss = avl_first(&rvd->vdev_dtl_map.sm_root); 2490 mintxg = ss->ss_start - 1; 2491 ss = avl_last(&rvd->vdev_dtl_map.sm_root); 2492 maxtxg = MIN(ss->ss_end, maxtxg); 2493 } 2494 2495 mutex_exit(&rvd->vdev_dtl_lock); 2496 2497 spa->spa_scrub_stop = 0; 2498 spa->spa_scrub_type = type; 2499 spa->spa_scrub_restart_txg = 0; 2500 2501 if (type != POOL_SCRUB_NONE) { 2502 spa->spa_scrub_mintxg = mintxg; 2503 spa->spa_scrub_maxtxg = maxtxg; 2504 spa->spa_scrub_th = traverse_init(spa, spa_scrub_cb, NULL, 2505 ADVANCE_PRE | ADVANCE_PRUNE | ADVANCE_ZIL, 2506 ZIO_FLAG_CANFAIL); 2507 traverse_add_pool(spa->spa_scrub_th, mintxg, maxtxg); 2508 spa->spa_scrub_thread = thread_create(NULL, 0, 2509 spa_scrub_thread, spa, 0, &p0, TS_RUN, minclsyspri); 2510 } 2511 2512 mutex_exit(&spa->spa_scrub_lock); 2513 2514 return (0); 2515 } 2516 2517 /* 2518 * ========================================================================== 2519 * SPA async task processing 2520 * ========================================================================== 2521 */ 2522 2523 static void 2524 spa_async_reopen(spa_t *spa) 2525 { 2526 vdev_t *rvd = spa->spa_root_vdev; 2527 vdev_t *tvd; 2528 int c; 2529 2530 spa_config_enter(spa, RW_WRITER, FTAG); 2531 2532 for (c = 0; c < rvd->vdev_children; c++) { 2533 tvd = rvd->vdev_child[c]; 2534 if (tvd->vdev_reopen_wanted) { 2535 tvd->vdev_reopen_wanted = 0; 2536 vdev_reopen(tvd); 2537 } 2538 } 2539 2540 spa_config_exit(spa, FTAG); 2541 } 2542 2543 static void 2544 spa_async_thread(spa_t *spa) 2545 { 2546 int tasks; 2547 2548 ASSERT(spa->spa_sync_on); 2549 2550 mutex_enter(&spa->spa_async_lock); 2551 tasks = spa->spa_async_tasks; 2552 spa->spa_async_tasks = 0; 2553 mutex_exit(&spa->spa_async_lock); 2554 2555 /* 2556 * See if the config needs to be updated. 2557 */ 2558 if (tasks & SPA_ASYNC_CONFIG_UPDATE) { 2559 mutex_enter(&spa_namespace_lock); 2560 spa_config_update(spa, SPA_CONFIG_UPDATE_POOL); 2561 mutex_exit(&spa_namespace_lock); 2562 } 2563 2564 /* 2565 * See if any devices need to be reopened. 2566 */ 2567 if (tasks & SPA_ASYNC_REOPEN) 2568 spa_async_reopen(spa); 2569 2570 /* 2571 * If any devices are done replacing, detach them. 2572 */ 2573 if (tasks & SPA_ASYNC_REPLACE_DONE) 2574 spa_vdev_replace_done(spa); 2575 2576 /* 2577 * Kick off a scrub. 2578 */ 2579 if (tasks & SPA_ASYNC_SCRUB) 2580 VERIFY(spa_scrub(spa, POOL_SCRUB_EVERYTHING, B_TRUE) == 0); 2581 2582 /* 2583 * Kick off a resilver. 2584 */ 2585 if (tasks & SPA_ASYNC_RESILVER) 2586 VERIFY(spa_scrub(spa, POOL_SCRUB_RESILVER, B_TRUE) == 0); 2587 2588 /* 2589 * Let the world know that we're done. 
2590 */ 2591 mutex_enter(&spa->spa_async_lock); 2592 spa->spa_async_thread = NULL; 2593 cv_broadcast(&spa->spa_async_cv); 2594 mutex_exit(&spa->spa_async_lock); 2595 thread_exit(); 2596 } 2597 2598 void 2599 spa_async_suspend(spa_t *spa) 2600 { 2601 mutex_enter(&spa->spa_async_lock); 2602 spa->spa_async_suspended++; 2603 while (spa->spa_async_thread != NULL) 2604 cv_wait(&spa->spa_async_cv, &spa->spa_async_lock); 2605 mutex_exit(&spa->spa_async_lock); 2606 } 2607 2608 void 2609 spa_async_resume(spa_t *spa) 2610 { 2611 mutex_enter(&spa->spa_async_lock); 2612 ASSERT(spa->spa_async_suspended != 0); 2613 spa->spa_async_suspended--; 2614 mutex_exit(&spa->spa_async_lock); 2615 } 2616 2617 static void 2618 spa_async_dispatch(spa_t *spa) 2619 { 2620 mutex_enter(&spa->spa_async_lock); 2621 if (spa->spa_async_tasks && !spa->spa_async_suspended && 2622 spa->spa_async_thread == NULL && 2623 rootdir != NULL && !vn_is_readonly(rootdir)) 2624 spa->spa_async_thread = thread_create(NULL, 0, 2625 spa_async_thread, spa, 0, &p0, TS_RUN, maxclsyspri); 2626 mutex_exit(&spa->spa_async_lock); 2627 } 2628 2629 void 2630 spa_async_request(spa_t *spa, int task) 2631 { 2632 mutex_enter(&spa->spa_async_lock); 2633 spa->spa_async_tasks |= task; 2634 mutex_exit(&spa->spa_async_lock); 2635 } 2636 2637 /* 2638 * ========================================================================== 2639 * SPA syncing routines 2640 * ========================================================================== 2641 */ 2642 2643 static void 2644 spa_sync_deferred_frees(spa_t *spa, uint64_t txg) 2645 { 2646 bplist_t *bpl = &spa->spa_sync_bplist; 2647 dmu_tx_t *tx; 2648 blkptr_t blk; 2649 uint64_t itor = 0; 2650 zio_t *zio; 2651 int error; 2652 uint8_t c = 1; 2653 2654 zio = zio_root(spa, NULL, NULL, ZIO_FLAG_CONFIG_HELD); 2655 2656 while (bplist_iterate(bpl, &itor, &blk) == 0) 2657 zio_nowait(zio_free(zio, spa, txg, &blk, NULL, NULL)); 2658 2659 error = zio_wait(zio); 2660 ASSERT3U(error, ==, 0); 2661 2662 tx = dmu_tx_create_assigned(spa->spa_dsl_pool, txg); 2663 bplist_vacate(bpl, tx); 2664 2665 /* 2666 * Pre-dirty the first block so we sync to convergence faster. 2667 * (Usually only the first block is needed.) 2668 */ 2669 dmu_write(spa->spa_meta_objset, spa->spa_sync_bplist_obj, 0, 1, &c, tx); 2670 dmu_tx_commit(tx); 2671 } 2672 2673 static void 2674 spa_sync_nvlist(spa_t *spa, uint64_t obj, nvlist_t *nv, dmu_tx_t *tx) 2675 { 2676 char *packed = NULL; 2677 size_t nvsize = 0; 2678 dmu_buf_t *db; 2679 2680 VERIFY(nvlist_size(nv, &nvsize, NV_ENCODE_XDR) == 0); 2681 2682 packed = kmem_alloc(nvsize, KM_SLEEP); 2683 2684 VERIFY(nvlist_pack(nv, &packed, &nvsize, NV_ENCODE_XDR, 2685 KM_SLEEP) == 0); 2686 2687 dmu_write(spa->spa_meta_objset, obj, 0, nvsize, packed, tx); 2688 2689 kmem_free(packed, nvsize); 2690 2691 VERIFY(0 == dmu_bonus_hold(spa->spa_meta_objset, obj, FTAG, &db)); 2692 dmu_buf_will_dirty(db, tx); 2693 *(uint64_t *)db->db_data = nvsize; 2694 dmu_buf_rele(db, FTAG); 2695 } 2696 2697 static void 2698 spa_sync_spares(spa_t *spa, dmu_tx_t *tx) 2699 { 2700 nvlist_t *nvroot; 2701 nvlist_t **spares; 2702 int i; 2703 2704 if (!spa->spa_sync_spares) 2705 return; 2706 2707 /* 2708 * Update the MOS nvlist describing the list of available spares. 2709 * spa_validate_spares() will have already made sure this nvlist is 2710 * valid and the vdevs are labelled appropriately. 
2711 */ 2712 if (spa->spa_spares_object == 0) { 2713 spa->spa_spares_object = dmu_object_alloc(spa->spa_meta_objset, 2714 DMU_OT_PACKED_NVLIST, 1 << 14, 2715 DMU_OT_PACKED_NVLIST_SIZE, sizeof (uint64_t), tx); 2716 VERIFY(zap_update(spa->spa_meta_objset, 2717 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_SPARES, 2718 sizeof (uint64_t), 1, &spa->spa_spares_object, tx) == 0); 2719 } 2720 2721 VERIFY(nvlist_alloc(&nvroot, NV_UNIQUE_NAME, KM_SLEEP) == 0); 2722 if (spa->spa_nspares == 0) { 2723 VERIFY(nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES, 2724 NULL, 0) == 0); 2725 } else { 2726 spares = kmem_alloc(spa->spa_nspares * sizeof (void *), 2727 KM_SLEEP); 2728 for (i = 0; i < spa->spa_nspares; i++) 2729 spares[i] = vdev_config_generate(spa, 2730 spa->spa_spares[i], B_FALSE, B_TRUE); 2731 VERIFY(nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES, 2732 spares, spa->spa_nspares) == 0); 2733 for (i = 0; i < spa->spa_nspares; i++) 2734 nvlist_free(spares[i]); 2735 kmem_free(spares, spa->spa_nspares * sizeof (void *)); 2736 } 2737 2738 spa_sync_nvlist(spa, spa->spa_spares_object, nvroot, tx); 2739 nvlist_free(nvroot); 2740 2741 spa->spa_sync_spares = B_FALSE; 2742 } 2743 2744 static void 2745 spa_sync_config_object(spa_t *spa, dmu_tx_t *tx) 2746 { 2747 nvlist_t *config; 2748 2749 if (list_is_empty(&spa->spa_dirty_list)) 2750 return; 2751 2752 config = spa_config_generate(spa, NULL, dmu_tx_get_txg(tx), B_FALSE); 2753 2754 if (spa->spa_config_syncing) 2755 nvlist_free(spa->spa_config_syncing); 2756 spa->spa_config_syncing = config; 2757 2758 spa_sync_nvlist(spa, spa->spa_config_object, config, tx); 2759 } 2760 2761 /* 2762 * Sync the specified transaction group. New blocks may be dirtied as 2763 * part of the process, so we iterate until it converges. 2764 */ 2765 void 2766 spa_sync(spa_t *spa, uint64_t txg) 2767 { 2768 dsl_pool_t *dp = spa->spa_dsl_pool; 2769 objset_t *mos = spa->spa_meta_objset; 2770 bplist_t *bpl = &spa->spa_sync_bplist; 2771 vdev_t *rvd = spa->spa_root_vdev; 2772 vdev_t *vd; 2773 dmu_tx_t *tx; 2774 int dirty_vdevs; 2775 2776 /* 2777 * Lock out configuration changes. 2778 */ 2779 spa_config_enter(spa, RW_READER, FTAG); 2780 2781 spa->spa_syncing_txg = txg; 2782 spa->spa_sync_pass = 0; 2783 2784 VERIFY(0 == bplist_open(bpl, mos, spa->spa_sync_bplist_obj)); 2785 2786 tx = dmu_tx_create_assigned(dp, txg); 2787 2788 /* 2789 * If we are upgrading to ZFS_VERSION_RAIDZ_DEFLATE this txg, 2790 * set spa_deflate if we have no raid-z vdevs. 2791 */ 2792 if (spa->spa_ubsync.ub_version < ZFS_VERSION_RAIDZ_DEFLATE && 2793 spa->spa_uberblock.ub_version >= ZFS_VERSION_RAIDZ_DEFLATE) { 2794 int i; 2795 2796 for (i = 0; i < rvd->vdev_children; i++) { 2797 vd = rvd->vdev_child[i]; 2798 if (vd->vdev_deflate_ratio != SPA_MINBLOCKSIZE) 2799 break; 2800 } 2801 if (i == rvd->vdev_children) { 2802 spa->spa_deflate = TRUE; 2803 VERIFY(0 == zap_add(spa->spa_meta_objset, 2804 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_DEFLATE, 2805 sizeof (uint64_t), 1, &spa->spa_deflate, tx)); 2806 } 2807 } 2808 2809 /* 2810 * If anything has changed in this txg, push the deferred frees 2811 * from the previous txg. If not, leave them alone so that we 2812 * don't generate work on an otherwise idle system. 2813 */ 2814 if (!txg_list_empty(&dp->dp_dirty_datasets, txg) || 2815 !txg_list_empty(&dp->dp_dirty_dirs, txg) || 2816 !txg_list_empty(&dp->dp_sync_tasks, txg)) 2817 spa_sync_deferred_frees(spa, txg); 2818 2819 /* 2820 * Iterate to convergence. 
2821 */ 2822 do { 2823 spa->spa_sync_pass++; 2824 2825 spa_sync_config_object(spa, tx); 2826 spa_sync_spares(spa, tx); 2827 spa_errlog_sync(spa, txg); 2828 dsl_pool_sync(dp, txg); 2829 2830 dirty_vdevs = 0; 2831 while (vd = txg_list_remove(&spa->spa_vdev_txg_list, txg)) { 2832 vdev_sync(vd, txg); 2833 dirty_vdevs++; 2834 } 2835 2836 bplist_sync(bpl, tx); 2837 } while (dirty_vdevs); 2838 2839 bplist_close(bpl); 2840 2841 dprintf("txg %llu passes %d\n", txg, spa->spa_sync_pass); 2842 2843 /* 2844 * Rewrite the vdev configuration (which includes the uberblock) 2845 * to commit the transaction group. 2846 * 2847 * If there are any dirty vdevs, sync the uberblock to all vdevs. 2848 * Otherwise, pick a random top-level vdev that's known to be 2849 * visible in the config cache (see spa_vdev_add() for details). 2850 * If the write fails, try the next vdev until we're tried them all. 2851 */ 2852 if (!list_is_empty(&spa->spa_dirty_list)) { 2853 VERIFY(vdev_config_sync(rvd, txg) == 0); 2854 } else { 2855 int children = rvd->vdev_children; 2856 int c0 = spa_get_random(children); 2857 int c; 2858 2859 for (c = 0; c < children; c++) { 2860 vd = rvd->vdev_child[(c0 + c) % children]; 2861 if (vd->vdev_ms_array == 0) 2862 continue; 2863 if (vdev_config_sync(vd, txg) == 0) 2864 break; 2865 } 2866 if (c == children) 2867 VERIFY(vdev_config_sync(rvd, txg) == 0); 2868 } 2869 2870 dmu_tx_commit(tx); 2871 2872 /* 2873 * Clear the dirty config list. 2874 */ 2875 while ((vd = list_head(&spa->spa_dirty_list)) != NULL) 2876 vdev_config_clean(vd); 2877 2878 /* 2879 * Now that the new config has synced transactionally, 2880 * let it become visible to the config cache. 2881 */ 2882 if (spa->spa_config_syncing != NULL) { 2883 spa_config_set(spa, spa->spa_config_syncing); 2884 spa->spa_config_txg = txg; 2885 spa->spa_config_syncing = NULL; 2886 } 2887 2888 /* 2889 * Make a stable copy of the fully synced uberblock. 2890 * We use this as the root for pool traversals. 2891 */ 2892 spa->spa_traverse_wanted = 1; /* tells traverse_more() to stop */ 2893 2894 spa_scrub_suspend(spa); /* stop scrubbing and finish I/Os */ 2895 2896 rw_enter(&spa->spa_traverse_lock, RW_WRITER); 2897 spa->spa_traverse_wanted = 0; 2898 spa->spa_ubsync = spa->spa_uberblock; 2899 rw_exit(&spa->spa_traverse_lock); 2900 2901 spa_scrub_resume(spa); /* resume scrub with new ubsync */ 2902 2903 /* 2904 * Clean up the ZIL records for the synced txg. 2905 */ 2906 dsl_pool_zil_clean(dp); 2907 2908 /* 2909 * Update usable space statistics. 2910 */ 2911 while (vd = txg_list_remove(&spa->spa_vdev_txg_list, TXG_CLEAN(txg))) 2912 vdev_sync_done(vd, txg); 2913 2914 /* 2915 * It had better be the case that we didn't dirty anything 2916 * since vdev_config_sync(). 2917 */ 2918 ASSERT(txg_list_empty(&dp->dp_dirty_datasets, txg)); 2919 ASSERT(txg_list_empty(&dp->dp_dirty_dirs, txg)); 2920 ASSERT(txg_list_empty(&spa->spa_vdev_txg_list, txg)); 2921 ASSERT(bpl->bpl_queue == NULL); 2922 2923 spa_config_exit(spa, FTAG); 2924 2925 /* 2926 * If any async tasks have been requested, kick them off. 2927 */ 2928 spa_async_dispatch(spa); 2929 } 2930 2931 /* 2932 * Sync all pools. We don't want to hold the namespace lock across these 2933 * operations, so we take a reference on the spa_t and drop the lock during the 2934 * sync. 
2935 */ 2936 void 2937 spa_sync_allpools(void) 2938 { 2939 spa_t *spa = NULL; 2940 mutex_enter(&spa_namespace_lock); 2941 while ((spa = spa_next(spa)) != NULL) { 2942 if (spa_state(spa) != POOL_STATE_ACTIVE) 2943 continue; 2944 spa_open_ref(spa, FTAG); 2945 mutex_exit(&spa_namespace_lock); 2946 txg_wait_synced(spa_get_dsl(spa), 0); 2947 mutex_enter(&spa_namespace_lock); 2948 spa_close(spa, FTAG); 2949 } 2950 mutex_exit(&spa_namespace_lock); 2951 } 2952 2953 /* 2954 * ========================================================================== 2955 * Miscellaneous routines 2956 * ========================================================================== 2957 */ 2958 2959 /* 2960 * Remove all pools in the system. 2961 */ 2962 void 2963 spa_evict_all(void) 2964 { 2965 spa_t *spa; 2966 2967 /* 2968 * Remove all cached state. All pools should be closed now, 2969 * so every spa in the AVL tree should be unreferenced. 2970 */ 2971 mutex_enter(&spa_namespace_lock); 2972 while ((spa = spa_next(NULL)) != NULL) { 2973 /* 2974 * Stop async tasks. The async thread may need to detach 2975 * a device that's been replaced, which requires grabbing 2976 * spa_namespace_lock, so we must drop it here. 2977 */ 2978 spa_open_ref(spa, FTAG); 2979 mutex_exit(&spa_namespace_lock); 2980 spa_async_suspend(spa); 2981 VERIFY(spa_scrub(spa, POOL_SCRUB_NONE, B_TRUE) == 0); 2982 mutex_enter(&spa_namespace_lock); 2983 spa_close(spa, FTAG); 2984 2985 if (spa->spa_state != POOL_STATE_UNINITIALIZED) { 2986 spa_unload(spa); 2987 spa_deactivate(spa); 2988 } 2989 spa_remove(spa); 2990 } 2991 mutex_exit(&spa_namespace_lock); 2992 } 2993 2994 vdev_t * 2995 spa_lookup_by_guid(spa_t *spa, uint64_t guid) 2996 { 2997 return (vdev_lookup_by_guid(spa->spa_root_vdev, guid)); 2998 } 2999 3000 void 3001 spa_upgrade(spa_t *spa) 3002 { 3003 spa_config_enter(spa, RW_WRITER, FTAG); 3004 3005 /* 3006 * This should only be called for a non-faulted pool, and since a 3007 * future version would result in an unopenable pool, this shouldn't be 3008 * possible. 3009 */ 3010 ASSERT(spa->spa_uberblock.ub_version <= ZFS_VERSION); 3011 3012 spa->spa_uberblock.ub_version = ZFS_VERSION; 3013 vdev_config_dirty(spa->spa_root_vdev); 3014 3015 spa_config_exit(spa, FTAG); 3016 3017 txg_wait_synced(spa_get_dsl(spa), 0); 3018 } 3019 3020 boolean_t 3021 spa_has_spare(spa_t *spa, uint64_t guid) 3022 { 3023 int i; 3024 3025 for (i = 0; i < spa->spa_nspares; i++) 3026 if (spa->spa_spares[i]->vdev_guid == guid) 3027 return (B_TRUE); 3028 3029 return (B_FALSE); 3030 } 3031