/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

#include <sys/zfs_context.h>
#include <sys/fm/fs/zfs.h>
#include <sys/spa.h>
#include <sys/spa_impl.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/vdev_impl.h>
#include <sys/uberblock_impl.h>
#include <sys/metaslab.h>
#include <sys/metaslab_impl.h>
#include <sys/space_map.h>
#include <sys/zio.h>
#include <sys/zap.h>
#include <sys/fs/zfs.h>
#include <sys/arc.h>
#include <sys/zil.h>

/*
 * Virtual device management.
 */

static vdev_ops_t *vdev_ops_table[] = {
	&vdev_root_ops,
	&vdev_raidz_ops,
	&vdev_mirror_ops,
	&vdev_replacing_ops,
	&vdev_spare_ops,
	&vdev_disk_ops,
	&vdev_file_ops,
	&vdev_missing_ops,
	&vdev_hole_ops,
	NULL
};

/* maximum scrub/resilver I/O queue per leaf vdev */
int zfs_scrub_limit = 10;

/*
 * Given a vdev type, return the appropriate ops vector.
 */
static vdev_ops_t *
vdev_getops(const char *type)
{
	vdev_ops_t *ops, **opspp;

	for (opspp = vdev_ops_table; (ops = *opspp) != NULL; opspp++)
		if (strcmp(ops->vdev_op_type, type) == 0)
			break;

	return (ops);
}

/*
 * Default asize function: return the MAX of psize with the asize of
 * all children. This is what's used by anything other than RAID-Z.
 */
uint64_t
vdev_default_asize(vdev_t *vd, uint64_t psize)
{
	uint64_t asize = P2ROUNDUP(psize, 1ULL << vd->vdev_top->vdev_ashift);
	uint64_t csize;

	for (int c = 0; c < vd->vdev_children; c++) {
		csize = vdev_psize_to_asize(vd->vdev_child[c], psize);
		asize = MAX(asize, csize);
	}

	return (asize);
}

/*
 * Get the minimum allocatable size. We define the allocatable size as
 * the vdev's asize rounded to the nearest metaslab. This allows us to
 * replace or attach devices which don't have the same physical size but
 * can still satisfy the same number of allocations.
 */
uint64_t
vdev_get_min_asize(vdev_t *vd)
{
	vdev_t *pvd = vd->vdev_parent;

	/*
	 * If our parent is NULL (inactive spare or cache) or is the root,
	 * just return our own asize.
	 */
	if (pvd == NULL)
		return (vd->vdev_asize);

	/*
	 * The top-level vdev just returns the allocatable size rounded
	 * to the nearest metaslab.
	 */
	if (vd == vd->vdev_top)
		return (P2ALIGN(vd->vdev_asize, 1ULL << vd->vdev_ms_shift));

	/*
	 * The allocatable space for a raidz vdev is N * sizeof(smallest child),
	 * so each child must provide at least 1/Nth of its asize.
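	 * As an illustrative example: a raidz top-level vdev with 5 children
	 * and a min_asize of 500GB requires that each child be able to
	 * provide at least 100GB.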
125 */ 126 if (pvd->vdev_ops == &vdev_raidz_ops) 127 return (pvd->vdev_min_asize / pvd->vdev_children); 128 129 return (pvd->vdev_min_asize); 130 } 131 132 void 133 vdev_set_min_asize(vdev_t *vd) 134 { 135 vd->vdev_min_asize = vdev_get_min_asize(vd); 136 137 for (int c = 0; c < vd->vdev_children; c++) 138 vdev_set_min_asize(vd->vdev_child[c]); 139 } 140 141 vdev_t * 142 vdev_lookup_top(spa_t *spa, uint64_t vdev) 143 { 144 vdev_t *rvd = spa->spa_root_vdev; 145 146 ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0); 147 148 if (vdev < rvd->vdev_children) { 149 ASSERT(rvd->vdev_child[vdev] != NULL); 150 return (rvd->vdev_child[vdev]); 151 } 152 153 return (NULL); 154 } 155 156 vdev_t * 157 vdev_lookup_by_guid(vdev_t *vd, uint64_t guid) 158 { 159 vdev_t *mvd; 160 161 if (vd->vdev_guid == guid) 162 return (vd); 163 164 for (int c = 0; c < vd->vdev_children; c++) 165 if ((mvd = vdev_lookup_by_guid(vd->vdev_child[c], guid)) != 166 NULL) 167 return (mvd); 168 169 return (NULL); 170 } 171 172 void 173 vdev_add_child(vdev_t *pvd, vdev_t *cvd) 174 { 175 size_t oldsize, newsize; 176 uint64_t id = cvd->vdev_id; 177 vdev_t **newchild; 178 179 ASSERT(spa_config_held(cvd->vdev_spa, SCL_ALL, RW_WRITER) == SCL_ALL); 180 ASSERT(cvd->vdev_parent == NULL); 181 182 cvd->vdev_parent = pvd; 183 184 if (pvd == NULL) 185 return; 186 187 ASSERT(id >= pvd->vdev_children || pvd->vdev_child[id] == NULL); 188 189 oldsize = pvd->vdev_children * sizeof (vdev_t *); 190 pvd->vdev_children = MAX(pvd->vdev_children, id + 1); 191 newsize = pvd->vdev_children * sizeof (vdev_t *); 192 193 newchild = kmem_zalloc(newsize, KM_SLEEP); 194 if (pvd->vdev_child != NULL) { 195 bcopy(pvd->vdev_child, newchild, oldsize); 196 kmem_free(pvd->vdev_child, oldsize); 197 } 198 199 pvd->vdev_child = newchild; 200 pvd->vdev_child[id] = cvd; 201 202 cvd->vdev_top = (pvd->vdev_top ? pvd->vdev_top: cvd); 203 ASSERT(cvd->vdev_top->vdev_parent->vdev_parent == NULL); 204 205 /* 206 * Walk up all ancestors to update guid sum. 207 */ 208 for (; pvd != NULL; pvd = pvd->vdev_parent) 209 pvd->vdev_guid_sum += cvd->vdev_guid_sum; 210 211 if (cvd->vdev_ops->vdev_op_leaf) 212 cvd->vdev_spa->spa_scrub_maxinflight += zfs_scrub_limit; 213 } 214 215 void 216 vdev_remove_child(vdev_t *pvd, vdev_t *cvd) 217 { 218 int c; 219 uint_t id = cvd->vdev_id; 220 221 ASSERT(cvd->vdev_parent == pvd); 222 223 if (pvd == NULL) 224 return; 225 226 ASSERT(id < pvd->vdev_children); 227 ASSERT(pvd->vdev_child[id] == cvd); 228 229 pvd->vdev_child[id] = NULL; 230 cvd->vdev_parent = NULL; 231 232 for (c = 0; c < pvd->vdev_children; c++) 233 if (pvd->vdev_child[c]) 234 break; 235 236 if (c == pvd->vdev_children) { 237 kmem_free(pvd->vdev_child, c * sizeof (vdev_t *)); 238 pvd->vdev_child = NULL; 239 pvd->vdev_children = 0; 240 } 241 242 /* 243 * Walk up all ancestors to update guid sum. 244 */ 245 for (; pvd != NULL; pvd = pvd->vdev_parent) 246 pvd->vdev_guid_sum -= cvd->vdev_guid_sum; 247 248 if (cvd->vdev_ops->vdev_op_leaf) 249 cvd->vdev_spa->spa_scrub_maxinflight -= zfs_scrub_limit; 250 } 251 252 /* 253 * Remove any holes in the child array. 
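 * The holes here are the NULL slots that vdev_remove_child() leaves behind;
 * the surviving children are repacked and renumbered contiguously.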
254 */ 255 void 256 vdev_compact_children(vdev_t *pvd) 257 { 258 vdev_t **newchild, *cvd; 259 int oldc = pvd->vdev_children; 260 int newc; 261 262 ASSERT(spa_config_held(pvd->vdev_spa, SCL_ALL, RW_WRITER) == SCL_ALL); 263 264 for (int c = newc = 0; c < oldc; c++) 265 if (pvd->vdev_child[c]) 266 newc++; 267 268 newchild = kmem_alloc(newc * sizeof (vdev_t *), KM_SLEEP); 269 270 for (int c = newc = 0; c < oldc; c++) { 271 if ((cvd = pvd->vdev_child[c]) != NULL) { 272 newchild[newc] = cvd; 273 cvd->vdev_id = newc++; 274 } 275 } 276 277 kmem_free(pvd->vdev_child, oldc * sizeof (vdev_t *)); 278 pvd->vdev_child = newchild; 279 pvd->vdev_children = newc; 280 } 281 282 /* 283 * Allocate and minimally initialize a vdev_t. 284 */ 285 vdev_t * 286 vdev_alloc_common(spa_t *spa, uint_t id, uint64_t guid, vdev_ops_t *ops) 287 { 288 vdev_t *vd; 289 290 vd = kmem_zalloc(sizeof (vdev_t), KM_SLEEP); 291 292 if (spa->spa_root_vdev == NULL) { 293 ASSERT(ops == &vdev_root_ops); 294 spa->spa_root_vdev = vd; 295 } 296 297 if (guid == 0 && ops != &vdev_hole_ops) { 298 if (spa->spa_root_vdev == vd) { 299 /* 300 * The root vdev's guid will also be the pool guid, 301 * which must be unique among all pools. 302 */ 303 guid = spa_generate_guid(NULL); 304 } else { 305 /* 306 * Any other vdev's guid must be unique within the pool. 307 */ 308 guid = spa_generate_guid(spa); 309 } 310 ASSERT(!spa_guid_exists(spa_guid(spa), guid)); 311 } 312 313 vd->vdev_spa = spa; 314 vd->vdev_id = id; 315 vd->vdev_guid = guid; 316 vd->vdev_guid_sum = guid; 317 vd->vdev_ops = ops; 318 vd->vdev_state = VDEV_STATE_CLOSED; 319 vd->vdev_ishole = (ops == &vdev_hole_ops); 320 321 mutex_init(&vd->vdev_dtl_lock, NULL, MUTEX_DEFAULT, NULL); 322 mutex_init(&vd->vdev_stat_lock, NULL, MUTEX_DEFAULT, NULL); 323 mutex_init(&vd->vdev_probe_lock, NULL, MUTEX_DEFAULT, NULL); 324 for (int t = 0; t < DTL_TYPES; t++) { 325 space_map_create(&vd->vdev_dtl[t], 0, -1ULL, 0, 326 &vd->vdev_dtl_lock); 327 } 328 txg_list_create(&vd->vdev_ms_list, 329 offsetof(struct metaslab, ms_txg_node)); 330 txg_list_create(&vd->vdev_dtl_list, 331 offsetof(struct vdev, vdev_dtl_node)); 332 vd->vdev_stat.vs_timestamp = gethrtime(); 333 vdev_queue_init(vd); 334 vdev_cache_init(vd); 335 336 return (vd); 337 } 338 339 /* 340 * Allocate a new vdev. The 'alloctype' is used to control whether we are 341 * creating a new vdev or loading an existing one - the behavior is slightly 342 * different for each case. 343 */ 344 int 345 vdev_alloc(spa_t *spa, vdev_t **vdp, nvlist_t *nv, vdev_t *parent, uint_t id, 346 int alloctype) 347 { 348 vdev_ops_t *ops; 349 char *type; 350 uint64_t guid = 0, islog, nparity; 351 vdev_t *vd; 352 353 ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL); 354 355 if (nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) != 0) 356 return (EINVAL); 357 358 if ((ops = vdev_getops(type)) == NULL) 359 return (EINVAL); 360 361 /* 362 * If this is a load, get the vdev guid from the nvlist. 363 * Otherwise, vdev_alloc_common() will generate one for us. 
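	 * For example, a brand-new device being added has no label yet, so a
	 * fresh guid is generated; a load must reuse the guid already
	 * recorded in the pool config.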
	 */
	if (alloctype == VDEV_ALLOC_LOAD) {
		uint64_t label_id;

		if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID, &label_id) ||
		    label_id != id)
			return (EINVAL);

		if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) != 0)
			return (EINVAL);
	} else if (alloctype == VDEV_ALLOC_SPARE) {
		if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) != 0)
			return (EINVAL);
	} else if (alloctype == VDEV_ALLOC_L2CACHE) {
		if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) != 0)
			return (EINVAL);
	} else if (alloctype == VDEV_ALLOC_ROOTPOOL) {
		if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) != 0)
			return (EINVAL);
	}

	/*
	 * The first allocated vdev must be of type 'root'.
	 */
	if (ops != &vdev_root_ops && spa->spa_root_vdev == NULL)
		return (EINVAL);

	/*
	 * Determine whether we're a log vdev.
	 */
	islog = 0;
	(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_LOG, &islog);
	if (islog && spa_version(spa) < SPA_VERSION_SLOGS)
		return (ENOTSUP);

	if (ops == &vdev_hole_ops && spa_version(spa) < SPA_VERSION_HOLES)
		return (ENOTSUP);

	/*
	 * Set the nparity property for RAID-Z vdevs.
	 */
	nparity = -1ULL;
	if (ops == &vdev_raidz_ops) {
		if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NPARITY,
		    &nparity) == 0) {
			if (nparity == 0 || nparity > VDEV_RAIDZ_MAXPARITY)
				return (EINVAL);
			/*
			 * Previous versions could only support 1 or 2 parity
			 * devices.
			 */
			if (nparity > 1 &&
			    spa_version(spa) < SPA_VERSION_RAIDZ2)
				return (ENOTSUP);
			if (nparity > 2 &&
			    spa_version(spa) < SPA_VERSION_RAIDZ3)
				return (ENOTSUP);
		} else {
			/*
			 * We require the parity to be specified for SPAs that
			 * support multiple parity levels.
			 */
			if (spa_version(spa) >= SPA_VERSION_RAIDZ2)
				return (EINVAL);
			/*
			 * Otherwise, we default to 1 parity device for RAID-Z.
			 */
			nparity = 1;
		}
	} else {
		nparity = 0;
	}
	ASSERT(nparity != -1ULL);

	vd = vdev_alloc_common(spa, id, guid, ops);

	vd->vdev_islog = islog;
	vd->vdev_nparity = nparity;

	if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &vd->vdev_path) == 0)
		vd->vdev_path = spa_strdup(vd->vdev_path);
	if (nvlist_lookup_string(nv, ZPOOL_CONFIG_DEVID, &vd->vdev_devid) == 0)
		vd->vdev_devid = spa_strdup(vd->vdev_devid);
	if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PHYS_PATH,
	    &vd->vdev_physpath) == 0)
		vd->vdev_physpath = spa_strdup(vd->vdev_physpath);
	if (nvlist_lookup_string(nv, ZPOOL_CONFIG_FRU, &vd->vdev_fru) == 0)
		vd->vdev_fru = spa_strdup(vd->vdev_fru);

	/*
	 * Set the whole_disk property. If it's not specified, leave the value
	 * as -1.
	 */
	if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
	    &vd->vdev_wholedisk) != 0)
		vd->vdev_wholedisk = -1ULL;

	/*
	 * Look for the 'not present' flag. This will only be set if the device
	 * was not present at the time of import.
	 */
	(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT,
	    &vd->vdev_not_present);

	/*
	 * Get the alignment requirement.
	 */
	(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ASHIFT, &vd->vdev_ashift);

	/*
	 * Retrieve the vdev creation time.
	 */
	(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_CREATE_TXG,
	    &vd->vdev_crtxg);

	/*
	 * If we're a top-level vdev, try to load the allocation parameters.
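	 * These are the metaslab array object, metaslab shift and asize
	 * looked up just below; values missing from the config simply leave
	 * the corresponding fields at zero.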
481 */ 482 if (parent && !parent->vdev_parent && 483 (alloctype == VDEV_ALLOC_LOAD || alloctype == VDEV_ALLOC_SPLIT)) { 484 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_METASLAB_ARRAY, 485 &vd->vdev_ms_array); 486 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_METASLAB_SHIFT, 487 &vd->vdev_ms_shift); 488 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ASIZE, 489 &vd->vdev_asize); 490 } 491 492 if (parent && !parent->vdev_parent) { 493 ASSERT(alloctype == VDEV_ALLOC_LOAD || 494 alloctype == VDEV_ALLOC_ADD || 495 alloctype == VDEV_ALLOC_SPLIT || 496 alloctype == VDEV_ALLOC_ROOTPOOL); 497 vd->vdev_mg = metaslab_group_create(islog ? 498 spa_log_class(spa) : spa_normal_class(spa), vd); 499 } 500 501 /* 502 * If we're a leaf vdev, try to load the DTL object and other state. 503 */ 504 if (vd->vdev_ops->vdev_op_leaf && 505 (alloctype == VDEV_ALLOC_LOAD || alloctype == VDEV_ALLOC_L2CACHE || 506 alloctype == VDEV_ALLOC_ROOTPOOL)) { 507 if (alloctype == VDEV_ALLOC_LOAD) { 508 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_DTL, 509 &vd->vdev_dtl_smo.smo_object); 510 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_UNSPARE, 511 &vd->vdev_unspare); 512 } 513 514 if (alloctype == VDEV_ALLOC_ROOTPOOL) { 515 uint64_t spare = 0; 516 517 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_SPARE, 518 &spare) == 0 && spare) 519 spa_spare_add(vd); 520 } 521 522 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_OFFLINE, 523 &vd->vdev_offline); 524 525 /* 526 * When importing a pool, we want to ignore the persistent fault 527 * state, as the diagnosis made on another system may not be 528 * valid in the current context. Local vdevs will 529 * remain in the faulted state. 530 */ 531 if (spa_load_state(spa) == SPA_LOAD_OPEN) { 532 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_FAULTED, 533 &vd->vdev_faulted); 534 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_DEGRADED, 535 &vd->vdev_degraded); 536 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_REMOVED, 537 &vd->vdev_removed); 538 539 if (vd->vdev_faulted || vd->vdev_degraded) { 540 char *aux; 541 542 vd->vdev_label_aux = 543 VDEV_AUX_ERR_EXCEEDED; 544 if (nvlist_lookup_string(nv, 545 ZPOOL_CONFIG_AUX_STATE, &aux) == 0 && 546 strcmp(aux, "external") == 0) 547 vd->vdev_label_aux = VDEV_AUX_EXTERNAL; 548 } 549 } 550 } 551 552 /* 553 * Add ourselves to the parent's list of children. 554 */ 555 vdev_add_child(parent, vd); 556 557 *vdp = vd; 558 559 return (0); 560 } 561 562 void 563 vdev_free(vdev_t *vd) 564 { 565 spa_t *spa = vd->vdev_spa; 566 567 /* 568 * vdev_free() implies closing the vdev first. This is simpler than 569 * trying to ensure complicated semantics for all callers. 570 */ 571 vdev_close(vd); 572 573 ASSERT(!list_link_active(&vd->vdev_config_dirty_node)); 574 ASSERT(!list_link_active(&vd->vdev_state_dirty_node)); 575 576 /* 577 * Free all children. 578 */ 579 for (int c = 0; c < vd->vdev_children; c++) 580 vdev_free(vd->vdev_child[c]); 581 582 ASSERT(vd->vdev_child == NULL); 583 ASSERT(vd->vdev_guid_sum == vd->vdev_guid); 584 585 /* 586 * Discard allocation state. 587 */ 588 if (vd->vdev_mg != NULL) { 589 vdev_metaslab_fini(vd); 590 metaslab_group_destroy(vd->vdev_mg); 591 } 592 593 ASSERT3U(vd->vdev_stat.vs_space, ==, 0); 594 ASSERT3U(vd->vdev_stat.vs_dspace, ==, 0); 595 ASSERT3U(vd->vdev_stat.vs_alloc, ==, 0); 596 597 /* 598 * Remove this vdev from its parent's child list. 599 */ 600 vdev_remove_child(vd->vdev_parent, vd); 601 602 ASSERT(vd->vdev_parent == NULL); 603 604 /* 605 * Clean up vdev structure. 
606 */ 607 vdev_queue_fini(vd); 608 vdev_cache_fini(vd); 609 610 if (vd->vdev_path) 611 spa_strfree(vd->vdev_path); 612 if (vd->vdev_devid) 613 spa_strfree(vd->vdev_devid); 614 if (vd->vdev_physpath) 615 spa_strfree(vd->vdev_physpath); 616 if (vd->vdev_fru) 617 spa_strfree(vd->vdev_fru); 618 619 if (vd->vdev_isspare) 620 spa_spare_remove(vd); 621 if (vd->vdev_isl2cache) 622 spa_l2cache_remove(vd); 623 624 txg_list_destroy(&vd->vdev_ms_list); 625 txg_list_destroy(&vd->vdev_dtl_list); 626 627 mutex_enter(&vd->vdev_dtl_lock); 628 for (int t = 0; t < DTL_TYPES; t++) { 629 space_map_unload(&vd->vdev_dtl[t]); 630 space_map_destroy(&vd->vdev_dtl[t]); 631 } 632 mutex_exit(&vd->vdev_dtl_lock); 633 634 mutex_destroy(&vd->vdev_dtl_lock); 635 mutex_destroy(&vd->vdev_stat_lock); 636 mutex_destroy(&vd->vdev_probe_lock); 637 638 if (vd == spa->spa_root_vdev) 639 spa->spa_root_vdev = NULL; 640 641 kmem_free(vd, sizeof (vdev_t)); 642 } 643 644 /* 645 * Transfer top-level vdev state from svd to tvd. 646 */ 647 static void 648 vdev_top_transfer(vdev_t *svd, vdev_t *tvd) 649 { 650 spa_t *spa = svd->vdev_spa; 651 metaslab_t *msp; 652 vdev_t *vd; 653 int t; 654 655 ASSERT(tvd == tvd->vdev_top); 656 657 tvd->vdev_ms_array = svd->vdev_ms_array; 658 tvd->vdev_ms_shift = svd->vdev_ms_shift; 659 tvd->vdev_ms_count = svd->vdev_ms_count; 660 661 svd->vdev_ms_array = 0; 662 svd->vdev_ms_shift = 0; 663 svd->vdev_ms_count = 0; 664 665 tvd->vdev_mg = svd->vdev_mg; 666 tvd->vdev_ms = svd->vdev_ms; 667 668 svd->vdev_mg = NULL; 669 svd->vdev_ms = NULL; 670 671 if (tvd->vdev_mg != NULL) 672 tvd->vdev_mg->mg_vd = tvd; 673 674 tvd->vdev_stat.vs_alloc = svd->vdev_stat.vs_alloc; 675 tvd->vdev_stat.vs_space = svd->vdev_stat.vs_space; 676 tvd->vdev_stat.vs_dspace = svd->vdev_stat.vs_dspace; 677 678 svd->vdev_stat.vs_alloc = 0; 679 svd->vdev_stat.vs_space = 0; 680 svd->vdev_stat.vs_dspace = 0; 681 682 for (t = 0; t < TXG_SIZE; t++) { 683 while ((msp = txg_list_remove(&svd->vdev_ms_list, t)) != NULL) 684 (void) txg_list_add(&tvd->vdev_ms_list, msp, t); 685 while ((vd = txg_list_remove(&svd->vdev_dtl_list, t)) != NULL) 686 (void) txg_list_add(&tvd->vdev_dtl_list, vd, t); 687 if (txg_list_remove_this(&spa->spa_vdev_txg_list, svd, t)) 688 (void) txg_list_add(&spa->spa_vdev_txg_list, tvd, t); 689 } 690 691 if (list_link_active(&svd->vdev_config_dirty_node)) { 692 vdev_config_clean(svd); 693 vdev_config_dirty(tvd); 694 } 695 696 if (list_link_active(&svd->vdev_state_dirty_node)) { 697 vdev_state_clean(svd); 698 vdev_state_dirty(tvd); 699 } 700 701 tvd->vdev_deflate_ratio = svd->vdev_deflate_ratio; 702 svd->vdev_deflate_ratio = 0; 703 704 tvd->vdev_islog = svd->vdev_islog; 705 svd->vdev_islog = 0; 706 } 707 708 static void 709 vdev_top_update(vdev_t *tvd, vdev_t *vd) 710 { 711 if (vd == NULL) 712 return; 713 714 vd->vdev_top = tvd; 715 716 for (int c = 0; c < vd->vdev_children; c++) 717 vdev_top_update(tvd, vd->vdev_child[c]); 718 } 719 720 /* 721 * Add a mirror/replacing vdev above an existing vdev. 
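 * For example, 'zpool attach' and 'zpool replace' are expected to use this to
 * splice a mirror or replacing vdev into the tree, with the existing device
 * becoming child 0 of the new parent.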
722 */ 723 vdev_t * 724 vdev_add_parent(vdev_t *cvd, vdev_ops_t *ops) 725 { 726 spa_t *spa = cvd->vdev_spa; 727 vdev_t *pvd = cvd->vdev_parent; 728 vdev_t *mvd; 729 730 ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL); 731 732 mvd = vdev_alloc_common(spa, cvd->vdev_id, 0, ops); 733 734 mvd->vdev_asize = cvd->vdev_asize; 735 mvd->vdev_min_asize = cvd->vdev_min_asize; 736 mvd->vdev_ashift = cvd->vdev_ashift; 737 mvd->vdev_state = cvd->vdev_state; 738 mvd->vdev_crtxg = cvd->vdev_crtxg; 739 740 vdev_remove_child(pvd, cvd); 741 vdev_add_child(pvd, mvd); 742 cvd->vdev_id = mvd->vdev_children; 743 vdev_add_child(mvd, cvd); 744 vdev_top_update(cvd->vdev_top, cvd->vdev_top); 745 746 if (mvd == mvd->vdev_top) 747 vdev_top_transfer(cvd, mvd); 748 749 return (mvd); 750 } 751 752 /* 753 * Remove a 1-way mirror/replacing vdev from the tree. 754 */ 755 void 756 vdev_remove_parent(vdev_t *cvd) 757 { 758 vdev_t *mvd = cvd->vdev_parent; 759 vdev_t *pvd = mvd->vdev_parent; 760 761 ASSERT(spa_config_held(cvd->vdev_spa, SCL_ALL, RW_WRITER) == SCL_ALL); 762 763 ASSERT(mvd->vdev_children == 1); 764 ASSERT(mvd->vdev_ops == &vdev_mirror_ops || 765 mvd->vdev_ops == &vdev_replacing_ops || 766 mvd->vdev_ops == &vdev_spare_ops); 767 cvd->vdev_ashift = mvd->vdev_ashift; 768 769 vdev_remove_child(mvd, cvd); 770 vdev_remove_child(pvd, mvd); 771 772 /* 773 * If cvd will replace mvd as a top-level vdev, preserve mvd's guid. 774 * Otherwise, we could have detached an offline device, and when we 775 * go to import the pool we'll think we have two top-level vdevs, 776 * instead of a different version of the same top-level vdev. 777 */ 778 if (mvd->vdev_top == mvd) { 779 uint64_t guid_delta = mvd->vdev_guid - cvd->vdev_guid; 780 cvd->vdev_orig_guid = cvd->vdev_guid; 781 cvd->vdev_guid += guid_delta; 782 cvd->vdev_guid_sum += guid_delta; 783 } 784 cvd->vdev_id = mvd->vdev_id; 785 vdev_add_child(pvd, cvd); 786 vdev_top_update(cvd->vdev_top, cvd->vdev_top); 787 788 if (cvd == cvd->vdev_top) 789 vdev_top_transfer(mvd, cvd); 790 791 ASSERT(mvd->vdev_children == 0); 792 vdev_free(mvd); 793 } 794 795 int 796 vdev_metaslab_init(vdev_t *vd, uint64_t txg) 797 { 798 spa_t *spa = vd->vdev_spa; 799 objset_t *mos = spa->spa_meta_objset; 800 uint64_t m; 801 uint64_t oldc = vd->vdev_ms_count; 802 uint64_t newc = vd->vdev_asize >> vd->vdev_ms_shift; 803 metaslab_t **mspp; 804 int error; 805 806 ASSERT(txg == 0 || spa_config_held(spa, SCL_ALLOC, RW_WRITER)); 807 808 /* 809 * This vdev is not being allocated from yet or is a hole. 810 */ 811 if (vd->vdev_ms_shift == 0) 812 return (0); 813 814 ASSERT(!vd->vdev_ishole); 815 816 /* 817 * Compute the raidz-deflation ratio. Note, we hard-code 818 * in 128k (1 << 17) because it is the current "typical" blocksize. 819 * Even if SPA_MAXBLOCKSIZE changes, this algorithm must never change, 820 * or we will inconsistently account for existing bp's. 
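	 * For a plain disk or mirror the ratio below typically works out to
	 * 512 (a 128K psize maps to a 128K asize, i.e. 256 512-byte
	 * sectors); raidz vdevs get a smaller value reflecting their
	 * parity overhead.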
821 */ 822 vd->vdev_deflate_ratio = (1 << 17) / 823 (vdev_psize_to_asize(vd, 1 << 17) >> SPA_MINBLOCKSHIFT); 824 825 ASSERT(oldc <= newc); 826 827 mspp = kmem_zalloc(newc * sizeof (*mspp), KM_SLEEP); 828 829 if (oldc != 0) { 830 bcopy(vd->vdev_ms, mspp, oldc * sizeof (*mspp)); 831 kmem_free(vd->vdev_ms, oldc * sizeof (*mspp)); 832 } 833 834 vd->vdev_ms = mspp; 835 vd->vdev_ms_count = newc; 836 837 for (m = oldc; m < newc; m++) { 838 space_map_obj_t smo = { 0, 0, 0 }; 839 if (txg == 0) { 840 uint64_t object = 0; 841 error = dmu_read(mos, vd->vdev_ms_array, 842 m * sizeof (uint64_t), sizeof (uint64_t), &object, 843 DMU_READ_PREFETCH); 844 if (error) 845 return (error); 846 if (object != 0) { 847 dmu_buf_t *db; 848 error = dmu_bonus_hold(mos, object, FTAG, &db); 849 if (error) 850 return (error); 851 ASSERT3U(db->db_size, >=, sizeof (smo)); 852 bcopy(db->db_data, &smo, sizeof (smo)); 853 ASSERT3U(smo.smo_object, ==, object); 854 dmu_buf_rele(db, FTAG); 855 } 856 } 857 vd->vdev_ms[m] = metaslab_init(vd->vdev_mg, &smo, 858 m << vd->vdev_ms_shift, 1ULL << vd->vdev_ms_shift, txg); 859 } 860 861 if (txg == 0) 862 spa_config_enter(spa, SCL_ALLOC, FTAG, RW_WRITER); 863 864 if (oldc == 0) 865 metaslab_group_activate(vd->vdev_mg); 866 867 if (txg == 0) 868 spa_config_exit(spa, SCL_ALLOC, FTAG); 869 870 return (0); 871 } 872 873 void 874 vdev_metaslab_fini(vdev_t *vd) 875 { 876 uint64_t m; 877 uint64_t count = vd->vdev_ms_count; 878 879 if (vd->vdev_ms != NULL) { 880 metaslab_group_passivate(vd->vdev_mg); 881 for (m = 0; m < count; m++) 882 if (vd->vdev_ms[m] != NULL) 883 metaslab_fini(vd->vdev_ms[m]); 884 kmem_free(vd->vdev_ms, count * sizeof (metaslab_t *)); 885 vd->vdev_ms = NULL; 886 } 887 } 888 889 typedef struct vdev_probe_stats { 890 boolean_t vps_readable; 891 boolean_t vps_writeable; 892 int vps_flags; 893 } vdev_probe_stats_t; 894 895 static void 896 vdev_probe_done(zio_t *zio) 897 { 898 spa_t *spa = zio->io_spa; 899 vdev_t *vd = zio->io_vd; 900 vdev_probe_stats_t *vps = zio->io_private; 901 902 ASSERT(vd->vdev_probe_zio != NULL); 903 904 if (zio->io_type == ZIO_TYPE_READ) { 905 if (zio->io_error == 0) 906 vps->vps_readable = 1; 907 if (zio->io_error == 0 && spa_writeable(spa)) { 908 zio_nowait(zio_write_phys(vd->vdev_probe_zio, vd, 909 zio->io_offset, zio->io_size, zio->io_data, 910 ZIO_CHECKSUM_OFF, vdev_probe_done, vps, 911 ZIO_PRIORITY_SYNC_WRITE, vps->vps_flags, B_TRUE)); 912 } else { 913 zio_buf_free(zio->io_data, zio->io_size); 914 } 915 } else if (zio->io_type == ZIO_TYPE_WRITE) { 916 if (zio->io_error == 0) 917 vps->vps_writeable = 1; 918 zio_buf_free(zio->io_data, zio->io_size); 919 } else if (zio->io_type == ZIO_TYPE_NULL) { 920 zio_t *pio; 921 922 vd->vdev_cant_read |= !vps->vps_readable; 923 vd->vdev_cant_write |= !vps->vps_writeable; 924 925 if (vdev_readable(vd) && 926 (vdev_writeable(vd) || !spa_writeable(spa))) { 927 zio->io_error = 0; 928 } else { 929 ASSERT(zio->io_error != 0); 930 zfs_ereport_post(FM_EREPORT_ZFS_PROBE_FAILURE, 931 spa, vd, NULL, 0, 0); 932 zio->io_error = ENXIO; 933 } 934 935 mutex_enter(&vd->vdev_probe_lock); 936 ASSERT(vd->vdev_probe_zio == zio); 937 vd->vdev_probe_zio = NULL; 938 mutex_exit(&vd->vdev_probe_lock); 939 940 while ((pio = zio_walk_parents(zio)) != NULL) 941 if (!vdev_accessible(vd, pio)) 942 pio->io_error = ENXIO; 943 944 kmem_free(vps, sizeof (*vps)); 945 } 946 } 947 948 /* 949 * Determine whether this device is accessible by reading and writing 950 * to several known locations: the pad regions of each vdev label 951 * but the first (which 
we leave alone in case it contains a VTOC). 952 */ 953 zio_t * 954 vdev_probe(vdev_t *vd, zio_t *zio) 955 { 956 spa_t *spa = vd->vdev_spa; 957 vdev_probe_stats_t *vps = NULL; 958 zio_t *pio; 959 960 ASSERT(vd->vdev_ops->vdev_op_leaf); 961 962 /* 963 * Don't probe the probe. 964 */ 965 if (zio && (zio->io_flags & ZIO_FLAG_PROBE)) 966 return (NULL); 967 968 /* 969 * To prevent 'probe storms' when a device fails, we create 970 * just one probe i/o at a time. All zios that want to probe 971 * this vdev will become parents of the probe io. 972 */ 973 mutex_enter(&vd->vdev_probe_lock); 974 975 if ((pio = vd->vdev_probe_zio) == NULL) { 976 vps = kmem_zalloc(sizeof (*vps), KM_SLEEP); 977 978 vps->vps_flags = ZIO_FLAG_CANFAIL | ZIO_FLAG_PROBE | 979 ZIO_FLAG_DONT_CACHE | ZIO_FLAG_DONT_AGGREGATE | 980 ZIO_FLAG_TRYHARD; 981 982 if (spa_config_held(spa, SCL_ZIO, RW_WRITER)) { 983 /* 984 * vdev_cant_read and vdev_cant_write can only 985 * transition from TRUE to FALSE when we have the 986 * SCL_ZIO lock as writer; otherwise they can only 987 * transition from FALSE to TRUE. This ensures that 988 * any zio looking at these values can assume that 989 * failures persist for the life of the I/O. That's 990 * important because when a device has intermittent 991 * connectivity problems, we want to ensure that 992 * they're ascribed to the device (ENXIO) and not 993 * the zio (EIO). 994 * 995 * Since we hold SCL_ZIO as writer here, clear both 996 * values so the probe can reevaluate from first 997 * principles. 998 */ 999 vps->vps_flags |= ZIO_FLAG_CONFIG_WRITER; 1000 vd->vdev_cant_read = B_FALSE; 1001 vd->vdev_cant_write = B_FALSE; 1002 } 1003 1004 vd->vdev_probe_zio = pio = zio_null(NULL, spa, vd, 1005 vdev_probe_done, vps, 1006 vps->vps_flags | ZIO_FLAG_DONT_PROPAGATE); 1007 1008 if (zio != NULL) { 1009 vd->vdev_probe_wanted = B_TRUE; 1010 spa_async_request(spa, SPA_ASYNC_PROBE); 1011 } 1012 } 1013 1014 if (zio != NULL) 1015 zio_add_child(zio, pio); 1016 1017 mutex_exit(&vd->vdev_probe_lock); 1018 1019 if (vps == NULL) { 1020 ASSERT(zio != NULL); 1021 return (NULL); 1022 } 1023 1024 for (int l = 1; l < VDEV_LABELS; l++) { 1025 zio_nowait(zio_read_phys(pio, vd, 1026 vdev_label_offset(vd->vdev_psize, l, 1027 offsetof(vdev_label_t, vl_pad2)), 1028 VDEV_PAD_SIZE, zio_buf_alloc(VDEV_PAD_SIZE), 1029 ZIO_CHECKSUM_OFF, vdev_probe_done, vps, 1030 ZIO_PRIORITY_SYNC_READ, vps->vps_flags, B_TRUE)); 1031 } 1032 1033 if (zio == NULL) 1034 return (pio); 1035 1036 zio_nowait(pio); 1037 return (NULL); 1038 } 1039 1040 static void 1041 vdev_open_child(void *arg) 1042 { 1043 vdev_t *vd = arg; 1044 1045 vd->vdev_open_thread = curthread; 1046 vd->vdev_open_error = vdev_open(vd); 1047 vd->vdev_open_thread = NULL; 1048 } 1049 1050 boolean_t 1051 vdev_uses_zvols(vdev_t *vd) 1052 { 1053 if (vd->vdev_path && strncmp(vd->vdev_path, ZVOL_DIR, 1054 strlen(ZVOL_DIR)) == 0) 1055 return (B_TRUE); 1056 for (int c = 0; c < vd->vdev_children; c++) 1057 if (vdev_uses_zvols(vd->vdev_child[c])) 1058 return (B_TRUE); 1059 return (B_FALSE); 1060 } 1061 1062 void 1063 vdev_open_children(vdev_t *vd) 1064 { 1065 taskq_t *tq; 1066 int children = vd->vdev_children; 1067 1068 /* 1069 * in order to handle pools on top of zvols, do the opens 1070 * in a single thread so that the same thread holds the 1071 * spa_namespace_lock 1072 */ 1073 if (vdev_uses_zvols(vd)) { 1074 for (int c = 0; c < children; c++) 1075 vd->vdev_child[c]->vdev_open_error = 1076 vdev_open(vd->vdev_child[c]); 1077 return; 1078 } 1079 tq = taskq_create("vdev_open", children, 
minclsyspri, 1080 children, children, TASKQ_PREPOPULATE); 1081 1082 for (int c = 0; c < children; c++) 1083 VERIFY(taskq_dispatch(tq, vdev_open_child, vd->vdev_child[c], 1084 TQ_SLEEP) != NULL); 1085 1086 taskq_destroy(tq); 1087 } 1088 1089 /* 1090 * Prepare a virtual device for access. 1091 */ 1092 int 1093 vdev_open(vdev_t *vd) 1094 { 1095 spa_t *spa = vd->vdev_spa; 1096 int error; 1097 uint64_t osize = 0; 1098 uint64_t asize, psize; 1099 uint64_t ashift = 0; 1100 1101 ASSERT(vd->vdev_open_thread == curthread || 1102 spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL); 1103 ASSERT(vd->vdev_state == VDEV_STATE_CLOSED || 1104 vd->vdev_state == VDEV_STATE_CANT_OPEN || 1105 vd->vdev_state == VDEV_STATE_OFFLINE); 1106 1107 vd->vdev_stat.vs_aux = VDEV_AUX_NONE; 1108 vd->vdev_cant_read = B_FALSE; 1109 vd->vdev_cant_write = B_FALSE; 1110 vd->vdev_min_asize = vdev_get_min_asize(vd); 1111 1112 /* 1113 * If this vdev is not removed, check its fault status. If it's 1114 * faulted, bail out of the open. 1115 */ 1116 if (!vd->vdev_removed && vd->vdev_faulted) { 1117 ASSERT(vd->vdev_children == 0); 1118 ASSERT(vd->vdev_label_aux == VDEV_AUX_ERR_EXCEEDED || 1119 vd->vdev_label_aux == VDEV_AUX_EXTERNAL); 1120 vdev_set_state(vd, B_TRUE, VDEV_STATE_FAULTED, 1121 vd->vdev_label_aux); 1122 return (ENXIO); 1123 } else if (vd->vdev_offline) { 1124 ASSERT(vd->vdev_children == 0); 1125 vdev_set_state(vd, B_TRUE, VDEV_STATE_OFFLINE, VDEV_AUX_NONE); 1126 return (ENXIO); 1127 } 1128 1129 error = vd->vdev_ops->vdev_op_open(vd, &osize, &ashift); 1130 1131 /* 1132 * Reset the vdev_reopening flag so that we actually close 1133 * the vdev on error. 1134 */ 1135 vd->vdev_reopening = B_FALSE; 1136 if (zio_injection_enabled && error == 0) 1137 error = zio_handle_device_injection(vd, NULL, ENXIO); 1138 1139 if (error) { 1140 if (vd->vdev_removed && 1141 vd->vdev_stat.vs_aux != VDEV_AUX_OPEN_FAILED) 1142 vd->vdev_removed = B_FALSE; 1143 1144 vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN, 1145 vd->vdev_stat.vs_aux); 1146 return (error); 1147 } 1148 1149 vd->vdev_removed = B_FALSE; 1150 1151 /* 1152 * Recheck the faulted flag now that we have confirmed that 1153 * the vdev is accessible. If we're faulted, bail. 1154 */ 1155 if (vd->vdev_faulted) { 1156 ASSERT(vd->vdev_children == 0); 1157 ASSERT(vd->vdev_label_aux == VDEV_AUX_ERR_EXCEEDED || 1158 vd->vdev_label_aux == VDEV_AUX_EXTERNAL); 1159 vdev_set_state(vd, B_TRUE, VDEV_STATE_FAULTED, 1160 vd->vdev_label_aux); 1161 return (ENXIO); 1162 } 1163 1164 if (vd->vdev_degraded) { 1165 ASSERT(vd->vdev_children == 0); 1166 vdev_set_state(vd, B_TRUE, VDEV_STATE_DEGRADED, 1167 VDEV_AUX_ERR_EXCEEDED); 1168 } else { 1169 vdev_set_state(vd, B_TRUE, VDEV_STATE_HEALTHY, 0); 1170 } 1171 1172 /* 1173 * For hole or missing vdevs we just return success. 
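	 * (there is no physical device to size or probe, and the rest of the
	 * open can proceed without them).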
1174 */ 1175 if (vd->vdev_ishole || vd->vdev_ops == &vdev_missing_ops) 1176 return (0); 1177 1178 for (int c = 0; c < vd->vdev_children; c++) { 1179 if (vd->vdev_child[c]->vdev_state != VDEV_STATE_HEALTHY) { 1180 vdev_set_state(vd, B_TRUE, VDEV_STATE_DEGRADED, 1181 VDEV_AUX_NONE); 1182 break; 1183 } 1184 } 1185 1186 osize = P2ALIGN(osize, (uint64_t)sizeof (vdev_label_t)); 1187 1188 if (vd->vdev_children == 0) { 1189 if (osize < SPA_MINDEVSIZE) { 1190 vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN, 1191 VDEV_AUX_TOO_SMALL); 1192 return (EOVERFLOW); 1193 } 1194 psize = osize; 1195 asize = osize - (VDEV_LABEL_START_SIZE + VDEV_LABEL_END_SIZE); 1196 } else { 1197 if (vd->vdev_parent != NULL && osize < SPA_MINDEVSIZE - 1198 (VDEV_LABEL_START_SIZE + VDEV_LABEL_END_SIZE)) { 1199 vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN, 1200 VDEV_AUX_TOO_SMALL); 1201 return (EOVERFLOW); 1202 } 1203 psize = 0; 1204 asize = osize; 1205 } 1206 1207 vd->vdev_psize = psize; 1208 1209 /* 1210 * Make sure the allocatable size hasn't shrunk. 1211 */ 1212 if (asize < vd->vdev_min_asize) { 1213 vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN, 1214 VDEV_AUX_BAD_LABEL); 1215 return (EINVAL); 1216 } 1217 1218 if (vd->vdev_asize == 0) { 1219 /* 1220 * This is the first-ever open, so use the computed values. 1221 * For testing purposes, a higher ashift can be requested. 1222 */ 1223 vd->vdev_asize = asize; 1224 vd->vdev_ashift = MAX(ashift, vd->vdev_ashift); 1225 } else { 1226 /* 1227 * Make sure the alignment requirement hasn't increased. 1228 */ 1229 if (ashift > vd->vdev_top->vdev_ashift) { 1230 vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN, 1231 VDEV_AUX_BAD_LABEL); 1232 return (EINVAL); 1233 } 1234 } 1235 1236 /* 1237 * If all children are healthy and the asize has increased, 1238 * then we've experienced dynamic LUN growth. If automatic 1239 * expansion is enabled then use the additional space. 1240 */ 1241 if (vd->vdev_state == VDEV_STATE_HEALTHY && asize > vd->vdev_asize && 1242 (vd->vdev_expanding || spa->spa_autoexpand)) 1243 vd->vdev_asize = asize; 1244 1245 vdev_set_min_asize(vd); 1246 1247 /* 1248 * Ensure we can issue some IO before declaring the 1249 * vdev open for business. 1250 */ 1251 if (vd->vdev_ops->vdev_op_leaf && 1252 (error = zio_wait(vdev_probe(vd, NULL))) != 0) { 1253 vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN, 1254 VDEV_AUX_IO_FAILURE); 1255 return (error); 1256 } 1257 1258 /* 1259 * If a leaf vdev has a DTL, and seems healthy, then kick off a 1260 * resilver. But don't do this if we are doing a reopen for a scrub, 1261 * since this would just restart the scrub we are already doing. 1262 */ 1263 if (vd->vdev_ops->vdev_op_leaf && !spa->spa_scrub_reopen && 1264 vdev_resilver_needed(vd, NULL, NULL)) 1265 spa_async_request(spa, SPA_ASYNC_RESILVER); 1266 1267 return (0); 1268 } 1269 1270 /* 1271 * Called once the vdevs are all opened, this routine validates the label 1272 * contents. This needs to be done before vdev_load() so that we don't 1273 * inadvertently do repair I/Os to the wrong device. 1274 * 1275 * This function will only return failure if one of the vdevs indicates that it 1276 * has since been destroyed or exported. This is only possible if 1277 * /etc/zfs/zpool.cache was readonly at the time. Otherwise, the vdev state 1278 * will be updated but the function will return 0. 
1279 */ 1280 int 1281 vdev_validate(vdev_t *vd) 1282 { 1283 spa_t *spa = vd->vdev_spa; 1284 nvlist_t *label; 1285 uint64_t guid = 0, top_guid; 1286 uint64_t state; 1287 1288 for (int c = 0; c < vd->vdev_children; c++) 1289 if (vdev_validate(vd->vdev_child[c]) != 0) 1290 return (EBADF); 1291 1292 /* 1293 * If the device has already failed, or was marked offline, don't do 1294 * any further validation. Otherwise, label I/O will fail and we will 1295 * overwrite the previous state. 1296 */ 1297 if (vd->vdev_ops->vdev_op_leaf && vdev_readable(vd)) { 1298 uint64_t aux_guid = 0; 1299 nvlist_t *nvl; 1300 1301 if ((label = vdev_label_read_config(vd)) == NULL) { 1302 vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN, 1303 VDEV_AUX_BAD_LABEL); 1304 return (0); 1305 } 1306 1307 /* 1308 * Determine if this vdev has been split off into another 1309 * pool. If so, then refuse to open it. 1310 */ 1311 if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_SPLIT_GUID, 1312 &aux_guid) == 0 && aux_guid == spa_guid(spa)) { 1313 vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN, 1314 VDEV_AUX_SPLIT_POOL); 1315 nvlist_free(label); 1316 return (0); 1317 } 1318 1319 if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_GUID, 1320 &guid) != 0 || guid != spa_guid(spa)) { 1321 vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN, 1322 VDEV_AUX_CORRUPT_DATA); 1323 nvlist_free(label); 1324 return (0); 1325 } 1326 1327 if (nvlist_lookup_nvlist(label, ZPOOL_CONFIG_VDEV_TREE, &nvl) 1328 != 0 || nvlist_lookup_uint64(nvl, ZPOOL_CONFIG_ORIG_GUID, 1329 &aux_guid) != 0) 1330 aux_guid = 0; 1331 1332 /* 1333 * If this vdev just became a top-level vdev because its 1334 * sibling was detached, it will have adopted the parent's 1335 * vdev guid -- but the label may or may not be on disk yet. 1336 * Fortunately, either version of the label will have the 1337 * same top guid, so if we're a top-level vdev, we can 1338 * safely compare to that instead. 1339 * 1340 * If we split this vdev off instead, then we also check the 1341 * original pool's guid. We don't want to consider the vdev 1342 * corrupt if it is partway through a split operation. 1343 */ 1344 if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_GUID, 1345 &guid) != 0 || 1346 nvlist_lookup_uint64(label, ZPOOL_CONFIG_TOP_GUID, 1347 &top_guid) != 0 || 1348 ((vd->vdev_guid != guid && vd->vdev_guid != aux_guid) && 1349 (vd->vdev_guid != top_guid || vd != vd->vdev_top))) { 1350 vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN, 1351 VDEV_AUX_CORRUPT_DATA); 1352 nvlist_free(label); 1353 return (0); 1354 } 1355 1356 if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_STATE, 1357 &state) != 0) { 1358 vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN, 1359 VDEV_AUX_CORRUPT_DATA); 1360 nvlist_free(label); 1361 return (0); 1362 } 1363 1364 nvlist_free(label); 1365 1366 /* 1367 * If spa->spa_load_verbatim is true, no need to check the 1368 * state of the pool. 1369 */ 1370 if (!spa->spa_load_verbatim && 1371 spa_load_state(spa) == SPA_LOAD_OPEN && 1372 state != POOL_STATE_ACTIVE) 1373 return (EBADF); 1374 1375 /* 1376 * If we were able to open and validate a vdev that was 1377 * previously marked permanently unavailable, clear that state 1378 * now. 1379 */ 1380 if (vd->vdev_not_present) 1381 vd->vdev_not_present = 0; 1382 } 1383 1384 return (0); 1385 } 1386 1387 /* 1388 * Close a virtual device. 
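 * vdev_close() is essentially the inverse of vdev_open(); vdev_reopen()
 * below simply calls one after the other.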
1389 */ 1390 void 1391 vdev_close(vdev_t *vd) 1392 { 1393 spa_t *spa = vd->vdev_spa; 1394 vdev_t *pvd = vd->vdev_parent; 1395 1396 ASSERT(spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL); 1397 1398 /* 1399 * If our parent is reopening, then we are as well, unless we are 1400 * going offline. 1401 */ 1402 if (pvd != NULL && pvd->vdev_reopening) 1403 vd->vdev_reopening = (pvd->vdev_reopening && !vd->vdev_offline); 1404 1405 vd->vdev_ops->vdev_op_close(vd); 1406 1407 vdev_cache_purge(vd); 1408 1409 /* 1410 * We record the previous state before we close it, so that if we are 1411 * doing a reopen(), we don't generate FMA ereports if we notice that 1412 * it's still faulted. 1413 */ 1414 vd->vdev_prevstate = vd->vdev_state; 1415 1416 if (vd->vdev_offline) 1417 vd->vdev_state = VDEV_STATE_OFFLINE; 1418 else 1419 vd->vdev_state = VDEV_STATE_CLOSED; 1420 vd->vdev_stat.vs_aux = VDEV_AUX_NONE; 1421 } 1422 1423 void 1424 vdev_hold(vdev_t *vd) 1425 { 1426 spa_t *spa = vd->vdev_spa; 1427 1428 ASSERT(spa_is_root(spa)); 1429 if (spa->spa_state == POOL_STATE_UNINITIALIZED) 1430 return; 1431 1432 for (int c = 0; c < vd->vdev_children; c++) 1433 vdev_hold(vd->vdev_child[c]); 1434 1435 if (vd->vdev_ops->vdev_op_leaf) 1436 vd->vdev_ops->vdev_op_hold(vd); 1437 } 1438 1439 void 1440 vdev_rele(vdev_t *vd) 1441 { 1442 spa_t *spa = vd->vdev_spa; 1443 1444 ASSERT(spa_is_root(spa)); 1445 for (int c = 0; c < vd->vdev_children; c++) 1446 vdev_rele(vd->vdev_child[c]); 1447 1448 if (vd->vdev_ops->vdev_op_leaf) 1449 vd->vdev_ops->vdev_op_rele(vd); 1450 } 1451 1452 /* 1453 * Reopen all interior vdevs and any unopened leaves. We don't actually 1454 * reopen leaf vdevs which had previously been opened as they might deadlock 1455 * on the spa_config_lock. Instead we only obtain the leaf's physical size. 1456 * If the leaf has never been opened then open it, as usual. 1457 */ 1458 void 1459 vdev_reopen(vdev_t *vd) 1460 { 1461 spa_t *spa = vd->vdev_spa; 1462 1463 ASSERT(spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL); 1464 1465 /* set the reopening flag unless we're taking the vdev offline */ 1466 vd->vdev_reopening = !vd->vdev_offline; 1467 vdev_close(vd); 1468 (void) vdev_open(vd); 1469 1470 /* 1471 * Call vdev_validate() here to make sure we have the same device. 1472 * Otherwise, a device with an invalid label could be successfully 1473 * opened in response to vdev_reopen(). 1474 */ 1475 if (vd->vdev_aux) { 1476 (void) vdev_validate_aux(vd); 1477 if (vdev_readable(vd) && vdev_writeable(vd) && 1478 vd->vdev_aux == &spa->spa_l2cache && 1479 !l2arc_vdev_present(vd)) 1480 l2arc_add_vdev(spa, vd); 1481 } else { 1482 (void) vdev_validate(vd); 1483 } 1484 1485 /* 1486 * Reassess parent vdev's health. 1487 */ 1488 vdev_propagate_state(vd); 1489 } 1490 1491 int 1492 vdev_create(vdev_t *vd, uint64_t txg, boolean_t isreplacing) 1493 { 1494 int error; 1495 1496 /* 1497 * Normally, partial opens (e.g. of a mirror) are allowed. 1498 * For a create, however, we want to fail the request if 1499 * there are any components we can't open. 1500 */ 1501 error = vdev_open(vd); 1502 1503 if (error || vd->vdev_state != VDEV_STATE_HEALTHY) { 1504 vdev_close(vd); 1505 return (error ? error : ENXIO); 1506 } 1507 1508 /* 1509 * Recursively initialize all labels. 1510 */ 1511 if ((error = vdev_label_init(vd, txg, isreplacing ? 
	    VDEV_LABEL_REPLACE : VDEV_LABEL_CREATE)) != 0) {
		vdev_close(vd);
		return (error);
	}

	return (0);
}

void
vdev_metaslab_set_size(vdev_t *vd)
{
	/*
	 * Aim for roughly 200 metaslabs per vdev.
	 */
	vd->vdev_ms_shift = highbit(vd->vdev_asize / 200);
	vd->vdev_ms_shift = MAX(vd->vdev_ms_shift, SPA_MAXBLOCKSHIFT);
}

void
vdev_dirty(vdev_t *vd, int flags, void *arg, uint64_t txg)
{
	ASSERT(vd == vd->vdev_top);
	ASSERT(!vd->vdev_ishole);
	ASSERT(ISP2(flags));

	if (flags & VDD_METASLAB)
		(void) txg_list_add(&vd->vdev_ms_list, arg, txg);

	if (flags & VDD_DTL)
		(void) txg_list_add(&vd->vdev_dtl_list, arg, txg);

	(void) txg_list_add(&vd->vdev_spa->spa_vdev_txg_list, vd, txg);
}

/*
 * DTLs.
 *
 * A vdev's DTL (dirty time log) is the set of transaction groups for which
 * the vdev has less than perfect replication. There are four kinds of DTL:
 *
 * DTL_MISSING: txgs for which the vdev has no valid copies of the data
 *
 * DTL_PARTIAL: txgs for which data is available, but not fully replicated
 *
 * DTL_SCRUB: the txgs that could not be repaired by the last scrub; upon
 *	scrub completion, DTL_SCRUB replaces DTL_MISSING in the range of
 *	txgs that was scrubbed.
 *
 * DTL_OUTAGE: txgs which cannot currently be read, whether due to
 *	persistent errors or just some device being offline.
 *	Unlike the other three, the DTL_OUTAGE map is not generally
 *	maintained; it's only computed when needed, typically to
 *	determine whether a device can be detached.
 *
 * For leaf vdevs, DTL_MISSING and DTL_PARTIAL are identical: the device
 * either has the data or it doesn't.
 *
 * For interior vdevs such as mirror and RAID-Z the picture is more complex.
 * A vdev's DTL_PARTIAL is the union of its children's DTL_PARTIALs, because
 * if any child is less than fully replicated, then so is its parent.
 * A vdev's DTL_MISSING is a modified union of its children's DTL_MISSINGs,
 * comprising only those txgs which appear in 'maxfaults + 1' or more children;
 * those are the txgs we don't have enough replication to read. For example,
 * double-parity RAID-Z can tolerate up to two missing devices (maxfaults == 2);
 * thus, its DTL_MISSING consists of the set of txgs that appear in more than
 * two child DTL_MISSING maps.
 *
 * It should be clear from the above that to compute the DTLs and outage maps
 * for all vdevs, it suffices to know just the leaf vdevs' DTL_MISSING maps.
 * Therefore, that is all we keep on disk. When loading the pool, or after
 * a configuration change, we generate all other DTLs from first principles.
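 *
 * As another example, a 2-way mirror adds a txg to its DTL_MISSING only when
 * both children are missing it, while a raidz2 vdev needs three or more
 * children to be missing that txg.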
 */
void
vdev_dtl_dirty(vdev_t *vd, vdev_dtl_type_t t, uint64_t txg, uint64_t size)
{
	space_map_t *sm = &vd->vdev_dtl[t];

	ASSERT(t < DTL_TYPES);
	ASSERT(vd != vd->vdev_spa->spa_root_vdev);

	mutex_enter(sm->sm_lock);
	if (!space_map_contains(sm, txg, size))
		space_map_add(sm, txg, size);
	mutex_exit(sm->sm_lock);
}

boolean_t
vdev_dtl_contains(vdev_t *vd, vdev_dtl_type_t t, uint64_t txg, uint64_t size)
{
	space_map_t *sm = &vd->vdev_dtl[t];
	boolean_t dirty = B_FALSE;

	ASSERT(t < DTL_TYPES);
	ASSERT(vd != vd->vdev_spa->spa_root_vdev);

	mutex_enter(sm->sm_lock);
	if (sm->sm_space != 0)
		dirty = space_map_contains(sm, txg, size);
	mutex_exit(sm->sm_lock);

	return (dirty);
}

boolean_t
vdev_dtl_empty(vdev_t *vd, vdev_dtl_type_t t)
{
	space_map_t *sm = &vd->vdev_dtl[t];
	boolean_t empty;

	mutex_enter(sm->sm_lock);
	empty = (sm->sm_space == 0);
	mutex_exit(sm->sm_lock);

	return (empty);
}

/*
 * Reassess DTLs after a config change or scrub completion.
 */
void
vdev_dtl_reassess(vdev_t *vd, uint64_t txg, uint64_t scrub_txg, int scrub_done)
{
	spa_t *spa = vd->vdev_spa;
	avl_tree_t reftree;
	int minref;

	ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0);

	for (int c = 0; c < vd->vdev_children; c++)
		vdev_dtl_reassess(vd->vdev_child[c], txg,
		    scrub_txg, scrub_done);

	if (vd == spa->spa_root_vdev || vd->vdev_ishole || vd->vdev_aux)
		return;

	if (vd->vdev_ops->vdev_op_leaf) {
		mutex_enter(&vd->vdev_dtl_lock);
		if (scrub_txg != 0 &&
		    (spa->spa_scrub_started || spa->spa_scrub_errors == 0)) {
			/*
			 * We completed a scrub up to scrub_txg. If we
			 * did it without rebooting, then the scrub dtl
			 * will be valid, so excise the old region and
			 * fold in the scrub dtl. Otherwise, leave the
			 * dtl as-is if there was an error.
			 *
			 * There's a little trick here: to excise the beginning
			 * of the DTL_MISSING map, we put it into a reference
			 * tree and then add a segment with refcnt -1 that
			 * covers the range [0, scrub_txg). This means
			 * that each txg in that range has refcnt -1 or 0.
			 * We then add DTL_SCRUB with a refcnt of 2, so that
			 * entries in the range [0, scrub_txg) will have a
			 * positive refcnt -- either 1 or 2. We then convert
			 * the reference tree into the new DTL_MISSING map.
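			 * As a hypothetical example: if DTL_MISSING covered
			 * [10, 50) and scrub_txg is 30, txgs in [10, 30) drop
			 * to refcnt 0 and are excised, [30, 50) stays at
			 * refcnt 1, and anything still in DTL_SCRUB keeps a
			 * positive refcnt and thus remains missing.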
1667 */ 1668 space_map_ref_create(&reftree); 1669 space_map_ref_add_map(&reftree, 1670 &vd->vdev_dtl[DTL_MISSING], 1); 1671 space_map_ref_add_seg(&reftree, 0, scrub_txg, -1); 1672 space_map_ref_add_map(&reftree, 1673 &vd->vdev_dtl[DTL_SCRUB], 2); 1674 space_map_ref_generate_map(&reftree, 1675 &vd->vdev_dtl[DTL_MISSING], 1); 1676 space_map_ref_destroy(&reftree); 1677 } 1678 space_map_vacate(&vd->vdev_dtl[DTL_PARTIAL], NULL, NULL); 1679 space_map_walk(&vd->vdev_dtl[DTL_MISSING], 1680 space_map_add, &vd->vdev_dtl[DTL_PARTIAL]); 1681 if (scrub_done) 1682 space_map_vacate(&vd->vdev_dtl[DTL_SCRUB], NULL, NULL); 1683 space_map_vacate(&vd->vdev_dtl[DTL_OUTAGE], NULL, NULL); 1684 if (!vdev_readable(vd)) 1685 space_map_add(&vd->vdev_dtl[DTL_OUTAGE], 0, -1ULL); 1686 else 1687 space_map_walk(&vd->vdev_dtl[DTL_MISSING], 1688 space_map_add, &vd->vdev_dtl[DTL_OUTAGE]); 1689 mutex_exit(&vd->vdev_dtl_lock); 1690 1691 if (txg != 0) 1692 vdev_dirty(vd->vdev_top, VDD_DTL, vd, txg); 1693 return; 1694 } 1695 1696 mutex_enter(&vd->vdev_dtl_lock); 1697 for (int t = 0; t < DTL_TYPES; t++) { 1698 /* account for child's outage in parent's missing map */ 1699 int s = (t == DTL_MISSING) ? DTL_OUTAGE: t; 1700 if (t == DTL_SCRUB) 1701 continue; /* leaf vdevs only */ 1702 if (t == DTL_PARTIAL) 1703 minref = 1; /* i.e. non-zero */ 1704 else if (vd->vdev_nparity != 0) 1705 minref = vd->vdev_nparity + 1; /* RAID-Z */ 1706 else 1707 minref = vd->vdev_children; /* any kind of mirror */ 1708 space_map_ref_create(&reftree); 1709 for (int c = 0; c < vd->vdev_children; c++) { 1710 vdev_t *cvd = vd->vdev_child[c]; 1711 mutex_enter(&cvd->vdev_dtl_lock); 1712 space_map_ref_add_map(&reftree, &cvd->vdev_dtl[s], 1); 1713 mutex_exit(&cvd->vdev_dtl_lock); 1714 } 1715 space_map_ref_generate_map(&reftree, &vd->vdev_dtl[t], minref); 1716 space_map_ref_destroy(&reftree); 1717 } 1718 mutex_exit(&vd->vdev_dtl_lock); 1719 } 1720 1721 static int 1722 vdev_dtl_load(vdev_t *vd) 1723 { 1724 spa_t *spa = vd->vdev_spa; 1725 space_map_obj_t *smo = &vd->vdev_dtl_smo; 1726 objset_t *mos = spa->spa_meta_objset; 1727 dmu_buf_t *db; 1728 int error; 1729 1730 ASSERT(vd->vdev_children == 0); 1731 1732 if (smo->smo_object == 0) 1733 return (0); 1734 1735 ASSERT(!vd->vdev_ishole); 1736 1737 if ((error = dmu_bonus_hold(mos, smo->smo_object, FTAG, &db)) != 0) 1738 return (error); 1739 1740 ASSERT3U(db->db_size, >=, sizeof (*smo)); 1741 bcopy(db->db_data, smo, sizeof (*smo)); 1742 dmu_buf_rele(db, FTAG); 1743 1744 mutex_enter(&vd->vdev_dtl_lock); 1745 error = space_map_load(&vd->vdev_dtl[DTL_MISSING], 1746 NULL, SM_ALLOC, smo, mos); 1747 mutex_exit(&vd->vdev_dtl_lock); 1748 1749 return (error); 1750 } 1751 1752 void 1753 vdev_dtl_sync(vdev_t *vd, uint64_t txg) 1754 { 1755 spa_t *spa = vd->vdev_spa; 1756 space_map_obj_t *smo = &vd->vdev_dtl_smo; 1757 space_map_t *sm = &vd->vdev_dtl[DTL_MISSING]; 1758 objset_t *mos = spa->spa_meta_objset; 1759 space_map_t smsync; 1760 kmutex_t smlock; 1761 dmu_buf_t *db; 1762 dmu_tx_t *tx; 1763 1764 ASSERT(!vd->vdev_ishole); 1765 1766 tx = dmu_tx_create_assigned(spa->spa_dsl_pool, txg); 1767 1768 if (vd->vdev_detached) { 1769 if (smo->smo_object != 0) { 1770 int err = dmu_object_free(mos, smo->smo_object, tx); 1771 ASSERT3U(err, ==, 0); 1772 smo->smo_object = 0; 1773 } 1774 dmu_tx_commit(tx); 1775 return; 1776 } 1777 1778 if (smo->smo_object == 0) { 1779 ASSERT(smo->smo_objsize == 0); 1780 ASSERT(smo->smo_alloc == 0); 1781 smo->smo_object = dmu_object_alloc(mos, 1782 DMU_OT_SPACE_MAP, 1 << SPACE_MAP_BLOCKSHIFT, 1783 
DMU_OT_SPACE_MAP_HEADER, sizeof (*smo), tx); 1784 ASSERT(smo->smo_object != 0); 1785 vdev_config_dirty(vd->vdev_top); 1786 } 1787 1788 mutex_init(&smlock, NULL, MUTEX_DEFAULT, NULL); 1789 1790 space_map_create(&smsync, sm->sm_start, sm->sm_size, sm->sm_shift, 1791 &smlock); 1792 1793 mutex_enter(&smlock); 1794 1795 mutex_enter(&vd->vdev_dtl_lock); 1796 space_map_walk(sm, space_map_add, &smsync); 1797 mutex_exit(&vd->vdev_dtl_lock); 1798 1799 space_map_truncate(smo, mos, tx); 1800 space_map_sync(&smsync, SM_ALLOC, smo, mos, tx); 1801 1802 space_map_destroy(&smsync); 1803 1804 mutex_exit(&smlock); 1805 mutex_destroy(&smlock); 1806 1807 VERIFY(0 == dmu_bonus_hold(mos, smo->smo_object, FTAG, &db)); 1808 dmu_buf_will_dirty(db, tx); 1809 ASSERT3U(db->db_size, >=, sizeof (*smo)); 1810 bcopy(smo, db->db_data, sizeof (*smo)); 1811 dmu_buf_rele(db, FTAG); 1812 1813 dmu_tx_commit(tx); 1814 } 1815 1816 /* 1817 * Determine whether the specified vdev can be offlined/detached/removed 1818 * without losing data. 1819 */ 1820 boolean_t 1821 vdev_dtl_required(vdev_t *vd) 1822 { 1823 spa_t *spa = vd->vdev_spa; 1824 vdev_t *tvd = vd->vdev_top; 1825 uint8_t cant_read = vd->vdev_cant_read; 1826 boolean_t required; 1827 1828 ASSERT(spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL); 1829 1830 if (vd == spa->spa_root_vdev || vd == tvd) 1831 return (B_TRUE); 1832 1833 /* 1834 * Temporarily mark the device as unreadable, and then determine 1835 * whether this results in any DTL outages in the top-level vdev. 1836 * If not, we can safely offline/detach/remove the device. 1837 */ 1838 vd->vdev_cant_read = B_TRUE; 1839 vdev_dtl_reassess(tvd, 0, 0, B_FALSE); 1840 required = !vdev_dtl_empty(tvd, DTL_OUTAGE); 1841 vd->vdev_cant_read = cant_read; 1842 vdev_dtl_reassess(tvd, 0, 0, B_FALSE); 1843 1844 return (required); 1845 } 1846 1847 /* 1848 * Determine if resilver is needed, and if so the txg range. 1849 */ 1850 boolean_t 1851 vdev_resilver_needed(vdev_t *vd, uint64_t *minp, uint64_t *maxp) 1852 { 1853 boolean_t needed = B_FALSE; 1854 uint64_t thismin = UINT64_MAX; 1855 uint64_t thismax = 0; 1856 1857 if (vd->vdev_children == 0) { 1858 mutex_enter(&vd->vdev_dtl_lock); 1859 if (vd->vdev_dtl[DTL_MISSING].sm_space != 0 && 1860 vdev_writeable(vd)) { 1861 space_seg_t *ss; 1862 1863 ss = avl_first(&vd->vdev_dtl[DTL_MISSING].sm_root); 1864 thismin = ss->ss_start - 1; 1865 ss = avl_last(&vd->vdev_dtl[DTL_MISSING].sm_root); 1866 thismax = ss->ss_end; 1867 needed = B_TRUE; 1868 } 1869 mutex_exit(&vd->vdev_dtl_lock); 1870 } else { 1871 for (int c = 0; c < vd->vdev_children; c++) { 1872 vdev_t *cvd = vd->vdev_child[c]; 1873 uint64_t cmin, cmax; 1874 1875 if (vdev_resilver_needed(cvd, &cmin, &cmax)) { 1876 thismin = MIN(thismin, cmin); 1877 thismax = MAX(thismax, cmax); 1878 needed = B_TRUE; 1879 } 1880 } 1881 } 1882 1883 if (needed && minp) { 1884 *minp = thismin; 1885 *maxp = thismax; 1886 } 1887 return (needed); 1888 } 1889 1890 void 1891 vdev_load(vdev_t *vd) 1892 { 1893 /* 1894 * Recursively load all children. 1895 */ 1896 for (int c = 0; c < vd->vdev_children; c++) 1897 vdev_load(vd->vdev_child[c]); 1898 1899 /* 1900 * If this is a top-level vdev, initialize its metaslabs. 1901 */ 1902 if (vd == vd->vdev_top && !vd->vdev_ishole && 1903 (vd->vdev_ashift == 0 || vd->vdev_asize == 0 || 1904 vdev_metaslab_init(vd, 0) != 0)) 1905 vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN, 1906 VDEV_AUX_CORRUPT_DATA); 1907 1908 /* 1909 * If this is a leaf vdev, load its DTL. 
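	 * (the on-disk space map referenced by vdev_dtl_smo; see
	 * vdev_dtl_load() above).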
	 */
	if (vd->vdev_ops->vdev_op_leaf && vdev_dtl_load(vd) != 0)
		vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_CORRUPT_DATA);
}

/*
 * The special vdev case is used for hot spares and l2cache devices. Its
 * sole purpose is to set the vdev state for the associated vdev. To do this,
 * we make sure that we can open the underlying device, then try to read the
 * label, and make sure that the label is sane and that it hasn't been
 * repurposed to another pool.
 */
int
vdev_validate_aux(vdev_t *vd)
{
	nvlist_t *label;
	uint64_t guid, version;
	uint64_t state;

	if (!vdev_readable(vd))
		return (0);

	if ((label = vdev_label_read_config(vd)) == NULL) {
		vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_CORRUPT_DATA);
		return (-1);
	}

	if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_VERSION, &version) != 0 ||
	    version > SPA_VERSION ||
	    nvlist_lookup_uint64(label, ZPOOL_CONFIG_GUID, &guid) != 0 ||
	    guid != vd->vdev_guid ||
	    nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_STATE, &state) != 0) {
		vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_CORRUPT_DATA);
		nvlist_free(label);
		return (-1);
	}

	/*
	 * We don't actually check the pool state here. If it's in fact in
	 * use by another pool, we update this fact on the fly when requested.
	 */
	nvlist_free(label);
	return (0);
}

void
vdev_remove(vdev_t *vd, uint64_t txg)
{
	spa_t *spa = vd->vdev_spa;
	objset_t *mos = spa->spa_meta_objset;
	dmu_tx_t *tx;

	tx = dmu_tx_create_assigned(spa_get_dsl(spa), txg);

	if (vd->vdev_dtl_smo.smo_object) {
		ASSERT3U(vd->vdev_dtl_smo.smo_alloc, ==, 0);
		(void) dmu_object_free(mos, vd->vdev_dtl_smo.smo_object, tx);
		vd->vdev_dtl_smo.smo_object = 0;
	}

	if (vd->vdev_ms != NULL) {
		for (int m = 0; m < vd->vdev_ms_count; m++) {
			metaslab_t *msp = vd->vdev_ms[m];

			if (msp == NULL || msp->ms_smo.smo_object == 0)
				continue;

			ASSERT3U(msp->ms_smo.smo_alloc, ==, 0);
			(void) dmu_object_free(mos, msp->ms_smo.smo_object, tx);
			msp->ms_smo.smo_object = 0;
		}
	}

	if (vd->vdev_ms_array) {
		(void) dmu_object_free(mos, vd->vdev_ms_array, tx);
		vd->vdev_ms_array = 0;
		vd->vdev_ms_shift = 0;
	}
	dmu_tx_commit(tx);
}

void
vdev_sync_done(vdev_t *vd, uint64_t txg)
{
	metaslab_t *msp;
	boolean_t reassess = !txg_list_empty(&vd->vdev_ms_list, TXG_CLEAN(txg));

	ASSERT(!vd->vdev_ishole);

	while (msp = txg_list_remove(&vd->vdev_ms_list, TXG_CLEAN(txg)))
		metaslab_sync_done(msp, txg);

	if (reassess)
		metaslab_sync_reassess(vd->vdev_mg);
}

void
vdev_sync(vdev_t *vd, uint64_t txg)
{
	spa_t *spa = vd->vdev_spa;
	vdev_t *lvd;
	metaslab_t *msp;
	dmu_tx_t *tx;

	ASSERT(!vd->vdev_ishole);

	if (vd->vdev_ms_array == 0 && vd->vdev_ms_shift != 0) {
		ASSERT(vd == vd->vdev_top);
		tx = dmu_tx_create_assigned(spa->spa_dsl_pool, txg);
		vd->vdev_ms_array = dmu_object_alloc(spa->spa_meta_objset,
		    DMU_OT_OBJECT_ARRAY, 0, DMU_OT_NONE, 0, tx);
		ASSERT(vd->vdev_ms_array != 0);
		vdev_config_dirty(vd);
		dmu_tx_commit(tx);
	}

	if (vd->vdev_removing)
		vdev_remove(vd, txg);

	while ((msp = txg_list_remove(&vd->vdev_ms_list, txg)) != NULL) {
NULL) { 2033 metaslab_sync(msp, txg); 2034 (void) txg_list_add(&vd->vdev_ms_list, msp, TXG_CLEAN(txg)); 2035 } 2036 2037 while ((lvd = txg_list_remove(&vd->vdev_dtl_list, txg)) != NULL) 2038 vdev_dtl_sync(lvd, txg); 2039 2040 (void) txg_list_add(&spa->spa_vdev_txg_list, vd, TXG_CLEAN(txg)); 2041 } 2042 2043 uint64_t 2044 vdev_psize_to_asize(vdev_t *vd, uint64_t psize) 2045 { 2046 return (vd->vdev_ops->vdev_op_asize(vd, psize)); 2047 } 2048 2049 /* 2050 * Mark the given vdev faulted. A faulted vdev behaves as if the device could 2051 * not be opened, and no I/O is attempted. 2052 */ 2053 int 2054 vdev_fault(spa_t *spa, uint64_t guid, vdev_aux_t aux) 2055 { 2056 vdev_t *vd; 2057 2058 spa_vdev_state_enter(spa, SCL_NONE); 2059 2060 if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL) 2061 return (spa_vdev_state_exit(spa, NULL, ENODEV)); 2062 2063 if (!vd->vdev_ops->vdev_op_leaf) 2064 return (spa_vdev_state_exit(spa, NULL, ENOTSUP)); 2065 2066 /* 2067 * We don't directly use the aux state here, but if we do a 2068 * vdev_reopen(), we need this value to be present to remember why we 2069 * were faulted. 2070 */ 2071 vd->vdev_label_aux = aux; 2072 2073 /* 2074 * Faulted state takes precedence over degraded. 2075 */ 2076 vd->vdev_faulted = 1ULL; 2077 vd->vdev_degraded = 0ULL; 2078 vdev_set_state(vd, B_FALSE, VDEV_STATE_FAULTED, aux); 2079 2080 /* 2081 * If marking the vdev as faulted causes the top-level vdev to become 2082 * unavailable, then back off and simply mark the vdev as degraded 2083 * instead. 2084 */ 2085 if (vdev_is_dead(vd->vdev_top) && !vd->vdev_islog && 2086 vd->vdev_aux == NULL) { 2087 vd->vdev_degraded = 1ULL; 2088 vd->vdev_faulted = 0ULL; 2089 2090 /* 2091 * If we reopen the device and it's not dead, only then do we 2092 * mark it degraded. 2093 */ 2094 vdev_reopen(vd); 2095 2096 if (vdev_readable(vd)) 2097 vdev_set_state(vd, B_FALSE, VDEV_STATE_DEGRADED, aux); 2098 } 2099 2100 return (spa_vdev_state_exit(spa, vd, 0)); 2101 } 2102 2103 /* 2104 * Mark the given vdev degraded. A degraded vdev is purely an indication to the 2105 * user that something is wrong. The vdev continues to operate as normal as far 2106 * as I/O is concerned. 2107 */ 2108 int 2109 vdev_degrade(spa_t *spa, uint64_t guid, vdev_aux_t aux) 2110 { 2111 vdev_t *vd; 2112 2113 spa_vdev_state_enter(spa, SCL_NONE); 2114 2115 if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL) 2116 return (spa_vdev_state_exit(spa, NULL, ENODEV)); 2117 2118 if (!vd->vdev_ops->vdev_op_leaf) 2119 return (spa_vdev_state_exit(spa, NULL, ENOTSUP)); 2120 2121 /* 2122 * If the vdev is already faulted, then don't do anything. 2123 */ 2124 if (vd->vdev_faulted || vd->vdev_degraded) 2125 return (spa_vdev_state_exit(spa, NULL, 0)); 2126 2127 vd->vdev_degraded = 1ULL; 2128 if (!vdev_is_dead(vd)) 2129 vdev_set_state(vd, B_FALSE, VDEV_STATE_DEGRADED, 2130 aux); 2131 2132 return (spa_vdev_state_exit(spa, vd, 0)); 2133 } 2134 2135 /* 2136 * Online the given vdev. If 'unspare' is set, it implies two things. First, 2137 * any attached spare device should be detached when the device finishes 2138 * resilvering. Second, the online should be treated like a 'test' online case, 2139 * so no FMA events are generated if the device fails to open.
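 * If ZFS_ONLINE_EXPAND is set (or the pool's autoexpand property is on), the device and its non-aux ancestors are reopened with expansion enabled so that any newly available space is picked up; aux (L2ARC) devices do not support expansion, so the online returns ENOTSUP for them.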
2140 */ 2141 int 2142 vdev_online(spa_t *spa, uint64_t guid, uint64_t flags, vdev_state_t *newstate) 2143 { 2144 vdev_t *vd, *tvd, *pvd, *rvd = spa->spa_root_vdev; 2145 2146 spa_vdev_state_enter(spa, SCL_NONE); 2147 2148 if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL) 2149 return (spa_vdev_state_exit(spa, NULL, ENODEV)); 2150 2151 if (!vd->vdev_ops->vdev_op_leaf) 2152 return (spa_vdev_state_exit(spa, NULL, ENOTSUP)); 2153 2154 tvd = vd->vdev_top; 2155 vd->vdev_offline = B_FALSE; 2156 vd->vdev_tmpoffline = B_FALSE; 2157 vd->vdev_checkremove = !!(flags & ZFS_ONLINE_CHECKREMOVE); 2158 vd->vdev_forcefault = !!(flags & ZFS_ONLINE_FORCEFAULT); 2159 2160 /* XXX - L2ARC 1.0 does not support expansion */ 2161 if (!vd->vdev_aux) { 2162 for (pvd = vd; pvd != rvd; pvd = pvd->vdev_parent) 2163 pvd->vdev_expanding = !!(flags & ZFS_ONLINE_EXPAND); 2164 } 2165 2166 vdev_reopen(tvd); 2167 vd->vdev_checkremove = vd->vdev_forcefault = B_FALSE; 2168 2169 if (!vd->vdev_aux) { 2170 for (pvd = vd; pvd != rvd; pvd = pvd->vdev_parent) 2171 pvd->vdev_expanding = B_FALSE; 2172 } 2173 2174 if (newstate) 2175 *newstate = vd->vdev_state; 2176 if ((flags & ZFS_ONLINE_UNSPARE) && 2177 !vdev_is_dead(vd) && vd->vdev_parent && 2178 vd->vdev_parent->vdev_ops == &vdev_spare_ops && 2179 vd->vdev_parent->vdev_child[0] == vd) 2180 vd->vdev_unspare = B_TRUE; 2181 2182 if ((flags & ZFS_ONLINE_EXPAND) || spa->spa_autoexpand) { 2183 2184 /* XXX - L2ARC 1.0 does not support expansion */ 2185 if (vd->vdev_aux) 2186 return (spa_vdev_state_exit(spa, vd, ENOTSUP)); 2187 spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE); 2188 } 2189 return (spa_vdev_state_exit(spa, vd, 0)); 2190 } 2191 2192 static int 2193 vdev_offline_locked(spa_t *spa, uint64_t guid, uint64_t flags) 2194 { 2195 vdev_t *vd, *tvd; 2196 int error = 0; 2197 uint64_t generation; 2198 metaslab_group_t *mg; 2199 2200 top: 2201 spa_vdev_state_enter(spa, SCL_ALLOC); 2202 2203 if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL) 2204 return (spa_vdev_state_exit(spa, NULL, ENODEV)); 2205 2206 if (!vd->vdev_ops->vdev_op_leaf) 2207 return (spa_vdev_state_exit(spa, NULL, ENOTSUP)); 2208 2209 tvd = vd->vdev_top; 2210 mg = tvd->vdev_mg; 2211 generation = spa->spa_config_generation + 1; 2212 2213 /* 2214 * If the device isn't already offline, try to offline it. 2215 */ 2216 if (!vd->vdev_offline) { 2217 /* 2218 * If this device has the only valid copy of some data, 2219 * don't allow it to be offlined. Log devices are always 2220 * expendable. 2221 */ 2222 if (!tvd->vdev_islog && vd->vdev_aux == NULL && 2223 vdev_dtl_required(vd)) 2224 return (spa_vdev_state_exit(spa, NULL, EBUSY)); 2225 2226 /* 2227 * If the top-level is a slog and it has had allocations 2228 * then proceed. We check that the vdev's metaslab group 2229 * is not NULL since it's possible that we may have just 2230 * added this vdev but not yet initialized its metaslabs. 2231 */ 2232 if (tvd->vdev_islog && mg != NULL) { 2233 /* 2234 * Prevent any future allocations. 2235 */ 2236 metaslab_group_passivate(mg); 2237 (void) spa_vdev_state_exit(spa, vd, 0); 2238 2239 error = spa_offline_log(spa); 2240 2241 spa_vdev_state_enter(spa, SCL_ALLOC); 2242 2243 /* 2244 * Check to see if the config has changed. 
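 * If either the log evacuation failed or the config generation changed while the state lock was dropped, re-activate the metaslab group; an error is returned to the caller, while a config change simply retries the offline from the top.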
2245 */ 2246 if (error || generation != spa->spa_config_generation) { 2247 metaslab_group_activate(mg); 2248 if (error) 2249 return (spa_vdev_state_exit(spa, 2250 vd, error)); 2251 (void) spa_vdev_state_exit(spa, vd, 0); 2252 goto top; 2253 } 2254 ASSERT3U(tvd->vdev_stat.vs_alloc, ==, 0); 2255 } 2256 2257 /* 2258 * Offline this device and reopen its top-level vdev. 2259 * If the top-level vdev is a log device then just offline 2260 * it. Otherwise, if this action results in the top-level 2261 * vdev becoming unusable, undo it and fail the request. 2262 */ 2263 vd->vdev_offline = B_TRUE; 2264 vdev_reopen(tvd); 2265 2266 if (!tvd->vdev_islog && vd->vdev_aux == NULL && 2267 vdev_is_dead(tvd)) { 2268 vd->vdev_offline = B_FALSE; 2269 vdev_reopen(tvd); 2270 return (spa_vdev_state_exit(spa, NULL, EBUSY)); 2271 } 2272 2273 /* 2274 * Add the device back into the metaslab rotor so that 2275 * once we online the device it's open for business. 2276 */ 2277 if (tvd->vdev_islog && mg != NULL) 2278 metaslab_group_activate(mg); 2279 } 2280 2281 vd->vdev_tmpoffline = !!(flags & ZFS_OFFLINE_TEMPORARY); 2282 2283 return (spa_vdev_state_exit(spa, vd, 0)); 2284 } 2285 2286 int 2287 vdev_offline(spa_t *spa, uint64_t guid, uint64_t flags) 2288 { 2289 int error; 2290 2291 mutex_enter(&spa->spa_vdev_top_lock); 2292 error = vdev_offline_locked(spa, guid, flags); 2293 mutex_exit(&spa->spa_vdev_top_lock); 2294 2295 return (error); 2296 } 2297 2298 /* 2299 * Clear the error counts associated with this vdev. Unlike vdev_online() and 2300 * vdev_offline(), we assume the spa config is locked. We also clear all 2301 * children. If 'vd' is NULL, then the user wants to clear all vdevs. 2302 */ 2303 void 2304 vdev_clear(spa_t *spa, vdev_t *vd) 2305 { 2306 vdev_t *rvd = spa->spa_root_vdev; 2307 2308 ASSERT(spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL); 2309 2310 if (vd == NULL) 2311 vd = rvd; 2312 2313 vd->vdev_stat.vs_read_errors = 0; 2314 vd->vdev_stat.vs_write_errors = 0; 2315 vd->vdev_stat.vs_checksum_errors = 0; 2316 2317 for (int c = 0; c < vd->vdev_children; c++) 2318 vdev_clear(spa, vd->vdev_child[c]); 2319 2320 /* 2321 * If we're in the FAULTED state or have experienced failed I/O, then 2322 * clear the persistent state and attempt to reopen the device. We 2323 * also mark the vdev config dirty, so that the new faulted state is 2324 * written out to disk. 2325 */ 2326 if (vd->vdev_faulted || vd->vdev_degraded || 2327 !vdev_readable(vd) || !vdev_writeable(vd)) { 2328 2329 /* 2330 * When reopening in response to a clear event, it may be due to 2331 * a fmadm repair request. In this case, if the device is 2332 * still broken, we still want to post the ereport again. 2333 */ 2334 vd->vdev_forcefault = B_TRUE; 2335 2336 vd->vdev_faulted = vd->vdev_degraded = 0; 2337 vd->vdev_cant_read = B_FALSE; 2338 vd->vdev_cant_write = B_FALSE; 2339 2340 vdev_reopen(vd); 2341 2342 vd->vdev_forcefault = B_FALSE; 2343 2344 if (vd != rvd) 2345 vdev_state_dirty(vd->vdev_top); 2346 2347 if (vd->vdev_aux == NULL && !vdev_is_dead(vd)) 2348 spa_async_request(spa, SPA_ASYNC_RESILVER); 2349 2350 spa_event_notify(spa, vd, ESC_ZFS_VDEV_CLEAR); 2351 } 2352 2353 /* 2354 * When clearing a FMA-diagnosed fault, we always want to 2355 * unspare the device, as we assume that the spare was originally 2356 * attached in response to the FMA fault.
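 * The flag is only recorded here; the actual detach of the spare happens elsewhere, once the device has finished resilvering.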
2357 */ 2358 if (!vdev_is_dead(vd) && vd->vdev_parent != NULL && 2359 vd->vdev_parent->vdev_ops == &vdev_spare_ops && 2360 vd->vdev_parent->vdev_child[0] == vd) 2361 vd->vdev_unspare = B_TRUE; 2362 } 2363 2364 boolean_t 2365 vdev_is_dead(vdev_t *vd) 2366 { 2367 /* 2368 * Holes and missing devices are always considered "dead". 2369 * This simplifies the code since we don't have to check for 2370 * these types of devices in the various code paths. 2371 * Instead we rely on the fact that we skip over dead devices 2372 * before issuing I/O to them. 2373 */ 2374 return (vd->vdev_state < VDEV_STATE_DEGRADED || vd->vdev_ishole || 2375 vd->vdev_ops == &vdev_missing_ops); 2376 } 2377 2378 boolean_t 2379 vdev_readable(vdev_t *vd) 2380 { 2381 return (!vdev_is_dead(vd) && !vd->vdev_cant_read); 2382 } 2383 2384 boolean_t 2385 vdev_writeable(vdev_t *vd) 2386 { 2387 return (!vdev_is_dead(vd) && !vd->vdev_cant_write); 2388 } 2389 2390 boolean_t 2391 vdev_allocatable(vdev_t *vd) 2392 { 2393 uint64_t state = vd->vdev_state; 2394 2395 /* 2396 * We currently allow allocations from vdevs which may be in the 2397 * process of reopening (i.e. VDEV_STATE_CLOSED). If the device 2398 * fails to reopen then we'll catch it later when we're holding 2399 * the proper locks. Note that we have to get the vdev state 2400 * in a local variable because although it changes atomically, 2401 * we're asking two separate questions about it. 2402 */ 2403 return (!(state < VDEV_STATE_DEGRADED && state != VDEV_STATE_CLOSED) && 2404 !vd->vdev_cant_write && !vd->vdev_ishole && !vd->vdev_removing); 2405 } 2406 2407 boolean_t 2408 vdev_accessible(vdev_t *vd, zio_t *zio) 2409 { 2410 ASSERT(zio->io_vd == vd); 2411 2412 if (vdev_is_dead(vd) || vd->vdev_remove_wanted) 2413 return (B_FALSE); 2414 2415 if (zio->io_type == ZIO_TYPE_READ) 2416 return (!vd->vdev_cant_read); 2417 2418 if (zio->io_type == ZIO_TYPE_WRITE) 2419 return (!vd->vdev_cant_write); 2420 2421 return (B_TRUE); 2422 } 2423 2424 /* 2425 * Get statistics for the given vdev. 2426 */ 2427 void 2428 vdev_get_stats(vdev_t *vd, vdev_stat_t *vs) 2429 { 2430 vdev_t *rvd = vd->vdev_spa->spa_root_vdev; 2431 2432 mutex_enter(&vd->vdev_stat_lock); 2433 bcopy(&vd->vdev_stat, vs, sizeof (*vs)); 2434 vs->vs_scrub_errors = vd->vdev_spa->spa_scrub_errors; 2435 vs->vs_timestamp = gethrtime() - vs->vs_timestamp; 2436 vs->vs_state = vd->vdev_state; 2437 vs->vs_rsize = vdev_get_min_asize(vd); 2438 if (vd->vdev_ops->vdev_op_leaf) 2439 vs->vs_rsize += VDEV_LABEL_START_SIZE + VDEV_LABEL_END_SIZE; 2440 mutex_exit(&vd->vdev_stat_lock); 2441 2442 /* 2443 * If we're getting stats on the root vdev, aggregate the I/O counts 2444 * over all top-level vdevs (i.e. the direct children of the root). 
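 * Each child's counters are added into the caller's buffer while holding the root's vdev_stat_lock.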
2445 */ 2446 if (vd == rvd) { 2447 for (int c = 0; c < rvd->vdev_children; c++) { 2448 vdev_t *cvd = rvd->vdev_child[c]; 2449 vdev_stat_t *cvs = &cvd->vdev_stat; 2450 2451 mutex_enter(&vd->vdev_stat_lock); 2452 for (int t = 0; t < ZIO_TYPES; t++) { 2453 vs->vs_ops[t] += cvs->vs_ops[t]; 2454 vs->vs_bytes[t] += cvs->vs_bytes[t]; 2455 } 2456 vs->vs_scrub_examined += cvs->vs_scrub_examined; 2457 mutex_exit(&vd->vdev_stat_lock); 2458 } 2459 } 2460 } 2461 2462 void 2463 vdev_clear_stats(vdev_t *vd) 2464 { 2465 mutex_enter(&vd->vdev_stat_lock); 2466 vd->vdev_stat.vs_space = 0; 2467 vd->vdev_stat.vs_dspace = 0; 2468 vd->vdev_stat.vs_alloc = 0; 2469 mutex_exit(&vd->vdev_stat_lock); 2470 } 2471 2472 void 2473 vdev_stat_update(zio_t *zio, uint64_t psize) 2474 { 2475 spa_t *spa = zio->io_spa; 2476 vdev_t *rvd = spa->spa_root_vdev; 2477 vdev_t *vd = zio->io_vd ? zio->io_vd : rvd; 2478 vdev_t *pvd; 2479 uint64_t txg = zio->io_txg; 2480 vdev_stat_t *vs = &vd->vdev_stat; 2481 zio_type_t type = zio->io_type; 2482 int flags = zio->io_flags; 2483 2484 /* 2485 * If this i/o is a gang leader, it didn't do any actual work. 2486 */ 2487 if (zio->io_gang_tree) 2488 return; 2489 2490 if (zio->io_error == 0) { 2491 /* 2492 * If this is a root i/o, don't count it -- we've already 2493 * counted the top-level vdevs, and vdev_get_stats() will 2494 * aggregate them when asked. This reduces contention on 2495 * the root vdev_stat_lock and implicitly handles blocks 2496 * that compress away to holes, for which there is no i/o. 2497 * (Holes never create vdev children, so all the counters 2498 * remain zero, which is what we want.) 2499 * 2500 * Note: this only applies to successful i/o (io_error == 0) 2501 * because unlike i/o counts, errors are not additive. 2502 * When reading a ditto block, for example, failure of 2503 * one top-level vdev does not imply a root-level error. 2504 */ 2505 if (vd == rvd) 2506 return; 2507 2508 ASSERT(vd == zio->io_vd); 2509 2510 if (flags & ZIO_FLAG_IO_BYPASS) 2511 return; 2512 2513 mutex_enter(&vd->vdev_stat_lock); 2514 2515 if (flags & ZIO_FLAG_IO_REPAIR) { 2516 if (flags & ZIO_FLAG_SCRUB_THREAD) 2517 vs->vs_scrub_repaired += psize; 2518 if (flags & ZIO_FLAG_SELF_HEAL) 2519 vs->vs_self_healed += psize; 2520 } 2521 2522 vs->vs_ops[type]++; 2523 vs->vs_bytes[type] += psize; 2524 2525 mutex_exit(&vd->vdev_stat_lock); 2526 return; 2527 } 2528 2529 if (flags & ZIO_FLAG_SPECULATIVE) 2530 return; 2531 2532 /* 2533 * If this is an I/O error that is going to be retried, then ignore the 2534 * error. Otherwise, the user may interpret B_FAILFAST I/O errors as 2535 * hard errors, when in reality they can happen for any number of 2536 * innocuous reasons (bus resets, MPxIO link failure, etc). 2537 */ 2538 if (zio->io_error == EIO && 2539 !(zio->io_flags & ZIO_FLAG_IO_RETRY)) 2540 return; 2541 2542 /* 2543 * Intent log writes won't propagate their error to the root 2544 * I/O, so don't mark these types of failures as pool-level 2545 * errors.
2546 */ 2547 if (zio->io_vd == NULL && (zio->io_flags & ZIO_FLAG_DONT_PROPAGATE)) 2548 return; 2549 2550 mutex_enter(&vd->vdev_stat_lock); 2551 if (type == ZIO_TYPE_READ && !vdev_is_dead(vd)) { 2552 if (zio->io_error == ECKSUM) 2553 vs->vs_checksum_errors++; 2554 else 2555 vs->vs_read_errors++; 2556 } 2557 if (type == ZIO_TYPE_WRITE && !vdev_is_dead(vd)) 2558 vs->vs_write_errors++; 2559 mutex_exit(&vd->vdev_stat_lock); 2560 2561 if (type == ZIO_TYPE_WRITE && txg != 0 && 2562 (!(flags & ZIO_FLAG_IO_REPAIR) || 2563 (flags & ZIO_FLAG_SCRUB_THREAD) || 2564 spa->spa_claiming)) { 2565 /* 2566 * This is either a normal write (not a repair), or it's 2567 * a repair induced by the scrub thread, or it's a repair 2568 * made by zil_claim() during spa_load() in the first txg. 2569 * In the normal case, we commit the DTL change in the same 2570 * txg as the block was born. In the scrub-induced repair 2571 * case, we know that scrubs run in first-pass syncing context, 2572 * so we commit the DTL change in spa_syncing_txg(spa). 2573 * In the zil_claim() case, we commit in spa_first_txg(spa). 2574 * 2575 * We currently do not make DTL entries for failed spontaneous 2576 * self-healing writes triggered by normal (non-scrubbing) 2577 * reads, because we have no transactional context in which to 2578 * do so -- and it's not clear that it'd be desirable anyway. 2579 */ 2580 if (vd->vdev_ops->vdev_op_leaf) { 2581 uint64_t commit_txg = txg; 2582 if (flags & ZIO_FLAG_SCRUB_THREAD) { 2583 ASSERT(flags & ZIO_FLAG_IO_REPAIR); 2584 ASSERT(spa_sync_pass(spa) == 1); 2585 vdev_dtl_dirty(vd, DTL_SCRUB, txg, 1); 2586 commit_txg = spa_syncing_txg(spa); 2587 } else if (spa->spa_claiming) { 2588 ASSERT(flags & ZIO_FLAG_IO_REPAIR); 2589 commit_txg = spa_first_txg(spa); 2590 } 2591 ASSERT(commit_txg >= spa_syncing_txg(spa)); 2592 if (vdev_dtl_contains(vd, DTL_MISSING, txg, 1)) 2593 return; 2594 for (pvd = vd; pvd != rvd; pvd = pvd->vdev_parent) 2595 vdev_dtl_dirty(pvd, DTL_PARTIAL, txg, 1); 2596 vdev_dirty(vd->vdev_top, VDD_DTL, vd, commit_txg); 2597 } 2598 if (vd != rvd) 2599 vdev_dtl_dirty(vd, DTL_MISSING, txg, 1); 2600 } 2601 } 2602 2603 void 2604 vdev_scrub_stat_update(vdev_t *vd, pool_scrub_type_t type, boolean_t complete) 2605 { 2606 vdev_stat_t *vs = &vd->vdev_stat; 2607 2608 for (int c = 0; c < vd->vdev_children; c++) 2609 vdev_scrub_stat_update(vd->vdev_child[c], type, complete); 2610 2611 mutex_enter(&vd->vdev_stat_lock); 2612 2613 if (type == POOL_SCRUB_NONE) { 2614 /* 2615 * Update completion and end time. Leave everything else alone 2616 * so we can report what happened during the previous scrub. 2617 */ 2618 vs->vs_scrub_complete = complete; 2619 vs->vs_scrub_end = gethrestime_sec(); 2620 } else { 2621 vs->vs_scrub_type = type; 2622 vs->vs_scrub_complete = 0; 2623 vs->vs_scrub_examined = 0; 2624 vs->vs_scrub_repaired = 0; 2625 vs->vs_scrub_start = gethrestime_sec(); 2626 vs->vs_scrub_end = 0; 2627 } 2628 2629 mutex_exit(&vd->vdev_stat_lock); 2630 } 2631 2632 /* 2633 * Update the in-core space usage stats for this vdev, its metaslab class, 2634 * and the root vdev. 2635 */ 2636 void 2637 vdev_space_update(vdev_t *vd, int64_t alloc_delta, int64_t defer_delta, 2638 int64_t space_delta) 2639 { 2640 int64_t dspace_delta = space_delta; 2641 spa_t *spa = vd->vdev_spa; 2642 vdev_t *rvd = spa->spa_root_vdev; 2643 metaslab_group_t *mg = vd->vdev_mg; 2644 metaslab_class_t *mc = mg ? mg->mg_class : NULL; 2645 2646 ASSERT(vd == vd->vdev_top); 2647 2648 /* 2649 * Apply the inverse of the psize-to-asize (ie. 
RAID-Z) space-expansion 2650 * factor. We must calculate this here and not at the root vdev 2651 * because the root vdev's psize-to-asize is simply the max of its 2652 * children's, thus not accurate enough for us. 2653 */ 2654 ASSERT((dspace_delta & (SPA_MINBLOCKSIZE-1)) == 0); 2655 ASSERT(vd->vdev_deflate_ratio != 0 || vd->vdev_isl2cache); 2656 dspace_delta = (dspace_delta >> SPA_MINBLOCKSHIFT) * 2657 vd->vdev_deflate_ratio; 2658 2659 mutex_enter(&vd->vdev_stat_lock); 2660 vd->vdev_stat.vs_alloc += alloc_delta; 2661 vd->vdev_stat.vs_space += space_delta; 2662 vd->vdev_stat.vs_dspace += dspace_delta; 2663 mutex_exit(&vd->vdev_stat_lock); 2664 2665 if (mc == spa_normal_class(spa)) { 2666 mutex_enter(&rvd->vdev_stat_lock); 2667 rvd->vdev_stat.vs_alloc += alloc_delta; 2668 rvd->vdev_stat.vs_space += space_delta; 2669 rvd->vdev_stat.vs_dspace += dspace_delta; 2670 mutex_exit(&rvd->vdev_stat_lock); 2671 } 2672 2673 if (mc != NULL) { 2674 ASSERT(rvd == vd->vdev_parent); 2675 ASSERT(vd->vdev_ms_count != 0); 2676 2677 metaslab_class_space_update(mc, 2678 alloc_delta, defer_delta, space_delta, dspace_delta); 2679 } 2680 } 2681 2682 /* 2683 * Mark a top-level vdev's config as dirty, placing it on the dirty list 2684 * so that it will be written out next time the vdev configuration is synced. 2685 * If the root vdev is specified (vdev_top == NULL), dirty all top-level vdevs. 2686 */ 2687 void 2688 vdev_config_dirty(vdev_t *vd) 2689 { 2690 spa_t *spa = vd->vdev_spa; 2691 vdev_t *rvd = spa->spa_root_vdev; 2692 int c; 2693 2694 /* 2695 * If this is an aux vdev (as with l2cache and spare devices), then we 2696 * update the vdev config manually and set the sync flag. 2697 */ 2698 if (vd->vdev_aux != NULL) { 2699 spa_aux_vdev_t *sav = vd->vdev_aux; 2700 nvlist_t **aux; 2701 uint_t naux; 2702 2703 for (c = 0; c < sav->sav_count; c++) { 2704 if (sav->sav_vdevs[c] == vd) 2705 break; 2706 } 2707 2708 if (c == sav->sav_count) { 2709 /* 2710 * We're being removed. There's nothing more to do. 2711 */ 2712 ASSERT(sav->sav_sync == B_TRUE); 2713 return; 2714 } 2715 2716 sav->sav_sync = B_TRUE; 2717 2718 if (nvlist_lookup_nvlist_array(sav->sav_config, 2719 ZPOOL_CONFIG_L2CACHE, &aux, &naux) != 0) { 2720 VERIFY(nvlist_lookup_nvlist_array(sav->sav_config, 2721 ZPOOL_CONFIG_SPARES, &aux, &naux) == 0); 2722 } 2723 2724 ASSERT(c < naux); 2725 2726 /* 2727 * Setting the nvlist in the middle of the array is a little 2728 * sketchy, but it will work. 2729 */ 2730 nvlist_free(aux[c]); 2731 aux[c] = vdev_config_generate(spa, vd, B_TRUE, B_FALSE, B_TRUE); 2732 2733 return; 2734 } 2735 2736 /* 2737 * The dirty list is protected by the SCL_CONFIG lock. The caller 2738 * must either hold SCL_CONFIG as writer, or must be the sync thread 2739 * (which holds SCL_CONFIG as reader). There's only one sync thread, 2740 * so this is sufficient to ensure mutual exclusion.
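 * Note that hole vdevs are never placed on the dirty list, and dirtying the root vdev dirties each of its top-level children instead.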
2741 */ 2742 ASSERT(spa_config_held(spa, SCL_CONFIG, RW_WRITER) || 2743 (dsl_pool_sync_context(spa_get_dsl(spa)) && 2744 spa_config_held(spa, SCL_CONFIG, RW_READER))); 2745 2746 if (vd == rvd) { 2747 for (c = 0; c < rvd->vdev_children; c++) 2748 vdev_config_dirty(rvd->vdev_child[c]); 2749 } else { 2750 ASSERT(vd == vd->vdev_top); 2751 2752 if (!list_link_active(&vd->vdev_config_dirty_node) && 2753 !vd->vdev_ishole) 2754 list_insert_head(&spa->spa_config_dirty_list, vd); 2755 } 2756 } 2757 2758 void 2759 vdev_config_clean(vdev_t *vd) 2760 { 2761 spa_t *spa = vd->vdev_spa; 2762 2763 ASSERT(spa_config_held(spa, SCL_CONFIG, RW_WRITER) || 2764 (dsl_pool_sync_context(spa_get_dsl(spa)) && 2765 spa_config_held(spa, SCL_CONFIG, RW_READER))); 2766 2767 ASSERT(list_link_active(&vd->vdev_config_dirty_node)); 2768 list_remove(&spa->spa_config_dirty_list, vd); 2769 } 2770 2771 /* 2772 * Mark a top-level vdev's state as dirty, so that the next pass of 2773 * spa_sync() can convert this into vdev_config_dirty(). We distinguish 2774 * the state changes from larger config changes because they require 2775 * much less locking, and are often needed for administrative actions. 2776 */ 2777 void 2778 vdev_state_dirty(vdev_t *vd) 2779 { 2780 spa_t *spa = vd->vdev_spa; 2781 2782 ASSERT(vd == vd->vdev_top); 2783 2784 /* 2785 * The state list is protected by the SCL_STATE lock. The caller 2786 * must either hold SCL_STATE as writer, or must be the sync thread 2787 * (which holds SCL_STATE as reader). There's only one sync thread, 2788 * so this is sufficient to ensure mutual exclusion. 2789 */ 2790 ASSERT(spa_config_held(spa, SCL_STATE, RW_WRITER) || 2791 (dsl_pool_sync_context(spa_get_dsl(spa)) && 2792 spa_config_held(spa, SCL_STATE, RW_READER))); 2793 2794 if (!list_link_active(&vd->vdev_state_dirty_node) && !vd->vdev_ishole) 2795 list_insert_head(&spa->spa_state_dirty_list, vd); 2796 } 2797 2798 void 2799 vdev_state_clean(vdev_t *vd) 2800 { 2801 spa_t *spa = vd->vdev_spa; 2802 2803 ASSERT(spa_config_held(spa, SCL_STATE, RW_WRITER) || 2804 (dsl_pool_sync_context(spa_get_dsl(spa)) && 2805 spa_config_held(spa, SCL_STATE, RW_READER))); 2806 2807 ASSERT(list_link_active(&vd->vdev_state_dirty_node)); 2808 list_remove(&spa->spa_state_dirty_list, vd); 2809 } 2810 2811 /* 2812 * Propagate vdev state up from children to parent. 2813 */ 2814 void 2815 vdev_propagate_state(vdev_t *vd) 2816 { 2817 spa_t *spa = vd->vdev_spa; 2818 vdev_t *rvd = spa->spa_root_vdev; 2819 int degraded = 0, faulted = 0; 2820 int corrupted = 0; 2821 vdev_t *child; 2822 2823 if (vd->vdev_children > 0) { 2824 for (int c = 0; c < vd->vdev_children; c++) { 2825 child = vd->vdev_child[c]; 2826 2827 /* 2828 * Don't factor holes into the decision. 2829 */ 2830 if (child->vdev_ishole) 2831 continue; 2832 2833 if (!vdev_readable(child) || 2834 (!vdev_writeable(child) && spa_writeable(spa))) { 2835 /* 2836 * Root special: if there is a top-level log 2837 * device, treat the root vdev as if it were 2838 * degraded. 
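 * Log devices are expendable, so the loss of a slog alone should not leave the root vdev faulted.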
2839 */ 2840 if (child->vdev_islog && vd == rvd) 2841 degraded++; 2842 else 2843 faulted++; 2844 } else if (child->vdev_state <= VDEV_STATE_DEGRADED) { 2845 degraded++; 2846 } 2847 2848 if (child->vdev_stat.vs_aux == VDEV_AUX_CORRUPT_DATA) 2849 corrupted++; 2850 } 2851 2852 vd->vdev_ops->vdev_op_state_change(vd, faulted, degraded); 2853 2854 /* 2855 * Root special: if there is a top-level vdev that cannot be 2856 * opened due to corrupted metadata, then propagate the root 2857 * vdev's aux state as 'corrupt' rather than 'insufficient 2858 * replicas'. 2859 */ 2860 if (corrupted && vd == rvd && 2861 rvd->vdev_state == VDEV_STATE_CANT_OPEN) 2862 vdev_set_state(rvd, B_FALSE, VDEV_STATE_CANT_OPEN, 2863 VDEV_AUX_CORRUPT_DATA); 2864 } 2865 2866 if (vd->vdev_parent) 2867 vdev_propagate_state(vd->vdev_parent); 2868 } 2869 2870 /* 2871 * Set a vdev's state. If this is during an open, we don't update the parent 2872 * state, because we're in the process of opening children depth-first. 2873 * Otherwise, we propagate the change to the parent. 2874 * 2875 * If this routine places a device in a faulted state, an appropriate ereport is 2876 * generated. 2877 */ 2878 void 2879 vdev_set_state(vdev_t *vd, boolean_t isopen, vdev_state_t state, vdev_aux_t aux) 2880 { 2881 uint64_t save_state; 2882 spa_t *spa = vd->vdev_spa; 2883 2884 if (state == vd->vdev_state) { 2885 vd->vdev_stat.vs_aux = aux; 2886 return; 2887 } 2888 2889 save_state = vd->vdev_state; 2890 2891 vd->vdev_state = state; 2892 vd->vdev_stat.vs_aux = aux; 2893 2894 /* 2895 * If we are setting the vdev state to anything but an open state, then 2896 * always close the underlying device. Otherwise, we keep accessible 2897 * but invalid devices open forever. We don't call vdev_close() itself, 2898 * because that implies some extra checks (offline, etc) that we don't 2899 * want here. This is limited to leaf devices, because otherwise 2900 * closing the device will affect other children. 2901 */ 2902 if (vdev_is_dead(vd) && vd->vdev_ops->vdev_op_leaf) 2903 vd->vdev_ops->vdev_op_close(vd); 2904 2905 /* 2906 * If we have brought this vdev back into service, we need 2907 * to notify fmd so that it can gracefully repair any outstanding 2908 * cases due to a missing device. We do this in all cases, even those 2909 * that probably don't correlate to a repaired fault. This is sure to 2910 * catch all cases, and we let the zfs-retire agent sort it out. If 2911 * this is a transient state it's OK, as the retire agent will 2912 * double-check the state of the vdev before repairing it. 2913 */ 2914 if (state == VDEV_STATE_HEALTHY && vd->vdev_ops->vdev_op_leaf && 2915 vd->vdev_prevstate != state) 2916 zfs_post_state_change(spa, vd); 2917 2918 if (vd->vdev_removed && 2919 state == VDEV_STATE_CANT_OPEN && 2920 (aux == VDEV_AUX_OPEN_FAILED || vd->vdev_checkremove)) { 2921 /* 2922 * If the previous state is set to VDEV_STATE_REMOVED, then this 2923 * device was previously marked removed and someone attempted to 2924 * reopen it. If this failed due to a nonexistent device, then 2925 * keep the device in the REMOVED state. We also let this be if 2926 * it is one of our special test online cases, which is only 2927 * attempting to online the device and shouldn't generate an FMA 2928 * fault. 
2929 */ 2930 vd->vdev_state = VDEV_STATE_REMOVED; 2931 vd->vdev_stat.vs_aux = VDEV_AUX_NONE; 2932 } else if (state == VDEV_STATE_REMOVED) { 2933 vd->vdev_removed = B_TRUE; 2934 } else if (state == VDEV_STATE_CANT_OPEN) { 2935 /* 2936 * If we fail to open a vdev during an import, we mark it as 2937 * "not available", which signifies that it was never there to 2938 * begin with. Failure to open such a device is not considered 2939 * an error. 2940 */ 2941 if (spa_load_state(spa) == SPA_LOAD_IMPORT && 2942 vd->vdev_ops->vdev_op_leaf) 2943 vd->vdev_not_present = 1; 2944 2945 /* 2946 * Post the appropriate ereport. If the 'prevstate' field is 2947 * set to something other than VDEV_STATE_UNKNOWN, it indicates 2948 * that this is part of a vdev_reopen(). In this case, we don't 2949 * want to post the ereport if the device was already in the 2950 * CANT_OPEN state beforehand. 2951 * 2952 * If the 'checkremove' flag is set, then this is an attempt to 2953 * online the device in response to an insertion event. If we 2954 * hit this case, then we have detected an insertion event for a 2955 * faulted or offline device that wasn't in the removed state. 2956 * In this scenario, we don't post an ereport because we are 2957 * about to replace the device, or attempt an online with 2958 * vdev_forcefault, which will generate the fault for us. 2959 */ 2960 if ((vd->vdev_prevstate != state || vd->vdev_forcefault) && 2961 !vd->vdev_not_present && !vd->vdev_checkremove && 2962 vd != spa->spa_root_vdev) { 2963 const char *class; 2964 2965 switch (aux) { 2966 case VDEV_AUX_OPEN_FAILED: 2967 class = FM_EREPORT_ZFS_DEVICE_OPEN_FAILED; 2968 break; 2969 case VDEV_AUX_CORRUPT_DATA: 2970 class = FM_EREPORT_ZFS_DEVICE_CORRUPT_DATA; 2971 break; 2972 case VDEV_AUX_NO_REPLICAS: 2973 class = FM_EREPORT_ZFS_DEVICE_NO_REPLICAS; 2974 break; 2975 case VDEV_AUX_BAD_GUID_SUM: 2976 class = FM_EREPORT_ZFS_DEVICE_BAD_GUID_SUM; 2977 break; 2978 case VDEV_AUX_TOO_SMALL: 2979 class = FM_EREPORT_ZFS_DEVICE_TOO_SMALL; 2980 break; 2981 case VDEV_AUX_BAD_LABEL: 2982 class = FM_EREPORT_ZFS_DEVICE_BAD_LABEL; 2983 break; 2984 case VDEV_AUX_IO_FAILURE: 2985 class = FM_EREPORT_ZFS_IO_FAILURE; 2986 break; 2987 default: 2988 class = FM_EREPORT_ZFS_DEVICE_UNKNOWN; 2989 } 2990 2991 zfs_ereport_post(class, spa, vd, NULL, save_state, 0); 2992 } 2993 2994 /* Erase any notion of persistent removed state */ 2995 vd->vdev_removed = B_FALSE; 2996 } else { 2997 vd->vdev_removed = B_FALSE; 2998 } 2999 3000 if (!isopen && vd->vdev_parent) 3001 vdev_propagate_state(vd->vdev_parent); 3002 } 3003 3004 /* 3005 * Check the vdev configuration to ensure that it's capable of supporting 3006 * a root pool. Currently, we do not support RAID-Z or partial configuration. 3007 * In addition, only a single top-level vdev is allowed and none of the leaves 3008 * can be wholedisks. 
3009 */ 3010 boolean_t 3011 vdev_is_bootable(vdev_t *vd) 3012 { 3013 if (!vd->vdev_ops->vdev_op_leaf) { 3014 char *vdev_type = vd->vdev_ops->vdev_op_type; 3015 3016 if (strcmp(vdev_type, VDEV_TYPE_ROOT) == 0 && 3017 vd->vdev_children > 1) { 3018 return (B_FALSE); 3019 } else if (strcmp(vdev_type, VDEV_TYPE_RAIDZ) == 0 || 3020 strcmp(vdev_type, VDEV_TYPE_MISSING) == 0) { 3021 return (B_FALSE); 3022 } 3023 } else if (vd->vdev_wholedisk == 1) { 3024 return (B_FALSE); 3025 } 3026 3027 for (int c = 0; c < vd->vdev_children; c++) { 3028 if (!vdev_is_bootable(vd->vdev_child[c])) 3029 return (B_FALSE); 3030 } 3031 return (B_TRUE); 3032 } 3033 3034 /* 3035 * Load the state from the original vdev tree (ovd) which 3036 * we've retrieved from the MOS config object. If the original 3037 * vdev was offline then we transfer that state to the device 3038 * in the current vdev tree (nvd). 3039 */ 3040 void 3041 vdev_load_log_state(vdev_t *nvd, vdev_t *ovd) 3042 { 3043 spa_t *spa = nvd->vdev_spa; 3044 3045 ASSERT(spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL); 3046 ASSERT3U(nvd->vdev_guid, ==, ovd->vdev_guid); 3047 3048 for (int c = 0; c < nvd->vdev_children; c++) 3049 vdev_load_log_state(nvd->vdev_child[c], ovd->vdev_child[c]); 3050 3051 if (nvd->vdev_ops->vdev_op_leaf && ovd->vdev_offline) { 3052 /* 3053 * It would be nice to call vdev_offline() 3054 * directly but the pool isn't fully loaded and 3055 * the txg threads have not been started yet. 3056 */ 3057 nvd->vdev_offline = ovd->vdev_offline; 3058 vdev_reopen(nvd->vdev_top); 3059 } 3060 } 3061 3062 /* 3063 * Expand a vdev if possible. 3064 */ 3065 void 3066 vdev_expand(vdev_t *vd, uint64_t txg) 3067 { 3068 ASSERT(vd->vdev_top == vd); 3069 ASSERT(spa_config_held(vd->vdev_spa, SCL_ALL, RW_WRITER) == SCL_ALL); 3070 3071 if ((vd->vdev_asize >> vd->vdev_ms_shift) > vd->vdev_ms_count) { 3072 VERIFY(vdev_metaslab_init(vd, txg) == 0); 3073 vdev_config_dirty(vd); 3074 } 3075 } 3076 3077 /* 3078 * Split a vdev. 3079 */ 3080 void 3081 vdev_split(vdev_t *vd) 3082 { 3083 vdev_t *cvd, *pvd = vd->vdev_parent; 3084 3085 vdev_remove_child(pvd, vd); 3086 vdev_compact_children(pvd); 3087 3088 cvd = pvd->vdev_child[0]; 3089 if (pvd->vdev_children == 1) { 3090 vdev_remove_parent(cvd); 3091 cvd->vdev_splitting = B_TRUE; 3092 } 3093 vdev_propagate_state(cvd); 3094 } 3095
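/*
 * Illustrative usage sketch: a hypothetical caller of the administrative
 * entry points above (the caller itself is not part of this file; only the
 * functions, flags, and states it uses are). A device is taken offline
 * temporarily and later brought back online:
 *
 *	vdev_state_t newstate;
 *	int err;
 *
 *	err = vdev_offline(spa, guid, ZFS_OFFLINE_TEMPORARY);
 *	if (err == 0) {
 *		err = vdev_online(spa, guid, 0, &newstate);
 *		if (err == 0 && newstate != VDEV_STATE_HEALTHY)
 *			(hypothetical caller policy for a degraded return)
 *	}
 *
 * Both routines serialize internally via spa_vdev_state_enter() and
 * spa_vdev_state_exit(), so the caller does not take the vdev state lock
 * directly; vdev_offline() additionally wraps the locked variant in
 * spa_vdev_top_lock.
 */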