/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2020 by Delphix. All rights reserved.
 * Copyright 2017 Nexenta Systems, Inc.
 * Copyright (c) 2014 Integros [integros.com]
 * Copyright 2016 Toomas Soome <tsoome@me.com>
 * Copyright 2017 Joyent, Inc.
 * Copyright (c) 2017, Intel Corporation.
 * Copyright (c) 2019, Datto Inc. All rights reserved.
 */

#include <sys/zfs_context.h>
#include <sys/fm/fs/zfs.h>
#include <sys/spa.h>
#include <sys/spa_impl.h>
#include <sys/bpobj.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/dsl_dir.h>
#include <sys/vdev_impl.h>
#include <sys/vdev_rebuild.h>
#include <sys/vdev_draid.h>
#include <sys/uberblock_impl.h>
#include <sys/metaslab.h>
#include <sys/metaslab_impl.h>
#include <sys/space_map.h>
#include <sys/space_reftree.h>
#include <sys/zio.h>
#include <sys/zap.h>
#include <sys/fs/zfs.h>
#include <sys/arc.h>
#include <sys/zil.h>
#include <sys/dsl_scan.h>
#include <sys/vdev_raidz.h>
#include <sys/abd.h>
#include <sys/vdev_initialize.h>
#include <sys/vdev_trim.h>
#include <sys/zvol.h>
#include <sys/zfs_ratelimit.h>

/* default target for number of metaslabs per top-level vdev */
int zfs_vdev_default_ms_count = 200;

/* minimum number of metaslabs per top-level vdev */
int zfs_vdev_min_ms_count = 16;

/* practical upper limit of total metaslabs per top-level vdev */
int zfs_vdev_ms_count_limit = 1ULL << 17;

/* lower limit for metaslab size (512M) */
int zfs_vdev_default_ms_shift = 29;

/* upper limit for metaslab size (16G) */
int zfs_vdev_max_ms_shift = 34;

int vdev_validate_skip = B_FALSE;

/*
 * Since the DTL space map of a vdev is not expected to have a lot of
 * entries, we default its block size to 4K.
 */
int zfs_vdev_dtl_sm_blksz = (1 << 12);

/*
 * Rate limit slow IO (delay) events to this many per second.
 */
unsigned int zfs_slow_io_events_per_second = 20;

/*
 * Rate limit checksum events after this many checksum errors per second.
 */
unsigned int zfs_checksum_events_per_second = 20;

/*
 * Ignore errors during scrub/resilver. Allows working around a resilver
 * triggered on import when there are pool errors.
 */
int zfs_scan_ignore_errors = 0;

/*
 * vdev-wide space maps that have lots of entries written to them at
 * the end of each transaction can benefit from a higher I/O bandwidth
 * (e.g. vdev_obsolete_sm), thus we default their block size to 128K.
 */
int zfs_vdev_standard_sm_blksz = (1 << 17);

/*
 * Tunable parameter for debugging or performance analysis. Setting this
 * will cause pool corruption on power loss if a volatile out-of-order
 * write cache is enabled.
 */
int zfs_nocacheflush = 0;

uint64_t zfs_vdev_max_auto_ashift = ASHIFT_MAX;
uint64_t zfs_vdev_min_auto_ashift = ASHIFT_MIN;

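/*
 * Emit a message to the ZFS debug log (zfs_dbgmsg), identifying the vdev
 * by its type and path when a path is known, or by its id and guid
 * otherwise.
 */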
/*PRINTFLIKE2*/
void
vdev_dbgmsg(vdev_t *vd, const char *fmt, ...)
{
        va_list adx;
        char buf[256];

        va_start(adx, fmt);
        (void) vsnprintf(buf, sizeof (buf), fmt, adx);
        va_end(adx);

        if (vd->vdev_path != NULL) {
                zfs_dbgmsg("%s vdev '%s': %s", vd->vdev_ops->vdev_op_type,
                    vd->vdev_path, buf);
        } else {
                zfs_dbgmsg("%s-%llu vdev (guid %llu): %s",
                    vd->vdev_ops->vdev_op_type,
                    (u_longlong_t)vd->vdev_id,
                    (u_longlong_t)vd->vdev_guid, buf);
        }
}

void
vdev_dbgmsg_print_tree(vdev_t *vd, int indent)
{
        char state[20];

        if (vd->vdev_ishole || vd->vdev_ops == &vdev_missing_ops) {
                zfs_dbgmsg("%*svdev %u: %s", indent, "", vd->vdev_id,
                    vd->vdev_ops->vdev_op_type);
                return;
        }

        switch (vd->vdev_state) {
        case VDEV_STATE_UNKNOWN:
                (void) snprintf(state, sizeof (state), "unknown");
                break;
        case VDEV_STATE_CLOSED:
                (void) snprintf(state, sizeof (state), "closed");
                break;
        case VDEV_STATE_OFFLINE:
                (void) snprintf(state, sizeof (state), "offline");
                break;
        case VDEV_STATE_REMOVED:
                (void) snprintf(state, sizeof (state), "removed");
                break;
        case VDEV_STATE_CANT_OPEN:
                (void) snprintf(state, sizeof (state), "can't open");
                break;
        case VDEV_STATE_FAULTED:
                (void) snprintf(state, sizeof (state), "faulted");
                break;
        case VDEV_STATE_DEGRADED:
                (void) snprintf(state, sizeof (state), "degraded");
                break;
        case VDEV_STATE_HEALTHY:
                (void) snprintf(state, sizeof (state), "healthy");
                break;
        default:
                (void) snprintf(state, sizeof (state), "<state %u>",
                    (uint_t)vd->vdev_state);
        }

        zfs_dbgmsg("%*svdev %u: %s%s, guid: %llu, path: %s, %s", indent,
            "", (int)vd->vdev_id, vd->vdev_ops->vdev_op_type,
            vd->vdev_islog ? " (log)" : "",
            (u_longlong_t)vd->vdev_guid,
            vd->vdev_path ? vd->vdev_path : "N/A", state);

        for (uint64_t i = 0; i < vd->vdev_children; i++)
                vdev_dbgmsg_print_tree(vd->vdev_child[i], indent + 2);
}

/*
 * Virtual device management.
 */

static vdev_ops_t *vdev_ops_table[] = {
        &vdev_root_ops,
        &vdev_raidz_ops,
        &vdev_draid_ops,
        &vdev_draid_spare_ops,
        &vdev_mirror_ops,
        &vdev_replacing_ops,
        &vdev_spare_ops,
        &vdev_disk_ops,
        &vdev_file_ops,
        &vdev_missing_ops,
        &vdev_hole_ops,
        &vdev_indirect_ops,
        NULL
};

/*
 * Given a vdev type, return the appropriate ops vector.
 */
static vdev_ops_t *
vdev_getops(const char *type)
{
        vdev_ops_t *ops, **opspp;

        for (opspp = vdev_ops_table; (ops = *opspp) != NULL; opspp++)
                if (strcmp(ops->vdev_op_type, type) == 0)
                        break;

        return (ops);
}

/* ARGSUSED */
void
vdev_default_xlate(vdev_t *vd, const range_seg64_t *logical_rs,
    range_seg64_t *physical_rs, range_seg64_t *remain_rs)
{
        physical_rs->rs_start = logical_rs->rs_start;
        physical_rs->rs_end = logical_rs->rs_end;
}

/*
 * Derive the enumerated allocation bias from string input.
 * String origin is either the per-vdev zap or zpool(8).
 */
static vdev_alloc_bias_t
vdev_derive_alloc_bias(const char *bias)
{
        vdev_alloc_bias_t alloc_bias = VDEV_BIAS_NONE;

        if (strcmp(bias, VDEV_ALLOC_BIAS_LOG) == 0)
                alloc_bias = VDEV_BIAS_LOG;
        else if (strcmp(bias, VDEV_ALLOC_BIAS_SPECIAL) == 0)
                alloc_bias = VDEV_BIAS_SPECIAL;
        else if (strcmp(bias, VDEV_ALLOC_BIAS_DEDUP) == 0)
                alloc_bias = VDEV_BIAS_DEDUP;

        return (alloc_bias);
}

/*
 * Default asize function: return the MAX of psize with the asize of
 * all children. This is what's used by anything other than RAID-Z.
 */
uint64_t
vdev_default_asize(vdev_t *vd, uint64_t psize)
{
        uint64_t asize = P2ROUNDUP(psize, 1ULL << vd->vdev_top->vdev_ashift);
        uint64_t csize;

        for (int c = 0; c < vd->vdev_children; c++) {
                csize = vdev_psize_to_asize(vd->vdev_child[c], psize);
                asize = MAX(asize, csize);
        }

        return (asize);
}

uint64_t
vdev_default_min_asize(vdev_t *vd)
{
        return (vd->vdev_min_asize);
}

/*
 * Get the minimum allocatable size. We define the allocatable size as
 * the vdev's asize rounded to the nearest metaslab. This allows us to
 * replace or attach devices which don't have the same physical size but
 * can still satisfy the same number of allocations.
 */
uint64_t
vdev_get_min_asize(vdev_t *vd)
{
        vdev_t *pvd = vd->vdev_parent;

        /*
         * If our parent is NULL (inactive spare or cache) or is the root,
         * just return our own asize.
         */
        if (pvd == NULL)
                return (vd->vdev_asize);

        /*
         * The top-level vdev just returns the allocatable size rounded
         * to the nearest metaslab.
         */
        if (vd == vd->vdev_top)
                return (P2ALIGN(vd->vdev_asize, 1ULL << vd->vdev_ms_shift));

        return (pvd->vdev_ops->vdev_op_min_asize(pvd));
}

void
vdev_set_min_asize(vdev_t *vd)
{
        vd->vdev_min_asize = vdev_get_min_asize(vd);

        for (int c = 0; c < vd->vdev_children; c++)
                vdev_set_min_asize(vd->vdev_child[c]);
}

/*
 * Get the minimal allocation size for the top-level vdev.
 */
uint64_t
vdev_get_min_alloc(vdev_t *vd)
{
        uint64_t min_alloc = 1ULL << vd->vdev_ashift;

        if (vd->vdev_ops->vdev_op_min_alloc != NULL)
                min_alloc = vd->vdev_ops->vdev_op_min_alloc(vd);

        return (min_alloc);
}

/*
 * Get the parity level for a top-level vdev.
 */
uint64_t
vdev_get_nparity(vdev_t *vd)
{
        uint64_t nparity = 0;

        if (vd->vdev_ops->vdev_op_nparity != NULL)
                nparity = vd->vdev_ops->vdev_op_nparity(vd);

        return (nparity);
}

/*
 * Get the number of data disks for a top-level vdev.
 */
uint64_t
vdev_get_ndisks(vdev_t *vd)
{
        uint64_t ndisks = 1;

        if (vd->vdev_ops->vdev_op_ndisks != NULL)
                ndisks = vd->vdev_ops->vdev_op_ndisks(vd);

        return (ndisks);
}

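/*
 * Look up a top-level vdev by its id, i.e. its index in the root vdev's
 * child array. Returns NULL if the id is out of range.
 */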
vdev_t *
vdev_lookup_top(spa_t *spa, uint64_t vdev)
{
        vdev_t *rvd = spa->spa_root_vdev;

        ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0);

        if (vdev < rvd->vdev_children) {
                ASSERT(rvd->vdev_child[vdev] != NULL);
                return (rvd->vdev_child[vdev]);
        }

        return (NULL);
}

vdev_t *
vdev_lookup_by_guid(vdev_t *vd, uint64_t guid)
{
        vdev_t *mvd;

        if (vd->vdev_guid == guid)
                return (vd);

        for (int c = 0; c < vd->vdev_children; c++)
                if ((mvd = vdev_lookup_by_guid(vd->vdev_child[c], guid)) !=
                    NULL)
                        return (mvd);

        return (NULL);
}

static int
vdev_count_leaves_impl(vdev_t *vd)
{
        int n = 0;

        if (vd->vdev_ops->vdev_op_leaf)
                return (1);

        for (int c = 0; c < vd->vdev_children; c++)
                n += vdev_count_leaves_impl(vd->vdev_child[c]);

        return (n);
}

int
vdev_count_leaves(spa_t *spa)
{
        int rc;

        spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
        rc = vdev_count_leaves_impl(spa->spa_root_vdev);
        spa_config_exit(spa, SCL_VDEV, FTAG);

        return (rc);
}

void
vdev_add_child(vdev_t *pvd, vdev_t *cvd)
{
        size_t oldsize, newsize;
        uint64_t id = cvd->vdev_id;
        vdev_t **newchild;

        ASSERT(spa_config_held(cvd->vdev_spa, SCL_ALL, RW_WRITER) == SCL_ALL);
        ASSERT(cvd->vdev_parent == NULL);

        cvd->vdev_parent = pvd;

        if (pvd == NULL)
                return;

        ASSERT(id >= pvd->vdev_children || pvd->vdev_child[id] == NULL);

        oldsize = pvd->vdev_children * sizeof (vdev_t *);
        pvd->vdev_children = MAX(pvd->vdev_children, id + 1);
        newsize = pvd->vdev_children * sizeof (vdev_t *);

        newchild = kmem_alloc(newsize, KM_SLEEP);
        if (pvd->vdev_child != NULL) {
                bcopy(pvd->vdev_child, newchild, oldsize);
                kmem_free(pvd->vdev_child, oldsize);
        }

        pvd->vdev_child = newchild;
        pvd->vdev_child[id] = cvd;

        cvd->vdev_top = (pvd->vdev_top ? pvd->vdev_top : cvd);
        ASSERT(cvd->vdev_top->vdev_parent->vdev_parent == NULL);

        /*
         * Walk up all ancestors to update guid sum.
         */
        for (; pvd != NULL; pvd = pvd->vdev_parent)
                pvd->vdev_guid_sum += cvd->vdev_guid_sum;

        if (cvd->vdev_ops->vdev_op_leaf) {
                list_insert_head(&cvd->vdev_spa->spa_leaf_list, cvd);
                cvd->vdev_spa->spa_leaf_list_gen++;
        }
}

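/*
 * Detach a child from its parent's child array and subtract its guid sum
 * from every ancestor. The child array itself is freed once its last
 * child has been removed.
 */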
void
vdev_remove_child(vdev_t *pvd, vdev_t *cvd)
{
        int c;
        uint_t id = cvd->vdev_id;

        ASSERT(cvd->vdev_parent == pvd);

        if (pvd == NULL)
                return;

        ASSERT(id < pvd->vdev_children);
        ASSERT(pvd->vdev_child[id] == cvd);

        pvd->vdev_child[id] = NULL;
        cvd->vdev_parent = NULL;

        for (c = 0; c < pvd->vdev_children; c++)
                if (pvd->vdev_child[c])
                        break;

        if (c == pvd->vdev_children) {
                kmem_free(pvd->vdev_child, c * sizeof (vdev_t *));
                pvd->vdev_child = NULL;
                pvd->vdev_children = 0;
        }

        if (cvd->vdev_ops->vdev_op_leaf) {
                spa_t *spa = cvd->vdev_spa;
                list_remove(&spa->spa_leaf_list, cvd);
                spa->spa_leaf_list_gen++;
        }

        /*
         * Walk up all ancestors to update guid sum.
         */
        for (; pvd != NULL; pvd = pvd->vdev_parent)
                pvd->vdev_guid_sum -= cvd->vdev_guid_sum;
}

/*
 * Remove any holes in the child array.
 */
void
vdev_compact_children(vdev_t *pvd)
{
        vdev_t **newchild, *cvd;
        int oldc = pvd->vdev_children;
        int newc;

        ASSERT(spa_config_held(pvd->vdev_spa, SCL_ALL, RW_WRITER) == SCL_ALL);

        if (oldc == 0)
                return;

        for (int c = newc = 0; c < oldc; c++)
                if (pvd->vdev_child[c])
                        newc++;

        if (newc > 0) {
                newchild = kmem_zalloc(newc * sizeof (vdev_t *), KM_SLEEP);

                for (int c = newc = 0; c < oldc; c++) {
                        if ((cvd = pvd->vdev_child[c]) != NULL) {
                                newchild[newc] = cvd;
                                cvd->vdev_id = newc++;
                        }
                }
        } else {
                newchild = NULL;
        }

        kmem_free(pvd->vdev_child, oldc * sizeof (vdev_t *));
        pvd->vdev_child = newchild;
        pvd->vdev_children = newc;
}

/*
 * Allocate and minimally initialize a vdev_t.
 */
vdev_t *
vdev_alloc_common(spa_t *spa, uint_t id, uint64_t guid, vdev_ops_t *ops)
{
        vdev_t *vd;
        vdev_indirect_config_t *vic;

        vd = kmem_zalloc(sizeof (vdev_t), KM_SLEEP);
        vic = &vd->vdev_indirect_config;

        if (spa->spa_root_vdev == NULL) {
                ASSERT(ops == &vdev_root_ops);
                spa->spa_root_vdev = vd;
                spa->spa_load_guid = spa_generate_guid(NULL);
        }

        if (guid == 0 && ops != &vdev_hole_ops) {
                if (spa->spa_root_vdev == vd) {
                        /*
                         * The root vdev's guid will also be the pool guid,
                         * which must be unique among all pools.
                         */
                        guid = spa_generate_guid(NULL);
                } else {
                        /*
                         * Any other vdev's guid must be unique within
                         * the pool.
                         */
                        guid = spa_generate_guid(spa);
                }
                ASSERT(!spa_guid_exists(spa_guid(spa), guid));
        }

        vd->vdev_spa = spa;
        vd->vdev_id = id;
        vd->vdev_guid = guid;
        vd->vdev_guid_sum = guid;
        vd->vdev_ops = ops;
        vd->vdev_state = VDEV_STATE_CLOSED;
        vd->vdev_ishole = (ops == &vdev_hole_ops);
        vic->vic_prev_indirect_vdev = UINT64_MAX;

        rw_init(&vd->vdev_indirect_rwlock, NULL, RW_DEFAULT, NULL);
        mutex_init(&vd->vdev_obsolete_lock, NULL, MUTEX_DEFAULT, NULL);
        vd->vdev_obsolete_segments = range_tree_create(NULL, RANGE_SEG64, NULL,
            0, 0);

        /*
         * Initialize rate limit structs for events. We rate limit ZIO delay
         * and checksum events so that we don't overwhelm ZED with thousands
         * of events when a disk is acting up.
         */
        zfs_ratelimit_init(&vd->vdev_delay_rl, &zfs_slow_io_events_per_second,
            1);
        zfs_ratelimit_init(&vd->vdev_checksum_rl,
            &zfs_checksum_events_per_second, 1);

        list_link_init(&vd->vdev_config_dirty_node);
        list_link_init(&vd->vdev_state_dirty_node);
        list_link_init(&vd->vdev_initialize_node);
        list_link_init(&vd->vdev_leaf_node);
        list_link_init(&vd->vdev_trim_node);

        mutex_init(&vd->vdev_dtl_lock, NULL, MUTEX_NOLOCKDEP, NULL);
        mutex_init(&vd->vdev_stat_lock, NULL, MUTEX_DEFAULT, NULL);
        mutex_init(&vd->vdev_probe_lock, NULL, MUTEX_DEFAULT, NULL);
        mutex_init(&vd->vdev_scan_io_queue_lock, NULL, MUTEX_DEFAULT, NULL);

        mutex_init(&vd->vdev_initialize_lock, NULL, MUTEX_DEFAULT, NULL);
        mutex_init(&vd->vdev_initialize_io_lock, NULL, MUTEX_DEFAULT, NULL);
        cv_init(&vd->vdev_initialize_cv, NULL, CV_DEFAULT, NULL);
        cv_init(&vd->vdev_initialize_io_cv, NULL, CV_DEFAULT, NULL);

        mutex_init(&vd->vdev_trim_lock, NULL, MUTEX_DEFAULT, NULL);
        mutex_init(&vd->vdev_autotrim_lock, NULL, MUTEX_DEFAULT, NULL);
        mutex_init(&vd->vdev_trim_io_lock, NULL, MUTEX_DEFAULT, NULL);
        cv_init(&vd->vdev_trim_cv, NULL, CV_DEFAULT, NULL);
        cv_init(&vd->vdev_autotrim_cv, NULL, CV_DEFAULT, NULL);
        cv_init(&vd->vdev_trim_io_cv, NULL, CV_DEFAULT, NULL);

        mutex_init(&vd->vdev_rebuild_lock, NULL, MUTEX_DEFAULT, NULL);
        cv_init(&vd->vdev_rebuild_cv, NULL, CV_DEFAULT, NULL);

        for (int t = 0; t < DTL_TYPES; t++) {
                vd->vdev_dtl[t] = range_tree_create(NULL, RANGE_SEG64, NULL, 0,
                    0);
        }

        txg_list_create(&vd->vdev_ms_list, spa,
            offsetof(struct metaslab, ms_txg_node));
        txg_list_create(&vd->vdev_dtl_list, spa,
            offsetof(struct vdev, vdev_dtl_node));
        vd->vdev_stat.vs_timestamp = gethrtime();
        vdev_queue_init(vd);
        vdev_cache_init(vd);

        return (vd);
}

/*
 * Allocate a new vdev. The 'alloctype' is used to control whether we are
 * creating a new vdev or loading an existing one - the behavior is slightly
 * different for each case.
 */
int
vdev_alloc(spa_t *spa, vdev_t **vdp, nvlist_t *nv, vdev_t *parent, uint_t id,
    int alloctype)
{
        vdev_ops_t *ops;
        char *type;
        uint64_t guid = 0, islog;
        vdev_t *vd;
        vdev_indirect_config_t *vic;
        char *tmp = NULL;
        int rc;
        vdev_alloc_bias_t alloc_bias = VDEV_BIAS_NONE;
        boolean_t top_level = (parent && !parent->vdev_parent);

        ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);

        if (nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) != 0)
                return (SET_ERROR(EINVAL));

        if ((ops = vdev_getops(type)) == NULL)
                return (SET_ERROR(EINVAL));

        /*
         * If this is a load, get the vdev guid from the nvlist.
         * Otherwise, vdev_alloc_common() will generate one for us.
         */
        if (alloctype == VDEV_ALLOC_LOAD) {
                uint64_t label_id;

                if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID, &label_id) ||
                    label_id != id)
                        return (SET_ERROR(EINVAL));

                if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) != 0)
                        return (SET_ERROR(EINVAL));
        } else if (alloctype == VDEV_ALLOC_SPARE) {
                if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) != 0)
                        return (SET_ERROR(EINVAL));
        } else if (alloctype == VDEV_ALLOC_L2CACHE) {
                if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) != 0)
                        return (SET_ERROR(EINVAL));
        } else if (alloctype == VDEV_ALLOC_ROOTPOOL) {
                if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) != 0)
                        return (SET_ERROR(EINVAL));
        }

        /*
         * The first allocated vdev must be of type 'root'.
         */
        if (ops != &vdev_root_ops && spa->spa_root_vdev == NULL)
                return (SET_ERROR(EINVAL));

        /*
         * Determine whether we're a log vdev.
         */
        islog = 0;
        (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_LOG, &islog);
        if (islog && spa_version(spa) < SPA_VERSION_SLOGS)
                return (SET_ERROR(ENOTSUP));

        if (ops == &vdev_hole_ops && spa_version(spa) < SPA_VERSION_HOLES)
                return (SET_ERROR(ENOTSUP));

        if (top_level && alloctype == VDEV_ALLOC_ADD) {
                char *bias;

                /*
                 * If creating a top-level vdev, check for allocation
                 * classes input.
                 */
                if (nvlist_lookup_string(nv, ZPOOL_CONFIG_ALLOCATION_BIAS,
                    &bias) == 0) {
                        alloc_bias = vdev_derive_alloc_bias(bias);

                        /* spa_vdev_add() expects feature to be enabled */
                        if (spa->spa_load_state != SPA_LOAD_CREATE &&
                            !spa_feature_is_enabled(spa,
                            SPA_FEATURE_ALLOCATION_CLASSES)) {
                                return (SET_ERROR(ENOTSUP));
                        }
                }

                /* spa_vdev_add() expects feature to be enabled */
                if (ops == &vdev_draid_ops &&
                    spa->spa_load_state != SPA_LOAD_CREATE &&
                    !spa_feature_is_enabled(spa, SPA_FEATURE_DRAID)) {
                        return (SET_ERROR(ENOTSUP));
                }
        }

        /*
         * Initialize the vdev specific data. This is done before calling
         * vdev_alloc_common() since it may fail and this simplifies the
         * error reporting and cleanup code paths.
         */
        void *tsd = NULL;
        if (ops->vdev_op_init != NULL) {
                rc = ops->vdev_op_init(spa, nv, &tsd);
                if (rc != 0) {
                        return (rc);
                }
        }

        vd = vdev_alloc_common(spa, id, guid, ops);
        vd->vdev_tsd = tsd;
        vd->vdev_islog = islog;

        if (top_level && alloc_bias != VDEV_BIAS_NONE)
                vd->vdev_alloc_bias = alloc_bias;

        if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &vd->vdev_path) == 0)
                vd->vdev_path = spa_strdup(vd->vdev_path);

        /*
         * ZPOOL_CONFIG_AUX_STATE = "external" means we previously forced a
         * fault on a vdev and want it to persist across imports (like with
         * zpool offline -f).
         */
        rc = nvlist_lookup_string(nv, ZPOOL_CONFIG_AUX_STATE, &tmp);
        if (rc == 0 && tmp != NULL && strcmp(tmp, "external") == 0) {
                vd->vdev_stat.vs_aux = VDEV_AUX_EXTERNAL;
                vd->vdev_faulted = 1;
                vd->vdev_label_aux = VDEV_AUX_EXTERNAL;
        }

        if (nvlist_lookup_string(nv, ZPOOL_CONFIG_DEVID, &vd->vdev_devid) == 0)
                vd->vdev_devid = spa_strdup(vd->vdev_devid);
        if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PHYS_PATH,
            &vd->vdev_physpath) == 0)
                vd->vdev_physpath = spa_strdup(vd->vdev_physpath);

        if (nvlist_lookup_string(nv, ZPOOL_CONFIG_VDEV_ENC_SYSFS_PATH,
            &vd->vdev_enc_sysfs_path) == 0)
                vd->vdev_enc_sysfs_path = spa_strdup(vd->vdev_enc_sysfs_path);

        if (nvlist_lookup_string(nv, ZPOOL_CONFIG_FRU, &vd->vdev_fru) == 0)
                vd->vdev_fru = spa_strdup(vd->vdev_fru);

        /*
         * Set the whole_disk property. If it's not specified, leave the value
         * as -1.
         */
        if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
            &vd->vdev_wholedisk) != 0)
                vd->vdev_wholedisk = -1ULL;

        vic = &vd->vdev_indirect_config;

        ASSERT0(vic->vic_mapping_object);
        (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_INDIRECT_OBJECT,
            &vic->vic_mapping_object);
        ASSERT0(vic->vic_births_object);
        (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_INDIRECT_BIRTHS,
            &vic->vic_births_object);
        ASSERT3U(vic->vic_prev_indirect_vdev, ==, UINT64_MAX);
        (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_PREV_INDIRECT_VDEV,
            &vic->vic_prev_indirect_vdev);

        /*
         * Look for the 'not present' flag. This will only be set if the device
         * was not present at the time of import.
         */
        (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT,
            &vd->vdev_not_present);

        /*
         * Get the alignment requirement.
         */
        (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ASHIFT, &vd->vdev_ashift);

        /*
         * Retrieve the vdev creation time.
         */
        (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_CREATE_TXG,
            &vd->vdev_crtxg);

        /*
         * If we're a top-level vdev, try to load the allocation parameters.
         */
        if (top_level &&
            (alloctype == VDEV_ALLOC_LOAD || alloctype == VDEV_ALLOC_SPLIT)) {
                (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_METASLAB_ARRAY,
                    &vd->vdev_ms_array);
                (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_METASLAB_SHIFT,
                    &vd->vdev_ms_shift);
                (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ASIZE,
                    &vd->vdev_asize);
                (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_REMOVING,
                    &vd->vdev_removing);
                (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_VDEV_TOP_ZAP,
                    &vd->vdev_top_zap);
        } else {
                ASSERT0(vd->vdev_top_zap);
        }

        if (top_level && alloctype != VDEV_ALLOC_ATTACH) {
                ASSERT(alloctype == VDEV_ALLOC_LOAD ||
                    alloctype == VDEV_ALLOC_ADD ||
                    alloctype == VDEV_ALLOC_SPLIT ||
                    alloctype == VDEV_ALLOC_ROOTPOOL);
                /* Note: metaslab_group_create() is now deferred */
        }

        if (vd->vdev_ops->vdev_op_leaf &&
            (alloctype == VDEV_ALLOC_LOAD || alloctype == VDEV_ALLOC_SPLIT)) {
                (void) nvlist_lookup_uint64(nv,
                    ZPOOL_CONFIG_VDEV_LEAF_ZAP, &vd->vdev_leaf_zap);
        } else {
                ASSERT0(vd->vdev_leaf_zap);
        }

        /*
         * If we're a leaf vdev, try to load the DTL object and other state.
         */
        if (vd->vdev_ops->vdev_op_leaf &&
            (alloctype == VDEV_ALLOC_LOAD || alloctype == VDEV_ALLOC_L2CACHE ||
            alloctype == VDEV_ALLOC_ROOTPOOL)) {
                if (alloctype == VDEV_ALLOC_LOAD) {
                        (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_DTL,
                            &vd->vdev_dtl_object);
                        (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_UNSPARE,
                            &vd->vdev_unspare);
                }

                if (alloctype == VDEV_ALLOC_ROOTPOOL) {
                        uint64_t spare = 0;

                        if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_SPARE,
                            &spare) == 0 && spare)
                                spa_spare_add(vd);
                }

                (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_OFFLINE,
                    &vd->vdev_offline);

                (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_RESILVER_TXG,
                    &vd->vdev_resilver_txg);

                (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_REBUILD_TXG,
                    &vd->vdev_rebuild_txg);

                if (nvlist_exists(nv, ZPOOL_CONFIG_RESILVER_DEFER))
                        vdev_defer_resilver(vd);

                /*
                 * In general, when importing a pool we want to ignore the
                 * persistent fault state, as the diagnosis made on another
                 * system may not be valid in the current context. The only
                 * exception is if we forced a vdev to a persistently faulted
                 * state with 'zpool offline -f'. The persistent fault will
                 * remain across imports until cleared.
                 *
                 * Local vdevs will remain in the faulted state.
                 */
                if (spa_load_state(spa) == SPA_LOAD_OPEN ||
                    spa_load_state(spa) == SPA_LOAD_IMPORT) {
                        (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_FAULTED,
                            &vd->vdev_faulted);
                        (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_DEGRADED,
                            &vd->vdev_degraded);
                        (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_REMOVED,
                            &vd->vdev_removed);

                        if (vd->vdev_faulted || vd->vdev_degraded) {
                                char *aux;

                                vd->vdev_label_aux =
                                    VDEV_AUX_ERR_EXCEEDED;
                                if (nvlist_lookup_string(nv,
                                    ZPOOL_CONFIG_AUX_STATE, &aux) == 0 &&
                                    strcmp(aux, "external") == 0)
                                        vd->vdev_label_aux = VDEV_AUX_EXTERNAL;
                                else
                                        vd->vdev_faulted = 0ULL;
                        }
                }
        }

        /*
         * Add ourselves to the parent's list of children.
         */
        vdev_add_child(parent, vd);

        *vdp = vd;

        return (0);
}

void
vdev_free(vdev_t *vd)
{
        spa_t *spa = vd->vdev_spa;

        ASSERT3P(vd->vdev_initialize_thread, ==, NULL);
        ASSERT3P(vd->vdev_trim_thread, ==, NULL);
        ASSERT3P(vd->vdev_autotrim_thread, ==, NULL);
        ASSERT3P(vd->vdev_rebuild_thread, ==, NULL);

        /*
         * Scan queues are normally destroyed at the end of a scan. If the
         * queue exists here, that implies the vdev is being removed while
         * the scan is still running.
         */
        if (vd->vdev_scan_io_queue != NULL) {
                mutex_enter(&vd->vdev_scan_io_queue_lock);
                dsl_scan_io_queue_destroy(vd->vdev_scan_io_queue);
                vd->vdev_scan_io_queue = NULL;
                mutex_exit(&vd->vdev_scan_io_queue_lock);
        }

        /*
         * vdev_free() implies closing the vdev first. This is simpler than
         * trying to ensure complicated semantics for all callers.
         */
        vdev_close(vd);

        ASSERT(!list_link_active(&vd->vdev_config_dirty_node));
        ASSERT(!list_link_active(&vd->vdev_state_dirty_node));

        /*
         * Free all children.
         */
        for (int c = 0; c < vd->vdev_children; c++)
                vdev_free(vd->vdev_child[c]);

        ASSERT(vd->vdev_child == NULL);
        ASSERT(vd->vdev_guid_sum == vd->vdev_guid);

        if (vd->vdev_ops->vdev_op_fini != NULL)
                vd->vdev_ops->vdev_op_fini(vd);

        /*
         * Discard allocation state.
         */
        if (vd->vdev_mg != NULL) {
                vdev_metaslab_fini(vd);
                metaslab_group_destroy(vd->vdev_mg);
                vd->vdev_mg = NULL;
        }

        ASSERT0(vd->vdev_stat.vs_space);
        ASSERT0(vd->vdev_stat.vs_dspace);
        ASSERT0(vd->vdev_stat.vs_alloc);

        /*
         * Remove this vdev from its parent's child list.
         */
        vdev_remove_child(vd->vdev_parent, vd);

        ASSERT(vd->vdev_parent == NULL);
        ASSERT(!list_link_active(&vd->vdev_leaf_node));

        /*
         * Clean up vdev structure.
         */
        vdev_queue_fini(vd);
        vdev_cache_fini(vd);

        if (vd->vdev_path)
                spa_strfree(vd->vdev_path);
        if (vd->vdev_devid)
                spa_strfree(vd->vdev_devid);
        if (vd->vdev_physpath)
                spa_strfree(vd->vdev_physpath);

        if (vd->vdev_enc_sysfs_path)
                spa_strfree(vd->vdev_enc_sysfs_path);

        if (vd->vdev_fru)
                spa_strfree(vd->vdev_fru);

        if (vd->vdev_isspare)
                spa_spare_remove(vd);
        if (vd->vdev_isl2cache)
                spa_l2cache_remove(vd);

        txg_list_destroy(&vd->vdev_ms_list);
        txg_list_destroy(&vd->vdev_dtl_list);

        mutex_enter(&vd->vdev_dtl_lock);
        space_map_close(vd->vdev_dtl_sm);
        for (int t = 0; t < DTL_TYPES; t++) {
                range_tree_vacate(vd->vdev_dtl[t], NULL, NULL);
                range_tree_destroy(vd->vdev_dtl[t]);
        }
        mutex_exit(&vd->vdev_dtl_lock);

        EQUIV(vd->vdev_indirect_births != NULL,
            vd->vdev_indirect_mapping != NULL);
        if (vd->vdev_indirect_births != NULL) {
                vdev_indirect_mapping_close(vd->vdev_indirect_mapping);
                vdev_indirect_births_close(vd->vdev_indirect_births);
        }

        if (vd->vdev_obsolete_sm != NULL) {
                ASSERT(vd->vdev_removing ||
                    vd->vdev_ops == &vdev_indirect_ops);
                space_map_close(vd->vdev_obsolete_sm);
                vd->vdev_obsolete_sm = NULL;
        }
        range_tree_destroy(vd->vdev_obsolete_segments);
        rw_destroy(&vd->vdev_indirect_rwlock);
        mutex_destroy(&vd->vdev_obsolete_lock);

        mutex_destroy(&vd->vdev_dtl_lock);
        mutex_destroy(&vd->vdev_stat_lock);
        mutex_destroy(&vd->vdev_probe_lock);
        mutex_destroy(&vd->vdev_scan_io_queue_lock);

        mutex_destroy(&vd->vdev_initialize_lock);
        mutex_destroy(&vd->vdev_initialize_io_lock);
        cv_destroy(&vd->vdev_initialize_io_cv);
        cv_destroy(&vd->vdev_initialize_cv);

        mutex_destroy(&vd->vdev_trim_lock);
        mutex_destroy(&vd->vdev_autotrim_lock);
        mutex_destroy(&vd->vdev_trim_io_lock);
        cv_destroy(&vd->vdev_trim_cv);
        cv_destroy(&vd->vdev_autotrim_cv);
        cv_destroy(&vd->vdev_trim_io_cv);

        mutex_destroy(&vd->vdev_rebuild_lock);
        cv_destroy(&vd->vdev_rebuild_cv);

        zfs_ratelimit_fini(&vd->vdev_delay_rl);
        zfs_ratelimit_fini(&vd->vdev_checksum_rl);

        if (vd == spa->spa_root_vdev)
                spa->spa_root_vdev = NULL;

        kmem_free(vd, sizeof (vdev_t));
}

/*
 * Transfer top-level vdev state from svd to tvd.
 */
static void
vdev_top_transfer(vdev_t *svd, vdev_t *tvd)
{
        spa_t *spa = svd->vdev_spa;
        metaslab_t *msp;
        vdev_t *vd;
        int t;

        ASSERT(tvd == tvd->vdev_top);

        tvd->vdev_pending_fastwrite = svd->vdev_pending_fastwrite;
        tvd->vdev_ms_array = svd->vdev_ms_array;
        tvd->vdev_ms_shift = svd->vdev_ms_shift;
        tvd->vdev_ms_count = svd->vdev_ms_count;
        tvd->vdev_top_zap = svd->vdev_top_zap;

        svd->vdev_ms_array = 0;
        svd->vdev_ms_shift = 0;
        svd->vdev_ms_count = 0;
        svd->vdev_top_zap = 0;

        if (tvd->vdev_mg)
                ASSERT3P(tvd->vdev_mg, ==, svd->vdev_mg);
        tvd->vdev_mg = svd->vdev_mg;
        tvd->vdev_ms = svd->vdev_ms;

        svd->vdev_mg = NULL;
        svd->vdev_ms = NULL;

        if (tvd->vdev_mg != NULL)
                tvd->vdev_mg->mg_vd = tvd;

        tvd->vdev_checkpoint_sm = svd->vdev_checkpoint_sm;
        svd->vdev_checkpoint_sm = NULL;

        tvd->vdev_alloc_bias = svd->vdev_alloc_bias;
        svd->vdev_alloc_bias = VDEV_BIAS_NONE;

        tvd->vdev_stat.vs_alloc = svd->vdev_stat.vs_alloc;
        tvd->vdev_stat.vs_space = svd->vdev_stat.vs_space;
        tvd->vdev_stat.vs_dspace = svd->vdev_stat.vs_dspace;

        svd->vdev_stat.vs_alloc = 0;
        svd->vdev_stat.vs_space = 0;
        svd->vdev_stat.vs_dspace = 0;

        /*
         * State which may be set on a top-level vdev that's in the
         * process of being removed.
         */
        ASSERT0(tvd->vdev_indirect_config.vic_births_object);
        ASSERT0(tvd->vdev_indirect_config.vic_mapping_object);
        ASSERT3U(tvd->vdev_indirect_config.vic_prev_indirect_vdev, ==, -1ULL);
        ASSERT3P(tvd->vdev_indirect_mapping, ==, NULL);
        ASSERT3P(tvd->vdev_indirect_births, ==, NULL);
        ASSERT3P(tvd->vdev_obsolete_sm, ==, NULL);
        ASSERT0(tvd->vdev_removing);
        ASSERT0(tvd->vdev_rebuilding);
        tvd->vdev_removing = svd->vdev_removing;
        tvd->vdev_rebuilding = svd->vdev_rebuilding;
        tvd->vdev_rebuild_config = svd->vdev_rebuild_config;
        tvd->vdev_indirect_config = svd->vdev_indirect_config;
        tvd->vdev_indirect_mapping = svd->vdev_indirect_mapping;
        tvd->vdev_indirect_births = svd->vdev_indirect_births;
        range_tree_swap(&svd->vdev_obsolete_segments,
            &tvd->vdev_obsolete_segments);
        tvd->vdev_obsolete_sm = svd->vdev_obsolete_sm;
        svd->vdev_indirect_config.vic_mapping_object = 0;
        svd->vdev_indirect_config.vic_births_object = 0;
        svd->vdev_indirect_config.vic_prev_indirect_vdev = -1ULL;
        svd->vdev_indirect_mapping = NULL;
        svd->vdev_indirect_births = NULL;
        svd->vdev_obsolete_sm = NULL;
        svd->vdev_removing = 0;
        svd->vdev_rebuilding = 0;

        for (t = 0; t < TXG_SIZE; t++) {
                while ((msp = txg_list_remove(&svd->vdev_ms_list, t)) != NULL)
                        (void) txg_list_add(&tvd->vdev_ms_list, msp, t);
                while ((vd = txg_list_remove(&svd->vdev_dtl_list, t)) != NULL)
                        (void) txg_list_add(&tvd->vdev_dtl_list, vd, t);
                if (txg_list_remove_this(&spa->spa_vdev_txg_list, svd, t))
                        (void) txg_list_add(&spa->spa_vdev_txg_list, tvd, t);
        }

        if (list_link_active(&svd->vdev_config_dirty_node)) {
                vdev_config_clean(svd);
                vdev_config_dirty(tvd);
        }

        if (list_link_active(&svd->vdev_state_dirty_node)) {
                vdev_state_clean(svd);
                vdev_state_dirty(tvd);
        }

        tvd->vdev_deflate_ratio = svd->vdev_deflate_ratio;
        svd->vdev_deflate_ratio = 0;

        tvd->vdev_islog = svd->vdev_islog;
        svd->vdev_islog = 0;

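/*
 * Recursively update the vdev_top pointer of a vdev and all of its
 * descendants, e.g. after a mirror/replacing vdev has been inserted
 * into or removed from the tree.
 */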
        dsl_scan_io_queue_vdev_xfer(svd, tvd);
}

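static void
vdev_top_update(vdev_t *tvd, vdev_t *vd)
{
        if (vd == NULL)
                return;

        vd->vdev_top = tvd;

        for (int c = 0; c < vd->vdev_children; c++)
                vdev_top_update(tvd, vd->vdev_child[c]);
}

/*
 * Add a mirror/replacing vdev above an existing vdev. There is no need to
 * call .vdev_op_init() since mirror/replacing vdevs do not have private state.
 */
vdev_t *
vdev_add_parent(vdev_t *cvd, vdev_ops_t *ops)
{
        spa_t *spa = cvd->vdev_spa;
        vdev_t *pvd = cvd->vdev_parent;
        vdev_t *mvd;

        ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);

        mvd = vdev_alloc_common(spa, cvd->vdev_id, 0, ops);

        mvd->vdev_asize = cvd->vdev_asize;
        mvd->vdev_min_asize = cvd->vdev_min_asize;
        mvd->vdev_max_asize = cvd->vdev_max_asize;
        mvd->vdev_psize = cvd->vdev_psize;
        mvd->vdev_ashift = cvd->vdev_ashift;
        mvd->vdev_logical_ashift = cvd->vdev_logical_ashift;
        mvd->vdev_physical_ashift = cvd->vdev_physical_ashift;
        mvd->vdev_state = cvd->vdev_state;
        mvd->vdev_crtxg = cvd->vdev_crtxg;

        vdev_remove_child(pvd, cvd);
        vdev_add_child(pvd, mvd);
        cvd->vdev_id = mvd->vdev_children;
        vdev_add_child(mvd, cvd);
        vdev_top_update(cvd->vdev_top, cvd->vdev_top);

        if (mvd == mvd->vdev_top)
                vdev_top_transfer(cvd, mvd);

        return (mvd);
}

/*
 * Remove a 1-way mirror/replacing vdev from the tree.
 */
void
vdev_remove_parent(vdev_t *cvd)
{
        vdev_t *mvd = cvd->vdev_parent;
        vdev_t *pvd = mvd->vdev_parent;

        ASSERT(spa_config_held(cvd->vdev_spa, SCL_ALL, RW_WRITER) == SCL_ALL);

        ASSERT(mvd->vdev_children == 1);
        ASSERT(mvd->vdev_ops == &vdev_mirror_ops ||
            mvd->vdev_ops == &vdev_replacing_ops ||
            mvd->vdev_ops == &vdev_spare_ops);
        cvd->vdev_ashift = mvd->vdev_ashift;
        cvd->vdev_logical_ashift = mvd->vdev_logical_ashift;
        cvd->vdev_physical_ashift = mvd->vdev_physical_ashift;
        vdev_remove_child(mvd, cvd);
        vdev_remove_child(pvd, mvd);

        /*
         * If cvd will replace mvd as a top-level vdev, preserve mvd's guid.
         * Otherwise, we could have detached an offline device, and when we
         * go to import the pool we'll think we have two top-level vdevs,
         * instead of a different version of the same top-level vdev.
         */
        if (mvd->vdev_top == mvd) {
                uint64_t guid_delta = mvd->vdev_guid - cvd->vdev_guid;
                cvd->vdev_orig_guid = cvd->vdev_guid;
                cvd->vdev_guid += guid_delta;
                cvd->vdev_guid_sum += guid_delta;

                /*
                 * If pool not set for autoexpand, we need to also preserve
                 * mvd's asize to prevent automatic expansion of cvd.
                 * Otherwise if we are adjusting the mirror by attaching and
                 * detaching children of non-uniform sizes, the mirror could
                 * autoexpand, unexpectedly requiring larger devices to
                 * re-establish the mirror.
                 */
                if (!cvd->vdev_spa->spa_autoexpand)
                        cvd->vdev_asize = mvd->vdev_asize;
        }
        cvd->vdev_id = mvd->vdev_id;
        vdev_add_child(pvd, cvd);
        vdev_top_update(cvd->vdev_top, cvd->vdev_top);

        if (cvd == cvd->vdev_top)
                vdev_top_transfer(mvd, cvd);

        ASSERT(mvd->vdev_children == 0);
        vdev_free(mvd);
}

/*
 * Create the metaslab group for this vdev and place it in the metaslab
 * class matching the vdev's allocation bias (log, special, dedup, or
 * normal). Creation is deferred to this point so that the bias is known.
 */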
static void
vdev_metaslab_group_create(vdev_t *vd)
{
        spa_t *spa = vd->vdev_spa;

        /*
         * metaslab_group_create was delayed until allocation bias was
         * available.
         */
        if (vd->vdev_mg == NULL) {
                metaslab_class_t *mc;

                if (vd->vdev_islog && vd->vdev_alloc_bias == VDEV_BIAS_NONE)
                        vd->vdev_alloc_bias = VDEV_BIAS_LOG;

                ASSERT3U(vd->vdev_islog, ==,
                    (vd->vdev_alloc_bias == VDEV_BIAS_LOG));

                switch (vd->vdev_alloc_bias) {
                case VDEV_BIAS_LOG:
                        mc = spa_log_class(spa);
                        break;
                case VDEV_BIAS_SPECIAL:
                        mc = spa_special_class(spa);
                        break;
                case VDEV_BIAS_DEDUP:
                        mc = spa_dedup_class(spa);
                        break;
                default:
                        mc = spa_normal_class(spa);
                }

                vd->vdev_mg = metaslab_group_create(mc, vd,
                    spa->spa_alloc_count);

                /*
                 * The spa ashift min/max only apply for the normal metaslab
                 * class. Class destination is late binding so ashift boundary
                 * setting had to wait until now.
                 */
                if (vd->vdev_top == vd && vd->vdev_ashift != 0 &&
                    mc == spa_normal_class(spa) && vd->vdev_aux == NULL) {
                        if (vd->vdev_ashift > spa->spa_max_ashift)
                                spa->spa_max_ashift = vd->vdev_ashift;
                        if (vd->vdev_ashift < spa->spa_min_ashift)
                                spa->spa_min_ashift = vd->vdev_ashift;

                        uint64_t min_alloc = vdev_get_min_alloc(vd);
                        if (min_alloc < spa->spa_min_alloc)
                                spa->spa_min_alloc = min_alloc;
                }
        }
}

int
vdev_metaslab_init(vdev_t *vd, uint64_t txg)
{
        spa_t *spa = vd->vdev_spa;
        objset_t *mos = spa->spa_meta_objset;
        uint64_t m;
        uint64_t oldc = vd->vdev_ms_count;
        uint64_t newc = vd->vdev_asize >> vd->vdev_ms_shift;
        metaslab_t **mspp;
        int error;
        boolean_t expanding = (oldc != 0);

        ASSERT(txg == 0 || spa_config_held(spa, SCL_ALLOC, RW_WRITER));

        /*
         * This vdev is not being allocated from yet or is a hole.
         */
        if (vd->vdev_ms_shift == 0)
                return (0);

        ASSERT(!vd->vdev_ishole);

        ASSERT(oldc <= newc);

        mspp = vmem_zalloc(newc * sizeof (*mspp), KM_SLEEP);

        if (expanding) {
                bcopy(vd->vdev_ms, mspp, oldc * sizeof (*mspp));
                vmem_free(vd->vdev_ms, oldc * sizeof (*mspp));
        }

        vd->vdev_ms = mspp;
        vd->vdev_ms_count = newc;
        for (m = oldc; m < newc; m++) {
                uint64_t object = 0;

                /*
                 * vdev_ms_array may be 0 if we are creating the "fake"
                 * metaslabs for an indirect vdev for zdb's leak detection.
                 * See zdb_leak_init().
                 */
                if (txg == 0 && vd->vdev_ms_array != 0) {
                        error = dmu_read(mos, vd->vdev_ms_array,
                            m * sizeof (uint64_t), sizeof (uint64_t), &object,
                            DMU_READ_PREFETCH);
                        if (error != 0) {
                                vdev_dbgmsg(vd, "unable to read the metaslab "
                                    "array [error=%d]", error);
                                return (error);
                        }
                }

#ifndef _KERNEL
                /*
                 * To accommodate zdb_leak_init() fake indirect
                 * metaslabs, we allocate a metaslab group for
                 * indirect vdevs which normally don't have one.
                 */
                if (vd->vdev_mg == NULL) {
                        ASSERT0(vdev_is_concrete(vd));
                        vdev_metaslab_group_create(vd);
                }
#endif
                error = metaslab_init(vd->vdev_mg, m, object, txg,
                    &(vd->vdev_ms[m]));
                if (error != 0) {
                        vdev_dbgmsg(vd, "metaslab_init failed [error=%d]",
                            error);
                        return (error);
                }
        }

        if (txg == 0)
                spa_config_enter(spa, SCL_ALLOC, FTAG, RW_WRITER);

        /*
         * If the vdev is being removed we don't activate
         * the metaslabs since we want to ensure that no new
         * allocations are performed on this device.
         */
        if (!expanding && !vd->vdev_removing) {
                metaslab_group_activate(vd->vdev_mg);
        }

        if (txg == 0)
                spa_config_exit(spa, SCL_ALLOC, FTAG);

        /*
         * Regardless of whether this vdev was just added or is being
         * expanded, the metaslab count has changed. Recalculate the
         * block limit.
         */
        spa_log_sm_set_blocklimit(spa);

        return (0);
}

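/*
 * Release the metaslabs, the metaslab array, and the checkpoint space
 * map of a top-level vdev. May be called more than once, e.g. when
 * destroying a pool, so each step guards against repeated execution.
 */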
void
vdev_metaslab_fini(vdev_t *vd)
{
        if (vd->vdev_checkpoint_sm != NULL) {
                ASSERT(spa_feature_is_active(vd->vdev_spa,
                    SPA_FEATURE_POOL_CHECKPOINT));
                space_map_close(vd->vdev_checkpoint_sm);
                /*
                 * Even though we close the space map, we need to set its
                 * pointer to NULL. The reason is that vdev_metaslab_fini()
                 * may be called multiple times for certain operations
                 * (i.e. when destroying a pool) so we need to ensure that
                 * this clause never executes twice. This logic is similar
                 * to the one used for the vdev_ms clause below.
                 */
                vd->vdev_checkpoint_sm = NULL;
        }

        if (vd->vdev_ms != NULL) {
                metaslab_group_t *mg = vd->vdev_mg;
                metaslab_group_passivate(mg);

                uint64_t count = vd->vdev_ms_count;
                for (uint64_t m = 0; m < count; m++) {
                        metaslab_t *msp = vd->vdev_ms[m];
                        if (msp != NULL)
                                metaslab_fini(msp);
                }
                vmem_free(vd->vdev_ms, count * sizeof (metaslab_t *));
                vd->vdev_ms = NULL;

                vd->vdev_ms_count = 0;

                for (int i = 0; i < RANGE_TREE_HISTOGRAM_SIZE; i++)
                        ASSERT0(mg->mg_histogram[i]);
        }
        ASSERT0(vd->vdev_ms_count);
        ASSERT3U(vd->vdev_pending_fastwrite, ==, 0);
}

typedef struct vdev_probe_stats {
        boolean_t vps_readable;
        boolean_t vps_writeable;
        int vps_flags;
} vdev_probe_stats_t;

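/*
 * Completion callback for the probe I/Os issued by vdev_probe(). A
 * successful read is written back to the same label pad region when the
 * pool is writeable; the final NULL zio aggregates the results into
 * vdev_cant_read/vdev_cant_write and fails the probe with ENXIO if the
 * device turned out to be inaccessible.
 */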
probe"); 1519 (void) zfs_ereport_post(FM_EREPORT_ZFS_PROBE_FAILURE, 1520 spa, vd, NULL, NULL, 0); 1521 zio->io_error = SET_ERROR(ENXIO); 1522 } 1523 1524 mutex_enter(&vd->vdev_probe_lock); 1525 ASSERT(vd->vdev_probe_zio == zio); 1526 vd->vdev_probe_zio = NULL; 1527 mutex_exit(&vd->vdev_probe_lock); 1528 1529 zl = NULL; 1530 while ((pio = zio_walk_parents(zio, &zl)) != NULL) 1531 if (!vdev_accessible(vd, pio)) 1532 pio->io_error = SET_ERROR(ENXIO); 1533 1534 kmem_free(vps, sizeof (*vps)); 1535 } 1536 } 1537 1538 /* 1539 * Determine whether this device is accessible. 1540 * 1541 * Read and write to several known locations: the pad regions of each 1542 * vdev label but the first, which we leave alone in case it contains 1543 * a VTOC. 1544 */ 1545 zio_t * 1546 vdev_probe(vdev_t *vd, zio_t *zio) 1547 { 1548 spa_t *spa = vd->vdev_spa; 1549 vdev_probe_stats_t *vps = NULL; 1550 zio_t *pio; 1551 1552 ASSERT(vd->vdev_ops->vdev_op_leaf); 1553 1554 /* 1555 * Don't probe the probe. 1556 */ 1557 if (zio && (zio->io_flags & ZIO_FLAG_PROBE)) 1558 return (NULL); 1559 1560 /* 1561 * To prevent 'probe storms' when a device fails, we create 1562 * just one probe i/o at a time. All zios that want to probe 1563 * this vdev will become parents of the probe io. 1564 */ 1565 mutex_enter(&vd->vdev_probe_lock); 1566 1567 if ((pio = vd->vdev_probe_zio) == NULL) { 1568 vps = kmem_zalloc(sizeof (*vps), KM_SLEEP); 1569 1570 vps->vps_flags = ZIO_FLAG_CANFAIL | ZIO_FLAG_PROBE | 1571 ZIO_FLAG_DONT_CACHE | ZIO_FLAG_DONT_AGGREGATE | 1572 ZIO_FLAG_TRYHARD; 1573 1574 if (spa_config_held(spa, SCL_ZIO, RW_WRITER)) { 1575 /* 1576 * vdev_cant_read and vdev_cant_write can only 1577 * transition from TRUE to FALSE when we have the 1578 * SCL_ZIO lock as writer; otherwise they can only 1579 * transition from FALSE to TRUE. This ensures that 1580 * any zio looking at these values can assume that 1581 * failures persist for the life of the I/O. That's 1582 * important because when a device has intermittent 1583 * connectivity problems, we want to ensure that 1584 * they're ascribed to the device (ENXIO) and not 1585 * the zio (EIO). 1586 * 1587 * Since we hold SCL_ZIO as writer here, clear both 1588 * values so the probe can reevaluate from first 1589 * principles. 1590 */ 1591 vps->vps_flags |= ZIO_FLAG_CONFIG_WRITER; 1592 vd->vdev_cant_read = B_FALSE; 1593 vd->vdev_cant_write = B_FALSE; 1594 } 1595 1596 vd->vdev_probe_zio = pio = zio_null(NULL, spa, vd, 1597 vdev_probe_done, vps, 1598 vps->vps_flags | ZIO_FLAG_DONT_PROPAGATE); 1599 1600 /* 1601 * We can't change the vdev state in this context, so we 1602 * kick off an async task to do it on our behalf. 
static void
vdev_open_child(void *arg)
{
        vdev_t *vd = arg;

        vd->vdev_open_thread = curthread;
        vd->vdev_open_error = vdev_open(vd);
        vd->vdev_open_thread = NULL;
}

static boolean_t
vdev_uses_zvols(vdev_t *vd)
{
#ifdef _KERNEL
        if (zvol_is_zvol(vd->vdev_path))
                return (B_TRUE);
#endif

        for (int c = 0; c < vd->vdev_children; c++)
                if (vdev_uses_zvols(vd->vdev_child[c]))
                        return (B_TRUE);

        return (B_FALSE);
}

/*
 * Returns B_TRUE if the passed child should be opened.
 */
static boolean_t
vdev_default_open_children_func(vdev_t *vd)
{
        return (B_TRUE);
}

/*
 * Open the requested child vdevs. If any of the leaf vdevs are using
 * a ZFS volume then do the opens in a single thread. This avoids a
 * deadlock when the current thread is holding the spa_namespace_lock.
 */
static void
vdev_open_children_impl(vdev_t *vd, vdev_open_children_func_t *open_func)
{
        int children = vd->vdev_children;

        taskq_t *tq = taskq_create("vdev_open", children, minclsyspri,
            children, children, TASKQ_PREPOPULATE);
        vd->vdev_nonrot = B_TRUE;

        for (int c = 0; c < children; c++) {
                vdev_t *cvd = vd->vdev_child[c];

                if (open_func(cvd) == B_FALSE)
                        continue;

                if (tq == NULL || vdev_uses_zvols(vd)) {
                        cvd->vdev_open_error = vdev_open(cvd);
                } else {
                        VERIFY(taskq_dispatch(tq, vdev_open_child,
                            cvd, TQ_SLEEP) != TASKQID_INVALID);
                }

                vd->vdev_nonrot &= cvd->vdev_nonrot;
        }

        if (tq != NULL) {
                taskq_wait(tq);
                taskq_destroy(tq);
        }
}

/*
 * Open all child vdevs.
 */
void
vdev_open_children(vdev_t *vd)
{
        vdev_open_children_impl(vd, vdev_default_open_children_func);
}

/*
 * Conditionally open a subset of child vdevs.
 */
void
vdev_open_children_subset(vdev_t *vd, vdev_open_children_func_t *open_func)
{
        vdev_open_children_impl(vd, open_func);
}

/*
 * Compute the raidz-deflation ratio. Note, we hard-code
 * in 128k (1 << 17) because it is the "typical" blocksize.
 * Even though SPA_MAXBLOCKSIZE changed, this algorithm can not change,
 * otherwise it would inconsistently account for existing bp's.
 */
static void
vdev_set_deflate_ratio(vdev_t *vd)
{
        if (vd == vd->vdev_top && !vd->vdev_ishole && vd->vdev_ashift != 0) {
                vd->vdev_deflate_ratio = (1 << 17) /
                    (vdev_psize_to_asize(vd, 1 << 17) >> SPA_MINBLOCKSHIFT);
        }
}

/*
 * Maximize performance by inflating the configured ashift for top level
 * vdevs to be as close to the physical ashift as possible while maintaining
 * administrator defined limits and ensuring it doesn't go below the
 * logical ashift.
 */
static void
vdev_ashift_optimize(vdev_t *vd)
{
        ASSERT(vd == vd->vdev_top);

        if (vd->vdev_ashift < vd->vdev_physical_ashift) {
                vd->vdev_ashift = MIN(
                    MAX(zfs_vdev_max_auto_ashift, vd->vdev_ashift),
                    MAX(zfs_vdev_min_auto_ashift,
                    vd->vdev_physical_ashift));
        } else {
                /*
                 * If the logical and physical ashifts are the same, then
                 * we ensure that the top-level vdev's ashift is not smaller
                 * than our minimum ashift value. For the unusual case
                 * where logical ashift > physical ashift, we can't cap
                 * the calculated ashift based on max ashift as that
                 * would cause failures.
                 * We still check if we need to increase it to match
                 * the min ashift.
                 */
                vd->vdev_ashift = MAX(zfs_vdev_min_auto_ashift,
                    vd->vdev_ashift);
        }
}

/*
 * Prepare a virtual device for access.
 */
int
vdev_open(vdev_t *vd)
{
        spa_t *spa = vd->vdev_spa;
        int error;
        uint64_t osize = 0;
        uint64_t max_osize = 0;
        uint64_t asize, max_asize, psize;
        uint64_t logical_ashift = 0;
        uint64_t physical_ashift = 0;

        ASSERT(vd->vdev_open_thread == curthread ||
            spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL);
        ASSERT(vd->vdev_state == VDEV_STATE_CLOSED ||
            vd->vdev_state == VDEV_STATE_CANT_OPEN ||
            vd->vdev_state == VDEV_STATE_OFFLINE);

        vd->vdev_stat.vs_aux = VDEV_AUX_NONE;
        vd->vdev_cant_read = B_FALSE;
        vd->vdev_cant_write = B_FALSE;
        vd->vdev_min_asize = vdev_get_min_asize(vd);

        /*
         * If this vdev is not removed, check its fault status. If it's
         * faulted, bail out of the open.
         */
        if (!vd->vdev_removed && vd->vdev_faulted) {
                ASSERT(vd->vdev_children == 0);
                ASSERT(vd->vdev_label_aux == VDEV_AUX_ERR_EXCEEDED ||
                    vd->vdev_label_aux == VDEV_AUX_EXTERNAL);
                vdev_set_state(vd, B_TRUE, VDEV_STATE_FAULTED,
                    vd->vdev_label_aux);
                return (SET_ERROR(ENXIO));
        } else if (vd->vdev_offline) {
                ASSERT(vd->vdev_children == 0);
                vdev_set_state(vd, B_TRUE, VDEV_STATE_OFFLINE, VDEV_AUX_NONE);
                return (SET_ERROR(ENXIO));
        }

        error = vd->vdev_ops->vdev_op_open(vd, &osize, &max_osize,
            &logical_ashift, &physical_ashift);

        /*
         * Physical volume size should never be larger than its max size,
         * unless the disk has shrunk while we were reading it or the device
         * is buggy or damaged: either way it's not safe for use, bail out of
         * the open.
         */
        if (osize > max_osize) {
                vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
                    VDEV_AUX_OPEN_FAILED);
                return (SET_ERROR(ENXIO));
        }

        /*
         * Reset the vdev_reopening flag so that we actually close
         * the vdev on error.
         */
        vd->vdev_reopening = B_FALSE;
        if (zio_injection_enabled && error == 0)
                error = zio_handle_device_injection(vd, NULL,
                    SET_ERROR(ENXIO));

        if (error) {
                if (vd->vdev_removed &&
                    vd->vdev_stat.vs_aux != VDEV_AUX_OPEN_FAILED)
                        vd->vdev_removed = B_FALSE;

                if (vd->vdev_stat.vs_aux == VDEV_AUX_CHILDREN_OFFLINE) {
                        vdev_set_state(vd, B_TRUE, VDEV_STATE_OFFLINE,
                            vd->vdev_stat.vs_aux);
                } else {
                        vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
                            vd->vdev_stat.vs_aux);
                }
                return (error);
        }

        vd->vdev_removed = B_FALSE;

        /*
         * Recheck the faulted flag now that we have confirmed that
         * the vdev is accessible. If we're faulted, bail.
         */
        if (vd->vdev_faulted) {
                ASSERT(vd->vdev_children == 0);
                ASSERT(vd->vdev_label_aux == VDEV_AUX_ERR_EXCEEDED ||
                    vd->vdev_label_aux == VDEV_AUX_EXTERNAL);
                vdev_set_state(vd, B_TRUE, VDEV_STATE_FAULTED,
                    vd->vdev_label_aux);
                return (SET_ERROR(ENXIO));
        }

        if (vd->vdev_degraded) {
                ASSERT(vd->vdev_children == 0);
                vdev_set_state(vd, B_TRUE, VDEV_STATE_DEGRADED,
                    VDEV_AUX_ERR_EXCEEDED);
        } else {
                vdev_set_state(vd, B_TRUE, VDEV_STATE_HEALTHY, 0);
        }

        /*
         * For hole or missing vdevs we just return success.
         */
        if (vd->vdev_ishole || vd->vdev_ops == &vdev_missing_ops)
                return (0);

        for (int c = 0; c < vd->vdev_children; c++) {
                if (vd->vdev_child[c]->vdev_state != VDEV_STATE_HEALTHY) {
                        vdev_set_state(vd, B_TRUE, VDEV_STATE_DEGRADED,
                            VDEV_AUX_NONE);
                        break;
                }
        }

        osize = P2ALIGN(osize, (uint64_t)sizeof (vdev_label_t));
        max_osize = P2ALIGN(max_osize, (uint64_t)sizeof (vdev_label_t));

        if (vd->vdev_children == 0) {
                if (osize < SPA_MINDEVSIZE) {
                        vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
                            VDEV_AUX_TOO_SMALL);
                        return (SET_ERROR(EOVERFLOW));
                }
                psize = osize;
                asize = osize - (VDEV_LABEL_START_SIZE + VDEV_LABEL_END_SIZE);
                max_asize = max_osize - (VDEV_LABEL_START_SIZE +
                    VDEV_LABEL_END_SIZE);
        } else {
                if (vd->vdev_parent != NULL && osize < SPA_MINDEVSIZE -
                    (VDEV_LABEL_START_SIZE + VDEV_LABEL_END_SIZE)) {
                        vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
                            VDEV_AUX_TOO_SMALL);
                        return (SET_ERROR(EOVERFLOW));
                }
                psize = 0;
                asize = osize;
                max_asize = max_osize;
        }

        /*
         * If the vdev was expanded, record this so that we can re-create the
         * uberblock rings in labels {2,3}, during the next sync.
         */
        if ((psize > vd->vdev_psize) && (vd->vdev_psize != 0))
                vd->vdev_copy_uberblocks = B_TRUE;

        vd->vdev_psize = psize;

        /*
         * Make sure the allocatable size hasn't shrunk too much.
         */
        if (asize < vd->vdev_min_asize) {
                vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
                    VDEV_AUX_BAD_LABEL);
                return (SET_ERROR(EINVAL));
        }

        /*
         * We can always set the logical/physical ashift members since
         * their values are only used to calculate the vdev_ashift when
         * the device is first added to the config. These values should
         * not be used for anything else since they may change whenever
         * the device is reopened and we don't store them in the label.
         */
1935 */ 1936 vd->vdev_physical_ashift = 1937 MAX(physical_ashift, vd->vdev_physical_ashift); 1938 vd->vdev_logical_ashift = MAX(logical_ashift, 1939 vd->vdev_logical_ashift); 1940 1941 if (vd->vdev_asize == 0) { 1942 /* 1943 * This is the first-ever open, so use the computed values. 1944 * For compatibility, a different ashift can be requested. 1945 */ 1946 vd->vdev_asize = asize; 1947 vd->vdev_max_asize = max_asize; 1948 1949 /* 1950 * If the vdev_ashift was not overridden at creation time, 1951 * then set it to the logical ashift and optimize the ashift. 1952 */ 1953 if (vd->vdev_ashift == 0) { 1954 vd->vdev_ashift = vd->vdev_logical_ashift; 1955 1956 if (vd->vdev_logical_ashift > ASHIFT_MAX) { 1957 vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN, 1958 VDEV_AUX_ASHIFT_TOO_BIG); 1959 return (SET_ERROR(EDOM)); 1960 } 1961 1962 if (vd->vdev_top == vd) { 1963 vdev_ashift_optimize(vd); 1964 } 1965 } 1966 if (vd->vdev_ashift != 0 && (vd->vdev_ashift < ASHIFT_MIN || 1967 vd->vdev_ashift > ASHIFT_MAX)) { 1968 vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN, 1969 VDEV_AUX_BAD_ASHIFT); 1970 return (SET_ERROR(EDOM)); 1971 } 1972 } else { 1973 /* 1974 * Make sure the alignment required hasn't increased. 1975 */ 1976 if (vd->vdev_ashift > vd->vdev_top->vdev_ashift && 1977 vd->vdev_ops->vdev_op_leaf) { 1978 (void) zfs_ereport_post( 1979 FM_EREPORT_ZFS_DEVICE_BAD_ASHIFT, 1980 spa, vd, NULL, NULL, 0); 1981 vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN, 1982 VDEV_AUX_BAD_LABEL); 1983 return (SET_ERROR(EDOM)); 1984 } 1985 vd->vdev_max_asize = max_asize; 1986 } 1987 1988 /* 1989 * If all children are healthy, we update asize if either: 1990 * The asize has increased, due to a device expansion caused by dynamic 1991 * LUN growth or vdev replacement, and automatic expansion is enabled, 1992 * making the additional space available. 1993 * 1994 * The asize has decreased, due to a device shrink usually caused by a 1995 * vdev replace with a smaller device. This ensures that calculations 1996 * based on max_asize and asize, e.g. esize, are always valid. It's safe 1997 * to do this as we've already validated that asize is greater than 1998 * vdev_min_asize. 1999 */ 2000 if (vd->vdev_state == VDEV_STATE_HEALTHY && 2001 ((asize > vd->vdev_asize && 2002 (vd->vdev_expanding || spa->spa_autoexpand)) || 2003 (asize < vd->vdev_asize))) 2004 vd->vdev_asize = asize; 2005 2006 vdev_set_min_asize(vd); 2007 2008 /* 2009 * Ensure we can issue some IO before declaring the 2010 * vdev open for business. 2011 */ 2012 if (vd->vdev_ops->vdev_op_leaf && 2013 (error = zio_wait(vdev_probe(vd, NULL))) != 0) { 2014 vdev_set_state(vd, B_TRUE, VDEV_STATE_FAULTED, 2015 VDEV_AUX_ERR_EXCEEDED); 2016 return (error); 2017 } 2018 2019 /* 2020 * Track the minimum allocation size. 2021 */ 2022 if (vd->vdev_top == vd && vd->vdev_ashift != 0 && 2023 vd->vdev_islog == 0 && vd->vdev_aux == NULL) { 2024 uint64_t min_alloc = vdev_get_min_alloc(vd); 2025 if (min_alloc < spa->spa_min_alloc) 2026 spa->spa_min_alloc = min_alloc; 2027 } 2028 2029 /* 2030 * If this is a leaf vdev, assess whether a resilver is needed. 2031 * But don't do this if we are doing a reopen for a scrub, since 2032 * this would just restart the scrub we are already doing. 2033 */ 2034 if (vd->vdev_ops->vdev_op_leaf && !spa->spa_scrub_reopen) 2035 dsl_scan_assess_vdev(spa->spa_dsl_pool, vd); 2036 2037 return (0); 2038 } 2039 2040 /* 2041 * Called once the vdevs are all opened, this routine validates the label 2042 * contents.
This needs to be done before vdev_load() so that we don't 2043 * inadvertently do repair I/Os to the wrong device. 2044 * 2045 * This function will only return failure if one of the vdevs indicates that it 2046 * has since been destroyed or exported. This is only possible if 2047 * /etc/zfs/zpool.cache was readonly at the time. Otherwise, the vdev state 2048 * will be updated but the function will return 0. 2049 */ 2050 int 2051 vdev_validate(vdev_t *vd) 2052 { 2053 spa_t *spa = vd->vdev_spa; 2054 nvlist_t *label; 2055 uint64_t guid = 0, aux_guid = 0, top_guid; 2056 uint64_t state; 2057 nvlist_t *nvl; 2058 uint64_t txg; 2059 2060 if (vdev_validate_skip) 2061 return (0); 2062 2063 for (uint64_t c = 0; c < vd->vdev_children; c++) 2064 if (vdev_validate(vd->vdev_child[c]) != 0) 2065 return (SET_ERROR(EBADF)); 2066 2067 /* 2068 * If the device has already failed, or was marked offline, don't do 2069 * any further validation. Otherwise, label I/O will fail and we will 2070 * overwrite the previous state. 2071 */ 2072 if (!vd->vdev_ops->vdev_op_leaf || !vdev_readable(vd)) 2073 return (0); 2074 2075 /* 2076 * If we are performing an extreme rewind, we allow for a label that 2077 * was modified at a point after the current txg. 2078 * If config lock is not held do not check for the txg. spa_sync could 2079 * be updating the vdev's label before updating spa_last_synced_txg. 2080 */ 2081 if (spa->spa_extreme_rewind || spa_last_synced_txg(spa) == 0 || 2082 spa_config_held(spa, SCL_CONFIG, RW_WRITER) != SCL_CONFIG) 2083 txg = UINT64_MAX; 2084 else 2085 txg = spa_last_synced_txg(spa); 2086 2087 if ((label = vdev_label_read_config(vd, txg)) == NULL) { 2088 vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN, 2089 VDEV_AUX_BAD_LABEL); 2090 vdev_dbgmsg(vd, "vdev_validate: failed reading config for " 2091 "txg %llu", (u_longlong_t)txg); 2092 return (0); 2093 } 2094 2095 /* 2096 * Determine if this vdev has been split off into another 2097 * pool. If so, then refuse to open it. 2098 */ 2099 if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_SPLIT_GUID, 2100 &aux_guid) == 0 && aux_guid == spa_guid(spa)) { 2101 vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN, 2102 VDEV_AUX_SPLIT_POOL); 2103 nvlist_free(label); 2104 vdev_dbgmsg(vd, "vdev_validate: vdev split into other pool"); 2105 return (0); 2106 } 2107 2108 if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_GUID, &guid) != 0) { 2109 vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN, 2110 VDEV_AUX_CORRUPT_DATA); 2111 nvlist_free(label); 2112 vdev_dbgmsg(vd, "vdev_validate: '%s' missing from label", 2113 ZPOOL_CONFIG_POOL_GUID); 2114 return (0); 2115 } 2116 2117 /* 2118 * If config is not trusted then ignore the spa guid check. This is 2119 * necessary because if the machine crashed during a re-guid the new 2120 * guid might have been written to all of the vdev labels, but not the 2121 * cached config. The check will be performed again once we have the 2122 * trusted config from the MOS. 
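 * (For example, if a 'zpool reguid' crashes after the new guid reached
 * the labels but before /etc/zfs/zpool.cache was rewritten, the label
 * and the cached config legitimately disagree until the trusted MOS
 * config is loaded.)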
2123 */ 2124 if (spa->spa_trust_config && guid != spa_guid(spa)) { 2125 vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN, 2126 VDEV_AUX_CORRUPT_DATA); 2127 nvlist_free(label); 2128 vdev_dbgmsg(vd, "vdev_validate: vdev label pool_guid doesn't " 2129 "match config (%llu != %llu)", (u_longlong_t)guid, 2130 (u_longlong_t)spa_guid(spa)); 2131 return (0); 2132 } 2133 2134 if (nvlist_lookup_nvlist(label, ZPOOL_CONFIG_VDEV_TREE, &nvl) 2135 != 0 || nvlist_lookup_uint64(nvl, ZPOOL_CONFIG_ORIG_GUID, 2136 &aux_guid) != 0) 2137 aux_guid = 0; 2138 2139 if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_GUID, &guid) != 0) { 2140 vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN, 2141 VDEV_AUX_CORRUPT_DATA); 2142 nvlist_free(label); 2143 vdev_dbgmsg(vd, "vdev_validate: '%s' missing from label", 2144 ZPOOL_CONFIG_GUID); 2145 return (0); 2146 } 2147 2148 if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_TOP_GUID, &top_guid) 2149 != 0) { 2150 vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN, 2151 VDEV_AUX_CORRUPT_DATA); 2152 nvlist_free(label); 2153 vdev_dbgmsg(vd, "vdev_validate: '%s' missing from label", 2154 ZPOOL_CONFIG_TOP_GUID); 2155 return (0); 2156 } 2157 2158 /* 2159 * If this vdev just became a top-level vdev because its sibling was 2160 * detached, it will have adopted the parent's vdev guid -- but the 2161 * label may or may not be on disk yet. Fortunately, either version 2162 * of the label will have the same top guid, so if we're a top-level 2163 * vdev, we can safely compare to that instead. 2164 * However, if the config comes from a cachefile that failed to update 2165 * after the detach, a top-level vdev will appear as a non top-level 2166 * vdev in the config. Also relax the constraints if we perform an 2167 * extreme rewind. 2168 * 2169 * If we split this vdev off instead, then we also check the 2170 * original pool's guid. We don't want to consider the vdev 2171 * corrupt if it is partway through a split operation. 2172 */ 2173 if (vd->vdev_guid != guid && vd->vdev_guid != aux_guid) { 2174 boolean_t mismatch = B_FALSE; 2175 if (spa->spa_trust_config && !spa->spa_extreme_rewind) { 2176 if (vd != vd->vdev_top || vd->vdev_guid != top_guid) 2177 mismatch = B_TRUE; 2178 } else { 2179 if (vd->vdev_guid != top_guid && 2180 vd->vdev_top->vdev_guid != guid) 2181 mismatch = B_TRUE; 2182 } 2183 2184 if (mismatch) { 2185 vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN, 2186 VDEV_AUX_CORRUPT_DATA); 2187 nvlist_free(label); 2188 vdev_dbgmsg(vd, "vdev_validate: config guid " 2189 "doesn't match label guid"); 2190 vdev_dbgmsg(vd, "CONFIG: guid %llu, top_guid %llu", 2191 (u_longlong_t)vd->vdev_guid, 2192 (u_longlong_t)vd->vdev_top->vdev_guid); 2193 vdev_dbgmsg(vd, "LABEL: guid %llu, top_guid %llu, " 2194 "aux_guid %llu", (u_longlong_t)guid, 2195 (u_longlong_t)top_guid, (u_longlong_t)aux_guid); 2196 return (0); 2197 } 2198 } 2199 2200 if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_STATE, 2201 &state) != 0) { 2202 vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN, 2203 VDEV_AUX_CORRUPT_DATA); 2204 nvlist_free(label); 2205 vdev_dbgmsg(vd, "vdev_validate: '%s' missing from label", 2206 ZPOOL_CONFIG_POOL_STATE); 2207 return (0); 2208 } 2209 2210 nvlist_free(label); 2211 2212 /* 2213 * If this is a verbatim import, no need to check the 2214 * state of the pool. 
2215 */ 2216 if (!(spa->spa_import_flags & ZFS_IMPORT_VERBATIM) && 2217 spa_load_state(spa) == SPA_LOAD_OPEN && 2218 state != POOL_STATE_ACTIVE) { 2219 vdev_dbgmsg(vd, "vdev_validate: invalid pool state (%llu) " 2220 "for spa %s", (u_longlong_t)state, spa->spa_name); 2221 return (SET_ERROR(EBADF)); 2222 } 2223 2224 /* 2225 * If we were able to open and validate a vdev that was 2226 * previously marked permanently unavailable, clear that state 2227 * now. 2228 */ 2229 if (vd->vdev_not_present) 2230 vd->vdev_not_present = 0; 2231 2232 return (0); 2233 } 2234 2235 static void 2236 vdev_copy_path_impl(vdev_t *svd, vdev_t *dvd) 2237 { 2238 if (svd->vdev_path != NULL && dvd->vdev_path != NULL) { 2239 if (strcmp(svd->vdev_path, dvd->vdev_path) != 0) { 2240 zfs_dbgmsg("vdev_copy_path: vdev %llu: path changed " 2241 "from '%s' to '%s'", (u_longlong_t)dvd->vdev_guid, 2242 dvd->vdev_path, svd->vdev_path); 2243 spa_strfree(dvd->vdev_path); 2244 dvd->vdev_path = spa_strdup(svd->vdev_path); 2245 } 2246 } else if (svd->vdev_path != NULL) { 2247 dvd->vdev_path = spa_strdup(svd->vdev_path); 2248 zfs_dbgmsg("vdev_copy_path: vdev %llu: path set to '%s'", 2249 (u_longlong_t)dvd->vdev_guid, dvd->vdev_path); 2250 } 2251 } 2252 2253 /* 2254 * Recursively copy vdev paths from one vdev to another. Source and destination 2255 * vdev trees must have same geometry otherwise return error. Intended to copy 2256 * paths from userland config into MOS config. 2257 */ 2258 int 2259 vdev_copy_path_strict(vdev_t *svd, vdev_t *dvd) 2260 { 2261 if ((svd->vdev_ops == &vdev_missing_ops) || 2262 (svd->vdev_ishole && dvd->vdev_ishole) || 2263 (dvd->vdev_ops == &vdev_indirect_ops)) 2264 return (0); 2265 2266 if (svd->vdev_ops != dvd->vdev_ops) { 2267 vdev_dbgmsg(svd, "vdev_copy_path: vdev type mismatch: %s != %s", 2268 svd->vdev_ops->vdev_op_type, dvd->vdev_ops->vdev_op_type); 2269 return (SET_ERROR(EINVAL)); 2270 } 2271 2272 if (svd->vdev_guid != dvd->vdev_guid) { 2273 vdev_dbgmsg(svd, "vdev_copy_path: guids mismatch (%llu != " 2274 "%llu)", (u_longlong_t)svd->vdev_guid, 2275 (u_longlong_t)dvd->vdev_guid); 2276 return (SET_ERROR(EINVAL)); 2277 } 2278 2279 if (svd->vdev_children != dvd->vdev_children) { 2280 vdev_dbgmsg(svd, "vdev_copy_path: children count mismatch: " 2281 "%llu != %llu", (u_longlong_t)svd->vdev_children, 2282 (u_longlong_t)dvd->vdev_children); 2283 return (SET_ERROR(EINVAL)); 2284 } 2285 2286 for (uint64_t i = 0; i < svd->vdev_children; i++) { 2287 int error = vdev_copy_path_strict(svd->vdev_child[i], 2288 dvd->vdev_child[i]); 2289 if (error != 0) 2290 return (error); 2291 } 2292 2293 if (svd->vdev_ops->vdev_op_leaf) 2294 vdev_copy_path_impl(svd, dvd); 2295 2296 return (0); 2297 } 2298 2299 static void 2300 vdev_copy_path_search(vdev_t *stvd, vdev_t *dvd) 2301 { 2302 ASSERT(stvd->vdev_top == stvd); 2303 ASSERT3U(stvd->vdev_id, ==, dvd->vdev_top->vdev_id); 2304 2305 for (uint64_t i = 0; i < dvd->vdev_children; i++) { 2306 vdev_copy_path_search(stvd, dvd->vdev_child[i]); 2307 } 2308 2309 if (!dvd->vdev_ops->vdev_op_leaf || !vdev_is_concrete(dvd)) 2310 return; 2311 2312 /* 2313 * The idea here is that while a vdev can shift positions within 2314 * a top vdev (when replacing, attaching mirror, etc.) it cannot 2315 * step outside of it. 
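 * For example, a leaf that moves because a mirror is attached to it, or
 * because it is swapped into a replacing vdev, keeps its guid and stays
 * under the same top-level vdev, so the guid lookup below (scoped to the
 * matching top-level vdev) still finds it in its new position.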
2316 */ 2317 vdev_t *vd = vdev_lookup_by_guid(stvd, dvd->vdev_guid); 2318 2319 if (vd == NULL || vd->vdev_ops != dvd->vdev_ops) 2320 return; 2321 2322 ASSERT(vd->vdev_ops->vdev_op_leaf); 2323 2324 vdev_copy_path_impl(vd, dvd); 2325 } 2326 2327 /* 2328 * Recursively copy vdev paths from one root vdev to another. Source and 2329 * destination vdev trees may differ in geometry. For each destination leaf 2330 * vdev, search a vdev with the same guid and top vdev id in the source. 2331 * Intended to copy paths from userland config into MOS config. 2332 */ 2333 void 2334 vdev_copy_path_relaxed(vdev_t *srvd, vdev_t *drvd) 2335 { 2336 uint64_t children = MIN(srvd->vdev_children, drvd->vdev_children); 2337 ASSERT(srvd->vdev_ops == &vdev_root_ops); 2338 ASSERT(drvd->vdev_ops == &vdev_root_ops); 2339 2340 for (uint64_t i = 0; i < children; i++) { 2341 vdev_copy_path_search(srvd->vdev_child[i], 2342 drvd->vdev_child[i]); 2343 } 2344 } 2345 2346 /* 2347 * Close a virtual device. 2348 */ 2349 void 2350 vdev_close(vdev_t *vd) 2351 { 2352 vdev_t *pvd = vd->vdev_parent; 2353 spa_t *spa __maybe_unused = vd->vdev_spa; 2354 2355 ASSERT(vd != NULL); 2356 ASSERT(vd->vdev_open_thread == curthread || 2357 spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL); 2358 2359 /* 2360 * If our parent is reopening, then we are as well, unless we are 2361 * going offline. 2362 */ 2363 if (pvd != NULL && pvd->vdev_reopening) 2364 vd->vdev_reopening = (pvd->vdev_reopening && !vd->vdev_offline); 2365 2366 vd->vdev_ops->vdev_op_close(vd); 2367 2368 vdev_cache_purge(vd); 2369 2370 /* 2371 * We record the previous state before we close it, so that if we are 2372 * doing a reopen(), we don't generate FMA ereports if we notice that 2373 * it's still faulted. 2374 */ 2375 vd->vdev_prevstate = vd->vdev_state; 2376 2377 if (vd->vdev_offline) 2378 vd->vdev_state = VDEV_STATE_OFFLINE; 2379 else 2380 vd->vdev_state = VDEV_STATE_CLOSED; 2381 vd->vdev_stat.vs_aux = VDEV_AUX_NONE; 2382 } 2383 2384 void 2385 vdev_hold(vdev_t *vd) 2386 { 2387 spa_t *spa = vd->vdev_spa; 2388 2389 ASSERT(spa_is_root(spa)); 2390 if (spa->spa_state == POOL_STATE_UNINITIALIZED) 2391 return; 2392 2393 for (int c = 0; c < vd->vdev_children; c++) 2394 vdev_hold(vd->vdev_child[c]); 2395 2396 if (vd->vdev_ops->vdev_op_leaf) 2397 vd->vdev_ops->vdev_op_hold(vd); 2398 } 2399 2400 void 2401 vdev_rele(vdev_t *vd) 2402 { 2403 ASSERT(spa_is_root(vd->vdev_spa)); 2404 for (int c = 0; c < vd->vdev_children; c++) 2405 vdev_rele(vd->vdev_child[c]); 2406 2407 if (vd->vdev_ops->vdev_op_leaf) 2408 vd->vdev_ops->vdev_op_rele(vd); 2409 } 2410 2411 /* 2412 * Reopen all interior vdevs and any unopened leaves. We don't actually 2413 * reopen leaf vdevs which had previously been opened as they might deadlock 2414 * on the spa_config_lock. Instead we only obtain the leaf's physical size. 2415 * If the leaf has never been opened then open it, as usual. 2416 */ 2417 void 2418 vdev_reopen(vdev_t *vd) 2419 { 2420 spa_t *spa = vd->vdev_spa; 2421 2422 ASSERT(spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL); 2423 2424 /* set the reopening flag unless we're taking the vdev offline */ 2425 vd->vdev_reopening = !vd->vdev_offline; 2426 vdev_close(vd); 2427 (void) vdev_open(vd); 2428 2429 /* 2430 * Call vdev_validate() here to make sure we have the same device. 2431 * Otherwise, a device with an invalid label could be successfully 2432 * opened in response to vdev_reopen(). 
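 * For example, if the disk behind vdev_path was swapped for a different
 * device, the open itself may still succeed; the vdev_validate() call
 * below then catches the guid mismatch and marks the vdev CANT_OPEN
 * rather than letting I/O go to the wrong disk.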
2433 */ 2434 if (vd->vdev_aux) { 2435 (void) vdev_validate_aux(vd); 2436 if (vdev_readable(vd) && vdev_writeable(vd) && 2437 vd->vdev_aux == &spa->spa_l2cache) { 2438 /* 2439 * If the vdev is present, we should evict all ARC 2440 * buffers and pointers to log blocks and reclaim their 2441 * space before restoring its contents to L2ARC. 2442 */ 2443 if (l2arc_vdev_present(vd)) { 2444 l2arc_rebuild_vdev(vd, B_TRUE); 2445 } else { 2446 l2arc_add_vdev(spa, vd); 2447 } 2448 spa_async_request(spa, SPA_ASYNC_L2CACHE_REBUILD); 2449 spa_async_request(spa, SPA_ASYNC_L2CACHE_TRIM); 2450 } 2451 } else { 2452 (void) vdev_validate(vd); 2453 } 2454 2455 /* 2456 * Reassess parent vdev's health. 2457 */ 2458 vdev_propagate_state(vd); 2459 } 2460 2461 int 2462 vdev_create(vdev_t *vd, uint64_t txg, boolean_t isreplacing) 2463 { 2464 int error; 2465 2466 /* 2467 * Normally, partial opens (e.g. of a mirror) are allowed. 2468 * For a create, however, we want to fail the request if 2469 * there are any components we can't open. 2470 */ 2471 error = vdev_open(vd); 2472 2473 if (error || vd->vdev_state != VDEV_STATE_HEALTHY) { 2474 vdev_close(vd); 2475 return (error ? error : SET_ERROR(ENXIO)); 2476 } 2477 2478 /* 2479 * Recursively load DTLs and initialize all labels. 2480 */ 2481 if ((error = vdev_dtl_load(vd)) != 0 || 2482 (error = vdev_label_init(vd, txg, isreplacing ? 2483 VDEV_LABEL_REPLACE : VDEV_LABEL_CREATE)) != 0) { 2484 vdev_close(vd); 2485 return (error); 2486 } 2487 2488 return (0); 2489 } 2490 2491 void 2492 vdev_metaslab_set_size(vdev_t *vd) 2493 { 2494 uint64_t asize = vd->vdev_asize; 2495 uint64_t ms_count = asize >> zfs_vdev_default_ms_shift; 2496 uint64_t ms_shift; 2497 2498 /* 2499 * There are two dimensions to the metaslab sizing calculation: 2500 * the size of the metaslab and the count of metaslabs per vdev. 2501 * 2502 * The default values used below are a good balance between memory 2503 * usage (larger metaslab size means more memory needed for loaded 2504 * metaslabs; more metaslabs means more memory needed for the 2505 * metaslab_t structs), metaslab load time (larger metaslabs take 2506 * longer to load), and metaslab sync time (more metaslabs means 2507 * more time spent syncing all of them). 2508 * 2509 * In general, we aim for zfs_vdev_default_ms_count (200) metaslabs. 2510 * The ranges of the dimensions are as follows: 2511 * 2512 * 2^29 <= ms_size <= 2^34 2513 * 16 <= ms_count <= 131,072 2514 * 2515 * On the lower end of vdev sizes, we aim for metaslab sizes of 2516 * at least 512MB (2^29) to minimize fragmentation effects when 2517 * testing with smaller devices. However, the count constraint 2518 * of at least 16 metaslabs will override this minimum size goal. 2519 * 2520 * On the upper end of vdev sizes, we aim for a maximum metaslab 2521 * size of 16GB. However, we will cap the total count to 2^17 2522 * metaslabs to keep our memory footprint in check and let the 2523 * metaslab size grow from there if that limit is hit. 2524 * 2525 * The net effect of applying the above constraints is summarized below. 2526 * 2527 * vdev size metaslab count 2528 * --------------|----------------- 2529 * < 8GB ~16 2530 * 8GB - 100GB one per 512MB 2531 * 100GB - 3TB ~200 2532 * 3TB - 2PB one per 16GB 2533 * > 2PB ~131,072 2534 * -------------------------------- 2535 * 2536 * Finally, note that all of the above calculate the initial 2537 * number of metaslabs.
Expanding a top-level vdev will result 2538 * in additional metaslabs being allocated, making it possible 2539 * to exceed the zfs_vdev_ms_count_limit. 2540 */ 2541 2542 if (ms_count < zfs_vdev_min_ms_count) 2543 ms_shift = highbit64(asize / zfs_vdev_min_ms_count); 2544 else if (ms_count > zfs_vdev_default_ms_count) 2545 ms_shift = highbit64(asize / zfs_vdev_default_ms_count); 2546 else 2547 ms_shift = zfs_vdev_default_ms_shift; 2548 2549 if (ms_shift < SPA_MAXBLOCKSHIFT) { 2550 ms_shift = SPA_MAXBLOCKSHIFT; 2551 } else if (ms_shift > zfs_vdev_max_ms_shift) { 2552 ms_shift = zfs_vdev_max_ms_shift; 2553 /* cap the total count to constrain memory footprint */ 2554 if ((asize >> ms_shift) > zfs_vdev_ms_count_limit) 2555 ms_shift = highbit64(asize / zfs_vdev_ms_count_limit); 2556 } 2557 2558 vd->vdev_ms_shift = ms_shift; 2559 ASSERT3U(vd->vdev_ms_shift, >=, SPA_MAXBLOCKSHIFT); 2560 } 2561 2562 void 2563 vdev_dirty(vdev_t *vd, int flags, void *arg, uint64_t txg) 2564 { 2565 ASSERT(vd == vd->vdev_top); 2566 /* indirect vdevs don't have metaslabs or dtls */ 2567 ASSERT(vdev_is_concrete(vd) || flags == 0); 2568 ASSERT(ISP2(flags)); 2569 ASSERT(spa_writeable(vd->vdev_spa)); 2570 2571 if (flags & VDD_METASLAB) 2572 (void) txg_list_add(&vd->vdev_ms_list, arg, txg); 2573 2574 if (flags & VDD_DTL) 2575 (void) txg_list_add(&vd->vdev_dtl_list, arg, txg); 2576 2577 (void) txg_list_add(&vd->vdev_spa->spa_vdev_txg_list, vd, txg); 2578 } 2579 2580 void 2581 vdev_dirty_leaves(vdev_t *vd, int flags, uint64_t txg) 2582 { 2583 for (int c = 0; c < vd->vdev_children; c++) 2584 vdev_dirty_leaves(vd->vdev_child[c], flags, txg); 2585 2586 if (vd->vdev_ops->vdev_op_leaf) 2587 vdev_dirty(vd->vdev_top, flags, vd, txg); 2588 } 2589 2590 /* 2591 * DTLs. 2592 * 2593 * A vdev's DTL (dirty time log) is the set of transaction groups for which 2594 * the vdev has less than perfect replication. There are four kinds of DTL: 2595 * 2596 * DTL_MISSING: txgs for which the vdev has no valid copies of the data 2597 * 2598 * DTL_PARTIAL: txgs for which data is available, but not fully replicated 2599 * 2600 * DTL_SCRUB: the txgs that could not be repaired by the last scrub; upon 2601 * scrub completion, DTL_SCRUB replaces DTL_MISSING in the range of 2602 * txgs that was scrubbed. 2603 * 2604 * DTL_OUTAGE: txgs which cannot currently be read, whether due to 2605 * persistent errors or just some device being offline. 2606 * Unlike the other three, the DTL_OUTAGE map is not generally 2607 * maintained; it's only computed when needed, typically to 2608 * determine whether a device can be detached. 2609 * 2610 * For leaf vdevs, DTL_MISSING and DTL_PARTIAL are identical: the device 2611 * either has the data or it doesn't. 2612 * 2613 * For interior vdevs such as mirror and RAID-Z the picture is more complex. 2614 * A vdev's DTL_PARTIAL is the union of its children's DTL_PARTIALs, because 2615 * if any child is less than fully replicated, then so is its parent. 2616 * A vdev's DTL_MISSING is a modified union of its children's DTL_MISSINGs, 2617 * comprising only those txgs which appear in more than 'maxfaults' children; 2618 * those are the txgs we don't have enough replication to read. For example, 2619 * double-parity RAID-Z can tolerate up to two missing devices (maxfaults == 2); 2620 * thus, its DTL_MISSING consists of the set of txgs that appear in more than 2621 * two child DTL_MISSING maps.
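 * An illustrative example: in a raidz2 (maxfaults == 2) where three
 * degraded children have DTL_MISSING maps of {10-20}, {15-25} and
 * {18-30}, the parent's DTL_PARTIAL is the union {10-30}, while its
 * DTL_MISSING is just {18-20} -- the only txgs present in more than two
 * child maps.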
2622 * 2623 * It should be clear from the above that to compute the DTLs and outage maps 2624 * for all vdevs, it suffices to know just the leaf vdevs' DTL_MISSING maps. 2625 * Therefore, that is all we keep on disk. When loading the pool, or after 2626 * a configuration change, we generate all other DTLs from first principles. 2627 */ 2628 void 2629 vdev_dtl_dirty(vdev_t *vd, vdev_dtl_type_t t, uint64_t txg, uint64_t size) 2630 { 2631 range_tree_t *rt = vd->vdev_dtl[t]; 2632 2633 ASSERT(t < DTL_TYPES); 2634 ASSERT(vd != vd->vdev_spa->spa_root_vdev); 2635 ASSERT(spa_writeable(vd->vdev_spa)); 2636 2637 mutex_enter(&vd->vdev_dtl_lock); 2638 if (!range_tree_contains(rt, txg, size)) 2639 range_tree_add(rt, txg, size); 2640 mutex_exit(&vd->vdev_dtl_lock); 2641 } 2642 2643 boolean_t 2644 vdev_dtl_contains(vdev_t *vd, vdev_dtl_type_t t, uint64_t txg, uint64_t size) 2645 { 2646 range_tree_t *rt = vd->vdev_dtl[t]; 2647 boolean_t dirty = B_FALSE; 2648 2649 ASSERT(t < DTL_TYPES); 2650 ASSERT(vd != vd->vdev_spa->spa_root_vdev); 2651 2652 /* 2653 * While we are loading the pool, the DTLs have not been loaded yet. 2654 * This isn't a problem but it can result in devices being tried 2655 * which are known to not have the data. In which case, the import 2656 * is relying on the checksum to ensure that we get the right data. 2657 * Note that while importing we are only reading the MOS, which is 2658 * always checksummed. 2659 */ 2660 mutex_enter(&vd->vdev_dtl_lock); 2661 if (!range_tree_is_empty(rt)) 2662 dirty = range_tree_contains(rt, txg, size); 2663 mutex_exit(&vd->vdev_dtl_lock); 2664 2665 return (dirty); 2666 } 2667 2668 boolean_t 2669 vdev_dtl_empty(vdev_t *vd, vdev_dtl_type_t t) 2670 { 2671 range_tree_t *rt = vd->vdev_dtl[t]; 2672 boolean_t empty; 2673 2674 mutex_enter(&vd->vdev_dtl_lock); 2675 empty = range_tree_is_empty(rt); 2676 mutex_exit(&vd->vdev_dtl_lock); 2677 2678 return (empty); 2679 } 2680 2681 /* 2682 * Check if the txg falls within the range which must be 2683 * resilvered. DVAs outside this range can always be skipped. 2684 */ 2685 boolean_t 2686 vdev_default_need_resilver(vdev_t *vd, const dva_t *dva, size_t psize, 2687 uint64_t phys_birth) 2688 { 2689 /* Set by sequential resilver. */ 2690 if (phys_birth == TXG_UNKNOWN) 2691 return (B_TRUE); 2692 2693 return (vdev_dtl_contains(vd, DTL_PARTIAL, phys_birth, 1)); 2694 } 2695 2696 /* 2697 * Returns B_TRUE if the vdev determines the DVA needs to be resilvered. 2698 */ 2699 boolean_t 2700 vdev_dtl_need_resilver(vdev_t *vd, const dva_t *dva, size_t psize, 2701 uint64_t phys_birth) 2702 { 2703 ASSERT(vd != vd->vdev_spa->spa_root_vdev); 2704 2705 if (vd->vdev_ops->vdev_op_need_resilver == NULL || 2706 vd->vdev_ops->vdev_op_leaf) 2707 return (B_TRUE); 2708 2709 return (vd->vdev_ops->vdev_op_need_resilver(vd, dva, psize, 2710 phys_birth)); 2711 } 2712 2713 /* 2714 * Returns the lowest txg in the DTL range. 2715 */ 2716 static uint64_t 2717 vdev_dtl_min(vdev_t *vd) 2718 { 2719 ASSERT(MUTEX_HELD(&vd->vdev_dtl_lock)); 2720 ASSERT3U(range_tree_space(vd->vdev_dtl[DTL_MISSING]), !=, 0); 2721 ASSERT0(vd->vdev_children); 2722 2723 return (range_tree_min(vd->vdev_dtl[DTL_MISSING]) - 1); 2724 } 2725 2726 /* 2727 * Returns the highest txg in the DTL. 
2728 */ 2729 static uint64_t 2730 vdev_dtl_max(vdev_t *vd) 2731 { 2732 ASSERT(MUTEX_HELD(&vd->vdev_dtl_lock)); 2733 ASSERT3U(range_tree_space(vd->vdev_dtl[DTL_MISSING]), !=, 0); 2734 ASSERT0(vd->vdev_children); 2735 2736 return (range_tree_max(vd->vdev_dtl[DTL_MISSING])); 2737 } 2738 2739 /* 2740 * Determine if a resilvering vdev should remove any DTL entries from 2741 * its range. If the vdev was resilvering for the entire duration of the 2742 * scan then it should excise that range from its DTLs. Otherwise, this 2743 * vdev is considered partially resilvered and should leave its DTL 2744 * entries intact. The comment in vdev_dtl_reassess() describes how we 2745 * excise the DTLs. 2746 */ 2747 static boolean_t 2748 vdev_dtl_should_excise(vdev_t *vd, boolean_t rebuild_done) 2749 { 2750 ASSERT0(vd->vdev_children); 2751 2752 if (vd->vdev_state < VDEV_STATE_DEGRADED) 2753 return (B_FALSE); 2754 2755 if (vd->vdev_resilver_deferred) 2756 return (B_FALSE); 2757 2758 if (range_tree_is_empty(vd->vdev_dtl[DTL_MISSING])) 2759 return (B_TRUE); 2760 2761 if (rebuild_done) { 2762 vdev_rebuild_t *vr = &vd->vdev_top->vdev_rebuild_config; 2763 vdev_rebuild_phys_t *vrp = &vr->vr_rebuild_phys; 2764 2765 /* Rebuild not initiated by attach */ 2766 if (vd->vdev_rebuild_txg == 0) 2767 return (B_TRUE); 2768 2769 /* 2770 * When a rebuild completes without error then all missing data 2771 * up to the rebuild max txg has been reconstructed and the DTL 2772 * is eligible for excision. 2773 */ 2774 if (vrp->vrp_rebuild_state == VDEV_REBUILD_COMPLETE && 2775 vdev_dtl_max(vd) <= vrp->vrp_max_txg) { 2776 ASSERT3U(vrp->vrp_min_txg, <=, vdev_dtl_min(vd)); 2777 ASSERT3U(vrp->vrp_min_txg, <, vd->vdev_rebuild_txg); 2778 ASSERT3U(vd->vdev_rebuild_txg, <=, vrp->vrp_max_txg); 2779 return (B_TRUE); 2780 } 2781 } else { 2782 dsl_scan_t *scn = vd->vdev_spa->spa_dsl_pool->dp_scan; 2783 dsl_scan_phys_t *scnp __maybe_unused = &scn->scn_phys; 2784 2785 /* Resilver not initiated by attach */ 2786 if (vd->vdev_resilver_txg == 0) 2787 return (B_TRUE); 2788 2789 /* 2790 * When a resilver is initiated the scan will assign the 2791 * scn_max_txg value to the highest txg value that exists 2792 * in all DTLs. If this device's max DTL is not part of this 2793 * scan (i.e. it is not in the range (scn_min_txg, scn_max_txg]), 2794 * then it is not eligible for excision. 2795 */ 2796 if (vdev_dtl_max(vd) <= scn->scn_phys.scn_max_txg) { 2797 ASSERT3U(scnp->scn_min_txg, <=, vdev_dtl_min(vd)); 2798 ASSERT3U(scnp->scn_min_txg, <, vd->vdev_resilver_txg); 2799 ASSERT3U(vd->vdev_resilver_txg, <=, scnp->scn_max_txg); 2800 return (B_TRUE); 2801 } 2802 } 2803 2804 return (B_FALSE); 2805 } 2806 2807 /* 2808 * Reassess DTLs after a config change or scrub completion. If txg == 0, no 2809 * write operations will be issued to the pool.
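 * (For example, vdev_dtl_required() below invokes this with txg == 0
 * around a temporary vdev_cant_read toggle purely to recompute
 * DTL_OUTAGE in memory; nothing is dirtied or synced in that case.)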
2810 */ 2811 void 2812 vdev_dtl_reassess(vdev_t *vd, uint64_t txg, uint64_t scrub_txg, 2813 boolean_t scrub_done, boolean_t rebuild_done) 2814 { 2815 spa_t *spa = vd->vdev_spa; 2816 avl_tree_t reftree; 2817 int minref; 2818 2819 ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0); 2820 2821 for (int c = 0; c < vd->vdev_children; c++) 2822 vdev_dtl_reassess(vd->vdev_child[c], txg, 2823 scrub_txg, scrub_done, rebuild_done); 2824 2825 if (vd == spa->spa_root_vdev || !vdev_is_concrete(vd) || vd->vdev_aux) 2826 return; 2827 2828 if (vd->vdev_ops->vdev_op_leaf) { 2829 dsl_scan_t *scn = spa->spa_dsl_pool->dp_scan; 2830 vdev_rebuild_t *vr = &vd->vdev_top->vdev_rebuild_config; 2831 boolean_t check_excise = B_FALSE; 2832 boolean_t wasempty = B_TRUE; 2833 2834 mutex_enter(&vd->vdev_dtl_lock); 2835 2836 /* 2837 * If requested, pretend the scan or rebuild completed cleanly. 2838 */ 2839 if (zfs_scan_ignore_errors) { 2840 if (scn != NULL) 2841 scn->scn_phys.scn_errors = 0; 2842 if (vr != NULL) 2843 vr->vr_rebuild_phys.vrp_errors = 0; 2844 } 2845 2846 if (scrub_txg != 0 && 2847 !range_tree_is_empty(vd->vdev_dtl[DTL_MISSING])) { 2848 wasempty = B_FALSE; 2849 zfs_dbgmsg("guid:%llu txg:%llu scrub:%llu started:%d " 2850 "dtl:%llu/%llu errors:%llu", 2851 (u_longlong_t)vd->vdev_guid, (u_longlong_t)txg, 2852 (u_longlong_t)scrub_txg, spa->spa_scrub_started, 2853 (u_longlong_t)vdev_dtl_min(vd), 2854 (u_longlong_t)vdev_dtl_max(vd), 2855 (u_longlong_t)(scn ? scn->scn_phys.scn_errors : 0)); 2856 } 2857 2858 /* 2859 * If we've completed a scrub/resilver or a rebuild cleanly 2860 * then determine if this vdev should remove any DTLs. We 2861 * only want to excise regions on vdevs that were available 2862 * during the entire duration of this scan. 2863 */ 2864 if (rebuild_done && 2865 vr != NULL && vr->vr_rebuild_phys.vrp_errors == 0) { 2866 check_excise = B_TRUE; 2867 } else { 2868 if (spa->spa_scrub_started || 2869 (scn != NULL && scn->scn_phys.scn_errors == 0)) { 2870 check_excise = B_TRUE; 2871 } 2872 } 2873 2874 if (scrub_txg && check_excise && 2875 vdev_dtl_should_excise(vd, rebuild_done)) { 2876 /* 2877 * We completed a scrub, resilver or rebuild up to 2878 * scrub_txg. If we did it without rebooting, then 2879 * the scrub dtl will be valid, so excise the old 2880 * region and fold in the scrub dtl. Otherwise, 2881 * leave the dtl as-is if there was an error. 2882 * 2883 * There's a little trick here: to excise the beginning 2884 * of the DTL_MISSING map, we put it into a reference 2885 * tree and then add a segment with refcnt -1 that 2886 * covers the range [0, scrub_txg). This means 2887 * that each txg in that range has refcnt -1 or 0. 2888 * We then add DTL_SCRUB with a refcnt of 2, so that 2889 * entries in the range [0, scrub_txg) will have a 2890 * positive refcnt -- either 1 or 2. We then convert 2891 * the reference tree into the new DTL_MISSING map.
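 * A small worked example of the trick: with DTL_MISSING = {5-15},
 * scrub_txg = 10 and DTL_SCRUB = {8-9} (txgs 5-7 were repaired, 8-9
 * were not), the per-txg refcnts come out as
 *
 *	txgs 0-4:    0 - 1     = -1  -> excluded
 *	txgs 5-7:    1 - 1     =  0  -> excluded (repaired)
 *	txgs 8-9:    1 - 1 + 2 =  2  -> kept
 *	txgs 10-15:  1         =  1  -> kept (beyond scrub_txg)
 *
 * so the regenerated DTL_MISSING is {8-15}.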
2892 */ 2893 space_reftree_create(&reftree); 2894 space_reftree_add_map(&reftree, 2895 vd->vdev_dtl[DTL_MISSING], 1); 2896 space_reftree_add_seg(&reftree, 0, scrub_txg, -1); 2897 space_reftree_add_map(&reftree, 2898 vd->vdev_dtl[DTL_SCRUB], 2); 2899 space_reftree_generate_map(&reftree, 2900 vd->vdev_dtl[DTL_MISSING], 1); 2901 space_reftree_destroy(&reftree); 2902 2903 if (!range_tree_is_empty(vd->vdev_dtl[DTL_MISSING])) { 2904 zfs_dbgmsg("update DTL_MISSING:%llu/%llu", 2905 (u_longlong_t)vdev_dtl_min(vd), 2906 (u_longlong_t)vdev_dtl_max(vd)); 2907 } else if (!wasempty) { 2908 zfs_dbgmsg("DTL_MISSING is now empty"); 2909 } 2910 } 2911 range_tree_vacate(vd->vdev_dtl[DTL_PARTIAL], NULL, NULL); 2912 range_tree_walk(vd->vdev_dtl[DTL_MISSING], 2913 range_tree_add, vd->vdev_dtl[DTL_PARTIAL]); 2914 if (scrub_done) 2915 range_tree_vacate(vd->vdev_dtl[DTL_SCRUB], NULL, NULL); 2916 range_tree_vacate(vd->vdev_dtl[DTL_OUTAGE], NULL, NULL); 2917 if (!vdev_readable(vd)) 2918 range_tree_add(vd->vdev_dtl[DTL_OUTAGE], 0, -1ULL); 2919 else 2920 range_tree_walk(vd->vdev_dtl[DTL_MISSING], 2921 range_tree_add, vd->vdev_dtl[DTL_OUTAGE]); 2922 2923 /* 2924 * If the vdev was resilvering or rebuilding and no longer 2925 * has any DTLs then reset the appropriate flag and dirty 2926 * the top level so that we persist the change. 2927 */ 2928 if (txg != 0 && 2929 range_tree_is_empty(vd->vdev_dtl[DTL_MISSING]) && 2930 range_tree_is_empty(vd->vdev_dtl[DTL_OUTAGE])) { 2931 if (vd->vdev_rebuild_txg != 0) { 2932 vd->vdev_rebuild_txg = 0; 2933 vdev_config_dirty(vd->vdev_top); 2934 } else if (vd->vdev_resilver_txg != 0) { 2935 vd->vdev_resilver_txg = 0; 2936 vdev_config_dirty(vd->vdev_top); 2937 } 2938 } 2939 2940 mutex_exit(&vd->vdev_dtl_lock); 2941 2942 if (txg != 0) 2943 vdev_dirty(vd->vdev_top, VDD_DTL, vd, txg); 2944 return; 2945 } 2946 2947 mutex_enter(&vd->vdev_dtl_lock); 2948 for (int t = 0; t < DTL_TYPES; t++) { 2949 /* account for child's outage in parent's missing map */ 2950 int s = (t == DTL_MISSING) ? DTL_OUTAGE: t; 2951 if (t == DTL_SCRUB) 2952 continue; /* leaf vdevs only */ 2953 if (t == DTL_PARTIAL) 2954 minref = 1; /* i.e. 
non-zero */ 2955 else if (vdev_get_nparity(vd) != 0) 2956 minref = vdev_get_nparity(vd) + 1; /* RAID-Z, dRAID */ 2957 else 2958 minref = vd->vdev_children; /* any kind of mirror */ 2959 space_reftree_create(&reftree); 2960 for (int c = 0; c < vd->vdev_children; c++) { 2961 vdev_t *cvd = vd->vdev_child[c]; 2962 mutex_enter(&cvd->vdev_dtl_lock); 2963 space_reftree_add_map(&reftree, cvd->vdev_dtl[s], 1); 2964 mutex_exit(&cvd->vdev_dtl_lock); 2965 } 2966 space_reftree_generate_map(&reftree, vd->vdev_dtl[t], minref); 2967 space_reftree_destroy(&reftree); 2968 } 2969 mutex_exit(&vd->vdev_dtl_lock); 2970 } 2971 2972 int 2973 vdev_dtl_load(vdev_t *vd) 2974 { 2975 spa_t *spa = vd->vdev_spa; 2976 objset_t *mos = spa->spa_meta_objset; 2977 range_tree_t *rt; 2978 int error = 0; 2979 2980 if (vd->vdev_ops->vdev_op_leaf && vd->vdev_dtl_object != 0) { 2981 ASSERT(vdev_is_concrete(vd)); 2982 2983 error = space_map_open(&vd->vdev_dtl_sm, mos, 2984 vd->vdev_dtl_object, 0, -1ULL, 0); 2985 if (error) 2986 return (error); 2987 ASSERT(vd->vdev_dtl_sm != NULL); 2988 2989 rt = range_tree_create(NULL, RANGE_SEG64, NULL, 0, 0); 2990 error = space_map_load(vd->vdev_dtl_sm, rt, SM_ALLOC); 2991 if (error == 0) { 2992 mutex_enter(&vd->vdev_dtl_lock); 2993 range_tree_walk(rt, range_tree_add, 2994 vd->vdev_dtl[DTL_MISSING]); 2995 mutex_exit(&vd->vdev_dtl_lock); 2996 } 2997 2998 range_tree_vacate(rt, NULL, NULL); 2999 range_tree_destroy(rt); 3000 3001 return (error); 3002 } 3003 3004 for (int c = 0; c < vd->vdev_children; c++) { 3005 error = vdev_dtl_load(vd->vdev_child[c]); 3006 if (error != 0) 3007 break; 3008 } 3009 3010 return (error); 3011 } 3012 3013 static void 3014 vdev_zap_allocation_data(vdev_t *vd, dmu_tx_t *tx) 3015 { 3016 spa_t *spa = vd->vdev_spa; 3017 objset_t *mos = spa->spa_meta_objset; 3018 vdev_alloc_bias_t alloc_bias = vd->vdev_alloc_bias; 3019 const char *string; 3020 3021 ASSERT(alloc_bias != VDEV_BIAS_NONE); 3022 3023 string = 3024 (alloc_bias == VDEV_BIAS_LOG) ? VDEV_ALLOC_BIAS_LOG : 3025 (alloc_bias == VDEV_BIAS_SPECIAL) ? VDEV_ALLOC_BIAS_SPECIAL : 3026 (alloc_bias == VDEV_BIAS_DEDUP) ? 
VDEV_ALLOC_BIAS_DEDUP : NULL; 3027 3028 ASSERT(string != NULL); 3029 VERIFY0(zap_add(mos, vd->vdev_top_zap, VDEV_TOP_ZAP_ALLOCATION_BIAS, 3030 1, strlen(string) + 1, string, tx)); 3031 3032 if (alloc_bias == VDEV_BIAS_SPECIAL || alloc_bias == VDEV_BIAS_DEDUP) { 3033 spa_activate_allocation_classes(spa, tx); 3034 } 3035 } 3036 3037 void 3038 vdev_destroy_unlink_zap(vdev_t *vd, uint64_t zapobj, dmu_tx_t *tx) 3039 { 3040 spa_t *spa = vd->vdev_spa; 3041 3042 VERIFY0(zap_destroy(spa->spa_meta_objset, zapobj, tx)); 3043 VERIFY0(zap_remove_int(spa->spa_meta_objset, spa->spa_all_vdev_zaps, 3044 zapobj, tx)); 3045 } 3046 3047 uint64_t 3048 vdev_create_link_zap(vdev_t *vd, dmu_tx_t *tx) 3049 { 3050 spa_t *spa = vd->vdev_spa; 3051 uint64_t zap = zap_create(spa->spa_meta_objset, DMU_OTN_ZAP_METADATA, 3052 DMU_OT_NONE, 0, tx); 3053 3054 ASSERT(zap != 0); 3055 VERIFY0(zap_add_int(spa->spa_meta_objset, spa->spa_all_vdev_zaps, 3056 zap, tx)); 3057 3058 return (zap); 3059 } 3060 3061 void 3062 vdev_construct_zaps(vdev_t *vd, dmu_tx_t *tx) 3063 { 3064 if (vd->vdev_ops != &vdev_hole_ops && 3065 vd->vdev_ops != &vdev_missing_ops && 3066 vd->vdev_ops != &vdev_root_ops && 3067 !vd->vdev_top->vdev_removing) { 3068 if (vd->vdev_ops->vdev_op_leaf && vd->vdev_leaf_zap == 0) { 3069 vd->vdev_leaf_zap = vdev_create_link_zap(vd, tx); 3070 } 3071 if (vd == vd->vdev_top && vd->vdev_top_zap == 0) { 3072 vd->vdev_top_zap = vdev_create_link_zap(vd, tx); 3073 if (vd->vdev_alloc_bias != VDEV_BIAS_NONE) 3074 vdev_zap_allocation_data(vd, tx); 3075 } 3076 } 3077 3078 for (uint64_t i = 0; i < vd->vdev_children; i++) { 3079 vdev_construct_zaps(vd->vdev_child[i], tx); 3080 } 3081 } 3082 3083 static void 3084 vdev_dtl_sync(vdev_t *vd, uint64_t txg) 3085 { 3086 spa_t *spa = vd->vdev_spa; 3087 range_tree_t *rt = vd->vdev_dtl[DTL_MISSING]; 3088 objset_t *mos = spa->spa_meta_objset; 3089 range_tree_t *rtsync; 3090 dmu_tx_t *tx; 3091 uint64_t object = space_map_object(vd->vdev_dtl_sm); 3092 3093 ASSERT(vdev_is_concrete(vd)); 3094 ASSERT(vd->vdev_ops->vdev_op_leaf); 3095 3096 tx = dmu_tx_create_assigned(spa->spa_dsl_pool, txg); 3097 3098 if (vd->vdev_detached || vd->vdev_top->vdev_removing) { 3099 mutex_enter(&vd->vdev_dtl_lock); 3100 space_map_free(vd->vdev_dtl_sm, tx); 3101 space_map_close(vd->vdev_dtl_sm); 3102 vd->vdev_dtl_sm = NULL; 3103 mutex_exit(&vd->vdev_dtl_lock); 3104 3105 /* 3106 * We only destroy the leaf ZAP for detached leaves or for 3107 * removed log devices. Removed data devices handle leaf ZAP 3108 * cleanup later, once cancellation is no longer possible. 
3109 */ 3110 if (vd->vdev_leaf_zap != 0 && (vd->vdev_detached || 3111 vd->vdev_top->vdev_islog)) { 3112 vdev_destroy_unlink_zap(vd, vd->vdev_leaf_zap, tx); 3113 vd->vdev_leaf_zap = 0; 3114 } 3115 3116 dmu_tx_commit(tx); 3117 return; 3118 } 3119 3120 if (vd->vdev_dtl_sm == NULL) { 3121 uint64_t new_object; 3122 3123 new_object = space_map_alloc(mos, zfs_vdev_dtl_sm_blksz, tx); 3124 VERIFY3U(new_object, !=, 0); 3125 3126 VERIFY0(space_map_open(&vd->vdev_dtl_sm, mos, new_object, 3127 0, -1ULL, 0)); 3128 ASSERT(vd->vdev_dtl_sm != NULL); 3129 } 3130 3131 rtsync = range_tree_create(NULL, RANGE_SEG64, NULL, 0, 0); 3132 3133 mutex_enter(&vd->vdev_dtl_lock); 3134 range_tree_walk(rt, range_tree_add, rtsync); 3135 mutex_exit(&vd->vdev_dtl_lock); 3136 3137 space_map_truncate(vd->vdev_dtl_sm, zfs_vdev_dtl_sm_blksz, tx); 3138 space_map_write(vd->vdev_dtl_sm, rtsync, SM_ALLOC, SM_NO_VDEVID, tx); 3139 range_tree_vacate(rtsync, NULL, NULL); 3140 3141 range_tree_destroy(rtsync); 3142 3143 /* 3144 * If the object for the space map has changed then dirty 3145 * the top level so that we update the config. 3146 */ 3147 if (object != space_map_object(vd->vdev_dtl_sm)) { 3148 vdev_dbgmsg(vd, "txg %llu, spa %s, DTL old object %llu, " 3149 "new object %llu", (u_longlong_t)txg, spa_name(spa), 3150 (u_longlong_t)object, 3151 (u_longlong_t)space_map_object(vd->vdev_dtl_sm)); 3152 vdev_config_dirty(vd->vdev_top); 3153 } 3154 3155 dmu_tx_commit(tx); 3156 } 3157 3158 /* 3159 * Determine whether the specified vdev can be offlined/detached/removed 3160 * without losing data. 3161 */ 3162 boolean_t 3163 vdev_dtl_required(vdev_t *vd) 3164 { 3165 spa_t *spa = vd->vdev_spa; 3166 vdev_t *tvd = vd->vdev_top; 3167 uint8_t cant_read = vd->vdev_cant_read; 3168 boolean_t required; 3169 3170 ASSERT(spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL); 3171 3172 if (vd == spa->spa_root_vdev || vd == tvd) 3173 return (B_TRUE); 3174 3175 /* 3176 * Temporarily mark the device as unreadable, and then determine 3177 * whether this results in any DTL outages in the top-level vdev. 3178 * If not, we can safely offline/detach/remove the device. 3179 */ 3180 vd->vdev_cant_read = B_TRUE; 3181 vdev_dtl_reassess(tvd, 0, 0, B_FALSE, B_FALSE); 3182 required = !vdev_dtl_empty(tvd, DTL_OUTAGE); 3183 vd->vdev_cant_read = cant_read; 3184 vdev_dtl_reassess(tvd, 0, 0, B_FALSE, B_FALSE); 3185 3186 if (!required && zio_injection_enabled) { 3187 required = !!zio_handle_device_injection(vd, NULL, 3188 SET_ERROR(ECHILD)); 3189 } 3190 3191 return (required); 3192 } 3193 3194 /* 3195 * Determine if resilver is needed, and if so the txg range. 
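 * For example, in a mirror whose two children report DTL ranges of
 * (100, 200) and (150, 300), the recursion below merges these into a
 * single resilver range of (100, 300).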
3196 */ 3197 boolean_t 3198 vdev_resilver_needed(vdev_t *vd, uint64_t *minp, uint64_t *maxp) 3199 { 3200 boolean_t needed = B_FALSE; 3201 uint64_t thismin = UINT64_MAX; 3202 uint64_t thismax = 0; 3203 3204 if (vd->vdev_children == 0) { 3205 mutex_enter(&vd->vdev_dtl_lock); 3206 if (!range_tree_is_empty(vd->vdev_dtl[DTL_MISSING]) && 3207 vdev_writeable(vd)) { 3208 3209 thismin = vdev_dtl_min(vd); 3210 thismax = vdev_dtl_max(vd); 3211 needed = B_TRUE; 3212 } 3213 mutex_exit(&vd->vdev_dtl_lock); 3214 } else { 3215 for (int c = 0; c < vd->vdev_children; c++) { 3216 vdev_t *cvd = vd->vdev_child[c]; 3217 uint64_t cmin, cmax; 3218 3219 if (vdev_resilver_needed(cvd, &cmin, &cmax)) { 3220 thismin = MIN(thismin, cmin); 3221 thismax = MAX(thismax, cmax); 3222 needed = B_TRUE; 3223 } 3224 } 3225 } 3226 3227 if (needed && minp) { 3228 *minp = thismin; 3229 *maxp = thismax; 3230 } 3231 return (needed); 3232 } 3233 3234 /* 3235 * Gets the checkpoint space map object from the vdev's ZAP. On success sm_obj 3236 * will contain either the checkpoint spacemap object or zero if none exists. 3237 * All other errors are returned to the caller. 3238 */ 3239 int 3240 vdev_checkpoint_sm_object(vdev_t *vd, uint64_t *sm_obj) 3241 { 3242 ASSERT0(spa_config_held(vd->vdev_spa, SCL_ALL, RW_WRITER)); 3243 3244 if (vd->vdev_top_zap == 0) { 3245 *sm_obj = 0; 3246 return (0); 3247 } 3248 3249 int error = zap_lookup(spa_meta_objset(vd->vdev_spa), vd->vdev_top_zap, 3250 VDEV_TOP_ZAP_POOL_CHECKPOINT_SM, sizeof (uint64_t), 1, sm_obj); 3251 if (error == ENOENT) { 3252 *sm_obj = 0; 3253 error = 0; 3254 } 3255 3256 return (error); 3257 } 3258 3259 int 3260 vdev_load(vdev_t *vd) 3261 { 3262 int error = 0; 3263 3264 /* 3265 * Recursively load all children. 3266 * TODO: parallelize. 3267 */ 3268 for (int c = 0; c < vd->vdev_children; c++) { 3269 error = vdev_load(vd->vdev_child[c]); 3270 if (error != 0) { 3271 return (error); 3272 } 3273 } 3274 3275 vdev_set_deflate_ratio(vd); 3276 3277 /* 3278 * On spa_load path, grab the allocation bias from our zap 3279 */ 3280 if (vd == vd->vdev_top && vd->vdev_top_zap != 0) { 3281 spa_t *spa = vd->vdev_spa; 3282 char bias_str[64]; 3283 3284 error = zap_lookup(spa->spa_meta_objset, vd->vdev_top_zap, 3285 VDEV_TOP_ZAP_ALLOCATION_BIAS, 1, sizeof (bias_str), 3286 bias_str); 3287 if (error == 0) { 3288 ASSERT(vd->vdev_alloc_bias == VDEV_BIAS_NONE); 3289 vd->vdev_alloc_bias = vdev_derive_alloc_bias(bias_str); 3290 } else if (error != ENOENT) { 3291 vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN, 3292 VDEV_AUX_CORRUPT_DATA); 3293 vdev_dbgmsg(vd, "vdev_load: zap_lookup(top_zap=%llu) " 3294 "failed [error=%d]", vd->vdev_top_zap, error); 3295 return (error); 3296 } 3297 } 3298 3299 /* 3300 * Load any rebuild state from the top-level vdev zap. 3301 */ 3302 if (vd == vd->vdev_top && vd->vdev_top_zap != 0) { 3303 error = vdev_rebuild_load(vd); 3304 if (error && error != ENOTSUP) { 3305 vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN, 3306 VDEV_AUX_CORRUPT_DATA); 3307 vdev_dbgmsg(vd, "vdev_load: vdev_rebuild_load " 3308 "failed [error=%d]", error); 3309 return (error); 3310 } 3311 } 3312 3313 /* 3314 * If this is a top-level vdev, initialize its metaslabs. 3315 */ 3316 if (vd == vd->vdev_top && vdev_is_concrete(vd)) { 3317 vdev_metaslab_group_create(vd); 3318 3319 if (vd->vdev_ashift == 0 || vd->vdev_asize == 0) { 3320 vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN, 3321 VDEV_AUX_CORRUPT_DATA); 3322 vdev_dbgmsg(vd, "vdev_load: invalid size. 
ashift=%llu, " 3323 "asize=%llu", (u_longlong_t)vd->vdev_ashift, 3324 (u_longlong_t)vd->vdev_asize); 3325 return (SET_ERROR(ENXIO)); 3326 } 3327 3328 error = vdev_metaslab_init(vd, 0); 3329 if (error != 0) { 3330 vdev_dbgmsg(vd, "vdev_load: metaslab_init failed " 3331 "[error=%d]", error); 3332 vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN, 3333 VDEV_AUX_CORRUPT_DATA); 3334 return (error); 3335 } 3336 3337 uint64_t checkpoint_sm_obj; 3338 error = vdev_checkpoint_sm_object(vd, &checkpoint_sm_obj); 3339 if (error == 0 && checkpoint_sm_obj != 0) { 3340 objset_t *mos = spa_meta_objset(vd->vdev_spa); 3341 ASSERT(vd->vdev_asize != 0); 3342 ASSERT3P(vd->vdev_checkpoint_sm, ==, NULL); 3343 3344 error = space_map_open(&vd->vdev_checkpoint_sm, 3345 mos, checkpoint_sm_obj, 0, vd->vdev_asize, 3346 vd->vdev_ashift); 3347 if (error != 0) { 3348 vdev_dbgmsg(vd, "vdev_load: space_map_open " 3349 "failed for checkpoint spacemap (obj %llu) " 3350 "[error=%d]", 3351 (u_longlong_t)checkpoint_sm_obj, error); 3352 return (error); 3353 } 3354 ASSERT3P(vd->vdev_checkpoint_sm, !=, NULL); 3355 3356 /* 3357 * Since the checkpoint_sm contains free entries 3358 * exclusively, we can use space_map_allocated() to 3359 * indicate the cumulative checkpointed space that 3360 * has been freed. 3361 */ 3362 vd->vdev_stat.vs_checkpoint_space = 3363 -space_map_allocated(vd->vdev_checkpoint_sm); 3364 vd->vdev_spa->spa_checkpoint_info.sci_dspace += 3365 vd->vdev_stat.vs_checkpoint_space; 3366 } else if (error != 0) { 3367 vdev_dbgmsg(vd, "vdev_load: failed to retrieve " 3368 "checkpoint space map object from vdev ZAP " 3369 "[error=%d]", error); 3370 return (error); 3371 } 3372 } 3373 3374 /* 3375 * If this is a leaf vdev, load its DTL. 3376 */ 3377 if (vd->vdev_ops->vdev_op_leaf && (error = vdev_dtl_load(vd)) != 0) { 3378 vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN, 3379 VDEV_AUX_CORRUPT_DATA); 3380 vdev_dbgmsg(vd, "vdev_load: vdev_dtl_load failed " 3381 "[error=%d]", error); 3382 return (error); 3383 } 3384 3385 uint64_t obsolete_sm_object; 3386 error = vdev_obsolete_sm_object(vd, &obsolete_sm_object); 3387 if (error == 0 && obsolete_sm_object != 0) { 3388 objset_t *mos = vd->vdev_spa->spa_meta_objset; 3389 ASSERT(vd->vdev_asize != 0); 3390 ASSERT3P(vd->vdev_obsolete_sm, ==, NULL); 3391 3392 if ((error = space_map_open(&vd->vdev_obsolete_sm, mos, 3393 obsolete_sm_object, 0, vd->vdev_asize, 0))) { 3394 vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN, 3395 VDEV_AUX_CORRUPT_DATA); 3396 vdev_dbgmsg(vd, "vdev_load: space_map_open failed for " 3397 "obsolete spacemap (obj %llu) [error=%d]", 3398 (u_longlong_t)obsolete_sm_object, error); 3399 return (error); 3400 } 3401 } else if (error != 0) { 3402 vdev_dbgmsg(vd, "vdev_load: failed to retrieve obsolete " 3403 "space map object from vdev ZAP [error=%d]", error); 3404 return (error); 3405 } 3406 3407 return (0); 3408 } 3409 3410 /* 3411 * The special vdev case is used for hot spares and l2cache devices. Its 3412 * sole purpose is to set the vdev state for the associated vdev. To do this, 3413 * we make sure that we can open the underlying device, then try to read the 3414 * label, and make sure that the label is sane and that it hasn't been 3415 * repurposed to another pool.
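 * For example, if the disk has since been wiped and added to some other
 * pool as a regular vdev, its label will typically carry a different
 * vdev guid (and possibly an unsupported version), so the checks below
 * flag it as VDEV_AUX_CORRUPT_DATA instead of presenting it as a usable
 * spare or cache device.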
3416 */ 3417 int 3418 vdev_validate_aux(vdev_t *vd) 3419 { 3420 nvlist_t *label; 3421 uint64_t guid, version; 3422 uint64_t state; 3423 3424 if (!vdev_readable(vd)) 3425 return (0); 3426 3427 if ((label = vdev_label_read_config(vd, -1ULL)) == NULL) { 3428 vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN, 3429 VDEV_AUX_CORRUPT_DATA); 3430 return (-1); 3431 } 3432 3433 if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_VERSION, &version) != 0 || 3434 !SPA_VERSION_IS_SUPPORTED(version) || 3435 nvlist_lookup_uint64(label, ZPOOL_CONFIG_GUID, &guid) != 0 || 3436 guid != vd->vdev_guid || 3437 nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_STATE, &state) != 0) { 3438 vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN, 3439 VDEV_AUX_CORRUPT_DATA); 3440 nvlist_free(label); 3441 return (-1); 3442 } 3443 3444 /* 3445 * We don't actually check the pool state here. If it's in fact in 3446 * use by another pool, we update this fact on the fly when requested. 3447 */ 3448 nvlist_free(label); 3449 return (0); 3450 } 3451 3452 static void 3453 vdev_destroy_ms_flush_data(vdev_t *vd, dmu_tx_t *tx) 3454 { 3455 objset_t *mos = spa_meta_objset(vd->vdev_spa); 3456 3457 if (vd->vdev_top_zap == 0) 3458 return; 3459 3460 uint64_t object = 0; 3461 int err = zap_lookup(mos, vd->vdev_top_zap, 3462 VDEV_TOP_ZAP_MS_UNFLUSHED_PHYS_TXGS, sizeof (uint64_t), 1, &object); 3463 if (err == ENOENT) 3464 return; 3465 VERIFY0(err); 3466 3467 VERIFY0(dmu_object_free(mos, object, tx)); 3468 VERIFY0(zap_remove(mos, vd->vdev_top_zap, 3469 VDEV_TOP_ZAP_MS_UNFLUSHED_PHYS_TXGS, tx)); 3470 } 3471 3472 /* 3473 * Free the objects used to store this vdev's spacemaps, and the array 3474 * that points to them. 3475 */ 3476 void 3477 vdev_destroy_spacemaps(vdev_t *vd, dmu_tx_t *tx) 3478 { 3479 if (vd->vdev_ms_array == 0) 3480 return; 3481 3482 objset_t *mos = vd->vdev_spa->spa_meta_objset; 3483 uint64_t array_count = vd->vdev_asize >> vd->vdev_ms_shift; 3484 size_t array_bytes = array_count * sizeof (uint64_t); 3485 uint64_t *smobj_array = kmem_alloc(array_bytes, KM_SLEEP); 3486 VERIFY0(dmu_read(mos, vd->vdev_ms_array, 0, 3487 array_bytes, smobj_array, 0)); 3488 3489 for (uint64_t i = 0; i < array_count; i++) { 3490 uint64_t smobj = smobj_array[i]; 3491 if (smobj == 0) 3492 continue; 3493 3494 space_map_free_obj(mos, smobj, tx); 3495 } 3496 3497 kmem_free(smobj_array, array_bytes); 3498 VERIFY0(dmu_object_free(mos, vd->vdev_ms_array, tx)); 3499 vdev_destroy_ms_flush_data(vd, tx); 3500 vd->vdev_ms_array = 0; 3501 } 3502 3503 static void 3504 vdev_remove_empty_log(vdev_t *vd, uint64_t txg) 3505 { 3506 spa_t *spa = vd->vdev_spa; 3507 3508 ASSERT(vd->vdev_islog); 3509 ASSERT(vd == vd->vdev_top); 3510 ASSERT3U(txg, ==, spa_syncing_txg(spa)); 3511 3512 dmu_tx_t *tx = dmu_tx_create_assigned(spa_get_dsl(spa), txg); 3513 3514 vdev_destroy_spacemaps(vd, tx); 3515 if (vd->vdev_top_zap != 0) { 3516 vdev_destroy_unlink_zap(vd, vd->vdev_top_zap, tx); 3517 vd->vdev_top_zap = 0; 3518 } 3519 3520 dmu_tx_commit(tx); 3521 } 3522 3523 void 3524 vdev_sync_done(vdev_t *vd, uint64_t txg) 3525 { 3526 metaslab_t *msp; 3527 boolean_t reassess = !txg_list_empty(&vd->vdev_ms_list, TXG_CLEAN(txg)); 3528 3529 ASSERT(vdev_is_concrete(vd)); 3530 3531 while ((msp = txg_list_remove(&vd->vdev_ms_list, TXG_CLEAN(txg))) 3532 != NULL) 3533 metaslab_sync_done(msp, txg); 3534 3535 if (reassess) 3536 metaslab_sync_reassess(vd->vdev_mg); 3537 } 3538 3539 void 3540 vdev_sync(vdev_t *vd, uint64_t txg) 3541 { 3542 spa_t *spa = vd->vdev_spa; 3543 vdev_t *lvd; 3544 metaslab_t *msp; 3545 3546 
ASSERT3U(txg, ==, spa->spa_syncing_txg); 3547 dmu_tx_t *tx = dmu_tx_create_assigned(spa->spa_dsl_pool, txg); 3548 if (range_tree_space(vd->vdev_obsolete_segments) > 0) { 3549 ASSERT(vd->vdev_removing || 3550 vd->vdev_ops == &vdev_indirect_ops); 3551 3552 vdev_indirect_sync_obsolete(vd, tx); 3553 3554 /* 3555 * If the vdev is indirect, it can't have dirty 3556 * metaslabs or DTLs. 3557 */ 3558 if (vd->vdev_ops == &vdev_indirect_ops) { 3559 ASSERT(txg_list_empty(&vd->vdev_ms_list, txg)); 3560 ASSERT(txg_list_empty(&vd->vdev_dtl_list, txg)); 3561 dmu_tx_commit(tx); 3562 return; 3563 } 3564 } 3565 3566 ASSERT(vdev_is_concrete(vd)); 3567 3568 if (vd->vdev_ms_array == 0 && vd->vdev_ms_shift != 0 && 3569 !vd->vdev_removing) { 3570 ASSERT(vd == vd->vdev_top); 3571 ASSERT0(vd->vdev_indirect_config.vic_mapping_object); 3572 vd->vdev_ms_array = dmu_object_alloc(spa->spa_meta_objset, 3573 DMU_OT_OBJECT_ARRAY, 0, DMU_OT_NONE, 0, tx); 3574 ASSERT(vd->vdev_ms_array != 0); 3575 vdev_config_dirty(vd); 3576 } 3577 3578 while ((msp = txg_list_remove(&vd->vdev_ms_list, txg)) != NULL) { 3579 metaslab_sync(msp, txg); 3580 (void) txg_list_add(&vd->vdev_ms_list, msp, TXG_CLEAN(txg)); 3581 } 3582 3583 while ((lvd = txg_list_remove(&vd->vdev_dtl_list, txg)) != NULL) 3584 vdev_dtl_sync(lvd, txg); 3585 3586 /* 3587 * If this is an empty log device being removed, destroy the 3588 * metadata associated with it. 3589 */ 3590 if (vd->vdev_islog && vd->vdev_stat.vs_alloc == 0 && vd->vdev_removing) 3591 vdev_remove_empty_log(vd, txg); 3592 3593 (void) txg_list_add(&spa->spa_vdev_txg_list, vd, TXG_CLEAN(txg)); 3594 dmu_tx_commit(tx); 3595 } 3596 3597 uint64_t 3598 vdev_psize_to_asize(vdev_t *vd, uint64_t psize) 3599 { 3600 return (vd->vdev_ops->vdev_op_asize(vd, psize)); 3601 } 3602 3603 /* 3604 * Mark the given vdev faulted. A faulted vdev behaves as if the device could 3605 * not be opened, and no I/O is attempted. 3606 */ 3607 int 3608 vdev_fault(spa_t *spa, uint64_t guid, vdev_aux_t aux) 3609 { 3610 vdev_t *vd, *tvd; 3611 3612 spa_vdev_state_enter(spa, SCL_NONE); 3613 3614 if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL) 3615 return (spa_vdev_state_exit(spa, NULL, SET_ERROR(ENODEV))); 3616 3617 if (!vd->vdev_ops->vdev_op_leaf) 3618 return (spa_vdev_state_exit(spa, NULL, SET_ERROR(ENOTSUP))); 3619 3620 tvd = vd->vdev_top; 3621 3622 /* 3623 * If user did a 'zpool offline -f' then make the fault persist across 3624 * reboots. 3625 */ 3626 if (aux == VDEV_AUX_EXTERNAL_PERSIST) { 3627 /* 3628 * There are two kinds of forced faults: temporary and 3629 * persistent. Temporary faults go away at pool import, while 3630 * persistent faults stay set. Both types of faults can be 3631 * cleared with a zpool clear. 3632 * 3633 * We tell if a vdev is persistently faulted by looking at the 3634 * ZPOOL_CONFIG_AUX_STATE nvpair. If it's set to "external" at 3635 * import then it's a persistent fault. Otherwise, it's 3636 * temporary. We get ZPOOL_CONFIG_AUX_STATE set to "external" 3637 * by setting vd.vdev_stat.vs_aux to VDEV_AUX_EXTERNAL. This 3638 * tells vdev_config_generate() (which gets run later) to set 3639 * ZPOOL_CONFIG_AUX_STATE to "external" in the nvlist. 3640 */ 3641 vd->vdev_stat.vs_aux = VDEV_AUX_EXTERNAL; 3642 vd->vdev_tmpoffline = B_FALSE; 3643 aux = VDEV_AUX_EXTERNAL; 3644 } else { 3645 vd->vdev_tmpoffline = B_TRUE; 3646 } 3647 3648 /* 3649 * We don't directly use the aux state here, but if we do a 3650 * vdev_reopen(), we need this value to be present to remember why we 3651 * were faulted. 
3652 */ 3653 vd->vdev_label_aux = aux; 3654 3655 /* 3656 * Faulted state takes precedence over degraded. 3657 */ 3658 vd->vdev_delayed_close = B_FALSE; 3659 vd->vdev_faulted = 1ULL; 3660 vd->vdev_degraded = 0ULL; 3661 vdev_set_state(vd, B_FALSE, VDEV_STATE_FAULTED, aux); 3662 3663 /* 3664 * If this device has the only valid copy of the data, then 3665 * back off and simply mark the vdev as degraded instead. 3666 */ 3667 if (!tvd->vdev_islog && vd->vdev_aux == NULL && vdev_dtl_required(vd)) { 3668 vd->vdev_degraded = 1ULL; 3669 vd->vdev_faulted = 0ULL; 3670 3671 /* 3672 * If we reopen the device and it's not dead, only then do we 3673 * mark it degraded. 3674 */ 3675 vdev_reopen(tvd); 3676 3677 if (vdev_readable(vd)) 3678 vdev_set_state(vd, B_FALSE, VDEV_STATE_DEGRADED, aux); 3679 } 3680 3681 return (spa_vdev_state_exit(spa, vd, 0)); 3682 } 3683 3684 /* 3685 * Mark the given vdev degraded. A degraded vdev is purely an indication to the 3686 * user that something is wrong. The vdev continues to operate as normal as far 3687 * as I/O is concerned. 3688 */ 3689 int 3690 vdev_degrade(spa_t *spa, uint64_t guid, vdev_aux_t aux) 3691 { 3692 vdev_t *vd; 3693 3694 spa_vdev_state_enter(spa, SCL_NONE); 3695 3696 if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL) 3697 return (spa_vdev_state_exit(spa, NULL, SET_ERROR(ENODEV))); 3698 3699 if (!vd->vdev_ops->vdev_op_leaf) 3700 return (spa_vdev_state_exit(spa, NULL, SET_ERROR(ENOTSUP))); 3701 3702 /* 3703 * If the vdev is already faulted, then don't do anything. 3704 */ 3705 if (vd->vdev_faulted || vd->vdev_degraded) 3706 return (spa_vdev_state_exit(spa, NULL, 0)); 3707 3708 vd->vdev_degraded = 1ULL; 3709 if (!vdev_is_dead(vd)) 3710 vdev_set_state(vd, B_FALSE, VDEV_STATE_DEGRADED, 3711 aux); 3712 3713 return (spa_vdev_state_exit(spa, vd, 0)); 3714 } 3715 3716 /* 3717 * Online the given vdev. 3718 * 3719 * If 'ZFS_ONLINE_UNSPARE' is set, it implies two things. First, any attached 3720 * spare device should be detached when the device finishes resilvering. 3721 * Second, the online should be treated like a 'test' online case, so no FMA 3722 * events are generated if the device fails to open. 
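 * For example, once a failed mirror child has been replaced by a hot
 * spare and subsequently repaired, onlining the original device with
 * ZFS_ONLINE_UNSPARE sets vdev_unspare below, so the spare is detached
 * and returned to the spare list when the device finishes resilvering.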
3723 */ 3724 int 3725 vdev_online(spa_t *spa, uint64_t guid, uint64_t flags, vdev_state_t *newstate) 3726 { 3727 vdev_t *vd, *tvd, *pvd, *rvd = spa->spa_root_vdev; 3728 boolean_t wasoffline; 3729 vdev_state_t oldstate; 3730 3731 spa_vdev_state_enter(spa, SCL_NONE); 3732 3733 if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL) 3734 return (spa_vdev_state_exit(spa, NULL, SET_ERROR(ENODEV))); 3735 3736 if (!vd->vdev_ops->vdev_op_leaf) 3737 return (spa_vdev_state_exit(spa, NULL, SET_ERROR(ENOTSUP))); 3738 3739 wasoffline = (vd->vdev_offline || vd->vdev_tmpoffline); 3740 oldstate = vd->vdev_state; 3741 3742 tvd = vd->vdev_top; 3743 vd->vdev_offline = B_FALSE; 3744 vd->vdev_tmpoffline = B_FALSE; 3745 vd->vdev_checkremove = !!(flags & ZFS_ONLINE_CHECKREMOVE); 3746 vd->vdev_forcefault = !!(flags & ZFS_ONLINE_FORCEFAULT); 3747 3748 /* XXX - L2ARC 1.0 does not support expansion */ 3749 if (!vd->vdev_aux) { 3750 for (pvd = vd; pvd != rvd; pvd = pvd->vdev_parent) 3751 pvd->vdev_expanding = !!((flags & ZFS_ONLINE_EXPAND) || 3752 spa->spa_autoexpand); 3753 vd->vdev_expansion_time = gethrestime_sec(); 3754 } 3755 3756 vdev_reopen(tvd); 3757 vd->vdev_checkremove = vd->vdev_forcefault = B_FALSE; 3758 3759 if (!vd->vdev_aux) { 3760 for (pvd = vd; pvd != rvd; pvd = pvd->vdev_parent) 3761 pvd->vdev_expanding = B_FALSE; 3762 } 3763 3764 if (newstate) 3765 *newstate = vd->vdev_state; 3766 if ((flags & ZFS_ONLINE_UNSPARE) && 3767 !vdev_is_dead(vd) && vd->vdev_parent && 3768 vd->vdev_parent->vdev_ops == &vdev_spare_ops && 3769 vd->vdev_parent->vdev_child[0] == vd) 3770 vd->vdev_unspare = B_TRUE; 3771 3772 if ((flags & ZFS_ONLINE_EXPAND) || spa->spa_autoexpand) { 3773 3774 /* XXX - L2ARC 1.0 does not support expansion */ 3775 if (vd->vdev_aux) 3776 return (spa_vdev_state_exit(spa, vd, ENOTSUP)); 3777 spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE); 3778 } 3779 3780 /* Restart initializing if necessary */ 3781 mutex_enter(&vd->vdev_initialize_lock); 3782 if (vdev_writeable(vd) && 3783 vd->vdev_initialize_thread == NULL && 3784 vd->vdev_initialize_state == VDEV_INITIALIZE_ACTIVE) { 3785 (void) vdev_initialize(vd); 3786 } 3787 mutex_exit(&vd->vdev_initialize_lock); 3788 3789 /* 3790 * Restart trimming if necessary. We do not restart trimming for cache 3791 * devices here. This is triggered by l2arc_rebuild_vdev() 3792 * asynchronously for the whole device or in l2arc_evict() as it evicts 3793 * space for upcoming writes. 
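 *
 * For example (illustrative): if a manual 'zpool trim' was active when
 * the device last closed, vdev_trim_state is still VDEV_TRIM_ACTIVE
 * while vdev_trim_thread is NULL, so the check below resumes the TRIM
 * where it left off.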
3794 */ 3795 mutex_enter(&vd->vdev_trim_lock); 3796 if (vdev_writeable(vd) && !vd->vdev_isl2cache && 3797 vd->vdev_trim_thread == NULL && 3798 vd->vdev_trim_state == VDEV_TRIM_ACTIVE) { 3799 (void) vdev_trim(vd, vd->vdev_trim_rate, vd->vdev_trim_partial, 3800 vd->vdev_trim_secure); 3801 } 3802 mutex_exit(&vd->vdev_trim_lock); 3803 3804 if (wasoffline || 3805 (oldstate < VDEV_STATE_DEGRADED && 3806 vd->vdev_state >= VDEV_STATE_DEGRADED)) 3807 spa_event_notify(spa, vd, NULL, ESC_ZFS_VDEV_ONLINE); 3808 3809 return (spa_vdev_state_exit(spa, vd, 0)); 3810 } 3811 3812 static int 3813 vdev_offline_locked(spa_t *spa, uint64_t guid, uint64_t flags) 3814 { 3815 vdev_t *vd, *tvd; 3816 int error = 0; 3817 uint64_t generation; 3818 metaslab_group_t *mg; 3819 3820 top: 3821 spa_vdev_state_enter(spa, SCL_ALLOC); 3822 3823 if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL) 3824 return (spa_vdev_state_exit(spa, NULL, SET_ERROR(ENODEV))); 3825 3826 if (!vd->vdev_ops->vdev_op_leaf) 3827 return (spa_vdev_state_exit(spa, NULL, SET_ERROR(ENOTSUP))); 3828 3829 if (vd->vdev_ops == &vdev_draid_spare_ops) 3830 return (spa_vdev_state_exit(spa, NULL, ENOTSUP)); 3831 3832 tvd = vd->vdev_top; 3833 mg = tvd->vdev_mg; 3834 generation = spa->spa_config_generation + 1; 3835 3836 /* 3837 * If the device isn't already offline, try to offline it. 3838 */ 3839 if (!vd->vdev_offline) { 3840 /* 3841 * If this device has the only valid copy of some data, 3842 * don't allow it to be offlined. Log devices are always 3843 * expendable. 3844 */ 3845 if (!tvd->vdev_islog && vd->vdev_aux == NULL && 3846 vdev_dtl_required(vd)) 3847 return (spa_vdev_state_exit(spa, NULL, 3848 SET_ERROR(EBUSY))); 3849 3850 /* 3851 * If the top-level is a slog and it has had allocations 3852 * then proceed. We check that the vdev's metaslab group 3853 * is not NULL since it's possible that we may have just 3854 * added this vdev but not yet initialized its metaslabs. 3855 */ 3856 if (tvd->vdev_islog && mg != NULL) { 3857 /* 3858 * Prevent any future allocations. 3859 */ 3860 metaslab_group_passivate(mg); 3861 (void) spa_vdev_state_exit(spa, vd, 0); 3862 3863 error = spa_reset_logs(spa); 3864 3865 /* 3866 * If the log device was successfully reset but has 3867 * checkpointed data, do not offline it. 3868 */ 3869 if (error == 0 && 3870 tvd->vdev_checkpoint_sm != NULL) { 3871 ASSERT3U(space_map_allocated( 3872 tvd->vdev_checkpoint_sm), !=, 0); 3873 error = ZFS_ERR_CHECKPOINT_EXISTS; 3874 } 3875 3876 spa_vdev_state_enter(spa, SCL_ALLOC); 3877 3878 /* 3879 * Check to see if the config has changed. 3880 */ 3881 if (error || generation != spa->spa_config_generation) { 3882 metaslab_group_activate(mg); 3883 if (error) 3884 return (spa_vdev_state_exit(spa, 3885 vd, error)); 3886 (void) spa_vdev_state_exit(spa, vd, 0); 3887 goto top; 3888 } 3889 ASSERT0(tvd->vdev_stat.vs_alloc); 3890 } 3891 3892 /* 3893 * Offline this device and reopen its top-level vdev. 3894 * If the top-level vdev is a log device then just offline 3895 * it. Otherwise, if this action results in the top-level 3896 * vdev becoming unusable, undo it and fail the request. 3897 */ 3898 vd->vdev_offline = B_TRUE; 3899 vdev_reopen(tvd); 3900 3901 if (!tvd->vdev_islog && vd->vdev_aux == NULL && 3902 vdev_is_dead(tvd)) { 3903 vd->vdev_offline = B_FALSE; 3904 vdev_reopen(tvd); 3905 return (spa_vdev_state_exit(spa, NULL, 3906 SET_ERROR(EBUSY))); 3907 } 3908 3909 /* 3910 * Add the device back into the metaslab rotor so that 3911 * once we online the device it's open for business. 
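 *
 * Illustrative note: the temporary flavor of this operation is what
 * 'zpool offline -t tank sda' requests; it sets ZFS_OFFLINE_TEMPORARY,
 * recorded in vdev_tmpoffline below, so the device reverts to its
 * previous state on reboot.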
3912 */ 3913 if (tvd->vdev_islog && mg != NULL) 3914 metaslab_group_activate(mg); 3915 } 3916 3917 vd->vdev_tmpoffline = !!(flags & ZFS_OFFLINE_TEMPORARY); 3918 3919 return (spa_vdev_state_exit(spa, vd, 0)); 3920 } 3921 3922 int 3923 vdev_offline(spa_t *spa, uint64_t guid, uint64_t flags) 3924 { 3925 int error; 3926 3927 mutex_enter(&spa->spa_vdev_top_lock); 3928 error = vdev_offline_locked(spa, guid, flags); 3929 mutex_exit(&spa->spa_vdev_top_lock); 3930 3931 return (error); 3932 } 3933 3934 /* 3935 * Clear the error counts associated with this vdev. Unlike vdev_online() and 3936 * vdev_offline(), we assume the spa config is locked. We also clear all 3937 * children. If 'vd' is NULL, then the user wants to clear all vdevs. 3938 */ 3939 void 3940 vdev_clear(spa_t *spa, vdev_t *vd) 3941 { 3942 vdev_t *rvd = spa->spa_root_vdev; 3943 3944 ASSERT(spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL); 3945 3946 if (vd == NULL) 3947 vd = rvd; 3948 3949 vd->vdev_stat.vs_read_errors = 0; 3950 vd->vdev_stat.vs_write_errors = 0; 3951 vd->vdev_stat.vs_checksum_errors = 0; 3952 vd->vdev_stat.vs_slow_ios = 0; 3953 3954 for (int c = 0; c < vd->vdev_children; c++) 3955 vdev_clear(spa, vd->vdev_child[c]); 3956 3957 /* 3958 * It makes no sense to "clear" an indirect vdev. 3959 */ 3960 if (!vdev_is_concrete(vd)) 3961 return; 3962 3963 /* 3964 * If we're in the FAULTED state or have experienced failed I/O, then 3965 * clear the persistent state and attempt to reopen the device. We 3966 * also mark the vdev config dirty, so that the updated state is 3967 * written out to disk. 3968 */ 3969 if (vd->vdev_faulted || vd->vdev_degraded || 3970 !vdev_readable(vd) || !vdev_writeable(vd)) { 3971 /* 3972 * When reopening in response to a clear event, it may be due to 3973 * an fmadm repair request. In this case, if the device is 3974 * still broken, we still want to post the ereport again. 3975 */ 3976 vd->vdev_forcefault = B_TRUE; 3977 3978 vd->vdev_faulted = vd->vdev_degraded = 0ULL; 3979 vd->vdev_cant_read = B_FALSE; 3980 vd->vdev_cant_write = B_FALSE; 3981 vd->vdev_stat.vs_aux = 0; 3982 3983 vdev_reopen(vd == rvd ? rvd : vd->vdev_top); 3984 3985 vd->vdev_forcefault = B_FALSE; 3986 3987 if (vd != rvd && vdev_writeable(vd->vdev_top)) 3988 vdev_state_dirty(vd->vdev_top); 3989 3990 /* If a resilver isn't required, check if vdevs can be culled */ 3991 if (vd->vdev_aux == NULL && !vdev_is_dead(vd) && 3992 !dsl_scan_resilvering(spa->spa_dsl_pool) && 3993 !dsl_scan_resilver_scheduled(spa->spa_dsl_pool)) 3994 spa_async_request(spa, SPA_ASYNC_RESILVER_DONE); 3995 3996 spa_event_notify(spa, vd, NULL, ESC_ZFS_VDEV_CLEAR); 3997 } 3998 3999 /* 4000 * When clearing a FMA-diagnosed fault, we always want to 4001 * unspare the device, as we assume that the original sparing was 4002 * done in response to the FMA fault. 4003 */ 4004 if (!vdev_is_dead(vd) && vd->vdev_parent != NULL && 4005 vd->vdev_parent->vdev_ops == &vdev_spare_ops && 4006 vd->vdev_parent->vdev_child[0] == vd) 4007 vd->vdev_unspare = B_TRUE; 4008 } 4009 4010 boolean_t 4011 vdev_is_dead(vdev_t *vd) 4012 { 4013 /* 4014 * Holes and missing devices are always considered "dead". 4015 * This simplifies the code since we don't have to check for 4016 * these types of devices in the various code paths. 4017 * Instead we rely on the fact that we skip over dead devices 4018 * before issuing I/O to them.
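 *
 * For instance, CLOSED, OFFLINE, REMOVED, CANT_OPEN and FAULTED all
 * order below VDEV_STATE_DEGRADED, so each of them reads as "dead" to
 * the comparison below.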
4019 */ 4020 return (vd->vdev_state < VDEV_STATE_DEGRADED || 4021 vd->vdev_ops == &vdev_hole_ops || 4022 vd->vdev_ops == &vdev_missing_ops); 4023 } 4024 4025 boolean_t 4026 vdev_readable(vdev_t *vd) 4027 { 4028 return (!vdev_is_dead(vd) && !vd->vdev_cant_read); 4029 } 4030 4031 boolean_t 4032 vdev_writeable(vdev_t *vd) 4033 { 4034 return (!vdev_is_dead(vd) && !vd->vdev_cant_write && 4035 vdev_is_concrete(vd)); 4036 } 4037 4038 boolean_t 4039 vdev_allocatable(vdev_t *vd) 4040 { 4041 uint64_t state = vd->vdev_state; 4042 4043 /* 4044 * We currently allow allocations from vdevs which may be in the 4045 * process of reopening (i.e. VDEV_STATE_CLOSED). If the device 4046 * fails to reopen then we'll catch it later when we're holding 4047 * the proper locks. Note that we have to get the vdev state 4048 * in a local variable because although it changes atomically, 4049 * we're asking two separate questions about it. 4050 */ 4051 return (!(state < VDEV_STATE_DEGRADED && state != VDEV_STATE_CLOSED) && 4052 !vd->vdev_cant_write && vdev_is_concrete(vd) && 4053 vd->vdev_mg->mg_initialized); 4054 } 4055 4056 boolean_t 4057 vdev_accessible(vdev_t *vd, zio_t *zio) 4058 { 4059 ASSERT(zio->io_vd == vd); 4060 4061 if (vdev_is_dead(vd) || vd->vdev_remove_wanted) 4062 return (B_FALSE); 4063 4064 if (zio->io_type == ZIO_TYPE_READ) 4065 return (!vd->vdev_cant_read); 4066 4067 if (zio->io_type == ZIO_TYPE_WRITE) 4068 return (!vd->vdev_cant_write); 4069 4070 return (B_TRUE); 4071 } 4072 4073 static void 4074 vdev_get_child_stat(vdev_t *cvd, vdev_stat_t *vs, vdev_stat_t *cvs) 4075 { 4076 /* 4077 * Exclude the dRAID spare when aggregating to avoid double counting 4078 * the ops and bytes. These IOs are counted by the physical leaves. 4079 */ 4080 if (cvd->vdev_ops == &vdev_draid_spare_ops) 4081 return; 4082 4083 for (int t = 0; t < VS_ZIO_TYPES; t++) { 4084 vs->vs_ops[t] += cvs->vs_ops[t]; 4085 vs->vs_bytes[t] += cvs->vs_bytes[t]; 4086 } 4087 4088 cvs->vs_scan_removing = cvd->vdev_removing; 4089 } 4090 4091 /* 4092 * Get extended stats 4093 */ 4094 static void 4095 vdev_get_child_stat_ex(vdev_t *cvd, vdev_stat_ex_t *vsx, vdev_stat_ex_t *cvsx) 4096 { 4097 int t, b; 4098 for (t = 0; t < ZIO_TYPES; t++) { 4099 for (b = 0; b < ARRAY_SIZE(vsx->vsx_disk_histo[0]); b++) 4100 vsx->vsx_disk_histo[t][b] += cvsx->vsx_disk_histo[t][b]; 4101 4102 for (b = 0; b < ARRAY_SIZE(vsx->vsx_total_histo[0]); b++) { 4103 vsx->vsx_total_histo[t][b] += 4104 cvsx->vsx_total_histo[t][b]; 4105 } 4106 } 4107 4108 for (t = 0; t < ZIO_PRIORITY_NUM_QUEUEABLE; t++) { 4109 for (b = 0; b < ARRAY_SIZE(vsx->vsx_queue_histo[0]); b++) { 4110 vsx->vsx_queue_histo[t][b] += 4111 cvsx->vsx_queue_histo[t][b]; 4112 } 4113 vsx->vsx_active_queue[t] += cvsx->vsx_active_queue[t]; 4114 vsx->vsx_pend_queue[t] += cvsx->vsx_pend_queue[t]; 4115 4116 for (b = 0; b < ARRAY_SIZE(vsx->vsx_ind_histo[0]); b++) 4117 vsx->vsx_ind_histo[t][b] += cvsx->vsx_ind_histo[t][b]; 4118 4119 for (b = 0; b < ARRAY_SIZE(vsx->vsx_agg_histo[0]); b++) 4120 vsx->vsx_agg_histo[t][b] += cvsx->vsx_agg_histo[t][b]; 4121 } 4122 4123 } 4124 4125 boolean_t 4126 vdev_is_spacemap_addressable(vdev_t *vd) 4127 { 4128 if (spa_feature_is_active(vd->vdev_spa, SPA_FEATURE_SPACEMAP_V2)) 4129 return (B_TRUE); 4130 4131 /* 4132 * If double-word space map entries are not enabled we assume 4133 * 47 bits of the space map entry are dedicated to the entry's 4134 * offset (see SM_OFFSET_BITS in space_map.h). 
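(Illustrative worked
 * example: with ashift=9 that limit works out to 1ULL << (9 + 47)
 * bytes, i.e. 64 PiB per device.)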
We then use that 4135 * to calculate the maximum address that can be described by a 4136 * space map entry for the given device. 4137 */ 4138 uint64_t shift = vd->vdev_ashift + SM_OFFSET_BITS; 4139 4140 if (shift >= 63) /* detect potential overflow */ 4141 return (B_TRUE); 4142 4143 return (vd->vdev_asize < (1ULL << shift)); 4144 } 4145 4146 /* 4147 * Get statistics for the given vdev. 4148 */ 4149 static void 4150 vdev_get_stats_ex_impl(vdev_t *vd, vdev_stat_t *vs, vdev_stat_ex_t *vsx) 4151 { 4152 int t; 4153 /* 4154 * If we're getting stats on the root vdev, aggregate the I/O counts 4155 * over all top-level vdevs (i.e. the direct children of the root). 4156 */ 4157 if (!vd->vdev_ops->vdev_op_leaf) { 4158 if (vs) { 4159 memset(vs->vs_ops, 0, sizeof (vs->vs_ops)); 4160 memset(vs->vs_bytes, 0, sizeof (vs->vs_bytes)); 4161 } 4162 if (vsx) 4163 memset(vsx, 0, sizeof (*vsx)); 4164 4165 for (int c = 0; c < vd->vdev_children; c++) { 4166 vdev_t *cvd = vd->vdev_child[c]; 4167 vdev_stat_t *cvs = &cvd->vdev_stat; 4168 vdev_stat_ex_t *cvsx = &cvd->vdev_stat_ex; 4169 4170 vdev_get_stats_ex_impl(cvd, cvs, cvsx); 4171 if (vs) 4172 vdev_get_child_stat(cvd, vs, cvs); 4173 if (vsx) 4174 vdev_get_child_stat_ex(cvd, vsx, cvsx); 4175 } 4176 } else { 4177 /* 4178 * We're a leaf. Just copy our ZIO active queue stats in. The 4179 * other leaf stats are updated in vdev_stat_update(). 4180 */ 4181 if (!vsx) 4182 return; 4183 4184 memcpy(vsx, &vd->vdev_stat_ex, sizeof (vd->vdev_stat_ex)); 4185 4186 for (t = 0; t < ARRAY_SIZE(vd->vdev_queue.vq_class); t++) { 4187 vsx->vsx_active_queue[t] = 4188 vd->vdev_queue.vq_class[t].vqc_active; 4189 vsx->vsx_pend_queue[t] = avl_numnodes( 4190 &vd->vdev_queue.vq_class[t].vqc_queued_tree); 4191 } 4192 } 4193 } 4194 4195 void 4196 vdev_get_stats_ex(vdev_t *vd, vdev_stat_t *vs, vdev_stat_ex_t *vsx) 4197 { 4198 vdev_t *tvd = vd->vdev_top; 4199 mutex_enter(&vd->vdev_stat_lock); 4200 if (vs) { 4201 bcopy(&vd->vdev_stat, vs, sizeof (*vs)); 4202 vs->vs_timestamp = gethrtime() - vs->vs_timestamp; 4203 vs->vs_state = vd->vdev_state; 4204 vs->vs_rsize = vdev_get_min_asize(vd); 4205 4206 if (vd->vdev_ops->vdev_op_leaf) { 4207 vs->vs_rsize += VDEV_LABEL_START_SIZE + 4208 VDEV_LABEL_END_SIZE; 4209 /* 4210 * Report initializing progress. Since we don't 4211 * have the initializing locks held, this is only 4212 * an estimate (although a fairly accurate one). 4213 */ 4214 vs->vs_initialize_bytes_done = 4215 vd->vdev_initialize_bytes_done; 4216 vs->vs_initialize_bytes_est = 4217 vd->vdev_initialize_bytes_est; 4218 vs->vs_initialize_state = vd->vdev_initialize_state; 4219 vs->vs_initialize_action_time = 4220 vd->vdev_initialize_action_time; 4221 4222 /* 4223 * Report manual TRIM progress. Since we don't have 4224 * the manual TRIM locks held, this is only an 4225 * estimate (although a fairly accurate one). 4226 */ 4227 vs->vs_trim_notsup = !vd->vdev_has_trim; 4228 vs->vs_trim_bytes_done = vd->vdev_trim_bytes_done; 4229 vs->vs_trim_bytes_est = vd->vdev_trim_bytes_est; 4230 vs->vs_trim_state = vd->vdev_trim_state; 4231 vs->vs_trim_action_time = vd->vdev_trim_action_time; 4232 4233 /* Set when there is a deferred resilver. */ 4234 vs->vs_resilver_deferred = vd->vdev_resilver_deferred; 4235 } 4236 4237 /* 4238 * Report expandable space on top-level, non-auxiliary devices 4239 * only. The expandable space is reported in terms of metaslab- 4240 * sized units since that determines how much space the pool 4241 * can expand.
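 *
 * Worked example (illustrative numbers): with vdev_ms_shift == 34
 * (16G metaslabs) and 40G of device growth past vdev_asize, the
 * P2ALIGN() below reports vs_esize == 32G, i.e. exactly two more
 * metaslabs' worth.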
4242 */ 4243 if (vd->vdev_aux == NULL && tvd != NULL) { 4244 vs->vs_esize = P2ALIGN( 4245 vd->vdev_max_asize - vd->vdev_asize, 4246 1ULL << tvd->vdev_ms_shift); 4247 } 4248 4249 vs->vs_configured_ashift = vd->vdev_top != NULL 4250 ? vd->vdev_top->vdev_ashift : vd->vdev_ashift; 4251 vs->vs_logical_ashift = vd->vdev_logical_ashift; 4252 vs->vs_physical_ashift = vd->vdev_physical_ashift; 4253 4254 /* 4255 * Report fragmentation and rebuild progress for top-level, 4256 * non-auxiliary, concrete devices. 4257 */ 4258 if (vd->vdev_aux == NULL && vd == vd->vdev_top && 4259 vdev_is_concrete(vd)) { 4260 vs->vs_fragmentation = (vd->vdev_mg != NULL) ? 4261 vd->vdev_mg->mg_fragmentation : 0; 4262 } 4263 } 4264 4265 vdev_get_stats_ex_impl(vd, vs, vsx); 4266 mutex_exit(&vd->vdev_stat_lock); 4267 } 4268 4269 void 4270 vdev_get_stats(vdev_t *vd, vdev_stat_t *vs) 4271 { 4272 return (vdev_get_stats_ex(vd, vs, NULL)); 4273 } 4274 4275 void 4276 vdev_clear_stats(vdev_t *vd) 4277 { 4278 mutex_enter(&vd->vdev_stat_lock); 4279 vd->vdev_stat.vs_space = 0; 4280 vd->vdev_stat.vs_dspace = 0; 4281 vd->vdev_stat.vs_alloc = 0; 4282 mutex_exit(&vd->vdev_stat_lock); 4283 } 4284 4285 void 4286 vdev_scan_stat_init(vdev_t *vd) 4287 { 4288 vdev_stat_t *vs = &vd->vdev_stat; 4289 4290 for (int c = 0; c < vd->vdev_children; c++) 4291 vdev_scan_stat_init(vd->vdev_child[c]); 4292 4293 mutex_enter(&vd->vdev_stat_lock); 4294 vs->vs_scan_processed = 0; 4295 mutex_exit(&vd->vdev_stat_lock); 4296 } 4297 4298 void 4299 vdev_stat_update(zio_t *zio, uint64_t psize) 4300 { 4301 spa_t *spa = zio->io_spa; 4302 vdev_t *rvd = spa->spa_root_vdev; 4303 vdev_t *vd = zio->io_vd ? zio->io_vd : rvd; 4304 vdev_t *pvd; 4305 uint64_t txg = zio->io_txg; 4306 vdev_stat_t *vs = &vd->vdev_stat; 4307 vdev_stat_ex_t *vsx = &vd->vdev_stat_ex; 4308 zio_type_t type = zio->io_type; 4309 int flags = zio->io_flags; 4310 4311 /* 4312 * If this i/o is a gang leader, it didn't do any actual work. 4313 */ 4314 if (zio->io_gang_tree) 4315 return; 4316 4317 if (zio->io_error == 0) { 4318 /* 4319 * If this is a root i/o, don't count it -- we've already 4320 * counted the top-level vdevs, and vdev_get_stats() will 4321 * aggregate them when asked. This reduces contention on 4322 * the root vdev_stat_lock and implicitly handles blocks 4323 * that compress away to holes, for which there is no i/o. 4324 * (Holes never create vdev children, so all the counters 4325 * remain zero, which is what we want.) 4326 * 4327 * Note: this only applies to successful i/o (io_error == 0) 4328 * because unlike i/o counts, errors are not additive. 4329 * When reading a ditto block, for example, failure of 4330 * one top-level vdev does not imply a root-level error. 4331 */ 4332 if (vd == rvd) 4333 return; 4334 4335 ASSERT(vd == zio->io_vd); 4336 4337 if (flags & ZIO_FLAG_IO_BYPASS) 4338 return; 4339 4340 mutex_enter(&vd->vdev_stat_lock); 4341 4342 if (flags & ZIO_FLAG_IO_REPAIR) { 4343 /* 4344 * Repair is the result of a resilver issued by the 4345 * scan thread (spa_sync). 4346 */ 4347 if (flags & ZIO_FLAG_SCAN_THREAD) { 4348 dsl_scan_t *scn = spa->spa_dsl_pool->dp_scan; 4349 dsl_scan_phys_t *scn_phys = &scn->scn_phys; 4350 uint64_t *processed = &scn_phys->scn_processed; 4351 4352 if (vd->vdev_ops->vdev_op_leaf) 4353 atomic_add_64(processed, psize); 4354 vs->vs_scan_processed += psize; 4355 } 4356 4357 /* 4358 * Repair is the result of a rebuild issued by the 4359 * rebuild thread (vdev_rebuild_thread). 
To avoid 4360 * double counting repaired bytes, the virtual dRAID 4361 * spare vdev is excluded from the processed bytes. 4362 */ 4363 if (zio->io_priority == ZIO_PRIORITY_REBUILD) { 4364 vdev_t *tvd = vd->vdev_top; 4365 vdev_rebuild_t *vr = &tvd->vdev_rebuild_config; 4366 vdev_rebuild_phys_t *vrp = &vr->vr_rebuild_phys; 4367 uint64_t *rebuilt = &vrp->vrp_bytes_rebuilt; 4368 4369 if (vd->vdev_ops->vdev_op_leaf && 4370 vd->vdev_ops != &vdev_draid_spare_ops) { 4371 atomic_add_64(rebuilt, psize); 4372 } 4373 vs->vs_rebuild_processed += psize; 4374 } 4375 4376 if (flags & ZIO_FLAG_SELF_HEAL) 4377 vs->vs_self_healed += psize; 4378 } 4379 4380 /* 4381 * The bytes/ops/histograms are recorded at the leaf level and 4382 * aggregated into the higher-level vdevs in vdev_get_stats(). 4383 */ 4384 if (vd->vdev_ops->vdev_op_leaf && 4385 (zio->io_priority < ZIO_PRIORITY_NUM_QUEUEABLE)) { 4386 zio_type_t vs_type = type; 4387 zio_priority_t priority = zio->io_priority; 4388 4389 /* 4390 * TRIM ops and bytes are reported to user space as 4391 * ZIO_TYPE_IOCTL. This is done to preserve the 4392 * vdev_stat_t structure layout for user space. 4393 */ 4394 if (type == ZIO_TYPE_TRIM) 4395 vs_type = ZIO_TYPE_IOCTL; 4396 4397 /* 4398 * Solely for the purposes of 'zpool iostat -lqrw' 4399 * reporting, use the priority to categorize the IO. 4400 * Only the following are reported to user space: 4401 * 4402 * ZIO_PRIORITY_SYNC_READ, 4403 * ZIO_PRIORITY_SYNC_WRITE, 4404 * ZIO_PRIORITY_ASYNC_READ, 4405 * ZIO_PRIORITY_ASYNC_WRITE, 4406 * ZIO_PRIORITY_SCRUB, 4407 * ZIO_PRIORITY_TRIM. 4408 */ 4409 if (priority == ZIO_PRIORITY_REBUILD) { 4410 priority = ((type == ZIO_TYPE_WRITE) ? 4411 ZIO_PRIORITY_ASYNC_WRITE : 4412 ZIO_PRIORITY_SCRUB); 4413 } else if (priority == ZIO_PRIORITY_INITIALIZING) { 4414 ASSERT3U(type, ==, ZIO_TYPE_WRITE); 4415 priority = ZIO_PRIORITY_ASYNC_WRITE; 4416 } else if (priority == ZIO_PRIORITY_REMOVAL) { 4417 priority = ((type == ZIO_TYPE_WRITE) ? 4418 ZIO_PRIORITY_ASYNC_WRITE : 4419 ZIO_PRIORITY_ASYNC_READ); 4420 } 4421 4422 vs->vs_ops[vs_type]++; 4423 vs->vs_bytes[vs_type] += psize; 4424 4425 if (flags & ZIO_FLAG_DELEGATED) { 4426 vsx->vsx_agg_histo[priority] 4427 [RQ_HISTO(zio->io_size)]++; 4428 } else { 4429 vsx->vsx_ind_histo[priority] 4430 [RQ_HISTO(zio->io_size)]++; 4431 } 4432 4433 if (zio->io_delta && zio->io_delay) { 4434 vsx->vsx_queue_histo[priority] 4435 [L_HISTO(zio->io_delta - zio->io_delay)]++; 4436 vsx->vsx_disk_histo[type] 4437 [L_HISTO(zio->io_delay)]++; 4438 vsx->vsx_total_histo[type] 4439 [L_HISTO(zio->io_delta)]++; 4440 } 4441 } 4442 4443 mutex_exit(&vd->vdev_stat_lock); 4444 return; 4445 } 4446 4447 if (flags & ZIO_FLAG_SPECULATIVE) 4448 return; 4449 4450 /* 4451 * If this is an I/O error that is going to be retried, then ignore the 4452 * error. Otherwise, the user may interpret B_FAILFAST I/O errors as 4453 * hard errors, when in reality they can happen for any number of 4454 * innocuous reasons (bus resets, MPxIO link failure, etc.). 4455 */ 4456 if (zio->io_error == EIO && 4457 !(zio->io_flags & ZIO_FLAG_IO_RETRY)) 4458 return; 4459 4460 /* 4461 * Intent log writes won't propagate their error to the root 4462 * I/O, so don't mark these types of failures as pool-level 4463 * errors.
4464 */ 4465 if (zio->io_vd == NULL && (zio->io_flags & ZIO_FLAG_DONT_PROPAGATE)) 4466 return; 4467 4468 if (type == ZIO_TYPE_WRITE && txg != 0 && 4469 (!(flags & ZIO_FLAG_IO_REPAIR) || 4470 (flags & ZIO_FLAG_SCAN_THREAD) || 4471 spa->spa_claiming)) { 4472 /* 4473 * This is either a normal write (not a repair), or it's 4474 * a repair induced by the scrub thread, or it's a repair 4475 * made by zil_claim() during spa_load() in the first txg. 4476 * In the normal case, we commit the DTL change in the same 4477 * txg as the block was born. In the scrub-induced repair 4478 * case, we know that scrubs run in first-pass syncing context, 4479 * so we commit the DTL change in spa_syncing_txg(spa). 4480 * In the zil_claim() case, we commit in spa_first_txg(spa). 4481 * 4482 * We currently do not make DTL entries for failed spontaneous 4483 * self-healing writes triggered by normal (non-scrubbing) 4484 * reads, because we have no transactional context in which to 4485 * do so -- and it's not clear that it'd be desirable anyway. 4486 */ 4487 if (vd->vdev_ops->vdev_op_leaf) { 4488 uint64_t commit_txg = txg; 4489 if (flags & ZIO_FLAG_SCAN_THREAD) { 4490 ASSERT(flags & ZIO_FLAG_IO_REPAIR); 4491 ASSERT(spa_sync_pass(spa) == 1); 4492 vdev_dtl_dirty(vd, DTL_SCRUB, txg, 1); 4493 commit_txg = spa_syncing_txg(spa); 4494 } else if (spa->spa_claiming) { 4495 ASSERT(flags & ZIO_FLAG_IO_REPAIR); 4496 commit_txg = spa_first_txg(spa); 4497 } 4498 ASSERT(commit_txg >= spa_syncing_txg(spa)); 4499 if (vdev_dtl_contains(vd, DTL_MISSING, txg, 1)) 4500 return; 4501 for (pvd = vd; pvd != rvd; pvd = pvd->vdev_parent) 4502 vdev_dtl_dirty(pvd, DTL_PARTIAL, txg, 1); 4503 vdev_dirty(vd->vdev_top, VDD_DTL, vd, commit_txg); 4504 } 4505 if (vd != rvd) 4506 vdev_dtl_dirty(vd, DTL_MISSING, txg, 1); 4507 } 4508 } 4509 4510 int64_t 4511 vdev_deflated_space(vdev_t *vd, int64_t space) 4512 { 4513 ASSERT((space & (SPA_MINBLOCKSIZE-1)) == 0); 4514 ASSERT(vd->vdev_deflate_ratio != 0 || vd->vdev_isl2cache); 4515 4516 return ((space >> SPA_MINBLOCKSHIFT) * vd->vdev_deflate_ratio); 4517 } 4518 4519 /* 4520 * Update the in-core space usage stats for this vdev, its metaslab class, 4521 * and the root vdev. 4522 */ 4523 void 4524 vdev_space_update(vdev_t *vd, int64_t alloc_delta, int64_t defer_delta, 4525 int64_t space_delta) 4526 { 4527 int64_t dspace_delta; 4528 spa_t *spa = vd->vdev_spa; 4529 vdev_t *rvd = spa->spa_root_vdev; 4530 4531 ASSERT(vd == vd->vdev_top); 4532 4533 /* 4534 * Apply the inverse of the psize-to-asize (i.e. RAID-Z) space-expansion 4535 * factor. We must calculate this here and not at the root vdev 4536 * because the root vdev's psize-to-asize is simply the max of its 4537 * children's, thus not accurate enough for us.
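 *
 * Rough sketch (illustrative, not exact arithmetic): for a plain disk
 * the deflate ratio is effectively the identity, so dspace_delta ends
 * up equal to space_delta below; for a raidz vdev the allocated bytes
 * are scaled down by roughly the data-to-(data+parity) fraction before
 * being charged to vs_dspace.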
4538 */ 4539 dspace_delta = vdev_deflated_space(vd, space_delta); 4540 4541 mutex_enter(&vd->vdev_stat_lock); 4542 /* ensure we won't underflow */ 4543 if (alloc_delta < 0) { 4544 ASSERT3U(vd->vdev_stat.vs_alloc, >=, -alloc_delta); 4545 } 4546 4547 vd->vdev_stat.vs_alloc += alloc_delta; 4548 vd->vdev_stat.vs_space += space_delta; 4549 vd->vdev_stat.vs_dspace += dspace_delta; 4550 mutex_exit(&vd->vdev_stat_lock); 4551 4552 /* every class but log contributes to root space stats */ 4553 if (vd->vdev_mg != NULL && !vd->vdev_islog) { 4554 ASSERT(!vd->vdev_isl2cache); 4555 mutex_enter(&rvd->vdev_stat_lock); 4556 rvd->vdev_stat.vs_alloc += alloc_delta; 4557 rvd->vdev_stat.vs_space += space_delta; 4558 rvd->vdev_stat.vs_dspace += dspace_delta; 4559 mutex_exit(&rvd->vdev_stat_lock); 4560 } 4561 4562 } 4563 4564 /* 4565 * Mark a top-level vdev's config as dirty, placing it on the dirty list 4566 * so that it will be written out next time the vdev configuration is synced. 4567 * If the root vdev is specified (vdev_top == NULL), dirty all top-level vdevs. 4568 */ 4569 void 4570 vdev_config_dirty(vdev_t *vd) 4571 { 4572 spa_t *spa = vd->vdev_spa; 4573 vdev_t *rvd = spa->spa_root_vdev; 4574 int c; 4575 4576 ASSERT(spa_writeable(spa)); 4577 4578 /* 4579 * If this is an aux vdev (as with l2cache and spare devices), then we 4580 * update the vdev config manually and set the sync flag. 4581 */ 4582 if (vd->vdev_aux != NULL) { 4583 spa_aux_vdev_t *sav = vd->vdev_aux; 4584 nvlist_t **aux; 4585 uint_t naux; 4586 4587 for (c = 0; c < sav->sav_count; c++) { 4588 if (sav->sav_vdevs[c] == vd) 4589 break; 4590 } 4591 4592 if (c == sav->sav_count) { 4593 /* 4594 * We're being removed. There's nothing more to do. 4595 */ 4596 ASSERT(sav->sav_sync == B_TRUE); 4597 return; 4598 } 4599 4600 sav->sav_sync = B_TRUE; 4601 4602 if (nvlist_lookup_nvlist_array(sav->sav_config, 4603 ZPOOL_CONFIG_L2CACHE, &aux, &naux) != 0) { 4604 VERIFY(nvlist_lookup_nvlist_array(sav->sav_config, 4605 ZPOOL_CONFIG_SPARES, &aux, &naux) == 0); 4606 } 4607 4608 ASSERT(c < naux); 4609 4610 /* 4611 * Setting the nvlist in the middle of the array is a little 4612 * sketchy, but it will work. 4613 */ 4614 nvlist_free(aux[c]); 4615 aux[c] = vdev_config_generate(spa, vd, B_TRUE, 0); 4616 4617 return; 4618 } 4619 4620 /* 4621 * The dirty list is protected by the SCL_CONFIG lock. The caller 4622 * must either hold SCL_CONFIG as writer, or must be the sync thread 4623 * (which holds SCL_CONFIG as reader). There's only one sync thread, 4624 * so this is sufficient to ensure mutual exclusion.
4625 */ 4626 ASSERT(spa_config_held(spa, SCL_CONFIG, RW_WRITER) || 4627 (dsl_pool_sync_context(spa_get_dsl(spa)) && 4628 spa_config_held(spa, SCL_CONFIG, RW_READER))); 4629 4630 if (vd == rvd) { 4631 for (c = 0; c < rvd->vdev_children; c++) 4632 vdev_config_dirty(rvd->vdev_child[c]); 4633 } else { 4634 ASSERT(vd == vd->vdev_top); 4635 4636 if (!list_link_active(&vd->vdev_config_dirty_node) && 4637 vdev_is_concrete(vd)) { 4638 list_insert_head(&spa->spa_config_dirty_list, vd); 4639 } 4640 } 4641 } 4642 4643 void 4644 vdev_config_clean(vdev_t *vd) 4645 { 4646 spa_t *spa = vd->vdev_spa; 4647 4648 ASSERT(spa_config_held(spa, SCL_CONFIG, RW_WRITER) || 4649 (dsl_pool_sync_context(spa_get_dsl(spa)) && 4650 spa_config_held(spa, SCL_CONFIG, RW_READER))); 4651 4652 ASSERT(list_link_active(&vd->vdev_config_dirty_node)); 4653 list_remove(&spa->spa_config_dirty_list, vd); 4654 } 4655 4656 /* 4657 * Mark a top-level vdev's state as dirty, so that the next pass of 4658 * spa_sync() can convert this into vdev_config_dirty(). We distinguish 4659 * the state changes from larger config changes because they require 4660 * much less locking, and are often needed for administrative actions. 4661 */ 4662 void 4663 vdev_state_dirty(vdev_t *vd) 4664 { 4665 spa_t *spa = vd->vdev_spa; 4666 4667 ASSERT(spa_writeable(spa)); 4668 ASSERT(vd == vd->vdev_top); 4669 4670 /* 4671 * The state list is protected by the SCL_STATE lock. The caller 4672 * must either hold SCL_STATE as writer, or must be the sync thread 4673 * (which holds SCL_STATE as reader). There's only one sync thread, 4674 * so this is sufficient to ensure mutual exclusion. 4675 */ 4676 ASSERT(spa_config_held(spa, SCL_STATE, RW_WRITER) || 4677 (dsl_pool_sync_context(spa_get_dsl(spa)) && 4678 spa_config_held(spa, SCL_STATE, RW_READER))); 4679 4680 if (!list_link_active(&vd->vdev_state_dirty_node) && 4681 vdev_is_concrete(vd)) 4682 list_insert_head(&spa->spa_state_dirty_list, vd); 4683 } 4684 4685 void 4686 vdev_state_clean(vdev_t *vd) 4687 { 4688 spa_t *spa = vd->vdev_spa; 4689 4690 ASSERT(spa_config_held(spa, SCL_STATE, RW_WRITER) || 4691 (dsl_pool_sync_context(spa_get_dsl(spa)) && 4692 spa_config_held(spa, SCL_STATE, RW_READER))); 4693 4694 ASSERT(list_link_active(&vd->vdev_state_dirty_node)); 4695 list_remove(&spa->spa_state_dirty_list, vd); 4696 } 4697 4698 /* 4699 * Propagate vdev state up from children to parent. 4700 */ 4701 void 4702 vdev_propagate_state(vdev_t *vd) 4703 { 4704 spa_t *spa = vd->vdev_spa; 4705 vdev_t *rvd = spa->spa_root_vdev; 4706 int degraded = 0, faulted = 0; 4707 int corrupted = 0; 4708 vdev_t *child; 4709 4710 if (vd->vdev_children > 0) { 4711 for (int c = 0; c < vd->vdev_children; c++) { 4712 child = vd->vdev_child[c]; 4713 4714 /* 4715 * Don't factor holes or indirect vdevs into the 4716 * decision. 4717 */ 4718 if (!vdev_is_concrete(child)) 4719 continue; 4720 4721 if (!vdev_readable(child) || 4722 (!vdev_writeable(child) && spa_writeable(spa))) { 4723 /* 4724 * Root special: if there is a top-level log 4725 * device, treat the root vdev as if it were 4726 * degraded. 
4727 */ 4728 if (child->vdev_islog && vd == rvd) 4729 degraded++; 4730 else 4731 faulted++; 4732 } else if (child->vdev_state <= VDEV_STATE_DEGRADED) { 4733 degraded++; 4734 } 4735 4736 if (child->vdev_stat.vs_aux == VDEV_AUX_CORRUPT_DATA) 4737 corrupted++; 4738 } 4739 4740 vd->vdev_ops->vdev_op_state_change(vd, faulted, degraded); 4741 4742 /* 4743 * Root special: if there is a top-level vdev that cannot be 4744 * opened due to corrupted metadata, then propagate the root 4745 * vdev's aux state as 'corrupt' rather than 'insufficient 4746 * replicas'. 4747 */ 4748 if (corrupted && vd == rvd && 4749 rvd->vdev_state == VDEV_STATE_CANT_OPEN) 4750 vdev_set_state(rvd, B_FALSE, VDEV_STATE_CANT_OPEN, 4751 VDEV_AUX_CORRUPT_DATA); 4752 } 4753 4754 if (vd->vdev_parent) 4755 vdev_propagate_state(vd->vdev_parent); 4756 } 4757 4758 /* 4759 * Set a vdev's state. If this is during an open, we don't update the parent 4760 * state, because we're in the process of opening children depth-first. 4761 * Otherwise, we propagate the change to the parent. 4762 * 4763 * If this routine places a device in a faulted state, an appropriate ereport is 4764 * generated. 4765 */ 4766 void 4767 vdev_set_state(vdev_t *vd, boolean_t isopen, vdev_state_t state, vdev_aux_t aux) 4768 { 4769 uint64_t save_state; 4770 spa_t *spa = vd->vdev_spa; 4771 4772 if (state == vd->vdev_state) { 4773 /* 4774 * Since the vdev_offline() code path can be entered with the 4775 * vdev already in an offline state, we can miss a statechange 4776 * event to OFFLINE. Check the previous state to catch this 4777 * condition. 4778 */ 4779 if (vd->vdev_ops->vdev_op_leaf && 4780 (state == VDEV_STATE_OFFLINE) && 4781 (vd->vdev_prevstate >= VDEV_STATE_FAULTED)) { 4782 /* post an offline state change */ 4783 zfs_post_state_change(spa, vd, vd->vdev_prevstate); 4784 } 4785 vd->vdev_stat.vs_aux = aux; 4786 return; 4787 } 4788 4789 save_state = vd->vdev_state; 4790 4791 vd->vdev_state = state; 4792 vd->vdev_stat.vs_aux = aux; 4793 4794 /* 4795 * If we are setting the vdev state to anything but an open state, then 4796 * always close the underlying device unless the device has requested 4797 * a delayed close (i.e. we're about to remove or fault the device). 4798 * Otherwise, we keep accessible but invalid devices open forever. 4799 * We don't call vdev_close() itself, because that implies some extra 4800 * checks (offline, etc.) that we don't want here. This is limited to 4801 * leaf devices, because otherwise closing the device will affect other 4802 * children. 4803 */ 4804 if (!vd->vdev_delayed_close && vdev_is_dead(vd) && 4805 vd->vdev_ops->vdev_op_leaf) 4806 vd->vdev_ops->vdev_op_close(vd); 4807 4808 if (vd->vdev_removed && 4809 state == VDEV_STATE_CANT_OPEN && 4810 (aux == VDEV_AUX_OPEN_FAILED || vd->vdev_checkremove)) { 4811 /* 4812 * If the previous state is set to VDEV_STATE_REMOVED, then this 4813 * device was previously marked removed and someone attempted to 4814 * reopen it. If this failed due to a nonexistent device, then 4815 * keep the device in the REMOVED state. We also leave it in the 4816 * REMOVED state if this is one of our special test online 4817 * cases, which only attempts to online the device and shouldn't 4818 * generate an FMA fault.
4818 */ 4819 vd->vdev_state = VDEV_STATE_REMOVED; 4820 vd->vdev_stat.vs_aux = VDEV_AUX_NONE; 4821 } else if (state == VDEV_STATE_REMOVED) { 4822 vd->vdev_removed = B_TRUE; 4823 } else if (state == VDEV_STATE_CANT_OPEN) { 4824 /* 4825 * If we fail to open a vdev during an import or recovery, we 4826 * mark it as "not available", which signifies that it was 4827 * never there to begin with. Failure to open such a device 4828 * is not considered an error. 4829 */ 4830 if ((spa_load_state(spa) == SPA_LOAD_IMPORT || 4831 spa_load_state(spa) == SPA_LOAD_RECOVER) && 4832 vd->vdev_ops->vdev_op_leaf) 4833 vd->vdev_not_present = 1; 4834 4835 /* 4836 * Post the appropriate ereport. If the 'prevstate' field is 4837 * set to something other than VDEV_STATE_UNKNOWN, it indicates 4838 * that this is part of a vdev_reopen(). In this case, we don't 4839 * want to post the ereport if the device was already in the 4840 * CANT_OPEN state beforehand. 4841 * 4842 * If the 'checkremove' flag is set, then this is an attempt to 4843 * online the device in response to an insertion event. If we 4844 * hit this case, then we have detected an insertion event for a 4845 * faulted or offline device that wasn't in the removed state. 4846 * In this scenario, we don't post an ereport because we are 4847 * about to replace the device, or attempt an online with 4848 * vdev_forcefault, which will generate the fault for us. 4849 */ 4850 if ((vd->vdev_prevstate != state || vd->vdev_forcefault) && 4851 !vd->vdev_not_present && !vd->vdev_checkremove && 4852 vd != spa->spa_root_vdev) { 4853 const char *class; 4854 4855 switch (aux) { 4856 case VDEV_AUX_OPEN_FAILED: 4857 class = FM_EREPORT_ZFS_DEVICE_OPEN_FAILED; 4858 break; 4859 case VDEV_AUX_CORRUPT_DATA: 4860 class = FM_EREPORT_ZFS_DEVICE_CORRUPT_DATA; 4861 break; 4862 case VDEV_AUX_NO_REPLICAS: 4863 class = FM_EREPORT_ZFS_DEVICE_NO_REPLICAS; 4864 break; 4865 case VDEV_AUX_BAD_GUID_SUM: 4866 class = FM_EREPORT_ZFS_DEVICE_BAD_GUID_SUM; 4867 break; 4868 case VDEV_AUX_TOO_SMALL: 4869 class = FM_EREPORT_ZFS_DEVICE_TOO_SMALL; 4870 break; 4871 case VDEV_AUX_BAD_LABEL: 4872 class = FM_EREPORT_ZFS_DEVICE_BAD_LABEL; 4873 break; 4874 case VDEV_AUX_BAD_ASHIFT: 4875 class = FM_EREPORT_ZFS_DEVICE_BAD_ASHIFT; 4876 break; 4877 default: 4878 class = FM_EREPORT_ZFS_DEVICE_UNKNOWN; 4879 } 4880 4881 (void) zfs_ereport_post(class, spa, vd, NULL, NULL, 4882 save_state); 4883 } 4884 4885 /* Erase any notion of persistent removed state */ 4886 vd->vdev_removed = B_FALSE; 4887 } else { 4888 vd->vdev_removed = B_FALSE; 4889 } 4890 4891 /* 4892 * Notify ZED of any significant state-change on a leaf vdev. 
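For
 * example (illustrative), ZED picks up the resulting sysevent in its
 * statechange zedlets, e.g. to light a fault LED or send a
 * notification.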
4893 * 4894 */ 4895 if (vd->vdev_ops->vdev_op_leaf) { 4896 /* preserve original state from a vdev_reopen() */ 4897 if ((vd->vdev_prevstate != VDEV_STATE_UNKNOWN) && 4898 (vd->vdev_prevstate != vd->vdev_state) && 4899 (save_state <= VDEV_STATE_CLOSED)) 4900 save_state = vd->vdev_prevstate; 4901 4902 /* filter out state change due to initial vdev_open */ 4903 if (save_state > VDEV_STATE_CLOSED) 4904 zfs_post_state_change(spa, vd, save_state); 4905 } 4906 4907 if (!isopen && vd->vdev_parent) 4908 vdev_propagate_state(vd->vdev_parent); 4909 } 4910 4911 boolean_t 4912 vdev_children_are_offline(vdev_t *vd) 4913 { 4914 ASSERT(!vd->vdev_ops->vdev_op_leaf); 4915 4916 for (uint64_t i = 0; i < vd->vdev_children; i++) { 4917 if (vd->vdev_child[i]->vdev_state != VDEV_STATE_OFFLINE) 4918 return (B_FALSE); 4919 } 4920 4921 return (B_TRUE); 4922 } 4923 4924 /* 4925 * Check the vdev configuration to ensure that it's capable of supporting 4926 * a root pool. We do not support partial configuration. 4927 */ 4928 boolean_t 4929 vdev_is_bootable(vdev_t *vd) 4930 { 4931 if (!vd->vdev_ops->vdev_op_leaf) { 4932 const char *vdev_type = vd->vdev_ops->vdev_op_type; 4933 4934 if (strcmp(vdev_type, VDEV_TYPE_MISSING) == 0 || 4935 strcmp(vdev_type, VDEV_TYPE_INDIRECT) == 0) { 4936 return (B_FALSE); 4937 } 4938 } 4939 4940 for (int c = 0; c < vd->vdev_children; c++) { 4941 if (!vdev_is_bootable(vd->vdev_child[c])) 4942 return (B_FALSE); 4943 } 4944 return (B_TRUE); 4945 } 4946 4947 boolean_t 4948 vdev_is_concrete(vdev_t *vd) 4949 { 4950 vdev_ops_t *ops = vd->vdev_ops; 4951 if (ops == &vdev_indirect_ops || ops == &vdev_hole_ops || 4952 ops == &vdev_missing_ops || ops == &vdev_root_ops) { 4953 return (B_FALSE); 4954 } else { 4955 return (B_TRUE); 4956 } 4957 } 4958 4959 /* 4960 * Determine if a log device has valid content. If the vdev was 4961 * removed or faulted in the MOS config then we know that 4962 * the content on the log device has already been written to the pool. 4963 */ 4964 boolean_t 4965 vdev_log_state_valid(vdev_t *vd) 4966 { 4967 if (vd->vdev_ops->vdev_op_leaf && !vd->vdev_faulted && 4968 !vd->vdev_removed) 4969 return (B_TRUE); 4970 4971 for (int c = 0; c < vd->vdev_children; c++) 4972 if (vdev_log_state_valid(vd->vdev_child[c])) 4973 return (B_TRUE); 4974 4975 return (B_FALSE); 4976 } 4977 4978 /* 4979 * Expand a vdev if possible. 4980 */ 4981 void 4982 vdev_expand(vdev_t *vd, uint64_t txg) 4983 { 4984 ASSERT(vd->vdev_top == vd); 4985 ASSERT(spa_config_held(vd->vdev_spa, SCL_ALL, RW_WRITER) == SCL_ALL); 4986 ASSERT(vdev_is_concrete(vd)); 4987 4988 vdev_set_deflate_ratio(vd); 4989 4990 if ((vd->vdev_asize >> vd->vdev_ms_shift) > vd->vdev_ms_count && 4991 vdev_is_concrete(vd)) { 4992 vdev_metaslab_group_create(vd); 4993 VERIFY(vdev_metaslab_init(vd, txg) == 0); 4994 vdev_config_dirty(vd); 4995 } 4996 } 4997 4998 /* 4999 * Split a vdev. 
5000 */ 5001 void 5002 vdev_split(vdev_t *vd) 5003 { 5004 vdev_t *cvd, *pvd = vd->vdev_parent; 5005 5006 vdev_remove_child(pvd, vd); 5007 vdev_compact_children(pvd); 5008 5009 cvd = pvd->vdev_child[0]; 5010 if (pvd->vdev_children == 1) { 5011 vdev_remove_parent(cvd); 5012 cvd->vdev_splitting = B_TRUE; 5013 } 5014 vdev_propagate_state(cvd); 5015 } 5016 5017 void 5018 vdev_deadman(vdev_t *vd, char *tag) 5019 { 5020 for (int c = 0; c < vd->vdev_children; c++) { 5021 vdev_t *cvd = vd->vdev_child[c]; 5022 5023 vdev_deadman(cvd, tag); 5024 } 5025 5026 if (vd->vdev_ops->vdev_op_leaf) { 5027 vdev_queue_t *vq = &vd->vdev_queue; 5028 5029 mutex_enter(&vq->vq_lock); 5030 if (avl_numnodes(&vq->vq_active_tree) > 0) { 5031 spa_t *spa = vd->vdev_spa; 5032 zio_t *fio; 5033 uint64_t delta; 5034 5035 zfs_dbgmsg("slow vdev: %s has %llu active IOs", 5036 vd->vdev_path, (u_longlong_t)avl_numnodes(&vq->vq_active_tree)); 5037 5038 /* 5039 * Look at the head of all the pending queues; if any 5040 * I/O has been outstanding for longer than 5041 * spa_deadman_synctime, invoke the deadman logic. 5042 */ 5043 fio = avl_first(&vq->vq_active_tree); 5044 delta = gethrtime() - fio->io_timestamp; 5045 if (delta > spa_deadman_synctime(spa)) 5046 zio_deadman(fio, tag); 5047 } 5048 mutex_exit(&vq->vq_lock); 5049 } 5050 } 5051 5052 void 5053 vdev_defer_resilver(vdev_t *vd) 5054 { 5055 ASSERT(vd->vdev_ops->vdev_op_leaf); 5056 5057 vd->vdev_resilver_deferred = B_TRUE; 5058 vd->vdev_spa->spa_resilver_deferred = B_TRUE; 5059 } 5060 5061 /* 5062 * Clears the resilver deferred flag on all leaf devs under vd. Returns 5063 * B_TRUE if we have devices that need to be resilvered and are available to 5064 * accept resilver I/Os. 5065 */ 5066 boolean_t 5067 vdev_clear_resilver_deferred(vdev_t *vd, dmu_tx_t *tx) 5068 { 5069 boolean_t resilver_needed = B_FALSE; 5070 spa_t *spa = vd->vdev_spa; 5071 5072 for (int c = 0; c < vd->vdev_children; c++) { 5073 vdev_t *cvd = vd->vdev_child[c]; 5074 resilver_needed |= vdev_clear_resilver_deferred(cvd, tx); 5075 } 5076 5077 if (vd == spa->spa_root_vdev && 5078 spa_feature_is_active(spa, SPA_FEATURE_RESILVER_DEFER)) { 5079 spa_feature_decr(spa, SPA_FEATURE_RESILVER_DEFER, tx); 5080 vdev_config_dirty(vd); 5081 spa->spa_resilver_deferred = B_FALSE; 5082 return (resilver_needed); 5083 } 5084 5085 if (!vdev_is_concrete(vd) || vd->vdev_aux || 5086 !vd->vdev_ops->vdev_op_leaf) 5087 return (resilver_needed); 5088 5089 vd->vdev_resilver_deferred = B_FALSE; 5090 5091 return (!vdev_is_dead(vd) && !vd->vdev_offline && 5092 vdev_resilver_needed(vd, NULL, NULL)); 5093 } 5094 5095 boolean_t 5096 vdev_xlate_is_empty(range_seg64_t *rs) 5097 { 5098 return (rs->rs_start == rs->rs_end); 5099 } 5100 5101 /* 5102 * Translate a logical range to the first contiguous physical range for the 5103 * specified vdev_t. This function is initially called with a leaf vdev and 5104 * will walk each parent vdev until it reaches a top-level vdev. Once the 5105 * top-level is reached the physical range is initialized and the recursive 5106 * function begins to unwind. As it unwinds it calls the parent's vdev 5107 * specific translation function to do the real conversion.
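 *
 * Illustrative example: for a leaf under a mirror the translation is
 * the identity, so physical_rs comes back equal to logical_rs and
 * remain_rs comes back empty; for raidz/dRAID the parent's xlate
 * callback returns only the first contiguous run that lands on this
 * child and leaves the rest in remain_rs for vdev_xlate_walk() below.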
5108 */ 5109 void 5110 vdev_xlate(vdev_t *vd, const range_seg64_t *logical_rs, 5111 range_seg64_t *physical_rs, range_seg64_t *remain_rs) 5112 { 5113 /* 5114 * Walk up the vdev tree 5115 */ 5116 if (vd != vd->vdev_top) { 5117 vdev_xlate(vd->vdev_parent, logical_rs, physical_rs, 5118 remain_rs); 5119 } else { 5120 /* 5121 * We've reached the top-level vdev; initialize the physical 5122 * range to the logical range, set an empty remaining range, 5123 * and then start to unwind. 5124 */ 5125 physical_rs->rs_start = logical_rs->rs_start; 5126 physical_rs->rs_end = logical_rs->rs_end; 5127 5128 remain_rs->rs_start = logical_rs->rs_start; 5129 remain_rs->rs_end = logical_rs->rs_start; 5130 5131 return; 5132 } 5133 5134 vdev_t *pvd = vd->vdev_parent; 5135 ASSERT3P(pvd, !=, NULL); 5136 ASSERT3P(pvd->vdev_ops->vdev_op_xlate, !=, NULL); 5137 5138 /* 5139 * As this recursive function unwinds, translate the logical 5140 * range into its physical and any remaining components by calling 5141 * the vdev specific translate function. 5142 */ 5143 range_seg64_t intermediate = { 0 }; 5144 pvd->vdev_ops->vdev_op_xlate(vd, physical_rs, &intermediate, remain_rs); 5145 5146 physical_rs->rs_start = intermediate.rs_start; 5147 physical_rs->rs_end = intermediate.rs_end; 5148 } 5149 5150 void 5151 vdev_xlate_walk(vdev_t *vd, const range_seg64_t *logical_rs, 5152 vdev_xlate_func_t *func, void *arg) 5153 { 5154 range_seg64_t iter_rs = *logical_rs; 5155 range_seg64_t physical_rs; 5156 range_seg64_t remain_rs; 5157 5158 while (!vdev_xlate_is_empty(&iter_rs)) { 5159 5160 vdev_xlate(vd, &iter_rs, &physical_rs, &remain_rs); 5161 5162 /* 5163 * With raidz and dRAID, it's possible that the logical range 5164 * does not live on this leaf vdev. Only call the provided 5165 * function when the translated physical range is non-empty. 5166 */ 5167 if (!vdev_xlate_is_empty(&physical_rs)) 5168 func(arg, &physical_rs); 5169 5170 iter_rs = remain_rs; 5171 } 5172 } 5173 5174 /* 5175 * Look at the vdev tree and determine whether any devices are currently being 5176 * replaced. 5177 */ 5178 boolean_t 5179 vdev_replace_in_progress(vdev_t *vdev) 5180 { 5181 ASSERT(spa_config_held(vdev->vdev_spa, SCL_ALL, RW_READER) != 0); 5182 5183 if (vdev->vdev_ops == &vdev_replacing_ops) 5184 return (B_TRUE); 5185 5186 /* 5187 * A 'spare' vdev indicates that we have a replace in progress, unless 5188 * it has exactly two children, and the second, the hot spare, has 5189 * finished being resilvered.
5190 */ 5191 if (vdev->vdev_ops == &vdev_spare_ops && (vdev->vdev_children > 2 || 5192 !vdev_dtl_empty(vdev->vdev_child[1], DTL_MISSING))) 5193 return (B_TRUE); 5194 5195 for (int i = 0; i < vdev->vdev_children; i++) { 5196 if (vdev_replace_in_progress(vdev->vdev_child[i])) 5197 return (B_TRUE); 5198 } 5199 5200 return (B_FALSE); 5201 } 5202 5203 EXPORT_SYMBOL(vdev_fault); 5204 EXPORT_SYMBOL(vdev_degrade); 5205 EXPORT_SYMBOL(vdev_online); 5206 EXPORT_SYMBOL(vdev_offline); 5207 EXPORT_SYMBOL(vdev_clear); 5208 5209 /* BEGIN CSTYLED */ 5210 ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, default_ms_count, INT, ZMOD_RW, 5211 "Target number of metaslabs per top-level vdev"); 5212 5213 ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, default_ms_shift, INT, ZMOD_RW, 5214 "Default limit for metaslab size"); 5215 5216 ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, min_ms_count, INT, ZMOD_RW, 5217 "Minimum number of metaslabs per top-level vdev"); 5218 5219 ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, ms_count_limit, INT, ZMOD_RW, 5220 "Practical upper limit of total metaslabs per top-level vdev"); 5221 5222 ZFS_MODULE_PARAM(zfs, zfs_, slow_io_events_per_second, UINT, ZMOD_RW, 5223 "Rate limit slow IO (delay) events to this many per second"); 5224 5225 ZFS_MODULE_PARAM(zfs, zfs_, checksum_events_per_second, UINT, ZMOD_RW, 5226 "Rate limit checksum events to this many checksum errors per second " 5227 "(do not set below zed threshold)."); 5228 5229 ZFS_MODULE_PARAM(zfs, zfs_, scan_ignore_errors, INT, ZMOD_RW, 5230 "Ignore errors during resilver/scrub"); 5231 5232 ZFS_MODULE_PARAM(zfs_vdev, vdev_, validate_skip, INT, ZMOD_RW, 5233 "Bypass vdev_validate()"); 5234 5235 ZFS_MODULE_PARAM(zfs, zfs_, nocacheflush, INT, ZMOD_RW, 5236 "Disable cache flushes"); 5237 5238 ZFS_MODULE_PARAM_CALL(zfs_vdev, zfs_vdev_, min_auto_ashift, 5239 param_set_min_auto_ashift, param_get_ulong, ZMOD_RW, 5240 "Minimum ashift used when creating new top-level vdevs"); 5241 5242 ZFS_MODULE_PARAM_CALL(zfs_vdev, zfs_vdev_, max_auto_ashift, 5243 param_set_max_auto_ashift, param_get_ulong, ZMOD_RW, 5244 "Maximum ashift used when optimizing for logical -> physical sector " 5245 "size on new top-level vdevs"); 5246 /* END CSTYLED */ 5247
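
/*
 * Illustrative usage of the tunables above (not part of this file): on
 * Linux each ZFS_MODULE_PARAM declared above is exposed through
 * /sys/module/zfs/parameters, e.g.
 *
 *     # echo 32 > /sys/module/zfs/parameters/zfs_vdev_min_ms_count
 *     # cat /sys/module/zfs/parameters/zfs_nocacheflush
 *
 * subject to the caveats noted in the parameter descriptions.
 */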