/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2018 by Delphix. All rights reserved.
 * Copyright 2017 Nexenta Systems, Inc.
 * Copyright (c) 2014 Integros [integros.com]
 * Copyright 2016 Toomas Soome <tsoome@me.com>
 * Copyright 2019 Joyent, Inc.
 * Copyright (c) 2017, Intel Corporation.
 */

#include <sys/zfs_context.h>
#include <sys/fm/fs/zfs.h>
#include <sys/spa.h>
#include <sys/spa_impl.h>
#include <sys/bpobj.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/dsl_dir.h>
#include <sys/vdev_impl.h>
#include <sys/uberblock_impl.h>
#include <sys/metaslab.h>
#include <sys/metaslab_impl.h>
#include <sys/space_map.h>
#include <sys/space_reftree.h>
#include <sys/zio.h>
#include <sys/zap.h>
#include <sys/fs/zfs.h>
#include <sys/arc.h>
#include <sys/zil.h>
#include <sys/dsl_scan.h>
#include <sys/abd.h>
#include <sys/vdev_initialize.h>
#include <sys/vdev_trim.h>

/*
 * Virtual device management.
 */

static vdev_ops_t *vdev_ops_table[] = {
	&vdev_root_ops,
	&vdev_raidz_ops,
	&vdev_mirror_ops,
	&vdev_replacing_ops,
	&vdev_spare_ops,
	&vdev_disk_ops,
	&vdev_file_ops,
	&vdev_missing_ops,
	&vdev_hole_ops,
	&vdev_indirect_ops,
	NULL
};

/* maximum scrub/resilver I/O queue per leaf vdev */
int zfs_scrub_limit = 10;

/* default target for number of metaslabs per top-level vdev */
int zfs_vdev_default_ms_count = 200;

/* minimum number of metaslabs per top-level vdev */
int zfs_vdev_min_ms_count = 16;

/* practical upper limit of total metaslabs per top-level vdev */
int zfs_vdev_ms_count_limit = 1ULL << 17;

/* lower limit for metaslab size (512M) */
int zfs_vdev_default_ms_shift = 29;

/* upper limit for metaslab size (16G) */
int zfs_vdev_max_ms_shift = 34;

boolean_t vdev_validate_skip = B_FALSE;

/*
 * Since the DTL space map of a vdev is not expected to have a lot of
 * entries, we default its block size to 4K.
 */
int vdev_dtl_sm_blksz = (1 << 12);

/*
 * vdev-wide space maps that have lots of entries written to them at
 * the end of each transaction can benefit from a higher I/O bandwidth
 * (e.g. vdev_obsolete_sm), thus we default their block size to 128K.
 */
int vdev_standard_sm_blksz = (1 << 17);

int zfs_ashift_min;
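
/*
 * Debug logging helper for vdevs.  An illustrative call:
 *
 *	vdev_dbgmsg(vd, "metaslab_init failed [error=%d]", error);
 *
 * The message is routed to zfs_dbgmsg() prefixed with the vdev's type
 * and path, or with its id and guid when no path is set.
 */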
/*PRINTFLIKE2*/
void
vdev_dbgmsg(vdev_t *vd, const char *fmt, ...)
{
	va_list adx;
	char buf[256];

	va_start(adx, fmt);
	(void) vsnprintf(buf, sizeof (buf), fmt, adx);
	va_end(adx);

	if (vd->vdev_path != NULL) {
		zfs_dbgmsg("%s vdev '%s': %s", vd->vdev_ops->vdev_op_type,
		    vd->vdev_path, buf);
	} else {
		zfs_dbgmsg("%s-%llu vdev (guid %llu): %s",
		    vd->vdev_ops->vdev_op_type,
		    (u_longlong_t)vd->vdev_id,
		    (u_longlong_t)vd->vdev_guid, buf);
	}
}

void
vdev_dbgmsg_print_tree(vdev_t *vd, int indent)
{
	char state[20];

	if (vd->vdev_ishole || vd->vdev_ops == &vdev_missing_ops) {
		zfs_dbgmsg("%*svdev %u: %s", indent, "", vd->vdev_id,
		    vd->vdev_ops->vdev_op_type);
		return;
	}

	switch (vd->vdev_state) {
	case VDEV_STATE_UNKNOWN:
		(void) snprintf(state, sizeof (state), "unknown");
		break;
	case VDEV_STATE_CLOSED:
		(void) snprintf(state, sizeof (state), "closed");
		break;
	case VDEV_STATE_OFFLINE:
		(void) snprintf(state, sizeof (state), "offline");
		break;
	case VDEV_STATE_REMOVED:
		(void) snprintf(state, sizeof (state), "removed");
		break;
	case VDEV_STATE_CANT_OPEN:
		(void) snprintf(state, sizeof (state), "can't open");
		break;
	case VDEV_STATE_FAULTED:
		(void) snprintf(state, sizeof (state), "faulted");
		break;
	case VDEV_STATE_DEGRADED:
		(void) snprintf(state, sizeof (state), "degraded");
		break;
	case VDEV_STATE_HEALTHY:
		(void) snprintf(state, sizeof (state), "healthy");
		break;
	default:
		(void) snprintf(state, sizeof (state), "<state %u>",
		    (uint_t)vd->vdev_state);
	}

	zfs_dbgmsg("%*svdev %u: %s%s, guid: %llu, path: %s, %s", indent,
	    "", (int)vd->vdev_id, vd->vdev_ops->vdev_op_type,
	    vd->vdev_islog ? " (log)" : "",
	    (u_longlong_t)vd->vdev_guid,
	    vd->vdev_path ? vd->vdev_path : "N/A", state);

	for (uint64_t i = 0; i < vd->vdev_children; i++)
		vdev_dbgmsg_print_tree(vd->vdev_child[i], indent + 2);
}

/*
 * Given a vdev type, return the appropriate ops vector.
 */
static vdev_ops_t *
vdev_getops(const char *type)
{
	vdev_ops_t *ops, **opspp;

	for (opspp = vdev_ops_table; (ops = *opspp) != NULL; opspp++)
		if (strcmp(ops->vdev_op_type, type) == 0)
			break;

	return (ops);
}

/*
 * Derive the enumerated allocation bias from string input.
 * String origin is either the per-vdev zap or zpool(1M).
 */
static vdev_alloc_bias_t
vdev_derive_alloc_bias(const char *bias)
{
	vdev_alloc_bias_t alloc_bias = VDEV_BIAS_NONE;

	if (strcmp(bias, VDEV_ALLOC_BIAS_LOG) == 0)
		alloc_bias = VDEV_BIAS_LOG;
	else if (strcmp(bias, VDEV_ALLOC_BIAS_SPECIAL) == 0)
		alloc_bias = VDEV_BIAS_SPECIAL;
	else if (strcmp(bias, VDEV_ALLOC_BIAS_DEDUP) == 0)
		alloc_bias = VDEV_BIAS_DEDUP;

	return (alloc_bias);
}

/* ARGSUSED */
void
vdev_default_xlate(vdev_t *vd, const range_seg_t *in, range_seg_t *res)
{
	res->rs_start = in->rs_start;
	res->rs_end = in->rs_end;
}

/*
 * Default asize function: return the MAX of psize with the asize of
 * all children.  This is what's used by anything other than RAID-Z.
 */
uint64_t
vdev_default_asize(vdev_t *vd, uint64_t psize)
{
	uint64_t asize = P2ROUNDUP(psize, 1ULL << vd->vdev_top->vdev_ashift);
	uint64_t csize;

	for (int c = 0; c < vd->vdev_children; c++) {
		csize = vdev_psize_to_asize(vd->vdev_child[c], psize);
		asize = MAX(asize, csize);
	}

	return (asize);
}
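
/*
 * For example (illustrative): with a top-level ashift of 12 (4K
 * sectors), a psize of 5000 bytes rounds up to an asize of 8192 bytes;
 * a mirror then returns the largest such rounded-up size among its
 * children.
 */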

/*
 * Get the minimum allocatable size.  We define the allocatable size as
 * the vdev's asize rounded to the nearest metaslab.  This allows us to
 * replace or attach devices which don't have the same physical size but
 * can still satisfy the same number of allocations.
 */
uint64_t
vdev_get_min_asize(vdev_t *vd)
{
	vdev_t *pvd = vd->vdev_parent;

	/*
	 * If our parent is NULL (inactive spare or cache) or is the root,
	 * just return our own asize.
	 */
	if (pvd == NULL)
		return (vd->vdev_asize);

	/*
	 * The top-level vdev just returns the allocatable size rounded
	 * to the nearest metaslab.
	 */
	if (vd == vd->vdev_top)
		return (P2ALIGN(vd->vdev_asize, 1ULL << vd->vdev_ms_shift));

	/*
	 * The allocatable space for a raidz vdev is N * sizeof(smallest child),
	 * so each child must provide at least 1/Nth of its asize.
	 */
	if (pvd->vdev_ops == &vdev_raidz_ops)
		return ((pvd->vdev_min_asize + pvd->vdev_children - 1) /
		    pvd->vdev_children);

	return (pvd->vdev_min_asize);
}

void
vdev_set_min_asize(vdev_t *vd)
{
	vd->vdev_min_asize = vdev_get_min_asize(vd);

	for (int c = 0; c < vd->vdev_children; c++)
		vdev_set_min_asize(vd->vdev_child[c]);
}

vdev_t *
vdev_lookup_top(spa_t *spa, uint64_t vdev)
{
	vdev_t *rvd = spa->spa_root_vdev;

	ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0);

	if (vdev < rvd->vdev_children) {
		ASSERT(rvd->vdev_child[vdev] != NULL);
		return (rvd->vdev_child[vdev]);
	}

	return (NULL);
}

vdev_t *
vdev_lookup_by_guid(vdev_t *vd, uint64_t guid)
{
	vdev_t *mvd;

	if (vd->vdev_guid == guid)
		return (vd);

	for (int c = 0; c < vd->vdev_children; c++)
		if ((mvd = vdev_lookup_by_guid(vd->vdev_child[c], guid)) !=
		    NULL)
			return (mvd);

	return (NULL);
}

static int
vdev_count_leaves_impl(vdev_t *vd)
{
	int n = 0;

	if (vd->vdev_ops->vdev_op_leaf)
		return (1);

	for (int c = 0; c < vd->vdev_children; c++)
		n += vdev_count_leaves_impl(vd->vdev_child[c]);

	return (n);
}

int
vdev_count_leaves(spa_t *spa)
{
	return (vdev_count_leaves_impl(spa->spa_root_vdev));
}

void
vdev_add_child(vdev_t *pvd, vdev_t *cvd)
{
	size_t oldsize, newsize;
	uint64_t id = cvd->vdev_id;
	vdev_t **newchild;
	spa_t *spa = cvd->vdev_spa;

	ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
	ASSERT(cvd->vdev_parent == NULL);

	cvd->vdev_parent = pvd;

	if (pvd == NULL)
		return;

	ASSERT(id >= pvd->vdev_children || pvd->vdev_child[id] == NULL);

	oldsize = pvd->vdev_children * sizeof (vdev_t *);
	pvd->vdev_children = MAX(pvd->vdev_children, id + 1);
	newsize = pvd->vdev_children * sizeof (vdev_t *);

	newchild = kmem_zalloc(newsize, KM_SLEEP);
	if (pvd->vdev_child != NULL) {
		bcopy(pvd->vdev_child, newchild, oldsize);
		kmem_free(pvd->vdev_child, oldsize);
	}

	pvd->vdev_child = newchild;
	pvd->vdev_child[id] = cvd;
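
	/*
	 * The new child inherits its parent's top-level vdev: if the
	 * parent is the root vdev, the child is itself a top-level vdev.
	 */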
	cvd->vdev_top = (pvd->vdev_top ? pvd->vdev_top : cvd);
	ASSERT(cvd->vdev_top->vdev_parent->vdev_parent == NULL);

	/*
	 * Walk up all ancestors to update guid sum.
	 */
	for (; pvd != NULL; pvd = pvd->vdev_parent)
		pvd->vdev_guid_sum += cvd->vdev_guid_sum;

	if (cvd->vdev_ops->vdev_op_leaf) {
		list_insert_head(&cvd->vdev_spa->spa_leaf_list, cvd);
		cvd->vdev_spa->spa_leaf_list_gen++;
	}
}

void
vdev_remove_child(vdev_t *pvd, vdev_t *cvd)
{
	int c;
	uint_t id = cvd->vdev_id;

	ASSERT(cvd->vdev_parent == pvd);

	if (pvd == NULL)
		return;

	ASSERT(id < pvd->vdev_children);
	ASSERT(pvd->vdev_child[id] == cvd);

	pvd->vdev_child[id] = NULL;
	cvd->vdev_parent = NULL;

	for (c = 0; c < pvd->vdev_children; c++)
		if (pvd->vdev_child[c])
			break;

	if (c == pvd->vdev_children) {
		kmem_free(pvd->vdev_child, c * sizeof (vdev_t *));
		pvd->vdev_child = NULL;
		pvd->vdev_children = 0;
	}

	if (cvd->vdev_ops->vdev_op_leaf) {
		spa_t *spa = cvd->vdev_spa;
		list_remove(&spa->spa_leaf_list, cvd);
		spa->spa_leaf_list_gen++;
	}

	/*
	 * Walk up all ancestors to update guid sum.
	 */
	for (; pvd != NULL; pvd = pvd->vdev_parent)
		pvd->vdev_guid_sum -= cvd->vdev_guid_sum;
}

/*
 * Remove any holes in the child array.
 */
void
vdev_compact_children(vdev_t *pvd)
{
	vdev_t **newchild, *cvd;
	int oldc = pvd->vdev_children;
	int newc;

	ASSERT(spa_config_held(pvd->vdev_spa, SCL_ALL, RW_WRITER) == SCL_ALL);

	for (int c = newc = 0; c < oldc; c++)
		if (pvd->vdev_child[c])
			newc++;

	newchild = kmem_alloc(newc * sizeof (vdev_t *), KM_SLEEP);

	for (int c = newc = 0; c < oldc; c++) {
		if ((cvd = pvd->vdev_child[c]) != NULL) {
			newchild[newc] = cvd;
			cvd->vdev_id = newc++;
		}
	}

	kmem_free(pvd->vdev_child, oldc * sizeof (vdev_t *));
	pvd->vdev_child = newchild;
	pvd->vdev_children = newc;
}

/*
 * Allocate and minimally initialize a vdev_t.
 */
vdev_t *
vdev_alloc_common(spa_t *spa, uint_t id, uint64_t guid, vdev_ops_t *ops)
{
	vdev_t *vd;
	vdev_indirect_config_t *vic;

	vd = kmem_zalloc(sizeof (vdev_t), KM_SLEEP);
	vic = &vd->vdev_indirect_config;

	if (spa->spa_root_vdev == NULL) {
		ASSERT(ops == &vdev_root_ops);
		spa->spa_root_vdev = vd;
		spa->spa_load_guid = spa_generate_guid(NULL);
	}
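
	/*
	 * Note that the first vdev allocated for a pool is always the root
	 * vdev (asserted above), and that allocating it also establishes
	 * the pool's in-core load guid.
	 */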
	if (guid == 0 && ops != &vdev_hole_ops) {
		if (spa->spa_root_vdev == vd) {
			/*
			 * The root vdev's guid will also be the pool guid,
			 * which must be unique among all pools.
			 */
			guid = spa_generate_guid(NULL);
		} else {
			/*
			 * Any other vdev's guid must be unique within the pool.
			 */
			guid = spa_generate_guid(spa);
		}
		ASSERT(!spa_guid_exists(spa_guid(spa), guid));
	}

	vd->vdev_spa = spa;
	vd->vdev_id = id;
	vd->vdev_guid = guid;
	vd->vdev_guid_sum = guid;
	vd->vdev_ops = ops;
	vd->vdev_state = VDEV_STATE_CLOSED;
	vd->vdev_ishole = (ops == &vdev_hole_ops);
	vic->vic_prev_indirect_vdev = UINT64_MAX;

	rw_init(&vd->vdev_indirect_rwlock, NULL, RW_DEFAULT, NULL);
	mutex_init(&vd->vdev_obsolete_lock, NULL, MUTEX_DEFAULT, NULL);
	vd->vdev_obsolete_segments = range_tree_create(NULL, NULL);

	list_link_init(&vd->vdev_initialize_node);
	list_link_init(&vd->vdev_leaf_node);
	list_link_init(&vd->vdev_trim_node);
	mutex_init(&vd->vdev_dtl_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&vd->vdev_stat_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&vd->vdev_probe_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&vd->vdev_scan_io_queue_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&vd->vdev_initialize_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&vd->vdev_initialize_io_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&vd->vdev_initialize_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&vd->vdev_initialize_io_cv, NULL, CV_DEFAULT, NULL);
	mutex_init(&vd->vdev_trim_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&vd->vdev_autotrim_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&vd->vdev_trim_io_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&vd->vdev_trim_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&vd->vdev_autotrim_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&vd->vdev_trim_io_cv, NULL, CV_DEFAULT, NULL);

	for (int t = 0; t < DTL_TYPES; t++) {
		vd->vdev_dtl[t] = range_tree_create(NULL, NULL);
	}
	txg_list_create(&vd->vdev_ms_list, spa,
	    offsetof(struct metaslab, ms_txg_node));
	txg_list_create(&vd->vdev_dtl_list, spa,
	    offsetof(struct vdev, vdev_dtl_node));
	vd->vdev_stat.vs_timestamp = gethrtime();
	vdev_queue_init(vd);
	vdev_cache_init(vd);

	return (vd);
}

/*
 * Allocate a new vdev.  The 'alloctype' is used to control whether we are
 * creating a new vdev or loading an existing one - the behavior is slightly
 * different for each case.
 */
int
vdev_alloc(spa_t *spa, vdev_t **vdp, nvlist_t *nv, vdev_t *parent, uint_t id,
    int alloctype)
{
	vdev_ops_t *ops;
	char *type;
	uint64_t guid = 0, islog, nparity;
	vdev_t *vd;
	vdev_indirect_config_t *vic;
	vdev_alloc_bias_t alloc_bias = VDEV_BIAS_NONE;
	boolean_t top_level = (parent && !parent->vdev_parent);

	ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);

	if (nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) != 0)
		return (SET_ERROR(EINVAL));

	if ((ops = vdev_getops(type)) == NULL)
		return (SET_ERROR(EINVAL));
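
	/*
	 * The guid requirements below depend on the allocation type: a
	 * load must match both the label's id and guid, while spares,
	 * L2 cache devices, and root pool vdevs only need a guid.
	 */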
	/*
	 * If this is a load, get the vdev guid from the nvlist.
	 * Otherwise, vdev_alloc_common() will generate one for us.
	 */
	if (alloctype == VDEV_ALLOC_LOAD) {
		uint64_t label_id;

		if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID, &label_id) ||
		    label_id != id)
			return (SET_ERROR(EINVAL));

		if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) != 0)
			return (SET_ERROR(EINVAL));
	} else if (alloctype == VDEV_ALLOC_SPARE) {
		if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) != 0)
			return (SET_ERROR(EINVAL));
	} else if (alloctype == VDEV_ALLOC_L2CACHE) {
		if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) != 0)
			return (SET_ERROR(EINVAL));
	} else if (alloctype == VDEV_ALLOC_ROOTPOOL) {
		if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) != 0)
			return (SET_ERROR(EINVAL));
	}

	/*
	 * The first allocated vdev must be of type 'root'.
	 */
	if (ops != &vdev_root_ops && spa->spa_root_vdev == NULL)
		return (SET_ERROR(EINVAL));

	/*
	 * Determine whether we're a log vdev.
	 */
	islog = 0;
	(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_LOG, &islog);
	if (islog && spa_version(spa) < SPA_VERSION_SLOGS)
		return (SET_ERROR(ENOTSUP));

	if (ops == &vdev_hole_ops && spa_version(spa) < SPA_VERSION_HOLES)
		return (SET_ERROR(ENOTSUP));

	/*
	 * Set the nparity property for RAID-Z vdevs.
	 */
	nparity = -1ULL;
	if (ops == &vdev_raidz_ops) {
		if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NPARITY,
		    &nparity) == 0) {
			if (nparity == 0 || nparity > VDEV_RAIDZ_MAXPARITY)
				return (SET_ERROR(EINVAL));
			/*
			 * Previous versions could only support 1 or 2 parity
			 * devices.
			 */
			if (nparity > 1 &&
			    spa_version(spa) < SPA_VERSION_RAIDZ2)
				return (SET_ERROR(ENOTSUP));
			if (nparity > 2 &&
			    spa_version(spa) < SPA_VERSION_RAIDZ3)
				return (SET_ERROR(ENOTSUP));
		} else {
			/*
			 * We require the parity to be specified for SPAs that
			 * support multiple parity levels.
			 */
			if (spa_version(spa) >= SPA_VERSION_RAIDZ2)
				return (SET_ERROR(EINVAL));
			/*
			 * Otherwise, we default to 1 parity device for RAID-Z.
			 */
			nparity = 1;
		}
	} else {
		nparity = 0;
	}
	ASSERT(nparity != -1ULL);

	/*
	 * If creating a top-level vdev, check for allocation classes input.
	 */
	if (top_level && alloctype == VDEV_ALLOC_ADD) {
		char *bias;

		if (nvlist_lookup_string(nv, ZPOOL_CONFIG_ALLOCATION_BIAS,
		    &bias) == 0) {
			alloc_bias = vdev_derive_alloc_bias(bias);

			/* spa_vdev_add() expects feature to be enabled */
			if (alloc_bias != VDEV_BIAS_LOG &&
			    spa->spa_load_state != SPA_LOAD_CREATE &&
			    !spa_feature_is_enabled(spa,
			    SPA_FEATURE_ALLOCATION_CLASSES)) {
				return (SET_ERROR(ENOTSUP));
			}
		}
	}

	vd = vdev_alloc_common(spa, id, guid, ops);
	vic = &vd->vdev_indirect_config;

	vd->vdev_islog = islog;
	vd->vdev_nparity = nparity;
	if (top_level && alloc_bias != VDEV_BIAS_NONE)
		vd->vdev_alloc_bias = alloc_bias;

	if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &vd->vdev_path) == 0)
		vd->vdev_path = spa_strdup(vd->vdev_path);
	if (nvlist_lookup_string(nv, ZPOOL_CONFIG_DEVID, &vd->vdev_devid) == 0)
		vd->vdev_devid = spa_strdup(vd->vdev_devid);
	if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PHYS_PATH,
	    &vd->vdev_physpath) == 0)
		vd->vdev_physpath = spa_strdup(vd->vdev_physpath);
	if (nvlist_lookup_string(nv, ZPOOL_CONFIG_FRU, &vd->vdev_fru) == 0)
		vd->vdev_fru = spa_strdup(vd->vdev_fru);
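
	/*
	 * The strings above are duplicated because the nvlist they came
	 * from may be freed before this vdev is.
	 */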

	/*
	 * Set the whole_disk property.  If it's not specified, leave the value
	 * as -1.
	 */
	if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
	    &vd->vdev_wholedisk) != 0)
		vd->vdev_wholedisk = -1ULL;

	ASSERT0(vic->vic_mapping_object);
	(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_INDIRECT_OBJECT,
	    &vic->vic_mapping_object);
	ASSERT0(vic->vic_births_object);
	(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_INDIRECT_BIRTHS,
	    &vic->vic_births_object);
	ASSERT3U(vic->vic_prev_indirect_vdev, ==, UINT64_MAX);
	(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_PREV_INDIRECT_VDEV,
	    &vic->vic_prev_indirect_vdev);

	/*
	 * Look for the 'not present' flag.  This will only be set if the device
	 * was not present at the time of import.
	 */
	(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT,
	    &vd->vdev_not_present);

	/*
	 * Get the alignment requirement.
	 */
	(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ASHIFT, &vd->vdev_ashift);

	/*
	 * Retrieve the vdev creation time.
	 */
	(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_CREATE_TXG,
	    &vd->vdev_crtxg);

	/*
	 * If we're a top-level vdev, try to load the allocation parameters.
	 */
	if (top_level &&
	    (alloctype == VDEV_ALLOC_LOAD || alloctype == VDEV_ALLOC_SPLIT)) {
		(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_METASLAB_ARRAY,
		    &vd->vdev_ms_array);
		(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_METASLAB_SHIFT,
		    &vd->vdev_ms_shift);
		(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ASIZE,
		    &vd->vdev_asize);
		(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_REMOVING,
		    &vd->vdev_removing);
		(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_VDEV_TOP_ZAP,
		    &vd->vdev_top_zap);
	} else {
		ASSERT0(vd->vdev_top_zap);
	}

	if (top_level && alloctype != VDEV_ALLOC_ATTACH) {
		ASSERT(alloctype == VDEV_ALLOC_LOAD ||
		    alloctype == VDEV_ALLOC_ADD ||
		    alloctype == VDEV_ALLOC_SPLIT ||
		    alloctype == VDEV_ALLOC_ROOTPOOL);
		/* Note: metaslab_group_create() is now deferred */
	}

	if (vd->vdev_ops->vdev_op_leaf &&
	    (alloctype == VDEV_ALLOC_LOAD || alloctype == VDEV_ALLOC_SPLIT)) {
		(void) nvlist_lookup_uint64(nv,
		    ZPOOL_CONFIG_VDEV_LEAF_ZAP, &vd->vdev_leaf_zap);
	} else {
		ASSERT0(vd->vdev_leaf_zap);
	}

	/*
	 * If we're a leaf vdev, try to load the DTL object and other state.
	 */

	if (vd->vdev_ops->vdev_op_leaf &&
	    (alloctype == VDEV_ALLOC_LOAD || alloctype == VDEV_ALLOC_L2CACHE ||
	    alloctype == VDEV_ALLOC_ROOTPOOL)) {
		if (alloctype == VDEV_ALLOC_LOAD) {
			(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_DTL,
			    &vd->vdev_dtl_object);
			(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_UNSPARE,
			    &vd->vdev_unspare);
		}

		if (alloctype == VDEV_ALLOC_ROOTPOOL) {
			uint64_t spare = 0;

			if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_SPARE,
			    &spare) == 0 && spare)
				spa_spare_add(vd);
		}

		(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_OFFLINE,
		    &vd->vdev_offline);

		(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_RESILVER_TXG,
		    &vd->vdev_resilver_txg);

		if (nvlist_exists(nv, ZPOOL_CONFIG_RESILVER_DEFER))
			vdev_set_deferred_resilver(spa, vd);

		/*
		 * When importing a pool, we want to ignore the persistent fault
		 * state, as the diagnosis made on another system may not be
		 * valid in the current context.  Local vdevs will
		 * remain in the faulted state.
		 */
		if (spa_load_state(spa) == SPA_LOAD_OPEN) {
			(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_FAULTED,
			    &vd->vdev_faulted);
			(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_DEGRADED,
			    &vd->vdev_degraded);
			(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_REMOVED,
			    &vd->vdev_removed);

			if (vd->vdev_faulted || vd->vdev_degraded) {
				char *aux;

				vd->vdev_label_aux =
				    VDEV_AUX_ERR_EXCEEDED;
				if (nvlist_lookup_string(nv,
				    ZPOOL_CONFIG_AUX_STATE, &aux) == 0 &&
				    strcmp(aux, "external") == 0)
					vd->vdev_label_aux = VDEV_AUX_EXTERNAL;
			}
		}
	}

	/*
	 * Add ourselves to the parent's list of children.
	 */
	vdev_add_child(parent, vd);

	*vdp = vd;

	return (0);
}

void
vdev_free(vdev_t *vd)
{
	spa_t *spa = vd->vdev_spa;

	ASSERT3P(vd->vdev_initialize_thread, ==, NULL);
	ASSERT3P(vd->vdev_trim_thread, ==, NULL);
	ASSERT3P(vd->vdev_autotrim_thread, ==, NULL);

	/*
	 * Scan queues are normally destroyed at the end of a scan.  If the
	 * queue exists here, that implies the vdev is being removed while
	 * the scan is still running.
	 */
	if (vd->vdev_scan_io_queue != NULL) {
		mutex_enter(&vd->vdev_scan_io_queue_lock);
		dsl_scan_io_queue_destroy(vd->vdev_scan_io_queue);
		vd->vdev_scan_io_queue = NULL;
		mutex_exit(&vd->vdev_scan_io_queue_lock);
	}

	/*
	 * vdev_free() implies closing the vdev first.  This is simpler than
	 * trying to ensure complicated semantics for all callers.
	 */
	vdev_close(vd);

	ASSERT(!list_link_active(&vd->vdev_config_dirty_node));
	ASSERT(!list_link_active(&vd->vdev_state_dirty_node));

	/*
	 * Free all children.
	 */
	for (int c = 0; c < vd->vdev_children; c++)
		vdev_free(vd->vdev_child[c]);

	ASSERT(vd->vdev_child == NULL);
	ASSERT(vd->vdev_guid_sum == vd->vdev_guid);

	/*
	 * Discard allocation state.
	 */
	if (vd->vdev_mg != NULL) {
		vdev_metaslab_fini(vd);
		metaslab_group_destroy(vd->vdev_mg);
	}

	ASSERT0(vd->vdev_stat.vs_space);
	ASSERT0(vd->vdev_stat.vs_dspace);
	ASSERT0(vd->vdev_stat.vs_alloc);

	/*
	 * Remove this vdev from its parent's child list.
	 */
	vdev_remove_child(vd->vdev_parent, vd);

	ASSERT(vd->vdev_parent == NULL);
	ASSERT(!list_link_active(&vd->vdev_leaf_node));
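
	/*
	 * By now the subtree below us has been freed and we are detached
	 * from our parent, so everything left to release is owned by this
	 * vdev alone.
	 */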
	/*
	 * Clean up vdev structure.
	 */
	vdev_queue_fini(vd);
	vdev_cache_fini(vd);

	if (vd->vdev_path)
		spa_strfree(vd->vdev_path);
	if (vd->vdev_devid)
		spa_strfree(vd->vdev_devid);
	if (vd->vdev_physpath)
		spa_strfree(vd->vdev_physpath);
	if (vd->vdev_fru)
		spa_strfree(vd->vdev_fru);

	if (vd->vdev_isspare)
		spa_spare_remove(vd);
	if (vd->vdev_isl2cache)
		spa_l2cache_remove(vd);

	txg_list_destroy(&vd->vdev_ms_list);
	txg_list_destroy(&vd->vdev_dtl_list);

	mutex_enter(&vd->vdev_dtl_lock);
	space_map_close(vd->vdev_dtl_sm);
	for (int t = 0; t < DTL_TYPES; t++) {
		range_tree_vacate(vd->vdev_dtl[t], NULL, NULL);
		range_tree_destroy(vd->vdev_dtl[t]);
	}
	mutex_exit(&vd->vdev_dtl_lock);

	EQUIV(vd->vdev_indirect_births != NULL,
	    vd->vdev_indirect_mapping != NULL);
	if (vd->vdev_indirect_births != NULL) {
		vdev_indirect_mapping_close(vd->vdev_indirect_mapping);
		vdev_indirect_births_close(vd->vdev_indirect_births);
	}

	if (vd->vdev_obsolete_sm != NULL) {
		ASSERT(vd->vdev_removing ||
		    vd->vdev_ops == &vdev_indirect_ops);
		space_map_close(vd->vdev_obsolete_sm);
		vd->vdev_obsolete_sm = NULL;
	}
	range_tree_destroy(vd->vdev_obsolete_segments);
	rw_destroy(&vd->vdev_indirect_rwlock);
	mutex_destroy(&vd->vdev_obsolete_lock);

	mutex_destroy(&vd->vdev_dtl_lock);
	mutex_destroy(&vd->vdev_stat_lock);
	mutex_destroy(&vd->vdev_probe_lock);
	mutex_destroy(&vd->vdev_scan_io_queue_lock);
	mutex_destroy(&vd->vdev_initialize_lock);
	mutex_destroy(&vd->vdev_initialize_io_lock);
	cv_destroy(&vd->vdev_initialize_io_cv);
	cv_destroy(&vd->vdev_initialize_cv);
	mutex_destroy(&vd->vdev_trim_lock);
	mutex_destroy(&vd->vdev_autotrim_lock);
	mutex_destroy(&vd->vdev_trim_io_lock);
	cv_destroy(&vd->vdev_trim_cv);
	cv_destroy(&vd->vdev_autotrim_cv);
	cv_destroy(&vd->vdev_trim_io_cv);

	if (vd == spa->spa_root_vdev)
		spa->spa_root_vdev = NULL;

	kmem_free(vd, sizeof (vdev_t));
}

/*
 * Transfer top-level vdev state from svd to tvd.
 */
static void
vdev_top_transfer(vdev_t *svd, vdev_t *tvd)
{
	spa_t *spa = svd->vdev_spa;
	metaslab_t *msp;
	vdev_t *vd;
	int t;

	ASSERT(tvd == tvd->vdev_top);

	tvd->vdev_ms_array = svd->vdev_ms_array;
	tvd->vdev_ms_shift = svd->vdev_ms_shift;
	tvd->vdev_ms_count = svd->vdev_ms_count;
	tvd->vdev_top_zap = svd->vdev_top_zap;

	svd->vdev_ms_array = 0;
	svd->vdev_ms_shift = 0;
	svd->vdev_ms_count = 0;
	svd->vdev_top_zap = 0;

	if (tvd->vdev_mg)
		ASSERT3P(tvd->vdev_mg, ==, svd->vdev_mg);
	tvd->vdev_mg = svd->vdev_mg;
	tvd->vdev_ms = svd->vdev_ms;

	svd->vdev_mg = NULL;
	svd->vdev_ms = NULL;

	if (tvd->vdev_mg != NULL)
		tvd->vdev_mg->mg_vd = tvd;

	tvd->vdev_checkpoint_sm = svd->vdev_checkpoint_sm;
	svd->vdev_checkpoint_sm = NULL;

	tvd->vdev_alloc_bias = svd->vdev_alloc_bias;
	svd->vdev_alloc_bias = VDEV_BIAS_NONE;

	tvd->vdev_stat.vs_alloc = svd->vdev_stat.vs_alloc;
	tvd->vdev_stat.vs_space = svd->vdev_stat.vs_space;
	tvd->vdev_stat.vs_dspace = svd->vdev_stat.vs_dspace;

	svd->vdev_stat.vs_alloc = 0;
	svd->vdev_stat.vs_space = 0;
	svd->vdev_stat.vs_dspace = 0;

	/*
	 * State which may be set on a top-level vdev that's in the
	 * process of being removed.
	 */
	ASSERT0(tvd->vdev_indirect_config.vic_births_object);
	ASSERT0(tvd->vdev_indirect_config.vic_mapping_object);
	ASSERT3U(tvd->vdev_indirect_config.vic_prev_indirect_vdev, ==, -1ULL);
	ASSERT3P(tvd->vdev_indirect_mapping, ==, NULL);
	ASSERT3P(tvd->vdev_indirect_births, ==, NULL);
	ASSERT3P(tvd->vdev_obsolete_sm, ==, NULL);
	ASSERT0(tvd->vdev_removing);
	tvd->vdev_removing = svd->vdev_removing;
	tvd->vdev_indirect_config = svd->vdev_indirect_config;
	tvd->vdev_indirect_mapping = svd->vdev_indirect_mapping;
	tvd->vdev_indirect_births = svd->vdev_indirect_births;
	range_tree_swap(&svd->vdev_obsolete_segments,
	    &tvd->vdev_obsolete_segments);
	tvd->vdev_obsolete_sm = svd->vdev_obsolete_sm;
	svd->vdev_indirect_config.vic_mapping_object = 0;
	svd->vdev_indirect_config.vic_births_object = 0;
	svd->vdev_indirect_config.vic_prev_indirect_vdev = -1ULL;
	svd->vdev_indirect_mapping = NULL;
	svd->vdev_indirect_births = NULL;
	svd->vdev_obsolete_sm = NULL;
	svd->vdev_removing = 0;

	for (t = 0; t < TXG_SIZE; t++) {
		while ((msp = txg_list_remove(&svd->vdev_ms_list, t)) != NULL)
			(void) txg_list_add(&tvd->vdev_ms_list, msp, t);
		while ((vd = txg_list_remove(&svd->vdev_dtl_list, t)) != NULL)
			(void) txg_list_add(&tvd->vdev_dtl_list, vd, t);
		if (txg_list_remove_this(&spa->spa_vdev_txg_list, svd, t))
			(void) txg_list_add(&spa->spa_vdev_txg_list, tvd, t);
	}

	if (list_link_active(&svd->vdev_config_dirty_node)) {
		vdev_config_clean(svd);
		vdev_config_dirty(tvd);
	}

	if (list_link_active(&svd->vdev_state_dirty_node)) {
		vdev_state_clean(svd);
		vdev_state_dirty(tvd);
	}

	tvd->vdev_deflate_ratio = svd->vdev_deflate_ratio;
	svd->vdev_deflate_ratio = 0;

	tvd->vdev_islog = svd->vdev_islog;
	svd->vdev_islog = 0;

	dsl_scan_io_queue_vdev_xfer(svd, tvd);
}

static void
vdev_top_update(vdev_t *tvd, vdev_t *vd)
{
	if (vd == NULL)
		return;

	vd->vdev_top = tvd;

	for (int c = 0; c < vd->vdev_children; c++)
		vdev_top_update(tvd, vd->vdev_child[c]);
}

/*
 * Add a mirror/replacing vdev above an existing vdev.
 */
vdev_t *
vdev_add_parent(vdev_t *cvd, vdev_ops_t *ops)
{
	spa_t *spa = cvd->vdev_spa;
	vdev_t *pvd = cvd->vdev_parent;
	vdev_t *mvd;

	ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);

	mvd = vdev_alloc_common(spa, cvd->vdev_id, 0, ops);

	mvd->vdev_asize = cvd->vdev_asize;
	mvd->vdev_min_asize = cvd->vdev_min_asize;
	mvd->vdev_max_asize = cvd->vdev_max_asize;
	mvd->vdev_psize = cvd->vdev_psize;
	mvd->vdev_ashift = cvd->vdev_ashift;
	mvd->vdev_state = cvd->vdev_state;
	mvd->vdev_crtxg = cvd->vdev_crtxg;

	vdev_remove_child(pvd, cvd);
	vdev_add_child(pvd, mvd);
	cvd->vdev_id = mvd->vdev_children;
	vdev_add_child(mvd, cvd);
	vdev_top_update(cvd->vdev_top, cvd->vdev_top);

	if (mvd == mvd->vdev_top)
		vdev_top_transfer(cvd, mvd);

	return (mvd);
}
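
/*
 * For example (illustrative): attaching a new disk to an existing leaf
 * calls vdev_add_parent() with &vdev_replacing_ops (or &vdev_mirror_ops),
 * after which the original leaf becomes child 0 of the new interior vdev.
 */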

/*
 * Remove a 1-way mirror/replacing vdev from the tree.
 */
void
vdev_remove_parent(vdev_t *cvd)
{
	vdev_t *mvd = cvd->vdev_parent;
	vdev_t *pvd = mvd->vdev_parent;

	ASSERT(spa_config_held(cvd->vdev_spa, SCL_ALL, RW_WRITER) == SCL_ALL);

	ASSERT(mvd->vdev_children == 1);
	ASSERT(mvd->vdev_ops == &vdev_mirror_ops ||
	    mvd->vdev_ops == &vdev_replacing_ops ||
	    mvd->vdev_ops == &vdev_spare_ops);
	cvd->vdev_ashift = mvd->vdev_ashift;

	vdev_remove_child(mvd, cvd);
	vdev_remove_child(pvd, mvd);

	/*
	 * If cvd will replace mvd as a top-level vdev, preserve mvd's guid.
	 * Otherwise, we could have detached an offline device, and when we
	 * go to import the pool we'll think we have two top-level vdevs,
	 * instead of a different version of the same top-level vdev.
	 */
	if (mvd->vdev_top == mvd) {
		uint64_t guid_delta = mvd->vdev_guid - cvd->vdev_guid;
		cvd->vdev_orig_guid = cvd->vdev_guid;
		cvd->vdev_guid += guid_delta;
		cvd->vdev_guid_sum += guid_delta;
	}
	cvd->vdev_id = mvd->vdev_id;
	vdev_add_child(pvd, cvd);
	vdev_top_update(cvd->vdev_top, cvd->vdev_top);

	if (cvd == cvd->vdev_top)
		vdev_top_transfer(mvd, cvd);

	ASSERT(mvd->vdev_children == 0);
	vdev_free(mvd);
}

static void
vdev_metaslab_group_create(vdev_t *vd)
{
	spa_t *spa = vd->vdev_spa;

	/*
	 * metaslab_group_create was delayed until allocation bias was available
	 */
	if (vd->vdev_mg == NULL) {
		metaslab_class_t *mc;

		if (vd->vdev_islog && vd->vdev_alloc_bias == VDEV_BIAS_NONE)
			vd->vdev_alloc_bias = VDEV_BIAS_LOG;

		ASSERT3U(vd->vdev_islog, ==,
		    (vd->vdev_alloc_bias == VDEV_BIAS_LOG));

		switch (vd->vdev_alloc_bias) {
		case VDEV_BIAS_LOG:
			mc = spa_log_class(spa);
			break;
		case VDEV_BIAS_SPECIAL:
			mc = spa_special_class(spa);
			break;
		case VDEV_BIAS_DEDUP:
			mc = spa_dedup_class(spa);
			break;
		default:
			mc = spa_normal_class(spa);
		}

		vd->vdev_mg = metaslab_group_create(mc, vd,
		    spa->spa_alloc_count);

		/*
		 * The spa ashift values currently only reflect the
		 * general vdev classes. Class destination is late
		 * binding so ashift checking had to wait until now
		 */
		if (vd->vdev_top == vd && vd->vdev_ashift != 0 &&
		    mc == spa_normal_class(spa) && vd->vdev_aux == NULL) {
			if (vd->vdev_ashift > spa->spa_max_ashift)
				spa->spa_max_ashift = vd->vdev_ashift;
			if (vd->vdev_ashift < spa->spa_min_ashift)
				spa->spa_min_ashift = vd->vdev_ashift;
		}
	}
}

int
vdev_metaslab_init(vdev_t *vd, uint64_t txg)
{
	spa_t *spa = vd->vdev_spa;
	objset_t *mos = spa->spa_meta_objset;
	uint64_t m;
	uint64_t oldc = vd->vdev_ms_count;
	uint64_t newc = vd->vdev_asize >> vd->vdev_ms_shift;
	metaslab_t **mspp;
	int error;
	boolean_t expanding = (oldc != 0);

	ASSERT(txg == 0 || spa_config_held(spa, SCL_ALLOC, RW_WRITER));
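
	/*
	 * For example (illustrative): with vdev_ms_shift = 29 (512M
	 * metaslabs), a 1 TB asize yields newc = 2048 metaslabs.
	 */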

	/*
	 * This vdev is not being allocated from yet or is a hole.
	 */
	if (vd->vdev_ms_shift == 0)
		return (0);

	ASSERT(!vd->vdev_ishole);

	ASSERT(oldc <= newc);

	mspp = kmem_zalloc(newc * sizeof (*mspp), KM_SLEEP);

	if (expanding) {
		bcopy(vd->vdev_ms, mspp, oldc * sizeof (*mspp));
		kmem_free(vd->vdev_ms, oldc * sizeof (*mspp));
	}

	vd->vdev_ms = mspp;
	vd->vdev_ms_count = newc;
	for (m = oldc; m < newc; m++) {
		uint64_t object = 0;

		/*
		 * vdev_ms_array may be 0 if we are creating the "fake"
		 * metaslabs for an indirect vdev for zdb's leak detection.
		 * See zdb_leak_init().
		 */
		if (txg == 0 && vd->vdev_ms_array != 0) {
			error = dmu_read(mos, vd->vdev_ms_array,
			    m * sizeof (uint64_t), sizeof (uint64_t), &object,
			    DMU_READ_PREFETCH);
			if (error != 0) {
				vdev_dbgmsg(vd, "unable to read the metaslab "
				    "array [error=%d]", error);
				return (error);
			}
		}

#ifndef _KERNEL
		/*
		 * To accommodate zdb_leak_init() fake indirect
		 * metaslabs, we allocate a metaslab group for
		 * indirect vdevs which normally don't have one.
		 */
		if (vd->vdev_mg == NULL) {
			ASSERT0(vdev_is_concrete(vd));
			vdev_metaslab_group_create(vd);
		}
#endif
		error = metaslab_init(vd->vdev_mg, m, object, txg,
		    &(vd->vdev_ms[m]));
		if (error != 0) {
			vdev_dbgmsg(vd, "metaslab_init failed [error=%d]",
			    error);
			return (error);
		}
	}

	if (txg == 0)
		spa_config_enter(spa, SCL_ALLOC, FTAG, RW_WRITER);

	/*
	 * If the vdev is being removed we don't activate
	 * the metaslabs since we want to ensure that no new
	 * allocations are performed on this device.
	 */
	if (!expanding && !vd->vdev_removing) {
		metaslab_group_activate(vd->vdev_mg);
	}

	if (txg == 0)
		spa_config_exit(spa, SCL_ALLOC, FTAG);

	return (0);
}
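
/*
 * Note that vdev_metaslab_init() also handles expansion: when oldc is
 * nonzero, the existing metaslab array is carried over and only the new
 * metaslabs in [oldc, newc) are created.
 */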

void
vdev_metaslab_fini(vdev_t *vd)
{
	if (vd->vdev_checkpoint_sm != NULL) {
		ASSERT(spa_feature_is_active(vd->vdev_spa,
		    SPA_FEATURE_POOL_CHECKPOINT));
		space_map_close(vd->vdev_checkpoint_sm);
		/*
		 * Even though we close the space map, we need to set its
		 * pointer to NULL. The reason is that vdev_metaslab_fini()
		 * may be called multiple times for certain operations
		 * (i.e. when destroying a pool) so we need to ensure that
		 * this clause never executes twice. This logic is similar
		 * to the one used for the vdev_ms clause below.
		 */
		vd->vdev_checkpoint_sm = NULL;
	}

	if (vd->vdev_ms != NULL) {
		metaslab_group_t *mg = vd->vdev_mg;
		metaslab_group_passivate(mg);

		uint64_t count = vd->vdev_ms_count;
		for (uint64_t m = 0; m < count; m++) {
			metaslab_t *msp = vd->vdev_ms[m];
			if (msp != NULL)
				metaslab_fini(msp);
		}
		kmem_free(vd->vdev_ms, count * sizeof (metaslab_t *));
		vd->vdev_ms = NULL;

		vd->vdev_ms_count = 0;

		for (int i = 0; i < RANGE_TREE_HISTOGRAM_SIZE; i++)
			ASSERT0(mg->mg_histogram[i]);
	}
	ASSERT0(vd->vdev_ms_count);
}

typedef struct vdev_probe_stats {
	boolean_t	vps_readable;
	boolean_t	vps_writeable;
	int		vps_flags;
} vdev_probe_stats_t;

static void
vdev_probe_done(zio_t *zio)
{
	spa_t *spa = zio->io_spa;
	vdev_t *vd = zio->io_vd;
	vdev_probe_stats_t *vps = zio->io_private;

	ASSERT(vd->vdev_probe_zio != NULL);

	if (zio->io_type == ZIO_TYPE_READ) {
		if (zio->io_error == 0)
			vps->vps_readable = 1;
		if (zio->io_error == 0 && spa_writeable(spa)) {
			zio_nowait(zio_write_phys(vd->vdev_probe_zio, vd,
			    zio->io_offset, zio->io_size, zio->io_abd,
			    ZIO_CHECKSUM_OFF, vdev_probe_done, vps,
			    ZIO_PRIORITY_SYNC_WRITE, vps->vps_flags, B_TRUE));
		} else {
			abd_free(zio->io_abd);
		}
	} else if (zio->io_type == ZIO_TYPE_WRITE) {
		if (zio->io_error == 0)
			vps->vps_writeable = 1;
		abd_free(zio->io_abd);
	} else if (zio->io_type == ZIO_TYPE_NULL) {
		zio_t *pio;

		vd->vdev_cant_read |= !vps->vps_readable;
		vd->vdev_cant_write |= !vps->vps_writeable;

		if (vdev_readable(vd) &&
		    (vdev_writeable(vd) || !spa_writeable(spa))) {
			zio->io_error = 0;
		} else {
			ASSERT(zio->io_error != 0);
			vdev_dbgmsg(vd, "failed probe");
			zfs_ereport_post(FM_EREPORT_ZFS_PROBE_FAILURE,
			    spa, vd, NULL, NULL, 0, 0);
			zio->io_error = SET_ERROR(ENXIO);
		}

		mutex_enter(&vd->vdev_probe_lock);
		ASSERT(vd->vdev_probe_zio == zio);
		vd->vdev_probe_zio = NULL;
		mutex_exit(&vd->vdev_probe_lock);

		zio_link_t *zl = NULL;
		while ((pio = zio_walk_parents(zio, &zl)) != NULL)
			if (!vdev_accessible(vd, pio))
				pio->io_error = SET_ERROR(ENXIO);

		kmem_free(vps, sizeof (*vps));
	}
}

/*
 * Determine whether this device is accessible.
 *
 * Read and write to several known locations: the pad regions of each
 * vdev label but the first, which we leave alone in case it contains
 * a VTOC.
 */
zio_t *
vdev_probe(vdev_t *vd, zio_t *zio)
{
	spa_t *spa = vd->vdev_spa;
	vdev_probe_stats_t *vps = NULL;
	zio_t *pio;

	ASSERT(vd->vdev_ops->vdev_op_leaf);

	/*
	 * Don't probe the probe.
	 */
	if (zio && (zio->io_flags & ZIO_FLAG_PROBE))
		return (NULL);
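
	/*
	 * Note that probe I/Os carry ZIO_FLAG_PROBE, which is how the
	 * check above detects and refuses recursive probes.
	 */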

	/*
	 * To prevent 'probe storms' when a device fails, we create
	 * just one probe i/o at a time.  All zios that want to probe
	 * this vdev will become parents of the probe io.
	 */
	mutex_enter(&vd->vdev_probe_lock);

	if ((pio = vd->vdev_probe_zio) == NULL) {
		vps = kmem_zalloc(sizeof (*vps), KM_SLEEP);

		vps->vps_flags = ZIO_FLAG_CANFAIL | ZIO_FLAG_PROBE |
		    ZIO_FLAG_DONT_CACHE | ZIO_FLAG_DONT_AGGREGATE |
		    ZIO_FLAG_TRYHARD;

		if (spa_config_held(spa, SCL_ZIO, RW_WRITER)) {
			/*
			 * vdev_cant_read and vdev_cant_write can only
			 * transition from TRUE to FALSE when we have the
			 * SCL_ZIO lock as writer; otherwise they can only
			 * transition from FALSE to TRUE. This ensures that
			 * any zio looking at these values can assume that
			 * failures persist for the life of the I/O. That's
			 * important because when a device has intermittent
			 * connectivity problems, we want to ensure that
			 * they're ascribed to the device (ENXIO) and not
			 * the zio (EIO).
			 *
			 * Since we hold SCL_ZIO as writer here, clear both
			 * values so the probe can reevaluate from first
			 * principles.
			 */
			vps->vps_flags |= ZIO_FLAG_CONFIG_WRITER;
			vd->vdev_cant_read = B_FALSE;
			vd->vdev_cant_write = B_FALSE;
		}

		vd->vdev_probe_zio = pio = zio_null(NULL, spa, vd,
		    vdev_probe_done, vps,
		    vps->vps_flags | ZIO_FLAG_DONT_PROPAGATE);

		/*
		 * We can't change the vdev state in this context, so we
		 * kick off an async task to do it on our behalf.
		 */
		if (zio != NULL) {
			vd->vdev_probe_wanted = B_TRUE;
			spa_async_request(spa, SPA_ASYNC_PROBE);
		}
	}

	if (zio != NULL)
		zio_add_child(zio, pio);

	mutex_exit(&vd->vdev_probe_lock);

	if (vps == NULL) {
		ASSERT(zio != NULL);
		return (NULL);
	}

	for (int l = 1; l < VDEV_LABELS; l++) {
		zio_nowait(zio_read_phys(pio, vd,
		    vdev_label_offset(vd->vdev_psize, l,
		    offsetof(vdev_label_t, vl_pad2)), VDEV_PAD_SIZE,
		    abd_alloc_for_io(VDEV_PAD_SIZE, B_TRUE),
		    ZIO_CHECKSUM_OFF, vdev_probe_done, vps,
		    ZIO_PRIORITY_SYNC_READ, vps->vps_flags, B_TRUE));
	}

	if (zio == NULL)
		return (pio);

	zio_nowait(pio);
	return (NULL);
}

static void
vdev_open_child(void *arg)
{
	vdev_t *vd = arg;

	vd->vdev_open_thread = curthread;
	vd->vdev_open_error = vdev_open(vd);
	vd->vdev_open_thread = NULL;
}

boolean_t
vdev_uses_zvols(vdev_t *vd)
{
	if (vd->vdev_path && strncmp(vd->vdev_path, ZVOL_DIR,
	    strlen(ZVOL_DIR)) == 0)
		return (B_TRUE);
	for (int c = 0; c < vd->vdev_children; c++)
		if (vdev_uses_zvols(vd->vdev_child[c]))
			return (B_TRUE);
	return (B_FALSE);
}

void
vdev_open_children(vdev_t *vd)
{
	taskq_t *tq;
	int children = vd->vdev_children;

	/*
	 * in order to handle pools on top of zvols, do the opens
	 * in a single thread so that the same thread holds the
	 * spa_namespace_lock
	 */
	if (vdev_uses_zvols(vd)) {
retry_sync:
		for (int c = 0; c < children; c++)
			vd->vdev_child[c]->vdev_open_error =
			    vdev_open(vd->vdev_child[c]);
	} else {
		tq = taskq_create("vdev_open", children, minclsyspri,
		    children, children, TASKQ_PREPOPULATE);
		if (tq == NULL)
			goto retry_sync;

		for (int c = 0; c < children; c++)
			VERIFY(taskq_dispatch(tq, vdev_open_child,
			    vd->vdev_child[c], TQ_SLEEP) != TASKQID_INVALID);

		taskq_destroy(tq);
	}
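
	/*
	 * A non-leaf vdev is considered non-rotational only if all of its
	 * children are non-rotational.
	 */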
	vd->vdev_nonrot = B_TRUE;

	for (int c = 0; c < children; c++)
		vd->vdev_nonrot &= vd->vdev_child[c]->vdev_nonrot;
}

/*
 * Compute the raidz-deflation ratio.  Note, we hard-code
 * in 128k (1 << 17) because it is the "typical" blocksize.
 * Even though SPA_MAXBLOCKSIZE changed, this algorithm can not change,
 * otherwise it would inconsistently account for existing bp's.
 */
static void
vdev_set_deflate_ratio(vdev_t *vd)
{
	if (vd == vd->vdev_top && !vd->vdev_ishole && vd->vdev_ashift != 0) {
		vd->vdev_deflate_ratio = (1 << 17) /
		    (vdev_psize_to_asize(vd, 1 << 17) >> SPA_MINBLOCKSHIFT);
	}
}

/*
 * Prepare a virtual device for access.
 */
int
vdev_open(vdev_t *vd)
{
	spa_t *spa = vd->vdev_spa;
	int error;
	uint64_t osize = 0;
	uint64_t max_osize = 0;
	uint64_t asize, max_asize, psize;
	uint64_t ashift = 0;

	ASSERT(vd->vdev_open_thread == curthread ||
	    spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL);
	ASSERT(vd->vdev_state == VDEV_STATE_CLOSED ||
	    vd->vdev_state == VDEV_STATE_CANT_OPEN ||
	    vd->vdev_state == VDEV_STATE_OFFLINE);

	vd->vdev_stat.vs_aux = VDEV_AUX_NONE;
	vd->vdev_cant_read = B_FALSE;
	vd->vdev_cant_write = B_FALSE;
	vd->vdev_min_asize = vdev_get_min_asize(vd);

	/*
	 * If this vdev is not removed, check its fault status.  If it's
	 * faulted, bail out of the open.
	 */
	if (!vd->vdev_removed && vd->vdev_faulted) {
		ASSERT(vd->vdev_children == 0);
		ASSERT(vd->vdev_label_aux == VDEV_AUX_ERR_EXCEEDED ||
		    vd->vdev_label_aux == VDEV_AUX_EXTERNAL);
		vdev_set_state(vd, B_TRUE, VDEV_STATE_FAULTED,
		    vd->vdev_label_aux);
		return (SET_ERROR(ENXIO));
	} else if (vd->vdev_offline) {
		ASSERT(vd->vdev_children == 0);
		vdev_set_state(vd, B_TRUE, VDEV_STATE_OFFLINE, VDEV_AUX_NONE);
		return (SET_ERROR(ENXIO));
	}

	error = vd->vdev_ops->vdev_op_open(vd, &osize, &max_osize, &ashift);

	/*
	 * Reset the vdev_reopening flag so that we actually close
	 * the vdev on error.
	 */
	vd->vdev_reopening = B_FALSE;
	if (zio_injection_enabled && error == 0)
		error = zio_handle_device_injection(vd, NULL, ENXIO);

	if (error) {
		if (vd->vdev_removed &&
		    vd->vdev_stat.vs_aux != VDEV_AUX_OPEN_FAILED)
			vd->vdev_removed = B_FALSE;

		if (vd->vdev_stat.vs_aux == VDEV_AUX_CHILDREN_OFFLINE) {
			vdev_set_state(vd, B_TRUE, VDEV_STATE_OFFLINE,
			    vd->vdev_stat.vs_aux);
		} else {
			vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
			    vd->vdev_stat.vs_aux);
		}
		return (error);
	}

	vd->vdev_removed = B_FALSE;

	/*
	 * Recheck the faulted flag now that we have confirmed that
	 * the vdev is accessible.  If we're faulted, bail.
	 */
	if (vd->vdev_faulted) {
		ASSERT(vd->vdev_children == 0);
		ASSERT(vd->vdev_label_aux == VDEV_AUX_ERR_EXCEEDED ||
		    vd->vdev_label_aux == VDEV_AUX_EXTERNAL);
		vdev_set_state(vd, B_TRUE, VDEV_STATE_FAULTED,
		    vd->vdev_label_aux);
		return (SET_ERROR(ENXIO));
	}

	if (vd->vdev_degraded) {
		ASSERT(vd->vdev_children == 0);
		vdev_set_state(vd, B_TRUE, VDEV_STATE_DEGRADED,
		    VDEV_AUX_ERR_EXCEEDED);
	} else {
		vdev_set_state(vd, B_TRUE, VDEV_STATE_HEALTHY, 0);
	}

	/*
	 * For hole or missing vdevs we just return success.
	 */
	if (vd->vdev_ishole || vd->vdev_ops == &vdev_missing_ops)
		return (0);

	for (int c = 0; c < vd->vdev_children; c++) {
		if (vd->vdev_child[c]->vdev_state != VDEV_STATE_HEALTHY) {
			vdev_set_state(vd, B_TRUE, VDEV_STATE_DEGRADED,
			    VDEV_AUX_NONE);
			break;
		}
	}

	osize = P2ALIGN(osize, (uint64_t)sizeof (vdev_label_t));
	max_osize = P2ALIGN(max_osize, (uint64_t)sizeof (vdev_label_t));

	if (vd->vdev_children == 0) {
		if (osize < SPA_MINDEVSIZE) {
			vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
			    VDEV_AUX_TOO_SMALL);
			return (SET_ERROR(EOVERFLOW));
		}
		psize = osize;
		asize = osize - (VDEV_LABEL_START_SIZE + VDEV_LABEL_END_SIZE);
		max_asize = max_osize - (VDEV_LABEL_START_SIZE +
		    VDEV_LABEL_END_SIZE);
	} else {
		if (vd->vdev_parent != NULL && osize < SPA_MINDEVSIZE -
		    (VDEV_LABEL_START_SIZE + VDEV_LABEL_END_SIZE)) {
			vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
			    VDEV_AUX_TOO_SMALL);
			return (SET_ERROR(EOVERFLOW));
		}
		psize = 0;
		asize = osize;
		max_asize = max_osize;
	}

	vd->vdev_psize = psize;

	/*
	 * Make sure the allocatable size hasn't shrunk too much.
	 */
	if (asize < vd->vdev_min_asize) {
		vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_BAD_LABEL);
		return (SET_ERROR(EINVAL));
	}

	if (vd->vdev_asize == 0) {
		/*
		 * This is the first-ever open, so use the computed values.
		 * For compatibility, a different ashift can be requested.
		 */
		vd->vdev_asize = asize;
		vd->vdev_max_asize = max_asize;
		if (vd->vdev_ashift == 0) {
			vd->vdev_ashift = ashift; /* use detected value */
		}
		if (vd->vdev_ashift != 0 && (vd->vdev_ashift < ASHIFT_MIN ||
		    vd->vdev_ashift > ASHIFT_MAX)) {
			vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
			    VDEV_AUX_BAD_ASHIFT);
			return (SET_ERROR(EDOM));
		}
	} else {
		/*
		 * Detect if the alignment requirement has increased.
		 * We don't want to make the pool unavailable, just
		 * post an event instead.
		 */
		if (ashift > vd->vdev_top->vdev_ashift &&
		    vd->vdev_ops->vdev_op_leaf) {
			zfs_ereport_post(FM_EREPORT_ZFS_DEVICE_BAD_ASHIFT,
			    spa, vd, NULL, NULL, 0, 0);
		}

		vd->vdev_max_asize = max_asize;
	}

	/*
	 * If all children are healthy we update asize if either:
	 * The asize has increased, due to a device expansion caused by dynamic
	 * LUN growth or vdev replacement, and automatic expansion is enabled;
	 * making the additional space available.
	 *
	 * The asize has decreased, due to a device shrink usually caused by a
	 * vdev replace with a smaller device. This ensures that calculations
	 * based on max_asize and asize e.g. esize are always valid. It's safe
	 * to do this as we've already validated that asize is greater than
	 * vdev_min_asize.
	 */
	if (vd->vdev_state == VDEV_STATE_HEALTHY &&
	    ((asize > vd->vdev_asize &&
	    (vd->vdev_expanding || spa->spa_autoexpand)) ||
	    (asize < vd->vdev_asize)))
		vd->vdev_asize = asize;

	vdev_set_min_asize(vd);

	/*
	 * Ensure we can issue some IO before declaring the
	 * vdev open for business.
	 */
	if (vd->vdev_ops->vdev_op_leaf &&
	    (error = zio_wait(vdev_probe(vd, NULL))) != 0) {
		vdev_set_state(vd, B_TRUE, VDEV_STATE_FAULTED,
		    VDEV_AUX_ERR_EXCEEDED);
		return (error);
	}

	/*
	 * Track the min and max ashift values for normal data devices.
	 *
	 * DJB - TBD these should perhaps be tracked per allocation class
	 * (e.g. spa_min_ashift is used to round up post compression buffers)
	 */
	if (vd->vdev_top == vd && vd->vdev_ashift != 0 &&
	    vd->vdev_alloc_bias == VDEV_BIAS_NONE &&
	    vd->vdev_aux == NULL) {
		if (vd->vdev_ashift > spa->spa_max_ashift)
			spa->spa_max_ashift = vd->vdev_ashift;
		if (vd->vdev_ashift < spa->spa_min_ashift)
			spa->spa_min_ashift = vd->vdev_ashift;
	}

	/*
	 * If a leaf vdev has a DTL, and seems healthy, then kick off a
	 * resilver.  But don't do this if we are doing a reopen for a scrub,
	 * since this would just restart the scrub we are already doing.
	 */
	if (vd->vdev_ops->vdev_op_leaf && !spa->spa_scrub_reopen &&
	    vdev_resilver_needed(vd, NULL, NULL)) {
		if (dsl_scan_resilvering(spa->spa_dsl_pool) &&
		    spa_feature_is_enabled(spa, SPA_FEATURE_RESILVER_DEFER))
			vdev_set_deferred_resilver(spa, vd);
		else
			spa_async_request(spa, SPA_ASYNC_RESILVER);
	}

	return (0);
}

/*
 * Called once the vdevs are all opened, this routine validates the label
 * contents.  This needs to be done before vdev_load() so that we don't
 * inadvertently do repair I/Os to the wrong device.
 *
 * This function will only return failure if one of the vdevs indicates that it
 * has since been destroyed or exported.  This is only possible if
 * /etc/zfs/zpool.cache was readonly at the time.  Otherwise, the vdev state
 * will be updated but the function will return 0.
 */
int
vdev_validate(vdev_t *vd)
{
	spa_t *spa = vd->vdev_spa;
	nvlist_t *label;
	uint64_t guid = 0, aux_guid = 0, top_guid;
	uint64_t state;
	nvlist_t *nvl;
	uint64_t txg;

	if (vdev_validate_skip)
		return (0);

	for (uint64_t c = 0; c < vd->vdev_children; c++)
		if (vdev_validate(vd->vdev_child[c]) != 0)
			return (SET_ERROR(EBADF));

	/*
	 * If the device has already failed, or was marked offline, don't do
	 * any further validation.  Otherwise, label I/O will fail and we will
	 * overwrite the previous state.
	 */
	if (!vd->vdev_ops->vdev_op_leaf || !vdev_readable(vd))
		return (0);

	/*
	 * If we are performing an extreme rewind, we allow for a label that
	 * was modified at a point after the current txg.
	 * If config lock is not held do not check for the txg. spa_sync could
	 * be updating the vdev's label before updating spa_last_synced_txg.
	 */
	if (spa->spa_extreme_rewind || spa_last_synced_txg(spa) == 0 ||
	    spa_config_held(spa, SCL_CONFIG, RW_WRITER) != SCL_CONFIG)
		txg = UINT64_MAX;
	else
		txg = spa_last_synced_txg(spa);

	if ((label = vdev_label_read_config(vd, txg)) == NULL) {
		vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_BAD_LABEL);
		vdev_dbgmsg(vd, "vdev_validate: failed reading config for "
		    "txg %llu", (u_longlong_t)txg);
		return (0);
	}
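
	/*
	 * Note that failing to read the label is not fatal here: the vdev
	 * is marked CANT_OPEN, but per the contract described above we
	 * still return 0.
	 */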

	/*
	 * Determine if this vdev has been split off into another
	 * pool.  If so, then refuse to open it.
	 */
	if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_SPLIT_GUID,
	    &aux_guid) == 0 && aux_guid == spa_guid(spa)) {
		vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_SPLIT_POOL);
		nvlist_free(label);
		vdev_dbgmsg(vd, "vdev_validate: vdev split into other pool");
		return (0);
	}

	if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_GUID, &guid) != 0) {
		vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_CORRUPT_DATA);
		nvlist_free(label);
		vdev_dbgmsg(vd, "vdev_validate: '%s' missing from label",
		    ZPOOL_CONFIG_POOL_GUID);
		return (0);
	}

	/*
	 * If config is not trusted then ignore the spa guid check. This is
	 * necessary because if the machine crashed during a re-guid the new
	 * guid might have been written to all of the vdev labels, but not the
	 * cached config. The check will be performed again once we have the
	 * trusted config from the MOS.
	 */
	if (spa->spa_trust_config && guid != spa_guid(spa)) {
		vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_CORRUPT_DATA);
		nvlist_free(label);
		vdev_dbgmsg(vd, "vdev_validate: vdev label pool_guid doesn't "
		    "match config (%llu != %llu)", (u_longlong_t)guid,
		    (u_longlong_t)spa_guid(spa));
		return (0);
	}

	if (nvlist_lookup_nvlist(label, ZPOOL_CONFIG_VDEV_TREE, &nvl)
	    != 0 || nvlist_lookup_uint64(nvl, ZPOOL_CONFIG_ORIG_GUID,
	    &aux_guid) != 0)
		aux_guid = 0;

	if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_GUID, &guid) != 0) {
		vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_CORRUPT_DATA);
		nvlist_free(label);
		vdev_dbgmsg(vd, "vdev_validate: '%s' missing from label",
		    ZPOOL_CONFIG_GUID);
		return (0);
	}

	if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_TOP_GUID, &top_guid)
	    != 0) {
		vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_CORRUPT_DATA);
		nvlist_free(label);
		vdev_dbgmsg(vd, "vdev_validate: '%s' missing from label",
		    ZPOOL_CONFIG_TOP_GUID);
		return (0);
	}
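
	/*
	 * The lookups above only establish that the label is well formed;
	 * the guid comparisons below establish that it actually belongs
	 * to this vdev.
	 */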
1905 */ 1906 if (vd->vdev_guid != guid && vd->vdev_guid != aux_guid) { 1907 boolean_t mismatch = B_FALSE; 1908 if (spa->spa_trust_config && !spa->spa_extreme_rewind) { 1909 if (vd != vd->vdev_top || vd->vdev_guid != top_guid) 1910 mismatch = B_TRUE; 1911 } else { 1912 if (vd->vdev_guid != top_guid && 1913 vd->vdev_top->vdev_guid != guid) 1914 mismatch = B_TRUE; 1915 } 1916 1917 if (mismatch) { 1918 vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN, 1919 VDEV_AUX_CORRUPT_DATA); 1920 nvlist_free(label); 1921 vdev_dbgmsg(vd, "vdev_validate: config guid " 1922 "doesn't match label guid"); 1923 vdev_dbgmsg(vd, "CONFIG: guid %llu, top_guid %llu", 1924 (u_longlong_t)vd->vdev_guid, 1925 (u_longlong_t)vd->vdev_top->vdev_guid); 1926 vdev_dbgmsg(vd, "LABEL: guid %llu, top_guid %llu, " 1927 "aux_guid %llu", (u_longlong_t)guid, 1928 (u_longlong_t)top_guid, (u_longlong_t)aux_guid); 1929 return (0); 1930 } 1931 } 1932 1933 if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_STATE, 1934 &state) != 0) { 1935 vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN, 1936 VDEV_AUX_CORRUPT_DATA); 1937 nvlist_free(label); 1938 vdev_dbgmsg(vd, "vdev_validate: '%s' missing from label", 1939 ZPOOL_CONFIG_POOL_STATE); 1940 return (0); 1941 } 1942 1943 nvlist_free(label); 1944 1945 /* 1946 * If this is a verbatim import, no need to check the 1947 * state of the pool. 1948 */ 1949 if (!(spa->spa_import_flags & ZFS_IMPORT_VERBATIM) && 1950 spa_load_state(spa) == SPA_LOAD_OPEN && 1951 state != POOL_STATE_ACTIVE) { 1952 vdev_dbgmsg(vd, "vdev_validate: invalid pool state (%llu) " 1953 "for spa %s", (u_longlong_t)state, spa->spa_name); 1954 return (SET_ERROR(EBADF)); 1955 } 1956 1957 /* 1958 * If we were able to open and validate a vdev that was 1959 * previously marked permanently unavailable, clear that state 1960 * now. 1961 */ 1962 if (vd->vdev_not_present) 1963 vd->vdev_not_present = 0; 1964 1965 return (0); 1966 } 1967 1968 static void 1969 vdev_copy_path_impl(vdev_t *svd, vdev_t *dvd) 1970 { 1971 if (svd->vdev_path != NULL && dvd->vdev_path != NULL) { 1972 if (strcmp(svd->vdev_path, dvd->vdev_path) != 0) { 1973 zfs_dbgmsg("vdev_copy_path: vdev %llu: path changed " 1974 "from '%s' to '%s'", (u_longlong_t)dvd->vdev_guid, 1975 dvd->vdev_path, svd->vdev_path); 1976 spa_strfree(dvd->vdev_path); 1977 dvd->vdev_path = spa_strdup(svd->vdev_path); 1978 } 1979 } else if (svd->vdev_path != NULL) { 1980 dvd->vdev_path = spa_strdup(svd->vdev_path); 1981 zfs_dbgmsg("vdev_copy_path: vdev %llu: path set to '%s'", 1982 (u_longlong_t)dvd->vdev_guid, dvd->vdev_path); 1983 } 1984 } 1985 1986 /* 1987 * Recursively copy vdev paths from one vdev to another. Source and destination 1988 * vdev trees must have same geometry otherwise return error. Intended to copy 1989 * paths from userland config into MOS config. 
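 *
 * For example, if a disk recorded in the MOS config as
 * '/dev/dsk/c1t0d0s0' (a hypothetical path) is imported under a new
 * name, the userland config carries the new path and
 * vdev_copy_path_impl() above logs the change and updates the in-core
 * copy that will later be synced out to the MOS.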
1990 */ 1991 int 1992 vdev_copy_path_strict(vdev_t *svd, vdev_t *dvd) 1993 { 1994 if ((svd->vdev_ops == &vdev_missing_ops) || 1995 (svd->vdev_ishole && dvd->vdev_ishole) || 1996 (dvd->vdev_ops == &vdev_indirect_ops)) 1997 return (0); 1998 1999 if (svd->vdev_ops != dvd->vdev_ops) { 2000 vdev_dbgmsg(svd, "vdev_copy_path: vdev type mismatch: %s != %s", 2001 svd->vdev_ops->vdev_op_type, dvd->vdev_ops->vdev_op_type); 2002 return (SET_ERROR(EINVAL)); 2003 } 2004 2005 if (svd->vdev_guid != dvd->vdev_guid) { 2006 vdev_dbgmsg(svd, "vdev_copy_path: guids mismatch (%llu != " 2007 "%llu)", (u_longlong_t)svd->vdev_guid, 2008 (u_longlong_t)dvd->vdev_guid); 2009 return (SET_ERROR(EINVAL)); 2010 } 2011 2012 if (svd->vdev_children != dvd->vdev_children) { 2013 vdev_dbgmsg(svd, "vdev_copy_path: children count mismatch: " 2014 "%llu != %llu", (u_longlong_t)svd->vdev_children, 2015 (u_longlong_t)dvd->vdev_children); 2016 return (SET_ERROR(EINVAL)); 2017 } 2018 2019 for (uint64_t i = 0; i < svd->vdev_children; i++) { 2020 int error = vdev_copy_path_strict(svd->vdev_child[i], 2021 dvd->vdev_child[i]); 2022 if (error != 0) 2023 return (error); 2024 } 2025 2026 if (svd->vdev_ops->vdev_op_leaf) 2027 vdev_copy_path_impl(svd, dvd); 2028 2029 return (0); 2030 } 2031 2032 static void 2033 vdev_copy_path_search(vdev_t *stvd, vdev_t *dvd) 2034 { 2035 ASSERT(stvd->vdev_top == stvd); 2036 ASSERT3U(stvd->vdev_id, ==, dvd->vdev_top->vdev_id); 2037 2038 for (uint64_t i = 0; i < dvd->vdev_children; i++) { 2039 vdev_copy_path_search(stvd, dvd->vdev_child[i]); 2040 } 2041 2042 if (!dvd->vdev_ops->vdev_op_leaf || !vdev_is_concrete(dvd)) 2043 return; 2044 2045 /* 2046 * The idea here is that while a vdev can shift positions within 2047 * a top vdev (when replacing, attaching mirror, etc.) it cannot 2048 * step outside of it. 2049 */ 2050 vdev_t *vd = vdev_lookup_by_guid(stvd, dvd->vdev_guid); 2051 2052 if (vd == NULL || vd->vdev_ops != dvd->vdev_ops) 2053 return; 2054 2055 ASSERT(vd->vdev_ops->vdev_op_leaf); 2056 2057 vdev_copy_path_impl(vd, dvd); 2058 } 2059 2060 /* 2061 * Recursively copy vdev paths from one root vdev to another. Source and 2062 * destination vdev trees may differ in geometry. For each destination leaf 2063 * vdev, search a vdev with the same guid and top vdev id in the source. 2064 * Intended to copy paths from userland config into MOS config. 2065 */ 2066 void 2067 vdev_copy_path_relaxed(vdev_t *srvd, vdev_t *drvd) 2068 { 2069 uint64_t children = MIN(srvd->vdev_children, drvd->vdev_children); 2070 ASSERT(srvd->vdev_ops == &vdev_root_ops); 2071 ASSERT(drvd->vdev_ops == &vdev_root_ops); 2072 2073 for (uint64_t i = 0; i < children; i++) { 2074 vdev_copy_path_search(srvd->vdev_child[i], 2075 drvd->vdev_child[i]); 2076 } 2077 } 2078 2079 /* 2080 * Close a virtual device. 2081 */ 2082 void 2083 vdev_close(vdev_t *vd) 2084 { 2085 spa_t *spa = vd->vdev_spa; 2086 vdev_t *pvd = vd->vdev_parent; 2087 2088 ASSERT(spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL); 2089 2090 /* 2091 * If our parent is reopening, then we are as well, unless we are 2092 * going offline. 2093 */ 2094 if (pvd != NULL && pvd->vdev_reopening) 2095 vd->vdev_reopening = (pvd->vdev_reopening && !vd->vdev_offline); 2096 2097 vd->vdev_ops->vdev_op_close(vd); 2098 2099 vdev_cache_purge(vd); 2100 2101 /* 2102 * We record the previous state before we close it, so that if we are 2103 * doing a reopen(), we don't generate FMA ereports if we notice that 2104 * it's still faulted. 
2105 */ 2106 vd->vdev_prevstate = vd->vdev_state; 2107 2108 if (vd->vdev_offline) 2109 vd->vdev_state = VDEV_STATE_OFFLINE; 2110 else 2111 vd->vdev_state = VDEV_STATE_CLOSED; 2112 vd->vdev_stat.vs_aux = VDEV_AUX_NONE; 2113 } 2114 2115 void 2116 vdev_hold(vdev_t *vd) 2117 { 2118 spa_t *spa = vd->vdev_spa; 2119 2120 ASSERT(spa_is_root(spa)); 2121 if (spa->spa_state == POOL_STATE_UNINITIALIZED) 2122 return; 2123 2124 for (int c = 0; c < vd->vdev_children; c++) 2125 vdev_hold(vd->vdev_child[c]); 2126 2127 if (vd->vdev_ops->vdev_op_leaf) 2128 vd->vdev_ops->vdev_op_hold(vd); 2129 } 2130 2131 void 2132 vdev_rele(vdev_t *vd) 2133 { 2134 spa_t *spa = vd->vdev_spa; 2135 2136 ASSERT(spa_is_root(spa)); 2137 for (int c = 0; c < vd->vdev_children; c++) 2138 vdev_rele(vd->vdev_child[c]); 2139 2140 if (vd->vdev_ops->vdev_op_leaf) 2141 vd->vdev_ops->vdev_op_rele(vd); 2142 } 2143 2144 /* 2145 * Reopen all interior vdevs and any unopened leaves. We don't actually 2146 * reopen leaf vdevs which had previously been opened as they might deadlock 2147 * on the spa_config_lock. Instead we only obtain the leaf's physical size. 2148 * If the leaf has never been opened then open it, as usual. 2149 */ 2150 void 2151 vdev_reopen(vdev_t *vd) 2152 { 2153 spa_t *spa = vd->vdev_spa; 2154 2155 ASSERT(spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL); 2156 2157 /* set the reopening flag unless we're taking the vdev offline */ 2158 vd->vdev_reopening = !vd->vdev_offline; 2159 vdev_close(vd); 2160 (void) vdev_open(vd); 2161 2162 /* 2163 * Call vdev_validate() here to make sure we have the same device. 2164 * Otherwise, a device with an invalid label could be successfully 2165 * opened in response to vdev_reopen(). 2166 */ 2167 if (vd->vdev_aux) { 2168 (void) vdev_validate_aux(vd); 2169 if (vdev_readable(vd) && vdev_writeable(vd) && 2170 vd->vdev_aux == &spa->spa_l2cache && 2171 !l2arc_vdev_present(vd)) 2172 l2arc_add_vdev(spa, vd); 2173 } else { 2174 (void) vdev_validate(vd); 2175 } 2176 2177 /* 2178 * Reassess parent vdev's health. 2179 */ 2180 vdev_propagate_state(vd); 2181 } 2182 2183 int 2184 vdev_create(vdev_t *vd, uint64_t txg, boolean_t isreplacing) 2185 { 2186 int error; 2187 2188 /* 2189 * Normally, partial opens (e.g. of a mirror) are allowed. 2190 * For a create, however, we want to fail the request if 2191 * there are any components we can't open. 2192 */ 2193 error = vdev_open(vd); 2194 2195 if (error || vd->vdev_state != VDEV_STATE_HEALTHY) { 2196 vdev_close(vd); 2197 return (error ? error : ENXIO); 2198 } 2199 2200 /* 2201 * Recursively load DTLs and initialize all labels. 2202 */ 2203 if ((error = vdev_dtl_load(vd)) != 0 || 2204 (error = vdev_label_init(vd, txg, isreplacing ? 2205 VDEV_LABEL_REPLACE : VDEV_LABEL_CREATE)) != 0) { 2206 vdev_close(vd); 2207 return (error); 2208 } 2209 2210 return (0); 2211 } 2212 2213 void 2214 vdev_metaslab_set_size(vdev_t *vd) 2215 { 2216 uint64_t asize = vd->vdev_asize; 2217 uint64_t ms_count = asize >> zfs_vdev_default_ms_shift; 2218 uint64_t ms_shift; 2219 2220 /* BEGIN CSTYLED */ 2221 /* 2222 * There are two dimensions to the metaslab sizing calculation: 2223 * the size of the metaslab and the count of metaslabs per vdev. 
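 *
 * As a worked example of the calculation below, using the tunable
 * defaults defined earlier in this file: for a hypothetical 1TB vdev,
 * asize >> zfs_vdev_default_ms_shift yields 2048 candidate metaslabs,
 * which exceeds the target of 200, so ms_shift becomes
 * highbit64(1TB / 200) = 33 (8GB metaslabs) and the vdev ends up with
 * 2^40 >> 33 = 128 metaslabs. The power-of-two rounding of ms_shift is
 * why the table below says "~200" rather than exactly 200.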
*
2224 *
2225 * The default values used below are a good balance between memory
2226 * usage (larger metaslab size means more memory needed for loaded
2227 * metaslabs; more metaslabs means more memory needed for the
2228 * metaslab_t structs), metaslab load time (larger metaslabs take
2229 * longer to load), and metaslab sync time (more metaslabs means
2230 * more time spent syncing all of them).
2231 *
2232 * In general, we aim for zfs_vdev_default_ms_count (200) metaslabs.
2233 * The ranges of the dimensions are as follows:
2234 *
2235 *	2^29 <= ms_size <= 2^34
2236 *	  16 <= ms_count <= 131,072
2237 *
2238 * On the lower end of vdev sizes, we aim for metaslab sizes of
2239 * at least 512MB (2^29) to minimize fragmentation effects when
2240 * testing with smaller devices. However, the count constraint
2241 * of at least 16 metaslabs will override this minimum size goal.
2242 *
2243 * On the upper end of vdev sizes, we aim for a maximum metaslab
2244 * size of 16GB. However, we will cap the total count to 2^17
2245 * metaslabs to keep our memory footprint in check and let the
2246 * metaslab size grow from there if that limit is hit.
2247 *
2248 * The net effect of applying the above constraints is summarized below.
2249 *
2250 *	vdev size	metaslab count
2251 *	--------------|-----------------
2252 *	    < 8GB	~16
2253 *	 8GB - 100GB	one per 512MB
2254 *	100GB - 3TB	~200
2255 *	 3TB - 2PB	one per 16GB
2256 *	    > 2PB	~131,072
2257 *	--------------------------------
2258 *
2259 * Finally, note that all of the above calculate the initial
2260 * number of metaslabs. Expanding a top-level vdev will result
2261 * in additional metaslabs being allocated, making it possible
2262 * to exceed the zfs_vdev_ms_count_limit.
2263 */
2264 /* END CSTYLED */
2265
2266     if (ms_count < zfs_vdev_min_ms_count)
2267         ms_shift = highbit64(asize / zfs_vdev_min_ms_count);
2268     else if (ms_count > zfs_vdev_default_ms_count)
2269         ms_shift = highbit64(asize / zfs_vdev_default_ms_count);
2270     else
2271         ms_shift = zfs_vdev_default_ms_shift;
2272
2273     if (ms_shift < SPA_MAXBLOCKSHIFT) {
2274         ms_shift = SPA_MAXBLOCKSHIFT;
2275     } else if (ms_shift > zfs_vdev_max_ms_shift) {
2276         ms_shift = zfs_vdev_max_ms_shift;
2277         /* cap the total count to constrain memory footprint */
2278         if ((asize >> ms_shift) > zfs_vdev_ms_count_limit)
2279             ms_shift = highbit64(asize / zfs_vdev_ms_count_limit);
2280     }
2281
2282     vd->vdev_ms_shift = ms_shift;
2283     ASSERT3U(vd->vdev_ms_shift, >=, SPA_MAXBLOCKSHIFT);
2284 }
2285
2286 void
2287 vdev_dirty(vdev_t *vd, int flags, void *arg, uint64_t txg)
2288 {
2289     ASSERT(vd == vd->vdev_top);
2290     /* indirect vdevs don't have metaslabs or dtls */
2291     ASSERT(vdev_is_concrete(vd) || flags == 0);
2292     ASSERT(ISP2(flags));
2293     ASSERT(spa_writeable(vd->vdev_spa));
2294
2295     if (flags & VDD_METASLAB)
2296         (void) txg_list_add(&vd->vdev_ms_list, arg, txg);
2297
2298     if (flags & VDD_DTL)
2299         (void) txg_list_add(&vd->vdev_dtl_list, arg, txg);
2300
2301     (void) txg_list_add(&vd->vdev_spa->spa_vdev_txg_list, vd, txg);
2302 }
2303
2304 void
2305 vdev_dirty_leaves(vdev_t *vd, int flags, uint64_t txg)
2306 {
2307     for (int c = 0; c < vd->vdev_children; c++)
2308         vdev_dirty_leaves(vd->vdev_child[c], flags, txg);
2309
2310     if (vd->vdev_ops->vdev_op_leaf)
2311         vdev_dirty(vd->vdev_top, flags, vd, txg);
2312 }
2313
2314 /*
2315 * DTLs.
2316 *
2317 * A vdev's DTL (dirty time log) is the set of transaction groups for which
2318 * the vdev has less than perfect replication. There are four kinds of DTL:
2319 *
2320 * DTL_MISSING: txgs for which the vdev has no valid copies of the data
2321 *
2322 * DTL_PARTIAL: txgs for which data is available, but not fully replicated
2323 *
2324 * DTL_SCRUB: the txgs that could not be repaired by the last scrub; upon
2325 *	scrub completion, DTL_SCRUB replaces DTL_MISSING in the range of
2326 *	txgs that was scrubbed.
2327 *
2328 * DTL_OUTAGE: txgs which cannot currently be read, whether due to
2329 *	persistent errors or just some device being offline.
2330 *	Unlike the other three, the DTL_OUTAGE map is not generally
2331 *	maintained; it's only computed when needed, typically to
2332 *	determine whether a device can be detached.
2333 *
2334 * For leaf vdevs, DTL_MISSING and DTL_PARTIAL are identical: the device
2335 * either has the data or it doesn't.
2336 *
2337 * For interior vdevs such as mirror and RAID-Z the picture is more complex.
2338 * A vdev's DTL_PARTIAL is the union of its children's DTL_PARTIALs, because
2339 * if any child is less than fully replicated, then so is its parent.
2340 * A vdev's DTL_MISSING is a modified union of its children's DTL_MISSINGs,
2341 * comprising only those txgs which appear in more than 'maxfaults' children;
2342 * those are the txgs we don't have enough replication to read. For example,
2343 * double-parity RAID-Z can tolerate up to two missing devices (maxfaults == 2);
2344 * thus, its DTL_MISSING consists of the set of txgs that appear in more than
2345 * two child DTL_MISSING maps.
2346 *
2347 * It should be clear from the above that to compute the DTLs and outage maps
2348 * for all vdevs, it suffices to know just the leaf vdevs' DTL_MISSING maps.
2349 * Therefore, that is all we keep on disk. When loading the pool, or after
2350 * a configuration change, we generate all other DTLs from first principles.
2351 */
2352 void
2353 vdev_dtl_dirty(vdev_t *vd, vdev_dtl_type_t t, uint64_t txg, uint64_t size)
2354 {
2355     range_tree_t *rt = vd->vdev_dtl[t];
2356
2357     ASSERT(t < DTL_TYPES);
2358     ASSERT(vd != vd->vdev_spa->spa_root_vdev);
2359     ASSERT(spa_writeable(vd->vdev_spa));
2360
2361     mutex_enter(&vd->vdev_dtl_lock);
2362     if (!range_tree_contains(rt, txg, size))
2363         range_tree_add(rt, txg, size);
2364     mutex_exit(&vd->vdev_dtl_lock);
2365 }
2366
2367 boolean_t
2368 vdev_dtl_contains(vdev_t *vd, vdev_dtl_type_t t, uint64_t txg, uint64_t size)
2369 {
2370     range_tree_t *rt = vd->vdev_dtl[t];
2371     boolean_t dirty = B_FALSE;
2372
2373     ASSERT(t < DTL_TYPES);
2374     ASSERT(vd != vd->vdev_spa->spa_root_vdev);
2375
2376     /*
2377      * While we are loading the pool, the DTLs have not been loaded yet.
2378      * Ignore the DTLs and try all devices. This avoids a recursive
2379      * mutex enter on the vdev_dtl_lock, and also makes us try hard
2380      * when loading the pool (relying on the checksum to ensure that
2381      * we get the right data -- note that while loading, we are
2382      * only reading the MOS, which is always checksummed).
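     *
     * (Returning B_FALSE below means "nothing is missing here", so
     * during load every readable device is treated as a candidate
     * for the read.)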
2383      */
2384     if (vd->vdev_spa->spa_load_state != SPA_LOAD_NONE)
2385         return (B_FALSE);
2386
2387     mutex_enter(&vd->vdev_dtl_lock);
2388     if (!range_tree_is_empty(rt))
2389         dirty = range_tree_contains(rt, txg, size);
2390     mutex_exit(&vd->vdev_dtl_lock);
2391
2392     return (dirty);
2393 }
2394
2395 boolean_t
2396 vdev_dtl_empty(vdev_t *vd, vdev_dtl_type_t t)
2397 {
2398     range_tree_t *rt = vd->vdev_dtl[t];
2399     boolean_t empty;
2400
2401     mutex_enter(&vd->vdev_dtl_lock);
2402     empty = range_tree_is_empty(rt);
2403     mutex_exit(&vd->vdev_dtl_lock);
2404
2405     return (empty);
2406 }
2407
2408 /*
2409 * Returns B_TRUE if the vdev determines the offset needs to be resilvered.
2410 */
2411 boolean_t
2412 vdev_dtl_need_resilver(vdev_t *vd, uint64_t offset, size_t psize)
2413 {
2414     ASSERT(vd != vd->vdev_spa->spa_root_vdev);
2415
2416     if (vd->vdev_ops->vdev_op_need_resilver == NULL ||
2417         vd->vdev_ops->vdev_op_leaf)
2418         return (B_TRUE);
2419
2420     return (vd->vdev_ops->vdev_op_need_resilver(vd, offset, psize));
2421 }
2422
2423 /*
2424 * Returns the lowest txg in the DTL range.
2425 */
2426 static uint64_t
2427 vdev_dtl_min(vdev_t *vd)
2428 {
2429     range_seg_t *rs;
2430
2431     ASSERT(MUTEX_HELD(&vd->vdev_dtl_lock));
2432     ASSERT3U(range_tree_space(vd->vdev_dtl[DTL_MISSING]), !=, 0);
2433     ASSERT0(vd->vdev_children);
2434
2435     rs = avl_first(&vd->vdev_dtl[DTL_MISSING]->rt_root);
2436     return (rs->rs_start - 1);
2437 }
2438
2439 /*
2440 * Returns the highest txg in the DTL.
2441 */
2442 static uint64_t
2443 vdev_dtl_max(vdev_t *vd)
2444 {
2445     range_seg_t *rs;
2446
2447     ASSERT(MUTEX_HELD(&vd->vdev_dtl_lock));
2448     ASSERT3U(range_tree_space(vd->vdev_dtl[DTL_MISSING]), !=, 0);
2449     ASSERT0(vd->vdev_children);
2450
2451     rs = avl_last(&vd->vdev_dtl[DTL_MISSING]->rt_root);
2452     return (rs->rs_end);
2453 }
2454
2455 /*
2456 * Determine if a resilvering vdev should remove any DTL entries from
2457 * its range. If the vdev was resilvering for the entire duration of the
2458 * scan then it should excise that range from its DTLs. Otherwise, this
2459 * vdev is considered partially resilvered and should leave its DTL
2460 * entries intact. The comment in vdev_dtl_reassess() describes how we
2461 * excise the DTLs.
2462 */
2463 static boolean_t
2464 vdev_dtl_should_excise(vdev_t *vd)
2465 {
2466     spa_t *spa = vd->vdev_spa;
2467     dsl_scan_t *scn = spa->spa_dsl_pool->dp_scan;
2468
2469     ASSERT0(scn->scn_phys.scn_errors);
2470     ASSERT0(vd->vdev_children);
2471
2472     if (vd->vdev_state < VDEV_STATE_DEGRADED)
2473         return (B_FALSE);
2474
2475     if (vd->vdev_resilver_deferred)
2476         return (B_FALSE);
2477
2478     if (vd->vdev_resilver_txg == 0 ||
2479         range_tree_is_empty(vd->vdev_dtl[DTL_MISSING]))
2480         return (B_TRUE);
2481
2482     /*
2483      * When a resilver is initiated the scan will assign the scn_max_txg
2484      * value to the highest txg value that exists in all DTLs. If this
2485      * device's max DTL is not part of this scan (i.e. it is not in
2486      * the range (scn_min_txg, scn_max_txg]) then it is not eligible
2487      * for excision.
2488      */
2489     if (vdev_dtl_max(vd) <= scn->scn_phys.scn_max_txg) {
2490         ASSERT3U(scn->scn_phys.scn_min_txg, <=, vdev_dtl_min(vd));
2491         ASSERT3U(scn->scn_phys.scn_min_txg, <, vd->vdev_resilver_txg);
2492         ASSERT3U(vd->vdev_resilver_txg, <=, scn->scn_phys.scn_max_txg);
2493         return (B_TRUE);
2494     }
2495     return (B_FALSE);
2496 }
2497
2498 /*
2499 * Reassess DTLs after a config change or scrub completion.
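 *
 * To make the reference-tree arithmetic below concrete: a txg in
 * [0, scrub_txg) that appears in both DTL_MISSING and DTL_SCRUB ends up
 * with refcnt 1 - 1 + 2 = 2 and stays missing; a txg in [0, scrub_txg)
 * that appears only in DTL_MISSING ends up with refcnt 1 - 1 = 0 and is
 * excised (the scrub repaired it); a txg at or above scrub_txg keeps
 * refcnt 1 and stays missing.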
2500 */
2501 void
2502 vdev_dtl_reassess(vdev_t *vd, uint64_t txg, uint64_t scrub_txg, int scrub_done)
2503 {
2504     spa_t *spa = vd->vdev_spa;
2505     avl_tree_t reftree;
2506     int minref;
2507
2508     ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0);
2509
2510     for (int c = 0; c < vd->vdev_children; c++)
2511         vdev_dtl_reassess(vd->vdev_child[c], txg,
2512             scrub_txg, scrub_done);
2513
2514     if (vd == spa->spa_root_vdev || !vdev_is_concrete(vd) || vd->vdev_aux)
2515         return;
2516
2517     if (vd->vdev_ops->vdev_op_leaf) {
2518         dsl_scan_t *scn = spa->spa_dsl_pool->dp_scan;
2519
2520         mutex_enter(&vd->vdev_dtl_lock);
2521
2522         /*
2523          * If we've completed a scan cleanly then determine
2524          * if this vdev should remove any DTLs. We only want to
2525          * excise regions on vdevs that were available during
2526          * the entire duration of this scan.
2527          */
2528         if (scrub_txg != 0 &&
2529             (spa->spa_scrub_started ||
2530             (scn != NULL && scn->scn_phys.scn_errors == 0)) &&
2531             vdev_dtl_should_excise(vd)) {
2532             /*
2533              * We completed a scrub up to scrub_txg. If we
2534              * did it without rebooting, then the scrub dtl
2535              * will be valid, so excise the old region and
2536              * fold in the scrub dtl. Otherwise, leave the
2537              * dtl as-is if there was an error.
2538              *
2539              * There's a little trick here: to excise the beginning
2540              * of the DTL_MISSING map, we put it into a reference
2541              * tree and then add a segment with refcnt -1 that
2542              * covers the range [0, scrub_txg). This means
2543              * that each txg in that range has refcnt -1 or 0.
2544              * We then add DTL_SCRUB with a refcnt of 2, so that
2545              * entries in the range [0, scrub_txg) will have a
2546              * positive refcnt -- either 1 or 2. We then convert
2547              * the reference tree into the new DTL_MISSING map.
2548              */
2549             space_reftree_create(&reftree);
2550             space_reftree_add_map(&reftree,
2551                 vd->vdev_dtl[DTL_MISSING], 1);
2552             space_reftree_add_seg(&reftree, 0, scrub_txg, -1);
2553             space_reftree_add_map(&reftree,
2554                 vd->vdev_dtl[DTL_SCRUB], 2);
2555             space_reftree_generate_map(&reftree,
2556                 vd->vdev_dtl[DTL_MISSING], 1);
2557             space_reftree_destroy(&reftree);
2558         }
2559         range_tree_vacate(vd->vdev_dtl[DTL_PARTIAL], NULL, NULL);
2560         range_tree_walk(vd->vdev_dtl[DTL_MISSING],
2561             range_tree_add, vd->vdev_dtl[DTL_PARTIAL]);
2562         if (scrub_done)
2563             range_tree_vacate(vd->vdev_dtl[DTL_SCRUB], NULL, NULL);
2564         range_tree_vacate(vd->vdev_dtl[DTL_OUTAGE], NULL, NULL);
2565         if (!vdev_readable(vd))
2566             range_tree_add(vd->vdev_dtl[DTL_OUTAGE], 0, -1ULL);
2567         else
2568             range_tree_walk(vd->vdev_dtl[DTL_MISSING],
2569                 range_tree_add, vd->vdev_dtl[DTL_OUTAGE]);
2570
2571         /*
2572          * If the vdev was resilvering and no longer has any
2573          * DTLs then reset its resilvering flag.
2574          */
2575         if (vd->vdev_resilver_txg != 0 &&
2576             range_tree_is_empty(vd->vdev_dtl[DTL_MISSING]) &&
2577             range_tree_is_empty(vd->vdev_dtl[DTL_OUTAGE]))
2578             vd->vdev_resilver_txg = 0;
2579
2580         mutex_exit(&vd->vdev_dtl_lock);
2581
2582         if (txg != 0)
2583             vdev_dirty(vd->vdev_top, VDD_DTL, vd, txg);
2584         return;
2585     }
2586
2587     mutex_enter(&vd->vdev_dtl_lock);
2588     for (int t = 0; t < DTL_TYPES; t++) {
2589         /* account for child's outage in parent's missing map */
2590         int s = (t == DTL_MISSING) ? DTL_OUTAGE : t;
2591         if (t == DTL_SCRUB)
2592             continue;			/* leaf vdevs only */
2593         if (t == DTL_PARTIAL)
2594             minref = 1;			/* i.e.
non-zero */ 2595 else if (vd->vdev_nparity != 0) 2596 minref = vd->vdev_nparity + 1; /* RAID-Z */ 2597 else 2598 minref = vd->vdev_children; /* any kind of mirror */ 2599 space_reftree_create(&reftree); 2600 for (int c = 0; c < vd->vdev_children; c++) { 2601 vdev_t *cvd = vd->vdev_child[c]; 2602 mutex_enter(&cvd->vdev_dtl_lock); 2603 space_reftree_add_map(&reftree, cvd->vdev_dtl[s], 1); 2604 mutex_exit(&cvd->vdev_dtl_lock); 2605 } 2606 space_reftree_generate_map(&reftree, vd->vdev_dtl[t], minref); 2607 space_reftree_destroy(&reftree); 2608 } 2609 mutex_exit(&vd->vdev_dtl_lock); 2610 } 2611 2612 int 2613 vdev_dtl_load(vdev_t *vd) 2614 { 2615 spa_t *spa = vd->vdev_spa; 2616 objset_t *mos = spa->spa_meta_objset; 2617 int error = 0; 2618 2619 if (vd->vdev_ops->vdev_op_leaf && vd->vdev_dtl_object != 0) { 2620 ASSERT(vdev_is_concrete(vd)); 2621 2622 error = space_map_open(&vd->vdev_dtl_sm, mos, 2623 vd->vdev_dtl_object, 0, -1ULL, 0); 2624 if (error) 2625 return (error); 2626 ASSERT(vd->vdev_dtl_sm != NULL); 2627 2628 mutex_enter(&vd->vdev_dtl_lock); 2629 error = space_map_load(vd->vdev_dtl_sm, 2630 vd->vdev_dtl[DTL_MISSING], SM_ALLOC); 2631 mutex_exit(&vd->vdev_dtl_lock); 2632 2633 return (error); 2634 } 2635 2636 for (int c = 0; c < vd->vdev_children; c++) { 2637 error = vdev_dtl_load(vd->vdev_child[c]); 2638 if (error != 0) 2639 break; 2640 } 2641 2642 return (error); 2643 } 2644 2645 static void 2646 vdev_zap_allocation_data(vdev_t *vd, dmu_tx_t *tx) 2647 { 2648 spa_t *spa = vd->vdev_spa; 2649 objset_t *mos = spa->spa_meta_objset; 2650 vdev_alloc_bias_t alloc_bias = vd->vdev_alloc_bias; 2651 const char *string; 2652 2653 ASSERT(alloc_bias != VDEV_BIAS_NONE); 2654 2655 string = 2656 (alloc_bias == VDEV_BIAS_LOG) ? VDEV_ALLOC_BIAS_LOG : 2657 (alloc_bias == VDEV_BIAS_SPECIAL) ? VDEV_ALLOC_BIAS_SPECIAL : 2658 (alloc_bias == VDEV_BIAS_DEDUP) ? 
VDEV_ALLOC_BIAS_DEDUP : NULL; 2659 2660 ASSERT(string != NULL); 2661 VERIFY0(zap_add(mos, vd->vdev_top_zap, VDEV_TOP_ZAP_ALLOCATION_BIAS, 2662 1, strlen(string) + 1, string, tx)); 2663 2664 if (alloc_bias == VDEV_BIAS_SPECIAL || alloc_bias == VDEV_BIAS_DEDUP) { 2665 spa_activate_allocation_classes(spa, tx); 2666 } 2667 } 2668 2669 void 2670 vdev_destroy_unlink_zap(vdev_t *vd, uint64_t zapobj, dmu_tx_t *tx) 2671 { 2672 spa_t *spa = vd->vdev_spa; 2673 2674 VERIFY0(zap_destroy(spa->spa_meta_objset, zapobj, tx)); 2675 VERIFY0(zap_remove_int(spa->spa_meta_objset, spa->spa_all_vdev_zaps, 2676 zapobj, tx)); 2677 } 2678 2679 uint64_t 2680 vdev_create_link_zap(vdev_t *vd, dmu_tx_t *tx) 2681 { 2682 spa_t *spa = vd->vdev_spa; 2683 uint64_t zap = zap_create(spa->spa_meta_objset, DMU_OTN_ZAP_METADATA, 2684 DMU_OT_NONE, 0, tx); 2685 2686 ASSERT(zap != 0); 2687 VERIFY0(zap_add_int(spa->spa_meta_objset, spa->spa_all_vdev_zaps, 2688 zap, tx)); 2689 2690 return (zap); 2691 } 2692 2693 void 2694 vdev_construct_zaps(vdev_t *vd, dmu_tx_t *tx) 2695 { 2696 if (vd->vdev_ops != &vdev_hole_ops && 2697 vd->vdev_ops != &vdev_missing_ops && 2698 vd->vdev_ops != &vdev_root_ops && 2699 !vd->vdev_top->vdev_removing) { 2700 if (vd->vdev_ops->vdev_op_leaf && vd->vdev_leaf_zap == 0) { 2701 vd->vdev_leaf_zap = vdev_create_link_zap(vd, tx); 2702 } 2703 if (vd == vd->vdev_top && vd->vdev_top_zap == 0) { 2704 vd->vdev_top_zap = vdev_create_link_zap(vd, tx); 2705 if (vd->vdev_alloc_bias != VDEV_BIAS_NONE) 2706 vdev_zap_allocation_data(vd, tx); 2707 } 2708 } 2709 2710 for (uint64_t i = 0; i < vd->vdev_children; i++) { 2711 vdev_construct_zaps(vd->vdev_child[i], tx); 2712 } 2713 } 2714 2715 void 2716 vdev_dtl_sync(vdev_t *vd, uint64_t txg) 2717 { 2718 spa_t *spa = vd->vdev_spa; 2719 range_tree_t *rt = vd->vdev_dtl[DTL_MISSING]; 2720 objset_t *mos = spa->spa_meta_objset; 2721 range_tree_t *rtsync; 2722 dmu_tx_t *tx; 2723 uint64_t object = space_map_object(vd->vdev_dtl_sm); 2724 2725 ASSERT(vdev_is_concrete(vd)); 2726 ASSERT(vd->vdev_ops->vdev_op_leaf); 2727 2728 tx = dmu_tx_create_assigned(spa->spa_dsl_pool, txg); 2729 2730 if (vd->vdev_detached || vd->vdev_top->vdev_removing) { 2731 mutex_enter(&vd->vdev_dtl_lock); 2732 space_map_free(vd->vdev_dtl_sm, tx); 2733 space_map_close(vd->vdev_dtl_sm); 2734 vd->vdev_dtl_sm = NULL; 2735 mutex_exit(&vd->vdev_dtl_lock); 2736 2737 /* 2738 * We only destroy the leaf ZAP for detached leaves or for 2739 * removed log devices. Removed data devices handle leaf ZAP 2740 * cleanup later, once cancellation is no longer possible. 
2741 */ 2742 if (vd->vdev_leaf_zap != 0 && (vd->vdev_detached || 2743 vd->vdev_top->vdev_islog)) { 2744 vdev_destroy_unlink_zap(vd, vd->vdev_leaf_zap, tx); 2745 vd->vdev_leaf_zap = 0; 2746 } 2747 2748 dmu_tx_commit(tx); 2749 return; 2750 } 2751 2752 if (vd->vdev_dtl_sm == NULL) { 2753 uint64_t new_object; 2754 2755 new_object = space_map_alloc(mos, vdev_dtl_sm_blksz, tx); 2756 VERIFY3U(new_object, !=, 0); 2757 2758 VERIFY0(space_map_open(&vd->vdev_dtl_sm, mos, new_object, 2759 0, -1ULL, 0)); 2760 ASSERT(vd->vdev_dtl_sm != NULL); 2761 } 2762 2763 rtsync = range_tree_create(NULL, NULL); 2764 2765 mutex_enter(&vd->vdev_dtl_lock); 2766 range_tree_walk(rt, range_tree_add, rtsync); 2767 mutex_exit(&vd->vdev_dtl_lock); 2768 2769 space_map_truncate(vd->vdev_dtl_sm, vdev_dtl_sm_blksz, tx); 2770 space_map_write(vd->vdev_dtl_sm, rtsync, SM_ALLOC, SM_NO_VDEVID, tx); 2771 range_tree_vacate(rtsync, NULL, NULL); 2772 2773 range_tree_destroy(rtsync); 2774 2775 /* 2776 * If the object for the space map has changed then dirty 2777 * the top level so that we update the config. 2778 */ 2779 if (object != space_map_object(vd->vdev_dtl_sm)) { 2780 vdev_dbgmsg(vd, "txg %llu, spa %s, DTL old object %llu, " 2781 "new object %llu", (u_longlong_t)txg, spa_name(spa), 2782 (u_longlong_t)object, 2783 (u_longlong_t)space_map_object(vd->vdev_dtl_sm)); 2784 vdev_config_dirty(vd->vdev_top); 2785 } 2786 2787 dmu_tx_commit(tx); 2788 } 2789 2790 /* 2791 * Determine whether the specified vdev can be offlined/detached/removed 2792 * without losing data. 2793 */ 2794 boolean_t 2795 vdev_dtl_required(vdev_t *vd) 2796 { 2797 spa_t *spa = vd->vdev_spa; 2798 vdev_t *tvd = vd->vdev_top; 2799 uint8_t cant_read = vd->vdev_cant_read; 2800 boolean_t required; 2801 2802 ASSERT(spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL); 2803 2804 if (vd == spa->spa_root_vdev || vd == tvd) 2805 return (B_TRUE); 2806 2807 /* 2808 * Temporarily mark the device as unreadable, and then determine 2809 * whether this results in any DTL outages in the top-level vdev. 2810 * If not, we can safely offline/detach/remove the device. 2811 */ 2812 vd->vdev_cant_read = B_TRUE; 2813 vdev_dtl_reassess(tvd, 0, 0, B_FALSE); 2814 required = !vdev_dtl_empty(tvd, DTL_OUTAGE); 2815 vd->vdev_cant_read = cant_read; 2816 vdev_dtl_reassess(tvd, 0, 0, B_FALSE); 2817 2818 if (!required && zio_injection_enabled) 2819 required = !!zio_handle_device_injection(vd, NULL, ECHILD); 2820 2821 return (required); 2822 } 2823 2824 /* 2825 * Determine if resilver is needed, and if so the txg range. 2826 */ 2827 boolean_t 2828 vdev_resilver_needed(vdev_t *vd, uint64_t *minp, uint64_t *maxp) 2829 { 2830 boolean_t needed = B_FALSE; 2831 uint64_t thismin = UINT64_MAX; 2832 uint64_t thismax = 0; 2833 2834 if (vd->vdev_children == 0) { 2835 mutex_enter(&vd->vdev_dtl_lock); 2836 if (!range_tree_is_empty(vd->vdev_dtl[DTL_MISSING]) && 2837 vdev_writeable(vd)) { 2838 2839 thismin = vdev_dtl_min(vd); 2840 thismax = vdev_dtl_max(vd); 2841 needed = B_TRUE; 2842 } 2843 mutex_exit(&vd->vdev_dtl_lock); 2844 } else { 2845 for (int c = 0; c < vd->vdev_children; c++) { 2846 vdev_t *cvd = vd->vdev_child[c]; 2847 uint64_t cmin, cmax; 2848 2849 if (vdev_resilver_needed(cvd, &cmin, &cmax)) { 2850 thismin = MIN(thismin, cmin); 2851 thismax = MAX(thismax, cmax); 2852 needed = B_TRUE; 2853 } 2854 } 2855 } 2856 2857 if (needed && minp) { 2858 *minp = thismin; 2859 *maxp = thismax; 2860 } 2861 return (needed); 2862 } 2863 2864 /* 2865 * Gets the checkpoint space map object from the vdev's ZAP. 
2866 * Returns the spacemap object, or 0 if it wasn't in the ZAP 2867 * or the ZAP doesn't exist yet. 2868 */ 2869 int 2870 vdev_checkpoint_sm_object(vdev_t *vd) 2871 { 2872 ASSERT0(spa_config_held(vd->vdev_spa, SCL_ALL, RW_WRITER)); 2873 if (vd->vdev_top_zap == 0) { 2874 return (0); 2875 } 2876 2877 uint64_t sm_obj = 0; 2878 int err = zap_lookup(spa_meta_objset(vd->vdev_spa), vd->vdev_top_zap, 2879 VDEV_TOP_ZAP_POOL_CHECKPOINT_SM, sizeof (uint64_t), 1, &sm_obj); 2880 2881 ASSERT(err == 0 || err == ENOENT); 2882 2883 return (sm_obj); 2884 } 2885 2886 int 2887 vdev_load(vdev_t *vd) 2888 { 2889 int error = 0; 2890 /* 2891 * Recursively load all children. 2892 */ 2893 for (int c = 0; c < vd->vdev_children; c++) { 2894 error = vdev_load(vd->vdev_child[c]); 2895 if (error != 0) { 2896 return (error); 2897 } 2898 } 2899 2900 vdev_set_deflate_ratio(vd); 2901 2902 /* 2903 * On spa_load path, grab the allocation bias from our zap 2904 */ 2905 if (vd == vd->vdev_top && vd->vdev_top_zap != 0) { 2906 spa_t *spa = vd->vdev_spa; 2907 char bias_str[64]; 2908 2909 if (zap_lookup(spa->spa_meta_objset, vd->vdev_top_zap, 2910 VDEV_TOP_ZAP_ALLOCATION_BIAS, 1, sizeof (bias_str), 2911 bias_str) == 0) { 2912 ASSERT(vd->vdev_alloc_bias == VDEV_BIAS_NONE); 2913 vd->vdev_alloc_bias = vdev_derive_alloc_bias(bias_str); 2914 } 2915 } 2916 2917 /* 2918 * If this is a top-level vdev, initialize its metaslabs. 2919 */ 2920 if (vd == vd->vdev_top && vdev_is_concrete(vd)) { 2921 vdev_metaslab_group_create(vd); 2922 2923 if (vd->vdev_ashift == 0 || vd->vdev_asize == 0) { 2924 vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN, 2925 VDEV_AUX_CORRUPT_DATA); 2926 vdev_dbgmsg(vd, "vdev_load: invalid size. ashift=%llu, " 2927 "asize=%llu", (u_longlong_t)vd->vdev_ashift, 2928 (u_longlong_t)vd->vdev_asize); 2929 return (SET_ERROR(ENXIO)); 2930 } 2931 2932 error = vdev_metaslab_init(vd, 0); 2933 if (error != 0) { 2934 vdev_dbgmsg(vd, "vdev_load: metaslab_init failed " 2935 "[error=%d]", error); 2936 vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN, 2937 VDEV_AUX_CORRUPT_DATA); 2938 return (error); 2939 } 2940 2941 uint64_t checkpoint_sm_obj = vdev_checkpoint_sm_object(vd); 2942 if (checkpoint_sm_obj != 0) { 2943 objset_t *mos = spa_meta_objset(vd->vdev_spa); 2944 ASSERT(vd->vdev_asize != 0); 2945 ASSERT3P(vd->vdev_checkpoint_sm, ==, NULL); 2946 2947 error = space_map_open(&vd->vdev_checkpoint_sm, 2948 mos, checkpoint_sm_obj, 0, vd->vdev_asize, 2949 vd->vdev_ashift); 2950 if (error != 0) { 2951 vdev_dbgmsg(vd, "vdev_load: space_map_open " 2952 "failed for checkpoint spacemap (obj %llu) " 2953 "[error=%d]", 2954 (u_longlong_t)checkpoint_sm_obj, error); 2955 return (error); 2956 } 2957 ASSERT3P(vd->vdev_checkpoint_sm, !=, NULL); 2958 2959 /* 2960 * Since the checkpoint_sm contains free entries 2961 * exclusively we can use space_map_allocated() to 2962 * indicate the cumulative checkpointed space that 2963 * has been freed. 2964 */ 2965 vd->vdev_stat.vs_checkpoint_space = 2966 -space_map_allocated(vd->vdev_checkpoint_sm); 2967 vd->vdev_spa->spa_checkpoint_info.sci_dspace += 2968 vd->vdev_stat.vs_checkpoint_space; 2969 } 2970 } 2971 2972 /* 2973 * If this is a leaf vdev, load its DTL. 
2974  */
2975     if (vd->vdev_ops->vdev_op_leaf && (error = vdev_dtl_load(vd)) != 0) {
2976         vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
2977             VDEV_AUX_CORRUPT_DATA);
2978         vdev_dbgmsg(vd, "vdev_load: vdev_dtl_load failed "
2979             "[error=%d]", error);
2980         return (error);
2981     }
2982
2983     uint64_t obsolete_sm_object = vdev_obsolete_sm_object(vd);
2984     if (obsolete_sm_object != 0) {
2985         objset_t *mos = vd->vdev_spa->spa_meta_objset;
2986         ASSERT(vd->vdev_asize != 0);
2987         ASSERT3P(vd->vdev_obsolete_sm, ==, NULL);
2988
2989         if ((error = space_map_open(&vd->vdev_obsolete_sm, mos,
2990             obsolete_sm_object, 0, vd->vdev_asize, 0))) {
2991             vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
2992                 VDEV_AUX_CORRUPT_DATA);
2993             vdev_dbgmsg(vd, "vdev_load: space_map_open failed for "
2994                 "obsolete spacemap (obj %llu) [error=%d]",
2995                 (u_longlong_t)obsolete_sm_object, error);
2996             return (error);
2997         }
2998     }
2999
3000     return (0);
3001 }
3002
3003 /*
3004 * The special vdev case is used for hot spares and l2cache devices. Its
3005 * sole purpose is to set the vdev state for the associated vdev. To do this,
3006 * we make sure that we can open the underlying device, then try to read the
3007 * label, and make sure that the label is sane and that it hasn't been
3008 * repurposed to another pool.
3009 */
3010 int
3011 vdev_validate_aux(vdev_t *vd)
3012 {
3013     nvlist_t *label;
3014     uint64_t guid, version;
3015     uint64_t state;
3016
3017     if (!vdev_readable(vd))
3018         return (0);
3019
3020     if ((label = vdev_label_read_config(vd, -1ULL)) == NULL) {
3021         vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
3022             VDEV_AUX_CORRUPT_DATA);
3023         return (-1);
3024     }
3025
3026     if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_VERSION, &version) != 0 ||
3027         !SPA_VERSION_IS_SUPPORTED(version) ||
3028         nvlist_lookup_uint64(label, ZPOOL_CONFIG_GUID, &guid) != 0 ||
3029         guid != vd->vdev_guid ||
3030         nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_STATE, &state) != 0) {
3031         vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
3032             VDEV_AUX_CORRUPT_DATA);
3033         nvlist_free(label);
3034         return (-1);
3035     }
3036
3037     /*
3038      * We don't actually check the pool state here. If it's in fact in
3039      * use by another pool, we update this fact on the fly when requested.
3040      */
3041     nvlist_free(label);
3042     return (0);
3043 }
3044
3045 /*
3046 * Free the objects used to store this vdev's spacemaps, and the array
3047 * that points to them.
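 *
 * (vd->vdev_ms_array is an object array holding one uint64_t space map
 * object number per metaslab; a zero entry means that metaslab never had
 * a space map allocated, which is why zero entries are skipped below.)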
3048 */ 3049 void 3050 vdev_destroy_spacemaps(vdev_t *vd, dmu_tx_t *tx) 3051 { 3052 if (vd->vdev_ms_array == 0) 3053 return; 3054 3055 objset_t *mos = vd->vdev_spa->spa_meta_objset; 3056 uint64_t array_count = vd->vdev_asize >> vd->vdev_ms_shift; 3057 size_t array_bytes = array_count * sizeof (uint64_t); 3058 uint64_t *smobj_array = kmem_alloc(array_bytes, KM_SLEEP); 3059 VERIFY0(dmu_read(mos, vd->vdev_ms_array, 0, 3060 array_bytes, smobj_array, 0)); 3061 3062 for (uint64_t i = 0; i < array_count; i++) { 3063 uint64_t smobj = smobj_array[i]; 3064 if (smobj == 0) 3065 continue; 3066 3067 space_map_free_obj(mos, smobj, tx); 3068 } 3069 3070 kmem_free(smobj_array, array_bytes); 3071 VERIFY0(dmu_object_free(mos, vd->vdev_ms_array, tx)); 3072 vd->vdev_ms_array = 0; 3073 } 3074 3075 static void 3076 vdev_remove_empty_log(vdev_t *vd, uint64_t txg) 3077 { 3078 spa_t *spa = vd->vdev_spa; 3079 3080 ASSERT(vd->vdev_islog); 3081 ASSERT(vd == vd->vdev_top); 3082 ASSERT3U(txg, ==, spa_syncing_txg(spa)); 3083 3084 dmu_tx_t *tx = dmu_tx_create_assigned(spa_get_dsl(spa), txg); 3085 3086 vdev_destroy_spacemaps(vd, tx); 3087 if (vd->vdev_top_zap != 0) { 3088 vdev_destroy_unlink_zap(vd, vd->vdev_top_zap, tx); 3089 vd->vdev_top_zap = 0; 3090 } 3091 3092 dmu_tx_commit(tx); 3093 } 3094 3095 void 3096 vdev_sync_done(vdev_t *vd, uint64_t txg) 3097 { 3098 metaslab_t *msp; 3099 boolean_t reassess = !txg_list_empty(&vd->vdev_ms_list, TXG_CLEAN(txg)); 3100 3101 ASSERT(vdev_is_concrete(vd)); 3102 3103 while ((msp = txg_list_remove(&vd->vdev_ms_list, TXG_CLEAN(txg))) 3104 != NULL) 3105 metaslab_sync_done(msp, txg); 3106 3107 if (reassess) 3108 metaslab_sync_reassess(vd->vdev_mg); 3109 } 3110 3111 void 3112 vdev_sync(vdev_t *vd, uint64_t txg) 3113 { 3114 spa_t *spa = vd->vdev_spa; 3115 vdev_t *lvd; 3116 metaslab_t *msp; 3117 3118 ASSERT3U(txg, ==, spa->spa_syncing_txg); 3119 dmu_tx_t *tx = dmu_tx_create_assigned(spa->spa_dsl_pool, txg); 3120 if (range_tree_space(vd->vdev_obsolete_segments) > 0) { 3121 ASSERT(vd->vdev_removing || 3122 vd->vdev_ops == &vdev_indirect_ops); 3123 3124 vdev_indirect_sync_obsolete(vd, tx); 3125 3126 /* 3127 * If the vdev is indirect, it can't have dirty 3128 * metaslabs or DTLs. 3129 */ 3130 if (vd->vdev_ops == &vdev_indirect_ops) { 3131 ASSERT(txg_list_empty(&vd->vdev_ms_list, txg)); 3132 ASSERT(txg_list_empty(&vd->vdev_dtl_list, txg)); 3133 dmu_tx_commit(tx); 3134 return; 3135 } 3136 } 3137 3138 ASSERT(vdev_is_concrete(vd)); 3139 3140 if (vd->vdev_ms_array == 0 && vd->vdev_ms_shift != 0 && 3141 !vd->vdev_removing) { 3142 ASSERT(vd == vd->vdev_top); 3143 ASSERT0(vd->vdev_indirect_config.vic_mapping_object); 3144 vd->vdev_ms_array = dmu_object_alloc(spa->spa_meta_objset, 3145 DMU_OT_OBJECT_ARRAY, 0, DMU_OT_NONE, 0, tx); 3146 ASSERT(vd->vdev_ms_array != 0); 3147 vdev_config_dirty(vd); 3148 } 3149 3150 while ((msp = txg_list_remove(&vd->vdev_ms_list, txg)) != NULL) { 3151 metaslab_sync(msp, txg); 3152 (void) txg_list_add(&vd->vdev_ms_list, msp, TXG_CLEAN(txg)); 3153 } 3154 3155 while ((lvd = txg_list_remove(&vd->vdev_dtl_list, txg)) != NULL) 3156 vdev_dtl_sync(lvd, txg); 3157 3158 /* 3159 * If this is an empty log device being removed, destroy the 3160 * metadata associated with it. 
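     * Because vs_alloc is 0 there are no live blocks left to copy;
     * vdev_remove_empty_log() above only has to free the space maps
     * and the top-level ZAP.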
3161 */ 3162 if (vd->vdev_islog && vd->vdev_stat.vs_alloc == 0 && vd->vdev_removing) 3163 vdev_remove_empty_log(vd, txg); 3164 3165 (void) txg_list_add(&spa->spa_vdev_txg_list, vd, TXG_CLEAN(txg)); 3166 dmu_tx_commit(tx); 3167 } 3168 3169 uint64_t 3170 vdev_psize_to_asize(vdev_t *vd, uint64_t psize) 3171 { 3172 return (vd->vdev_ops->vdev_op_asize(vd, psize)); 3173 } 3174 3175 /* 3176 * Mark the given vdev faulted. A faulted vdev behaves as if the device could 3177 * not be opened, and no I/O is attempted. 3178 */ 3179 int 3180 vdev_fault(spa_t *spa, uint64_t guid, vdev_aux_t aux) 3181 { 3182 vdev_t *vd, *tvd; 3183 3184 spa_vdev_state_enter(spa, SCL_NONE); 3185 3186 if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL) 3187 return (spa_vdev_state_exit(spa, NULL, ENODEV)); 3188 3189 if (!vd->vdev_ops->vdev_op_leaf) 3190 return (spa_vdev_state_exit(spa, NULL, ENOTSUP)); 3191 3192 tvd = vd->vdev_top; 3193 3194 /* 3195 * We don't directly use the aux state here, but if we do a 3196 * vdev_reopen(), we need this value to be present to remember why we 3197 * were faulted. 3198 */ 3199 vd->vdev_label_aux = aux; 3200 3201 /* 3202 * Faulted state takes precedence over degraded. 3203 */ 3204 vd->vdev_delayed_close = B_FALSE; 3205 vd->vdev_faulted = 1ULL; 3206 vd->vdev_degraded = 0ULL; 3207 vdev_set_state(vd, B_FALSE, VDEV_STATE_FAULTED, aux); 3208 3209 /* 3210 * If this device has the only valid copy of the data, then 3211 * back off and simply mark the vdev as degraded instead. 3212 */ 3213 if (!tvd->vdev_islog && vd->vdev_aux == NULL && vdev_dtl_required(vd)) { 3214 vd->vdev_degraded = 1ULL; 3215 vd->vdev_faulted = 0ULL; 3216 3217 /* 3218 * If we reopen the device and it's not dead, only then do we 3219 * mark it degraded. 3220 */ 3221 vdev_reopen(tvd); 3222 3223 if (vdev_readable(vd)) 3224 vdev_set_state(vd, B_FALSE, VDEV_STATE_DEGRADED, aux); 3225 } 3226 3227 return (spa_vdev_state_exit(spa, vd, 0)); 3228 } 3229 3230 /* 3231 * Mark the given vdev degraded. A degraded vdev is purely an indication to the 3232 * user that something is wrong. The vdev continues to operate as normal as far 3233 * as I/O is concerned. 3234 */ 3235 int 3236 vdev_degrade(spa_t *spa, uint64_t guid, vdev_aux_t aux) 3237 { 3238 vdev_t *vd; 3239 3240 spa_vdev_state_enter(spa, SCL_NONE); 3241 3242 if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL) 3243 return (spa_vdev_state_exit(spa, NULL, ENODEV)); 3244 3245 if (!vd->vdev_ops->vdev_op_leaf) 3246 return (spa_vdev_state_exit(spa, NULL, ENOTSUP)); 3247 3248 /* 3249 * If the vdev is already faulted, then don't do anything. 3250 */ 3251 if (vd->vdev_faulted || vd->vdev_degraded) 3252 return (spa_vdev_state_exit(spa, NULL, 0)); 3253 3254 vd->vdev_degraded = 1ULL; 3255 if (!vdev_is_dead(vd)) 3256 vdev_set_state(vd, B_FALSE, VDEV_STATE_DEGRADED, 3257 aux); 3258 3259 return (spa_vdev_state_exit(spa, vd, 0)); 3260 } 3261 3262 /* 3263 * Online the given vdev. 3264 * 3265 * If 'ZFS_ONLINE_UNSPARE' is set, it implies two things. First, any attached 3266 * spare device should be detached when the device finishes resilvering. 3267 * Second, the online should be treated like a 'test' online case, so no FMA 3268 * events are generated if the device fails to open. 
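 *
 * A sketch of a typical call (hypothetical caller; the state lock is
 * taken inside vdev_online() itself via spa_vdev_state_enter()):
 *
 *	vdev_state_t newstate;
 *	int error = vdev_online(spa, guid, ZFS_ONLINE_EXPAND, &newstate);
 *	if (error == 0 && newstate != VDEV_STATE_HEALTHY)
 *		(the device reopened, but came back degraded or worse)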
3269 */ 3270 int 3271 vdev_online(spa_t *spa, uint64_t guid, uint64_t flags, vdev_state_t *newstate) 3272 { 3273 vdev_t *vd, *tvd, *pvd, *rvd = spa->spa_root_vdev; 3274 boolean_t wasoffline; 3275 vdev_state_t oldstate; 3276 3277 spa_vdev_state_enter(spa, SCL_NONE); 3278 3279 if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL) 3280 return (spa_vdev_state_exit(spa, NULL, ENODEV)); 3281 3282 if (!vd->vdev_ops->vdev_op_leaf) 3283 return (spa_vdev_state_exit(spa, NULL, ENOTSUP)); 3284 3285 wasoffline = (vd->vdev_offline || vd->vdev_tmpoffline); 3286 oldstate = vd->vdev_state; 3287 3288 tvd = vd->vdev_top; 3289 vd->vdev_offline = B_FALSE; 3290 vd->vdev_tmpoffline = B_FALSE; 3291 vd->vdev_checkremove = !!(flags & ZFS_ONLINE_CHECKREMOVE); 3292 vd->vdev_forcefault = !!(flags & ZFS_ONLINE_FORCEFAULT); 3293 3294 /* XXX - L2ARC 1.0 does not support expansion */ 3295 if (!vd->vdev_aux) { 3296 for (pvd = vd; pvd != rvd; pvd = pvd->vdev_parent) 3297 pvd->vdev_expanding = !!(flags & ZFS_ONLINE_EXPAND); 3298 } 3299 3300 vdev_reopen(tvd); 3301 vd->vdev_checkremove = vd->vdev_forcefault = B_FALSE; 3302 3303 if (!vd->vdev_aux) { 3304 for (pvd = vd; pvd != rvd; pvd = pvd->vdev_parent) 3305 pvd->vdev_expanding = B_FALSE; 3306 } 3307 3308 if (newstate) 3309 *newstate = vd->vdev_state; 3310 if ((flags & ZFS_ONLINE_UNSPARE) && 3311 !vdev_is_dead(vd) && vd->vdev_parent && 3312 vd->vdev_parent->vdev_ops == &vdev_spare_ops && 3313 vd->vdev_parent->vdev_child[0] == vd) 3314 vd->vdev_unspare = B_TRUE; 3315 3316 if ((flags & ZFS_ONLINE_EXPAND) || spa->spa_autoexpand) { 3317 3318 /* XXX - L2ARC 1.0 does not support expansion */ 3319 if (vd->vdev_aux) 3320 return (spa_vdev_state_exit(spa, vd, ENOTSUP)); 3321 spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE); 3322 } 3323 3324 /* Restart initializing if necessary */ 3325 mutex_enter(&vd->vdev_initialize_lock); 3326 if (vdev_writeable(vd) && 3327 vd->vdev_initialize_thread == NULL && 3328 vd->vdev_initialize_state == VDEV_INITIALIZE_ACTIVE) { 3329 (void) vdev_initialize(vd); 3330 } 3331 mutex_exit(&vd->vdev_initialize_lock); 3332 3333 /* Restart trimming if necessary */ 3334 mutex_enter(&vd->vdev_trim_lock); 3335 if (vdev_writeable(vd) && 3336 vd->vdev_trim_thread == NULL && 3337 vd->vdev_trim_state == VDEV_TRIM_ACTIVE) { 3338 (void) vdev_trim(vd, vd->vdev_trim_rate, vd->vdev_trim_partial, 3339 vd->vdev_trim_secure); 3340 } 3341 mutex_exit(&vd->vdev_trim_lock); 3342 3343 if (wasoffline || 3344 (oldstate < VDEV_STATE_DEGRADED && 3345 vd->vdev_state >= VDEV_STATE_DEGRADED)) 3346 spa_event_notify(spa, vd, NULL, ESC_ZFS_VDEV_ONLINE); 3347 3348 return (spa_vdev_state_exit(spa, vd, 0)); 3349 } 3350 3351 static int 3352 vdev_offline_locked(spa_t *spa, uint64_t guid, uint64_t flags) 3353 { 3354 vdev_t *vd, *tvd; 3355 int error = 0; 3356 uint64_t generation; 3357 metaslab_group_t *mg; 3358 3359 top: 3360 spa_vdev_state_enter(spa, SCL_ALLOC); 3361 3362 if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL) 3363 return (spa_vdev_state_exit(spa, NULL, ENODEV)); 3364 3365 if (!vd->vdev_ops->vdev_op_leaf) 3366 return (spa_vdev_state_exit(spa, NULL, ENOTSUP)); 3367 3368 tvd = vd->vdev_top; 3369 mg = tvd->vdev_mg; 3370 generation = spa->spa_config_generation + 1; 3371 3372 /* 3373 * If the device isn't already offline, try to offline it. 3374 */ 3375 if (!vd->vdev_offline) { 3376 /* 3377 * If this device has the only valid copy of some data, 3378 * don't allow it to be offlined. Log devices are always 3379 * expendable. 
3380 */ 3381 if (!tvd->vdev_islog && vd->vdev_aux == NULL && 3382 vdev_dtl_required(vd)) 3383 return (spa_vdev_state_exit(spa, NULL, EBUSY)); 3384 3385 /* 3386 * If the top-level is a slog and it has had allocations 3387 * then proceed. We check that the vdev's metaslab group 3388 * is not NULL since it's possible that we may have just 3389 * added this vdev but not yet initialized its metaslabs. 3390 */ 3391 if (tvd->vdev_islog && mg != NULL) { 3392 /* 3393 * Prevent any future allocations. 3394 */ 3395 metaslab_group_passivate(mg); 3396 (void) spa_vdev_state_exit(spa, vd, 0); 3397 3398 error = spa_reset_logs(spa); 3399 3400 /* 3401 * If the log device was successfully reset but has 3402 * checkpointed data, do not offline it. 3403 */ 3404 if (error == 0 && 3405 tvd->vdev_checkpoint_sm != NULL) { 3406 error = ZFS_ERR_CHECKPOINT_EXISTS; 3407 } 3408 3409 spa_vdev_state_enter(spa, SCL_ALLOC); 3410 3411 /* 3412 * Check to see if the config has changed. 3413 */ 3414 if (error || generation != spa->spa_config_generation) { 3415 metaslab_group_activate(mg); 3416 if (error) 3417 return (spa_vdev_state_exit(spa, 3418 vd, error)); 3419 (void) spa_vdev_state_exit(spa, vd, 0); 3420 goto top; 3421 } 3422 ASSERT0(tvd->vdev_stat.vs_alloc); 3423 } 3424 3425 /* 3426 * Offline this device and reopen its top-level vdev. 3427 * If the top-level vdev is a log device then just offline 3428 * it. Otherwise, if this action results in the top-level 3429 * vdev becoming unusable, undo it and fail the request. 3430 */ 3431 vd->vdev_offline = B_TRUE; 3432 vdev_reopen(tvd); 3433 3434 if (!tvd->vdev_islog && vd->vdev_aux == NULL && 3435 vdev_is_dead(tvd)) { 3436 vd->vdev_offline = B_FALSE; 3437 vdev_reopen(tvd); 3438 return (spa_vdev_state_exit(spa, NULL, EBUSY)); 3439 } 3440 3441 /* 3442 * Add the device back into the metaslab rotor so that 3443 * once we online the device it's open for business. 3444 */ 3445 if (tvd->vdev_islog && mg != NULL) 3446 metaslab_group_activate(mg); 3447 } 3448 3449 vd->vdev_tmpoffline = !!(flags & ZFS_OFFLINE_TEMPORARY); 3450 3451 return (spa_vdev_state_exit(spa, vd, 0)); 3452 } 3453 3454 int 3455 vdev_offline(spa_t *spa, uint64_t guid, uint64_t flags) 3456 { 3457 int error; 3458 3459 mutex_enter(&spa->spa_vdev_top_lock); 3460 error = vdev_offline_locked(spa, guid, flags); 3461 mutex_exit(&spa->spa_vdev_top_lock); 3462 3463 return (error); 3464 } 3465 3466 /* 3467 * Clear the error counts associated with this vdev. Unlike vdev_online() and 3468 * vdev_offline(), we assume the spa config is locked. We also clear all 3469 * children. If 'vd' is NULL, then the user wants to clear all vdevs. 3470 */ 3471 void 3472 vdev_clear(spa_t *spa, vdev_t *vd) 3473 { 3474 vdev_t *rvd = spa->spa_root_vdev; 3475 3476 ASSERT(spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL); 3477 3478 if (vd == NULL) 3479 vd = rvd; 3480 3481 vd->vdev_stat.vs_read_errors = 0; 3482 vd->vdev_stat.vs_write_errors = 0; 3483 vd->vdev_stat.vs_checksum_errors = 0; 3484 3485 for (int c = 0; c < vd->vdev_children; c++) 3486 vdev_clear(spa, vd->vdev_child[c]); 3487 3488 /* 3489 * It makes no sense to "clear" an indirect vdev. 3490 */ 3491 if (!vdev_is_concrete(vd)) 3492 return; 3493 3494 /* 3495 * If we're in the FAULTED state or have experienced failed I/O, then 3496 * clear the persistent state and attempt to reopen the device. We 3497 * also mark the vdev config dirty, so that the new faulted state is 3498 * written out to disk. 
3499      */
3500     if (vd->vdev_faulted || vd->vdev_degraded ||
3501         !vdev_readable(vd) || !vdev_writeable(vd)) {
3502
3503         /*
3504          * When reopening in response to a clear event, it may be due
3505          * to a fmadm repair request. In this case, if the device is
3506          * still broken, we still want to post the ereport again.
3507          */
3508         vd->vdev_forcefault = B_TRUE;
3509
3510         vd->vdev_faulted = vd->vdev_degraded = 0ULL;
3511         vd->vdev_cant_read = B_FALSE;
3512         vd->vdev_cant_write = B_FALSE;
3513
3514         vdev_reopen(vd == rvd ? rvd : vd->vdev_top);
3515
3516         vd->vdev_forcefault = B_FALSE;
3517
3518         if (vd != rvd && vdev_writeable(vd->vdev_top))
3519             vdev_state_dirty(vd->vdev_top);
3520
3521         if (vd->vdev_aux == NULL && !vdev_is_dead(vd)) {
3522             if (dsl_scan_resilvering(spa->spa_dsl_pool) &&
3523                 spa_feature_is_enabled(spa,
3524                 SPA_FEATURE_RESILVER_DEFER))
3525                 vdev_set_deferred_resilver(spa, vd);
3526             else
3527                 spa_async_request(spa, SPA_ASYNC_RESILVER);
3528         }
3529
3530         spa_event_notify(spa, vd, NULL, ESC_ZFS_VDEV_CLEAR);
3531     }
3532
3533     /*
3534      * When clearing a FMA-diagnosed fault, we always want to
3535      * unspare the device, as we assume that the original spare was
3536      * done in response to the FMA fault.
3537      */
3538     if (!vdev_is_dead(vd) && vd->vdev_parent != NULL &&
3539         vd->vdev_parent->vdev_ops == &vdev_spare_ops &&
3540         vd->vdev_parent->vdev_child[0] == vd)
3541         vd->vdev_unspare = B_TRUE;
3542 }
3543
3544 boolean_t
3545 vdev_is_dead(vdev_t *vd)
3546 {
3547     /*
3548      * Holes and missing devices are always considered "dead".
3549      * This simplifies the code since we don't have to check for
3550      * these types of devices in the various code paths.
3551      * Instead we rely on the fact that we skip over dead devices
3552      * before issuing I/O to them.
3553      */
3554     return (vd->vdev_state < VDEV_STATE_DEGRADED ||
3555         vd->vdev_ops == &vdev_hole_ops ||
3556         vd->vdev_ops == &vdev_missing_ops);
3557 }
3558
3559 boolean_t
3560 vdev_readable(vdev_t *vd)
3561 {
3562     return (!vdev_is_dead(vd) && !vd->vdev_cant_read);
3563 }
3564
3565 boolean_t
3566 vdev_writeable(vdev_t *vd)
3567 {
3568     return (!vdev_is_dead(vd) && !vd->vdev_cant_write &&
3569         vdev_is_concrete(vd));
3570 }
3571
3572 boolean_t
3573 vdev_allocatable(vdev_t *vd)
3574 {
3575     uint64_t state = vd->vdev_state;
3576
3577     /*
3578      * We currently allow allocations from vdevs which may be in the
3579      * process of reopening (i.e. VDEV_STATE_CLOSED). If the device
3580      * fails to reopen then we'll catch it later when we're holding
3581      * the proper locks. Note that we have to get the vdev state
3582      * in a local variable because although it changes atomically,
3583      * we're asking two separate questions about it.
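     *
     * (The two questions being: is the state below
     * VDEV_STATE_DEGRADED, and is it VDEV_STATE_CLOSED? Reading
     * vd->vdev_state twice could observe two different states and
     * combine the answers inconsistently.)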
3584      */
3585     return (!(state < VDEV_STATE_DEGRADED && state != VDEV_STATE_CLOSED) &&
3586         !vd->vdev_cant_write && vdev_is_concrete(vd) &&
3587         vd->vdev_mg->mg_initialized);
3588 }
3589
3590 boolean_t
3591 vdev_accessible(vdev_t *vd, zio_t *zio)
3592 {
3593     ASSERT(zio->io_vd == vd);
3594
3595     if (vdev_is_dead(vd) || vd->vdev_remove_wanted)
3596         return (B_FALSE);
3597
3598     if (zio->io_type == ZIO_TYPE_READ)
3599         return (!vd->vdev_cant_read);
3600
3601     if (zio->io_type == ZIO_TYPE_WRITE)
3602         return (!vd->vdev_cant_write);
3603
3604     return (B_TRUE);
3605 }
3606
3607 boolean_t
3608 vdev_is_spacemap_addressable(vdev_t *vd)
3609 {
3610     if (spa_feature_is_active(vd->vdev_spa, SPA_FEATURE_SPACEMAP_V2))
3611         return (B_TRUE);
3612
3613     /*
3614      * If double-word space map entries are not enabled we assume
3615      * 47 bits of the space map entry are dedicated to the entry's
3616      * offset (see SM_OFFSET_BITS in space_map.h). We then use that
3617      * to calculate the maximum address that can be described by a
3618      * space map entry for the given device.
3619      */
3620     uint64_t shift = vd->vdev_ashift + SM_OFFSET_BITS;
3621
3622     if (shift >= 63) /* detect potential overflow */
3623         return (B_TRUE);
3624
3625     return (vd->vdev_asize < (1ULL << shift));
3626 }
3627
3628 /*
3629 * Get statistics for the given vdev.
3630 */
3631 void
3632 vdev_get_stats(vdev_t *vd, vdev_stat_t *vs)
3633 {
3634     spa_t *spa = vd->vdev_spa;
3635     vdev_t *rvd = spa->spa_root_vdev;
3636     vdev_t *tvd = vd->vdev_top;
3637
3638     ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0);
3639
3640     mutex_enter(&vd->vdev_stat_lock);
3641     bcopy(&vd->vdev_stat, vs, sizeof (*vs));
3642     vs->vs_timestamp = gethrtime() - vs->vs_timestamp;
3643     vs->vs_state = vd->vdev_state;
3644     vs->vs_rsize = vdev_get_min_asize(vd);
3645     if (vd->vdev_ops->vdev_op_leaf) {
3646         vs->vs_rsize += VDEV_LABEL_START_SIZE + VDEV_LABEL_END_SIZE;
3647         /*
3648          * Report initializing progress. Since we don't have the
3649          * initializing locks held, this is only an estimate (although a
3650          * fairly accurate one).
3651          */
3652         vs->vs_initialize_bytes_done = vd->vdev_initialize_bytes_done;
3653         vs->vs_initialize_bytes_est = vd->vdev_initialize_bytes_est;
3654         vs->vs_initialize_state = vd->vdev_initialize_state;
3655         vs->vs_initialize_action_time = vd->vdev_initialize_action_time;
3656     }
3657
3658     /*
3659      * Report manual TRIM progress. Since we don't have
3660      * the manual TRIM locks held, this is only an
3661      * estimate (although a fairly accurate one).
3662      */
3663     vs->vs_trim_notsup = !vd->vdev_has_trim;
3664     vs->vs_trim_bytes_done = vd->vdev_trim_bytes_done;
3665     vs->vs_trim_bytes_est = vd->vdev_trim_bytes_est;
3666     vs->vs_trim_state = vd->vdev_trim_state;
3667     vs->vs_trim_action_time = vd->vdev_trim_action_time;
3668
3669     /*
3670      * Report expandable space on top-level, non-auxiliary devices only.
3671      * The expandable space is reported in terms of metaslab sized units
3672      * since that determines how much space the pool can expand.
3673      */
3674     if (vd->vdev_aux == NULL && tvd != NULL) {
3675         vs->vs_esize = P2ALIGN(vd->vdev_max_asize - vd->vdev_asize -
3676             spa->spa_bootsize, 1ULL << tvd->vdev_ms_shift);
3677     }
3678     if (vd->vdev_aux == NULL && vd == vd->vdev_top &&
3679         vdev_is_concrete(vd)) {
3680         vs->vs_fragmentation = (vd->vdev_mg != NULL) ?
3681 vd->vdev_mg->mg_fragmentation : 0; 3682 } 3683 if (vd->vdev_ops->vdev_op_leaf) 3684 vs->vs_resilver_deferred = vd->vdev_resilver_deferred; 3685 3686 /* 3687 * If we're getting stats on the root vdev, aggregate the I/O counts 3688 * over all top-level vdevs (i.e. the direct children of the root). 3689 */ 3690 if (vd == rvd) { 3691 for (int c = 0; c < rvd->vdev_children; c++) { 3692 vdev_t *cvd = rvd->vdev_child[c]; 3693 vdev_stat_t *cvs = &cvd->vdev_stat; 3694 3695 for (int t = 0; t < VS_ZIO_TYPES; t++) { 3696 vs->vs_ops[t] += cvs->vs_ops[t]; 3697 vs->vs_bytes[t] += cvs->vs_bytes[t]; 3698 } 3699 cvs->vs_scan_removing = cvd->vdev_removing; 3700 } 3701 } 3702 mutex_exit(&vd->vdev_stat_lock); 3703 } 3704 3705 void 3706 vdev_clear_stats(vdev_t *vd) 3707 { 3708 mutex_enter(&vd->vdev_stat_lock); 3709 vd->vdev_stat.vs_space = 0; 3710 vd->vdev_stat.vs_dspace = 0; 3711 vd->vdev_stat.vs_alloc = 0; 3712 mutex_exit(&vd->vdev_stat_lock); 3713 } 3714 3715 void 3716 vdev_scan_stat_init(vdev_t *vd) 3717 { 3718 vdev_stat_t *vs = &vd->vdev_stat; 3719 3720 for (int c = 0; c < vd->vdev_children; c++) 3721 vdev_scan_stat_init(vd->vdev_child[c]); 3722 3723 mutex_enter(&vd->vdev_stat_lock); 3724 vs->vs_scan_processed = 0; 3725 mutex_exit(&vd->vdev_stat_lock); 3726 } 3727 3728 void 3729 vdev_stat_update(zio_t *zio, uint64_t psize) 3730 { 3731 spa_t *spa = zio->io_spa; 3732 vdev_t *rvd = spa->spa_root_vdev; 3733 vdev_t *vd = zio->io_vd ? zio->io_vd : rvd; 3734 vdev_t *pvd; 3735 uint64_t txg = zio->io_txg; 3736 vdev_stat_t *vs = &vd->vdev_stat; 3737 zio_type_t type = zio->io_type; 3738 int flags = zio->io_flags; 3739 3740 /* 3741 * If this i/o is a gang leader, it didn't do any actual work. 3742 */ 3743 if (zio->io_gang_tree) 3744 return; 3745 3746 if (zio->io_error == 0) { 3747 /* 3748 * If this is a root i/o, don't count it -- we've already 3749 * counted the top-level vdevs, and vdev_get_stats() will 3750 * aggregate them when asked. This reduces contention on 3751 * the root vdev_stat_lock and implicitly handles blocks 3752 * that compress away to holes, for which there is no i/o. 3753 * (Holes never create vdev children, so all the counters 3754 * remain zero, which is what we want.) 3755 * 3756 * Note: this only applies to successful i/o (io_error == 0) 3757 * because unlike i/o counts, errors are not additive. 3758 * When reading a ditto block, for example, failure of 3759 * one top-level vdev does not imply a root-level error. 3760 */ 3761 if (vd == rvd) 3762 return; 3763 3764 ASSERT(vd == zio->io_vd); 3765 3766 if (flags & ZIO_FLAG_IO_BYPASS) 3767 return; 3768 3769 mutex_enter(&vd->vdev_stat_lock); 3770 3771 if (flags & ZIO_FLAG_IO_REPAIR) { 3772 if (flags & ZIO_FLAG_SCAN_THREAD) { 3773 dsl_scan_phys_t *scn_phys = 3774 &spa->spa_dsl_pool->dp_scan->scn_phys; 3775 uint64_t *processed = &scn_phys->scn_processed; 3776 3777 /* XXX cleanup? */ 3778 if (vd->vdev_ops->vdev_op_leaf) 3779 atomic_add_64(processed, psize); 3780 vs->vs_scan_processed += psize; 3781 } 3782 3783 if (flags & ZIO_FLAG_SELF_HEAL) 3784 vs->vs_self_healed += psize; 3785 } 3786 3787 zio_type_t vs_type = type; 3788 3789 /* 3790 * TRIM ops and bytes are reported to user space as 3791 * ZIO_TYPE_IOCTL. This is done to preserve the 3792 * vdev_stat_t structure layout for user space. 
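         * (For example, a 1MB TRIM zio is counted below as
         * vs_ops[ZIO_TYPE_IOCTL]++ and
         * vs_bytes[ZIO_TYPE_IOCTL] += 1MB.)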
void
vdev_stat_update(zio_t *zio, uint64_t psize)
{
	spa_t *spa = zio->io_spa;
	vdev_t *rvd = spa->spa_root_vdev;
	vdev_t *vd = zio->io_vd ? zio->io_vd : rvd;
	vdev_t *pvd;
	uint64_t txg = zio->io_txg;
	vdev_stat_t *vs = &vd->vdev_stat;
	zio_type_t type = zio->io_type;
	int flags = zio->io_flags;

	/*
	 * If this i/o is a gang leader, it didn't do any actual work.
	 */
	if (zio->io_gang_tree)
		return;

	if (zio->io_error == 0) {
		/*
		 * If this is a root i/o, don't count it -- we've already
		 * counted the top-level vdevs, and vdev_get_stats() will
		 * aggregate them when asked. This reduces contention on
		 * the root vdev_stat_lock and implicitly handles blocks
		 * that compress away to holes, for which there is no i/o.
		 * (Holes never create vdev children, so all the counters
		 * remain zero, which is what we want.)
		 *
		 * Note: this only applies to successful i/o (io_error == 0)
		 * because unlike i/o counts, errors are not additive.
		 * When reading a ditto block, for example, failure of
		 * one top-level vdev does not imply a root-level error.
		 */
		if (vd == rvd)
			return;

		ASSERT(vd == zio->io_vd);

		if (flags & ZIO_FLAG_IO_BYPASS)
			return;

		mutex_enter(&vd->vdev_stat_lock);

		if (flags & ZIO_FLAG_IO_REPAIR) {
			if (flags & ZIO_FLAG_SCAN_THREAD) {
				dsl_scan_phys_t *scn_phys =
				    &spa->spa_dsl_pool->dp_scan->scn_phys;
				uint64_t *processed = &scn_phys->scn_processed;

				/* XXX cleanup? */
				if (vd->vdev_ops->vdev_op_leaf)
					atomic_add_64(processed, psize);
				vs->vs_scan_processed += psize;
			}

			if (flags & ZIO_FLAG_SELF_HEAL)
				vs->vs_self_healed += psize;
		}

		zio_type_t vs_type = type;

		/*
		 * TRIM ops and bytes are reported to user space as
		 * ZIO_TYPE_IOCTL. This is done to preserve the
		 * vdev_stat_t structure layout for user space.
		 */
		if (type == ZIO_TYPE_TRIM)
			vs_type = ZIO_TYPE_IOCTL;

		vs->vs_ops[vs_type]++;
		vs->vs_bytes[vs_type] += psize;

		mutex_exit(&vd->vdev_stat_lock);
		return;
	}

	if (flags & ZIO_FLAG_SPECULATIVE)
		return;

	/*
	 * If this is an I/O error that is going to be retried, then ignore the
	 * error. Otherwise, the user may interpret B_FAILFAST I/O errors as
	 * hard errors, when in reality they can happen for any number of
	 * innocuous reasons (bus resets, MPxIO link failure, etc).
	 */
	if (zio->io_error == EIO &&
	    !(zio->io_flags & ZIO_FLAG_IO_RETRY))
		return;

	/*
	 * Intent log writes won't propagate their error to the root
	 * I/O so don't mark these types of failures as pool-level
	 * errors.
	 */
	if (zio->io_vd == NULL && (zio->io_flags & ZIO_FLAG_DONT_PROPAGATE))
		return;

	mutex_enter(&vd->vdev_stat_lock);
	if (type == ZIO_TYPE_READ && !vdev_is_dead(vd)) {
		if (zio->io_error == ECKSUM)
			vs->vs_checksum_errors++;
		else
			vs->vs_read_errors++;
	}
	if (type == ZIO_TYPE_WRITE && !vdev_is_dead(vd))
		vs->vs_write_errors++;
	mutex_exit(&vd->vdev_stat_lock);

	if (spa->spa_load_state == SPA_LOAD_NONE &&
	    type == ZIO_TYPE_WRITE && txg != 0 &&
	    (!(flags & ZIO_FLAG_IO_REPAIR) ||
	    (flags & ZIO_FLAG_SCAN_THREAD) ||
	    spa->spa_claiming)) {
		/*
		 * This is either a normal write (not a repair), or it's
		 * a repair induced by the scrub thread, or it's a repair
		 * made by zil_claim() during spa_load() in the first txg.
		 * In the normal case, we commit the DTL change in the same
		 * txg as the block was born. In the scrub-induced repair
		 * case, we know that scrubs run in first-pass syncing context,
		 * so we commit the DTL change in spa_syncing_txg(spa).
		 * In the zil_claim() case, we commit in spa_first_txg(spa).
		 *
		 * We currently do not make DTL entries for failed spontaneous
		 * self-healing writes triggered by normal (non-scrubbing)
		 * reads, because we have no transactional context in which to
		 * do so -- and it's not clear that it'd be desirable anyway.
		 */
		if (vd->vdev_ops->vdev_op_leaf) {
			uint64_t commit_txg = txg;
			if (flags & ZIO_FLAG_SCAN_THREAD) {
				ASSERT(flags & ZIO_FLAG_IO_REPAIR);
				ASSERT(spa_sync_pass(spa) == 1);
				vdev_dtl_dirty(vd, DTL_SCRUB, txg, 1);
				commit_txg = spa_syncing_txg(spa);
			} else if (spa->spa_claiming) {
				ASSERT(flags & ZIO_FLAG_IO_REPAIR);
				commit_txg = spa_first_txg(spa);
			}
			ASSERT(commit_txg >= spa_syncing_txg(spa));
			if (vdev_dtl_contains(vd, DTL_MISSING, txg, 1))
				return;
			for (pvd = vd; pvd != rvd; pvd = pvd->vdev_parent)
				vdev_dtl_dirty(pvd, DTL_PARTIAL, txg, 1);
			vdev_dirty(vd->vdev_top, VDD_DTL, vd, commit_txg);
		}
		if (vd != rvd)
			vdev_dtl_dirty(vd, DTL_MISSING, txg, 1);
	}
}

int64_t
vdev_deflated_space(vdev_t *vd, int64_t space)
{
	ASSERT((space & (SPA_MINBLOCKSIZE-1)) == 0);
	ASSERT(vd->vdev_deflate_ratio != 0 || vd->vdev_isl2cache);

	return ((space >> SPA_MINBLOCKSHIFT) * vd->vdev_deflate_ratio);
}
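
/*
 * Worked example (editor's illustration): vdev_deflate_ratio is kept in
 * 512-byte units per SPA_MINBLOCKSIZE of raw space, so a plain disk or
 * mirror has a ratio of 512 and deflates nothing:
 *
 *	(1M >> SPA_MINBLOCKSHIFT) * 512 = 1M
 *
 * A hypothetical RAID-Z ratio of 410 would instead report
 * (1M >> 9) * 410, i.e. roughly 80% of the raw delta.
 */
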
/*
 * Update the in-core space usage stats for this vdev, its metaslab class,
 * and the root vdev.
 */
void
vdev_space_update(vdev_t *vd, int64_t alloc_delta, int64_t defer_delta,
    int64_t space_delta)
{
	int64_t dspace_delta;
	spa_t *spa = vd->vdev_spa;
	vdev_t *rvd = spa->spa_root_vdev;

	ASSERT(vd == vd->vdev_top);

	/*
	 * Apply the inverse of the psize-to-asize (ie. RAID-Z) space-expansion
	 * factor. We must calculate this here and not at the root vdev
	 * because the root vdev's psize-to-asize is simply the max of its
	 * children's, thus not accurate enough for us.
	 */
	dspace_delta = vdev_deflated_space(vd, space_delta);

	mutex_enter(&vd->vdev_stat_lock);
	/* ensure we won't underflow */
	if (alloc_delta < 0) {
		ASSERT3U(vd->vdev_stat.vs_alloc, >=, -alloc_delta);
	}

	vd->vdev_stat.vs_alloc += alloc_delta;
	vd->vdev_stat.vs_space += space_delta;
	vd->vdev_stat.vs_dspace += dspace_delta;
	mutex_exit(&vd->vdev_stat_lock);

	/* every class but log contributes to root space stats */
	if (vd->vdev_mg != NULL && !vd->vdev_islog) {
		ASSERT(!vd->vdev_isl2cache);
		mutex_enter(&rvd->vdev_stat_lock);
		rvd->vdev_stat.vs_alloc += alloc_delta;
		rvd->vdev_stat.vs_space += space_delta;
		rvd->vdev_stat.vs_dspace += dspace_delta;
		mutex_exit(&rvd->vdev_stat_lock);
	}
	/* Note: metaslab_class_space_update moved to metaslab_space_update */
}
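
/*
 * Illustrative call (the caller shown is assumed for the example): when
 * the metaslab layer allocates 8K of asize from a top-level vdev, only
 * vs_alloc moves; vs_space changes only when the vdev itself grows or
 * shrinks:
 *
 *	vdev_space_update(vd->vdev_top, 8192, 0, 0);
 */
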
/*
 * Mark a top-level vdev's config as dirty, placing it on the dirty list
 * so that it will be written out next time the vdev configuration is synced.
 * If the root vdev is specified (vdev_top == NULL), dirty all top-level vdevs.
 */
void
vdev_config_dirty(vdev_t *vd)
{
	spa_t *spa = vd->vdev_spa;
	vdev_t *rvd = spa->spa_root_vdev;
	int c;

	ASSERT(spa_writeable(spa));

	/*
	 * If this is an aux vdev (as with l2cache and spare devices), then we
	 * update the vdev config manually and set the sync flag.
	 */
	if (vd->vdev_aux != NULL) {
		spa_aux_vdev_t *sav = vd->vdev_aux;
		nvlist_t **aux;
		uint_t naux;

		for (c = 0; c < sav->sav_count; c++) {
			if (sav->sav_vdevs[c] == vd)
				break;
		}

		if (c == sav->sav_count) {
			/*
			 * We're being removed. There's nothing more to do.
			 */
			ASSERT(sav->sav_sync == B_TRUE);
			return;
		}

		sav->sav_sync = B_TRUE;

		if (nvlist_lookup_nvlist_array(sav->sav_config,
		    ZPOOL_CONFIG_L2CACHE, &aux, &naux) != 0) {
			VERIFY(nvlist_lookup_nvlist_array(sav->sav_config,
			    ZPOOL_CONFIG_SPARES, &aux, &naux) == 0);
		}

		ASSERT(c < naux);

		/*
		 * Setting the nvlist in the middle of the array is a little
		 * sketchy, but it will work.
		 */
		nvlist_free(aux[c]);
		aux[c] = vdev_config_generate(spa, vd, B_TRUE, 0);

		return;
	}

	/*
	 * The dirty list is protected by the SCL_CONFIG lock. The caller
	 * must either hold SCL_CONFIG as writer, or must be the sync thread
	 * (which holds SCL_CONFIG as reader). There's only one sync thread,
	 * so this is sufficient to ensure mutual exclusion.
	 */
	ASSERT(spa_config_held(spa, SCL_CONFIG, RW_WRITER) ||
	    (dsl_pool_sync_context(spa_get_dsl(spa)) &&
	    spa_config_held(spa, SCL_CONFIG, RW_READER)));

	if (vd == rvd) {
		for (c = 0; c < rvd->vdev_children; c++)
			vdev_config_dirty(rvd->vdev_child[c]);
	} else {
		ASSERT(vd == vd->vdev_top);

		if (!list_link_active(&vd->vdev_config_dirty_node) &&
		    vdev_is_concrete(vd)) {
			list_insert_head(&spa->spa_config_dirty_list, vd);
		}
	}
}

void
vdev_config_clean(vdev_t *vd)
{
	spa_t *spa = vd->vdev_spa;

	ASSERT(spa_config_held(spa, SCL_CONFIG, RW_WRITER) ||
	    (dsl_pool_sync_context(spa_get_dsl(spa)) &&
	    spa_config_held(spa, SCL_CONFIG, RW_READER)));

	ASSERT(list_link_active(&vd->vdev_config_dirty_node));
	list_remove(&spa->spa_config_dirty_list, vd);
}

/*
 * Mark a top-level vdev's state as dirty, so that the next pass of
 * spa_sync() can convert this into vdev_config_dirty(). We distinguish
 * the state changes from larger config changes because they require
 * much less locking, and are often needed for administrative actions.
 */
void
vdev_state_dirty(vdev_t *vd)
{
	spa_t *spa = vd->vdev_spa;

	ASSERT(spa_writeable(spa));
	ASSERT(vd == vd->vdev_top);

	/*
	 * The state list is protected by the SCL_STATE lock. The caller
	 * must either hold SCL_STATE as writer, or must be the sync thread
	 * (which holds SCL_STATE as reader). There's only one sync thread,
	 * so this is sufficient to ensure mutual exclusion.
	 */
	ASSERT(spa_config_held(spa, SCL_STATE, RW_WRITER) ||
	    (dsl_pool_sync_context(spa_get_dsl(spa)) &&
	    spa_config_held(spa, SCL_STATE, RW_READER)));

	if (!list_link_active(&vd->vdev_state_dirty_node) &&
	    vdev_is_concrete(vd))
		list_insert_head(&spa->spa_state_dirty_list, vd);
}

void
vdev_state_clean(vdev_t *vd)
{
	spa_t *spa = vd->vdev_spa;

	ASSERT(spa_config_held(spa, SCL_STATE, RW_WRITER) ||
	    (dsl_pool_sync_context(spa_get_dsl(spa)) &&
	    spa_config_held(spa, SCL_STATE, RW_READER)));

	ASSERT(list_link_active(&vd->vdev_state_dirty_node));
	list_remove(&spa->spa_state_dirty_list, vd);
}
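
/*
 * Illustrative sketch (hypothetical helper; the real logic lives in
 * spa_sync()): the sync thread drains the state-dirty list by promoting
 * each entry to the config-dirty list, taking both locks as writer so
 * the ASSERTs in vdev_state_clean() and vdev_config_dirty() hold.
 */
void
example_promote_state_dirty(spa_t *spa)
{
	vdev_t *vd;

	spa_config_enter(spa, SCL_CONFIG | SCL_STATE, FTAG, RW_WRITER);
	while ((vd = list_head(&spa->spa_state_dirty_list)) != NULL) {
		vdev_state_clean(vd);
		vdev_config_dirty(vd);
	}
	spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
}
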
/*
 * Propagate vdev state up from children to parent.
 */
void
vdev_propagate_state(vdev_t *vd)
{
	spa_t *spa = vd->vdev_spa;
	vdev_t *rvd = spa->spa_root_vdev;
	int degraded = 0, faulted = 0;
	int corrupted = 0;
	vdev_t *child;

	if (vd->vdev_children > 0) {
		for (int c = 0; c < vd->vdev_children; c++) {
			child = vd->vdev_child[c];

			/*
			 * Don't factor holes or indirect vdevs into the
			 * decision.
			 */
			if (!vdev_is_concrete(child))
				continue;

			if (!vdev_readable(child) ||
			    (!vdev_writeable(child) && spa_writeable(spa))) {
				/*
				 * Root special: if there is a top-level log
				 * device, treat the root vdev as if it were
				 * degraded.
				 */
				if (child->vdev_islog && vd == rvd)
					degraded++;
				else
					faulted++;
			} else if (child->vdev_state <= VDEV_STATE_DEGRADED) {
				degraded++;
			}

			if (child->vdev_stat.vs_aux == VDEV_AUX_CORRUPT_DATA)
				corrupted++;
		}

		vd->vdev_ops->vdev_op_state_change(vd, faulted, degraded);

		/*
		 * Root special: if there is a top-level vdev that cannot be
		 * opened due to corrupted metadata, then propagate the root
		 * vdev's aux state as 'corrupt' rather than 'insufficient
		 * replicas'.
		 */
		if (corrupted && vd == rvd &&
		    rvd->vdev_state == VDEV_STATE_CANT_OPEN)
			vdev_set_state(rvd, B_FALSE, VDEV_STATE_CANT_OPEN,
			    VDEV_AUX_CORRUPT_DATA);
	}

	if (vd->vdev_parent)
		vdev_propagate_state(vd->vdev_parent);
}
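
/*
 * Example (editor's illustration): in a two-way mirror, one unreadable
 * child yields faulted = 1, degraded = 0, and the mirror's
 * vdev_op_state_change callback marks it DEGRADED; with both children
 * unreadable it becomes CANT_OPEN with "insufficient replicas".
 */
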
/*
 * Set a vdev's state. If this is during an open, we don't update the parent
 * state, because we're in the process of opening children depth-first.
 * Otherwise, we propagate the change to the parent.
 *
 * If this routine places a device in a faulted state, an appropriate ereport is
 * generated.
 */
void
vdev_set_state(vdev_t *vd, boolean_t isopen, vdev_state_t state, vdev_aux_t aux)
{
	uint64_t save_state;
	spa_t *spa = vd->vdev_spa;

	if (state == vd->vdev_state) {
		vd->vdev_stat.vs_aux = aux;
		return;
	}

	save_state = vd->vdev_state;

	vd->vdev_state = state;
	vd->vdev_stat.vs_aux = aux;

	/*
	 * If we are setting the vdev state to anything but an open state, then
	 * always close the underlying device unless the device has requested
	 * a delayed close (i.e. we're about to remove or fault the device).
	 * Otherwise, we keep accessible but invalid devices open forever.
	 * We don't call vdev_close() itself, because that implies some extra
	 * checks (offline, etc) that we don't want here. This is limited to
	 * leaf devices, because otherwise closing the device will affect other
	 * children.
	 */
	if (!vd->vdev_delayed_close && vdev_is_dead(vd) &&
	    vd->vdev_ops->vdev_op_leaf)
		vd->vdev_ops->vdev_op_close(vd);

	/*
	 * If we have brought this vdev back into service, we need
	 * to notify fmd so that it can gracefully repair any outstanding
	 * cases due to a missing device. We do this in all cases, even those
	 * that probably don't correlate to a repaired fault. This is sure to
	 * catch all cases, and we let the zfs-retire agent sort it out. If
	 * this is a transient state it's OK, as the retire agent will
	 * double-check the state of the vdev before repairing it.
	 */
	if (state == VDEV_STATE_HEALTHY && vd->vdev_ops->vdev_op_leaf &&
	    vd->vdev_prevstate != state)
		zfs_post_state_change(spa, vd);

	if (vd->vdev_removed &&
	    state == VDEV_STATE_CANT_OPEN &&
	    (aux == VDEV_AUX_OPEN_FAILED || vd->vdev_checkremove)) {
		/*
		 * If the previous state is set to VDEV_STATE_REMOVED, then
		 * this device was previously marked removed and someone
		 * attempted to reopen it. If this failed due to a nonexistent
		 * device, then keep the device in the REMOVED state. We also
		 * keep it there if this is one of our special test online
		 * cases, which only attempts to online the device and
		 * shouldn't generate an FMA fault.
		 */
		vd->vdev_state = VDEV_STATE_REMOVED;
		vd->vdev_stat.vs_aux = VDEV_AUX_NONE;
	} else if (state == VDEV_STATE_REMOVED) {
		vd->vdev_removed = B_TRUE;
	} else if (state == VDEV_STATE_CANT_OPEN) {
		/*
		 * If we fail to open a vdev during an import or recovery, we
		 * mark it as "not available", which signifies that it was
		 * never there to begin with. Failure to open such a device
		 * is not considered an error.
		 */
		if ((spa_load_state(spa) == SPA_LOAD_IMPORT ||
		    spa_load_state(spa) == SPA_LOAD_RECOVER) &&
		    vd->vdev_ops->vdev_op_leaf)
			vd->vdev_not_present = 1;

		/*
		 * Post the appropriate ereport. If the 'prevstate' field is
		 * set to something other than VDEV_STATE_UNKNOWN, it indicates
		 * that this is part of a vdev_reopen(). In this case, we don't
		 * want to post the ereport if the device was already in the
		 * CANT_OPEN state beforehand.
		 *
		 * If the 'checkremove' flag is set, then this is an attempt to
		 * online the device in response to an insertion event. If we
		 * hit this case, then we have detected an insertion event for
		 * a faulted or offline device that wasn't in the removed
		 * state. In this scenario, we don't post an ereport because we
		 * are about to replace the device, or attempt an online with
		 * vdev_forcefault, which will generate the fault for us.
		 */
		if ((vd->vdev_prevstate != state || vd->vdev_forcefault) &&
		    !vd->vdev_not_present && !vd->vdev_checkremove &&
		    vd != spa->spa_root_vdev) {
			const char *class;

			switch (aux) {
			case VDEV_AUX_OPEN_FAILED:
				class = FM_EREPORT_ZFS_DEVICE_OPEN_FAILED;
				break;
			case VDEV_AUX_CORRUPT_DATA:
				class = FM_EREPORT_ZFS_DEVICE_CORRUPT_DATA;
				break;
			case VDEV_AUX_NO_REPLICAS:
				class = FM_EREPORT_ZFS_DEVICE_NO_REPLICAS;
				break;
			case VDEV_AUX_BAD_GUID_SUM:
				class = FM_EREPORT_ZFS_DEVICE_BAD_GUID_SUM;
				break;
			case VDEV_AUX_TOO_SMALL:
				class = FM_EREPORT_ZFS_DEVICE_TOO_SMALL;
				break;
			case VDEV_AUX_BAD_LABEL:
				class = FM_EREPORT_ZFS_DEVICE_BAD_LABEL;
				break;
			case VDEV_AUX_BAD_ASHIFT:
				class = FM_EREPORT_ZFS_DEVICE_BAD_ASHIFT;
				break;
			default:
				class = FM_EREPORT_ZFS_DEVICE_UNKNOWN;
			}

			zfs_ereport_post(class, spa, vd, NULL, NULL,
			    save_state, 0);
		}

		/* Erase any notion of persistent removed state */
		vd->vdev_removed = B_FALSE;
	} else {
		vd->vdev_removed = B_FALSE;
	}

	if (!isopen && vd->vdev_parent)
		vdev_propagate_state(vd->vdev_parent);
}

boolean_t
vdev_children_are_offline(vdev_t *vd)
{
	ASSERT(!vd->vdev_ops->vdev_op_leaf);

	for (uint64_t i = 0; i < vd->vdev_children; i++) {
		if (vd->vdev_child[i]->vdev_state != VDEV_STATE_OFFLINE)
			return (B_FALSE);
	}

	return (B_TRUE);
}
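
/*
 * Illustrative call (context assumed): the FMA retire path faults a
 * leaf outside of vdev_open(), so isopen is B_FALSE and the change
 * propagates to the parents immediately:
 *
 *	vdev_set_state(vd, B_FALSE, VDEV_STATE_FAULTED,
 *	    VDEV_AUX_ERR_EXCEEDED);
 */
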
/*
 * Check the vdev configuration to ensure that it's capable of supporting
 * a root pool. We do not support partial configuration.
 * In addition, only a single top-level vdev is allowed.
 */
boolean_t
vdev_is_bootable(vdev_t *vd)
{
	if (!vd->vdev_ops->vdev_op_leaf) {
		char *vdev_type = vd->vdev_ops->vdev_op_type;

		if (strcmp(vdev_type, VDEV_TYPE_ROOT) == 0 &&
		    vd->vdev_children > 1) {
			int non_indirect = 0;

			for (int c = 0; c < vd->vdev_children; c++) {
				vdev_type =
				    vd->vdev_child[c]->vdev_ops->vdev_op_type;
				if (strcmp(vdev_type, VDEV_TYPE_INDIRECT) != 0)
					non_indirect++;
			}
			/*
			 * non_indirect > 1 means we have more than one
			 * top-level vdev, so we stop here.
			 */
			if (non_indirect > 1)
				return (B_FALSE);
		} else if (strcmp(vdev_type, VDEV_TYPE_MISSING) == 0) {
			return (B_FALSE);
		}
	}

	for (int c = 0; c < vd->vdev_children; c++) {
		if (!vdev_is_bootable(vd->vdev_child[c]))
			return (B_FALSE);
	}
	return (B_TRUE);
}
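
/*
 * Examples (editor's illustration): a single-disk or single-mirror pool
 * passes this check; a pool striped across two top-level vdevs fails,
 * as does any tree still containing a "missing" vdev. Indirect vdevs
 * left behind by device removal are deliberately not counted.
 */
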
boolean_t
vdev_is_concrete(vdev_t *vd)
{
	vdev_ops_t *ops = vd->vdev_ops;
	if (ops == &vdev_indirect_ops || ops == &vdev_hole_ops ||
	    ops == &vdev_missing_ops || ops == &vdev_root_ops) {
		return (B_FALSE);
	} else {
		return (B_TRUE);
	}
}

/*
 * Determine if a log device has valid content. If the vdev was
 * removed or faulted in the MOS config then we know that
 * the content on the log device has already been written to the pool.
 */
boolean_t
vdev_log_state_valid(vdev_t *vd)
{
	if (vd->vdev_ops->vdev_op_leaf && !vd->vdev_faulted &&
	    !vd->vdev_removed)
		return (B_TRUE);

	for (int c = 0; c < vd->vdev_children; c++)
		if (vdev_log_state_valid(vd->vdev_child[c]))
			return (B_TRUE);

	return (B_FALSE);
}

/*
 * Expand a vdev if possible.
 */
void
vdev_expand(vdev_t *vd, uint64_t txg)
{
	ASSERT(vd->vdev_top == vd);
	ASSERT(spa_config_held(vd->vdev_spa, SCL_ALL, RW_WRITER) == SCL_ALL);
	ASSERT(vdev_is_concrete(vd));

	vdev_set_deflate_ratio(vd);

	if ((vd->vdev_asize >> vd->vdev_ms_shift) > vd->vdev_ms_count &&
	    vdev_is_concrete(vd)) {
		vdev_metaslab_group_create(vd);
		VERIFY(vdev_metaslab_init(vd, txg) == 0);
		vdev_config_dirty(vd);
	}
}

/*
 * Split a vdev.
 */
void
vdev_split(vdev_t *vd)
{
	vdev_t *cvd, *pvd = vd->vdev_parent;

	vdev_remove_child(pvd, vd);
	vdev_compact_children(pvd);

	cvd = pvd->vdev_child[0];
	if (pvd->vdev_children == 1) {
		vdev_remove_parent(cvd);
		cvd->vdev_splitting = B_TRUE;
	}
	vdev_propagate_state(cvd);
}

void
vdev_deadman(vdev_t *vd)
{
	for (int c = 0; c < vd->vdev_children; c++) {
		vdev_t *cvd = vd->vdev_child[c];

		vdev_deadman(cvd);
	}

	if (vd->vdev_ops->vdev_op_leaf) {
		vdev_queue_t *vq = &vd->vdev_queue;

		mutex_enter(&vq->vq_lock);
		if (avl_numnodes(&vq->vq_active_tree) > 0) {
			spa_t *spa = vd->vdev_spa;
			zio_t *fio;
			uint64_t delta;

			/*
			 * Look at the head of all the pending queues; if any
			 * I/O has been outstanding for longer than the
			 * spa_deadman_synctime, we panic the system.
			 */
			fio = avl_first(&vq->vq_active_tree);
			delta = gethrtime() - fio->io_timestamp;
			if (delta > spa_deadman_synctime(spa)) {
				vdev_dbgmsg(vd, "SLOW IO: zio timestamp "
				    "%lluns, delta %lluns, last io %lluns",
				    (u_longlong_t)fio->io_timestamp,
				    (u_longlong_t)delta,
				    (u_longlong_t)vq->vq_io_complete_ts);
				fm_panic("I/O to pool '%s' appears to be "
				    "hung.", spa_name(spa));
			}
		}
		mutex_exit(&vq->vq_lock);
	}
}

void
vdev_set_deferred_resilver(spa_t *spa, vdev_t *vd)
{
	for (uint64_t i = 0; i < vd->vdev_children; i++)
		vdev_set_deferred_resilver(spa, vd->vdev_child[i]);

	if (!vd->vdev_ops->vdev_op_leaf || !vdev_writeable(vd) ||
	    range_tree_is_empty(vd->vdev_dtl[DTL_MISSING])) {
		return;
	}

	vd->vdev_resilver_deferred = B_TRUE;
	spa->spa_resilver_deferred = B_TRUE;
}

/*
 * Translate a logical range to the physical range for the specified vdev_t.
 * This function is initially called with a leaf vdev and will walk each
 * parent vdev until it reaches a top-level vdev. Once the top-level is
 * reached the physical range is initialized and the recursive function
 * begins to unwind. As it unwinds it calls the parent's vdev specific
 * translation function to do the real conversion.
 */
void
vdev_xlate(vdev_t *vd, const range_seg_t *logical_rs, range_seg_t *physical_rs)
{
	/*
	 * Walk up the vdev tree
	 */
	if (vd != vd->vdev_top) {
		vdev_xlate(vd->vdev_parent, logical_rs, physical_rs);
	} else {
		/*
		 * We've reached the top-level vdev, initialize the
		 * physical range to the logical range and start to
		 * unwind.
		 */
		physical_rs->rs_start = logical_rs->rs_start;
		physical_rs->rs_end = logical_rs->rs_end;
		return;
	}

	vdev_t *pvd = vd->vdev_parent;
	ASSERT3P(pvd, !=, NULL);
	ASSERT3P(pvd->vdev_ops->vdev_op_xlate, !=, NULL);

	/*
	 * As this recursive function unwinds, translate the logical
	 * range into its physical components by calling the
	 * vdev specific translate function.
	 */
	range_seg_t intermediate = { { { 0, 0 } } };
	pvd->vdev_ops->vdev_op_xlate(vd, physical_rs, &intermediate);

	physical_rs->rs_start = intermediate.rs_start;
	physical_rs->rs_end = intermediate.rs_end;
}
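
/*
 * Illustrative sketch (the function name and context are assumptions
 * for the example): translating a leaf-relative logical range into the
 * physical range, as the TRIM and initialize code does. For a plain
 * disk the result equals the input; for RAID-Z the parent's xlate
 * callback narrows it to the child's column.
 */
void
example_xlate_range(vdev_t *leaf, uint64_t start, uint64_t size)
{
	range_seg_t logical_rs, physical_rs;

	logical_rs.rs_start = start;
	logical_rs.rs_end = start + size;

	vdev_xlate(leaf, &logical_rs, &physical_rs);

	vdev_dbgmsg(leaf, "[%llu, %llu) -> [%llu, %llu)",
	    (u_longlong_t)logical_rs.rs_start,
	    (u_longlong_t)logical_rs.rs_end,
	    (u_longlong_t)physical_rs.rs_start,
	    (u_longlong_t)physical_rs.rs_end);
}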