/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/zfs_context.h>
#include <sys/fm/fs/zfs.h>
#include <sys/spa.h>
#include <sys/spa_impl.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/vdev_impl.h>
#include <sys/uberblock_impl.h>
#include <sys/metaslab.h>
#include <sys/metaslab_impl.h>
#include <sys/space_map.h>
#include <sys/zio.h>
#include <sys/zap.h>
#include <sys/fs/zfs.h>
#include <sys/arc.h>

/*
 * Virtual device management.
 */

static vdev_ops_t *vdev_ops_table[] = {
	&vdev_root_ops,
	&vdev_raidz_ops,
	&vdev_mirror_ops,
	&vdev_replacing_ops,
	&vdev_spare_ops,
	&vdev_disk_ops,
	&vdev_file_ops,
	&vdev_missing_ops,
	NULL
};

/* maximum scrub/resilver I/O queue */
int zfs_scrub_limit = 70;

/*
 * Given a vdev type, return the appropriate ops vector.
 */
static vdev_ops_t *
vdev_getops(const char *type)
{
	vdev_ops_t *ops, **opspp;

	for (opspp = vdev_ops_table; (ops = *opspp) != NULL; opspp++)
		if (strcmp(ops->vdev_op_type, type) == 0)
			break;

	return (ops);
}

/*
 * Default asize function: return the MAX of psize with the asize of
 * all children.  This is what's used by anything other than RAID-Z.
 */
uint64_t
vdev_default_asize(vdev_t *vd, uint64_t psize)
{
	uint64_t asize = P2ROUNDUP(psize, 1ULL << vd->vdev_top->vdev_ashift);
	uint64_t csize;
	uint64_t c;

	for (c = 0; c < vd->vdev_children; c++) {
		csize = vdev_psize_to_asize(vd->vdev_child[c], psize);
		asize = MAX(asize, csize);
	}

	return (asize);
}

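/*
 * Rough illustration of the rounding above (hypothetical values, not
 * taken from this file): P2ROUNDUP() rounds psize up to the top-level
 * vdev's allocation granularity, 1 << ashift.  With ashift == 12
 * (4K sectors):
 *
 *	P2ROUNDUP(1000, 1ULL << 12) == 4096
 *	P2ROUNDUP(4096, 1ULL << 12) == 4096
 *	P2ROUNDUP(4097, 1ULL << 12) == 8192
 */
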
/*
 * Get the replaceable or attachable device size.
 * If the parent is a mirror or raidz, the replaceable size is the minimum
 * psize of all its children. For the rest, just return our own psize.
 *
 * e.g.
 *			psize	rsize
 * root			-	-
 *	mirror/raidz	-	-
 *	    disk1	20g	20g
 *	    disk2	40g	20g
 *	disk3		80g	80g
 */
uint64_t
vdev_get_rsize(vdev_t *vd)
{
	vdev_t *pvd, *cvd;
	uint64_t c, rsize;

	pvd = vd->vdev_parent;

	/*
	 * If our parent is NULL or the root, just return our own psize.
	 */
	if (pvd == NULL || pvd->vdev_parent == NULL)
		return (vd->vdev_psize);

	rsize = 0;

	for (c = 0; c < pvd->vdev_children; c++) {
		cvd = pvd->vdev_child[c];
		rsize = MIN(rsize - 1, cvd->vdev_psize - 1) + 1;
	}

	return (rsize);
}

vdev_t *
vdev_lookup_top(spa_t *spa, uint64_t vdev)
{
	vdev_t *rvd = spa->spa_root_vdev;

	ASSERT(spa_config_held(spa, RW_READER) ||
	    curthread == spa->spa_scrub_thread);

	if (vdev < rvd->vdev_children)
		return (rvd->vdev_child[vdev]);

	return (NULL);
}

vdev_t *
vdev_lookup_by_guid(vdev_t *vd, uint64_t guid)
{
	int c;
	vdev_t *mvd;

	if (vd->vdev_guid == guid)
		return (vd);

	for (c = 0; c < vd->vdev_children; c++)
		if ((mvd = vdev_lookup_by_guid(vd->vdev_child[c], guid)) !=
		    NULL)
			return (mvd);

	return (NULL);
}

void
vdev_add_child(vdev_t *pvd, vdev_t *cvd)
{
	size_t oldsize, newsize;
	uint64_t id = cvd->vdev_id;
	vdev_t **newchild;

	ASSERT(spa_config_held(cvd->vdev_spa, RW_WRITER));
	ASSERT(cvd->vdev_parent == NULL);

	cvd->vdev_parent = pvd;

	if (pvd == NULL)
		return;

	ASSERT(id >= pvd->vdev_children || pvd->vdev_child[id] == NULL);

	oldsize = pvd->vdev_children * sizeof (vdev_t *);
	pvd->vdev_children = MAX(pvd->vdev_children, id + 1);
	newsize = pvd->vdev_children * sizeof (vdev_t *);

	newchild = kmem_zalloc(newsize, KM_SLEEP);
	if (pvd->vdev_child != NULL) {
		bcopy(pvd->vdev_child, newchild, oldsize);
		kmem_free(pvd->vdev_child, oldsize);
	}

	pvd->vdev_child = newchild;
	pvd->vdev_child[id] = cvd;

	cvd->vdev_top = (pvd->vdev_top ? pvd->vdev_top : cvd);
	ASSERT(cvd->vdev_top->vdev_parent->vdev_parent == NULL);

	/*
	 * Walk up all ancestors to update guid sum.
	 */
	for (; pvd != NULL; pvd = pvd->vdev_parent)
		pvd->vdev_guid_sum += cvd->vdev_guid_sum;

	if (cvd->vdev_ops->vdev_op_leaf)
		cvd->vdev_spa->spa_scrub_maxinflight += zfs_scrub_limit;
}

void
vdev_remove_child(vdev_t *pvd, vdev_t *cvd)
{
	int c;
	uint_t id = cvd->vdev_id;

	ASSERT(cvd->vdev_parent == pvd);

	if (pvd == NULL)
		return;

	ASSERT(id < pvd->vdev_children);
	ASSERT(pvd->vdev_child[id] == cvd);

	pvd->vdev_child[id] = NULL;
	cvd->vdev_parent = NULL;

	for (c = 0; c < pvd->vdev_children; c++)
		if (pvd->vdev_child[c])
			break;

	if (c == pvd->vdev_children) {
		kmem_free(pvd->vdev_child, c * sizeof (vdev_t *));
		pvd->vdev_child = NULL;
		pvd->vdev_children = 0;
	}

	/*
	 * Walk up all ancestors to update guid sum.
	 */
	for (; pvd != NULL; pvd = pvd->vdev_parent)
		pvd->vdev_guid_sum -= cvd->vdev_guid_sum;

	if (cvd->vdev_ops->vdev_op_leaf)
		cvd->vdev_spa->spa_scrub_maxinflight -= zfs_scrub_limit;
}

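/*
 * Sketch of the guid sum bookkeeping above (hypothetical guids): a tree
 * of root (guid 1) over a mirror (guid 2) with two disks (guids 3 and 4)
 * carries vdev_guid_sum == 1 + 2 + 3 + 4 == 10 at the root.  Attaching a
 * child whose own guid sum is S adds S to every ancestor; detaching
 * subtracts it.  A mismatch between the stored and computed sums
 * surfaces as VDEV_AUX_BAD_GUID_SUM (see the ereport mapping in
 * vdev_set_state() at the bottom of this file).
 */
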
/*
 * Remove any holes in the child array.
 */
void
vdev_compact_children(vdev_t *pvd)
{
	vdev_t **newchild, *cvd;
	int oldc = pvd->vdev_children;
	int newc, c;

	ASSERT(spa_config_held(pvd->vdev_spa, RW_WRITER));

	for (c = newc = 0; c < oldc; c++)
		if (pvd->vdev_child[c])
			newc++;

	newchild = kmem_alloc(newc * sizeof (vdev_t *), KM_SLEEP);

	for (c = newc = 0; c < oldc; c++) {
		if ((cvd = pvd->vdev_child[c]) != NULL) {
			newchild[newc] = cvd;
			cvd->vdev_id = newc++;
		}
	}

	kmem_free(pvd->vdev_child, oldc * sizeof (vdev_t *));
	pvd->vdev_child = newchild;
	pvd->vdev_children = newc;
}

/*
 * Allocate and minimally initialize a vdev_t.
 */
static vdev_t *
vdev_alloc_common(spa_t *spa, uint_t id, uint64_t guid, vdev_ops_t *ops)
{
	vdev_t *vd;

	vd = kmem_zalloc(sizeof (vdev_t), KM_SLEEP);

	if (spa->spa_root_vdev == NULL) {
		ASSERT(ops == &vdev_root_ops);
		spa->spa_root_vdev = vd;
	}

	if (guid == 0) {
		if (spa->spa_root_vdev == vd) {
			/*
			 * The root vdev's guid will also be the pool guid,
			 * which must be unique among all pools.
			 */
			while (guid == 0 || spa_guid_exists(guid, 0))
				guid = spa_get_random(-1ULL);
		} else {
			/*
			 * Any other vdev's guid must be unique within the pool.
			 */
			while (guid == 0 ||
			    spa_guid_exists(spa_guid(spa), guid))
				guid = spa_get_random(-1ULL);
		}
		ASSERT(!spa_guid_exists(spa_guid(spa), guid));
	}

	vd->vdev_spa = spa;
	vd->vdev_id = id;
	vd->vdev_guid = guid;
	vd->vdev_guid_sum = guid;
	vd->vdev_ops = ops;
	vd->vdev_state = VDEV_STATE_CLOSED;

	mutex_init(&vd->vdev_dtl_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&vd->vdev_stat_lock, NULL, MUTEX_DEFAULT, NULL);
	space_map_create(&vd->vdev_dtl_map, 0, -1ULL, 0, &vd->vdev_dtl_lock);
	space_map_create(&vd->vdev_dtl_scrub, 0, -1ULL, 0, &vd->vdev_dtl_lock);
	txg_list_create(&vd->vdev_ms_list,
	    offsetof(struct metaslab, ms_txg_node));
	txg_list_create(&vd->vdev_dtl_list,
	    offsetof(struct vdev, vdev_dtl_node));
	vd->vdev_stat.vs_timestamp = gethrtime();
	vdev_queue_init(vd);
	vdev_cache_init(vd);

	return (vd);
}

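/*
 * For reference, a sketch of the nvlist that vdev_alloc() below consumes
 * for a leaf disk (the keys are the real ZPOOL_CONFIG_* names; the
 * values are made up for illustration):
 *
 *	type='disk'			(ZPOOL_CONFIG_TYPE)
 *	id=0				(ZPOOL_CONFIG_ID)
 *	guid=0x123456789abcdef0	(ZPOOL_CONFIG_GUID)
 *	path='/dev/dsk/c0t0d0s0'	(ZPOOL_CONFIG_PATH)
 *	whole_disk=1			(ZPOOL_CONFIG_WHOLE_DISK)
 */
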
/*
 * Allocate a new vdev.  The 'alloctype' is used to control whether we are
 * creating a new vdev or loading an existing one - the behavior is slightly
 * different for each case.
 */
int
vdev_alloc(spa_t *spa, vdev_t **vdp, nvlist_t *nv, vdev_t *parent, uint_t id,
    int alloctype)
{
	vdev_ops_t *ops;
	char *type;
	uint64_t guid = 0, islog, nparity;
	vdev_t *vd;

	ASSERT(spa_config_held(spa, RW_WRITER));

	if (nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) != 0)
		return (EINVAL);

	if ((ops = vdev_getops(type)) == NULL)
		return (EINVAL);

	/*
	 * If this is a load, get the vdev guid from the nvlist.
	 * Otherwise, vdev_alloc_common() will generate one for us.
	 */
	if (alloctype == VDEV_ALLOC_LOAD) {
		uint64_t label_id;

		if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID, &label_id) ||
		    label_id != id)
			return (EINVAL);

		if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) != 0)
			return (EINVAL);
	} else if (alloctype == VDEV_ALLOC_SPARE) {
		if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) != 0)
			return (EINVAL);
	} else if (alloctype == VDEV_ALLOC_L2CACHE) {
		if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) != 0)
			return (EINVAL);
	}

	/*
	 * The first allocated vdev must be of type 'root'.
	 */
	if (ops != &vdev_root_ops && spa->spa_root_vdev == NULL)
		return (EINVAL);

	/*
	 * Determine whether we're a log vdev.
	 */
	islog = 0;
	(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_LOG, &islog);
	if (islog && spa_version(spa) < SPA_VERSION_SLOGS)
		return (ENOTSUP);

	/*
	 * Set the nparity property for RAID-Z vdevs.
	 */
	nparity = -1ULL;
	if (ops == &vdev_raidz_ops) {
		if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NPARITY,
		    &nparity) == 0) {
			/*
			 * Currently, we can only support 2 parity devices.
			 */
			if (nparity == 0 || nparity > 2)
				return (EINVAL);
			/*
			 * Older versions can only support 1 parity device.
			 */
			if (nparity == 2 &&
			    spa_version(spa) < SPA_VERSION_RAID6)
				return (ENOTSUP);
		} else {
			/*
			 * We require the parity to be specified for SPAs that
			 * support multiple parity levels.
			 */
			if (spa_version(spa) >= SPA_VERSION_RAID6)
				return (EINVAL);
			/*
			 * Otherwise, we default to 1 parity device for RAID-Z.
			 */
			nparity = 1;
		}
	} else {
		nparity = 0;
	}
	ASSERT(nparity != -1ULL);

	vd = vdev_alloc_common(spa, id, guid, ops);

	vd->vdev_islog = islog;
	vd->vdev_nparity = nparity;

	if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &vd->vdev_path) == 0)
		vd->vdev_path = spa_strdup(vd->vdev_path);
	if (nvlist_lookup_string(nv, ZPOOL_CONFIG_DEVID, &vd->vdev_devid) == 0)
		vd->vdev_devid = spa_strdup(vd->vdev_devid);
	if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PHYS_PATH,
	    &vd->vdev_physpath) == 0)
		vd->vdev_physpath = spa_strdup(vd->vdev_physpath);

	/*
	 * Set the whole_disk property.  If it's not specified, leave the value
	 * as -1.
	 */
	if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
	    &vd->vdev_wholedisk) != 0)
		vd->vdev_wholedisk = -1ULL;

	/*
	 * Look for the 'not present' flag.  This will only be set if the device
	 * was not present at the time of import.
	 */
	if (!spa->spa_import_faulted)
		(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT,
		    &vd->vdev_not_present);

	/*
	 * Get the alignment requirement.
	 */
	(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ASHIFT, &vd->vdev_ashift);

	/*
	 * If we're a top-level vdev, try to load the allocation parameters.
	 */
	if (parent && !parent->vdev_parent && alloctype == VDEV_ALLOC_LOAD) {
		(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_METASLAB_ARRAY,
		    &vd->vdev_ms_array);
		(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_METASLAB_SHIFT,
		    &vd->vdev_ms_shift);
		(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ASIZE,
		    &vd->vdev_asize);
	}

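	/*
	 * Illustrative values for the parameters just loaded (hypothetical,
	 * not from any real label): a ~128GB top-level vdev might carry
	 * vdev_ms_shift == 30 and vdev_asize == 137438953472, which
	 * vdev_metaslab_init() later turns into asize >> ms_shift == 128
	 * metaslabs; vdev_ms_array names the on-disk object holding their
	 * space map object numbers.
	 */
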
	/*
	 * If we're a leaf vdev, try to load the DTL object and other state.
	 */
	if (vd->vdev_ops->vdev_op_leaf &&
	    (alloctype == VDEV_ALLOC_LOAD || alloctype == VDEV_ALLOC_L2CACHE)) {
		if (alloctype == VDEV_ALLOC_LOAD) {
			(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_DTL,
			    &vd->vdev_dtl.smo_object);
			(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_UNSPARE,
			    &vd->vdev_unspare);
		}
		(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_OFFLINE,
		    &vd->vdev_offline);

		/*
		 * When importing a pool, we want to ignore the persistent fault
		 * state, as the diagnosis made on another system may not be
		 * valid in the current context.
		 */
		if (spa->spa_load_state == SPA_LOAD_OPEN) {
			(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_FAULTED,
			    &vd->vdev_faulted);
			(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_DEGRADED,
			    &vd->vdev_degraded);
			(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_REMOVED,
			    &vd->vdev_removed);
		}
	}

	/*
	 * Add ourselves to the parent's list of children.
	 */
	vdev_add_child(parent, vd);

	*vdp = vd;

	return (0);
}

void
vdev_free(vdev_t *vd)
{
	int c;
	spa_t *spa = vd->vdev_spa;

	/*
	 * vdev_free() implies closing the vdev first.  This is simpler than
	 * trying to ensure complicated semantics for all callers.
	 */
	vdev_close(vd);

	ASSERT(!list_link_active(&vd->vdev_dirty_node));

	/*
	 * Free all children.
	 */
	for (c = 0; c < vd->vdev_children; c++)
		vdev_free(vd->vdev_child[c]);

	ASSERT(vd->vdev_child == NULL);
	ASSERT(vd->vdev_guid_sum == vd->vdev_guid);

	/*
	 * Discard allocation state.
	 */
	if (vd == vd->vdev_top)
		vdev_metaslab_fini(vd);

	ASSERT3U(vd->vdev_stat.vs_space, ==, 0);
	ASSERT3U(vd->vdev_stat.vs_dspace, ==, 0);
	ASSERT3U(vd->vdev_stat.vs_alloc, ==, 0);

	/*
	 * Remove this vdev from its parent's child list.
	 */
	vdev_remove_child(vd->vdev_parent, vd);

	ASSERT(vd->vdev_parent == NULL);

	/*
	 * Clean up vdev structure.
	 */
	vdev_queue_fini(vd);
	vdev_cache_fini(vd);

	if (vd->vdev_path)
		spa_strfree(vd->vdev_path);
	if (vd->vdev_devid)
		spa_strfree(vd->vdev_devid);
	if (vd->vdev_physpath)
		spa_strfree(vd->vdev_physpath);

	if (vd->vdev_isspare)
		spa_spare_remove(vd);
	if (vd->vdev_isl2cache)
		spa_l2cache_remove(vd);

	txg_list_destroy(&vd->vdev_ms_list);
	txg_list_destroy(&vd->vdev_dtl_list);
	mutex_enter(&vd->vdev_dtl_lock);
	space_map_unload(&vd->vdev_dtl_map);
	space_map_destroy(&vd->vdev_dtl_map);
	space_map_vacate(&vd->vdev_dtl_scrub, NULL, NULL);
	space_map_destroy(&vd->vdev_dtl_scrub);
	mutex_exit(&vd->vdev_dtl_lock);
	mutex_destroy(&vd->vdev_dtl_lock);
	mutex_destroy(&vd->vdev_stat_lock);

	if (vd == spa->spa_root_vdev)
		spa->spa_root_vdev = NULL;

	kmem_free(vd, sizeof (vdev_t));
}

/*
 * Transfer top-level vdev state from svd to tvd.
 */
static void
vdev_top_transfer(vdev_t *svd, vdev_t *tvd)
{
	spa_t *spa = svd->vdev_spa;
	metaslab_t *msp;
	vdev_t *vd;
	int t;

	ASSERT(tvd == tvd->vdev_top);

	tvd->vdev_ms_array = svd->vdev_ms_array;
	tvd->vdev_ms_shift = svd->vdev_ms_shift;
	tvd->vdev_ms_count = svd->vdev_ms_count;

	svd->vdev_ms_array = 0;
	svd->vdev_ms_shift = 0;
	svd->vdev_ms_count = 0;

	tvd->vdev_mg = svd->vdev_mg;
	tvd->vdev_ms = svd->vdev_ms;

	svd->vdev_mg = NULL;
	svd->vdev_ms = NULL;

	if (tvd->vdev_mg != NULL)
		tvd->vdev_mg->mg_vd = tvd;

	tvd->vdev_stat.vs_alloc = svd->vdev_stat.vs_alloc;
	tvd->vdev_stat.vs_space = svd->vdev_stat.vs_space;
	tvd->vdev_stat.vs_dspace = svd->vdev_stat.vs_dspace;

	svd->vdev_stat.vs_alloc = 0;
	svd->vdev_stat.vs_space = 0;
	svd->vdev_stat.vs_dspace = 0;

	for (t = 0; t < TXG_SIZE; t++) {
		while ((msp = txg_list_remove(&svd->vdev_ms_list, t)) != NULL)
			(void) txg_list_add(&tvd->vdev_ms_list, msp, t);
		while ((vd = txg_list_remove(&svd->vdev_dtl_list, t)) != NULL)
			(void) txg_list_add(&tvd->vdev_dtl_list, vd, t);
		if (txg_list_remove_this(&spa->spa_vdev_txg_list, svd, t))
			(void) txg_list_add(&spa->spa_vdev_txg_list, tvd, t);
	}

	if (list_link_active(&svd->vdev_dirty_node)) {
		vdev_config_clean(svd);
		vdev_config_dirty(tvd);
	}

	tvd->vdev_deflate_ratio = svd->vdev_deflate_ratio;
	svd->vdev_deflate_ratio = 0;

	tvd->vdev_islog = svd->vdev_islog;
	svd->vdev_islog = 0;
}

static void
vdev_top_update(vdev_t *tvd, vdev_t *vd)
{
	int c;

	if (vd == NULL)
		return;

	vd->vdev_top = tvd;

	for (c = 0; c < vd->vdev_children; c++)
		vdev_top_update(tvd, vd->vdev_child[c]);
}

/*
 * Add a mirror/replacing vdev above an existing vdev.
 */
vdev_t *
vdev_add_parent(vdev_t *cvd, vdev_ops_t *ops)
{
	spa_t *spa = cvd->vdev_spa;
	vdev_t *pvd = cvd->vdev_parent;
	vdev_t *mvd;

	ASSERT(spa_config_held(spa, RW_WRITER));

	mvd = vdev_alloc_common(spa, cvd->vdev_id, 0, ops);

	mvd->vdev_asize = cvd->vdev_asize;
	mvd->vdev_ashift = cvd->vdev_ashift;
	mvd->vdev_state = cvd->vdev_state;

	vdev_remove_child(pvd, cvd);
	vdev_add_child(pvd, mvd);
	cvd->vdev_id = mvd->vdev_children;
	vdev_add_child(mvd, cvd);
	vdev_top_update(cvd->vdev_top, cvd->vdev_top);

	if (mvd == mvd->vdev_top)
		vdev_top_transfer(cvd, mvd);

	return (mvd);
}

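/*
 * A sketch of the splice performed by vdev_add_parent() (the layout is
 * illustrative, not lifted from the original comments): attaching disk2
 * to a plain disk1 interposes a mirror (or 'replacing') vdev:
 *
 *	root				root
 *	  disk1			=>	  mirror
 *					    disk1
 *					    disk2
 *
 * vdev_remove_parent() below undoes the splice once the interior vdev
 * is down to a single child.
 */
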
/*
 * Remove a 1-way mirror/replacing vdev from the tree.
 */
void
vdev_remove_parent(vdev_t *cvd)
{
	vdev_t *mvd = cvd->vdev_parent;
	vdev_t *pvd = mvd->vdev_parent;

	ASSERT(spa_config_held(cvd->vdev_spa, RW_WRITER));

	ASSERT(mvd->vdev_children == 1);
	ASSERT(mvd->vdev_ops == &vdev_mirror_ops ||
	    mvd->vdev_ops == &vdev_replacing_ops ||
	    mvd->vdev_ops == &vdev_spare_ops);
	cvd->vdev_ashift = mvd->vdev_ashift;

	vdev_remove_child(mvd, cvd);
	vdev_remove_child(pvd, mvd);
	cvd->vdev_id = mvd->vdev_id;
	vdev_add_child(pvd, cvd);
	/*
	 * If we created a new toplevel vdev, then we need to change the child's
	 * vdev GUID to match the old toplevel vdev.  Otherwise, we could have
	 * detached an offline device, and when we go to import the pool we'll
	 * think we have two toplevel vdevs, instead of a different version of
	 * the same toplevel vdev.
	 */
	if (cvd->vdev_top == cvd) {
		pvd->vdev_guid_sum -= cvd->vdev_guid;
		cvd->vdev_guid_sum -= cvd->vdev_guid;
		cvd->vdev_guid = mvd->vdev_guid;
		cvd->vdev_guid_sum += mvd->vdev_guid;
		pvd->vdev_guid_sum += cvd->vdev_guid;
	}
	vdev_top_update(cvd->vdev_top, cvd->vdev_top);

	if (cvd == cvd->vdev_top)
		vdev_top_transfer(mvd, cvd);

	ASSERT(mvd->vdev_children == 0);
	vdev_free(mvd);
}

int
vdev_metaslab_init(vdev_t *vd, uint64_t txg)
{
	spa_t *spa = vd->vdev_spa;
	objset_t *mos = spa->spa_meta_objset;
	metaslab_class_t *mc;
	uint64_t m;
	uint64_t oldc = vd->vdev_ms_count;
	uint64_t newc = vd->vdev_asize >> vd->vdev_ms_shift;
	metaslab_t **mspp;
	int error;

	if (vd->vdev_ms_shift == 0)	/* not being allocated from yet */
		return (0);

	dprintf("%s oldc %llu newc %llu\n", vdev_description(vd), oldc, newc);

	ASSERT(oldc <= newc);

	if (vd->vdev_islog)
		mc = spa->spa_log_class;
	else
		mc = spa->spa_normal_class;

	if (vd->vdev_mg == NULL)
		vd->vdev_mg = metaslab_group_create(mc, vd);

	mspp = kmem_zalloc(newc * sizeof (*mspp), KM_SLEEP);

	if (oldc != 0) {
		bcopy(vd->vdev_ms, mspp, oldc * sizeof (*mspp));
		kmem_free(vd->vdev_ms, oldc * sizeof (*mspp));
	}

	vd->vdev_ms = mspp;
	vd->vdev_ms_count = newc;

	for (m = oldc; m < newc; m++) {
		space_map_obj_t smo = { 0, 0, 0 };
		if (txg == 0) {
			uint64_t object = 0;
			error = dmu_read(mos, vd->vdev_ms_array,
			    m * sizeof (uint64_t), sizeof (uint64_t), &object);
			if (error)
				return (error);
			if (object != 0) {
				dmu_buf_t *db;
				error = dmu_bonus_hold(mos, object, FTAG, &db);
				if (error)
					return (error);
				ASSERT3U(db->db_size, >=, sizeof (smo));
				bcopy(db->db_data, &smo, sizeof (smo));
				ASSERT3U(smo.smo_object, ==, object);
				dmu_buf_rele(db, FTAG);
			}
		}
		vd->vdev_ms[m] = metaslab_init(vd->vdev_mg, &smo,
		    m << vd->vdev_ms_shift, 1ULL << vd->vdev_ms_shift, txg);
	}

	return (0);
}

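/*
 * Worked example for the sizing above (hypothetical numbers): a
 * top-level vdev with vdev_asize of 128GB and vdev_ms_shift of 30
 * gets 128GB >> 30 == 128 metaslabs of 1GB each; metaslab m spans
 * [m << 30, (m + 1) << 30).  Growing the device only ever appends
 * metaslabs, which is why oldc <= newc is asserted.
 */
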
void
vdev_metaslab_fini(vdev_t *vd)
{
	uint64_t m;
	uint64_t count = vd->vdev_ms_count;

	if (vd->vdev_ms != NULL) {
		for (m = 0; m < count; m++)
			if (vd->vdev_ms[m] != NULL)
				metaslab_fini(vd->vdev_ms[m]);
		kmem_free(vd->vdev_ms, count * sizeof (metaslab_t *));
		vd->vdev_ms = NULL;
	}
}

int
vdev_probe(vdev_t *vd)
{
	if (vd == NULL)
		return (EINVAL);

	/*
	 * Right now we only support status checks on the leaf vdevs.
	 */
	if (vd->vdev_ops->vdev_op_leaf)
		return (vd->vdev_ops->vdev_op_probe(vd));

	return (0);
}

/*
 * Prepare a virtual device for access.
 */
int
vdev_open(vdev_t *vd)
{
	int error;
	int c;
	uint64_t osize = 0;
	uint64_t asize, psize;
	uint64_t ashift = 0;

	ASSERT(vd->vdev_state == VDEV_STATE_CLOSED ||
	    vd->vdev_state == VDEV_STATE_CANT_OPEN ||
	    vd->vdev_state == VDEV_STATE_OFFLINE);

	if (vd->vdev_fault_mode == VDEV_FAULT_COUNT)
		vd->vdev_fault_arg >>= 1;
	else
		vd->vdev_fault_mode = VDEV_FAULT_NONE;

	vd->vdev_stat.vs_aux = VDEV_AUX_NONE;

	if (!vd->vdev_removed && vd->vdev_faulted) {
		ASSERT(vd->vdev_children == 0);
		vdev_set_state(vd, B_TRUE, VDEV_STATE_FAULTED,
		    VDEV_AUX_ERR_EXCEEDED);
		return (ENXIO);
	} else if (vd->vdev_offline) {
		ASSERT(vd->vdev_children == 0);
		vdev_set_state(vd, B_TRUE, VDEV_STATE_OFFLINE, VDEV_AUX_NONE);
		return (ENXIO);
	}

	error = vd->vdev_ops->vdev_op_open(vd, &osize, &ashift);

	if (zio_injection_enabled && error == 0)
		error = zio_handle_device_injection(vd, ENXIO);

	if (error) {
		if (vd->vdev_removed &&
		    vd->vdev_stat.vs_aux != VDEV_AUX_OPEN_FAILED)
			vd->vdev_removed = B_FALSE;

		vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    vd->vdev_stat.vs_aux);
		return (error);
	}

	vd->vdev_removed = B_FALSE;

	if (vd->vdev_degraded) {
		ASSERT(vd->vdev_children == 0);
		vdev_set_state(vd, B_TRUE, VDEV_STATE_DEGRADED,
		    VDEV_AUX_ERR_EXCEEDED);
	} else {
		vd->vdev_state = VDEV_STATE_HEALTHY;
	}

	for (c = 0; c < vd->vdev_children; c++)
		if (vd->vdev_child[c]->vdev_state != VDEV_STATE_HEALTHY) {
			vdev_set_state(vd, B_TRUE, VDEV_STATE_DEGRADED,
			    VDEV_AUX_NONE);
			break;
		}

	osize = P2ALIGN(osize, (uint64_t)sizeof (vdev_label_t));

	if (vd->vdev_children == 0) {
		if (osize < SPA_MINDEVSIZE) {
			vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
			    VDEV_AUX_TOO_SMALL);
			return (EOVERFLOW);
		}
		psize = osize;
		asize = osize - (VDEV_LABEL_START_SIZE + VDEV_LABEL_END_SIZE);
	} else {
		if (vd->vdev_parent != NULL && osize < SPA_MINDEVSIZE -
		    (VDEV_LABEL_START_SIZE + VDEV_LABEL_END_SIZE)) {
			vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
			    VDEV_AUX_TOO_SMALL);
			return (EOVERFLOW);
		}
		psize = 0;
		asize = osize;
	}

	vd->vdev_psize = psize;

	if (vd->vdev_asize == 0) {
		/*
		 * This is the first-ever open, so use the computed values.
		 * For testing purposes, a higher ashift can be requested.
		 */
		vd->vdev_asize = asize;
		vd->vdev_ashift = MAX(ashift, vd->vdev_ashift);
	} else {
		/*
		 * Make sure the alignment requirement hasn't increased.
		 */
		if (ashift > vd->vdev_top->vdev_ashift) {
			vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
			    VDEV_AUX_BAD_LABEL);
			return (EINVAL);
		}

		/*
		 * Make sure the device hasn't shrunk.
		 */
		if (asize < vd->vdev_asize) {
			vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
			    VDEV_AUX_BAD_LABEL);
			return (EINVAL);
		}

		/*
		 * If all children are healthy and the asize has increased,
		 * then we've experienced dynamic LUN growth.
		 */
		if (vd->vdev_state == VDEV_STATE_HEALTHY &&
		    asize > vd->vdev_asize) {
			vd->vdev_asize = asize;
		}
	}

	/*
	 * Ensure we can issue some IO before declaring the
	 * vdev open for business.
	 */
	error = vdev_probe(vd);
	if (error) {
		vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_OPEN_FAILED);
		return (error);
	}

	/*
	 * If this is a top-level vdev, compute the raidz-deflation
	 * ratio.  Note, we hard-code in 128k (1<<17) because it is the
	 * current "typical" blocksize.  Even if SPA_MAXBLOCKSIZE
	 * changes, this algorithm must never change, or we will
	 * inconsistently account for existing bp's.
	 */
	if (vd->vdev_top == vd) {
		vd->vdev_deflate_ratio = (1<<17) /
		    (vdev_psize_to_asize(vd, 1<<17) >> SPA_MINBLOCKSHIFT);
	}

	return (0);
}

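/*
 * The deflate-ratio arithmetic, worked through for a hypothetical plain
 * disk where asize == psize: vdev_psize_to_asize(vd, 1<<17) == 131072,
 * so vdev_deflate_ratio == 131072 / (131072 >> SPA_MINBLOCKSHIFT) ==
 * 131072 / 256 == 512 (SPA_MINBLOCKSHIFT being 9).  RAID-Z returns a
 * larger asize for the same 128K psize, giving a smaller ratio;
 * vdev_space_update() multiplies 512-byte units by this ratio to
 * maintain the deflated space figure (vs_dspace).
 */
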
/*
 * Called once the vdevs are all opened, this routine validates the label
 * contents.  This needs to be done before vdev_load() so that we don't
 * inadvertently do repair I/Os to the wrong device.
 *
 * This function will only return failure if one of the vdevs indicates that it
 * has since been destroyed or exported.  This is only possible if
 * /etc/zfs/zpool.cache was readonly at the time.  Otherwise, the vdev state
 * will be updated but the function will return 0.
 */
int
vdev_validate(vdev_t *vd)
{
	spa_t *spa = vd->vdev_spa;
	int c;
	nvlist_t *label;
	uint64_t guid;
	uint64_t state;

	for (c = 0; c < vd->vdev_children; c++)
		if (vdev_validate(vd->vdev_child[c]) != 0)
			return (EBADF);

	/*
	 * If the device has already failed, or was marked offline, don't do
	 * any further validation.  Otherwise, label I/O will fail and we will
	 * overwrite the previous state.
	 */
	if (vd->vdev_ops->vdev_op_leaf && !vdev_is_dead(vd)) {

		if ((label = vdev_label_read_config(vd)) == NULL) {
			vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
			    VDEV_AUX_BAD_LABEL);
			return (0);
		}

		if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_GUID,
		    &guid) != 0 || guid != spa_guid(spa)) {
			vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
			    VDEV_AUX_CORRUPT_DATA);
			nvlist_free(label);
			return (0);
		}

		if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_GUID,
		    &guid) != 0 || guid != vd->vdev_guid) {
			vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
			    VDEV_AUX_CORRUPT_DATA);
			nvlist_free(label);
			return (0);
		}

		if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_STATE,
		    &state) != 0) {
			vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
			    VDEV_AUX_CORRUPT_DATA);
			nvlist_free(label);
			return (0);
		}

		nvlist_free(label);

		if (spa->spa_load_state == SPA_LOAD_OPEN &&
		    state != POOL_STATE_ACTIVE)
			return (EBADF);
	}

	/*
	 * If we were able to open and validate a vdev that was previously
	 * marked permanently unavailable, clear that state now.
	 */
	if (vd->vdev_not_present)
		vd->vdev_not_present = 0;

	return (0);
}

/*
 * Close a virtual device.
 */
void
vdev_close(vdev_t *vd)
{
	vd->vdev_ops->vdev_op_close(vd);

	vdev_cache_purge(vd);

	/*
	 * We record the previous state before we close it, so that if we are
	 * doing a reopen(), we don't generate FMA ereports if we notice that
	 * it's still faulted.
	 */
	vd->vdev_prevstate = vd->vdev_state;

	if (vd->vdev_offline)
		vd->vdev_state = VDEV_STATE_OFFLINE;
	else
		vd->vdev_state = VDEV_STATE_CLOSED;
	vd->vdev_stat.vs_aux = VDEV_AUX_NONE;
}

void
vdev_reopen(vdev_t *vd)
{
	spa_t *spa = vd->vdev_spa;

	ASSERT(spa_config_held(spa, RW_WRITER));

	vdev_close(vd);
	(void) vdev_open(vd);

	/*
	 * Call vdev_validate() here to make sure we have the same device.
	 * Otherwise, a device with an invalid label could be successfully
	 * opened in response to vdev_reopen().
	 */
	if (vd->vdev_aux) {
		(void) vdev_validate_aux(vd);
		if (!vdev_is_dead(vd) &&
		    !l2arc_vdev_present(vd)) {
			uint64_t size = vdev_get_rsize(vd);
			l2arc_add_vdev(spa, vd,
			    VDEV_LABEL_START_SIZE,
			    size - VDEV_LABEL_START_SIZE);
		}
	} else {
		(void) vdev_validate(vd);
	}

	/*
	 * Reassess parent vdev's health.
	 */
	vdev_propagate_state(vd);
}

int
vdev_create(vdev_t *vd, uint64_t txg, boolean_t isreplacing)
{
	int error;

	/*
	 * Normally, partial opens (e.g. of a mirror) are allowed.
	 * For a create, however, we want to fail the request if
	 * there are any components we can't open.
	 */
	error = vdev_open(vd);

	if (error || vd->vdev_state != VDEV_STATE_HEALTHY) {
		vdev_close(vd);
		return (error ? error : ENXIO);
	}

	/*
	 * Recursively initialize all labels.
	 */
	if ((error = vdev_label_init(vd, txg, isreplacing ?
	    VDEV_LABEL_REPLACE : VDEV_LABEL_CREATE)) != 0) {
		vdev_close(vd);
		return (error);
	}

	return (0);
}

/*
 * This is the latter half of vdev_create().  It is distinct because it
 * involves initiating transactions in order to do metaslab creation.
 * For creation, we want to try to create all vdevs at once and then undo it
 * if anything fails; this is much harder if we have pending transactions.
 */
void
vdev_init(vdev_t *vd, uint64_t txg)
{
	/*
	 * Aim for roughly 200 metaslabs per vdev.
	 */
	vd->vdev_ms_shift = highbit(vd->vdev_asize / 200);
	vd->vdev_ms_shift = MAX(vd->vdev_ms_shift, SPA_MAXBLOCKSHIFT);

	/*
	 * Initialize the vdev's metaslabs.  This can't fail because
	 * there's nothing to read when creating all new metaslabs.
	 */
	VERIFY(vdev_metaslab_init(vd, txg) == 0);
}

void
vdev_dirty(vdev_t *vd, int flags, void *arg, uint64_t txg)
{
	ASSERT(vd == vd->vdev_top);
	ASSERT(ISP2(flags));

	if (flags & VDD_METASLAB)
		(void) txg_list_add(&vd->vdev_ms_list, arg, txg);

	if (flags & VDD_DTL)
		(void) txg_list_add(&vd->vdev_dtl_list, arg, txg);

	(void) txg_list_add(&vd->vdev_spa->spa_vdev_txg_list, vd, txg);
}

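/*
 * The DTL (dirty time log) space maps used below track transaction
 * groups rather than byte offsets: a segment [t1, t2) means the vdev
 * may be missing writes from txgs t1 through t2 - 1.  For example
 * (hypothetical txgs), a disk unreachable from txg 100 through txg 149
 * would carry the segment [100, 150) in vdev_dtl_map until a resilver
 * copies the missing data and vdev_dtl_reassess() excises the range.
 */
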
void
vdev_dtl_dirty(space_map_t *sm, uint64_t txg, uint64_t size)
{
	mutex_enter(sm->sm_lock);
	if (!space_map_contains(sm, txg, size))
		space_map_add(sm, txg, size);
	mutex_exit(sm->sm_lock);
}

int
vdev_dtl_contains(space_map_t *sm, uint64_t txg, uint64_t size)
{
	int dirty;

	/*
	 * Quick test without the lock -- covers the common case that
	 * there are no dirty time segments.
	 */
	if (sm->sm_space == 0)
		return (0);

	mutex_enter(sm->sm_lock);
	dirty = space_map_contains(sm, txg, size);
	mutex_exit(sm->sm_lock);

	return (dirty);
}

/*
 * Reassess DTLs after a config change or scrub completion.
 */
void
vdev_dtl_reassess(vdev_t *vd, uint64_t txg, uint64_t scrub_txg, int scrub_done)
{
	spa_t *spa = vd->vdev_spa;
	int c;

	ASSERT(spa_config_held(spa, RW_WRITER));

	if (vd->vdev_children == 0) {
		mutex_enter(&vd->vdev_dtl_lock);
		/*
		 * We've successfully scrubbed everything up to scrub_txg.
		 * Therefore, excise all old DTLs up to that point, then
		 * fold in the DTLs for everything we couldn't scrub.
		 */
		if (scrub_txg != 0) {
			space_map_excise(&vd->vdev_dtl_map, 0, scrub_txg);
			space_map_union(&vd->vdev_dtl_map, &vd->vdev_dtl_scrub);
		}
		if (scrub_done)
			space_map_vacate(&vd->vdev_dtl_scrub, NULL, NULL);
		mutex_exit(&vd->vdev_dtl_lock);
		if (txg != 0)
			vdev_dirty(vd->vdev_top, VDD_DTL, vd, txg);
		return;
	}

	/*
	 * Make sure the DTLs are always correct under the scrub lock.
	 */
	if (vd == spa->spa_root_vdev)
		mutex_enter(&spa->spa_scrub_lock);

	mutex_enter(&vd->vdev_dtl_lock);
	space_map_vacate(&vd->vdev_dtl_map, NULL, NULL);
	space_map_vacate(&vd->vdev_dtl_scrub, NULL, NULL);
	mutex_exit(&vd->vdev_dtl_lock);

	for (c = 0; c < vd->vdev_children; c++) {
		vdev_t *cvd = vd->vdev_child[c];
		vdev_dtl_reassess(cvd, txg, scrub_txg, scrub_done);
		mutex_enter(&vd->vdev_dtl_lock);
		space_map_union(&vd->vdev_dtl_map, &cvd->vdev_dtl_map);
		space_map_union(&vd->vdev_dtl_scrub, &cvd->vdev_dtl_scrub);
		mutex_exit(&vd->vdev_dtl_lock);
	}

	if (vd == spa->spa_root_vdev)
		mutex_exit(&spa->spa_scrub_lock);
}

static int
vdev_dtl_load(vdev_t *vd)
{
	spa_t *spa = vd->vdev_spa;
	space_map_obj_t *smo = &vd->vdev_dtl;
	objset_t *mos = spa->spa_meta_objset;
	dmu_buf_t *db;
	int error;

	ASSERT(vd->vdev_children == 0);

	if (smo->smo_object == 0)
		return (0);

	if ((error = dmu_bonus_hold(mos, smo->smo_object, FTAG, &db)) != 0)
		return (error);

	ASSERT3U(db->db_size, >=, sizeof (*smo));
	bcopy(db->db_data, smo, sizeof (*smo));
	dmu_buf_rele(db, FTAG);

	mutex_enter(&vd->vdev_dtl_lock);
	error = space_map_load(&vd->vdev_dtl_map, NULL, SM_ALLOC, smo, mos);
	mutex_exit(&vd->vdev_dtl_lock);

	return (error);
}

void
vdev_dtl_sync(vdev_t *vd, uint64_t txg)
{
	spa_t *spa = vd->vdev_spa;
	space_map_obj_t *smo = &vd->vdev_dtl;
	space_map_t *sm = &vd->vdev_dtl_map;
	objset_t *mos = spa->spa_meta_objset;
	space_map_t smsync;
	kmutex_t smlock;
	dmu_buf_t *db;
	dmu_tx_t *tx;

	dprintf("%s in txg %llu pass %d\n",
	    vdev_description(vd), (u_longlong_t)txg, spa_sync_pass(spa));

	tx = dmu_tx_create_assigned(spa->spa_dsl_pool, txg);

	if (vd->vdev_detached) {
		if (smo->smo_object != 0) {
			int err = dmu_object_free(mos, smo->smo_object, tx);
			ASSERT3U(err, ==, 0);
			smo->smo_object = 0;
		}
		dmu_tx_commit(tx);
		dprintf("detach %s committed in txg %llu\n",
		    vdev_description(vd), txg);
		return;
	}

	if (smo->smo_object == 0) {
		ASSERT(smo->smo_objsize == 0);
		ASSERT(smo->smo_alloc == 0);
		smo->smo_object = dmu_object_alloc(mos,
		    DMU_OT_SPACE_MAP, 1 << SPACE_MAP_BLOCKSHIFT,
		    DMU_OT_SPACE_MAP_HEADER, sizeof (*smo), tx);
		ASSERT(smo->smo_object != 0);
		vdev_config_dirty(vd->vdev_top);
	}

	mutex_init(&smlock, NULL, MUTEX_DEFAULT, NULL);

	space_map_create(&smsync, sm->sm_start, sm->sm_size, sm->sm_shift,
	    &smlock);

	mutex_enter(&smlock);

	mutex_enter(&vd->vdev_dtl_lock);
	space_map_walk(sm, space_map_add, &smsync);
	mutex_exit(&vd->vdev_dtl_lock);

	space_map_truncate(smo, mos, tx);
	space_map_sync(&smsync, SM_ALLOC, smo, mos, tx);

	space_map_destroy(&smsync);

	mutex_exit(&smlock);
	mutex_destroy(&smlock);

	VERIFY(0 == dmu_bonus_hold(mos, smo->smo_object, FTAG, &db));
	dmu_buf_will_dirty(db, tx);
	ASSERT3U(db->db_size, >=, sizeof (*smo));
	bcopy(smo, db->db_data, sizeof (*smo));
	dmu_buf_rele(db, FTAG);

	dmu_tx_commit(tx);
}

void
vdev_load(vdev_t *vd)
{
	int c;

	/*
	 * Recursively load all children.
	 */
	for (c = 0; c < vd->vdev_children; c++)
		vdev_load(vd->vdev_child[c]);

	/*
	 * If this is a top-level vdev, initialize its metaslabs.
	 */
	if (vd == vd->vdev_top &&
	    (vd->vdev_ashift == 0 || vd->vdev_asize == 0 ||
	    vdev_metaslab_init(vd, 0) != 0))
		vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_CORRUPT_DATA);

	/*
	 * If this is a leaf vdev, load its DTL.
	 */
	if (vd->vdev_ops->vdev_op_leaf && vdev_dtl_load(vd) != 0)
		vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_CORRUPT_DATA);
}

/*
 * The special vdev case is used for hot spares and l2cache devices.  Its
 * sole purpose is to set the vdev state for the associated vdev.  To do this,
 * we make sure that we can open the underlying device, then try to read the
 * label, and make sure that the label is sane and that it hasn't been
 * repurposed to another pool.
 */
int
vdev_validate_aux(vdev_t *vd)
{
	nvlist_t *label;
	uint64_t guid, version;
	uint64_t state;

	if (vdev_is_dead(vd))
		return (0);

	if ((label = vdev_label_read_config(vd)) == NULL) {
		vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_CORRUPT_DATA);
		return (-1);
	}

	if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_VERSION, &version) != 0 ||
	    version > SPA_VERSION ||
	    nvlist_lookup_uint64(label, ZPOOL_CONFIG_GUID, &guid) != 0 ||
	    guid != vd->vdev_guid ||
	    nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_STATE, &state) != 0) {
		vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_CORRUPT_DATA);
		nvlist_free(label);
		return (-1);
	}

	/*
	 * We don't actually check the pool state here.  If it's in fact in
	 * use by another pool, we update this fact on the fly when requested.
	 */
	nvlist_free(label);
	return (0);
}

void
vdev_sync_done(vdev_t *vd, uint64_t txg)
{
	metaslab_t *msp;

	dprintf("%s txg %llu\n", vdev_description(vd), txg);

	while (msp = txg_list_remove(&vd->vdev_ms_list, TXG_CLEAN(txg)))
		metaslab_sync_done(msp, txg);
}

void
vdev_sync(vdev_t *vd, uint64_t txg)
{
	spa_t *spa = vd->vdev_spa;
	vdev_t *lvd;
	metaslab_t *msp;
	dmu_tx_t *tx;

	dprintf("%s txg %llu pass %d\n",
	    vdev_description(vd), (u_longlong_t)txg, spa_sync_pass(spa));

	if (vd->vdev_ms_array == 0 && vd->vdev_ms_shift != 0) {
		ASSERT(vd == vd->vdev_top);
		tx = dmu_tx_create_assigned(spa->spa_dsl_pool, txg);
		vd->vdev_ms_array = dmu_object_alloc(spa->spa_meta_objset,
		    DMU_OT_OBJECT_ARRAY, 0, DMU_OT_NONE, 0, tx);
		ASSERT(vd->vdev_ms_array != 0);
		vdev_config_dirty(vd);
		dmu_tx_commit(tx);
	}

	while ((msp = txg_list_remove(&vd->vdev_ms_list, txg)) != NULL) {
		metaslab_sync(msp, txg);
		(void) txg_list_add(&vd->vdev_ms_list, msp, TXG_CLEAN(txg));
	}

	while ((lvd = txg_list_remove(&vd->vdev_dtl_list, txg)) != NULL)
		vdev_dtl_sync(lvd, txg);

	(void) txg_list_add(&spa->spa_vdev_txg_list, vd, TXG_CLEAN(txg));
}

uint64_t
vdev_psize_to_asize(vdev_t *vd, uint64_t psize)
{
	return (vd->vdev_ops->vdev_op_asize(vd, psize));
}

const char *
vdev_description(vdev_t *vd)
{
	if (vd == NULL || vd->vdev_ops == NULL)
		return ("<unknown>");

	if (vd->vdev_path != NULL)
		return (vd->vdev_path);

	if (vd->vdev_parent == NULL)
		return (spa_name(vd->vdev_spa));

	return (vd->vdev_ops->vdev_op_type);
}

/*
 * Mark the given vdev faulted.  A faulted vdev behaves as if the device could
 * not be opened, and no I/O is attempted.
 */
int
vdev_fault(spa_t *spa, uint64_t guid)
{
	vdev_t *vd;
	uint64_t txg;

	/*
	 * Disregard a vdev fault request if the pool has
	 * experienced a complete failure.
	 *
	 * XXX - We do this here so that we don't hold the
	 * spa_namespace_lock in the event that we can't get
	 * the RW_WRITER spa_config_lock.
	 */
	if (spa_state(spa) == POOL_STATE_IO_FAILURE)
		return (EIO);

	txg = spa_vdev_enter(spa);

	if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL)
		return (spa_vdev_exit(spa, NULL, txg, ENODEV));
	if (!vd->vdev_ops->vdev_op_leaf)
		return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));

	/*
	 * Faulted state takes precedence over degraded.
	 */
	vd->vdev_faulted = 1ULL;
	vd->vdev_degraded = 0ULL;
	vdev_set_state(vd, B_FALSE, VDEV_STATE_FAULTED,
	    VDEV_AUX_ERR_EXCEEDED);

	/*
	 * If marking the vdev as faulted causes the toplevel vdev to become
	 * unavailable, then back off and simply mark the vdev as degraded
	 * instead.
	 */
	if (vdev_is_dead(vd->vdev_top) && vd->vdev_aux == NULL) {
		vd->vdev_degraded = 1ULL;
		vd->vdev_faulted = 0ULL;

		/*
		 * If we reopen the device and it's not dead, only then do we
		 * mark it degraded.
		 */
		vdev_reopen(vd);

		if (vdev_readable(vd)) {
			vdev_set_state(vd, B_FALSE, VDEV_STATE_DEGRADED,
			    VDEV_AUX_ERR_EXCEEDED);
		}
	}

	vdev_config_dirty(vd->vdev_top);

	(void) spa_vdev_exit(spa, NULL, txg, 0);

	return (0);
}

/*
 * Mark the given vdev degraded.  A degraded vdev is purely an indication to the
 * user that something is wrong.  The vdev continues to operate as normal as far
 * as I/O is concerned.
 */
int
vdev_degrade(spa_t *spa, uint64_t guid)
{
	vdev_t *vd;
	uint64_t txg;

	/*
	 * Disregard a vdev fault request if the pool has
	 * experienced a complete failure.
	 *
	 * XXX - We do this here so that we don't hold the
	 * spa_namespace_lock in the event that we can't get
	 * the RW_WRITER spa_config_lock.
	 */
	if (spa_state(spa) == POOL_STATE_IO_FAILURE)
		return (EIO);

	txg = spa_vdev_enter(spa);

	if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL)
		return (spa_vdev_exit(spa, NULL, txg, ENODEV));
	if (!vd->vdev_ops->vdev_op_leaf)
		return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));

	/*
	 * If the vdev is already faulted, then don't do anything.
	 */
	if (vd->vdev_faulted || vd->vdev_degraded) {
		(void) spa_vdev_exit(spa, NULL, txg, 0);
		return (0);
	}

	vd->vdev_degraded = 1ULL;
	if (!vdev_is_dead(vd))
		vdev_set_state(vd, B_FALSE, VDEV_STATE_DEGRADED,
		    VDEV_AUX_ERR_EXCEEDED);
	vdev_config_dirty(vd->vdev_top);

	(void) spa_vdev_exit(spa, NULL, txg, 0);

	return (0);
}

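/*
 * Informal summary of the administrative transitions implemented in
 * this file (a sketch, not a formal state machine from the original
 * source):
 *
 *	vdev_fault()	-> FAULTED (no I/O attempted); demoted to
 *			   DEGRADED if the top-level vdev would
 *			   otherwise become unusable
 *	vdev_degrade()	-> DEGRADED (I/O continues); no-op if already
 *			   faulted or degraded
 *	vdev_online()	-> clears the offline flag and reopens the device
 *	vdev_offline()	-> OFFLINE; refused with EBUSY if it would leave
 *			   the top-level vdev without a usable replica
 */
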
/*
 * Online the given vdev.  If 'unspare' is set, it implies two things.  First,
 * any attached spare device should be detached when the device finishes
 * resilvering.  Second, the online should be treated like a 'test' online case,
 * so no FMA events are generated if the device fails to open.
 */
int
vdev_online(spa_t *spa, uint64_t guid, uint64_t flags,
    vdev_state_t *newstate)
{
	vdev_t *vd;
	uint64_t txg;

	/*
	 * Disregard a vdev fault request if the pool has
	 * experienced a complete failure.
	 *
	 * XXX - We do this here so that we don't hold the
	 * spa_namespace_lock in the event that we can't get
	 * the RW_WRITER spa_config_lock.
	 */
	if (spa_state(spa) == POOL_STATE_IO_FAILURE)
		return (EIO);

	txg = spa_vdev_enter(spa);

	if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL)
		return (spa_vdev_exit(spa, NULL, txg, ENODEV));

	if (!vd->vdev_ops->vdev_op_leaf)
		return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));

	vd->vdev_offline = B_FALSE;
	vd->vdev_tmpoffline = B_FALSE;
	vd->vdev_checkremove = (flags & ZFS_ONLINE_CHECKREMOVE) ?
	    B_TRUE : B_FALSE;
	vd->vdev_forcefault = (flags & ZFS_ONLINE_FORCEFAULT) ?
	    B_TRUE : B_FALSE;
	vdev_reopen(vd->vdev_top);
	vd->vdev_checkremove = vd->vdev_forcefault = B_FALSE;

	if (newstate)
		*newstate = vd->vdev_state;
	if ((flags & ZFS_ONLINE_UNSPARE) &&
	    !vdev_is_dead(vd) && vd->vdev_parent &&
	    vd->vdev_parent->vdev_ops == &vdev_spare_ops &&
	    vd->vdev_parent->vdev_child[0] == vd)
		vd->vdev_unspare = B_TRUE;

	vdev_config_dirty(vd->vdev_top);

	(void) spa_vdev_exit(spa, NULL, txg, 0);

	/*
	 * Must hold spa_namespace_lock in order to post resilver sysevent
	 * w/pool name.
	 */
	mutex_enter(&spa_namespace_lock);
	VERIFY(spa_scrub(spa, POOL_SCRUB_RESILVER, B_TRUE) == 0);
	mutex_exit(&spa_namespace_lock);

	return (0);
}

int
vdev_offline(spa_t *spa, uint64_t guid, uint64_t flags)
{
	vdev_t *vd;
	uint64_t txg;

	/*
	 * Disregard a vdev fault request if the pool has
	 * experienced a complete failure.
	 *
	 * XXX - We do this here so that we don't hold the
	 * spa_namespace_lock in the event that we can't get
	 * the RW_WRITER spa_config_lock.
	 */
	if (spa_state(spa) == POOL_STATE_IO_FAILURE)
		return (EIO);

	txg = spa_vdev_enter(spa);

	if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL)
		return (spa_vdev_exit(spa, NULL, txg, ENODEV));

	if (!vd->vdev_ops->vdev_op_leaf)
		return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));

	/*
	 * If the device isn't already offline, try to offline it.
	 */
	if (!vd->vdev_offline) {
		/*
		 * If this device's top-level vdev has a non-empty DTL,
		 * don't allow the device to be offlined.
		 *
		 * XXX -- make this more precise by allowing the offline
		 * as long as the remaining devices don't have any DTL holes.
		 */
		if (vd->vdev_top->vdev_dtl_map.sm_space != 0)
			return (spa_vdev_exit(spa, NULL, txg, EBUSY));

		/*
		 * Offline this device and reopen its top-level vdev.
		 * If this action results in the top-level vdev becoming
		 * unusable, undo it and fail the request.
		 */
		vd->vdev_offline = B_TRUE;
		vdev_reopen(vd->vdev_top);
		if (vdev_is_dead(vd->vdev_top) && vd->vdev_aux == NULL) {
			vd->vdev_offline = B_FALSE;
			vdev_reopen(vd->vdev_top);
			return (spa_vdev_exit(spa, NULL, txg, EBUSY));
		}
	}

	vd->vdev_tmpoffline = (flags & ZFS_OFFLINE_TEMPORARY) ?
	    B_TRUE : B_FALSE;

	vdev_config_dirty(vd->vdev_top);

	return (spa_vdev_exit(spa, NULL, txg, 0));
}

/*
 * Clear the error counts associated with this vdev.  Unlike vdev_online() and
 * vdev_offline(), we assume the spa config is locked.  We also clear all
 * children.  If 'vd' is NULL, then the user wants to clear all vdevs.
 * If reopen is specified then attempt to reopen the vdev if the vdev is
 * faulted or degraded.
 */
void
vdev_clear(spa_t *spa, vdev_t *vd, boolean_t reopen_wanted)
{
	int c;

	if (vd == NULL)
		vd = spa->spa_root_vdev;

	vd->vdev_stat.vs_read_errors = 0;
	vd->vdev_stat.vs_write_errors = 0;
	vd->vdev_stat.vs_checksum_errors = 0;
	vd->vdev_is_failing = B_FALSE;

	for (c = 0; c < vd->vdev_children; c++)
		vdev_clear(spa, vd->vdev_child[c], reopen_wanted);

	/*
	 * If we're in the FAULTED state, then clear the persistent state and
	 * attempt to reopen the device.  We also mark the vdev config dirty, so
	 * that the new faulted state is written out to disk.
	 */
	if (reopen_wanted && (vd->vdev_faulted || vd->vdev_degraded)) {
		vd->vdev_faulted = vd->vdev_degraded = 0;
		vdev_reopen(vd);
		vdev_config_dirty(vd->vdev_top);

		if (vd->vdev_faulted && vd->vdev_aux == NULL)
			spa_async_request(spa, SPA_ASYNC_RESILVER);

		spa_event_notify(spa, vd, ESC_ZFS_VDEV_CLEAR);
	}
}

int
vdev_readable(vdev_t *vd)
{
	/* XXPOLICY */
	return (!vdev_is_dead(vd));
}

int
vdev_writeable(vdev_t *vd)
{
	return (!vdev_is_dead(vd) && !vd->vdev_is_failing);
}

int
vdev_is_dead(vdev_t *vd)
{
	/*
	 * If the vdev experienced I/O failures, then the vdev is marked
	 * as faulted (VDEV_STATE_FAULTED) for status output and FMA; however,
	 * we need to allow access to the vdev for resumed I/Os (see
	 * zio_vdev_resume_io()).
	 */
	return (vd->vdev_state < VDEV_STATE_DEGRADED &&
	    vd->vdev_stat.vs_aux != VDEV_AUX_IO_FAILURE);
}

int
vdev_error_inject(vdev_t *vd, zio_t *zio)
{
	int error = 0;

	if (vd->vdev_fault_mode == VDEV_FAULT_NONE)
		return (0);

	if (((1ULL << zio->io_type) & vd->vdev_fault_mask) == 0)
		return (0);

	switch (vd->vdev_fault_mode) {
	case VDEV_FAULT_RANDOM:
		if (spa_get_random(vd->vdev_fault_arg) == 0)
			error = EIO;
		break;

	case VDEV_FAULT_COUNT:
		if ((int64_t)--vd->vdev_fault_arg <= 0)
			vd->vdev_fault_mode = VDEV_FAULT_NONE;
		error = EIO;
		break;
	}

	return (error);
}

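/*
 * Example of the injection modes above (hypothetical settings): with
 * vdev_fault_mode == VDEV_FAULT_RANDOM and vdev_fault_arg == 100,
 * roughly one matching I/O in 100 fails with EIO, since
 * spa_get_random(100) returns 0 with probability 1/100.  With
 * VDEV_FAULT_COUNT and vdev_fault_arg == 3, the next three matching
 * I/Os fail with EIO and then the injection disarms itself.
 */
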
/*
 * Get statistics for the given vdev.
 */
void
vdev_get_stats(vdev_t *vd, vdev_stat_t *vs)
{
	vdev_t *rvd = vd->vdev_spa->spa_root_vdev;
	int c, t;

	mutex_enter(&vd->vdev_stat_lock);
	bcopy(&vd->vdev_stat, vs, sizeof (*vs));
	vs->vs_timestamp = gethrtime() - vs->vs_timestamp;
	vs->vs_state = vd->vdev_state;
	vs->vs_rsize = vdev_get_rsize(vd);
	mutex_exit(&vd->vdev_stat_lock);

	/*
	 * If we're getting stats on the root vdev, aggregate the I/O counts
	 * over all top-level vdevs (i.e. the direct children of the root).
	 */
	if (vd == rvd) {
		for (c = 0; c < rvd->vdev_children; c++) {
			vdev_t *cvd = rvd->vdev_child[c];
			vdev_stat_t *cvs = &cvd->vdev_stat;

			mutex_enter(&vd->vdev_stat_lock);
			for (t = 0; t < ZIO_TYPES; t++) {
				vs->vs_ops[t] += cvs->vs_ops[t];
				vs->vs_bytes[t] += cvs->vs_bytes[t];
			}
			vs->vs_read_errors += cvs->vs_read_errors;
			vs->vs_write_errors += cvs->vs_write_errors;
			vs->vs_checksum_errors += cvs->vs_checksum_errors;
			vs->vs_scrub_examined += cvs->vs_scrub_examined;
			vs->vs_scrub_errors += cvs->vs_scrub_errors;
			mutex_exit(&vd->vdev_stat_lock);
		}
	}
}

void
vdev_clear_stats(vdev_t *vd)
{
	mutex_enter(&vd->vdev_stat_lock);
	vd->vdev_stat.vs_space = 0;
	vd->vdev_stat.vs_dspace = 0;
	vd->vdev_stat.vs_alloc = 0;
	mutex_exit(&vd->vdev_stat_lock);
}

void
vdev_stat_update(zio_t *zio)
{
	vdev_t *vd = zio->io_vd;
	vdev_t *pvd;
	uint64_t txg = zio->io_txg;
	vdev_stat_t *vs = &vd->vdev_stat;
	zio_type_t type = zio->io_type;
	int flags = zio->io_flags;

	if (zio->io_error == 0) {
		if (!(flags & ZIO_FLAG_IO_BYPASS)) {
			mutex_enter(&vd->vdev_stat_lock);
			vs->vs_ops[type]++;
			vs->vs_bytes[type] += zio->io_size;
			mutex_exit(&vd->vdev_stat_lock);
		}
		if ((flags & ZIO_FLAG_IO_REPAIR) &&
		    zio->io_delegate_list == NULL) {
			mutex_enter(&vd->vdev_stat_lock);
			if (flags & ZIO_FLAG_SCRUB_THREAD)
				vs->vs_scrub_repaired += zio->io_size;
			else
				vs->vs_self_healed += zio->io_size;
			mutex_exit(&vd->vdev_stat_lock);
		}
		return;
	}

	if (flags & ZIO_FLAG_SPECULATIVE)
		return;

	if (vdev_readable(vd)) {
		mutex_enter(&vd->vdev_stat_lock);
		if (type == ZIO_TYPE_READ) {
			if (zio->io_error == ECKSUM)
				vs->vs_checksum_errors++;
			else
				vs->vs_read_errors++;
		}
		if (type == ZIO_TYPE_WRITE)
			vs->vs_write_errors++;
		mutex_exit(&vd->vdev_stat_lock);
	}

	if (type == ZIO_TYPE_WRITE) {
		if (txg == 0 || vd->vdev_children != 0)
			return;
		if (flags & ZIO_FLAG_SCRUB_THREAD) {
			ASSERT(flags & ZIO_FLAG_IO_REPAIR);
			for (pvd = vd; pvd != NULL; pvd = pvd->vdev_parent)
				vdev_dtl_dirty(&pvd->vdev_dtl_scrub, txg, 1);
		}
		if (!(flags & ZIO_FLAG_IO_REPAIR)) {
			if (vdev_dtl_contains(&vd->vdev_dtl_map, txg, 1))
				return;
			vdev_dirty(vd->vdev_top, VDD_DTL, vd, txg);
			for (pvd = vd; pvd != NULL; pvd = pvd->vdev_parent)
				vdev_dtl_dirty(&pvd->vdev_dtl_map, txg, 1);
		}
	}
}

void
vdev_scrub_stat_update(vdev_t *vd, pool_scrub_type_t type, boolean_t complete)
{
	int c;
	vdev_stat_t *vs = &vd->vdev_stat;

	for (c = 0; c < vd->vdev_children; c++)
		vdev_scrub_stat_update(vd->vdev_child[c], type, complete);

	mutex_enter(&vd->vdev_stat_lock);

	if (type == POOL_SCRUB_NONE) {
		/*
		 * Update completion and end time.  Leave everything else alone
		 * so we can report what happened during the previous scrub.
		 */
		vs->vs_scrub_complete = complete;
		vs->vs_scrub_end = gethrestime_sec();
	} else {
		vs->vs_scrub_type = type;
		vs->vs_scrub_complete = 0;
		vs->vs_scrub_examined = 0;
		vs->vs_scrub_repaired = 0;
		vs->vs_scrub_errors = 0;
		vs->vs_scrub_start = gethrestime_sec();
		vs->vs_scrub_end = 0;
	}

	mutex_exit(&vd->vdev_stat_lock);
}

/*
 * Update the in-core space usage stats for this vdev and the root vdev.
 */
void
vdev_space_update(vdev_t *vd, int64_t space_delta, int64_t alloc_delta,
    boolean_t update_root)
{
	int64_t dspace_delta = space_delta;
	spa_t *spa = vd->vdev_spa;
	vdev_t *rvd = spa->spa_root_vdev;

	ASSERT(vd == vd->vdev_top);

	/*
	 * Apply the inverse of the psize-to-asize (ie. RAID-Z) space-expansion
	 * factor.  We must calculate this here and not at the root vdev
	 * because the root vdev's psize-to-asize is simply the max of its
	 * children's, thus not accurate enough for us.
	 */
	ASSERT((dspace_delta & (SPA_MINBLOCKSIZE-1)) == 0);
	dspace_delta = (dspace_delta >> SPA_MINBLOCKSHIFT) *
	    vd->vdev_deflate_ratio;

	mutex_enter(&vd->vdev_stat_lock);
	vd->vdev_stat.vs_space += space_delta;
	vd->vdev_stat.vs_alloc += alloc_delta;
	vd->vdev_stat.vs_dspace += dspace_delta;
	mutex_exit(&vd->vdev_stat_lock);

	if (update_root) {
		ASSERT(rvd == vd->vdev_parent);
		ASSERT(vd->vdev_ms_count != 0);

		/*
		 * Don't count non-normal (e.g. intent log) space as part of
		 * the pool's capacity.
		 */
		if (vd->vdev_mg->mg_class != spa->spa_normal_class)
			return;

		mutex_enter(&rvd->vdev_stat_lock);
		rvd->vdev_stat.vs_space += space_delta;
		rvd->vdev_stat.vs_alloc += alloc_delta;
		rvd->vdev_stat.vs_dspace += dspace_delta;
		mutex_exit(&rvd->vdev_stat_lock);
	}
}

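/*
 * Worked example of the deflation above (hypothetical numbers): with
 * vdev_deflate_ratio == 512 (the no-expansion case from vdev_open())
 * and space_delta == 1MB, dspace_delta == (1048576 >> 9) * 512 ==
 * 1048576, so vs_dspace tracks vs_space one-for-one.  A RAID-Z vdev
 * whose 128K psize expands to, say, 160K of asize has a ratio of
 * 131072 / (163840 >> 9) == 409, so deflated space grows more slowly
 * than raw allocated space.
 */
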
/*
 * Mark a top-level vdev's config as dirty, placing it on the dirty list
 * so that it will be written out next time the vdev configuration is synced.
 * If the root vdev is specified (vdev_top == NULL), dirty all top-level vdevs.
 */
void
vdev_config_dirty(vdev_t *vd)
{
	spa_t *spa = vd->vdev_spa;
	vdev_t *rvd = spa->spa_root_vdev;
	int c;

	/*
	 * If this is an aux vdev (as with l2cache devices), then we update the
	 * vdev config manually and set the sync flag.
	 */
	if (vd->vdev_aux != NULL) {
		spa_aux_vdev_t *sav = vd->vdev_aux;
		nvlist_t **aux;
		uint_t naux;

		for (c = 0; c < sav->sav_count; c++) {
			if (sav->sav_vdevs[c] == vd)
				break;
		}

		ASSERT(c < sav->sav_count);
		sav->sav_sync = B_TRUE;

		VERIFY(nvlist_lookup_nvlist_array(sav->sav_config,
		    ZPOOL_CONFIG_L2CACHE, &aux, &naux) == 0);

		ASSERT(c < naux);

		/*
		 * Setting the nvlist in the middle of the array is a little
		 * sketchy, but it will work.
		 */
		nvlist_free(aux[c]);
		aux[c] = vdev_config_generate(spa, vd, B_TRUE, B_FALSE, B_TRUE);

		return;
	}

	/*
	 * The dirty list is protected by the config lock.  The caller must
	 * either hold the config lock as writer, or must be the sync thread
	 * (which holds the lock as reader).  There's only one sync thread,
	 * so this is sufficient to ensure mutual exclusion.
	 */
	ASSERT(spa_config_held(spa, RW_WRITER) ||
	    dsl_pool_sync_context(spa_get_dsl(spa)));

	if (vd == rvd) {
		for (c = 0; c < rvd->vdev_children; c++)
			vdev_config_dirty(rvd->vdev_child[c]);
	} else {
		ASSERT(vd == vd->vdev_top);

		if (!list_link_active(&vd->vdev_dirty_node))
			list_insert_head(&spa->spa_dirty_list, vd);
	}
}

void
vdev_config_clean(vdev_t *vd)
{
	spa_t *spa = vd->vdev_spa;

	ASSERT(spa_config_held(spa, RW_WRITER) ||
	    dsl_pool_sync_context(spa_get_dsl(spa)));

	ASSERT(list_link_active(&vd->vdev_dirty_node));
	list_remove(&spa->spa_dirty_list, vd);
}

/*
 * Propagate vdev state up from children to parent.
 */
void
vdev_propagate_state(vdev_t *vd)
{
	vdev_t *rvd = vd->vdev_spa->spa_root_vdev;
	int degraded = 0, faulted = 0;
	int corrupted = 0;
	int c;
	vdev_t *child;

	if (vd->vdev_children > 0) {
		for (c = 0; c < vd->vdev_children; c++) {
			child = vd->vdev_child[c];
			if (vdev_is_dead(child) && !vdev_readable(child))
				faulted++;
			else if (child->vdev_stat.vs_aux == VDEV_AUX_IO_FAILURE)
				faulted++;
			else if (child->vdev_state <= VDEV_STATE_DEGRADED)
				degraded++;

			if (child->vdev_stat.vs_aux == VDEV_AUX_CORRUPT_DATA)
				corrupted++;
		}

		vd->vdev_ops->vdev_op_state_change(vd, faulted, degraded);

		/*
		 * Root special: if there is a toplevel vdev that cannot be
		 * opened due to corrupted metadata, then propagate the root
		 * vdev's aux state as 'corrupt' rather than 'insufficient
		 * replicas'.
		 */
		if (corrupted && vd == rvd &&
		    rvd->vdev_state == VDEV_STATE_CANT_OPEN)
			vdev_set_state(rvd, B_FALSE, VDEV_STATE_CANT_OPEN,
			    VDEV_AUX_CORRUPT_DATA);
	}

	if (vd->vdev_parent && !vd->vdev_islog)
		vdev_propagate_state(vd->vdev_parent);
}

/*
 * Set a vdev's state.  If this is during an open, we don't update the parent
 * state, because we're in the process of opening children depth-first.
 * Otherwise, we propagate the change to the parent.
 *
 * If this routine places a device in a faulted state, an appropriate ereport is
 * generated.
 */
void
vdev_set_state(vdev_t *vd, boolean_t isopen, vdev_state_t state, vdev_aux_t aux)
{
	uint64_t save_state;
	spa_t *spa = vd->vdev_spa;

	if (state == vd->vdev_state) {
		vd->vdev_stat.vs_aux = aux;
		return;
	}

	save_state = vd->vdev_state;

	vd->vdev_state = state;
	vd->vdev_stat.vs_aux = aux;

	/*
	 * If we are setting the vdev state to anything but an open state, then
	 * always close the underlying device.  Otherwise, we keep accessible
	 * but invalid devices open forever.  We don't call vdev_close() itself,
	 * because that implies some extra checks (offline, etc) that we don't
	 * want here.  This is limited to leaf devices, because otherwise
	 * closing the device will affect other children.
	 */
	if (!vdev_readable(vd) && vd->vdev_ops->vdev_op_leaf)
		vd->vdev_ops->vdev_op_close(vd);

	if (vd->vdev_removed &&
	    state == VDEV_STATE_CANT_OPEN &&
	    (aux == VDEV_AUX_OPEN_FAILED || vd->vdev_checkremove)) {
		/*
		 * If the previous state is set to VDEV_STATE_REMOVED, then this
		 * device was previously marked removed and someone attempted to
		 * reopen it.  If this failed due to a nonexistent device, then
		 * keep the device in the REMOVED state.  We also let this be if
		 * it is one of our special test online cases, which is only
		 * attempting to online the device and shouldn't generate an FMA
		 * fault.
		 */
		vd->vdev_state = VDEV_STATE_REMOVED;
		vd->vdev_stat.vs_aux = VDEV_AUX_NONE;
	} else if (state == VDEV_STATE_REMOVED) {
		/*
		 * Indicate to the ZFS DE that this device has been removed, and
		 * any recent errors should be ignored.
		 */
		zfs_post_remove(spa, vd);
		vd->vdev_removed = B_TRUE;
	} else if (state == VDEV_STATE_CANT_OPEN) {
		/*
		 * If we fail to open a vdev during an import, we mark it as
		 * "not available", which signifies that it was never there to
		 * begin with.  Failure to open such a device is not considered
		 * an error.
		 */
		if (spa->spa_load_state == SPA_LOAD_IMPORT &&
		    !spa->spa_import_faulted &&
		    vd->vdev_ops->vdev_op_leaf)
			vd->vdev_not_present = 1;

		/*
		 * Post the appropriate ereport.  If the 'prevstate' field is
		 * set to something other than VDEV_STATE_UNKNOWN, it indicates
		 * that this is part of a vdev_reopen().  In this case, we don't
		 * want to post the ereport if the device was already in the
		 * CANT_OPEN state beforehand.
		 *
		 * If the 'checkremove' flag is set, then this is an attempt to
		 * online the device in response to an insertion event.  If we
		 * hit this case, then we have detected an insertion event for a
		 * faulted or offline device that wasn't in the removed state.
		 * In this scenario, we don't post an ereport because we are
		 * about to replace the device, or attempt an online with
		 * vdev_forcefault, which will generate the fault for us.
		 */
		if ((vd->vdev_prevstate != state || vd->vdev_forcefault) &&
		    !vd->vdev_not_present && !vd->vdev_checkremove &&
		    vd != spa->spa_root_vdev) {
			const char *class;

			switch (aux) {
			case VDEV_AUX_OPEN_FAILED:
				class = FM_EREPORT_ZFS_DEVICE_OPEN_FAILED;
				break;
			case VDEV_AUX_CORRUPT_DATA:
				class = FM_EREPORT_ZFS_DEVICE_CORRUPT_DATA;
				break;
			case VDEV_AUX_NO_REPLICAS:
				class = FM_EREPORT_ZFS_DEVICE_NO_REPLICAS;
				break;
			case VDEV_AUX_BAD_GUID_SUM:
				class = FM_EREPORT_ZFS_DEVICE_BAD_GUID_SUM;
				break;
			case VDEV_AUX_TOO_SMALL:
				class = FM_EREPORT_ZFS_DEVICE_TOO_SMALL;
				break;
			case VDEV_AUX_BAD_LABEL:
				class = FM_EREPORT_ZFS_DEVICE_BAD_LABEL;
				break;
			default:
				class = FM_EREPORT_ZFS_DEVICE_UNKNOWN;
			}

			zfs_ereport_post(class, spa, vd, NULL, save_state, 0);
		}

		/* Erase any notion of persistent removed state */
		vd->vdev_removed = B_FALSE;
	} else {
		vd->vdev_removed = B_FALSE;
	}

	if (!isopen)
		vdev_propagate_state(vd);
}