Lines Matching defs:vd

95 vdev_default_asize(vdev_t *vd, uint64_t psize)
97 uint64_t asize = P2ROUNDUP(psize, 1ULL << vd->vdev_top->vdev_ashift);
100 for (int c = 0; c < vd->vdev_children; c++) {
101 csize = vdev_psize_to_asize(vd->vdev_child[c], psize);
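
vdev_default_asize() (lines 95-101) rounds the physical size up to the top-level vdev's allocation unit (1 << ashift), then takes the maximum over what each child would allocate for the same psize. A standalone sketch of the rounding step, using the power-of-two macros as defined in illumos sys/sysmacros.h (the example values are illustrative only):

    #include <stdint.h>
    #include <stdio.h>

    /* power-of-two helpers, as in illumos sys/sysmacros.h */
    #define P2ROUNDUP(x, align)    (-(-(x) & -(align)))
    #define P2ALIGN(x, align)      ((x) & -(align))

    int
    main(void)
    {
        uint64_t ashift = 12;          /* 4K allocation unit */
        uint64_t psize = 5000;

        /* line 97: round psize up to a multiple of 1 << ashift */
        printf("%llu\n", (unsigned long long)
            P2ROUNDUP(psize, 1ULL << ashift));    /* prints 8192 */
        return (0);
    }

P2ALIGN (used at line 131 to round an asize down to a whole number of metaslabs) is the mirror image: it masks off the low bits instead of rounding up.
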
115 vdev_get_min_asize(vdev_t *vd)
117 vdev_t *pvd = vd->vdev_parent;
124 return (vd->vdev_asize);
130 if (vd == vd->vdev_top)
131 return (P2ALIGN(vd->vdev_asize, 1ULL << vd->vdev_ms_shift));
144 vdev_set_min_asize(vdev_t *vd)
146 vd->vdev_min_asize = vdev_get_min_asize(vd);
148 for (int c = 0; c < vd->vdev_children; c++)
149 vdev_set_min_asize(vd->vdev_child[c]);
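
vdev_set_min_asize() (lines 144-149) is a preorder walk: recompute this vdev's minimum allocatable size, then recurse into every child so the whole subtree is refreshed. A compilable sketch of the same shape on a simplified tree; the toy_vdev type and the inherit-your-own-asize rule are assumptions for illustration, whereas the real vdev_get_min_asize() (lines 115-131) also special-cases parentless, top-level, and raidz vdevs:

    #include <stdint.h>

    /* toy stand-in for vdev_t; field names mirror the kernel's */
    typedef struct toy_vdev {
        uint64_t          vdev_asize;
        uint64_t          vdev_min_asize;
        int               vdev_children;
        struct toy_vdev **vdev_child;
    } toy_vdev_t;

    static uint64_t
    toy_get_min_asize(toy_vdev_t *vd)
    {
        return (vd->vdev_asize);    /* simplified: no raidz/top cases */
    }

    /* preorder: fix this vdev first, then every child subtree */
    static void
    toy_set_min_asize(toy_vdev_t *vd)
    {
        vd->vdev_min_asize = toy_get_min_asize(vd);

        for (int c = 0; c < vd->vdev_children; c++)
            toy_set_min_asize(vd->vdev_child[c]);
    }
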
168 vdev_lookup_by_guid(vdev_t *vd, uint64_t guid)
172 if (vd->vdev_guid == guid)
173 return (vd);
175 for (int c = 0; c < vd->vdev_children; c++)
176 if ((mvd = vdev_lookup_by_guid(vd->vdev_child[c], guid)) !=
184 vdev_count_leaves_impl(vdev_t *vd)
188 if (vd->vdev_ops->vdev_op_leaf)
191 for (int c = 0; c < vd->vdev_children; c++)
192 n += vdev_count_leaves_impl(vd->vdev_child[c]);
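
vdev_lookup_by_guid() (lines 168-176) and vdev_count_leaves_impl() (lines 184-192) are the two standard recursions over the vdev tree: the first returns the first node whose guid matches, the second sums 1 per leaf. A compilable sketch on a toy tree (tv_t is an illustrative stand-in for vdev_t):

    #include <stdint.h>
    #include <stddef.h>

    typedef struct tv {
        uint64_t     vdev_guid;
        int          vdev_is_leaf;
        int          vdev_children;
        struct tv  **vdev_child;
    } tv_t;

    /* depth-first search; NULL if nothing in this subtree matches */
    static tv_t *
    tv_lookup_by_guid(tv_t *vd, uint64_t guid)
    {
        tv_t *mvd;

        if (vd->vdev_guid == guid)
            return (vd);

        for (int c = 0; c < vd->vdev_children; c++)
            if ((mvd = tv_lookup_by_guid(vd->vdev_child[c], guid)) != NULL)
                return (mvd);

        return (NULL);
    }

    /* a leaf counts as 1; an interior vdev sums over its children */
    static int
    tv_count_leaves(tv_t *vd)
    {
        int n = 0;

        if (vd->vdev_is_leaf)
            return (1);

        for (int c = 0; c < vd->vdev_children; c++)
            n += tv_count_leaves(vd->vdev_child[c]);

        return (n);
    }
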
314 vdev_t *vd;
316 vd = kmem_zalloc(sizeof (vdev_t), KM_SLEEP);
320 spa->spa_root_vdev = vd;
325 if (spa->spa_root_vdev == vd) {
340 vd->vdev_spa = spa;
341 vd->vdev_id = id;
342 vd->vdev_guid = guid;
343 vd->vdev_guid_sum = guid;
344 vd->vdev_ops = ops;
345 vd->vdev_state = VDEV_STATE_CLOSED;
346 vd->vdev_ishole = (ops == &vdev_hole_ops);
348 mutex_init(&vd->vdev_dtl_lock, NULL, MUTEX_DEFAULT, NULL);
349 mutex_init(&vd->vdev_stat_lock, NULL, MUTEX_DEFAULT, NULL);
350 mutex_init(&vd->vdev_probe_lock, NULL, MUTEX_DEFAULT, NULL);
352 vd->vdev_dtl[t] = range_tree_create(NULL, NULL,
353 &vd->vdev_dtl_lock);
355 txg_list_create(&vd->vdev_ms_list,
357 txg_list_create(&vd->vdev_dtl_list,
359 vd->vdev_stat.vs_timestamp = gethrtime();
360 vdev_queue_init(vd);
361 vdev_cache_init(vd);
363 return (vd);
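
vdev_alloc_common() (lines 314-363) follows the usual kernel constructor idiom: zero-filled allocation (kmem_zalloc with KM_SLEEP cannot fail), explicit initialization of the non-zero fields, lock and per-txg list setup, then return the object. A hedged userland analogue; obj_t and obj_alloc are hypothetical, with calloc and pthread mutexes standing in for kmem_zalloc and kernel mutexes:

    #include <stdint.h>
    #include <stdlib.h>
    #include <pthread.h>

    typedef struct obj {
        uint64_t        o_id;
        uint64_t        o_guid;
        pthread_mutex_t o_lock;
    } obj_t;

    static obj_t *
    obj_alloc(uint64_t id, uint64_t guid)
    {
        obj_t *o = calloc(1, sizeof (*o));    /* zeroed, like kmem_zalloc */

        if (o == NULL)
            abort();    /* KM_SLEEP allocations never return NULL */

        /* explicit init of non-zero state, as at lines 340-346 */
        o->o_id = id;
        o->o_guid = guid;
        (void) pthread_mutex_init(&o->o_lock, NULL);
        return (o);
    }
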
378 vdev_t *vd;
465 vd = vdev_alloc_common(spa, id, guid, ops);
467 vd->vdev_islog = islog;
468 vd->vdev_nparity = nparity;
470 if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &vd->vdev_path) == 0)
471 vd->vdev_path = spa_strdup(vd->vdev_path);
472 if (nvlist_lookup_string(nv, ZPOOL_CONFIG_DEVID, &vd->vdev_devid) == 0)
473 vd->vdev_devid = spa_strdup(vd->vdev_devid);
475 &vd->vdev_physpath) == 0)
476 vd->vdev_physpath = spa_strdup(vd->vdev_physpath);
477 if (nvlist_lookup_string(nv, ZPOOL_CONFIG_FRU, &vd->vdev_fru) == 0)
478 vd->vdev_fru = spa_strdup(vd->vdev_fru);
485 &vd->vdev_wholedisk) != 0)
486 vd->vdev_wholedisk = -1ULL;
493 &vd->vdev_not_present);
498 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ASHIFT, &vd->vdev_ashift);
504 &vd->vdev_crtxg);
512 &vd->vdev_ms_array);
514 &vd->vdev_ms_shift);
516 &vd->vdev_asize);
518 &vd->vdev_removing);
526 vd->vdev_mg = metaslab_group_create(islog ?
527 spa_log_class(spa) : spa_normal_class(spa), vd);
533 if (vd->vdev_ops->vdev_op_leaf &&
538 &vd->vdev_dtl_object);
540 &vd->vdev_unspare);
548 spa_spare_add(vd);
552 &vd->vdev_offline);
555 &vd->vdev_resilver_txg);
565 &vd->vdev_faulted);
567 &vd->vdev_degraded);
569 &vd->vdev_removed);
571 if (vd->vdev_faulted || vd->vdev_degraded) {
574 vd->vdev_label_aux =
579 vd->vdev_label_aux = VDEV_AUX_EXTERNAL;
587 vdev_add_child(parent, vd);
589 *vdp = vd;
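
The nvlist parsing in vdev_alloc() (lines 470-478 and onward) repeats one idiom: nvlist_lookup_*() returns 0 on success, and for strings the out-pointer aliases storage owned by the nvlist, so the value is duplicated only after a successful lookup. A sketch with the real libnvpair call, assuming illumos userland headers; strdup stands in for the kernel's spa_strdup:

    #include <libnvpair.h>
    #include <string.h>
    #include <sys/fs/zfs.h>    /* ZPOOL_CONFIG_PATH */

    static void
    copy_vdev_path(nvlist_t *nv, char **path_out)
    {
        char *path;

        /* 'path' points into nv's own storage; copy before keeping it */
        if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0)
            *path_out = strdup(path);
    }
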
595 vdev_free(vdev_t *vd)
597 spa_t *spa = vd->vdev_spa;
603 vdev_close(vd);
605 ASSERT(!list_link_active(&vd->vdev_config_dirty_node));
606 ASSERT(!list_link_active(&vd->vdev_state_dirty_node));
611 for (int c = 0; c < vd->vdev_children; c++)
612 vdev_free(vd->vdev_child[c]);
614 ASSERT(vd->vdev_child == NULL);
615 ASSERT(vd->vdev_guid_sum == vd->vdev_guid);
620 if (vd->vdev_mg != NULL) {
621 vdev_metaslab_fini(vd);
622 metaslab_group_destroy(vd->vdev_mg);
625 ASSERT0(vd->vdev_stat.vs_space);
626 ASSERT0(vd->vdev_stat.vs_dspace);
627 ASSERT0(vd->vdev_stat.vs_alloc);
632 vdev_remove_child(vd->vdev_parent, vd);
634 ASSERT(vd->vdev_parent == NULL);
639 vdev_queue_fini(vd);
640 vdev_cache_fini(vd);
642 if (vd->vdev_path)
643 spa_strfree(vd->vdev_path);
644 if (vd->vdev_devid)
645 spa_strfree(vd->vdev_devid);
646 if (vd->vdev_physpath)
647 spa_strfree(vd->vdev_physpath);
648 if (vd->vdev_fru)
649 spa_strfree(vd->vdev_fru);
651 if (vd->vdev_isspare)
652 spa_spare_remove(vd);
653 if (vd->vdev_isl2cache)
654 spa_l2cache_remove(vd);
656 txg_list_destroy(&vd->vdev_ms_list);
657 txg_list_destroy(&vd->vdev_dtl_list);
659 mutex_enter(&vd->vdev_dtl_lock);
660 space_map_close(vd->vdev_dtl_sm);
662 range_tree_vacate(vd->vdev_dtl[t], NULL, NULL);
663 range_tree_destroy(vd->vdev_dtl[t]);
665 mutex_exit(&vd->vdev_dtl_lock);
667 mutex_destroy(&vd->vdev_dtl_lock);
668 mutex_destroy(&vd->vdev_stat_lock);
669 mutex_destroy(&vd->vdev_probe_lock);
671 if (vd == spa->spa_root_vdev)
674 kmem_free(vd, sizeof (vdev_t));
685 vdev_t *vd;
720 while ((vd = txg_list_remove(&svd->vdev_dtl_list, t)) != NULL)
721 (void) txg_list_add(&tvd->vdev_dtl_list, vd, t);
744 vdev_top_update(vdev_t *tvd, vdev_t *vd)
746 if (vd == NULL)
749 vd->vdev_top = tvd;
751 for (int c = 0; c < vd->vdev_children; c++)
752 vdev_top_update(tvd, vd->vdev_child[c]);
832 vdev_metaslab_init(vdev_t *vd, uint64_t txg)
834 spa_t *spa = vd->vdev_spa;
837 uint64_t oldc = vd->vdev_ms_count;
838 uint64_t newc = vd->vdev_asize >> vd->vdev_ms_shift;
847 if (vd->vdev_ms_shift == 0)
850 ASSERT(!vd->vdev_ishole);
858 vd->vdev_deflate_ratio = (1 << 17) /
859 (vdev_psize_to_asize(vd, 1 << 17) >> SPA_MINBLOCKSHIFT);
866 bcopy(vd->vdev_ms, mspp, oldc * sizeof (*mspp));
867 kmem_free(vd->vdev_ms, oldc * sizeof (*mspp));
870 vd->vdev_ms = mspp;
871 vd->vdev_ms_count = newc;
877 error = dmu_read(mos, vd->vdev_ms_array,
884 error = metaslab_init(vd->vdev_mg, m, object, txg,
885 &(vd->vdev_ms[m]));
898 if (oldc == 0 && !vd->vdev_removing)
899 metaslab_group_activate(vd->vdev_mg);
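
Growing the metaslab array in vdev_metaslab_init() (lines 866-871) is a hand-rolled realloc: allocate the larger array, bcopy the old entries across, free the old array, then install the new pointer and count. A runnable sketch of the grow idiom:

    #include <stdint.h>
    #include <stdlib.h>
    #include <string.h>

    /* grow a pointer array from oldc to newc entries (newc > oldc) */
    static void **
    grow_ptr_array(void **old, uint64_t oldc, uint64_t newc)
    {
        void **narr = calloc(newc, sizeof (void *));

        if (narr == NULL)
            abort();
        if (oldc != 0) {
            memcpy(narr, old, oldc * sizeof (void *));    /* bcopy */
            free(old);
        }
        return (narr);
    }
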
908 vdev_metaslab_fini(vdev_t *vd)
911 uint64_t count = vd->vdev_ms_count;
913 if (vd->vdev_ms != NULL) {
914 metaslab_group_passivate(vd->vdev_mg);
916 metaslab_t *msp = vd->vdev_ms[m];
921 kmem_free(vd->vdev_ms, count * sizeof (metaslab_t *));
922 vd->vdev_ms = NULL;
936 vdev_t *vd = zio->io_vd;
939 ASSERT(vd->vdev_probe_zio != NULL);
945 zio_nowait(zio_write_phys(vd->vdev_probe_zio, vd,
959 vd->vdev_cant_read |= !vps->vps_readable;
960 vd->vdev_cant_write |= !vps->vps_writeable;
962 if (vdev_readable(vd) &&
963 (vdev_writeable(vd) || !spa_writeable(spa))) {
968 spa, vd, NULL, 0, 0);
972 mutex_enter(&vd->vdev_probe_lock);
973 ASSERT(vd->vdev_probe_zio == zio);
974 vd->vdev_probe_zio = NULL;
975 mutex_exit(&vd->vdev_probe_lock);
978 if (!vdev_accessible(vd, pio))
993 vdev_probe(vdev_t *vd, zio_t *zio)
995 spa_t *spa = vd->vdev_spa;
999 ASSERT(vd->vdev_ops->vdev_op_leaf);
1012 mutex_enter(&vd->vdev_probe_lock);
1014 if ((pio = vd->vdev_probe_zio) == NULL) {
1039 vd->vdev_cant_read = B_FALSE;
1040 vd->vdev_cant_write = B_FALSE;
1043 vd->vdev_probe_zio = pio = zio_null(NULL, spa, vd,
1052 vd->vdev_probe_wanted = B_TRUE;
1060 mutex_exit(&vd->vdev_probe_lock);
1068 zio_nowait(zio_read_phys(pio, vd,
1069 vdev_label_offset(vd->vdev_psize, l,
1086 vdev_t *vd = arg;
1088 vd->vdev_open_thread = curthread;
1089 vd->vdev_open_error = vdev_open(vd);
1090 vd->vdev_open_thread = NULL;
1094 vdev_uses_zvols(vdev_t *vd)
1096 if (vd->vdev_path && strncmp(vd->vdev_path, ZVOL_DIR,
1099 for (int c = 0; c < vd->vdev_children; c++)
1100 if (vdev_uses_zvols(vd->vdev_child[c]))
1106 vdev_open_children(vdev_t *vd)
1109 int children = vd->vdev_children;
1116 if (vdev_uses_zvols(vd)) {
1118 vd->vdev_child[c]->vdev_open_error =
1119 vdev_open(vd->vdev_child[c]);
1126 VERIFY(taskq_dispatch(tq, vdev_open_child, vd->vdev_child[c],
1136 vdev_open(vdev_t *vd)
1138 spa_t *spa = vd->vdev_spa;
1145 ASSERT(vd->vdev_open_thread == curthread ||
1147 ASSERT(vd->vdev_state == VDEV_STATE_CLOSED ||
1148 vd->vdev_state == VDEV_STATE_CANT_OPEN ||
1149 vd->vdev_state == VDEV_STATE_OFFLINE);
1151 vd->vdev_stat.vs_aux = VDEV_AUX_NONE;
1152 vd->vdev_cant_read = B_FALSE;
1153 vd->vdev_cant_write = B_FALSE;
1154 vd->vdev_min_asize = vdev_get_min_asize(vd);
1160 if (!vd->vdev_removed && vd->vdev_faulted) {
1161 ASSERT(vd->vdev_children == 0);
1162 ASSERT(vd->vdev_label_aux == VDEV_AUX_ERR_EXCEEDED ||
1163 vd->vdev_label_aux == VDEV_AUX_EXTERNAL);
1164 vdev_set_state(vd, B_TRUE, VDEV_STATE_FAULTED,
1165 vd->vdev_label_aux);
1167 } else if (vd->vdev_offline) {
1168 ASSERT(vd->vdev_children == 0);
1169 vdev_set_state(vd, B_TRUE, VDEV_STATE_OFFLINE, VDEV_AUX_NONE);
1173 error = vd->vdev_ops->vdev_op_open(vd, &osize, &max_osize, &ashift);
1179 vd->vdev_reopening = B_FALSE;
1181 error = zio_handle_device_injection(vd, NULL, ENXIO);
1184 if (vd->vdev_removed &&
1185 vd->vdev_stat.vs_aux != VDEV_AUX_OPEN_FAILED)
1186 vd->vdev_removed = B_FALSE;
1188 vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
1189 vd->vdev_stat.vs_aux);
1193 vd->vdev_removed = B_FALSE;
1199 if (vd->vdev_faulted) {
1200 ASSERT(vd->vdev_children == 0);
1201 ASSERT(vd->vdev_label_aux == VDEV_AUX_ERR_EXCEEDED ||
1202 vd->vdev_label_aux == VDEV_AUX_EXTERNAL);
1203 vdev_set_state(vd, B_TRUE, VDEV_STATE_FAULTED,
1204 vd->vdev_label_aux);
1208 if (vd->vdev_degraded) {
1209 ASSERT(vd->vdev_children == 0);
1210 vdev_set_state(vd, B_TRUE, VDEV_STATE_DEGRADED,
1213 vdev_set_state(vd, B_TRUE, VDEV_STATE_HEALTHY, 0);
1219 if (vd->vdev_ishole || vd->vdev_ops == &vdev_missing_ops)
1222 for (int c = 0; c < vd->vdev_children; c++) {
1223 if (vd->vdev_child[c]->vdev_state != VDEV_STATE_HEALTHY) {
1224 vdev_set_state(vd, B_TRUE, VDEV_STATE_DEGRADED,
1233 if (vd->vdev_children == 0) {
1235 vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
1244 if (vd->vdev_parent != NULL && osize < SPA_MINDEVSIZE -
1246 vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
1255 vd->vdev_psize = psize;
1260 if (asize < vd->vdev_min_asize) {
1261 vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
1266 if (vd->vdev_asize == 0) {
1271 vd->vdev_asize = asize;
1272 vd->vdev_max_asize = max_asize;
1273 vd->vdev_ashift = MAX(ashift, vd->vdev_ashift);
1280 if (ashift > vd->vdev_top->vdev_ashift &&
1281 vd->vdev_ops->vdev_op_leaf) {
1285 vd->vdev_path);
1287 vd->vdev_max_asize = max_asize;
1295 if (vd->vdev_state == VDEV_STATE_HEALTHY && asize > vd->vdev_asize &&
1296 (vd->vdev_expanding || spa->spa_autoexpand))
1297 vd->vdev_asize = asize;
1299 vdev_set_min_asize(vd);
1305 if (vd->vdev_ops->vdev_op_leaf &&
1306 (error = zio_wait(vdev_probe(vd, NULL))) != 0) {
1307 vdev_set_state(vd, B_TRUE, VDEV_STATE_FAULTED,
1315 if (vd->vdev_top == vd && vd->vdev_ashift != 0 &&
1316 !vd->vdev_islog && vd->vdev_aux == NULL) {
1317 if (vd->vdev_ashift > spa->spa_max_ashift)
1318 spa->spa_max_ashift = vd->vdev_ashift;
1319 if (vd->vdev_ashift < spa->spa_min_ashift)
1320 spa->spa_min_ashift = vd->vdev_ashift;
1328 if (vd->vdev_ops->vdev_op_leaf && !spa->spa_scrub_reopen &&
1329 vdev_resilver_needed(vd, NULL, NULL))
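
Two ashift rules are visible in vdev_open(): a leaf's ashift can only ratchet up once set (line 1273, MAX of the detected and recorded values), and lines 1315-1320 keep pool-wide bounds current, with spa_max_ashift only ever increasing and spa_min_ashift only ever decreasing. A tiny sketch of the running bounds:

    #include <stdint.h>

    /* mirrors the updates at lines 1317-1320 */
    static void
    update_ashift_bounds(uint64_t ashift, uint64_t *minp, uint64_t *maxp)
    {
        if (ashift > *maxp)
            *maxp = ashift;
        if (ashift < *minp)
            *minp = ashift;
    }
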
1351 vdev_validate(vdev_t *vd, boolean_t strict)
1353 spa_t *spa = vd->vdev_spa;
1358 for (int c = 0; c < vd->vdev_children; c++)
1359 if (vdev_validate(vd->vdev_child[c], strict) != 0)
1367 if (vd->vdev_ops->vdev_op_leaf && vdev_readable(vd)) {
1373 if ((label = vdev_label_read_config(vd, txg)) == NULL) {
1374 vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
1385 vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
1394 vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
1421 ((vd->vdev_guid != guid && vd->vdev_guid != aux_guid) &&
1422 (vd->vdev_guid != top_guid || vd != vd->vdev_top))) {
1423 vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
1431 vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
1453 if (vd->vdev_not_present)
1454 vd->vdev_not_present = 0;
1464 vdev_close(vdev_t *vd)
1466 spa_t *spa = vd->vdev_spa;
1467 vdev_t *pvd = vd->vdev_parent;
1476 vd->vdev_reopening = (pvd->vdev_reopening && !vd->vdev_offline);
1478 vd->vdev_ops->vdev_op_close(vd);
1480 vdev_cache_purge(vd);
1487 vd->vdev_prevstate = vd->vdev_state;
1489 if (vd->vdev_offline)
1490 vd->vdev_state = VDEV_STATE_OFFLINE;
1492 vd->vdev_state = VDEV_STATE_CLOSED;
1493 vd->vdev_stat.vs_aux = VDEV_AUX_NONE;
1497 vdev_hold(vdev_t *vd)
1499 spa_t *spa = vd->vdev_spa;
1505 for (int c = 0; c < vd->vdev_children; c++)
1506 vdev_hold(vd->vdev_child[c]);
1508 if (vd->vdev_ops->vdev_op_leaf)
1509 vd->vdev_ops->vdev_op_hold(vd);
1513 vdev_rele(vdev_t *vd)
1515 spa_t *spa = vd->vdev_spa;
1518 for (int c = 0; c < vd->vdev_children; c++)
1519 vdev_rele(vd->vdev_child[c]);
1521 if (vd->vdev_ops->vdev_op_leaf)
1522 vd->vdev_ops->vdev_op_rele(vd);
1532 vdev_reopen(vdev_t *vd)
1534 spa_t *spa = vd->vdev_spa;
1539 vd->vdev_reopening = !vd->vdev_offline;
1540 vdev_close(vd);
1541 (void) vdev_open(vd);
1548 if (vd->vdev_aux) {
1549 (void) vdev_validate_aux(vd);
1550 if (vdev_readable(vd) && vdev_writeable(vd) &&
1551 vd->vdev_aux == &spa->spa_l2cache &&
1552 !l2arc_vdev_present(vd))
1553 l2arc_add_vdev(spa, vd);
1555 (void) vdev_validate(vd, B_TRUE);
1561 vdev_propagate_state(vd);
1565 vdev_create(vdev_t *vd, uint64_t txg, boolean_t isreplacing)
1574 error = vdev_open(vd);
1576 if (error || vd->vdev_state != VDEV_STATE_HEALTHY) {
1577 vdev_close(vd);
1584 if ((error = vdev_dtl_load(vd)) != 0 ||
1585 (error = vdev_label_init(vd, txg, isreplacing ?
1587 vdev_close(vd);
1595 vdev_metaslab_set_size(vdev_t *vd)
1600 vd->vdev_ms_shift = highbit64(vd->vdev_asize / metaslabs_per_vdev);
1601 vd->vdev_ms_shift = MAX(vd->vdev_ms_shift, SPA_MAXBLOCKSHIFT);
1605 vdev_dirty(vdev_t *vd, int flags, void *arg, uint64_t txg)
1607 ASSERT(vd == vd->vdev_top);
1608 ASSERT(!vd->vdev_ishole);
1610 ASSERT(spa_writeable(vd->vdev_spa));
1613 (void) txg_list_add(&vd->vdev_ms_list, arg, txg);
1616 (void) txg_list_add(&vd->vdev_dtl_list, arg, txg);
1618 (void) txg_list_add(&vd->vdev_spa->spa_vdev_txg_list, vd, txg);
1622 vdev_dirty_leaves(vdev_t *vd, int flags, uint64_t txg)
1624 for (int c = 0; c < vd->vdev_children; c++)
1625 vdev_dirty_leaves(vd->vdev_child[c], flags, txg);
1627 if (vd->vdev_ops->vdev_op_leaf)
1628 vdev_dirty(vd->vdev_top, flags, vd, txg);
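
vdev_dirty() (lines 1605-1618) files metaslabs and DTLs on per-txg lists; a txg_list keeps one bucket per in-flight transaction group, selected by txg & TXG_MASK. A simplified sketch of that bucketing, assuming the standard TXG_SIZE of 4 (the real txg_list_add also tracks per-node membership, omitted here):

    #include <stdint.h>

    #define TXG_SIZE    4              /* in-flight txgs, as in ZFS */
    #define TXG_MASK    (TXG_SIZE - 1)

    typedef struct node {
        struct node *next;
    } node_t;

    typedef struct txg_list {
        node_t *tl_head[TXG_SIZE];     /* one bucket per txg slot */
    } txg_list_t;

    /* file 'n' under the bucket for 'txg'; slots recycle modulo 4 */
    static void
    txg_list_add_sketch(txg_list_t *tl, node_t *n, uint64_t txg)
    {
        int slot = (int)(txg & TXG_MASK);

        n->next = tl->tl_head[slot];
        tl->tl_head[slot] = n;
    }
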
1670 vdev_dtl_dirty(vdev_t *vd, vdev_dtl_type_t t, uint64_t txg, uint64_t size)
1672 range_tree_t *rt = vd->vdev_dtl[t];
1675 ASSERT(vd != vd->vdev_spa->spa_root_vdev);
1676 ASSERT(spa_writeable(vd->vdev_spa));
1685 vdev_dtl_contains(vdev_t *vd, vdev_dtl_type_t t, uint64_t txg, uint64_t size)
1687 range_tree_t *rt = vd->vdev_dtl[t];
1691 ASSERT(vd != vd->vdev_spa->spa_root_vdev);
1702 vdev_dtl_empty(vdev_t *vd, vdev_dtl_type_t t)
1704 range_tree_t *rt = vd->vdev_dtl[t];
1718 vdev_dtl_min(vdev_t *vd)
1722 ASSERT(MUTEX_HELD(&vd->vdev_dtl_lock));
1723 ASSERT3U(range_tree_space(vd->vdev_dtl[DTL_MISSING]), !=, 0);
1724 ASSERT0(vd->vdev_children);
1726 rs = avl_first(&vd->vdev_dtl[DTL_MISSING]->rt_root);
1734 vdev_dtl_max(vdev_t *vd)
1738 ASSERT(MUTEX_HELD(&vd->vdev_dtl_lock));
1739 ASSERT3U(range_tree_space(vd->vdev_dtl[DTL_MISSING]), !=, 0);
1740 ASSERT0(vd->vdev_children);
1742 rs = avl_last(&vd->vdev_dtl[DTL_MISSING]->rt_root);
1755 vdev_dtl_should_excise(vdev_t *vd)
1757 spa_t *spa = vd->vdev_spa;
1761 ASSERT0(vd->vdev_children);
1763 if (vd->vdev_resilver_txg == 0 ||
1764 range_tree_space(vd->vdev_dtl[DTL_MISSING]) == 0)
1774 if (vdev_dtl_max(vd) <= scn->scn_phys.scn_max_txg) {
1775 ASSERT3U(scn->scn_phys.scn_min_txg, <=, vdev_dtl_min(vd));
1776 ASSERT3U(scn->scn_phys.scn_min_txg, <, vd->vdev_resilver_txg);
1777 ASSERT3U(vd->vdev_resilver_txg, <=, scn->scn_phys.scn_max_txg);
1787 vdev_dtl_reassess(vdev_t *vd, uint64_t txg, uint64_t scrub_txg, int scrub_done)
1789 spa_t *spa = vd->vdev_spa;
1795 for (int c = 0; c < vd->vdev_children; c++)
1796 vdev_dtl_reassess(vd->vdev_child[c], txg,
1799 if (vd == spa->spa_root_vdev || vd->vdev_ishole || vd->vdev_aux)
1802 if (vd->vdev_ops->vdev_op_leaf) {
1805 mutex_enter(&vd->vdev_dtl_lock);
1816 vdev_dtl_should_excise(vd)) {
1836 vd->vdev_dtl[DTL_MISSING], 1);
1839 vd->vdev_dtl[DTL_SCRUB], 2);
1841 vd->vdev_dtl[DTL_MISSING], 1);
1844 range_tree_vacate(vd->vdev_dtl[DTL_PARTIAL], NULL, NULL);
1845 range_tree_walk(vd->vdev_dtl[DTL_MISSING],
1846 range_tree_add, vd->vdev_dtl[DTL_PARTIAL]);
1848 range_tree_vacate(vd->vdev_dtl[DTL_SCRUB], NULL, NULL);
1849 range_tree_vacate(vd->vdev_dtl[DTL_OUTAGE], NULL, NULL);
1850 if (!vdev_readable(vd))
1851 range_tree_add(vd->vdev_dtl[DTL_OUTAGE], 0, -1ULL);
1853 range_tree_walk(vd->vdev_dtl[DTL_MISSING],
1854 range_tree_add, vd->vdev_dtl[DTL_OUTAGE]);
1860 if (vd->vdev_resilver_txg != 0 &&
1861 range_tree_space(vd->vdev_dtl[DTL_MISSING]) == 0 &&
1862 range_tree_space(vd->vdev_dtl[DTL_OUTAGE]) == 0)
1863 vd->vdev_resilver_txg = 0;
1865 mutex_exit(&vd->vdev_dtl_lock);
1868 vdev_dirty(vd->vdev_top, VDD_DTL, vd, txg);
1872 mutex_enter(&vd->vdev_dtl_lock);
1880 else if (vd->vdev_nparity != 0)
1881 minref = vd->vdev_nparity + 1; /* RAID-Z */
1883 minref = vd->vdev_children; /* any kind of mirror */
1885 for (int c = 0; c < vd->vdev_children; c++) {
1886 vdev_t *cvd = vd->vdev_child[c];
1891 space_reftree_generate_map(&reftree, vd->vdev_dtl[t], minref);
1894 mutex_exit(&vd->vdev_dtl_lock);
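
For interior vdevs, vdev_dtl_reassess() rebuilds the parent DTL from the children with a reference-counted sweep: a txg lands in the parent's DTL only when at least minref children are missing it, where minref is nparity + 1 for raidz (lines 1880-1881, more losses than parity can reconstruct) and the full child count for a mirror (line 1883, every copy gone). The threshold rule in miniature; the real code walks range trees via space_reftree rather than taking a simple count:

    /* does a txg missing on 'missing' children enter the parent DTL? */
    static int
    dtl_missing_in_parent(int missing, int nparity, int children)
    {
        int minref = (nparity != 0) ? nparity + 1 : children;

        /*
         * raidz2, 6 children: missing on 2 -> reconstructible, stays out;
         * missing on 3 -> enters the DTL.
         * 3-way mirror: only missing on all 3 enters the DTL.
         */
        return (missing >= minref);
    }
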
1898 vdev_dtl_load(vdev_t *vd)
1900 spa_t *spa = vd->vdev_spa;
1904 if (vd->vdev_ops->vdev_op_leaf && vd->vdev_dtl_object != 0) {
1905 ASSERT(!vd->vdev_ishole);
1907 error = space_map_open(&vd->vdev_dtl_sm, mos,
1908 vd->vdev_dtl_object, 0, -1ULL, 0, &vd->vdev_dtl_lock);
1911 ASSERT(vd->vdev_dtl_sm != NULL);
1913 mutex_enter(&vd->vdev_dtl_lock);
1919 space_map_update(vd->vdev_dtl_sm);
1921 error = space_map_load(vd->vdev_dtl_sm,
1922 vd->vdev_dtl[DTL_MISSING], SM_ALLOC);
1923 mutex_exit(&vd->vdev_dtl_lock);
1928 for (int c = 0; c < vd->vdev_children; c++) {
1929 error = vdev_dtl_load(vd->vdev_child[c]);
1938 vdev_dtl_sync(vdev_t *vd, uint64_t txg)
1940 spa_t *spa = vd->vdev_spa;
1941 range_tree_t *rt = vd->vdev_dtl[DTL_MISSING];
1946 uint64_t object = space_map_object(vd->vdev_dtl_sm);
1948 ASSERT(!vd->vdev_ishole);
1949 ASSERT(vd->vdev_ops->vdev_op_leaf);
1953 if (vd->vdev_detached || vd->vdev_top->vdev_removing) {
1954 mutex_enter(&vd->vdev_dtl_lock);
1955 space_map_free(vd->vdev_dtl_sm, tx);
1956 space_map_close(vd->vdev_dtl_sm);
1957 vd->vdev_dtl_sm = NULL;
1958 mutex_exit(&vd->vdev_dtl_lock);
1963 if (vd->vdev_dtl_sm == NULL) {
1969 VERIFY0(space_map_open(&vd->vdev_dtl_sm, mos, new_object,
1970 0, -1ULL, 0, &vd->vdev_dtl_lock));
1971 ASSERT(vd->vdev_dtl_sm != NULL);
1980 mutex_enter(&vd->vdev_dtl_lock);
1982 mutex_exit(&vd->vdev_dtl_lock);
1984 space_map_truncate(vd->vdev_dtl_sm, tx);
1985 space_map_write(vd->vdev_dtl_sm, rtsync, SM_ALLOC, tx);
1997 if (object != space_map_object(vd->vdev_dtl_sm)) {
2000 space_map_object(vd->vdev_dtl_sm));
2001 vdev_config_dirty(vd->vdev_top);
2006 mutex_enter(&vd->vdev_dtl_lock);
2007 space_map_update(vd->vdev_dtl_sm);
2008 mutex_exit(&vd->vdev_dtl_lock);
2016 vdev_dtl_required(vdev_t *vd)
2018 spa_t *spa = vd->vdev_spa;
2019 vdev_t *tvd = vd->vdev_top;
2020 uint8_t cant_read = vd->vdev_cant_read;
2025 if (vd == spa->spa_root_vdev || vd == tvd)
2033 vd->vdev_cant_read = B_TRUE;
2036 vd->vdev_cant_read = cant_read;
2040 required = !!zio_handle_device_injection(vd, NULL, ECHILD);
2049 vdev_resilver_needed(vdev_t *vd, uint64_t *minp, uint64_t *maxp)
2055 if (vd->vdev_children == 0) {
2056 mutex_enter(&vd->vdev_dtl_lock);
2057 if (range_tree_space(vd->vdev_dtl[DTL_MISSING]) != 0 &&
2058 vdev_writeable(vd)) {
2060 thismin = vdev_dtl_min(vd);
2061 thismax = vdev_dtl_max(vd);
2064 mutex_exit(&vd->vdev_dtl_lock);
2066 for (int c = 0; c < vd->vdev_children; c++) {
2067 vdev_t *cvd = vd->vdev_child[c];
2086 vdev_load(vdev_t *vd)
2091 for (int c = 0; c < vd->vdev_children; c++)
2092 vdev_load(vd->vdev_child[c]);
2097 if (vd == vd->vdev_top && !vd->vdev_ishole &&
2098 (vd->vdev_ashift == 0 || vd->vdev_asize == 0 ||
2099 vdev_metaslab_init(vd, 0) != 0))
2100 vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
2106 if (vd->vdev_ops->vdev_op_leaf && vdev_dtl_load(vd) != 0)
2107 vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
2119 vdev_validate_aux(vdev_t *vd)
2125 if (!vdev_readable(vd))
2128 if ((label = vdev_label_read_config(vd, -1ULL)) == NULL) {
2129 vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
2137 guid != vd->vdev_guid ||
2139 vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
2154 vdev_remove(vdev_t *vd, uint64_t txg)
2156 spa_t *spa = vd->vdev_spa;
2162 if (vd->vdev_ms != NULL) {
2163 metaslab_group_t *mg = vd->vdev_mg;
2168 for (int m = 0; m < vd->vdev_ms_count; m++) {
2169 metaslab_t *msp = vd->vdev_ms[m];
2198 if (vd->vdev_ms_array) {
2199 (void) dmu_object_free(mos, vd->vdev_ms_array, tx);
2200 vd->vdev_ms_array = 0;
2206 vdev_sync_done(vdev_t *vd, uint64_t txg)
2209 boolean_t reassess = !txg_list_empty(&vd->vdev_ms_list, TXG_CLEAN(txg));
2211 ASSERT(!vd->vdev_ishole);
2213 while (msp = txg_list_remove(&vd->vdev_ms_list, TXG_CLEAN(txg)))
2217 metaslab_sync_reassess(vd->vdev_mg);
2221 vdev_sync(vdev_t *vd, uint64_t txg)
2223 spa_t *spa = vd->vdev_spa;
2228 ASSERT(!vd->vdev_ishole);
2230 if (vd->vdev_ms_array == 0 && vd->vdev_ms_shift != 0) {
2231 ASSERT(vd == vd->vdev_top);
2233 vd->vdev_ms_array = dmu_object_alloc(spa->spa_meta_objset,
2235 ASSERT(vd->vdev_ms_array != 0);
2236 vdev_config_dirty(vd);
2243 if (vd->vdev_stat.vs_alloc == 0 && vd->vdev_removing)
2244 vdev_remove(vd, txg);
2246 while ((msp = txg_list_remove(&vd->vdev_ms_list, txg)) != NULL) {
2248 (void) txg_list_add(&vd->vdev_ms_list, msp, TXG_CLEAN(txg));
2251 while ((lvd = txg_list_remove(&vd->vdev_dtl_list, txg)) != NULL)
2254 (void) txg_list_add(&spa->spa_vdev_txg_list, vd, TXG_CLEAN(txg));
2258 vdev_psize_to_asize(vdev_t *vd, uint64_t psize)
2260 return (vd->vdev_ops->vdev_op_asize(vd, psize));
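
vdev_psize_to_asize() (lines 2258-2260) is pure vtable dispatch: each vdev type supplies a vdev_ops_t of function pointers, and callers indirect through vdev_op_asize instead of switching on type. A compilable sketch of the pattern with stand-in names:

    #include <stdint.h>

    struct sk_vdev;

    /* per-type operations vector, like vdev_ops_t */
    typedef struct sk_ops {
        uint64_t (*op_asize)(struct sk_vdev *, uint64_t);
    } sk_ops_t;

    typedef struct sk_vdev {
        const sk_ops_t *vdev_ops;
    } sk_vdev_t;

    /* identity mapping, e.g. a plain disk leaf */
    static uint64_t
    sk_disk_asize(struct sk_vdev *vd, uint64_t psize)
    {
        (void) vd;
        return (psize);
    }

    static const sk_ops_t sk_disk_ops = { sk_disk_asize };

    static uint64_t
    sk_psize_to_asize(sk_vdev_t *vd, uint64_t psize)
    {
        return (vd->vdev_ops->op_asize(vd, psize));
    }
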
2270 vdev_t *vd, *tvd;
2274 if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL)
2277 if (!vd->vdev_ops->vdev_op_leaf)
2280 tvd = vd->vdev_top;
2287 vd->vdev_label_aux = aux;
2292 vd->vdev_delayed_close = B_FALSE;
2293 vd->vdev_faulted = 1ULL;
2294 vd->vdev_degraded = 0ULL;
2295 vdev_set_state(vd, B_FALSE, VDEV_STATE_FAULTED, aux);
2301 if (!tvd->vdev_islog && vd->vdev_aux == NULL && vdev_dtl_required(vd)) {
2302 vd->vdev_degraded = 1ULL;
2303 vd->vdev_faulted = 0ULL;
2311 if (vdev_readable(vd))
2312 vdev_set_state(vd, B_FALSE, VDEV_STATE_DEGRADED, aux);
2315 return (spa_vdev_state_exit(spa, vd, 0));
2326 vdev_t *vd;
2330 if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL)
2333 if (!vd->vdev_ops->vdev_op_leaf)
2339 if (vd->vdev_faulted || vd->vdev_degraded)
2342 vd->vdev_degraded = 1ULL;
2343 if (!vdev_is_dead(vd))
2344 vdev_set_state(vd, B_FALSE, VDEV_STATE_DEGRADED,
2347 return (spa_vdev_state_exit(spa, vd, 0));
2361 vdev_t *vd, *tvd, *pvd, *rvd = spa->spa_root_vdev;
2366 if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL)
2369 if (!vd->vdev_ops->vdev_op_leaf)
2373 (vd->vdev_offline == B_TRUE || vd->vdev_tmpoffline == B_TRUE) ?
2376 tvd = vd->vdev_top;
2377 vd->vdev_offline = B_FALSE;
2378 vd->vdev_tmpoffline = B_FALSE;
2379 vd->vdev_checkremove = !!(flags & ZFS_ONLINE_CHECKREMOVE);
2380 vd->vdev_forcefault = !!(flags & ZFS_ONLINE_FORCEFAULT);
2383 if (!vd->vdev_aux) {
2384 for (pvd = vd; pvd != rvd; pvd = pvd->vdev_parent)
2389 vd->vdev_checkremove = vd->vdev_forcefault = B_FALSE;
2391 if (!vd->vdev_aux) {
2392 for (pvd = vd; pvd != rvd; pvd = pvd->vdev_parent)
2397 *newstate = vd->vdev_state;
2399 !vdev_is_dead(vd) && vd->vdev_parent &&
2400 vd->vdev_parent->vdev_ops == &vdev_spare_ops &&
2401 vd->vdev_parent->vdev_child[0] == vd)
2402 vd->vdev_unspare = B_TRUE;
2407 if (vd->vdev_aux)
2408 return (spa_vdev_state_exit(spa, vd, ENOTSUP));
2413 spa_event_notify(spa, vd, ESC_ZFS_VDEV_ONLINE);
2415 return (spa_vdev_state_exit(spa, vd, 0));
2421 vdev_t *vd, *tvd;
2429 if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL)
2432 if (!vd->vdev_ops->vdev_op_leaf)
2435 tvd = vd->vdev_top;
2442 if (!vd->vdev_offline) {
2448 if (!tvd->vdev_islog && vd->vdev_aux == NULL &&
2449 vdev_dtl_required(vd))
2463 (void) spa_vdev_state_exit(spa, vd, 0);
2476 vd, error));
2477 (void) spa_vdev_state_exit(spa, vd, 0);
2489 vd->vdev_offline = B_TRUE;
2492 if (!tvd->vdev_islog && vd->vdev_aux == NULL &&
2494 vd->vdev_offline = B_FALSE;
2507 vd->vdev_tmpoffline = !!(flags & ZFS_OFFLINE_TEMPORARY);
2509 return (spa_vdev_state_exit(spa, vd, 0));
2527 * children. If 'vd' is NULL, then the user wants to clear all vdevs.
2530 vdev_clear(spa_t *spa, vdev_t *vd)
2536 if (vd == NULL)
2537 vd = rvd;
2539 vd->vdev_stat.vs_read_errors = 0;
2540 vd->vdev_stat.vs_write_errors = 0;
2541 vd->vdev_stat.vs_checksum_errors = 0;
2543 for (int c = 0; c < vd->vdev_children; c++)
2544 vdev_clear(spa, vd->vdev_child[c]);
2552 if (vd->vdev_faulted || vd->vdev_degraded ||
2553 !vdev_readable(vd) || !vdev_writeable(vd)) {
2560 vd->vdev_forcefault = B_TRUE;
2562 vd->vdev_faulted = vd->vdev_degraded = 0ULL;
2563 vd->vdev_cant_read = B_FALSE;
2564 vd->vdev_cant_write = B_FALSE;
2566 vdev_reopen(vd == rvd ? rvd : vd->vdev_top);
2568 vd->vdev_forcefault = B_FALSE;
2570 if (vd != rvd && vdev_writeable(vd->vdev_top))
2571 vdev_state_dirty(vd->vdev_top);
2573 if (vd->vdev_aux == NULL && !vdev_is_dead(vd))
2576 spa_event_notify(spa, vd, ESC_ZFS_VDEV_CLEAR);
2584 if (!vdev_is_dead(vd) && vd->vdev_parent != NULL &&
2585 vd->vdev_parent->vdev_ops == &vdev_spare_ops &&
2586 vd->vdev_parent->vdev_child[0] == vd)
2587 vd->vdev_unspare = B_TRUE;
2591 vdev_is_dead(vdev_t *vd)
2600 return (vd->vdev_state < VDEV_STATE_DEGRADED || vd->vdev_ishole ||
2601 vd->vdev_ops == &vdev_missing_ops);
2605 vdev_readable(vdev_t *vd)
2607 return (!vdev_is_dead(vd) && !vd->vdev_cant_read);
2611 vdev_writeable(vdev_t *vd)
2613 return (!vdev_is_dead(vd) && !vd->vdev_cant_write);
2617 vdev_allocatable(vdev_t *vd)
2619 uint64_t state = vd->vdev_state;
2630 !vd->vdev_cant_write && !vd->vdev_ishole);
2634 vdev_accessible(vdev_t *vd, zio_t *zio)
2636 ASSERT(zio->io_vd == vd);
2638 if (vdev_is_dead(vd) || vd->vdev_remove_wanted)
2642 return (!vd->vdev_cant_read);
2645 return (!vd->vdev_cant_write);
2654 vdev_get_stats(vdev_t *vd, vdev_stat_t *vs)
2656 spa_t *spa = vd->vdev_spa;
2661 mutex_enter(&vd->vdev_stat_lock);
2662 bcopy(&vd->vdev_stat, vs, sizeof (*vs));
2664 vs->vs_state = vd->vdev_state;
2665 vs->vs_rsize = vdev_get_min_asize(vd);
2666 if (vd->vdev_ops->vdev_op_leaf)
2668 vs->vs_esize = vd->vdev_max_asize - vd->vdev_asize;
2669 if (vd->vdev_aux == NULL && vd == vd->vdev_top && !vd->vdev_ishole) {
2670 vs->vs_fragmentation = vd->vdev_mg->mg_fragmentation;
2677 if (vd == rvd) {
2689 mutex_exit(&vd->vdev_stat_lock);
2693 vdev_clear_stats(vdev_t *vd)
2695 mutex_enter(&vd->vdev_stat_lock);
2696 vd->vdev_stat.vs_space = 0;
2697 vd->vdev_stat.vs_dspace = 0;
2698 vd->vdev_stat.vs_alloc = 0;
2699 mutex_exit(&vd->vdev_stat_lock);
2703 vdev_scan_stat_init(vdev_t *vd)
2705 vdev_stat_t *vs = &vd->vdev_stat;
2707 for (int c = 0; c < vd->vdev_children; c++)
2708 vdev_scan_stat_init(vd->vdev_child[c]);
2710 mutex_enter(&vd->vdev_stat_lock);
2712 mutex_exit(&vd->vdev_stat_lock);
2720 vdev_t *vd = zio->io_vd ? zio->io_vd : rvd;
2723 vdev_stat_t *vs = &vd->vdev_stat;
2748 if (vd == rvd)
2751 ASSERT(vd == zio->io_vd);
2756 mutex_enter(&vd->vdev_stat_lock);
2765 if (vd->vdev_ops->vdev_op_leaf)
2777 mutex_exit(&vd->vdev_stat_lock);
2802 mutex_enter(&vd->vdev_stat_lock);
2803 if (type == ZIO_TYPE_READ && !vdev_is_dead(vd)) {
2809 if (type == ZIO_TYPE_WRITE && !vdev_is_dead(vd))
2811 mutex_exit(&vd->vdev_stat_lock);
2832 if (vd->vdev_ops->vdev_op_leaf) {
2837 vdev_dtl_dirty(vd, DTL_SCRUB, txg, 1);
2844 if (vdev_dtl_contains(vd, DTL_MISSING, txg, 1))
2846 for (pvd = vd; pvd != rvd; pvd = pvd->vdev_parent)
2848 vdev_dirty(vd->vdev_top, VDD_DTL, vd, commit_txg);
2850 if (vd != rvd)
2851 vdev_dtl_dirty(vd, DTL_MISSING, txg, 1);
2860 vdev_space_update(vdev_t *vd, int64_t alloc_delta, int64_t defer_delta,
2864 spa_t *spa = vd->vdev_spa;
2866 metaslab_group_t *mg = vd->vdev_mg;
2869 ASSERT(vd == vd->vdev_top);
2878 ASSERT(vd->vdev_deflate_ratio != 0 || vd->vdev_isl2cache);
2880 vd->vdev_deflate_ratio;
2882 mutex_enter(&vd->vdev_stat_lock);
2883 vd->vdev_stat.vs_alloc += alloc_delta;
2884 vd->vdev_stat.vs_space += space_delta;
2885 vd->vdev_stat.vs_dspace += dspace_delta;
2886 mutex_exit(&vd->vdev_stat_lock);
2897 ASSERT(rvd == vd->vdev_parent);
2898 ASSERT(vd->vdev_ms_count != 0);
2911 vdev_config_dirty(vdev_t *vd)
2913 spa_t *spa = vd->vdev_spa;
2923 if (vd->vdev_aux != NULL) {
2924 spa_aux_vdev_t *sav = vd->vdev_aux;
2929 if (sav->sav_vdevs[c] == vd)
2956 aux[c] = vdev_config_generate(spa, vd, B_TRUE, 0);
2971 if (vd == rvd) {
2975 ASSERT(vd == vd->vdev_top);
2977 if (!list_link_active(&vd->vdev_config_dirty_node) &&
2978 !vd->vdev_ishole)
2979 list_insert_head(&spa->spa_config_dirty_list, vd);
2984 vdev_config_clean(vdev_t *vd)
2986 spa_t *spa = vd->vdev_spa;
2992 ASSERT(list_link_active(&vd->vdev_config_dirty_node));
2993 list_remove(&spa->spa_config_dirty_list, vd);
3003 vdev_state_dirty(vdev_t *vd)
3005 spa_t *spa = vd->vdev_spa;
3008 ASSERT(vd == vd->vdev_top);
3020 if (!list_link_active(&vd->vdev_state_dirty_node) && !vd->vdev_ishole)
3021 list_insert_head(&spa->spa_state_dirty_list, vd);
3025 vdev_state_clean(vdev_t *vd)
3027 spa_t *spa = vd->vdev_spa;
3033 ASSERT(list_link_active(&vd->vdev_state_dirty_node));
3034 list_remove(&spa->spa_state_dirty_list, vd);
3041 vdev_propagate_state(vdev_t *vd)
3043 spa_t *spa = vd->vdev_spa;
3049 if (vd->vdev_children > 0) {
3050 for (int c = 0; c < vd->vdev_children; c++) {
3051 child = vd->vdev_child[c];
3066 if (child->vdev_islog && vd == rvd)
3078 vd->vdev_ops->vdev_op_state_change(vd, faulted, degraded);
3086 if (corrupted && vd == rvd &&
3092 if (vd->vdev_parent)
3093 vdev_propagate_state(vd->vdev_parent);
3105 vdev_set_state(vdev_t *vd, boolean_t isopen, vdev_state_t state, vdev_aux_t aux)
3108 spa_t *spa = vd->vdev_spa;
3110 if (state == vd->vdev_state) {
3111 vd->vdev_stat.vs_aux = aux;
3115 save_state = vd->vdev_state;
3117 vd->vdev_state = state;
3118 vd->vdev_stat.vs_aux = aux;
3130 if (!vd->vdev_delayed_close && vdev_is_dead(vd) &&
3131 vd->vdev_ops->vdev_op_leaf)
3132 vd->vdev_ops->vdev_op_close(vd);
3143 if (state == VDEV_STATE_HEALTHY && vd->vdev_ops->vdev_op_leaf &&
3144 vd->vdev_prevstate != state)
3145 zfs_post_state_change(spa, vd);
3147 if (vd->vdev_removed &&
3149 (aux == VDEV_AUX_OPEN_FAILED || vd->vdev_checkremove)) {
3159 vd->vdev_state = VDEV_STATE_REMOVED;
3160 vd->vdev_stat.vs_aux = VDEV_AUX_NONE;
3162 vd->vdev_removed = B_TRUE;
3172 vd->vdev_ops->vdev_op_leaf)
3173 vd->vdev_not_present = 1;
3190 if ((vd->vdev_prevstate != state || vd->vdev_forcefault) &&
3191 !vd->vdev_not_present && !vd->vdev_checkremove &&
3192 vd != spa->spa_root_vdev) {
3218 zfs_ereport_post(class, spa, vd, NULL, save_state, 0);
3222 vd->vdev_removed = B_FALSE;
3224 vd->vdev_removed = B_FALSE;
3227 if (!isopen && vd->vdev_parent)
3228 vdev_propagate_state(vd->vdev_parent);
3237 vdev_is_bootable(vdev_t *vd)
3239 if (!vd->vdev_ops->vdev_op_leaf) {
3240 char *vdev_type = vd->vdev_ops->vdev_op_type;
3243 vd->vdev_children > 1) {
3250 for (int c = 0; c < vd->vdev_children; c++) {
3251 if (!vdev_is_bootable(vd->vdev_child[c]))
3292 vdev_log_state_valid(vdev_t *vd)
3294 if (vd->vdev_ops->vdev_op_leaf && !vd->vdev_faulted &&
3295 !vd->vdev_removed)
3298 for (int c = 0; c < vd->vdev_children; c++)
3299 if (vdev_log_state_valid(vd->vdev_child[c]))
3309 vdev_expand(vdev_t *vd, uint64_t txg)
3311 ASSERT(vd->vdev_top == vd);
3312 ASSERT(spa_config_held(vd->vdev_spa, SCL_ALL, RW_WRITER) == SCL_ALL);
3314 if ((vd->vdev_asize >> vd->vdev_ms_shift) > vd->vdev_ms_count) {
3315 VERIFY(vdev_metaslab_init(vd, txg) == 0);
3316 vdev_config_dirty(vd);
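
The test at line 3314 decides whether a grown device has crossed a metaslab boundary: asize >> ms_shift is the number of whole metaslabs the new size can hold, and expansion only matters once that exceeds the current count. A small arithmetic sketch:

    #include <stdint.h>

    /* e.g. asize = 5 GiB, ms_shift = 30 (1 GiB slabs), count = 4 -> 1 */
    static int
    needs_new_metaslabs(uint64_t asize, uint64_t ms_shift, uint64_t ms_count)
    {
        return ((asize >> ms_shift) > ms_count);
    }
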
3324 vdev_split(vdev_t *vd)
3326 vdev_t *cvd, *pvd = vd->vdev_parent;
3328 vdev_remove_child(pvd, vd);
3340 vdev_deadman(vdev_t *vd)
3342 for (int c = 0; c < vd->vdev_children; c++) {
3343 vdev_t *cvd = vd->vdev_child[c];
3348 if (vd->vdev_ops->vdev_op_leaf) {
3349 vdev_queue_t *vq = &vd->vdev_queue;
3353 spa_t *spa = vd->vdev_spa;