Lines Matching +full:additional +full:- +full:devs

1 // SPDX-License-Identifier: CDDL-1.0
10 * or https://opensource.org/licenses/CDDL-1.0.
68 * One metaslab from each (normal-class) vdev is used by the ZIL. These are
88 /* default target for number of metaslabs per top-level vdev */
91 /* minimum number of metaslabs per top-level vdev */
94 /* practical upper limit of total metaslabs per top-level vdev */
138 * vdev-wide space maps that have lots of entries written to them at
146 * will cause pool corruption on power loss if a volatile out-of-order
183 if (vd->vdev_path != NULL) { in vdev_dbgmsg()
184 zfs_dbgmsg("%s vdev '%s': %s", vd->vdev_ops->vdev_op_type, in vdev_dbgmsg()
185 vd->vdev_path, buf); in vdev_dbgmsg()
187 zfs_dbgmsg("%s-%llu vdev (guid %llu): %s", in vdev_dbgmsg()
188 vd->vdev_ops->vdev_op_type, in vdev_dbgmsg()
189 (u_longlong_t)vd->vdev_id, in vdev_dbgmsg()
190 (u_longlong_t)vd->vdev_guid, buf); in vdev_dbgmsg()
199 if (vd->vdev_ishole || vd->vdev_ops == &vdev_missing_ops) { in vdev_dbgmsg_print_tree()
201 (u_longlong_t)vd->vdev_id, in vdev_dbgmsg_print_tree()
202 vd->vdev_ops->vdev_op_type); in vdev_dbgmsg_print_tree()
206 switch (vd->vdev_state) { in vdev_dbgmsg_print_tree()
233 (uint_t)vd->vdev_state); in vdev_dbgmsg_print_tree()
237 "", (int)vd->vdev_id, vd->vdev_ops->vdev_op_type, in vdev_dbgmsg_print_tree()
238 vd->vdev_islog ? " (log)" : "", in vdev_dbgmsg_print_tree()
239 (u_longlong_t)vd->vdev_guid, in vdev_dbgmsg_print_tree()
240 vd->vdev_path ? vd->vdev_path : "N/A", state); in vdev_dbgmsg_print_tree()
242 for (uint64_t i = 0; i < vd->vdev_children; i++) in vdev_dbgmsg_print_tree()
243 vdev_dbgmsg_print_tree(vd->vdev_child[i], indent + 2); in vdev_dbgmsg_print_tree()
275 if (strcmp(ops->vdev_op_type, type) == 0) in vdev_getops()
285 * separate log group. For embedded slogs, the vdev_log_mg will be non-NULL.
290 if (mc == spa_embedded_log_class(vd->vdev_spa) && in vdev_get_mg()
291 vd->vdev_log_mg != NULL) in vdev_get_mg()
292 return (vd->vdev_log_mg); in vdev_get_mg()
294 return (vd->vdev_mg); in vdev_get_mg()
303 physical_rs->rs_start = logical_rs->rs_start; in vdev_default_xlate()
304 physical_rs->rs_end = logical_rs->rs_end; in vdev_default_xlate()
309 * String origin is either the per-vdev zap or zpool(8).
328 * all children. This is what's used by anything other than RAID-Z.
333 uint64_t asize = P2ROUNDUP(psize, 1ULL << vd->vdev_top->vdev_ashift); in vdev_default_asize()
336 for (int c = 0; c < vd->vdev_children; c++) { in vdev_default_asize()
337 csize = vdev_psize_to_asize_txg(vd->vdev_child[c], psize, txg); in vdev_default_asize()
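
The default asize computation above rounds the physical size up to the top-level vdev's allocation granularity before summing across children. A standalone sketch of that rounding, using the classic power-of-two macro (the macro and variable names here are assumptions, not the kernel headers):

#include <stdio.h>
#include <stdint.h>

/* Round x up to a power-of-two boundary, like P2ROUNDUP() above. */
#define ROUNDUP_P2(x, align)	(-(-(uint64_t)(x) & -(uint64_t)(align)))

int main(void)
{
	uint64_t psize = 5000;		/* hypothetical block size */
	uint64_t ashift = 12;		/* 4 KiB sectors */
	printf("asize = %llu\n",	/* prints 8192 */
	    (unsigned long long)ROUNDUP_P2(psize, 1ULL << ashift));
	return (0);
}
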
347 return (vd->vdev_min_asize); in vdev_default_min_asize()
359 vdev_t *pvd = vd->vdev_parent; in vdev_get_min_asize()
366 return (vd->vdev_asize); in vdev_get_min_asize()
369 * The top-level vdev just returns the allocatable size rounded in vdev_get_min_asize()
372 if (vd == vd->vdev_top) in vdev_get_min_asize()
373 return (P2ALIGN_TYPED(vd->vdev_asize, 1ULL << vd->vdev_ms_shift, in vdev_get_min_asize()
376 return (pvd->vdev_ops->vdev_op_min_asize(pvd)); in vdev_get_min_asize()
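
Conversely, a top-level vdev's minimum asize is its current asize rounded down to a whole number of metaslabs, per the P2ALIGN_TYPED() call above. A small worked example of that alignment (values are illustrative):

#include <stdio.h>
#include <stdint.h>

/* Round x down to a power-of-two boundary, like P2ALIGN_TYPED() above. */
#define ALIGN_DOWN_P2(x, align)	((uint64_t)(x) & -(uint64_t)(align))

int main(void)
{
	uint64_t asize = 10540ULL << 20;	/* ~10.3 GiB */
	uint64_t ms_shift = 29;			/* 512 MiB metaslabs */
	printf("min_asize = %llu\n",		/* 10 GiB: 20 whole metaslabs */
	    (unsigned long long)ALIGN_DOWN_P2(asize, 1ULL << ms_shift));
	return (0);
}
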
382 vd->vdev_min_asize = vdev_get_min_asize(vd); in vdev_set_min_asize()
384 for (int c = 0; c < vd->vdev_children; c++) in vdev_set_min_asize()
385 vdev_set_min_asize(vd->vdev_child[c]); in vdev_set_min_asize()
389 * Get the minimal allocation size for the top-level vdev.
394 uint64_t min_alloc = 1ULL << vd->vdev_ashift; in vdev_get_min_alloc()
396 if (vd->vdev_ops->vdev_op_min_alloc != NULL) in vdev_get_min_alloc()
397 min_alloc = vd->vdev_ops->vdev_op_min_alloc(vd); in vdev_get_min_alloc()
403 * Get the parity level for a top-level vdev.
410 if (vd->vdev_ops->vdev_op_nparity != NULL) in vdev_get_nparity()
411 nparity = vd->vdev_ops->vdev_op_nparity(vd); in vdev_get_nparity()
419 spa_t *spa = vd->vdev_spa; in vdev_prop_get_int()
420 objset_t *mos = spa->spa_meta_objset; in vdev_prop_get_int()
424 if (vd->vdev_root_zap != 0) { in vdev_prop_get_int()
425 objid = vd->vdev_root_zap; in vdev_prop_get_int()
426 } else if (vd->vdev_top_zap != 0) { in vdev_prop_get_int()
427 objid = vd->vdev_top_zap; in vdev_prop_get_int()
428 } else if (vd->vdev_leaf_zap != 0) { in vdev_prop_get_int()
429 objid = vd->vdev_leaf_zap; in vdev_prop_get_int()
444 * Get the number of data disks for a top-level vdev.
451 if (vd->vdev_ops->vdev_op_ndisks != NULL) in vdev_get_ndisks()
452 ndisks = vd->vdev_ops->vdev_op_ndisks(vd); in vdev_get_ndisks()
460 vdev_t *rvd = spa->spa_root_vdev; in vdev_lookup_top()
464 if (vdev < rvd->vdev_children) { in vdev_lookup_top()
465 ASSERT(rvd->vdev_child[vdev] != NULL); in vdev_lookup_top()
466 return (rvd->vdev_child[vdev]); in vdev_lookup_top()
477 if (vd->vdev_guid == guid) in vdev_lookup_by_guid()
480 for (int c = 0; c < vd->vdev_children; c++) in vdev_lookup_by_guid()
481 if ((mvd = vdev_lookup_by_guid(vd->vdev_child[c], guid)) != in vdev_lookup_by_guid()
493 if (vd->vdev_ops->vdev_op_leaf) in vdev_count_leaves_impl()
496 for (int c = 0; c < vd->vdev_children; c++) in vdev_count_leaves_impl()
497 n += vdev_count_leaves_impl(vd->vdev_child[c]); in vdev_count_leaves_impl()
508 rc = vdev_count_leaves_impl(spa->spa_root_vdev); in vdev_count_leaves()
518 uint64_t id = cvd->vdev_id; in vdev_add_child()
521 ASSERT(spa_config_held(cvd->vdev_spa, SCL_ALL, RW_WRITER) == SCL_ALL); in vdev_add_child()
522 ASSERT(cvd->vdev_parent == NULL); in vdev_add_child()
524 cvd->vdev_parent = pvd; in vdev_add_child()
529 ASSERT(id >= pvd->vdev_children || pvd->vdev_child[id] == NULL); in vdev_add_child()
531 oldsize = pvd->vdev_children * sizeof (vdev_t *); in vdev_add_child()
532 pvd->vdev_children = MAX(pvd->vdev_children, id + 1); in vdev_add_child()
533 newsize = pvd->vdev_children * sizeof (vdev_t *); in vdev_add_child()
536 if (pvd->vdev_child != NULL) { in vdev_add_child()
537 memcpy(newchild, pvd->vdev_child, oldsize); in vdev_add_child()
538 kmem_free(pvd->vdev_child, oldsize); in vdev_add_child()
541 pvd->vdev_child = newchild; in vdev_add_child()
542 pvd->vdev_child[id] = cvd; in vdev_add_child()
544 cvd->vdev_top = (pvd->vdev_top ? pvd->vdev_top : cvd); in vdev_add_child()
545 ASSERT(cvd->vdev_top->vdev_parent->vdev_parent == NULL); in vdev_add_child()
550 for (; pvd != NULL; pvd = pvd->vdev_parent) in vdev_add_child()
551 pvd->vdev_guid_sum += cvd->vdev_guid_sum; in vdev_add_child()
553 if (cvd->vdev_ops->vdev_op_leaf) { in vdev_add_child()
554 list_insert_head(&cvd->vdev_spa->spa_leaf_list, cvd); in vdev_add_child()
555 cvd->vdev_spa->spa_leaf_list_gen++; in vdev_add_child()
563 uint_t id = cvd->vdev_id; in vdev_remove_child()
565 ASSERT(cvd->vdev_parent == pvd); in vdev_remove_child()
570 ASSERT(id < pvd->vdev_children); in vdev_remove_child()
571 ASSERT(pvd->vdev_child[id] == cvd); in vdev_remove_child()
573 pvd->vdev_child[id] = NULL; in vdev_remove_child()
574 cvd->vdev_parent = NULL; in vdev_remove_child()
576 for (c = 0; c < pvd->vdev_children; c++) in vdev_remove_child()
577 if (pvd->vdev_child[c]) in vdev_remove_child()
580 if (c == pvd->vdev_children) { in vdev_remove_child()
581 kmem_free(pvd->vdev_child, c * sizeof (vdev_t *)); in vdev_remove_child()
582 pvd->vdev_child = NULL; in vdev_remove_child()
583 pvd->vdev_children = 0; in vdev_remove_child()
586 if (cvd->vdev_ops->vdev_op_leaf) { in vdev_remove_child()
587 spa_t *spa = cvd->vdev_spa; in vdev_remove_child()
588 list_remove(&spa->spa_leaf_list, cvd); in vdev_remove_child()
589 spa->spa_leaf_list_gen++; in vdev_remove_child()
595 for (; pvd != NULL; pvd = pvd->vdev_parent) in vdev_remove_child()
596 pvd->vdev_guid_sum -= cvd->vdev_guid_sum; in vdev_remove_child()
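
Both vdev_add_child() and vdev_remove_child() walk up the tree adjusting vdev_guid_sum, preserving the invariant that every vdev's guid sum equals the sum of the guids in its subtree. A recursive check of that invariant might look like this (a sketch against the types in this file, not part of the source):

static uint64_t
vdev_guid_sum_verify(vdev_t *vd)
{
	uint64_t sum = vd->vdev_guid;

	for (uint64_t c = 0; c < vd->vdev_children; c++)
		sum += vdev_guid_sum_verify(vd->vdev_child[c]);

	/* Expected to match the incrementally maintained value. */
	ASSERT3U(sum, ==, vd->vdev_guid_sum);
	return (sum);
}
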
606 int oldc = pvd->vdev_children; in vdev_compact_children()
609 ASSERT(spa_config_held(pvd->vdev_spa, SCL_ALL, RW_WRITER) == SCL_ALL); in vdev_compact_children()
615 if (pvd->vdev_child[c]) in vdev_compact_children()
622 if ((cvd = pvd->vdev_child[c]) != NULL) { in vdev_compact_children()
624 cvd->vdev_id = newc++; in vdev_compact_children()
631 kmem_free(pvd->vdev_child, oldc * sizeof (vdev_t *)); in vdev_compact_children()
632 pvd->vdev_child = newchild; in vdev_compact_children()
633 pvd->vdev_children = newc; in vdev_compact_children()
646 vic = &vd->vdev_indirect_config; in vdev_alloc_common()
648 if (spa->spa_root_vdev == NULL) { in vdev_alloc_common()
650 spa->spa_root_vdev = vd; in vdev_alloc_common()
651 spa->spa_load_guid = spa_generate_load_guid(); in vdev_alloc_common()
655 if (spa->spa_root_vdev == vd) { in vdev_alloc_common()
670 vd->vdev_spa = spa; in vdev_alloc_common()
671 vd->vdev_id = id; in vdev_alloc_common()
672 vd->vdev_guid = guid; in vdev_alloc_common()
673 vd->vdev_guid_sum = guid; in vdev_alloc_common()
674 vd->vdev_ops = ops; in vdev_alloc_common()
675 vd->vdev_state = VDEV_STATE_CLOSED; in vdev_alloc_common()
676 vd->vdev_ishole = (ops == &vdev_hole_ops); in vdev_alloc_common()
677 vic->vic_prev_indirect_vdev = UINT64_MAX; in vdev_alloc_common()
679 rw_init(&vd->vdev_indirect_rwlock, NULL, RW_DEFAULT, NULL); in vdev_alloc_common()
680 mutex_init(&vd->vdev_obsolete_lock, NULL, MUTEX_DEFAULT, NULL); in vdev_alloc_common()
681 vd->vdev_obsolete_segments = zfs_range_tree_create(NULL, in vdev_alloc_common()
689 zfs_ratelimit_init(&vd->vdev_delay_rl, &zfs_slow_io_events_per_second, in vdev_alloc_common()
691 zfs_ratelimit_init(&vd->vdev_deadman_rl, &zfs_deadman_events_per_second, in vdev_alloc_common()
693 zfs_ratelimit_init(&vd->vdev_dio_verify_rl, in vdev_alloc_common()
695 zfs_ratelimit_init(&vd->vdev_checksum_rl, in vdev_alloc_common()
701 vd->vdev_checksum_n = vdev_prop_default_numeric(VDEV_PROP_CHECKSUM_N); in vdev_alloc_common()
702 vd->vdev_checksum_t = vdev_prop_default_numeric(VDEV_PROP_CHECKSUM_T); in vdev_alloc_common()
703 vd->vdev_io_n = vdev_prop_default_numeric(VDEV_PROP_IO_N); in vdev_alloc_common()
704 vd->vdev_io_t = vdev_prop_default_numeric(VDEV_PROP_IO_T); in vdev_alloc_common()
705 vd->vdev_slow_io_n = vdev_prop_default_numeric(VDEV_PROP_SLOW_IO_N); in vdev_alloc_common()
706 vd->vdev_slow_io_t = vdev_prop_default_numeric(VDEV_PROP_SLOW_IO_T); in vdev_alloc_common()
708 list_link_init(&vd->vdev_config_dirty_node); in vdev_alloc_common()
709 list_link_init(&vd->vdev_state_dirty_node); in vdev_alloc_common()
710 list_link_init(&vd->vdev_initialize_node); in vdev_alloc_common()
711 list_link_init(&vd->vdev_leaf_node); in vdev_alloc_common()
712 list_link_init(&vd->vdev_trim_node); in vdev_alloc_common()
714 mutex_init(&vd->vdev_dtl_lock, NULL, MUTEX_NOLOCKDEP, NULL); in vdev_alloc_common()
715 mutex_init(&vd->vdev_stat_lock, NULL, MUTEX_DEFAULT, NULL); in vdev_alloc_common()
716 mutex_init(&vd->vdev_probe_lock, NULL, MUTEX_DEFAULT, NULL); in vdev_alloc_common()
717 mutex_init(&vd->vdev_scan_io_queue_lock, NULL, MUTEX_DEFAULT, NULL); in vdev_alloc_common()
719 mutex_init(&vd->vdev_initialize_lock, NULL, MUTEX_DEFAULT, NULL); in vdev_alloc_common()
720 mutex_init(&vd->vdev_initialize_io_lock, NULL, MUTEX_DEFAULT, NULL); in vdev_alloc_common()
721 cv_init(&vd->vdev_initialize_cv, NULL, CV_DEFAULT, NULL); in vdev_alloc_common()
722 cv_init(&vd->vdev_initialize_io_cv, NULL, CV_DEFAULT, NULL); in vdev_alloc_common()
724 mutex_init(&vd->vdev_trim_lock, NULL, MUTEX_DEFAULT, NULL); in vdev_alloc_common()
725 mutex_init(&vd->vdev_autotrim_lock, NULL, MUTEX_DEFAULT, NULL); in vdev_alloc_common()
726 mutex_init(&vd->vdev_trim_io_lock, NULL, MUTEX_DEFAULT, NULL); in vdev_alloc_common()
727 cv_init(&vd->vdev_trim_cv, NULL, CV_DEFAULT, NULL); in vdev_alloc_common()
728 cv_init(&vd->vdev_autotrim_cv, NULL, CV_DEFAULT, NULL); in vdev_alloc_common()
729 cv_init(&vd->vdev_autotrim_kick_cv, NULL, CV_DEFAULT, NULL); in vdev_alloc_common()
730 cv_init(&vd->vdev_trim_io_cv, NULL, CV_DEFAULT, NULL); in vdev_alloc_common()
732 mutex_init(&vd->vdev_rebuild_lock, NULL, MUTEX_DEFAULT, NULL); in vdev_alloc_common()
733 cv_init(&vd->vdev_rebuild_cv, NULL, CV_DEFAULT, NULL); in vdev_alloc_common()
736 vd->vdev_dtl[t] = zfs_range_tree_create(NULL, ZFS_RANGE_SEG64, in vdev_alloc_common()
740 txg_list_create(&vd->vdev_ms_list, spa, in vdev_alloc_common()
742 txg_list_create(&vd->vdev_dtl_list, spa, in vdev_alloc_common()
744 vd->vdev_stat.vs_timestamp = gethrtime(); in vdev_alloc_common()
752 * creating a new vdev or loading an existing one - the behavior is slightly
767 boolean_t top_level = (parent && !parent->vdev_parent); in vdev_alloc()
804 if (ops != &vdev_root_ops && spa->spa_root_vdev == NULL) in vdev_alloc()
822 * If creating a top-level vdev, check for allocation in vdev_alloc()
830 if (spa->spa_load_state != SPA_LOAD_CREATE && in vdev_alloc()
839 spa->spa_load_state != SPA_LOAD_CREATE && in vdev_alloc()
851 if (ops->vdev_op_init != NULL) { in vdev_alloc()
852 rc = ops->vdev_op_init(spa, nv, &tsd); in vdev_alloc()
859 vd->vdev_tsd = tsd; in vdev_alloc()
860 vd->vdev_islog = islog; in vdev_alloc()
863 vd->vdev_alloc_bias = alloc_bias; in vdev_alloc()
866 vd->vdev_path = spa_strdup(tmp); in vdev_alloc()
871 * zpool offline -f). in vdev_alloc()
875 vd->vdev_stat.vs_aux = VDEV_AUX_EXTERNAL; in vdev_alloc()
876 vd->vdev_faulted = 1; in vdev_alloc()
877 vd->vdev_label_aux = VDEV_AUX_EXTERNAL; in vdev_alloc()
881 vd->vdev_devid = spa_strdup(tmp); in vdev_alloc()
883 vd->vdev_physpath = spa_strdup(tmp); in vdev_alloc()
887 vd->vdev_enc_sysfs_path = spa_strdup(tmp); in vdev_alloc()
890 vd->vdev_fru = spa_strdup(tmp); in vdev_alloc()
894 * as -1. in vdev_alloc()
897 &vd->vdev_wholedisk) != 0) in vdev_alloc()
898 vd->vdev_wholedisk = -1ULL; in vdev_alloc()
900 vic = &vd->vdev_indirect_config; in vdev_alloc()
902 ASSERT0(vic->vic_mapping_object); in vdev_alloc()
904 &vic->vic_mapping_object); in vdev_alloc()
905 ASSERT0(vic->vic_births_object); in vdev_alloc()
907 &vic->vic_births_object); in vdev_alloc()
908 ASSERT3U(vic->vic_prev_indirect_vdev, ==, UINT64_MAX); in vdev_alloc()
910 &vic->vic_prev_indirect_vdev); in vdev_alloc()
917 &vd->vdev_not_present); in vdev_alloc()
925 &vd->vdev_ashift); in vdev_alloc()
927 vd->vdev_attaching = B_TRUE; in vdev_alloc()
934 &vd->vdev_crtxg); in vdev_alloc()
936 if (vd->vdev_ops == &vdev_root_ops && in vdev_alloc()
941 &vd->vdev_root_zap); in vdev_alloc()
945 * If we're a top-level vdev, try to load the allocation parameters. in vdev_alloc()
950 &vd->vdev_ms_array); in vdev_alloc()
952 &vd->vdev_ms_shift); in vdev_alloc()
954 &vd->vdev_asize); in vdev_alloc()
956 &vd->vdev_noalloc); in vdev_alloc()
958 &vd->vdev_removing); in vdev_alloc()
960 &vd->vdev_top_zap); in vdev_alloc()
961 vd->vdev_rz_expanding = nvlist_exists(nv, in vdev_alloc()
964 ASSERT0(vd->vdev_top_zap); in vdev_alloc()
975 if (vd->vdev_ops->vdev_op_leaf && in vdev_alloc()
978 ZPOOL_CONFIG_VDEV_LEAF_ZAP, &vd->vdev_leaf_zap); in vdev_alloc()
980 ASSERT0(vd->vdev_leaf_zap); in vdev_alloc()
987 if (vd->vdev_ops->vdev_op_leaf && in vdev_alloc()
992 &vd->vdev_dtl_object); in vdev_alloc()
994 &vd->vdev_unspare); in vdev_alloc()
1006 &vd->vdev_offline); in vdev_alloc()
1009 &vd->vdev_resilver_txg); in vdev_alloc()
1012 &vd->vdev_rebuild_txg); in vdev_alloc()
1022 * state with 'zpool offline -f'. The persistent fault will in vdev_alloc()
1030 &vd->vdev_faulted); in vdev_alloc()
1032 &vd->vdev_degraded); in vdev_alloc()
1034 &vd->vdev_removed); in vdev_alloc()
1036 if (vd->vdev_faulted || vd->vdev_degraded) { in vdev_alloc()
1039 vd->vdev_label_aux = in vdev_alloc()
1044 vd->vdev_label_aux = VDEV_AUX_EXTERNAL; in vdev_alloc()
1046 vd->vdev_faulted = 0ULL; in vdev_alloc()
1064 spa_t *spa = vd->vdev_spa; in vdev_free()
1066 ASSERT3P(vd->vdev_initialize_thread, ==, NULL); in vdev_free()
1067 ASSERT3P(vd->vdev_trim_thread, ==, NULL); in vdev_free()
1068 ASSERT3P(vd->vdev_autotrim_thread, ==, NULL); in vdev_free()
1069 ASSERT3P(vd->vdev_rebuild_thread, ==, NULL); in vdev_free()
1076 if (vd->vdev_scan_io_queue != NULL) { in vdev_free()
1077 mutex_enter(&vd->vdev_scan_io_queue_lock); in vdev_free()
1078 dsl_scan_io_queue_destroy(vd->vdev_scan_io_queue); in vdev_free()
1079 vd->vdev_scan_io_queue = NULL; in vdev_free()
1080 mutex_exit(&vd->vdev_scan_io_queue_lock); in vdev_free()
1089 ASSERT(!list_link_active(&vd->vdev_config_dirty_node)); in vdev_free()
1090 ASSERT(!list_link_active(&vd->vdev_state_dirty_node)); in vdev_free()
1095 for (int c = 0; c < vd->vdev_children; c++) in vdev_free()
1096 vdev_free(vd->vdev_child[c]); in vdev_free()
1098 ASSERT(vd->vdev_child == NULL); in vdev_free()
1099 ASSERT(vd->vdev_guid_sum == vd->vdev_guid); in vdev_free()
1101 if (vd->vdev_ops->vdev_op_fini != NULL) in vdev_free()
1102 vd->vdev_ops->vdev_op_fini(vd); in vdev_free()
1107 if (vd->vdev_mg != NULL) { in vdev_free()
1109 metaslab_group_destroy(vd->vdev_mg); in vdev_free()
1110 vd->vdev_mg = NULL; in vdev_free()
1112 if (vd->vdev_log_mg != NULL) { in vdev_free()
1113 ASSERT0(vd->vdev_ms_count); in vdev_free()
1114 metaslab_group_destroy(vd->vdev_log_mg); in vdev_free()
1115 vd->vdev_log_mg = NULL; in vdev_free()
1118 ASSERT0(vd->vdev_stat.vs_space); in vdev_free()
1119 ASSERT0(vd->vdev_stat.vs_dspace); in vdev_free()
1120 ASSERT0(vd->vdev_stat.vs_alloc); in vdev_free()
1125 vdev_remove_child(vd->vdev_parent, vd); in vdev_free()
1127 ASSERT(vd->vdev_parent == NULL); in vdev_free()
1128 ASSERT(!list_link_active(&vd->vdev_leaf_node)); in vdev_free()
1135 if (vd->vdev_path) in vdev_free()
1136 spa_strfree(vd->vdev_path); in vdev_free()
1137 if (vd->vdev_devid) in vdev_free()
1138 spa_strfree(vd->vdev_devid); in vdev_free()
1139 if (vd->vdev_physpath) in vdev_free()
1140 spa_strfree(vd->vdev_physpath); in vdev_free()
1142 if (vd->vdev_enc_sysfs_path) in vdev_free()
1143 spa_strfree(vd->vdev_enc_sysfs_path); in vdev_free()
1145 if (vd->vdev_fru) in vdev_free()
1146 spa_strfree(vd->vdev_fru); in vdev_free()
1148 if (vd->vdev_isspare) in vdev_free()
1150 if (vd->vdev_isl2cache) in vdev_free()
1153 txg_list_destroy(&vd->vdev_ms_list); in vdev_free()
1154 txg_list_destroy(&vd->vdev_dtl_list); in vdev_free()
1156 mutex_enter(&vd->vdev_dtl_lock); in vdev_free()
1157 space_map_close(vd->vdev_dtl_sm); in vdev_free()
1159 zfs_range_tree_vacate(vd->vdev_dtl[t], NULL, NULL); in vdev_free()
1160 zfs_range_tree_destroy(vd->vdev_dtl[t]); in vdev_free()
1162 mutex_exit(&vd->vdev_dtl_lock); in vdev_free()
1164 EQUIV(vd->vdev_indirect_births != NULL, in vdev_free()
1165 vd->vdev_indirect_mapping != NULL); in vdev_free()
1166 if (vd->vdev_indirect_births != NULL) { in vdev_free()
1167 vdev_indirect_mapping_close(vd->vdev_indirect_mapping); in vdev_free()
1168 vdev_indirect_births_close(vd->vdev_indirect_births); in vdev_free()
1171 if (vd->vdev_obsolete_sm != NULL) { in vdev_free()
1172 ASSERT(vd->vdev_removing || in vdev_free()
1173 vd->vdev_ops == &vdev_indirect_ops); in vdev_free()
1174 space_map_close(vd->vdev_obsolete_sm); in vdev_free()
1175 vd->vdev_obsolete_sm = NULL; in vdev_free()
1177 zfs_range_tree_destroy(vd->vdev_obsolete_segments); in vdev_free()
1178 rw_destroy(&vd->vdev_indirect_rwlock); in vdev_free()
1179 mutex_destroy(&vd->vdev_obsolete_lock); in vdev_free()
1181 mutex_destroy(&vd->vdev_dtl_lock); in vdev_free()
1182 mutex_destroy(&vd->vdev_stat_lock); in vdev_free()
1183 mutex_destroy(&vd->vdev_probe_lock); in vdev_free()
1184 mutex_destroy(&vd->vdev_scan_io_queue_lock); in vdev_free()
1186 mutex_destroy(&vd->vdev_initialize_lock); in vdev_free()
1187 mutex_destroy(&vd->vdev_initialize_io_lock); in vdev_free()
1188 cv_destroy(&vd->vdev_initialize_io_cv); in vdev_free()
1189 cv_destroy(&vd->vdev_initialize_cv); in vdev_free()
1191 mutex_destroy(&vd->vdev_trim_lock); in vdev_free()
1192 mutex_destroy(&vd->vdev_autotrim_lock); in vdev_free()
1193 mutex_destroy(&vd->vdev_trim_io_lock); in vdev_free()
1194 cv_destroy(&vd->vdev_trim_cv); in vdev_free()
1195 cv_destroy(&vd->vdev_autotrim_cv); in vdev_free()
1196 cv_destroy(&vd->vdev_autotrim_kick_cv); in vdev_free()
1197 cv_destroy(&vd->vdev_trim_io_cv); in vdev_free()
1199 mutex_destroy(&vd->vdev_rebuild_lock); in vdev_free()
1200 cv_destroy(&vd->vdev_rebuild_cv); in vdev_free()
1202 zfs_ratelimit_fini(&vd->vdev_delay_rl); in vdev_free()
1203 zfs_ratelimit_fini(&vd->vdev_deadman_rl); in vdev_free()
1204 zfs_ratelimit_fini(&vd->vdev_dio_verify_rl); in vdev_free()
1205 zfs_ratelimit_fini(&vd->vdev_checksum_rl); in vdev_free()
1207 if (vd == spa->spa_root_vdev) in vdev_free()
1208 spa->spa_root_vdev = NULL; in vdev_free()
1214 * Transfer top-level vdev state from svd to tvd.
1219 spa_t *spa = svd->vdev_spa; in vdev_top_transfer()
1224 ASSERT(tvd == tvd->vdev_top); in vdev_top_transfer()
1226 tvd->vdev_ms_array = svd->vdev_ms_array; in vdev_top_transfer()
1227 tvd->vdev_ms_shift = svd->vdev_ms_shift; in vdev_top_transfer()
1228 tvd->vdev_ms_count = svd->vdev_ms_count; in vdev_top_transfer()
1229 tvd->vdev_top_zap = svd->vdev_top_zap; in vdev_top_transfer()
1231 svd->vdev_ms_array = 0; in vdev_top_transfer()
1232 svd->vdev_ms_shift = 0; in vdev_top_transfer()
1233 svd->vdev_ms_count = 0; in vdev_top_transfer()
1234 svd->vdev_top_zap = 0; in vdev_top_transfer()
1236 if (tvd->vdev_mg) in vdev_top_transfer()
1237 ASSERT3P(tvd->vdev_mg, ==, svd->vdev_mg); in vdev_top_transfer()
1238 if (tvd->vdev_log_mg) in vdev_top_transfer()
1239 ASSERT3P(tvd->vdev_log_mg, ==, svd->vdev_log_mg); in vdev_top_transfer()
1240 tvd->vdev_mg = svd->vdev_mg; in vdev_top_transfer()
1241 tvd->vdev_log_mg = svd->vdev_log_mg; in vdev_top_transfer()
1242 tvd->vdev_ms = svd->vdev_ms; in vdev_top_transfer()
1244 svd->vdev_mg = NULL; in vdev_top_transfer()
1245 svd->vdev_log_mg = NULL; in vdev_top_transfer()
1246 svd->vdev_ms = NULL; in vdev_top_transfer()
1248 if (tvd->vdev_mg != NULL) in vdev_top_transfer()
1249 tvd->vdev_mg->mg_vd = tvd; in vdev_top_transfer()
1250 if (tvd->vdev_log_mg != NULL) in vdev_top_transfer()
1251 tvd->vdev_log_mg->mg_vd = tvd; in vdev_top_transfer()
1253 tvd->vdev_checkpoint_sm = svd->vdev_checkpoint_sm; in vdev_top_transfer()
1254 svd->vdev_checkpoint_sm = NULL; in vdev_top_transfer()
1256 tvd->vdev_alloc_bias = svd->vdev_alloc_bias; in vdev_top_transfer()
1257 svd->vdev_alloc_bias = VDEV_BIAS_NONE; in vdev_top_transfer()
1259 tvd->vdev_stat.vs_alloc = svd->vdev_stat.vs_alloc; in vdev_top_transfer()
1260 tvd->vdev_stat.vs_space = svd->vdev_stat.vs_space; in vdev_top_transfer()
1261 tvd->vdev_stat.vs_dspace = svd->vdev_stat.vs_dspace; in vdev_top_transfer()
1263 svd->vdev_stat.vs_alloc = 0; in vdev_top_transfer()
1264 svd->vdev_stat.vs_space = 0; in vdev_top_transfer()
1265 svd->vdev_stat.vs_dspace = 0; in vdev_top_transfer()
1268 * State which may be set on a top-level vdev that's in the in vdev_top_transfer()
1271 ASSERT0(tvd->vdev_indirect_config.vic_births_object); in vdev_top_transfer()
1272 ASSERT0(tvd->vdev_indirect_config.vic_mapping_object); in vdev_top_transfer()
1273 ASSERT3U(tvd->vdev_indirect_config.vic_prev_indirect_vdev, ==, -1ULL); in vdev_top_transfer()
1274 ASSERT3P(tvd->vdev_indirect_mapping, ==, NULL); in vdev_top_transfer()
1275 ASSERT3P(tvd->vdev_indirect_births, ==, NULL); in vdev_top_transfer()
1276 ASSERT3P(tvd->vdev_obsolete_sm, ==, NULL); in vdev_top_transfer()
1277 ASSERT0(tvd->vdev_noalloc); in vdev_top_transfer()
1278 ASSERT0(tvd->vdev_removing); in vdev_top_transfer()
1279 ASSERT0(tvd->vdev_rebuilding); in vdev_top_transfer()
1280 tvd->vdev_noalloc = svd->vdev_noalloc; in vdev_top_transfer()
1281 tvd->vdev_removing = svd->vdev_removing; in vdev_top_transfer()
1282 tvd->vdev_rebuilding = svd->vdev_rebuilding; in vdev_top_transfer()
1283 tvd->vdev_rebuild_config = svd->vdev_rebuild_config; in vdev_top_transfer()
1284 tvd->vdev_indirect_config = svd->vdev_indirect_config; in vdev_top_transfer()
1285 tvd->vdev_indirect_mapping = svd->vdev_indirect_mapping; in vdev_top_transfer()
1286 tvd->vdev_indirect_births = svd->vdev_indirect_births; in vdev_top_transfer()
1287 zfs_range_tree_swap(&svd->vdev_obsolete_segments, in vdev_top_transfer()
1288 &tvd->vdev_obsolete_segments); in vdev_top_transfer()
1289 tvd->vdev_obsolete_sm = svd->vdev_obsolete_sm; in vdev_top_transfer()
1290 svd->vdev_indirect_config.vic_mapping_object = 0; in vdev_top_transfer()
1291 svd->vdev_indirect_config.vic_births_object = 0; in vdev_top_transfer()
1292 svd->vdev_indirect_config.vic_prev_indirect_vdev = -1ULL; in vdev_top_transfer()
1293 svd->vdev_indirect_mapping = NULL; in vdev_top_transfer()
1294 svd->vdev_indirect_births = NULL; in vdev_top_transfer()
1295 svd->vdev_obsolete_sm = NULL; in vdev_top_transfer()
1296 svd->vdev_noalloc = 0; in vdev_top_transfer()
1297 svd->vdev_removing = 0; in vdev_top_transfer()
1298 svd->vdev_rebuilding = 0; in vdev_top_transfer()
1301 while ((msp = txg_list_remove(&svd->vdev_ms_list, t)) != NULL) in vdev_top_transfer()
1302 (void) txg_list_add(&tvd->vdev_ms_list, msp, t); in vdev_top_transfer()
1303 while ((vd = txg_list_remove(&svd->vdev_dtl_list, t)) != NULL) in vdev_top_transfer()
1304 (void) txg_list_add(&tvd->vdev_dtl_list, vd, t); in vdev_top_transfer()
1305 if (txg_list_remove_this(&spa->spa_vdev_txg_list, svd, t)) in vdev_top_transfer()
1306 (void) txg_list_add(&spa->spa_vdev_txg_list, tvd, t); in vdev_top_transfer()
1309 if (list_link_active(&svd->vdev_config_dirty_node)) { in vdev_top_transfer()
1314 if (list_link_active(&svd->vdev_state_dirty_node)) { in vdev_top_transfer()
1319 tvd->vdev_deflate_ratio = svd->vdev_deflate_ratio; in vdev_top_transfer()
1320 svd->vdev_deflate_ratio = 0; in vdev_top_transfer()
1322 tvd->vdev_islog = svd->vdev_islog; in vdev_top_transfer()
1323 svd->vdev_islog = 0; in vdev_top_transfer()
1334 vd->vdev_top = tvd; in vdev_top_update()
1336 for (int c = 0; c < vd->vdev_children; c++) in vdev_top_update()
1337 vdev_top_update(tvd, vd->vdev_child[c]); in vdev_top_update()
1347 spa_t *spa = cvd->vdev_spa; in vdev_add_parent()
1348 vdev_t *pvd = cvd->vdev_parent; in vdev_add_parent()
1353 mvd = vdev_alloc_common(spa, cvd->vdev_id, 0, ops); in vdev_add_parent()
1355 mvd->vdev_asize = cvd->vdev_asize; in vdev_add_parent()
1356 mvd->vdev_min_asize = cvd->vdev_min_asize; in vdev_add_parent()
1357 mvd->vdev_max_asize = cvd->vdev_max_asize; in vdev_add_parent()
1358 mvd->vdev_psize = cvd->vdev_psize; in vdev_add_parent()
1359 mvd->vdev_ashift = cvd->vdev_ashift; in vdev_add_parent()
1360 mvd->vdev_logical_ashift = cvd->vdev_logical_ashift; in vdev_add_parent()
1361 mvd->vdev_physical_ashift = cvd->vdev_physical_ashift; in vdev_add_parent()
1362 mvd->vdev_state = cvd->vdev_state; in vdev_add_parent()
1363 mvd->vdev_crtxg = cvd->vdev_crtxg; in vdev_add_parent()
1367 cvd->vdev_id = mvd->vdev_children; in vdev_add_parent()
1369 vdev_top_update(cvd->vdev_top, cvd->vdev_top); in vdev_add_parent()
1371 if (mvd == mvd->vdev_top) in vdev_add_parent()
1378 * Remove a 1-way mirror/replacing vdev from the tree.
1383 vdev_t *mvd = cvd->vdev_parent; in vdev_remove_parent()
1384 vdev_t *pvd = mvd->vdev_parent; in vdev_remove_parent()
1386 ASSERT(spa_config_held(cvd->vdev_spa, SCL_ALL, RW_WRITER) == SCL_ALL); in vdev_remove_parent()
1388 ASSERT(mvd->vdev_children == 1); in vdev_remove_parent()
1389 ASSERT(mvd->vdev_ops == &vdev_mirror_ops || in vdev_remove_parent()
1390 mvd->vdev_ops == &vdev_replacing_ops || in vdev_remove_parent()
1391 mvd->vdev_ops == &vdev_spare_ops); in vdev_remove_parent()
1392 cvd->vdev_ashift = mvd->vdev_ashift; in vdev_remove_parent()
1393 cvd->vdev_logical_ashift = mvd->vdev_logical_ashift; in vdev_remove_parent()
1394 cvd->vdev_physical_ashift = mvd->vdev_physical_ashift; in vdev_remove_parent()
1399 * If cvd will replace mvd as a top-level vdev, preserve mvd's guid. in vdev_remove_parent()
1401 * go to import the pool we'll think we have two top-level vdevs, in vdev_remove_parent()
1402 * instead of a different version of the same top-level vdev. in vdev_remove_parent()
1404 if (mvd->vdev_top == mvd) { in vdev_remove_parent()
1405 uint64_t guid_delta = mvd->vdev_guid - cvd->vdev_guid; in vdev_remove_parent()
1406 cvd->vdev_orig_guid = cvd->vdev_guid; in vdev_remove_parent()
1407 cvd->vdev_guid += guid_delta; in vdev_remove_parent()
1408 cvd->vdev_guid_sum += guid_delta; in vdev_remove_parent()
1414 * detaching children of non-uniform sizes, the mirror could in vdev_remove_parent()
1416 * re-establish the mirror. in vdev_remove_parent()
1418 if (!cvd->vdev_spa->spa_autoexpand) in vdev_remove_parent()
1419 cvd->vdev_asize = mvd->vdev_asize; in vdev_remove_parent()
1421 cvd->vdev_id = mvd->vdev_id; in vdev_remove_parent()
1423 vdev_top_update(cvd->vdev_top, cvd->vdev_top); in vdev_remove_parent()
1425 if (cvd == cvd->vdev_top) in vdev_remove_parent()
1428 ASSERT(mvd->vdev_children == 0); in vdev_remove_parent()
1452 if (min_alloc < spa->spa_min_alloc) in vdev_spa_set_alloc()
1453 spa->spa_min_alloc = min_alloc; in vdev_spa_set_alloc()
1454 if (spa->spa_gcd_alloc == INT_MAX) { in vdev_spa_set_alloc()
1455 spa->spa_gcd_alloc = min_alloc; in vdev_spa_set_alloc()
1457 spa->spa_gcd_alloc = vdev_gcd(min_alloc, in vdev_spa_set_alloc()
1458 spa->spa_gcd_alloc); in vdev_spa_set_alloc()
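
vdev_spa_set_alloc() folds each top-level vdev's minimum allocation into a running minimum and a running greatest common divisor, so spa_gcd_alloc ends up as a granularity every vdev can honor. The referenced vdev_gcd() helper is presumably Euclid's algorithm; a standalone sketch:

#include <stdio.h>
#include <stdint.h>

/* Euclid's algorithm, standing in for the vdev_gcd() helper. */
static uint64_t
gcd_u64(uint64_t a, uint64_t b)
{
	while (b != 0) {
		uint64_t t = a % b;
		a = b;
		b = t;
	}
	return (a);
}

int main(void)
{
	/* Top-level vdevs with 4 KiB and 12 KiB minimum allocations
	 * yield a pool-wide gcd allocation of 4 KiB. */
	printf("%llu\n", (unsigned long long)gcd_u64(4096, 12288));
	return (0);
}
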
1465 spa_t *spa = vd->vdev_spa; in vdev_metaslab_group_create()
1470 if (vd->vdev_mg == NULL) { in vdev_metaslab_group_create()
1473 if (vd->vdev_islog && vd->vdev_alloc_bias == VDEV_BIAS_NONE) in vdev_metaslab_group_create()
1474 vd->vdev_alloc_bias = VDEV_BIAS_LOG; in vdev_metaslab_group_create()
1476 ASSERT3U(vd->vdev_islog, ==, in vdev_metaslab_group_create()
1477 (vd->vdev_alloc_bias == VDEV_BIAS_LOG)); in vdev_metaslab_group_create()
1479 switch (vd->vdev_alloc_bias) { in vdev_metaslab_group_create()
1493 vd->vdev_mg = metaslab_group_create(mc, vd, in vdev_metaslab_group_create()
1494 spa->spa_alloc_count); in vdev_metaslab_group_create()
1496 if (!vd->vdev_islog) { in vdev_metaslab_group_create()
1497 vd->vdev_log_mg = metaslab_group_create( in vdev_metaslab_group_create()
1506 if (vd->vdev_top == vd && vd->vdev_ashift != 0 && in vdev_metaslab_group_create()
1507 mc == spa_normal_class(spa) && vd->vdev_aux == NULL) { in vdev_metaslab_group_create()
1508 if (vd->vdev_ashift > spa->spa_max_ashift) in vdev_metaslab_group_create()
1509 spa->spa_max_ashift = vd->vdev_ashift; in vdev_metaslab_group_create()
1510 if (vd->vdev_ashift < spa->spa_min_ashift) in vdev_metaslab_group_create()
1511 spa->spa_min_ashift = vd->vdev_ashift; in vdev_metaslab_group_create()
1522 spa_t *spa = vd->vdev_spa; in vdev_metaslab_init()
1523 uint64_t oldc = vd->vdev_ms_count; in vdev_metaslab_init()
1524 uint64_t newc = vd->vdev_asize >> vd->vdev_ms_shift; in vdev_metaslab_init()
1534 if (vd->vdev_ms_shift == 0) in vdev_metaslab_init()
1537 ASSERT(!vd->vdev_ishole); in vdev_metaslab_init()
1544 memcpy(mspp, vd->vdev_ms, oldc * sizeof (*mspp)); in vdev_metaslab_init()
1545 vmem_free(vd->vdev_ms, oldc * sizeof (*mspp)); in vdev_metaslab_init()
1548 vd->vdev_ms = mspp; in vdev_metaslab_init()
1549 vd->vdev_ms_count = newc; in vdev_metaslab_init()
1558 if (txg == 0 && vd->vdev_ms_array != 0) { in vdev_metaslab_init()
1559 error = dmu_read(spa->spa_meta_objset, in vdev_metaslab_init()
1560 vd->vdev_ms_array, in vdev_metaslab_init()
1570 error = metaslab_init(vd->vdev_mg, m, object, txg, in vdev_metaslab_init()
1571 &(vd->vdev_ms[m])); in vdev_metaslab_init()
1584 if (vd->vdev_mg->mg_class == spa_normal_class(spa) && in vdev_metaslab_init()
1585 vd->vdev_ms_count > zfs_embedded_slog_min_ms && in vdev_metaslab_init()
1586 avl_is_empty(&vd->vdev_log_mg->mg_metaslab_tree)) { in vdev_metaslab_init()
1592 * (pre-existing) ones may be active (e.g. have non-empty in vdev_metaslab_init()
1598 space_map_allocated(vd->vdev_ms[m]->ms_sm); in vdev_metaslab_init()
1604 metaslab_t *slog_ms = vd->vdev_ms[slog_msid]; in vdev_metaslab_init()
1611 (void) txg_list_remove_this(&vd->vdev_ms_list, in vdev_metaslab_init()
1614 uint64_t sm_obj = space_map_object(slog_ms->ms_sm); in vdev_metaslab_init()
1616 VERIFY0(metaslab_init(vd->vdev_log_mg, slog_msid, sm_obj, txg, in vdev_metaslab_init()
1617 &vd->vdev_ms[slog_msid])); in vdev_metaslab_init()
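
The fragment above donates one metaslab to the embedded slog class; since pre-existing metaslabs may already hold data, the elided loop scans space_map_allocated() to find the emptiest candidate. The selection amounts to a minimum search, roughly (a sketch, not the verbatim source):

	uint64_t slog_msid = 0, smallest = UINT64_MAX;
	for (uint64_t m = 0; m < vd->vdev_ms_count; m++) {
		uint64_t alloc = (vd->vdev_ms[m]->ms_sm == NULL) ? 0 :
		    space_map_allocated(vd->vdev_ms[m]->ms_sm);
		if (alloc < smallest) {
			slog_msid = m;
			smallest = alloc;
		}
	}
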
1624 * If the vdev is marked as non-allocating then don't in vdev_metaslab_init()
1628 if (vd->vdev_noalloc) { in vdev_metaslab_init()
1629 /* track non-allocating vdev space */ in vdev_metaslab_init()
1630 spa->spa_nonallocating_dspace += spa_deflate(spa) ? in vdev_metaslab_init()
1631 vd->vdev_stat.vs_dspace : vd->vdev_stat.vs_space; in vdev_metaslab_init()
1633 metaslab_group_activate(vd->vdev_mg); in vdev_metaslab_init()
1634 if (vd->vdev_log_mg != NULL) in vdev_metaslab_init()
1635 metaslab_group_activate(vd->vdev_log_mg); in vdev_metaslab_init()
1647 if (vd->vdev_checkpoint_sm != NULL) { in vdev_metaslab_fini()
1648 ASSERT(spa_feature_is_active(vd->vdev_spa, in vdev_metaslab_fini()
1650 space_map_close(vd->vdev_checkpoint_sm); in vdev_metaslab_fini()
1659 vd->vdev_checkpoint_sm = NULL; in vdev_metaslab_fini()
1662 if (vd->vdev_ms != NULL) { in vdev_metaslab_fini()
1663 metaslab_group_t *mg = vd->vdev_mg; in vdev_metaslab_fini()
1666 if (vd->vdev_log_mg != NULL) { in vdev_metaslab_fini()
1667 ASSERT(!vd->vdev_islog); in vdev_metaslab_fini()
1668 metaslab_group_passivate(vd->vdev_log_mg); in vdev_metaslab_fini()
1671 uint64_t count = vd->vdev_ms_count; in vdev_metaslab_fini()
1673 metaslab_t *msp = vd->vdev_ms[m]; in vdev_metaslab_fini()
1677 vmem_free(vd->vdev_ms, count * sizeof (metaslab_t *)); in vdev_metaslab_fini()
1678 vd->vdev_ms = NULL; in vdev_metaslab_fini()
1679 vd->vdev_ms_count = 0; in vdev_metaslab_fini()
1682 ASSERT0(mg->mg_histogram[i]); in vdev_metaslab_fini()
1683 if (vd->vdev_log_mg != NULL) in vdev_metaslab_fini()
1684 ASSERT0(vd->vdev_log_mg->mg_histogram[i]); in vdev_metaslab_fini()
1687 ASSERT0(vd->vdev_ms_count); in vdev_metaslab_fini()
1700 spa_t *spa = zio->io_spa; in vdev_probe_done()
1701 vdev_t *vd = zio->io_vd; in vdev_probe_done()
1702 vdev_probe_stats_t *vps = zio->io_private; in vdev_probe_done()
1704 ASSERT(vd->vdev_probe_zio != NULL); in vdev_probe_done()
1706 if (zio->io_type == ZIO_TYPE_READ) { in vdev_probe_done()
1707 if (zio->io_error == 0) in vdev_probe_done()
1708 vps->vps_readable = 1; in vdev_probe_done()
1709 if (zio->io_error == 0 && spa_writeable(spa)) { in vdev_probe_done()
1710 zio_nowait(zio_write_phys(vd->vdev_probe_zio, vd, in vdev_probe_done()
1711 zio->io_offset, zio->io_size, zio->io_abd, in vdev_probe_done()
1713 ZIO_PRIORITY_SYNC_WRITE, vps->vps_flags, B_TRUE)); in vdev_probe_done()
1715 abd_free(zio->io_abd); in vdev_probe_done()
1717 } else if (zio->io_type == ZIO_TYPE_WRITE) { in vdev_probe_done()
1718 if (zio->io_error == 0) in vdev_probe_done()
1719 vps->vps_writeable = 1; in vdev_probe_done()
1720 abd_free(zio->io_abd); in vdev_probe_done()
1721 } else if (zio->io_type == ZIO_TYPE_NULL) { in vdev_probe_done()
1725 vd->vdev_cant_read |= !vps->vps_readable; in vdev_probe_done()
1726 vd->vdev_cant_write |= !vps->vps_writeable; in vdev_probe_done()
1728 vd->vdev_cant_read, vd->vdev_cant_write); in vdev_probe_done()
1732 zio->io_error = 0; in vdev_probe_done()
1734 ASSERT(zio->io_error != 0); in vdev_probe_done()
1738 zio->io_error = SET_ERROR(ENXIO); in vdev_probe_done()
1746 if (vps->vps_zio_done_probe) { in vdev_probe_done()
1747 vd->vdev_fault_wanted = B_TRUE; in vdev_probe_done()
1752 mutex_enter(&vd->vdev_probe_lock); in vdev_probe_done()
1753 ASSERT(vd->vdev_probe_zio == zio); in vdev_probe_done()
1754 vd->vdev_probe_zio = NULL; in vdev_probe_done()
1755 mutex_exit(&vd->vdev_probe_lock); in vdev_probe_done()
1760 pio->io_error = SET_ERROR(ENXIO); in vdev_probe_done()
1776 spa_t *spa = vd->vdev_spa; in vdev_probe()
1780 ASSERT(vd->vdev_ops->vdev_op_leaf); in vdev_probe()
1785 if (zio && (zio->io_flags & ZIO_FLAG_PROBE)) in vdev_probe()
1793 mutex_enter(&vd->vdev_probe_lock); in vdev_probe()
1795 if ((pio = vd->vdev_probe_zio) == NULL) { in vdev_probe()
1798 vps->vps_flags = ZIO_FLAG_CANFAIL | ZIO_FLAG_PROBE | in vdev_probe()
1800 vps->vps_zio_done_probe = (zio != NULL); in vdev_probe()
1819 vps->vps_flags |= ZIO_FLAG_CONFIG_WRITER; in vdev_probe()
1820 vd->vdev_cant_read = B_FALSE; in vdev_probe()
1821 vd->vdev_cant_write = B_FALSE; in vdev_probe()
1824 vd->vdev_probe_zio = pio = zio_null(NULL, spa, vd, in vdev_probe()
1826 vps->vps_flags | ZIO_FLAG_DONT_PROPAGATE); in vdev_probe()
1832 mutex_exit(&vd->vdev_probe_lock); in vdev_probe()
1841 vdev_label_offset(vd->vdev_psize, l, in vdev_probe()
1845 ZIO_PRIORITY_SYNC_READ, vps->vps_flags, B_TRUE)); in vdev_probe()
1860 vd->vdev_load_error = vdev_load(vd); in vdev_load_child()
1868 vd->vdev_open_thread = curthread; in vdev_open_child()
1869 vd->vdev_open_error = vdev_open(vd); in vdev_open_child()
1870 vd->vdev_open_thread = NULL; in vdev_open_child()
1877 if (zvol_is_zvol(vd->vdev_path)) in vdev_uses_zvols()
1881 for (int c = 0; c < vd->vdev_children; c++) in vdev_uses_zvols()
1882 if (vdev_uses_zvols(vd->vdev_child[c])) in vdev_uses_zvols()
1906 int children = vd->vdev_children; in vdev_open_children_impl()
1910 vd->vdev_nonrot = B_TRUE; in vdev_open_children_impl()
1913 vdev_t *cvd = vd->vdev_child[c]; in vdev_open_children_impl()
1919 cvd->vdev_open_error = vdev_open(cvd); in vdev_open_children_impl()
1925 vd->vdev_nonrot &= cvd->vdev_nonrot; in vdev_open_children_impl()
1953 * Compute the raidz-deflation ratio. Note, we hard-code 128k (1 << 17)
1956 * account for existing bp's. We also hard-code txg 0 for the same reason
1963 if (vd == vd->vdev_top && !vd->vdev_ishole && vd->vdev_ashift != 0) { in vdev_set_deflate_ratio()
1964 vd->vdev_deflate_ratio = (1 << 17) / in vdev_set_deflate_ratio()
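
The hard-coded 128K probe makes the arithmetic easy to check by hand. For a plain disk or mirror top-level vdev, asize equals psize, so the ratio is 131072 / (131072 >> 9) = 512; RAID-Z returns a larger asize for the same psize and hence a smaller ratio. A worked sketch:

#include <stdio.h>
#include <stdint.h>

#define SPA_MINBLOCKSHIFT	9	/* 512-byte units */

int main(void)
{
	uint64_t psize = 1ULL << 17;	/* the hard-coded 128K probe */
	uint64_t asize = psize;		/* assumed 1:1, i.e. not RAID-Z */
	printf("deflate_ratio = %llu\n",	/* prints 512 */
	    (unsigned long long)(psize / (asize >> SPA_MINBLOCKSHIFT)));
	return (0);
}
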
1997 ASSERT(vd == vd->vdev_top); in vdev_ashift_optimize()
1999 if (vd->vdev_ashift < vd->vdev_physical_ashift && in vdev_ashift_optimize()
2000 vd->vdev_physical_ashift <= zfs_vdev_max_auto_ashift) { in vdev_ashift_optimize()
2001 vd->vdev_ashift = MIN( in vdev_ashift_optimize()
2002 MAX(zfs_vdev_max_auto_ashift, vd->vdev_ashift), in vdev_ashift_optimize()
2004 vd->vdev_physical_ashift)); in vdev_ashift_optimize()
2008 * we ensure that the top-level vdev's ashift is not smaller in vdev_ashift_optimize()
2016 vd->vdev_ashift = MAX(zfs_vdev_min_auto_ashift, in vdev_ashift_optimize()
2017 vd->vdev_ashift); in vdev_ashift_optimize()
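
For a 512e drive (logical sector 512 bytes, physical 4 KiB) and default-looking tunables, the clamp above lands on the physical sector size. A standalone rendition of that expression with illustrative tunable values (the constants are assumptions):

#include <stdio.h>
#include <stdint.h>

#define MAX(a, b)	((a) > (b) ? (a) : (b))
#define MIN(a, b)	((a) < (b) ? (a) : (b))

int main(void)
{
	uint64_t min_auto = 9, max_auto = 14;	/* assumed tunables */
	uint64_t ashift = 9;			/* logical: 512 bytes */
	uint64_t physical = 12;			/* physical: 4 KiB */

	if (ashift < physical && physical <= max_auto)
		ashift = MIN(MAX(max_auto, ashift),
		    MAX(min_auto, physical));
	printf("ashift = %llu\n", (unsigned long long)ashift); /* 12 */
	return (0);
}
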
2027 spa_t *spa = vd->vdev_spa; in vdev_open()
2035 ASSERT(vd->vdev_open_thread == curthread || in vdev_open()
2037 ASSERT(vd->vdev_state == VDEV_STATE_CLOSED || in vdev_open()
2038 vd->vdev_state == VDEV_STATE_CANT_OPEN || in vdev_open()
2039 vd->vdev_state == VDEV_STATE_OFFLINE); in vdev_open()
2041 vd->vdev_stat.vs_aux = VDEV_AUX_NONE; in vdev_open()
2042 vd->vdev_cant_read = B_FALSE; in vdev_open()
2043 vd->vdev_cant_write = B_FALSE; in vdev_open()
2044 vd->vdev_fault_wanted = B_FALSE; in vdev_open()
2045 vd->vdev_remove_wanted = B_FALSE; in vdev_open()
2046 vd->vdev_min_asize = vdev_get_min_asize(vd); in vdev_open()
2052 if (!vd->vdev_removed && vd->vdev_faulted) { in vdev_open()
2053 ASSERT(vd->vdev_children == 0); in vdev_open()
2054 ASSERT(vd->vdev_label_aux == VDEV_AUX_ERR_EXCEEDED || in vdev_open()
2055 vd->vdev_label_aux == VDEV_AUX_EXTERNAL); in vdev_open()
2057 vd->vdev_label_aux); in vdev_open()
2059 } else if (vd->vdev_offline) { in vdev_open()
2060 ASSERT(vd->vdev_children == 0); in vdev_open()
2065 error = vd->vdev_ops->vdev_op_open(vd, &osize, &max_osize, in vdev_open()
2069 if (error == ENOENT && vd->vdev_removed) { in vdev_open()
2090 vd->vdev_reopening = B_FALSE; in vdev_open()
2095 if (vd->vdev_removed && in vdev_open()
2096 vd->vdev_stat.vs_aux != VDEV_AUX_OPEN_FAILED) in vdev_open()
2097 vd->vdev_removed = B_FALSE; in vdev_open()
2099 if (vd->vdev_stat.vs_aux == VDEV_AUX_CHILDREN_OFFLINE) { in vdev_open()
2101 vd->vdev_stat.vs_aux); in vdev_open()
2104 vd->vdev_stat.vs_aux); in vdev_open()
2109 vd->vdev_removed = B_FALSE; in vdev_open()
2115 if (vd->vdev_faulted) { in vdev_open()
2116 ASSERT(vd->vdev_children == 0); in vdev_open()
2117 ASSERT(vd->vdev_label_aux == VDEV_AUX_ERR_EXCEEDED || in vdev_open()
2118 vd->vdev_label_aux == VDEV_AUX_EXTERNAL); in vdev_open()
2120 vd->vdev_label_aux); in vdev_open()
2124 if (vd->vdev_degraded) { in vdev_open()
2125 ASSERT(vd->vdev_children == 0); in vdev_open()
2135 if (vd->vdev_ishole || vd->vdev_ops == &vdev_missing_ops) in vdev_open()
2138 for (int c = 0; c < vd->vdev_children; c++) { in vdev_open()
2139 if (vd->vdev_child[c]->vdev_state != VDEV_STATE_HEALTHY) { in vdev_open()
2149 if (vd->vdev_children == 0) { in vdev_open()
2156 asize = osize - (VDEV_LABEL_START_SIZE + VDEV_LABEL_END_SIZE); in vdev_open()
2157 max_asize = max_osize - (VDEV_LABEL_START_SIZE + in vdev_open()
2160 if (vd->vdev_parent != NULL && osize < SPA_MINDEVSIZE - in vdev_open()
2172 * If the vdev was expanded, record this so that we can re-create the in vdev_open()
2175 if ((psize > vd->vdev_psize) && (vd->vdev_psize != 0)) in vdev_open()
2176 vd->vdev_copy_uberblocks = B_TRUE; in vdev_open()
2178 vd->vdev_psize = psize; in vdev_open()
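
The usable asize computed above excludes the on-disk labels: two 256K labels plus the boot region at the front, and two more labels at the end (sizes per the vdev label layout; treated as assumptions in this sketch). For example:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t label = 256ULL << 10;			/* one vdev label */
	uint64_t start = 2 * label + (3584ULL << 10);	/* labels + boot: 4 MiB */
	uint64_t end = 2 * label;			/* trailing labels: 512 KiB */
	uint64_t osize = 100ULL << 30;			/* a 100 GiB device */

	printf("asize = %llu bytes\n",
	    (unsigned long long)(osize - (start + end)));
	return (0);
}
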
2183 if (asize < vd->vdev_min_asize) { in vdev_open()
2196 vd->vdev_physical_ashift = in vdev_open()
2197 MAX(physical_ashift, vd->vdev_physical_ashift); in vdev_open()
2198 vd->vdev_logical_ashift = MAX(logical_ashift, in vdev_open()
2199 vd->vdev_logical_ashift); in vdev_open()
2201 if (vd->vdev_asize == 0) { in vdev_open()
2203 * This is the first-ever open, so use the computed values. in vdev_open()
2206 vd->vdev_asize = asize; in vdev_open()
2207 vd->vdev_max_asize = max_asize; in vdev_open()
2214 if (vd->vdev_ashift < vd->vdev_logical_ashift) { in vdev_open()
2215 vd->vdev_ashift = vd->vdev_logical_ashift; in vdev_open()
2217 if (vd->vdev_logical_ashift > ASHIFT_MAX) { in vdev_open()
2223 if (vd->vdev_top == vd && vd->vdev_attaching == B_FALSE) in vdev_open()
2225 vd->vdev_attaching = B_FALSE; in vdev_open()
2227 if (vd->vdev_ashift != 0 && (vd->vdev_ashift < ASHIFT_MIN || in vdev_open()
2228 vd->vdev_ashift > ASHIFT_MAX)) { in vdev_open()
2237 if (vd->vdev_ashift > vd->vdev_top->vdev_ashift && in vdev_open()
2238 vd->vdev_ops->vdev_op_leaf) { in vdev_open()
2246 vd->vdev_max_asize = max_asize; in vdev_open()
2253 * making the additional space available. in vdev_open()
2261 if (vd->vdev_state == VDEV_STATE_HEALTHY && in vdev_open()
2262 ((asize > vd->vdev_asize && in vdev_open()
2263 (vd->vdev_expanding || spa->spa_autoexpand)) || in vdev_open()
2264 (asize < vd->vdev_asize))) in vdev_open()
2265 vd->vdev_asize = asize; in vdev_open()
2273 if (vd->vdev_ops->vdev_op_leaf && in vdev_open()
2283 if (vd->vdev_top == vd && vd->vdev_ashift != 0 && in vdev_open()
2284 vd->vdev_islog == 0 && vd->vdev_aux == NULL) { in vdev_open()
2294 if (vd->vdev_ops->vdev_op_leaf && !spa->spa_scrub_reopen) in vdev_open()
2295 dsl_scan_assess_vdev(spa->spa_dsl_pool, vd); in vdev_open()
2305 vd->vdev_validate_thread = curthread; in vdev_validate_child()
2306 vd->vdev_validate_error = vdev_validate(vd); in vdev_validate_child()
2307 vd->vdev_validate_thread = NULL; in vdev_validate_child()
2323 spa_t *spa = vd->vdev_spa; in vdev_validate()
2330 int children = vd->vdev_children; in vdev_validate()
2341 vdev_t *cvd = vd->vdev_child[c]; in vdev_validate()
2355 int error = vd->vdev_child[c]->vdev_validate_error; in vdev_validate()
2367 if (!vd->vdev_ops->vdev_op_leaf || !vdev_readable(vd)) in vdev_validate()
2376 if (spa->spa_extreme_rewind || spa_last_synced_txg(spa) == 0 || in vdev_validate()
2414 * necessary because if the machine crashed during a re-guid the new in vdev_validate()
2419 if (spa->spa_trust_config && guid != spa_guid(spa)) { in vdev_validate()
2454 * If this vdev just became a top-level vdev because its sibling was in vdev_validate()
2455 * detached, it will have adopted the parent's vdev guid -- but the in vdev_validate()
2457 * of the label will have the same top guid, so if we're a top-level in vdev_validate()
2460 * after the detach, a top-level vdev will appear as a non top-level in vdev_validate()
2468 if (vd->vdev_guid != guid && vd->vdev_guid != aux_guid) { in vdev_validate()
2470 if (spa->spa_trust_config && !spa->spa_extreme_rewind) { in vdev_validate()
2471 if (vd != vd->vdev_top || vd->vdev_guid != top_guid) in vdev_validate()
2474 if (vd->vdev_guid != top_guid && in vdev_validate()
2475 vd->vdev_top->vdev_guid != guid) in vdev_validate()
2486 (u_longlong_t)vd->vdev_guid, in vdev_validate()
2487 (u_longlong_t)vd->vdev_top->vdev_guid); in vdev_validate()
2511 if (!(spa->spa_import_flags & ZFS_IMPORT_VERBATIM) && in vdev_validate()
2515 "for spa %s", (u_longlong_t)state, spa->spa_name); in vdev_validate()
2524 if (vd->vdev_not_present) in vdev_validate()
2525 vd->vdev_not_present = 0; in vdev_validate()
2553 vdev_update_path("vdev_path", svd->vdev_path, &dvd->vdev_path, in vdev_copy_path_impl()
2554 dvd->vdev_guid); in vdev_copy_path_impl()
2556 vdev_update_path("vdev_devid", svd->vdev_devid, &dvd->vdev_devid, in vdev_copy_path_impl()
2557 dvd->vdev_guid); in vdev_copy_path_impl()
2559 vdev_update_path("vdev_physpath", svd->vdev_physpath, in vdev_copy_path_impl()
2560 &dvd->vdev_physpath, dvd->vdev_guid); in vdev_copy_path_impl()
2565 old = dvd->vdev_enc_sysfs_path; in vdev_copy_path_impl()
2566 new = svd->vdev_enc_sysfs_path; in vdev_copy_path_impl()
2571 "changed from '%s' to '%s'", (u_longlong_t)dvd->vdev_guid, in vdev_copy_path_impl()
2574 if (dvd->vdev_enc_sysfs_path) in vdev_copy_path_impl()
2575 spa_strfree(dvd->vdev_enc_sysfs_path); in vdev_copy_path_impl()
2577 if (svd->vdev_enc_sysfs_path) { in vdev_copy_path_impl()
2578 dvd->vdev_enc_sysfs_path = spa_strdup( in vdev_copy_path_impl()
2579 svd->vdev_enc_sysfs_path); in vdev_copy_path_impl()
2581 dvd->vdev_enc_sysfs_path = NULL; in vdev_copy_path_impl()
2594 if ((svd->vdev_ops == &vdev_missing_ops) || in vdev_copy_path_strict()
2595 (svd->vdev_ishole && dvd->vdev_ishole) || in vdev_copy_path_strict()
2596 (dvd->vdev_ops == &vdev_indirect_ops)) in vdev_copy_path_strict()
2599 if (svd->vdev_ops != dvd->vdev_ops) { in vdev_copy_path_strict()
2601 svd->vdev_ops->vdev_op_type, dvd->vdev_ops->vdev_op_type); in vdev_copy_path_strict()
2605 if (svd->vdev_guid != dvd->vdev_guid) { in vdev_copy_path_strict()
2607 "%llu)", (u_longlong_t)svd->vdev_guid, in vdev_copy_path_strict()
2608 (u_longlong_t)dvd->vdev_guid); in vdev_copy_path_strict()
2612 if (svd->vdev_children != dvd->vdev_children) { in vdev_copy_path_strict()
2614 "%llu != %llu", (u_longlong_t)svd->vdev_children, in vdev_copy_path_strict()
2615 (u_longlong_t)dvd->vdev_children); in vdev_copy_path_strict()
2619 for (uint64_t i = 0; i < svd->vdev_children; i++) { in vdev_copy_path_strict()
2620 int error = vdev_copy_path_strict(svd->vdev_child[i], in vdev_copy_path_strict()
2621 dvd->vdev_child[i]); in vdev_copy_path_strict()
2626 if (svd->vdev_ops->vdev_op_leaf) in vdev_copy_path_strict()
2635 ASSERT(stvd->vdev_top == stvd); in vdev_copy_path_search()
2636 ASSERT3U(stvd->vdev_id, ==, dvd->vdev_top->vdev_id); in vdev_copy_path_search()
2638 for (uint64_t i = 0; i < dvd->vdev_children; i++) { in vdev_copy_path_search()
2639 vdev_copy_path_search(stvd, dvd->vdev_child[i]); in vdev_copy_path_search()
2642 if (!dvd->vdev_ops->vdev_op_leaf || !vdev_is_concrete(dvd)) in vdev_copy_path_search()
2650 vdev_t *vd = vdev_lookup_by_guid(stvd, dvd->vdev_guid); in vdev_copy_path_search()
2652 if (vd == NULL || vd->vdev_ops != dvd->vdev_ops) in vdev_copy_path_search()
2655 ASSERT(vd->vdev_ops->vdev_op_leaf); in vdev_copy_path_search()
2669 uint64_t children = MIN(srvd->vdev_children, drvd->vdev_children); in vdev_copy_path_relaxed()
2670 ASSERT(srvd->vdev_ops == &vdev_root_ops); in vdev_copy_path_relaxed()
2671 ASSERT(drvd->vdev_ops == &vdev_root_ops); in vdev_copy_path_relaxed()
2674 vdev_copy_path_search(srvd->vdev_child[i], in vdev_copy_path_relaxed()
2675 drvd->vdev_child[i]); in vdev_copy_path_relaxed()
2685 vdev_t *pvd = vd->vdev_parent; in vdev_close()
2686 spa_t *spa __maybe_unused = vd->vdev_spa; in vdev_close()
2689 ASSERT(vd->vdev_open_thread == curthread || in vdev_close()
2696 if (pvd != NULL && pvd->vdev_reopening) in vdev_close()
2697 vd->vdev_reopening = (pvd->vdev_reopening && !vd->vdev_offline); in vdev_close()
2699 vd->vdev_ops->vdev_op_close(vd); in vdev_close()
2706 vd->vdev_prevstate = vd->vdev_state; in vdev_close()
2708 if (vd->vdev_offline) in vdev_close()
2709 vd->vdev_state = VDEV_STATE_OFFLINE; in vdev_close()
2711 vd->vdev_state = VDEV_STATE_CLOSED; in vdev_close()
2712 vd->vdev_stat.vs_aux = VDEV_AUX_NONE; in vdev_close()
2718 spa_t *spa = vd->vdev_spa; in vdev_hold()
2721 if (spa->spa_state == POOL_STATE_UNINITIALIZED) in vdev_hold()
2724 for (int c = 0; c < vd->vdev_children; c++) in vdev_hold()
2725 vdev_hold(vd->vdev_child[c]); in vdev_hold()
2727 if (vd->vdev_ops->vdev_op_leaf && vd->vdev_ops->vdev_op_hold != NULL) in vdev_hold()
2728 vd->vdev_ops->vdev_op_hold(vd); in vdev_hold()
2734 ASSERT(spa_is_root(vd->vdev_spa)); in vdev_rele()
2735 for (int c = 0; c < vd->vdev_children; c++) in vdev_rele()
2736 vdev_rele(vd->vdev_child[c]); in vdev_rele()
2738 if (vd->vdev_ops->vdev_op_leaf && vd->vdev_ops->vdev_op_rele != NULL) in vdev_rele()
2739 vd->vdev_ops->vdev_op_rele(vd); in vdev_rele()
2751 spa_t *spa = vd->vdev_spa; in vdev_reopen()
2756 vd->vdev_reopening = !vd->vdev_offline; in vdev_reopen()
2765 if (vd->vdev_aux) { in vdev_reopen()
2768 vd->vdev_aux == &spa->spa_l2cache) { in vdev_reopen()
2790 if (!vdev_resilver_needed(spa->spa_root_vdev, NULL, NULL) && in vdev_reopen()
2791 spa->spa_async_tasks & SPA_ASYNC_RESILVER) { in vdev_reopen()
2792 mutex_enter(&spa->spa_async_lock); in vdev_reopen()
2793 spa->spa_async_tasks &= ~SPA_ASYNC_RESILVER; in vdev_reopen()
2794 mutex_exit(&spa->spa_async_lock); in vdev_reopen()
2815 if (error || vd->vdev_state != VDEV_STATE_HEALTHY) { in vdev_create()
2836 uint64_t asize = vd->vdev_asize; in vdev_metaslab_set_size()
2870 * --------------|----------------- in vdev_metaslab_set_size()
2872 *  8GB   - 100GB   one per 512MB in vdev_metaslab_set_size()
2873 *  100GB - 3TB     ~200 in vdev_metaslab_set_size()
2874 *  3TB   - 2PB     one per 16GB in vdev_metaslab_set_size()
2876 * -------------------------------- in vdev_metaslab_set_size()
2879 * number of metaslabs. Expanding a top-level vdev will result in vdev_metaslab_set_size()
2880 * in additional metaslabs being allocated making it possible in vdev_metaslab_set_size()
2900 vd->vdev_ms_shift = ms_shift; in vdev_metaslab_set_size()
2901 ASSERT3U(vd->vdev_ms_shift, >=, SPA_MAXBLOCKSHIFT); in vdev_metaslab_set_size()
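
A simplified rendition of the sizing policy in the table above: grow the metaslab size until the count drops to the target. The constants are assumptions standing in for the tunables this file defines:

#include <stdio.h>
#include <stdint.h>

static uint64_t
ms_shift_for(uint64_t asize)
{
	uint64_t target = 200;	/* assumed default metaslab count */
	uint64_t shift = 29;	/* assumed 512 MiB starting size */

	while ((asize >> (shift + 1)) >= target)
		shift++;
	return (shift);
}

int main(void)
{
	/* A 1 TiB vdev: 256 metaslabs of 4 GiB, near the ~200 target;
	 * an 8 GiB vdev stays at 512 MiB, i.e. 16 metaslabs. */
	uint64_t shift = ms_shift_for(1ULL << 40);
	printf("ms_shift = %llu, count = %llu\n",
	    (unsigned long long)shift,
	    (unsigned long long)((1ULL << 40) >> shift));
	return (0);
}
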
2907 ASSERT(vd == vd->vdev_top); in vdev_dirty()
2911 ASSERT(spa_writeable(vd->vdev_spa)); in vdev_dirty()
2914 (void) txg_list_add(&vd->vdev_ms_list, arg, txg); in vdev_dirty()
2917 (void) txg_list_add(&vd->vdev_dtl_list, arg, txg); in vdev_dirty()
2919 (void) txg_list_add(&vd->vdev_spa->spa_vdev_txg_list, vd, txg); in vdev_dirty()
2925 for (int c = 0; c < vd->vdev_children; c++) in vdev_dirty_leaves()
2926 vdev_dirty_leaves(vd->vdev_child[c], flags, txg); in vdev_dirty_leaves()
2928 if (vd->vdev_ops->vdev_op_leaf) in vdev_dirty_leaves()
2929 vdev_dirty(vd->vdev_top, flags, vd, txg); in vdev_dirty_leaves()
2955 * For interior vdevs such as mirror and RAID-Z the picture is more complex.
2961 * double-parity RAID-Z can tolerate up to two missing devices (maxfaults == 2);
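
In the reassessment below, an interior vdev's DTL_MISSING is the set of txgs missed by at least minref children: every child for a mirror, nparity + 1 for RAID-Z. A toy rendition of that reference-count union (hypothetical data, not the range-tree machinery the source uses):

#include <stdio.h>

int main(void)
{
	/* Three-way mirror; 1 marks a txg a child would have to resilver. */
	int missing[3][6] = {
		{ 0, 1, 1, 0, 0, 1 },
		{ 0, 1, 0, 0, 1, 1 },
		{ 0, 1, 0, 1, 0, 0 },
	};
	int minref = 3;		/* mirror: all children must miss it */

	for (int txg = 0; txg < 6; txg++) {
		int refs = 0;
		for (int c = 0; c < 3; c++)
			refs += missing[c][txg];
		if (refs >= minref)	/* only txg 1 qualifies */
			printf("txg %d in DTL_MISSING\n", txg);
	}
	return (0);
}
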
2973 zfs_range_tree_t *rt = vd->vdev_dtl[t]; in vdev_dtl_dirty()
2976 ASSERT(vd != vd->vdev_spa->spa_root_vdev); in vdev_dtl_dirty()
2977 ASSERT(spa_writeable(vd->vdev_spa)); in vdev_dtl_dirty()
2979 mutex_enter(&vd->vdev_dtl_lock); in vdev_dtl_dirty()
2982 mutex_exit(&vd->vdev_dtl_lock); in vdev_dtl_dirty()
2988 zfs_range_tree_t *rt = vd->vdev_dtl[t]; in vdev_dtl_contains()
2992 ASSERT(vd != vd->vdev_spa->spa_root_vdev); in vdev_dtl_contains()
3002 mutex_enter(&vd->vdev_dtl_lock); in vdev_dtl_contains()
3005 mutex_exit(&vd->vdev_dtl_lock); in vdev_dtl_contains()
3013 zfs_range_tree_t *rt = vd->vdev_dtl[t]; in vdev_dtl_empty()
3016 mutex_enter(&vd->vdev_dtl_lock); in vdev_dtl_empty()
3018 mutex_exit(&vd->vdev_dtl_lock); in vdev_dtl_empty()
3047 ASSERT(vd != vd->vdev_spa->spa_root_vdev); in vdev_dtl_need_resilver()
3049 if (vd->vdev_ops->vdev_op_need_resilver == NULL || in vdev_dtl_need_resilver()
3050 vd->vdev_ops->vdev_op_leaf) in vdev_dtl_need_resilver()
3053 return (vd->vdev_ops->vdev_op_need_resilver(vd, dva, psize, in vdev_dtl_need_resilver()
3063 ASSERT(MUTEX_HELD(&vd->vdev_dtl_lock)); in vdev_dtl_min()
3064 ASSERT3U(zfs_range_tree_space(vd->vdev_dtl[DTL_MISSING]), !=, 0); in vdev_dtl_min()
3065 ASSERT0(vd->vdev_children); in vdev_dtl_min()
3067 return (zfs_range_tree_min(vd->vdev_dtl[DTL_MISSING]) - 1); in vdev_dtl_min()
3076 ASSERT(MUTEX_HELD(&vd->vdev_dtl_lock)); in vdev_dtl_max()
3077 ASSERT3U(zfs_range_tree_space(vd->vdev_dtl[DTL_MISSING]), !=, 0); in vdev_dtl_max()
3078 ASSERT0(vd->vdev_children); in vdev_dtl_max()
3080 return (zfs_range_tree_max(vd->vdev_dtl[DTL_MISSING])); in vdev_dtl_max()
3094 ASSERT0(vd->vdev_children); in vdev_dtl_should_excise()
3096 if (vd->vdev_state < VDEV_STATE_DEGRADED) in vdev_dtl_should_excise()
3099 if (vd->vdev_resilver_deferred) in vdev_dtl_should_excise()
3102 if (zfs_range_tree_is_empty(vd->vdev_dtl[DTL_MISSING])) in vdev_dtl_should_excise()
3106 vdev_rebuild_t *vr = &vd->vdev_top->vdev_rebuild_config; in vdev_dtl_should_excise()
3107 vdev_rebuild_phys_t *vrp = &vr->vr_rebuild_phys; in vdev_dtl_should_excise()
3110 if (vd->vdev_rebuild_txg == 0) in vdev_dtl_should_excise()
3118 if (vrp->vrp_rebuild_state == VDEV_REBUILD_COMPLETE && in vdev_dtl_should_excise()
3119 vdev_dtl_max(vd) <= vrp->vrp_max_txg) { in vdev_dtl_should_excise()
3120 ASSERT3U(vrp->vrp_min_txg, <=, vdev_dtl_min(vd)); in vdev_dtl_should_excise()
3121 ASSERT3U(vrp->vrp_min_txg, <, vd->vdev_rebuild_txg); in vdev_dtl_should_excise()
3122 ASSERT3U(vd->vdev_rebuild_txg, <=, vrp->vrp_max_txg); in vdev_dtl_should_excise()
3126 dsl_scan_t *scn = vd->vdev_spa->spa_dsl_pool->dp_scan; in vdev_dtl_should_excise()
3127 dsl_scan_phys_t *scnp __maybe_unused = &scn->scn_phys; in vdev_dtl_should_excise()
3130 if (vd->vdev_resilver_txg == 0) in vdev_dtl_should_excise()
3140 if (vdev_dtl_max(vd) <= scn->scn_phys.scn_max_txg) { in vdev_dtl_should_excise()
3141 ASSERT3U(scnp->scn_min_txg, <=, vdev_dtl_min(vd)); in vdev_dtl_should_excise()
3142 ASSERT3U(scnp->scn_min_txg, <, vd->vdev_resilver_txg); in vdev_dtl_should_excise()
3143 ASSERT3U(vd->vdev_resilver_txg, <=, scnp->scn_max_txg); in vdev_dtl_should_excise()
3159 spa_t *spa = vd->vdev_spa; in vdev_dtl_reassess_impl()
3165 for (int c = 0; c < vd->vdev_children; c++) in vdev_dtl_reassess_impl()
3166 vdev_dtl_reassess_impl(vd->vdev_child[c], txg, in vdev_dtl_reassess_impl()
3169 if (vd == spa->spa_root_vdev || !vdev_is_concrete(vd) || vd->vdev_aux) in vdev_dtl_reassess_impl()
3172 if (vd->vdev_ops->vdev_op_leaf) { in vdev_dtl_reassess_impl()
3173 dsl_scan_t *scn = spa->spa_dsl_pool->dp_scan; in vdev_dtl_reassess_impl()
3174 vdev_rebuild_t *vr = &vd->vdev_top->vdev_rebuild_config; in vdev_dtl_reassess_impl()
3178 mutex_enter(&vd->vdev_dtl_lock); in vdev_dtl_reassess_impl()
3185 scn->scn_phys.scn_errors = 0; in vdev_dtl_reassess_impl()
3187 vr->vr_rebuild_phys.vrp_errors = 0; in vdev_dtl_reassess_impl()
3191 !zfs_range_tree_is_empty(vd->vdev_dtl[DTL_MISSING])) { in vdev_dtl_reassess_impl()
3195 (u_longlong_t)vd->vdev_guid, (u_longlong_t)txg, in vdev_dtl_reassess_impl()
3196 (u_longlong_t)scrub_txg, spa->spa_scrub_started, in vdev_dtl_reassess_impl()
3199 (u_longlong_t)(scn ? scn->scn_phys.scn_errors : 0)); in vdev_dtl_reassess_impl()
3209 vr != NULL && vr->vr_rebuild_phys.vrp_errors == 0) { in vdev_dtl_reassess_impl()
3212 if (spa->spa_scrub_started || in vdev_dtl_reassess_impl()
3213 (scn != NULL && scn->scn_phys.scn_errors == 0)) { in vdev_dtl_reassess_impl()
3225 * leave the dtl as-is if there was an error. in vdev_dtl_reassess_impl()
3229 * tree and then add a segment with refcnt -1 that in vdev_dtl_reassess_impl()
3231 * that each txg in that range has refcnt -1 or 0. in vdev_dtl_reassess_impl()
3234 * positive refcnt -- either 1 or 2. We then convert in vdev_dtl_reassess_impl()
3239 vd->vdev_dtl[DTL_MISSING], 1); in vdev_dtl_reassess_impl()
3240 space_reftree_add_seg(&reftree, 0, scrub_txg, -1); in vdev_dtl_reassess_impl()
3242 vd->vdev_dtl[DTL_SCRUB], 2); in vdev_dtl_reassess_impl()
3244 vd->vdev_dtl[DTL_MISSING], 1); in vdev_dtl_reassess_impl()
3248 vd->vdev_dtl[DTL_MISSING])) { in vdev_dtl_reassess_impl()
3256 zfs_range_tree_vacate(vd->vdev_dtl[DTL_PARTIAL], NULL, NULL); in vdev_dtl_reassess_impl()
3257 zfs_range_tree_walk(vd->vdev_dtl[DTL_MISSING], in vdev_dtl_reassess_impl()
3258 zfs_range_tree_add, vd->vdev_dtl[DTL_PARTIAL]); in vdev_dtl_reassess_impl()
3260 zfs_range_tree_vacate(vd->vdev_dtl[DTL_SCRUB], NULL, in vdev_dtl_reassess_impl()
3262 zfs_range_tree_vacate(vd->vdev_dtl[DTL_OUTAGE], NULL, NULL); in vdev_dtl_reassess_impl()
3271 (faulting && vd->vdev_parent != NULL && in vdev_dtl_reassess_impl()
3272 vd->vdev_parent->vdev_ops == &vdev_replacing_ops)) { in vdev_dtl_reassess_impl()
3273 zfs_range_tree_add(vd->vdev_dtl[DTL_OUTAGE], 0, -1ULL); in vdev_dtl_reassess_impl()
3275 zfs_range_tree_walk(vd->vdev_dtl[DTL_MISSING], in vdev_dtl_reassess_impl()
3276 zfs_range_tree_add, vd->vdev_dtl[DTL_OUTAGE]); in vdev_dtl_reassess_impl()
3285 zfs_range_tree_is_empty(vd->vdev_dtl[DTL_MISSING]) && in vdev_dtl_reassess_impl()
3286 zfs_range_tree_is_empty(vd->vdev_dtl[DTL_OUTAGE])) { in vdev_dtl_reassess_impl()
3287 if (vd->vdev_rebuild_txg != 0) { in vdev_dtl_reassess_impl()
3288 vd->vdev_rebuild_txg = 0; in vdev_dtl_reassess_impl()
3289 vdev_config_dirty(vd->vdev_top); in vdev_dtl_reassess_impl()
3290 } else if (vd->vdev_resilver_txg != 0) { in vdev_dtl_reassess_impl()
3291 vd->vdev_resilver_txg = 0; in vdev_dtl_reassess_impl()
3292 vdev_config_dirty(vd->vdev_top); in vdev_dtl_reassess_impl()
3296 mutex_exit(&vd->vdev_dtl_lock); in vdev_dtl_reassess_impl()
3299 vdev_dirty(vd->vdev_top, VDD_DTL, vd, txg); in vdev_dtl_reassess_impl()
3301 mutex_enter(&vd->vdev_dtl_lock); in vdev_dtl_reassess_impl()
3310 /* i.e. non-zero */ in vdev_dtl_reassess_impl()
3317 minref = vd->vdev_children; in vdev_dtl_reassess_impl()
3320 for (int c = 0; c < vd->vdev_children; c++) { in vdev_dtl_reassess_impl()
3321 vdev_t *cvd = vd->vdev_child[c]; in vdev_dtl_reassess_impl()
3322 mutex_enter(&cvd->vdev_dtl_lock); in vdev_dtl_reassess_impl()
3324 cvd->vdev_dtl[s], 1); in vdev_dtl_reassess_impl()
3325 mutex_exit(&cvd->vdev_dtl_lock); in vdev_dtl_reassess_impl()
3328 vd->vdev_dtl[t], minref); in vdev_dtl_reassess_impl()
3331 mutex_exit(&vd->vdev_dtl_lock); in vdev_dtl_reassess_impl()
3334 if (vd->vdev_top->vdev_ops == &vdev_raidz_ops) { in vdev_dtl_reassess_impl()
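/*
 * Editor's sketch: the space_reftree segments above implement excision
 * by weighted counting.  Every txg accumulates the weights of the
 * segments covering it, and only txgs whose total reaches the minref
 * of the final generate step (1 here) survive into the new
 * DTL_MISSING.  A scalar model of a single txg, with assumed names:
 */
static int
dtl_txg_refcnt(boolean_t missing, boolean_t below_scrub_txg,
    boolean_t scrub_damaged)
{
	int ref = 0;

	if (missing)
		ref += 1;	/* DTL_MISSING segment, weight +1 */
	if (below_scrub_txg)
		ref -= 1;	/* [0, scrub_txg) segment, weight -1 */
	if (scrub_damaged)
		ref += 2;	/* DTL_SCRUB segment, weight +2 */

	/*
	 * A repaired txg nets 1 - 1 = 0 and is excised; a txg the
	 * scrub itself could not repair nets 1 - 1 + 2 = 2 and stays.
	 */
	return (ref);
}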
3353 if (vd->vdev_ops->vdev_op_kobj_evt_post && in vdev_post_kobj_evt()
3354 vd->vdev_kobj_flag == B_FALSE) { in vdev_post_kobj_evt()
3355 vd->vdev_kobj_flag = B_TRUE; in vdev_post_kobj_evt()
3356 vd->vdev_ops->vdev_op_kobj_evt_post(vd); in vdev_post_kobj_evt()
3359 for (int c = 0; c < vd->vdev_children; c++) in vdev_post_kobj_evt()
3360 vdev_post_kobj_evt(vd->vdev_child[c]); in vdev_post_kobj_evt()
3369 vd->vdev_kobj_flag = B_FALSE; in vdev_clear_kobj_evt()
3371 for (int c = 0; c < vd->vdev_children; c++) in vdev_clear_kobj_evt()
3372 vdev_clear_kobj_evt(vd->vdev_child[c]); in vdev_clear_kobj_evt()
3378 spa_t *spa = vd->vdev_spa; in vdev_dtl_load()
3379 objset_t *mos = spa->spa_meta_objset; in vdev_dtl_load()
3383 if (vd->vdev_ops->vdev_op_leaf && vd->vdev_dtl_object != 0) { in vdev_dtl_load()
3389 if (spa->spa_mode == SPA_MODE_READ && !spa->spa_read_spacemaps) in vdev_dtl_load()
3392 error = space_map_open(&vd->vdev_dtl_sm, mos, in vdev_dtl_load()
3393 vd->vdev_dtl_object, 0, -1ULL, 0); in vdev_dtl_load()
3396 ASSERT(vd->vdev_dtl_sm != NULL); in vdev_dtl_load()
3399 error = space_map_load(vd->vdev_dtl_sm, rt, SM_ALLOC); in vdev_dtl_load()
3401 mutex_enter(&vd->vdev_dtl_lock); in vdev_dtl_load()
3403 vd->vdev_dtl[DTL_MISSING]); in vdev_dtl_load()
3404 mutex_exit(&vd->vdev_dtl_lock); in vdev_dtl_load()
3413 for (int c = 0; c < vd->vdev_children; c++) { in vdev_dtl_load()
3414 error = vdev_dtl_load(vd->vdev_child[c]); in vdev_dtl_load()
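/*
 * Editor's note: the load path above is the inverse of vdev_dtl_sync()
 * below -- missing txg ranges are persisted as SM_ALLOC records in a
 * per-leaf space map, so "allocated" here means "txg still missing",
 * not disk space.  DTL queries then reduce to one-txg range lookups;
 * a hypothetical helper:
 */
static boolean_t
dtl_contains_txg(zfs_range_tree_t *missing, uint64_t txg)
{
	/* A single txg is the range [txg, txg + 1). */
	return (zfs_range_tree_contains(missing, txg, 1));
}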
3425 spa_t *spa = vd->vdev_spa; in vdev_zap_allocation_data()
3426 objset_t *mos = spa->spa_meta_objset; in vdev_zap_allocation_data()
3427 vdev_alloc_bias_t alloc_bias = vd->vdev_alloc_bias; in vdev_zap_allocation_data()
3438 VERIFY0(zap_add(mos, vd->vdev_top_zap, VDEV_TOP_ZAP_ALLOCATION_BIAS, in vdev_zap_allocation_data()
3449 spa_t *spa = vd->vdev_spa; in vdev_destroy_unlink_zap()
3451 VERIFY0(zap_destroy(spa->spa_meta_objset, zapobj, tx)); in vdev_destroy_unlink_zap()
3452 VERIFY0(zap_remove_int(spa->spa_meta_objset, spa->spa_all_vdev_zaps, in vdev_destroy_unlink_zap()
3459 spa_t *spa = vd->vdev_spa; in vdev_create_link_zap()
3460 uint64_t zap = zap_create(spa->spa_meta_objset, DMU_OTN_ZAP_METADATA, in vdev_create_link_zap()
3464 VERIFY0(zap_add_int(spa->spa_meta_objset, spa->spa_all_vdev_zaps, in vdev_create_link_zap()
3473 if (vd->vdev_ops != &vdev_hole_ops && in vdev_construct_zaps()
3474 vd->vdev_ops != &vdev_missing_ops && in vdev_construct_zaps()
3475 vd->vdev_ops != &vdev_root_ops && in vdev_construct_zaps()
3476 !vd->vdev_top->vdev_removing) { in vdev_construct_zaps()
3477 if (vd->vdev_ops->vdev_op_leaf && vd->vdev_leaf_zap == 0) { in vdev_construct_zaps()
3478 vd->vdev_leaf_zap = vdev_create_link_zap(vd, tx); in vdev_construct_zaps()
3480 if (vd == vd->vdev_top && vd->vdev_top_zap == 0) { in vdev_construct_zaps()
3481 vd->vdev_top_zap = vdev_create_link_zap(vd, tx); in vdev_construct_zaps()
3482 if (vd->vdev_alloc_bias != VDEV_BIAS_NONE) in vdev_construct_zaps()
3486 if (vd->vdev_ops == &vdev_root_ops && vd->vdev_root_zap == 0 && in vdev_construct_zaps()
3487 spa_feature_is_enabled(vd->vdev_spa, SPA_FEATURE_AVZ_V2)) { in vdev_construct_zaps()
3488 if (!spa_feature_is_active(vd->vdev_spa, SPA_FEATURE_AVZ_V2)) in vdev_construct_zaps()
3489 spa_feature_incr(vd->vdev_spa, SPA_FEATURE_AVZ_V2, tx); in vdev_construct_zaps()
3490 vd->vdev_root_zap = vdev_create_link_zap(vd, tx); in vdev_construct_zaps()
3493 for (uint64_t i = 0; i < vd->vdev_children; i++) { in vdev_construct_zaps()
3494 vdev_construct_zaps(vd->vdev_child[i], tx); in vdev_construct_zaps()
3501 spa_t *spa = vd->vdev_spa; in vdev_dtl_sync()
3502 zfs_range_tree_t *rt = vd->vdev_dtl[DTL_MISSING]; in vdev_dtl_sync()
3503 objset_t *mos = spa->spa_meta_objset; in vdev_dtl_sync()
3506 uint64_t object = space_map_object(vd->vdev_dtl_sm); in vdev_dtl_sync()
3509 ASSERT(vd->vdev_ops->vdev_op_leaf); in vdev_dtl_sync()
3511 tx = dmu_tx_create_assigned(spa->spa_dsl_pool, txg); in vdev_dtl_sync()
3513 if (vd->vdev_detached || vd->vdev_top->vdev_removing) { in vdev_dtl_sync()
3514 mutex_enter(&vd->vdev_dtl_lock); in vdev_dtl_sync()
3515 space_map_free(vd->vdev_dtl_sm, tx); in vdev_dtl_sync()
3516 space_map_close(vd->vdev_dtl_sm); in vdev_dtl_sync()
3517 vd->vdev_dtl_sm = NULL; in vdev_dtl_sync()
3518 mutex_exit(&vd->vdev_dtl_lock); in vdev_dtl_sync()
3525 if (vd->vdev_leaf_zap != 0 && (vd->vdev_detached || in vdev_dtl_sync()
3526 vd->vdev_top->vdev_islog)) { in vdev_dtl_sync()
3527 vdev_destroy_unlink_zap(vd, vd->vdev_leaf_zap, tx); in vdev_dtl_sync()
3528 vd->vdev_leaf_zap = 0; in vdev_dtl_sync()
3535 if (vd->vdev_dtl_sm == NULL) { in vdev_dtl_sync()
3541 VERIFY0(space_map_open(&vd->vdev_dtl_sm, mos, new_object, in vdev_dtl_sync()
3542 0, -1ULL, 0)); in vdev_dtl_sync()
3543 ASSERT(vd->vdev_dtl_sm != NULL); in vdev_dtl_sync()
3548 mutex_enter(&vd->vdev_dtl_lock); in vdev_dtl_sync()
3550 mutex_exit(&vd->vdev_dtl_lock); in vdev_dtl_sync()
3552 space_map_truncate(vd->vdev_dtl_sm, zfs_vdev_dtl_sm_blksz, tx); in vdev_dtl_sync()
3553 space_map_write(vd->vdev_dtl_sm, rtsync, SM_ALLOC, SM_NO_VDEVID, tx); in vdev_dtl_sync()
3562 if (object != space_map_object(vd->vdev_dtl_sm)) { in vdev_dtl_sync()
3566 (u_longlong_t)space_map_object(vd->vdev_dtl_sm)); in vdev_dtl_sync()
3567 vdev_config_dirty(vd->vdev_top); in vdev_dtl_sync()
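/*
 * Editor's sketch: note that vdev_dtl_sync() above writes through a
 * private copy (rtsync).  The snapshot of DTL_MISSING is taken under
 * vdev_dtl_lock, but the space map I/O runs after the lock is
 * dropped, so the live tree may keep changing during the write.
 * Assumed ordering, compressed from the lines above:
 *
 *	mutex_enter(&vd->vdev_dtl_lock);
 *	... copy rt into rtsync ...
 *	mutex_exit(&vd->vdev_dtl_lock);
 *	space_map_truncate(vd->vdev_dtl_sm, zfs_vdev_dtl_sm_blksz, tx);
 *	space_map_write(vd->vdev_dtl_sm, rtsync, SM_ALLOC, SM_NO_VDEVID, tx);
 */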
3575 * - offlined
3576 * - detached
3577 * - removed
3578 * - faulted
3584 spa_t *spa = vd->vdev_spa; in vdev_dtl_required()
3585 vdev_t *tvd = vd->vdev_top; in vdev_dtl_required()
3586 uint8_t cant_read = vd->vdev_cant_read; in vdev_dtl_required()
3588 boolean_t faulting = vd->vdev_state == VDEV_STATE_FAULTED; in vdev_dtl_required()
3592 if (vd == spa->spa_root_vdev || vd == tvd) in vdev_dtl_required()
3597 * whether this results in any DTL outages in the top-level vdev. in vdev_dtl_required()
3600 vd->vdev_cant_read = B_TRUE; in vdev_dtl_required()
3603 vd->vdev_cant_read = cant_read; in vdev_dtl_required()
3624 if (vd->vdev_children == 0) { in vdev_resilver_needed()
3625 mutex_enter(&vd->vdev_dtl_lock); in vdev_resilver_needed()
3626 if (!zfs_range_tree_is_empty(vd->vdev_dtl[DTL_MISSING]) && in vdev_resilver_needed()
3633 mutex_exit(&vd->vdev_dtl_lock); in vdev_resilver_needed()
3635 for (int c = 0; c < vd->vdev_children; c++) { in vdev_resilver_needed()
3636 vdev_t *cvd = vd->vdev_child[c]; in vdev_resilver_needed()
3662 ASSERT0(spa_config_held(vd->vdev_spa, SCL_ALL, RW_WRITER)); in vdev_checkpoint_sm_object()
3664 if (vd->vdev_top_zap == 0) { in vdev_checkpoint_sm_object()
3669 int error = zap_lookup(spa_meta_objset(vd->vdev_spa), vd->vdev_top_zap, in vdev_checkpoint_sm_object()
3682 int children = vd->vdev_children; in vdev_load()
3688 * slow part is metaslab_init, and that only happens for top-level in vdev_load()
3691 if (vd->vdev_ops == &vdev_root_ops && vd->vdev_children > 0) { in vdev_load()
3699 for (int c = 0; c < vd->vdev_children; c++) { in vdev_load()
3700 vdev_t *cvd = vd->vdev_child[c]; in vdev_load()
3703 cvd->vdev_load_error = vdev_load(cvd); in vdev_load()
3715 for (int c = 0; c < vd->vdev_children; c++) { in vdev_load()
3716 int error = vd->vdev_child[c]->vdev_load_error; in vdev_load()
3724 if (vd->vdev_ops == &vdev_raidz_ops) { in vdev_load()
3733 if (vd == vd->vdev_top && vd->vdev_top_zap != 0) { in vdev_load()
3734 spa_t *spa = vd->vdev_spa; in vdev_load()
3737 error = zap_lookup(spa->spa_meta_objset, vd->vdev_top_zap, in vdev_load()
3741 ASSERT(vd->vdev_alloc_bias == VDEV_BIAS_NONE); in vdev_load()
3742 vd->vdev_alloc_bias = vdev_derive_alloc_bias(bias_str); in vdev_load()
3748 (u_longlong_t)vd->vdev_top_zap, error); in vdev_load()
3753 if (vd == vd->vdev_top && vd->vdev_top_zap != 0) { in vdev_load()
3754 spa_t *spa = vd->vdev_spa; in vdev_load()
3757 error = zap_lookup(spa->spa_meta_objset, vd->vdev_top_zap, in vdev_load()
3761 vd->vdev_failfast = failfast & 1; in vdev_load()
3763 vd->vdev_failfast = vdev_prop_default_numeric( in vdev_load()
3769 (u_longlong_t)vd->vdev_top_zap, error); in vdev_load()
3774 * Load any rebuild state from the top-level vdev zap. in vdev_load()
3776 if (vd == vd->vdev_top && vd->vdev_top_zap != 0) { in vdev_load()
3787 if (vd->vdev_top_zap != 0 || vd->vdev_leaf_zap != 0) { in vdev_load()
3790 if (vd->vdev_top_zap != 0) in vdev_load()
3791 zapobj = vd->vdev_top_zap; in vdev_load()
3793 zapobj = vd->vdev_leaf_zap; in vdev_load()
3796 &vd->vdev_checksum_n); in vdev_load()
3802 &vd->vdev_checksum_t); in vdev_load()
3808 &vd->vdev_io_n); in vdev_load()
3814 &vd->vdev_io_t); in vdev_load()
3820 &vd->vdev_slow_io_n); in vdev_load()
3826 &vd->vdev_slow_io_t); in vdev_load()
3833 * If this is a top-level vdev, initialize its metaslabs. in vdev_load()
3835 if (vd == vd->vdev_top && vdev_is_concrete(vd)) { in vdev_load()
3838 if (vd->vdev_ashift == 0 || vd->vdev_asize == 0) { in vdev_load()
3842 "asize=%llu", (u_longlong_t)vd->vdev_ashift, in vdev_load()
3843 (u_longlong_t)vd->vdev_asize); in vdev_load()
3859 objset_t *mos = spa_meta_objset(vd->vdev_spa); in vdev_load()
3860 ASSERT(vd->vdev_asize != 0); in vdev_load()
3861 ASSERT3P(vd->vdev_checkpoint_sm, ==, NULL); in vdev_load()
3863 error = space_map_open(&vd->vdev_checkpoint_sm, in vdev_load()
3864 mos, checkpoint_sm_obj, 0, vd->vdev_asize, in vdev_load()
3865 vd->vdev_ashift); in vdev_load()
3873 ASSERT3P(vd->vdev_checkpoint_sm, !=, NULL); in vdev_load()
3881 vd->vdev_stat.vs_checkpoint_space = in vdev_load()
3882 -space_map_allocated(vd->vdev_checkpoint_sm); in vdev_load()
3883 vd->vdev_spa->spa_checkpoint_info.sci_dspace += in vdev_load()
3884 vd->vdev_stat.vs_checkpoint_space; in vdev_load()
3896 if (vd->vdev_ops->vdev_op_leaf && (error = vdev_dtl_load(vd)) != 0) { in vdev_load()
3907 objset_t *mos = vd->vdev_spa->spa_meta_objset; in vdev_load()
3908 ASSERT(vd->vdev_asize != 0); in vdev_load()
3909 ASSERT3P(vd->vdev_obsolete_sm, ==, NULL); in vdev_load()
3911 if ((error = space_map_open(&vd->vdev_obsolete_sm, mos, in vdev_load()
3912 obsolete_sm_object, 0, vd->vdev_asize, 0))) { in vdev_load()
3946 if ((label = vdev_label_read_config(vd, -1ULL)) == NULL) { in vdev_validate_aux()
3949 return (-1); in vdev_validate_aux()
3955 guid != vd->vdev_guid || in vdev_validate_aux()
3960 return (-1); in vdev_validate_aux()
3974 objset_t *mos = spa_meta_objset(vd->vdev_spa); in vdev_destroy_ms_flush_data()
3976 if (vd->vdev_top_zap == 0) in vdev_destroy_ms_flush_data()
3980 int err = zap_lookup(mos, vd->vdev_top_zap, in vdev_destroy_ms_flush_data()
3987 VERIFY0(zap_remove(mos, vd->vdev_top_zap, in vdev_destroy_ms_flush_data()
3998 if (vd->vdev_ms_array == 0) in vdev_destroy_spacemaps()
4001 objset_t *mos = vd->vdev_spa->spa_meta_objset; in vdev_destroy_spacemaps()
4002 uint64_t array_count = vd->vdev_asize >> vd->vdev_ms_shift; in vdev_destroy_spacemaps()
4005 VERIFY0(dmu_read(mos, vd->vdev_ms_array, 0, in vdev_destroy_spacemaps()
4017 VERIFY0(dmu_object_free(mos, vd->vdev_ms_array, tx)); in vdev_destroy_spacemaps()
4019 vd->vdev_ms_array = 0; in vdev_destroy_spacemaps()
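/*
 * Editor's note: the array length above is pure geometry -- one
 * uint64_t object ID per metaslab, so nothing but asize and ms_shift
 * is needed.  Worked example with assumed numbers: a 1 TiB top-level
 * vdev with vdev_ms_shift = 34 (16 GiB metaslabs) gives
 * 2^40 >> 34 = 64 entries.
 */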
4025 spa_t *spa = vd->vdev_spa; in vdev_remove_empty_log()
4027 ASSERT(vd->vdev_islog); in vdev_remove_empty_log()
4028 ASSERT(vd == vd->vdev_top); in vdev_remove_empty_log()
4034 if (vd->vdev_top_zap != 0) { in vdev_remove_empty_log()
4035 vdev_destroy_unlink_zap(vd, vd->vdev_top_zap, tx); in vdev_remove_empty_log()
4036 vd->vdev_top_zap = 0; in vdev_remove_empty_log()
4046 boolean_t reassess = !txg_list_empty(&vd->vdev_ms_list, TXG_CLEAN(txg)); in vdev_sync_done()
4050 while ((msp = txg_list_remove(&vd->vdev_ms_list, TXG_CLEAN(txg))) in vdev_sync_done()
4055 metaslab_sync_reassess(vd->vdev_mg); in vdev_sync_done()
4056 if (vd->vdev_log_mg != NULL) in vdev_sync_done()
4057 metaslab_sync_reassess(vd->vdev_log_mg); in vdev_sync_done()
4064 spa_t *spa = vd->vdev_spa; in vdev_sync()
4068 ASSERT3U(txg, ==, spa->spa_syncing_txg); in vdev_sync()
4069 dmu_tx_t *tx = dmu_tx_create_assigned(spa->spa_dsl_pool, txg); in vdev_sync()
4070 if (zfs_range_tree_space(vd->vdev_obsolete_segments) > 0) { in vdev_sync()
4071 ASSERT(vd->vdev_removing || in vdev_sync()
4072 vd->vdev_ops == &vdev_indirect_ops); in vdev_sync()
4080 if (vd->vdev_ops == &vdev_indirect_ops) { in vdev_sync()
4081 ASSERT(txg_list_empty(&vd->vdev_ms_list, txg)); in vdev_sync()
4082 ASSERT(txg_list_empty(&vd->vdev_dtl_list, txg)); in vdev_sync()
4090 if (vd->vdev_ms_array == 0 && vd->vdev_ms_shift != 0 && in vdev_sync()
4091 !vd->vdev_removing) { in vdev_sync()
4092 ASSERT(vd == vd->vdev_top); in vdev_sync()
4093 ASSERT0(vd->vdev_indirect_config.vic_mapping_object); in vdev_sync()
4094 vd->vdev_ms_array = dmu_object_alloc(spa->spa_meta_objset, in vdev_sync()
4096 ASSERT(vd->vdev_ms_array != 0); in vdev_sync()
4100 while ((msp = txg_list_remove(&vd->vdev_ms_list, txg)) != NULL) { in vdev_sync()
4102 (void) txg_list_add(&vd->vdev_ms_list, msp, TXG_CLEAN(txg)); in vdev_sync()
4105 while ((lvd = txg_list_remove(&vd->vdev_dtl_list, txg)) != NULL) in vdev_sync()
4112 if (vd->vdev_islog && vd->vdev_stat.vs_alloc == 0 && vd->vdev_removing) in vdev_sync()
4115 (void) txg_list_add(&spa->spa_vdev_txg_list, vd, TXG_CLEAN(txg)); in vdev_sync()
4128 return (vd->vdev_ops->vdev_op_asize(vd, psize, txg)); in vdev_psize_to_asize_txg()
4151 if (!vd->vdev_ops->vdev_op_leaf) in vdev_fault()
4154 tvd = vd->vdev_top; in vdev_fault()
4157  * If the user did a 'zpool offline -f' then make the fault persist across in vdev_fault()
4175 vd->vdev_stat.vs_aux = VDEV_AUX_EXTERNAL; in vdev_fault()
4176 vd->vdev_tmpoffline = B_FALSE; in vdev_fault()
4179 vd->vdev_tmpoffline = B_TRUE; in vdev_fault()
4187 vd->vdev_label_aux = aux; in vdev_fault()
4192 vd->vdev_delayed_close = B_FALSE; in vdev_fault()
4193 vd->vdev_faulted = 1ULL; in vdev_fault()
4194 vd->vdev_degraded = 0ULL; in vdev_fault()
4201 if (!tvd->vdev_islog && vd->vdev_aux == NULL && vdev_dtl_required(vd)) { in vdev_fault()
4202 vd->vdev_degraded = 1ULL; in vdev_fault()
4203 vd->vdev_faulted = 0ULL; in vdev_fault()
4233 if (!vd->vdev_ops->vdev_op_leaf) in vdev_degrade()
4239 if (vd->vdev_faulted || vd->vdev_degraded) in vdev_degrade()
4242 vd->vdev_degraded = 1ULL; in vdev_degrade()
4264 if (vd->vdev_removed || vd->vdev_expanding) in vdev_remove_wanted()
4270 if (vd->vdev_ops->vdev_op_leaf && !zio_wait(vdev_probe(vd, NULL))) in vdev_remove_wanted()
4273 vd->vdev_remove_wanted = B_TRUE; in vdev_remove_wanted()
4291 vdev_t *vd, *tvd, *pvd, *rvd = spa->spa_root_vdev; in vdev_online()
4300 wasoffline = (vd->vdev_offline || vd->vdev_tmpoffline); in vdev_online()
4301 oldstate = vd->vdev_state; in vdev_online()
4303 tvd = vd->vdev_top; in vdev_online()
4304 vd->vdev_offline = B_FALSE; in vdev_online()
4305 vd->vdev_tmpoffline = B_FALSE; in vdev_online()
4306 vd->vdev_checkremove = !!(flags & ZFS_ONLINE_CHECKREMOVE); in vdev_online()
4307 vd->vdev_forcefault = !!(flags & ZFS_ONLINE_FORCEFAULT); in vdev_online()
4309 /* XXX - L2ARC 1.0 does not support expansion */ in vdev_online()
4310 if (!vd->vdev_aux) { in vdev_online()
4311 for (pvd = vd; pvd != rvd; pvd = pvd->vdev_parent) in vdev_online()
4312 pvd->vdev_expanding = !!((flags & ZFS_ONLINE_EXPAND) || in vdev_online()
4313 spa->spa_autoexpand); in vdev_online()
4314 vd->vdev_expansion_time = gethrestime_sec(); in vdev_online()
4318 vd->vdev_checkremove = vd->vdev_forcefault = B_FALSE; in vdev_online()
4320 if (!vd->vdev_aux) { in vdev_online()
4321 for (pvd = vd; pvd != rvd; pvd = pvd->vdev_parent) in vdev_online()
4322 pvd->vdev_expanding = B_FALSE; in vdev_online()
4326 *newstate = vd->vdev_state; in vdev_online()
4328 !vdev_is_dead(vd) && vd->vdev_parent && in vdev_online()
4329 vd->vdev_parent->vdev_ops == &vdev_spare_ops && in vdev_online()
4330 vd->vdev_parent->vdev_child[0] == vd) in vdev_online()
4331 vd->vdev_unspare = B_TRUE; in vdev_online()
4333 if ((flags & ZFS_ONLINE_EXPAND) || spa->spa_autoexpand) { in vdev_online()
4335 /* XXX - L2ARC 1.0 does not support expansion */ in vdev_online()
4336 if (vd->vdev_aux) in vdev_online()
4338 spa->spa_ccw_fail_time = 0; in vdev_online()
4343 mutex_enter(&vd->vdev_initialize_lock); in vdev_online()
4345 vd->vdev_initialize_thread == NULL && in vdev_online()
4346 vd->vdev_initialize_state == VDEV_INITIALIZE_ACTIVE) { in vdev_online()
4349 mutex_exit(&vd->vdev_initialize_lock); in vdev_online()
4357 mutex_enter(&vd->vdev_trim_lock); in vdev_online()
4358 if (vdev_writeable(vd) && !vd->vdev_isl2cache && in vdev_online()
4359 vd->vdev_trim_thread == NULL && in vdev_online()
4360 vd->vdev_trim_state == VDEV_TRIM_ACTIVE) { in vdev_online()
4361 (void) vdev_trim(vd, vd->vdev_trim_rate, vd->vdev_trim_partial, in vdev_online()
4362 vd->vdev_trim_secure); in vdev_online()
4364 mutex_exit(&vd->vdev_trim_lock); in vdev_online()
4368 vd->vdev_state >= VDEV_STATE_DEGRADED)) { in vdev_online()
4375 if (vd->vdev_unspare && in vdev_online()
4376 !dsl_scan_resilvering(spa->spa_dsl_pool) && in vdev_online()
4377 !dsl_scan_resilver_scheduled(spa->spa_dsl_pool) && in vdev_online()
4398 if (!vd->vdev_ops->vdev_op_leaf) in vdev_offline_locked()
4401 if (vd->vdev_ops == &vdev_draid_spare_ops) in vdev_offline_locked()
4404 tvd = vd->vdev_top; in vdev_offline_locked()
4405 mg = tvd->vdev_mg; in vdev_offline_locked()
4406 generation = spa->spa_config_generation + 1; in vdev_offline_locked()
4411 if (!vd->vdev_offline) { in vdev_offline_locked()
4417 if (!tvd->vdev_islog && vd->vdev_aux == NULL && in vdev_offline_locked()
4423 * If the top-level is a slog and it has had allocations in vdev_offline_locked()
4428 if (tvd->vdev_islog && mg != NULL) { in vdev_offline_locked()
4432 ASSERT3P(tvd->vdev_log_mg, ==, NULL); in vdev_offline_locked()
4443 tvd->vdev_checkpoint_sm != NULL) { in vdev_offline_locked()
4445 tvd->vdev_checkpoint_sm), !=, 0); in vdev_offline_locked()
4454 if (error || generation != spa->spa_config_generation) { in vdev_offline_locked()
4462 ASSERT0(tvd->vdev_stat.vs_alloc); in vdev_offline_locked()
4466 * Offline this device and reopen its top-level vdev. in vdev_offline_locked()
4467 * If the top-level vdev is a log device then just offline in vdev_offline_locked()
4468 * it. Otherwise, if this action results in the top-level in vdev_offline_locked()
4471 vd->vdev_offline = B_TRUE; in vdev_offline_locked()
4474 if (!tvd->vdev_islog && vd->vdev_aux == NULL && in vdev_offline_locked()
4476 vd->vdev_offline = B_FALSE; in vdev_offline_locked()
4486 if (tvd->vdev_islog && mg != NULL) in vdev_offline_locked()
4490 vd->vdev_tmpoffline = !!(flags & ZFS_OFFLINE_TEMPORARY); in vdev_offline_locked()
4500 mutex_enter(&spa->spa_vdev_top_lock); in vdev_offline()
4502 mutex_exit(&spa->spa_vdev_top_lock); in vdev_offline()
4515 vdev_t *rvd = spa->spa_root_vdev; in vdev_clear()
4522 vd->vdev_stat.vs_read_errors = 0; in vdev_clear()
4523 vd->vdev_stat.vs_write_errors = 0; in vdev_clear()
4524 vd->vdev_stat.vs_checksum_errors = 0; in vdev_clear()
4525 vd->vdev_stat.vs_dio_verify_errors = 0; in vdev_clear()
4526 vd->vdev_stat.vs_slow_ios = 0; in vdev_clear()
4528 for (int c = 0; c < vd->vdev_children; c++) in vdev_clear()
4529 vdev_clear(spa, vd->vdev_child[c]); in vdev_clear()
4534 if (!vdev_is_concrete(vd) || vd->vdev_removed) in vdev_clear()
4543 if (vd->vdev_faulted || vd->vdev_degraded || in vdev_clear()
4550 vd->vdev_forcefault = B_TRUE; in vdev_clear()
4552 vd->vdev_faulted = vd->vdev_degraded = 0ULL; in vdev_clear()
4553 vd->vdev_cant_read = B_FALSE; in vdev_clear()
4554 vd->vdev_cant_write = B_FALSE; in vdev_clear()
4555 vd->vdev_stat.vs_aux = 0; in vdev_clear()
4557 vdev_reopen(vd == rvd ? rvd : vd->vdev_top); in vdev_clear()
4559 vd->vdev_forcefault = B_FALSE; in vdev_clear()
4561 if (vd != rvd && vdev_writeable(vd->vdev_top)) in vdev_clear()
4562 vdev_state_dirty(vd->vdev_top); in vdev_clear()
4565 if (vd->vdev_aux == NULL && !vdev_is_dead(vd) && in vdev_clear()
4566 !dsl_scan_resilvering(spa->spa_dsl_pool) && in vdev_clear()
4567 !dsl_scan_resilver_scheduled(spa->spa_dsl_pool)) in vdev_clear()
4574 * When clearing a FMA-diagnosed fault, we always want to in vdev_clear()
4578 if (!vdev_is_dead(vd) && vd->vdev_parent != NULL && in vdev_clear()
4579 vd->vdev_parent->vdev_ops == &vdev_spare_ops && in vdev_clear()
4580 vd->vdev_parent->vdev_child[0] == vd) in vdev_clear()
4581 vd->vdev_unspare = B_TRUE; in vdev_clear()
4597 return (vd->vdev_state < VDEV_STATE_DEGRADED || in vdev_is_dead()
4598 vd->vdev_ops == &vdev_hole_ops || in vdev_is_dead()
4599 vd->vdev_ops == &vdev_missing_ops); in vdev_is_dead()
4605 return (!vdev_is_dead(vd) && !vd->vdev_cant_read); in vdev_readable()
4611 return (!vdev_is_dead(vd) && !vd->vdev_cant_write && in vdev_writeable()
4618 uint64_t state = vd->vdev_state; in vdev_allocatable()
4629 !vd->vdev_cant_write && vdev_is_concrete(vd) && in vdev_allocatable()
4630 vd->vdev_mg->mg_initialized); in vdev_allocatable()
4636 ASSERT(zio->io_vd == vd); in vdev_accessible()
4638 if (vdev_is_dead(vd) || vd->vdev_remove_wanted) in vdev_accessible()
4641 if (zio->io_type == ZIO_TYPE_READ) in vdev_accessible()
4642 return (!vd->vdev_cant_read); in vdev_accessible()
4644 if (zio->io_type == ZIO_TYPE_WRITE) in vdev_accessible()
4645 return (!vd->vdev_cant_write); in vdev_accessible()
4657 if (cvd->vdev_ops == &vdev_draid_spare_ops) in vdev_get_child_stat()
4661 vs->vs_ops[t] += cvs->vs_ops[t]; in vdev_get_child_stat()
4662 vs->vs_bytes[t] += cvs->vs_bytes[t]; in vdev_get_child_stat()
4665 cvs->vs_scan_removing = cvd->vdev_removing; in vdev_get_child_stat()
4678 for (b = 0; b < ARRAY_SIZE(vsx->vsx_disk_histo[0]); b++) in vdev_get_child_stat_ex()
4679 vsx->vsx_disk_histo[t][b] += cvsx->vsx_disk_histo[t][b]; in vdev_get_child_stat_ex()
4681 for (b = 0; b < ARRAY_SIZE(vsx->vsx_total_histo[0]); b++) { in vdev_get_child_stat_ex()
4682 vsx->vsx_total_histo[t][b] += in vdev_get_child_stat_ex()
4683 cvsx->vsx_total_histo[t][b]; in vdev_get_child_stat_ex()
4688 for (b = 0; b < ARRAY_SIZE(vsx->vsx_queue_histo[0]); b++) { in vdev_get_child_stat_ex()
4689 vsx->vsx_queue_histo[t][b] += in vdev_get_child_stat_ex()
4690 cvsx->vsx_queue_histo[t][b]; in vdev_get_child_stat_ex()
4692 vsx->vsx_active_queue[t] += cvsx->vsx_active_queue[t]; in vdev_get_child_stat_ex()
4693 vsx->vsx_pend_queue[t] += cvsx->vsx_pend_queue[t]; in vdev_get_child_stat_ex()
4695 for (b = 0; b < ARRAY_SIZE(vsx->vsx_ind_histo[0]); b++) in vdev_get_child_stat_ex()
4696 vsx->vsx_ind_histo[t][b] += cvsx->vsx_ind_histo[t][b]; in vdev_get_child_stat_ex()
4698 for (b = 0; b < ARRAY_SIZE(vsx->vsx_agg_histo[0]); b++) in vdev_get_child_stat_ex()
4699 vsx->vsx_agg_histo[t][b] += cvsx->vsx_agg_histo[t][b]; in vdev_get_child_stat_ex()
4707 if (spa_feature_is_active(vd->vdev_spa, SPA_FEATURE_SPACEMAP_V2)) in vdev_is_spacemap_addressable()
4711  * If double-word space map entries are not enabled, we assume in vdev_is_spacemap_addressable()
4717 uint64_t shift = vd->vdev_ashift + SM_OFFSET_BITS; in vdev_is_spacemap_addressable()
4722 return (vd->vdev_asize < (1ULL << shift)); in vdev_is_spacemap_addressable()
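/*
 * Editor's sketch of the arithmetic above, assuming SM_OFFSET_BITS is
 * 47 (its historical value): a single-word space map entry stores its
 * offset in 47 bits of ashift-sized sectors, so the limit is
 * 2^(ashift + 47) bytes.  As a standalone check:
 */
static boolean_t
sm_single_word_addressable(uint64_t asize, uint64_t ashift)
{
	/* ashift = 9 (512-byte sectors): limit is 2^56 bytes = 64 PiB. */
	return (asize < (1ULL << (ashift + 47)));
}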
4734 * over all top-level vdevs (i.e. the direct children of the root). in vdev_get_stats_ex_impl()
4736 if (!vd->vdev_ops->vdev_op_leaf) { in vdev_get_stats_ex_impl()
4738 memset(vs->vs_ops, 0, sizeof (vs->vs_ops)); in vdev_get_stats_ex_impl()
4739 memset(vs->vs_bytes, 0, sizeof (vs->vs_bytes)); in vdev_get_stats_ex_impl()
4744 for (int c = 0; c < vd->vdev_children; c++) { in vdev_get_stats_ex_impl()
4745 vdev_t *cvd = vd->vdev_child[c]; in vdev_get_stats_ex_impl()
4746 vdev_stat_t *cvs = &cvd->vdev_stat; in vdev_get_stats_ex_impl()
4747 vdev_stat_ex_t *cvsx = &cvd->vdev_stat_ex; in vdev_get_stats_ex_impl()
4763 memcpy(vsx, &vd->vdev_stat_ex, sizeof (vd->vdev_stat_ex)); in vdev_get_stats_ex_impl()
4766 vsx->vsx_active_queue[t] = vd->vdev_queue.vq_cactive[t]; in vdev_get_stats_ex_impl()
4767 vsx->vsx_pend_queue[t] = vdev_queue_class_length(vd, t); in vdev_get_stats_ex_impl()
4775 vdev_t *tvd = vd->vdev_top; in vdev_get_stats_ex()
4776 mutex_enter(&vd->vdev_stat_lock); in vdev_get_stats_ex()
4778 memcpy(vs, &vd->vdev_stat, sizeof (*vs)); in vdev_get_stats_ex()
4779 vs->vs_timestamp = gethrtime() - vs->vs_timestamp; in vdev_get_stats_ex()
4780 vs->vs_state = vd->vdev_state; in vdev_get_stats_ex()
4781 vs->vs_rsize = vdev_get_min_asize(vd); in vdev_get_stats_ex()
4783 if (vd->vdev_ops->vdev_op_leaf) { in vdev_get_stats_ex()
4784 vs->vs_pspace = vd->vdev_psize; in vdev_get_stats_ex()
4785 vs->vs_rsize += VDEV_LABEL_START_SIZE + in vdev_get_stats_ex()
4792 vs->vs_initialize_bytes_done = in vdev_get_stats_ex()
4793 vd->vdev_initialize_bytes_done; in vdev_get_stats_ex()
4794 vs->vs_initialize_bytes_est = in vdev_get_stats_ex()
4795 vd->vdev_initialize_bytes_est; in vdev_get_stats_ex()
4796 vs->vs_initialize_state = vd->vdev_initialize_state; in vdev_get_stats_ex()
4797 vs->vs_initialize_action_time = in vdev_get_stats_ex()
4798 vd->vdev_initialize_action_time; in vdev_get_stats_ex()
4805 vs->vs_trim_notsup = !vd->vdev_has_trim; in vdev_get_stats_ex()
4806 vs->vs_trim_bytes_done = vd->vdev_trim_bytes_done; in vdev_get_stats_ex()
4807 vs->vs_trim_bytes_est = vd->vdev_trim_bytes_est; in vdev_get_stats_ex()
4808 vs->vs_trim_state = vd->vdev_trim_state; in vdev_get_stats_ex()
4809 vs->vs_trim_action_time = vd->vdev_trim_action_time; in vdev_get_stats_ex()
4812 vs->vs_resilver_deferred = vd->vdev_resilver_deferred; in vdev_get_stats_ex()
4816 * Report expandable space on top-level, non-auxiliary devices in vdev_get_stats_ex()
4821 if (vd->vdev_aux == NULL && tvd != NULL) { in vdev_get_stats_ex()
4822 vs->vs_esize = P2ALIGN_TYPED( in vdev_get_stats_ex()
4823 vd->vdev_max_asize - vd->vdev_asize, in vdev_get_stats_ex()
4824 1ULL << tvd->vdev_ms_shift, uint64_t); in vdev_get_stats_ex()
4827 vs->vs_configured_ashift = vd->vdev_top != NULL in vdev_get_stats_ex()
4828 ? vd->vdev_top->vdev_ashift : vd->vdev_ashift; in vdev_get_stats_ex()
4829 vs->vs_logical_ashift = vd->vdev_logical_ashift; in vdev_get_stats_ex()
4830 if (vd->vdev_physical_ashift <= ASHIFT_MAX) in vdev_get_stats_ex()
4831 vs->vs_physical_ashift = vd->vdev_physical_ashift; in vdev_get_stats_ex()
4833 vs->vs_physical_ashift = 0; in vdev_get_stats_ex()
4836 * Report fragmentation and rebuild progress for top-level, in vdev_get_stats_ex()
4837 * non-auxiliary, concrete devices. in vdev_get_stats_ex()
4839 if (vd->vdev_aux == NULL && vd == vd->vdev_top && in vdev_get_stats_ex()
4847 vs->vs_fragmentation = (vd->vdev_mg != NULL) ? in vdev_get_stats_ex()
4848 vd->vdev_mg->mg_fragmentation : 0; in vdev_get_stats_ex()
4850 vs->vs_noalloc = MAX(vd->vdev_noalloc, in vdev_get_stats_ex()
4851 tvd ? tvd->vdev_noalloc : 0); in vdev_get_stats_ex()
4855 mutex_exit(&vd->vdev_stat_lock); in vdev_get_stats_ex()
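/*
 * Editor's sketch: vs_esize above rounds growth down to whole
 * metaslabs, because a top-level vdev can only expand in
 * metaslab-sized units.  Hypothetical standalone form of that
 * P2ALIGN:
 */
static uint64_t
expandable_space(uint64_t max_asize, uint64_t asize, uint64_t ms_shift)
{
	/* e.g. 25 GiB of raw growth with 16 GiB metaslabs reports 16 GiB */
	return ((max_asize - asize) & ~((1ULL << ms_shift) - 1));
}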
4867 mutex_enter(&vd->vdev_stat_lock); in vdev_clear_stats()
4868 vd->vdev_stat.vs_space = 0; in vdev_clear_stats()
4869 vd->vdev_stat.vs_dspace = 0; in vdev_clear_stats()
4870 vd->vdev_stat.vs_alloc = 0; in vdev_clear_stats()
4871 mutex_exit(&vd->vdev_stat_lock); in vdev_clear_stats()
4877 vdev_stat_t *vs = &vd->vdev_stat; in vdev_scan_stat_init()
4879 for (int c = 0; c < vd->vdev_children; c++) in vdev_scan_stat_init()
4880 vdev_scan_stat_init(vd->vdev_child[c]); in vdev_scan_stat_init()
4882 mutex_enter(&vd->vdev_stat_lock); in vdev_scan_stat_init()
4883 vs->vs_scan_processed = 0; in vdev_scan_stat_init()
4884 mutex_exit(&vd->vdev_stat_lock); in vdev_scan_stat_init()
4890 spa_t *spa = zio->io_spa; in vdev_stat_update()
4891 vdev_t *rvd = spa->spa_root_vdev; in vdev_stat_update()
4892 vdev_t *vd = zio->io_vd ? zio->io_vd : rvd; in vdev_stat_update()
4894 uint64_t txg = zio->io_txg; in vdev_stat_update()
4897 vdev_stat_t *vs = vd ? &vd->vdev_stat : NULL; in vdev_stat_update()
4898 vdev_stat_ex_t *vsx = vd ? &vd->vdev_stat_ex : NULL; in vdev_stat_update()
4900 vdev_stat_t *vs = &vd->vdev_stat; in vdev_stat_update()
4901 vdev_stat_ex_t *vsx = &vd->vdev_stat_ex; in vdev_stat_update()
4903 zio_type_t type = zio->io_type; in vdev_stat_update()
4904 int flags = zio->io_flags; in vdev_stat_update()
4909 if (zio->io_gang_tree) in vdev_stat_update()
4912 if (zio->io_error == 0) { in vdev_stat_update()
4914 * If this is a root i/o, don't count it -- we've already in vdev_stat_update()
4915 * counted the top-level vdevs, and vdev_get_stats() will in vdev_stat_update()
4925 * one top-level vdev does not imply a root-level error. in vdev_stat_update()
4930 ASSERT(vd == zio->io_vd); in vdev_stat_update()
4935 mutex_enter(&vd->vdev_stat_lock); in vdev_stat_update()
4943 dsl_scan_t *scn = spa->spa_dsl_pool->dp_scan; in vdev_stat_update()
4944 dsl_scan_phys_t *scn_phys = &scn->scn_phys; in vdev_stat_update()
4945 uint64_t *processed = &scn_phys->scn_processed; in vdev_stat_update()
4947 if (vd->vdev_ops->vdev_op_leaf) in vdev_stat_update()
4949 vs->vs_scan_processed += psize; in vdev_stat_update()
4958 if (zio->io_priority == ZIO_PRIORITY_REBUILD) { in vdev_stat_update()
4959 vdev_t *tvd = vd->vdev_top; in vdev_stat_update()
4960 vdev_rebuild_t *vr = &tvd->vdev_rebuild_config; in vdev_stat_update()
4961 vdev_rebuild_phys_t *vrp = &vr->vr_rebuild_phys; in vdev_stat_update()
4962 uint64_t *rebuilt = &vrp->vrp_bytes_rebuilt; in vdev_stat_update()
4964 if (vd->vdev_ops->vdev_op_leaf && in vdev_stat_update()
4965 vd->vdev_ops != &vdev_draid_spare_ops) { in vdev_stat_update()
4968 vs->vs_rebuild_processed += psize; in vdev_stat_update()
4972 vs->vs_self_healed += psize; in vdev_stat_update()
4979 if (vd->vdev_ops->vdev_op_leaf && in vdev_stat_update()
4980 (zio->io_priority < ZIO_PRIORITY_NUM_QUEUEABLE)) { in vdev_stat_update()
4982 zio_priority_t priority = zio->io_priority; in vdev_stat_update()
4993 * Solely for the purposes of 'zpool iostat -lqrw' in vdev_stat_update()
5014 vs->vs_ops[vs_type]++; in vdev_stat_update()
5015 vs->vs_bytes[vs_type] += psize; in vdev_stat_update()
5018 vsx->vsx_agg_histo[priority] in vdev_stat_update()
5019 [RQ_HISTO(zio->io_size)]++; in vdev_stat_update()
5021 vsx->vsx_ind_histo[priority] in vdev_stat_update()
5022 [RQ_HISTO(zio->io_size)]++; in vdev_stat_update()
5025 if (zio->io_delta && zio->io_delay) { in vdev_stat_update()
5026 vsx->vsx_queue_histo[priority] in vdev_stat_update()
5027 [L_HISTO(zio->io_delta - zio->io_delay)]++; in vdev_stat_update()
5028 vsx->vsx_disk_histo[type] in vdev_stat_update()
5029 [L_HISTO(zio->io_delay)]++; in vdev_stat_update()
5030 vsx->vsx_total_histo[type] in vdev_stat_update()
5031 [L_HISTO(zio->io_delta)]++; in vdev_stat_update()
5035 mutex_exit(&vd->vdev_stat_lock); in vdev_stat_update()
5048 if (zio->io_error == EIO && in vdev_stat_update()
5049 !(zio->io_flags & ZIO_FLAG_IO_RETRY)) in vdev_stat_update()
5054 * I/O so don't mark these types of failures as pool-level in vdev_stat_update()
5057 if (zio->io_vd == NULL && (zio->io_flags & ZIO_FLAG_DONT_PROPAGATE)) in vdev_stat_update()
5063 spa->spa_claiming)) { in vdev_stat_update()
5069 * txg as the block was born. In the scrub-induced repair in vdev_stat_update()
5070 * case, we know that scrubs run in first-pass syncing context, in vdev_stat_update()
5075 * self-healing writes triggered by normal (non-scrubbing) in vdev_stat_update()
5077 * do so -- and it's not clear that it'd be desirable anyway. in vdev_stat_update()
5079 if (vd->vdev_ops->vdev_op_leaf) { in vdev_stat_update()
5086 } else if (spa->spa_claiming) { in vdev_stat_update()
5093 for (pvd = vd; pvd != rvd; pvd = pvd->vdev_parent) in vdev_stat_update()
5095 vdev_dirty(vd->vdev_top, VDD_DTL, vd, commit_txg); in vdev_stat_update()
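/*
 * Editor's sketch: the three latency histograms above decompose one
 * I/O's lifetime.  io_delta is assumed to be the total time since the
 * zio entered the vdev queue and io_delay the time spent on the
 * device, so their difference is queue wait:
 */
static void
split_io_latency(hrtime_t io_delta, hrtime_t io_delay,
    hrtime_t *queue_wait, hrtime_t *disk_time, hrtime_t *total_time)
{
	*queue_wait = io_delta - io_delay;	/* vsx_queue_histo */
	*disk_time = io_delay;			/* vsx_disk_histo */
	*total_time = io_delta;			/* vsx_total_histo */
}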
5105 ASSERT((space & (SPA_MINBLOCKSIZE-1)) == 0); in vdev_deflated_space()
5106 ASSERT(vd->vdev_deflate_ratio != 0 || vd->vdev_isl2cache); in vdev_deflated_space()
5108 return ((space >> SPA_MINBLOCKSHIFT) * vd->vdev_deflate_ratio); in vdev_deflated_space()
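/*
 * Editor's note (assumed from the ratio's definition elsewhere in the
 * file, not shown here): vdev_deflate_ratio is about 512 when
 * psize == asize and smaller when allocations expand (RAID-Z parity
 * and padding), so dspace reports space as if that overhead did not
 * exist.  E.g. a ratio of 410 deflates 1 GiB of raw asize to roughly
 * 0.8 GiB of dspace.
 */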
5112 * Update the in-core space usage stats for this vdev, its metaslab class,
5121 spa_t *spa = vd->vdev_spa; in vdev_space_update()
5122 vdev_t *rvd = spa->spa_root_vdev; in vdev_space_update()
5124 ASSERT(vd == vd->vdev_top); in vdev_space_update()
5127 * Apply the inverse of the psize-to-asize (ie. RAID-Z) space-expansion in vdev_space_update()
5129 * because the root vdev's psize-to-asize is simply the max of its in vdev_space_update()
5134 mutex_enter(&vd->vdev_stat_lock); in vdev_space_update()
5137 ASSERT3U(vd->vdev_stat.vs_alloc, >=, -alloc_delta); in vdev_space_update()
5140 vd->vdev_stat.vs_alloc += alloc_delta; in vdev_space_update()
5141 vd->vdev_stat.vs_space += space_delta; in vdev_space_update()
5142 vd->vdev_stat.vs_dspace += dspace_delta; in vdev_space_update()
5143 mutex_exit(&vd->vdev_stat_lock); in vdev_space_update()
5146 if (vd->vdev_mg != NULL && !vd->vdev_islog) { in vdev_space_update()
5147 ASSERT(!vd->vdev_isl2cache); in vdev_space_update()
5148 mutex_enter(&rvd->vdev_stat_lock); in vdev_space_update()
5149 rvd->vdev_stat.vs_alloc += alloc_delta; in vdev_space_update()
5150 rvd->vdev_stat.vs_space += space_delta; in vdev_space_update()
5151 rvd->vdev_stat.vs_dspace += dspace_delta; in vdev_space_update()
5152 mutex_exit(&rvd->vdev_stat_lock); in vdev_space_update()
5158 * Mark a top-level vdev's config as dirty, placing it on the dirty list
5160 * If the root vdev is specified (vdev_top == NULL), dirty all top-level vdevs.
5165 spa_t *spa = vd->vdev_spa; in vdev_config_dirty()
5166 vdev_t *rvd = spa->spa_root_vdev; in vdev_config_dirty()
5175 if (vd->vdev_aux != NULL) { in vdev_config_dirty()
5176 spa_aux_vdev_t *sav = vd->vdev_aux; in vdev_config_dirty()
5180 for (c = 0; c < sav->sav_count; c++) { in vdev_config_dirty()
5181 if (sav->sav_vdevs[c] == vd) in vdev_config_dirty()
5185 if (c == sav->sav_count) { in vdev_config_dirty()
5189 ASSERT(sav->sav_sync == B_TRUE); in vdev_config_dirty()
5193 sav->sav_sync = B_TRUE; in vdev_config_dirty()
5195 if (nvlist_lookup_nvlist_array(sav->sav_config, in vdev_config_dirty()
5197 VERIFY(nvlist_lookup_nvlist_array(sav->sav_config, in vdev_config_dirty()
5224 for (c = 0; c < rvd->vdev_children; c++) in vdev_config_dirty()
5225 vdev_config_dirty(rvd->vdev_child[c]); in vdev_config_dirty()
5227 ASSERT(vd == vd->vdev_top); in vdev_config_dirty()
5229 if (!list_link_active(&vd->vdev_config_dirty_node) && in vdev_config_dirty()
5231 list_insert_head(&spa->spa_config_dirty_list, vd); in vdev_config_dirty()
5239 spa_t *spa = vd->vdev_spa; in vdev_config_clean()
5245 ASSERT(list_link_active(&vd->vdev_config_dirty_node)); in vdev_config_clean()
5246 list_remove(&spa->spa_config_dirty_list, vd); in vdev_config_clean()
5250 * Mark a top-level vdev's state as dirty, so that the next pass of
5258 spa_t *spa = vd->vdev_spa; in vdev_state_dirty()
5261 ASSERT(vd == vd->vdev_top); in vdev_state_dirty()
5273 if (!list_link_active(&vd->vdev_state_dirty_node) && in vdev_state_dirty()
5275 list_insert_head(&spa->spa_state_dirty_list, vd); in vdev_state_dirty()
5281 spa_t *spa = vd->vdev_spa; in vdev_state_clean()
5287 ASSERT(list_link_active(&vd->vdev_state_dirty_node)); in vdev_state_clean()
5288 list_remove(&spa->spa_state_dirty_list, vd); in vdev_state_clean()
5297 spa_t *spa = vd->vdev_spa; in vdev_propagate_state()
5298 vdev_t *rvd = spa->spa_root_vdev; in vdev_propagate_state()
5303 if (vd->vdev_children > 0) { in vdev_propagate_state()
5304 for (int c = 0; c < vd->vdev_children; c++) { in vdev_propagate_state()
5305 child = vd->vdev_child[c]; in vdev_propagate_state()
5317 * Root special: if there is a top-level log in vdev_propagate_state()
5321 if (child->vdev_islog && vd == rvd) in vdev_propagate_state()
5325 } else if (child->vdev_state <= VDEV_STATE_DEGRADED) { in vdev_propagate_state()
5329 if (child->vdev_stat.vs_aux == VDEV_AUX_CORRUPT_DATA) in vdev_propagate_state()
5333 vd->vdev_ops->vdev_op_state_change(vd, faulted, degraded); in vdev_propagate_state()
5336 * Root special: if there is a top-level vdev that cannot be in vdev_propagate_state()
5342 rvd->vdev_state == VDEV_STATE_CANT_OPEN) in vdev_propagate_state()
5347 if (vd->vdev_parent) in vdev_propagate_state()
5348 vdev_propagate_state(vd->vdev_parent); in vdev_propagate_state()
5353 * state, because we're in the process of opening children depth-first.
5363 spa_t *spa = vd->vdev_spa; in vdev_set_state()
5365 if (state == vd->vdev_state) { in vdev_set_state()
5371 if (vd->vdev_ops->vdev_op_leaf && in vdev_set_state()
5373 (vd->vdev_prevstate >= VDEV_STATE_FAULTED)) { in vdev_set_state()
5375 zfs_post_state_change(spa, vd, vd->vdev_prevstate); in vdev_set_state()
5377 vd->vdev_stat.vs_aux = aux; in vdev_set_state()
5381 save_state = vd->vdev_state; in vdev_set_state()
5383 vd->vdev_state = state; in vdev_set_state()
5384 vd->vdev_stat.vs_aux = aux; in vdev_set_state()
5396 if (!vd->vdev_delayed_close && vdev_is_dead(vd) && in vdev_set_state()
5397 vd->vdev_ops->vdev_op_leaf) in vdev_set_state()
5398 vd->vdev_ops->vdev_op_close(vd); in vdev_set_state()
5400 if (vd->vdev_removed && in vdev_set_state()
5402 (aux == VDEV_AUX_OPEN_FAILED || vd->vdev_checkremove)) { in vdev_set_state()
5412 vd->vdev_state = VDEV_STATE_REMOVED; in vdev_set_state()
5413 vd->vdev_stat.vs_aux = VDEV_AUX_NONE; in vdev_set_state()
5415 vd->vdev_removed = B_TRUE; in vdev_set_state()
5425 vd->vdev_ops->vdev_op_leaf) in vdev_set_state()
5426 vd->vdev_not_present = 1; in vdev_set_state()
5443 if ((vd->vdev_prevstate != state || vd->vdev_forcefault) && in vdev_set_state()
5444 !vd->vdev_not_present && !vd->vdev_checkremove && in vdev_set_state()
5445 vd != spa->spa_root_vdev) { in vdev_set_state()
5479 vd->vdev_removed = B_FALSE; in vdev_set_state()
5481 vd->vdev_removed = B_FALSE; in vdev_set_state()
5485 * Notify ZED of any significant state-change on a leaf vdev. in vdev_set_state()
5488 if (vd->vdev_ops->vdev_op_leaf) { in vdev_set_state()
5490 if ((vd->vdev_prevstate != VDEV_STATE_UNKNOWN) && in vdev_set_state()
5491 (vd->vdev_prevstate != vd->vdev_state) && in vdev_set_state()
5493 save_state = vd->vdev_prevstate; in vdev_set_state()
5500 if (!isopen && vd->vdev_parent) in vdev_set_state()
5501 vdev_propagate_state(vd->vdev_parent); in vdev_set_state()
5507 ASSERT(!vd->vdev_ops->vdev_op_leaf); in vdev_children_are_offline()
5509 for (uint64_t i = 0; i < vd->vdev_children; i++) { in vdev_children_are_offline()
5510 if (vd->vdev_child[i]->vdev_state != VDEV_STATE_OFFLINE) in vdev_children_are_offline()
5524 if (!vd->vdev_ops->vdev_op_leaf) { in vdev_is_bootable()
5525 const char *vdev_type = vd->vdev_ops->vdev_op_type; in vdev_is_bootable()
5531 for (int c = 0; c < vd->vdev_children; c++) { in vdev_is_bootable()
5532 if (!vdev_is_bootable(vd->vdev_child[c])) in vdev_is_bootable()
5541 vdev_ops_t *ops = vd->vdev_ops; in vdev_is_concrete()
5558 if (vd->vdev_ops->vdev_op_leaf && !vd->vdev_faulted && in vdev_log_state_valid()
5559 !vd->vdev_removed) in vdev_log_state_valid()
5562 for (int c = 0; c < vd->vdev_children; c++) in vdev_log_state_valid()
5563 if (vdev_log_state_valid(vd->vdev_child[c])) in vdev_log_state_valid()
5575 ASSERT(vd->vdev_top == vd); in vdev_expand()
5576 ASSERT(spa_config_held(vd->vdev_spa, SCL_ALL, RW_WRITER) == SCL_ALL); in vdev_expand()
5581 if ((vd->vdev_spa->spa_raidz_expand == NULL || in vdev_expand()
5582 vd->vdev_spa->spa_raidz_expand->vre_vdev_id != vd->vdev_id) && in vdev_expand()
5583 (vd->vdev_asize >> vd->vdev_ms_shift) > vd->vdev_ms_count && in vdev_expand()
5597 vdev_t *cvd, *pvd = vd->vdev_parent; in vdev_split()
5599 VERIFY3U(pvd->vdev_children, >, 1); in vdev_split()
5604 ASSERT3P(pvd->vdev_child, !=, NULL); in vdev_split()
5606 cvd = pvd->vdev_child[0]; in vdev_split()
5607 if (pvd->vdev_children == 1) { in vdev_split()
5609 cvd->vdev_splitting = B_TRUE; in vdev_split()
5617 for (int c = 0; c < vd->vdev_children; c++) { in vdev_deadman()
5618 vdev_t *cvd = vd->vdev_child[c]; in vdev_deadman()
5623 if (vd->vdev_ops->vdev_op_leaf) { in vdev_deadman()
5624 vdev_queue_t *vq = &vd->vdev_queue; in vdev_deadman()
5626 mutex_enter(&vq->vq_lock); in vdev_deadman()
5627 if (vq->vq_active > 0) { in vdev_deadman()
5628 spa_t *spa = vd->vdev_spa; in vdev_deadman()
5633 vd->vdev_path, vq->vq_active); in vdev_deadman()
5640 fio = list_head(&vq->vq_active_list); in vdev_deadman()
5641 delta = gethrtime() - fio->io_timestamp; in vdev_deadman()
5645 mutex_exit(&vq->vq_lock); in vdev_deadman()
5652 ASSERT(vd->vdev_ops->vdev_op_leaf); in vdev_defer_resilver()
5654 vd->vdev_resilver_deferred = B_TRUE; in vdev_defer_resilver()
5655 vd->vdev_spa->spa_resilver_deferred = B_TRUE; in vdev_defer_resilver()
5659 * Clears the resilver deferred flag on all leaf devs under vd. Returns
5667 spa_t *spa = vd->vdev_spa; in vdev_clear_resilver_deferred()
5669 for (int c = 0; c < vd->vdev_children; c++) { in vdev_clear_resilver_deferred()
5670 vdev_t *cvd = vd->vdev_child[c]; in vdev_clear_resilver_deferred()
5674 if (vd == spa->spa_root_vdev && in vdev_clear_resilver_deferred()
5678 spa->spa_resilver_deferred = B_FALSE; in vdev_clear_resilver_deferred()
5682 if (!vdev_is_concrete(vd) || vd->vdev_aux || in vdev_clear_resilver_deferred()
5683 !vd->vdev_ops->vdev_op_leaf) in vdev_clear_resilver_deferred()
5686 vd->vdev_resilver_deferred = B_FALSE; in vdev_clear_resilver_deferred()
5688 return (!vdev_is_dead(vd) && !vd->vdev_offline && in vdev_clear_resilver_deferred()
5695 return (rs->rs_start == rs->rs_end); in vdev_xlate_is_empty()
5701 * will walk each parent vdev until it reaches a top-level vdev. Once the
5702  * top-level is reached, the physical range is initialized and the recursive in vdev_xlate()
5713 if (vd != vd->vdev_top) { in vdev_xlate()
5714 vdev_xlate(vd->vdev_parent, logical_rs, physical_rs, in vdev_xlate()
5718 * We've reached the top-level vdev, initialize the physical in vdev_xlate()
5722 physical_rs->rs_start = logical_rs->rs_start; in vdev_xlate()
5723 physical_rs->rs_end = logical_rs->rs_end; in vdev_xlate()
5725 remain_rs->rs_start = logical_rs->rs_start; in vdev_xlate()
5726 remain_rs->rs_end = logical_rs->rs_start; in vdev_xlate()
5731 vdev_t *pvd = vd->vdev_parent; in vdev_xlate()
5733 ASSERT3P(pvd->vdev_ops->vdev_op_xlate, !=, NULL); in vdev_xlate()
5741 pvd->vdev_ops->vdev_op_xlate(vd, physical_rs, &intermediate, remain_rs); in vdev_xlate()
5743 physical_rs->rs_start = intermediate.rs_start; in vdev_xlate()
5744 physical_rs->rs_end = intermediate.rs_end; in vdev_xlate()
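/*
 * Editor's sketch of the recursion above: walk up to the top-level
 * vdev first (where physical == logical), then apply each parent's
 * vdev_op_xlate while unwinding, narrowing the range at every level.
 * A hypothetical scalar analogue:
 */
struct xnode {
	struct xnode *parent;		/* NULL at the top-level vdev */
	uint64_t (*xlate)(uint64_t);	/* this vdev's child translation */
};

static uint64_t
xlate_offset(struct xnode *n, uint64_t off)
{
	if (n->parent == NULL)
		return (off);	/* top-level: identity */
	/* Resolve in the ancestors' space, then apply the parent's map. */
	return (n->parent->xlate(xlate_offset(n->parent, off)));
}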
5761 * does not live on this leaf vdev. Only when there is a non- in vdev_xlate_walk()
5774 if (vd->vdev_path == NULL) { in vdev_name()
5775 if (strcmp(vd->vdev_ops->vdev_op_type, "root") == 0) { in vdev_name()
5776 strlcpy(buf, vd->vdev_spa->spa_name, buflen); in vdev_name()
5777 } else if (!vd->vdev_ops->vdev_op_leaf) { in vdev_name()
5778 snprintf(buf, buflen, "%s-%llu", in vdev_name()
5779 vd->vdev_ops->vdev_op_type, in vdev_name()
5780 (u_longlong_t)vd->vdev_id); in vdev_name()
5783 strlcpy(buf, vd->vdev_path, buflen); in vdev_name()
5795 ASSERT(spa_config_held(vdev->vdev_spa, SCL_ALL, RW_READER) != 0); in vdev_replace_in_progress()
5797 if (vdev->vdev_ops == &vdev_replacing_ops) in vdev_replace_in_progress()
5805 if (vdev->vdev_ops == &vdev_spare_ops && (vdev->vdev_children > 2 || in vdev_replace_in_progress()
5806 !vdev_dtl_empty(vdev->vdev_child[1], DTL_MISSING))) in vdev_replace_in_progress()
5809 for (int i = 0; i < vdev->vdev_children; i++) { in vdev_replace_in_progress()
5810 if (vdev_replace_in_progress(vdev->vdev_child[i])) in vdev_replace_in_progress()
5843 spa_t *spa = dmu_tx_pool(tx)->dp_spa; in vdev_props_set_sync()
5844 objset_t *mos = spa->spa_meta_objset; in vdev_props_set_sync()
5861 if (vd->vdev_root_zap != 0) { in vdev_props_set_sync()
5862 objid = vd->vdev_root_zap; in vdev_props_set_sync()
5863 } else if (vd->vdev_top_zap != 0) { in vdev_props_set_sync()
5864 objid = vd->vdev_top_zap; in vdev_props_set_sync()
5865 } else if (vd->vdev_leaf_zap != 0) { in vdev_props_set_sync()
5866 objid = vd->vdev_leaf_zap; in vdev_props_set_sync()
5871 mutex_enter(&spa->spa_props_lock); in vdev_props_set_sync()
5934 mutex_exit(&spa->spa_props_lock); in vdev_props_set_sync()
5940 spa_t *spa = vd->vdev_spa; in vdev_prop_set()
5949 if (vd->vdev_root_zap == 0 && in vdev_prop_set()
5950 vd->vdev_top_zap == 0 && in vdev_prop_set()
5951 vd->vdev_leaf_zap == 0) in vdev_prop_set()
5984 if (vd->vdev_path == NULL) { in vdev_prop_set()
6004 if (intval != vd->vdev_noalloc) in vdev_prop_set()
6016 vd->vdev_failfast = intval & 1; in vdev_prop_set()
6023 vd->vdev_checksum_n = intval; in vdev_prop_set()
6030 vd->vdev_checksum_t = intval; in vdev_prop_set()
6037 vd->vdev_io_n = intval; in vdev_prop_set()
6044 vd->vdev_io_t = intval; in vdev_prop_set()
6051 vd->vdev_slow_io_n = intval; in vdev_prop_set()
6058 vd->vdev_slow_io_t = intval; in vdev_prop_set()
6072 return (dsl_sync_task(spa->spa_name, NULL, vdev_props_set_sync, in vdev_prop_set()
6079 spa_t *spa = vd->vdev_spa; in vdev_prop_get()
6080 objset_t *mos = spa->spa_meta_objset; in vdev_prop_get()
6100 if (vd->vdev_root_zap != 0) { in vdev_prop_get()
6101 objid = vd->vdev_root_zap; in vdev_prop_get()
6102 } else if (vd->vdev_top_zap != 0) { in vdev_prop_get()
6103 objid = vd->vdev_top_zap; in vdev_prop_get()
6104 } else if (vd->vdev_leaf_zap != 0) { in vdev_prop_get()
6105 objid = vd->vdev_leaf_zap; in vdev_prop_get()
6111 mutex_enter(&spa->spa_props_lock); in vdev_prop_get()
6125 /* Special Read-only Properties */ in vdev_prop_get()
6136 intval = (vd->vdev_stat.vs_dspace == 0) ? 0 : in vdev_prop_get()
6137 (vd->vdev_stat.vs_alloc * 100 / in vdev_prop_get()
6138 vd->vdev_stat.vs_dspace); in vdev_prop_get()
6144 vd->vdev_state, ZPROP_SRC_NONE); in vdev_prop_get()
6148 vd->vdev_guid, ZPROP_SRC_NONE); in vdev_prop_get()
6152 vd->vdev_asize, ZPROP_SRC_NONE); in vdev_prop_get()
6156 vd->vdev_psize, ZPROP_SRC_NONE); in vdev_prop_get()
6160 vd->vdev_ashift, ZPROP_SRC_NONE); in vdev_prop_get()
6164 vd->vdev_stat.vs_dspace, ZPROP_SRC_NONE); in vdev_prop_get()
6168 vd->vdev_stat.vs_dspace - in vdev_prop_get()
6169 vd->vdev_stat.vs_alloc, ZPROP_SRC_NONE); in vdev_prop_get()
6173 vd->vdev_stat.vs_alloc, ZPROP_SRC_NONE); in vdev_prop_get()
6177 vd->vdev_stat.vs_esize, ZPROP_SRC_NONE); in vdev_prop_get()
6181 vd->vdev_stat.vs_fragmentation, in vdev_prop_get()
6189 if (vd->vdev_path == NULL) in vdev_prop_get()
6192 vd->vdev_path, 0, ZPROP_SRC_NONE); in vdev_prop_get()
6195 if (vd->vdev_devid == NULL) in vdev_prop_get()
6198 vd->vdev_devid, 0, ZPROP_SRC_NONE); in vdev_prop_get()
6201 if (vd->vdev_physpath == NULL) in vdev_prop_get()
6204 vd->vdev_physpath, 0, ZPROP_SRC_NONE); in vdev_prop_get()
6207 if (vd->vdev_enc_sysfs_path == NULL) in vdev_prop_get()
6210 vd->vdev_enc_sysfs_path, 0, ZPROP_SRC_NONE); in vdev_prop_get()
6213 if (vd->vdev_fru == NULL) in vdev_prop_get()
6216 vd->vdev_fru, 0, ZPROP_SRC_NONE); in vdev_prop_get()
6219 if (vd->vdev_parent != NULL) { in vdev_prop_get()
6220 strval = vdev_name(vd->vdev_parent, in vdev_prop_get()
6227 if (vd->vdev_children > 0) in vdev_prop_get()
6230 for (uint64_t i = 0; i < vd->vdev_children; in vdev_prop_get()
6234 vname = vdev_name(vd->vdev_child[i], in vdev_prop_get()
6251 vd->vdev_children, ZPROP_SRC_NONE); in vdev_prop_get()
6255 vd->vdev_stat.vs_read_errors, in vdev_prop_get()
6260 vd->vdev_stat.vs_write_errors, in vdev_prop_get()
6265 vd->vdev_stat.vs_checksum_errors, in vdev_prop_get()
6270 vd->vdev_stat.vs_initialize_errors, in vdev_prop_get()
6275 vd->vdev_stat.vs_trim_errors, in vdev_prop_get()
6280 vd->vdev_stat.vs_slow_ios, in vdev_prop_get()
6285 vd->vdev_stat.vs_ops[ZIO_TYPE_NULL], in vdev_prop_get()
6290 vd->vdev_stat.vs_ops[ZIO_TYPE_READ], in vdev_prop_get()
6295 vd->vdev_stat.vs_ops[ZIO_TYPE_WRITE], in vdev_prop_get()
6300 vd->vdev_stat.vs_ops[ZIO_TYPE_FREE], in vdev_prop_get()
6305 vd->vdev_stat.vs_ops[ZIO_TYPE_CLAIM], in vdev_prop_get()
6316 vd->vdev_stat.vs_ops[ZIO_TYPE_FLUSH], in vdev_prop_get()
6321 vd->vdev_stat.vs_bytes[ZIO_TYPE_NULL], in vdev_prop_get()
6326 vd->vdev_stat.vs_bytes[ZIO_TYPE_READ], in vdev_prop_get()
6331 vd->vdev_stat.vs_bytes[ZIO_TYPE_WRITE], in vdev_prop_get()
6336 vd->vdev_stat.vs_bytes[ZIO_TYPE_FREE], in vdev_prop_get()
6341 vd->vdev_stat.vs_bytes[ZIO_TYPE_CLAIM], in vdev_prop_get()
6352 vd->vdev_stat.vs_bytes[ZIO_TYPE_FLUSH], in vdev_prop_get()
6357 vd->vdev_removing, ZPROP_SRC_NONE); in vdev_prop_get()
6361 if (vd->vdev_ops == &vdev_raidz_ops) { in vdev_prop_get()
6363 NULL, vd->vdev_rz_expanding, in vdev_prop_get()
6369 if (vd->vdev_ops->vdev_op_leaf) { in vdev_prop_get()
6371 NULL, vd->vdev_has_trim, in vdev_prop_get()
6378 if (vd->vdev_mg == NULL && in vdev_prop_get()
6379 vd->vdev_top != NULL) { in vdev_prop_get()
6490 propname = za->za_name; in vdev_prop_get()
6492 switch (za->za_integer_length) { in vdev_prop_get()
6499 strval = kmem_alloc(za->za_num_integers, in vdev_prop_get()
6501 err = zap_lookup(mos, objid, za->za_name, 1, in vdev_prop_get()
6502 za->za_num_integers, strval); in vdev_prop_get()
6504 kmem_free(strval, za->za_num_integers); in vdev_prop_get()
6509 kmem_free(strval, za->za_num_integers); in vdev_prop_get()
6520 mutex_exit(&spa->spa_props_lock); in vdev_prop_get()
6535 "Target number of metaslabs per top-level vdev");
6544 "Minimum number of metaslabs per top-level vdev");
6547 "Practical upper limit of total metaslabs per top-level vdev");
6580 "Minimum ashift used when creating new top-level vdevs");
6584 "Maximum ashift used when optimizing for logical -> physical sector "
6585 "size on new top-level vdevs");