Lines Matching +full:vrs +full:- +full:10
1 // SPDX-License-Identifier: CDDL-1.0
10 * or https://opensource.org/licenses/CDDL-1.0.
31 * ---------------------
57 * ------------------
69 * +------+ +------+ +------+
73 * +------+ +------+ +------+
76 * the same transaction group (10). Each label is mirrored and checksummed, so
94 * is synced. If we add a single device, we do not want to have to re-write
100 * On-disk Format
101 * --------------
108 * properties, per-vdev properties, and configuration information. It is
118 * -------------------------
122 * version ZFS on-disk version
133 * top_guid Unique ID for top-level vdev in which this is contained
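Reading that nvlist back is ordinary libnvpair work, as vdev_label_read_config() further down does with nvlist_unpack(). A minimal userland sketch, assuming the standard label layout (label 0 at device offset 0, its config nvlist 16 KiB in, VDEV_PHYS_SIZE of 112 KiB per sys/vdev_impl.h); the packed nvlist is self-terminating, so the checksum tail at the end of the region is simply ignored here, and zdb -l remains the supported tool for this:

#include <stdio.h>
#include <stdlib.h>
#include <libnvpair.h>

#define LABEL_NVLIST_OFFSET (16 * 1024)   /* 8K blank + 8K boot header */
#define LABEL_NVLIST_SIZE   (112 * 1024)  /* VDEV_PHYS_SIZE */

int
main(int argc, char **argv)
{
    char *buf = malloc(LABEL_NVLIST_SIZE);
    nvlist_t *config;
    FILE *fp;

    if (argc != 2 || buf == NULL || (fp = fopen(argv[1], "r")) == NULL)
        return (1);
    if (fseek(fp, LABEL_NVLIST_OFFSET, SEEK_SET) == 0 &&
        fread(buf, 1, LABEL_NVLIST_SIZE, fp) == LABEL_NVLIST_SIZE &&
        nvlist_unpack(buf, LABEL_NVLIST_SIZE, &config, 0) == 0) {
        nvlist_print(stdout, config);  /* version, name, state, guid, ... */
        nvlist_free(config);
    }
    (void) fclose(fp);
    free(buf);
    return (0);
}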
169 0 : psize - VDEV_LABELS * sizeof (vdev_label_t))); in vdev_label_offset()
180 if (offset >= psize - VDEV_LABEL_END_SIZE) { in vdev_label_number()
181 offset -= psize - VDEV_LABEL_END_SIZE; in vdev_label_number()
185 return (l < VDEV_LABELS ? l : -1); in vdev_label_number()
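The two helpers above encode the label placement: of the four labels, L0 and L1 sit at the front of the device and L2 and L3 at the end, so the trailing pair always sits at the current end of the device. A standalone sketch of the same math, with sizes taken from sys/vdev_impl.h (sizeof (vdev_label_t) is 256 KiB):

#include <stdio.h>
#include <stdint.h>

#define VDEV_LABELS     4
#define VDEV_LABEL_SIZE (256ULL * 1024)  /* sizeof (vdev_label_t) */

/* Labels 0 and 1 at the front of the device, 2 and 3 at the end. */
static uint64_t
label_offset(uint64_t psize, int l)
{
    return ((uint64_t)l * VDEV_LABEL_SIZE + (l < VDEV_LABELS / 2 ?
        0 : psize - VDEV_LABELS * VDEV_LABEL_SIZE));
}

int
main(void)
{
    uint64_t psize = 1ULL << 30;  /* hypothetical 1 GiB device */

    for (int l = 0; l < VDEV_LABELS; l++)
        printf("L%d at 0x%010llx\n", l,
            (unsigned long long)label_offset(psize, l));
    return (0);
}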
193 spa_config_held(zio->io_spa, SCL_STATE, RW_READER) == SCL_STATE || in vdev_label_read()
194 spa_config_held(zio->io_spa, SCL_STATE, RW_WRITER) == SCL_STATE); in vdev_label_read()
198 vdev_label_offset(vd->vdev_psize, l, offset), in vdev_label_read()
208 spa_config_held(zio->io_spa, SCL_STATE, RW_READER) == SCL_STATE || in vdev_label_write()
209 spa_config_held(zio->io_spa, SCL_STATE, RW_WRITER) == SCL_STATE); in vdev_label_write()
213 vdev_label_offset(vd->vdev_psize, l, offset), in vdev_label_write()
244 vsx->vsx_active_queue[ZIO_PRIORITY_SYNC_READ]); in vdev_config_generate_stats()
247 vsx->vsx_active_queue[ZIO_PRIORITY_SYNC_WRITE]); in vdev_config_generate_stats()
250 vsx->vsx_active_queue[ZIO_PRIORITY_ASYNC_READ]); in vdev_config_generate_stats()
253 vsx->vsx_active_queue[ZIO_PRIORITY_ASYNC_WRITE]); in vdev_config_generate_stats()
256 vsx->vsx_active_queue[ZIO_PRIORITY_SCRUB]); in vdev_config_generate_stats()
259 vsx->vsx_active_queue[ZIO_PRIORITY_TRIM]); in vdev_config_generate_stats()
262 vsx->vsx_active_queue[ZIO_PRIORITY_REBUILD]); in vdev_config_generate_stats()
266 vsx->vsx_pend_queue[ZIO_PRIORITY_SYNC_READ]); in vdev_config_generate_stats()
269 vsx->vsx_pend_queue[ZIO_PRIORITY_SYNC_WRITE]); in vdev_config_generate_stats()
272 vsx->vsx_pend_queue[ZIO_PRIORITY_ASYNC_READ]); in vdev_config_generate_stats()
275 vsx->vsx_pend_queue[ZIO_PRIORITY_ASYNC_WRITE]); in vdev_config_generate_stats()
278 vsx->vsx_pend_queue[ZIO_PRIORITY_SCRUB]); in vdev_config_generate_stats()
281 vsx->vsx_pend_queue[ZIO_PRIORITY_TRIM]); in vdev_config_generate_stats()
284 vsx->vsx_pend_queue[ZIO_PRIORITY_REBUILD]); in vdev_config_generate_stats()
288 vsx->vsx_total_histo[ZIO_TYPE_READ], in vdev_config_generate_stats()
289 ARRAY_SIZE(vsx->vsx_total_histo[ZIO_TYPE_READ])); in vdev_config_generate_stats()
292 vsx->vsx_total_histo[ZIO_TYPE_WRITE], in vdev_config_generate_stats()
293 ARRAY_SIZE(vsx->vsx_total_histo[ZIO_TYPE_WRITE])); in vdev_config_generate_stats()
296 vsx->vsx_disk_histo[ZIO_TYPE_READ], in vdev_config_generate_stats()
297 ARRAY_SIZE(vsx->vsx_disk_histo[ZIO_TYPE_READ])); in vdev_config_generate_stats()
300 vsx->vsx_disk_histo[ZIO_TYPE_WRITE], in vdev_config_generate_stats()
301 ARRAY_SIZE(vsx->vsx_disk_histo[ZIO_TYPE_WRITE])); in vdev_config_generate_stats()
304 vsx->vsx_queue_histo[ZIO_PRIORITY_SYNC_READ], in vdev_config_generate_stats()
305 ARRAY_SIZE(vsx->vsx_queue_histo[ZIO_PRIORITY_SYNC_READ])); in vdev_config_generate_stats()
308 vsx->vsx_queue_histo[ZIO_PRIORITY_SYNC_WRITE], in vdev_config_generate_stats()
309 ARRAY_SIZE(vsx->vsx_queue_histo[ZIO_PRIORITY_SYNC_WRITE])); in vdev_config_generate_stats()
312 vsx->vsx_queue_histo[ZIO_PRIORITY_ASYNC_READ], in vdev_config_generate_stats()
313 ARRAY_SIZE(vsx->vsx_queue_histo[ZIO_PRIORITY_ASYNC_READ])); in vdev_config_generate_stats()
316 vsx->vsx_queue_histo[ZIO_PRIORITY_ASYNC_WRITE], in vdev_config_generate_stats()
317 ARRAY_SIZE(vsx->vsx_queue_histo[ZIO_PRIORITY_ASYNC_WRITE])); in vdev_config_generate_stats()
320 vsx->vsx_queue_histo[ZIO_PRIORITY_SCRUB], in vdev_config_generate_stats()
321 ARRAY_SIZE(vsx->vsx_queue_histo[ZIO_PRIORITY_SCRUB])); in vdev_config_generate_stats()
324 vsx->vsx_queue_histo[ZIO_PRIORITY_TRIM], in vdev_config_generate_stats()
325 ARRAY_SIZE(vsx->vsx_queue_histo[ZIO_PRIORITY_TRIM])); in vdev_config_generate_stats()
328 vsx->vsx_queue_histo[ZIO_PRIORITY_REBUILD], in vdev_config_generate_stats()
329 ARRAY_SIZE(vsx->vsx_queue_histo[ZIO_PRIORITY_REBUILD])); in vdev_config_generate_stats()
333 vsx->vsx_ind_histo[ZIO_PRIORITY_SYNC_READ], in vdev_config_generate_stats()
334 ARRAY_SIZE(vsx->vsx_ind_histo[ZIO_PRIORITY_SYNC_READ])); in vdev_config_generate_stats()
337 vsx->vsx_ind_histo[ZIO_PRIORITY_SYNC_WRITE], in vdev_config_generate_stats()
338 ARRAY_SIZE(vsx->vsx_ind_histo[ZIO_PRIORITY_SYNC_WRITE])); in vdev_config_generate_stats()
341 vsx->vsx_ind_histo[ZIO_PRIORITY_ASYNC_READ], in vdev_config_generate_stats()
342 ARRAY_SIZE(vsx->vsx_ind_histo[ZIO_PRIORITY_ASYNC_READ])); in vdev_config_generate_stats()
345 vsx->vsx_ind_histo[ZIO_PRIORITY_ASYNC_WRITE], in vdev_config_generate_stats()
346 ARRAY_SIZE(vsx->vsx_ind_histo[ZIO_PRIORITY_ASYNC_WRITE])); in vdev_config_generate_stats()
349 vsx->vsx_ind_histo[ZIO_PRIORITY_SCRUB], in vdev_config_generate_stats()
350 ARRAY_SIZE(vsx->vsx_ind_histo[ZIO_PRIORITY_SCRUB])); in vdev_config_generate_stats()
353 vsx->vsx_ind_histo[ZIO_PRIORITY_TRIM], in vdev_config_generate_stats()
354 ARRAY_SIZE(vsx->vsx_ind_histo[ZIO_PRIORITY_TRIM])); in vdev_config_generate_stats()
357 vsx->vsx_ind_histo[ZIO_PRIORITY_REBUILD], in vdev_config_generate_stats()
358 ARRAY_SIZE(vsx->vsx_ind_histo[ZIO_PRIORITY_REBUILD])); in vdev_config_generate_stats()
361 vsx->vsx_agg_histo[ZIO_PRIORITY_SYNC_READ], in vdev_config_generate_stats()
362 ARRAY_SIZE(vsx->vsx_agg_histo[ZIO_PRIORITY_SYNC_READ])); in vdev_config_generate_stats()
365 vsx->vsx_agg_histo[ZIO_PRIORITY_SYNC_WRITE], in vdev_config_generate_stats()
366 ARRAY_SIZE(vsx->vsx_agg_histo[ZIO_PRIORITY_SYNC_WRITE])); in vdev_config_generate_stats()
369 vsx->vsx_agg_histo[ZIO_PRIORITY_ASYNC_READ], in vdev_config_generate_stats()
370 ARRAY_SIZE(vsx->vsx_agg_histo[ZIO_PRIORITY_ASYNC_READ])); in vdev_config_generate_stats()
373 vsx->vsx_agg_histo[ZIO_PRIORITY_ASYNC_WRITE], in vdev_config_generate_stats()
374 ARRAY_SIZE(vsx->vsx_agg_histo[ZIO_PRIORITY_ASYNC_WRITE])); in vdev_config_generate_stats()
377 vsx->vsx_agg_histo[ZIO_PRIORITY_SCRUB], in vdev_config_generate_stats()
378 ARRAY_SIZE(vsx->vsx_agg_histo[ZIO_PRIORITY_SCRUB])); in vdev_config_generate_stats()
381 vsx->vsx_agg_histo[ZIO_PRIORITY_TRIM], in vdev_config_generate_stats()
382 ARRAY_SIZE(vsx->vsx_agg_histo[ZIO_PRIORITY_TRIM])); in vdev_config_generate_stats()
385 vsx->vsx_agg_histo[ZIO_PRIORITY_REBUILD], in vdev_config_generate_stats()
386 ARRAY_SIZE(vsx->vsx_agg_histo[ZIO_PRIORITY_REBUILD])); in vdev_config_generate_stats()
389 fnvlist_add_uint64(nvx, ZPOOL_CONFIG_VDEV_SLOW_IOS, vs->vs_slow_ios); in vdev_config_generate_stats()
393 vs->vs_dio_verify_errors); in vdev_config_generate_stats()
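Every fragment in the run above is one instance of the same idiom: copy a per-priority counter or array out of vdev_stat_ex_t into the extended-stats nvlist. A condensed, table-driven sketch of the queue-depth half; the qmap table and its coverage are illustrative, while the ZPOOL_CONFIG_VDEV_SYNC_R_* key names and the fnvlist_add_uint64() calls follow the listing:

static void
queue_stats_sketch(nvlist_t *nvx, const vdev_stat_ex_t *vsx)
{
    static const struct {
        const char *active;  /* *_ACTIVE_QUEUE nvlist key */
        const char *pend;    /* *_PEND_QUEUE nvlist key */
        zio_priority_t p;
    } qmap[] = {
        { ZPOOL_CONFIG_VDEV_SYNC_R_ACTIVE_QUEUE,
            ZPOOL_CONFIG_VDEV_SYNC_R_PEND_QUEUE,
            ZIO_PRIORITY_SYNC_READ },
        /* ... one entry per priority, through ZIO_PRIORITY_REBUILD ... */
    };

    for (size_t i = 0; i < ARRAY_SIZE(qmap); i++) {
        fnvlist_add_uint64(nvx, qmap[i].active,
            vsx->vsx_active_queue[qmap[i].p]);
        fnvlist_add_uint64(nvx, qmap[i].pend,
            vsx->vsx_pend_queue[qmap[i].p]);
    }
}

The histogram runs follow the same shape with fnvlist_add_uint64_array() and ARRAY_SIZE() in place of the scalar adds.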
406 spa_t *spa = vd->vdev_spa; in root_vdev_actions_getprogress()
408 if (vd != spa->spa_root_vdev) in root_vdev_actions_getprogress()
444 if (vd == vd->vdev_top) { in top_vdev_actions_getprogress()
445 vdev_rebuild_stat_t vrs; in top_vdev_actions_getprogress() local
446 if (vdev_rebuild_get_stats(vd, &vrs) == 0) { in top_vdev_actions_getprogress()
448 ZPOOL_CONFIG_REBUILD_STATS, (uint64_t *)&vrs, in top_vdev_actions_getprogress()
449 sizeof (vrs) / sizeof (uint64_t)); in top_vdev_actions_getprogress()
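The cast above works because vdev_rebuild_stat_t is laid out as consecutive uint64_t fields, so the whole struct ships as a raw uint64 array under ZPOOL_CONFIG_REBUILD_STATS. A hedged sketch of the consumer side, reading the same stats back out of a config nvlist:

uint64_t *raw;
uint_t n;

if (nvlist_lookup_uint64_array(nvl, ZPOOL_CONFIG_REBUILD_STATS,
    &raw, &n) == 0 &&
    n * sizeof (uint64_t) == sizeof (vdev_rebuild_stat_t)) {
    vdev_rebuild_stat_t *vrs = (vdev_rebuild_stat_t *)raw;
    /* e.g. vrs->vrs_state, vrs->vrs_bytes_scanned, ... */
}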
462 vdev_indirect_config_t *vic = &vd->vdev_indirect_config; in vdev_config_generate()
466 fnvlist_add_string(nv, ZPOOL_CONFIG_TYPE, vd->vdev_ops->vdev_op_type); in vdev_config_generate()
468 fnvlist_add_uint64(nv, ZPOOL_CONFIG_ID, vd->vdev_id); in vdev_config_generate()
469 fnvlist_add_uint64(nv, ZPOOL_CONFIG_GUID, vd->vdev_guid); in vdev_config_generate()
471 if (vd->vdev_path != NULL) in vdev_config_generate()
472 fnvlist_add_string(nv, ZPOOL_CONFIG_PATH, vd->vdev_path); in vdev_config_generate()
474 if (vd->vdev_devid != NULL) in vdev_config_generate()
475 fnvlist_add_string(nv, ZPOOL_CONFIG_DEVID, vd->vdev_devid); in vdev_config_generate()
477 if (vd->vdev_physpath != NULL) in vdev_config_generate()
479 vd->vdev_physpath); in vdev_config_generate()
481 if (vd->vdev_enc_sysfs_path != NULL) in vdev_config_generate()
483 vd->vdev_enc_sysfs_path); in vdev_config_generate()
485 if (vd->vdev_fru != NULL) in vdev_config_generate()
486 fnvlist_add_string(nv, ZPOOL_CONFIG_FRU, vd->vdev_fru); in vdev_config_generate()
488 if (vd->vdev_ops->vdev_op_config_generate != NULL) in vdev_config_generate()
489 vd->vdev_ops->vdev_op_config_generate(vd, nv); in vdev_config_generate()
491 if (vd->vdev_wholedisk != -1ULL) { in vdev_config_generate()
493 vd->vdev_wholedisk); in vdev_config_generate()
496 if (vd->vdev_not_present && !(flags & VDEV_CONFIG_MISSING)) in vdev_config_generate()
499 if (vd->vdev_isspare) in vdev_config_generate()
503 fnvlist_add_uint64(nv, ZPOOL_CONFIG_ASHIFT, vd->vdev_ashift); in vdev_config_generate()
506 vd == vd->vdev_top) { in vdev_config_generate()
508 vd->vdev_ms_array); in vdev_config_generate()
510 vd->vdev_ms_shift); in vdev_config_generate()
511 fnvlist_add_uint64(nv, ZPOOL_CONFIG_ASHIFT, vd->vdev_ashift); in vdev_config_generate()
513 vd->vdev_asize); in vdev_config_generate()
516 fnvlist_add_uint64(nv, ZPOOL_CONFIG_IS_LOG, vd->vdev_islog); in vdev_config_generate()
517 if (vd->vdev_noalloc) { in vdev_config_generate()
519 vd->vdev_noalloc); in vdev_config_generate()
526 if (vd->vdev_removing && !vd->vdev_islog) { in vdev_config_generate()
528 vd->vdev_removing); in vdev_config_generate()
532 if (getstats && vd->vdev_alloc_bias != VDEV_BIAS_NONE) { in vdev_config_generate()
535 switch (vd->vdev_alloc_bias) { in vdev_config_generate()
546 ASSERT3U(vd->vdev_alloc_bias, ==, in vdev_config_generate()
554 if (vd->vdev_dtl_sm != NULL) { in vdev_config_generate()
556 space_map_object(vd->vdev_dtl_sm)); in vdev_config_generate()
559 if (vic->vic_mapping_object != 0) { in vdev_config_generate()
561 vic->vic_mapping_object); in vdev_config_generate()
564 if (vic->vic_births_object != 0) { in vdev_config_generate()
566 vic->vic_births_object); in vdev_config_generate()
569 if (vic->vic_prev_indirect_vdev != UINT64_MAX) { in vdev_config_generate()
571 vic->vic_prev_indirect_vdev); in vdev_config_generate()
574 if (vd->vdev_crtxg) in vdev_config_generate()
575 fnvlist_add_uint64(nv, ZPOOL_CONFIG_CREATE_TXG, vd->vdev_crtxg); in vdev_config_generate()
577 if (vd->vdev_expansion_time) in vdev_config_generate()
579 vd->vdev_expansion_time); in vdev_config_generate()
582 if (vd->vdev_leaf_zap != 0) { in vdev_config_generate()
583 ASSERT(vd->vdev_ops->vdev_op_leaf); in vdev_config_generate()
585 vd->vdev_leaf_zap); in vdev_config_generate()
588 if (vd->vdev_top_zap != 0) { in vdev_config_generate()
589 ASSERT(vd == vd->vdev_top); in vdev_config_generate()
591 vd->vdev_top_zap); in vdev_config_generate()
594 if (vd->vdev_ops == &vdev_root_ops && vd->vdev_root_zap != 0 && in vdev_config_generate()
595 spa_feature_is_active(vd->vdev_spa, SPA_FEATURE_AVZ_V2)) { in vdev_config_generate()
597 vd->vdev_root_zap); in vdev_config_generate()
600 if (vd->vdev_resilver_deferred) { in vdev_config_generate()
601 ASSERT(vd->vdev_ops->vdev_op_leaf); in vdev_config_generate()
602 ASSERT(spa->spa_resilver_deferred); in vdev_config_generate()
618 rw_enter(&vd->vdev_indirect_rwlock, RW_READER); in vdev_config_generate()
619 if (vd->vdev_indirect_mapping != NULL) { in vdev_config_generate()
620 ASSERT(vd->vdev_indirect_births != NULL); in vdev_config_generate()
622 vd->vdev_indirect_mapping; in vdev_config_generate()
626 rw_exit(&vd->vdev_indirect_rwlock); in vdev_config_generate()
627 if (vd->vdev_mg != NULL && in vdev_config_generate()
628 vd->vdev_mg->mg_fragmentation != ZFS_FRAG_INVALID) { in vdev_config_generate()
639 uint64_t to_alloc = vd->vdev_stat.vs_alloc; in vdev_config_generate()
652 - 1) { in vdev_config_generate()
654 vd->vdev_mg->mg_histogram[i] << in vdev_config_generate()
658 vd->vdev_mg->mg_histogram[i]; in vdev_config_generate()
675 if (!vd->vdev_ops->vdev_op_leaf) { in vdev_config_generate()
679 ASSERT(!vd->vdev_ishole); in vdev_config_generate()
681 child = kmem_alloc(vd->vdev_children * sizeof (nvlist_t *), in vdev_config_generate()
684 for (c = 0; c < vd->vdev_children; c++) { in vdev_config_generate()
685 child[c] = vdev_config_generate(spa, vd->vdev_child[c], in vdev_config_generate()
690 (const nvlist_t * const *)child, vd->vdev_children); in vdev_config_generate()
692 for (c = 0; c < vd->vdev_children; c++) in vdev_config_generate()
695 kmem_free(child, vd->vdev_children * sizeof (nvlist_t *)); in vdev_config_generate()
700 if (vd->vdev_offline && !vd->vdev_tmpoffline) in vdev_config_generate()
702 if (vd->vdev_resilver_txg != 0) in vdev_config_generate()
704 vd->vdev_resilver_txg); in vdev_config_generate()
705 if (vd->vdev_rebuild_txg != 0) in vdev_config_generate()
707 vd->vdev_rebuild_txg); in vdev_config_generate()
708 if (vd->vdev_faulted) in vdev_config_generate()
710 if (vd->vdev_degraded) in vdev_config_generate()
712 if (vd->vdev_removed) in vdev_config_generate()
714 if (vd->vdev_unspare) in vdev_config_generate()
716 if (vd->vdev_ishole) in vdev_config_generate()
720 switch (vd->vdev_stat.vs_aux) { in vdev_config_generate()
730 if (aux != NULL && !vd->vdev_tmpoffline) { in vdev_config_generate()
734 * We're healthy - clear any previous AUX_STATE values. in vdev_config_generate()
740 if (vd->vdev_splitting && vd->vdev_orig_guid != 0LL) { in vdev_config_generate()
742 vd->vdev_orig_guid); in vdev_config_generate()
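In miniature, vdev_config_generate() produces a per-vdev nvlist like the following. A userland sketch using the fnvlist API; the keys are the ZPOOL_CONFIG_* names from the listing, and the values are hypothetical:

nvlist_t *nv = fnvlist_alloc();

fnvlist_add_string(nv, ZPOOL_CONFIG_TYPE, VDEV_TYPE_DISK);
fnvlist_add_uint64(nv, ZPOOL_CONFIG_ID, 0);
fnvlist_add_uint64(nv, ZPOOL_CONFIG_GUID, 0x123456789abcdefULL);  /* hypothetical */
fnvlist_add_string(nv, ZPOOL_CONFIG_PATH, "/dev/sda1");           /* hypothetical */
fnvlist_add_uint64(nv, ZPOOL_CONFIG_ASHIFT, 12);
fnvlist_add_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK, 1);
/* ... plus state, stats, and children for interior vdevs ... */
fnvlist_free(nv);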
750 * Generate a view of the top-level vdevs. If we currently have holes
752 * vdevs. Additionally, add the number of top-level children that currently
758 vdev_t *rvd = spa->spa_root_vdev; in vdev_top_config_generate()
762 array = kmem_alloc(rvd->vdev_children * sizeof (uint64_t), KM_SLEEP); in vdev_top_config_generate()
764 for (c = 0, idx = 0; c < rvd->vdev_children; c++) { in vdev_top_config_generate()
765 vdev_t *tvd = rvd->vdev_child[c]; in vdev_top_config_generate()
767 if (tvd->vdev_ishole) { in vdev_top_config_generate()
778 rvd->vdev_children)); in vdev_top_config_generate()
780 kmem_free(array, rvd->vdev_children * sizeof (uint64_t)); in vdev_top_config_generate()
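A hedged reconstruction of the loop body elided above: hole indices are collected into the scratch array, and both the array and the raw child count are recorded so an importing pool can recreate hole vdevs at the right positions (ZPOOL_CONFIG_HOLE_ARRAY and ZPOOL_CONFIG_VDEV_CHILDREN assumed from sys/fs/zfs.h):

uint_t idx = 0;
for (uint64_t c = 0; c < rvd->vdev_children; c++) {
    if (rvd->vdev_child[c]->vdev_ishole)
        array[idx++] = c;  /* remember where the holes are */
}
if (idx > 0)
    fnvlist_add_uint64_array(config, ZPOOL_CONFIG_HOLE_ARRAY, array, idx);
fnvlist_add_uint64(config, ZPOOL_CONFIG_VDEV_CHILDREN, rvd->vdev_children);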
788 * find the most up-to-date label that does not exceed the specified
794 spa_t *spa = vd->vdev_spa; in vdev_label_read_config()
805 ASSERT(vd->vdev_validate_thread == curthread || in vdev_label_read_config()
816 if (vd->vdev_ops == &vdev_draid_spare_ops) in vdev_label_read_config()
836 nvlist_unpack(vp[l]->vp_nvlist, sizeof (vp[l]->vp_nvlist), in vdev_label_read_config()
894 spa_t *spa = vd->vdev_spa; in vdev_inuse()
907 if ((label = vdev_label_read_config(vd, -1ULL)) == NULL) in vdev_inuse()
946 * on-disk is the same as the one we're using now, in which case the in vdev_inuse()
1003 * read-only. Instead we look to see if the pool is marked in vdev_inuse()
1004 * read-only in the namespace and set the state to active. in vdev_inuse()
1030 spa_version(vd->vdev_spa)); in vdev_aux_label_generate()
1033 fnvlist_add_uint64(label, ZPOOL_CONFIG_GUID, vd->vdev_guid); in vdev_aux_label_generate()
1039 * spa->spa_l2cache->sav_config (populated in in vdev_aux_label_generate()
1043 fnvlist_add_uint64(label, ZPOOL_CONFIG_ASHIFT, vd->vdev_ashift); in vdev_aux_label_generate()
1048 if (vd->vdev_path != NULL) in vdev_aux_label_generate()
1049 fnvlist_add_string(label, ZPOOL_CONFIG_PATH, vd->vdev_path); in vdev_aux_label_generate()
1050 if (vd->vdev_devid != NULL) in vdev_aux_label_generate()
1051 fnvlist_add_string(label, ZPOOL_CONFIG_DEVID, vd->vdev_devid); in vdev_aux_label_generate()
1052 if (vd->vdev_physpath != NULL) { in vdev_aux_label_generate()
1054 vd->vdev_physpath); in vdev_aux_label_generate()
1064 * same leaf vdev in the vdev we're creating -- e.g. mirroring a disk with
1070 spa_t *spa = vd->vdev_spa; in vdev_label_init()
1085 VDEV_LABEL_REMOVE && vd->vdev_isspare)); in vdev_label_init()
1087 VDEV_LABEL_REMOVE && vd->vdev_isl2cache)); in vdev_label_init()
1091 for (int c = 0; c < vd->vdev_children; c++) in vdev_label_init()
1092 if ((error = vdev_label_init(vd->vdev_child[c], in vdev_label_init()
1097 vd->vdev_crtxg = crtxg; in vdev_label_init()
1099 if (!vd->vdev_ops->vdev_op_leaf || !spa_writeable(spa)) in vdev_label_init()
1123 uint64_t guid_delta = spare_guid - vd->vdev_guid; in vdev_label_init()
1125 vd->vdev_guid += guid_delta; in vdev_label_init()
1127 for (vdev_t *pvd = vd; pvd != NULL; pvd = pvd->vdev_parent) in vdev_label_init()
1128 pvd->vdev_guid_sum += guid_delta; in vdev_label_init()
1143 uint64_t guid_delta = l2cache_guid - vd->vdev_guid; in vdev_label_init()
1145 vd->vdev_guid += guid_delta; in vdev_label_init()
1147 for (vdev_t *pvd = vd; pvd != NULL; pvd = pvd->vdev_parent) in vdev_label_init()
1148 pvd->vdev_guid_sum += guid_delta; in vdev_label_init()
1168 * Generate a label describing the pool and our top-level vdev. in vdev_label_init()
1178 * creation, spa->spa_uberblock is not written until this in vdev_label_init()
1181 if (uberblock_verify(&spa->spa_uberblock)) in vdev_label_init()
1182 spa->spa_aux_sync_uber = B_TRUE; in vdev_label_init()
1187 txg = spa->spa_uberblock.ub_txg; in vdev_label_init()
1199 buf = vp->vp_nvlist; in vdev_label_init()
1200 buflen = sizeof (vp->vp_nvlist); in vdev_label_init()
1214 abd_copy_from_buf(ub_abd, &spa->spa_uberblock, sizeof (uberblock_t)); in vdev_label_init()
1216 VDEV_UBERBLOCK_RING - sizeof (uberblock_t)); in vdev_label_init()
1218 ub->ub_txg = 0; in vdev_label_init()
1262 if (error == 0 && !vd->vdev_isspare && in vdev_label_init()
1264 spa_spare_exists(vd->vdev_guid, NULL, NULL))) in vdev_label_init()
1267 if (error == 0 && !vd->vdev_isl2cache && in vdev_label_init()
1269 spa_l2cache_exists(vd->vdev_guid, NULL))) in vdev_label_init()
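The guid_delta bookkeeping inside vdev_label_init() above (used for both the spare and l2cache cases) keeps the pool-wide guid sum consistent: when a leaf's guid is replaced, every ancestor's vdev_guid_sum must shift by the same delta. The pattern, isolated (new_guid here is a hypothetical stand-in for the spare or l2cache guid):

uint64_t guid_delta = new_guid - vd->vdev_guid;

vd->vdev_guid += guid_delta;
for (vdev_t *pvd = vd; pvd != NULL; pvd = pvd->vdev_parent)
    pvd->vdev_guid_sum += guid_delta;  /* keep the ancestor sums in step */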
1283 zio_t *rio = zio->io_private; in vdev_label_read_bootenv_done()
1284 abd_t **cbp = rio->io_private; in vdev_label_read_bootenv_done()
1286 ASSERT3U(zio->io_size, ==, VDEV_PAD_SIZE); in vdev_label_read_bootenv_done()
1288 if (zio->io_error == 0) { in vdev_label_read_bootenv_done()
1289 mutex_enter(&rio->io_lock); in vdev_label_read_bootenv_done()
1292 *cbp = zio->io_abd; in vdev_label_read_bootenv_done()
1294 abd_free(zio->io_abd); in vdev_label_read_bootenv_done()
1296 mutex_exit(&rio->io_lock); in vdev_label_read_bootenv_done()
1298 abd_free(zio->io_abd); in vdev_label_read_bootenv_done()
1305 for (int c = 0; c < vd->vdev_children; c++) in vdev_label_read_bootenv_impl()
1306 vdev_label_read_bootenv_impl(zio, vd->vdev_child[c], flags); in vdev_label_read_bootenv_impl()
1314 if (vd->vdev_ops->vdev_op_leaf && vdev_readable(vd)) { in vdev_label_read_bootenv_impl()
1328 spa_t *spa = rvd->vdev_spa; in vdev_label_read_bootenv()
1344 vbe->vbe_version = ntohll(vbe->vbe_version); in vdev_label_read_bootenv()
1345 switch (vbe->vbe_version) { in vdev_label_read_bootenv()
1352 vbe->vbe_bootenv[sizeof (vbe->vbe_bootenv) - 1] = '\0'; in vdev_label_read_bootenv()
1354 vbe->vbe_bootenv); in vdev_label_read_bootenv()
1358 err = nvlist_unpack(vbe->vbe_bootenv, in vdev_label_read_bootenv()
1359 sizeof (vbe->vbe_bootenv), &config, 0); in vdev_label_read_bootenv()
1394 spa_t *spa = vd->vdev_spa; in vdev_label_write_bootenv()
1407 if (nvsize >= sizeof (bootenv->vbe_bootenv)) { in vdev_label_write_bootenv()
1414 for (int c = 0; c < vd->vdev_children; c++) { in vdev_label_write_bootenv()
1417 child_err = vdev_label_write_bootenv(vd->vdev_child[c], env); in vdev_label_write_bootenv()
1426 if (!vd->vdev_ops->vdev_op_leaf || vdev_is_dead(vd) || in vdev_label_write_bootenv()
1435 nvbuf = bootenv->vbe_bootenv; in vdev_label_write_bootenv()
1436 nvsize = sizeof (bootenv->vbe_bootenv); in vdev_label_write_bootenv()
1438 bootenv->vbe_version = fnvlist_lookup_uint64(env, BOOTENV_VERSION); in vdev_label_write_bootenv()
1439 switch (bootenv->vbe_version) { in vdev_label_write_bootenv()
1442 (void) strlcpy(bootenv->vbe_bootenv, tmp, nvsize); in vdev_label_write_bootenv()
1458 bootenv->vbe_version = htonll(bootenv->vbe_version); in vdev_label_write_bootenv()
1497 int cmp = TREE_CMP(ub1->ub_txg, ub2->ub_txg); in vdev_uberblock_compare()
1502 cmp = TREE_CMP(ub1->ub_timestamp, ub2->ub_timestamp); in vdev_uberblock_compare()
1507 * If MMP_VALID(ub) && MMP_SEQ_VALID(ub) then the host has an MMP-aware in vdev_uberblock_compare()
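Assembled from the fragments above, the comparison is a three-level tie-break: transaction group first, then wall-clock timestamp, then (when both hosts are MMP-aware) the multihost sequence number. A sketch, assuming the TREE_CMP() and MMP_* macros from the kernel headers:

static int
uberblock_compare_sketch(const uberblock_t *ub1, const uberblock_t *ub2)
{
    int cmp = TREE_CMP(ub1->ub_txg, ub2->ub_txg);

    if (cmp)
        return (cmp);

    cmp = TREE_CMP(ub1->ub_timestamp, ub2->ub_timestamp);
    if (cmp)
        return (cmp);

    /* Both uberblocks carry a valid MMP sequence: newer sequence wins. */
    if (MMP_VALID(ub1) && MMP_SEQ_VALID(ub1) &&
        MMP_VALID(ub2) && MMP_SEQ_VALID(ub2))
        cmp = TREE_CMP(MMP_SEQ(ub1), MMP_SEQ(ub2));

    return (cmp);
}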
1538 vdev_t *vd = zio->io_vd; in vdev_uberblock_load_done()
1539 spa_t *spa = zio->io_spa; in vdev_uberblock_load_done()
1540 zio_t *rio = zio->io_private; in vdev_uberblock_load_done()
1541 uberblock_t *ub = abd_to_buf(zio->io_abd); in vdev_uberblock_load_done()
1542 struct ubl_cbdata *cbp = rio->io_private; in vdev_uberblock_load_done()
1544 ASSERT3U(zio->io_size, ==, VDEV_UBERBLOCK_SIZE(vd)); in vdev_uberblock_load_done()
1546 if (zio->io_error == 0 && uberblock_verify(ub) == 0) { in vdev_uberblock_load_done()
1547 mutex_enter(&rio->io_lock); in vdev_uberblock_load_done()
1548 if (vdev_uberblock_compare(ub, &cbp->ubl_latest) > 0) { in vdev_uberblock_load_done()
1549 cbp->ubl_latest = *ub; in vdev_uberblock_load_done()
1551 if (ub->ub_txg <= spa->spa_load_max_txg && in vdev_uberblock_load_done()
1552 vdev_uberblock_compare(ub, cbp->ubl_ubbest) > 0) { in vdev_uberblock_load_done()
1559 *cbp->ubl_ubbest = *ub; in vdev_uberblock_load_done()
1560 cbp->ubl_vd = vd; in vdev_uberblock_load_done()
1562 mutex_exit(&rio->io_lock); in vdev_uberblock_load_done()
1565 abd_free(zio->io_abd); in vdev_uberblock_load_done()
1572 for (int c = 0; c < vd->vdev_children; c++) in vdev_uberblock_load_impl()
1573 vdev_uberblock_load_impl(zio, vd->vdev_child[c], flags, cbp); in vdev_uberblock_load_impl()
1575 if (vd->vdev_ops->vdev_op_leaf && vdev_readable(vd) && in vdev_uberblock_load_impl()
1576 vd->vdev_ops != &vdev_draid_spare_ops) { in vdev_uberblock_load_impl()
1599 spa_t *spa = rvd->vdev_spa; in vdev_uberblock_load()
1626 "txg %llu", spa->spa_name, (u_longlong_t)ub->ub_txg); in vdev_uberblock_load()
1628 if (ub->ub_raidz_reflow_info != in vdev_uberblock_load()
1634 spa->spa_name, in vdev_uberblock_load()
1635 (u_longlong_t)ub->ub_txg, in vdev_uberblock_load()
1636 (u_longlong_t)ub->ub_raidz_reflow_info, in vdev_uberblock_load()
1644 *config = vdev_label_read_config(cb.ubl_vd, ub->ub_txg); in vdev_uberblock_load()
1645 if (*config == NULL && spa->spa_extreme_rewind) { in vdev_uberblock_load()
1677 ASSERT(spa_config_held(vd->vdev_spa, SCL_STATE, RW_READER) == in vdev_copy_uberblocks()
1679 ASSERT(vd->vdev_ops->vdev_op_leaf); in vdev_copy_uberblocks()
1685 if (vd->vdev_ops == &vdev_draid_spare_ops) in vdev_copy_uberblocks()
1688 spa_config_enter(vd->vdev_spa, locks, FTAG, RW_READER); in vdev_copy_uberblocks()
1692 write_zio = zio_root(vd->vdev_spa, NULL, NULL, flags); in vdev_copy_uberblocks()
1697 zio = zio_root(vd->vdev_spa, NULL, NULL, flags); in vdev_copy_uberblocks()
1713 spa_config_exit(vd->vdev_spa, locks, FTAG); in vdev_copy_uberblocks()
1720 * We only get credit for writes to known-visible vdevs; see spa_vdev_add().
1725 uint64_t *good_writes = zio->io_private; in vdev_uberblock_sync_done()
1727 if (zio->io_error == 0 && zio->io_vd->vdev_top->vdev_ms_array != 0) in vdev_uberblock_sync_done()
1738 for (uint64_t c = 0; c < vd->vdev_children; c++) { in vdev_uberblock_sync()
1740 ub, vd->vdev_child[c], flags); in vdev_uberblock_sync()
1743 if (!vd->vdev_ops->vdev_op_leaf) in vdev_uberblock_sync()
1755 if (vd->vdev_ops == &vdev_draid_spare_ops) in vdev_uberblock_sync()
1759 if (vd->vdev_state == VDEV_STATE_HEALTHY && in vdev_uberblock_sync()
1760 vd->vdev_copy_uberblocks == B_TRUE) { in vdev_uberblock_sync()
1762 vd->vdev_copy_uberblocks = B_FALSE; in vdev_uberblock_sync()
1771 * write, and the disk does not do single-sector overwrites in vdev_uberblock_sync()
1772 * atomically (even though it is required to - i.e. we should see in vdev_uberblock_sync()
1779 int m = spa_multihost(vd->vdev_spa) ? MMP_BLOCKS_PER_LABEL : 0; in vdev_uberblock_sync()
1780 int n = (ub->ub_txg - (RRSS_GET_STATE(ub) == RRSS_SCRATCH_VALID)) % in vdev_uberblock_sync()
1781 (VDEV_UBERBLOCK_COUNT(vd) - m); in vdev_uberblock_sync()
1787 VDEV_UBERBLOCK_SIZE(vd) - sizeof (uberblock_t)); in vdev_uberblock_sync()
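A worked example of the slot arithmetic above, assuming a 128 KiB uberblock ring of 1 KiB uberblocks (VDEV_UBERBLOCK_COUNT(vd) == 128, which holds for ashift up to 10) with multihost enabled (m == MMP_BLOCKS_PER_LABEL == 1), and ignoring the RRSS scratch adjustment:

    n = txg % (128 - 1);

so txg 1000 lands in slot 1000 % 127 == 111, txg 1001 in slot 112, and a given slot is not overwritten again for another 127 txgs; the block reserved at the end of the ring is left for MMP updates.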
1802 spa_t *spa = svd[0]->vdev_spa; in vdev_uberblock_sync_list()
1811 if (spa->spa_aux_sync_uber) { in vdev_uberblock_sync_list()
1812 for (int v = 0; v < spa->spa_spares.sav_count; v++) { in vdev_uberblock_sync_list()
1814 spa->spa_spares.sav_vdevs[v], flags); in vdev_uberblock_sync_list()
1816 for (int v = 0; v < spa->spa_l2cache.sav_count; v++) { in vdev_uberblock_sync_list()
1818 spa->spa_l2cache.sav_vdevs[v], flags); in vdev_uberblock_sync_list()
1835 if (spa->spa_aux_sync_uber) { in vdev_uberblock_sync_list()
1836 spa->spa_aux_sync_uber = B_FALSE; in vdev_uberblock_sync_list()
1837 for (int v = 0; v < spa->spa_spares.sav_count; v++) { in vdev_uberblock_sync_list()
1838 if (vdev_writeable(spa->spa_spares.sav_vdevs[v])) { in vdev_uberblock_sync_list()
1839 zio_flush(zio, spa->spa_spares.sav_vdevs[v]); in vdev_uberblock_sync_list()
1842 for (int v = 0; v < spa->spa_l2cache.sav_count; v++) { in vdev_uberblock_sync_list()
1843 if (vdev_writeable(spa->spa_l2cache.sav_vdevs[v])) { in vdev_uberblock_sync_list()
1844 zio_flush(zio, spa->spa_l2cache.sav_vdevs[v]); in vdev_uberblock_sync_list()
1855 * On success, increment the count of good writes for our top-level vdev.
1860 uint64_t *good_writes = zio->io_private; in vdev_label_sync_done()
1862 if (zio->io_error == 0) in vdev_label_sync_done()
1872 uint64_t *good_writes = zio->io_private; in vdev_label_sync_top_done()
1875 zio->io_error = SET_ERROR(EIO); in vdev_label_sync_top_done()
1886 kmem_free(zio->io_private, sizeof (uint64_t)); in vdev_label_sync_ignore_done()
1901 vdev_t *pvd = vd->vdev_parent; in vdev_label_sync()
1904 for (int c = 0; c < vd->vdev_children; c++) { in vdev_label_sync()
1906 vd->vdev_child[c], l, txg, flags); in vdev_label_sync()
1909 if (!vd->vdev_ops->vdev_op_leaf) in vdev_label_sync()
1916 * The top-level config never needs to be written to a distributed in vdev_label_sync()
1920 if (vd->vdev_ops == &vdev_draid_spare_ops) in vdev_label_sync()
1923 if (pvd && pvd->vdev_ops == &vdev_spare_ops) in vdev_label_sync()
1927 * Generate a label describing the top-level config to which we belong. in vdev_label_sync()
1929 if ((vd->vdev_isspare && !spare_in_use) || vd->vdev_isl2cache) { in vdev_label_sync()
1930 label = vdev_aux_label_generate(vd, vd->vdev_isspare); in vdev_label_sync()
1932 label = spa_config_generate(vd->vdev_spa, vd, txg, B_FALSE); in vdev_label_sync()
1939 buf = vp->vp_nvlist; in vdev_label_sync()
1940 buflen = sizeof (vp->vp_nvlist); in vdev_label_sync()
1959 list_t *dl = &spa->spa_config_dirty_list; in vdev_label_sync_list()
1972 ASSERT(!vd->vdev_ishole); in vdev_label_sync_list()
1976 (vd->vdev_islog || vd->vdev_aux != NULL) ? in vdev_label_sync_list()
1986 spa_aux_vdev_t *sav[2] = {&spa->spa_spares, &spa->spa_l2cache}; in vdev_label_sync_list()
1988 for (int v = 0; v < sav[i]->sav_count; v++) { in vdev_label_sync_list()
1990 if (!sav[i]->sav_label_sync) in vdev_label_sync_list()
1995 vdev_label_sync(vio, good_writes, sav[i]->sav_vdevs[v], in vdev_label_sync_list()
2012 if (!sav[i]->sav_label_sync) in vdev_label_sync_list()
2014 for (int v = 0; v < sav[i]->sav_count; v++) in vdev_label_sync_list()
2015 zio_flush(zio, sav[i]->sav_vdevs[v]); in vdev_label_sync_list()
2017 sav[i]->sav_label_sync = B_FALSE; in vdev_label_sync_list()
2030 * is still transactionally consistent. The in-line comments below
2039 spa_t *spa = svd[0]->vdev_spa; in vdev_config_sync()
2040 uberblock_t *ub = &spa->spa_uberblock; in vdev_config_sync()
2059 ASSERT(ub->ub_txg <= txg); in vdev_config_sync()
2068 if (ub->ub_txg < txg) { in vdev_config_sync()
2069 boolean_t changed = uberblock_update(ub, spa->spa_root_vdev, in vdev_config_sync()
2070 txg, spa->spa_mmp.mmp_delay); in vdev_config_sync()
2072 if (!changed && list_is_empty(&spa->spa_config_dirty_list) && in vdev_config_sync()
2080 ASSERT(txg <= spa->spa_final_txg); in vdev_config_sync()
2091 txg_list_head(&spa->spa_vdev_txg_list, TXG_CLEAN(txg)); vd != NULL; in vdev_config_sync()
2092 vd = txg_list_next(&spa->spa_vdev_txg_list, vd, TXG_CLEAN(txg))) in vdev_config_sync()
2103 * the new labels to disk to ensure that all even-label updates in vdev_config_sync()
2118 * to consider, and the on-disk state is consistent either way: in vdev_config_sync()
2145 * the pool is opened, the first thing we'll do -- before any in vdev_config_sync()
2146 * user data is modified -- is mark every vdev dirty so that in vdev_config_sync()
2148 * to disk to ensure that all odd-label updates are committed to in vdev_config_sync()
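Taken together, the comments above describe the familiar staged sequence, each stage flushed before the next begins so that a crash at any point leaves either the old or the new on-disk state fully intact. A sketch of the call order in vdev_config_sync(), per those comments (error handling and retries omitted):

/* 1. Write and flush the even labels (L0, L2) of every dirty vdev. */
vdev_label_sync_list(spa, 0, txg, flags);
/* 2. Write the new uberblock to its ring slot on every writable vdev. */
vdev_uberblock_sync_list(svd, svdcount, ub, flags);
/* 3. Write the odd labels (L1, L3); all four labels now agree. */
vdev_label_sync_list(spa, 1, txg, flags);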