1fa9e4066Sahrens /* 2fa9e4066Sahrens * CDDL HEADER START 3fa9e4066Sahrens * 4fa9e4066Sahrens * The contents of this file are subject to the terms of the 5ea8dc4b6Seschrock * Common Development and Distribution License (the "License"). 6ea8dc4b6Seschrock * You may not use this file except in compliance with the License. 7fa9e4066Sahrens * 8fa9e4066Sahrens * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9fa9e4066Sahrens * or http://www.opensolaris.org/os/licensing. 10fa9e4066Sahrens * See the License for the specific language governing permissions 11fa9e4066Sahrens * and limitations under the License. 12fa9e4066Sahrens * 13fa9e4066Sahrens * When distributing Covered Code, include this CDDL HEADER in each 14fa9e4066Sahrens * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15fa9e4066Sahrens * If applicable, add the following below this CDDL HEADER, with the 16fa9e4066Sahrens * fields enclosed by brackets "[]" replaced with your own identifying 17fa9e4066Sahrens * information: Portions Copyright [yyyy] [name of copyright owner] 18fa9e4066Sahrens * 19fa9e4066Sahrens * CDDL HEADER END 20fa9e4066Sahrens */ 21fa9e4066Sahrens /* 228d18220dSMark J Musante * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved. 23daec38ecSJoe Stein * Copyright (c) 2011, 2015 by Delphix. All rights reserved. 249dc3941cSSašo Kiselkov * Copyright (c) 2013 by Saso Kiselkov. All rights reserved. 25*c3d26abcSMatthew Ahrens * Copyright (c) 2014 Integros [integros.com] 26fa9e4066Sahrens */ 27fa9e4066Sahrens 28fa9e4066Sahrens #include <sys/zfs_context.h> 29fa9e4066Sahrens #include <sys/dmu.h> 30fa9e4066Sahrens #include <sys/dmu_tx.h> 31fa9e4066Sahrens #include <sys/space_map.h> 32fa9e4066Sahrens #include <sys/metaslab_impl.h> 33fa9e4066Sahrens #include <sys/vdev_impl.h> 34fa9e4066Sahrens #include <sys/zio.h> 350713e232SGeorge Wilson #include <sys/spa_impl.h> 362e4c9986SGeorge Wilson #include <sys/zfeature.h> 37fa9e4066Sahrens 3809c9d376SGeorge Wilson /* 3909c9d376SGeorge Wilson * Allow allocations to switch to gang blocks quickly. We do this to 4009c9d376SGeorge Wilson * avoid having to load lots of space_maps in a given txg. There are, 4109c9d376SGeorge Wilson * however, some cases where we want to avoid "fast" ganging and instead 4209c9d376SGeorge Wilson * we want to do an exhaustive search of all metaslabs on this device. 43b6240e83SGeorge Wilson * Currently we don't allow any gang, slog, or dump device related allocations 4409c9d376SGeorge Wilson * to "fast" gang. 4509c9d376SGeorge Wilson */ 4609c9d376SGeorge Wilson #define CAN_FASTGANG(flags) \ 4709c9d376SGeorge Wilson (!((flags) & (METASLAB_GANG_CHILD | METASLAB_GANG_HEADER | \ 4809c9d376SGeorge Wilson METASLAB_GANG_AVOID))) 4909c9d376SGeorge Wilson 500713e232SGeorge Wilson #define METASLAB_WEIGHT_PRIMARY (1ULL << 63) 510713e232SGeorge Wilson #define METASLAB_WEIGHT_SECONDARY (1ULL << 62) 520713e232SGeorge Wilson #define METASLAB_ACTIVE_MASK \ 530713e232SGeorge Wilson (METASLAB_WEIGHT_PRIMARY | METASLAB_WEIGHT_SECONDARY) 540713e232SGeorge Wilson 5513506d1eSmaybee uint64_t metaslab_aliquot = 512ULL << 10; 56e05725b1Sbonwick uint64_t metaslab_gang_bang = SPA_MAXBLOCKSIZE + 1; /* force gang blocks */ 5713506d1eSmaybee 58fa9e4066Sahrens /* 5916a4a807SGeorge Wilson * The in-core space map representation is more compact than its on-disk form. 
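 * (Illustrative sketch, not exact numbers: the on-disk space map is an
 * append-only log of allocation and free records, while the in-core form
 * used below is a range tree of free segments.  A region that was allocated
 * and freed in many small pieces may therefore take dozens of log entries
 * on disk yet coalesce into a single segment once loaded.)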
6016a4a807SGeorge Wilson * The zfs_condense_pct determines how much more compact the in-core
6116a4a807SGeorge Wilson * space_map representation must be before we compact it on-disk.
6216a4a807SGeorge Wilson * Values should be greater than or equal to 100.
6316a4a807SGeorge Wilson */
6416a4a807SGeorge Wilson int zfs_condense_pct = 200;
6516a4a807SGeorge Wilson 
6616a4a807SGeorge Wilson /*
672a104a52SAlex Reece * Condensing a metaslab is not guaranteed to actually reduce the amount of
682a104a52SAlex Reece * space used on disk. In particular, a space map uses data in increments of
69b1be2892SMatthew Ahrens * MAX(1 << ashift, space_map_blksize), so a metaslab might use the
702a104a52SAlex Reece * same number of blocks after condensing. Since the goal of condensing is to
712a104a52SAlex Reece * reduce the number of IOPs required to read the space map, we only want to
722a104a52SAlex Reece * condense when we can be sure we will reduce the number of blocks used by the
732a104a52SAlex Reece * space map. Unfortunately, we cannot precisely compute whether or not this is
742a104a52SAlex Reece * the case in metaslab_should_condense since we are holding ms_lock. Instead,
752a104a52SAlex Reece * we apply the following heuristic: do not condense a spacemap unless the
762a104a52SAlex Reece * uncondensed size consumes greater than zfs_metaslab_condense_block_threshold
772a104a52SAlex Reece * blocks.
782a104a52SAlex Reece */
792a104a52SAlex Reece int zfs_metaslab_condense_block_threshold = 4;
802a104a52SAlex Reece 
812a104a52SAlex Reece /*
8222e30981SGeorge Wilson * The zfs_mg_noalloc_threshold defines which metaslab groups should
8322e30981SGeorge Wilson * be eligible for allocation. The value is defined as a percentage of
842e4c9986SGeorge Wilson * free space. Metaslab groups that have more free space than
8522e30981SGeorge Wilson * zfs_mg_noalloc_threshold are always eligible for allocations. Once
8622e30981SGeorge Wilson * a metaslab group's free space is less than or equal to the
8722e30981SGeorge Wilson * zfs_mg_noalloc_threshold the allocator will avoid allocating to that
8822e30981SGeorge Wilson * group unless all groups in the pool have reached zfs_mg_noalloc_threshold.
8922e30981SGeorge Wilson * Once all groups in the pool reach zfs_mg_noalloc_threshold then all
9022e30981SGeorge Wilson * groups are allowed to accept allocations. Gang blocks are always
9122e30981SGeorge Wilson * eligible to allocate on any metaslab group. The default value of 0 means
9222e30981SGeorge Wilson * no metaslab group will be excluded based on this criterion.
9322e30981SGeorge Wilson */
9422e30981SGeorge Wilson int zfs_mg_noalloc_threshold = 0;
9509c9d376SGeorge Wilson 
9609c9d376SGeorge Wilson /*
972e4c9986SGeorge Wilson * Metaslab groups are considered eligible for allocations if their
982e4c9986SGeorge Wilson * fragmentation metric (measured as a percentage) is less than or equal to
992e4c9986SGeorge Wilson * zfs_mg_fragmentation_threshold. If a metaslab group exceeds this threshold
1002e4c9986SGeorge Wilson * then it will be skipped unless all metaslab groups within the metaslab
1012e4c9986SGeorge Wilson * class have also crossed this threshold.
1022e4c9986SGeorge Wilson */
1032e4c9986SGeorge Wilson int zfs_mg_fragmentation_threshold = 85;
1042e4c9986SGeorge Wilson 
1052e4c9986SGeorge Wilson /*
1062e4c9986SGeorge Wilson * Allow metaslabs to keep their active state as long as their fragmentation
1072e4c9986SGeorge Wilson * percentage is less than or equal to zfs_metaslab_fragmentation_threshold. An
1082e4c9986SGeorge Wilson * active metaslab that exceeds this threshold will no longer keep its active
1092e4c9986SGeorge Wilson * status allowing better metaslabs to be selected.
1102e4c9986SGeorge Wilson */
1112e4c9986SGeorge Wilson int zfs_metaslab_fragmentation_threshold = 70;
1122e4c9986SGeorge Wilson 
1132e4c9986SGeorge Wilson /*
1140713e232SGeorge Wilson * When set, all metaslabs are loaded when the pool is first opened.
115b24ab676SJeff Bonwick */
1160713e232SGeorge Wilson int metaslab_debug_load = 0;
1170713e232SGeorge Wilson 
1180713e232SGeorge Wilson /*
1190713e232SGeorge Wilson * When set, metaslabs are prevented from being unloaded.
1200713e232SGeorge Wilson */
1210713e232SGeorge Wilson int metaslab_debug_unload = 0;
122b24ab676SJeff Bonwick 
123b24ab676SJeff Bonwick /*
124d6e555bdSGeorge Wilson * Minimum size which forces the dynamic allocator to change
125d6e555bdSGeorge Wilson * its allocation strategy. Once the space map cannot satisfy
126d6e555bdSGeorge Wilson * an allocation of this size then it switches to using a more
127d6e555bdSGeorge Wilson * aggressive strategy (i.e. search by size rather than offset).
128d6e555bdSGeorge Wilson */
129b5152584SMatthew Ahrens uint64_t metaslab_df_alloc_threshold = SPA_OLD_MAXBLOCKSIZE;
130d6e555bdSGeorge Wilson 
131d6e555bdSGeorge Wilson /*
132d6e555bdSGeorge Wilson * The minimum free space, in percent, which must be available
133d6e555bdSGeorge Wilson * in a space map to continue allocations in a first-fit fashion.
134d6e555bdSGeorge Wilson * Once the space_map's free space drops below this level we dynamically
135d6e555bdSGeorge Wilson * switch to using best-fit allocations.
136d6e555bdSGeorge Wilson */
13780eb36f2SGeorge Wilson int metaslab_df_free_pct = 4;
13880eb36f2SGeorge Wilson 
13980eb36f2SGeorge Wilson /*
14080eb36f2SGeorge Wilson * A metaslab is considered "free" if it contains a contiguous
14180eb36f2SGeorge Wilson * segment which is greater than metaslab_min_alloc_size.
14280eb36f2SGeorge Wilson */
14380eb36f2SGeorge Wilson uint64_t metaslab_min_alloc_size = DMU_MAX_ACCESS;
14480eb36f2SGeorge Wilson 
14580eb36f2SGeorge Wilson /*
1460713e232SGeorge Wilson * Percentage of all cpus that can be used by the metaslab taskq.
14780eb36f2SGeorge Wilson */
1480713e232SGeorge Wilson int metaslab_load_pct = 50;
14980eb36f2SGeorge Wilson 
15080eb36f2SGeorge Wilson /*
1510713e232SGeorge Wilson * Determines how many txgs a metaslab may remain loaded without having any
1520713e232SGeorge Wilson * allocations from it. As long as a metaslab continues to be used we will
1530713e232SGeorge Wilson * keep it loaded.
15480eb36f2SGeorge Wilson */
1550713e232SGeorge Wilson int metaslab_unload_delay = TXG_SIZE * 2;
156d6e555bdSGeorge Wilson 
157d6e555bdSGeorge Wilson /*
1580713e232SGeorge Wilson * Max number of metaslabs per group to preload.
1590713e232SGeorge Wilson */
1600713e232SGeorge Wilson int metaslab_preload_limit = SPA_DVAS_PER_BP;
1610713e232SGeorge Wilson 
1620713e232SGeorge Wilson /*
1630713e232SGeorge Wilson * Enable/disable preloading of metaslabs.
1640713e232SGeorge Wilson */
1650713e232SGeorge Wilson boolean_t metaslab_preload_enabled = B_TRUE;
1660713e232SGeorge Wilson 
1670713e232SGeorge Wilson /*
1682e4c9986SGeorge Wilson * Enable/disable fragmentation weighting on metaslabs.
1690713e232SGeorge Wilson */
1702e4c9986SGeorge Wilson boolean_t metaslab_fragmentation_factor_enabled = B_TRUE;
1710713e232SGeorge Wilson 
1722e4c9986SGeorge Wilson /*
1732e4c9986SGeorge Wilson * Enable/disable lba weighting (i.e. outer tracks are given preference).
1742e4c9986SGeorge Wilson */ 1752e4c9986SGeorge Wilson boolean_t metaslab_lba_weighting_enabled = B_TRUE; 1762e4c9986SGeorge Wilson 1772e4c9986SGeorge Wilson /* 1782e4c9986SGeorge Wilson * Enable/disable metaslab group biasing. 1792e4c9986SGeorge Wilson */ 1802e4c9986SGeorge Wilson boolean_t metaslab_bias_enabled = B_TRUE; 1812e4c9986SGeorge Wilson 1822e4c9986SGeorge Wilson static uint64_t metaslab_fragmentation(metaslab_t *); 1830713e232SGeorge Wilson 1840713e232SGeorge Wilson /* 185fa9e4066Sahrens * ========================================================================== 186fa9e4066Sahrens * Metaslab classes 187fa9e4066Sahrens * ========================================================================== 188fa9e4066Sahrens */ 189fa9e4066Sahrens metaslab_class_t * 1900713e232SGeorge Wilson metaslab_class_create(spa_t *spa, metaslab_ops_t *ops) 191fa9e4066Sahrens { 192fa9e4066Sahrens metaslab_class_t *mc; 193fa9e4066Sahrens 194fa9e4066Sahrens mc = kmem_zalloc(sizeof (metaslab_class_t), KM_SLEEP); 195fa9e4066Sahrens 19688ecc943SGeorge Wilson mc->mc_spa = spa; 197fa9e4066Sahrens mc->mc_rotor = NULL; 198d6e555bdSGeorge Wilson mc->mc_ops = ops; 199fa9e4066Sahrens 200fa9e4066Sahrens return (mc); 201fa9e4066Sahrens } 202fa9e4066Sahrens 203fa9e4066Sahrens void 204fa9e4066Sahrens metaslab_class_destroy(metaslab_class_t *mc) 205fa9e4066Sahrens { 206a1521560SJeff Bonwick ASSERT(mc->mc_rotor == NULL); 207a1521560SJeff Bonwick ASSERT(mc->mc_alloc == 0); 208a1521560SJeff Bonwick ASSERT(mc->mc_deferred == 0); 209a1521560SJeff Bonwick ASSERT(mc->mc_space == 0); 210a1521560SJeff Bonwick ASSERT(mc->mc_dspace == 0); 211fa9e4066Sahrens 212fa9e4066Sahrens kmem_free(mc, sizeof (metaslab_class_t)); 213fa9e4066Sahrens } 214fa9e4066Sahrens 21588ecc943SGeorge Wilson int 21688ecc943SGeorge Wilson metaslab_class_validate(metaslab_class_t *mc) 21788ecc943SGeorge Wilson { 21888ecc943SGeorge Wilson metaslab_group_t *mg; 21988ecc943SGeorge Wilson vdev_t *vd; 22088ecc943SGeorge Wilson 22188ecc943SGeorge Wilson /* 22288ecc943SGeorge Wilson * Must hold one of the spa_config locks. 
22388ecc943SGeorge Wilson */ 22488ecc943SGeorge Wilson ASSERT(spa_config_held(mc->mc_spa, SCL_ALL, RW_READER) || 22588ecc943SGeorge Wilson spa_config_held(mc->mc_spa, SCL_ALL, RW_WRITER)); 22688ecc943SGeorge Wilson 22788ecc943SGeorge Wilson if ((mg = mc->mc_rotor) == NULL) 22888ecc943SGeorge Wilson return (0); 22988ecc943SGeorge Wilson 23088ecc943SGeorge Wilson do { 23188ecc943SGeorge Wilson vd = mg->mg_vd; 23288ecc943SGeorge Wilson ASSERT(vd->vdev_mg != NULL); 23388ecc943SGeorge Wilson ASSERT3P(vd->vdev_top, ==, vd); 23488ecc943SGeorge Wilson ASSERT3P(mg->mg_class, ==, mc); 23588ecc943SGeorge Wilson ASSERT3P(vd->vdev_ops, !=, &vdev_hole_ops); 23688ecc943SGeorge Wilson } while ((mg = mg->mg_next) != mc->mc_rotor); 23788ecc943SGeorge Wilson 23888ecc943SGeorge Wilson return (0); 23988ecc943SGeorge Wilson } 24088ecc943SGeorge Wilson 241b24ab676SJeff Bonwick void 242b24ab676SJeff Bonwick metaslab_class_space_update(metaslab_class_t *mc, int64_t alloc_delta, 243b24ab676SJeff Bonwick int64_t defer_delta, int64_t space_delta, int64_t dspace_delta) 244b24ab676SJeff Bonwick { 245b24ab676SJeff Bonwick atomic_add_64(&mc->mc_alloc, alloc_delta); 246b24ab676SJeff Bonwick atomic_add_64(&mc->mc_deferred, defer_delta); 247b24ab676SJeff Bonwick atomic_add_64(&mc->mc_space, space_delta); 248b24ab676SJeff Bonwick atomic_add_64(&mc->mc_dspace, dspace_delta); 249b24ab676SJeff Bonwick } 250b24ab676SJeff Bonwick 251b24ab676SJeff Bonwick uint64_t 252b24ab676SJeff Bonwick metaslab_class_get_alloc(metaslab_class_t *mc) 253b24ab676SJeff Bonwick { 254b24ab676SJeff Bonwick return (mc->mc_alloc); 255b24ab676SJeff Bonwick } 256b24ab676SJeff Bonwick 257b24ab676SJeff Bonwick uint64_t 258b24ab676SJeff Bonwick metaslab_class_get_deferred(metaslab_class_t *mc) 259b24ab676SJeff Bonwick { 260b24ab676SJeff Bonwick return (mc->mc_deferred); 261b24ab676SJeff Bonwick } 262b24ab676SJeff Bonwick 263b24ab676SJeff Bonwick uint64_t 264b24ab676SJeff Bonwick metaslab_class_get_space(metaslab_class_t *mc) 265b24ab676SJeff Bonwick { 266b24ab676SJeff Bonwick return (mc->mc_space); 267b24ab676SJeff Bonwick } 268b24ab676SJeff Bonwick 269b24ab676SJeff Bonwick uint64_t 270b24ab676SJeff Bonwick metaslab_class_get_dspace(metaslab_class_t *mc) 271b24ab676SJeff Bonwick { 272b24ab676SJeff Bonwick return (spa_deflate(mc->mc_spa) ? mc->mc_dspace : mc->mc_space); 273b24ab676SJeff Bonwick } 274b24ab676SJeff Bonwick 2752e4c9986SGeorge Wilson void 2762e4c9986SGeorge Wilson metaslab_class_histogram_verify(metaslab_class_t *mc) 2772e4c9986SGeorge Wilson { 2782e4c9986SGeorge Wilson vdev_t *rvd = mc->mc_spa->spa_root_vdev; 2792e4c9986SGeorge Wilson uint64_t *mc_hist; 2802e4c9986SGeorge Wilson int i; 2812e4c9986SGeorge Wilson 2822e4c9986SGeorge Wilson if ((zfs_flags & ZFS_DEBUG_HISTOGRAM_VERIFY) == 0) 2832e4c9986SGeorge Wilson return; 2842e4c9986SGeorge Wilson 2852e4c9986SGeorge Wilson mc_hist = kmem_zalloc(sizeof (uint64_t) * RANGE_TREE_HISTOGRAM_SIZE, 2862e4c9986SGeorge Wilson KM_SLEEP); 2872e4c9986SGeorge Wilson 2882e4c9986SGeorge Wilson for (int c = 0; c < rvd->vdev_children; c++) { 2892e4c9986SGeorge Wilson vdev_t *tvd = rvd->vdev_child[c]; 2902e4c9986SGeorge Wilson metaslab_group_t *mg = tvd->vdev_mg; 2912e4c9986SGeorge Wilson 2922e4c9986SGeorge Wilson /* 2932e4c9986SGeorge Wilson * Skip any holes, uninitialized top-levels, or 2942e4c9986SGeorge Wilson * vdevs that are not in this metalab class. 
2952e4c9986SGeorge Wilson */ 2962e4c9986SGeorge Wilson if (tvd->vdev_ishole || tvd->vdev_ms_shift == 0 || 2972e4c9986SGeorge Wilson mg->mg_class != mc) { 2982e4c9986SGeorge Wilson continue; 2992e4c9986SGeorge Wilson } 3002e4c9986SGeorge Wilson 3012e4c9986SGeorge Wilson for (i = 0; i < RANGE_TREE_HISTOGRAM_SIZE; i++) 3022e4c9986SGeorge Wilson mc_hist[i] += mg->mg_histogram[i]; 3032e4c9986SGeorge Wilson } 3042e4c9986SGeorge Wilson 3052e4c9986SGeorge Wilson for (i = 0; i < RANGE_TREE_HISTOGRAM_SIZE; i++) 3062e4c9986SGeorge Wilson VERIFY3U(mc_hist[i], ==, mc->mc_histogram[i]); 3072e4c9986SGeorge Wilson 3082e4c9986SGeorge Wilson kmem_free(mc_hist, sizeof (uint64_t) * RANGE_TREE_HISTOGRAM_SIZE); 3092e4c9986SGeorge Wilson } 3102e4c9986SGeorge Wilson 3112e4c9986SGeorge Wilson /* 3122e4c9986SGeorge Wilson * Calculate the metaslab class's fragmentation metric. The metric 3132e4c9986SGeorge Wilson * is weighted based on the space contribution of each metaslab group. 3142e4c9986SGeorge Wilson * The return value will be a number between 0 and 100 (inclusive), or 3152e4c9986SGeorge Wilson * ZFS_FRAG_INVALID if the metric has not been set. See comment above the 3162e4c9986SGeorge Wilson * zfs_frag_table for more information about the metric. 3172e4c9986SGeorge Wilson */ 3182e4c9986SGeorge Wilson uint64_t 3192e4c9986SGeorge Wilson metaslab_class_fragmentation(metaslab_class_t *mc) 3202e4c9986SGeorge Wilson { 3212e4c9986SGeorge Wilson vdev_t *rvd = mc->mc_spa->spa_root_vdev; 3222e4c9986SGeorge Wilson uint64_t fragmentation = 0; 3232e4c9986SGeorge Wilson 3242e4c9986SGeorge Wilson spa_config_enter(mc->mc_spa, SCL_VDEV, FTAG, RW_READER); 3252e4c9986SGeorge Wilson 3262e4c9986SGeorge Wilson for (int c = 0; c < rvd->vdev_children; c++) { 3272e4c9986SGeorge Wilson vdev_t *tvd = rvd->vdev_child[c]; 3282e4c9986SGeorge Wilson metaslab_group_t *mg = tvd->vdev_mg; 3292e4c9986SGeorge Wilson 3302e4c9986SGeorge Wilson /* 3312e4c9986SGeorge Wilson * Skip any holes, uninitialized top-levels, or 3322e4c9986SGeorge Wilson * vdevs that are not in this metalab class. 3332e4c9986SGeorge Wilson */ 3342e4c9986SGeorge Wilson if (tvd->vdev_ishole || tvd->vdev_ms_shift == 0 || 3352e4c9986SGeorge Wilson mg->mg_class != mc) { 3362e4c9986SGeorge Wilson continue; 3372e4c9986SGeorge Wilson } 3382e4c9986SGeorge Wilson 3392e4c9986SGeorge Wilson /* 3402e4c9986SGeorge Wilson * If a metaslab group does not contain a fragmentation 3412e4c9986SGeorge Wilson * metric then just bail out. 3422e4c9986SGeorge Wilson */ 3432e4c9986SGeorge Wilson if (mg->mg_fragmentation == ZFS_FRAG_INVALID) { 3442e4c9986SGeorge Wilson spa_config_exit(mc->mc_spa, SCL_VDEV, FTAG); 3452e4c9986SGeorge Wilson return (ZFS_FRAG_INVALID); 3462e4c9986SGeorge Wilson } 3472e4c9986SGeorge Wilson 3482e4c9986SGeorge Wilson /* 3492e4c9986SGeorge Wilson * Determine how much this metaslab_group is contributing 3502e4c9986SGeorge Wilson * to the overall pool fragmentation metric. 
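 * Illustrative example (made-up sizes): a 1 TB group at 10% fragmentation
 * and a 3 TB group at 50% fragmentation contribute
 * (10 * 1T + 50 * 3T) / 4T = 40 to the class-wide metric, so larger
 * groups dominate the weighted average computed below.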
3512e4c9986SGeorge Wilson */ 3522e4c9986SGeorge Wilson fragmentation += mg->mg_fragmentation * 3532e4c9986SGeorge Wilson metaslab_group_get_space(mg); 3542e4c9986SGeorge Wilson } 3552e4c9986SGeorge Wilson fragmentation /= metaslab_class_get_space(mc); 3562e4c9986SGeorge Wilson 3572e4c9986SGeorge Wilson ASSERT3U(fragmentation, <=, 100); 3582e4c9986SGeorge Wilson spa_config_exit(mc->mc_spa, SCL_VDEV, FTAG); 3592e4c9986SGeorge Wilson return (fragmentation); 3602e4c9986SGeorge Wilson } 3612e4c9986SGeorge Wilson 3622e4c9986SGeorge Wilson /* 3632e4c9986SGeorge Wilson * Calculate the amount of expandable space that is available in 3642e4c9986SGeorge Wilson * this metaslab class. If a device is expanded then its expandable 3652e4c9986SGeorge Wilson * space will be the amount of allocatable space that is currently not 3662e4c9986SGeorge Wilson * part of this metaslab class. 3672e4c9986SGeorge Wilson */ 3682e4c9986SGeorge Wilson uint64_t 3692e4c9986SGeorge Wilson metaslab_class_expandable_space(metaslab_class_t *mc) 3702e4c9986SGeorge Wilson { 3712e4c9986SGeorge Wilson vdev_t *rvd = mc->mc_spa->spa_root_vdev; 3722e4c9986SGeorge Wilson uint64_t space = 0; 3732e4c9986SGeorge Wilson 3742e4c9986SGeorge Wilson spa_config_enter(mc->mc_spa, SCL_VDEV, FTAG, RW_READER); 3752e4c9986SGeorge Wilson for (int c = 0; c < rvd->vdev_children; c++) { 3762e4c9986SGeorge Wilson vdev_t *tvd = rvd->vdev_child[c]; 3772e4c9986SGeorge Wilson metaslab_group_t *mg = tvd->vdev_mg; 3782e4c9986SGeorge Wilson 3792e4c9986SGeorge Wilson if (tvd->vdev_ishole || tvd->vdev_ms_shift == 0 || 3802e4c9986SGeorge Wilson mg->mg_class != mc) { 3812e4c9986SGeorge Wilson continue; 3822e4c9986SGeorge Wilson } 3832e4c9986SGeorge Wilson 3842e4c9986SGeorge Wilson space += tvd->vdev_max_asize - tvd->vdev_asize; 3852e4c9986SGeorge Wilson } 3862e4c9986SGeorge Wilson spa_config_exit(mc->mc_spa, SCL_VDEV, FTAG); 3872e4c9986SGeorge Wilson return (space); 3882e4c9986SGeorge Wilson } 3892e4c9986SGeorge Wilson 390fa9e4066Sahrens /* 391fa9e4066Sahrens * ========================================================================== 392fa9e4066Sahrens * Metaslab groups 393fa9e4066Sahrens * ========================================================================== 394fa9e4066Sahrens */ 395fa9e4066Sahrens static int 396fa9e4066Sahrens metaslab_compare(const void *x1, const void *x2) 397fa9e4066Sahrens { 398fa9e4066Sahrens const metaslab_t *m1 = x1; 399fa9e4066Sahrens const metaslab_t *m2 = x2; 400fa9e4066Sahrens 401fa9e4066Sahrens if (m1->ms_weight < m2->ms_weight) 402fa9e4066Sahrens return (1); 403fa9e4066Sahrens if (m1->ms_weight > m2->ms_weight) 404fa9e4066Sahrens return (-1); 405fa9e4066Sahrens 406fa9e4066Sahrens /* 407fa9e4066Sahrens * If the weights are identical, use the offset to force uniqueness. 408fa9e4066Sahrens */ 4090713e232SGeorge Wilson if (m1->ms_start < m2->ms_start) 410fa9e4066Sahrens return (-1); 4110713e232SGeorge Wilson if (m1->ms_start > m2->ms_start) 412fa9e4066Sahrens return (1); 413fa9e4066Sahrens 414fa9e4066Sahrens ASSERT3P(m1, ==, m2); 415fa9e4066Sahrens 416fa9e4066Sahrens return (0); 417fa9e4066Sahrens } 418fa9e4066Sahrens 41922e30981SGeorge Wilson /* 42022e30981SGeorge Wilson * Update the allocatable flag and the metaslab group's capacity. 42122e30981SGeorge Wilson * The allocatable flag is set to true if the capacity is below 42222e30981SGeorge Wilson * the zfs_mg_noalloc_threshold. 
If a metaslab group transitions 42322e30981SGeorge Wilson * from allocatable to non-allocatable or vice versa then the metaslab 42422e30981SGeorge Wilson * group's class is updated to reflect the transition. 42522e30981SGeorge Wilson */ 42622e30981SGeorge Wilson static void 42722e30981SGeorge Wilson metaslab_group_alloc_update(metaslab_group_t *mg) 42822e30981SGeorge Wilson { 42922e30981SGeorge Wilson vdev_t *vd = mg->mg_vd; 43022e30981SGeorge Wilson metaslab_class_t *mc = mg->mg_class; 43122e30981SGeorge Wilson vdev_stat_t *vs = &vd->vdev_stat; 43222e30981SGeorge Wilson boolean_t was_allocatable; 43322e30981SGeorge Wilson 43422e30981SGeorge Wilson ASSERT(vd == vd->vdev_top); 43522e30981SGeorge Wilson 43622e30981SGeorge Wilson mutex_enter(&mg->mg_lock); 43722e30981SGeorge Wilson was_allocatable = mg->mg_allocatable; 43822e30981SGeorge Wilson 43922e30981SGeorge Wilson mg->mg_free_capacity = ((vs->vs_space - vs->vs_alloc) * 100) / 44022e30981SGeorge Wilson (vs->vs_space + 1); 44122e30981SGeorge Wilson 4422e4c9986SGeorge Wilson /* 4432e4c9986SGeorge Wilson * A metaslab group is considered allocatable if it has plenty 4442e4c9986SGeorge Wilson * of free space or is not heavily fragmented. We only take 4452e4c9986SGeorge Wilson * fragmentation into account if the metaslab group has a valid 4462e4c9986SGeorge Wilson * fragmentation metric (i.e. a value between 0 and 100). 4472e4c9986SGeorge Wilson */ 4482e4c9986SGeorge Wilson mg->mg_allocatable = (mg->mg_free_capacity > zfs_mg_noalloc_threshold && 4492e4c9986SGeorge Wilson (mg->mg_fragmentation == ZFS_FRAG_INVALID || 4502e4c9986SGeorge Wilson mg->mg_fragmentation <= zfs_mg_fragmentation_threshold)); 45122e30981SGeorge Wilson 45222e30981SGeorge Wilson /* 45322e30981SGeorge Wilson * The mc_alloc_groups maintains a count of the number of 45422e30981SGeorge Wilson * groups in this metaslab class that are still above the 45522e30981SGeorge Wilson * zfs_mg_noalloc_threshold. This is used by the allocating 45622e30981SGeorge Wilson * threads to determine if they should avoid allocations to 45722e30981SGeorge Wilson * a given group. The allocator will avoid allocations to a group 45822e30981SGeorge Wilson * if that group has reached or is below the zfs_mg_noalloc_threshold 45922e30981SGeorge Wilson * and there are still other groups that are above the threshold. 46022e30981SGeorge Wilson * When a group transitions from allocatable to non-allocatable or 46122e30981SGeorge Wilson * vice versa we update the metaslab class to reflect that change. 46222e30981SGeorge Wilson * When the mc_alloc_groups value drops to 0 that means that all 46322e30981SGeorge Wilson * groups have reached the zfs_mg_noalloc_threshold making all groups 46422e30981SGeorge Wilson * eligible for allocations. This effectively means that all devices 46522e30981SGeorge Wilson * are balanced again. 
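 * For example (hypothetical pool): with three groups and
 * zfs_mg_noalloc_threshold = 10, mc_alloc_groups starts at 3.  It is
 * decremented below as each group's free capacity falls to 10% or less,
 * and only once it reaches 0 do the depleted groups become eligible for
 * allocations again.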
46622e30981SGeorge Wilson */ 46722e30981SGeorge Wilson if (was_allocatable && !mg->mg_allocatable) 46822e30981SGeorge Wilson mc->mc_alloc_groups--; 46922e30981SGeorge Wilson else if (!was_allocatable && mg->mg_allocatable) 47022e30981SGeorge Wilson mc->mc_alloc_groups++; 4712e4c9986SGeorge Wilson 47222e30981SGeorge Wilson mutex_exit(&mg->mg_lock); 47322e30981SGeorge Wilson } 47422e30981SGeorge Wilson 475fa9e4066Sahrens metaslab_group_t * 476fa9e4066Sahrens metaslab_group_create(metaslab_class_t *mc, vdev_t *vd) 477fa9e4066Sahrens { 478fa9e4066Sahrens metaslab_group_t *mg; 479fa9e4066Sahrens 480fa9e4066Sahrens mg = kmem_zalloc(sizeof (metaslab_group_t), KM_SLEEP); 481fa9e4066Sahrens mutex_init(&mg->mg_lock, NULL, MUTEX_DEFAULT, NULL); 482fa9e4066Sahrens avl_create(&mg->mg_metaslab_tree, metaslab_compare, 483fa9e4066Sahrens sizeof (metaslab_t), offsetof(struct metaslab, ms_group_node)); 484fa9e4066Sahrens mg->mg_vd = vd; 485a1521560SJeff Bonwick mg->mg_class = mc; 486a1521560SJeff Bonwick mg->mg_activation_count = 0; 487fa9e4066Sahrens 488be082110SGeorge Wilson mg->mg_taskq = taskq_create("metaslab_group_taskq", metaslab_load_pct, 4890713e232SGeorge Wilson minclsyspri, 10, INT_MAX, TASKQ_THREADS_CPU_PCT); 4900713e232SGeorge Wilson 491fa9e4066Sahrens return (mg); 492fa9e4066Sahrens } 493fa9e4066Sahrens 494fa9e4066Sahrens void 495fa9e4066Sahrens metaslab_group_destroy(metaslab_group_t *mg) 496fa9e4066Sahrens { 497a1521560SJeff Bonwick ASSERT(mg->mg_prev == NULL); 498a1521560SJeff Bonwick ASSERT(mg->mg_next == NULL); 499a33cae98STim Haley /* 500a33cae98STim Haley * We may have gone below zero with the activation count 501a33cae98STim Haley * either because we never activated in the first place or 502a33cae98STim Haley * because we're done, and possibly removing the vdev. 
503a33cae98STim Haley */ 504a33cae98STim Haley ASSERT(mg->mg_activation_count <= 0); 505a1521560SJeff Bonwick 506be082110SGeorge Wilson taskq_destroy(mg->mg_taskq); 507fa9e4066Sahrens avl_destroy(&mg->mg_metaslab_tree); 508fa9e4066Sahrens mutex_destroy(&mg->mg_lock); 509fa9e4066Sahrens kmem_free(mg, sizeof (metaslab_group_t)); 510fa9e4066Sahrens } 511fa9e4066Sahrens 512a1521560SJeff Bonwick void 513a1521560SJeff Bonwick metaslab_group_activate(metaslab_group_t *mg) 514a1521560SJeff Bonwick { 515a1521560SJeff Bonwick metaslab_class_t *mc = mg->mg_class; 516a1521560SJeff Bonwick metaslab_group_t *mgprev, *mgnext; 517a1521560SJeff Bonwick 518a1521560SJeff Bonwick ASSERT(spa_config_held(mc->mc_spa, SCL_ALLOC, RW_WRITER)); 519a1521560SJeff Bonwick 520a1521560SJeff Bonwick ASSERT(mc->mc_rotor != mg); 521a1521560SJeff Bonwick ASSERT(mg->mg_prev == NULL); 522a1521560SJeff Bonwick ASSERT(mg->mg_next == NULL); 523a1521560SJeff Bonwick ASSERT(mg->mg_activation_count <= 0); 524a1521560SJeff Bonwick 525a1521560SJeff Bonwick if (++mg->mg_activation_count <= 0) 526a1521560SJeff Bonwick return; 527a1521560SJeff Bonwick 528a1521560SJeff Bonwick mg->mg_aliquot = metaslab_aliquot * MAX(1, mg->mg_vd->vdev_children); 52922e30981SGeorge Wilson metaslab_group_alloc_update(mg); 530a1521560SJeff Bonwick 531a1521560SJeff Bonwick if ((mgprev = mc->mc_rotor) == NULL) { 532a1521560SJeff Bonwick mg->mg_prev = mg; 533a1521560SJeff Bonwick mg->mg_next = mg; 534a1521560SJeff Bonwick } else { 535a1521560SJeff Bonwick mgnext = mgprev->mg_next; 536a1521560SJeff Bonwick mg->mg_prev = mgprev; 537a1521560SJeff Bonwick mg->mg_next = mgnext; 538a1521560SJeff Bonwick mgprev->mg_next = mg; 539a1521560SJeff Bonwick mgnext->mg_prev = mg; 540a1521560SJeff Bonwick } 541a1521560SJeff Bonwick mc->mc_rotor = mg; 542a1521560SJeff Bonwick } 543a1521560SJeff Bonwick 544a1521560SJeff Bonwick void 545a1521560SJeff Bonwick metaslab_group_passivate(metaslab_group_t *mg) 546a1521560SJeff Bonwick { 547a1521560SJeff Bonwick metaslab_class_t *mc = mg->mg_class; 548a1521560SJeff Bonwick metaslab_group_t *mgprev, *mgnext; 549a1521560SJeff Bonwick 550a1521560SJeff Bonwick ASSERT(spa_config_held(mc->mc_spa, SCL_ALLOC, RW_WRITER)); 551a1521560SJeff Bonwick 552a1521560SJeff Bonwick if (--mg->mg_activation_count != 0) { 553a1521560SJeff Bonwick ASSERT(mc->mc_rotor != mg); 554a1521560SJeff Bonwick ASSERT(mg->mg_prev == NULL); 555a1521560SJeff Bonwick ASSERT(mg->mg_next == NULL); 556a1521560SJeff Bonwick ASSERT(mg->mg_activation_count < 0); 557a1521560SJeff Bonwick return; 558a1521560SJeff Bonwick } 559a1521560SJeff Bonwick 5600713e232SGeorge Wilson taskq_wait(mg->mg_taskq); 5612e4c9986SGeorge Wilson metaslab_group_alloc_update(mg); 5620713e232SGeorge Wilson 563a1521560SJeff Bonwick mgprev = mg->mg_prev; 564a1521560SJeff Bonwick mgnext = mg->mg_next; 565a1521560SJeff Bonwick 566a1521560SJeff Bonwick if (mg == mgnext) { 567a1521560SJeff Bonwick mc->mc_rotor = NULL; 568a1521560SJeff Bonwick } else { 569a1521560SJeff Bonwick mc->mc_rotor = mgnext; 570a1521560SJeff Bonwick mgprev->mg_next = mgnext; 571a1521560SJeff Bonwick mgnext->mg_prev = mgprev; 572a1521560SJeff Bonwick } 573a1521560SJeff Bonwick 574a1521560SJeff Bonwick mg->mg_prev = NULL; 575a1521560SJeff Bonwick mg->mg_next = NULL; 576a1521560SJeff Bonwick } 577a1521560SJeff Bonwick 5782e4c9986SGeorge Wilson uint64_t 5792e4c9986SGeorge Wilson metaslab_group_get_space(metaslab_group_t *mg) 5802e4c9986SGeorge Wilson { 5812e4c9986SGeorge Wilson return ((1ULL << mg->mg_vd->vdev_ms_shift) * 
mg->mg_vd->vdev_ms_count); 5822e4c9986SGeorge Wilson } 5832e4c9986SGeorge Wilson 5842e4c9986SGeorge Wilson void 5852e4c9986SGeorge Wilson metaslab_group_histogram_verify(metaslab_group_t *mg) 5862e4c9986SGeorge Wilson { 5872e4c9986SGeorge Wilson uint64_t *mg_hist; 5882e4c9986SGeorge Wilson vdev_t *vd = mg->mg_vd; 5892e4c9986SGeorge Wilson uint64_t ashift = vd->vdev_ashift; 5902e4c9986SGeorge Wilson int i; 5912e4c9986SGeorge Wilson 5922e4c9986SGeorge Wilson if ((zfs_flags & ZFS_DEBUG_HISTOGRAM_VERIFY) == 0) 5932e4c9986SGeorge Wilson return; 5942e4c9986SGeorge Wilson 5952e4c9986SGeorge Wilson mg_hist = kmem_zalloc(sizeof (uint64_t) * RANGE_TREE_HISTOGRAM_SIZE, 5962e4c9986SGeorge Wilson KM_SLEEP); 5972e4c9986SGeorge Wilson 5982e4c9986SGeorge Wilson ASSERT3U(RANGE_TREE_HISTOGRAM_SIZE, >=, 5992e4c9986SGeorge Wilson SPACE_MAP_HISTOGRAM_SIZE + ashift); 6002e4c9986SGeorge Wilson 6012e4c9986SGeorge Wilson for (int m = 0; m < vd->vdev_ms_count; m++) { 6022e4c9986SGeorge Wilson metaslab_t *msp = vd->vdev_ms[m]; 6032e4c9986SGeorge Wilson 6042e4c9986SGeorge Wilson if (msp->ms_sm == NULL) 6052e4c9986SGeorge Wilson continue; 6062e4c9986SGeorge Wilson 6072e4c9986SGeorge Wilson for (i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++) 6082e4c9986SGeorge Wilson mg_hist[i + ashift] += 6092e4c9986SGeorge Wilson msp->ms_sm->sm_phys->smp_histogram[i]; 6102e4c9986SGeorge Wilson } 6112e4c9986SGeorge Wilson 6122e4c9986SGeorge Wilson for (i = 0; i < RANGE_TREE_HISTOGRAM_SIZE; i ++) 6132e4c9986SGeorge Wilson VERIFY3U(mg_hist[i], ==, mg->mg_histogram[i]); 6142e4c9986SGeorge Wilson 6152e4c9986SGeorge Wilson kmem_free(mg_hist, sizeof (uint64_t) * RANGE_TREE_HISTOGRAM_SIZE); 6162e4c9986SGeorge Wilson } 6172e4c9986SGeorge Wilson 6182e4c9986SGeorge Wilson static void 6192e4c9986SGeorge Wilson metaslab_group_histogram_add(metaslab_group_t *mg, metaslab_t *msp) 6202e4c9986SGeorge Wilson { 6212e4c9986SGeorge Wilson metaslab_class_t *mc = mg->mg_class; 6222e4c9986SGeorge Wilson uint64_t ashift = mg->mg_vd->vdev_ashift; 6232e4c9986SGeorge Wilson 6242e4c9986SGeorge Wilson ASSERT(MUTEX_HELD(&msp->ms_lock)); 6252e4c9986SGeorge Wilson if (msp->ms_sm == NULL) 6262e4c9986SGeorge Wilson return; 6272e4c9986SGeorge Wilson 6282e4c9986SGeorge Wilson mutex_enter(&mg->mg_lock); 6292e4c9986SGeorge Wilson for (int i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++) { 6302e4c9986SGeorge Wilson mg->mg_histogram[i + ashift] += 6312e4c9986SGeorge Wilson msp->ms_sm->sm_phys->smp_histogram[i]; 6322e4c9986SGeorge Wilson mc->mc_histogram[i + ashift] += 6332e4c9986SGeorge Wilson msp->ms_sm->sm_phys->smp_histogram[i]; 6342e4c9986SGeorge Wilson } 6352e4c9986SGeorge Wilson mutex_exit(&mg->mg_lock); 6362e4c9986SGeorge Wilson } 6372e4c9986SGeorge Wilson 6382e4c9986SGeorge Wilson void 6392e4c9986SGeorge Wilson metaslab_group_histogram_remove(metaslab_group_t *mg, metaslab_t *msp) 6402e4c9986SGeorge Wilson { 6412e4c9986SGeorge Wilson metaslab_class_t *mc = mg->mg_class; 6422e4c9986SGeorge Wilson uint64_t ashift = mg->mg_vd->vdev_ashift; 6432e4c9986SGeorge Wilson 6442e4c9986SGeorge Wilson ASSERT(MUTEX_HELD(&msp->ms_lock)); 6452e4c9986SGeorge Wilson if (msp->ms_sm == NULL) 6462e4c9986SGeorge Wilson return; 6472e4c9986SGeorge Wilson 6482e4c9986SGeorge Wilson mutex_enter(&mg->mg_lock); 6492e4c9986SGeorge Wilson for (int i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++) { 6502e4c9986SGeorge Wilson ASSERT3U(mg->mg_histogram[i + ashift], >=, 6512e4c9986SGeorge Wilson msp->ms_sm->sm_phys->smp_histogram[i]); 6522e4c9986SGeorge Wilson ASSERT3U(mc->mc_histogram[i + ashift], >=, 6532e4c9986SGeorge 
Wilson msp->ms_sm->sm_phys->smp_histogram[i]); 6542e4c9986SGeorge Wilson 6552e4c9986SGeorge Wilson mg->mg_histogram[i + ashift] -= 6562e4c9986SGeorge Wilson msp->ms_sm->sm_phys->smp_histogram[i]; 6572e4c9986SGeorge Wilson mc->mc_histogram[i + ashift] -= 6582e4c9986SGeorge Wilson msp->ms_sm->sm_phys->smp_histogram[i]; 6592e4c9986SGeorge Wilson } 6602e4c9986SGeorge Wilson mutex_exit(&mg->mg_lock); 6612e4c9986SGeorge Wilson } 6622e4c9986SGeorge Wilson 663ecc2d604Sbonwick static void 664ecc2d604Sbonwick metaslab_group_add(metaslab_group_t *mg, metaslab_t *msp) 665fa9e4066Sahrens { 666fa9e4066Sahrens ASSERT(msp->ms_group == NULL); 6672e4c9986SGeorge Wilson mutex_enter(&mg->mg_lock); 668fa9e4066Sahrens msp->ms_group = mg; 669ecc2d604Sbonwick msp->ms_weight = 0; 670fa9e4066Sahrens avl_add(&mg->mg_metaslab_tree, msp); 671fa9e4066Sahrens mutex_exit(&mg->mg_lock); 6722e4c9986SGeorge Wilson 6732e4c9986SGeorge Wilson mutex_enter(&msp->ms_lock); 6742e4c9986SGeorge Wilson metaslab_group_histogram_add(mg, msp); 6752e4c9986SGeorge Wilson mutex_exit(&msp->ms_lock); 676fa9e4066Sahrens } 677fa9e4066Sahrens 678ecc2d604Sbonwick static void 679fa9e4066Sahrens metaslab_group_remove(metaslab_group_t *mg, metaslab_t *msp) 680fa9e4066Sahrens { 6812e4c9986SGeorge Wilson mutex_enter(&msp->ms_lock); 6822e4c9986SGeorge Wilson metaslab_group_histogram_remove(mg, msp); 6832e4c9986SGeorge Wilson mutex_exit(&msp->ms_lock); 6842e4c9986SGeorge Wilson 685fa9e4066Sahrens mutex_enter(&mg->mg_lock); 686fa9e4066Sahrens ASSERT(msp->ms_group == mg); 687fa9e4066Sahrens avl_remove(&mg->mg_metaslab_tree, msp); 688fa9e4066Sahrens msp->ms_group = NULL; 689fa9e4066Sahrens mutex_exit(&mg->mg_lock); 690fa9e4066Sahrens } 691fa9e4066Sahrens 692ecc2d604Sbonwick static void 693fa9e4066Sahrens metaslab_group_sort(metaslab_group_t *mg, metaslab_t *msp, uint64_t weight) 694fa9e4066Sahrens { 6955f5f7a6fSahrens /* 6965f5f7a6fSahrens * Although in principle the weight can be any value, in 6972e4c9986SGeorge Wilson * practice we do not use values in the range [1, 511]. 6985f5f7a6fSahrens */ 6992e4c9986SGeorge Wilson ASSERT(weight >= SPA_MINBLOCKSIZE || weight == 0); 700ecc2d604Sbonwick ASSERT(MUTEX_HELD(&msp->ms_lock)); 701ecc2d604Sbonwick 702fa9e4066Sahrens mutex_enter(&mg->mg_lock); 703fa9e4066Sahrens ASSERT(msp->ms_group == mg); 704fa9e4066Sahrens avl_remove(&mg->mg_metaslab_tree, msp); 705fa9e4066Sahrens msp->ms_weight = weight; 706fa9e4066Sahrens avl_add(&mg->mg_metaslab_tree, msp); 707fa9e4066Sahrens mutex_exit(&mg->mg_lock); 708fa9e4066Sahrens } 709fa9e4066Sahrens 710fa9e4066Sahrens /* 7112e4c9986SGeorge Wilson * Calculate the fragmentation for a given metaslab group. We can use 7122e4c9986SGeorge Wilson * a simple average here since all metaslabs within the group must have 7132e4c9986SGeorge Wilson * the same size. The return value will be a value between 0 and 100 7142e4c9986SGeorge Wilson * (inclusive), or ZFS_FRAG_INVALID if less than half of the metaslab in this 7152e4c9986SGeorge Wilson * group have a fragmentation metric. 
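 * For example (hypothetical numbers): if 60 of a group's 100 metaslabs
 * report fragmentation values summing to 1200, the group metric is
 * 1200 / 60 = 20; if only 40 of the 100 reported a value, the group
 * would return ZFS_FRAG_INVALID instead.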
7162e4c9986SGeorge Wilson */
7172e4c9986SGeorge Wilson uint64_t
7182e4c9986SGeorge Wilson metaslab_group_fragmentation(metaslab_group_t *mg)
7192e4c9986SGeorge Wilson {
7202e4c9986SGeorge Wilson vdev_t *vd = mg->mg_vd;
7212e4c9986SGeorge Wilson uint64_t fragmentation = 0;
7222e4c9986SGeorge Wilson uint64_t valid_ms = 0;
7232e4c9986SGeorge Wilson 
7242e4c9986SGeorge Wilson for (int m = 0; m < vd->vdev_ms_count; m++) {
7252e4c9986SGeorge Wilson metaslab_t *msp = vd->vdev_ms[m];
7262e4c9986SGeorge Wilson 
7272e4c9986SGeorge Wilson if (msp->ms_fragmentation == ZFS_FRAG_INVALID)
7282e4c9986SGeorge Wilson continue;
7292e4c9986SGeorge Wilson 
7302e4c9986SGeorge Wilson valid_ms++;
7312e4c9986SGeorge Wilson fragmentation += msp->ms_fragmentation;
7322e4c9986SGeorge Wilson }
7332e4c9986SGeorge Wilson 
7342e4c9986SGeorge Wilson if (valid_ms <= vd->vdev_ms_count / 2)
7352e4c9986SGeorge Wilson return (ZFS_FRAG_INVALID);
7362e4c9986SGeorge Wilson 
7372e4c9986SGeorge Wilson fragmentation /= valid_ms;
7382e4c9986SGeorge Wilson ASSERT3U(fragmentation, <=, 100);
7392e4c9986SGeorge Wilson return (fragmentation);
7402e4c9986SGeorge Wilson }
7412e4c9986SGeorge Wilson 
7422e4c9986SGeorge Wilson /*
74322e30981SGeorge Wilson * Determine if a given metaslab group should skip allocations. A metaslab
7442e4c9986SGeorge Wilson * group should avoid allocations if its free capacity is less than the
7452e4c9986SGeorge Wilson * zfs_mg_noalloc_threshold or its fragmentation metric is greater than
7462e4c9986SGeorge Wilson * zfs_mg_fragmentation_threshold and there is at least one metaslab group
74722e30981SGeorge Wilson * that can still handle allocations.
74822e30981SGeorge Wilson */
74922e30981SGeorge Wilson static boolean_t
75022e30981SGeorge Wilson metaslab_group_allocatable(metaslab_group_t *mg)
75122e30981SGeorge Wilson {
75222e30981SGeorge Wilson vdev_t *vd = mg->mg_vd;
75322e30981SGeorge Wilson spa_t *spa = vd->vdev_spa;
75422e30981SGeorge Wilson metaslab_class_t *mc = mg->mg_class;
75522e30981SGeorge Wilson 
75622e30981SGeorge Wilson /*
7572e4c9986SGeorge Wilson * We use two key metrics to determine if a metaslab group is
7582e4c9986SGeorge Wilson * considered allocatable -- free space and fragmentation. If
7592e4c9986SGeorge Wilson * the free space is greater than the free space threshold and
7602e4c9986SGeorge Wilson * the fragmentation is less than the fragmentation threshold then
7612e4c9986SGeorge Wilson * consider the group allocatable. There are two cases when we will
7622e4c9986SGeorge Wilson * not consider these key metrics. The first is if the group is
7632e4c9986SGeorge Wilson * associated with a slog device and the second is if all groups
7642e4c9986SGeorge Wilson * in this metaslab class have already been considered ineligible
7652e4c9986SGeorge Wilson * for allocations.
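 * For instance, with the default thresholds (0 and 85), a normal-class
 * group that still has free space but sits at 90% fragmentation fails
 * the check below and is skipped, unless mc_alloc_groups has dropped to
 * 0 and every group is being considered again.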
76622e30981SGeorge Wilson */ 7672e4c9986SGeorge Wilson return ((mg->mg_free_capacity > zfs_mg_noalloc_threshold && 7682e4c9986SGeorge Wilson (mg->mg_fragmentation == ZFS_FRAG_INVALID || 7692e4c9986SGeorge Wilson mg->mg_fragmentation <= zfs_mg_fragmentation_threshold)) || 77022e30981SGeorge Wilson mc != spa_normal_class(spa) || mc->mc_alloc_groups == 0); 77122e30981SGeorge Wilson } 77222e30981SGeorge Wilson 77322e30981SGeorge Wilson /* 77480eb36f2SGeorge Wilson * ========================================================================== 7750713e232SGeorge Wilson * Range tree callbacks 77680eb36f2SGeorge Wilson * ========================================================================== 77780eb36f2SGeorge Wilson */ 77880eb36f2SGeorge Wilson 7790713e232SGeorge Wilson /* 7800713e232SGeorge Wilson * Comparison function for the private size-ordered tree. Tree is sorted 7810713e232SGeorge Wilson * by size, larger sizes at the end of the tree. 7820713e232SGeorge Wilson */ 7830713e232SGeorge Wilson static int 7840713e232SGeorge Wilson metaslab_rangesize_compare(const void *x1, const void *x2) 7850713e232SGeorge Wilson { 7860713e232SGeorge Wilson const range_seg_t *r1 = x1; 7870713e232SGeorge Wilson const range_seg_t *r2 = x2; 7880713e232SGeorge Wilson uint64_t rs_size1 = r1->rs_end - r1->rs_start; 7890713e232SGeorge Wilson uint64_t rs_size2 = r2->rs_end - r2->rs_start; 7900713e232SGeorge Wilson 7910713e232SGeorge Wilson if (rs_size1 < rs_size2) 79280eb36f2SGeorge Wilson return (-1); 7930713e232SGeorge Wilson if (rs_size1 > rs_size2) 79480eb36f2SGeorge Wilson return (1); 79580eb36f2SGeorge Wilson 7960713e232SGeorge Wilson if (r1->rs_start < r2->rs_start) 79780eb36f2SGeorge Wilson return (-1); 7980713e232SGeorge Wilson 7990713e232SGeorge Wilson if (r1->rs_start > r2->rs_start) 80080eb36f2SGeorge Wilson return (1); 80180eb36f2SGeorge Wilson 80280eb36f2SGeorge Wilson return (0); 80380eb36f2SGeorge Wilson } 80480eb36f2SGeorge Wilson 80580eb36f2SGeorge Wilson /* 8060713e232SGeorge Wilson * Create any block allocator specific components. The current allocators 8070713e232SGeorge Wilson * rely on using both a size-ordered range_tree_t and an array of uint64_t's. 8080713e232SGeorge Wilson */ 8090713e232SGeorge Wilson static void 8100713e232SGeorge Wilson metaslab_rt_create(range_tree_t *rt, void *arg) 8110713e232SGeorge Wilson { 8120713e232SGeorge Wilson metaslab_t *msp = arg; 8130713e232SGeorge Wilson 8140713e232SGeorge Wilson ASSERT3P(rt->rt_arg, ==, msp); 8150713e232SGeorge Wilson ASSERT(msp->ms_tree == NULL); 8160713e232SGeorge Wilson 8170713e232SGeorge Wilson avl_create(&msp->ms_size_tree, metaslab_rangesize_compare, 8180713e232SGeorge Wilson sizeof (range_seg_t), offsetof(range_seg_t, rs_pp_node)); 8190713e232SGeorge Wilson } 8200713e232SGeorge Wilson 8210713e232SGeorge Wilson /* 8220713e232SGeorge Wilson * Destroy the block allocator specific components. 
8230713e232SGeorge Wilson */ 8240713e232SGeorge Wilson static void 8250713e232SGeorge Wilson metaslab_rt_destroy(range_tree_t *rt, void *arg) 8260713e232SGeorge Wilson { 8270713e232SGeorge Wilson metaslab_t *msp = arg; 8280713e232SGeorge Wilson 8290713e232SGeorge Wilson ASSERT3P(rt->rt_arg, ==, msp); 8300713e232SGeorge Wilson ASSERT3P(msp->ms_tree, ==, rt); 8310713e232SGeorge Wilson ASSERT0(avl_numnodes(&msp->ms_size_tree)); 8320713e232SGeorge Wilson 8330713e232SGeorge Wilson avl_destroy(&msp->ms_size_tree); 8340713e232SGeorge Wilson } 8350713e232SGeorge Wilson 8360713e232SGeorge Wilson static void 8370713e232SGeorge Wilson metaslab_rt_add(range_tree_t *rt, range_seg_t *rs, void *arg) 8380713e232SGeorge Wilson { 8390713e232SGeorge Wilson metaslab_t *msp = arg; 8400713e232SGeorge Wilson 8410713e232SGeorge Wilson ASSERT3P(rt->rt_arg, ==, msp); 8420713e232SGeorge Wilson ASSERT3P(msp->ms_tree, ==, rt); 8430713e232SGeorge Wilson VERIFY(!msp->ms_condensing); 8440713e232SGeorge Wilson avl_add(&msp->ms_size_tree, rs); 8450713e232SGeorge Wilson } 8460713e232SGeorge Wilson 8470713e232SGeorge Wilson static void 8480713e232SGeorge Wilson metaslab_rt_remove(range_tree_t *rt, range_seg_t *rs, void *arg) 8490713e232SGeorge Wilson { 8500713e232SGeorge Wilson metaslab_t *msp = arg; 8510713e232SGeorge Wilson 8520713e232SGeorge Wilson ASSERT3P(rt->rt_arg, ==, msp); 8530713e232SGeorge Wilson ASSERT3P(msp->ms_tree, ==, rt); 8540713e232SGeorge Wilson VERIFY(!msp->ms_condensing); 8550713e232SGeorge Wilson avl_remove(&msp->ms_size_tree, rs); 8560713e232SGeorge Wilson } 8570713e232SGeorge Wilson 8580713e232SGeorge Wilson static void 8590713e232SGeorge Wilson metaslab_rt_vacate(range_tree_t *rt, void *arg) 8600713e232SGeorge Wilson { 8610713e232SGeorge Wilson metaslab_t *msp = arg; 8620713e232SGeorge Wilson 8630713e232SGeorge Wilson ASSERT3P(rt->rt_arg, ==, msp); 8640713e232SGeorge Wilson ASSERT3P(msp->ms_tree, ==, rt); 8650713e232SGeorge Wilson 8660713e232SGeorge Wilson /* 8670713e232SGeorge Wilson * Normally one would walk the tree freeing nodes along the way. 8680713e232SGeorge Wilson * Since the nodes are shared with the range trees we can avoid 8690713e232SGeorge Wilson * walking all nodes and just reinitialize the avl tree. The nodes 8700713e232SGeorge Wilson * will be freed by the range tree, so we don't want to free them here. 8710713e232SGeorge Wilson */ 8720713e232SGeorge Wilson avl_create(&msp->ms_size_tree, metaslab_rangesize_compare, 8730713e232SGeorge Wilson sizeof (range_seg_t), offsetof(range_seg_t, rs_pp_node)); 8740713e232SGeorge Wilson } 8750713e232SGeorge Wilson 8760713e232SGeorge Wilson static range_tree_ops_t metaslab_rt_ops = { 8770713e232SGeorge Wilson metaslab_rt_create, 8780713e232SGeorge Wilson metaslab_rt_destroy, 8790713e232SGeorge Wilson metaslab_rt_add, 8800713e232SGeorge Wilson metaslab_rt_remove, 8810713e232SGeorge Wilson metaslab_rt_vacate 8820713e232SGeorge Wilson }; 8830713e232SGeorge Wilson 8840713e232SGeorge Wilson /* 8850713e232SGeorge Wilson * ========================================================================== 8860713e232SGeorge Wilson * Metaslab block operations 8870713e232SGeorge Wilson * ========================================================================== 8880713e232SGeorge Wilson */ 8890713e232SGeorge Wilson 8900713e232SGeorge Wilson /* 8910713e232SGeorge Wilson * Return the maximum contiguous segment within the metaslab. 
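 * Because ms_size_tree is kept sorted by segment size (see
 * metaslab_rangesize_compare() above), this is simply the last node in
 * that tree; e.g. a metaslab whose largest free run is 1 MB returns 1 MB,
 * and an unloaded or empty metaslab returns 0.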
8920713e232SGeorge Wilson */ 8930713e232SGeorge Wilson uint64_t 8940713e232SGeorge Wilson metaslab_block_maxsize(metaslab_t *msp) 8950713e232SGeorge Wilson { 8960713e232SGeorge Wilson avl_tree_t *t = &msp->ms_size_tree; 8970713e232SGeorge Wilson range_seg_t *rs; 8980713e232SGeorge Wilson 8990713e232SGeorge Wilson if (t == NULL || (rs = avl_last(t)) == NULL) 9000713e232SGeorge Wilson return (0ULL); 9010713e232SGeorge Wilson 9020713e232SGeorge Wilson return (rs->rs_end - rs->rs_start); 9030713e232SGeorge Wilson } 9040713e232SGeorge Wilson 9050713e232SGeorge Wilson uint64_t 9060713e232SGeorge Wilson metaslab_block_alloc(metaslab_t *msp, uint64_t size) 9070713e232SGeorge Wilson { 9080713e232SGeorge Wilson uint64_t start; 9090713e232SGeorge Wilson range_tree_t *rt = msp->ms_tree; 9100713e232SGeorge Wilson 9110713e232SGeorge Wilson VERIFY(!msp->ms_condensing); 9120713e232SGeorge Wilson 9130713e232SGeorge Wilson start = msp->ms_ops->msop_alloc(msp, size); 9140713e232SGeorge Wilson if (start != -1ULL) { 9150713e232SGeorge Wilson vdev_t *vd = msp->ms_group->mg_vd; 9160713e232SGeorge Wilson 9170713e232SGeorge Wilson VERIFY0(P2PHASE(start, 1ULL << vd->vdev_ashift)); 9180713e232SGeorge Wilson VERIFY0(P2PHASE(size, 1ULL << vd->vdev_ashift)); 9190713e232SGeorge Wilson VERIFY3U(range_tree_space(rt) - size, <=, msp->ms_size); 9200713e232SGeorge Wilson range_tree_remove(rt, start, size); 9210713e232SGeorge Wilson } 9220713e232SGeorge Wilson return (start); 9230713e232SGeorge Wilson } 9240713e232SGeorge Wilson 9250713e232SGeorge Wilson /* 9260713e232SGeorge Wilson * ========================================================================== 9270713e232SGeorge Wilson * Common allocator routines 9280713e232SGeorge Wilson * ========================================================================== 9290713e232SGeorge Wilson */ 9300713e232SGeorge Wilson 9310713e232SGeorge Wilson /* 932d6e555bdSGeorge Wilson * This is a helper function that can be used by the allocator to find 933d6e555bdSGeorge Wilson * a suitable block to allocate. This will search the specified AVL 934d6e555bdSGeorge Wilson * tree looking for a block that matches the specified criteria. 935fa9e4066Sahrens */ 936fa9e4066Sahrens static uint64_t 937d6e555bdSGeorge Wilson metaslab_block_picker(avl_tree_t *t, uint64_t *cursor, uint64_t size, 938d6e555bdSGeorge Wilson uint64_t align) 939fa9e4066Sahrens { 9400713e232SGeorge Wilson range_seg_t *rs, rsearch; 941fa9e4066Sahrens avl_index_t where; 942fa9e4066Sahrens 9430713e232SGeorge Wilson rsearch.rs_start = *cursor; 9440713e232SGeorge Wilson rsearch.rs_end = *cursor + size; 945fa9e4066Sahrens 9460713e232SGeorge Wilson rs = avl_find(t, &rsearch, &where); 9470713e232SGeorge Wilson if (rs == NULL) 9480713e232SGeorge Wilson rs = avl_nearest(t, where, AVL_AFTER); 949fa9e4066Sahrens 9500713e232SGeorge Wilson while (rs != NULL) { 9510713e232SGeorge Wilson uint64_t offset = P2ROUNDUP(rs->rs_start, align); 952fa9e4066Sahrens 9530713e232SGeorge Wilson if (offset + size <= rs->rs_end) { 954fa9e4066Sahrens *cursor = offset + size; 955fa9e4066Sahrens return (offset); 956fa9e4066Sahrens } 9570713e232SGeorge Wilson rs = AVL_NEXT(t, rs); 958fa9e4066Sahrens } 959fa9e4066Sahrens 960ecc2d604Sbonwick /* 961ecc2d604Sbonwick * If we know we've searched the whole map (*cursor == 0), give up. 962ecc2d604Sbonwick * Otherwise, reset the cursor to the beginning and try again. 
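 * For example, if *cursor is 0x10000 and no free segment at or beyond that
 * offset can hold the request, the cursor is reset to 0 and the tail call
 * rescans from the start of the metaslab; only if that second pass
 * (entered with *cursor == 0) also fails do we return -1ULL.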
963ecc2d604Sbonwick */ 964ecc2d604Sbonwick if (*cursor == 0) 965ecc2d604Sbonwick return (-1ULL); 966ecc2d604Sbonwick 967fa9e4066Sahrens *cursor = 0; 968d6e555bdSGeorge Wilson return (metaslab_block_picker(t, cursor, size, align)); 969d6e555bdSGeorge Wilson } 970d6e555bdSGeorge Wilson 97180eb36f2SGeorge Wilson /* 97280eb36f2SGeorge Wilson * ========================================================================== 97380eb36f2SGeorge Wilson * The first-fit block allocator 97480eb36f2SGeorge Wilson * ========================================================================== 97580eb36f2SGeorge Wilson */ 97680eb36f2SGeorge Wilson static uint64_t 9770713e232SGeorge Wilson metaslab_ff_alloc(metaslab_t *msp, uint64_t size) 97880eb36f2SGeorge Wilson { 9790713e232SGeorge Wilson /* 9800713e232SGeorge Wilson * Find the largest power of 2 block size that evenly divides the 9810713e232SGeorge Wilson * requested size. This is used to try to allocate blocks with similar 9820713e232SGeorge Wilson * alignment from the same area of the metaslab (i.e. same cursor 9830713e232SGeorge Wilson * bucket) but it does not guarantee that other allocations sizes 9840713e232SGeorge Wilson * may exist in the same region. 9850713e232SGeorge Wilson */ 98680eb36f2SGeorge Wilson uint64_t align = size & -size; 987bf16b11eSMatthew Ahrens uint64_t *cursor = &msp->ms_lbas[highbit64(align) - 1]; 9880713e232SGeorge Wilson avl_tree_t *t = &msp->ms_tree->rt_root; 98980eb36f2SGeorge Wilson 99080eb36f2SGeorge Wilson return (metaslab_block_picker(t, cursor, size, align)); 99180eb36f2SGeorge Wilson } 99280eb36f2SGeorge Wilson 9930713e232SGeorge Wilson static metaslab_ops_t metaslab_ff_ops = { 9942e4c9986SGeorge Wilson metaslab_ff_alloc 99580eb36f2SGeorge Wilson }; 99680eb36f2SGeorge Wilson 99780eb36f2SGeorge Wilson /* 99880eb36f2SGeorge Wilson * ========================================================================== 99980eb36f2SGeorge Wilson * Dynamic block allocator - 100080eb36f2SGeorge Wilson * Uses the first fit allocation scheme until space get low and then 100180eb36f2SGeorge Wilson * adjusts to a best fit allocation method. Uses metaslab_df_alloc_threshold 100280eb36f2SGeorge Wilson * and metaslab_df_free_pct to determine when to switch the allocation scheme. 100380eb36f2SGeorge Wilson * ========================================================================== 100480eb36f2SGeorge Wilson */ 1005d6e555bdSGeorge Wilson static uint64_t 10060713e232SGeorge Wilson metaslab_df_alloc(metaslab_t *msp, uint64_t size) 1007d6e555bdSGeorge Wilson { 10080713e232SGeorge Wilson /* 10090713e232SGeorge Wilson * Find the largest power of 2 block size that evenly divides the 10100713e232SGeorge Wilson * requested size. This is used to try to allocate blocks with similar 10110713e232SGeorge Wilson * alignment from the same area of the metaslab (i.e. same cursor 10120713e232SGeorge Wilson * bucket) but it does not guarantee that other allocations sizes 10130713e232SGeorge Wilson * may exist in the same region. 
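 * For example, a 24K (0x6000) request gives align = 0x6000 & -0x6000 =
 * 0x2000, so it shares the 8K cursor bucket (ms_lbas[highbit64(align) - 1])
 * with every other request whose size is an odd multiple of 8K.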
10140713e232SGeorge Wilson */ 1015d6e555bdSGeorge Wilson uint64_t align = size & -size; 1016bf16b11eSMatthew Ahrens uint64_t *cursor = &msp->ms_lbas[highbit64(align) - 1]; 10170713e232SGeorge Wilson range_tree_t *rt = msp->ms_tree; 10180713e232SGeorge Wilson avl_tree_t *t = &rt->rt_root; 10190713e232SGeorge Wilson uint64_t max_size = metaslab_block_maxsize(msp); 10200713e232SGeorge Wilson int free_pct = range_tree_space(rt) * 100 / msp->ms_size; 1021d6e555bdSGeorge Wilson 10220713e232SGeorge Wilson ASSERT(MUTEX_HELD(&msp->ms_lock)); 10230713e232SGeorge Wilson ASSERT3U(avl_numnodes(t), ==, avl_numnodes(&msp->ms_size_tree)); 1024d6e555bdSGeorge Wilson 1025d6e555bdSGeorge Wilson if (max_size < size) 1026d6e555bdSGeorge Wilson return (-1ULL); 1027d6e555bdSGeorge Wilson 1028d6e555bdSGeorge Wilson /* 1029d6e555bdSGeorge Wilson * If we're running low on space switch to using the size 1030d6e555bdSGeorge Wilson * sorted AVL tree (best-fit). 1031d6e555bdSGeorge Wilson */ 1032d6e555bdSGeorge Wilson if (max_size < metaslab_df_alloc_threshold || 1033d6e555bdSGeorge Wilson free_pct < metaslab_df_free_pct) { 10340713e232SGeorge Wilson t = &msp->ms_size_tree; 1035d6e555bdSGeorge Wilson *cursor = 0; 1036d6e555bdSGeorge Wilson } 1037d6e555bdSGeorge Wilson 1038d6e555bdSGeorge Wilson return (metaslab_block_picker(t, cursor, size, 1ULL)); 1039d6e555bdSGeorge Wilson } 1040d6e555bdSGeorge Wilson 10410713e232SGeorge Wilson static metaslab_ops_t metaslab_df_ops = { 10422e4c9986SGeorge Wilson metaslab_df_alloc 104380eb36f2SGeorge Wilson }; 104480eb36f2SGeorge Wilson 104580eb36f2SGeorge Wilson /* 104680eb36f2SGeorge Wilson * ========================================================================== 10470713e232SGeorge Wilson * Cursor fit block allocator - 10480713e232SGeorge Wilson * Select the largest region in the metaslab, set the cursor to the beginning 10490713e232SGeorge Wilson * of the range and the cursor_end to the end of the range. As allocations 10500713e232SGeorge Wilson * are made advance the cursor. Continue allocating from the cursor until 10510713e232SGeorge Wilson * the range is exhausted and then find a new range. 
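 * For example (hypothetical): if the largest free segment is
 * [0x40000, 0x80000), the cursor pair is set to that range and successive
 * requests are carved from its front; once a request no longer fits before
 * cursor_end, the then-largest segment is selected and the cursor resets.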
105280eb36f2SGeorge Wilson * ========================================================================== 105380eb36f2SGeorge Wilson */ 105480eb36f2SGeorge Wilson static uint64_t 10550713e232SGeorge Wilson metaslab_cf_alloc(metaslab_t *msp, uint64_t size) 105680eb36f2SGeorge Wilson { 10570713e232SGeorge Wilson range_tree_t *rt = msp->ms_tree; 10580713e232SGeorge Wilson avl_tree_t *t = &msp->ms_size_tree; 10590713e232SGeorge Wilson uint64_t *cursor = &msp->ms_lbas[0]; 10600713e232SGeorge Wilson uint64_t *cursor_end = &msp->ms_lbas[1]; 106180eb36f2SGeorge Wilson uint64_t offset = 0; 106280eb36f2SGeorge Wilson 10630713e232SGeorge Wilson ASSERT(MUTEX_HELD(&msp->ms_lock)); 10640713e232SGeorge Wilson ASSERT3U(avl_numnodes(t), ==, avl_numnodes(&rt->rt_root)); 106580eb36f2SGeorge Wilson 10660713e232SGeorge Wilson ASSERT3U(*cursor_end, >=, *cursor); 10670713e232SGeorge Wilson 10680713e232SGeorge Wilson if ((*cursor + size) > *cursor_end) { 10690713e232SGeorge Wilson range_seg_t *rs; 10700713e232SGeorge Wilson 10710713e232SGeorge Wilson rs = avl_last(&msp->ms_size_tree); 10720713e232SGeorge Wilson if (rs == NULL || (rs->rs_end - rs->rs_start) < size) 107380eb36f2SGeorge Wilson return (-1ULL); 107480eb36f2SGeorge Wilson 10750713e232SGeorge Wilson *cursor = rs->rs_start; 10760713e232SGeorge Wilson *cursor_end = rs->rs_end; 107780eb36f2SGeorge Wilson } 10780713e232SGeorge Wilson 10790713e232SGeorge Wilson offset = *cursor; 10800713e232SGeorge Wilson *cursor += size; 10810713e232SGeorge Wilson 108280eb36f2SGeorge Wilson return (offset); 108380eb36f2SGeorge Wilson } 108480eb36f2SGeorge Wilson 10850713e232SGeorge Wilson static metaslab_ops_t metaslab_cf_ops = { 10862e4c9986SGeorge Wilson metaslab_cf_alloc 108780eb36f2SGeorge Wilson }; 108880eb36f2SGeorge Wilson 10890713e232SGeorge Wilson /* 10900713e232SGeorge Wilson * ========================================================================== 10910713e232SGeorge Wilson * New dynamic fit allocator - 10920713e232SGeorge Wilson * Select a region that is large enough to allocate 2^metaslab_ndf_clump_shift 10930713e232SGeorge Wilson * contiguous blocks. If no region is found then just use the largest segment 10940713e232SGeorge Wilson * that remains. 10950713e232SGeorge Wilson * ========================================================================== 10960713e232SGeorge Wilson */ 10970713e232SGeorge Wilson 10980713e232SGeorge Wilson /* 10990713e232SGeorge Wilson * Determines desired number of contiguous blocks (2^metaslab_ndf_clump_shift) 11000713e232SGeorge Wilson * to request from the allocator. 
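 * In rough terms: with the default shift of 4 below, the allocator first
 * looks in the size-sorted tree for a free segment big enough to hold on
 * the order of sixteen allocations of the requested size, and only falls
 * back to the largest remaining segment if no such clump exists.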
11010713e232SGeorge Wilson */ 11028d18220dSMark J Musante uint64_t metaslab_ndf_clump_shift = 4; 11038d18220dSMark J Musante 110480eb36f2SGeorge Wilson static uint64_t 11050713e232SGeorge Wilson metaslab_ndf_alloc(metaslab_t *msp, uint64_t size) 110680eb36f2SGeorge Wilson { 11070713e232SGeorge Wilson avl_tree_t *t = &msp->ms_tree->rt_root; 110880eb36f2SGeorge Wilson avl_index_t where; 11090713e232SGeorge Wilson range_seg_t *rs, rsearch; 1110bf16b11eSMatthew Ahrens uint64_t hbit = highbit64(size); 11110713e232SGeorge Wilson uint64_t *cursor = &msp->ms_lbas[hbit - 1]; 11120713e232SGeorge Wilson uint64_t max_size = metaslab_block_maxsize(msp); 111380eb36f2SGeorge Wilson 11140713e232SGeorge Wilson ASSERT(MUTEX_HELD(&msp->ms_lock)); 11150713e232SGeorge Wilson ASSERT3U(avl_numnodes(t), ==, avl_numnodes(&msp->ms_size_tree)); 111680eb36f2SGeorge Wilson 111780eb36f2SGeorge Wilson if (max_size < size) 111880eb36f2SGeorge Wilson return (-1ULL); 111980eb36f2SGeorge Wilson 11200713e232SGeorge Wilson rsearch.rs_start = *cursor; 11210713e232SGeorge Wilson rsearch.rs_end = *cursor + size; 112280eb36f2SGeorge Wilson 11230713e232SGeorge Wilson rs = avl_find(t, &rsearch, &where); 11240713e232SGeorge Wilson if (rs == NULL || (rs->rs_end - rs->rs_start) < size) { 11250713e232SGeorge Wilson t = &msp->ms_size_tree; 112680eb36f2SGeorge Wilson 11270713e232SGeorge Wilson rsearch.rs_start = 0; 11280713e232SGeorge Wilson rsearch.rs_end = MIN(max_size, 11298d18220dSMark J Musante 1ULL << (hbit + metaslab_ndf_clump_shift)); 11300713e232SGeorge Wilson rs = avl_find(t, &rsearch, &where); 11310713e232SGeorge Wilson if (rs == NULL) 11320713e232SGeorge Wilson rs = avl_nearest(t, where, AVL_AFTER); 11330713e232SGeorge Wilson ASSERT(rs != NULL); 113480eb36f2SGeorge Wilson } 113580eb36f2SGeorge Wilson 11360713e232SGeorge Wilson if ((rs->rs_end - rs->rs_start) >= size) { 11370713e232SGeorge Wilson *cursor = rs->rs_start + size; 11380713e232SGeorge Wilson return (rs->rs_start); 113980eb36f2SGeorge Wilson } 114080eb36f2SGeorge Wilson return (-1ULL); 114180eb36f2SGeorge Wilson } 114280eb36f2SGeorge Wilson 11430713e232SGeorge Wilson static metaslab_ops_t metaslab_ndf_ops = { 11442e4c9986SGeorge Wilson metaslab_ndf_alloc 1145d6e555bdSGeorge Wilson }; 1146d6e555bdSGeorge Wilson 11470713e232SGeorge Wilson metaslab_ops_t *zfs_metaslab_ops = &metaslab_df_ops; 1148d6e555bdSGeorge Wilson 1149d6e555bdSGeorge Wilson /* 1150ecc2d604Sbonwick * ========================================================================== 1151ecc2d604Sbonwick * Metaslabs 1152ecc2d604Sbonwick * ========================================================================== 1153ecc2d604Sbonwick */ 11540713e232SGeorge Wilson 11550713e232SGeorge Wilson /* 11560713e232SGeorge Wilson * Wait for any in-progress metaslab loads to complete. 
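 * Callers hold ms_lock and typically pair this with metaslab_load(); a
 * sketch of the pattern used by metaslab_activate() and metaslab_preload()
 * below:
 *
 *	metaslab_load_wait(msp);
 *	if (!msp->ms_loaded)
 *		(void) metaslab_load(msp);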
11570713e232SGeorge Wilson */ 11580713e232SGeorge Wilson void 11590713e232SGeorge Wilson metaslab_load_wait(metaslab_t *msp) 11600713e232SGeorge Wilson { 11610713e232SGeorge Wilson ASSERT(MUTEX_HELD(&msp->ms_lock)); 11620713e232SGeorge Wilson 11630713e232SGeorge Wilson while (msp->ms_loading) { 11640713e232SGeorge Wilson ASSERT(!msp->ms_loaded); 11650713e232SGeorge Wilson cv_wait(&msp->ms_load_cv, &msp->ms_lock); 11660713e232SGeorge Wilson } 11670713e232SGeorge Wilson } 11680713e232SGeorge Wilson 11690713e232SGeorge Wilson int 11700713e232SGeorge Wilson metaslab_load(metaslab_t *msp) 11710713e232SGeorge Wilson { 11720713e232SGeorge Wilson int error = 0; 11730713e232SGeorge Wilson 11740713e232SGeorge Wilson ASSERT(MUTEX_HELD(&msp->ms_lock)); 11750713e232SGeorge Wilson ASSERT(!msp->ms_loaded); 11760713e232SGeorge Wilson ASSERT(!msp->ms_loading); 11770713e232SGeorge Wilson 11780713e232SGeorge Wilson msp->ms_loading = B_TRUE; 11790713e232SGeorge Wilson 11800713e232SGeorge Wilson /* 11810713e232SGeorge Wilson * If the space map has not been allocated yet, then treat 11820713e232SGeorge Wilson * all the space in the metaslab as free and add it to the 11830713e232SGeorge Wilson * ms_tree. 11840713e232SGeorge Wilson */ 11850713e232SGeorge Wilson if (msp->ms_sm != NULL) 11860713e232SGeorge Wilson error = space_map_load(msp->ms_sm, msp->ms_tree, SM_FREE); 11870713e232SGeorge Wilson else 11880713e232SGeorge Wilson range_tree_add(msp->ms_tree, msp->ms_start, msp->ms_size); 11890713e232SGeorge Wilson 11900713e232SGeorge Wilson msp->ms_loaded = (error == 0); 11910713e232SGeorge Wilson msp->ms_loading = B_FALSE; 11920713e232SGeorge Wilson 11930713e232SGeorge Wilson if (msp->ms_loaded) { 11940713e232SGeorge Wilson for (int t = 0; t < TXG_DEFER_SIZE; t++) { 11950713e232SGeorge Wilson range_tree_walk(msp->ms_defertree[t], 11960713e232SGeorge Wilson range_tree_remove, msp->ms_tree); 11970713e232SGeorge Wilson } 11980713e232SGeorge Wilson } 11990713e232SGeorge Wilson cv_broadcast(&msp->ms_load_cv); 12000713e232SGeorge Wilson return (error); 12010713e232SGeorge Wilson } 12020713e232SGeorge Wilson 12030713e232SGeorge Wilson void 12040713e232SGeorge Wilson metaslab_unload(metaslab_t *msp) 12050713e232SGeorge Wilson { 12060713e232SGeorge Wilson ASSERT(MUTEX_HELD(&msp->ms_lock)); 12070713e232SGeorge Wilson range_tree_vacate(msp->ms_tree, NULL, NULL); 12080713e232SGeorge Wilson msp->ms_loaded = B_FALSE; 12090713e232SGeorge Wilson msp->ms_weight &= ~METASLAB_ACTIVE_MASK; 12100713e232SGeorge Wilson } 12110713e232SGeorge Wilson 12121e9bd7ecSPrakash Surya int 12131e9bd7ecSPrakash Surya metaslab_init(metaslab_group_t *mg, uint64_t id, uint64_t object, uint64_t txg, 12141e9bd7ecSPrakash Surya metaslab_t **msp) 1215ecc2d604Sbonwick { 1216ecc2d604Sbonwick vdev_t *vd = mg->mg_vd; 12170713e232SGeorge Wilson objset_t *mos = vd->vdev_spa->spa_meta_objset; 12181e9bd7ecSPrakash Surya metaslab_t *ms; 12191e9bd7ecSPrakash Surya int error; 1220ecc2d604Sbonwick 12211e9bd7ecSPrakash Surya ms = kmem_zalloc(sizeof (metaslab_t), KM_SLEEP); 12221e9bd7ecSPrakash Surya mutex_init(&ms->ms_lock, NULL, MUTEX_DEFAULT, NULL); 12231e9bd7ecSPrakash Surya cv_init(&ms->ms_load_cv, NULL, CV_DEFAULT, NULL); 12241e9bd7ecSPrakash Surya ms->ms_id = id; 12251e9bd7ecSPrakash Surya ms->ms_start = id << vd->vdev_ms_shift; 12261e9bd7ecSPrakash Surya ms->ms_size = 1ULL << vd->vdev_ms_shift; 1227ecc2d604Sbonwick 1228ecc2d604Sbonwick /* 12290713e232SGeorge Wilson * We only open space map objects that already exist. 
All others 12300713e232SGeorge Wilson * will be opened when we finally allocate an object for it. 12310713e232SGeorge Wilson */ 12320713e232SGeorge Wilson if (object != 0) { 12331e9bd7ecSPrakash Surya error = space_map_open(&ms->ms_sm, mos, object, ms->ms_start, 12341e9bd7ecSPrakash Surya ms->ms_size, vd->vdev_ashift, &ms->ms_lock); 12351e9bd7ecSPrakash Surya 12361e9bd7ecSPrakash Surya if (error != 0) { 12371e9bd7ecSPrakash Surya kmem_free(ms, sizeof (metaslab_t)); 12381e9bd7ecSPrakash Surya return (error); 12391e9bd7ecSPrakash Surya } 12401e9bd7ecSPrakash Surya 12411e9bd7ecSPrakash Surya ASSERT(ms->ms_sm != NULL); 12420713e232SGeorge Wilson } 12430713e232SGeorge Wilson 12440713e232SGeorge Wilson /* 12450713e232SGeorge Wilson * We create the main range tree here, but we don't create the 12460713e232SGeorge Wilson * alloctree and freetree until metaslab_sync_done(). This serves 1247ecc2d604Sbonwick * two purposes: it allows metaslab_sync_done() to detect the 1248ecc2d604Sbonwick * addition of new space; and for debugging, it ensures that we'd 1249ecc2d604Sbonwick * data fault on any attempt to use this metaslab before it's ready. 1250ecc2d604Sbonwick */ 12511e9bd7ecSPrakash Surya ms->ms_tree = range_tree_create(&metaslab_rt_ops, ms, &ms->ms_lock); 12521e9bd7ecSPrakash Surya metaslab_group_add(mg, ms); 1253ecc2d604Sbonwick 12541e9bd7ecSPrakash Surya ms->ms_fragmentation = metaslab_fragmentation(ms); 12551e9bd7ecSPrakash Surya ms->ms_ops = mg->mg_class->mc_ops; 1256b24ab676SJeff Bonwick 1257ecc2d604Sbonwick /* 1258ecc2d604Sbonwick * If we're opening an existing pool (txg == 0) or creating 1259ecc2d604Sbonwick * a new one (txg == TXG_INITIAL), all space is available now. 1260ecc2d604Sbonwick * If we're adding space to an existing pool, the new space 1261ecc2d604Sbonwick * does not become available until after this txg has synced. 1262ecc2d604Sbonwick */ 1263ecc2d604Sbonwick if (txg <= TXG_INITIAL) 12641e9bd7ecSPrakash Surya metaslab_sync_done(ms, 0); 1265ecc2d604Sbonwick 12660713e232SGeorge Wilson /* 12670713e232SGeorge Wilson * If metaslab_debug_load is set and we're initializing a metaslab 12680713e232SGeorge Wilson * that has an allocated space_map object then load its space 12690713e232SGeorge Wilson * map so that we can verify frees.
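 * Loading at init time makes the pool open slower, but it means the
 * in-core range trees are available to sanity-check frees from the very
 * first txg.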
12700713e232SGeorge Wilson */ 12711e9bd7ecSPrakash Surya if (metaslab_debug_load && ms->ms_sm != NULL) { 12721e9bd7ecSPrakash Surya mutex_enter(&ms->ms_lock); 12731e9bd7ecSPrakash Surya VERIFY0(metaslab_load(ms)); 12741e9bd7ecSPrakash Surya mutex_exit(&ms->ms_lock); 12750713e232SGeorge Wilson } 12760713e232SGeorge Wilson 1277ecc2d604Sbonwick if (txg != 0) { 1278ecc2d604Sbonwick vdev_dirty(vd, 0, NULL, txg); 12791e9bd7ecSPrakash Surya vdev_dirty(vd, VDD_METASLAB, ms, txg); 1280ecc2d604Sbonwick } 1281ecc2d604Sbonwick 12821e9bd7ecSPrakash Surya *msp = ms; 12831e9bd7ecSPrakash Surya 12841e9bd7ecSPrakash Surya return (0); 1285ecc2d604Sbonwick } 1286ecc2d604Sbonwick 1287ecc2d604Sbonwick void 1288ecc2d604Sbonwick metaslab_fini(metaslab_t *msp) 1289ecc2d604Sbonwick { 1290ecc2d604Sbonwick metaslab_group_t *mg = msp->ms_group; 1291ecc2d604Sbonwick 1292ecc2d604Sbonwick metaslab_group_remove(mg, msp); 1293ecc2d604Sbonwick 1294ecc2d604Sbonwick mutex_enter(&msp->ms_lock); 1295ecc2d604Sbonwick 12960713e232SGeorge Wilson VERIFY(msp->ms_group == NULL); 12970713e232SGeorge Wilson vdev_space_update(mg->mg_vd, -space_map_allocated(msp->ms_sm), 12980713e232SGeorge Wilson 0, -msp->ms_size); 12990713e232SGeorge Wilson space_map_close(msp->ms_sm); 13000713e232SGeorge Wilson 13010713e232SGeorge Wilson metaslab_unload(msp); 13020713e232SGeorge Wilson range_tree_destroy(msp->ms_tree); 1303ecc2d604Sbonwick 1304468c413aSTim Haley for (int t = 0; t < TXG_SIZE; t++) { 13050713e232SGeorge Wilson range_tree_destroy(msp->ms_alloctree[t]); 13060713e232SGeorge Wilson range_tree_destroy(msp->ms_freetree[t]); 1307ecc2d604Sbonwick } 1308ecc2d604Sbonwick 130916a4a807SGeorge Wilson for (int t = 0; t < TXG_DEFER_SIZE; t++) { 13100713e232SGeorge Wilson range_tree_destroy(msp->ms_defertree[t]); 131116a4a807SGeorge Wilson } 1312468c413aSTim Haley 1313fb09f5aaSMadhav Suresh ASSERT0(msp->ms_deferspace); 1314468c413aSTim Haley 1315ecc2d604Sbonwick mutex_exit(&msp->ms_lock); 13160713e232SGeorge Wilson cv_destroy(&msp->ms_load_cv); 13175ad82045Snd150628 mutex_destroy(&msp->ms_lock); 1318ecc2d604Sbonwick 1319ecc2d604Sbonwick kmem_free(msp, sizeof (metaslab_t)); 1320ecc2d604Sbonwick } 1321ecc2d604Sbonwick 13222e4c9986SGeorge Wilson #define FRAGMENTATION_TABLE_SIZE 17 13232e4c9986SGeorge Wilson 13240713e232SGeorge Wilson /* 13252e4c9986SGeorge Wilson * This table defines a segment size based fragmentation metric that will 13262e4c9986SGeorge Wilson * allow each metaslab to derive its own fragmentation value. This is done 13272e4c9986SGeorge Wilson * by calculating the space in each bucket of the spacemap histogram and 13282e4c9986SGeorge Wilson * multiplying that by the fragmentation metric in this table. Doing 13292e4c9986SGeorge Wilson * this for all buckets and dividing it by the total amount of free 13302e4c9986SGeorge Wilson * space in this metaslab (i.e. the total free space in all buckets) gives 13312e4c9986SGeorge Wilson * us the fragmentation metric. This means that a high fragmentation metric 13322e4c9986SGeorge Wilson * equates to most of the free space being comprised of small segments. 13332e4c9986SGeorge Wilson * Conversely, if the metric is low, then most of the free space is in 13342e4c9986SGeorge Wilson * large segments. A 10% change in fragmentation equates to approximately 13352e4c9986SGeorge Wilson * double the number of segments. 13360713e232SGeorge Wilson * 13372e4c9986SGeorge Wilson * This table defines 0% fragmented space using 16MB segments.
Testing has 13382e4c9986SGeorge Wilson * shown that segments that are greater than or equal to 16MB do not suffer 13392e4c9986SGeorge Wilson * from drastic performance problems. Using this value, we derive the rest 13402e4c9986SGeorge Wilson * of the table. Since the fragmentation value is never stored on disk, it 13412e4c9986SGeorge Wilson * is possible to change these calculations in the future. 13422e4c9986SGeorge Wilson */ 13432e4c9986SGeorge Wilson int zfs_frag_table[FRAGMENTATION_TABLE_SIZE] = { 13442e4c9986SGeorge Wilson 100, /* 512B */ 13452e4c9986SGeorge Wilson 100, /* 1K */ 13462e4c9986SGeorge Wilson 98, /* 2K */ 13472e4c9986SGeorge Wilson 95, /* 4K */ 13482e4c9986SGeorge Wilson 90, /* 8K */ 13492e4c9986SGeorge Wilson 80, /* 16K */ 13502e4c9986SGeorge Wilson 70, /* 32K */ 13512e4c9986SGeorge Wilson 60, /* 64K */ 13522e4c9986SGeorge Wilson 50, /* 128K */ 13532e4c9986SGeorge Wilson 40, /* 256K */ 13542e4c9986SGeorge Wilson 30, /* 512K */ 13552e4c9986SGeorge Wilson 20, /* 1M */ 13562e4c9986SGeorge Wilson 15, /* 2M */ 13572e4c9986SGeorge Wilson 10, /* 4M */ 13582e4c9986SGeorge Wilson 5, /* 8M */ 13592e4c9986SGeorge Wilson 0 /* 16M */ 13602e4c9986SGeorge Wilson }; 13612e4c9986SGeorge Wilson 13622e4c9986SGeorge Wilson /* 13632e4c9986SGeorge Wilson * Calculate the metaslab's fragmentation metric. A return value 13642e4c9986SGeorge Wilson * of ZFS_FRAG_INVALID means that the metaslab has not been upgraded and does 13652e4c9986SGeorge Wilson * not support this metric. Otherwise, the return value should be in the 13662e4c9986SGeorge Wilson * range [0, 100]. 13670713e232SGeorge Wilson */ 13680713e232SGeorge Wilson static uint64_t 13692e4c9986SGeorge Wilson metaslab_fragmentation(metaslab_t *msp) 13700713e232SGeorge Wilson { 13712e4c9986SGeorge Wilson spa_t *spa = msp->ms_group->mg_vd->vdev_spa; 13722e4c9986SGeorge Wilson uint64_t fragmentation = 0; 13732e4c9986SGeorge Wilson uint64_t total = 0; 13742e4c9986SGeorge Wilson boolean_t feature_enabled = spa_feature_is_enabled(spa, 13752e4c9986SGeorge Wilson SPA_FEATURE_SPACEMAP_HISTOGRAM); 13762e4c9986SGeorge Wilson 13772e4c9986SGeorge Wilson if (!feature_enabled) 13782e4c9986SGeorge Wilson return (ZFS_FRAG_INVALID); 13790713e232SGeorge Wilson 13800713e232SGeorge Wilson /* 13812e4c9986SGeorge Wilson * A null space map means that the entire metaslab is free 13822e4c9986SGeorge Wilson * and thus is not fragmented. 13830713e232SGeorge Wilson */ 13842e4c9986SGeorge Wilson if (msp->ms_sm == NULL) 13850713e232SGeorge Wilson return (0); 13860713e232SGeorge Wilson 13872e4c9986SGeorge Wilson /* 13882e4c9986SGeorge Wilson * If this metaslab's space_map has not been upgraded, flag it 13892e4c9986SGeorge Wilson * so that we upgrade next time we encounter it.
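 * Note that we can only request the upgrade when the pool is writeable;
 * on a read-only import we simply return ZFS_FRAG_INVALID without
 * dirtying anything.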
13902e4c9986SGeorge Wilson */ 13912e4c9986SGeorge Wilson if (msp->ms_sm->sm_dbuf->db_size != sizeof (space_map_phys_t)) { 13922e4c9986SGeorge Wilson uint64_t txg = spa_syncing_txg(spa); 13932e4c9986SGeorge Wilson vdev_t *vd = msp->ms_group->mg_vd; 13942e4c9986SGeorge Wilson 1395b1be2892SMatthew Ahrens if (spa_writeable(spa)) { 13962e4c9986SGeorge Wilson msp->ms_condense_wanted = B_TRUE; 13972e4c9986SGeorge Wilson vdev_dirty(vd, VDD_METASLAB, msp, txg + 1); 13982e4c9986SGeorge Wilson spa_dbgmsg(spa, "txg %llu, requesting force condense: " 13992e4c9986SGeorge Wilson "msp %p, vd %p", txg, msp, vd); 1400b1be2892SMatthew Ahrens } 14012e4c9986SGeorge Wilson return (ZFS_FRAG_INVALID); 14022e4c9986SGeorge Wilson } 14032e4c9986SGeorge Wilson 14042e4c9986SGeorge Wilson for (int i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++) { 14052e4c9986SGeorge Wilson uint64_t space = 0; 14062e4c9986SGeorge Wilson uint8_t shift = msp->ms_sm->sm_shift; 14072e4c9986SGeorge Wilson int idx = MIN(shift - SPA_MINBLOCKSHIFT + i, 14082e4c9986SGeorge Wilson FRAGMENTATION_TABLE_SIZE - 1); 14092e4c9986SGeorge Wilson 14100713e232SGeorge Wilson if (msp->ms_sm->sm_phys->smp_histogram[i] == 0) 14110713e232SGeorge Wilson continue; 14120713e232SGeorge Wilson 14132e4c9986SGeorge Wilson space = msp->ms_sm->sm_phys->smp_histogram[i] << (i + shift); 14142e4c9986SGeorge Wilson total += space; 14152e4c9986SGeorge Wilson 14162e4c9986SGeorge Wilson ASSERT3U(idx, <, FRAGMENTATION_TABLE_SIZE); 14172e4c9986SGeorge Wilson fragmentation += space * zfs_frag_table[idx]; 14180713e232SGeorge Wilson } 1419ecc2d604Sbonwick 14202e4c9986SGeorge Wilson if (total > 0) 14212e4c9986SGeorge Wilson fragmentation /= total; 14222e4c9986SGeorge Wilson ASSERT3U(fragmentation, <=, 100); 14232e4c9986SGeorge Wilson return (fragmentation); 14242e4c9986SGeorge Wilson } 14252e4c9986SGeorge Wilson 14262e4c9986SGeorge Wilson /* 14272e4c9986SGeorge Wilson * Compute a weight -- a selection preference value -- for the given metaslab. 14282e4c9986SGeorge Wilson * This is based on the amount of free space, the level of fragmentation, 14292e4c9986SGeorge Wilson * the LBA range, and whether the metaslab is loaded. 14302e4c9986SGeorge Wilson */ 1431fa9e4066Sahrens static uint64_t 1432ecc2d604Sbonwick metaslab_weight(metaslab_t *msp) 1433fa9e4066Sahrens { 143444cd46caSbillm metaslab_group_t *mg = msp->ms_group; 143544cd46caSbillm vdev_t *vd = mg->mg_vd; 1436ecc2d604Sbonwick uint64_t weight, space; 1437fa9e4066Sahrens 1438fa9e4066Sahrens ASSERT(MUTEX_HELD(&msp->ms_lock)); 1439fa9e4066Sahrens 1440ecc2d604Sbonwick /* 14419eb57f7fSGeorge Wilson * This vdev is in the process of being removed so there is nothing 14429eb57f7fSGeorge Wilson * for us to do here. 14439eb57f7fSGeorge Wilson */ 14449eb57f7fSGeorge Wilson if (vd->vdev_removing) { 14450713e232SGeorge Wilson ASSERT0(space_map_allocated(msp->ms_sm)); 14469eb57f7fSGeorge Wilson ASSERT0(vd->vdev_ms_shift); 14479eb57f7fSGeorge Wilson return (0); 14489eb57f7fSGeorge Wilson } 14499eb57f7fSGeorge Wilson 14509eb57f7fSGeorge Wilson /* 1451ecc2d604Sbonwick * The baseline weight is the metaslab's free space. 
1452ecc2d604Sbonwick */ 14530713e232SGeorge Wilson space = msp->ms_size - space_map_allocated(msp->ms_sm); 14542e4c9986SGeorge Wilson 14552e4c9986SGeorge Wilson msp->ms_fragmentation = metaslab_fragmentation(msp); 14562e4c9986SGeorge Wilson if (metaslab_fragmentation_factor_enabled && 14572e4c9986SGeorge Wilson msp->ms_fragmentation != ZFS_FRAG_INVALID) { 14582e4c9986SGeorge Wilson /* 14592e4c9986SGeorge Wilson * Use the fragmentation information to inversely scale 14602e4c9986SGeorge Wilson * down the baseline weight. We need to ensure that we 14612e4c9986SGeorge Wilson * don't exclude this metaslab completely when it's 100% 14622e4c9986SGeorge Wilson * fragmented. To avoid this we reduce the fragmented value 14632e4c9986SGeorge Wilson * by 1. 14642e4c9986SGeorge Wilson */ 14652e4c9986SGeorge Wilson space = (space * (100 - (msp->ms_fragmentation - 1))) / 100; 14662e4c9986SGeorge Wilson 14672e4c9986SGeorge Wilson /* 14682e4c9986SGeorge Wilson * If space < SPA_MINBLOCKSIZE, then we will not allocate from 14692e4c9986SGeorge Wilson * this metaslab again. The fragmentation metric may have 14702e4c9986SGeorge Wilson * decreased the space to something smaller than 14712e4c9986SGeorge Wilson * SPA_MINBLOCKSIZE, so reset the space to SPA_MINBLOCKSIZE 14722e4c9986SGeorge Wilson * so that we can consume any remaining space. 14732e4c9986SGeorge Wilson */ 14742e4c9986SGeorge Wilson if (space > 0 && space < SPA_MINBLOCKSIZE) 14752e4c9986SGeorge Wilson space = SPA_MINBLOCKSIZE; 14762e4c9986SGeorge Wilson } 1477ecc2d604Sbonwick weight = space; 1478ecc2d604Sbonwick 1479ecc2d604Sbonwick /* 1480ecc2d604Sbonwick * Modern disks have uniform bit density and constant angular velocity. 1481ecc2d604Sbonwick * Therefore, the outer recording zones are faster (higher bandwidth) 1482ecc2d604Sbonwick * than the inner zones by the ratio of outer to inner track diameter, 1483ecc2d604Sbonwick * which is typically around 2:1. We account for this by assigning 1484ecc2d604Sbonwick * higher weight to lower metaslabs (multiplier ranging from 2x to 1x). 1485ecc2d604Sbonwick * In effect, this means that we'll select the metaslab with the most 1486ecc2d604Sbonwick * free bandwidth rather than simply the one with the most free space. 1487ecc2d604Sbonwick */ 14882e4c9986SGeorge Wilson if (metaslab_lba_weighting_enabled) { 14890713e232SGeorge Wilson weight = 2 * weight - (msp->ms_id * weight) / vd->vdev_ms_count; 1490ecc2d604Sbonwick ASSERT(weight >= space && weight <= 2 * space); 14912e4c9986SGeorge Wilson } 1492ecc2d604Sbonwick 1493ecc2d604Sbonwick /* 149480eb36f2SGeorge Wilson * If this metaslab is one we're actively using, adjust its 149580eb36f2SGeorge Wilson * weight to make it preferable to any inactive metaslab so 14962e4c9986SGeorge Wilson * we'll polish it off. If the fragmentation on this metaslab 14972e4c9986SGeorge Wilson * has exceeded our threshold, then don't mark it active.
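 * Preserving the METASLAB_WEIGHT_PRIMARY/SECONDARY bits here sorts the
 * metaslab ahead of every inactive metaslab in its group, since those
 * bits occupy the two highest bits of the weight.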
1498ecc2d604Sbonwick */ 14992e4c9986SGeorge Wilson if (msp->ms_loaded && msp->ms_fragmentation != ZFS_FRAG_INVALID && 15002e4c9986SGeorge Wilson msp->ms_fragmentation <= zfs_metaslab_fragmentation_threshold) { 150144cd46caSbillm weight |= (msp->ms_weight & METASLAB_ACTIVE_MASK); 150280eb36f2SGeorge Wilson } 15030713e232SGeorge Wilson 1504ecc2d604Sbonwick return (weight); 1505fa9e4066Sahrens } 1506ecc2d604Sbonwick 1507ecc2d604Sbonwick static int 150809c9d376SGeorge Wilson metaslab_activate(metaslab_t *msp, uint64_t activation_weight) 1509ecc2d604Sbonwick { 1510ecc2d604Sbonwick ASSERT(MUTEX_HELD(&msp->ms_lock)); 1511ecc2d604Sbonwick 151244cd46caSbillm if ((msp->ms_weight & METASLAB_ACTIVE_MASK) == 0) { 15130713e232SGeorge Wilson metaslab_load_wait(msp); 15140713e232SGeorge Wilson if (!msp->ms_loaded) { 15150713e232SGeorge Wilson int error = metaslab_load(msp); 1516ecc2d604Sbonwick if (error) { 1517ecc2d604Sbonwick metaslab_group_sort(msp->ms_group, msp, 0); 1518ecc2d604Sbonwick return (error); 1519ecc2d604Sbonwick } 1520468c413aSTim Haley } 1521d6e555bdSGeorge Wilson 1522ecc2d604Sbonwick metaslab_group_sort(msp->ms_group, msp, 152344cd46caSbillm msp->ms_weight | activation_weight); 1524ecc2d604Sbonwick } 15250713e232SGeorge Wilson ASSERT(msp->ms_loaded); 152644cd46caSbillm ASSERT(msp->ms_weight & METASLAB_ACTIVE_MASK); 1527ecc2d604Sbonwick 1528ecc2d604Sbonwick return (0); 1529ecc2d604Sbonwick } 1530ecc2d604Sbonwick 1531ecc2d604Sbonwick static void 1532ecc2d604Sbonwick metaslab_passivate(metaslab_t *msp, uint64_t size) 1533ecc2d604Sbonwick { 15345f5f7a6fSahrens /* 15355f5f7a6fSahrens * If size < SPA_MINBLOCKSIZE, then we will not allocate from 15365f5f7a6fSahrens * this metaslab again. In that case, it had better be empty, 15375f5f7a6fSahrens * or we would be leaving space on the table. 15385f5f7a6fSahrens */ 15390713e232SGeorge Wilson ASSERT(size >= SPA_MINBLOCKSIZE || range_tree_space(msp->ms_tree) == 0); 154044cd46caSbillm metaslab_group_sort(msp->ms_group, msp, MIN(msp->ms_weight, size)); 154144cd46caSbillm ASSERT((msp->ms_weight & METASLAB_ACTIVE_MASK) == 0); 1542ecc2d604Sbonwick } 1543ecc2d604Sbonwick 15440713e232SGeorge Wilson static void 15450713e232SGeorge Wilson metaslab_preload(void *arg) 15460713e232SGeorge Wilson { 15470713e232SGeorge Wilson metaslab_t *msp = arg; 15480713e232SGeorge Wilson spa_t *spa = msp->ms_group->mg_vd->vdev_spa; 15490713e232SGeorge Wilson 155030beaff4SGeorge Wilson ASSERT(!MUTEX_HELD(&msp->ms_group->mg_lock)); 155130beaff4SGeorge Wilson 15520713e232SGeorge Wilson mutex_enter(&msp->ms_lock); 15530713e232SGeorge Wilson metaslab_load_wait(msp); 15540713e232SGeorge Wilson if (!msp->ms_loaded) 15550713e232SGeorge Wilson (void) metaslab_load(msp); 15560713e232SGeorge Wilson 1557ecc2d604Sbonwick /* 15580713e232SGeorge Wilson * Set the ms_access_txg value so that we don't unload it right away. 
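 * The metaslab will then stay loaded for at least metaslab_unload_delay
 * txgs beyond the currently syncing txg before metaslab_sync_done() is
 * allowed to unload it.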
15590713e232SGeorge Wilson */ 15600713e232SGeorge Wilson msp->ms_access_txg = spa_syncing_txg(spa) + metaslab_unload_delay + 1; 15610713e232SGeorge Wilson mutex_exit(&msp->ms_lock); 15620713e232SGeorge Wilson } 15630713e232SGeorge Wilson 15640713e232SGeorge Wilson static void 15650713e232SGeorge Wilson metaslab_group_preload(metaslab_group_t *mg) 15660713e232SGeorge Wilson { 15670713e232SGeorge Wilson spa_t *spa = mg->mg_vd->vdev_spa; 15680713e232SGeorge Wilson metaslab_t *msp; 15690713e232SGeorge Wilson avl_tree_t *t = &mg->mg_metaslab_tree; 15700713e232SGeorge Wilson int m = 0; 15710713e232SGeorge Wilson 15720713e232SGeorge Wilson if (spa_shutting_down(spa) || !metaslab_preload_enabled) { 15730713e232SGeorge Wilson taskq_wait(mg->mg_taskq); 15740713e232SGeorge Wilson return; 15750713e232SGeorge Wilson } 15760713e232SGeorge Wilson 157730beaff4SGeorge Wilson mutex_enter(&mg->mg_lock); 15780713e232SGeorge Wilson /* 157930beaff4SGeorge Wilson * Load the next potential metaslabs 15800713e232SGeorge Wilson */ 158130beaff4SGeorge Wilson msp = avl_first(t); 158230beaff4SGeorge Wilson while (msp != NULL) { 158330beaff4SGeorge Wilson metaslab_t *msp_next = AVL_NEXT(t, msp); 15840713e232SGeorge Wilson 15852e4c9986SGeorge Wilson /* 15862e4c9986SGeorge Wilson * We preload only the maximum number of metaslabs specified 15872e4c9986SGeorge Wilson * by metaslab_preload_limit. If a metaslab is being forced 15882e4c9986SGeorge Wilson * to condense then we preload it too. This will ensure 15892e4c9986SGeorge Wilson * that force condensing happens in the next txg. 15902e4c9986SGeorge Wilson */ 15912e4c9986SGeorge Wilson if (++m > metaslab_preload_limit && !msp->ms_condense_wanted) { 15922e4c9986SGeorge Wilson msp = msp_next; 15932e4c9986SGeorge Wilson continue; 15942e4c9986SGeorge Wilson } 15950713e232SGeorge Wilson 159630beaff4SGeorge Wilson /* 159730beaff4SGeorge Wilson * We must drop the metaslab group lock here to preserve 159830beaff4SGeorge Wilson * lock ordering with the ms_lock (when grabbing both 159930beaff4SGeorge Wilson * the mg_lock and the ms_lock, the ms_lock must be taken 160030beaff4SGeorge Wilson * first). As a result, it is possible that the ordering 160130beaff4SGeorge Wilson * of the metaslabs within the avl tree may change before 160230beaff4SGeorge Wilson * we reacquire the lock. The metaslab cannot be removed from 160330beaff4SGeorge Wilson * the tree while we're in syncing context so it is safe to 160430beaff4SGeorge Wilson * drop the mg_lock here. If the metaslabs are reordered 160530beaff4SGeorge Wilson * nothing will break -- we just may end up loading a 160630beaff4SGeorge Wilson * less than optimal one. 160730beaff4SGeorge Wilson */ 160830beaff4SGeorge Wilson mutex_exit(&mg->mg_lock); 16090713e232SGeorge Wilson VERIFY(taskq_dispatch(mg->mg_taskq, metaslab_preload, 16100713e232SGeorge Wilson msp, TQ_SLEEP) != NULL); 161130beaff4SGeorge Wilson mutex_enter(&mg->mg_lock); 161230beaff4SGeorge Wilson msp = msp_next; 16130713e232SGeorge Wilson } 16140713e232SGeorge Wilson mutex_exit(&mg->mg_lock); 16150713e232SGeorge Wilson } 16160713e232SGeorge Wilson 16170713e232SGeorge Wilson /* 16180713e232SGeorge Wilson * Determine if the space map's on-disk footprint is past our tolerance 16190713e232SGeorge Wilson * for inefficiency. We would like to use the following criteria to make 16200713e232SGeorge Wilson * our decision: 162116a4a807SGeorge Wilson * 162216a4a807SGeorge Wilson * 1. 
The size of the space map object should not dramatically increase as a 16230713e232SGeorge Wilson * result of writing out the free space range tree. 162416a4a807SGeorge Wilson * 162516a4a807SGeorge Wilson * 2. The minimal on-disk space map representation is zfs_condense_pct/100 16260713e232SGeorge Wilson * times the size of the free space range tree representation 16270713e232SGeorge Wilson * (i.e. zfs_condense_pct = 110 and in-core = 1MB, minimal = 1.1MB). 162816a4a807SGeorge Wilson * 16292a104a52SAlex Reece * 3. The on-disk size of the space map should actually decrease. 16302a104a52SAlex Reece * 163116a4a807SGeorge Wilson * Checking the first condition is tricky since we don't want to walk 163216a4a807SGeorge Wilson * the entire AVL tree calculating the estimated on-disk size. Instead we 16330713e232SGeorge Wilson * use the size-ordered range tree in the metaslab and calculate the 16340713e232SGeorge Wilson * size required to write out the largest segment in our free tree. If the 163516a4a807SGeorge Wilson * size required to represent that segment on disk is larger than the space 163616a4a807SGeorge Wilson * map object then we avoid condensing this map. 163716a4a807SGeorge Wilson * 163816a4a807SGeorge Wilson * To determine the second criterion we use a best-case estimate and assume 163916a4a807SGeorge Wilson * each segment can be represented on-disk as a single 64-bit entry. We refer 164016a4a807SGeorge Wilson * to this best-case estimate as the space map's minimal form. 16412a104a52SAlex Reece * 16422a104a52SAlex Reece * Unfortunately, we cannot compute the on-disk size of the space map in this 16432a104a52SAlex Reece * context because we cannot accurately compute the effects of compression, etc. 16442a104a52SAlex Reece * Instead, we apply the heuristic described in the block comment for 16452a104a52SAlex Reece * zfs_metaslab_condense_block_threshold - we only condense if the space used 16462a104a52SAlex Reece * is greater than a threshold number of blocks. 164716a4a807SGeorge Wilson */ 164816a4a807SGeorge Wilson static boolean_t 164916a4a807SGeorge Wilson metaslab_should_condense(metaslab_t *msp) 165016a4a807SGeorge Wilson { 16510713e232SGeorge Wilson space_map_t *sm = msp->ms_sm; 16520713e232SGeorge Wilson range_seg_t *rs; 16532a104a52SAlex Reece uint64_t size, entries, segsz, object_size, optimal_size, record_size; 16542a104a52SAlex Reece dmu_object_info_t doi; 16552a104a52SAlex Reece uint64_t vdev_blocksize = 1 << msp->ms_group->mg_vd->vdev_ashift; 165616a4a807SGeorge Wilson 165716a4a807SGeorge Wilson ASSERT(MUTEX_HELD(&msp->ms_lock)); 16580713e232SGeorge Wilson ASSERT(msp->ms_loaded); 165916a4a807SGeorge Wilson 166016a4a807SGeorge Wilson /* 16610713e232SGeorge Wilson * Use the ms_size_tree range tree, which is ordered by size, to 16622e4c9986SGeorge Wilson * obtain the largest segment in the free tree. We always condense 16632e4c9986SGeorge Wilson * metaslabs that are empty and metaslabs for which a condense 16642e4c9986SGeorge Wilson * request has been made. 166516a4a807SGeorge Wilson */ 16660713e232SGeorge Wilson rs = avl_last(&msp->ms_size_tree); 16672e4c9986SGeorge Wilson if (rs == NULL || msp->ms_condense_wanted) 166816a4a807SGeorge Wilson return (B_TRUE); 166916a4a807SGeorge Wilson 167016a4a807SGeorge Wilson /* 167116a4a807SGeorge Wilson * Calculate the number of 64-bit entries this segment would 167216a4a807SGeorge Wilson * require when written to disk.
If this single segment would be 167316a4a807SGeorge Wilson * larger on-disk than the entire current on-disk structure, then 167416a4a807SGeorge Wilson * clearly condensing will increase the on-disk structure size. 167516a4a807SGeorge Wilson */ 16760713e232SGeorge Wilson size = (rs->rs_end - rs->rs_start) >> sm->sm_shift; 167716a4a807SGeorge Wilson entries = size / (MIN(size, SM_RUN_MAX)); 167816a4a807SGeorge Wilson segsz = entries * sizeof (uint64_t); 167916a4a807SGeorge Wilson 16802a104a52SAlex Reece optimal_size = sizeof (uint64_t) * avl_numnodes(&msp->ms_tree->rt_root); 16812a104a52SAlex Reece object_size = space_map_length(msp->ms_sm); 16822a104a52SAlex Reece 16832a104a52SAlex Reece dmu_object_info_from_db(sm->sm_dbuf, &doi); 16842a104a52SAlex Reece record_size = MAX(doi.doi_data_block_size, vdev_blocksize); 16852a104a52SAlex Reece 16862a104a52SAlex Reece return (segsz <= object_size && 16872a104a52SAlex Reece object_size >= (optimal_size * zfs_condense_pct / 100) && 16882a104a52SAlex Reece object_size > zfs_metaslab_condense_block_threshold * record_size); 168916a4a807SGeorge Wilson } 169016a4a807SGeorge Wilson 169116a4a807SGeorge Wilson /* 169216a4a807SGeorge Wilson * Condense the on-disk space map representation to its minimized form. 169316a4a807SGeorge Wilson * The minimized form consists of a small number of allocations followed by 16940713e232SGeorge Wilson * the entries of the free range tree. 169516a4a807SGeorge Wilson */ 169616a4a807SGeorge Wilson static void 169716a4a807SGeorge Wilson metaslab_condense(metaslab_t *msp, uint64_t txg, dmu_tx_t *tx) 169816a4a807SGeorge Wilson { 169916a4a807SGeorge Wilson spa_t *spa = msp->ms_group->mg_vd->vdev_spa; 17000713e232SGeorge Wilson range_tree_t *freetree = msp->ms_freetree[txg & TXG_MASK]; 17010713e232SGeorge Wilson range_tree_t *condense_tree; 17020713e232SGeorge Wilson space_map_t *sm = msp->ms_sm; 170316a4a807SGeorge Wilson 170416a4a807SGeorge Wilson ASSERT(MUTEX_HELD(&msp->ms_lock)); 170516a4a807SGeorge Wilson ASSERT3U(spa_sync_pass(spa), ==, 1); 17060713e232SGeorge Wilson ASSERT(msp->ms_loaded); 170716a4a807SGeorge Wilson 17082e4c9986SGeorge Wilson 1709daec38ecSJoe Stein spa_dbgmsg(spa, "condensing: txg %llu, msp[%llu] %p, vdev id %llu, " 1710daec38ecSJoe Stein "spa %s, smp size %llu, segments %lu, forcing condense=%s", txg, 1711daec38ecSJoe Stein msp->ms_id, msp, msp->ms_group->mg_vd->vdev_id, 1712daec38ecSJoe Stein msp->ms_group->mg_vd->vdev_spa->spa_name, 1713daec38ecSJoe Stein space_map_length(msp->ms_sm), avl_numnodes(&msp->ms_tree->rt_root), 17142e4c9986SGeorge Wilson msp->ms_condense_wanted ? "TRUE" : "FALSE"); 17152e4c9986SGeorge Wilson 17162e4c9986SGeorge Wilson msp->ms_condense_wanted = B_FALSE; 171716a4a807SGeorge Wilson 171816a4a807SGeorge Wilson /* 17190713e232SGeorge Wilson * Create a range tree that is 100% allocated. We remove segments 172016a4a807SGeorge Wilson * that have been freed in this txg, any deferred frees that exist, 172116a4a807SGeorge Wilson * and any allocation in the future. Removing segments should be 17220713e232SGeorge Wilson * a relatively inexpensive operation since we expect these trees to 17230713e232SGeorge Wilson * have a small number of nodes.
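 * Concretely, condense_tree starts out as the single segment
 * [ms_start, ms_start + ms_size) and the walks below subtract this txg's
 * freetree, every ms_defertree[], and the alloctrees of the future txgs;
 * what remains is written out as SM_ALLOC records before ms_tree is
 * appended as SM_FREE records.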
172416a4a807SGeorge Wilson */ 17250713e232SGeorge Wilson condense_tree = range_tree_create(NULL, NULL, &msp->ms_lock); 17260713e232SGeorge Wilson range_tree_add(condense_tree, msp->ms_start, msp->ms_size); 172716a4a807SGeorge Wilson 172816a4a807SGeorge Wilson /* 17290713e232SGeorge Wilson * Remove what's been freed in this txg from the condense_tree. 173016a4a807SGeorge Wilson * Since we're in sync_pass 1, we know that all the frees from 17310713e232SGeorge Wilson * this txg are in the freetree. 173216a4a807SGeorge Wilson */ 17330713e232SGeorge Wilson range_tree_walk(freetree, range_tree_remove, condense_tree); 173416a4a807SGeorge Wilson 17350713e232SGeorge Wilson for (int t = 0; t < TXG_DEFER_SIZE; t++) { 17360713e232SGeorge Wilson range_tree_walk(msp->ms_defertree[t], 17370713e232SGeorge Wilson range_tree_remove, condense_tree); 17380713e232SGeorge Wilson } 173916a4a807SGeorge Wilson 17400713e232SGeorge Wilson for (int t = 1; t < TXG_CONCURRENT_STATES; t++) { 17410713e232SGeorge Wilson range_tree_walk(msp->ms_alloctree[(txg + t) & TXG_MASK], 17420713e232SGeorge Wilson range_tree_remove, condense_tree); 17430713e232SGeorge Wilson } 174416a4a807SGeorge Wilson 174516a4a807SGeorge Wilson /* 174616a4a807SGeorge Wilson * We're about to drop the metaslab's lock thus allowing 174716a4a807SGeorge Wilson * other consumers to change its content. Set the 17480713e232SGeorge Wilson * metaslab's ms_condensing flag to ensure that 174916a4a807SGeorge Wilson * allocations on this metaslab do not occur while we're 175016a4a807SGeorge Wilson * in the middle of committing it to disk. This is only critical 17510713e232SGeorge Wilson * for the ms_tree as all other range trees use per txg 175216a4a807SGeorge Wilson * views of their content. 175316a4a807SGeorge Wilson */ 17540713e232SGeorge Wilson msp->ms_condensing = B_TRUE; 175516a4a807SGeorge Wilson 175616a4a807SGeorge Wilson mutex_exit(&msp->ms_lock); 17570713e232SGeorge Wilson space_map_truncate(sm, tx); 175816a4a807SGeorge Wilson mutex_enter(&msp->ms_lock); 175916a4a807SGeorge Wilson 176016a4a807SGeorge Wilson /* 176116a4a807SGeorge Wilson * While we would ideally like to create a space_map representation 176216a4a807SGeorge Wilson * that consists only of allocation records, doing so can be 17630713e232SGeorge Wilson * prohibitively expensive because the in-core free tree can be 176416a4a807SGeorge Wilson * large, and therefore computationally expensive to subtract 17650713e232SGeorge Wilson * from the condense_tree. Instead we sync out two trees, a cheap 17660713e232SGeorge Wilson * allocation only tree followed by the in-core free tree. While not 176716a4a807SGeorge Wilson * optimal, this is typically close to optimal, and much cheaper to 176816a4a807SGeorge Wilson * compute. 176916a4a807SGeorge Wilson */ 17700713e232SGeorge Wilson space_map_write(sm, condense_tree, SM_ALLOC, tx); 17710713e232SGeorge Wilson range_tree_vacate(condense_tree, NULL, NULL); 17720713e232SGeorge Wilson range_tree_destroy(condense_tree); 177316a4a807SGeorge Wilson 17740713e232SGeorge Wilson space_map_write(sm, msp->ms_tree, SM_FREE, tx); 17750713e232SGeorge Wilson msp->ms_condensing = B_FALSE; 177616a4a807SGeorge Wilson } 177716a4a807SGeorge Wilson 177816a4a807SGeorge Wilson /* 1779ecc2d604Sbonwick * Write a metaslab to disk in the context of the specified transaction group.
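 * This allocates the space map object on first use, then either condenses
 * the on-disk representation or appends this txg's alloctree and freetree
 * as SM_ALLOC/SM_FREE records, updates the space map and group/class
 * histograms, and finally hands the freed ranges over to freed_tree for
 * metaslab_sync_done() to process.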
1780ecc2d604Sbonwick */ 1781ecc2d604Sbonwick void 1782ecc2d604Sbonwick metaslab_sync(metaslab_t *msp, uint64_t txg) 1783ecc2d604Sbonwick { 17840713e232SGeorge Wilson metaslab_group_t *mg = msp->ms_group; 17850713e232SGeorge Wilson vdev_t *vd = mg->mg_vd; 1786ecc2d604Sbonwick spa_t *spa = vd->vdev_spa; 1787b24ab676SJeff Bonwick objset_t *mos = spa_meta_objset(spa); 17880713e232SGeorge Wilson range_tree_t *alloctree = msp->ms_alloctree[txg & TXG_MASK]; 17890713e232SGeorge Wilson range_tree_t **freetree = &msp->ms_freetree[txg & TXG_MASK]; 17900713e232SGeorge Wilson range_tree_t **freed_tree = 17910713e232SGeorge Wilson &msp->ms_freetree[TXG_CLEAN(txg) & TXG_MASK]; 1792ecc2d604Sbonwick dmu_tx_t *tx; 17930713e232SGeorge Wilson uint64_t object = space_map_object(msp->ms_sm); 1794ecc2d604Sbonwick 179588ecc943SGeorge Wilson ASSERT(!vd->vdev_ishole); 179688ecc943SGeorge Wilson 179716a4a807SGeorge Wilson /* 179816a4a807SGeorge Wilson * This metaslab has just been added so there's no work to do now. 179916a4a807SGeorge Wilson */ 18000713e232SGeorge Wilson if (*freetree == NULL) { 18010713e232SGeorge Wilson ASSERT3P(alloctree, ==, NULL); 180216a4a807SGeorge Wilson return; 180316a4a807SGeorge Wilson } 180416a4a807SGeorge Wilson 18050713e232SGeorge Wilson ASSERT3P(alloctree, !=, NULL); 18060713e232SGeorge Wilson ASSERT3P(*freetree, !=, NULL); 18070713e232SGeorge Wilson ASSERT3P(*freed_tree, !=, NULL); 180816a4a807SGeorge Wilson 18092e4c9986SGeorge Wilson /* 18102e4c9986SGeorge Wilson * Normally, we don't want to process a metaslab if there 18112e4c9986SGeorge Wilson * are no allocations or frees to perform. However, if the metaslab 18122e4c9986SGeorge Wilson * is being forced to condense we need to let it through. 18132e4c9986SGeorge Wilson */ 18140713e232SGeorge Wilson if (range_tree_space(alloctree) == 0 && 18152e4c9986SGeorge Wilson range_tree_space(*freetree) == 0 && 18162e4c9986SGeorge Wilson !msp->ms_condense_wanted) 1817468c413aSTim Haley return; 1818ecc2d604Sbonwick 1819ecc2d604Sbonwick /* 1820ecc2d604Sbonwick * The only state that can actually be changing concurrently with 18210713e232SGeorge Wilson * metaslab_sync() is the metaslab's ms_tree. No other thread can 18220713e232SGeorge Wilson * be modifying this txg's alloctree, freetree, freed_tree, or 18230713e232SGeorge Wilson * space_map_phys_t. Therefore, we only hold ms_lock to satisfy 18240713e232SGeorge Wilson * space_map ASSERTs. We drop it whenever we call into the DMU, 18250713e232SGeorge Wilson * because the DMU can call down to us (e.g. via zio_free()) at 18260713e232SGeorge Wilson * any time. 1827ecc2d604Sbonwick */ 1828468c413aSTim Haley 1829468c413aSTim Haley tx = dmu_tx_create_assigned(spa_get_dsl(spa), txg); 1830ecc2d604Sbonwick 18310713e232SGeorge Wilson if (msp->ms_sm == NULL) { 18320713e232SGeorge Wilson uint64_t new_object; 18330713e232SGeorge Wilson 18340713e232SGeorge Wilson new_object = space_map_alloc(mos, tx); 18350713e232SGeorge Wilson VERIFY3U(new_object, !=, 0); 18360713e232SGeorge Wilson 18370713e232SGeorge Wilson VERIFY0(space_map_open(&msp->ms_sm, mos, new_object, 18380713e232SGeorge Wilson msp->ms_start, msp->ms_size, vd->vdev_ashift, 18390713e232SGeorge Wilson &msp->ms_lock)); 18400713e232SGeorge Wilson ASSERT(msp->ms_sm != NULL); 1841ecc2d604Sbonwick } 1842ecc2d604Sbonwick 1843468c413aSTim Haley mutex_enter(&msp->ms_lock); 1844468c413aSTim Haley 1845b1be2892SMatthew Ahrens /* 1846b1be2892SMatthew Ahrens * Note: metaslab_condense() clears the space_map's histogram.
1847b1be2892SMatthew Ahrens * Therefore we must verify and remove this histogram before 1848b1be2892SMatthew Ahrens * condensing. 1849b1be2892SMatthew Ahrens */ 1850b1be2892SMatthew Ahrens metaslab_group_histogram_verify(mg); 1851b1be2892SMatthew Ahrens metaslab_class_histogram_verify(mg->mg_class); 1852b1be2892SMatthew Ahrens metaslab_group_histogram_remove(mg, msp); 1853b1be2892SMatthew Ahrens 18540713e232SGeorge Wilson if (msp->ms_loaded && spa_sync_pass(spa) == 1 && 185516a4a807SGeorge Wilson metaslab_should_condense(msp)) { 185616a4a807SGeorge Wilson metaslab_condense(msp, txg, tx); 185716a4a807SGeorge Wilson } else { 18580713e232SGeorge Wilson space_map_write(msp->ms_sm, alloctree, SM_ALLOC, tx); 18590713e232SGeorge Wilson space_map_write(msp->ms_sm, *freetree, SM_FREE, tx); 1860ecc2d604Sbonwick } 1861ecc2d604Sbonwick 18620713e232SGeorge Wilson if (msp->ms_loaded) { 18630713e232SGeorge Wilson /* 18640713e232SGeorge Wilson * When the space map is loaded, we have an accruate 18650713e232SGeorge Wilson * histogram in the range tree. This gives us an opportunity 18660713e232SGeorge Wilson * to bring the space map's histogram up-to-date so we clear 18670713e232SGeorge Wilson * it first before updating it. 18680713e232SGeorge Wilson */ 18690713e232SGeorge Wilson space_map_histogram_clear(msp->ms_sm); 18700713e232SGeorge Wilson space_map_histogram_add(msp->ms_sm, msp->ms_tree, tx); 18710713e232SGeorge Wilson } else { 18720713e232SGeorge Wilson /* 18730713e232SGeorge Wilson * Since the space map is not loaded we simply update the 18740713e232SGeorge Wilson * exisiting histogram with what was freed in this txg. This 18750713e232SGeorge Wilson * means that the on-disk histogram may not have an accurate 18760713e232SGeorge Wilson * view of the free space but it's close enough to allow 18770713e232SGeorge Wilson * us to make allocation decisions. 18780713e232SGeorge Wilson */ 18790713e232SGeorge Wilson space_map_histogram_add(msp->ms_sm, *freetree, tx); 18800713e232SGeorge Wilson } 18812e4c9986SGeorge Wilson metaslab_group_histogram_add(mg, msp); 18822e4c9986SGeorge Wilson metaslab_group_histogram_verify(mg); 18832e4c9986SGeorge Wilson metaslab_class_histogram_verify(mg->mg_class); 188416a4a807SGeorge Wilson 188516a4a807SGeorge Wilson /* 18860713e232SGeorge Wilson * For sync pass 1, we avoid traversing this txg's free range tree 18870713e232SGeorge Wilson * and instead will just swap the pointers for freetree and 18880713e232SGeorge Wilson * freed_tree. We can safely do this since the freed_tree is 188916a4a807SGeorge Wilson * guaranteed to be empty on the initial pass. 
189016a4a807SGeorge Wilson */ 189116a4a807SGeorge Wilson if (spa_sync_pass(spa) == 1) { 18920713e232SGeorge Wilson range_tree_swap(freetree, freed_tree); 189316a4a807SGeorge Wilson } else { 18940713e232SGeorge Wilson range_tree_vacate(*freetree, range_tree_add, *freed_tree); 189516a4a807SGeorge Wilson } 18962e4c9986SGeorge Wilson range_tree_vacate(alloctree, NULL, NULL); 189716a4a807SGeorge Wilson 18980713e232SGeorge Wilson ASSERT0(range_tree_space(msp->ms_alloctree[txg & TXG_MASK])); 18990713e232SGeorge Wilson ASSERT0(range_tree_space(msp->ms_freetree[txg & TXG_MASK])); 1900ecc2d604Sbonwick 1901ecc2d604Sbonwick mutex_exit(&msp->ms_lock); 1902ecc2d604Sbonwick 19030713e232SGeorge Wilson if (object != space_map_object(msp->ms_sm)) { 19040713e232SGeorge Wilson object = space_map_object(msp->ms_sm); 19050713e232SGeorge Wilson dmu_write(mos, vd->vdev_ms_array, sizeof (uint64_t) * 19060713e232SGeorge Wilson msp->ms_id, sizeof (uint64_t), &object, tx); 19070713e232SGeorge Wilson } 1908ecc2d604Sbonwick dmu_tx_commit(tx); 1909ecc2d604Sbonwick } 1910ecc2d604Sbonwick 1911ecc2d604Sbonwick /* 1912ecc2d604Sbonwick * Called after a transaction group has completely synced to mark 1913ecc2d604Sbonwick * all of the metaslab's free space as usable. 1914ecc2d604Sbonwick */ 1915ecc2d604Sbonwick void 1916ecc2d604Sbonwick metaslab_sync_done(metaslab_t *msp, uint64_t txg) 1917ecc2d604Sbonwick { 1918ecc2d604Sbonwick metaslab_group_t *mg = msp->ms_group; 1919ecc2d604Sbonwick vdev_t *vd = mg->mg_vd; 19200713e232SGeorge Wilson range_tree_t **freed_tree; 19210713e232SGeorge Wilson range_tree_t **defer_tree; 1922468c413aSTim Haley int64_t alloc_delta, defer_delta; 1923ecc2d604Sbonwick 192488ecc943SGeorge Wilson ASSERT(!vd->vdev_ishole); 192588ecc943SGeorge Wilson 1926ecc2d604Sbonwick mutex_enter(&msp->ms_lock); 1927ecc2d604Sbonwick 1928ecc2d604Sbonwick /* 1929ecc2d604Sbonwick * If this metaslab is just becoming available, initialize its 19300713e232SGeorge Wilson * alloctrees, freetrees, and defertree and add its capacity to 19310713e232SGeorge Wilson * the vdev. 
1932ecc2d604Sbonwick */ 19330713e232SGeorge Wilson if (msp->ms_freetree[TXG_CLEAN(txg) & TXG_MASK] == NULL) { 1934468c413aSTim Haley for (int t = 0; t < TXG_SIZE; t++) { 19350713e232SGeorge Wilson ASSERT(msp->ms_alloctree[t] == NULL); 19360713e232SGeorge Wilson ASSERT(msp->ms_freetree[t] == NULL); 19370713e232SGeorge Wilson 19380713e232SGeorge Wilson msp->ms_alloctree[t] = range_tree_create(NULL, msp, 19390713e232SGeorge Wilson &msp->ms_lock); 19400713e232SGeorge Wilson msp->ms_freetree[t] = range_tree_create(NULL, msp, 19410713e232SGeorge Wilson &msp->ms_lock); 1942ecc2d604Sbonwick } 1943468c413aSTim Haley 194416a4a807SGeorge Wilson for (int t = 0; t < TXG_DEFER_SIZE; t++) { 19450713e232SGeorge Wilson ASSERT(msp->ms_defertree[t] == NULL); 19460713e232SGeorge Wilson 19470713e232SGeorge Wilson msp->ms_defertree[t] = range_tree_create(NULL, msp, 19480713e232SGeorge Wilson &msp->ms_lock); 194916a4a807SGeorge Wilson } 195016a4a807SGeorge Wilson 19510713e232SGeorge Wilson vdev_space_update(vd, 0, 0, msp->ms_size); 1952ecc2d604Sbonwick } 1953ecc2d604Sbonwick 19540713e232SGeorge Wilson freed_tree = &msp->ms_freetree[TXG_CLEAN(txg) & TXG_MASK]; 19550713e232SGeorge Wilson defer_tree = &msp->ms_defertree[txg % TXG_DEFER_SIZE]; 19560713e232SGeorge Wilson 19570713e232SGeorge Wilson alloc_delta = space_map_alloc_delta(msp->ms_sm); 19580713e232SGeorge Wilson defer_delta = range_tree_space(*freed_tree) - 19590713e232SGeorge Wilson range_tree_space(*defer_tree); 1960468c413aSTim Haley 1961b24ab676SJeff Bonwick vdev_space_update(vd, alloc_delta + defer_delta, defer_delta, 0); 1962ecc2d604Sbonwick 19630713e232SGeorge Wilson ASSERT0(range_tree_space(msp->ms_alloctree[txg & TXG_MASK])); 19640713e232SGeorge Wilson ASSERT0(range_tree_space(msp->ms_freetree[txg & TXG_MASK])); 1965ecc2d604Sbonwick 1966ecc2d604Sbonwick /* 19670713e232SGeorge Wilson * If there's a metaslab_load() in progress, wait for it to complete 1968ecc2d604Sbonwick * so that we have a consistent view of the in-core space map. 1969ecc2d604Sbonwick */ 19700713e232SGeorge Wilson metaslab_load_wait(msp); 19719eb57f7fSGeorge Wilson 19729eb57f7fSGeorge Wilson /* 19730713e232SGeorge Wilson * Move the frees from the defer_tree back to the free 19740713e232SGeorge Wilson * range tree (if it's loaded). Swap the freed_tree and the 19750713e232SGeorge Wilson * defer_tree -- this is safe to do because we've just emptied out 19760713e232SGeorge Wilson * the defer_tree. 19779eb57f7fSGeorge Wilson */ 19780713e232SGeorge Wilson range_tree_vacate(*defer_tree, 19790713e232SGeorge Wilson msp->ms_loaded ? range_tree_add : NULL, msp->ms_tree); 19800713e232SGeorge Wilson range_tree_swap(freed_tree, defer_tree); 1981ecc2d604Sbonwick 19820713e232SGeorge Wilson space_map_update(msp->ms_sm); 1983ecc2d604Sbonwick 1984468c413aSTim Haley msp->ms_deferspace += defer_delta; 1985468c413aSTim Haley ASSERT3S(msp->ms_deferspace, >=, 0); 19860713e232SGeorge Wilson ASSERT3S(msp->ms_deferspace, <=, msp->ms_size); 1987468c413aSTim Haley if (msp->ms_deferspace != 0) { 1988468c413aSTim Haley /* 1989468c413aSTim Haley * Keep syncing this metaslab until all deferred frees 1990468c413aSTim Haley * are back in circulation. 
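 * Each deferred free spends TXG_DEFER_SIZE txgs in ms_defertree[] before
 * the vacate above returns it to circulation, so we keep dirtying the
 * metaslab for the following txg until ms_deferspace drains back to zero.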
1991468c413aSTim Haley */ 1992468c413aSTim Haley vdev_dirty(vd, VDD_METASLAB, msp, txg + 1); 1993468c413aSTim Haley } 1994468c413aSTim Haley 19950713e232SGeorge Wilson if (msp->ms_loaded && msp->ms_access_txg < txg) { 19960713e232SGeorge Wilson for (int t = 1; t < TXG_CONCURRENT_STATES; t++) { 19970713e232SGeorge Wilson VERIFY0(range_tree_space( 19980713e232SGeorge Wilson msp->ms_alloctree[(txg + t) & TXG_MASK])); 19990713e232SGeorge Wilson } 2000ecc2d604Sbonwick 20010713e232SGeorge Wilson if (!metaslab_debug_unload) 20020713e232SGeorge Wilson metaslab_unload(msp); 2003ecc2d604Sbonwick } 2004ecc2d604Sbonwick 2005ecc2d604Sbonwick metaslab_group_sort(mg, msp, metaslab_weight(msp)); 2006ecc2d604Sbonwick mutex_exit(&msp->ms_lock); 2007fa9e4066Sahrens } 2008fa9e4066Sahrens 200980eb36f2SGeorge Wilson void 201080eb36f2SGeorge Wilson metaslab_sync_reassess(metaslab_group_t *mg) 201180eb36f2SGeorge Wilson { 201222e30981SGeorge Wilson metaslab_group_alloc_update(mg); 20132e4c9986SGeorge Wilson mg->mg_fragmentation = metaslab_group_fragmentation(mg); 201409c9d376SGeorge Wilson 201580eb36f2SGeorge Wilson /* 20160713e232SGeorge Wilson * Preload the next potential metaslabs 201780eb36f2SGeorge Wilson */ 20180713e232SGeorge Wilson metaslab_group_preload(mg); 201980eb36f2SGeorge Wilson } 202080eb36f2SGeorge Wilson 202144cd46caSbillm static uint64_t 202244cd46caSbillm metaslab_distance(metaslab_t *msp, dva_t *dva) 202344cd46caSbillm { 202444cd46caSbillm uint64_t ms_shift = msp->ms_group->mg_vd->vdev_ms_shift; 202544cd46caSbillm uint64_t offset = DVA_GET_OFFSET(dva) >> ms_shift; 20260713e232SGeorge Wilson uint64_t start = msp->ms_id; 202744cd46caSbillm 202844cd46caSbillm if (msp->ms_group->mg_vd->vdev_id != DVA_GET_VDEV(dva)) 202944cd46caSbillm return (1ULL << 63); 203044cd46caSbillm 203144cd46caSbillm if (offset < start) 203244cd46caSbillm return ((start - offset) << ms_shift); 203344cd46caSbillm if (offset > start) 203444cd46caSbillm return ((offset - start) << ms_shift); 203544cd46caSbillm return (0); 203644cd46caSbillm } 203744cd46caSbillm 203844cd46caSbillm static uint64_t 203909c9d376SGeorge Wilson metaslab_group_alloc(metaslab_group_t *mg, uint64_t psize, uint64_t asize, 2040b6240e83SGeorge Wilson uint64_t txg, uint64_t min_distance, dva_t *dva, int d) 2041fa9e4066Sahrens { 204209c9d376SGeorge Wilson spa_t *spa = mg->mg_vd->vdev_spa; 2043ecc2d604Sbonwick metaslab_t *msp = NULL; 2044ecc2d604Sbonwick uint64_t offset = -1ULL; 204544cd46caSbillm avl_tree_t *t = &mg->mg_metaslab_tree; 204644cd46caSbillm uint64_t activation_weight; 204744cd46caSbillm uint64_t target_distance; 204844cd46caSbillm int i; 204944cd46caSbillm 205044cd46caSbillm activation_weight = METASLAB_WEIGHT_PRIMARY; 2051d6e555bdSGeorge Wilson for (i = 0; i < d; i++) { 2052d6e555bdSGeorge Wilson if (DVA_GET_VDEV(&dva[i]) == mg->mg_vd->vdev_id) { 205344cd46caSbillm activation_weight = METASLAB_WEIGHT_SECONDARY; 2054d6e555bdSGeorge Wilson break; 2055d6e555bdSGeorge Wilson } 2056d6e555bdSGeorge Wilson } 2057fa9e4066Sahrens 2058ecc2d604Sbonwick for (;;) { 2059d6e555bdSGeorge Wilson boolean_t was_active; 2060d6e555bdSGeorge Wilson 2061fa9e4066Sahrens mutex_enter(&mg->mg_lock); 206244cd46caSbillm for (msp = avl_first(t); msp; msp = AVL_NEXT(t, msp)) { 206309c9d376SGeorge Wilson if (msp->ms_weight < asize) { 206409c9d376SGeorge Wilson spa_dbgmsg(spa, "%s: failed to meet weight " 206509c9d376SGeorge Wilson "requirement: vdev %llu, txg %llu, mg %p, " 206609c9d376SGeorge Wilson "msp %p, psize %llu, asize %llu, " 2067b6240e83SGeorge Wilson "weight 
%llu", spa_name(spa), 2068b6240e83SGeorge Wilson mg->mg_vd->vdev_id, txg, 2069b6240e83SGeorge Wilson mg, msp, psize, asize, msp->ms_weight); 2070ecc2d604Sbonwick mutex_exit(&mg->mg_lock); 207144cd46caSbillm return (-1ULL); 207244cd46caSbillm } 207303f8c366SGeorge Wilson 207403f8c366SGeorge Wilson /* 207503f8c366SGeorge Wilson * If the selected metaslab is condensing, skip it. 207603f8c366SGeorge Wilson */ 20770713e232SGeorge Wilson if (msp->ms_condensing) 207803f8c366SGeorge Wilson continue; 207903f8c366SGeorge Wilson 2080d6e555bdSGeorge Wilson was_active = msp->ms_weight & METASLAB_ACTIVE_MASK; 208144cd46caSbillm if (activation_weight == METASLAB_WEIGHT_PRIMARY) 208244cd46caSbillm break; 208344cd46caSbillm 208444cd46caSbillm target_distance = min_distance + 20850713e232SGeorge Wilson (space_map_allocated(msp->ms_sm) != 0 ? 0 : 20860713e232SGeorge Wilson min_distance >> 1); 208744cd46caSbillm 208844cd46caSbillm for (i = 0; i < d; i++) 208944cd46caSbillm if (metaslab_distance(msp, &dva[i]) < 209044cd46caSbillm target_distance) 209144cd46caSbillm break; 209244cd46caSbillm if (i == d) 209344cd46caSbillm break; 2094ecc2d604Sbonwick } 2095fa9e4066Sahrens mutex_exit(&mg->mg_lock); 209644cd46caSbillm if (msp == NULL) 209744cd46caSbillm return (-1ULL); 2098fa9e4066Sahrens 209922e30981SGeorge Wilson mutex_enter(&msp->ms_lock); 210022e30981SGeorge Wilson 210109c9d376SGeorge Wilson /* 2102aeb1c1b6Sgw25295 * Ensure that the metaslab we have selected is still 2103aeb1c1b6Sgw25295 * capable of handling our request. It's possible that 2104aeb1c1b6Sgw25295 * another thread may have changed the weight while we 2105aeb1c1b6Sgw25295 * were blocked on the metaslab lock. 2106aeb1c1b6Sgw25295 */ 210709c9d376SGeorge Wilson if (msp->ms_weight < asize || (was_active && 2108d6e555bdSGeorge Wilson !(msp->ms_weight & METASLAB_ACTIVE_MASK) && 2109d6e555bdSGeorge Wilson activation_weight == METASLAB_WEIGHT_PRIMARY)) { 2110aeb1c1b6Sgw25295 mutex_exit(&msp->ms_lock); 2111aeb1c1b6Sgw25295 continue; 2112aeb1c1b6Sgw25295 } 2113aeb1c1b6Sgw25295 211444cd46caSbillm if ((msp->ms_weight & METASLAB_WEIGHT_SECONDARY) && 211544cd46caSbillm activation_weight == METASLAB_WEIGHT_PRIMARY) { 211644cd46caSbillm metaslab_passivate(msp, 21175f5f7a6fSahrens msp->ms_weight & ~METASLAB_ACTIVE_MASK); 211844cd46caSbillm mutex_exit(&msp->ms_lock); 211944cd46caSbillm continue; 212044cd46caSbillm } 212144cd46caSbillm 212209c9d376SGeorge Wilson if (metaslab_activate(msp, activation_weight) != 0) { 2123fa9e4066Sahrens mutex_exit(&msp->ms_lock); 2124fa9e4066Sahrens continue; 2125fa9e4066Sahrens } 2126fa9e4066Sahrens 212703f8c366SGeorge Wilson /* 212803f8c366SGeorge Wilson * If this metaslab is currently condensing then pick again as 212903f8c366SGeorge Wilson * we can't manipulate this metaslab until it's committed 213003f8c366SGeorge Wilson * to disk. 
213103f8c366SGeorge Wilson */ 21320713e232SGeorge Wilson if (msp->ms_condensing) { 213303f8c366SGeorge Wilson mutex_exit(&msp->ms_lock); 213403f8c366SGeorge Wilson continue; 213503f8c366SGeorge Wilson } 213603f8c366SGeorge Wilson 21370713e232SGeorge Wilson if ((offset = metaslab_block_alloc(msp, asize)) != -1ULL) 2138ecc2d604Sbonwick break; 2139ecc2d604Sbonwick 21400713e232SGeorge Wilson metaslab_passivate(msp, metaslab_block_maxsize(msp)); 2141ecc2d604Sbonwick mutex_exit(&msp->ms_lock); 2142ecc2d604Sbonwick } 2143ecc2d604Sbonwick 21440713e232SGeorge Wilson if (range_tree_space(msp->ms_alloctree[txg & TXG_MASK]) == 0) 2145ecc2d604Sbonwick vdev_dirty(mg->mg_vd, VDD_METASLAB, msp, txg); 2146ecc2d604Sbonwick 21470713e232SGeorge Wilson range_tree_add(msp->ms_alloctree[txg & TXG_MASK], offset, asize); 21480713e232SGeorge Wilson msp->ms_access_txg = txg + metaslab_unload_delay; 2149ecc2d604Sbonwick 2150ecc2d604Sbonwick mutex_exit(&msp->ms_lock); 2151ecc2d604Sbonwick 215244cd46caSbillm return (offset); 2153fa9e4066Sahrens } 2154fa9e4066Sahrens 2155fa9e4066Sahrens /* 2156fa9e4066Sahrens * Allocate a block for the specified i/o. 2157fa9e4066Sahrens */ 215844cd46caSbillm static int 21598654d025Sperrin metaslab_alloc_dva(spa_t *spa, metaslab_class_t *mc, uint64_t psize, 2160e14bb325SJeff Bonwick dva_t *dva, int d, dva_t *hintdva, uint64_t txg, int flags) 2161fa9e4066Sahrens { 2162fa9e4066Sahrens metaslab_group_t *mg, *rotor; 2163fa9e4066Sahrens vdev_t *vd; 216444cd46caSbillm int dshift = 3; 216544cd46caSbillm int all_zero; 21668ad4d6ddSJeff Bonwick int zio_lock = B_FALSE; 21678ad4d6ddSJeff Bonwick boolean_t allocatable; 2168fa9e4066Sahrens uint64_t offset = -1ULL; 2169fa9e4066Sahrens uint64_t asize; 217044cd46caSbillm uint64_t distance; 2171fa9e4066Sahrens 2172d80c45e0Sbonwick ASSERT(!DVA_IS_VALID(&dva[d])); 2173d80c45e0Sbonwick 2174fa9e4066Sahrens /* 2175e05725b1Sbonwick * For testing, make some blocks above a certain size be gang blocks. 2176e05725b1Sbonwick */ 2177d3d50737SRafael Vanoni if (psize >= metaslab_gang_bang && (ddi_get_lbolt() & 3) == 0) 2178be6fd75aSMatthew Ahrens return (SET_ERROR(ENOSPC)); 2179e05725b1Sbonwick 2180e05725b1Sbonwick /* 2181fa9e4066Sahrens * Start at the rotor and loop through all mgs until we find something. 2182b24ab676SJeff Bonwick * Note that there's no locking on mc_rotor or mc_aliquot because 2183fa9e4066Sahrens * nothing actually breaks if we miss a few updates -- we just won't 2184fa9e4066Sahrens * allocate quite as evenly. It all balances out over time. 218544cd46caSbillm * 218667bd71c6Sperrin * If we are doing ditto or log blocks, try to spread them across 218767bd71c6Sperrin * consecutive vdevs. If we're forced to reuse a vdev before we've 218867bd71c6Sperrin * allocated all of our ditto blocks, then try and spread them out on 218967bd71c6Sperrin * that vdev as much as possible. If it turns out to not be possible, 219044cd46caSbillm * gradually lower our standards until anything becomes acceptable. 219144cd46caSbillm * Also, allocating on consecutive vdevs (as opposed to random vdevs) 219244cd46caSbillm * gives us hope of containing our fault domains to something we're 219344cd46caSbillm * able to reason about. Otherwise, any two top-level vdev failures 219444cd46caSbillm * will guarantee the loss of data. With consecutive allocation, 219544cd46caSbillm * only two adjacent top-level vdev failures will result in data loss. 
219644cd46caSbillm * 219744cd46caSbillm * If we are doing gang blocks (hintdva is non-NULL), try to keep 219844cd46caSbillm * ourselves on the same vdev as our gang block header. That 219944cd46caSbillm * way, we can hope for locality in vdev_cache, plus it makes our 220044cd46caSbillm * fault domains something tractable. 2201fa9e4066Sahrens */ 220244cd46caSbillm if (hintdva) { 220344cd46caSbillm vd = vdev_lookup_top(spa, DVA_GET_VDEV(&hintdva[d])); 220488ecc943SGeorge Wilson 220588ecc943SGeorge Wilson /* 220688ecc943SGeorge Wilson * It's possible the vdev we're using as the hint no 220788ecc943SGeorge Wilson * longer exists (i.e. removed). Consult the rotor when 220888ecc943SGeorge Wilson * all else fails. 220988ecc943SGeorge Wilson */ 2210a1521560SJeff Bonwick if (vd != NULL) { 221144cd46caSbillm mg = vd->vdev_mg; 221288ecc943SGeorge Wilson 221388ecc943SGeorge Wilson if (flags & METASLAB_HINTBP_AVOID && 221488ecc943SGeorge Wilson mg->mg_next != NULL) 221588ecc943SGeorge Wilson mg = mg->mg_next; 221688ecc943SGeorge Wilson } else { 221788ecc943SGeorge Wilson mg = mc->mc_rotor; 221888ecc943SGeorge Wilson } 221944cd46caSbillm } else if (d != 0) { 222044cd46caSbillm vd = vdev_lookup_top(spa, DVA_GET_VDEV(&dva[d - 1])); 222144cd46caSbillm mg = vd->vdev_mg->mg_next; 222244cd46caSbillm } else { 222344cd46caSbillm mg = mc->mc_rotor; 222444cd46caSbillm } 222544cd46caSbillm 22268654d025Sperrin /* 2227a1521560SJeff Bonwick * If the hint put us into the wrong metaslab class, or into a 2228a1521560SJeff Bonwick * metaslab group that has been passivated, just follow the rotor. 22298654d025Sperrin */ 2230a1521560SJeff Bonwick if (mg->mg_class != mc || mg->mg_activation_count <= 0) 22318654d025Sperrin mg = mc->mc_rotor; 22328654d025Sperrin 22338654d025Sperrin rotor = mg; 223444cd46caSbillm top: 223544cd46caSbillm all_zero = B_TRUE; 2236fa9e4066Sahrens do { 2237a1521560SJeff Bonwick ASSERT(mg->mg_activation_count == 1); 2238a1521560SJeff Bonwick 2239fa9e4066Sahrens vd = mg->mg_vd; 22408ad4d6ddSJeff Bonwick 22410a4e9518Sgw25295 /* 2242e14bb325SJeff Bonwick * Don't allocate from faulted devices. 22430a4e9518Sgw25295 */ 22448ad4d6ddSJeff Bonwick if (zio_lock) { 22458ad4d6ddSJeff Bonwick spa_config_enter(spa, SCL_ZIO, FTAG, RW_READER); 22468ad4d6ddSJeff Bonwick allocatable = vdev_allocatable(vd); 22478ad4d6ddSJeff Bonwick spa_config_exit(spa, SCL_ZIO, FTAG); 22488ad4d6ddSJeff Bonwick } else { 22498ad4d6ddSJeff Bonwick allocatable = vdev_allocatable(vd); 22508ad4d6ddSJeff Bonwick } 225122e30981SGeorge Wilson 225222e30981SGeorge Wilson /* 225322e30981SGeorge Wilson * Determine if the selected metaslab group is eligible 225422e30981SGeorge Wilson * for allocations. If we're ganging or have requested 225522e30981SGeorge Wilson * an allocation for the smallest gang block size 225622e30981SGeorge Wilson * then we don't want to avoid allocating to this 225722e30981SGeorge Wilson * metaslab group. If we're in this condition we should 225822e30981SGeorge Wilson * try to allocate from any device possible so that we 225922e30981SGeorge Wilson * don't inadvertently return ENOSPC and suspend the pool 226022e30981SGeorge Wilson * even though space is still available.
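 *
 * Note that this check is only applied to allocations larger than
 * SPA_GANGBLOCKSIZE that are allowed to "fast gang" (i.e. not gang
 * headers, gang children, or allocations flagged METASLAB_GANG_AVOID);
 * everything else may allocate from any allocatable group.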
226122e30981SGeorge Wilson */ 226222e30981SGeorge Wilson if (allocatable && CAN_FASTGANG(flags) && 226322e30981SGeorge Wilson psize > SPA_GANGBLOCKSIZE) 226422e30981SGeorge Wilson allocatable = metaslab_group_allocatable(mg); 226522e30981SGeorge Wilson 22668ad4d6ddSJeff Bonwick if (!allocatable) 22670a4e9518Sgw25295 goto next; 22688ad4d6ddSJeff Bonwick 22690a4e9518Sgw25295 /* 22700a4e9518Sgw25295 * Avoid writing single-copy data to a failing vdev 22719dc3941cSSašo Kiselkov * unless the user instructs us that it is okay. 22720a4e9518Sgw25295 */ 22730a4e9518Sgw25295 if ((vd->vdev_stat.vs_write_errors > 0 || 22740a4e9518Sgw25295 vd->vdev_state < VDEV_STATE_HEALTHY) && 22752e4c9986SGeorge Wilson d == 0 && dshift == 3 && vd->vdev_children == 0) { 22760a4e9518Sgw25295 all_zero = B_FALSE; 22770a4e9518Sgw25295 goto next; 22780a4e9518Sgw25295 } 227944cd46caSbillm 22808654d025Sperrin ASSERT(mg->mg_class == mc); 22818654d025Sperrin 228244cd46caSbillm distance = vd->vdev_asize >> dshift; 228344cd46caSbillm if (distance <= (1ULL << vd->vdev_ms_shift)) 228444cd46caSbillm distance = 0; 228544cd46caSbillm else 228644cd46caSbillm all_zero = B_FALSE; 228744cd46caSbillm 2288fa9e4066Sahrens asize = vdev_psize_to_asize(vd, psize); 2289fa9e4066Sahrens ASSERT(P2PHASE(asize, 1ULL << vd->vdev_ashift) == 0); 2290fa9e4066Sahrens 229109c9d376SGeorge Wilson offset = metaslab_group_alloc(mg, psize, asize, txg, distance, 2292b6240e83SGeorge Wilson dva, d); 229344cd46caSbillm if (offset != -1ULL) { 2294fa9e4066Sahrens /* 2295fa9e4066Sahrens * If we've just selected this metaslab group, 2296fa9e4066Sahrens * figure out whether the corresponding vdev is 2297fa9e4066Sahrens * over- or under-used relative to the pool, 2298fa9e4066Sahrens * and set an allocation bias to even it out. 2299fa9e4066Sahrens */ 23002e4c9986SGeorge Wilson if (mc->mc_aliquot == 0 && metaslab_bias_enabled) { 2301fa9e4066Sahrens vdev_stat_t *vs = &vd->vdev_stat; 2302b24ab676SJeff Bonwick int64_t vu, cu; 2303fa9e4066Sahrens 230409c9d376SGeorge Wilson vu = (vs->vs_alloc * 100) / (vs->vs_space + 1); 230509c9d376SGeorge Wilson cu = (mc->mc_alloc * 100) / (mc->mc_space + 1); 2306fa9e4066Sahrens 2307fa9e4066Sahrens /* 230809c9d376SGeorge Wilson * Calculate how much more or less we should 230909c9d376SGeorge Wilson * try to allocate from this device during 231009c9d376SGeorge Wilson * this iteration around the rotor. 231109c9d376SGeorge Wilson * For example, if a device is 80% full 231209c9d376SGeorge Wilson * and the pool is 20% full then we should 231309c9d376SGeorge Wilson * reduce allocations by 60% on this device. 231409c9d376SGeorge Wilson * 231509c9d376SGeorge Wilson * mg_bias = (20 - 80) * 512K / 100 = -307K 231609c9d376SGeorge Wilson * 231709c9d376SGeorge Wilson * This reduces allocations by 307K for this 231809c9d376SGeorge Wilson * iteration. 
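 *
 * Continuing the example above: with a 512K aliquot and a -307K
 * bias, the group accepts only about 205K of allocations before
 * mc_aliquot (updated below) reaches mg_aliquot + mg_bias and the
 * rotor advances to the next group.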
2319fa9e4066Sahrens */ 2320b24ab676SJeff Bonwick mg->mg_bias = ((cu - vu) * 232109c9d376SGeorge Wilson (int64_t)mg->mg_aliquot) / 100; 23222e4c9986SGeorge Wilson } else if (!metaslab_bias_enabled) { 23232e4c9986SGeorge Wilson mg->mg_bias = 0; 2324fa9e4066Sahrens } 2325fa9e4066Sahrens 2326b24ab676SJeff Bonwick if (atomic_add_64_nv(&mc->mc_aliquot, asize) >= 2327fa9e4066Sahrens mg->mg_aliquot + mg->mg_bias) { 2328fa9e4066Sahrens mc->mc_rotor = mg->mg_next; 2329b24ab676SJeff Bonwick mc->mc_aliquot = 0; 2330fa9e4066Sahrens } 2331fa9e4066Sahrens 233244cd46caSbillm DVA_SET_VDEV(&dva[d], vd->vdev_id); 233344cd46caSbillm DVA_SET_OFFSET(&dva[d], offset); 2334e14bb325SJeff Bonwick DVA_SET_GANG(&dva[d], !!(flags & METASLAB_GANG_HEADER)); 233544cd46caSbillm DVA_SET_ASIZE(&dva[d], asize); 2336fa9e4066Sahrens 2337fa9e4066Sahrens return (0); 2338fa9e4066Sahrens } 23390a4e9518Sgw25295 next: 2340fa9e4066Sahrens mc->mc_rotor = mg->mg_next; 2341b24ab676SJeff Bonwick mc->mc_aliquot = 0; 2342fa9e4066Sahrens } while ((mg = mg->mg_next) != rotor); 2343fa9e4066Sahrens 234444cd46caSbillm if (!all_zero) { 234544cd46caSbillm dshift++; 234644cd46caSbillm ASSERT(dshift < 64); 234744cd46caSbillm goto top; 234844cd46caSbillm } 234944cd46caSbillm 2350d6e555bdSGeorge Wilson if (!allocatable && !zio_lock) { 23518ad4d6ddSJeff Bonwick dshift = 3; 23528ad4d6ddSJeff Bonwick zio_lock = B_TRUE; 23538ad4d6ddSJeff Bonwick goto top; 23548ad4d6ddSJeff Bonwick } 23558ad4d6ddSJeff Bonwick 235644cd46caSbillm bzero(&dva[d], sizeof (dva_t)); 2357fa9e4066Sahrens 2358be6fd75aSMatthew Ahrens return (SET_ERROR(ENOSPC)); 2359fa9e4066Sahrens } 2360fa9e4066Sahrens 2361fa9e4066Sahrens /* 2362fa9e4066Sahrens * Free the block represented by DVA in the context of the specified 2363fa9e4066Sahrens * transaction group. 
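 *
 * When "now" is B_TRUE the free undoes an allocation made earlier in
 * the same still-open txg: the range is pulled back out of the
 * per-txg alloctree and returned directly to the metaslab's in-core
 * free tree.  Otherwise the range is recorded in the per-txg freetree
 * and is not handed back to the allocator until that txg syncs and
 * the free works its way through the defer trees.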
2364fa9e4066Sahrens */ 2365d80c45e0Sbonwick static void 2366d80c45e0Sbonwick metaslab_free_dva(spa_t *spa, const dva_t *dva, uint64_t txg, boolean_t now) 2367fa9e4066Sahrens { 2368fa9e4066Sahrens uint64_t vdev = DVA_GET_VDEV(dva); 2369fa9e4066Sahrens uint64_t offset = DVA_GET_OFFSET(dva); 2370fa9e4066Sahrens uint64_t size = DVA_GET_ASIZE(dva); 2371fa9e4066Sahrens vdev_t *vd; 2372fa9e4066Sahrens metaslab_t *msp; 2373fa9e4066Sahrens 2374d80c45e0Sbonwick ASSERT(DVA_IS_VALID(dva)); 2375d80c45e0Sbonwick 2376fa9e4066Sahrens if (txg > spa_freeze_txg(spa)) 2377fa9e4066Sahrens return; 2378fa9e4066Sahrens 2379d80c45e0Sbonwick if ((vd = vdev_lookup_top(spa, vdev)) == NULL || 2380d80c45e0Sbonwick (offset >> vd->vdev_ms_shift) >= vd->vdev_ms_count) { 2381d80c45e0Sbonwick cmn_err(CE_WARN, "metaslab_free_dva(): bad DVA %llu:%llu", 2382d80c45e0Sbonwick (u_longlong_t)vdev, (u_longlong_t)offset); 2383fa9e4066Sahrens ASSERT(0); 2384fa9e4066Sahrens return; 2385fa9e4066Sahrens } 2386fa9e4066Sahrens 2387fa9e4066Sahrens msp = vd->vdev_ms[offset >> vd->vdev_ms_shift]; 2388fa9e4066Sahrens 2389fa9e4066Sahrens if (DVA_GET_GANG(dva)) 2390fa9e4066Sahrens size = vdev_psize_to_asize(vd, SPA_GANGBLOCKSIZE); 2391fa9e4066Sahrens 2392fa9e4066Sahrens mutex_enter(&msp->ms_lock); 2393fa9e4066Sahrens 2394ecc2d604Sbonwick if (now) { 23950713e232SGeorge Wilson range_tree_remove(msp->ms_alloctree[txg & TXG_MASK], 2396ecc2d604Sbonwick offset, size); 23970713e232SGeorge Wilson 23980713e232SGeorge Wilson VERIFY(!msp->ms_condensing); 23990713e232SGeorge Wilson VERIFY3U(offset, >=, msp->ms_start); 24000713e232SGeorge Wilson VERIFY3U(offset + size, <=, msp->ms_start + msp->ms_size); 24010713e232SGeorge Wilson VERIFY3U(range_tree_space(msp->ms_tree) + size, <=, 24020713e232SGeorge Wilson msp->ms_size); 24030713e232SGeorge Wilson VERIFY0(P2PHASE(offset, 1ULL << vd->vdev_ashift)); 24040713e232SGeorge Wilson VERIFY0(P2PHASE(size, 1ULL << vd->vdev_ashift)); 24050713e232SGeorge Wilson range_tree_add(msp->ms_tree, offset, size); 2406ecc2d604Sbonwick } else { 24070713e232SGeorge Wilson if (range_tree_space(msp->ms_freetree[txg & TXG_MASK]) == 0) 2408ecc2d604Sbonwick vdev_dirty(vd, VDD_METASLAB, msp, txg); 24090713e232SGeorge Wilson range_tree_add(msp->ms_freetree[txg & TXG_MASK], 24100713e232SGeorge Wilson offset, size); 2411ecc2d604Sbonwick } 2412fa9e4066Sahrens 2413fa9e4066Sahrens mutex_exit(&msp->ms_lock); 2414fa9e4066Sahrens } 2415d80c45e0Sbonwick 2416d80c45e0Sbonwick /* 2417d80c45e0Sbonwick * Intent log support: upon opening the pool after a crash, notify the SPA 2418d80c45e0Sbonwick * of blocks that the intent log has allocated for immediate write, but 2419d80c45e0Sbonwick * which are still considered free by the SPA because the last transaction 2420d80c45e0Sbonwick * group didn't commit yet. 
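 *
 * Claiming replays the allocation in the current txg: the range is
 * verified to still be free, removed from the metaslab's in-core free
 * tree, and (when the pool is writable) added to the per-txg alloctree
 * so the next sync records it in the space map.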
2421d80c45e0Sbonwick */ 2422d80c45e0Sbonwick static int 2423d80c45e0Sbonwick metaslab_claim_dva(spa_t *spa, const dva_t *dva, uint64_t txg) 2424d80c45e0Sbonwick { 2425d80c45e0Sbonwick uint64_t vdev = DVA_GET_VDEV(dva); 2426d80c45e0Sbonwick uint64_t offset = DVA_GET_OFFSET(dva); 2427d80c45e0Sbonwick uint64_t size = DVA_GET_ASIZE(dva); 2428d80c45e0Sbonwick vdev_t *vd; 2429d80c45e0Sbonwick metaslab_t *msp; 2430b24ab676SJeff Bonwick int error = 0; 2431d80c45e0Sbonwick 2432d80c45e0Sbonwick ASSERT(DVA_IS_VALID(dva)); 2433d80c45e0Sbonwick 2434d80c45e0Sbonwick if ((vd = vdev_lookup_top(spa, vdev)) == NULL || 2435d80c45e0Sbonwick (offset >> vd->vdev_ms_shift) >= vd->vdev_ms_count) 2436be6fd75aSMatthew Ahrens return (SET_ERROR(ENXIO)); 2437d80c45e0Sbonwick 2438d80c45e0Sbonwick msp = vd->vdev_ms[offset >> vd->vdev_ms_shift]; 2439d80c45e0Sbonwick 2440d80c45e0Sbonwick if (DVA_GET_GANG(dva)) 2441d80c45e0Sbonwick size = vdev_psize_to_asize(vd, SPA_GANGBLOCKSIZE); 2442d80c45e0Sbonwick 2443d80c45e0Sbonwick mutex_enter(&msp->ms_lock); 2444d80c45e0Sbonwick 24450713e232SGeorge Wilson if ((txg != 0 && spa_writeable(spa)) || !msp->ms_loaded) 244609c9d376SGeorge Wilson error = metaslab_activate(msp, METASLAB_WEIGHT_SECONDARY); 2447b24ab676SJeff Bonwick 24480713e232SGeorge Wilson if (error == 0 && !range_tree_contains(msp->ms_tree, offset, size)) 2449be6fd75aSMatthew Ahrens error = SET_ERROR(ENOENT); 2450b24ab676SJeff Bonwick 2451e14bb325SJeff Bonwick if (error || txg == 0) { /* txg == 0 indicates dry run */ 2452d80c45e0Sbonwick mutex_exit(&msp->ms_lock); 2453d80c45e0Sbonwick return (error); 2454d80c45e0Sbonwick } 2455d80c45e0Sbonwick 24560713e232SGeorge Wilson VERIFY(!msp->ms_condensing); 24570713e232SGeorge Wilson VERIFY0(P2PHASE(offset, 1ULL << vd->vdev_ashift)); 24580713e232SGeorge Wilson VERIFY0(P2PHASE(size, 1ULL << vd->vdev_ashift)); 24590713e232SGeorge Wilson VERIFY3U(range_tree_space(msp->ms_tree) - size, <=, msp->ms_size); 24600713e232SGeorge Wilson range_tree_remove(msp->ms_tree, offset, size); 2461e14bb325SJeff Bonwick 24628ad4d6ddSJeff Bonwick if (spa_writeable(spa)) { /* don't dirty if we're zdb(1M) */ 24630713e232SGeorge Wilson if (range_tree_space(msp->ms_alloctree[txg & TXG_MASK]) == 0) 2464d80c45e0Sbonwick vdev_dirty(vd, VDD_METASLAB, msp, txg); 24650713e232SGeorge Wilson range_tree_add(msp->ms_alloctree[txg & TXG_MASK], offset, size); 2466e14bb325SJeff Bonwick } 2467d80c45e0Sbonwick 2468d80c45e0Sbonwick mutex_exit(&msp->ms_lock); 2469d80c45e0Sbonwick 2470d80c45e0Sbonwick return (0); 2471d80c45e0Sbonwick } 2472d80c45e0Sbonwick 2473d80c45e0Sbonwick int 24748654d025Sperrin metaslab_alloc(spa_t *spa, metaslab_class_t *mc, uint64_t psize, blkptr_t *bp, 2475e14bb325SJeff Bonwick int ndvas, uint64_t txg, blkptr_t *hintbp, int flags) 2476d80c45e0Sbonwick { 2477d80c45e0Sbonwick dva_t *dva = bp->blk_dva; 2478d80c45e0Sbonwick dva_t *hintdva = hintbp->blk_dva; 2479d80c45e0Sbonwick int error = 0; 2480d80c45e0Sbonwick 2481e14bb325SJeff Bonwick ASSERT(bp->blk_birth == 0); 2482b24ab676SJeff Bonwick ASSERT(BP_PHYSICAL_BIRTH(bp) == 0); 2483e14bb325SJeff Bonwick 2484e14bb325SJeff Bonwick spa_config_enter(spa, SCL_ALLOC, FTAG, RW_READER); 2485e14bb325SJeff Bonwick 2486e14bb325SJeff Bonwick if (mc->mc_rotor == NULL) { /* no vdevs in this class */ 2487e14bb325SJeff Bonwick spa_config_exit(spa, SCL_ALLOC, FTAG); 2488be6fd75aSMatthew Ahrens return (SET_ERROR(ENOSPC)); 2489e14bb325SJeff Bonwick } 24908654d025Sperrin 2491d80c45e0Sbonwick ASSERT(ndvas > 0 && ndvas <= spa_max_replication(spa)); 2492d80c45e0Sbonwick 
ASSERT(BP_GET_NDVAS(bp) == 0); 2493d80c45e0Sbonwick ASSERT(hintbp == NULL || ndvas <= BP_GET_NDVAS(hintbp)); 2494d80c45e0Sbonwick 2495e14bb325SJeff Bonwick for (int d = 0; d < ndvas; d++) { 24968654d025Sperrin error = metaslab_alloc_dva(spa, mc, psize, dva, d, hintdva, 2497e14bb325SJeff Bonwick txg, flags); 24980713e232SGeorge Wilson if (error != 0) { 2499d80c45e0Sbonwick for (d--; d >= 0; d--) { 2500d80c45e0Sbonwick metaslab_free_dva(spa, &dva[d], txg, B_TRUE); 2501d80c45e0Sbonwick bzero(&dva[d], sizeof (dva_t)); 2502d80c45e0Sbonwick } 2503e14bb325SJeff Bonwick spa_config_exit(spa, SCL_ALLOC, FTAG); 2504d80c45e0Sbonwick return (error); 2505d80c45e0Sbonwick } 2506d80c45e0Sbonwick } 2507d80c45e0Sbonwick ASSERT(error == 0); 2508d80c45e0Sbonwick ASSERT(BP_GET_NDVAS(bp) == ndvas); 2509d80c45e0Sbonwick 2510e14bb325SJeff Bonwick spa_config_exit(spa, SCL_ALLOC, FTAG); 2511e14bb325SJeff Bonwick 2512b24ab676SJeff Bonwick BP_SET_BIRTH(bp, txg, txg); 2513e14bb325SJeff Bonwick 2514d80c45e0Sbonwick return (0); 2515d80c45e0Sbonwick } 2516d80c45e0Sbonwick 2517d80c45e0Sbonwick void 2518d80c45e0Sbonwick metaslab_free(spa_t *spa, const blkptr_t *bp, uint64_t txg, boolean_t now) 2519d80c45e0Sbonwick { 2520d80c45e0Sbonwick const dva_t *dva = bp->blk_dva; 2521d80c45e0Sbonwick int ndvas = BP_GET_NDVAS(bp); 2522d80c45e0Sbonwick 2523d80c45e0Sbonwick ASSERT(!BP_IS_HOLE(bp)); 2524b24ab676SJeff Bonwick ASSERT(!now || bp->blk_birth >= spa_syncing_txg(spa)); 2525d80c45e0Sbonwick 2526e14bb325SJeff Bonwick spa_config_enter(spa, SCL_FREE, FTAG, RW_READER); 2527e14bb325SJeff Bonwick 2528e14bb325SJeff Bonwick for (int d = 0; d < ndvas; d++) 2529d80c45e0Sbonwick metaslab_free_dva(spa, &dva[d], txg, now); 2530e14bb325SJeff Bonwick 2531e14bb325SJeff Bonwick spa_config_exit(spa, SCL_FREE, FTAG); 2532d80c45e0Sbonwick } 2533d80c45e0Sbonwick 2534d80c45e0Sbonwick int 2535d80c45e0Sbonwick metaslab_claim(spa_t *spa, const blkptr_t *bp, uint64_t txg) 2536d80c45e0Sbonwick { 2537d80c45e0Sbonwick const dva_t *dva = bp->blk_dva; 2538d80c45e0Sbonwick int ndvas = BP_GET_NDVAS(bp); 2539e14bb325SJeff Bonwick int error = 0; 2540d80c45e0Sbonwick 2541d80c45e0Sbonwick ASSERT(!BP_IS_HOLE(bp)); 2542d80c45e0Sbonwick 2543e14bb325SJeff Bonwick if (txg != 0) { 2544e14bb325SJeff Bonwick /* 2545e14bb325SJeff Bonwick * First do a dry run to make sure all DVAs are claimable, 2546e14bb325SJeff Bonwick * so we don't have to unwind from partial failures below. 
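 *
 * The dry run is the recursive call just below with txg == 0;
 * metaslab_claim_dva() treats a zero txg as "check only" and returns
 * before removing anything from the metaslab's trees.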
2547e14bb325SJeff Bonwick */ 2548e14bb325SJeff Bonwick if ((error = metaslab_claim(spa, bp, 0)) != 0) 2549e14bb325SJeff Bonwick return (error); 2550e14bb325SJeff Bonwick } 2551d80c45e0Sbonwick 2552e14bb325SJeff Bonwick spa_config_enter(spa, SCL_ALLOC, FTAG, RW_READER); 2553e14bb325SJeff Bonwick 2554e14bb325SJeff Bonwick for (int d = 0; d < ndvas; d++) 2555e14bb325SJeff Bonwick if ((error = metaslab_claim_dva(spa, &dva[d], txg)) != 0) 2556e14bb325SJeff Bonwick break; 2557e14bb325SJeff Bonwick 2558e14bb325SJeff Bonwick spa_config_exit(spa, SCL_ALLOC, FTAG); 2559e14bb325SJeff Bonwick 2560e14bb325SJeff Bonwick ASSERT(error == 0 || txg == 0); 2561e14bb325SJeff Bonwick 2562e14bb325SJeff Bonwick return (error); 2563d80c45e0Sbonwick } 25643b2aab18SMatthew Ahrens 25653b2aab18SMatthew Ahrens void 25663b2aab18SMatthew Ahrens metaslab_check_free(spa_t *spa, const blkptr_t *bp) 25673b2aab18SMatthew Ahrens { 25683b2aab18SMatthew Ahrens if ((zfs_flags & ZFS_DEBUG_ZIO_FREE) == 0) 25693b2aab18SMatthew Ahrens return; 25703b2aab18SMatthew Ahrens 25713b2aab18SMatthew Ahrens spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER); 25723b2aab18SMatthew Ahrens for (int i = 0; i < BP_GET_NDVAS(bp); i++) { 25730713e232SGeorge Wilson uint64_t vdev = DVA_GET_VDEV(&bp->blk_dva[i]); 25740713e232SGeorge Wilson vdev_t *vd = vdev_lookup_top(spa, vdev); 25750713e232SGeorge Wilson uint64_t offset = DVA_GET_OFFSET(&bp->blk_dva[i]); 25763b2aab18SMatthew Ahrens uint64_t size = DVA_GET_ASIZE(&bp->blk_dva[i]); 25770713e232SGeorge Wilson metaslab_t *msp = vd->vdev_ms[offset >> vd->vdev_ms_shift]; 25783b2aab18SMatthew Ahrens 25790713e232SGeorge Wilson if (msp->ms_loaded) 25800713e232SGeorge Wilson range_tree_verify(msp->ms_tree, offset, size); 25813b2aab18SMatthew Ahrens 25823b2aab18SMatthew Ahrens for (int j = 0; j < TXG_SIZE; j++) 25830713e232SGeorge Wilson range_tree_verify(msp->ms_freetree[j], offset, size); 25843b2aab18SMatthew Ahrens for (int j = 0; j < TXG_DEFER_SIZE; j++) 25850713e232SGeorge Wilson range_tree_verify(msp->ms_defertree[j], offset, size); 25863b2aab18SMatthew Ahrens } 25873b2aab18SMatthew Ahrens spa_config_exit(spa, SCL_VDEV, FTAG); 25883b2aab18SMatthew Ahrens } 2589
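
/*
 * Purely illustrative sketch -- a hypothetical helper, not part of this
 * file.  It shows how the entry points above fit together: allocate the
 * DVAs for a new block in an open txg and, if the caller's write cannot
 * be issued, undo the allocation with an immediate ("now") free.  Real
 * callers such as the ZIO pipeline additionally handle gang blocks,
 * hint block pointers, and allocation flags.
 */
static int
metaslab_alloc_undo_example(spa_t *spa, metaslab_class_t *mc, uint64_t psize,
    blkptr_t *bp, int ndvas, uint64_t txg, boolean_t write_failed)
{
	int error;

	/* No hint block pointer and no special allocation flags. */
	error = metaslab_alloc(spa, mc, psize, bp, ndvas, txg, NULL, 0);
	if (error != 0)
		return (error);

	if (write_failed) {
		/*
		 * The block was never written, so return its space
		 * directly to the in-core trees (now == B_TRUE) and
		 * clear the block pointer.
		 */
		metaslab_free(spa, bp, txg, B_TRUE);
		BP_ZERO(bp);
		return (SET_ERROR(EIO));
	}

	return (0);
}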