/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2019 by Delphix. All rights reserved.
 * Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
 * Copyright (c) 2015, Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2017, Intel Corporation.
 */

#include <sys/zfs_context.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/space_map.h>
#include <sys/metaslab_impl.h>
#include <sys/vdev_impl.h>
#include <sys/vdev_draid.h>
#include <sys/zio.h>
#include <sys/spa_impl.h>
#include <sys/zfeature.h>
#include <sys/vdev_indirect_mapping.h>
#include <sys/zap.h>
#include <sys/btree.h>

#define	WITH_DF_BLOCK_ALLOCATOR

#define	GANG_ALLOCATION(flags) \
	((flags) & (METASLAB_GANG_CHILD | METASLAB_GANG_HEADER))

/*
 * Metaslab granularity, in bytes. This is roughly similar to what would be
 * referred to as the "stripe size" in traditional RAID arrays. In normal
 * operation, we will try to write this amount of data to each disk before
 * moving on to the next top-level vdev.
 */
static unsigned long metaslab_aliquot = 1024 * 1024;
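
/*
 * Illustrative sketch (not compiled): when a group is activated, this
 * per-pool default is scaled by the number of data disks, so a 10-wide
 * RAIDZ2 top-level vdev gets a group aliquot of 8 * 1MB. This mirrors
 * the computation in metaslab_group_activate() below.
 */
#if 0
	mg->mg_aliquot = metaslab_aliquot * MAX(1,
	    vdev_get_ndisks(mg->mg_vd) - vdev_get_nparity(mg->mg_vd));
#endif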

/*
 * For testing, make some blocks above a certain size be gang blocks.
 */
unsigned long metaslab_force_ganging = SPA_MAXBLOCKSIZE + 1;

/*
 * In pools where the log space map feature is not enabled we touch
 * multiple metaslabs (and their respective space maps) with each
 * transaction group. Thus, we benefit from having a small space map
 * block size since it allows us to issue more I/O operations scattered
 * around the disk. So a sane default for the space map block size
 * is 8-16K.
 */
int zfs_metaslab_sm_blksz_no_log = (1 << 14);

/*
 * When the log space map feature is enabled, we accumulate a lot of
 * changes per metaslab that are flushed once in a while so we benefit
 * from a bigger block size like 128K for the metaslab space maps.
 */
int zfs_metaslab_sm_blksz_with_log = (1 << 17);

/*
 * The in-core space map representation is more compact than its on-disk form.
 * The zfs_condense_pct determines how much more compact the in-core
 * space map representation must be before we compact it on-disk.
 * Values should be greater than or equal to 100.
 */
int zfs_condense_pct = 200;

/*
 * Condensing a metaslab is not guaranteed to actually reduce the amount of
 * space used on disk. In particular, a space map uses data in increments of
 * MAX(1 << ashift, space_map_blksz), so a metaslab might use the
 * same number of blocks after condensing. Since the goal of condensing is to
 * reduce the number of IOPs required to read the space map, we only want to
 * condense when we can be sure we will reduce the number of blocks used by the
 * space map. Unfortunately, we cannot precisely compute whether or not this is
 * the case in metaslab_should_condense since we are holding ms_lock. Instead,
 * we apply the following heuristic: do not condense a space map unless the
 * uncondensed size consumes greater than zfs_metaslab_condense_block_threshold
 * blocks.
 */
static const int zfs_metaslab_condense_block_threshold = 4;

/*
 * The zfs_mg_noalloc_threshold defines which metaslab groups should
 * be eligible for allocation. The value is defined as a percentage of
 * free space. Metaslab groups that have more free space than
 * zfs_mg_noalloc_threshold are always eligible for allocations. Once
 * a metaslab group's free space is less than or equal to the
 * zfs_mg_noalloc_threshold the allocator will avoid allocating to that
 * group unless all groups in the pool have reached zfs_mg_noalloc_threshold.
 * Once all groups in the pool reach zfs_mg_noalloc_threshold then all
 * groups are allowed to accept allocations. Gang blocks are always
 * eligible to allocate on any metaslab group. The default value of 0 means
 * no metaslab group will be excluded based on this criterion.
 */
static int zfs_mg_noalloc_threshold = 0;
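
/*
 * Illustrative sketch (not compiled): the threshold above is compared
 * against a group's free capacity, computed as a percentage in
 * metaslab_group_alloc_update() below. E.g. a vdev with 1000G of space
 * and 950G allocated has a free capacity of 5 and would be skipped with
 * zfs_mg_noalloc_threshold=10 while other groups remain above it.
 */
#if 0
	mg->mg_free_capacity = ((vs->vs_space - vs->vs_alloc) * 100) /
	    (vs->vs_space + 1);
#endif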

/*
 * Metaslab groups are considered eligible for allocations if their
 * fragmentation metric (measured as a percentage) is less than or
 * equal to zfs_mg_fragmentation_threshold. If a metaslab group
 * exceeds this threshold then it will be skipped unless all metaslab
 * groups within the metaslab class have also crossed this threshold.
 *
 * This tunable was introduced to avoid edge cases where we continue
 * allocating from very fragmented disks in our pool while other, less
 * fragmented disks exist. On the other hand, if all disks in the
 * pool are uniformly approaching the threshold, the threshold can
 * be a speed bump in performance, where we keep switching the disks
 * that we allocate from (e.g. we allocate some segments from disk A,
 * pushing it past the threshold, while freeing segments from disk B
 * brings its fragmentation below the threshold).
 *
 * Empirically, we've seen that our vdev selection for allocations is
 * good enough that fragmentation increases uniformly across all vdevs
 * the majority of the time. Thus we set the threshold percentage high
 * enough to avoid hitting the speed bump on pools that are being pushed
 * to the edge.
 */
static int zfs_mg_fragmentation_threshold = 95;

/*
 * Allow metaslabs to keep their active state as long as their fragmentation
 * percentage is less than or equal to zfs_metaslab_fragmentation_threshold. An
 * active metaslab that exceeds this threshold will no longer keep its active
 * status allowing better metaslabs to be selected.
 */
static int zfs_metaslab_fragmentation_threshold = 70;

/*
 * When set will load all metaslabs when pool is first opened.
 */
int metaslab_debug_load = B_FALSE;

/*
 * When set will prevent metaslabs from being unloaded.
 */
static int metaslab_debug_unload = B_FALSE;

/*
 * Minimum size which forces the dynamic allocator to change
 * its allocation strategy. Once the space map cannot satisfy
 * an allocation of this size then it switches to using a more
 * aggressive strategy (i.e., search by size rather than offset).
 */
uint64_t metaslab_df_alloc_threshold = SPA_OLD_MAXBLOCKSIZE;

/*
 * The minimum free space, in percent, which must be available
 * in a space map to continue allocations in a first-fit fashion.
 * Once the space map's free space drops below this level we dynamically
 * switch to using best-fit allocations.
 */
int metaslab_df_free_pct = 4;
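
/*
 * Illustrative sketch (not compiled, simplified): the two tunables above
 * gate the first-fit/best-fit switch in the DF allocator. The locals
 * max_size and free_pct are stand-ins for values the allocator derives
 * from the metaslab; the exact check in metaslab_df_alloc() may differ.
 */
#if 0
	if (max_size < metaslab_df_alloc_threshold ||
	    free_pct < metaslab_df_free_pct) {
		/* fall back from first-fit to a size-sorted best-fit search */
	}
#endif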

/*
 * Maximum distance to search forward from the last offset. Without this
 * limit, fragmented pools can see >100,000 iterations and
 * metaslab_block_picker() becomes the performance limiting factor on
 * high-performance storage.
 *
 * With the default setting of 16MB, we typically see less than 500
 * iterations, even with very fragmented, ashift=9 pools. The maximum number
 * of iterations possible is:
 *	metaslab_df_max_search / (2 * (1<<ashift))
 * With the default setting of 16MB this is 16*1024 (with ashift=9) or
 * 2048 (with ashift=12).
 */
static int metaslab_df_max_search = 16 * 1024 * 1024;
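
/*
 * Illustrative arithmetic (not compiled): the bound above follows because
 * each free segment plus the gap after it spans at least 2 * (1 << ashift)
 * bytes, so with the 16MB default:
 *	16777216 / (2 * 512)  == 16384 iterations at ashift=9
 *	16777216 / (2 * 4096) ==  2048 iterations at ashift=12
 * The local ashift is a stand-in for the vdev's ashift.
 */
#if 0
	uint64_t max_iters = metaslab_df_max_search / (2ULL << ashift);
#endif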

/*
 * Forces the metaslab_block_picker function to search for at least this many
 * segments forwards until giving up on finding a segment that the allocation
 * will fit into.
 */
static const uint32_t metaslab_min_search_count = 100;

/*
 * If we are not searching forward (due to metaslab_df_max_search,
 * metaslab_df_free_pct, or metaslab_df_alloc_threshold), this tunable
 * controls what segment is used. If it is set, we will use the largest free
 * segment. If it is not set, we will use a segment of exactly the requested
 * size (or larger).
 */
static int metaslab_df_use_largest_segment = B_FALSE;

/*
 * Percentage of all cpus that can be used by the metaslab taskq.
 */
int metaslab_load_pct = 50;

/*
 * These tunables control how long a metaslab will remain loaded after the
 * last allocation from it. A metaslab can't be unloaded until at least
 * metaslab_unload_delay TXGs and metaslab_unload_delay_ms milliseconds
 * have elapsed. However, zfs_metaslab_mem_limit may cause it to be
 * unloaded sooner. These settings are intended to be generous -- to keep
 * metaslabs loaded for a long time, reducing the rate of metaslab loading.
 */
static int metaslab_unload_delay = 32;
static int metaslab_unload_delay_ms = 10 * 60 * 1000; /* ten minutes */

/*
 * Max number of metaslabs per group to preload.
 */
int metaslab_preload_limit = 10;

/*
 * Enable/disable preloading of metaslabs.
 */
static int metaslab_preload_enabled = B_TRUE;

/*
 * Enable/disable fragmentation weighting on metaslabs.
 */
static int metaslab_fragmentation_factor_enabled = B_TRUE;

/*
 * Enable/disable lba weighting (i.e. outer tracks are given preference).
 */
static int metaslab_lba_weighting_enabled = B_TRUE;

/*
 * Enable/disable metaslab group biasing.
 */
static int metaslab_bias_enabled = B_TRUE;

/*
 * Enable/disable remapping of indirect DVAs to their concrete vdevs.
 */
static const boolean_t zfs_remap_blkptr_enable = B_TRUE;

/*
 * Enable/disable segment-based metaslab selection.
 */
static int zfs_metaslab_segment_weight_enabled = B_TRUE;

/*
 * When using segment-based metaslab selection, we will continue
 * allocating from the active metaslab until we have exhausted
 * zfs_metaslab_switch_threshold of its buckets.
 */
static int zfs_metaslab_switch_threshold = 2;

/*
 * Internal switch to enable/disable the metaslab allocation tracing
 * facility.
 */
static const boolean_t metaslab_trace_enabled = B_FALSE;

/*
 * Maximum entries that the metaslab allocation tracing facility will keep
 * in a given list when running in non-debug mode. We limit the number
 * of entries in non-debug mode to prevent us from using up too much memory.
 * The limit should be sufficiently large that we don't expect any allocation
 * to ever exceed this value. In debug mode, the system will panic if this
 * limit is ever reached allowing for further investigation.
 */
static const uint64_t metaslab_trace_max_entries = 5000;

/*
 * Maximum number of metaslabs per group that can be disabled
 * simultaneously.
 */
static const int max_disabled_ms = 3;

/*
 * Time (in seconds) to respect ms_max_size when the metaslab is not loaded.
 * To avoid 64-bit overflow, don't set above UINT32_MAX.
 */
static unsigned long zfs_metaslab_max_size_cache_sec = 1 * 60 * 60; /* 1 hour */

/*
 * Maximum percentage of memory to use on storing loaded metaslabs. If loading
 * a metaslab would take it over this percentage, the oldest selected metaslab
 * is automatically unloaded.
 */
static int zfs_metaslab_mem_limit = 25;
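
/*
 * Illustrative sketch (not compiled): both unload delays above must have
 * elapsed before metaslab_class_evict_old() below will evict a metaslab.
 */
#if 0
	if (txg > msp->ms_selected_txg + metaslab_unload_delay &&
	    gethrtime() > msp->ms_selected_time +
	    (uint64_t)MSEC2NSEC(metaslab_unload_delay_ms))
		metaslab_evict(msp, txg);
#endif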

/*
 * Force the per-metaslab range trees to use 64-bit integers to store
 * segments. Used for debugging purposes.
 */
static const boolean_t zfs_metaslab_force_large_segs = B_FALSE;

/*
 * By default we only store segments over a certain size in the size-sorted
 * metaslab trees (ms_allocatable_by_size and
 * ms_unflushed_frees_by_size). This dramatically reduces memory usage and
 * improves load and unload times at the cost of causing us to use slightly
 * larger segments than we would otherwise in some cases.
 */
static const uint32_t metaslab_by_size_min_shift = 14;

/*
 * If not set, we will first try normal allocation. If that fails then
 * we will do a gang allocation. If that fails then we will do a "try hard"
 * gang allocation. If that fails then we will have a multi-layer gang
 * block.
 *
 * If set, we will first try normal allocation. If that fails then
 * we will do a "try hard" allocation. If that fails we will do a gang
 * allocation. If that fails we will do a "try hard" gang allocation. If
 * that fails then we will have a multi-layer gang block.
 */
static int zfs_metaslab_try_hard_before_gang = B_FALSE;

/*
 * When not trying hard, we only consider the best zfs_metaslab_find_max_tries
 * metaslabs. This improves performance, especially when there are many
 * metaslabs per vdev and the allocation can't actually be satisfied (so we
 * would otherwise iterate all the metaslabs). If there is a metaslab with a
 * worse weight but it can actually satisfy the allocation, we won't find it
 * until trying hard. This may happen if the worse metaslab is not loaded
 * (and the true weight is better than we have calculated), or due to weight
 * bucketization. E.g. we are looking for a 60K segment, and the best
 * metaslabs all have free segments in the 32-63K bucket, but the best
 * zfs_metaslab_find_max_tries metaslabs have ms_max_size <60KB, and a
 * subsequent metaslab has ms_max_size >60KB (but fewer segments in this
 * bucket, and therefore a lower weight).
 */
static int zfs_metaslab_find_max_tries = 100;
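
/*
 * Illustrative summary: the two fallback ladders described above, written
 * out. "Try hard" means considering every metaslab instead of only the
 * best zfs_metaslab_find_max_tries candidates.
 *
 *	zfs_metaslab_try_hard_before_gang == B_FALSE:
 *	    normal -> gang -> try-hard gang -> multi-layer gang block
 *	zfs_metaslab_try_hard_before_gang == B_TRUE:
 *	    normal -> try-hard -> gang -> try-hard gang ->
 *	    multi-layer gang block
 */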

static uint64_t metaslab_weight(metaslab_t *, boolean_t);
static void metaslab_set_fragmentation(metaslab_t *, boolean_t);
static void metaslab_free_impl(vdev_t *, uint64_t, uint64_t, boolean_t);
static void metaslab_check_free_impl(vdev_t *, uint64_t, uint64_t);

static void metaslab_passivate(metaslab_t *msp, uint64_t weight);
static uint64_t metaslab_weight_from_range_tree(metaslab_t *msp);
static void metaslab_flush_update(metaslab_t *, dmu_tx_t *);
static unsigned int metaslab_idx_func(multilist_t *, void *);
static void metaslab_evict(metaslab_t *, uint64_t);
static void metaslab_rt_add(range_tree_t *rt, range_seg_t *rs, void *arg);
kmem_cache_t *metaslab_alloc_trace_cache;

typedef struct metaslab_stats {
	kstat_named_t metaslabstat_trace_over_limit;
	kstat_named_t metaslabstat_reload_tree;
	kstat_named_t metaslabstat_too_many_tries;
	kstat_named_t metaslabstat_try_hard;
} metaslab_stats_t;

static metaslab_stats_t metaslab_stats = {
	{ "trace_over_limit",	KSTAT_DATA_UINT64 },
	{ "reload_tree",	KSTAT_DATA_UINT64 },
	{ "too_many_tries",	KSTAT_DATA_UINT64 },
	{ "try_hard",		KSTAT_DATA_UINT64 },
};

#define	METASLABSTAT_BUMP(stat) \
	atomic_inc_64(&metaslab_stats.stat.value.ui64);
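
/*
 * Illustrative usage (not compiled): bumping one of the counters defined
 * above from an allocation path.
 */
#if 0
	METASLABSTAT_BUMP(metaslabstat_try_hard);
#endif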

static kstat_t *metaslab_ksp;

void
metaslab_stat_init(void)
{
	ASSERT(metaslab_alloc_trace_cache == NULL);
	metaslab_alloc_trace_cache = kmem_cache_create(
	    "metaslab_alloc_trace_cache", sizeof (metaslab_alloc_trace_t),
	    0, NULL, NULL, NULL, NULL, NULL, 0);
	metaslab_ksp = kstat_create("zfs", 0, "metaslab_stats",
	    "misc", KSTAT_TYPE_NAMED, sizeof (metaslab_stats) /
	    sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL);
	if (metaslab_ksp != NULL) {
		metaslab_ksp->ks_data = &metaslab_stats;
		kstat_install(metaslab_ksp);
	}
}

void
metaslab_stat_fini(void)
{
	if (metaslab_ksp != NULL) {
		kstat_delete(metaslab_ksp);
		metaslab_ksp = NULL;
	}

	kmem_cache_destroy(metaslab_alloc_trace_cache);
	metaslab_alloc_trace_cache = NULL;
}

/*
 * ==========================================================================
 * Metaslab classes
 * ==========================================================================
 */
metaslab_class_t *
metaslab_class_create(spa_t *spa, const metaslab_ops_t *ops)
{
	metaslab_class_t *mc;

	mc = kmem_zalloc(offsetof(metaslab_class_t,
	    mc_allocator[spa->spa_alloc_count]), KM_SLEEP);

	mc->mc_spa = spa;
	mc->mc_ops = ops;
	mutex_init(&mc->mc_lock, NULL, MUTEX_DEFAULT, NULL);
	multilist_create(&mc->mc_metaslab_txg_list, sizeof (metaslab_t),
	    offsetof(metaslab_t, ms_class_txg_node), metaslab_idx_func);
	for (int i = 0; i < spa->spa_alloc_count; i++) {
		metaslab_class_allocator_t *mca = &mc->mc_allocator[i];
		mca->mca_rotor = NULL;
		zfs_refcount_create_tracked(&mca->mca_alloc_slots);
	}

	return (mc);
}

void
metaslab_class_destroy(metaslab_class_t *mc)
{
	spa_t *spa = mc->mc_spa;

	ASSERT(mc->mc_alloc == 0);
	ASSERT(mc->mc_deferred == 0);
	ASSERT(mc->mc_space == 0);
	ASSERT(mc->mc_dspace == 0);

	for (int i = 0; i < spa->spa_alloc_count; i++) {
		metaslab_class_allocator_t *mca = &mc->mc_allocator[i];
		ASSERT(mca->mca_rotor == NULL);
		zfs_refcount_destroy(&mca->mca_alloc_slots);
	}
	mutex_destroy(&mc->mc_lock);
	multilist_destroy(&mc->mc_metaslab_txg_list);
	kmem_free(mc, offsetof(metaslab_class_t,
	    mc_allocator[spa->spa_alloc_count]));
}
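
/*
 * Illustrative usage (not compiled): a class is created per allocation
 * tier at pool load and torn down at unload. The spa field and ops symbol
 * here are assumptions for the sketch, not taken from this file.
 */
#if 0
	spa->spa_normal_class = metaslab_class_create(spa, zfs_metaslab_ops);
	/* ... pool lifetime ... */
	metaslab_class_destroy(spa->spa_normal_class);
#endif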

int
metaslab_class_validate(metaslab_class_t *mc)
{
	metaslab_group_t *mg;
	vdev_t *vd;

	/*
	 * Must hold one of the spa_config locks.
	 */
	ASSERT(spa_config_held(mc->mc_spa, SCL_ALL, RW_READER) ||
	    spa_config_held(mc->mc_spa, SCL_ALL, RW_WRITER));

	if ((mg = mc->mc_allocator[0].mca_rotor) == NULL)
		return (0);

	do {
		vd = mg->mg_vd;
		ASSERT(vd->vdev_mg != NULL);
		ASSERT3P(vd->vdev_top, ==, vd);
		ASSERT3P(mg->mg_class, ==, mc);
		ASSERT3P(vd->vdev_ops, !=, &vdev_hole_ops);
	} while ((mg = mg->mg_next) != mc->mc_allocator[0].mca_rotor);

	return (0);
}

static void
metaslab_class_space_update(metaslab_class_t *mc, int64_t alloc_delta,
    int64_t defer_delta, int64_t space_delta, int64_t dspace_delta)
{
	atomic_add_64(&mc->mc_alloc, alloc_delta);
	atomic_add_64(&mc->mc_deferred, defer_delta);
	atomic_add_64(&mc->mc_space, space_delta);
	atomic_add_64(&mc->mc_dspace, dspace_delta);
}

uint64_t
metaslab_class_get_alloc(metaslab_class_t *mc)
{
	return (mc->mc_alloc);
}

uint64_t
metaslab_class_get_deferred(metaslab_class_t *mc)
{
	return (mc->mc_deferred);
}

uint64_t
metaslab_class_get_space(metaslab_class_t *mc)
{
	return (mc->mc_space);
}

uint64_t
metaslab_class_get_dspace(metaslab_class_t *mc)
{
	return (spa_deflate(mc->mc_spa) ? mc->mc_dspace : mc->mc_space);
}

void
metaslab_class_histogram_verify(metaslab_class_t *mc)
{
	spa_t *spa = mc->mc_spa;
	vdev_t *rvd = spa->spa_root_vdev;
	uint64_t *mc_hist;
	int i;

	if ((zfs_flags & ZFS_DEBUG_HISTOGRAM_VERIFY) == 0)
		return;

	mc_hist = kmem_zalloc(sizeof (uint64_t) * RANGE_TREE_HISTOGRAM_SIZE,
	    KM_SLEEP);

	mutex_enter(&mc->mc_lock);
	for (int c = 0; c < rvd->vdev_children; c++) {
		vdev_t *tvd = rvd->vdev_child[c];
		metaslab_group_t *mg = vdev_get_mg(tvd, mc);

		/*
		 * Skip any holes, uninitialized top-levels, or
		 * vdevs that are not in this metaslab class.
		 */
		if (!vdev_is_concrete(tvd) || tvd->vdev_ms_shift == 0 ||
		    mg->mg_class != mc) {
			continue;
		}

		IMPLY(mg == mg->mg_vd->vdev_log_mg,
		    mc == spa_embedded_log_class(mg->mg_vd->vdev_spa));

		for (i = 0; i < RANGE_TREE_HISTOGRAM_SIZE; i++)
			mc_hist[i] += mg->mg_histogram[i];
	}

	for (i = 0; i < RANGE_TREE_HISTOGRAM_SIZE; i++) {
		VERIFY3U(mc_hist[i], ==, mc->mc_histogram[i]);
	}

	mutex_exit(&mc->mc_lock);
	kmem_free(mc_hist, sizeof (uint64_t) * RANGE_TREE_HISTOGRAM_SIZE);
}

/*
 * Calculate the metaslab class's fragmentation metric. The metric
 * is weighted based on the space contribution of each metaslab group.
 * The return value will be a number between 0 and 100 (inclusive), or
 * ZFS_FRAG_INVALID if the metric has not been set. See comment above the
 * zfs_frag_table for more information about the metric.
 */
uint64_t
metaslab_class_fragmentation(metaslab_class_t *mc)
{
	vdev_t *rvd = mc->mc_spa->spa_root_vdev;
	uint64_t fragmentation = 0;

	spa_config_enter(mc->mc_spa, SCL_VDEV, FTAG, RW_READER);

	for (int c = 0; c < rvd->vdev_children; c++) {
		vdev_t *tvd = rvd->vdev_child[c];
		metaslab_group_t *mg = tvd->vdev_mg;

		/*
		 * Skip any holes, uninitialized top-levels,
		 * or vdevs that are not in this metaslab class.
		 */
		if (!vdev_is_concrete(tvd) || tvd->vdev_ms_shift == 0 ||
		    mg->mg_class != mc) {
			continue;
		}

		/*
		 * If a metaslab group does not contain a fragmentation
		 * metric then just bail out.
		 */
		if (mg->mg_fragmentation == ZFS_FRAG_INVALID) {
			spa_config_exit(mc->mc_spa, SCL_VDEV, FTAG);
			return (ZFS_FRAG_INVALID);
		}

		/*
		 * Determine how much this metaslab_group is contributing
		 * to the overall pool fragmentation metric.
		 */
		fragmentation += mg->mg_fragmentation *
		    metaslab_group_get_space(mg);
	}
	fragmentation /= metaslab_class_get_space(mc);

	ASSERT3U(fragmentation, <=, 100);
	spa_config_exit(mc->mc_spa, SCL_VDEV, FTAG);
	return (fragmentation);
}
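
/*
 * Illustrative arithmetic: the loop above computes a space-weighted
 * average. For example, a 100G group at fragmentation 30 and a 300G
 * group at fragmentation 70 yield:
 *	(30 * 100G + 70 * 300G) / 400G == 60
 */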

/*
 * Calculate the amount of expandable space that is available in
 * this metaslab class. If a device is expanded then its expandable
 * space will be the amount of allocatable space that is currently not
 * part of this metaslab class.
 */
uint64_t
metaslab_class_expandable_space(metaslab_class_t *mc)
{
	vdev_t *rvd = mc->mc_spa->spa_root_vdev;
	uint64_t space = 0;

	spa_config_enter(mc->mc_spa, SCL_VDEV, FTAG, RW_READER);
	for (int c = 0; c < rvd->vdev_children; c++) {
		vdev_t *tvd = rvd->vdev_child[c];
		metaslab_group_t *mg = tvd->vdev_mg;

		if (!vdev_is_concrete(tvd) || tvd->vdev_ms_shift == 0 ||
		    mg->mg_class != mc) {
			continue;
		}

		/*
		 * Calculate if we have enough space to add additional
		 * metaslabs. We report the expandable space in terms
		 * of the metaslab size since that's the unit of expansion.
		 */
		space += P2ALIGN(tvd->vdev_max_asize - tvd->vdev_asize,
		    1ULL << tvd->vdev_ms_shift);
	}
	spa_config_exit(mc->mc_spa, SCL_VDEV, FTAG);
	return (space);
}
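
/*
 * Illustrative arithmetic: P2ALIGN rounds the unexpanded headroom down to
 * a whole number of metaslabs. With 4.7G of headroom and 512M metaslabs
 * (vdev_ms_shift == 29), only 9 whole metaslabs fit:
 *	P2ALIGN(4.7G, 512M) == 4.5G
 */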

void
metaslab_class_evict_old(metaslab_class_t *mc, uint64_t txg)
{
	multilist_t *ml = &mc->mc_metaslab_txg_list;
	for (int i = 0; i < multilist_get_num_sublists(ml); i++) {
		multilist_sublist_t *mls = multilist_sublist_lock(ml, i);
		metaslab_t *msp = multilist_sublist_head(mls);
		multilist_sublist_unlock(mls);
		while (msp != NULL) {
			mutex_enter(&msp->ms_lock);

			/*
			 * If the metaslab has been removed from the list
			 * (which could happen if we were at the memory limit
			 * and it was evicted during this loop), then we can't
			 * proceed and we should restart the sublist.
			 */
			if (!multilist_link_active(&msp->ms_class_txg_node)) {
				mutex_exit(&msp->ms_lock);
				i--;
				break;
			}
			mls = multilist_sublist_lock(ml, i);
			metaslab_t *next_msp = multilist_sublist_next(mls, msp);
			multilist_sublist_unlock(mls);
			if (txg >
			    msp->ms_selected_txg + metaslab_unload_delay &&
			    gethrtime() > msp->ms_selected_time +
			    (uint64_t)MSEC2NSEC(metaslab_unload_delay_ms)) {
				metaslab_evict(msp, txg);
			} else {
				/*
				 * Once we've hit a metaslab selected too
				 * recently to evict, we're done evicting for
				 * now.
				 */
				mutex_exit(&msp->ms_lock);
				break;
			}
			mutex_exit(&msp->ms_lock);
			msp = next_msp;
		}
	}
}

static int
metaslab_compare(const void *x1, const void *x2)
{
	const metaslab_t *m1 = (const metaslab_t *)x1;
	const metaslab_t *m2 = (const metaslab_t *)x2;

	int sort1 = 0;
	int sort2 = 0;
	if (m1->ms_allocator != -1 && m1->ms_primary)
		sort1 = 1;
	else if (m1->ms_allocator != -1 && !m1->ms_primary)
		sort1 = 2;
	if (m2->ms_allocator != -1 && m2->ms_primary)
		sort2 = 1;
	else if (m2->ms_allocator != -1 && !m2->ms_primary)
		sort2 = 2;

	/*
	 * Sort inactive metaslabs first, then primaries, then secondaries.
	 * When selecting a metaslab to allocate from, an allocator first
	 * tries its primary, then secondary active metaslab. If it doesn't
	 * have active metaslabs, or can't allocate from them, it searches
	 * for an inactive metaslab to activate. If it can't find a suitable
	 * one, it will steal a primary or secondary metaslab from another
	 * allocator.
	 */
	if (sort1 < sort2)
		return (-1);
	if (sort1 > sort2)
		return (1);

	int cmp = TREE_CMP(m2->ms_weight, m1->ms_weight);
	if (likely(cmp))
		return (cmp);

	IMPLY(TREE_CMP(m1->ms_start, m2->ms_start) == 0, m1 == m2);

	return (TREE_CMP(m1->ms_start, m2->ms_start));
}
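
/*
 * Illustrative summary: metaslab_compare() above orders the group tree by
 * the composite key (activation class: inactive=0 < primary=1 <
 * secondary=2, then ms_weight descending, then ms_start ascending as a
 * unique tie-breaker), so avl_first() yields the highest-weight inactive
 * metaslab.
 */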

/*
 * ==========================================================================
 * Metaslab groups
 * ==========================================================================
 */
/*
 * Update the allocatable flag and the metaslab group's capacity.
 * The allocatable flag is set to true if the group's free capacity
 * is greater than zfs_mg_noalloc_threshold and its fragmentation
 * metric is less than or equal to zfs_mg_fragmentation_threshold.
 * If a metaslab group transitions from allocatable to non-allocatable
 * or vice versa then the metaslab group's class is updated to reflect
 * the transition.
 */
static void
metaslab_group_alloc_update(metaslab_group_t *mg)
{
	vdev_t *vd = mg->mg_vd;
	metaslab_class_t *mc = mg->mg_class;
	vdev_stat_t *vs = &vd->vdev_stat;
	boolean_t was_allocatable;
	boolean_t was_initialized;

	ASSERT(vd == vd->vdev_top);
	ASSERT3U(spa_config_held(mc->mc_spa, SCL_ALLOC, RW_READER), ==,
	    SCL_ALLOC);

	mutex_enter(&mg->mg_lock);
	was_allocatable = mg->mg_allocatable;
	was_initialized = mg->mg_initialized;

	mg->mg_free_capacity = ((vs->vs_space - vs->vs_alloc) * 100) /
	    (vs->vs_space + 1);

	mutex_enter(&mc->mc_lock);

	/*
	 * If the metaslab group was just added then it won't
	 * have any space until we finish syncing out this txg.
	 * At that point we will consider it initialized and available
	 * for allocations. We also don't consider non-activated
	 * metaslab groups (e.g. vdevs that are in the middle of being removed)
	 * to be initialized, because they can't be used for allocation.
	 */
	mg->mg_initialized = metaslab_group_initialized(mg);
	if (!was_initialized && mg->mg_initialized) {
		mc->mc_groups++;
	} else if (was_initialized && !mg->mg_initialized) {
		ASSERT3U(mc->mc_groups, >, 0);
		mc->mc_groups--;
	}
	if (mg->mg_initialized)
		mg->mg_no_free_space = B_FALSE;

	/*
	 * A metaslab group is considered allocatable if it has plenty
	 * of free space or is not heavily fragmented. We only take
	 * fragmentation into account if the metaslab group has a valid
	 * fragmentation metric (i.e. a value between 0 and 100).
	 */
	mg->mg_allocatable = (mg->mg_activation_count > 0 &&
	    mg->mg_free_capacity > zfs_mg_noalloc_threshold &&
	    (mg->mg_fragmentation == ZFS_FRAG_INVALID ||
	    mg->mg_fragmentation <= zfs_mg_fragmentation_threshold));

	/*
	 * mc_alloc_groups maintains a count of the number of
	 * groups in this metaslab class that are still above the
	 * zfs_mg_noalloc_threshold. This is used by the allocating
	 * threads to determine if they should avoid allocations to
	 * a given group. The allocator will avoid allocations to a group
	 * if that group has reached or is below the zfs_mg_noalloc_threshold
	 * and there are still other groups that are above the threshold.
	 * When a group transitions from allocatable to non-allocatable or
	 * vice versa we update the metaslab class to reflect that change.
	 * When the mc_alloc_groups value drops to 0 that means that all
	 * groups have reached the zfs_mg_noalloc_threshold making all groups
	 * eligible for allocations. This effectively means that all devices
	 * are balanced again.
	 */
	if (was_allocatable && !mg->mg_allocatable)
		mc->mc_alloc_groups--;
	else if (!was_allocatable && mg->mg_allocatable)
		mc->mc_alloc_groups++;
	mutex_exit(&mc->mc_lock);

	mutex_exit(&mg->mg_lock);
}

int
metaslab_sort_by_flushed(const void *va, const void *vb)
{
	const metaslab_t *a = va;
	const metaslab_t *b = vb;

	int cmp = TREE_CMP(a->ms_unflushed_txg, b->ms_unflushed_txg);
	if (likely(cmp))
		return (cmp);

	uint64_t a_vdev_id = a->ms_group->mg_vd->vdev_id;
	uint64_t b_vdev_id = b->ms_group->mg_vd->vdev_id;
	cmp = TREE_CMP(a_vdev_id, b_vdev_id);
	if (cmp)
		return (cmp);

	return (TREE_CMP(a->ms_id, b->ms_id));
}

metaslab_group_t *
metaslab_group_create(metaslab_class_t *mc, vdev_t *vd, int allocators)
{
	metaslab_group_t *mg;

	mg = kmem_zalloc(offsetof(metaslab_group_t,
	    mg_allocator[allocators]), KM_SLEEP);
	mutex_init(&mg->mg_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&mg->mg_ms_disabled_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&mg->mg_ms_disabled_cv, NULL, CV_DEFAULT, NULL);
	avl_create(&mg->mg_metaslab_tree, metaslab_compare,
	    sizeof (metaslab_t), offsetof(metaslab_t, ms_group_node));
	mg->mg_vd = vd;
	mg->mg_class = mc;
	mg->mg_activation_count = 0;
	mg->mg_initialized = B_FALSE;
	mg->mg_no_free_space = B_TRUE;
	mg->mg_allocators = allocators;

	for (int i = 0; i < allocators; i++) {
		metaslab_group_allocator_t *mga = &mg->mg_allocator[i];
		zfs_refcount_create_tracked(&mga->mga_alloc_queue_depth);
	}

	mg->mg_taskq = taskq_create("metaslab_group_taskq", metaslab_load_pct,
	    maxclsyspri, 10, INT_MAX, TASKQ_THREADS_CPU_PCT | TASKQ_DYNAMIC);

	return (mg);
}

void
metaslab_group_destroy(metaslab_group_t *mg)
{
	ASSERT(mg->mg_prev == NULL);
	ASSERT(mg->mg_next == NULL);
	/*
	 * We may have gone below zero with the activation count
	 * either because we never activated in the first place or
	 * because we're done, and possibly removing the vdev.
	 */
	ASSERT(mg->mg_activation_count <= 0);

	taskq_destroy(mg->mg_taskq);
	avl_destroy(&mg->mg_metaslab_tree);
	mutex_destroy(&mg->mg_lock);
	mutex_destroy(&mg->mg_ms_disabled_lock);
	cv_destroy(&mg->mg_ms_disabled_cv);

	for (int i = 0; i < mg->mg_allocators; i++) {
		metaslab_group_allocator_t *mga = &mg->mg_allocator[i];
		zfs_refcount_destroy(&mga->mga_alloc_queue_depth);
	}
	kmem_free(mg, offsetof(metaslab_group_t,
	    mg_allocator[mg->mg_allocators]));
}

void
metaslab_group_activate(metaslab_group_t *mg)
{
	metaslab_class_t *mc = mg->mg_class;
	spa_t *spa = mc->mc_spa;
	metaslab_group_t *mgprev, *mgnext;

	ASSERT3U(spa_config_held(spa, SCL_ALLOC, RW_WRITER), !=, 0);

	ASSERT(mg->mg_prev == NULL);
	ASSERT(mg->mg_next == NULL);
	ASSERT(mg->mg_activation_count <= 0);

	if (++mg->mg_activation_count <= 0)
		return;

	mg->mg_aliquot = metaslab_aliquot * MAX(1,
	    vdev_get_ndisks(mg->mg_vd) - vdev_get_nparity(mg->mg_vd));
	metaslab_group_alloc_update(mg);

	if ((mgprev = mc->mc_allocator[0].mca_rotor) == NULL) {
		mg->mg_prev = mg;
		mg->mg_next = mg;
	} else {
		mgnext = mgprev->mg_next;
		mg->mg_prev = mgprev;
		mg->mg_next = mgnext;
		mgprev->mg_next = mg;
		mgnext->mg_prev = mg;
	}
	for (int i = 0; i < spa->spa_alloc_count; i++) {
		mc->mc_allocator[i].mca_rotor = mg;
		mg = mg->mg_next;
	}
}

/*
 * Passivate a metaslab group and remove it from the allocation rotor.
 * Callers must hold both the SCL_ALLOC and SCL_ZIO lock prior to passivating
 * a metaslab group. This function will momentarily drop spa_config_locks
 * that are lower than the SCL_ALLOC lock (see comment below).
 */
void
metaslab_group_passivate(metaslab_group_t *mg)
{
	metaslab_class_t *mc = mg->mg_class;
	spa_t *spa = mc->mc_spa;
	metaslab_group_t *mgprev, *mgnext;
	int locks = spa_config_held(spa, SCL_ALL, RW_WRITER);

	ASSERT3U(spa_config_held(spa, SCL_ALLOC | SCL_ZIO, RW_WRITER), ==,
	    (SCL_ALLOC | SCL_ZIO));

	if (--mg->mg_activation_count != 0) {
		for (int i = 0; i < spa->spa_alloc_count; i++)
			ASSERT(mc->mc_allocator[i].mca_rotor != mg);
		ASSERT(mg->mg_prev == NULL);
		ASSERT(mg->mg_next == NULL);
		ASSERT(mg->mg_activation_count < 0);
		return;
	}

	/*
	 * The spa_config_lock is an array of rwlocks, ordered as
	 * follows (from highest to lowest):
	 *	SCL_CONFIG > SCL_STATE > SCL_L2ARC > SCL_ALLOC >
	 *	SCL_ZIO > SCL_FREE > SCL_VDEV
	 * (For more information about the spa_config_lock see spa_misc.c)
	 * The higher the lock, the broader its coverage. When we passivate
	 * a metaslab group, we must hold both the SCL_ALLOC and the SCL_ZIO
	 * config locks. However, the metaslab group's taskq might be trying
	 * to preload metaslabs so we must drop the SCL_ZIO lock and any
	 * lower locks to allow the I/O to complete. At a minimum,
	 * we continue to hold the SCL_ALLOC lock, which prevents any future
	 * allocations from taking place and any changes to the vdev tree.
	 */
	spa_config_exit(spa, locks & ~(SCL_ZIO - 1), spa);
	taskq_wait_outstanding(mg->mg_taskq, 0);
	spa_config_enter(spa, locks & ~(SCL_ZIO - 1), spa, RW_WRITER);
	metaslab_group_alloc_update(mg);
	for (int i = 0; i < mg->mg_allocators; i++) {
		metaslab_group_allocator_t *mga = &mg->mg_allocator[i];
		metaslab_t *msp = mga->mga_primary;
		if (msp != NULL) {
			mutex_enter(&msp->ms_lock);
			metaslab_passivate(msp,
			    metaslab_weight_from_range_tree(msp));
			mutex_exit(&msp->ms_lock);
		}
		msp = mga->mga_secondary;
		if (msp != NULL) {
			mutex_enter(&msp->ms_lock);
			metaslab_passivate(msp,
			    metaslab_weight_from_range_tree(msp));
			mutex_exit(&msp->ms_lock);
		}
	}

	mgprev = mg->mg_prev;
	mgnext = mg->mg_next;

	if (mg == mgnext) {
		mgnext = NULL;
	} else {
		mgprev->mg_next = mgnext;
		mgnext->mg_prev = mgprev;
	}
	for (int i = 0; i < spa->spa_alloc_count; i++) {
		if (mc->mc_allocator[i].mca_rotor == mg)
			mc->mc_allocator[i].mca_rotor = mgnext;
	}

	mg->mg_prev = NULL;
	mg->mg_next = NULL;
}

boolean_t
metaslab_group_initialized(metaslab_group_t *mg)
{
	vdev_t *vd = mg->mg_vd;
	vdev_stat_t *vs = &vd->vdev_stat;

	return (vs->vs_space != 0 && mg->mg_activation_count > 0);
}

uint64_t
metaslab_group_get_space(metaslab_group_t *mg)
{
	/*
	 * Note that the number of nodes in mg_metaslab_tree may be one less
	 * than vdev_ms_count, due to the embedded log metaslab.
	 */
	mutex_enter(&mg->mg_lock);
	uint64_t ms_count = avl_numnodes(&mg->mg_metaslab_tree);
	mutex_exit(&mg->mg_lock);
	return ((1ULL << mg->mg_vd->vdev_ms_shift) * ms_count);
}

void
metaslab_group_histogram_verify(metaslab_group_t *mg)
{
	uint64_t *mg_hist;
	avl_tree_t *t = &mg->mg_metaslab_tree;
	uint64_t ashift = mg->mg_vd->vdev_ashift;

	if ((zfs_flags & ZFS_DEBUG_HISTOGRAM_VERIFY) == 0)
		return;

	mg_hist = kmem_zalloc(sizeof (uint64_t) * RANGE_TREE_HISTOGRAM_SIZE,
	    KM_SLEEP);

	ASSERT3U(RANGE_TREE_HISTOGRAM_SIZE, >=,
	    SPACE_MAP_HISTOGRAM_SIZE + ashift);

	mutex_enter(&mg->mg_lock);
	for (metaslab_t *msp = avl_first(t);
	    msp != NULL; msp = AVL_NEXT(t, msp)) {
		VERIFY3P(msp->ms_group, ==, mg);
		/* skip if not active */
		if (msp->ms_sm == NULL)
			continue;

		for (int i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++) {
			mg_hist[i + ashift] +=
			    msp->ms_sm->sm_phys->smp_histogram[i];
		}
	}

	for (int i = 0; i < RANGE_TREE_HISTOGRAM_SIZE; i++)
		VERIFY3U(mg_hist[i], ==, mg->mg_histogram[i]);

	mutex_exit(&mg->mg_lock);

	kmem_free(mg_hist, sizeof (uint64_t) * RANGE_TREE_HISTOGRAM_SIZE);
}

static void
metaslab_group_histogram_add(metaslab_group_t *mg, metaslab_t *msp)
{
	metaslab_class_t *mc = mg->mg_class;
	uint64_t ashift = mg->mg_vd->vdev_ashift;

	ASSERT(MUTEX_HELD(&msp->ms_lock));
	if (msp->ms_sm == NULL)
		return;

	mutex_enter(&mg->mg_lock);
	mutex_enter(&mc->mc_lock);
	for (int i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++) {
		IMPLY(mg == mg->mg_vd->vdev_log_mg,
		    mc == spa_embedded_log_class(mg->mg_vd->vdev_spa));
		mg->mg_histogram[i + ashift] +=
		    msp->ms_sm->sm_phys->smp_histogram[i];
		mc->mc_histogram[i + ashift] +=
		    msp->ms_sm->sm_phys->smp_histogram[i];
	}
	mutex_exit(&mc->mc_lock);
	mutex_exit(&mg->mg_lock);
}
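
/*
 * Illustrative arithmetic: the "+ ashift" above converts a space map
 * histogram bucket, indexed relative to the minimum allocation size, into
 * the group/class histograms, which are indexed by absolute power-of-two
 * segment size. E.g. with ashift=12, space map bucket 0 (4K segments)
 * accumulates into mg_histogram[12]. This assumes, as the code above
 * does implicitly, that the space map's shift matches the vdev's ashift.
 */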
metaslab_class_t *mc = mg->mg_class; 1090eda14cbcSMatt Macy uint64_t ashift = mg->mg_vd->vdev_ashift; 1091eda14cbcSMatt Macy 1092eda14cbcSMatt Macy ASSERT(MUTEX_HELD(&msp->ms_lock)); 1093eda14cbcSMatt Macy if (msp->ms_sm == NULL) 1094eda14cbcSMatt Macy return; 1095eda14cbcSMatt Macy 1096eda14cbcSMatt Macy mutex_enter(&mg->mg_lock); 1097184c1b94SMartin Matuska mutex_enter(&mc->mc_lock); 1098eda14cbcSMatt Macy for (int i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++) { 1099eda14cbcSMatt Macy ASSERT3U(mg->mg_histogram[i + ashift], >=, 1100eda14cbcSMatt Macy msp->ms_sm->sm_phys->smp_histogram[i]); 1101eda14cbcSMatt Macy ASSERT3U(mc->mc_histogram[i + ashift], >=, 1102eda14cbcSMatt Macy msp->ms_sm->sm_phys->smp_histogram[i]); 1103184c1b94SMartin Matuska IMPLY(mg == mg->mg_vd->vdev_log_mg, 1104184c1b94SMartin Matuska mc == spa_embedded_log_class(mg->mg_vd->vdev_spa)); 1105eda14cbcSMatt Macy 1106eda14cbcSMatt Macy mg->mg_histogram[i + ashift] -= 1107eda14cbcSMatt Macy msp->ms_sm->sm_phys->smp_histogram[i]; 1108eda14cbcSMatt Macy mc->mc_histogram[i + ashift] -= 1109eda14cbcSMatt Macy msp->ms_sm->sm_phys->smp_histogram[i]; 1110eda14cbcSMatt Macy } 1111184c1b94SMartin Matuska mutex_exit(&mc->mc_lock); 1112eda14cbcSMatt Macy mutex_exit(&mg->mg_lock); 1113eda14cbcSMatt Macy } 1114eda14cbcSMatt Macy 1115eda14cbcSMatt Macy static void 1116eda14cbcSMatt Macy metaslab_group_add(metaslab_group_t *mg, metaslab_t *msp) 1117eda14cbcSMatt Macy { 1118eda14cbcSMatt Macy ASSERT(msp->ms_group == NULL); 1119eda14cbcSMatt Macy mutex_enter(&mg->mg_lock); 1120eda14cbcSMatt Macy msp->ms_group = mg; 1121eda14cbcSMatt Macy msp->ms_weight = 0; 1122eda14cbcSMatt Macy avl_add(&mg->mg_metaslab_tree, msp); 1123eda14cbcSMatt Macy mutex_exit(&mg->mg_lock); 1124eda14cbcSMatt Macy 1125eda14cbcSMatt Macy mutex_enter(&msp->ms_lock); 1126eda14cbcSMatt Macy metaslab_group_histogram_add(mg, msp); 1127eda14cbcSMatt Macy mutex_exit(&msp->ms_lock); 1128eda14cbcSMatt Macy } 1129eda14cbcSMatt Macy 1130eda14cbcSMatt Macy static void 1131eda14cbcSMatt Macy metaslab_group_remove(metaslab_group_t *mg, metaslab_t *msp) 1132eda14cbcSMatt Macy { 1133eda14cbcSMatt Macy mutex_enter(&msp->ms_lock); 1134eda14cbcSMatt Macy metaslab_group_histogram_remove(mg, msp); 1135eda14cbcSMatt Macy mutex_exit(&msp->ms_lock); 1136eda14cbcSMatt Macy 1137eda14cbcSMatt Macy mutex_enter(&mg->mg_lock); 1138eda14cbcSMatt Macy ASSERT(msp->ms_group == mg); 1139eda14cbcSMatt Macy avl_remove(&mg->mg_metaslab_tree, msp); 1140eda14cbcSMatt Macy 1141eda14cbcSMatt Macy metaslab_class_t *mc = msp->ms_group->mg_class; 1142eda14cbcSMatt Macy multilist_sublist_t *mls = 11433ff01b23SMartin Matuska multilist_sublist_lock_obj(&mc->mc_metaslab_txg_list, msp); 1144eda14cbcSMatt Macy if (multilist_link_active(&msp->ms_class_txg_node)) 1145eda14cbcSMatt Macy multilist_sublist_remove(mls, msp); 1146eda14cbcSMatt Macy multilist_sublist_unlock(mls); 1147eda14cbcSMatt Macy 1148eda14cbcSMatt Macy msp->ms_group = NULL; 1149eda14cbcSMatt Macy mutex_exit(&mg->mg_lock); 1150eda14cbcSMatt Macy } 1151eda14cbcSMatt Macy 1152eda14cbcSMatt Macy static void 1153eda14cbcSMatt Macy metaslab_group_sort_impl(metaslab_group_t *mg, metaslab_t *msp, uint64_t weight) 1154eda14cbcSMatt Macy { 1155eda14cbcSMatt Macy ASSERT(MUTEX_HELD(&msp->ms_lock)); 1156eda14cbcSMatt Macy ASSERT(MUTEX_HELD(&mg->mg_lock)); 1157eda14cbcSMatt Macy ASSERT(msp->ms_group == mg); 1158eda14cbcSMatt Macy 1159eda14cbcSMatt Macy avl_remove(&mg->mg_metaslab_tree, msp); 1160eda14cbcSMatt Macy msp->ms_weight = weight; 1161eda14cbcSMatt Macy 
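	/*
	 * The tree is keyed by ms_weight, so the metaslab had to be
	 * removed before the weight update above and is re-inserted
	 * here; updating the key in place would corrupt the AVL
	 * ordering.
	 */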
avl_add(&mg->mg_metaslab_tree, msp); 1162eda14cbcSMatt Macy 1163eda14cbcSMatt Macy } 1164eda14cbcSMatt Macy 1165eda14cbcSMatt Macy static void 1166eda14cbcSMatt Macy metaslab_group_sort(metaslab_group_t *mg, metaslab_t *msp, uint64_t weight) 1167eda14cbcSMatt Macy { 1168eda14cbcSMatt Macy /* 1169eda14cbcSMatt Macy * Although in principle the weight can be any value, in 1170eda14cbcSMatt Macy * practice we do not use values in the range [1, 511]. 1171eda14cbcSMatt Macy */ 1172eda14cbcSMatt Macy ASSERT(weight >= SPA_MINBLOCKSIZE || weight == 0); 1173eda14cbcSMatt Macy ASSERT(MUTEX_HELD(&msp->ms_lock)); 1174eda14cbcSMatt Macy 1175eda14cbcSMatt Macy mutex_enter(&mg->mg_lock); 1176eda14cbcSMatt Macy metaslab_group_sort_impl(mg, msp, weight); 1177eda14cbcSMatt Macy mutex_exit(&mg->mg_lock); 1178eda14cbcSMatt Macy } 1179eda14cbcSMatt Macy 1180eda14cbcSMatt Macy /* 1181eda14cbcSMatt Macy * Calculate the fragmentation for a given metaslab group. We can use 1182eda14cbcSMatt Macy * a simple average here since all metaslabs within the group must have 1183eda14cbcSMatt Macy * the same size. The return value is between 0 and 100 1184eda14cbcSMatt Macy * (inclusive), or ZFS_FRAG_INVALID if less than half of the metaslabs in this 1185eda14cbcSMatt Macy * group have a fragmentation metric. 1186eda14cbcSMatt Macy */ 1187eda14cbcSMatt Macy uint64_t 1188eda14cbcSMatt Macy metaslab_group_fragmentation(metaslab_group_t *mg) 1189eda14cbcSMatt Macy { 1190eda14cbcSMatt Macy vdev_t *vd = mg->mg_vd; 1191eda14cbcSMatt Macy uint64_t fragmentation = 0; 1192eda14cbcSMatt Macy uint64_t valid_ms = 0; 1193eda14cbcSMatt Macy 1194eda14cbcSMatt Macy for (int m = 0; m < vd->vdev_ms_count; m++) { 1195eda14cbcSMatt Macy metaslab_t *msp = vd->vdev_ms[m]; 1196eda14cbcSMatt Macy 1197eda14cbcSMatt Macy if (msp->ms_fragmentation == ZFS_FRAG_INVALID) 1198eda14cbcSMatt Macy continue; 1199eda14cbcSMatt Macy if (msp->ms_group != mg) 1200eda14cbcSMatt Macy continue; 1201eda14cbcSMatt Macy 1202eda14cbcSMatt Macy valid_ms++; 1203eda14cbcSMatt Macy fragmentation += msp->ms_fragmentation; 1204eda14cbcSMatt Macy } 1205eda14cbcSMatt Macy 1206eda14cbcSMatt Macy if (valid_ms <= mg->mg_vd->vdev_ms_count / 2) 1207eda14cbcSMatt Macy return (ZFS_FRAG_INVALID); 1208eda14cbcSMatt Macy 1209eda14cbcSMatt Macy fragmentation /= valid_ms; 1210eda14cbcSMatt Macy ASSERT3U(fragmentation, <=, 100); 1211eda14cbcSMatt Macy return (fragmentation); 1212eda14cbcSMatt Macy } 1213eda14cbcSMatt Macy 1214eda14cbcSMatt Macy /* 1215eda14cbcSMatt Macy * Determine if a given metaslab group should skip allocations. A metaslab 1216eda14cbcSMatt Macy * group should avoid allocations if its free capacity is less than the 1217eda14cbcSMatt Macy * zfs_mg_noalloc_threshold or its fragmentation metric is greater than 1218eda14cbcSMatt Macy * zfs_mg_fragmentation_threshold and there is at least one metaslab group 1219eda14cbcSMatt Macy * that can still handle allocations. If the allocation throttle is enabled 1220eda14cbcSMatt Macy * then we skip allocations to devices that have reached their maximum 1221eda14cbcSMatt Macy * allocation queue depth unless the selected metaslab group is the only 1222eda14cbcSMatt Macy * eligible group remaining.
1223eda14cbcSMatt Macy */ 1224eda14cbcSMatt Macy static boolean_t 1225eda14cbcSMatt Macy metaslab_group_allocatable(metaslab_group_t *mg, metaslab_group_t *rotor, 1226eda14cbcSMatt Macy uint64_t psize, int allocator, int d) 1227eda14cbcSMatt Macy { 1228eda14cbcSMatt Macy spa_t *spa = mg->mg_vd->vdev_spa; 1229eda14cbcSMatt Macy metaslab_class_t *mc = mg->mg_class; 1230eda14cbcSMatt Macy 1231eda14cbcSMatt Macy /* 1232eda14cbcSMatt Macy * We can only consider skipping this metaslab group if it's 1233eda14cbcSMatt Macy * in the normal metaslab class and there are other metaslab 1234eda14cbcSMatt Macy * groups to select from. Otherwise, we always consider it eligible 1235eda14cbcSMatt Macy * for allocations. 1236eda14cbcSMatt Macy */ 1237eda14cbcSMatt Macy if ((mc != spa_normal_class(spa) && 1238eda14cbcSMatt Macy mc != spa_special_class(spa) && 1239eda14cbcSMatt Macy mc != spa_dedup_class(spa)) || 1240eda14cbcSMatt Macy mc->mc_groups <= 1) 1241eda14cbcSMatt Macy return (B_TRUE); 1242eda14cbcSMatt Macy 1243eda14cbcSMatt Macy /* 1244eda14cbcSMatt Macy * If the metaslab group's mg_allocatable flag is set (see comments 1245eda14cbcSMatt Macy * in metaslab_group_alloc_update() for more information) and 1246eda14cbcSMatt Macy * the allocation throttle is disabled then allow allocations to this 1247eda14cbcSMatt Macy * device. However, if the allocation throttle is enabled then 12487877fdebSMatt Macy * check if we have reached our allocation limit (mga_alloc_queue_depth) 1249eda14cbcSMatt Macy * to determine if we should allow allocations to this metaslab group. 1250eda14cbcSMatt Macy * If all metaslab groups are no longer considered allocatable 1251eda14cbcSMatt Macy * (mc_alloc_groups == 0) or we're trying to allocate the smallest 1252eda14cbcSMatt Macy * gang block size then we allow allocations on this metaslab group 1253eda14cbcSMatt Macy * regardless of the mg_allocatable or throttle settings. 1254eda14cbcSMatt Macy */ 1255eda14cbcSMatt Macy if (mg->mg_allocatable) { 1256eda14cbcSMatt Macy metaslab_group_allocator_t *mga = &mg->mg_allocator[allocator]; 1257eda14cbcSMatt Macy int64_t qdepth; 1258eda14cbcSMatt Macy uint64_t qmax = mga->mga_cur_max_alloc_queue_depth; 1259eda14cbcSMatt Macy 1260eda14cbcSMatt Macy if (!mc->mc_alloc_throttle_enabled) 1261eda14cbcSMatt Macy return (B_TRUE); 1262eda14cbcSMatt Macy 1263eda14cbcSMatt Macy /* 1264eda14cbcSMatt Macy * If this metaslab group does not have any free space, then 1265eda14cbcSMatt Macy * there is no point in looking further. 1266eda14cbcSMatt Macy */ 1267eda14cbcSMatt Macy if (mg->mg_no_free_space) 1268eda14cbcSMatt Macy return (B_FALSE); 1269eda14cbcSMatt Macy 1270eda14cbcSMatt Macy /* 1271eda14cbcSMatt Macy * Relax allocation throttling for ditto blocks. Due to 1272eda14cbcSMatt Macy * random imbalances in allocation, copies tend to get pushed 1273eda14cbcSMatt Macy * to whichever vdev looks a bit better at the moment. 1274eda14cbcSMatt Macy */ 1275eda14cbcSMatt Macy qmax = qmax * (4 + d) / 4; 1276eda14cbcSMatt Macy 1277eda14cbcSMatt Macy qdepth = zfs_refcount_count(&mga->mga_alloc_queue_depth); 1278eda14cbcSMatt Macy 1279eda14cbcSMatt Macy /* 1280eda14cbcSMatt Macy * If this metaslab group is below its qmax or it's 1281eda14cbcSMatt Macy * the only allocatable metaslab group, then attempt 1282eda14cbcSMatt Macy * to allocate from it.
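		 * For example (illustrative numbers only): with a base
		 * qmax of 100, the ditto relaxation above yields an
		 * effective qmax of 100 for the first DVA (d = 0), 125
		 * for the second (d = 1), and 150 for the third (d = 2).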
1283eda14cbcSMatt Macy */ 1284eda14cbcSMatt Macy if (qdepth < qmax || mc->mc_alloc_groups == 1) 1285eda14cbcSMatt Macy return (B_TRUE); 1286eda14cbcSMatt Macy ASSERT3U(mc->mc_alloc_groups, >, 1); 1287eda14cbcSMatt Macy 1288eda14cbcSMatt Macy /* 1289eda14cbcSMatt Macy * Since this metaslab group is at or over its qmax, we 1290eda14cbcSMatt Macy * need to determine if there are metaslab groups after this 1291eda14cbcSMatt Macy * one that might be able to handle this allocation. This is 1292eda14cbcSMatt Macy * racy since we can't hold the locks for all metaslab 1293eda14cbcSMatt Macy * groups at the same time when we make this check. 1294eda14cbcSMatt Macy */ 1295eda14cbcSMatt Macy for (metaslab_group_t *mgp = mg->mg_next; 1296eda14cbcSMatt Macy mgp != rotor; mgp = mgp->mg_next) { 1297eda14cbcSMatt Macy metaslab_group_allocator_t *mgap = 1298eda14cbcSMatt Macy &mgp->mg_allocator[allocator]; 1299eda14cbcSMatt Macy qmax = mgap->mga_cur_max_alloc_queue_depth; 1300eda14cbcSMatt Macy qmax = qmax * (4 + d) / 4; 1301eda14cbcSMatt Macy qdepth = 1302eda14cbcSMatt Macy zfs_refcount_count(&mgap->mga_alloc_queue_depth); 1303eda14cbcSMatt Macy 1304eda14cbcSMatt Macy /* 1305eda14cbcSMatt Macy * If there is another metaslab group that 1306eda14cbcSMatt Macy * might be able to handle the allocation, then 1307eda14cbcSMatt Macy * we return false so that we skip this group. 1308eda14cbcSMatt Macy */ 1309eda14cbcSMatt Macy if (qdepth < qmax && !mgp->mg_no_free_space) 1310eda14cbcSMatt Macy return (B_FALSE); 1311eda14cbcSMatt Macy } 1312eda14cbcSMatt Macy 1313eda14cbcSMatt Macy /* 1314eda14cbcSMatt Macy * We didn't find another group to handle the allocation 1315eda14cbcSMatt Macy * so we can't skip this metaslab group even though 1316eda14cbcSMatt Macy * we are at or over our qmax. 1317eda14cbcSMatt Macy */ 1318eda14cbcSMatt Macy return (B_TRUE); 1319eda14cbcSMatt Macy 1320eda14cbcSMatt Macy } else if (mc->mc_alloc_groups == 0 || psize == SPA_MINBLOCKSIZE) { 1321eda14cbcSMatt Macy return (B_TRUE); 1322eda14cbcSMatt Macy } 1323eda14cbcSMatt Macy return (B_FALSE); 1324eda14cbcSMatt Macy } 1325eda14cbcSMatt Macy 1326eda14cbcSMatt Macy /* 1327eda14cbcSMatt Macy * ========================================================================== 1328eda14cbcSMatt Macy * Range tree callbacks 1329eda14cbcSMatt Macy * ========================================================================== 1330eda14cbcSMatt Macy */ 1331eda14cbcSMatt Macy 1332eda14cbcSMatt Macy /* 1333eda14cbcSMatt Macy * Comparison function for the private size-ordered tree using 32-bit 1334eda14cbcSMatt Macy * ranges. Tree is sorted by size, larger sizes at the end of the tree. 1335eda14cbcSMatt Macy */ 1336eda14cbcSMatt Macy static int 1337eda14cbcSMatt Macy metaslab_rangesize32_compare(const void *x1, const void *x2) 1338eda14cbcSMatt Macy { 1339eda14cbcSMatt Macy const range_seg32_t *r1 = x1; 1340eda14cbcSMatt Macy const range_seg32_t *r2 = x2; 1341eda14cbcSMatt Macy 1342eda14cbcSMatt Macy uint64_t rs_size1 = r1->rs_end - r1->rs_start; 1343eda14cbcSMatt Macy uint64_t rs_size2 = r2->rs_end - r2->rs_start; 1344eda14cbcSMatt Macy 1345eda14cbcSMatt Macy int cmp = TREE_CMP(rs_size1, rs_size2); 1346eda14cbcSMatt Macy if (likely(cmp)) 1347eda14cbcSMatt Macy return (cmp); 1348eda14cbcSMatt Macy 1349eda14cbcSMatt Macy return (TREE_CMP(r1->rs_start, r2->rs_start)); 1350eda14cbcSMatt Macy } 1351eda14cbcSMatt Macy 1352eda14cbcSMatt Macy /* 1353eda14cbcSMatt Macy * Comparison function for the private size-ordered tree using 64-bit 1354eda14cbcSMatt Macy * ranges. 
Tree is sorted by size, larger sizes at the end of the tree. 1355eda14cbcSMatt Macy */ 1356eda14cbcSMatt Macy static int 1357eda14cbcSMatt Macy metaslab_rangesize64_compare(const void *x1, const void *x2) 1358eda14cbcSMatt Macy { 1359eda14cbcSMatt Macy const range_seg64_t *r1 = x1; 1360eda14cbcSMatt Macy const range_seg64_t *r2 = x2; 1361eda14cbcSMatt Macy 1362eda14cbcSMatt Macy uint64_t rs_size1 = r1->rs_end - r1->rs_start; 1363eda14cbcSMatt Macy uint64_t rs_size2 = r2->rs_end - r2->rs_start; 1364eda14cbcSMatt Macy 1365eda14cbcSMatt Macy int cmp = TREE_CMP(rs_size1, rs_size2); 1366eda14cbcSMatt Macy if (likely(cmp)) 1367eda14cbcSMatt Macy return (cmp); 1368eda14cbcSMatt Macy 1369eda14cbcSMatt Macy return (TREE_CMP(r1->rs_start, r2->rs_start)); 1370eda14cbcSMatt Macy } 1371eda14cbcSMatt Macy typedef struct metaslab_rt_arg { 1372eda14cbcSMatt Macy zfs_btree_t *mra_bt; 1373eda14cbcSMatt Macy uint32_t mra_floor_shift; 1374eda14cbcSMatt Macy } metaslab_rt_arg_t; 1375eda14cbcSMatt Macy 1376eda14cbcSMatt Macy struct mssa_arg { 1377eda14cbcSMatt Macy range_tree_t *rt; 1378eda14cbcSMatt Macy metaslab_rt_arg_t *mra; 1379eda14cbcSMatt Macy }; 1380eda14cbcSMatt Macy 1381eda14cbcSMatt Macy static void 1382eda14cbcSMatt Macy metaslab_size_sorted_add(void *arg, uint64_t start, uint64_t size) 1383eda14cbcSMatt Macy { 1384eda14cbcSMatt Macy struct mssa_arg *mssap = arg; 1385eda14cbcSMatt Macy range_tree_t *rt = mssap->rt; 1386eda14cbcSMatt Macy metaslab_rt_arg_t *mrap = mssap->mra; 1387eda14cbcSMatt Macy range_seg_max_t seg = {0}; 1388eda14cbcSMatt Macy rs_set_start(&seg, rt, start); 1389eda14cbcSMatt Macy rs_set_end(&seg, rt, start + size); 1390eda14cbcSMatt Macy metaslab_rt_add(rt, &seg, mrap); 1391eda14cbcSMatt Macy } 1392eda14cbcSMatt Macy 1393eda14cbcSMatt Macy static void 1394eda14cbcSMatt Macy metaslab_size_tree_full_load(range_tree_t *rt) 1395eda14cbcSMatt Macy { 1396eda14cbcSMatt Macy metaslab_rt_arg_t *mrap = rt->rt_arg; 1397eda14cbcSMatt Macy METASLABSTAT_BUMP(metaslabstat_reload_tree); 1398eda14cbcSMatt Macy ASSERT0(zfs_btree_numnodes(mrap->mra_bt)); 1399eda14cbcSMatt Macy mrap->mra_floor_shift = 0; 1400eda14cbcSMatt Macy struct mssa_arg arg = {0}; 1401eda14cbcSMatt Macy arg.rt = rt; 1402eda14cbcSMatt Macy arg.mra = mrap; 1403eda14cbcSMatt Macy range_tree_walk(rt, metaslab_size_sorted_add, &arg); 1404eda14cbcSMatt Macy } 1405eda14cbcSMatt Macy 1406eda14cbcSMatt Macy /* 1407eda14cbcSMatt Macy * Create any block allocator specific components. The current allocators 1408eda14cbcSMatt Macy * rely on using both a size-ordered range_tree_t and an array of uint64_t's. 
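 * Note that metaslab_rt_add() deliberately keeps segments smaller than
 * (1 << mra_floor_shift) out of the size-sorted tree; such small segments
 * are rarely useful for allocation, and skipping them keeps the tree
 * smaller.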
1409eda14cbcSMatt Macy */ 1410eda14cbcSMatt Macy static void 1411eda14cbcSMatt Macy metaslab_rt_create(range_tree_t *rt, void *arg) 1412eda14cbcSMatt Macy { 1413eda14cbcSMatt Macy metaslab_rt_arg_t *mrap = arg; 1414eda14cbcSMatt Macy zfs_btree_t *size_tree = mrap->mra_bt; 1415eda14cbcSMatt Macy 1416eda14cbcSMatt Macy size_t size; 1417eda14cbcSMatt Macy int (*compare) (const void *, const void *); 1418eda14cbcSMatt Macy switch (rt->rt_type) { 1419eda14cbcSMatt Macy case RANGE_SEG32: 1420eda14cbcSMatt Macy size = sizeof (range_seg32_t); 1421eda14cbcSMatt Macy compare = metaslab_rangesize32_compare; 1422eda14cbcSMatt Macy break; 1423eda14cbcSMatt Macy case RANGE_SEG64: 1424eda14cbcSMatt Macy size = sizeof (range_seg64_t); 1425eda14cbcSMatt Macy compare = metaslab_rangesize64_compare; 1426eda14cbcSMatt Macy break; 1427eda14cbcSMatt Macy default: 1428eda14cbcSMatt Macy panic("Invalid range seg type %d", rt->rt_type); 1429eda14cbcSMatt Macy } 1430eda14cbcSMatt Macy zfs_btree_create(size_tree, compare, size); 1431eda14cbcSMatt Macy mrap->mra_floor_shift = metaslab_by_size_min_shift; 1432eda14cbcSMatt Macy } 1433eda14cbcSMatt Macy 1434eda14cbcSMatt Macy static void 1435eda14cbcSMatt Macy metaslab_rt_destroy(range_tree_t *rt, void *arg) 1436eda14cbcSMatt Macy { 1437e92ffd9bSMartin Matuska (void) rt; 1438eda14cbcSMatt Macy metaslab_rt_arg_t *mrap = arg; 1439eda14cbcSMatt Macy zfs_btree_t *size_tree = mrap->mra_bt; 1440eda14cbcSMatt Macy 1441eda14cbcSMatt Macy zfs_btree_destroy(size_tree); 1442eda14cbcSMatt Macy kmem_free(mrap, sizeof (*mrap)); 1443eda14cbcSMatt Macy } 1444eda14cbcSMatt Macy 1445eda14cbcSMatt Macy static void 1446eda14cbcSMatt Macy metaslab_rt_add(range_tree_t *rt, range_seg_t *rs, void *arg) 1447eda14cbcSMatt Macy { 1448eda14cbcSMatt Macy metaslab_rt_arg_t *mrap = arg; 1449eda14cbcSMatt Macy zfs_btree_t *size_tree = mrap->mra_bt; 1450eda14cbcSMatt Macy 1451eda14cbcSMatt Macy if (rs_get_end(rs, rt) - rs_get_start(rs, rt) < 1452eda14cbcSMatt Macy (1 << mrap->mra_floor_shift)) 1453eda14cbcSMatt Macy return; 1454eda14cbcSMatt Macy 1455eda14cbcSMatt Macy zfs_btree_add(size_tree, rs); 1456eda14cbcSMatt Macy } 1457eda14cbcSMatt Macy 1458eda14cbcSMatt Macy static void 1459eda14cbcSMatt Macy metaslab_rt_remove(range_tree_t *rt, range_seg_t *rs, void *arg) 1460eda14cbcSMatt Macy { 1461eda14cbcSMatt Macy metaslab_rt_arg_t *mrap = arg; 1462eda14cbcSMatt Macy zfs_btree_t *size_tree = mrap->mra_bt; 1463eda14cbcSMatt Macy 1464eda14cbcSMatt Macy if (rs_get_end(rs, rt) - rs_get_start(rs, rt) < (1 << 1465eda14cbcSMatt Macy mrap->mra_floor_shift)) 1466eda14cbcSMatt Macy return; 1467eda14cbcSMatt Macy 1468eda14cbcSMatt Macy zfs_btree_remove(size_tree, rs); 1469eda14cbcSMatt Macy } 1470eda14cbcSMatt Macy 1471eda14cbcSMatt Macy static void 1472eda14cbcSMatt Macy metaslab_rt_vacate(range_tree_t *rt, void *arg) 1473eda14cbcSMatt Macy { 1474eda14cbcSMatt Macy metaslab_rt_arg_t *mrap = arg; 1475eda14cbcSMatt Macy zfs_btree_t *size_tree = mrap->mra_bt; 1476eda14cbcSMatt Macy zfs_btree_clear(size_tree); 1477eda14cbcSMatt Macy zfs_btree_destroy(size_tree); 1478eda14cbcSMatt Macy 1479eda14cbcSMatt Macy metaslab_rt_create(rt, arg); 1480eda14cbcSMatt Macy } 1481eda14cbcSMatt Macy 1482e92ffd9bSMartin Matuska static const range_tree_ops_t metaslab_rt_ops = { 1483eda14cbcSMatt Macy .rtop_create = metaslab_rt_create, 1484eda14cbcSMatt Macy .rtop_destroy = metaslab_rt_destroy, 1485eda14cbcSMatt Macy .rtop_add = metaslab_rt_add, 1486eda14cbcSMatt Macy .rtop_remove = metaslab_rt_remove, 1487eda14cbcSMatt Macy 
.rtop_vacate = metaslab_rt_vacate 1488eda14cbcSMatt Macy }; 1489eda14cbcSMatt Macy 1490eda14cbcSMatt Macy /* 1491eda14cbcSMatt Macy * ========================================================================== 1492eda14cbcSMatt Macy * Common allocator routines 1493eda14cbcSMatt Macy * ========================================================================== 1494eda14cbcSMatt Macy */ 1495eda14cbcSMatt Macy 1496eda14cbcSMatt Macy /* 1497eda14cbcSMatt Macy * Return the maximum contiguous segment within the metaslab. 1498eda14cbcSMatt Macy */ 1499eda14cbcSMatt Macy uint64_t 1500eda14cbcSMatt Macy metaslab_largest_allocatable(metaslab_t *msp) 1501eda14cbcSMatt Macy { 1502eda14cbcSMatt Macy zfs_btree_t *t = &msp->ms_allocatable_by_size; 1503eda14cbcSMatt Macy range_seg_t *rs; 1504eda14cbcSMatt Macy 1505eda14cbcSMatt Macy if (t == NULL) 1506eda14cbcSMatt Macy return (0); 1507eda14cbcSMatt Macy if (zfs_btree_numnodes(t) == 0) 1508eda14cbcSMatt Macy metaslab_size_tree_full_load(msp->ms_allocatable); 1509eda14cbcSMatt Macy 1510eda14cbcSMatt Macy rs = zfs_btree_last(t, NULL); 1511eda14cbcSMatt Macy if (rs == NULL) 1512eda14cbcSMatt Macy return (0); 1513eda14cbcSMatt Macy 1514eda14cbcSMatt Macy return (rs_get_end(rs, msp->ms_allocatable) - rs_get_start(rs, 1515eda14cbcSMatt Macy msp->ms_allocatable)); 1516eda14cbcSMatt Macy } 1517eda14cbcSMatt Macy 1518eda14cbcSMatt Macy /* 1519eda14cbcSMatt Macy * Return the maximum contiguous segment within the unflushed frees of this 1520eda14cbcSMatt Macy * metaslab. 1521eda14cbcSMatt Macy */ 1522eda14cbcSMatt Macy static uint64_t 1523eda14cbcSMatt Macy metaslab_largest_unflushed_free(metaslab_t *msp) 1524eda14cbcSMatt Macy { 1525eda14cbcSMatt Macy ASSERT(MUTEX_HELD(&msp->ms_lock)); 1526eda14cbcSMatt Macy 1527eda14cbcSMatt Macy if (msp->ms_unflushed_frees == NULL) 1528eda14cbcSMatt Macy return (0); 1529eda14cbcSMatt Macy 1530eda14cbcSMatt Macy if (zfs_btree_numnodes(&msp->ms_unflushed_frees_by_size) == 0) 1531eda14cbcSMatt Macy metaslab_size_tree_full_load(msp->ms_unflushed_frees); 1532eda14cbcSMatt Macy range_seg_t *rs = zfs_btree_last(&msp->ms_unflushed_frees_by_size, 1533eda14cbcSMatt Macy NULL); 1534eda14cbcSMatt Macy if (rs == NULL) 1535eda14cbcSMatt Macy return (0); 1536eda14cbcSMatt Macy 1537eda14cbcSMatt Macy /* 1538eda14cbcSMatt Macy * When a range is freed from the metaslab, that range is added to 1539eda14cbcSMatt Macy * both the unflushed frees and the deferred frees. While the block 1540eda14cbcSMatt Macy * will eventually be usable, if the metaslab were loaded the range 1541eda14cbcSMatt Macy * would not be added to the ms_allocatable tree until TXG_DEFER_SIZE 1542eda14cbcSMatt Macy * txgs had passed. As a result, when attempting to estimate an upper 1543eda14cbcSMatt Macy * bound for the largest currently-usable free segment in the 1544eda14cbcSMatt Macy * metaslab, we need to not consider any ranges currently in the defer 1545eda14cbcSMatt Macy * trees. This algorithm approximates the largest available chunk in 1546eda14cbcSMatt Macy * the largest range in the unflushed_frees tree by taking the first 1547eda14cbcSMatt Macy * chunk. While this may be a poor estimate, it should only remain so 1548eda14cbcSMatt Macy * briefly and should eventually self-correct as frees are no longer 1549eda14cbcSMatt Macy * deferred. Similar logic applies to the ms_freed tree. See 1550eda14cbcSMatt Macy * metaslab_load() for more details. 1551eda14cbcSMatt Macy * 1552eda14cbcSMatt Macy * There are two primary sources of inaccuracy in this estimate. 
Both 1553eda14cbcSMatt Macy * are tolerated for performance reasons. The first source is that we 1554eda14cbcSMatt Macy * only check the largest segment for overlaps. Smaller segments may 1555eda14cbcSMatt Macy * have more favorable overlaps with the other trees, resulting in 1556eda14cbcSMatt Macy * larger usable chunks. Second, we only look at the first chunk in 1557eda14cbcSMatt Macy * the largest segment; there may be other usable chunks in the 1558eda14cbcSMatt Macy * largest segment, but we ignore them. 1559eda14cbcSMatt Macy */ 1560eda14cbcSMatt Macy uint64_t rstart = rs_get_start(rs, msp->ms_unflushed_frees); 1561eda14cbcSMatt Macy uint64_t rsize = rs_get_end(rs, msp->ms_unflushed_frees) - rstart; 1562eda14cbcSMatt Macy for (int t = 0; t < TXG_DEFER_SIZE; t++) { 1563eda14cbcSMatt Macy uint64_t start = 0; 1564eda14cbcSMatt Macy uint64_t size = 0; 1565eda14cbcSMatt Macy boolean_t found = range_tree_find_in(msp->ms_defer[t], rstart, 1566eda14cbcSMatt Macy rsize, &start, &size); 1567eda14cbcSMatt Macy if (found) { 1568eda14cbcSMatt Macy if (rstart == start) 1569eda14cbcSMatt Macy return (0); 1570eda14cbcSMatt Macy rsize = start - rstart; 1571eda14cbcSMatt Macy } 1572eda14cbcSMatt Macy } 1573eda14cbcSMatt Macy 1574eda14cbcSMatt Macy uint64_t start = 0; 1575eda14cbcSMatt Macy uint64_t size = 0; 1576eda14cbcSMatt Macy boolean_t found = range_tree_find_in(msp->ms_freed, rstart, 1577eda14cbcSMatt Macy rsize, &start, &size); 1578eda14cbcSMatt Macy if (found) 1579eda14cbcSMatt Macy rsize = start - rstart; 1580eda14cbcSMatt Macy 1581eda14cbcSMatt Macy return (rsize); 1582eda14cbcSMatt Macy } 1583eda14cbcSMatt Macy 1584eda14cbcSMatt Macy static range_seg_t * 1585eda14cbcSMatt Macy metaslab_block_find(zfs_btree_t *t, range_tree_t *rt, uint64_t start, 1586eda14cbcSMatt Macy uint64_t size, zfs_btree_index_t *where) 1587eda14cbcSMatt Macy { 1588eda14cbcSMatt Macy range_seg_t *rs; 1589eda14cbcSMatt Macy range_seg_max_t rsearch; 1590eda14cbcSMatt Macy 1591eda14cbcSMatt Macy rs_set_start(&rsearch, rt, start); 1592eda14cbcSMatt Macy rs_set_end(&rsearch, rt, start + size); 1593eda14cbcSMatt Macy 1594eda14cbcSMatt Macy rs = zfs_btree_find(t, &rsearch, where); 1595eda14cbcSMatt Macy if (rs == NULL) { 1596eda14cbcSMatt Macy rs = zfs_btree_next(t, where, where); 1597eda14cbcSMatt Macy } 1598eda14cbcSMatt Macy 1599eda14cbcSMatt Macy return (rs); 1600eda14cbcSMatt Macy } 1601eda14cbcSMatt Macy 1602eda14cbcSMatt Macy #if defined(WITH_DF_BLOCK_ALLOCATOR) || \ 1603eda14cbcSMatt Macy defined(WITH_CF_BLOCK_ALLOCATOR) 16047877fdebSMatt Macy 1605eda14cbcSMatt Macy /* 1606eda14cbcSMatt Macy * This is a helper function that can be used by the allocator to find a 1607eda14cbcSMatt Macy * suitable block to allocate. This will search the specified B-tree looking 1608eda14cbcSMatt Macy * for a block that matches the specified criteria. 
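 * The search is first-fit starting at the cursor: segments are walked in
 * offset order until one can hold the request. It gives up once it has
 * looked more than max_search bytes past the first candidate (after trying
 * at least metaslab_min_search_count segments) and returns -1ULL so the
 * caller can fall back to a size-based lookup.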
1609eda14cbcSMatt Macy */ 1610eda14cbcSMatt Macy static uint64_t 1611eda14cbcSMatt Macy metaslab_block_picker(range_tree_t *rt, uint64_t *cursor, uint64_t size, 1612eda14cbcSMatt Macy uint64_t max_search) 1613eda14cbcSMatt Macy { 1614eda14cbcSMatt Macy if (*cursor == 0) 1615eda14cbcSMatt Macy *cursor = rt->rt_start; 1616eda14cbcSMatt Macy zfs_btree_t *bt = &rt->rt_root; 1617eda14cbcSMatt Macy zfs_btree_index_t where; 1618eda14cbcSMatt Macy range_seg_t *rs = metaslab_block_find(bt, rt, *cursor, size, &where); 1619eda14cbcSMatt Macy uint64_t first_found; 1620eda14cbcSMatt Macy int count_searched = 0; 1621eda14cbcSMatt Macy 1622eda14cbcSMatt Macy if (rs != NULL) 1623eda14cbcSMatt Macy first_found = rs_get_start(rs, rt); 1624eda14cbcSMatt Macy 1625eda14cbcSMatt Macy while (rs != NULL && (rs_get_start(rs, rt) - first_found <= 1626eda14cbcSMatt Macy max_search || count_searched < metaslab_min_search_count)) { 1627eda14cbcSMatt Macy uint64_t offset = rs_get_start(rs, rt); 1628eda14cbcSMatt Macy if (offset + size <= rs_get_end(rs, rt)) { 1629eda14cbcSMatt Macy *cursor = offset + size; 1630eda14cbcSMatt Macy return (offset); 1631eda14cbcSMatt Macy } 1632eda14cbcSMatt Macy rs = zfs_btree_next(bt, &where, &where); 1633eda14cbcSMatt Macy count_searched++; 1634eda14cbcSMatt Macy } 1635eda14cbcSMatt Macy 1636eda14cbcSMatt Macy *cursor = 0; 1637eda14cbcSMatt Macy return (-1ULL); 1638eda14cbcSMatt Macy } 1639eda14cbcSMatt Macy #endif /* WITH_DF/CF_BLOCK_ALLOCATOR */ 1640eda14cbcSMatt Macy 1641eda14cbcSMatt Macy #if defined(WITH_DF_BLOCK_ALLOCATOR) 1642eda14cbcSMatt Macy /* 1643eda14cbcSMatt Macy * ========================================================================== 1644eda14cbcSMatt Macy * Dynamic Fit (df) block allocator 1645eda14cbcSMatt Macy * 1646eda14cbcSMatt Macy * Search for a free chunk of at least this size, starting from the last 1647eda14cbcSMatt Macy * offset (for this alignment of block) looking for up to 1648eda14cbcSMatt Macy * metaslab_df_max_search bytes (16MB). If a large enough free chunk is not 1649eda14cbcSMatt Macy * found within 16MB, then return a free chunk of exactly the requested size (or 1650eda14cbcSMatt Macy * larger). 1651eda14cbcSMatt Macy * 1652eda14cbcSMatt Macy * If it seems like searching from the last offset will be unproductive, skip 1653eda14cbcSMatt Macy * that and just return a free chunk of exactly the requested size (or larger). 1654eda14cbcSMatt Macy * This is based on metaslab_df_alloc_threshold and metaslab_df_free_pct. This 1655eda14cbcSMatt Macy * mechanism is probably not very useful and may be removed in the future. 1656eda14cbcSMatt Macy * 1657eda14cbcSMatt Macy * The behavior when not searching can be changed to return the largest free 1658eda14cbcSMatt Macy * chunk, instead of a free chunk of exactly the requested size, by setting 1659eda14cbcSMatt Macy * metaslab_df_use_largest_segment. 1660eda14cbcSMatt Macy * ========================================================================== 1661eda14cbcSMatt Macy */ 1662eda14cbcSMatt Macy static uint64_t 1663eda14cbcSMatt Macy metaslab_df_alloc(metaslab_t *msp, uint64_t size) 1664eda14cbcSMatt Macy { 1665eda14cbcSMatt Macy /* 1666eda14cbcSMatt Macy * Find the largest power of 2 block size that evenly divides the 1667eda14cbcSMatt Macy * requested size. This is used to try to allocate blocks with similar 1668eda14cbcSMatt Macy * alignment from the same area of the metaslab (i.e. 
same cursor 1669eda14cbcSMatt Macy * bucket), but it does not guarantee that allocations of other sizes 1670eda14cbcSMatt Macy * will not exist in the same region. 1671eda14cbcSMatt Macy */ 1672eda14cbcSMatt Macy uint64_t align = size & -size; 1673eda14cbcSMatt Macy uint64_t *cursor = &msp->ms_lbas[highbit64(align) - 1]; 1674eda14cbcSMatt Macy range_tree_t *rt = msp->ms_allocatable; 1675eda14cbcSMatt Macy int free_pct = range_tree_space(rt) * 100 / msp->ms_size; 1676eda14cbcSMatt Macy uint64_t offset; 1677eda14cbcSMatt Macy 1678eda14cbcSMatt Macy ASSERT(MUTEX_HELD(&msp->ms_lock)); 1679eda14cbcSMatt Macy 1680eda14cbcSMatt Macy /* 1681eda14cbcSMatt Macy * If we're running low on space, find a segment based on size, 1682eda14cbcSMatt Macy * rather than iterating based on offset. 1683eda14cbcSMatt Macy */ 1684eda14cbcSMatt Macy if (metaslab_largest_allocatable(msp) < metaslab_df_alloc_threshold || 1685eda14cbcSMatt Macy free_pct < metaslab_df_free_pct) { 1686eda14cbcSMatt Macy offset = -1; 1687eda14cbcSMatt Macy } else { 1688eda14cbcSMatt Macy offset = metaslab_block_picker(rt, 1689eda14cbcSMatt Macy cursor, size, metaslab_df_max_search); 1690eda14cbcSMatt Macy } 1691eda14cbcSMatt Macy 1692eda14cbcSMatt Macy if (offset == -1) { 1693eda14cbcSMatt Macy range_seg_t *rs; 1694eda14cbcSMatt Macy if (zfs_btree_numnodes(&msp->ms_allocatable_by_size) == 0) 1695eda14cbcSMatt Macy metaslab_size_tree_full_load(msp->ms_allocatable); 16967877fdebSMatt Macy 1697eda14cbcSMatt Macy if (metaslab_df_use_largest_segment) { 1698eda14cbcSMatt Macy /* use largest free segment */ 1699eda14cbcSMatt Macy rs = zfs_btree_last(&msp->ms_allocatable_by_size, NULL); 1700eda14cbcSMatt Macy } else { 1701eda14cbcSMatt Macy zfs_btree_index_t where; 1702eda14cbcSMatt Macy /* use segment of this size, or next largest */ 1703eda14cbcSMatt Macy rs = metaslab_block_find(&msp->ms_allocatable_by_size, 1704eda14cbcSMatt Macy rt, msp->ms_start, size, &where); 1705eda14cbcSMatt Macy } 1706eda14cbcSMatt Macy if (rs != NULL && rs_get_start(rs, rt) + size <= rs_get_end(rs, 1707eda14cbcSMatt Macy rt)) { 1708eda14cbcSMatt Macy offset = rs_get_start(rs, rt); 1709eda14cbcSMatt Macy *cursor = offset + size; 1710eda14cbcSMatt Macy } 1711eda14cbcSMatt Macy } 1712eda14cbcSMatt Macy 1713eda14cbcSMatt Macy return (offset); 1714eda14cbcSMatt Macy } 1715eda14cbcSMatt Macy 1716e92ffd9bSMartin Matuska const metaslab_ops_t zfs_metaslab_ops = { 1717eda14cbcSMatt Macy metaslab_df_alloc 1718eda14cbcSMatt Macy }; 1719eda14cbcSMatt Macy #endif /* WITH_DF_BLOCK_ALLOCATOR */ 1720eda14cbcSMatt Macy 1721eda14cbcSMatt Macy #if defined(WITH_CF_BLOCK_ALLOCATOR) 1722eda14cbcSMatt Macy /* 1723eda14cbcSMatt Macy * ========================================================================== 1724eda14cbcSMatt Macy * Cursor fit block allocator - 1725eda14cbcSMatt Macy * Select the largest region in the metaslab, set the cursor to the beginning 1726eda14cbcSMatt Macy * of the range and the cursor_end to the end of the range. As allocations 1727eda14cbcSMatt Macy * are made, advance the cursor. Continue allocating from the cursor until 1728eda14cbcSMatt Macy * the range is exhausted and then find a new range.
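 * For example (illustrative offsets): if the largest free region is
 * [1M, 2M), the cursor is set to 1M and cursor_end to 2M; successive 128K
 * allocations then return 1M, 1M + 128K, and so on, until fewer than the
 * requested bytes remain before cursor_end and a new largest region is
 * selected.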
1729eda14cbcSMatt Macy * ========================================================================== 1730eda14cbcSMatt Macy */ 1731eda14cbcSMatt Macy static uint64_t 1732eda14cbcSMatt Macy metaslab_cf_alloc(metaslab_t *msp, uint64_t size) 1733eda14cbcSMatt Macy { 1734eda14cbcSMatt Macy range_tree_t *rt = msp->ms_allocatable; 1735eda14cbcSMatt Macy zfs_btree_t *t = &msp->ms_allocatable_by_size; 1736eda14cbcSMatt Macy uint64_t *cursor = &msp->ms_lbas[0]; 1737eda14cbcSMatt Macy uint64_t *cursor_end = &msp->ms_lbas[1]; 1738eda14cbcSMatt Macy uint64_t offset = 0; 1739eda14cbcSMatt Macy 1740eda14cbcSMatt Macy ASSERT(MUTEX_HELD(&msp->ms_lock)); 1741eda14cbcSMatt Macy 1742eda14cbcSMatt Macy ASSERT3U(*cursor_end, >=, *cursor); 1743eda14cbcSMatt Macy 1744eda14cbcSMatt Macy if ((*cursor + size) > *cursor_end) { 1745eda14cbcSMatt Macy range_seg_t *rs; 1746eda14cbcSMatt Macy 1747eda14cbcSMatt Macy if (zfs_btree_numnodes(t) == 0) 1748eda14cbcSMatt Macy metaslab_size_tree_full_load(msp->ms_allocatable); 1749eda14cbcSMatt Macy rs = zfs_btree_last(t, NULL); 1750eda14cbcSMatt Macy if (rs == NULL || (rs_get_end(rs, rt) - rs_get_start(rs, rt)) < 1751eda14cbcSMatt Macy size) 1752eda14cbcSMatt Macy return (-1ULL); 1753eda14cbcSMatt Macy 1754eda14cbcSMatt Macy *cursor = rs_get_start(rs, rt); 1755eda14cbcSMatt Macy *cursor_end = rs_get_end(rs, rt); 1756eda14cbcSMatt Macy } 1757eda14cbcSMatt Macy 1758eda14cbcSMatt Macy offset = *cursor; 1759eda14cbcSMatt Macy *cursor += size; 1760eda14cbcSMatt Macy 1761eda14cbcSMatt Macy return (offset); 1762eda14cbcSMatt Macy } 1763eda14cbcSMatt Macy 1764e92ffd9bSMartin Matuska const metaslab_ops_t zfs_metaslab_ops = { 1765eda14cbcSMatt Macy metaslab_cf_alloc 1766eda14cbcSMatt Macy }; 1767eda14cbcSMatt Macy #endif /* WITH_CF_BLOCK_ALLOCATOR */ 1768eda14cbcSMatt Macy 1769eda14cbcSMatt Macy #if defined(WITH_NDF_BLOCK_ALLOCATOR) 1770eda14cbcSMatt Macy /* 1771eda14cbcSMatt Macy * ========================================================================== 1772eda14cbcSMatt Macy * New dynamic fit allocator - 1773eda14cbcSMatt Macy * Select a region that is large enough to allocate 2^metaslab_ndf_clump_shift 1774eda14cbcSMatt Macy * contiguous blocks. If no region is found then just use the largest segment 1775eda14cbcSMatt Macy * that remains. 1776eda14cbcSMatt Macy * ========================================================================== 1777eda14cbcSMatt Macy */ 1778eda14cbcSMatt Macy 1779eda14cbcSMatt Macy /* 1780eda14cbcSMatt Macy * Determines desired number of contiguous blocks (2^metaslab_ndf_clump_shift) 1781eda14cbcSMatt Macy * to request from the allocator. 
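 * With the default shift of 4, the fallback search asks the size-sorted
 * tree for a segment of up to 1 << (highbit64(size) +
 * metaslab_ndf_clump_shift) bytes, i.e. roughly 16 to 32 times the
 * request, capped at the largest free segment.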
1782eda14cbcSMatt Macy */ 1783eda14cbcSMatt Macy uint64_t metaslab_ndf_clump_shift = 4; 1784eda14cbcSMatt Macy 1785eda14cbcSMatt Macy static uint64_t 1786eda14cbcSMatt Macy metaslab_ndf_alloc(metaslab_t *msp, uint64_t size) 1787eda14cbcSMatt Macy { 1788eda14cbcSMatt Macy zfs_btree_t *t = &msp->ms_allocatable->rt_root; 1789eda14cbcSMatt Macy range_tree_t *rt = msp->ms_allocatable; 1790eda14cbcSMatt Macy zfs_btree_index_t where; 1791eda14cbcSMatt Macy range_seg_t *rs; 1792eda14cbcSMatt Macy range_seg_max_t rsearch; 1793eda14cbcSMatt Macy uint64_t hbit = highbit64(size); 1794eda14cbcSMatt Macy uint64_t *cursor = &msp->ms_lbas[hbit - 1]; 1795eda14cbcSMatt Macy uint64_t max_size = metaslab_largest_allocatable(msp); 1796eda14cbcSMatt Macy 1797eda14cbcSMatt Macy ASSERT(MUTEX_HELD(&msp->ms_lock)); 1798eda14cbcSMatt Macy 1799eda14cbcSMatt Macy if (max_size < size) 1800eda14cbcSMatt Macy return (-1ULL); 1801eda14cbcSMatt Macy 1802eda14cbcSMatt Macy rs_set_start(&rsearch, rt, *cursor); 1803eda14cbcSMatt Macy rs_set_end(&rsearch, rt, *cursor + size); 1804eda14cbcSMatt Macy 1805eda14cbcSMatt Macy rs = zfs_btree_find(t, &rsearch, &where); 1806eda14cbcSMatt Macy if (rs == NULL || (rs_get_end(rs, rt) - rs_get_start(rs, rt)) < size) { 1807eda14cbcSMatt Macy t = &msp->ms_allocatable_by_size; 1808eda14cbcSMatt Macy 1809eda14cbcSMatt Macy rs_set_start(&rsearch, rt, 0); 1810eda14cbcSMatt Macy rs_set_end(&rsearch, rt, MIN(max_size, 1ULL << (hbit + 1811eda14cbcSMatt Macy metaslab_ndf_clump_shift))); 1812eda14cbcSMatt Macy 1813eda14cbcSMatt Macy rs = zfs_btree_find(t, &rsearch, &where); 1814eda14cbcSMatt Macy if (rs == NULL) 1815eda14cbcSMatt Macy rs = zfs_btree_next(t, &where, &where); 1816eda14cbcSMatt Macy ASSERT(rs != NULL); 1817eda14cbcSMatt Macy } 1818eda14cbcSMatt Macy 1819eda14cbcSMatt Macy if ((rs_get_end(rs, rt) - rs_get_start(rs, rt)) >= size) { 1820eda14cbcSMatt Macy *cursor = rs_get_start(rs, rt) + size; 1821eda14cbcSMatt Macy return (rs_get_start(rs, rt)); 1822eda14cbcSMatt Macy } 1823eda14cbcSMatt Macy return (-1ULL); 1824eda14cbcSMatt Macy } 1825eda14cbcSMatt Macy 1826e92ffd9bSMartin Matuska const metaslab_ops_t zfs_metaslab_ops = { 1827eda14cbcSMatt Macy metaslab_ndf_alloc 1828eda14cbcSMatt Macy }; 1829eda14cbcSMatt Macy #endif /* WITH_NDF_BLOCK_ALLOCATOR */ 1830eda14cbcSMatt Macy 1831eda14cbcSMatt Macy 1832eda14cbcSMatt Macy /* 1833eda14cbcSMatt Macy * ========================================================================== 1834eda14cbcSMatt Macy * Metaslabs 1835eda14cbcSMatt Macy * ========================================================================== 1836eda14cbcSMatt Macy */ 1837eda14cbcSMatt Macy 1838eda14cbcSMatt Macy /* 1839eda14cbcSMatt Macy * Wait for any in-progress metaslab loads to complete. 1840eda14cbcSMatt Macy */ 1841eda14cbcSMatt Macy static void 1842eda14cbcSMatt Macy metaslab_load_wait(metaslab_t *msp) 1843eda14cbcSMatt Macy { 1844eda14cbcSMatt Macy ASSERT(MUTEX_HELD(&msp->ms_lock)); 1845eda14cbcSMatt Macy 1846eda14cbcSMatt Macy while (msp->ms_loading) { 1847eda14cbcSMatt Macy ASSERT(!msp->ms_loaded); 1848eda14cbcSMatt Macy cv_wait(&msp->ms_load_cv, &msp->ms_lock); 1849eda14cbcSMatt Macy } 1850eda14cbcSMatt Macy } 1851eda14cbcSMatt Macy 1852eda14cbcSMatt Macy /* 1853eda14cbcSMatt Macy * Wait for any in-progress flushing to complete. 
1854eda14cbcSMatt Macy */ 1855eda14cbcSMatt Macy static void 1856eda14cbcSMatt Macy metaslab_flush_wait(metaslab_t *msp) 1857eda14cbcSMatt Macy { 1858eda14cbcSMatt Macy ASSERT(MUTEX_HELD(&msp->ms_lock)); 1859eda14cbcSMatt Macy 1860eda14cbcSMatt Macy while (msp->ms_flushing) 1861eda14cbcSMatt Macy cv_wait(&msp->ms_flush_cv, &msp->ms_lock); 1862eda14cbcSMatt Macy } 1863eda14cbcSMatt Macy 1864eda14cbcSMatt Macy static unsigned int 1865eda14cbcSMatt Macy metaslab_idx_func(multilist_t *ml, void *arg) 1866eda14cbcSMatt Macy { 1867eda14cbcSMatt Macy metaslab_t *msp = arg; 18682617128aSMartin Matuska 18692617128aSMartin Matuska /* 18702617128aSMartin Matuska * ms_id values are allocated sequentially, so a full 64-bit 18712617128aSMartin Matuska * division would be a waste of time; limit it to 32 bits. 18722617128aSMartin Matuska */ 18732617128aSMartin Matuska return ((unsigned int)msp->ms_id % multilist_get_num_sublists(ml)); 1874eda14cbcSMatt Macy } 1875eda14cbcSMatt Macy 1876eda14cbcSMatt Macy uint64_t 1877eda14cbcSMatt Macy metaslab_allocated_space(metaslab_t *msp) 1878eda14cbcSMatt Macy { 1879eda14cbcSMatt Macy return (msp->ms_allocated_space); 1880eda14cbcSMatt Macy } 1881eda14cbcSMatt Macy 1882eda14cbcSMatt Macy /* 1883eda14cbcSMatt Macy * Verify that the space accounting on disk matches the in-core range_trees. 1884eda14cbcSMatt Macy */ 1885eda14cbcSMatt Macy static void 1886eda14cbcSMatt Macy metaslab_verify_space(metaslab_t *msp, uint64_t txg) 1887eda14cbcSMatt Macy { 1888eda14cbcSMatt Macy spa_t *spa = msp->ms_group->mg_vd->vdev_spa; 1889eda14cbcSMatt Macy uint64_t allocating = 0; 1890eda14cbcSMatt Macy uint64_t sm_free_space, msp_free_space; 1891eda14cbcSMatt Macy 1892eda14cbcSMatt Macy ASSERT(MUTEX_HELD(&msp->ms_lock)); 1893eda14cbcSMatt Macy ASSERT(!msp->ms_condensing); 1894eda14cbcSMatt Macy 1895eda14cbcSMatt Macy if ((zfs_flags & ZFS_DEBUG_METASLAB_VERIFY) == 0) 1896eda14cbcSMatt Macy return; 1897eda14cbcSMatt Macy 1898eda14cbcSMatt Macy /* 1899eda14cbcSMatt Macy * We can only verify the metaslab space when we're called 1900eda14cbcSMatt Macy * from syncing context with a loaded metaslab that has an 1901eda14cbcSMatt Macy * allocated space map. Calling this in non-syncing context 1902eda14cbcSMatt Macy * does not provide a consistent view of the metaslab since 1903eda14cbcSMatt Macy * we're performing allocations in the future. 1904eda14cbcSMatt Macy */ 1905eda14cbcSMatt Macy if (txg != spa_syncing_txg(spa) || msp->ms_sm == NULL || 1906eda14cbcSMatt Macy !msp->ms_loaded) 1907eda14cbcSMatt Macy return; 1908eda14cbcSMatt Macy 1909eda14cbcSMatt Macy /* 1910eda14cbcSMatt Macy * Even though the smp_alloc field can in general go negative, 1911eda14cbcSMatt Macy * it should never be the case for a metaslab's 1912eda14cbcSMatt Macy * space map.
1913eda14cbcSMatt Macy */ 1914eda14cbcSMatt Macy ASSERT3S(space_map_allocated(msp->ms_sm), >=, 0); 1915eda14cbcSMatt Macy 1916eda14cbcSMatt Macy ASSERT3U(space_map_allocated(msp->ms_sm), >=, 1917eda14cbcSMatt Macy range_tree_space(msp->ms_unflushed_frees)); 1918eda14cbcSMatt Macy 1919eda14cbcSMatt Macy ASSERT3U(metaslab_allocated_space(msp), ==, 1920eda14cbcSMatt Macy space_map_allocated(msp->ms_sm) + 1921eda14cbcSMatt Macy range_tree_space(msp->ms_unflushed_allocs) - 1922eda14cbcSMatt Macy range_tree_space(msp->ms_unflushed_frees)); 1923eda14cbcSMatt Macy 1924eda14cbcSMatt Macy sm_free_space = msp->ms_size - metaslab_allocated_space(msp); 1925eda14cbcSMatt Macy 1926eda14cbcSMatt Macy /* 1927eda14cbcSMatt Macy * Account for future allocations since we would have 1928eda14cbcSMatt Macy * already deducted that space from the ms_allocatable. 1929eda14cbcSMatt Macy */ 1930eda14cbcSMatt Macy for (int t = 0; t < TXG_CONCURRENT_STATES; t++) { 1931eda14cbcSMatt Macy allocating += 1932eda14cbcSMatt Macy range_tree_space(msp->ms_allocating[(txg + t) & TXG_MASK]); 1933eda14cbcSMatt Macy } 1934eda14cbcSMatt Macy ASSERT3U(allocating + msp->ms_allocated_this_txg, ==, 1935eda14cbcSMatt Macy msp->ms_allocating_total); 1936eda14cbcSMatt Macy 1937eda14cbcSMatt Macy ASSERT3U(msp->ms_deferspace, ==, 1938eda14cbcSMatt Macy range_tree_space(msp->ms_defer[0]) + 1939eda14cbcSMatt Macy range_tree_space(msp->ms_defer[1])); 1940eda14cbcSMatt Macy 1941eda14cbcSMatt Macy msp_free_space = range_tree_space(msp->ms_allocatable) + allocating + 1942eda14cbcSMatt Macy msp->ms_deferspace + range_tree_space(msp->ms_freed); 1943eda14cbcSMatt Macy 1944eda14cbcSMatt Macy VERIFY3U(sm_free_space, ==, msp_free_space); 1945eda14cbcSMatt Macy } 1946eda14cbcSMatt Macy 1947eda14cbcSMatt Macy static void 1948eda14cbcSMatt Macy metaslab_aux_histograms_clear(metaslab_t *msp) 1949eda14cbcSMatt Macy { 1950eda14cbcSMatt Macy /* 1951eda14cbcSMatt Macy * Auxiliary histograms are only cleared when resetting them, 1952eda14cbcSMatt Macy * which can only happen while the metaslab is loaded. 1953eda14cbcSMatt Macy */ 1954eda14cbcSMatt Macy ASSERT(msp->ms_loaded); 1955eda14cbcSMatt Macy 1956da5137abSMartin Matuska memset(msp->ms_synchist, 0, sizeof (msp->ms_synchist)); 1957eda14cbcSMatt Macy for (int t = 0; t < TXG_DEFER_SIZE; t++) 1958da5137abSMartin Matuska memset(msp->ms_deferhist[t], 0, sizeof (msp->ms_deferhist[t])); 1959eda14cbcSMatt Macy } 1960eda14cbcSMatt Macy 1961eda14cbcSMatt Macy static void 1962eda14cbcSMatt Macy metaslab_aux_histogram_add(uint64_t *histogram, uint64_t shift, 1963eda14cbcSMatt Macy range_tree_t *rt) 1964eda14cbcSMatt Macy { 1965eda14cbcSMatt Macy /* 1966eda14cbcSMatt Macy * This is modeled after space_map_histogram_add(), so refer to that 1967eda14cbcSMatt Macy * function for implementation details. We want this to work like 1968eda14cbcSMatt Macy * the space map histogram, and not the range tree histogram, as we 1969eda14cbcSMatt Macy * are essentially constructing a delta that will be later subtracted 1970eda14cbcSMatt Macy * from the space map histogram. 
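	 * As a concrete illustration: while idx is below
	 * SPACE_MAP_HISTOGRAM_SIZE - 1 the shift term (i - idx - shift) is
	 * zero and counts are copied bucket for bucket; once idx saturates
	 * at the top bucket, a segment twice that bucket's size contributes
	 * two counts, so approximately the same amount of space is still
	 * represented.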
1971eda14cbcSMatt Macy */ 1972eda14cbcSMatt Macy int idx = 0; 1973eda14cbcSMatt Macy for (int i = shift; i < RANGE_TREE_HISTOGRAM_SIZE; i++) { 1974eda14cbcSMatt Macy ASSERT3U(i, >=, idx + shift); 1975eda14cbcSMatt Macy histogram[idx] += rt->rt_histogram[i] << (i - idx - shift); 1976eda14cbcSMatt Macy 1977eda14cbcSMatt Macy if (idx < SPACE_MAP_HISTOGRAM_SIZE - 1) { 1978eda14cbcSMatt Macy ASSERT3U(idx + shift, ==, i); 1979eda14cbcSMatt Macy idx++; 1980eda14cbcSMatt Macy ASSERT3U(idx, <, SPACE_MAP_HISTOGRAM_SIZE); 1981eda14cbcSMatt Macy } 1982eda14cbcSMatt Macy } 1983eda14cbcSMatt Macy } 1984eda14cbcSMatt Macy 1985eda14cbcSMatt Macy /* 1986eda14cbcSMatt Macy * Called at every sync pass that the metaslab gets synced. 1987eda14cbcSMatt Macy * 1988eda14cbcSMatt Macy * The reason is that we want our auxiliary histograms to be updated 1989eda14cbcSMatt Macy * wherever the metaslab's space map histogram is updated. This way 1990eda14cbcSMatt Macy * we stay consistent on which parts of the metaslab space map's 1991eda14cbcSMatt Macy * histogram are currently not available for allocations (e.g because 1992eda14cbcSMatt Macy * they are in the defer, freed, and freeing trees). 1993eda14cbcSMatt Macy */ 1994eda14cbcSMatt Macy static void 1995eda14cbcSMatt Macy metaslab_aux_histograms_update(metaslab_t *msp) 1996eda14cbcSMatt Macy { 1997eda14cbcSMatt Macy space_map_t *sm = msp->ms_sm; 1998eda14cbcSMatt Macy ASSERT(sm != NULL); 1999eda14cbcSMatt Macy 2000eda14cbcSMatt Macy /* 2001eda14cbcSMatt Macy * This is similar to the metaslab's space map histogram updates 2002eda14cbcSMatt Macy * that take place in metaslab_sync(). The only difference is that 2003eda14cbcSMatt Macy * we only care about segments that haven't made it into the 2004eda14cbcSMatt Macy * ms_allocatable tree yet. 2005eda14cbcSMatt Macy */ 2006eda14cbcSMatt Macy if (msp->ms_loaded) { 2007eda14cbcSMatt Macy metaslab_aux_histograms_clear(msp); 2008eda14cbcSMatt Macy 2009eda14cbcSMatt Macy metaslab_aux_histogram_add(msp->ms_synchist, 2010eda14cbcSMatt Macy sm->sm_shift, msp->ms_freed); 2011eda14cbcSMatt Macy 2012eda14cbcSMatt Macy for (int t = 0; t < TXG_DEFER_SIZE; t++) { 2013eda14cbcSMatt Macy metaslab_aux_histogram_add(msp->ms_deferhist[t], 2014eda14cbcSMatt Macy sm->sm_shift, msp->ms_defer[t]); 2015eda14cbcSMatt Macy } 2016eda14cbcSMatt Macy } 2017eda14cbcSMatt Macy 2018eda14cbcSMatt Macy metaslab_aux_histogram_add(msp->ms_synchist, 2019eda14cbcSMatt Macy sm->sm_shift, msp->ms_freeing); 2020eda14cbcSMatt Macy } 2021eda14cbcSMatt Macy 2022eda14cbcSMatt Macy /* 2023eda14cbcSMatt Macy * Called every time we are done syncing (writing to) the metaslab, 2024eda14cbcSMatt Macy * i.e. at the end of each sync pass. 2025eda14cbcSMatt Macy * [see the comment in metaslab_impl.h for ms_synchist, ms_deferhist] 2026eda14cbcSMatt Macy */ 2027eda14cbcSMatt Macy static void 2028eda14cbcSMatt Macy metaslab_aux_histograms_update_done(metaslab_t *msp, boolean_t defer_allowed) 2029eda14cbcSMatt Macy { 2030eda14cbcSMatt Macy spa_t *spa = msp->ms_group->mg_vd->vdev_spa; 2031eda14cbcSMatt Macy space_map_t *sm = msp->ms_sm; 2032eda14cbcSMatt Macy 2033eda14cbcSMatt Macy if (sm == NULL) { 2034eda14cbcSMatt Macy /* 2035eda14cbcSMatt Macy * We came here from metaslab_init() when creating/opening a 2036eda14cbcSMatt Macy * pool, looking at a metaslab that hasn't had any allocations 2037eda14cbcSMatt Macy * yet. 
2038eda14cbcSMatt Macy */ 2039eda14cbcSMatt Macy return; 2040eda14cbcSMatt Macy } 2041eda14cbcSMatt Macy 2042eda14cbcSMatt Macy /* 2043eda14cbcSMatt Macy * This is similar to the actions that we take for the ms_freed 2044eda14cbcSMatt Macy * and ms_defer trees in metaslab_sync_done(). 2045eda14cbcSMatt Macy */ 2046eda14cbcSMatt Macy uint64_t hist_index = spa_syncing_txg(spa) % TXG_DEFER_SIZE; 2047eda14cbcSMatt Macy if (defer_allowed) { 2048da5137abSMartin Matuska memcpy(msp->ms_deferhist[hist_index], msp->ms_synchist, 2049eda14cbcSMatt Macy sizeof (msp->ms_synchist)); 2050eda14cbcSMatt Macy } else { 2051da5137abSMartin Matuska memset(msp->ms_deferhist[hist_index], 0, 2052eda14cbcSMatt Macy sizeof (msp->ms_deferhist[hist_index])); 2053eda14cbcSMatt Macy } 2054da5137abSMartin Matuska memset(msp->ms_synchist, 0, sizeof (msp->ms_synchist)); 2055eda14cbcSMatt Macy } 2056eda14cbcSMatt Macy 2057eda14cbcSMatt Macy /* 2058eda14cbcSMatt Macy * Ensure that the metaslab's weight and fragmentation are consistent 2059eda14cbcSMatt Macy * with the contents of the histogram (either the range tree's histogram 2060eda14cbcSMatt Macy * or the space map's, depending on whether the metaslab is loaded). 2061eda14cbcSMatt Macy */ 2062eda14cbcSMatt Macy static void 2063eda14cbcSMatt Macy metaslab_verify_weight_and_frag(metaslab_t *msp) 2064eda14cbcSMatt Macy { 2065eda14cbcSMatt Macy ASSERT(MUTEX_HELD(&msp->ms_lock)); 2066eda14cbcSMatt Macy 2067eda14cbcSMatt Macy if ((zfs_flags & ZFS_DEBUG_METASLAB_VERIFY) == 0) 2068eda14cbcSMatt Macy return; 2069eda14cbcSMatt Macy 2070eda14cbcSMatt Macy /* 2071eda14cbcSMatt Macy * We can end up here from vdev_remove_complete(), in which case we 2072eda14cbcSMatt Macy * cannot do these assertions because we hold spa config locks and 2073eda14cbcSMatt Macy * thus we are not allowed to read from the DMU. 2074eda14cbcSMatt Macy * 2075eda14cbcSMatt Macy * We check if the metaslab group has been removed and if that's 2076eda14cbcSMatt Macy * the case we return immediately as that would mean that we are 2077eda14cbcSMatt Macy * here from the aforementioned code path. 2078eda14cbcSMatt Macy */ 2079eda14cbcSMatt Macy if (msp->ms_group == NULL) 2080eda14cbcSMatt Macy return; 2081eda14cbcSMatt Macy 2082eda14cbcSMatt Macy /* 2083eda14cbcSMatt Macy * Devices being removed always return a weight of 0 and leave 2084eda14cbcSMatt Macy * fragmentation and ms_max_size as is - there is nothing for 2085eda14cbcSMatt Macy * us to verify here. 2086eda14cbcSMatt Macy */ 2087eda14cbcSMatt Macy vdev_t *vd = msp->ms_group->mg_vd; 2088eda14cbcSMatt Macy if (vd->vdev_removing) 2089eda14cbcSMatt Macy return; 2090eda14cbcSMatt Macy 2091eda14cbcSMatt Macy /* 2092eda14cbcSMatt Macy * If the metaslab is dirty, it probably means that we've done 2093eda14cbcSMatt Macy * some allocations or frees that have changed our histograms 2094eda14cbcSMatt Macy * and thus the weight. 2095eda14cbcSMatt Macy */ 2096eda14cbcSMatt Macy for (int t = 0; t < TXG_SIZE; t++) { 2097eda14cbcSMatt Macy if (txg_list_member(&vd->vdev_ms_list, msp, t)) 2098eda14cbcSMatt Macy return; 2099eda14cbcSMatt Macy } 2100eda14cbcSMatt Macy 2101eda14cbcSMatt Macy /* 2102eda14cbcSMatt Macy * This verification checks that our in-memory state is consistent 2103eda14cbcSMatt Macy * with what's on disk. If the pool is read-only then there aren't 2104eda14cbcSMatt Macy * any changes and we just have the initially-loaded state.
2105eda14cbcSMatt Macy */ 2106eda14cbcSMatt Macy if (!spa_writeable(msp->ms_group->mg_vd->vdev_spa)) 2107eda14cbcSMatt Macy return; 2108eda14cbcSMatt Macy 2109eda14cbcSMatt Macy /* Do some extra verification on the in-core tree when it is loaded. */ 2110eda14cbcSMatt Macy if (msp->ms_loaded) { 2111eda14cbcSMatt Macy range_tree_stat_verify(msp->ms_allocatable); 2112eda14cbcSMatt Macy VERIFY(space_map_histogram_verify(msp->ms_sm, 2113eda14cbcSMatt Macy msp->ms_allocatable)); 2114eda14cbcSMatt Macy } 2115eda14cbcSMatt Macy 2116eda14cbcSMatt Macy uint64_t weight = msp->ms_weight; 2117eda14cbcSMatt Macy uint64_t was_active = msp->ms_weight & METASLAB_ACTIVE_MASK; 2118eda14cbcSMatt Macy boolean_t space_based = WEIGHT_IS_SPACEBASED(msp->ms_weight); 2119eda14cbcSMatt Macy uint64_t frag = msp->ms_fragmentation; 2120eda14cbcSMatt Macy uint64_t max_segsize = msp->ms_max_size; 2121eda14cbcSMatt Macy 2122eda14cbcSMatt Macy msp->ms_weight = 0; 2123eda14cbcSMatt Macy msp->ms_fragmentation = 0; 2124eda14cbcSMatt Macy 2125eda14cbcSMatt Macy /* 2126eda14cbcSMatt Macy * This function is used for verification purposes and thus should 2127eda14cbcSMatt Macy * not introduce any side-effects/mutations on the system's state. 2128eda14cbcSMatt Macy * 2129eda14cbcSMatt Macy * Regardless of whether metaslab_weight() thinks this metaslab 2130eda14cbcSMatt Macy * should be active or not, we want to ensure that the actual weight 2131eda14cbcSMatt Macy * (and therefore the value of ms_weight) would be the same if it 2132eda14cbcSMatt Macy * was to be recalculated at this point. 2133eda14cbcSMatt Macy * 2134eda14cbcSMatt Macy * In addition we set the nodirty flag so metaslab_weight() does 2135eda14cbcSMatt Macy * not dirty the metaslab for future TXGs (e.g. when trying to 2136eda14cbcSMatt Macy * force condensing to upgrade the metaslab spacemaps). 2137eda14cbcSMatt Macy */ 2138eda14cbcSMatt Macy msp->ms_weight = metaslab_weight(msp, B_TRUE) | was_active; 2139eda14cbcSMatt Macy 2140eda14cbcSMatt Macy VERIFY3U(max_segsize, ==, msp->ms_max_size); 2141eda14cbcSMatt Macy 2142eda14cbcSMatt Macy /* 2143eda14cbcSMatt Macy * If the weight type changed then there is no point in doing 2144eda14cbcSMatt Macy * verification. Revert fields to their original values. 2145eda14cbcSMatt Macy */ 2146eda14cbcSMatt Macy if ((space_based && !WEIGHT_IS_SPACEBASED(msp->ms_weight)) || 2147eda14cbcSMatt Macy (!space_based && WEIGHT_IS_SPACEBASED(msp->ms_weight))) { 2148eda14cbcSMatt Macy msp->ms_fragmentation = frag; 2149eda14cbcSMatt Macy msp->ms_weight = weight; 2150eda14cbcSMatt Macy return; 2151eda14cbcSMatt Macy } 2152eda14cbcSMatt Macy 2153eda14cbcSMatt Macy VERIFY3U(msp->ms_fragmentation, ==, frag); 2154eda14cbcSMatt Macy VERIFY3U(msp->ms_weight, ==, weight); 2155eda14cbcSMatt Macy } 2156eda14cbcSMatt Macy 2157eda14cbcSMatt Macy /* 2158eda14cbcSMatt Macy * If we're over the zfs_metaslab_mem_limit, select the loaded metaslab from 2159eda14cbcSMatt Macy * this class that was used longest ago, and attempt to unload it. To avoid 2160eda14cbcSMatt Macy * degrading performance we don't want to spend too much time in this loop, 2161eda14cbcSMatt Macy * and we expect that most of the time this operation will 2162eda14cbcSMatt Macy * succeed. Between that and the normal unloading processing during txg sync, 2163eda14cbcSMatt Macy * we expect this to keep the metaslab memory usage under control.
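 * Concretely, the loop below keeps evicting while inuse * size (the memory
 * held by range tree btree leaves) exceeds
 * allmem * zfs_metaslab_mem_limit / 100, and it gives up after trying
 * twice the number of multilist sublists.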
2164eda14cbcSMatt Macy */ 2165eda14cbcSMatt Macy static void 2166eda14cbcSMatt Macy metaslab_potentially_evict(metaslab_class_t *mc) 2167eda14cbcSMatt Macy { 2168eda14cbcSMatt Macy #ifdef _KERNEL 2169eda14cbcSMatt Macy uint64_t allmem = arc_all_memory(); 2170eda14cbcSMatt Macy uint64_t inuse = spl_kmem_cache_inuse(zfs_btree_leaf_cache); 2171eda14cbcSMatt Macy uint64_t size = spl_kmem_cache_entry_size(zfs_btree_leaf_cache); 2172eda14cbcSMatt Macy int tries = 0; 2173eda14cbcSMatt Macy for (; allmem * zfs_metaslab_mem_limit / 100 < inuse * size && 21743ff01b23SMartin Matuska tries < multilist_get_num_sublists(&mc->mc_metaslab_txg_list) * 2; 2175eda14cbcSMatt Macy tries++) { 2176eda14cbcSMatt Macy unsigned int idx = multilist_get_random_index( 21773ff01b23SMartin Matuska &mc->mc_metaslab_txg_list); 2178eda14cbcSMatt Macy multilist_sublist_t *mls = 21793ff01b23SMartin Matuska multilist_sublist_lock(&mc->mc_metaslab_txg_list, idx); 2180eda14cbcSMatt Macy metaslab_t *msp = multilist_sublist_head(mls); 2181eda14cbcSMatt Macy multilist_sublist_unlock(mls); 2182eda14cbcSMatt Macy while (msp != NULL && allmem * zfs_metaslab_mem_limit / 100 < 2183eda14cbcSMatt Macy inuse * size) { 2184eda14cbcSMatt Macy VERIFY3P(mls, ==, multilist_sublist_lock( 21853ff01b23SMartin Matuska &mc->mc_metaslab_txg_list, idx)); 2186eda14cbcSMatt Macy ASSERT3U(idx, ==, 21873ff01b23SMartin Matuska metaslab_idx_func(&mc->mc_metaslab_txg_list, msp)); 2188eda14cbcSMatt Macy 2189eda14cbcSMatt Macy if (!multilist_link_active(&msp->ms_class_txg_node)) { 2190eda14cbcSMatt Macy multilist_sublist_unlock(mls); 2191eda14cbcSMatt Macy break; 2192eda14cbcSMatt Macy } 2193eda14cbcSMatt Macy metaslab_t *next_msp = multilist_sublist_next(mls, msp); 2194eda14cbcSMatt Macy multilist_sublist_unlock(mls); 2195eda14cbcSMatt Macy /* 2196eda14cbcSMatt Macy * If the metaslab is currently loading there are two 2197eda14cbcSMatt Macy * cases. If it's the metaslab we're evicting, we 2198eda14cbcSMatt Macy * can't continue on or we'll panic when we attempt to 2199eda14cbcSMatt Macy * recursively lock the mutex. If it's another 2200eda14cbcSMatt Macy * metaslab that's loading, it can be safely skipped, 2201eda14cbcSMatt Macy * since we know it's very new and therefore not a 2202eda14cbcSMatt Macy * good eviction candidate. We check later once the 2203eda14cbcSMatt Macy * lock is held that the metaslab is fully loaded 2204eda14cbcSMatt Macy * before actually unloading it. 2205eda14cbcSMatt Macy */ 2206eda14cbcSMatt Macy if (msp->ms_loading) { 2207eda14cbcSMatt Macy msp = next_msp; 2208eda14cbcSMatt Macy inuse = 2209eda14cbcSMatt Macy spl_kmem_cache_inuse(zfs_btree_leaf_cache); 2210eda14cbcSMatt Macy continue; 2211eda14cbcSMatt Macy } 2212eda14cbcSMatt Macy /* 2213eda14cbcSMatt Macy * We can't unload metaslabs with no spacemap because 2214eda14cbcSMatt Macy * they're not ready to be unloaded yet. We can't 2215eda14cbcSMatt Macy * unload metaslabs with outstanding allocations 2216eda14cbcSMatt Macy * because doing so could cause the metaslab's weight 2217eda14cbcSMatt Macy * to decrease while it's unloaded, which violates an 2218eda14cbcSMatt Macy * invariant that we use to prevent unnecessary 2219eda14cbcSMatt Macy * loading. We also don't unload metaslabs that are 2220eda14cbcSMatt Macy * currently active because they are high-weight 2221eda14cbcSMatt Macy * metaslabs that are likely to be used in the near 2222eda14cbcSMatt Macy * future. 
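			 * All three conditions are rechecked under ms_lock
			 * below: the metaslab must be inactive
			 * (ms_allocator == -1), must have a space map
			 * (ms_sm != NULL), and must have no outstanding
			 * allocations (ms_allocating_total == 0).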
2223eda14cbcSMatt Macy */ 2224eda14cbcSMatt Macy mutex_enter(&msp->ms_lock); 2225eda14cbcSMatt Macy if (msp->ms_allocator == -1 && msp->ms_sm != NULL && 2226eda14cbcSMatt Macy msp->ms_allocating_total == 0) { 2227eda14cbcSMatt Macy metaslab_unload(msp); 2228eda14cbcSMatt Macy } 2229eda14cbcSMatt Macy mutex_exit(&msp->ms_lock); 2230eda14cbcSMatt Macy msp = next_msp; 2231eda14cbcSMatt Macy inuse = spl_kmem_cache_inuse(zfs_btree_leaf_cache); 2232eda14cbcSMatt Macy } 2233eda14cbcSMatt Macy } 2234e92ffd9bSMartin Matuska #else 2235e92ffd9bSMartin Matuska (void) mc, (void) zfs_metaslab_mem_limit; 2236eda14cbcSMatt Macy #endif 2237eda14cbcSMatt Macy } 2238eda14cbcSMatt Macy 2239eda14cbcSMatt Macy static int 2240eda14cbcSMatt Macy metaslab_load_impl(metaslab_t *msp) 2241eda14cbcSMatt Macy { 2242eda14cbcSMatt Macy int error = 0; 2243eda14cbcSMatt Macy 2244eda14cbcSMatt Macy ASSERT(MUTEX_HELD(&msp->ms_lock)); 2245eda14cbcSMatt Macy ASSERT(msp->ms_loading); 2246eda14cbcSMatt Macy ASSERT(!msp->ms_condensing); 2247eda14cbcSMatt Macy 2248eda14cbcSMatt Macy /* 2249eda14cbcSMatt Macy * We temporarily drop the lock to unblock other operations while we 2250eda14cbcSMatt Macy * are reading the space map. Therefore, metaslab_sync() and 2251eda14cbcSMatt Macy * metaslab_sync_done() can run at the same time as we do. 2252eda14cbcSMatt Macy * 2253eda14cbcSMatt Macy * If we are using the log space maps, metaslab_sync() can't write to 2254eda14cbcSMatt Macy * the metaslab's space map while we are loading as we only write to 2255eda14cbcSMatt Macy * it when we are flushing the metaslab, and that can't happen while 2256eda14cbcSMatt Macy * we are loading it. 2257eda14cbcSMatt Macy * 2258eda14cbcSMatt Macy * If we are not using log space maps though, metaslab_sync() can 2259eda14cbcSMatt Macy * append to the space map while we are loading. Therefore we load 2260eda14cbcSMatt Macy * only entries that existed when we started the load. Additionally, 2261eda14cbcSMatt Macy * metaslab_sync_done() has to wait for the load to complete because 2262eda14cbcSMatt Macy * there are potential races like metaslab_load() loading parts of the 2263eda14cbcSMatt Macy * space map that are currently being appended by metaslab_sync(). If 2264eda14cbcSMatt Macy * we didn't, the ms_allocatable would have entries that 2265eda14cbcSMatt Macy * metaslab_sync_done() would try to re-add later. 2266eda14cbcSMatt Macy * 2267eda14cbcSMatt Macy * That's why before dropping the lock we remember the synced length 2268eda14cbcSMatt Macy * of the metaslab and read up to that point of the space map, 2269eda14cbcSMatt Macy * ignoring entries appended by metaslab_sync() that happen after we 2270eda14cbcSMatt Macy * drop the lock. 
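 *
 * Concretely (an illustrative walk-through): if ms_synced_length was N
 * bytes when the load began, space_map_load_length() below parses only
 * the first N bytes; any entries metaslab_sync() appends concurrently
 * are ignored here and reconciled later by metaslab_sync_done().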
2271eda14cbcSMatt Macy */ 2272eda14cbcSMatt Macy uint64_t length = msp->ms_synced_length; 2273eda14cbcSMatt Macy mutex_exit(&msp->ms_lock); 2274eda14cbcSMatt Macy 2275eda14cbcSMatt Macy hrtime_t load_start = gethrtime(); 2276eda14cbcSMatt Macy metaslab_rt_arg_t *mrap; 2277eda14cbcSMatt Macy if (msp->ms_allocatable->rt_arg == NULL) { 2278eda14cbcSMatt Macy mrap = kmem_zalloc(sizeof (*mrap), KM_SLEEP); 2279eda14cbcSMatt Macy } else { 2280eda14cbcSMatt Macy mrap = msp->ms_allocatable->rt_arg; 2281eda14cbcSMatt Macy msp->ms_allocatable->rt_ops = NULL; 2282eda14cbcSMatt Macy msp->ms_allocatable->rt_arg = NULL; 2283eda14cbcSMatt Macy } 2284eda14cbcSMatt Macy mrap->mra_bt = &msp->ms_allocatable_by_size; 2285eda14cbcSMatt Macy mrap->mra_floor_shift = metaslab_by_size_min_shift; 2286eda14cbcSMatt Macy 2287eda14cbcSMatt Macy if (msp->ms_sm != NULL) { 2288eda14cbcSMatt Macy error = space_map_load_length(msp->ms_sm, msp->ms_allocatable, 2289eda14cbcSMatt Macy SM_FREE, length); 2290eda14cbcSMatt Macy 2291eda14cbcSMatt Macy /* Now, populate the size-sorted tree. */ 2292eda14cbcSMatt Macy metaslab_rt_create(msp->ms_allocatable, mrap); 2293eda14cbcSMatt Macy msp->ms_allocatable->rt_ops = &metaslab_rt_ops; 2294eda14cbcSMatt Macy msp->ms_allocatable->rt_arg = mrap; 2295eda14cbcSMatt Macy 2296eda14cbcSMatt Macy struct mssa_arg arg = {0}; 2297eda14cbcSMatt Macy arg.rt = msp->ms_allocatable; 2298eda14cbcSMatt Macy arg.mra = mrap; 2299eda14cbcSMatt Macy range_tree_walk(msp->ms_allocatable, metaslab_size_sorted_add, 2300eda14cbcSMatt Macy &arg); 2301eda14cbcSMatt Macy } else { 2302eda14cbcSMatt Macy /* 2303eda14cbcSMatt Macy * Add the size-sorted tree first, since we don't need to load 2304eda14cbcSMatt Macy * the metaslab from the spacemap. 2305eda14cbcSMatt Macy */ 2306eda14cbcSMatt Macy metaslab_rt_create(msp->ms_allocatable, mrap); 2307eda14cbcSMatt Macy msp->ms_allocatable->rt_ops = &metaslab_rt_ops; 2308eda14cbcSMatt Macy msp->ms_allocatable->rt_arg = mrap; 2309eda14cbcSMatt Macy /* 2310eda14cbcSMatt Macy * The space map has not been allocated yet, so treat 2311eda14cbcSMatt Macy * all the space in the metaslab as free and add it to the 2312eda14cbcSMatt Macy * ms_allocatable tree. 2313eda14cbcSMatt Macy */ 2314eda14cbcSMatt Macy range_tree_add(msp->ms_allocatable, 2315eda14cbcSMatt Macy msp->ms_start, msp->ms_size); 2316eda14cbcSMatt Macy 2317f9693befSMartin Matuska if (msp->ms_new) { 2318eda14cbcSMatt Macy /* 2319eda14cbcSMatt Macy * If the ms_sm doesn't exist, this means that this 2320eda14cbcSMatt Macy * metaslab hasn't gone through metaslab_sync() and 2321eda14cbcSMatt Macy * thus has never been dirtied. So we shouldn't 2322eda14cbcSMatt Macy * expect any unflushed allocs or frees from previous 2323eda14cbcSMatt Macy * TXGs. 2324eda14cbcSMatt Macy */ 2325eda14cbcSMatt Macy ASSERT(range_tree_is_empty(msp->ms_unflushed_allocs)); 2326eda14cbcSMatt Macy ASSERT(range_tree_is_empty(msp->ms_unflushed_frees)); 2327eda14cbcSMatt Macy } 2328eda14cbcSMatt Macy } 2329eda14cbcSMatt Macy 2330eda14cbcSMatt Macy /* 2331eda14cbcSMatt Macy * We need to grab the ms_sync_lock to prevent metaslab_sync() from 2332eda14cbcSMatt Macy * changing the ms_sm (or log_sm) and the metaslab's range trees 2333eda14cbcSMatt Macy * while we are about to use them and populate the ms_allocatable. 2334eda14cbcSMatt Macy * The ms_lock is insufficient for this because metaslab_sync() doesn't 2335eda14cbcSMatt Macy * hold the ms_lock while writing the ms_checkpointing tree to disk. 
2336eda14cbcSMatt Macy 	 */
2337eda14cbcSMatt Macy 	mutex_enter(&msp->ms_sync_lock);
2338eda14cbcSMatt Macy 	mutex_enter(&msp->ms_lock);
2339eda14cbcSMatt Macy 
2340eda14cbcSMatt Macy 	ASSERT(!msp->ms_condensing);
2341eda14cbcSMatt Macy 	ASSERT(!msp->ms_flushing);
2342eda14cbcSMatt Macy 
2343eda14cbcSMatt Macy 	if (error != 0) {
2344eda14cbcSMatt Macy 		mutex_exit(&msp->ms_sync_lock);
2345eda14cbcSMatt Macy 		return (error);
2346eda14cbcSMatt Macy 	}
2347eda14cbcSMatt Macy 
2348eda14cbcSMatt Macy 	ASSERT3P(msp->ms_group, !=, NULL);
2349eda14cbcSMatt Macy 	msp->ms_loaded = B_TRUE;
2350eda14cbcSMatt Macy 
2351eda14cbcSMatt Macy 	/*
2352eda14cbcSMatt Macy 	 * Apply all the unflushed changes to ms_allocatable right
2353eda14cbcSMatt Macy 	 * away so any manipulations we do below have a clear view
2354eda14cbcSMatt Macy 	 * of what is allocated and what is free.
2355eda14cbcSMatt Macy 	 */
2356eda14cbcSMatt Macy 	range_tree_walk(msp->ms_unflushed_allocs,
2357eda14cbcSMatt Macy 	    range_tree_remove, msp->ms_allocatable);
2358eda14cbcSMatt Macy 	range_tree_walk(msp->ms_unflushed_frees,
2359eda14cbcSMatt Macy 	    range_tree_add, msp->ms_allocatable);
2360eda14cbcSMatt Macy 
2361eda14cbcSMatt Macy 	ASSERT3P(msp->ms_group, !=, NULL);
2362eda14cbcSMatt Macy 	spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
2363eda14cbcSMatt Macy 	if (spa_syncing_log_sm(spa) != NULL) {
2364eda14cbcSMatt Macy 		ASSERT(spa_feature_is_enabled(spa,
2365eda14cbcSMatt Macy 		    SPA_FEATURE_LOG_SPACEMAP));
2366eda14cbcSMatt Macy 
2367eda14cbcSMatt Macy 		/*
2368eda14cbcSMatt Macy 		 * If we use a log space map we add all the segments
2369eda14cbcSMatt Macy 		 * that are in ms_unflushed_frees so they are available
2370eda14cbcSMatt Macy 		 * for allocation.
2371eda14cbcSMatt Macy 		 *
2372eda14cbcSMatt Macy 		 * ms_allocatable needs to contain all free segments
2373eda14cbcSMatt Macy 		 * that are ready for allocations (thus not segments
2374eda14cbcSMatt Macy 		 * from ms_freeing, ms_freed, and the ms_defer trees).
2375eda14cbcSMatt Macy 		 * But if we grab the lock in this code path at a sync
2376eda14cbcSMatt Macy 		 * pass later than 1, then it also contains the
2377eda14cbcSMatt Macy 		 * segments of ms_freed (they were added to it earlier
2378eda14cbcSMatt Macy 		 * in this path through ms_unflushed_frees). So we
2379eda14cbcSMatt Macy 		 * need to remove all the segments that exist in
2380eda14cbcSMatt Macy 		 * ms_freed from ms_allocatable as they will be added
2381eda14cbcSMatt Macy 		 * later in metaslab_sync_done().
2382eda14cbcSMatt Macy 		 *
2383eda14cbcSMatt Macy 		 * When there's no log space map, the ms_allocatable
2384eda14cbcSMatt Macy 		 * correctly doesn't contain any segments that exist
2385eda14cbcSMatt Macy 		 * in ms_freed [see ms_synced_length].
2386eda14cbcSMatt Macy 		 */
2387eda14cbcSMatt Macy 		range_tree_walk(msp->ms_freed,
2388eda14cbcSMatt Macy 		    range_tree_remove, msp->ms_allocatable);
2389eda14cbcSMatt Macy 	}
2390eda14cbcSMatt Macy 
2391eda14cbcSMatt Macy 	/*
2392eda14cbcSMatt Macy 	 * If we are not using the log space map, ms_allocatable
2393eda14cbcSMatt Macy 	 * contains the segments that exist in the ms_defer trees
2394eda14cbcSMatt Macy 	 * [see ms_synced_length]. Thus we need to remove them
2395eda14cbcSMatt Macy 	 * from ms_allocatable as they will be added again in
2396eda14cbcSMatt Macy 	 * metaslab_sync_done().
2397eda14cbcSMatt Macy 	 *
2398eda14cbcSMatt Macy 	 * If we are using the log space map, ms_allocatable still
2399eda14cbcSMatt Macy 	 * contains the segments that exist in the ms_defer trees.
2400eda14cbcSMatt Macy 	 * Not because it read them through the ms_sm though.
But 2401eda14cbcSMatt Macy * because these segments are part of ms_unflushed_frees 2402eda14cbcSMatt Macy * whose segments we add to ms_allocatable earlier in this 2403eda14cbcSMatt Macy * code path. 2404eda14cbcSMatt Macy */ 2405eda14cbcSMatt Macy for (int t = 0; t < TXG_DEFER_SIZE; t++) { 2406eda14cbcSMatt Macy range_tree_walk(msp->ms_defer[t], 2407eda14cbcSMatt Macy range_tree_remove, msp->ms_allocatable); 2408eda14cbcSMatt Macy } 2409eda14cbcSMatt Macy 2410eda14cbcSMatt Macy /* 2411eda14cbcSMatt Macy * Call metaslab_recalculate_weight_and_sort() now that the 2412eda14cbcSMatt Macy * metaslab is loaded so we get the metaslab's real weight. 2413eda14cbcSMatt Macy * 2414eda14cbcSMatt Macy * Unless this metaslab was created with older software and 2415eda14cbcSMatt Macy * has not yet been converted to use segment-based weight, we 2416eda14cbcSMatt Macy * expect the new weight to be better or equal to the weight 2417eda14cbcSMatt Macy * that the metaslab had while it was not loaded. This is 2418eda14cbcSMatt Macy * because the old weight does not take into account the 2419eda14cbcSMatt Macy * consolidation of adjacent segments between TXGs. [see 2420eda14cbcSMatt Macy * comment for ms_synchist and ms_deferhist[] for more info] 2421eda14cbcSMatt Macy */ 2422eda14cbcSMatt Macy uint64_t weight = msp->ms_weight; 2423eda14cbcSMatt Macy uint64_t max_size = msp->ms_max_size; 2424eda14cbcSMatt Macy metaslab_recalculate_weight_and_sort(msp); 2425eda14cbcSMatt Macy if (!WEIGHT_IS_SPACEBASED(weight)) 2426eda14cbcSMatt Macy ASSERT3U(weight, <=, msp->ms_weight); 2427eda14cbcSMatt Macy msp->ms_max_size = metaslab_largest_allocatable(msp); 2428eda14cbcSMatt Macy ASSERT3U(max_size, <=, msp->ms_max_size); 2429eda14cbcSMatt Macy hrtime_t load_end = gethrtime(); 2430eda14cbcSMatt Macy msp->ms_load_time = load_end; 2431eda14cbcSMatt Macy zfs_dbgmsg("metaslab_load: txg %llu, spa %s, vdev_id %llu, " 2432eda14cbcSMatt Macy "ms_id %llu, smp_length %llu, " 2433eda14cbcSMatt Macy "unflushed_allocs %llu, unflushed_frees %llu, " 2434eda14cbcSMatt Macy "freed %llu, defer %llu + %llu, unloaded time %llu ms, " 2435eda14cbcSMatt Macy "loading_time %lld ms, ms_max_size %llu, " 2436eda14cbcSMatt Macy "max size error %lld, " 2437eda14cbcSMatt Macy "old_weight %llx, new_weight %llx", 243833b8c039SMartin Matuska (u_longlong_t)spa_syncing_txg(spa), spa_name(spa), 243933b8c039SMartin Matuska (u_longlong_t)msp->ms_group->mg_vd->vdev_id, 244033b8c039SMartin Matuska (u_longlong_t)msp->ms_id, 244133b8c039SMartin Matuska (u_longlong_t)space_map_length(msp->ms_sm), 244233b8c039SMartin Matuska (u_longlong_t)range_tree_space(msp->ms_unflushed_allocs), 244333b8c039SMartin Matuska (u_longlong_t)range_tree_space(msp->ms_unflushed_frees), 244433b8c039SMartin Matuska (u_longlong_t)range_tree_space(msp->ms_freed), 244533b8c039SMartin Matuska (u_longlong_t)range_tree_space(msp->ms_defer[0]), 244633b8c039SMartin Matuska (u_longlong_t)range_tree_space(msp->ms_defer[1]), 2447eda14cbcSMatt Macy (longlong_t)((load_start - msp->ms_unload_time) / 1000000), 2448eda14cbcSMatt Macy (longlong_t)((load_end - load_start) / 1000000), 244933b8c039SMartin Matuska (u_longlong_t)msp->ms_max_size, 245033b8c039SMartin Matuska (u_longlong_t)msp->ms_max_size - max_size, 245133b8c039SMartin Matuska (u_longlong_t)weight, (u_longlong_t)msp->ms_weight); 2452eda14cbcSMatt Macy 2453eda14cbcSMatt Macy metaslab_verify_space(msp, spa_syncing_txg(spa)); 2454eda14cbcSMatt Macy mutex_exit(&msp->ms_sync_lock); 2455eda14cbcSMatt Macy return (0); 2456eda14cbcSMatt Macy } 
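
/*
 * Illustrative usage sketch (added for exposition, not part of the
 * original file): the expected calling pattern for metaslab_load()
 * below. The caller must hold ms_lock; metaslab_load() may drop and
 * re-acquire it internally while waiting for a concurrent load or
 * flush. The helper name is hypothetical.
 */
static inline int
metaslab_load_example(metaslab_t *msp)
{
	int error;

	mutex_enter(&msp->ms_lock);
	error = metaslab_load(msp);	/* waits if another thread loads */
	mutex_exit(&msp->ms_lock);

	return (error);
}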
2457eda14cbcSMatt Macy 
2458eda14cbcSMatt Macy int
2459eda14cbcSMatt Macy metaslab_load(metaslab_t *msp)
2460eda14cbcSMatt Macy {
2461eda14cbcSMatt Macy 	ASSERT(MUTEX_HELD(&msp->ms_lock));
2462eda14cbcSMatt Macy 
2463eda14cbcSMatt Macy 	/*
2464eda14cbcSMatt Macy 	 * There may be another thread loading the same metaslab; if that's
2465eda14cbcSMatt Macy 	 * the case, just wait until the other thread is done and return.
2466eda14cbcSMatt Macy 	 */
2467eda14cbcSMatt Macy 	metaslab_load_wait(msp);
2468eda14cbcSMatt Macy 	if (msp->ms_loaded)
2469eda14cbcSMatt Macy 		return (0);
2470eda14cbcSMatt Macy 	VERIFY(!msp->ms_loading);
2471eda14cbcSMatt Macy 	ASSERT(!msp->ms_condensing);
2472eda14cbcSMatt Macy 
2473eda14cbcSMatt Macy 	/*
2474eda14cbcSMatt Macy 	 * We set the loading flag BEFORE potentially dropping the lock to
2475eda14cbcSMatt Macy 	 * wait for an ongoing flush (see ms_flushing below). This way other
2476eda14cbcSMatt Macy 	 * threads know that there is already a thread that is loading this
2477eda14cbcSMatt Macy 	 * metaslab.
2478eda14cbcSMatt Macy 	 */
2479eda14cbcSMatt Macy 	msp->ms_loading = B_TRUE;
2480eda14cbcSMatt Macy 
2481eda14cbcSMatt Macy 	/*
2482eda14cbcSMatt Macy 	 * Wait for any in-progress flushing to finish as we drop the ms_lock
2483eda14cbcSMatt Macy 	 * both here (during space_map_load()) and in metaslab_flush() (when
2484eda14cbcSMatt Macy 	 * we flush our changes to the ms_sm).
2485eda14cbcSMatt Macy 	 */
2486eda14cbcSMatt Macy 	if (msp->ms_flushing)
2487eda14cbcSMatt Macy 		metaslab_flush_wait(msp);
2488eda14cbcSMatt Macy 
2489eda14cbcSMatt Macy 	/*
2490eda14cbcSMatt Macy 	 * In the event that we were waiting for the metaslab to be
2491eda14cbcSMatt Macy 	 * flushed (where we temporarily dropped the ms_lock), ensure that
2492eda14cbcSMatt Macy 	 * no one else loaded the metaslab somehow.
2493eda14cbcSMatt Macy 	 */
2494eda14cbcSMatt Macy 	ASSERT(!msp->ms_loaded);
2495eda14cbcSMatt Macy 
2496eda14cbcSMatt Macy 	/*
2497eda14cbcSMatt Macy 	 * If we're loading a metaslab in the normal class, consider evicting
2498eda14cbcSMatt Macy 	 * another one to keep our memory usage under the limit defined by the
2499eda14cbcSMatt Macy 	 * zfs_metaslab_mem_limit tunable.
2500eda14cbcSMatt Macy 	 */
2501eda14cbcSMatt Macy 	if (spa_normal_class(msp->ms_group->mg_class->mc_spa) ==
2502eda14cbcSMatt Macy 	    msp->ms_group->mg_class) {
2503eda14cbcSMatt Macy 		metaslab_potentially_evict(msp->ms_group->mg_class);
2504eda14cbcSMatt Macy 	}
2505eda14cbcSMatt Macy 
2506eda14cbcSMatt Macy 	int error = metaslab_load_impl(msp);
2507eda14cbcSMatt Macy 
2508eda14cbcSMatt Macy 	ASSERT(MUTEX_HELD(&msp->ms_lock));
2509eda14cbcSMatt Macy 	msp->ms_loading = B_FALSE;
2510eda14cbcSMatt Macy 	cv_broadcast(&msp->ms_load_cv);
2511eda14cbcSMatt Macy 
2512eda14cbcSMatt Macy 	return (error);
2513eda14cbcSMatt Macy }
2514eda14cbcSMatt Macy 
2515eda14cbcSMatt Macy void
2516eda14cbcSMatt Macy metaslab_unload(metaslab_t *msp)
2517eda14cbcSMatt Macy {
2518eda14cbcSMatt Macy 	ASSERT(MUTEX_HELD(&msp->ms_lock));
2519eda14cbcSMatt Macy 
2520eda14cbcSMatt Macy 	/*
2521eda14cbcSMatt Macy 	 * This can happen if a metaslab is selected for eviction (in
2522eda14cbcSMatt Macy 	 * metaslab_potentially_evict) and then unloaded during spa_sync (via
2523eda14cbcSMatt Macy 	 * metaslab_class_evict_old).
2524eda14cbcSMatt Macy 	 */
2525eda14cbcSMatt Macy 	if (!msp->ms_loaded)
2526eda14cbcSMatt Macy 		return;
2527eda14cbcSMatt Macy 
2528eda14cbcSMatt Macy 	range_tree_vacate(msp->ms_allocatable, NULL, NULL);
2529eda14cbcSMatt Macy 	msp->ms_loaded = B_FALSE;
2530eda14cbcSMatt Macy 	msp->ms_unload_time = gethrtime();
2531eda14cbcSMatt Macy 
2532eda14cbcSMatt Macy 	msp->ms_activation_weight = 0;
2533eda14cbcSMatt Macy 	msp->ms_weight &= ~METASLAB_ACTIVE_MASK;
2534eda14cbcSMatt Macy 
2535eda14cbcSMatt Macy 	if (msp->ms_group != NULL) {
2536eda14cbcSMatt Macy 		metaslab_class_t *mc = msp->ms_group->mg_class;
2537eda14cbcSMatt Macy 		multilist_sublist_t *mls =
25383ff01b23SMartin Matuska 		    multilist_sublist_lock_obj(&mc->mc_metaslab_txg_list, msp);
2539eda14cbcSMatt Macy 		if (multilist_link_active(&msp->ms_class_txg_node))
2540eda14cbcSMatt Macy 			multilist_sublist_remove(mls, msp);
2541eda14cbcSMatt Macy 		multilist_sublist_unlock(mls);
2542eda14cbcSMatt Macy 
2543eda14cbcSMatt Macy 		spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
2544eda14cbcSMatt Macy 		zfs_dbgmsg("metaslab_unload: txg %llu, spa %s, vdev_id %llu, "
2545eda14cbcSMatt Macy 		    "ms_id %llu, weight %llx, "
2546eda14cbcSMatt Macy 		    "selected txg %llu (%llu ms ago), alloc_txg %llu, "
2547eda14cbcSMatt Macy 		    "loaded %llu ms ago, max_size %llu",
254833b8c039SMartin Matuska 		    (u_longlong_t)spa_syncing_txg(spa), spa_name(spa),
254933b8c039SMartin Matuska 		    (u_longlong_t)msp->ms_group->mg_vd->vdev_id,
255033b8c039SMartin Matuska 		    (u_longlong_t)msp->ms_id,
255133b8c039SMartin Matuska 		    (u_longlong_t)msp->ms_weight,
255233b8c039SMartin Matuska 		    (u_longlong_t)msp->ms_selected_txg,
255333b8c039SMartin Matuska 		    (u_longlong_t)(msp->ms_unload_time -
255433b8c039SMartin Matuska 		    msp->ms_selected_time) / 1000 / 1000,
255533b8c039SMartin Matuska 		    (u_longlong_t)msp->ms_alloc_txg,
255633b8c039SMartin Matuska 		    (u_longlong_t)(msp->ms_unload_time -
255733b8c039SMartin Matuska 		    msp->ms_load_time) / 1000 / 1000,
255833b8c039SMartin Matuska 		    (u_longlong_t)msp->ms_max_size);
2559eda14cbcSMatt Macy 	}
2560eda14cbcSMatt Macy 
2561eda14cbcSMatt Macy 	/*
2562eda14cbcSMatt Macy 	 * We explicitly recalculate the metaslab's weight based on its space
2563eda14cbcSMatt Macy 	 * map (as it is now not loaded). We want unloaded metaslabs to always
2564eda14cbcSMatt Macy 	 * have their weights calculated from the space map histograms, while
2565eda14cbcSMatt Macy 	 * loaded ones have it calculated from their in-core range tree
2566eda14cbcSMatt Macy 	 * [see metaslab_load()]. This way, the weight reflects the information
2567eda14cbcSMatt Macy 	 * available in-core, whether it is loaded or not.
2568eda14cbcSMatt Macy 	 *
2569eda14cbcSMatt Macy 	 * An ms_group of NULL means that we came here from metaslab_fini(),
2570eda14cbcSMatt Macy 	 * at which point it doesn't make sense for us to do the recalculation
2571eda14cbcSMatt Macy 	 * and the sorting.
2572eda14cbcSMatt Macy 	 */
2573eda14cbcSMatt Macy 	if (msp->ms_group != NULL)
2574eda14cbcSMatt Macy 		metaslab_recalculate_weight_and_sort(msp);
2575eda14cbcSMatt Macy }
2576eda14cbcSMatt Macy 
2577eda14cbcSMatt Macy /*
2578eda14cbcSMatt Macy  * We want to optimize the memory use of the per-metaslab range
2579eda14cbcSMatt Macy  * trees. To do this, we store the segments in the range trees in
2580eda14cbcSMatt Macy  * units of sectors, zero-indexing from the start of the metaslab. If
2581eda14cbcSMatt Macy  * vdev_ms_shift - vdev_ashift is less than 32, we can store
2582eda14cbcSMatt Macy  * the ranges using two uint32_ts, rather than two uint64_ts.
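 *
 * A worked example with illustrative values: for vdev_ms_shift = 34 and
 * vdev_ashift = 12 the difference is 22, so any offset within the
 * metaslab spans at most 2^22 sectors and fits comfortably in a
 * uint32_t; assuming zfs_metaslab_force_large_segs is not set, the
 * function below returns RANGE_SEG32 in that case.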
2583eda14cbcSMatt Macy */ 2584eda14cbcSMatt Macy range_seg_type_t 2585eda14cbcSMatt Macy metaslab_calculate_range_tree_type(vdev_t *vdev, metaslab_t *msp, 2586eda14cbcSMatt Macy uint64_t *start, uint64_t *shift) 2587eda14cbcSMatt Macy { 2588eda14cbcSMatt Macy if (vdev->vdev_ms_shift - vdev->vdev_ashift < 32 && 2589eda14cbcSMatt Macy !zfs_metaslab_force_large_segs) { 2590eda14cbcSMatt Macy *shift = vdev->vdev_ashift; 2591eda14cbcSMatt Macy *start = msp->ms_start; 2592eda14cbcSMatt Macy return (RANGE_SEG32); 2593eda14cbcSMatt Macy } else { 2594eda14cbcSMatt Macy *shift = 0; 2595eda14cbcSMatt Macy *start = 0; 2596eda14cbcSMatt Macy return (RANGE_SEG64); 2597eda14cbcSMatt Macy } 2598eda14cbcSMatt Macy } 2599eda14cbcSMatt Macy 2600eda14cbcSMatt Macy void 2601eda14cbcSMatt Macy metaslab_set_selected_txg(metaslab_t *msp, uint64_t txg) 2602eda14cbcSMatt Macy { 2603eda14cbcSMatt Macy ASSERT(MUTEX_HELD(&msp->ms_lock)); 2604eda14cbcSMatt Macy metaslab_class_t *mc = msp->ms_group->mg_class; 2605eda14cbcSMatt Macy multilist_sublist_t *mls = 26063ff01b23SMartin Matuska multilist_sublist_lock_obj(&mc->mc_metaslab_txg_list, msp); 2607eda14cbcSMatt Macy if (multilist_link_active(&msp->ms_class_txg_node)) 2608eda14cbcSMatt Macy multilist_sublist_remove(mls, msp); 2609eda14cbcSMatt Macy msp->ms_selected_txg = txg; 2610eda14cbcSMatt Macy msp->ms_selected_time = gethrtime(); 2611eda14cbcSMatt Macy multilist_sublist_insert_tail(mls, msp); 2612eda14cbcSMatt Macy multilist_sublist_unlock(mls); 2613eda14cbcSMatt Macy } 2614eda14cbcSMatt Macy 2615eda14cbcSMatt Macy void 2616eda14cbcSMatt Macy metaslab_space_update(vdev_t *vd, metaslab_class_t *mc, int64_t alloc_delta, 2617eda14cbcSMatt Macy int64_t defer_delta, int64_t space_delta) 2618eda14cbcSMatt Macy { 2619eda14cbcSMatt Macy vdev_space_update(vd, alloc_delta, defer_delta, space_delta); 2620eda14cbcSMatt Macy 2621eda14cbcSMatt Macy ASSERT3P(vd->vdev_spa->spa_root_vdev, ==, vd->vdev_parent); 2622eda14cbcSMatt Macy ASSERT(vd->vdev_ms_count != 0); 2623eda14cbcSMatt Macy 2624eda14cbcSMatt Macy metaslab_class_space_update(mc, alloc_delta, defer_delta, space_delta, 2625eda14cbcSMatt Macy vdev_deflated_space(vd, space_delta)); 2626eda14cbcSMatt Macy } 2627eda14cbcSMatt Macy 2628eda14cbcSMatt Macy int 2629eda14cbcSMatt Macy metaslab_init(metaslab_group_t *mg, uint64_t id, uint64_t object, 2630eda14cbcSMatt Macy uint64_t txg, metaslab_t **msp) 2631eda14cbcSMatt Macy { 2632eda14cbcSMatt Macy vdev_t *vd = mg->mg_vd; 2633eda14cbcSMatt Macy spa_t *spa = vd->vdev_spa; 2634eda14cbcSMatt Macy objset_t *mos = spa->spa_meta_objset; 2635eda14cbcSMatt Macy metaslab_t *ms; 2636eda14cbcSMatt Macy int error; 2637eda14cbcSMatt Macy 2638eda14cbcSMatt Macy ms = kmem_zalloc(sizeof (metaslab_t), KM_SLEEP); 2639eda14cbcSMatt Macy mutex_init(&ms->ms_lock, NULL, MUTEX_DEFAULT, NULL); 2640eda14cbcSMatt Macy mutex_init(&ms->ms_sync_lock, NULL, MUTEX_DEFAULT, NULL); 2641eda14cbcSMatt Macy cv_init(&ms->ms_load_cv, NULL, CV_DEFAULT, NULL); 2642eda14cbcSMatt Macy cv_init(&ms->ms_flush_cv, NULL, CV_DEFAULT, NULL); 2643eda14cbcSMatt Macy multilist_link_init(&ms->ms_class_txg_node); 2644eda14cbcSMatt Macy 2645eda14cbcSMatt Macy ms->ms_id = id; 2646eda14cbcSMatt Macy ms->ms_start = id << vd->vdev_ms_shift; 2647eda14cbcSMatt Macy ms->ms_size = 1ULL << vd->vdev_ms_shift; 2648eda14cbcSMatt Macy ms->ms_allocator = -1; 2649eda14cbcSMatt Macy ms->ms_new = B_TRUE; 2650eda14cbcSMatt Macy 26517877fdebSMatt Macy vdev_ops_t *ops = vd->vdev_ops; 26527877fdebSMatt Macy if (ops->vdev_op_metaslab_init != NULL) 
26537877fdebSMatt Macy 		ops->vdev_op_metaslab_init(vd, &ms->ms_start, &ms->ms_size);
26547877fdebSMatt Macy 
2655eda14cbcSMatt Macy 	/*
2656eda14cbcSMatt Macy 	 * We only open space map objects that already exist. All others
265781b22a98SMartin Matuska 	 * will be opened when we finally allocate an object for them. For
265881b22a98SMartin Matuska 	 * readonly pools there is no need to open the space map object.
2659eda14cbcSMatt Macy 	 *
2660eda14cbcSMatt Macy 	 * Note:
2661eda14cbcSMatt Macy 	 * When called from vdev_expand(), we can't call into the DMU as
2662eda14cbcSMatt Macy 	 * we are holding the spa_config_lock as a writer and we would
2663eda14cbcSMatt Macy 	 * deadlock [see relevant comment in vdev_metaslab_init()]. In
2664eda14cbcSMatt Macy 	 * that case, the object parameter is zero though, so we won't
2665eda14cbcSMatt Macy 	 * call into the DMU.
2666eda14cbcSMatt Macy 	 */
266781b22a98SMartin Matuska 	if (object != 0 && !(spa->spa_mode == SPA_MODE_READ &&
266881b22a98SMartin Matuska 	    !spa->spa_read_spacemaps)) {
2669eda14cbcSMatt Macy 		error = space_map_open(&ms->ms_sm, mos, object, ms->ms_start,
2670eda14cbcSMatt Macy 		    ms->ms_size, vd->vdev_ashift);
2671eda14cbcSMatt Macy 
2672eda14cbcSMatt Macy 		if (error != 0) {
2673eda14cbcSMatt Macy 			kmem_free(ms, sizeof (metaslab_t));
2674eda14cbcSMatt Macy 			return (error);
2675eda14cbcSMatt Macy 		}
2676eda14cbcSMatt Macy 
2677eda14cbcSMatt Macy 		ASSERT(ms->ms_sm != NULL);
2678eda14cbcSMatt Macy 		ms->ms_allocated_space = space_map_allocated(ms->ms_sm);
2679eda14cbcSMatt Macy 	}
2680eda14cbcSMatt Macy 
2681eda14cbcSMatt Macy 	uint64_t shift, start;
2682f9693befSMartin Matuska 	range_seg_type_t type =
2683f9693befSMartin Matuska 	    metaslab_calculate_range_tree_type(vd, ms, &start, &shift);
2684eda14cbcSMatt Macy 
2685eda14cbcSMatt Macy 	ms->ms_allocatable = range_tree_create(NULL, type, NULL, start, shift);
2686f9693befSMartin Matuska 	for (int t = 0; t < TXG_SIZE; t++) {
2687f9693befSMartin Matuska 		ms->ms_allocating[t] = range_tree_create(NULL, type,
2688f9693befSMartin Matuska 		    NULL, start, shift);
2689f9693befSMartin Matuska 	}
2690f9693befSMartin Matuska 	ms->ms_freeing = range_tree_create(NULL, type, NULL, start, shift);
2691f9693befSMartin Matuska 	ms->ms_freed = range_tree_create(NULL, type, NULL, start, shift);
2692f9693befSMartin Matuska 	for (int t = 0; t < TXG_DEFER_SIZE; t++) {
2693f9693befSMartin Matuska 		ms->ms_defer[t] = range_tree_create(NULL, type, NULL,
2694f9693befSMartin Matuska 		    start, shift);
2695f9693befSMartin Matuska 	}
2696f9693befSMartin Matuska 	ms->ms_checkpointing =
2697f9693befSMartin Matuska 	    range_tree_create(NULL, type, NULL, start, shift);
2698f9693befSMartin Matuska 	ms->ms_unflushed_allocs =
2699f9693befSMartin Matuska 	    range_tree_create(NULL, type, NULL, start, shift);
2700f9693befSMartin Matuska 
2701f9693befSMartin Matuska 	metaslab_rt_arg_t *mrap = kmem_zalloc(sizeof (*mrap), KM_SLEEP);
2702f9693befSMartin Matuska 	mrap->mra_bt = &ms->ms_unflushed_frees_by_size;
2703f9693befSMartin Matuska 	mrap->mra_floor_shift = metaslab_by_size_min_shift;
2704f9693befSMartin Matuska 	ms->ms_unflushed_frees = range_tree_create(&metaslab_rt_ops,
2705f9693befSMartin Matuska 	    type, mrap, start, shift);
2706eda14cbcSMatt Macy 
2707eda14cbcSMatt Macy 	ms->ms_trim = range_tree_create(NULL, type, NULL, start, shift);
2708eda14cbcSMatt Macy 
2709eda14cbcSMatt Macy 	metaslab_group_add(mg, ms);
2710eda14cbcSMatt Macy 	metaslab_set_fragmentation(ms, B_FALSE);
2711eda14cbcSMatt Macy 
2712eda14cbcSMatt Macy 	/*
2713eda14cbcSMatt Macy 	 * If we're opening an existing pool (txg == 0) or creating
2714eda14cbcSMatt Macy * a new one (txg == TXG_INITIAL), all space is available now. 2715eda14cbcSMatt Macy * If we're adding space to an existing pool, the new space 2716eda14cbcSMatt Macy * does not become available until after this txg has synced. 2717eda14cbcSMatt Macy * The metaslab's weight will also be initialized when we sync 2718eda14cbcSMatt Macy * out this txg. This ensures that we don't attempt to allocate 2719eda14cbcSMatt Macy * from it before we have initialized it completely. 2720eda14cbcSMatt Macy */ 2721eda14cbcSMatt Macy if (txg <= TXG_INITIAL) { 2722eda14cbcSMatt Macy metaslab_sync_done(ms, 0); 2723eda14cbcSMatt Macy metaslab_space_update(vd, mg->mg_class, 2724eda14cbcSMatt Macy metaslab_allocated_space(ms), 0, 0); 2725eda14cbcSMatt Macy } 2726eda14cbcSMatt Macy 2727eda14cbcSMatt Macy if (txg != 0) { 2728eda14cbcSMatt Macy vdev_dirty(vd, 0, NULL, txg); 2729eda14cbcSMatt Macy vdev_dirty(vd, VDD_METASLAB, ms, txg); 2730eda14cbcSMatt Macy } 2731eda14cbcSMatt Macy 2732eda14cbcSMatt Macy *msp = ms; 2733eda14cbcSMatt Macy 2734eda14cbcSMatt Macy return (0); 2735eda14cbcSMatt Macy } 2736eda14cbcSMatt Macy 2737eda14cbcSMatt Macy static void 2738eda14cbcSMatt Macy metaslab_fini_flush_data(metaslab_t *msp) 2739eda14cbcSMatt Macy { 2740eda14cbcSMatt Macy spa_t *spa = msp->ms_group->mg_vd->vdev_spa; 2741eda14cbcSMatt Macy 2742eda14cbcSMatt Macy if (metaslab_unflushed_txg(msp) == 0) { 2743eda14cbcSMatt Macy ASSERT3P(avl_find(&spa->spa_metaslabs_by_flushed, msp, NULL), 2744eda14cbcSMatt Macy ==, NULL); 2745eda14cbcSMatt Macy return; 2746eda14cbcSMatt Macy } 2747eda14cbcSMatt Macy ASSERT(spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP)); 2748eda14cbcSMatt Macy 2749eda14cbcSMatt Macy mutex_enter(&spa->spa_flushed_ms_lock); 2750eda14cbcSMatt Macy avl_remove(&spa->spa_metaslabs_by_flushed, msp); 2751eda14cbcSMatt Macy mutex_exit(&spa->spa_flushed_ms_lock); 2752eda14cbcSMatt Macy 2753eda14cbcSMatt Macy spa_log_sm_decrement_mscount(spa, metaslab_unflushed_txg(msp)); 2754*716fd348SMartin Matuska spa_log_summary_decrement_mscount(spa, metaslab_unflushed_txg(msp), 2755*716fd348SMartin Matuska metaslab_unflushed_dirty(msp)); 2756eda14cbcSMatt Macy } 2757eda14cbcSMatt Macy 2758eda14cbcSMatt Macy uint64_t 2759eda14cbcSMatt Macy metaslab_unflushed_changes_memused(metaslab_t *ms) 2760eda14cbcSMatt Macy { 2761eda14cbcSMatt Macy return ((range_tree_numsegs(ms->ms_unflushed_allocs) + 2762eda14cbcSMatt Macy range_tree_numsegs(ms->ms_unflushed_frees)) * 2763eda14cbcSMatt Macy ms->ms_unflushed_allocs->rt_root.bt_elem_size); 2764eda14cbcSMatt Macy } 2765eda14cbcSMatt Macy 2766eda14cbcSMatt Macy void 2767eda14cbcSMatt Macy metaslab_fini(metaslab_t *msp) 2768eda14cbcSMatt Macy { 2769eda14cbcSMatt Macy metaslab_group_t *mg = msp->ms_group; 2770eda14cbcSMatt Macy vdev_t *vd = mg->mg_vd; 2771eda14cbcSMatt Macy spa_t *spa = vd->vdev_spa; 2772eda14cbcSMatt Macy 2773eda14cbcSMatt Macy metaslab_fini_flush_data(msp); 2774eda14cbcSMatt Macy 2775eda14cbcSMatt Macy metaslab_group_remove(mg, msp); 2776eda14cbcSMatt Macy 2777eda14cbcSMatt Macy mutex_enter(&msp->ms_lock); 2778eda14cbcSMatt Macy VERIFY(msp->ms_group == NULL); 2779f9693befSMartin Matuska 2780184c1b94SMartin Matuska /* 2781f9693befSMartin Matuska * If this metaslab hasn't been through metaslab_sync_done() yet its 2782184c1b94SMartin Matuska * space hasn't been accounted for in its vdev and doesn't need to be 2783184c1b94SMartin Matuska * subtracted. 
2784184c1b94SMartin Matuska */ 2785f9693befSMartin Matuska if (!msp->ms_new) { 2786eda14cbcSMatt Macy metaslab_space_update(vd, mg->mg_class, 2787eda14cbcSMatt Macy -metaslab_allocated_space(msp), 0, -msp->ms_size); 2788eda14cbcSMatt Macy 2789184c1b94SMartin Matuska } 2790eda14cbcSMatt Macy space_map_close(msp->ms_sm); 2791eda14cbcSMatt Macy msp->ms_sm = NULL; 2792eda14cbcSMatt Macy 2793eda14cbcSMatt Macy metaslab_unload(msp); 2794184c1b94SMartin Matuska 2795eda14cbcSMatt Macy range_tree_destroy(msp->ms_allocatable); 2796eda14cbcSMatt Macy range_tree_destroy(msp->ms_freeing); 2797eda14cbcSMatt Macy range_tree_destroy(msp->ms_freed); 2798eda14cbcSMatt Macy 2799eda14cbcSMatt Macy ASSERT3U(spa->spa_unflushed_stats.sus_memused, >=, 2800eda14cbcSMatt Macy metaslab_unflushed_changes_memused(msp)); 2801eda14cbcSMatt Macy spa->spa_unflushed_stats.sus_memused -= 2802eda14cbcSMatt Macy metaslab_unflushed_changes_memused(msp); 2803eda14cbcSMatt Macy range_tree_vacate(msp->ms_unflushed_allocs, NULL, NULL); 2804eda14cbcSMatt Macy range_tree_destroy(msp->ms_unflushed_allocs); 2805184c1b94SMartin Matuska range_tree_destroy(msp->ms_checkpointing); 2806eda14cbcSMatt Macy range_tree_vacate(msp->ms_unflushed_frees, NULL, NULL); 2807eda14cbcSMatt Macy range_tree_destroy(msp->ms_unflushed_frees); 2808eda14cbcSMatt Macy 2809eda14cbcSMatt Macy for (int t = 0; t < TXG_SIZE; t++) { 2810eda14cbcSMatt Macy range_tree_destroy(msp->ms_allocating[t]); 2811eda14cbcSMatt Macy } 2812eda14cbcSMatt Macy for (int t = 0; t < TXG_DEFER_SIZE; t++) { 2813eda14cbcSMatt Macy range_tree_destroy(msp->ms_defer[t]); 2814eda14cbcSMatt Macy } 2815eda14cbcSMatt Macy ASSERT0(msp->ms_deferspace); 2816eda14cbcSMatt Macy 2817eda14cbcSMatt Macy for (int t = 0; t < TXG_SIZE; t++) 2818eda14cbcSMatt Macy ASSERT(!txg_list_member(&vd->vdev_ms_list, msp, t)); 2819eda14cbcSMatt Macy 2820eda14cbcSMatt Macy range_tree_vacate(msp->ms_trim, NULL, NULL); 2821eda14cbcSMatt Macy range_tree_destroy(msp->ms_trim); 2822eda14cbcSMatt Macy 2823eda14cbcSMatt Macy mutex_exit(&msp->ms_lock); 2824eda14cbcSMatt Macy cv_destroy(&msp->ms_load_cv); 2825eda14cbcSMatt Macy cv_destroy(&msp->ms_flush_cv); 2826eda14cbcSMatt Macy mutex_destroy(&msp->ms_lock); 2827eda14cbcSMatt Macy mutex_destroy(&msp->ms_sync_lock); 2828eda14cbcSMatt Macy ASSERT3U(msp->ms_allocator, ==, -1); 2829eda14cbcSMatt Macy 2830eda14cbcSMatt Macy kmem_free(msp, sizeof (metaslab_t)); 2831eda14cbcSMatt Macy } 2832eda14cbcSMatt Macy 2833eda14cbcSMatt Macy #define FRAGMENTATION_TABLE_SIZE 17 2834eda14cbcSMatt Macy 2835eda14cbcSMatt Macy /* 2836eda14cbcSMatt Macy * This table defines a segment size based fragmentation metric that will 2837eda14cbcSMatt Macy * allow each metaslab to derive its own fragmentation value. This is done 2838eda14cbcSMatt Macy * by calculating the space in each bucket of the spacemap histogram and 2839eda14cbcSMatt Macy * multiplying that by the fragmentation metric in this table. Doing 2840eda14cbcSMatt Macy * this for all buckets and dividing it by the total amount of free 2841eda14cbcSMatt Macy * space in this metaslab (i.e. the total free space in all buckets) gives 2842eda14cbcSMatt Macy * us the fragmentation metric. This means that a high fragmentation metric 2843eda14cbcSMatt Macy * equates to most of the free space being comprised of small segments. 2844eda14cbcSMatt Macy * Conversely, if the metric is low, then most of the free space is in 2845eda14cbcSMatt Macy * large segments. 
A 10% change in fragmentation equates to approximately 2846eda14cbcSMatt Macy * double the number of segments. 2847eda14cbcSMatt Macy * 2848eda14cbcSMatt Macy * This table defines 0% fragmented space using 16MB segments. Testing has 2849eda14cbcSMatt Macy * shown that segments that are greater than or equal to 16MB do not suffer 2850eda14cbcSMatt Macy * from drastic performance problems. Using this value, we derive the rest 2851eda14cbcSMatt Macy * of the table. Since the fragmentation value is never stored on disk, it 2852eda14cbcSMatt Macy * is possible to change these calculations in the future. 2853eda14cbcSMatt Macy */ 2854e92ffd9bSMartin Matuska static const int zfs_frag_table[FRAGMENTATION_TABLE_SIZE] = { 2855eda14cbcSMatt Macy 100, /* 512B */ 2856eda14cbcSMatt Macy 100, /* 1K */ 2857eda14cbcSMatt Macy 98, /* 2K */ 2858eda14cbcSMatt Macy 95, /* 4K */ 2859eda14cbcSMatt Macy 90, /* 8K */ 2860eda14cbcSMatt Macy 80, /* 16K */ 2861eda14cbcSMatt Macy 70, /* 32K */ 2862eda14cbcSMatt Macy 60, /* 64K */ 2863eda14cbcSMatt Macy 50, /* 128K */ 2864eda14cbcSMatt Macy 40, /* 256K */ 2865eda14cbcSMatt Macy 30, /* 512K */ 2866eda14cbcSMatt Macy 20, /* 1M */ 2867eda14cbcSMatt Macy 15, /* 2M */ 2868eda14cbcSMatt Macy 10, /* 4M */ 2869eda14cbcSMatt Macy 5, /* 8M */ 2870eda14cbcSMatt Macy 0 /* 16M */ 2871eda14cbcSMatt Macy }; 2872eda14cbcSMatt Macy 2873eda14cbcSMatt Macy /* 2874eda14cbcSMatt Macy * Calculate the metaslab's fragmentation metric and set ms_fragmentation. 2875eda14cbcSMatt Macy * Setting this value to ZFS_FRAG_INVALID means that the metaslab has not 2876eda14cbcSMatt Macy * been upgraded and does not support this metric. Otherwise, the return 2877eda14cbcSMatt Macy * value should be in the range [0, 100]. 2878eda14cbcSMatt Macy */ 2879eda14cbcSMatt Macy static void 2880eda14cbcSMatt Macy metaslab_set_fragmentation(metaslab_t *msp, boolean_t nodirty) 2881eda14cbcSMatt Macy { 2882eda14cbcSMatt Macy spa_t *spa = msp->ms_group->mg_vd->vdev_spa; 2883eda14cbcSMatt Macy uint64_t fragmentation = 0; 2884eda14cbcSMatt Macy uint64_t total = 0; 2885eda14cbcSMatt Macy boolean_t feature_enabled = spa_feature_is_enabled(spa, 2886eda14cbcSMatt Macy SPA_FEATURE_SPACEMAP_HISTOGRAM); 2887eda14cbcSMatt Macy 2888eda14cbcSMatt Macy if (!feature_enabled) { 2889eda14cbcSMatt Macy msp->ms_fragmentation = ZFS_FRAG_INVALID; 2890eda14cbcSMatt Macy return; 2891eda14cbcSMatt Macy } 2892eda14cbcSMatt Macy 2893eda14cbcSMatt Macy /* 2894eda14cbcSMatt Macy * A null space map means that the entire metaslab is free 2895eda14cbcSMatt Macy * and thus is not fragmented. 2896eda14cbcSMatt Macy */ 2897eda14cbcSMatt Macy if (msp->ms_sm == NULL) { 2898eda14cbcSMatt Macy msp->ms_fragmentation = 0; 2899eda14cbcSMatt Macy return; 2900eda14cbcSMatt Macy } 2901eda14cbcSMatt Macy 2902eda14cbcSMatt Macy /* 2903eda14cbcSMatt Macy * If this metaslab's space map has not been upgraded, flag it 2904eda14cbcSMatt Macy * so that we upgrade next time we encounter it. 2905eda14cbcSMatt Macy */ 2906eda14cbcSMatt Macy if (msp->ms_sm->sm_dbuf->db_size != sizeof (space_map_phys_t)) { 2907eda14cbcSMatt Macy uint64_t txg = spa_syncing_txg(spa); 2908eda14cbcSMatt Macy vdev_t *vd = msp->ms_group->mg_vd; 2909eda14cbcSMatt Macy 2910eda14cbcSMatt Macy /* 2911eda14cbcSMatt Macy * If we've reached the final dirty txg, then we must 2912eda14cbcSMatt Macy * be shutting down the pool. We don't want to dirty 2913eda14cbcSMatt Macy * any data past this point so skip setting the condense 2914eda14cbcSMatt Macy * flag. 
We can retry this action the next time the pool 2915eda14cbcSMatt Macy * is imported. We also skip marking this metaslab for 2916eda14cbcSMatt Macy * condensing if the caller has explicitly set nodirty. 2917eda14cbcSMatt Macy */ 2918eda14cbcSMatt Macy if (!nodirty && 2919eda14cbcSMatt Macy spa_writeable(spa) && txg < spa_final_dirty_txg(spa)) { 2920eda14cbcSMatt Macy msp->ms_condense_wanted = B_TRUE; 2921eda14cbcSMatt Macy vdev_dirty(vd, VDD_METASLAB, msp, txg + 1); 2922eda14cbcSMatt Macy zfs_dbgmsg("txg %llu, requesting force condense: " 292333b8c039SMartin Matuska "ms_id %llu, vdev_id %llu", (u_longlong_t)txg, 292433b8c039SMartin Matuska (u_longlong_t)msp->ms_id, 292533b8c039SMartin Matuska (u_longlong_t)vd->vdev_id); 2926eda14cbcSMatt Macy } 2927eda14cbcSMatt Macy msp->ms_fragmentation = ZFS_FRAG_INVALID; 2928eda14cbcSMatt Macy return; 2929eda14cbcSMatt Macy } 2930eda14cbcSMatt Macy 2931eda14cbcSMatt Macy for (int i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++) { 2932eda14cbcSMatt Macy uint64_t space = 0; 2933eda14cbcSMatt Macy uint8_t shift = msp->ms_sm->sm_shift; 2934eda14cbcSMatt Macy 2935eda14cbcSMatt Macy int idx = MIN(shift - SPA_MINBLOCKSHIFT + i, 2936eda14cbcSMatt Macy FRAGMENTATION_TABLE_SIZE - 1); 2937eda14cbcSMatt Macy 2938eda14cbcSMatt Macy if (msp->ms_sm->sm_phys->smp_histogram[i] == 0) 2939eda14cbcSMatt Macy continue; 2940eda14cbcSMatt Macy 2941eda14cbcSMatt Macy space = msp->ms_sm->sm_phys->smp_histogram[i] << (i + shift); 2942eda14cbcSMatt Macy total += space; 2943eda14cbcSMatt Macy 2944eda14cbcSMatt Macy ASSERT3U(idx, <, FRAGMENTATION_TABLE_SIZE); 2945eda14cbcSMatt Macy fragmentation += space * zfs_frag_table[idx]; 2946eda14cbcSMatt Macy } 2947eda14cbcSMatt Macy 2948eda14cbcSMatt Macy if (total > 0) 2949eda14cbcSMatt Macy fragmentation /= total; 2950eda14cbcSMatt Macy ASSERT3U(fragmentation, <=, 100); 2951eda14cbcSMatt Macy 2952eda14cbcSMatt Macy msp->ms_fragmentation = fragmentation; 2953eda14cbcSMatt Macy } 2954eda14cbcSMatt Macy 2955eda14cbcSMatt Macy /* 2956eda14cbcSMatt Macy * Compute a weight -- a selection preference value -- for the given metaslab. 2957eda14cbcSMatt Macy * This is based on the amount of free space, the level of fragmentation, 2958eda14cbcSMatt Macy * the LBA range, and whether the metaslab is loaded. 2959eda14cbcSMatt Macy */ 2960eda14cbcSMatt Macy static uint64_t 2961eda14cbcSMatt Macy metaslab_space_weight(metaslab_t *msp) 2962eda14cbcSMatt Macy { 2963eda14cbcSMatt Macy metaslab_group_t *mg = msp->ms_group; 2964eda14cbcSMatt Macy vdev_t *vd = mg->mg_vd; 2965eda14cbcSMatt Macy uint64_t weight, space; 2966eda14cbcSMatt Macy 2967eda14cbcSMatt Macy ASSERT(MUTEX_HELD(&msp->ms_lock)); 2968eda14cbcSMatt Macy 2969eda14cbcSMatt Macy /* 2970eda14cbcSMatt Macy * The baseline weight is the metaslab's free space. 2971eda14cbcSMatt Macy */ 2972eda14cbcSMatt Macy space = msp->ms_size - metaslab_allocated_space(msp); 2973eda14cbcSMatt Macy 2974eda14cbcSMatt Macy if (metaslab_fragmentation_factor_enabled && 2975eda14cbcSMatt Macy msp->ms_fragmentation != ZFS_FRAG_INVALID) { 2976eda14cbcSMatt Macy /* 2977eda14cbcSMatt Macy * Use the fragmentation information to inversely scale 2978eda14cbcSMatt Macy * down the baseline weight. We need to ensure that we 2979eda14cbcSMatt Macy * don't exclude this metaslab completely when it's 100% 2980eda14cbcSMatt Macy * fragmented. To avoid this we reduce the fragmented value 2981eda14cbcSMatt Macy * by 1. 
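 *
 * As an illustrative example: with 10 GiB of free space and an
 * ms_fragmentation of 75, the scaled value computed below is
 * 10 GiB * (100 - (75 - 1)) / 100, i.e. roughly 2.6 GiB.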
2982eda14cbcSMatt Macy 		 */
2983eda14cbcSMatt Macy 		space = (space * (100 - (msp->ms_fragmentation - 1))) / 100;
2984eda14cbcSMatt Macy 
2985eda14cbcSMatt Macy 		/*
2986eda14cbcSMatt Macy 		 * If space < SPA_MINBLOCKSIZE, then we will not allocate from
2987eda14cbcSMatt Macy 		 * this metaslab again. The fragmentation metric may have
2988eda14cbcSMatt Macy 		 * decreased the space to something smaller than
2989eda14cbcSMatt Macy 		 * SPA_MINBLOCKSIZE, so reset the space to SPA_MINBLOCKSIZE
2990eda14cbcSMatt Macy 		 * so that we can consume any remaining space.
2991eda14cbcSMatt Macy 		 */
2992eda14cbcSMatt Macy 		if (space > 0 && space < SPA_MINBLOCKSIZE)
2993eda14cbcSMatt Macy 			space = SPA_MINBLOCKSIZE;
2994eda14cbcSMatt Macy 	}
2995eda14cbcSMatt Macy 	weight = space;
2996eda14cbcSMatt Macy 
2997eda14cbcSMatt Macy 	/*
2998eda14cbcSMatt Macy 	 * Modern disks have uniform bit density and constant angular velocity.
2999eda14cbcSMatt Macy 	 * Therefore, the outer recording zones are faster (higher bandwidth)
3000eda14cbcSMatt Macy 	 * than the inner zones by the ratio of outer to inner track diameter,
3001eda14cbcSMatt Macy 	 * which is typically around 2:1. We account for this by assigning
3002eda14cbcSMatt Macy 	 * higher weight to lower metaslabs (multiplier ranging from 2x to 1x).
3003eda14cbcSMatt Macy 	 * In effect, this means that we'll select the metaslab with the most
3004eda14cbcSMatt Macy 	 * free bandwidth rather than simply the one with the most free space.
3005eda14cbcSMatt Macy 	 */
3006eda14cbcSMatt Macy 	if (!vd->vdev_nonrot && metaslab_lba_weighting_enabled) {
3007eda14cbcSMatt Macy 		weight = 2 * weight - (msp->ms_id * weight) / vd->vdev_ms_count;
3008eda14cbcSMatt Macy 		ASSERT(weight >= space && weight <= 2 * space);
3009eda14cbcSMatt Macy 	}
3010eda14cbcSMatt Macy 
3011eda14cbcSMatt Macy 	/*
3012eda14cbcSMatt Macy 	 * If this metaslab is one we're actively using, adjust its
3013eda14cbcSMatt Macy 	 * weight to make it preferable to any inactive metaslab so
3014eda14cbcSMatt Macy 	 * we'll polish it off. If the fragmentation on this metaslab
3015eda14cbcSMatt Macy 	 * has exceeded our threshold, then don't mark it active.
3016eda14cbcSMatt Macy 	 */
3017eda14cbcSMatt Macy 	if (msp->ms_loaded && msp->ms_fragmentation != ZFS_FRAG_INVALID &&
3018eda14cbcSMatt Macy 	    msp->ms_fragmentation <= zfs_metaslab_fragmentation_threshold) {
3019eda14cbcSMatt Macy 		weight |= (msp->ms_weight & METASLAB_ACTIVE_MASK);
3020eda14cbcSMatt Macy 	}
3021eda14cbcSMatt Macy 
3022eda14cbcSMatt Macy 	WEIGHT_SET_SPACEBASED(weight);
3023eda14cbcSMatt Macy 	return (weight);
3024eda14cbcSMatt Macy }
3025eda14cbcSMatt Macy 
3026eda14cbcSMatt Macy /*
3027eda14cbcSMatt Macy  * Return the weight of the specified metaslab, according to the segment-based
3028eda14cbcSMatt Macy  * weighting algorithm. The metaslab must be loaded. This function can
3029eda14cbcSMatt Macy  * be called within a sync pass since it relies only on the metaslab's
3030eda14cbcSMatt Macy  * range tree which is always accurate when the metaslab is loaded.
3031eda14cbcSMatt Macy  */
3032eda14cbcSMatt Macy static uint64_t
3033eda14cbcSMatt Macy metaslab_weight_from_range_tree(metaslab_t *msp)
3034eda14cbcSMatt Macy {
3035eda14cbcSMatt Macy 	uint64_t weight = 0;
3036eda14cbcSMatt Macy 	uint32_t segments = 0;
3037eda14cbcSMatt Macy 
3038eda14cbcSMatt Macy 	ASSERT(msp->ms_loaded);
3039eda14cbcSMatt Macy 
3040eda14cbcSMatt Macy 	for (int i = RANGE_TREE_HISTOGRAM_SIZE - 1; i >= SPA_MINBLOCKSHIFT;
3041eda14cbcSMatt Macy 	    i--) {
3042eda14cbcSMatt Macy 		uint8_t shift = msp->ms_group->mg_vd->vdev_ashift;
3043eda14cbcSMatt Macy 		int max_idx = SPACE_MAP_HISTOGRAM_SIZE + shift - 1;
3044eda14cbcSMatt Macy 
3045eda14cbcSMatt Macy 		segments <<= 1;
3046eda14cbcSMatt Macy 		segments += msp->ms_allocatable->rt_histogram[i];
3047eda14cbcSMatt Macy 
3048eda14cbcSMatt Macy 		/*
3049eda14cbcSMatt Macy 		 * The range tree provides more precision than the space map
3050eda14cbcSMatt Macy 		 * and must be downgraded so that all values fit within the
3051eda14cbcSMatt Macy 		 * space map's histogram. This allows us to compare loaded
3052eda14cbcSMatt Macy 		 * vs. unloaded metaslabs to determine which metaslab is
3053eda14cbcSMatt Macy 		 * considered "best".
3054eda14cbcSMatt Macy 		 */
3055eda14cbcSMatt Macy 		if (i > max_idx)
3056eda14cbcSMatt Macy 			continue;
3057eda14cbcSMatt Macy 
3058eda14cbcSMatt Macy 		if (segments != 0) {
3059eda14cbcSMatt Macy 			WEIGHT_SET_COUNT(weight, segments);
3060eda14cbcSMatt Macy 			WEIGHT_SET_INDEX(weight, i);
3061eda14cbcSMatt Macy 			WEIGHT_SET_ACTIVE(weight, 0);
3062eda14cbcSMatt Macy 			break;
3063eda14cbcSMatt Macy 		}
3064eda14cbcSMatt Macy 	}
3065eda14cbcSMatt Macy 	return (weight);
3066eda14cbcSMatt Macy }
3067eda14cbcSMatt Macy 
3068eda14cbcSMatt Macy /*
3069eda14cbcSMatt Macy  * Calculate the weight based on the on-disk histogram. This should be
3070eda14cbcSMatt Macy  * applied only to unloaded metaslabs (i.e. no incoming allocations) in
3071eda14cbcSMatt Macy  * order to give results consistent with the on-disk state.
3072eda14cbcSMatt Macy  */
3073eda14cbcSMatt Macy static uint64_t
3074eda14cbcSMatt Macy metaslab_weight_from_spacemap(metaslab_t *msp)
3075eda14cbcSMatt Macy {
3076eda14cbcSMatt Macy 	space_map_t *sm = msp->ms_sm;
3077eda14cbcSMatt Macy 	ASSERT(!msp->ms_loaded);
3078eda14cbcSMatt Macy 	ASSERT(sm != NULL);
3079eda14cbcSMatt Macy 	ASSERT3U(space_map_object(sm), !=, 0);
3080eda14cbcSMatt Macy 	ASSERT3U(sm->sm_dbuf->db_size, ==, sizeof (space_map_phys_t));
3081eda14cbcSMatt Macy 
3082eda14cbcSMatt Macy 	/*
3083eda14cbcSMatt Macy 	 * Create a joint histogram from all the segments that have made
3084eda14cbcSMatt Macy 	 * it to the metaslab's space map histogram, but are not yet
3085eda14cbcSMatt Macy 	 * available for allocation because they are still in the freeing
3086eda14cbcSMatt Macy 	 * pipeline (e.g. freeing, freed, and defer trees). Then subtract
3087eda14cbcSMatt Macy 	 * these segments from the space map's histogram to get a more
3088eda14cbcSMatt Macy 	 * accurate weight.
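	 *
	 * For instance (illustrative counts): if bucket i of the space
	 * map histogram holds 12 segments and ms_synchist/ms_deferhist
	 * account for 2 of them, only 10 segments are treated as
	 * allocatable when selecting the weight below.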
3089eda14cbcSMatt Macy */ 3090eda14cbcSMatt Macy uint64_t deferspace_histogram[SPACE_MAP_HISTOGRAM_SIZE] = {0}; 3091eda14cbcSMatt Macy for (int i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++) 3092eda14cbcSMatt Macy deferspace_histogram[i] += msp->ms_synchist[i]; 3093eda14cbcSMatt Macy for (int t = 0; t < TXG_DEFER_SIZE; t++) { 3094eda14cbcSMatt Macy for (int i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++) { 3095eda14cbcSMatt Macy deferspace_histogram[i] += msp->ms_deferhist[t][i]; 3096eda14cbcSMatt Macy } 3097eda14cbcSMatt Macy } 3098eda14cbcSMatt Macy 3099eda14cbcSMatt Macy uint64_t weight = 0; 3100eda14cbcSMatt Macy for (int i = SPACE_MAP_HISTOGRAM_SIZE - 1; i >= 0; i--) { 3101eda14cbcSMatt Macy ASSERT3U(sm->sm_phys->smp_histogram[i], >=, 3102eda14cbcSMatt Macy deferspace_histogram[i]); 3103eda14cbcSMatt Macy uint64_t count = 3104eda14cbcSMatt Macy sm->sm_phys->smp_histogram[i] - deferspace_histogram[i]; 3105eda14cbcSMatt Macy if (count != 0) { 3106eda14cbcSMatt Macy WEIGHT_SET_COUNT(weight, count); 3107eda14cbcSMatt Macy WEIGHT_SET_INDEX(weight, i + sm->sm_shift); 3108eda14cbcSMatt Macy WEIGHT_SET_ACTIVE(weight, 0); 3109eda14cbcSMatt Macy break; 3110eda14cbcSMatt Macy } 3111eda14cbcSMatt Macy } 3112eda14cbcSMatt Macy return (weight); 3113eda14cbcSMatt Macy } 3114eda14cbcSMatt Macy 3115eda14cbcSMatt Macy /* 3116eda14cbcSMatt Macy * Compute a segment-based weight for the specified metaslab. The weight 3117eda14cbcSMatt Macy * is determined by highest bucket in the histogram. The information 3118eda14cbcSMatt Macy * for the highest bucket is encoded into the weight value. 3119eda14cbcSMatt Macy */ 3120eda14cbcSMatt Macy static uint64_t 3121eda14cbcSMatt Macy metaslab_segment_weight(metaslab_t *msp) 3122eda14cbcSMatt Macy { 3123eda14cbcSMatt Macy metaslab_group_t *mg = msp->ms_group; 3124eda14cbcSMatt Macy uint64_t weight = 0; 3125eda14cbcSMatt Macy uint8_t shift = mg->mg_vd->vdev_ashift; 3126eda14cbcSMatt Macy 3127eda14cbcSMatt Macy ASSERT(MUTEX_HELD(&msp->ms_lock)); 3128eda14cbcSMatt Macy 3129eda14cbcSMatt Macy /* 3130eda14cbcSMatt Macy * The metaslab is completely free. 3131eda14cbcSMatt Macy */ 3132eda14cbcSMatt Macy if (metaslab_allocated_space(msp) == 0) { 3133eda14cbcSMatt Macy int idx = highbit64(msp->ms_size) - 1; 3134eda14cbcSMatt Macy int max_idx = SPACE_MAP_HISTOGRAM_SIZE + shift - 1; 3135eda14cbcSMatt Macy 3136eda14cbcSMatt Macy if (idx < max_idx) { 3137eda14cbcSMatt Macy WEIGHT_SET_COUNT(weight, 1ULL); 3138eda14cbcSMatt Macy WEIGHT_SET_INDEX(weight, idx); 3139eda14cbcSMatt Macy } else { 3140eda14cbcSMatt Macy WEIGHT_SET_COUNT(weight, 1ULL << (idx - max_idx)); 3141eda14cbcSMatt Macy WEIGHT_SET_INDEX(weight, max_idx); 3142eda14cbcSMatt Macy } 3143eda14cbcSMatt Macy WEIGHT_SET_ACTIVE(weight, 0); 3144eda14cbcSMatt Macy ASSERT(!WEIGHT_IS_SPACEBASED(weight)); 3145eda14cbcSMatt Macy return (weight); 3146eda14cbcSMatt Macy } 3147eda14cbcSMatt Macy 3148eda14cbcSMatt Macy ASSERT3U(msp->ms_sm->sm_dbuf->db_size, ==, sizeof (space_map_phys_t)); 3149eda14cbcSMatt Macy 3150eda14cbcSMatt Macy /* 3151eda14cbcSMatt Macy * If the metaslab is fully allocated then just make the weight 0. 3152eda14cbcSMatt Macy */ 3153eda14cbcSMatt Macy if (metaslab_allocated_space(msp) == msp->ms_size) 3154eda14cbcSMatt Macy return (0); 3155eda14cbcSMatt Macy /* 3156eda14cbcSMatt Macy * If the metaslab is already loaded, then use the range tree to 3157eda14cbcSMatt Macy * determine the weight. Otherwise, we rely on the space map information 3158eda14cbcSMatt Macy * to generate the weight. 
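	 *
	 * To illustrate the encoding (hypothetical numbers): a
	 * segment-based weight with index 17 and count 8 advertises
	 * roughly eight free segments in the range [128 KiB, 256 KiB).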
3159eda14cbcSMatt Macy */ 3160eda14cbcSMatt Macy if (msp->ms_loaded) { 3161eda14cbcSMatt Macy weight = metaslab_weight_from_range_tree(msp); 3162eda14cbcSMatt Macy } else { 3163eda14cbcSMatt Macy weight = metaslab_weight_from_spacemap(msp); 3164eda14cbcSMatt Macy } 3165eda14cbcSMatt Macy 3166eda14cbcSMatt Macy /* 3167eda14cbcSMatt Macy * If the metaslab was active the last time we calculated its weight 3168eda14cbcSMatt Macy * then keep it active. We want to consume the entire region that 3169eda14cbcSMatt Macy * is associated with this weight. 3170eda14cbcSMatt Macy */ 3171eda14cbcSMatt Macy if (msp->ms_activation_weight != 0 && weight != 0) 3172eda14cbcSMatt Macy WEIGHT_SET_ACTIVE(weight, WEIGHT_GET_ACTIVE(msp->ms_weight)); 3173eda14cbcSMatt Macy return (weight); 3174eda14cbcSMatt Macy } 3175eda14cbcSMatt Macy 3176eda14cbcSMatt Macy /* 3177eda14cbcSMatt Macy * Determine if we should attempt to allocate from this metaslab. If the 3178eda14cbcSMatt Macy * metaslab is loaded, then we can determine if the desired allocation 3179eda14cbcSMatt Macy * can be satisfied by looking at the size of the maximum free segment 3180eda14cbcSMatt Macy * on that metaslab. Otherwise, we make our decision based on the metaslab's 3181eda14cbcSMatt Macy * weight. For segment-based weighting we can determine the maximum 3182eda14cbcSMatt Macy * allocation based on the index encoded in its value. For space-based 3183eda14cbcSMatt Macy * weights we rely on the entire weight (excluding the weight-type bit). 3184eda14cbcSMatt Macy */ 3185eda14cbcSMatt Macy static boolean_t 3186eda14cbcSMatt Macy metaslab_should_allocate(metaslab_t *msp, uint64_t asize, boolean_t try_hard) 3187eda14cbcSMatt Macy { 3188eda14cbcSMatt Macy /* 3189eda14cbcSMatt Macy * If the metaslab is loaded, ms_max_size is definitive and we can use 3190eda14cbcSMatt Macy * the fast check. If it's not, the ms_max_size is a lower bound (once 3191eda14cbcSMatt Macy * set), and we should use the fast check as long as we're not in 3192eda14cbcSMatt Macy * try_hard and it's been less than zfs_metaslab_max_size_cache_sec 3193eda14cbcSMatt Macy * seconds since the metaslab was unloaded. 3194eda14cbcSMatt Macy */ 3195eda14cbcSMatt Macy if (msp->ms_loaded || 3196eda14cbcSMatt Macy (msp->ms_max_size != 0 && !try_hard && gethrtime() < 3197eda14cbcSMatt Macy msp->ms_unload_time + SEC2NSEC(zfs_metaslab_max_size_cache_sec))) 3198eda14cbcSMatt Macy return (msp->ms_max_size >= asize); 3199eda14cbcSMatt Macy 3200eda14cbcSMatt Macy boolean_t should_allocate; 3201eda14cbcSMatt Macy if (!WEIGHT_IS_SPACEBASED(msp->ms_weight)) { 3202eda14cbcSMatt Macy /* 3203eda14cbcSMatt Macy * The metaslab segment weight indicates segments in the 3204eda14cbcSMatt Macy * range [2^i, 2^(i+1)), where i is the index in the weight. 3205eda14cbcSMatt Macy * Since the asize might be in the middle of the range, we 3206eda14cbcSMatt Macy * should attempt the allocation if asize < 2^(i+1). 
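 *
 * For example (illustrative): if WEIGHT_GET_INDEX() yields 16, the
 * weight covers segments in [64 KiB, 128 KiB), so an asize of 96 KiB
 * is worth attempting while an asize of 128 KiB is not.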
3207eda14cbcSMatt Macy 	 */
3208eda14cbcSMatt Macy 		should_allocate = (asize <
3209eda14cbcSMatt Macy 		    1ULL << (WEIGHT_GET_INDEX(msp->ms_weight) + 1));
3210eda14cbcSMatt Macy 	} else {
3211eda14cbcSMatt Macy 		should_allocate = (asize <=
3212eda14cbcSMatt Macy 		    (msp->ms_weight & ~METASLAB_WEIGHT_TYPE));
3213eda14cbcSMatt Macy 	}
3214eda14cbcSMatt Macy 
3215eda14cbcSMatt Macy 	return (should_allocate);
3216eda14cbcSMatt Macy }
3217eda14cbcSMatt Macy 
3218eda14cbcSMatt Macy static uint64_t
3219eda14cbcSMatt Macy metaslab_weight(metaslab_t *msp, boolean_t nodirty)
3220eda14cbcSMatt Macy {
3221eda14cbcSMatt Macy 	vdev_t *vd = msp->ms_group->mg_vd;
3222eda14cbcSMatt Macy 	spa_t *spa = vd->vdev_spa;
3223eda14cbcSMatt Macy 	uint64_t weight;
3224eda14cbcSMatt Macy 
3225eda14cbcSMatt Macy 	ASSERT(MUTEX_HELD(&msp->ms_lock));
3226eda14cbcSMatt Macy 
3227eda14cbcSMatt Macy 	metaslab_set_fragmentation(msp, nodirty);
3228eda14cbcSMatt Macy 
3229eda14cbcSMatt Macy 	/*
3230eda14cbcSMatt Macy 	 * Update the maximum size. If the metaslab is loaded, this will
3231eda14cbcSMatt Macy 	 * ensure that we get an accurate maximum size if newly freed space
3232eda14cbcSMatt Macy 	 * has been added back into the free tree. If the metaslab is
3233eda14cbcSMatt Macy 	 * unloaded, we check if there's a larger free segment in the
3234eda14cbcSMatt Macy 	 * unflushed frees. This is a lower bound on the largest allocatable
3235eda14cbcSMatt Macy 	 * segment size. Coalescing of adjacent entries may reveal larger
3236eda14cbcSMatt Macy 	 * allocatable segments, but we aren't aware of those until loading
3237eda14cbcSMatt Macy 	 * the space map into a range tree.
3238eda14cbcSMatt Macy 	 */
3239eda14cbcSMatt Macy 	if (msp->ms_loaded) {
3240eda14cbcSMatt Macy 		msp->ms_max_size = metaslab_largest_allocatable(msp);
3241eda14cbcSMatt Macy 	} else {
3242eda14cbcSMatt Macy 		msp->ms_max_size = MAX(msp->ms_max_size,
3243eda14cbcSMatt Macy 		    metaslab_largest_unflushed_free(msp));
3244eda14cbcSMatt Macy 	}
3245eda14cbcSMatt Macy 
3246eda14cbcSMatt Macy 	/*
3247eda14cbcSMatt Macy 	 * Segment-based weighting requires space map histogram support.
3248eda14cbcSMatt Macy 	 */
3249eda14cbcSMatt Macy 	if (zfs_metaslab_segment_weight_enabled &&
3250eda14cbcSMatt Macy 	    spa_feature_is_enabled(spa, SPA_FEATURE_SPACEMAP_HISTOGRAM) &&
3251eda14cbcSMatt Macy 	    (msp->ms_sm == NULL || msp->ms_sm->sm_dbuf->db_size ==
3252eda14cbcSMatt Macy 	    sizeof (space_map_phys_t))) {
3253eda14cbcSMatt Macy 		weight = metaslab_segment_weight(msp);
3254eda14cbcSMatt Macy 	} else {
3255eda14cbcSMatt Macy 		weight = metaslab_space_weight(msp);
3256eda14cbcSMatt Macy 	}
3257eda14cbcSMatt Macy 	return (weight);
3258eda14cbcSMatt Macy }
3259eda14cbcSMatt Macy 
3260eda14cbcSMatt Macy void
3261eda14cbcSMatt Macy metaslab_recalculate_weight_and_sort(metaslab_t *msp)
3262eda14cbcSMatt Macy {
3263eda14cbcSMatt Macy 	ASSERT(MUTEX_HELD(&msp->ms_lock));
3264eda14cbcSMatt Macy 
3265eda14cbcSMatt Macy 	/* Note: we preserve the mask (e.g. indication of primary, etc.)
*/
3266eda14cbcSMatt Macy 	uint64_t was_active = msp->ms_weight & METASLAB_ACTIVE_MASK;
3267eda14cbcSMatt Macy 	metaslab_group_sort(msp->ms_group, msp,
3268eda14cbcSMatt Macy 	    metaslab_weight(msp, B_FALSE) | was_active);
3269eda14cbcSMatt Macy }
3270eda14cbcSMatt Macy 
3271eda14cbcSMatt Macy static int
3272eda14cbcSMatt Macy metaslab_activate_allocator(metaslab_group_t *mg, metaslab_t *msp,
3273eda14cbcSMatt Macy     int allocator, uint64_t activation_weight)
3274eda14cbcSMatt Macy {
3275eda14cbcSMatt Macy 	metaslab_group_allocator_t *mga = &mg->mg_allocator[allocator];
3276eda14cbcSMatt Macy 	ASSERT(MUTEX_HELD(&msp->ms_lock));
3277eda14cbcSMatt Macy 
3278eda14cbcSMatt Macy 	/*
3279eda14cbcSMatt Macy 	 * If we're activating for the claim code, we don't want to actually
3280eda14cbcSMatt Macy 	 * set the metaslab up for a specific allocator.
3281eda14cbcSMatt Macy 	 */
3282eda14cbcSMatt Macy 	if (activation_weight == METASLAB_WEIGHT_CLAIM) {
3283eda14cbcSMatt Macy 		ASSERT0(msp->ms_activation_weight);
3284eda14cbcSMatt Macy 		msp->ms_activation_weight = msp->ms_weight;
3285eda14cbcSMatt Macy 		metaslab_group_sort(mg, msp, msp->ms_weight |
3286eda14cbcSMatt Macy 		    activation_weight);
3287eda14cbcSMatt Macy 		return (0);
3288eda14cbcSMatt Macy 	}
3289eda14cbcSMatt Macy 
3290eda14cbcSMatt Macy 	metaslab_t **mspp = (activation_weight == METASLAB_WEIGHT_PRIMARY ?
3291eda14cbcSMatt Macy 	    &mga->mga_primary : &mga->mga_secondary);
3292eda14cbcSMatt Macy 
3293eda14cbcSMatt Macy 	mutex_enter(&mg->mg_lock);
3294eda14cbcSMatt Macy 	if (*mspp != NULL) {
3295eda14cbcSMatt Macy 		mutex_exit(&mg->mg_lock);
3296eda14cbcSMatt Macy 		return (EEXIST);
3297eda14cbcSMatt Macy 	}
3298eda14cbcSMatt Macy 
3299eda14cbcSMatt Macy 	*mspp = msp;
3300eda14cbcSMatt Macy 	ASSERT3S(msp->ms_allocator, ==, -1);
3301eda14cbcSMatt Macy 	msp->ms_allocator = allocator;
3302eda14cbcSMatt Macy 	msp->ms_primary = (activation_weight == METASLAB_WEIGHT_PRIMARY);
3303eda14cbcSMatt Macy 
3304eda14cbcSMatt Macy 	ASSERT0(msp->ms_activation_weight);
3305eda14cbcSMatt Macy 	msp->ms_activation_weight = msp->ms_weight;
3306eda14cbcSMatt Macy 	metaslab_group_sort_impl(mg, msp,
3307eda14cbcSMatt Macy 	    msp->ms_weight | activation_weight);
3308eda14cbcSMatt Macy 	mutex_exit(&mg->mg_lock);
3309eda14cbcSMatt Macy 
3310eda14cbcSMatt Macy 	return (0);
3311eda14cbcSMatt Macy }
3312eda14cbcSMatt Macy 
3313eda14cbcSMatt Macy static int
3314eda14cbcSMatt Macy metaslab_activate(metaslab_t *msp, int allocator, uint64_t activation_weight)
3315eda14cbcSMatt Macy {
3316eda14cbcSMatt Macy 	ASSERT(MUTEX_HELD(&msp->ms_lock));
3317eda14cbcSMatt Macy 
3318eda14cbcSMatt Macy 	/*
3319eda14cbcSMatt Macy 	 * The current metaslab is already activated for us so there
3320eda14cbcSMatt Macy 	 * is nothing to do. Being already activated, though, doesn't mean
3321eda14cbcSMatt Macy 	 * that this metaslab is activated for our allocator or with our
3322eda14cbcSMatt Macy 	 * requested activation weight. The metaslab could have started
3323eda14cbcSMatt Macy 	 * as an active one for our allocator but changed allocators
3324eda14cbcSMatt Macy 	 * while we were waiting to grab its ms_lock, or we stole it
3325eda14cbcSMatt Macy 	 * [see find_valid_metaslab()]. This means that this thread may
3326eda14cbcSMatt Macy 	 * end up passivating a metaslab that belongs to another allocator,
3327eda14cbcSMatt Macy 	 * or one that was activated with a different activation mask.
3328eda14cbcSMatt Macy */ 3329eda14cbcSMatt Macy if ((msp->ms_weight & METASLAB_ACTIVE_MASK) != 0) { 3330eda14cbcSMatt Macy ASSERT(msp->ms_loaded); 3331eda14cbcSMatt Macy return (0); 3332eda14cbcSMatt Macy } 3333eda14cbcSMatt Macy 3334eda14cbcSMatt Macy int error = metaslab_load(msp); 3335eda14cbcSMatt Macy if (error != 0) { 3336eda14cbcSMatt Macy metaslab_group_sort(msp->ms_group, msp, 0); 3337eda14cbcSMatt Macy return (error); 3338eda14cbcSMatt Macy } 3339eda14cbcSMatt Macy 3340eda14cbcSMatt Macy /* 3341eda14cbcSMatt Macy * When entering metaslab_load() we may have dropped the 3342eda14cbcSMatt Macy * ms_lock because we were loading this metaslab, or we 3343eda14cbcSMatt Macy * were waiting for another thread to load it for us. In 3344eda14cbcSMatt Macy * that scenario, we recheck the weight of the metaslab 3345eda14cbcSMatt Macy * to see if it was activated by another thread. 3346eda14cbcSMatt Macy * 3347eda14cbcSMatt Macy * If the metaslab was activated for another allocator or 3348eda14cbcSMatt Macy * it was activated with a different activation weight (e.g. 3349eda14cbcSMatt Macy * we wanted to make it a primary but it was activated as 3350eda14cbcSMatt Macy * secondary) we return error (EBUSY). 3351eda14cbcSMatt Macy * 3352eda14cbcSMatt Macy * If the metaslab was activated for the same allocator 3353eda14cbcSMatt Macy * and requested activation mask, skip activating it. 3354eda14cbcSMatt Macy */ 3355eda14cbcSMatt Macy if ((msp->ms_weight & METASLAB_ACTIVE_MASK) != 0) { 3356eda14cbcSMatt Macy if (msp->ms_allocator != allocator) 3357eda14cbcSMatt Macy return (EBUSY); 3358eda14cbcSMatt Macy 3359eda14cbcSMatt Macy if ((msp->ms_weight & activation_weight) == 0) 3360eda14cbcSMatt Macy return (SET_ERROR(EBUSY)); 3361eda14cbcSMatt Macy 3362eda14cbcSMatt Macy EQUIV((activation_weight == METASLAB_WEIGHT_PRIMARY), 3363eda14cbcSMatt Macy msp->ms_primary); 3364eda14cbcSMatt Macy return (0); 3365eda14cbcSMatt Macy } 3366eda14cbcSMatt Macy 3367eda14cbcSMatt Macy /* 3368eda14cbcSMatt Macy * If the metaslab has literally 0 space, it will have weight 0. In 3369eda14cbcSMatt Macy * that case, don't bother activating it. This can happen if the 3370eda14cbcSMatt Macy * metaslab had space during find_valid_metaslab, but another thread 3371eda14cbcSMatt Macy * loaded it and used all that space while we were waiting to grab the 3372eda14cbcSMatt Macy * lock. 
3373eda14cbcSMatt Macy */ 3374eda14cbcSMatt Macy if (msp->ms_weight == 0) { 3375eda14cbcSMatt Macy ASSERT0(range_tree_space(msp->ms_allocatable)); 3376eda14cbcSMatt Macy return (SET_ERROR(ENOSPC)); 3377eda14cbcSMatt Macy } 3378eda14cbcSMatt Macy 3379eda14cbcSMatt Macy if ((error = metaslab_activate_allocator(msp->ms_group, msp, 3380eda14cbcSMatt Macy allocator, activation_weight)) != 0) { 3381eda14cbcSMatt Macy return (error); 3382eda14cbcSMatt Macy } 3383eda14cbcSMatt Macy 3384eda14cbcSMatt Macy ASSERT(msp->ms_loaded); 3385eda14cbcSMatt Macy ASSERT(msp->ms_weight & METASLAB_ACTIVE_MASK); 3386eda14cbcSMatt Macy 3387eda14cbcSMatt Macy return (0); 3388eda14cbcSMatt Macy } 3389eda14cbcSMatt Macy 3390eda14cbcSMatt Macy static void 3391eda14cbcSMatt Macy metaslab_passivate_allocator(metaslab_group_t *mg, metaslab_t *msp, 3392eda14cbcSMatt Macy uint64_t weight) 3393eda14cbcSMatt Macy { 3394eda14cbcSMatt Macy ASSERT(MUTEX_HELD(&msp->ms_lock)); 3395eda14cbcSMatt Macy ASSERT(msp->ms_loaded); 3396eda14cbcSMatt Macy 3397eda14cbcSMatt Macy if (msp->ms_weight & METASLAB_WEIGHT_CLAIM) { 3398eda14cbcSMatt Macy metaslab_group_sort(mg, msp, weight); 3399eda14cbcSMatt Macy return; 3400eda14cbcSMatt Macy } 3401eda14cbcSMatt Macy 3402eda14cbcSMatt Macy mutex_enter(&mg->mg_lock); 3403eda14cbcSMatt Macy ASSERT3P(msp->ms_group, ==, mg); 3404eda14cbcSMatt Macy ASSERT3S(0, <=, msp->ms_allocator); 3405eda14cbcSMatt Macy ASSERT3U(msp->ms_allocator, <, mg->mg_allocators); 3406eda14cbcSMatt Macy 3407eda14cbcSMatt Macy metaslab_group_allocator_t *mga = &mg->mg_allocator[msp->ms_allocator]; 3408eda14cbcSMatt Macy if (msp->ms_primary) { 3409eda14cbcSMatt Macy ASSERT3P(mga->mga_primary, ==, msp); 3410eda14cbcSMatt Macy ASSERT(msp->ms_weight & METASLAB_WEIGHT_PRIMARY); 3411eda14cbcSMatt Macy mga->mga_primary = NULL; 3412eda14cbcSMatt Macy } else { 3413eda14cbcSMatt Macy ASSERT3P(mga->mga_secondary, ==, msp); 3414eda14cbcSMatt Macy ASSERT(msp->ms_weight & METASLAB_WEIGHT_SECONDARY); 3415eda14cbcSMatt Macy mga->mga_secondary = NULL; 3416eda14cbcSMatt Macy } 3417eda14cbcSMatt Macy msp->ms_allocator = -1; 3418eda14cbcSMatt Macy metaslab_group_sort_impl(mg, msp, weight); 3419eda14cbcSMatt Macy mutex_exit(&mg->mg_lock); 3420eda14cbcSMatt Macy } 3421eda14cbcSMatt Macy 3422eda14cbcSMatt Macy static void 3423eda14cbcSMatt Macy metaslab_passivate(metaslab_t *msp, uint64_t weight) 3424eda14cbcSMatt Macy { 3425eda14cbcSMatt Macy uint64_t size __maybe_unused = weight & ~METASLAB_WEIGHT_TYPE; 3426eda14cbcSMatt Macy 3427eda14cbcSMatt Macy /* 3428eda14cbcSMatt Macy * If size < SPA_MINBLOCKSIZE, then we will not allocate from 3429eda14cbcSMatt Macy * this metaslab again. In that case, it had better be empty, 3430eda14cbcSMatt Macy * or we would be leaving space on the table. 
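 * (SPA_MINBLOCKSIZE is 512 bytes, the smallest allocation ZFS will issue,
 * so a largest free segment below that size can never satisfy an
 * allocation.)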
3431eda14cbcSMatt Macy 	 */
3432eda14cbcSMatt Macy 	ASSERT(!WEIGHT_IS_SPACEBASED(msp->ms_weight) ||
3433eda14cbcSMatt Macy 	    size >= SPA_MINBLOCKSIZE ||
3434eda14cbcSMatt Macy 	    range_tree_space(msp->ms_allocatable) == 0);
3435eda14cbcSMatt Macy 	ASSERT0(weight & METASLAB_ACTIVE_MASK);
3436eda14cbcSMatt Macy 
3437eda14cbcSMatt Macy 	ASSERT(msp->ms_activation_weight != 0);
3438eda14cbcSMatt Macy 	msp->ms_activation_weight = 0;
3439eda14cbcSMatt Macy 	metaslab_passivate_allocator(msp->ms_group, msp, weight);
3440eda14cbcSMatt Macy 	ASSERT0(msp->ms_weight & METASLAB_ACTIVE_MASK);
3441eda14cbcSMatt Macy }
3442eda14cbcSMatt Macy 
3443eda14cbcSMatt Macy /*
3444eda14cbcSMatt Macy  * Segment-based metaslabs are activated once and remain active until
3445eda14cbcSMatt Macy  * we either fail an allocation attempt (similar to space-based metaslabs)
3446eda14cbcSMatt Macy  * or have exhausted the free space in zfs_metaslab_switch_threshold
3447eda14cbcSMatt Macy  * buckets since the metaslab was activated. This function checks to see
3448eda14cbcSMatt Macy  * if we've exhausted the zfs_metaslab_switch_threshold buckets in the
3449eda14cbcSMatt Macy  * metaslab and passivates it proactively. This will allow us to select a
3450eda14cbcSMatt Macy  * metaslab with a larger contiguous region, if any, remaining within this
3451eda14cbcSMatt Macy  * metaslab group. If we're in sync pass > 1, then we continue using this
3452eda14cbcSMatt Macy  * metaslab so that we don't dirty more blocks and cause more sync passes.
3453eda14cbcSMatt Macy  */
3454eda14cbcSMatt Macy static void
3455eda14cbcSMatt Macy metaslab_segment_may_passivate(metaslab_t *msp)
3456eda14cbcSMatt Macy {
3457eda14cbcSMatt Macy 	spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
3458eda14cbcSMatt Macy 
3459eda14cbcSMatt Macy 	if (WEIGHT_IS_SPACEBASED(msp->ms_weight) || spa_sync_pass(spa) > 1)
3460eda14cbcSMatt Macy 		return;
3461eda14cbcSMatt Macy 
3462eda14cbcSMatt Macy 	/*
3463eda14cbcSMatt Macy 	 * Since we are in the middle of a sync pass, the most accurate
3464eda14cbcSMatt Macy 	 * information that is accessible to us is the in-core range tree
3465eda14cbcSMatt Macy 	 * histogram; calculate the new weight based on that information.
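 * For example, with the default zfs_metaslab_switch_threshold of 2, a
 * metaslab activated at weight index 20 is passivated once the index of
 * its largest free segment drops to 18 or below.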
3466eda14cbcSMatt Macy */ 3467eda14cbcSMatt Macy uint64_t weight = metaslab_weight_from_range_tree(msp); 3468eda14cbcSMatt Macy int activation_idx = WEIGHT_GET_INDEX(msp->ms_activation_weight); 3469eda14cbcSMatt Macy int current_idx = WEIGHT_GET_INDEX(weight); 3470eda14cbcSMatt Macy 3471eda14cbcSMatt Macy if (current_idx <= activation_idx - zfs_metaslab_switch_threshold) 3472eda14cbcSMatt Macy metaslab_passivate(msp, weight); 3473eda14cbcSMatt Macy } 3474eda14cbcSMatt Macy 3475eda14cbcSMatt Macy static void 3476eda14cbcSMatt Macy metaslab_preload(void *arg) 3477eda14cbcSMatt Macy { 3478eda14cbcSMatt Macy metaslab_t *msp = arg; 3479eda14cbcSMatt Macy metaslab_class_t *mc = msp->ms_group->mg_class; 3480eda14cbcSMatt Macy spa_t *spa = mc->mc_spa; 3481eda14cbcSMatt Macy fstrans_cookie_t cookie = spl_fstrans_mark(); 3482eda14cbcSMatt Macy 3483eda14cbcSMatt Macy ASSERT(!MUTEX_HELD(&msp->ms_group->mg_lock)); 3484eda14cbcSMatt Macy 3485eda14cbcSMatt Macy mutex_enter(&msp->ms_lock); 3486eda14cbcSMatt Macy (void) metaslab_load(msp); 3487eda14cbcSMatt Macy metaslab_set_selected_txg(msp, spa_syncing_txg(spa)); 3488eda14cbcSMatt Macy mutex_exit(&msp->ms_lock); 3489eda14cbcSMatt Macy spl_fstrans_unmark(cookie); 3490eda14cbcSMatt Macy } 3491eda14cbcSMatt Macy 3492eda14cbcSMatt Macy static void 3493eda14cbcSMatt Macy metaslab_group_preload(metaslab_group_t *mg) 3494eda14cbcSMatt Macy { 3495eda14cbcSMatt Macy spa_t *spa = mg->mg_vd->vdev_spa; 3496eda14cbcSMatt Macy metaslab_t *msp; 3497eda14cbcSMatt Macy avl_tree_t *t = &mg->mg_metaslab_tree; 3498eda14cbcSMatt Macy int m = 0; 3499eda14cbcSMatt Macy 3500eda14cbcSMatt Macy if (spa_shutting_down(spa) || !metaslab_preload_enabled) { 3501eda14cbcSMatt Macy taskq_wait_outstanding(mg->mg_taskq, 0); 3502eda14cbcSMatt Macy return; 3503eda14cbcSMatt Macy } 3504eda14cbcSMatt Macy 3505eda14cbcSMatt Macy mutex_enter(&mg->mg_lock); 3506eda14cbcSMatt Macy 3507eda14cbcSMatt Macy /* 3508eda14cbcSMatt Macy * Load the next potential metaslabs 3509eda14cbcSMatt Macy */ 3510eda14cbcSMatt Macy for (msp = avl_first(t); msp != NULL; msp = AVL_NEXT(t, msp)) { 3511eda14cbcSMatt Macy ASSERT3P(msp->ms_group, ==, mg); 3512eda14cbcSMatt Macy 3513eda14cbcSMatt Macy /* 3514eda14cbcSMatt Macy * We preload only the maximum number of metaslabs specified 3515eda14cbcSMatt Macy * by metaslab_preload_limit. If a metaslab is being forced 3516eda14cbcSMatt Macy * to condense then we preload it too. This will ensure 3517eda14cbcSMatt Macy * that force condensing happens in the next txg. 3518eda14cbcSMatt Macy */ 3519eda14cbcSMatt Macy if (++m > metaslab_preload_limit && !msp->ms_condense_wanted) { 3520eda14cbcSMatt Macy continue; 3521eda14cbcSMatt Macy } 3522eda14cbcSMatt Macy 3523eda14cbcSMatt Macy VERIFY(taskq_dispatch(mg->mg_taskq, metaslab_preload, 3524eda14cbcSMatt Macy msp, TQ_SLEEP) != TASKQID_INVALID); 3525eda14cbcSMatt Macy } 3526eda14cbcSMatt Macy mutex_exit(&mg->mg_lock); 3527eda14cbcSMatt Macy } 3528eda14cbcSMatt Macy 3529eda14cbcSMatt Macy /* 3530eda14cbcSMatt Macy * Determine if the space map's on-disk footprint is past our tolerance for 3531eda14cbcSMatt Macy * inefficiency. We would like to use the following criteria to make our 3532eda14cbcSMatt Macy * decision: 3533eda14cbcSMatt Macy * 3534eda14cbcSMatt Macy * 1. Do not condense if the size of the space map object would dramatically 3535eda14cbcSMatt Macy * increase as a result of writing out the free space range tree. 3536eda14cbcSMatt Macy * 3537eda14cbcSMatt Macy * 2. 
Condense if the on-disk space map representation is at least
3538eda14cbcSMatt Macy  *    zfs_condense_pct/100 times the size of the optimal representation
3539eda14cbcSMatt Macy  *    (i.e. with zfs_condense_pct = 110, condense once the on-disk size is at least 1.1 times the optimal size).
3540eda14cbcSMatt Macy  *
3541eda14cbcSMatt Macy  * 3. Do not condense if the on-disk size of the space map does not actually
3542eda14cbcSMatt Macy  *    decrease.
3543eda14cbcSMatt Macy  *
3544eda14cbcSMatt Macy  * Unfortunately, we cannot compute the on-disk size of the space map in this
3545eda14cbcSMatt Macy  * context because we cannot accurately compute the effects of compression, etc.
3546eda14cbcSMatt Macy  * Instead, we apply the heuristic described in the block comment for
3547eda14cbcSMatt Macy  * zfs_metaslab_condense_block_threshold - we only condense if the space used
3548eda14cbcSMatt Macy  * is greater than a threshold number of blocks.
3549eda14cbcSMatt Macy  */
3550eda14cbcSMatt Macy static boolean_t
3551eda14cbcSMatt Macy metaslab_should_condense(metaslab_t *msp)
3552eda14cbcSMatt Macy {
3553eda14cbcSMatt Macy 	space_map_t *sm = msp->ms_sm;
3554eda14cbcSMatt Macy 	vdev_t *vd = msp->ms_group->mg_vd;
3555eda14cbcSMatt Macy 	uint64_t vdev_blocksize = 1 << vd->vdev_ashift;
3556eda14cbcSMatt Macy 
3557eda14cbcSMatt Macy 	ASSERT(MUTEX_HELD(&msp->ms_lock));
3558eda14cbcSMatt Macy 	ASSERT(msp->ms_loaded);
3559eda14cbcSMatt Macy 	ASSERT(sm != NULL);
3560eda14cbcSMatt Macy 	ASSERT3U(spa_sync_pass(vd->vdev_spa), ==, 1);
3561eda14cbcSMatt Macy 
3562eda14cbcSMatt Macy 	/*
3563eda14cbcSMatt Macy 	 * We always condense metaslabs that are empty and metaslabs for
3564eda14cbcSMatt Macy 	 * which a condense request has been made.
3565eda14cbcSMatt Macy 	 */
3566eda14cbcSMatt Macy 	if (range_tree_numsegs(msp->ms_allocatable) == 0 ||
3567eda14cbcSMatt Macy 	    msp->ms_condense_wanted)
3568eda14cbcSMatt Macy 		return (B_TRUE);
3569eda14cbcSMatt Macy 
3570eda14cbcSMatt Macy 	uint64_t record_size = MAX(sm->sm_blksz, vdev_blocksize);
3571eda14cbcSMatt Macy 	uint64_t object_size = space_map_length(sm);
3572eda14cbcSMatt Macy 	uint64_t optimal_size = space_map_estimate_optimal_size(sm,
3573eda14cbcSMatt Macy 	    msp->ms_allocatable, SM_NO_VDEVID);
3574eda14cbcSMatt Macy 
3575eda14cbcSMatt Macy 	return (object_size >= (optimal_size * zfs_condense_pct / 100) &&
3576eda14cbcSMatt Macy 	    object_size > zfs_metaslab_condense_block_threshold * record_size);
3577eda14cbcSMatt Macy }
3578eda14cbcSMatt Macy 
3579eda14cbcSMatt Macy /*
3580eda14cbcSMatt Macy  * Condense the on-disk space map representation to its minimized form.
3581eda14cbcSMatt Macy  * The minimized form consists of a small number of allocations followed
3582eda14cbcSMatt Macy  * by the entries of the free range tree (ms_allocatable). The condensed
3583eda14cbcSMatt Macy  * spacemap contains all the entries of previous TXGs (including those in
3584eda14cbcSMatt Macy  * the pool-wide log spacemaps; thus this is effectively a superset of
3585eda14cbcSMatt Macy  * metaslab_flush()), but this TXG's entries still need to be written.
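 * The on-disk result is an ALLOC covering the whole metaslab followed by
 * FREE entries for ms_allocatable and for the condense tree (see the
 * space_map_write() calls at the bottom of this function).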
3586eda14cbcSMatt Macy  */
3587eda14cbcSMatt Macy static void
3588eda14cbcSMatt Macy metaslab_condense(metaslab_t *msp, dmu_tx_t *tx)
3589eda14cbcSMatt Macy {
3590eda14cbcSMatt Macy 	range_tree_t *condense_tree;
3591eda14cbcSMatt Macy 	space_map_t *sm = msp->ms_sm;
3592eda14cbcSMatt Macy 	uint64_t txg = dmu_tx_get_txg(tx);
3593eda14cbcSMatt Macy 	spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
3594eda14cbcSMatt Macy 
3595eda14cbcSMatt Macy 	ASSERT(MUTEX_HELD(&msp->ms_lock));
3596eda14cbcSMatt Macy 	ASSERT(msp->ms_loaded);
3597eda14cbcSMatt Macy 	ASSERT(msp->ms_sm != NULL);
3598eda14cbcSMatt Macy 
3599eda14cbcSMatt Macy 	/*
3600eda14cbcSMatt Macy 	 * In order to condense the space map, we need to change it so it
3601eda14cbcSMatt Macy 	 * only describes which segments are currently allocated and free.
3602eda14cbcSMatt Macy 	 *
3603eda14cbcSMatt Macy 	 * All the current free space resides in the ms_allocatable, all
3604eda14cbcSMatt Macy 	 * the ms_defer trees, and all the ms_allocating trees. We ignore
3605eda14cbcSMatt Macy 	 * ms_freed because it is empty because we're in sync pass 1. We
3606eda14cbcSMatt Macy 	 * ignore ms_freeing because these changes are not yet reflected
3607eda14cbcSMatt Macy 	 * in the spacemap (they will be written later this txg).
3608eda14cbcSMatt Macy 	 *
3609eda14cbcSMatt Macy 	 * So to truncate the space map to represent all the entries of
3610eda14cbcSMatt Macy 	 * previous TXGs we do the following:
3611eda14cbcSMatt Macy 	 *
3612eda14cbcSMatt Macy 	 * 1] We create a range tree (condense tree) that is 100% empty.
3613eda14cbcSMatt Macy 	 * 2] We add to it all segments found in the ms_defer trees
3614eda14cbcSMatt Macy 	 *    as those segments are marked as free in the original space
3615eda14cbcSMatt Macy 	 *    map. We do the same with the ms_allocating trees for the same
3616eda14cbcSMatt Macy 	 *    reason. Adding these segments should be a relatively
3617eda14cbcSMatt Macy 	 *    inexpensive operation since we expect these trees to have a
3618eda14cbcSMatt Macy 	 *    small number of nodes.
3619eda14cbcSMatt Macy 	 * 3] We vacate any unflushed allocs, since they are not frees we
3620eda14cbcSMatt Macy 	 *    need to add to the condense tree. Then we vacate any
3621eda14cbcSMatt Macy 	 *    unflushed frees as they should already be part of ms_allocatable.
3622eda14cbcSMatt Macy 	 * 4] At this point, we would ideally like to remove all segments
3623eda14cbcSMatt Macy 	 *    in the ms_allocatable tree from the condense tree. This way
3624eda14cbcSMatt Macy 	 *    we would write all the entries of the condense tree as the
3625eda14cbcSMatt Macy 	 *    condensed space map, which would only contain freed
3626eda14cbcSMatt Macy 	 *    segments with everything else assumed to be allocated.
3627eda14cbcSMatt Macy 	 *
3628eda14cbcSMatt Macy 	 *    Doing so can be prohibitively expensive as ms_allocatable can
3629eda14cbcSMatt Macy 	 *    be large, and therefore computationally expensive to subtract
3630eda14cbcSMatt Macy 	 *    from the condense_tree. Instead we first sync out an entry marking
3631eda14cbcSMatt Macy 	 *    everything as allocated, then the condense_tree and then the
3632eda14cbcSMatt Macy 	 *    ms_allocatable, in the condensed space map. While this is not
3633eda14cbcSMatt Macy 	 *    optimal, it is typically close to optimal and more importantly
3634eda14cbcSMatt Macy 	 *    much cheaper to compute.
3635eda14cbcSMatt Macy 	 *
3636eda14cbcSMatt Macy 	 * 5] Finally, as both of the unflushed trees were written to our
3637eda14cbcSMatt Macy 	 *    new and condensed metaslab space map, we basically flushed
3638eda14cbcSMatt Macy 	 *    all the unflushed changes to disk, thus we call
3639eda14cbcSMatt Macy 	 *    metaslab_flush_update().
3640eda14cbcSMatt Macy 	 */
3641eda14cbcSMatt Macy 	ASSERT3U(spa_sync_pass(spa), ==, 1);
3642eda14cbcSMatt Macy 	ASSERT(range_tree_is_empty(msp->ms_freed)); /* since it is pass 1 */
3643eda14cbcSMatt Macy 
3644eda14cbcSMatt Macy 	zfs_dbgmsg("condensing: txg %llu, msp[%llu] %px, vdev id %llu, "
364533b8c039SMartin Matuska 	    "spa %s, smp size %llu, segments %llu, forcing condense=%s",
364633b8c039SMartin Matuska 	    (u_longlong_t)txg, (u_longlong_t)msp->ms_id, msp,
364733b8c039SMartin Matuska 	    (u_longlong_t)msp->ms_group->mg_vd->vdev_id,
364833b8c039SMartin Matuska 	    spa->spa_name, (u_longlong_t)space_map_length(msp->ms_sm),
364933b8c039SMartin Matuska 	    (u_longlong_t)range_tree_numsegs(msp->ms_allocatable),
3650eda14cbcSMatt Macy 	    msp->ms_condense_wanted ? "TRUE" : "FALSE");
3651eda14cbcSMatt Macy 
3652eda14cbcSMatt Macy 	msp->ms_condense_wanted = B_FALSE;
3653eda14cbcSMatt Macy 
3654eda14cbcSMatt Macy 	range_seg_type_t type;
3655eda14cbcSMatt Macy 	uint64_t shift, start;
3656eda14cbcSMatt Macy 	type = metaslab_calculate_range_tree_type(msp->ms_group->mg_vd, msp,
3657eda14cbcSMatt Macy 	    &start, &shift);
3658eda14cbcSMatt Macy 
3659eda14cbcSMatt Macy 	condense_tree = range_tree_create(NULL, type, NULL, start, shift);
3660eda14cbcSMatt Macy 
3661eda14cbcSMatt Macy 	for (int t = 0; t < TXG_DEFER_SIZE; t++) {
3662eda14cbcSMatt Macy 		range_tree_walk(msp->ms_defer[t],
3663eda14cbcSMatt Macy 		    range_tree_add, condense_tree);
3664eda14cbcSMatt Macy 	}
3665eda14cbcSMatt Macy 
3666eda14cbcSMatt Macy 	for (int t = 0; t < TXG_CONCURRENT_STATES; t++) {
3667eda14cbcSMatt Macy 		range_tree_walk(msp->ms_allocating[(txg + t) & TXG_MASK],
3668eda14cbcSMatt Macy 		    range_tree_add, condense_tree);
3669eda14cbcSMatt Macy 	}
3670eda14cbcSMatt Macy 
3671eda14cbcSMatt Macy 	ASSERT3U(spa->spa_unflushed_stats.sus_memused, >=,
3672eda14cbcSMatt Macy 	    metaslab_unflushed_changes_memused(msp));
3673eda14cbcSMatt Macy 	spa->spa_unflushed_stats.sus_memused -=
3674eda14cbcSMatt Macy 	    metaslab_unflushed_changes_memused(msp);
3675eda14cbcSMatt Macy 	range_tree_vacate(msp->ms_unflushed_allocs, NULL, NULL);
3676eda14cbcSMatt Macy 	range_tree_vacate(msp->ms_unflushed_frees, NULL, NULL);
3677eda14cbcSMatt Macy 
3678eda14cbcSMatt Macy 	/*
3679eda14cbcSMatt Macy 	 * We're about to drop the metaslab's lock thus allowing other
3680eda14cbcSMatt Macy 	 * consumers to change its content. Set the metaslab's ms_condensing
3681eda14cbcSMatt Macy 	 * flag to ensure that allocations on this metaslab do not occur
3682eda14cbcSMatt Macy 	 * while we're in the middle of committing it to disk. This is only
3683eda14cbcSMatt Macy 	 * critical for ms_allocatable as all other range trees use per TXG
3684eda14cbcSMatt Macy 	 * views of their content.
3685eda14cbcSMatt Macy 	 */
3686eda14cbcSMatt Macy 	msp->ms_condensing = B_TRUE;
3687eda14cbcSMatt Macy 
3688eda14cbcSMatt Macy 	mutex_exit(&msp->ms_lock);
3689eda14cbcSMatt Macy 	uint64_t object = space_map_object(msp->ms_sm);
3690eda14cbcSMatt Macy 	space_map_truncate(sm,
3691eda14cbcSMatt Macy 	    spa_feature_is_enabled(spa, SPA_FEATURE_LOG_SPACEMAP) ?
3692eda14cbcSMatt Macy zfs_metaslab_sm_blksz_with_log : zfs_metaslab_sm_blksz_no_log, tx); 3693eda14cbcSMatt Macy 3694eda14cbcSMatt Macy /* 3695eda14cbcSMatt Macy * space_map_truncate() may have reallocated the spacemap object. 3696eda14cbcSMatt Macy * If so, update the vdev_ms_array. 3697eda14cbcSMatt Macy */ 3698eda14cbcSMatt Macy if (space_map_object(msp->ms_sm) != object) { 3699eda14cbcSMatt Macy object = space_map_object(msp->ms_sm); 3700eda14cbcSMatt Macy dmu_write(spa->spa_meta_objset, 3701eda14cbcSMatt Macy msp->ms_group->mg_vd->vdev_ms_array, sizeof (uint64_t) * 3702eda14cbcSMatt Macy msp->ms_id, sizeof (uint64_t), &object, tx); 3703eda14cbcSMatt Macy } 3704eda14cbcSMatt Macy 3705eda14cbcSMatt Macy /* 3706eda14cbcSMatt Macy * Note: 3707eda14cbcSMatt Macy * When the log space map feature is enabled, each space map will 3708eda14cbcSMatt Macy * always have ALLOCS followed by FREES for each sync pass. This is 3709eda14cbcSMatt Macy * typically true even when the log space map feature is disabled, 3710eda14cbcSMatt Macy * except from the case where a metaslab goes through metaslab_sync() 3711eda14cbcSMatt Macy * and gets condensed. In that case the metaslab's space map will have 3712eda14cbcSMatt Macy * ALLOCS followed by FREES (due to condensing) followed by ALLOCS 3713eda14cbcSMatt Macy * followed by FREES (due to space_map_write() in metaslab_sync()) for 3714eda14cbcSMatt Macy * sync pass 1. 3715eda14cbcSMatt Macy */ 3716eda14cbcSMatt Macy range_tree_t *tmp_tree = range_tree_create(NULL, type, NULL, start, 3717eda14cbcSMatt Macy shift); 3718eda14cbcSMatt Macy range_tree_add(tmp_tree, msp->ms_start, msp->ms_size); 3719eda14cbcSMatt Macy space_map_write(sm, tmp_tree, SM_ALLOC, SM_NO_VDEVID, tx); 3720eda14cbcSMatt Macy space_map_write(sm, msp->ms_allocatable, SM_FREE, SM_NO_VDEVID, tx); 3721eda14cbcSMatt Macy space_map_write(sm, condense_tree, SM_FREE, SM_NO_VDEVID, tx); 3722eda14cbcSMatt Macy 3723eda14cbcSMatt Macy range_tree_vacate(condense_tree, NULL, NULL); 3724eda14cbcSMatt Macy range_tree_destroy(condense_tree); 3725eda14cbcSMatt Macy range_tree_vacate(tmp_tree, NULL, NULL); 3726eda14cbcSMatt Macy range_tree_destroy(tmp_tree); 3727eda14cbcSMatt Macy mutex_enter(&msp->ms_lock); 3728eda14cbcSMatt Macy 3729eda14cbcSMatt Macy msp->ms_condensing = B_FALSE; 3730eda14cbcSMatt Macy metaslab_flush_update(msp, tx); 3731eda14cbcSMatt Macy } 3732eda14cbcSMatt Macy 3733*716fd348SMartin Matuska static void 3734*716fd348SMartin Matuska metaslab_unflushed_add(metaslab_t *msp, dmu_tx_t *tx) 3735*716fd348SMartin Matuska { 3736*716fd348SMartin Matuska spa_t *spa = msp->ms_group->mg_vd->vdev_spa; 3737*716fd348SMartin Matuska ASSERT(spa_syncing_log_sm(spa) != NULL); 3738*716fd348SMartin Matuska ASSERT(msp->ms_sm != NULL); 3739*716fd348SMartin Matuska ASSERT(range_tree_is_empty(msp->ms_unflushed_allocs)); 3740*716fd348SMartin Matuska ASSERT(range_tree_is_empty(msp->ms_unflushed_frees)); 3741*716fd348SMartin Matuska 3742*716fd348SMartin Matuska mutex_enter(&spa->spa_flushed_ms_lock); 3743*716fd348SMartin Matuska metaslab_set_unflushed_txg(msp, spa_syncing_txg(spa), tx); 3744*716fd348SMartin Matuska metaslab_set_unflushed_dirty(msp, B_TRUE); 3745*716fd348SMartin Matuska avl_add(&spa->spa_metaslabs_by_flushed, msp); 3746*716fd348SMartin Matuska mutex_exit(&spa->spa_flushed_ms_lock); 3747*716fd348SMartin Matuska 3748*716fd348SMartin Matuska spa_log_sm_increment_current_mscount(spa); 3749*716fd348SMartin Matuska spa_log_summary_add_flushed_metaslab(spa, B_TRUE); 3750*716fd348SMartin Matuska } 
3751*716fd348SMartin Matuska 3752*716fd348SMartin Matuska void 3753*716fd348SMartin Matuska metaslab_unflushed_bump(metaslab_t *msp, dmu_tx_t *tx, boolean_t dirty) 3754*716fd348SMartin Matuska { 3755*716fd348SMartin Matuska spa_t *spa = msp->ms_group->mg_vd->vdev_spa; 3756*716fd348SMartin Matuska ASSERT(spa_syncing_log_sm(spa) != NULL); 3757*716fd348SMartin Matuska ASSERT(msp->ms_sm != NULL); 3758*716fd348SMartin Matuska ASSERT(metaslab_unflushed_txg(msp) != 0); 3759*716fd348SMartin Matuska ASSERT3P(avl_find(&spa->spa_metaslabs_by_flushed, msp, NULL), ==, msp); 3760*716fd348SMartin Matuska ASSERT(range_tree_is_empty(msp->ms_unflushed_allocs)); 3761*716fd348SMartin Matuska ASSERT(range_tree_is_empty(msp->ms_unflushed_frees)); 3762*716fd348SMartin Matuska 3763*716fd348SMartin Matuska VERIFY3U(tx->tx_txg, <=, spa_final_dirty_txg(spa)); 3764*716fd348SMartin Matuska 3765*716fd348SMartin Matuska /* update metaslab's position in our flushing tree */ 3766*716fd348SMartin Matuska uint64_t ms_prev_flushed_txg = metaslab_unflushed_txg(msp); 3767*716fd348SMartin Matuska boolean_t ms_prev_flushed_dirty = metaslab_unflushed_dirty(msp); 3768*716fd348SMartin Matuska mutex_enter(&spa->spa_flushed_ms_lock); 3769*716fd348SMartin Matuska avl_remove(&spa->spa_metaslabs_by_flushed, msp); 3770*716fd348SMartin Matuska metaslab_set_unflushed_txg(msp, spa_syncing_txg(spa), tx); 3771*716fd348SMartin Matuska metaslab_set_unflushed_dirty(msp, dirty); 3772*716fd348SMartin Matuska avl_add(&spa->spa_metaslabs_by_flushed, msp); 3773*716fd348SMartin Matuska mutex_exit(&spa->spa_flushed_ms_lock); 3774*716fd348SMartin Matuska 3775*716fd348SMartin Matuska /* update metaslab counts of spa_log_sm_t nodes */ 3776*716fd348SMartin Matuska spa_log_sm_decrement_mscount(spa, ms_prev_flushed_txg); 3777*716fd348SMartin Matuska spa_log_sm_increment_current_mscount(spa); 3778*716fd348SMartin Matuska 3779*716fd348SMartin Matuska /* update log space map summary */ 3780*716fd348SMartin Matuska spa_log_summary_decrement_mscount(spa, ms_prev_flushed_txg, 3781*716fd348SMartin Matuska ms_prev_flushed_dirty); 3782*716fd348SMartin Matuska spa_log_summary_add_flushed_metaslab(spa, dirty); 3783*716fd348SMartin Matuska 3784*716fd348SMartin Matuska /* cleanup obsolete logs if any */ 3785*716fd348SMartin Matuska spa_cleanup_old_sm_logs(spa, tx); 3786*716fd348SMartin Matuska } 3787*716fd348SMartin Matuska 3788eda14cbcSMatt Macy /* 3789eda14cbcSMatt Macy * Called when the metaslab has been flushed (its own spacemap now reflects 3790eda14cbcSMatt Macy * all the contents of the pool-wide spacemap log). Updates the metaslab's 3791eda14cbcSMatt Macy * metadata and any pool-wide related log space map data (e.g. summary, 3792eda14cbcSMatt Macy * obsolete logs, etc..) to reflect that. 3793eda14cbcSMatt Macy */ 3794eda14cbcSMatt Macy static void 3795eda14cbcSMatt Macy metaslab_flush_update(metaslab_t *msp, dmu_tx_t *tx) 3796eda14cbcSMatt Macy { 3797eda14cbcSMatt Macy metaslab_group_t *mg = msp->ms_group; 3798eda14cbcSMatt Macy spa_t *spa = mg->mg_vd->vdev_spa; 3799eda14cbcSMatt Macy 3800eda14cbcSMatt Macy ASSERT(MUTEX_HELD(&msp->ms_lock)); 3801eda14cbcSMatt Macy 3802eda14cbcSMatt Macy ASSERT3U(spa_sync_pass(spa), ==, 1); 3803eda14cbcSMatt Macy 3804eda14cbcSMatt Macy /* 3805eda14cbcSMatt Macy * Just because a metaslab got flushed, that doesn't mean that 3806eda14cbcSMatt Macy * it will pass through metaslab_sync_done(). Thus, make sure to 3807eda14cbcSMatt Macy * update ms_synced_length here in case it doesn't. 
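 * (metaslab_load_impl() relies on ms_synced_length to read only the
 * synced portion of the space map when loading.)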
3808eda14cbcSMatt Macy 	 */
3809eda14cbcSMatt Macy 	msp->ms_synced_length = space_map_length(msp->ms_sm);
3810eda14cbcSMatt Macy 
3811eda14cbcSMatt Macy 	/*
3812eda14cbcSMatt Macy 	 * We may end up here from metaslab_condense() without the
3813eda14cbcSMatt Macy 	 * feature being active. In that case this is a no-op.
3814eda14cbcSMatt Macy 	 */
3815*716fd348SMartin Matuska 	if (!spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP) ||
3816*716fd348SMartin Matuska 	    metaslab_unflushed_txg(msp) == 0)
3817eda14cbcSMatt Macy 		return;
3818eda14cbcSMatt Macy 
3819*716fd348SMartin Matuska 	metaslab_unflushed_bump(msp, tx, B_FALSE);
3820eda14cbcSMatt Macy }
3821eda14cbcSMatt Macy 
3822eda14cbcSMatt Macy boolean_t
3823eda14cbcSMatt Macy metaslab_flush(metaslab_t *msp, dmu_tx_t *tx)
3824eda14cbcSMatt Macy {
3825eda14cbcSMatt Macy 	spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
3826eda14cbcSMatt Macy 
3827eda14cbcSMatt Macy 	ASSERT(MUTEX_HELD(&msp->ms_lock));
3828eda14cbcSMatt Macy 	ASSERT3U(spa_sync_pass(spa), ==, 1);
3829eda14cbcSMatt Macy 	ASSERT(spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP));
3830eda14cbcSMatt Macy 
3831eda14cbcSMatt Macy 	ASSERT(msp->ms_sm != NULL);
3832eda14cbcSMatt Macy 	ASSERT(metaslab_unflushed_txg(msp) != 0);
3833eda14cbcSMatt Macy 	ASSERT(avl_find(&spa->spa_metaslabs_by_flushed, msp, NULL) != NULL);
3834eda14cbcSMatt Macy 
3835eda14cbcSMatt Macy 	/*
3836eda14cbcSMatt Macy 	 * There is nothing wrong with flushing the same metaslab twice, as
3837eda14cbcSMatt Macy 	 * this codepath should work in that case. However, the current
3838eda14cbcSMatt Macy 	 * flushing scheme makes sure to avoid this situation as we would be
3839eda14cbcSMatt Macy 	 * making all these calls without having anything meaningful to write
3840eda14cbcSMatt Macy 	 * to disk. We assert this behavior here.
3841eda14cbcSMatt Macy 	 */
3842eda14cbcSMatt Macy 	ASSERT3U(metaslab_unflushed_txg(msp), <, dmu_tx_get_txg(tx));
3843eda14cbcSMatt Macy 
3844eda14cbcSMatt Macy 	/*
3845eda14cbcSMatt Macy 	 * We cannot flush while loading, because then we would
3846eda14cbcSMatt Macy 	 * not load the ms_unflushed_{allocs,frees}.
3847eda14cbcSMatt Macy 	 */
3848eda14cbcSMatt Macy 	if (msp->ms_loading)
3849eda14cbcSMatt Macy 		return (B_FALSE);
3850eda14cbcSMatt Macy 
3851eda14cbcSMatt Macy 	metaslab_verify_space(msp, dmu_tx_get_txg(tx));
3852eda14cbcSMatt Macy 	metaslab_verify_weight_and_frag(msp);
3853eda14cbcSMatt Macy 
3854eda14cbcSMatt Macy 	/*
3855eda14cbcSMatt Macy 	 * Metaslab condensing is effectively flushing. Therefore if the
3856eda14cbcSMatt Macy 	 * metaslab can be condensed we can just condense it instead of
3857eda14cbcSMatt Macy 	 * flushing it.
3858eda14cbcSMatt Macy 	 *
3859eda14cbcSMatt Macy 	 * Note that metaslab_condense() does call metaslab_flush_update()
3860eda14cbcSMatt Macy 	 * so we can just return immediately after condensing. We also
3861eda14cbcSMatt Macy 	 * don't need to care about setting ms_flushing or broadcasting
3862eda14cbcSMatt Macy 	 * ms_flush_cv, even if we temporarily drop the ms_lock in
3863eda14cbcSMatt Macy 	 * metaslab_condense(), as the metaslab is already loaded.
3864eda14cbcSMatt Macy 	 */
3865eda14cbcSMatt Macy 	if (msp->ms_loaded && metaslab_should_condense(msp)) {
3866eda14cbcSMatt Macy 		metaslab_group_t *mg = msp->ms_group;
3867eda14cbcSMatt Macy 
3868eda14cbcSMatt Macy 		/*
3869eda14cbcSMatt Macy 		 * For all histogram operations below refer to the
3870eda14cbcSMatt Macy 		 * comments of metaslab_sync() where we follow a
3871eda14cbcSMatt Macy 		 * similar procedure.
3872eda14cbcSMatt Macy */ 3873eda14cbcSMatt Macy metaslab_group_histogram_verify(mg); 3874eda14cbcSMatt Macy metaslab_class_histogram_verify(mg->mg_class); 3875eda14cbcSMatt Macy metaslab_group_histogram_remove(mg, msp); 3876eda14cbcSMatt Macy 3877eda14cbcSMatt Macy metaslab_condense(msp, tx); 3878eda14cbcSMatt Macy 3879eda14cbcSMatt Macy space_map_histogram_clear(msp->ms_sm); 3880eda14cbcSMatt Macy space_map_histogram_add(msp->ms_sm, msp->ms_allocatable, tx); 3881eda14cbcSMatt Macy ASSERT(range_tree_is_empty(msp->ms_freed)); 3882eda14cbcSMatt Macy for (int t = 0; t < TXG_DEFER_SIZE; t++) { 3883eda14cbcSMatt Macy space_map_histogram_add(msp->ms_sm, 3884eda14cbcSMatt Macy msp->ms_defer[t], tx); 3885eda14cbcSMatt Macy } 3886eda14cbcSMatt Macy metaslab_aux_histograms_update(msp); 3887eda14cbcSMatt Macy 3888eda14cbcSMatt Macy metaslab_group_histogram_add(mg, msp); 3889eda14cbcSMatt Macy metaslab_group_histogram_verify(mg); 3890eda14cbcSMatt Macy metaslab_class_histogram_verify(mg->mg_class); 3891eda14cbcSMatt Macy 3892eda14cbcSMatt Macy metaslab_verify_space(msp, dmu_tx_get_txg(tx)); 3893eda14cbcSMatt Macy 3894eda14cbcSMatt Macy /* 3895eda14cbcSMatt Macy * Since we recreated the histogram (and potentially 3896eda14cbcSMatt Macy * the ms_sm too while condensing) ensure that the 3897eda14cbcSMatt Macy * weight is updated too because we are not guaranteed 3898eda14cbcSMatt Macy * that this metaslab is dirty and will go through 3899eda14cbcSMatt Macy * metaslab_sync_done(). 3900eda14cbcSMatt Macy */ 3901eda14cbcSMatt Macy metaslab_recalculate_weight_and_sort(msp); 3902eda14cbcSMatt Macy return (B_TRUE); 3903eda14cbcSMatt Macy } 3904eda14cbcSMatt Macy 3905eda14cbcSMatt Macy msp->ms_flushing = B_TRUE; 3906eda14cbcSMatt Macy uint64_t sm_len_before = space_map_length(msp->ms_sm); 3907eda14cbcSMatt Macy 3908eda14cbcSMatt Macy mutex_exit(&msp->ms_lock); 3909eda14cbcSMatt Macy space_map_write(msp->ms_sm, msp->ms_unflushed_allocs, SM_ALLOC, 3910eda14cbcSMatt Macy SM_NO_VDEVID, tx); 3911eda14cbcSMatt Macy space_map_write(msp->ms_sm, msp->ms_unflushed_frees, SM_FREE, 3912eda14cbcSMatt Macy SM_NO_VDEVID, tx); 3913eda14cbcSMatt Macy mutex_enter(&msp->ms_lock); 3914eda14cbcSMatt Macy 3915eda14cbcSMatt Macy uint64_t sm_len_after = space_map_length(msp->ms_sm); 3916eda14cbcSMatt Macy if (zfs_flags & ZFS_DEBUG_LOG_SPACEMAP) { 3917eda14cbcSMatt Macy zfs_dbgmsg("flushing: txg %llu, spa %s, vdev_id %llu, " 3918eda14cbcSMatt Macy "ms_id %llu, unflushed_allocs %llu, unflushed_frees %llu, " 391933b8c039SMartin Matuska "appended %llu bytes", (u_longlong_t)dmu_tx_get_txg(tx), 392033b8c039SMartin Matuska spa_name(spa), 392133b8c039SMartin Matuska (u_longlong_t)msp->ms_group->mg_vd->vdev_id, 392233b8c039SMartin Matuska (u_longlong_t)msp->ms_id, 392333b8c039SMartin Matuska (u_longlong_t)range_tree_space(msp->ms_unflushed_allocs), 392433b8c039SMartin Matuska (u_longlong_t)range_tree_space(msp->ms_unflushed_frees), 392533b8c039SMartin Matuska (u_longlong_t)(sm_len_after - sm_len_before)); 3926eda14cbcSMatt Macy } 3927eda14cbcSMatt Macy 3928eda14cbcSMatt Macy ASSERT3U(spa->spa_unflushed_stats.sus_memused, >=, 3929eda14cbcSMatt Macy metaslab_unflushed_changes_memused(msp)); 3930eda14cbcSMatt Macy spa->spa_unflushed_stats.sus_memused -= 3931eda14cbcSMatt Macy metaslab_unflushed_changes_memused(msp); 3932eda14cbcSMatt Macy range_tree_vacate(msp->ms_unflushed_allocs, NULL, NULL); 3933eda14cbcSMatt Macy range_tree_vacate(msp->ms_unflushed_frees, NULL, NULL); 3934eda14cbcSMatt Macy 3935eda14cbcSMatt Macy metaslab_verify_space(msp, 
dmu_tx_get_txg(tx)); 3936eda14cbcSMatt Macy metaslab_verify_weight_and_frag(msp); 3937eda14cbcSMatt Macy 3938eda14cbcSMatt Macy metaslab_flush_update(msp, tx); 3939eda14cbcSMatt Macy 3940eda14cbcSMatt Macy metaslab_verify_space(msp, dmu_tx_get_txg(tx)); 3941eda14cbcSMatt Macy metaslab_verify_weight_and_frag(msp); 3942eda14cbcSMatt Macy 3943eda14cbcSMatt Macy msp->ms_flushing = B_FALSE; 3944eda14cbcSMatt Macy cv_broadcast(&msp->ms_flush_cv); 3945eda14cbcSMatt Macy return (B_TRUE); 3946eda14cbcSMatt Macy } 3947eda14cbcSMatt Macy 3948eda14cbcSMatt Macy /* 3949eda14cbcSMatt Macy * Write a metaslab to disk in the context of the specified transaction group. 3950eda14cbcSMatt Macy */ 3951eda14cbcSMatt Macy void 3952eda14cbcSMatt Macy metaslab_sync(metaslab_t *msp, uint64_t txg) 3953eda14cbcSMatt Macy { 3954eda14cbcSMatt Macy metaslab_group_t *mg = msp->ms_group; 3955eda14cbcSMatt Macy vdev_t *vd = mg->mg_vd; 3956eda14cbcSMatt Macy spa_t *spa = vd->vdev_spa; 3957eda14cbcSMatt Macy objset_t *mos = spa_meta_objset(spa); 3958eda14cbcSMatt Macy range_tree_t *alloctree = msp->ms_allocating[txg & TXG_MASK]; 3959eda14cbcSMatt Macy dmu_tx_t *tx; 3960eda14cbcSMatt Macy 3961eda14cbcSMatt Macy ASSERT(!vd->vdev_ishole); 3962eda14cbcSMatt Macy 3963eda14cbcSMatt Macy /* 3964eda14cbcSMatt Macy * This metaslab has just been added so there's no work to do now. 3965eda14cbcSMatt Macy */ 3966f9693befSMartin Matuska if (msp->ms_new) { 3967f9693befSMartin Matuska ASSERT0(range_tree_space(alloctree)); 3968f9693befSMartin Matuska ASSERT0(range_tree_space(msp->ms_freeing)); 3969f9693befSMartin Matuska ASSERT0(range_tree_space(msp->ms_freed)); 3970f9693befSMartin Matuska ASSERT0(range_tree_space(msp->ms_checkpointing)); 3971f9693befSMartin Matuska ASSERT0(range_tree_space(msp->ms_trim)); 3972eda14cbcSMatt Macy return; 3973eda14cbcSMatt Macy } 3974eda14cbcSMatt Macy 3975eda14cbcSMatt Macy /* 3976eda14cbcSMatt Macy * Normally, we don't want to process a metaslab if there are no 3977eda14cbcSMatt Macy * allocations or frees to perform. However, if the metaslab is being 3978eda14cbcSMatt Macy * forced to condense, it's loaded and we're not beyond the final 3979eda14cbcSMatt Macy * dirty txg, we need to let it through. Not condensing beyond the 3980eda14cbcSMatt Macy * final dirty txg prevents an issue where metaslabs that need to be 3981eda14cbcSMatt Macy * condensed but were loaded for other reasons could cause a panic 3982eda14cbcSMatt Macy * here. By only checking the txg in that branch of the conditional, 3983eda14cbcSMatt Macy * we preserve the utility of the VERIFY statements in all other 3984eda14cbcSMatt Macy * cases. 3985eda14cbcSMatt Macy */ 3986eda14cbcSMatt Macy if (range_tree_is_empty(alloctree) && 3987eda14cbcSMatt Macy range_tree_is_empty(msp->ms_freeing) && 3988eda14cbcSMatt Macy range_tree_is_empty(msp->ms_checkpointing) && 3989eda14cbcSMatt Macy !(msp->ms_loaded && msp->ms_condense_wanted && 3990eda14cbcSMatt Macy txg <= spa_final_dirty_txg(spa))) 3991eda14cbcSMatt Macy return; 3992eda14cbcSMatt Macy 3993eda14cbcSMatt Macy 3994eda14cbcSMatt Macy VERIFY3U(txg, <=, spa_final_dirty_txg(spa)); 3995eda14cbcSMatt Macy 3996eda14cbcSMatt Macy /* 3997eda14cbcSMatt Macy * The only state that can actually be changing concurrently 3998eda14cbcSMatt Macy * with metaslab_sync() is the metaslab's ms_allocatable. No 3999eda14cbcSMatt Macy * other thread can be modifying this txg's alloc, freeing, 4000eda14cbcSMatt Macy * freed, or space_map_phys_t. 
We drop ms_lock whenever we 4001eda14cbcSMatt Macy * could call into the DMU, because the DMU can call down to 4002eda14cbcSMatt Macy * us (e.g. via zio_free()) at any time. 4003eda14cbcSMatt Macy * 4004eda14cbcSMatt Macy * The spa_vdev_remove_thread() can be reading metaslab state 4005eda14cbcSMatt Macy * concurrently, and it is locked out by the ms_sync_lock. 4006eda14cbcSMatt Macy * Note that the ms_lock is insufficient for this, because it 4007eda14cbcSMatt Macy * is dropped by space_map_write(). 4008eda14cbcSMatt Macy */ 4009eda14cbcSMatt Macy tx = dmu_tx_create_assigned(spa_get_dsl(spa), txg); 4010eda14cbcSMatt Macy 4011eda14cbcSMatt Macy /* 4012eda14cbcSMatt Macy * Generate a log space map if one doesn't exist already. 4013eda14cbcSMatt Macy */ 4014eda14cbcSMatt Macy spa_generate_syncing_log_sm(spa, tx); 4015eda14cbcSMatt Macy 4016eda14cbcSMatt Macy if (msp->ms_sm == NULL) { 4017eda14cbcSMatt Macy uint64_t new_object = space_map_alloc(mos, 4018eda14cbcSMatt Macy spa_feature_is_enabled(spa, SPA_FEATURE_LOG_SPACEMAP) ? 4019eda14cbcSMatt Macy zfs_metaslab_sm_blksz_with_log : 4020eda14cbcSMatt Macy zfs_metaslab_sm_blksz_no_log, tx); 4021eda14cbcSMatt Macy VERIFY3U(new_object, !=, 0); 4022eda14cbcSMatt Macy 4023eda14cbcSMatt Macy dmu_write(mos, vd->vdev_ms_array, sizeof (uint64_t) * 4024eda14cbcSMatt Macy msp->ms_id, sizeof (uint64_t), &new_object, tx); 4025eda14cbcSMatt Macy 4026eda14cbcSMatt Macy VERIFY0(space_map_open(&msp->ms_sm, mos, new_object, 4027eda14cbcSMatt Macy msp->ms_start, msp->ms_size, vd->vdev_ashift)); 4028eda14cbcSMatt Macy ASSERT(msp->ms_sm != NULL); 4029eda14cbcSMatt Macy 4030eda14cbcSMatt Macy ASSERT(range_tree_is_empty(msp->ms_unflushed_allocs)); 4031eda14cbcSMatt Macy ASSERT(range_tree_is_empty(msp->ms_unflushed_frees)); 4032eda14cbcSMatt Macy ASSERT0(metaslab_allocated_space(msp)); 4033eda14cbcSMatt Macy } 4034eda14cbcSMatt Macy 4035eda14cbcSMatt Macy if (!range_tree_is_empty(msp->ms_checkpointing) && 4036eda14cbcSMatt Macy vd->vdev_checkpoint_sm == NULL) { 4037eda14cbcSMatt Macy ASSERT(spa_has_checkpoint(spa)); 4038eda14cbcSMatt Macy 4039eda14cbcSMatt Macy uint64_t new_object = space_map_alloc(mos, 4040eda14cbcSMatt Macy zfs_vdev_standard_sm_blksz, tx); 4041eda14cbcSMatt Macy VERIFY3U(new_object, !=, 0); 4042eda14cbcSMatt Macy 4043eda14cbcSMatt Macy VERIFY0(space_map_open(&vd->vdev_checkpoint_sm, 4044eda14cbcSMatt Macy mos, new_object, 0, vd->vdev_asize, vd->vdev_ashift)); 4045eda14cbcSMatt Macy ASSERT3P(vd->vdev_checkpoint_sm, !=, NULL); 4046eda14cbcSMatt Macy 4047eda14cbcSMatt Macy /* 4048eda14cbcSMatt Macy * We save the space map object as an entry in vdev_top_zap 4049eda14cbcSMatt Macy * so it can be retrieved when the pool is reopened after an 4050eda14cbcSMatt Macy * export or through zdb. 4051eda14cbcSMatt Macy */ 4052eda14cbcSMatt Macy VERIFY0(zap_add(vd->vdev_spa->spa_meta_objset, 4053eda14cbcSMatt Macy vd->vdev_top_zap, VDEV_TOP_ZAP_POOL_CHECKPOINT_SM, 4054eda14cbcSMatt Macy sizeof (new_object), 1, &new_object, tx)); 4055eda14cbcSMatt Macy } 4056eda14cbcSMatt Macy 4057eda14cbcSMatt Macy mutex_enter(&msp->ms_sync_lock); 4058eda14cbcSMatt Macy mutex_enter(&msp->ms_lock); 4059eda14cbcSMatt Macy 4060eda14cbcSMatt Macy /* 4061eda14cbcSMatt Macy * Note: metaslab_condense() clears the space map's histogram. 4062eda14cbcSMatt Macy * Therefore we must verify and remove this histogram before 4063eda14cbcSMatt Macy * condensing. 
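 * The matching metaslab_group_histogram_add() happens further down, once
 * the space map histogram has been rebuilt.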
4064eda14cbcSMatt Macy */ 4065eda14cbcSMatt Macy metaslab_group_histogram_verify(mg); 4066eda14cbcSMatt Macy metaslab_class_histogram_verify(mg->mg_class); 4067eda14cbcSMatt Macy metaslab_group_histogram_remove(mg, msp); 4068eda14cbcSMatt Macy 4069eda14cbcSMatt Macy if (spa->spa_sync_pass == 1 && msp->ms_loaded && 4070eda14cbcSMatt Macy metaslab_should_condense(msp)) 4071eda14cbcSMatt Macy metaslab_condense(msp, tx); 4072eda14cbcSMatt Macy 4073eda14cbcSMatt Macy /* 4074eda14cbcSMatt Macy * We'll be going to disk to sync our space accounting, thus we 4075eda14cbcSMatt Macy * drop the ms_lock during that time so allocations coming from 4076eda14cbcSMatt Macy * open-context (ZIL) for future TXGs do not block. 4077eda14cbcSMatt Macy */ 4078eda14cbcSMatt Macy mutex_exit(&msp->ms_lock); 4079eda14cbcSMatt Macy space_map_t *log_sm = spa_syncing_log_sm(spa); 4080eda14cbcSMatt Macy if (log_sm != NULL) { 4081eda14cbcSMatt Macy ASSERT(spa_feature_is_enabled(spa, SPA_FEATURE_LOG_SPACEMAP)); 4082*716fd348SMartin Matuska if (metaslab_unflushed_txg(msp) == 0) 4083*716fd348SMartin Matuska metaslab_unflushed_add(msp, tx); 4084*716fd348SMartin Matuska else if (!metaslab_unflushed_dirty(msp)) 4085*716fd348SMartin Matuska metaslab_unflushed_bump(msp, tx, B_TRUE); 4086eda14cbcSMatt Macy 4087eda14cbcSMatt Macy space_map_write(log_sm, alloctree, SM_ALLOC, 4088eda14cbcSMatt Macy vd->vdev_id, tx); 4089eda14cbcSMatt Macy space_map_write(log_sm, msp->ms_freeing, SM_FREE, 4090eda14cbcSMatt Macy vd->vdev_id, tx); 4091eda14cbcSMatt Macy mutex_enter(&msp->ms_lock); 4092eda14cbcSMatt Macy 4093eda14cbcSMatt Macy ASSERT3U(spa->spa_unflushed_stats.sus_memused, >=, 4094eda14cbcSMatt Macy metaslab_unflushed_changes_memused(msp)); 4095eda14cbcSMatt Macy spa->spa_unflushed_stats.sus_memused -= 4096eda14cbcSMatt Macy metaslab_unflushed_changes_memused(msp); 4097eda14cbcSMatt Macy range_tree_remove_xor_add(alloctree, 4098eda14cbcSMatt Macy msp->ms_unflushed_frees, msp->ms_unflushed_allocs); 4099eda14cbcSMatt Macy range_tree_remove_xor_add(msp->ms_freeing, 4100eda14cbcSMatt Macy msp->ms_unflushed_allocs, msp->ms_unflushed_frees); 4101eda14cbcSMatt Macy spa->spa_unflushed_stats.sus_memused += 4102eda14cbcSMatt Macy metaslab_unflushed_changes_memused(msp); 4103eda14cbcSMatt Macy } else { 4104eda14cbcSMatt Macy ASSERT(!spa_feature_is_enabled(spa, SPA_FEATURE_LOG_SPACEMAP)); 4105eda14cbcSMatt Macy 4106eda14cbcSMatt Macy space_map_write(msp->ms_sm, alloctree, SM_ALLOC, 4107eda14cbcSMatt Macy SM_NO_VDEVID, tx); 4108eda14cbcSMatt Macy space_map_write(msp->ms_sm, msp->ms_freeing, SM_FREE, 4109eda14cbcSMatt Macy SM_NO_VDEVID, tx); 4110eda14cbcSMatt Macy mutex_enter(&msp->ms_lock); 4111eda14cbcSMatt Macy } 4112eda14cbcSMatt Macy 4113eda14cbcSMatt Macy msp->ms_allocated_space += range_tree_space(alloctree); 4114eda14cbcSMatt Macy ASSERT3U(msp->ms_allocated_space, >=, 4115eda14cbcSMatt Macy range_tree_space(msp->ms_freeing)); 4116eda14cbcSMatt Macy msp->ms_allocated_space -= range_tree_space(msp->ms_freeing); 4117eda14cbcSMatt Macy 4118eda14cbcSMatt Macy if (!range_tree_is_empty(msp->ms_checkpointing)) { 4119eda14cbcSMatt Macy ASSERT(spa_has_checkpoint(spa)); 4120eda14cbcSMatt Macy ASSERT3P(vd->vdev_checkpoint_sm, !=, NULL); 4121eda14cbcSMatt Macy 4122eda14cbcSMatt Macy /* 4123eda14cbcSMatt Macy * Since we are doing writes to disk and the ms_checkpointing 4124eda14cbcSMatt Macy * tree won't be changing during that time, we drop the 4125eda14cbcSMatt Macy * ms_lock while writing to the checkpoint space map, for the 4126eda14cbcSMatt Macy * same 
reason mentioned above. 4127eda14cbcSMatt Macy */ 4128eda14cbcSMatt Macy mutex_exit(&msp->ms_lock); 4129eda14cbcSMatt Macy space_map_write(vd->vdev_checkpoint_sm, 4130eda14cbcSMatt Macy msp->ms_checkpointing, SM_FREE, SM_NO_VDEVID, tx); 4131eda14cbcSMatt Macy mutex_enter(&msp->ms_lock); 4132eda14cbcSMatt Macy 4133eda14cbcSMatt Macy spa->spa_checkpoint_info.sci_dspace += 4134eda14cbcSMatt Macy range_tree_space(msp->ms_checkpointing); 4135eda14cbcSMatt Macy vd->vdev_stat.vs_checkpoint_space += 4136eda14cbcSMatt Macy range_tree_space(msp->ms_checkpointing); 4137eda14cbcSMatt Macy ASSERT3U(vd->vdev_stat.vs_checkpoint_space, ==, 4138eda14cbcSMatt Macy -space_map_allocated(vd->vdev_checkpoint_sm)); 4139eda14cbcSMatt Macy 4140eda14cbcSMatt Macy range_tree_vacate(msp->ms_checkpointing, NULL, NULL); 4141eda14cbcSMatt Macy } 4142eda14cbcSMatt Macy 4143eda14cbcSMatt Macy if (msp->ms_loaded) { 4144eda14cbcSMatt Macy /* 4145eda14cbcSMatt Macy * When the space map is loaded, we have an accurate 4146eda14cbcSMatt Macy * histogram in the range tree. This gives us an opportunity 4147eda14cbcSMatt Macy * to bring the space map's histogram up-to-date so we clear 4148eda14cbcSMatt Macy * it first before updating it. 4149eda14cbcSMatt Macy */ 4150eda14cbcSMatt Macy space_map_histogram_clear(msp->ms_sm); 4151eda14cbcSMatt Macy space_map_histogram_add(msp->ms_sm, msp->ms_allocatable, tx); 4152eda14cbcSMatt Macy 4153eda14cbcSMatt Macy /* 4154eda14cbcSMatt Macy * Since we've cleared the histogram we need to add back 4155eda14cbcSMatt Macy * any free space that has already been processed, plus 4156eda14cbcSMatt Macy * any deferred space. This allows the on-disk histogram 4157eda14cbcSMatt Macy * to accurately reflect all free space even if some space 4158eda14cbcSMatt Macy * is not yet available for allocation (i.e. deferred). 4159eda14cbcSMatt Macy */ 4160eda14cbcSMatt Macy space_map_histogram_add(msp->ms_sm, msp->ms_freed, tx); 4161eda14cbcSMatt Macy 4162eda14cbcSMatt Macy /* 4163eda14cbcSMatt Macy * Add back any deferred free space that has not been 4164eda14cbcSMatt Macy * added back into the in-core free tree yet. This will 4165eda14cbcSMatt Macy * ensure that we don't end up with a space map histogram 4166eda14cbcSMatt Macy * that is completely empty unless the metaslab is fully 4167eda14cbcSMatt Macy * allocated. 4168eda14cbcSMatt Macy */ 4169eda14cbcSMatt Macy for (int t = 0; t < TXG_DEFER_SIZE; t++) { 4170eda14cbcSMatt Macy space_map_histogram_add(msp->ms_sm, 4171eda14cbcSMatt Macy msp->ms_defer[t], tx); 4172eda14cbcSMatt Macy } 4173eda14cbcSMatt Macy } 4174eda14cbcSMatt Macy 4175eda14cbcSMatt Macy /* 4176eda14cbcSMatt Macy * Always add the free space from this sync pass to the space 4177eda14cbcSMatt Macy * map histogram. We want to make sure that the on-disk histogram 4178eda14cbcSMatt Macy * accounts for all free space. If the space map is not loaded, 4179eda14cbcSMatt Macy * then we will lose some accuracy but will correct it the next 4180eda14cbcSMatt Macy * time we load the space map. 
4181eda14cbcSMatt Macy */ 4182eda14cbcSMatt Macy space_map_histogram_add(msp->ms_sm, msp->ms_freeing, tx); 4183eda14cbcSMatt Macy metaslab_aux_histograms_update(msp); 4184eda14cbcSMatt Macy 4185eda14cbcSMatt Macy metaslab_group_histogram_add(mg, msp); 4186eda14cbcSMatt Macy metaslab_group_histogram_verify(mg); 4187eda14cbcSMatt Macy metaslab_class_histogram_verify(mg->mg_class); 4188eda14cbcSMatt Macy 4189eda14cbcSMatt Macy /* 4190eda14cbcSMatt Macy * For sync pass 1, we avoid traversing this txg's free range tree 4191eda14cbcSMatt Macy * and instead will just swap the pointers for freeing and freed. 4192eda14cbcSMatt Macy * We can safely do this since the freed_tree is guaranteed to be 4193eda14cbcSMatt Macy * empty on the initial pass. 4194eda14cbcSMatt Macy * 4195eda14cbcSMatt Macy * Keep in mind that even if we are currently using a log spacemap 4196eda14cbcSMatt Macy * we want current frees to end up in the ms_allocatable (but not 4197eda14cbcSMatt Macy * get appended to the ms_sm) so their ranges can be reused as usual. 4198eda14cbcSMatt Macy */ 4199eda14cbcSMatt Macy if (spa_sync_pass(spa) == 1) { 4200eda14cbcSMatt Macy range_tree_swap(&msp->ms_freeing, &msp->ms_freed); 4201eda14cbcSMatt Macy ASSERT0(msp->ms_allocated_this_txg); 4202eda14cbcSMatt Macy } else { 4203eda14cbcSMatt Macy range_tree_vacate(msp->ms_freeing, 4204eda14cbcSMatt Macy range_tree_add, msp->ms_freed); 4205eda14cbcSMatt Macy } 4206eda14cbcSMatt Macy msp->ms_allocated_this_txg += range_tree_space(alloctree); 4207eda14cbcSMatt Macy range_tree_vacate(alloctree, NULL, NULL); 4208eda14cbcSMatt Macy 4209eda14cbcSMatt Macy ASSERT0(range_tree_space(msp->ms_allocating[txg & TXG_MASK])); 4210eda14cbcSMatt Macy ASSERT0(range_tree_space(msp->ms_allocating[TXG_CLEAN(txg) 4211eda14cbcSMatt Macy & TXG_MASK])); 4212eda14cbcSMatt Macy ASSERT0(range_tree_space(msp->ms_freeing)); 4213eda14cbcSMatt Macy ASSERT0(range_tree_space(msp->ms_checkpointing)); 4214eda14cbcSMatt Macy 4215eda14cbcSMatt Macy mutex_exit(&msp->ms_lock); 4216eda14cbcSMatt Macy 4217eda14cbcSMatt Macy /* 4218eda14cbcSMatt Macy * Verify that the space map object ID has been recorded in the 4219eda14cbcSMatt Macy * vdev_ms_array. 
4220eda14cbcSMatt Macy */ 4221eda14cbcSMatt Macy uint64_t object; 4222eda14cbcSMatt Macy VERIFY0(dmu_read(mos, vd->vdev_ms_array, 4223eda14cbcSMatt Macy msp->ms_id * sizeof (uint64_t), sizeof (uint64_t), &object, 0)); 4224eda14cbcSMatt Macy VERIFY3U(object, ==, space_map_object(msp->ms_sm)); 4225eda14cbcSMatt Macy 4226eda14cbcSMatt Macy mutex_exit(&msp->ms_sync_lock); 4227eda14cbcSMatt Macy dmu_tx_commit(tx); 4228eda14cbcSMatt Macy } 4229eda14cbcSMatt Macy 4230eda14cbcSMatt Macy static void 4231eda14cbcSMatt Macy metaslab_evict(metaslab_t *msp, uint64_t txg) 4232eda14cbcSMatt Macy { 4233eda14cbcSMatt Macy if (!msp->ms_loaded || msp->ms_disabled != 0) 4234eda14cbcSMatt Macy return; 4235eda14cbcSMatt Macy 4236eda14cbcSMatt Macy for (int t = 1; t < TXG_CONCURRENT_STATES; t++) { 4237eda14cbcSMatt Macy VERIFY0(range_tree_space( 4238eda14cbcSMatt Macy msp->ms_allocating[(txg + t) & TXG_MASK])); 4239eda14cbcSMatt Macy } 4240eda14cbcSMatt Macy if (msp->ms_allocator != -1) 4241eda14cbcSMatt Macy metaslab_passivate(msp, msp->ms_weight & ~METASLAB_ACTIVE_MASK); 4242eda14cbcSMatt Macy 4243eda14cbcSMatt Macy if (!metaslab_debug_unload) 4244eda14cbcSMatt Macy metaslab_unload(msp); 4245eda14cbcSMatt Macy } 4246eda14cbcSMatt Macy 4247eda14cbcSMatt Macy /* 4248eda14cbcSMatt Macy * Called after a transaction group has completely synced to mark 4249eda14cbcSMatt Macy * all of the metaslab's free space as usable. 4250eda14cbcSMatt Macy */ 4251eda14cbcSMatt Macy void 4252eda14cbcSMatt Macy metaslab_sync_done(metaslab_t *msp, uint64_t txg) 4253eda14cbcSMatt Macy { 4254eda14cbcSMatt Macy metaslab_group_t *mg = msp->ms_group; 4255eda14cbcSMatt Macy vdev_t *vd = mg->mg_vd; 4256eda14cbcSMatt Macy spa_t *spa = vd->vdev_spa; 4257eda14cbcSMatt Macy range_tree_t **defer_tree; 4258eda14cbcSMatt Macy int64_t alloc_delta, defer_delta; 4259eda14cbcSMatt Macy boolean_t defer_allowed = B_TRUE; 4260eda14cbcSMatt Macy 4261eda14cbcSMatt Macy ASSERT(!vd->vdev_ishole); 4262eda14cbcSMatt Macy 4263eda14cbcSMatt Macy mutex_enter(&msp->ms_lock); 4264eda14cbcSMatt Macy 4265f9693befSMartin Matuska if (msp->ms_new) { 4266f9693befSMartin Matuska /* this is a new metaslab, add its capacity to the vdev */ 4267eda14cbcSMatt Macy metaslab_space_update(vd, mg->mg_class, 0, 0, msp->ms_size); 4268f9693befSMartin Matuska 4269f9693befSMartin Matuska /* there should be no allocations nor frees at this point */ 4270f9693befSMartin Matuska VERIFY0(msp->ms_allocated_this_txg); 4271f9693befSMartin Matuska VERIFY0(range_tree_space(msp->ms_freed)); 4272eda14cbcSMatt Macy } 4273f9693befSMartin Matuska 4274eda14cbcSMatt Macy ASSERT0(range_tree_space(msp->ms_freeing)); 4275eda14cbcSMatt Macy ASSERT0(range_tree_space(msp->ms_checkpointing)); 4276eda14cbcSMatt Macy 4277eda14cbcSMatt Macy defer_tree = &msp->ms_defer[txg % TXG_DEFER_SIZE]; 4278eda14cbcSMatt Macy 4279eda14cbcSMatt Macy uint64_t free_space = metaslab_class_get_space(spa_normal_class(spa)) - 4280eda14cbcSMatt Macy metaslab_class_get_alloc(spa_normal_class(spa)); 4281eda14cbcSMatt Macy if (free_space <= spa_get_slop_space(spa) || vd->vdev_removing) { 4282eda14cbcSMatt Macy defer_allowed = B_FALSE; 4283eda14cbcSMatt Macy } 4284eda14cbcSMatt Macy 4285eda14cbcSMatt Macy defer_delta = 0; 4286eda14cbcSMatt Macy alloc_delta = msp->ms_allocated_this_txg - 4287eda14cbcSMatt Macy range_tree_space(msp->ms_freed); 4288eda14cbcSMatt Macy 4289eda14cbcSMatt Macy if (defer_allowed) { 4290eda14cbcSMatt Macy defer_delta = range_tree_space(msp->ms_freed) - 4291eda14cbcSMatt Macy range_tree_space(*defer_tree); 
4292eda14cbcSMatt Macy 	} else {
4293eda14cbcSMatt Macy 		defer_delta -= range_tree_space(*defer_tree);
4294eda14cbcSMatt Macy 	}
4295eda14cbcSMatt Macy 	metaslab_space_update(vd, mg->mg_class, alloc_delta + defer_delta,
4296eda14cbcSMatt Macy 	    defer_delta, 0);
4297eda14cbcSMatt Macy 
4298eda14cbcSMatt Macy 	if (spa_syncing_log_sm(spa) == NULL) {
4299eda14cbcSMatt Macy 		/*
4300eda14cbcSMatt Macy 		 * If there's a metaslab_load() in progress and we don't have
4301eda14cbcSMatt Macy 		 * a log space map, it means that we probably wrote to the
4302eda14cbcSMatt Macy 		 * metaslab's space map. If this is the case, we need to
4303eda14cbcSMatt Macy 		 * make sure that we wait for the load to complete so that we
4304eda14cbcSMatt Macy 		 * have a consistent view at the in-core side of the metaslab.
4305eda14cbcSMatt Macy 		 */
4306eda14cbcSMatt Macy 		metaslab_load_wait(msp);
4307eda14cbcSMatt Macy 	} else {
4308eda14cbcSMatt Macy 		ASSERT(spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP));
4309eda14cbcSMatt Macy 	}
4310eda14cbcSMatt Macy 
4311eda14cbcSMatt Macy 	/*
4312eda14cbcSMatt Macy 	 * When auto-trimming is enabled, free ranges which are added to
4313eda14cbcSMatt Macy 	 * ms_allocatable are also added to ms_trim. The ms_trim tree is
4314eda14cbcSMatt Macy 	 * periodically consumed by the vdev_autotrim_thread() which issues
4315eda14cbcSMatt Macy 	 * trims for all ranges and then vacates the tree. The ms_trim tree
4316eda14cbcSMatt Macy 	 * can be discarded at any time with the sole consequence of recent
4317eda14cbcSMatt Macy 	 * frees not being trimmed.
4318eda14cbcSMatt Macy 	 */
4319eda14cbcSMatt Macy 	if (spa_get_autotrim(spa) == SPA_AUTOTRIM_ON) {
4320eda14cbcSMatt Macy 		range_tree_walk(*defer_tree, range_tree_add, msp->ms_trim);
4321eda14cbcSMatt Macy 		if (!defer_allowed) {
4322eda14cbcSMatt Macy 			range_tree_walk(msp->ms_freed, range_tree_add,
4323eda14cbcSMatt Macy 			    msp->ms_trim);
4324eda14cbcSMatt Macy 		}
4325eda14cbcSMatt Macy 	} else {
4326eda14cbcSMatt Macy 		range_tree_vacate(msp->ms_trim, NULL, NULL);
4327eda14cbcSMatt Macy 	}
4328eda14cbcSMatt Macy 
4329eda14cbcSMatt Macy 	/*
4330eda14cbcSMatt Macy 	 * Move the frees from the defer_tree back to the free
4331eda14cbcSMatt Macy 	 * range tree (if it's loaded). Swap the freed_tree and
4332eda14cbcSMatt Macy 	 * the defer_tree -- this is safe to do because we've
4333eda14cbcSMatt Macy 	 * just emptied out the defer_tree.
4334eda14cbcSMatt Macy 	 */
4335eda14cbcSMatt Macy 	range_tree_vacate(*defer_tree,
4336eda14cbcSMatt Macy 	    msp->ms_loaded ? range_tree_add : NULL, msp->ms_allocatable);
4337eda14cbcSMatt Macy 	if (defer_allowed) {
4338eda14cbcSMatt Macy 		range_tree_swap(&msp->ms_freed, defer_tree);
4339eda14cbcSMatt Macy 	} else {
4340eda14cbcSMatt Macy 		range_tree_vacate(msp->ms_freed,
4341eda14cbcSMatt Macy 		    msp->ms_loaded ? range_tree_add : NULL,
4342eda14cbcSMatt Macy 		    msp->ms_allocatable);
4343eda14cbcSMatt Macy 	}
4344eda14cbcSMatt Macy 
4345eda14cbcSMatt Macy 	msp->ms_synced_length = space_map_length(msp->ms_sm);
4346eda14cbcSMatt Macy 
4347eda14cbcSMatt Macy 	msp->ms_deferspace += defer_delta;
4348eda14cbcSMatt Macy 	ASSERT3S(msp->ms_deferspace, >=, 0);
4349eda14cbcSMatt Macy 	ASSERT3S(msp->ms_deferspace, <=, msp->ms_size);
4350eda14cbcSMatt Macy 	if (msp->ms_deferspace != 0) {
4351eda14cbcSMatt Macy 		/*
4352eda14cbcSMatt Macy 		 * Keep syncing this metaslab until all deferred frees
4353eda14cbcSMatt Macy 		 * are back in circulation.
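		 * (i.e. for up to TXG_DEFER_SIZE (2) more txgs).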
4354eda14cbcSMatt Macy 		 */
4355eda14cbcSMatt Macy 		vdev_dirty(vd, VDD_METASLAB, msp, txg + 1);
4356eda14cbcSMatt Macy 	}
4357eda14cbcSMatt Macy 	metaslab_aux_histograms_update_done(msp, defer_allowed);
4358eda14cbcSMatt Macy 
4359eda14cbcSMatt Macy 	if (msp->ms_new) {
4360eda14cbcSMatt Macy 		msp->ms_new = B_FALSE;
4361eda14cbcSMatt Macy 		mutex_enter(&mg->mg_lock);
4362eda14cbcSMatt Macy 		mg->mg_ms_ready++;
4363eda14cbcSMatt Macy 		mutex_exit(&mg->mg_lock);
4364eda14cbcSMatt Macy 	}
4365eda14cbcSMatt Macy 
4366eda14cbcSMatt Macy 	/*
4367eda14cbcSMatt Macy 	 * Re-sort metaslab within its group now that we've adjusted
4368eda14cbcSMatt Macy 	 * its allocatable space.
4369eda14cbcSMatt Macy 	 */
4370eda14cbcSMatt Macy 	metaslab_recalculate_weight_and_sort(msp);
4371eda14cbcSMatt Macy 
4372eda14cbcSMatt Macy 	ASSERT0(range_tree_space(msp->ms_allocating[txg & TXG_MASK]));
4373eda14cbcSMatt Macy 	ASSERT0(range_tree_space(msp->ms_freeing));
4374eda14cbcSMatt Macy 	ASSERT0(range_tree_space(msp->ms_freed));
4375eda14cbcSMatt Macy 	ASSERT0(range_tree_space(msp->ms_checkpointing));
4376eda14cbcSMatt Macy 	msp->ms_allocating_total -= msp->ms_allocated_this_txg;
4377eda14cbcSMatt Macy 	msp->ms_allocated_this_txg = 0;
4378eda14cbcSMatt Macy 	mutex_exit(&msp->ms_lock);
4379eda14cbcSMatt Macy }
4380eda14cbcSMatt Macy 
4381eda14cbcSMatt Macy void
4382eda14cbcSMatt Macy metaslab_sync_reassess(metaslab_group_t *mg)
4383eda14cbcSMatt Macy {
4384eda14cbcSMatt Macy 	spa_t *spa = mg->mg_class->mc_spa;
4385eda14cbcSMatt Macy 
4386eda14cbcSMatt Macy 	spa_config_enter(spa, SCL_ALLOC, FTAG, RW_READER);
4387eda14cbcSMatt Macy 	metaslab_group_alloc_update(mg);
4388eda14cbcSMatt Macy 	mg->mg_fragmentation = metaslab_group_fragmentation(mg);
4389eda14cbcSMatt Macy 
4390eda14cbcSMatt Macy 	/*
4391eda14cbcSMatt Macy 	 * Preload the next potential metaslabs but only on active
4392eda14cbcSMatt Macy 	 * metaslab groups. We can get into a state where the metaslab
4393eda14cbcSMatt Macy 	 * is no longer active since we dirty metaslabs as we remove a
4394eda14cbcSMatt Macy 	 * device, thus potentially making the metaslab group eligible
4395eda14cbcSMatt Macy 	 * for preloading.
4396eda14cbcSMatt Macy 	 */
4397eda14cbcSMatt Macy 	if (mg->mg_activation_count > 0) {
4398eda14cbcSMatt Macy 		metaslab_group_preload(mg);
4399eda14cbcSMatt Macy 	}
4400eda14cbcSMatt Macy 	spa_config_exit(spa, SCL_ALLOC, FTAG);
4401eda14cbcSMatt Macy }
4402eda14cbcSMatt Macy 
4403eda14cbcSMatt Macy /*
4404eda14cbcSMatt Macy  * When writing a ditto block (i.e. more than one DVA for a given BP) on
4405eda14cbcSMatt Macy  * the same vdev as an existing DVA of this BP, then try to allocate it
4406eda14cbcSMatt Macy  * on a different metaslab than existing DVAs (i.e. a unique metaslab).
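 * For example, on a vdev whose metaslabs are 512M (vdev_ms_shift == 29),
 * an existing DVA at offset 0x20000000 maps to ms_id 1, so a second copy
 * placed anywhere in metaslab 1 of that vdev would not be unique.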
4407eda14cbcSMatt Macy  */
4408eda14cbcSMatt Macy static boolean_t
4409eda14cbcSMatt Macy metaslab_is_unique(metaslab_t *msp, dva_t *dva)
4410eda14cbcSMatt Macy {
4411eda14cbcSMatt Macy 	uint64_t dva_ms_id;
4412eda14cbcSMatt Macy 
4413eda14cbcSMatt Macy 	if (DVA_GET_ASIZE(dva) == 0)
4414eda14cbcSMatt Macy 		return (B_TRUE);
4415eda14cbcSMatt Macy 
4416eda14cbcSMatt Macy 	if (msp->ms_group->mg_vd->vdev_id != DVA_GET_VDEV(dva))
4417eda14cbcSMatt Macy 		return (B_TRUE);
4418eda14cbcSMatt Macy 
4419eda14cbcSMatt Macy 	dva_ms_id = DVA_GET_OFFSET(dva) >> msp->ms_group->mg_vd->vdev_ms_shift;
4420eda14cbcSMatt Macy 
4421eda14cbcSMatt Macy 	return (msp->ms_id != dva_ms_id);
4422eda14cbcSMatt Macy }
4423eda14cbcSMatt Macy 
4424eda14cbcSMatt Macy /*
4425eda14cbcSMatt Macy  * ==========================================================================
4426eda14cbcSMatt Macy  * Metaslab allocation tracing facility
4427eda14cbcSMatt Macy  * ==========================================================================
4428eda14cbcSMatt Macy  */
4429eda14cbcSMatt Macy 
4430eda14cbcSMatt Macy /*
4431eda14cbcSMatt Macy  * Add an allocation trace element to the allocation tracing list.
4432eda14cbcSMatt Macy  */
4433eda14cbcSMatt Macy static void
4434eda14cbcSMatt Macy metaslab_trace_add(zio_alloc_list_t *zal, metaslab_group_t *mg,
4435eda14cbcSMatt Macy     metaslab_t *msp, uint64_t psize, uint32_t dva_id, uint64_t offset,
4436eda14cbcSMatt Macy     int allocator)
4437eda14cbcSMatt Macy {
4438eda14cbcSMatt Macy 	metaslab_alloc_trace_t *mat;
4439eda14cbcSMatt Macy 
4440eda14cbcSMatt Macy 	if (!metaslab_trace_enabled)
4441eda14cbcSMatt Macy 		return;
4442eda14cbcSMatt Macy 
4443eda14cbcSMatt Macy 	/*
4444eda14cbcSMatt Macy 	 * When the tracing list reaches its maximum we remove
4445eda14cbcSMatt Macy 	 * the second element in the list before adding a new one.
4446eda14cbcSMatt Macy 	 * By removing the second element we preserve the original
4447eda14cbcSMatt Macy 	 * entry as a clue to what allocation steps have already been
4448eda14cbcSMatt Macy 	 * performed.
4449eda14cbcSMatt Macy 	 */
4450eda14cbcSMatt Macy 	if (zal->zal_size == metaslab_trace_max_entries) {
4451eda14cbcSMatt Macy 		metaslab_alloc_trace_t *mat_next;
4452eda14cbcSMatt Macy #ifdef ZFS_DEBUG
4453eda14cbcSMatt Macy 		panic("too many entries in allocation list");
4454eda14cbcSMatt Macy #endif
4455eda14cbcSMatt Macy 		METASLABSTAT_BUMP(metaslabstat_trace_over_limit);
4456eda14cbcSMatt Macy 		zal->zal_size--;
4457eda14cbcSMatt Macy 		mat_next = list_next(&zal->zal_list, list_head(&zal->zal_list));
4458eda14cbcSMatt Macy 		list_remove(&zal->zal_list, mat_next);
4459eda14cbcSMatt Macy 		kmem_cache_free(metaslab_alloc_trace_cache, mat_next);
4460eda14cbcSMatt Macy 	}
4461eda14cbcSMatt Macy 
4462eda14cbcSMatt Macy 	mat = kmem_cache_alloc(metaslab_alloc_trace_cache, KM_SLEEP);
4463eda14cbcSMatt Macy 	list_link_init(&mat->mat_list_node);
4464eda14cbcSMatt Macy 	mat->mat_mg = mg;
4465eda14cbcSMatt Macy 	mat->mat_msp = msp;
4466eda14cbcSMatt Macy 	mat->mat_size = psize;
4467eda14cbcSMatt Macy 	mat->mat_dva_id = dva_id;
4468eda14cbcSMatt Macy 	mat->mat_offset = offset;
4469eda14cbcSMatt Macy 	mat->mat_weight = 0;
4470eda14cbcSMatt Macy 	mat->mat_allocator = allocator;
4471eda14cbcSMatt Macy 
4472eda14cbcSMatt Macy 	if (msp != NULL)
4473eda14cbcSMatt Macy 		mat->mat_weight = msp->ms_weight;
4474eda14cbcSMatt Macy 
4475eda14cbcSMatt Macy 	/*
4476eda14cbcSMatt Macy 	 * The list is part of the zio so locking is not required. Only
4477eda14cbcSMatt Macy 	 * a single thread will perform allocations for a given zio.
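	 * The lifecycle is, roughly: the issuing thread calls
	 * metaslab_trace_init() on the zio's list, makes one or more
	 * allocation attempts that append entries here, and then calls
	 * metaslab_trace_fini() (the real callers live in zio.c).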
4478eda14cbcSMatt Macy */ 4479eda14cbcSMatt Macy list_insert_tail(&zal->zal_list, mat); 4480eda14cbcSMatt Macy zal->zal_size++; 4481eda14cbcSMatt Macy 4482eda14cbcSMatt Macy ASSERT3U(zal->zal_size, <=, metaslab_trace_max_entries); 4483eda14cbcSMatt Macy } 4484eda14cbcSMatt Macy 4485eda14cbcSMatt Macy void 4486eda14cbcSMatt Macy metaslab_trace_init(zio_alloc_list_t *zal) 4487eda14cbcSMatt Macy { 4488eda14cbcSMatt Macy list_create(&zal->zal_list, sizeof (metaslab_alloc_trace_t), 4489eda14cbcSMatt Macy offsetof(metaslab_alloc_trace_t, mat_list_node)); 4490eda14cbcSMatt Macy zal->zal_size = 0; 4491eda14cbcSMatt Macy } 4492eda14cbcSMatt Macy 4493eda14cbcSMatt Macy void 4494eda14cbcSMatt Macy metaslab_trace_fini(zio_alloc_list_t *zal) 4495eda14cbcSMatt Macy { 4496eda14cbcSMatt Macy metaslab_alloc_trace_t *mat; 4497eda14cbcSMatt Macy 4498eda14cbcSMatt Macy while ((mat = list_remove_head(&zal->zal_list)) != NULL) 4499eda14cbcSMatt Macy kmem_cache_free(metaslab_alloc_trace_cache, mat); 4500eda14cbcSMatt Macy list_destroy(&zal->zal_list); 4501eda14cbcSMatt Macy zal->zal_size = 0; 4502eda14cbcSMatt Macy } 4503eda14cbcSMatt Macy 4504eda14cbcSMatt Macy /* 4505eda14cbcSMatt Macy * ========================================================================== 4506eda14cbcSMatt Macy * Metaslab block operations 4507eda14cbcSMatt Macy * ========================================================================== 4508eda14cbcSMatt Macy */ 4509eda14cbcSMatt Macy 4510eda14cbcSMatt Macy static void 4511eda14cbcSMatt Macy metaslab_group_alloc_increment(spa_t *spa, uint64_t vdev, void *tag, int flags, 4512eda14cbcSMatt Macy int allocator) 4513eda14cbcSMatt Macy { 4514eda14cbcSMatt Macy if (!(flags & METASLAB_ASYNC_ALLOC) || 4515eda14cbcSMatt Macy (flags & METASLAB_DONT_THROTTLE)) 4516eda14cbcSMatt Macy return; 4517eda14cbcSMatt Macy 4518eda14cbcSMatt Macy metaslab_group_t *mg = vdev_lookup_top(spa, vdev)->vdev_mg; 4519eda14cbcSMatt Macy if (!mg->mg_class->mc_alloc_throttle_enabled) 4520eda14cbcSMatt Macy return; 4521eda14cbcSMatt Macy 4522eda14cbcSMatt Macy metaslab_group_allocator_t *mga = &mg->mg_allocator[allocator]; 4523eda14cbcSMatt Macy (void) zfs_refcount_add(&mga->mga_alloc_queue_depth, tag); 4524eda14cbcSMatt Macy } 4525eda14cbcSMatt Macy 4526eda14cbcSMatt Macy static void 4527eda14cbcSMatt Macy metaslab_group_increment_qdepth(metaslab_group_t *mg, int allocator) 4528eda14cbcSMatt Macy { 4529eda14cbcSMatt Macy metaslab_group_allocator_t *mga = &mg->mg_allocator[allocator]; 45307877fdebSMatt Macy metaslab_class_allocator_t *mca = 45317877fdebSMatt Macy &mg->mg_class->mc_allocator[allocator]; 4532eda14cbcSMatt Macy uint64_t max = mg->mg_max_alloc_queue_depth; 4533eda14cbcSMatt Macy uint64_t cur = mga->mga_cur_max_alloc_queue_depth; 4534eda14cbcSMatt Macy while (cur < max) { 4535eda14cbcSMatt Macy if (atomic_cas_64(&mga->mga_cur_max_alloc_queue_depth, 4536eda14cbcSMatt Macy cur, cur + 1) == cur) { 45377877fdebSMatt Macy atomic_inc_64(&mca->mca_alloc_max_slots); 4538eda14cbcSMatt Macy return; 4539eda14cbcSMatt Macy } 4540eda14cbcSMatt Macy cur = mga->mga_cur_max_alloc_queue_depth; 4541eda14cbcSMatt Macy } 4542eda14cbcSMatt Macy } 4543eda14cbcSMatt Macy 4544eda14cbcSMatt Macy void 4545eda14cbcSMatt Macy metaslab_group_alloc_decrement(spa_t *spa, uint64_t vdev, void *tag, int flags, 4546eda14cbcSMatt Macy int allocator, boolean_t io_complete) 4547eda14cbcSMatt Macy { 4548eda14cbcSMatt Macy if (!(flags & METASLAB_ASYNC_ALLOC) || 4549eda14cbcSMatt Macy (flags & METASLAB_DONT_THROTTLE)) 4550eda14cbcSMatt Macy return; 
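	/*
	 * Past this point the allocation was throttled: drop this tag's
	 * reference on the allocator's queue depth and, once the I/O has
	 * completed, let metaslab_group_increment_qdepth() grow the
	 * dynamic queue depth limit.
	 */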
4551eda14cbcSMatt Macy 4552eda14cbcSMatt Macy metaslab_group_t *mg = vdev_lookup_top(spa, vdev)->vdev_mg; 4553eda14cbcSMatt Macy if (!mg->mg_class->mc_alloc_throttle_enabled) 4554eda14cbcSMatt Macy return; 4555eda14cbcSMatt Macy 4556eda14cbcSMatt Macy metaslab_group_allocator_t *mga = &mg->mg_allocator[allocator]; 4557eda14cbcSMatt Macy (void) zfs_refcount_remove(&mga->mga_alloc_queue_depth, tag); 4558eda14cbcSMatt Macy if (io_complete) 4559eda14cbcSMatt Macy metaslab_group_increment_qdepth(mg, allocator); 4560eda14cbcSMatt Macy } 4561eda14cbcSMatt Macy 4562eda14cbcSMatt Macy void 4563eda14cbcSMatt Macy metaslab_group_alloc_verify(spa_t *spa, const blkptr_t *bp, void *tag, 4564eda14cbcSMatt Macy int allocator) 4565eda14cbcSMatt Macy { 4566eda14cbcSMatt Macy #ifdef ZFS_DEBUG 4567eda14cbcSMatt Macy const dva_t *dva = bp->blk_dva; 4568eda14cbcSMatt Macy int ndvas = BP_GET_NDVAS(bp); 4569eda14cbcSMatt Macy 4570eda14cbcSMatt Macy for (int d = 0; d < ndvas; d++) { 4571eda14cbcSMatt Macy uint64_t vdev = DVA_GET_VDEV(&dva[d]); 4572eda14cbcSMatt Macy metaslab_group_t *mg = vdev_lookup_top(spa, vdev)->vdev_mg; 4573eda14cbcSMatt Macy metaslab_group_allocator_t *mga = &mg->mg_allocator[allocator]; 4574eda14cbcSMatt Macy VERIFY(zfs_refcount_not_held(&mga->mga_alloc_queue_depth, tag)); 4575eda14cbcSMatt Macy } 4576eda14cbcSMatt Macy #endif 4577eda14cbcSMatt Macy } 4578eda14cbcSMatt Macy 4579eda14cbcSMatt Macy static uint64_t 4580eda14cbcSMatt Macy metaslab_block_alloc(metaslab_t *msp, uint64_t size, uint64_t txg) 4581eda14cbcSMatt Macy { 4582eda14cbcSMatt Macy uint64_t start; 4583eda14cbcSMatt Macy range_tree_t *rt = msp->ms_allocatable; 4584eda14cbcSMatt Macy metaslab_class_t *mc = msp->ms_group->mg_class; 4585eda14cbcSMatt Macy 4586eda14cbcSMatt Macy ASSERT(MUTEX_HELD(&msp->ms_lock)); 4587eda14cbcSMatt Macy VERIFY(!msp->ms_condensing); 4588eda14cbcSMatt Macy VERIFY0(msp->ms_disabled); 4589eda14cbcSMatt Macy 4590eda14cbcSMatt Macy start = mc->mc_ops->msop_alloc(msp, size); 4591eda14cbcSMatt Macy if (start != -1ULL) { 4592eda14cbcSMatt Macy metaslab_group_t *mg = msp->ms_group; 4593eda14cbcSMatt Macy vdev_t *vd = mg->mg_vd; 4594eda14cbcSMatt Macy 4595eda14cbcSMatt Macy VERIFY0(P2PHASE(start, 1ULL << vd->vdev_ashift)); 4596eda14cbcSMatt Macy VERIFY0(P2PHASE(size, 1ULL << vd->vdev_ashift)); 4597eda14cbcSMatt Macy VERIFY3U(range_tree_space(rt) - size, <=, msp->ms_size); 4598eda14cbcSMatt Macy range_tree_remove(rt, start, size); 4599eda14cbcSMatt Macy range_tree_clear(msp->ms_trim, start, size); 4600eda14cbcSMatt Macy 4601eda14cbcSMatt Macy if (range_tree_is_empty(msp->ms_allocating[txg & TXG_MASK])) 4602eda14cbcSMatt Macy vdev_dirty(mg->mg_vd, VDD_METASLAB, msp, txg); 4603eda14cbcSMatt Macy 4604eda14cbcSMatt Macy range_tree_add(msp->ms_allocating[txg & TXG_MASK], start, size); 4605eda14cbcSMatt Macy msp->ms_allocating_total += size; 4606eda14cbcSMatt Macy 4607eda14cbcSMatt Macy /* Track the last successful allocation */ 4608eda14cbcSMatt Macy msp->ms_alloc_txg = txg; 4609eda14cbcSMatt Macy metaslab_verify_space(msp, txg); 4610eda14cbcSMatt Macy } 4611eda14cbcSMatt Macy 4612eda14cbcSMatt Macy /* 4613eda14cbcSMatt Macy * Now that we've attempted the allocation we need to update the 4614eda14cbcSMatt Macy * metaslab's maximum block size since it may have changed. 
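	 * (Carving the allocation out of the largest free segment can
	 * shrink the largest remaining contiguous run, so the cached
	 * ms_max_size consulted by metaslab_should_allocate() must be
	 * refreshed.)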
4615eda14cbcSMatt Macy */ 4616eda14cbcSMatt Macy msp->ms_max_size = metaslab_largest_allocatable(msp); 4617eda14cbcSMatt Macy return (start); 4618eda14cbcSMatt Macy } 4619eda14cbcSMatt Macy 4620eda14cbcSMatt Macy /* 4621eda14cbcSMatt Macy * Find the metaslab with the highest weight that is less than what we've 4622eda14cbcSMatt Macy * already tried. In the common case, this means that we will examine each 4623eda14cbcSMatt Macy * metaslab at most once. Note that concurrent callers could reorder metaslabs 4624eda14cbcSMatt Macy * by activation/passivation once we have dropped the mg_lock. If a metaslab is 4625eda14cbcSMatt Macy * activated by another thread, and we fail to allocate from the metaslab we 4626eda14cbcSMatt Macy * have selected, we may not try the newly-activated metaslab, and instead 4627eda14cbcSMatt Macy * activate another metaslab. This is not optimal, but generally does not cause 4628eda14cbcSMatt Macy * any problems (a possible exception being if every metaslab is completely full 4629eda14cbcSMatt Macy * except for the newly-activated metaslab which we fail to examine). 4630eda14cbcSMatt Macy */ 4631eda14cbcSMatt Macy static metaslab_t * 4632eda14cbcSMatt Macy find_valid_metaslab(metaslab_group_t *mg, uint64_t activation_weight, 4633eda14cbcSMatt Macy dva_t *dva, int d, boolean_t want_unique, uint64_t asize, int allocator, 4634eda14cbcSMatt Macy boolean_t try_hard, zio_alloc_list_t *zal, metaslab_t *search, 4635eda14cbcSMatt Macy boolean_t *was_active) 4636eda14cbcSMatt Macy { 4637eda14cbcSMatt Macy avl_index_t idx; 4638eda14cbcSMatt Macy avl_tree_t *t = &mg->mg_metaslab_tree; 4639eda14cbcSMatt Macy metaslab_t *msp = avl_find(t, search, &idx); 4640eda14cbcSMatt Macy if (msp == NULL) 4641eda14cbcSMatt Macy msp = avl_nearest(t, idx, AVL_AFTER); 4642eda14cbcSMatt Macy 46437877fdebSMatt Macy int tries = 0; 4644eda14cbcSMatt Macy for (; msp != NULL; msp = AVL_NEXT(t, msp)) { 4645eda14cbcSMatt Macy int i; 46467877fdebSMatt Macy 46477877fdebSMatt Macy if (!try_hard && tries > zfs_metaslab_find_max_tries) { 46487877fdebSMatt Macy METASLABSTAT_BUMP(metaslabstat_too_many_tries); 46497877fdebSMatt Macy return (NULL); 46507877fdebSMatt Macy } 46517877fdebSMatt Macy tries++; 46527877fdebSMatt Macy 4653eda14cbcSMatt Macy if (!metaslab_should_allocate(msp, asize, try_hard)) { 4654eda14cbcSMatt Macy metaslab_trace_add(zal, mg, msp, asize, d, 4655eda14cbcSMatt Macy TRACE_TOO_SMALL, allocator); 4656eda14cbcSMatt Macy continue; 4657eda14cbcSMatt Macy } 4658eda14cbcSMatt Macy 4659eda14cbcSMatt Macy /* 4660eda14cbcSMatt Macy * If the selected metaslab is condensing or disabled, 4661eda14cbcSMatt Macy * skip it. 4662eda14cbcSMatt Macy */ 4663eda14cbcSMatt Macy if (msp->ms_condensing || msp->ms_disabled > 0) 4664eda14cbcSMatt Macy continue; 4665eda14cbcSMatt Macy 4666eda14cbcSMatt Macy *was_active = msp->ms_allocator != -1; 4667eda14cbcSMatt Macy /* 4668eda14cbcSMatt Macy * If we're activating as primary, this is our first allocation 4669eda14cbcSMatt Macy * from this disk, so we don't need to check how close we are. 4670eda14cbcSMatt Macy * If the metaslab under consideration was already active, 4671eda14cbcSMatt Macy * we're getting desperate enough to steal another allocator's 4672eda14cbcSMatt Macy * metaslab, so we still don't care about distances. 
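		 * Otherwise, the loop below moves on to another metaslab
		 * whenever want_unique is set and an already-chosen DVA of
		 * this BP lands in the one under consideration (see
		 * metaslab_is_unique() above).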
4673eda14cbcSMatt Macy */ 4674eda14cbcSMatt Macy if (activation_weight == METASLAB_WEIGHT_PRIMARY || *was_active) 4675eda14cbcSMatt Macy break; 4676eda14cbcSMatt Macy 4677eda14cbcSMatt Macy for (i = 0; i < d; i++) { 4678eda14cbcSMatt Macy if (want_unique && 4679eda14cbcSMatt Macy !metaslab_is_unique(msp, &dva[i])) 4680eda14cbcSMatt Macy break; /* try another metaslab */ 4681eda14cbcSMatt Macy } 4682eda14cbcSMatt Macy if (i == d) 4683eda14cbcSMatt Macy break; 4684eda14cbcSMatt Macy } 4685eda14cbcSMatt Macy 4686eda14cbcSMatt Macy if (msp != NULL) { 4687eda14cbcSMatt Macy search->ms_weight = msp->ms_weight; 4688eda14cbcSMatt Macy search->ms_start = msp->ms_start + 1; 4689eda14cbcSMatt Macy search->ms_allocator = msp->ms_allocator; 4690eda14cbcSMatt Macy search->ms_primary = msp->ms_primary; 4691eda14cbcSMatt Macy } 4692eda14cbcSMatt Macy return (msp); 4693eda14cbcSMatt Macy } 4694eda14cbcSMatt Macy 4695eda14cbcSMatt Macy static void 4696eda14cbcSMatt Macy metaslab_active_mask_verify(metaslab_t *msp) 4697eda14cbcSMatt Macy { 4698eda14cbcSMatt Macy ASSERT(MUTEX_HELD(&msp->ms_lock)); 4699eda14cbcSMatt Macy 4700eda14cbcSMatt Macy if ((zfs_flags & ZFS_DEBUG_METASLAB_VERIFY) == 0) 4701eda14cbcSMatt Macy return; 4702eda14cbcSMatt Macy 4703eda14cbcSMatt Macy if ((msp->ms_weight & METASLAB_ACTIVE_MASK) == 0) 4704eda14cbcSMatt Macy return; 4705eda14cbcSMatt Macy 4706eda14cbcSMatt Macy if (msp->ms_weight & METASLAB_WEIGHT_PRIMARY) { 4707eda14cbcSMatt Macy VERIFY0(msp->ms_weight & METASLAB_WEIGHT_SECONDARY); 4708eda14cbcSMatt Macy VERIFY0(msp->ms_weight & METASLAB_WEIGHT_CLAIM); 4709eda14cbcSMatt Macy VERIFY3S(msp->ms_allocator, !=, -1); 4710eda14cbcSMatt Macy VERIFY(msp->ms_primary); 4711eda14cbcSMatt Macy return; 4712eda14cbcSMatt Macy } 4713eda14cbcSMatt Macy 4714eda14cbcSMatt Macy if (msp->ms_weight & METASLAB_WEIGHT_SECONDARY) { 4715eda14cbcSMatt Macy VERIFY0(msp->ms_weight & METASLAB_WEIGHT_PRIMARY); 4716eda14cbcSMatt Macy VERIFY0(msp->ms_weight & METASLAB_WEIGHT_CLAIM); 4717eda14cbcSMatt Macy VERIFY3S(msp->ms_allocator, !=, -1); 4718eda14cbcSMatt Macy VERIFY(!msp->ms_primary); 4719eda14cbcSMatt Macy return; 4720eda14cbcSMatt Macy } 4721eda14cbcSMatt Macy 4722eda14cbcSMatt Macy if (msp->ms_weight & METASLAB_WEIGHT_CLAIM) { 4723eda14cbcSMatt Macy VERIFY0(msp->ms_weight & METASLAB_WEIGHT_PRIMARY); 4724eda14cbcSMatt Macy VERIFY0(msp->ms_weight & METASLAB_WEIGHT_SECONDARY); 4725eda14cbcSMatt Macy VERIFY3S(msp->ms_allocator, ==, -1); 4726eda14cbcSMatt Macy return; 4727eda14cbcSMatt Macy } 4728eda14cbcSMatt Macy } 4729eda14cbcSMatt Macy 4730eda14cbcSMatt Macy static uint64_t 4731eda14cbcSMatt Macy metaslab_group_alloc_normal(metaslab_group_t *mg, zio_alloc_list_t *zal, 4732eda14cbcSMatt Macy uint64_t asize, uint64_t txg, boolean_t want_unique, dva_t *dva, int d, 4733eda14cbcSMatt Macy int allocator, boolean_t try_hard) 4734eda14cbcSMatt Macy { 4735eda14cbcSMatt Macy metaslab_t *msp = NULL; 4736eda14cbcSMatt Macy uint64_t offset = -1ULL; 4737eda14cbcSMatt Macy 4738eda14cbcSMatt Macy uint64_t activation_weight = METASLAB_WEIGHT_PRIMARY; 4739eda14cbcSMatt Macy for (int i = 0; i < d; i++) { 4740eda14cbcSMatt Macy if (activation_weight == METASLAB_WEIGHT_PRIMARY && 4741eda14cbcSMatt Macy DVA_GET_VDEV(&dva[i]) == mg->mg_vd->vdev_id) { 4742eda14cbcSMatt Macy activation_weight = METASLAB_WEIGHT_SECONDARY; 4743eda14cbcSMatt Macy } else if (activation_weight == METASLAB_WEIGHT_SECONDARY && 4744eda14cbcSMatt Macy DVA_GET_VDEV(&dva[i]) == mg->mg_vd->vdev_id) { 4745eda14cbcSMatt Macy activation_weight = 
METASLAB_WEIGHT_CLAIM; 4746eda14cbcSMatt Macy break; 4747eda14cbcSMatt Macy } 4748eda14cbcSMatt Macy } 4749eda14cbcSMatt Macy 4750eda14cbcSMatt Macy /* 4751eda14cbcSMatt Macy * If we don't have enough metaslabs active to fill the entire array, we 4752eda14cbcSMatt Macy * just use the 0th slot. 4753eda14cbcSMatt Macy */ 4754eda14cbcSMatt Macy if (mg->mg_ms_ready < mg->mg_allocators * 3) 4755eda14cbcSMatt Macy allocator = 0; 4756eda14cbcSMatt Macy metaslab_group_allocator_t *mga = &mg->mg_allocator[allocator]; 4757eda14cbcSMatt Macy 4758eda14cbcSMatt Macy ASSERT3U(mg->mg_vd->vdev_ms_count, >=, 2); 4759eda14cbcSMatt Macy 4760eda14cbcSMatt Macy metaslab_t *search = kmem_alloc(sizeof (*search), KM_SLEEP); 4761eda14cbcSMatt Macy search->ms_weight = UINT64_MAX; 4762eda14cbcSMatt Macy search->ms_start = 0; 4763eda14cbcSMatt Macy /* 4764eda14cbcSMatt Macy * At the end of the metaslab tree are the already-active metaslabs, 4765eda14cbcSMatt Macy * first the primaries, then the secondaries. When we resume searching 4766eda14cbcSMatt Macy * through the tree, we need to consider ms_allocator and ms_primary so 4767eda14cbcSMatt Macy * we start in the location right after where we left off, and don't 4768eda14cbcSMatt Macy * accidentally loop forever considering the same metaslabs. 4769eda14cbcSMatt Macy */ 4770eda14cbcSMatt Macy search->ms_allocator = -1; 4771eda14cbcSMatt Macy search->ms_primary = B_TRUE; 4772eda14cbcSMatt Macy for (;;) { 4773eda14cbcSMatt Macy boolean_t was_active = B_FALSE; 4774eda14cbcSMatt Macy 4775eda14cbcSMatt Macy mutex_enter(&mg->mg_lock); 4776eda14cbcSMatt Macy 4777eda14cbcSMatt Macy if (activation_weight == METASLAB_WEIGHT_PRIMARY && 4778eda14cbcSMatt Macy mga->mga_primary != NULL) { 4779eda14cbcSMatt Macy msp = mga->mga_primary; 4780eda14cbcSMatt Macy 4781eda14cbcSMatt Macy /* 4782eda14cbcSMatt Macy * Even though we don't hold the ms_lock for the 4783eda14cbcSMatt Macy * primary metaslab, those fields should not 4784eda14cbcSMatt Macy * change while we hold the mg_lock. Thus it is 4785eda14cbcSMatt Macy * safe to make assertions on them. 4786eda14cbcSMatt Macy */ 4787eda14cbcSMatt Macy ASSERT(msp->ms_primary); 4788eda14cbcSMatt Macy ASSERT3S(msp->ms_allocator, ==, allocator); 4789eda14cbcSMatt Macy ASSERT(msp->ms_loaded); 4790eda14cbcSMatt Macy 4791eda14cbcSMatt Macy was_active = B_TRUE; 4792eda14cbcSMatt Macy ASSERT(msp->ms_weight & METASLAB_ACTIVE_MASK); 4793eda14cbcSMatt Macy } else if (activation_weight == METASLAB_WEIGHT_SECONDARY && 4794eda14cbcSMatt Macy mga->mga_secondary != NULL) { 4795eda14cbcSMatt Macy msp = mga->mga_secondary; 4796eda14cbcSMatt Macy 4797eda14cbcSMatt Macy /* 4798eda14cbcSMatt Macy * See comment above about the similar assertions 4799eda14cbcSMatt Macy * for the primary metaslab. 
4800eda14cbcSMatt Macy 			 */
4801eda14cbcSMatt Macy 			ASSERT(!msp->ms_primary);
4802eda14cbcSMatt Macy 			ASSERT3S(msp->ms_allocator, ==, allocator);
4803eda14cbcSMatt Macy 			ASSERT(msp->ms_loaded);
4804eda14cbcSMatt Macy 
4805eda14cbcSMatt Macy 			was_active = B_TRUE;
4806eda14cbcSMatt Macy 			ASSERT(msp->ms_weight & METASLAB_ACTIVE_MASK);
4807eda14cbcSMatt Macy 		} else {
4808eda14cbcSMatt Macy 			msp = find_valid_metaslab(mg, activation_weight, dva, d,
4809eda14cbcSMatt Macy 			    want_unique, asize, allocator, try_hard, zal,
4810eda14cbcSMatt Macy 			    search, &was_active);
4811eda14cbcSMatt Macy 		}
4812eda14cbcSMatt Macy 
4813eda14cbcSMatt Macy 		mutex_exit(&mg->mg_lock);
4814eda14cbcSMatt Macy 		if (msp == NULL) {
4815eda14cbcSMatt Macy 			kmem_free(search, sizeof (*search));
4816eda14cbcSMatt Macy 			return (-1ULL);
4817eda14cbcSMatt Macy 		}
4818eda14cbcSMatt Macy 		mutex_enter(&msp->ms_lock);
4819eda14cbcSMatt Macy 
4820eda14cbcSMatt Macy 		metaslab_active_mask_verify(msp);
4821eda14cbcSMatt Macy 
4822eda14cbcSMatt Macy 		/*
4823eda14cbcSMatt Macy 		 * This code is disabled because of issues with
4824eda14cbcSMatt Macy 		 * tracepoints in non-gpl kernel modules.
4825eda14cbcSMatt Macy 		 */
4826eda14cbcSMatt Macy #if 0
4827eda14cbcSMatt Macy 		DTRACE_PROBE3(ms__activation__attempt,
4828eda14cbcSMatt Macy 		    metaslab_t *, msp, uint64_t, activation_weight,
4829eda14cbcSMatt Macy 		    boolean_t, was_active);
4830eda14cbcSMatt Macy #endif
4831eda14cbcSMatt Macy 
4832eda14cbcSMatt Macy 		/*
4833eda14cbcSMatt Macy 		 * Ensure that the metaslab we have selected is still
4834eda14cbcSMatt Macy 		 * capable of handling our request. It's possible that
4835eda14cbcSMatt Macy 		 * another thread may have changed the weight while we
4836eda14cbcSMatt Macy 		 * were blocked on the metaslab lock. We check the
4837eda14cbcSMatt Macy 		 * active status first to see if we need to select
4838eda14cbcSMatt Macy 		 * a new metaslab.
4839eda14cbcSMatt Macy 		 */
4840eda14cbcSMatt Macy 		if (was_active && !(msp->ms_weight & METASLAB_ACTIVE_MASK)) {
4841eda14cbcSMatt Macy 			ASSERT3S(msp->ms_allocator, ==, -1);
4842eda14cbcSMatt Macy 			mutex_exit(&msp->ms_lock);
4843eda14cbcSMatt Macy 			continue;
4844eda14cbcSMatt Macy 		}
4845eda14cbcSMatt Macy 
4846eda14cbcSMatt Macy 		/*
4847eda14cbcSMatt Macy 		 * If the metaslab was activated for another allocator
4848eda14cbcSMatt Macy 		 * while we were waiting in the ms_lock above, or it's
4849eda14cbcSMatt Macy 		 * a primary and we're seeking a secondary (or vice versa),
4850eda14cbcSMatt Macy 		 * we go back and select a new metaslab.
4851eda14cbcSMatt Macy 		 */
4852eda14cbcSMatt Macy 		if (!was_active && (msp->ms_weight & METASLAB_ACTIVE_MASK) &&
4853eda14cbcSMatt Macy 		    (msp->ms_allocator != -1) &&
4854eda14cbcSMatt Macy 		    (msp->ms_allocator != allocator || ((activation_weight ==
4855eda14cbcSMatt Macy 		    METASLAB_WEIGHT_PRIMARY) != msp->ms_primary))) {
4856eda14cbcSMatt Macy 			ASSERT(msp->ms_loaded);
4857eda14cbcSMatt Macy 			ASSERT((msp->ms_weight & METASLAB_WEIGHT_CLAIM) ||
4858eda14cbcSMatt Macy 			    msp->ms_allocator != -1);
4859eda14cbcSMatt Macy 			mutex_exit(&msp->ms_lock);
4860eda14cbcSMatt Macy 			continue;
4861eda14cbcSMatt Macy 		}
4862eda14cbcSMatt Macy 
4863eda14cbcSMatt Macy 		/*
4864eda14cbcSMatt Macy 		 * This metaslab was used for claiming regions allocated
4865eda14cbcSMatt Macy 		 * by the ZIL during pool import. Once these regions are
4866eda14cbcSMatt Macy 		 * claimed we don't need to keep the CLAIM bit set
4867eda14cbcSMatt Macy 		 * anymore. Passivate this metaslab to zero its activation
4868eda14cbcSMatt Macy 		 * mask.
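		 * (The CLAIM weight is only set while claiming ZIL blocks
		 * at import time; see metaslab_claim_concrete().)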
4869eda14cbcSMatt Macy 		 */
4870eda14cbcSMatt Macy 		if (msp->ms_weight & METASLAB_WEIGHT_CLAIM &&
4871eda14cbcSMatt Macy 		    activation_weight != METASLAB_WEIGHT_CLAIM) {
4872eda14cbcSMatt Macy 			ASSERT(msp->ms_loaded);
4873eda14cbcSMatt Macy 			ASSERT3S(msp->ms_allocator, ==, -1);
4874eda14cbcSMatt Macy 			metaslab_passivate(msp, msp->ms_weight &
4875eda14cbcSMatt Macy 			    ~METASLAB_WEIGHT_CLAIM);
4876eda14cbcSMatt Macy 			mutex_exit(&msp->ms_lock);
4877eda14cbcSMatt Macy 			continue;
4878eda14cbcSMatt Macy 		}
4879eda14cbcSMatt Macy 
4880eda14cbcSMatt Macy 		metaslab_set_selected_txg(msp, txg);
4881eda14cbcSMatt Macy 
4882eda14cbcSMatt Macy 		int activation_error =
4883eda14cbcSMatt Macy 		    metaslab_activate(msp, allocator, activation_weight);
4884eda14cbcSMatt Macy 		metaslab_active_mask_verify(msp);
4885eda14cbcSMatt Macy 
4886eda14cbcSMatt Macy 		/*
4887eda14cbcSMatt Macy 		 * If the metaslab was activated by another thread for
4888eda14cbcSMatt Macy 		 * another allocator or activation_weight (EBUSY), or it
4889eda14cbcSMatt Macy 		 * failed because another metaslab was assigned as primary
4890eda14cbcSMatt Macy 		 * for this allocator (EEXIST) we continue using this
4891eda14cbcSMatt Macy 		 * metaslab for our allocation, rather than going on to a
4892eda14cbcSMatt Macy 		 * worse metaslab (we waited for that metaslab to be loaded
4893eda14cbcSMatt Macy 		 * after all).
4894eda14cbcSMatt Macy 		 *
4895eda14cbcSMatt Macy 		 * If the activation failed due to an I/O error or ENOSPC we
4896eda14cbcSMatt Macy 		 * skip to the next metaslab.
4897eda14cbcSMatt Macy 		 */
4898eda14cbcSMatt Macy 		boolean_t activated;
4899eda14cbcSMatt Macy 		if (activation_error == 0) {
4900eda14cbcSMatt Macy 			activated = B_TRUE;
4901eda14cbcSMatt Macy 		} else if (activation_error == EBUSY ||
4902eda14cbcSMatt Macy 		    activation_error == EEXIST) {
4903eda14cbcSMatt Macy 			activated = B_FALSE;
4904eda14cbcSMatt Macy 		} else {
4905eda14cbcSMatt Macy 			mutex_exit(&msp->ms_lock);
4906eda14cbcSMatt Macy 			continue;
4907eda14cbcSMatt Macy 		}
4908eda14cbcSMatt Macy 		ASSERT(msp->ms_loaded);
4909eda14cbcSMatt Macy 
4910eda14cbcSMatt Macy 		/*
4911eda14cbcSMatt Macy 		 * Now that we have the lock, recheck to see if we should
4912eda14cbcSMatt Macy 		 * continue to use this metaslab for this allocation. The
4913eda14cbcSMatt Macy 		 * metaslab is now loaded so metaslab_should_allocate()
4914eda14cbcSMatt Macy 		 * can accurately determine if the allocation attempt should
4915eda14cbcSMatt Macy 		 * proceed.
4916eda14cbcSMatt Macy 		 */
4917eda14cbcSMatt Macy 		if (!metaslab_should_allocate(msp, asize, try_hard)) {
4918eda14cbcSMatt Macy 			/* Passivate this metaslab and select a new one. */
4919eda14cbcSMatt Macy 			metaslab_trace_add(zal, mg, msp, asize, d,
4920eda14cbcSMatt Macy 			    TRACE_TOO_SMALL, allocator);
4921eda14cbcSMatt Macy 			goto next;
4922eda14cbcSMatt Macy 		}
4923eda14cbcSMatt Macy 
4924eda14cbcSMatt Macy 		/*
4925eda14cbcSMatt Macy 		 * If this metaslab is currently condensing then pick again
4926eda14cbcSMatt Macy 		 * as we can't manipulate this metaslab until it's committed
4927eda14cbcSMatt Macy 		 * to disk. If this metaslab is being initialized, we shouldn't
4928eda14cbcSMatt Macy 		 * allocate from it since the allocated region might be
4929eda14cbcSMatt Macy 		 * overwritten after allocation.
4930eda14cbcSMatt Macy 		 */
4931eda14cbcSMatt Macy 		if (msp->ms_condensing) {
4932eda14cbcSMatt Macy 			metaslab_trace_add(zal, mg, msp, asize, d,
4933eda14cbcSMatt Macy 			    TRACE_CONDENSING, allocator);
4934eda14cbcSMatt Macy 			if (activated) {
4935eda14cbcSMatt Macy 				metaslab_passivate(msp, msp->ms_weight &
4936eda14cbcSMatt Macy 				    ~METASLAB_ACTIVE_MASK);
4937eda14cbcSMatt Macy 			}
4938eda14cbcSMatt Macy 			mutex_exit(&msp->ms_lock);
4939eda14cbcSMatt Macy 			continue;
4940eda14cbcSMatt Macy 		} else if (msp->ms_disabled > 0) {
4941eda14cbcSMatt Macy 			metaslab_trace_add(zal, mg, msp, asize, d,
4942eda14cbcSMatt Macy 			    TRACE_DISABLED, allocator);
4943eda14cbcSMatt Macy 			if (activated) {
4944eda14cbcSMatt Macy 				metaslab_passivate(msp, msp->ms_weight &
4945eda14cbcSMatt Macy 				    ~METASLAB_ACTIVE_MASK);
4946eda14cbcSMatt Macy 			}
4947eda14cbcSMatt Macy 			mutex_exit(&msp->ms_lock);
4948eda14cbcSMatt Macy 			continue;
4949eda14cbcSMatt Macy 		}
4950eda14cbcSMatt Macy 
4951eda14cbcSMatt Macy 		offset = metaslab_block_alloc(msp, asize, txg);
4952eda14cbcSMatt Macy 		metaslab_trace_add(zal, mg, msp, asize, d, offset, allocator);
4953eda14cbcSMatt Macy 
4954eda14cbcSMatt Macy 		if (offset != -1ULL) {
4955eda14cbcSMatt Macy 			/* Proactively passivate the metaslab, if needed */
4956eda14cbcSMatt Macy 			if (activated)
4957eda14cbcSMatt Macy 				metaslab_segment_may_passivate(msp);
4958eda14cbcSMatt Macy 			break;
4959eda14cbcSMatt Macy 		}
4960eda14cbcSMatt Macy next:
4961eda14cbcSMatt Macy 		ASSERT(msp->ms_loaded);
4962eda14cbcSMatt Macy 
4963eda14cbcSMatt Macy 		/*
4964eda14cbcSMatt Macy 		 * This code is disabled because of issues with
4965eda14cbcSMatt Macy 		 * tracepoints in non-gpl kernel modules.
4966eda14cbcSMatt Macy 		 */
4967eda14cbcSMatt Macy #if 0
4968eda14cbcSMatt Macy 		DTRACE_PROBE2(ms__alloc__failure, metaslab_t *, msp,
4969eda14cbcSMatt Macy 		    uint64_t, asize);
4970eda14cbcSMatt Macy #endif
4971eda14cbcSMatt Macy 
4972eda14cbcSMatt Macy 		/*
4973eda14cbcSMatt Macy 		 * We were unable to allocate from this metaslab so determine
4974eda14cbcSMatt Macy 		 * a new weight for this metaslab. Now that we have loaded
4975eda14cbcSMatt Macy 		 * the metaslab we can provide a better hint to the metaslab
4976eda14cbcSMatt Macy 		 * selector.
4977eda14cbcSMatt Macy 		 *
4978eda14cbcSMatt Macy 		 * For space-based metaslabs, we use the maximum block size.
4979eda14cbcSMatt Macy 		 * This information is only available when the metaslab
4980eda14cbcSMatt Macy 		 * is loaded and is more accurate than the generic free
4981eda14cbcSMatt Macy 		 * space weight that was calculated by metaslab_weight().
4982eda14cbcSMatt Macy 		 * This information allows us to quickly compare the maximum
4983eda14cbcSMatt Macy 		 * available allocation in the metaslab to the allocation
4984eda14cbcSMatt Macy 		 * size being requested.
4985eda14cbcSMatt Macy 		 *
4986eda14cbcSMatt Macy 		 * For segment-based metaslabs, determine the new weight
4987eda14cbcSMatt Macy 		 * based on the highest bucket in the range tree. We
4988eda14cbcSMatt Macy 		 * explicitly use the loaded segment weight (i.e. the range
4989eda14cbcSMatt Macy 		 * tree histogram) since it contains the space that is
4990eda14cbcSMatt Macy 		 * currently available for allocation and is accurate
4991eda14cbcSMatt Macy 		 * even within a sync pass.
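		 * (Roughly: a space-based weight encodes the largest single
		 * allocation the metaslab can satisfy, while a
		 * segment-based weight is keyed on the highest range tree
		 * histogram bucket that still contains free segments.)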
4992eda14cbcSMatt Macy */ 4993eda14cbcSMatt Macy uint64_t weight; 4994eda14cbcSMatt Macy if (WEIGHT_IS_SPACEBASED(msp->ms_weight)) { 4995eda14cbcSMatt Macy weight = metaslab_largest_allocatable(msp); 4996eda14cbcSMatt Macy WEIGHT_SET_SPACEBASED(weight); 4997eda14cbcSMatt Macy } else { 4998eda14cbcSMatt Macy weight = metaslab_weight_from_range_tree(msp); 4999eda14cbcSMatt Macy } 5000eda14cbcSMatt Macy 5001eda14cbcSMatt Macy if (activated) { 5002eda14cbcSMatt Macy metaslab_passivate(msp, weight); 5003eda14cbcSMatt Macy } else { 5004eda14cbcSMatt Macy /* 5005eda14cbcSMatt Macy * For the case where we use the metaslab that is 5006eda14cbcSMatt Macy * active for another allocator we want to make 5007eda14cbcSMatt Macy * sure that we retain the activation mask. 5008eda14cbcSMatt Macy * 5009eda14cbcSMatt Macy * Note that we could attempt to use something like 5010eda14cbcSMatt Macy * metaslab_recalculate_weight_and_sort() that 5011eda14cbcSMatt Macy * retains the activation mask here. That function 5012eda14cbcSMatt Macy * uses metaslab_weight() to set the weight though 5013eda14cbcSMatt Macy * which is not as accurate as the calculations 5014eda14cbcSMatt Macy * above. 5015eda14cbcSMatt Macy */ 5016eda14cbcSMatt Macy weight |= msp->ms_weight & METASLAB_ACTIVE_MASK; 5017eda14cbcSMatt Macy metaslab_group_sort(mg, msp, weight); 5018eda14cbcSMatt Macy } 5019eda14cbcSMatt Macy metaslab_active_mask_verify(msp); 5020eda14cbcSMatt Macy 5021eda14cbcSMatt Macy /* 5022eda14cbcSMatt Macy * We have just failed an allocation attempt, check 5023eda14cbcSMatt Macy * that metaslab_should_allocate() agrees. Otherwise, 5024eda14cbcSMatt Macy * we may end up in an infinite loop retrying the same 5025eda14cbcSMatt Macy * metaslab. 5026eda14cbcSMatt Macy */ 5027eda14cbcSMatt Macy ASSERT(!metaslab_should_allocate(msp, asize, try_hard)); 5028eda14cbcSMatt Macy 5029eda14cbcSMatt Macy mutex_exit(&msp->ms_lock); 5030eda14cbcSMatt Macy } 5031eda14cbcSMatt Macy mutex_exit(&msp->ms_lock); 5032eda14cbcSMatt Macy kmem_free(search, sizeof (*search)); 5033eda14cbcSMatt Macy return (offset); 5034eda14cbcSMatt Macy } 5035eda14cbcSMatt Macy 5036eda14cbcSMatt Macy static uint64_t 5037eda14cbcSMatt Macy metaslab_group_alloc(metaslab_group_t *mg, zio_alloc_list_t *zal, 5038eda14cbcSMatt Macy uint64_t asize, uint64_t txg, boolean_t want_unique, dva_t *dva, int d, 5039eda14cbcSMatt Macy int allocator, boolean_t try_hard) 5040eda14cbcSMatt Macy { 5041eda14cbcSMatt Macy uint64_t offset; 5042eda14cbcSMatt Macy ASSERT(mg->mg_initialized); 5043eda14cbcSMatt Macy 5044eda14cbcSMatt Macy offset = metaslab_group_alloc_normal(mg, zal, asize, txg, want_unique, 5045eda14cbcSMatt Macy dva, d, allocator, try_hard); 5046eda14cbcSMatt Macy 5047eda14cbcSMatt Macy mutex_enter(&mg->mg_lock); 5048eda14cbcSMatt Macy if (offset == -1ULL) { 5049eda14cbcSMatt Macy mg->mg_failed_allocations++; 5050eda14cbcSMatt Macy metaslab_trace_add(zal, mg, NULL, asize, d, 5051eda14cbcSMatt Macy TRACE_GROUP_FAILURE, allocator); 5052eda14cbcSMatt Macy if (asize == SPA_GANGBLOCKSIZE) { 5053eda14cbcSMatt Macy /* 5054eda14cbcSMatt Macy * This metaslab group was unable to allocate 5055eda14cbcSMatt Macy * the minimum gang block size so it must be out of 5056eda14cbcSMatt Macy * space. We must notify the allocation throttle 5057eda14cbcSMatt Macy * to start skipping allocation attempts to this 5058eda14cbcSMatt Macy * metaslab group until more space becomes available. 
5059eda14cbcSMatt Macy * Note: this failure cannot be caused by the 5060eda14cbcSMatt Macy * allocation throttle since the allocation throttle 5061eda14cbcSMatt Macy * is only responsible for skipping devices and 5062eda14cbcSMatt Macy * not failing block allocations. 5063eda14cbcSMatt Macy */ 5064eda14cbcSMatt Macy mg->mg_no_free_space = B_TRUE; 5065eda14cbcSMatt Macy } 5066eda14cbcSMatt Macy } 5067eda14cbcSMatt Macy mg->mg_allocations++; 5068eda14cbcSMatt Macy mutex_exit(&mg->mg_lock); 5069eda14cbcSMatt Macy return (offset); 5070eda14cbcSMatt Macy } 5071eda14cbcSMatt Macy 5072eda14cbcSMatt Macy /* 5073eda14cbcSMatt Macy * Allocate a block for the specified i/o. 5074eda14cbcSMatt Macy */ 5075eda14cbcSMatt Macy int 5076eda14cbcSMatt Macy metaslab_alloc_dva(spa_t *spa, metaslab_class_t *mc, uint64_t psize, 5077eda14cbcSMatt Macy dva_t *dva, int d, dva_t *hintdva, uint64_t txg, int flags, 5078eda14cbcSMatt Macy zio_alloc_list_t *zal, int allocator) 5079eda14cbcSMatt Macy { 50807877fdebSMatt Macy metaslab_class_allocator_t *mca = &mc->mc_allocator[allocator]; 5081eda14cbcSMatt Macy metaslab_group_t *mg, *fast_mg, *rotor; 5082eda14cbcSMatt Macy vdev_t *vd; 5083eda14cbcSMatt Macy boolean_t try_hard = B_FALSE; 5084eda14cbcSMatt Macy 5085eda14cbcSMatt Macy ASSERT(!DVA_IS_VALID(&dva[d])); 5086eda14cbcSMatt Macy 5087eda14cbcSMatt Macy /* 5088eda14cbcSMatt Macy * For testing, make some blocks above a certain size be gang blocks. 5089eda14cbcSMatt Macy * This will result in more split blocks when using device removal, 5090eda14cbcSMatt Macy * and a large number of split blocks coupled with ztest-induced 5091eda14cbcSMatt Macy * damage can result in extremely long reconstruction times. This 5092eda14cbcSMatt Macy * will also test spilling from special to normal. 5093eda14cbcSMatt Macy */ 509433b8c039SMartin Matuska if (psize >= metaslab_force_ganging && (random_in_range(100) < 3)) { 5095eda14cbcSMatt Macy metaslab_trace_add(zal, NULL, NULL, psize, d, TRACE_FORCE_GANG, 5096eda14cbcSMatt Macy allocator); 5097eda14cbcSMatt Macy return (SET_ERROR(ENOSPC)); 5098eda14cbcSMatt Macy } 5099eda14cbcSMatt Macy 5100eda14cbcSMatt Macy /* 5101eda14cbcSMatt Macy * Start at the rotor and loop through all mgs until we find something. 51027877fdebSMatt Macy * Note that there's no locking on mca_rotor or mca_aliquot because 5103eda14cbcSMatt Macy * nothing actually breaks if we miss a few updates -- we just won't 5104eda14cbcSMatt Macy * allocate quite as evenly. It all balances out over time. 5105eda14cbcSMatt Macy * 5106eda14cbcSMatt Macy * If we are doing ditto or log blocks, try to spread them across 5107eda14cbcSMatt Macy * consecutive vdevs. If we're forced to reuse a vdev before we've 5108eda14cbcSMatt Macy * allocated all of our ditto blocks, then try and spread them out on 5109eda14cbcSMatt Macy * that vdev as much as possible. If it turns out to not be possible, 5110eda14cbcSMatt Macy * gradually lower our standards until anything becomes acceptable. 5111eda14cbcSMatt Macy * Also, allocating on consecutive vdevs (as opposed to random vdevs) 5112eda14cbcSMatt Macy * gives us hope of containing our fault domains to something we're 5113eda14cbcSMatt Macy * able to reason about. Otherwise, any two top-level vdev failures 5114eda14cbcSMatt Macy * will guarantee the loss of data. With consecutive allocation, 5115eda14cbcSMatt Macy * only two adjacent top-level vdev failures will result in data loss. 
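	 * For example, with 10 top-level vdevs and ditto copies placed on
	 * consecutive vdevs, only the 10 adjacent pairs out of the 45
	 * possible two-vdev failure combinations can destroy both copies,
	 * rather than any of the 45 under random placement.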
5116eda14cbcSMatt Macy * 5117eda14cbcSMatt Macy * If we are doing gang blocks (hintdva is non-NULL), try to keep 5118eda14cbcSMatt Macy * ourselves on the same vdev as our gang block header. That 5119eda14cbcSMatt Macy * way, we can hope for locality in vdev_cache, plus it makes our 5120eda14cbcSMatt Macy * fault domains something tractable. 5121eda14cbcSMatt Macy */ 5122eda14cbcSMatt Macy if (hintdva) { 5123eda14cbcSMatt Macy vd = vdev_lookup_top(spa, DVA_GET_VDEV(&hintdva[d])); 5124eda14cbcSMatt Macy 5125eda14cbcSMatt Macy /* 5126eda14cbcSMatt Macy * It's possible the vdev we're using as the hint no 5127eda14cbcSMatt Macy * longer exists or its mg has been closed (e.g. by 5128eda14cbcSMatt Macy * device removal). Consult the rotor when 5129eda14cbcSMatt Macy * all else fails. 5130eda14cbcSMatt Macy */ 5131eda14cbcSMatt Macy if (vd != NULL && vd->vdev_mg != NULL) { 5132184c1b94SMartin Matuska mg = vdev_get_mg(vd, mc); 5133eda14cbcSMatt Macy 5134eda14cbcSMatt Macy if (flags & METASLAB_HINTBP_AVOID && 5135eda14cbcSMatt Macy mg->mg_next != NULL) 5136eda14cbcSMatt Macy mg = mg->mg_next; 5137eda14cbcSMatt Macy } else { 51387877fdebSMatt Macy mg = mca->mca_rotor; 5139eda14cbcSMatt Macy } 5140eda14cbcSMatt Macy } else if (d != 0) { 5141eda14cbcSMatt Macy vd = vdev_lookup_top(spa, DVA_GET_VDEV(&dva[d - 1])); 5142eda14cbcSMatt Macy mg = vd->vdev_mg->mg_next; 5143eda14cbcSMatt Macy } else if (flags & METASLAB_FASTWRITE) { 51447877fdebSMatt Macy mg = fast_mg = mca->mca_rotor; 5145eda14cbcSMatt Macy 5146eda14cbcSMatt Macy do { 5147eda14cbcSMatt Macy if (fast_mg->mg_vd->vdev_pending_fastwrite < 5148eda14cbcSMatt Macy mg->mg_vd->vdev_pending_fastwrite) 5149eda14cbcSMatt Macy mg = fast_mg; 51507877fdebSMatt Macy } while ((fast_mg = fast_mg->mg_next) != mca->mca_rotor); 5151eda14cbcSMatt Macy 5152eda14cbcSMatt Macy } else { 51537877fdebSMatt Macy ASSERT(mca->mca_rotor != NULL); 51547877fdebSMatt Macy mg = mca->mca_rotor; 5155eda14cbcSMatt Macy } 5156eda14cbcSMatt Macy 5157eda14cbcSMatt Macy /* 5158eda14cbcSMatt Macy * If the hint put us into the wrong metaslab class, or into a 5159eda14cbcSMatt Macy * metaslab group that has been passivated, just follow the rotor. 5160eda14cbcSMatt Macy */ 5161eda14cbcSMatt Macy if (mg->mg_class != mc || mg->mg_activation_count <= 0) 51627877fdebSMatt Macy mg = mca->mca_rotor; 5163eda14cbcSMatt Macy 5164eda14cbcSMatt Macy rotor = mg; 5165eda14cbcSMatt Macy top: 5166eda14cbcSMatt Macy do { 5167eda14cbcSMatt Macy boolean_t allocatable; 5168eda14cbcSMatt Macy 5169eda14cbcSMatt Macy ASSERT(mg->mg_activation_count == 1); 5170eda14cbcSMatt Macy vd = mg->mg_vd; 5171eda14cbcSMatt Macy 5172eda14cbcSMatt Macy /* 5173eda14cbcSMatt Macy * Don't allocate from faulted devices. 5174eda14cbcSMatt Macy */ 5175eda14cbcSMatt Macy if (try_hard) { 5176eda14cbcSMatt Macy spa_config_enter(spa, SCL_ZIO, FTAG, RW_READER); 5177eda14cbcSMatt Macy allocatable = vdev_allocatable(vd); 5178eda14cbcSMatt Macy spa_config_exit(spa, SCL_ZIO, FTAG); 5179eda14cbcSMatt Macy } else { 5180eda14cbcSMatt Macy allocatable = vdev_allocatable(vd); 5181eda14cbcSMatt Macy } 5182eda14cbcSMatt Macy 5183eda14cbcSMatt Macy /* 5184eda14cbcSMatt Macy * Determine if the selected metaslab group is eligible 5185eda14cbcSMatt Macy * for allocations. If we're ganging then don't allow 5186eda14cbcSMatt Macy * this metaslab group to skip allocations since that would 5187eda14cbcSMatt Macy * inadvertently return ENOSPC and suspend the pool 5188eda14cbcSMatt Macy * even though space is still available. 
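		 * (Gang allocations are already the fallback path, so every
		 * group must remain a candidate for them.)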
5189eda14cbcSMatt Macy */ 5190eda14cbcSMatt Macy if (allocatable && !GANG_ALLOCATION(flags) && !try_hard) { 5191eda14cbcSMatt Macy allocatable = metaslab_group_allocatable(mg, rotor, 5192eda14cbcSMatt Macy psize, allocator, d); 5193eda14cbcSMatt Macy } 5194eda14cbcSMatt Macy 5195eda14cbcSMatt Macy if (!allocatable) { 5196eda14cbcSMatt Macy metaslab_trace_add(zal, mg, NULL, psize, d, 5197eda14cbcSMatt Macy TRACE_NOT_ALLOCATABLE, allocator); 5198eda14cbcSMatt Macy goto next; 5199eda14cbcSMatt Macy } 5200eda14cbcSMatt Macy 5201eda14cbcSMatt Macy ASSERT(mg->mg_initialized); 5202eda14cbcSMatt Macy 5203eda14cbcSMatt Macy /* 5204eda14cbcSMatt Macy * Avoid writing single-copy data to a failing, 5205eda14cbcSMatt Macy * non-redundant vdev, unless we've already tried all 5206eda14cbcSMatt Macy * other vdevs. 5207eda14cbcSMatt Macy */ 5208eda14cbcSMatt Macy if ((vd->vdev_stat.vs_write_errors > 0 || 5209eda14cbcSMatt Macy vd->vdev_state < VDEV_STATE_HEALTHY) && 5210eda14cbcSMatt Macy d == 0 && !try_hard && vd->vdev_children == 0) { 5211eda14cbcSMatt Macy metaslab_trace_add(zal, mg, NULL, psize, d, 5212eda14cbcSMatt Macy TRACE_VDEV_ERROR, allocator); 5213eda14cbcSMatt Macy goto next; 5214eda14cbcSMatt Macy } 5215eda14cbcSMatt Macy 5216eda14cbcSMatt Macy ASSERT(mg->mg_class == mc); 5217eda14cbcSMatt Macy 5218eda14cbcSMatt Macy uint64_t asize = vdev_psize_to_asize(vd, psize); 5219eda14cbcSMatt Macy ASSERT(P2PHASE(asize, 1ULL << vd->vdev_ashift) == 0); 5220eda14cbcSMatt Macy 5221eda14cbcSMatt Macy /* 5222eda14cbcSMatt Macy * If we don't need to try hard, then require that the 5223eda14cbcSMatt Macy * block be on a different metaslab from any other DVAs 5224eda14cbcSMatt Macy * in this BP (unique=true). If we are trying hard, then 5225eda14cbcSMatt Macy * allow any metaslab to be used (unique=false). 5226eda14cbcSMatt Macy */ 5227eda14cbcSMatt Macy uint64_t offset = metaslab_group_alloc(mg, zal, asize, txg, 5228eda14cbcSMatt Macy !try_hard, dva, d, allocator, try_hard); 5229eda14cbcSMatt Macy 5230eda14cbcSMatt Macy if (offset != -1ULL) { 5231eda14cbcSMatt Macy /* 5232eda14cbcSMatt Macy * If we've just selected this metaslab group, 5233eda14cbcSMatt Macy * figure out whether the corresponding vdev is 5234eda14cbcSMatt Macy * over- or under-used relative to the pool, 5235eda14cbcSMatt Macy * and set an allocation bias to even it out. 5236eda14cbcSMatt Macy * 5237eda14cbcSMatt Macy * Bias is also used to compensate for unequally 5238eda14cbcSMatt Macy * sized vdevs so that space is allocated fairly. 5239eda14cbcSMatt Macy */ 52407877fdebSMatt Macy if (mca->mca_aliquot == 0 && metaslab_bias_enabled) { 5241eda14cbcSMatt Macy vdev_stat_t *vs = &vd->vdev_stat; 5242eda14cbcSMatt Macy int64_t vs_free = vs->vs_space - vs->vs_alloc; 5243eda14cbcSMatt Macy int64_t mc_free = mc->mc_space - mc->mc_alloc; 5244eda14cbcSMatt Macy int64_t ratio; 5245eda14cbcSMatt Macy 5246eda14cbcSMatt Macy /* 5247eda14cbcSMatt Macy * Calculate how much more or less we should 5248eda14cbcSMatt Macy * try to allocate from this device during 5249eda14cbcSMatt Macy * this iteration around the rotor. 5250eda14cbcSMatt Macy * 5251eda14cbcSMatt Macy * This basically introduces a zero-centered 5252eda14cbcSMatt Macy * bias towards the devices with the most 5253eda14cbcSMatt Macy * free space, while compensating for vdev 5254eda14cbcSMatt Macy * size differences. 
5255eda14cbcSMatt Macy * 5256eda14cbcSMatt Macy * Examples: 5257eda14cbcSMatt Macy * vdev V1 = 16M/128M 5258eda14cbcSMatt Macy * vdev V2 = 16M/128M 5259eda14cbcSMatt Macy * ratio(V1) = 100% ratio(V2) = 100% 5260eda14cbcSMatt Macy * 5261eda14cbcSMatt Macy * vdev V1 = 16M/128M 5262eda14cbcSMatt Macy * vdev V2 = 64M/128M 5263eda14cbcSMatt Macy * ratio(V1) = 127% ratio(V2) = 72% 5264eda14cbcSMatt Macy * 5265eda14cbcSMatt Macy * vdev V1 = 16M/128M 5266eda14cbcSMatt Macy * vdev V2 = 64M/512M 5267eda14cbcSMatt Macy * ratio(V1) = 40% ratio(V2) = 160% 5268eda14cbcSMatt Macy */ 5269eda14cbcSMatt Macy ratio = (vs_free * mc->mc_alloc_groups * 100) / 5270eda14cbcSMatt Macy (mc_free + 1); 5271eda14cbcSMatt Macy mg->mg_bias = ((ratio - 100) * 5272eda14cbcSMatt Macy (int64_t)mg->mg_aliquot) / 100; 5273eda14cbcSMatt Macy } else if (!metaslab_bias_enabled) { 5274eda14cbcSMatt Macy mg->mg_bias = 0; 5275eda14cbcSMatt Macy } 5276eda14cbcSMatt Macy 5277eda14cbcSMatt Macy if ((flags & METASLAB_FASTWRITE) || 52787877fdebSMatt Macy atomic_add_64_nv(&mca->mca_aliquot, asize) >= 5279eda14cbcSMatt Macy mg->mg_aliquot + mg->mg_bias) { 52807877fdebSMatt Macy mca->mca_rotor = mg->mg_next; 52817877fdebSMatt Macy mca->mca_aliquot = 0; 5282eda14cbcSMatt Macy } 5283eda14cbcSMatt Macy 5284eda14cbcSMatt Macy DVA_SET_VDEV(&dva[d], vd->vdev_id); 5285eda14cbcSMatt Macy DVA_SET_OFFSET(&dva[d], offset); 5286eda14cbcSMatt Macy DVA_SET_GANG(&dva[d], 5287eda14cbcSMatt Macy ((flags & METASLAB_GANG_HEADER) ? 1 : 0)); 5288eda14cbcSMatt Macy DVA_SET_ASIZE(&dva[d], asize); 5289eda14cbcSMatt Macy 5290eda14cbcSMatt Macy if (flags & METASLAB_FASTWRITE) { 5291eda14cbcSMatt Macy atomic_add_64(&vd->vdev_pending_fastwrite, 5292eda14cbcSMatt Macy psize); 5293eda14cbcSMatt Macy } 5294eda14cbcSMatt Macy 5295eda14cbcSMatt Macy return (0); 5296eda14cbcSMatt Macy } 5297eda14cbcSMatt Macy next: 52987877fdebSMatt Macy mca->mca_rotor = mg->mg_next; 52997877fdebSMatt Macy mca->mca_aliquot = 0; 5300eda14cbcSMatt Macy } while ((mg = mg->mg_next) != rotor); 5301eda14cbcSMatt Macy 5302eda14cbcSMatt Macy /* 53037877fdebSMatt Macy * If we haven't tried hard, perhaps do so now. 
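	 * As the check below shows, we do so for gang and ZIL allocations,
	 * for requests no larger than one 2^spa_min_ashift block, or
	 * unconditionally when zfs_metaslab_try_hard_before_gang is set.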
5304eda14cbcSMatt Macy */ 53057877fdebSMatt Macy if (!try_hard && (zfs_metaslab_try_hard_before_gang || 53067877fdebSMatt Macy GANG_ALLOCATION(flags) || (flags & METASLAB_ZIL) != 0 || 53077877fdebSMatt Macy psize <= 1 << spa->spa_min_ashift)) { 53087877fdebSMatt Macy METASLABSTAT_BUMP(metaslabstat_try_hard); 5309eda14cbcSMatt Macy try_hard = B_TRUE; 5310eda14cbcSMatt Macy goto top; 5311eda14cbcSMatt Macy } 5312eda14cbcSMatt Macy 5313da5137abSMartin Matuska memset(&dva[d], 0, sizeof (dva_t)); 5314eda14cbcSMatt Macy 5315eda14cbcSMatt Macy metaslab_trace_add(zal, rotor, NULL, psize, d, TRACE_ENOSPC, allocator); 5316eda14cbcSMatt Macy return (SET_ERROR(ENOSPC)); 5317eda14cbcSMatt Macy } 5318eda14cbcSMatt Macy 5319eda14cbcSMatt Macy void 5320eda14cbcSMatt Macy metaslab_free_concrete(vdev_t *vd, uint64_t offset, uint64_t asize, 5321eda14cbcSMatt Macy boolean_t checkpoint) 5322eda14cbcSMatt Macy { 5323eda14cbcSMatt Macy metaslab_t *msp; 5324eda14cbcSMatt Macy spa_t *spa = vd->vdev_spa; 5325eda14cbcSMatt Macy 5326eda14cbcSMatt Macy ASSERT(vdev_is_concrete(vd)); 5327eda14cbcSMatt Macy ASSERT3U(spa_config_held(spa, SCL_ALL, RW_READER), !=, 0); 5328eda14cbcSMatt Macy ASSERT3U(offset >> vd->vdev_ms_shift, <, vd->vdev_ms_count); 5329eda14cbcSMatt Macy 5330eda14cbcSMatt Macy msp = vd->vdev_ms[offset >> vd->vdev_ms_shift]; 5331eda14cbcSMatt Macy 5332eda14cbcSMatt Macy VERIFY(!msp->ms_condensing); 5333eda14cbcSMatt Macy VERIFY3U(offset, >=, msp->ms_start); 5334eda14cbcSMatt Macy VERIFY3U(offset + asize, <=, msp->ms_start + msp->ms_size); 5335eda14cbcSMatt Macy VERIFY0(P2PHASE(offset, 1ULL << vd->vdev_ashift)); 5336eda14cbcSMatt Macy VERIFY0(P2PHASE(asize, 1ULL << vd->vdev_ashift)); 5337eda14cbcSMatt Macy 5338eda14cbcSMatt Macy metaslab_check_free_impl(vd, offset, asize); 5339eda14cbcSMatt Macy 5340eda14cbcSMatt Macy mutex_enter(&msp->ms_lock); 5341eda14cbcSMatt Macy if (range_tree_is_empty(msp->ms_freeing) && 5342eda14cbcSMatt Macy range_tree_is_empty(msp->ms_checkpointing)) { 5343eda14cbcSMatt Macy vdev_dirty(vd, VDD_METASLAB, msp, spa_syncing_txg(spa)); 5344eda14cbcSMatt Macy } 5345eda14cbcSMatt Macy 5346eda14cbcSMatt Macy if (checkpoint) { 5347eda14cbcSMatt Macy ASSERT(spa_has_checkpoint(spa)); 5348eda14cbcSMatt Macy range_tree_add(msp->ms_checkpointing, offset, asize); 5349eda14cbcSMatt Macy } else { 5350eda14cbcSMatt Macy range_tree_add(msp->ms_freeing, offset, asize); 5351eda14cbcSMatt Macy } 5352eda14cbcSMatt Macy mutex_exit(&msp->ms_lock); 5353eda14cbcSMatt Macy } 5354eda14cbcSMatt Macy 5355eda14cbcSMatt Macy void 5356eda14cbcSMatt Macy metaslab_free_impl_cb(uint64_t inner_offset, vdev_t *vd, uint64_t offset, 5357eda14cbcSMatt Macy uint64_t size, void *arg) 5358eda14cbcSMatt Macy { 5359e92ffd9bSMartin Matuska (void) inner_offset; 5360eda14cbcSMatt Macy boolean_t *checkpoint = arg; 5361eda14cbcSMatt Macy 5362eda14cbcSMatt Macy ASSERT3P(checkpoint, !=, NULL); 5363eda14cbcSMatt Macy 5364eda14cbcSMatt Macy if (vd->vdev_ops->vdev_op_remap != NULL) 5365eda14cbcSMatt Macy vdev_indirect_mark_obsolete(vd, offset, size); 5366eda14cbcSMatt Macy else 5367eda14cbcSMatt Macy metaslab_free_impl(vd, offset, size, *checkpoint); 5368eda14cbcSMatt Macy } 5369eda14cbcSMatt Macy 5370eda14cbcSMatt Macy static void 5371eda14cbcSMatt Macy metaslab_free_impl(vdev_t *vd, uint64_t offset, uint64_t size, 5372eda14cbcSMatt Macy boolean_t checkpoint) 5373eda14cbcSMatt Macy { 5374eda14cbcSMatt Macy spa_t *spa = vd->vdev_spa; 5375eda14cbcSMatt Macy 5376eda14cbcSMatt Macy ASSERT3U(spa_config_held(spa, SCL_ALL, RW_READER), !=, 0); 
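	/*
	 * Unless the pool is frozen, the free below is dispatched based on
	 * the vdev: a vdev being removed redirects it to
	 * free_from_removing_vdev(), an indirect vdev remaps it (marking
	 * the old mapping obsolete), and a concrete vdev frees directly.
	 */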
5377eda14cbcSMatt Macy 5378eda14cbcSMatt Macy if (spa_syncing_txg(spa) > spa_freeze_txg(spa)) 5379eda14cbcSMatt Macy return; 5380eda14cbcSMatt Macy 5381eda14cbcSMatt Macy if (spa->spa_vdev_removal != NULL && 5382eda14cbcSMatt Macy spa->spa_vdev_removal->svr_vdev_id == vd->vdev_id && 5383eda14cbcSMatt Macy vdev_is_concrete(vd)) { 5384eda14cbcSMatt Macy /* 5385eda14cbcSMatt Macy * Note: we check if the vdev is concrete because when 5386eda14cbcSMatt Macy * we complete the removal, we first change the vdev to be 5387eda14cbcSMatt Macy * an indirect vdev (in open context), and then (in syncing 5388eda14cbcSMatt Macy * context) clear spa_vdev_removal. 5389eda14cbcSMatt Macy */ 5390eda14cbcSMatt Macy free_from_removing_vdev(vd, offset, size); 5391eda14cbcSMatt Macy } else if (vd->vdev_ops->vdev_op_remap != NULL) { 5392eda14cbcSMatt Macy vdev_indirect_mark_obsolete(vd, offset, size); 5393eda14cbcSMatt Macy vd->vdev_ops->vdev_op_remap(vd, offset, size, 5394eda14cbcSMatt Macy metaslab_free_impl_cb, &checkpoint); 5395eda14cbcSMatt Macy } else { 5396eda14cbcSMatt Macy metaslab_free_concrete(vd, offset, size, checkpoint); 5397eda14cbcSMatt Macy } 5398eda14cbcSMatt Macy } 5399eda14cbcSMatt Macy 5400eda14cbcSMatt Macy typedef struct remap_blkptr_cb_arg { 5401eda14cbcSMatt Macy blkptr_t *rbca_bp; 5402eda14cbcSMatt Macy spa_remap_cb_t rbca_cb; 5403eda14cbcSMatt Macy vdev_t *rbca_remap_vd; 5404eda14cbcSMatt Macy uint64_t rbca_remap_offset; 5405eda14cbcSMatt Macy void *rbca_cb_arg; 5406eda14cbcSMatt Macy } remap_blkptr_cb_arg_t; 5407eda14cbcSMatt Macy 5408eda14cbcSMatt Macy static void 5409eda14cbcSMatt Macy remap_blkptr_cb(uint64_t inner_offset, vdev_t *vd, uint64_t offset, 5410eda14cbcSMatt Macy uint64_t size, void *arg) 5411eda14cbcSMatt Macy { 5412eda14cbcSMatt Macy remap_blkptr_cb_arg_t *rbca = arg; 5413eda14cbcSMatt Macy blkptr_t *bp = rbca->rbca_bp; 5414eda14cbcSMatt Macy 5415eda14cbcSMatt Macy /* We can not remap split blocks. */ 5416eda14cbcSMatt Macy if (size != DVA_GET_ASIZE(&bp->blk_dva[0])) 5417eda14cbcSMatt Macy return; 5418eda14cbcSMatt Macy ASSERT0(inner_offset); 5419eda14cbcSMatt Macy 5420eda14cbcSMatt Macy if (rbca->rbca_cb != NULL) { 5421eda14cbcSMatt Macy /* 5422eda14cbcSMatt Macy * At this point we know that we are not handling split 5423eda14cbcSMatt Macy * blocks and we invoke the callback on the previous 5424eda14cbcSMatt Macy * vdev which must be indirect. 5425eda14cbcSMatt Macy */ 5426eda14cbcSMatt Macy ASSERT3P(rbca->rbca_remap_vd->vdev_ops, ==, &vdev_indirect_ops); 5427eda14cbcSMatt Macy 5428eda14cbcSMatt Macy rbca->rbca_cb(rbca->rbca_remap_vd->vdev_id, 5429eda14cbcSMatt Macy rbca->rbca_remap_offset, size, rbca->rbca_cb_arg); 5430eda14cbcSMatt Macy 5431eda14cbcSMatt Macy /* set up remap_blkptr_cb_arg for the next call */ 5432eda14cbcSMatt Macy rbca->rbca_remap_vd = vd; 5433eda14cbcSMatt Macy rbca->rbca_remap_offset = offset; 5434eda14cbcSMatt Macy } 5435eda14cbcSMatt Macy 5436eda14cbcSMatt Macy /* 5437eda14cbcSMatt Macy * The phys birth time is that of dva[0]. This ensures that we know 5438eda14cbcSMatt Macy * when each dva was written, so that resilver can determine which 5439eda14cbcSMatt Macy * blocks need to be scrubbed (i.e. those written during the time 5440eda14cbcSMatt Macy * the vdev was offline). It also ensures that the key used in 5441eda14cbcSMatt Macy * the ARC hash table is unique (i.e. dva[0] + phys_birth). 
If 5442eda14cbcSMatt Macy * we didn't change the phys_birth, a lookup in the ARC for a 5443eda14cbcSMatt Macy * remapped BP could find the data that was previously stored at 5444eda14cbcSMatt Macy * this vdev + offset. 5445eda14cbcSMatt Macy */ 5446eda14cbcSMatt Macy vdev_t *oldvd = vdev_lookup_top(vd->vdev_spa, 5447eda14cbcSMatt Macy DVA_GET_VDEV(&bp->blk_dva[0])); 5448eda14cbcSMatt Macy vdev_indirect_births_t *vib = oldvd->vdev_indirect_births; 5449eda14cbcSMatt Macy bp->blk_phys_birth = vdev_indirect_births_physbirth(vib, 5450eda14cbcSMatt Macy DVA_GET_OFFSET(&bp->blk_dva[0]), DVA_GET_ASIZE(&bp->blk_dva[0])); 5451eda14cbcSMatt Macy 5452eda14cbcSMatt Macy DVA_SET_VDEV(&bp->blk_dva[0], vd->vdev_id); 5453eda14cbcSMatt Macy DVA_SET_OFFSET(&bp->blk_dva[0], offset); 5454eda14cbcSMatt Macy } 5455eda14cbcSMatt Macy 5456eda14cbcSMatt Macy /* 5457eda14cbcSMatt Macy * If the block pointer contains any indirect DVAs, modify them to refer to 5458eda14cbcSMatt Macy * concrete DVAs. Note that this will sometimes not be possible, leaving 5459eda14cbcSMatt Macy * the indirect DVA in place. This happens if the indirect DVA spans multiple 5460eda14cbcSMatt Macy * segments in the mapping (i.e. it is a "split block"). 5461eda14cbcSMatt Macy * 5462eda14cbcSMatt Macy * If the BP was remapped, calls the callback on the original dva (note the 5463eda14cbcSMatt Macy * callback can be called multiple times if the original indirect DVA refers 5464eda14cbcSMatt Macy * to another indirect DVA, etc). 5465eda14cbcSMatt Macy * 5466eda14cbcSMatt Macy * Returns TRUE if the BP was remapped. 5467eda14cbcSMatt Macy */ 5468eda14cbcSMatt Macy boolean_t 5469eda14cbcSMatt Macy spa_remap_blkptr(spa_t *spa, blkptr_t *bp, spa_remap_cb_t callback, void *arg) 5470eda14cbcSMatt Macy { 5471eda14cbcSMatt Macy remap_blkptr_cb_arg_t rbca; 5472eda14cbcSMatt Macy 5473eda14cbcSMatt Macy if (!zfs_remap_blkptr_enable) 5474eda14cbcSMatt Macy return (B_FALSE); 5475eda14cbcSMatt Macy 5476eda14cbcSMatt Macy if (!spa_feature_is_enabled(spa, SPA_FEATURE_OBSOLETE_COUNTS)) 5477eda14cbcSMatt Macy return (B_FALSE); 5478eda14cbcSMatt Macy 5479eda14cbcSMatt Macy /* 5480eda14cbcSMatt Macy * Dedup BP's can not be remapped, because ddt_phys_select() depends 5481eda14cbcSMatt Macy * on DVA[0] being the same in the BP as in the DDT (dedup table). 5482eda14cbcSMatt Macy */ 5483eda14cbcSMatt Macy if (BP_GET_DEDUP(bp)) 5484eda14cbcSMatt Macy return (B_FALSE); 5485eda14cbcSMatt Macy 5486eda14cbcSMatt Macy /* 5487eda14cbcSMatt Macy * Gang blocks can not be remapped, because 5488eda14cbcSMatt Macy * zio_checksum_gang_verifier() depends on the DVA[0] that's in 5489eda14cbcSMatt Macy * the BP used to read the gang block header (GBH) being the same 5490eda14cbcSMatt Macy * as the DVA[0] that we allocated for the GBH. 5491eda14cbcSMatt Macy */ 5492eda14cbcSMatt Macy if (BP_IS_GANG(bp)) 5493eda14cbcSMatt Macy return (B_FALSE); 5494eda14cbcSMatt Macy 5495eda14cbcSMatt Macy /* 5496eda14cbcSMatt Macy * Embedded BP's have no DVA to remap. 5497eda14cbcSMatt Macy */ 5498eda14cbcSMatt Macy if (BP_GET_NDVAS(bp) < 1) 5499eda14cbcSMatt Macy return (B_FALSE); 5500eda14cbcSMatt Macy 5501eda14cbcSMatt Macy /* 5502eda14cbcSMatt Macy * Note: we only remap dva[0]. If we remapped other dvas, we 5503eda14cbcSMatt Macy * would no longer know what their phys birth txg is. 
5536eda14cbcSMatt Macy /*
5537eda14cbcSMatt Macy  * Undo the allocation of a DVA which happened in the given transaction group.
5538eda14cbcSMatt Macy  */
5539eda14cbcSMatt Macy void
5540eda14cbcSMatt Macy metaslab_unalloc_dva(spa_t *spa, const dva_t *dva, uint64_t txg)
5541eda14cbcSMatt Macy {
5542eda14cbcSMatt Macy 	metaslab_t *msp;
5543eda14cbcSMatt Macy 	vdev_t *vd;
5544eda14cbcSMatt Macy 	uint64_t vdev = DVA_GET_VDEV(dva);
5545eda14cbcSMatt Macy 	uint64_t offset = DVA_GET_OFFSET(dva);
5546eda14cbcSMatt Macy 	uint64_t size = DVA_GET_ASIZE(dva);
5547eda14cbcSMatt Macy 
5548eda14cbcSMatt Macy 	ASSERT(DVA_IS_VALID(dva));
5549eda14cbcSMatt Macy 	ASSERT3U(spa_config_held(spa, SCL_ALL, RW_READER), !=, 0);
5550eda14cbcSMatt Macy 
5551eda14cbcSMatt Macy 	if (txg > spa_freeze_txg(spa))
5552eda14cbcSMatt Macy 		return;
5553eda14cbcSMatt Macy 
5554eda14cbcSMatt Macy 	if ((vd = vdev_lookup_top(spa, vdev)) == NULL || !DVA_IS_VALID(dva) ||
5555eda14cbcSMatt Macy 	    (offset >> vd->vdev_ms_shift) >= vd->vdev_ms_count) {
5556eda14cbcSMatt Macy 		zfs_panic_recover("metaslab_unalloc_dva(): bad DVA %llu:%llu:%llu",
5557eda14cbcSMatt Macy 		    (u_longlong_t)vdev, (u_longlong_t)offset,
5558eda14cbcSMatt Macy 		    (u_longlong_t)size);
5559eda14cbcSMatt Macy 		return;
5560eda14cbcSMatt Macy 	}
5561eda14cbcSMatt Macy 
5562eda14cbcSMatt Macy 	ASSERT(!vd->vdev_removing);
5563eda14cbcSMatt Macy 	ASSERT(vdev_is_concrete(vd));
5564eda14cbcSMatt Macy 	ASSERT0(vd->vdev_indirect_config.vic_mapping_object);
5565eda14cbcSMatt Macy 	ASSERT3P(vd->vdev_indirect_mapping, ==, NULL);
5566eda14cbcSMatt Macy 
5567eda14cbcSMatt Macy 	if (DVA_GET_GANG(dva))
55686db169e9SMartin Matuska 		size = vdev_gang_header_asize(vd);
5569eda14cbcSMatt Macy 
5570eda14cbcSMatt Macy 	msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];
5571eda14cbcSMatt Macy 
5572eda14cbcSMatt Macy 	mutex_enter(&msp->ms_lock);
5573eda14cbcSMatt Macy 	range_tree_remove(msp->ms_allocating[txg & TXG_MASK],
5574eda14cbcSMatt Macy 	    offset, size);
5575eda14cbcSMatt Macy 	msp->ms_allocating_total -= size;
5576eda14cbcSMatt Macy 
5577eda14cbcSMatt Macy 	VERIFY(!msp->ms_condensing);
5578eda14cbcSMatt Macy 	VERIFY3U(offset, >=, msp->ms_start);
5579eda14cbcSMatt Macy 	VERIFY3U(offset + size, <=, msp->ms_start + msp->ms_size);
5580eda14cbcSMatt Macy 	VERIFY3U(range_tree_space(msp->ms_allocatable) + size, <=,
5581eda14cbcSMatt Macy 	    msp->ms_size);
5582eda14cbcSMatt Macy 	VERIFY0(P2PHASE(offset, 1ULL << vd->vdev_ashift));
5583eda14cbcSMatt Macy 	VERIFY0(P2PHASE(size, 1ULL << vd->vdev_ashift));
5584eda14cbcSMatt Macy 	range_tree_add(msp->ms_allocatable, offset, size);
5585eda14cbcSMatt Macy 	mutex_exit(&msp->ms_lock);
5586eda14cbcSMatt Macy }
5587eda14cbcSMatt Macy 
5588eda14cbcSMatt Macy /*
5589eda14cbcSMatt Macy  * Free the block represented by the given DVA.
5590eda14cbcSMatt Macy  */
5591eda14cbcSMatt Macy void
5592eda14cbcSMatt Macy metaslab_free_dva(spa_t *spa, const dva_t *dva, boolean_t checkpoint)
5593eda14cbcSMatt Macy {
5594eda14cbcSMatt Macy 	uint64_t vdev = DVA_GET_VDEV(dva);
5595eda14cbcSMatt Macy 	uint64_t offset = DVA_GET_OFFSET(dva);
5596eda14cbcSMatt Macy 	uint64_t size = DVA_GET_ASIZE(dva);
5597eda14cbcSMatt Macy 	vdev_t *vd = vdev_lookup_top(spa, vdev);
5598eda14cbcSMatt Macy 
5599eda14cbcSMatt Macy 	ASSERT(DVA_IS_VALID(dva));
5600eda14cbcSMatt Macy 	ASSERT3U(spa_config_held(spa, SCL_ALL, RW_READER), !=, 0);
5601eda14cbcSMatt Macy 
5602eda14cbcSMatt Macy 	if (DVA_GET_GANG(dva)) {
56036db169e9SMartin Matuska 		size = vdev_gang_header_asize(vd);
5604eda14cbcSMatt Macy 	}
5605eda14cbcSMatt Macy 
5606eda14cbcSMatt Macy 	metaslab_free_impl(vd, offset, size, checkpoint);
5607eda14cbcSMatt Macy }
5608eda14cbcSMatt Macy 
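/*
 * Note the split between the two paths above: metaslab_unalloc_dva()
 * undoes an allocation from a txg that has not yet synced by moving the
 * extent straight from ms_allocating back to ms_allocatable, whereas
 * metaslab_free_dva() frees through metaslab_free_impl() and therefore
 * handles device removal, indirect-vdev remapping, and checkpoint
 * accounting.
 */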
5609eda14cbcSMatt Macy /*
5610eda14cbcSMatt Macy  * Reserve some allocation slots. The reservation system must be called
5611eda14cbcSMatt Macy  * before we call into the allocator. If there aren't any available slots
5612eda14cbcSMatt Macy  * then the I/O will be throttled until an I/O completes and its slots are
5613eda14cbcSMatt Macy  * freed up. The function returns true if it was successful in placing
5614eda14cbcSMatt Macy  * the reservation.
5615eda14cbcSMatt Macy  */
5616eda14cbcSMatt Macy boolean_t
5617eda14cbcSMatt Macy metaslab_class_throttle_reserve(metaslab_class_t *mc, int slots, int allocator,
5618eda14cbcSMatt Macy     zio_t *zio, int flags)
5619eda14cbcSMatt Macy {
56207877fdebSMatt Macy 	metaslab_class_allocator_t *mca = &mc->mc_allocator[allocator];
56217877fdebSMatt Macy 	uint64_t max = mca->mca_alloc_max_slots;
5622eda14cbcSMatt Macy 
5623eda14cbcSMatt Macy 	ASSERT(mc->mc_alloc_throttle_enabled);
56243f9d360cSMartin Matuska 	if (GANG_ALLOCATION(flags) || (flags & METASLAB_MUST_RESERVE) ||
56253f9d360cSMartin Matuska 	    zfs_refcount_count(&mca->mca_alloc_slots) + slots <= max) {
5626eda14cbcSMatt Macy 		/*
56271f88aa09SMartin Matuska 		 * The potential race between _count() and _add() is covered
56281f88aa09SMartin Matuska 		 * by the allocator lock in most cases, or irrelevant due to
56291f88aa09SMartin Matuska 		 * GANG_ALLOCATION() or METASLAB_MUST_RESERVE set in others.
56301f88aa09SMartin Matuska 		 * But even if we assume some other nonexistent scenario, the
56311f88aa09SMartin Matuska 		 * worst that can happen is that a few more I/Os get to
56321f88aa09SMartin Matuska 		 * allocation earlier, which is not a problem.
56331f88aa09SMartin Matuska 		 *
5634eda14cbcSMatt Macy 		 * We reserve the slots individually so that we can unreserve
5635eda14cbcSMatt Macy 		 * them individually when an I/O completes.
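		 *
		 * For example (illustrative values): a zio that reserves 3
		 * slots here holds 3 references on mca_alloc_slots, and
		 * metaslab_class_throttle_unreserve() drops those same
		 * references when the I/O completes.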
5636eda14cbcSMatt Macy */ 56377877fdebSMatt Macy for (int d = 0; d < slots; d++) 56387877fdebSMatt Macy zfs_refcount_add(&mca->mca_alloc_slots, zio); 5639eda14cbcSMatt Macy zio->io_flags |= ZIO_FLAG_IO_ALLOCATING; 56403f9d360cSMartin Matuska return (B_TRUE); 5641eda14cbcSMatt Macy } 56423f9d360cSMartin Matuska return (B_FALSE); 5643eda14cbcSMatt Macy } 5644eda14cbcSMatt Macy 5645eda14cbcSMatt Macy void 5646eda14cbcSMatt Macy metaslab_class_throttle_unreserve(metaslab_class_t *mc, int slots, 5647eda14cbcSMatt Macy int allocator, zio_t *zio) 5648eda14cbcSMatt Macy { 56497877fdebSMatt Macy metaslab_class_allocator_t *mca = &mc->mc_allocator[allocator]; 56507877fdebSMatt Macy 5651eda14cbcSMatt Macy ASSERT(mc->mc_alloc_throttle_enabled); 56527877fdebSMatt Macy for (int d = 0; d < slots; d++) 56537877fdebSMatt Macy zfs_refcount_remove(&mca->mca_alloc_slots, zio); 5654eda14cbcSMatt Macy } 5655eda14cbcSMatt Macy 5656eda14cbcSMatt Macy static int 5657eda14cbcSMatt Macy metaslab_claim_concrete(vdev_t *vd, uint64_t offset, uint64_t size, 5658eda14cbcSMatt Macy uint64_t txg) 5659eda14cbcSMatt Macy { 5660eda14cbcSMatt Macy metaslab_t *msp; 5661eda14cbcSMatt Macy spa_t *spa = vd->vdev_spa; 5662eda14cbcSMatt Macy int error = 0; 5663eda14cbcSMatt Macy 5664eda14cbcSMatt Macy if (offset >> vd->vdev_ms_shift >= vd->vdev_ms_count) 5665eda14cbcSMatt Macy return (SET_ERROR(ENXIO)); 5666eda14cbcSMatt Macy 5667eda14cbcSMatt Macy ASSERT3P(vd->vdev_ms, !=, NULL); 5668eda14cbcSMatt Macy msp = vd->vdev_ms[offset >> vd->vdev_ms_shift]; 5669eda14cbcSMatt Macy 5670eda14cbcSMatt Macy mutex_enter(&msp->ms_lock); 5671eda14cbcSMatt Macy 5672eda14cbcSMatt Macy if ((txg != 0 && spa_writeable(spa)) || !msp->ms_loaded) { 5673eda14cbcSMatt Macy error = metaslab_activate(msp, 0, METASLAB_WEIGHT_CLAIM); 5674eda14cbcSMatt Macy if (error == EBUSY) { 5675eda14cbcSMatt Macy ASSERT(msp->ms_loaded); 5676eda14cbcSMatt Macy ASSERT(msp->ms_weight & METASLAB_ACTIVE_MASK); 5677eda14cbcSMatt Macy error = 0; 5678eda14cbcSMatt Macy } 5679eda14cbcSMatt Macy } 5680eda14cbcSMatt Macy 5681eda14cbcSMatt Macy if (error == 0 && 5682eda14cbcSMatt Macy !range_tree_contains(msp->ms_allocatable, offset, size)) 5683eda14cbcSMatt Macy error = SET_ERROR(ENOENT); 5684eda14cbcSMatt Macy 5685eda14cbcSMatt Macy if (error || txg == 0) { /* txg == 0 indicates dry run */ 5686eda14cbcSMatt Macy mutex_exit(&msp->ms_lock); 5687eda14cbcSMatt Macy return (error); 5688eda14cbcSMatt Macy } 5689eda14cbcSMatt Macy 5690eda14cbcSMatt Macy VERIFY(!msp->ms_condensing); 5691eda14cbcSMatt Macy VERIFY0(P2PHASE(offset, 1ULL << vd->vdev_ashift)); 5692eda14cbcSMatt Macy VERIFY0(P2PHASE(size, 1ULL << vd->vdev_ashift)); 5693eda14cbcSMatt Macy VERIFY3U(range_tree_space(msp->ms_allocatable) - size, <=, 5694eda14cbcSMatt Macy msp->ms_size); 5695eda14cbcSMatt Macy range_tree_remove(msp->ms_allocatable, offset, size); 5696eda14cbcSMatt Macy range_tree_clear(msp->ms_trim, offset, size); 5697eda14cbcSMatt Macy 56987877fdebSMatt Macy if (spa_writeable(spa)) { /* don't dirty if we're zdb(8) */ 5699eda14cbcSMatt Macy metaslab_class_t *mc = msp->ms_group->mg_class; 5700eda14cbcSMatt Macy multilist_sublist_t *mls = 57013ff01b23SMartin Matuska multilist_sublist_lock_obj(&mc->mc_metaslab_txg_list, msp); 5702eda14cbcSMatt Macy if (!multilist_link_active(&msp->ms_class_txg_node)) { 5703eda14cbcSMatt Macy msp->ms_selected_txg = txg; 5704eda14cbcSMatt Macy multilist_sublist_insert_head(mls, msp); 5705eda14cbcSMatt Macy } 5706eda14cbcSMatt Macy multilist_sublist_unlock(mls); 5707eda14cbcSMatt Macy 
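		/*
		 * Dirty the vdev only when this is the first allocation
		 * recorded for the metaslab in this txg; a non-empty
		 * allocating tree means the metaslab was dirtied already.
		 */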
5708eda14cbcSMatt Macy if (range_tree_is_empty(msp->ms_allocating[txg & TXG_MASK])) 5709eda14cbcSMatt Macy vdev_dirty(vd, VDD_METASLAB, msp, txg); 5710eda14cbcSMatt Macy range_tree_add(msp->ms_allocating[txg & TXG_MASK], 5711eda14cbcSMatt Macy offset, size); 5712eda14cbcSMatt Macy msp->ms_allocating_total += size; 5713eda14cbcSMatt Macy } 5714eda14cbcSMatt Macy 5715eda14cbcSMatt Macy mutex_exit(&msp->ms_lock); 5716eda14cbcSMatt Macy 5717eda14cbcSMatt Macy return (0); 5718eda14cbcSMatt Macy } 5719eda14cbcSMatt Macy 5720eda14cbcSMatt Macy typedef struct metaslab_claim_cb_arg_t { 5721eda14cbcSMatt Macy uint64_t mcca_txg; 5722eda14cbcSMatt Macy int mcca_error; 5723eda14cbcSMatt Macy } metaslab_claim_cb_arg_t; 5724eda14cbcSMatt Macy 5725eda14cbcSMatt Macy static void 5726eda14cbcSMatt Macy metaslab_claim_impl_cb(uint64_t inner_offset, vdev_t *vd, uint64_t offset, 5727eda14cbcSMatt Macy uint64_t size, void *arg) 5728eda14cbcSMatt Macy { 5729e92ffd9bSMartin Matuska (void) inner_offset; 5730eda14cbcSMatt Macy metaslab_claim_cb_arg_t *mcca_arg = arg; 5731eda14cbcSMatt Macy 5732eda14cbcSMatt Macy if (mcca_arg->mcca_error == 0) { 5733eda14cbcSMatt Macy mcca_arg->mcca_error = metaslab_claim_concrete(vd, offset, 5734eda14cbcSMatt Macy size, mcca_arg->mcca_txg); 5735eda14cbcSMatt Macy } 5736eda14cbcSMatt Macy } 5737eda14cbcSMatt Macy 5738eda14cbcSMatt Macy int 5739eda14cbcSMatt Macy metaslab_claim_impl(vdev_t *vd, uint64_t offset, uint64_t size, uint64_t txg) 5740eda14cbcSMatt Macy { 5741eda14cbcSMatt Macy if (vd->vdev_ops->vdev_op_remap != NULL) { 5742eda14cbcSMatt Macy metaslab_claim_cb_arg_t arg; 5743eda14cbcSMatt Macy 5744eda14cbcSMatt Macy /* 57457877fdebSMatt Macy * Only zdb(8) can claim on indirect vdevs. This is used 5746eda14cbcSMatt Macy * to detect leaks of mapped space (that are not accounted 5747eda14cbcSMatt Macy * for in the obsolete counts, spacemap, or bpobj). 5748eda14cbcSMatt Macy */ 5749eda14cbcSMatt Macy ASSERT(!spa_writeable(vd->vdev_spa)); 5750eda14cbcSMatt Macy arg.mcca_error = 0; 5751eda14cbcSMatt Macy arg.mcca_txg = txg; 5752eda14cbcSMatt Macy 5753eda14cbcSMatt Macy vd->vdev_ops->vdev_op_remap(vd, offset, size, 5754eda14cbcSMatt Macy metaslab_claim_impl_cb, &arg); 5755eda14cbcSMatt Macy 5756eda14cbcSMatt Macy if (arg.mcca_error == 0) { 5757eda14cbcSMatt Macy arg.mcca_error = metaslab_claim_concrete(vd, 5758eda14cbcSMatt Macy offset, size, txg); 5759eda14cbcSMatt Macy } 5760eda14cbcSMatt Macy return (arg.mcca_error); 5761eda14cbcSMatt Macy } else { 5762eda14cbcSMatt Macy return (metaslab_claim_concrete(vd, offset, size, txg)); 5763eda14cbcSMatt Macy } 5764eda14cbcSMatt Macy } 5765eda14cbcSMatt Macy 5766eda14cbcSMatt Macy /* 5767eda14cbcSMatt Macy * Intent log support: upon opening the pool after a crash, notify the SPA 5768eda14cbcSMatt Macy * of blocks that the intent log has allocated for immediate write, but 5769eda14cbcSMatt Macy * which are still considered free by the SPA because the last transaction 5770eda14cbcSMatt Macy * group didn't commit yet. 
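 *
 * For example, a ZIL block written shortly before a crash belongs to a
 * txg that never synced; zil_claim() runs during pool import and claims
 * the block through this path so its space cannot be reallocated out
 * from under the log.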
5771eda14cbcSMatt Macy */ 5772eda14cbcSMatt Macy static int 5773eda14cbcSMatt Macy metaslab_claim_dva(spa_t *spa, const dva_t *dva, uint64_t txg) 5774eda14cbcSMatt Macy { 5775eda14cbcSMatt Macy uint64_t vdev = DVA_GET_VDEV(dva); 5776eda14cbcSMatt Macy uint64_t offset = DVA_GET_OFFSET(dva); 5777eda14cbcSMatt Macy uint64_t size = DVA_GET_ASIZE(dva); 5778eda14cbcSMatt Macy vdev_t *vd; 5779eda14cbcSMatt Macy 5780eda14cbcSMatt Macy if ((vd = vdev_lookup_top(spa, vdev)) == NULL) { 5781eda14cbcSMatt Macy return (SET_ERROR(ENXIO)); 5782eda14cbcSMatt Macy } 5783eda14cbcSMatt Macy 5784eda14cbcSMatt Macy ASSERT(DVA_IS_VALID(dva)); 5785eda14cbcSMatt Macy 5786eda14cbcSMatt Macy if (DVA_GET_GANG(dva)) 57876db169e9SMartin Matuska size = vdev_gang_header_asize(vd); 5788eda14cbcSMatt Macy 5789eda14cbcSMatt Macy return (metaslab_claim_impl(vd, offset, size, txg)); 5790eda14cbcSMatt Macy } 5791eda14cbcSMatt Macy 5792eda14cbcSMatt Macy int 5793eda14cbcSMatt Macy metaslab_alloc(spa_t *spa, metaslab_class_t *mc, uint64_t psize, blkptr_t *bp, 5794eda14cbcSMatt Macy int ndvas, uint64_t txg, blkptr_t *hintbp, int flags, 5795eda14cbcSMatt Macy zio_alloc_list_t *zal, zio_t *zio, int allocator) 5796eda14cbcSMatt Macy { 5797eda14cbcSMatt Macy dva_t *dva = bp->blk_dva; 5798eda14cbcSMatt Macy dva_t *hintdva = (hintbp != NULL) ? hintbp->blk_dva : NULL; 5799eda14cbcSMatt Macy int error = 0; 5800eda14cbcSMatt Macy 5801eda14cbcSMatt Macy ASSERT(bp->blk_birth == 0); 5802eda14cbcSMatt Macy ASSERT(BP_PHYSICAL_BIRTH(bp) == 0); 5803eda14cbcSMatt Macy 5804eda14cbcSMatt Macy spa_config_enter(spa, SCL_ALLOC, FTAG, RW_READER); 5805eda14cbcSMatt Macy 58067877fdebSMatt Macy if (mc->mc_allocator[allocator].mca_rotor == NULL) { 58077877fdebSMatt Macy /* no vdevs in this class */ 5808eda14cbcSMatt Macy spa_config_exit(spa, SCL_ALLOC, FTAG); 5809eda14cbcSMatt Macy return (SET_ERROR(ENOSPC)); 5810eda14cbcSMatt Macy } 5811eda14cbcSMatt Macy 5812eda14cbcSMatt Macy ASSERT(ndvas > 0 && ndvas <= spa_max_replication(spa)); 5813eda14cbcSMatt Macy ASSERT(BP_GET_NDVAS(bp) == 0); 5814eda14cbcSMatt Macy ASSERT(hintbp == NULL || ndvas <= BP_GET_NDVAS(hintbp)); 5815eda14cbcSMatt Macy ASSERT3P(zal, !=, NULL); 5816eda14cbcSMatt Macy 5817eda14cbcSMatt Macy for (int d = 0; d < ndvas; d++) { 5818eda14cbcSMatt Macy error = metaslab_alloc_dva(spa, mc, psize, dva, d, hintdva, 5819eda14cbcSMatt Macy txg, flags, zal, allocator); 5820eda14cbcSMatt Macy if (error != 0) { 5821eda14cbcSMatt Macy for (d--; d >= 0; d--) { 5822eda14cbcSMatt Macy metaslab_unalloc_dva(spa, &dva[d], txg); 5823eda14cbcSMatt Macy metaslab_group_alloc_decrement(spa, 5824eda14cbcSMatt Macy DVA_GET_VDEV(&dva[d]), zio, flags, 5825eda14cbcSMatt Macy allocator, B_FALSE); 5826da5137abSMartin Matuska memset(&dva[d], 0, sizeof (dva_t)); 5827eda14cbcSMatt Macy } 5828eda14cbcSMatt Macy spa_config_exit(spa, SCL_ALLOC, FTAG); 5829eda14cbcSMatt Macy return (error); 5830eda14cbcSMatt Macy } else { 5831eda14cbcSMatt Macy /* 5832eda14cbcSMatt Macy * Update the metaslab group's queue depth 5833eda14cbcSMatt Macy * based on the newly allocated dva. 
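			 *
			 * This increment is mirrored by the
			 * metaslab_group_alloc_decrement() calls in the
			 * error path above, so queue-depth accounting stays
			 * balanced even when a multi-DVA allocation fails
			 * partway through.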
5834eda14cbcSMatt Macy 			 */
5835eda14cbcSMatt Macy 			metaslab_group_alloc_increment(spa,
5836eda14cbcSMatt Macy 			    DVA_GET_VDEV(&dva[d]), zio, flags, allocator);
5837eda14cbcSMatt Macy 		}
5838eda14cbcSMatt Macy 	}
5839eda14cbcSMatt Macy 	ASSERT(error == 0);
5840eda14cbcSMatt Macy 	ASSERT(BP_GET_NDVAS(bp) == ndvas);
5841eda14cbcSMatt Macy 
5842eda14cbcSMatt Macy 	spa_config_exit(spa, SCL_ALLOC, FTAG);
5843eda14cbcSMatt Macy 
5844eda14cbcSMatt Macy 	BP_SET_BIRTH(bp, txg, 0);
5845eda14cbcSMatt Macy 
5846eda14cbcSMatt Macy 	return (0);
5847eda14cbcSMatt Macy }
5848eda14cbcSMatt Macy 
5849eda14cbcSMatt Macy void
5850eda14cbcSMatt Macy metaslab_free(spa_t *spa, const blkptr_t *bp, uint64_t txg, boolean_t now)
5851eda14cbcSMatt Macy {
5852eda14cbcSMatt Macy 	const dva_t *dva = bp->blk_dva;
5853eda14cbcSMatt Macy 	int ndvas = BP_GET_NDVAS(bp);
5854eda14cbcSMatt Macy 
5855eda14cbcSMatt Macy 	ASSERT(!BP_IS_HOLE(bp));
5856eda14cbcSMatt Macy 	ASSERT(!now || bp->blk_birth >= spa_syncing_txg(spa));
5857eda14cbcSMatt Macy 
5858eda14cbcSMatt Macy 	/*
5859eda14cbcSMatt Macy 	 * If we have a checkpoint for the pool we need to make sure that
5860eda14cbcSMatt Macy 	 * the blocks that we free that are part of the checkpoint won't be
5861eda14cbcSMatt Macy 	 * reused until the checkpoint is discarded or we revert to it.
5862eda14cbcSMatt Macy 	 *
5863eda14cbcSMatt Macy 	 * The checkpoint flag is passed down the metaslab_free code path
5864eda14cbcSMatt Macy 	 * and is set whenever we want to add a block to the checkpoint's
5865eda14cbcSMatt Macy 	 * accounting. That is, we "checkpoint" blocks that existed at the
5866eda14cbcSMatt Macy 	 * time the checkpoint was created and are therefore referenced by
5867eda14cbcSMatt Macy 	 * the checkpointed uberblock.
5868eda14cbcSMatt Macy 	 *
5869eda14cbcSMatt Macy 	 * Note that we don't checkpoint any blocks if the current
5870eda14cbcSMatt Macy 	 * syncing txg <= spa_checkpoint_txg. We want these frees to sync
5871eda14cbcSMatt Macy 	 * normally as they will be referenced by the checkpointed uberblock.
5872eda14cbcSMatt Macy 	 */
5873eda14cbcSMatt Macy 	boolean_t checkpoint = B_FALSE;
5874eda14cbcSMatt Macy 	if (bp->blk_birth <= spa->spa_checkpoint_txg &&
5875eda14cbcSMatt Macy 	    spa_syncing_txg(spa) > spa->spa_checkpoint_txg) {
5876eda14cbcSMatt Macy 		/*
5877eda14cbcSMatt Macy 		 * At this point, if the block is part of the checkpoint
5878eda14cbcSMatt Macy 		 * there is no way it was created in the current txg.
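		 *
		 * Illustrative numbers: with spa_checkpoint_txg == 150 and
		 * a syncing txg of 160, a freed block born at txg 100 takes
		 * this path, while one born at txg 155 is freed normally,
		 * since it did not exist when the checkpoint was taken.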
5879eda14cbcSMatt Macy */ 5880eda14cbcSMatt Macy ASSERT(!now); 5881eda14cbcSMatt Macy ASSERT3U(spa_syncing_txg(spa), ==, txg); 5882eda14cbcSMatt Macy checkpoint = B_TRUE; 5883eda14cbcSMatt Macy } 5884eda14cbcSMatt Macy 5885eda14cbcSMatt Macy spa_config_enter(spa, SCL_FREE, FTAG, RW_READER); 5886eda14cbcSMatt Macy 5887eda14cbcSMatt Macy for (int d = 0; d < ndvas; d++) { 5888eda14cbcSMatt Macy if (now) { 5889eda14cbcSMatt Macy metaslab_unalloc_dva(spa, &dva[d], txg); 5890eda14cbcSMatt Macy } else { 5891eda14cbcSMatt Macy ASSERT3U(txg, ==, spa_syncing_txg(spa)); 5892eda14cbcSMatt Macy metaslab_free_dva(spa, &dva[d], checkpoint); 5893eda14cbcSMatt Macy } 5894eda14cbcSMatt Macy } 5895eda14cbcSMatt Macy 5896eda14cbcSMatt Macy spa_config_exit(spa, SCL_FREE, FTAG); 5897eda14cbcSMatt Macy } 5898eda14cbcSMatt Macy 5899eda14cbcSMatt Macy int 5900eda14cbcSMatt Macy metaslab_claim(spa_t *spa, const blkptr_t *bp, uint64_t txg) 5901eda14cbcSMatt Macy { 5902eda14cbcSMatt Macy const dva_t *dva = bp->blk_dva; 5903eda14cbcSMatt Macy int ndvas = BP_GET_NDVAS(bp); 5904eda14cbcSMatt Macy int error = 0; 5905eda14cbcSMatt Macy 5906eda14cbcSMatt Macy ASSERT(!BP_IS_HOLE(bp)); 5907eda14cbcSMatt Macy 5908eda14cbcSMatt Macy if (txg != 0) { 5909eda14cbcSMatt Macy /* 5910eda14cbcSMatt Macy * First do a dry run to make sure all DVAs are claimable, 5911eda14cbcSMatt Macy * so we don't have to unwind from partial failures below. 5912eda14cbcSMatt Macy */ 5913eda14cbcSMatt Macy if ((error = metaslab_claim(spa, bp, 0)) != 0) 5914eda14cbcSMatt Macy return (error); 5915eda14cbcSMatt Macy } 5916eda14cbcSMatt Macy 5917eda14cbcSMatt Macy spa_config_enter(spa, SCL_ALLOC, FTAG, RW_READER); 5918eda14cbcSMatt Macy 5919eda14cbcSMatt Macy for (int d = 0; d < ndvas; d++) { 5920eda14cbcSMatt Macy error = metaslab_claim_dva(spa, &dva[d], txg); 5921eda14cbcSMatt Macy if (error != 0) 5922eda14cbcSMatt Macy break; 5923eda14cbcSMatt Macy } 5924eda14cbcSMatt Macy 5925eda14cbcSMatt Macy spa_config_exit(spa, SCL_ALLOC, FTAG); 5926eda14cbcSMatt Macy 5927eda14cbcSMatt Macy ASSERT(error == 0 || txg == 0); 5928eda14cbcSMatt Macy 5929eda14cbcSMatt Macy return (error); 5930eda14cbcSMatt Macy } 5931eda14cbcSMatt Macy 5932eda14cbcSMatt Macy void 5933eda14cbcSMatt Macy metaslab_fastwrite_mark(spa_t *spa, const blkptr_t *bp) 5934eda14cbcSMatt Macy { 5935eda14cbcSMatt Macy const dva_t *dva = bp->blk_dva; 5936eda14cbcSMatt Macy int ndvas = BP_GET_NDVAS(bp); 5937eda14cbcSMatt Macy uint64_t psize = BP_GET_PSIZE(bp); 5938eda14cbcSMatt Macy int d; 5939eda14cbcSMatt Macy vdev_t *vd; 5940eda14cbcSMatt Macy 5941eda14cbcSMatt Macy ASSERT(!BP_IS_HOLE(bp)); 5942eda14cbcSMatt Macy ASSERT(!BP_IS_EMBEDDED(bp)); 5943eda14cbcSMatt Macy ASSERT(psize > 0); 5944eda14cbcSMatt Macy 5945eda14cbcSMatt Macy spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER); 5946eda14cbcSMatt Macy 5947eda14cbcSMatt Macy for (d = 0; d < ndvas; d++) { 5948eda14cbcSMatt Macy if ((vd = vdev_lookup_top(spa, DVA_GET_VDEV(&dva[d]))) == NULL) 5949eda14cbcSMatt Macy continue; 5950eda14cbcSMatt Macy atomic_add_64(&vd->vdev_pending_fastwrite, psize); 5951eda14cbcSMatt Macy } 5952eda14cbcSMatt Macy 5953eda14cbcSMatt Macy spa_config_exit(spa, SCL_VDEV, FTAG); 5954eda14cbcSMatt Macy } 5955eda14cbcSMatt Macy 5956eda14cbcSMatt Macy void 5957eda14cbcSMatt Macy metaslab_fastwrite_unmark(spa_t *spa, const blkptr_t *bp) 5958eda14cbcSMatt Macy { 5959eda14cbcSMatt Macy const dva_t *dva = bp->blk_dva; 5960eda14cbcSMatt Macy int ndvas = BP_GET_NDVAS(bp); 5961eda14cbcSMatt Macy uint64_t psize = BP_GET_PSIZE(bp); 
5962eda14cbcSMatt Macy 	int d;
5963eda14cbcSMatt Macy 	vdev_t *vd;
5964eda14cbcSMatt Macy 
5965eda14cbcSMatt Macy 	ASSERT(!BP_IS_HOLE(bp));
5966eda14cbcSMatt Macy 	ASSERT(!BP_IS_EMBEDDED(bp));
5967eda14cbcSMatt Macy 	ASSERT(psize > 0);
5968eda14cbcSMatt Macy 
5969eda14cbcSMatt Macy 	spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
5970eda14cbcSMatt Macy 
5971eda14cbcSMatt Macy 	for (d = 0; d < ndvas; d++) {
5972eda14cbcSMatt Macy 		if ((vd = vdev_lookup_top(spa, DVA_GET_VDEV(&dva[d]))) == NULL)
5973eda14cbcSMatt Macy 			continue;
5974eda14cbcSMatt Macy 		ASSERT3U(vd->vdev_pending_fastwrite, >=, psize);
5975eda14cbcSMatt Macy 		atomic_sub_64(&vd->vdev_pending_fastwrite, psize);
5976eda14cbcSMatt Macy 	}
5977eda14cbcSMatt Macy 
5978eda14cbcSMatt Macy 	spa_config_exit(spa, SCL_VDEV, FTAG);
5979eda14cbcSMatt Macy }
5980eda14cbcSMatt Macy 
5981eda14cbcSMatt Macy static void
5982eda14cbcSMatt Macy metaslab_check_free_impl_cb(uint64_t inner, vdev_t *vd, uint64_t offset,
5983eda14cbcSMatt Macy     uint64_t size, void *arg)
5984eda14cbcSMatt Macy {
5985e92ffd9bSMartin Matuska 	(void) inner, (void) arg;
5986e92ffd9bSMartin Matuska 
5987eda14cbcSMatt Macy 	if (vd->vdev_ops == &vdev_indirect_ops)
5988eda14cbcSMatt Macy 		return;
5989eda14cbcSMatt Macy 
5990eda14cbcSMatt Macy 	metaslab_check_free_impl(vd, offset, size);
5991eda14cbcSMatt Macy }
5992eda14cbcSMatt Macy 
5993eda14cbcSMatt Macy static void
5994eda14cbcSMatt Macy metaslab_check_free_impl(vdev_t *vd, uint64_t offset, uint64_t size)
5995eda14cbcSMatt Macy {
5996eda14cbcSMatt Macy 	metaslab_t *msp;
5997eda14cbcSMatt Macy 	spa_t *spa __maybe_unused = vd->vdev_spa;
5998eda14cbcSMatt Macy 
5999eda14cbcSMatt Macy 	if ((zfs_flags & ZFS_DEBUG_ZIO_FREE) == 0)
6000eda14cbcSMatt Macy 		return;
6001eda14cbcSMatt Macy 
6002eda14cbcSMatt Macy 	if (vd->vdev_ops->vdev_op_remap != NULL) {
6003eda14cbcSMatt Macy 		vd->vdev_ops->vdev_op_remap(vd, offset, size,
6004eda14cbcSMatt Macy 		    metaslab_check_free_impl_cb, NULL);
6005eda14cbcSMatt Macy 		return;
6006eda14cbcSMatt Macy 	}
6007eda14cbcSMatt Macy 
6008eda14cbcSMatt Macy 	ASSERT(vdev_is_concrete(vd));
6009eda14cbcSMatt Macy 	ASSERT3U(offset >> vd->vdev_ms_shift, <, vd->vdev_ms_count);
6010eda14cbcSMatt Macy 	ASSERT3U(spa_config_held(spa, SCL_ALL, RW_READER), !=, 0);
6011eda14cbcSMatt Macy 
6012eda14cbcSMatt Macy 	msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];
6013eda14cbcSMatt Macy 
6014eda14cbcSMatt Macy 	mutex_enter(&msp->ms_lock);
6015eda14cbcSMatt Macy 	if (msp->ms_loaded) {
6016eda14cbcSMatt Macy 		range_tree_verify_not_present(msp->ms_allocatable,
6017eda14cbcSMatt Macy 		    offset, size);
6018eda14cbcSMatt Macy 	}
6019eda14cbcSMatt Macy 
6020eda14cbcSMatt Macy 	/*
6021eda14cbcSMatt Macy 	 * Check all segments that currently exist in the freeing pipeline.
6022eda14cbcSMatt Macy 	 *
6023eda14cbcSMatt Macy 	 * It would intuitively make sense to also check the current allocating
6024eda14cbcSMatt Macy 	 * tree since metaslab_unalloc_dva() exists for extents that are
6025eda14cbcSMatt Macy 	 * allocated and freed in the same sync pass within the same txg.
6026eda14cbcSMatt Macy 	 * Unfortunately there are places (e.g. the ZIL) where we allocate a
6027eda14cbcSMatt Macy 	 * segment but then we free part of it within the same txg
6028eda14cbcSMatt Macy 	 * [see zil_sync()]. Thus, we don't call range_tree_verify_not_present()
6029eda14cbcSMatt Macy 	 * on the current allocating tree.
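	 *
	 * ms_trim, by contrast, only ever contains free segments, so an
	 * extent that is being freed must not appear there either; it is
	 * verified below along with the freeing trees.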
6030eda14cbcSMatt Macy */ 6031eda14cbcSMatt Macy range_tree_verify_not_present(msp->ms_freeing, offset, size); 6032eda14cbcSMatt Macy range_tree_verify_not_present(msp->ms_checkpointing, offset, size); 6033eda14cbcSMatt Macy range_tree_verify_not_present(msp->ms_freed, offset, size); 6034eda14cbcSMatt Macy for (int j = 0; j < TXG_DEFER_SIZE; j++) 6035eda14cbcSMatt Macy range_tree_verify_not_present(msp->ms_defer[j], offset, size); 6036eda14cbcSMatt Macy range_tree_verify_not_present(msp->ms_trim, offset, size); 6037eda14cbcSMatt Macy mutex_exit(&msp->ms_lock); 6038eda14cbcSMatt Macy } 6039eda14cbcSMatt Macy 6040eda14cbcSMatt Macy void 6041eda14cbcSMatt Macy metaslab_check_free(spa_t *spa, const blkptr_t *bp) 6042eda14cbcSMatt Macy { 6043eda14cbcSMatt Macy if ((zfs_flags & ZFS_DEBUG_ZIO_FREE) == 0) 6044eda14cbcSMatt Macy return; 6045eda14cbcSMatt Macy 6046eda14cbcSMatt Macy spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER); 6047eda14cbcSMatt Macy for (int i = 0; i < BP_GET_NDVAS(bp); i++) { 6048eda14cbcSMatt Macy uint64_t vdev = DVA_GET_VDEV(&bp->blk_dva[i]); 6049eda14cbcSMatt Macy vdev_t *vd = vdev_lookup_top(spa, vdev); 6050eda14cbcSMatt Macy uint64_t offset = DVA_GET_OFFSET(&bp->blk_dva[i]); 6051eda14cbcSMatt Macy uint64_t size = DVA_GET_ASIZE(&bp->blk_dva[i]); 6052eda14cbcSMatt Macy 6053eda14cbcSMatt Macy if (DVA_GET_GANG(&bp->blk_dva[i])) 60546db169e9SMartin Matuska size = vdev_gang_header_asize(vd); 6055eda14cbcSMatt Macy 6056eda14cbcSMatt Macy ASSERT3P(vd, !=, NULL); 6057eda14cbcSMatt Macy 6058eda14cbcSMatt Macy metaslab_check_free_impl(vd, offset, size); 6059eda14cbcSMatt Macy } 6060eda14cbcSMatt Macy spa_config_exit(spa, SCL_VDEV, FTAG); 6061eda14cbcSMatt Macy } 6062eda14cbcSMatt Macy 6063eda14cbcSMatt Macy static void 6064eda14cbcSMatt Macy metaslab_group_disable_wait(metaslab_group_t *mg) 6065eda14cbcSMatt Macy { 6066eda14cbcSMatt Macy ASSERT(MUTEX_HELD(&mg->mg_ms_disabled_lock)); 6067eda14cbcSMatt Macy while (mg->mg_disabled_updating) { 6068eda14cbcSMatt Macy cv_wait(&mg->mg_ms_disabled_cv, &mg->mg_ms_disabled_lock); 6069eda14cbcSMatt Macy } 6070eda14cbcSMatt Macy } 6071eda14cbcSMatt Macy 6072eda14cbcSMatt Macy static void 6073eda14cbcSMatt Macy metaslab_group_disabled_increment(metaslab_group_t *mg) 6074eda14cbcSMatt Macy { 6075eda14cbcSMatt Macy ASSERT(MUTEX_HELD(&mg->mg_ms_disabled_lock)); 6076eda14cbcSMatt Macy ASSERT(mg->mg_disabled_updating); 6077eda14cbcSMatt Macy 6078eda14cbcSMatt Macy while (mg->mg_ms_disabled >= max_disabled_ms) { 6079eda14cbcSMatt Macy cv_wait(&mg->mg_ms_disabled_cv, &mg->mg_ms_disabled_lock); 6080eda14cbcSMatt Macy } 6081eda14cbcSMatt Macy mg->mg_ms_disabled++; 6082eda14cbcSMatt Macy ASSERT3U(mg->mg_ms_disabled, <=, max_disabled_ms); 6083eda14cbcSMatt Macy } 6084eda14cbcSMatt Macy 6085eda14cbcSMatt Macy /* 6086eda14cbcSMatt Macy * Mark the metaslab as disabled to prevent any allocations on this metaslab. 6087eda14cbcSMatt Macy * We must also track how many metaslabs are currently disabled within a 6088eda14cbcSMatt Macy * metaslab group and limit them to prevent allocation failures from 6089eda14cbcSMatt Macy * occurring because all metaslabs are disabled. 
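 *
 * For example, the TRIM and initialize threads disable a metaslab while
 * they operate on its free space; metaslab_group_disabled_increment()
 * above caps the number of concurrently disabled metaslabs in a group
 * at max_disabled_ms.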
6090eda14cbcSMatt Macy  */
6091eda14cbcSMatt Macy void
6092eda14cbcSMatt Macy metaslab_disable(metaslab_t *msp)
6093eda14cbcSMatt Macy {
6094eda14cbcSMatt Macy 	ASSERT(!MUTEX_HELD(&msp->ms_lock));
6095eda14cbcSMatt Macy 	metaslab_group_t *mg = msp->ms_group;
6096eda14cbcSMatt Macy 
6097eda14cbcSMatt Macy 	mutex_enter(&mg->mg_ms_disabled_lock);
6098eda14cbcSMatt Macy 
6099eda14cbcSMatt Macy 	/*
6100eda14cbcSMatt Macy 	 * To keep an accurate count of how many threads have disabled
6101eda14cbcSMatt Macy 	 * a specific metaslab group, we only allow one thread to mark
6102eda14cbcSMatt Macy 	 * the metaslab group at a time. This ensures that the value of
6103eda14cbcSMatt Macy 	 * ms_disabled will be accurate when we decide to mark a metaslab
6104eda14cbcSMatt Macy 	 * group as disabled. To do this, we force all other threads
6105eda14cbcSMatt Macy 	 * to wait until the metaslab group's mg_disabled_updating flag
6106eda14cbcSMatt Macy 	 * is no longer set.
6107eda14cbcSMatt Macy 	 */
6108eda14cbcSMatt Macy 	metaslab_group_disable_wait(mg);
6109eda14cbcSMatt Macy 	mg->mg_disabled_updating = B_TRUE;
6110eda14cbcSMatt Macy 	if (msp->ms_disabled == 0) {
6111eda14cbcSMatt Macy 		metaslab_group_disabled_increment(mg);
6112eda14cbcSMatt Macy 	}
6113eda14cbcSMatt Macy 	mutex_enter(&msp->ms_lock);
6114eda14cbcSMatt Macy 	msp->ms_disabled++;
6115eda14cbcSMatt Macy 	mutex_exit(&msp->ms_lock);
6116eda14cbcSMatt Macy 
6117eda14cbcSMatt Macy 	mg->mg_disabled_updating = B_FALSE;
6118eda14cbcSMatt Macy 	cv_broadcast(&mg->mg_ms_disabled_cv);
6119eda14cbcSMatt Macy 	mutex_exit(&mg->mg_ms_disabled_lock);
6120eda14cbcSMatt Macy }
6121eda14cbcSMatt Macy 
6122eda14cbcSMatt Macy void
6123eda14cbcSMatt Macy metaslab_enable(metaslab_t *msp, boolean_t sync, boolean_t unload)
6124eda14cbcSMatt Macy {
6125eda14cbcSMatt Macy 	metaslab_group_t *mg = msp->ms_group;
6126eda14cbcSMatt Macy 	spa_t *spa = mg->mg_vd->vdev_spa;
6127eda14cbcSMatt Macy 
6128eda14cbcSMatt Macy 	/*
6129eda14cbcSMatt Macy 	 * Wait for the outstanding IO to be synced to prevent newly
6130eda14cbcSMatt Macy 	 * allocated blocks from being overwritten. This is used by
6131eda14cbcSMatt Macy 	 * initialize and TRIM, which modify unallocated space.
6132eda14cbcSMatt Macy */ 6133eda14cbcSMatt Macy if (sync) 6134eda14cbcSMatt Macy txg_wait_synced(spa_get_dsl(spa), 0); 6135eda14cbcSMatt Macy 6136eda14cbcSMatt Macy mutex_enter(&mg->mg_ms_disabled_lock); 6137eda14cbcSMatt Macy mutex_enter(&msp->ms_lock); 6138eda14cbcSMatt Macy if (--msp->ms_disabled == 0) { 6139eda14cbcSMatt Macy mg->mg_ms_disabled--; 6140eda14cbcSMatt Macy cv_broadcast(&mg->mg_ms_disabled_cv); 6141eda14cbcSMatt Macy if (unload) 6142eda14cbcSMatt Macy metaslab_unload(msp); 6143eda14cbcSMatt Macy } 6144eda14cbcSMatt Macy mutex_exit(&msp->ms_lock); 6145eda14cbcSMatt Macy mutex_exit(&mg->mg_ms_disabled_lock); 6146eda14cbcSMatt Macy } 6147eda14cbcSMatt Macy 6148*716fd348SMartin Matuska void 6149*716fd348SMartin Matuska metaslab_set_unflushed_dirty(metaslab_t *ms, boolean_t dirty) 6150*716fd348SMartin Matuska { 6151*716fd348SMartin Matuska ms->ms_unflushed_dirty = dirty; 6152*716fd348SMartin Matuska } 6153*716fd348SMartin Matuska 6154eda14cbcSMatt Macy static void 6155eda14cbcSMatt Macy metaslab_update_ondisk_flush_data(metaslab_t *ms, dmu_tx_t *tx) 6156eda14cbcSMatt Macy { 6157eda14cbcSMatt Macy vdev_t *vd = ms->ms_group->mg_vd; 6158eda14cbcSMatt Macy spa_t *spa = vd->vdev_spa; 6159eda14cbcSMatt Macy objset_t *mos = spa_meta_objset(spa); 6160eda14cbcSMatt Macy 6161eda14cbcSMatt Macy ASSERT(spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP)); 6162eda14cbcSMatt Macy 6163eda14cbcSMatt Macy metaslab_unflushed_phys_t entry = { 6164eda14cbcSMatt Macy .msp_unflushed_txg = metaslab_unflushed_txg(ms), 6165eda14cbcSMatt Macy }; 6166eda14cbcSMatt Macy uint64_t entry_size = sizeof (entry); 6167eda14cbcSMatt Macy uint64_t entry_offset = ms->ms_id * entry_size; 6168eda14cbcSMatt Macy 6169eda14cbcSMatt Macy uint64_t object = 0; 6170eda14cbcSMatt Macy int err = zap_lookup(mos, vd->vdev_top_zap, 6171eda14cbcSMatt Macy VDEV_TOP_ZAP_MS_UNFLUSHED_PHYS_TXGS, sizeof (uint64_t), 1, 6172eda14cbcSMatt Macy &object); 6173eda14cbcSMatt Macy if (err == ENOENT) { 6174eda14cbcSMatt Macy object = dmu_object_alloc(mos, DMU_OTN_UINT64_METADATA, 6175eda14cbcSMatt Macy SPA_OLD_MAXBLOCKSIZE, DMU_OT_NONE, 0, tx); 6176eda14cbcSMatt Macy VERIFY0(zap_add(mos, vd->vdev_top_zap, 6177eda14cbcSMatt Macy VDEV_TOP_ZAP_MS_UNFLUSHED_PHYS_TXGS, sizeof (uint64_t), 1, 6178eda14cbcSMatt Macy &object, tx)); 6179eda14cbcSMatt Macy } else { 6180eda14cbcSMatt Macy VERIFY0(err); 6181eda14cbcSMatt Macy } 6182eda14cbcSMatt Macy 6183eda14cbcSMatt Macy dmu_write(spa_meta_objset(spa), object, entry_offset, entry_size, 6184eda14cbcSMatt Macy &entry, tx); 6185eda14cbcSMatt Macy } 6186eda14cbcSMatt Macy 6187eda14cbcSMatt Macy void 6188eda14cbcSMatt Macy metaslab_set_unflushed_txg(metaslab_t *ms, uint64_t txg, dmu_tx_t *tx) 6189eda14cbcSMatt Macy { 6190eda14cbcSMatt Macy ms->ms_unflushed_txg = txg; 6191eda14cbcSMatt Macy metaslab_update_ondisk_flush_data(ms, tx); 6192eda14cbcSMatt Macy } 6193eda14cbcSMatt Macy 6194*716fd348SMartin Matuska boolean_t 6195*716fd348SMartin Matuska metaslab_unflushed_dirty(metaslab_t *ms) 6196*716fd348SMartin Matuska { 6197*716fd348SMartin Matuska return (ms->ms_unflushed_dirty); 6198*716fd348SMartin Matuska } 6199*716fd348SMartin Matuska 6200eda14cbcSMatt Macy uint64_t 6201eda14cbcSMatt Macy metaslab_unflushed_txg(metaslab_t *ms) 6202eda14cbcSMatt Macy { 6203eda14cbcSMatt Macy return (ms->ms_unflushed_txg); 6204eda14cbcSMatt Macy } 6205eda14cbcSMatt Macy 6206eda14cbcSMatt Macy ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, aliquot, ULONG, ZMOD_RW, 6207eda14cbcSMatt Macy "Allocation granularity (a.k.a. 
stripe size)"); 6208eda14cbcSMatt Macy 6209eda14cbcSMatt Macy ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, debug_load, INT, ZMOD_RW, 6210eda14cbcSMatt Macy "Load all metaslabs when pool is first opened"); 6211eda14cbcSMatt Macy 6212eda14cbcSMatt Macy ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, debug_unload, INT, ZMOD_RW, 6213eda14cbcSMatt Macy "Prevent metaslabs from being unloaded"); 6214eda14cbcSMatt Macy 6215eda14cbcSMatt Macy ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, preload_enabled, INT, ZMOD_RW, 6216eda14cbcSMatt Macy "Preload potential metaslabs during reassessment"); 6217eda14cbcSMatt Macy 6218eda14cbcSMatt Macy ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, unload_delay, INT, ZMOD_RW, 6219eda14cbcSMatt Macy "Delay in txgs after metaslab was last used before unloading"); 6220eda14cbcSMatt Macy 6221eda14cbcSMatt Macy ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, unload_delay_ms, INT, ZMOD_RW, 6222eda14cbcSMatt Macy "Delay in milliseconds after metaslab was last used before unloading"); 6223eda14cbcSMatt Macy 6224eda14cbcSMatt Macy /* BEGIN CSTYLED */ 6225eda14cbcSMatt Macy ZFS_MODULE_PARAM(zfs_mg, zfs_mg_, noalloc_threshold, INT, ZMOD_RW, 6226eda14cbcSMatt Macy "Percentage of metaslab group size that should be free to make it " 6227eda14cbcSMatt Macy "eligible for allocation"); 6228eda14cbcSMatt Macy 6229eda14cbcSMatt Macy ZFS_MODULE_PARAM(zfs_mg, zfs_mg_, fragmentation_threshold, INT, ZMOD_RW, 6230eda14cbcSMatt Macy "Percentage of metaslab group size that should be considered eligible " 6231eda14cbcSMatt Macy "for allocations unless all metaslab groups within the metaslab class " 6232eda14cbcSMatt Macy "have also crossed this threshold"); 6233eda14cbcSMatt Macy 6234c03c5b1cSMartin Matuska ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, fragmentation_factor_enabled, INT, 6235c03c5b1cSMartin Matuska ZMOD_RW, 6236eda14cbcSMatt Macy "Use the fragmentation metric to prefer less fragmented metaslabs"); 6237eda14cbcSMatt Macy /* END CSTYLED */ 6238eda14cbcSMatt Macy 6239c03c5b1cSMartin Matuska ZFS_MODULE_PARAM(zfs_metaslab, zfs_metaslab_, fragmentation_threshold, INT, 6240c03c5b1cSMartin Matuska ZMOD_RW, "Fragmentation for metaslab to allow allocation"); 6241c03c5b1cSMartin Matuska 6242eda14cbcSMatt Macy ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, lba_weighting_enabled, INT, ZMOD_RW, 6243eda14cbcSMatt Macy "Prefer metaslabs with lower LBAs"); 6244eda14cbcSMatt Macy 6245eda14cbcSMatt Macy ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, bias_enabled, INT, ZMOD_RW, 6246eda14cbcSMatt Macy "Enable metaslab group biasing"); 6247eda14cbcSMatt Macy 6248eda14cbcSMatt Macy ZFS_MODULE_PARAM(zfs_metaslab, zfs_metaslab_, segment_weight_enabled, INT, 6249eda14cbcSMatt Macy ZMOD_RW, "Enable segment-based metaslab selection"); 6250eda14cbcSMatt Macy 6251eda14cbcSMatt Macy ZFS_MODULE_PARAM(zfs_metaslab, zfs_metaslab_, switch_threshold, INT, ZMOD_RW, 6252eda14cbcSMatt Macy "Segment-based metaslab selection maximum buckets before switching"); 6253eda14cbcSMatt Macy 6254eda14cbcSMatt Macy ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, force_ganging, ULONG, ZMOD_RW, 6255eda14cbcSMatt Macy "Blocks larger than this size are forced to be gang blocks"); 6256eda14cbcSMatt Macy 6257eda14cbcSMatt Macy ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, df_max_search, INT, ZMOD_RW, 6258eda14cbcSMatt Macy "Max distance (bytes) to search forward before using size tree"); 6259eda14cbcSMatt Macy 6260eda14cbcSMatt Macy ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, df_use_largest_segment, INT, ZMOD_RW, 6261eda14cbcSMatt Macy "When looking in size tree, use largest 
segment instead of exact fit"); 6262eda14cbcSMatt Macy 6263eda14cbcSMatt Macy ZFS_MODULE_PARAM(zfs_metaslab, zfs_metaslab_, max_size_cache_sec, ULONG, 6264eda14cbcSMatt Macy ZMOD_RW, "How long to trust the cached max chunk size of a metaslab"); 6265eda14cbcSMatt Macy 6266eda14cbcSMatt Macy ZFS_MODULE_PARAM(zfs_metaslab, zfs_metaslab_, mem_limit, INT, ZMOD_RW, 6267eda14cbcSMatt Macy "Percentage of memory that can be used to store metaslab range trees"); 62687877fdebSMatt Macy 62697877fdebSMatt Macy ZFS_MODULE_PARAM(zfs_metaslab, zfs_metaslab_, try_hard_before_gang, INT, 62707877fdebSMatt Macy ZMOD_RW, "Try hard to allocate before ganging"); 62717877fdebSMatt Macy 62727877fdebSMatt Macy ZFS_MODULE_PARAM(zfs_metaslab, zfs_metaslab_, find_max_tries, INT, ZMOD_RW, 62737877fdebSMatt Macy "Normally only consider this many of the best metaslabs in each vdev"); 6274
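/*
 * Illustrative note, assuming the standard ZFS_MODULE_PARAM() platform
 * glue: the declarations above surface as run-time tunables, e.g. on
 * Linux
 *
 *	echo 33554432 > /sys/module/zfs/parameters/metaslab_aliquot
 *
 * and on FreeBSD as sysctls such as
 *
 *	sysctl vfs.zfs.metaslab.aliquot=33554432
 *
 * The exact parameter name is derived from the scope and prefix
 * arguments, so consult the parameter list on the running system.
 */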