/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * Copyright (c) 2011, 2015 by Delphix. All rights reserved.
 */

#ifndef _SYS_METASLAB_IMPL_H
#define	_SYS_METASLAB_IMPL_H

#include <sys/metaslab.h>
#include <sys/space_map.h>
#include <sys/range_tree.h>
#include <sys/vdev.h>
#include <sys/txg.h>
#include <sys/avl.h>

#ifdef	__cplusplus
extern "C" {
#endif

/*
 * A metaslab class encompasses a category of allocatable top-level vdevs.
 * Each top-level vdev is associated with a metaslab group which defines
 * the allocatable region for that vdev. Examples of these categories include
 * "normal" for data block allocations (i.e. main pool allocations) or "log"
 * for allocations designated for intent log devices (i.e. slog devices).
 * When a block allocation is requested from the SPA it is associated with a
 * metaslab_class_t, and only top-level vdevs (i.e. metaslab groups) belonging
 * to the class can be used to satisfy that request. Allocations are done
 * by traversing the metaslab groups that are linked off of the mc_rotor field.
 * This rotor points to the next metaslab group where allocations will be
 * attempted. Allocating a block is a three-step process -- select the
 * metaslab group, select the metaslab, and then allocate the block. The
 * metaslab class defines the low-level block allocator that will be used as
 * the final step in allocation. These allocators are pluggable, allowing each
 * class to use a block allocator that best suits that class.
 */
struct metaslab_class {
	kmutex_t		mc_lock;
	spa_t			*mc_spa;
	metaslab_group_t	*mc_rotor;
	metaslab_ops_t		*mc_ops;
	uint64_t		mc_aliquot;

	/*
	 * Track the number of metaslab groups that have been initialized
	 * and can accept allocations. An initialized metaslab group is
	 * one that has been completely added to the config (i.e. we have
	 * updated the MOS config and the space has been added to the pool).
	 */
	uint64_t		mc_groups;

	/*
	 * Toggle to enable/disable the allocation throttle.
	 */
	boolean_t		mc_alloc_throttle_enabled;

	/*
	 * The allocation throttle works on a reservation system. Whenever
	 * an asynchronous zio wants to perform an allocation it must
	 * first reserve the number of blocks that it wants to allocate.
	 * If there aren't sufficient slots available for the pending zio
	 * then that I/O is throttled until more slots free up. The current
	 * number of reserved allocations is maintained by the mc_alloc_slots
	 * refcount. The mc_alloc_max_slots value determines the maximum
	 * number of allocations that the system allows. Gang blocks are
	 * allowed to reserve slots even if we've reached the maximum
	 * number of allocations allowed.
	 */
	uint64_t		mc_alloc_max_slots;
	refcount_t		mc_alloc_slots;

	uint64_t		mc_alloc_groups; /* # of allocatable groups */

	uint64_t		mc_alloc;	/* total allocated space */
	uint64_t		mc_deferred;	/* total deferred frees */
	uint64_t		mc_space;	/* total space (alloc + free) */
	uint64_t		mc_dspace;	/* total deflated space */
	uint64_t		mc_histogram[RANGE_TREE_HISTOGRAM_SIZE];
};
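
/*
 * The reservation scheme above can be sketched as follows. This is a
 * minimal illustration only, not the routine ZFS actually uses (see
 * metaslab_class_throttle_reserve() in metaslab.c); the function name and
 * the generic "holder" argument are hypothetical, and it assumes the
 * refcount_count()/refcount_add() interfaces backing the refcount_t
 * fields above are visible.
 */
static inline boolean_t
metaslab_class_reserve_sketch(metaslab_class_t *mc, int slots,
    void *holder, boolean_t is_gang)
{
	boolean_t granted = B_FALSE;
	int d;

	mutex_enter(&mc->mc_lock);
	/*
	 * Gang blocks may reserve past the maximum; all other zios must
	 * fit within mc_alloc_max_slots or be throttled.
	 */
	if (is_gang || (uint64_t)refcount_count(&mc->mc_alloc_slots) +
	    slots <= mc->mc_alloc_max_slots) {
		/* reserve slots one at a time so they can be released singly */
		for (d = 0; d < slots; d++)
			(void) refcount_add(&mc->mc_alloc_slots, holder);
		granted = B_TRUE;
	}
	mutex_exit(&mc->mc_lock);
	return (granted);
}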

/*
 * Metaslab groups encapsulate all the allocatable regions (i.e. metaslabs)
 * of a top-level vdev. They are linked together to form a circular linked
 * list and can belong to only one metaslab class. Metaslab groups may become
 * ineligible for allocations for a number of reasons such as limited free
 * space, fragmentation, or going offline. When this happens, the allocator
 * will simply find the next metaslab group in the linked list and attempt
 * to allocate from that group instead.
 */
struct metaslab_group {
	kmutex_t		mg_lock;
	avl_tree_t		mg_metaslab_tree;
	uint64_t		mg_aliquot;
	boolean_t		mg_allocatable;		/* can we allocate? */

	/*
	 * A metaslab group is considered to be initialized only after
	 * we have updated the MOS config and added the space to the pool.
	 * We only allow allocation attempts to a metaslab group if it
	 * has been initialized.
	 */
	boolean_t		mg_initialized;

	uint64_t		mg_free_capacity;	/* percentage free */
	int64_t			mg_bias;
	int64_t			mg_activation_count;
	metaslab_class_t	*mg_class;
	vdev_t			*mg_vd;
	taskq_t			*mg_taskq;
	metaslab_group_t	*mg_prev;
	metaslab_group_t	*mg_next;

	/*
	 * Each metaslab group can handle mg_max_alloc_queue_depth allocations
	 * which are tracked by mg_alloc_queue_depth. It's possible for a
	 * metaslab group to handle more allocations than its max. This
	 * can occur when gang blocks are required or when other groups
	 * are unable to handle their share of allocations.
	 */
	uint64_t		mg_max_alloc_queue_depth;
	refcount_t		mg_alloc_queue_depth;

	/*
	 * A metaslab group that can no longer allocate the minimum block
	 * size will set mg_no_free_space. Once a metaslab group is out
	 * of space, its share of work must be distributed to other
	 * groups.
	 */
	boolean_t		mg_no_free_space;

	uint64_t		mg_allocations;
	uint64_t		mg_failed_allocations;
	uint64_t		mg_fragmentation;
	uint64_t		mg_histogram[RANGE_TREE_HISTOGRAM_SIZE];
};
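
/*
 * A minimal sketch of walking the circular group list described above,
 * skipping groups that cannot accept allocations. The function name is
 * hypothetical; the real rotor logic (metaslab_alloc_dva() in metaslab.c)
 * also applies device biasing and allocation throttling.
 */
static inline metaslab_group_t *
metaslab_group_next_eligible_sketch(metaslab_class_t *mc)
{
	metaslab_group_t *rotor = mc->mc_rotor;
	metaslab_group_t *mg = rotor;

	if (mg == NULL)
		return (NULL);
	do {
		/* ineligible groups are skipped, not waited on */
		if (mg->mg_initialized && mg->mg_allocatable &&
		    !mg->mg_no_free_space)
			return (mg);
	} while ((mg = mg->mg_next) != rotor);
	return (NULL);
}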

/*
 * This value defines the number of elements in the ms_lbas array. The
 * value of 64 was chosen because it covers all power-of-2 buckets up to
 * UINT64_MAX; it is the equivalent of highbit(UINT64_MAX).
 */
#define	MAX_LBAS	64
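
/*
 * For example, an allocator that keeps one cursor per power-of-2 size
 * class could derive an ms_lbas index from the allocation size as shown
 * below. This helper is hypothetical; it assumes highbit64() from
 * <sys/sysmacros.h>, which returns the 1-based position of the highest
 * set bit, so any nonzero size maps to an index in [0, MAX_LBAS - 1].
 */
static inline uint64_t
metaslab_lba_index_sketch(uint64_t size)
{
	return ((uint64_t)highbit64(size) - 1);
}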

/*
 * Each metaslab maintains a set of in-core trees to track metaslab operations.
 * The in-core free tree (ms_tree) contains the current list of free segments.
 * As blocks are allocated, the allocated segments are removed from the ms_tree
 * and added to a per txg allocation tree (ms_alloctree). As blocks are freed,
 * they are added to the per txg free tree (ms_freetree). These per txg
 * trees allow us to process all allocations and frees in syncing context
 * where it is safe to update the on-disk space maps. One additional in-core
 * tree is maintained to track deferred frees (ms_defertree). Once a block
 * is freed it will move from the ms_freetree to the ms_defertree. A deferred
 * free means that a block has been freed but cannot be used by the pool
 * until TXG_DEFER_SIZE transaction groups later. For example, a block
 * that is freed in txg 50 will not be available for reallocation until
 * txg 52 (50 + TXG_DEFER_SIZE).  This provides a safety net for uberblock
 * rollback. A pool could be safely rolled back TXG_DEFER_SIZE
 * transaction groups with the assurance that no block has been reallocated.
 *
 * The simplified transition diagram looks like this:
 *
 *
 *      ALLOCATE
 *         |
 *         V
 *    free segment (ms_tree) --------> ms_alloctree ----> (write to space map)
 *         ^
 *         |
 *         |                           ms_freetree <--- FREE
 *         |                                 |
 *         |                                 |
 *         |                                 |
 *         +----------- ms_defertree <-------+---------> (write to space map)
 *
 *
 * Each metaslab's space is tracked in a single space map in the MOS,
 * which is only updated in syncing context. Each time we sync a txg,
 * we append the allocs and frees from that txg to the space map.
 * The pool space is only updated once all metaslabs have finished syncing.
 *
 * To load the in-core free tree we read the space map from disk.
 * This object contains a series of alloc and free records that are
 * combined to make up the list of all free segments in this metaslab. These
 * segments are represented in-core by the ms_tree and are stored in an
 * AVL tree.
 *
 * As the space map grows (as a result of the appends) it will
 * eventually become space-inefficient. When the on-disk space map reaches
 * zfs_condense_pct/100 times the size of the minimal representation of
 * the in-core free tree, we rewrite it in its minimized form. If a
 * metaslab needs to condense, we must set the ms_condensing flag to ensure
 * that allocations are not performed on the metaslab that is being written.
 */
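
/*
 * At its core the condense decision is a size comparison. This sketch is
 * illustrative only; the real predicate, metaslab_should_condense() in
 * metaslab.c, derives the minimal size from the in-core trees and applies
 * additional conditions. zfs_condense_pct is the tunable defined in
 * metaslab.c.
 */
extern int zfs_condense_pct;

static inline boolean_t
metaslab_should_condense_sketch(uint64_t minimal_size, uint64_t sm_size)
{
	/* condense once the on-disk space map is sufficiently bloated */
	return (sm_size >= minimal_size * zfs_condense_pct / 100);
}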
struct metaslab {
	kmutex_t	ms_lock;
	kcondvar_t	ms_load_cv;
	space_map_t	*ms_sm;
	metaslab_ops_t	*ms_ops;
	uint64_t	ms_id;
	uint64_t	ms_start;
	uint64_t	ms_size;
	uint64_t	ms_fragmentation;

	range_tree_t	*ms_alloctree[TXG_SIZE];
	range_tree_t	*ms_freetree[TXG_SIZE];
	range_tree_t	*ms_defertree[TXG_DEFER_SIZE];
	range_tree_t	*ms_tree;

	boolean_t	ms_condensing;	/* condensing? */
	boolean_t	ms_condense_wanted;
	boolean_t	ms_loaded;
	boolean_t	ms_loading;

	int64_t		ms_deferspace;	/* sum of ms_defertree[] space	*/
	uint64_t	ms_weight;	/* weight vs. others in group	*/
	uint64_t	ms_access_txg;

	/*
	 * The metaslab block allocators can optionally use a size-ordered
	 * range tree and/or an array of LBAs. Not all allocators use
	 * this functionality. The ms_size_tree should always contain the
	 * same number of segments as the ms_tree. The only difference
	 * is that the ms_size_tree is ordered by segment sizes.
	 */
	avl_tree_t	ms_size_tree;
	uint64_t	ms_lbas[MAX_LBAS];

	metaslab_group_t *ms_group;	/* metaslab group		*/
	avl_node_t	ms_group_node;	/* node in metaslab group tree	*/
	txg_node_t	ms_txg_node;	/* per-txg dirty metaslab links	*/
};
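
/*
 * A minimal sketch of the ALLOCATE transition in the diagram above: a
 * segment moves from the in-core free tree to the per txg allocation tree
 * so it can be processed in syncing context. This mirrors the flow of
 * metaslab_block_alloc() in metaslab.c but is illustrative only; the
 * function name is hypothetical and error handling is omitted.
 */
static inline void
metaslab_alloc_transition_sketch(metaslab_t *msp, uint64_t start,
    uint64_t size, uint64_t txg)
{
	range_tree_t *alloctree = msp->ms_alloctree[txg & TXG_MASK];

	ASSERT(MUTEX_HELD(&msp->ms_lock));

	/* take the segment out of the in-core free tree */
	range_tree_remove(msp->ms_tree, start, size);

	/* the first allocation in a txg dirties the metaslab */
	if (range_tree_space(alloctree) == 0)
		vdev_dirty(msp->ms_group->mg_vd, VDD_METASLAB, msp, txg);

	/* record the allocation for syncing context */
	range_tree_add(alloctree, start, size);
}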

#ifdef	__cplusplus
}
#endif

#endif	/* _SYS_METASLAB_IMPL_H */