/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or https://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */
/*
 * Copyright (c) 2013, 2019 by Delphix. All rights reserved.
 * Copyright (c) 2015, Nexenta Systems, Inc. All rights reserved.
 */

#include <sys/zfs_context.h>
#include <sys/spa.h>
#include <sys/dmu.h>
#include <sys/dnode.h>
#include <sys/zio.h>
#include <sys/range_tree.h>

/*
 * Range trees are tree-based data structures that can be used to
 * track free space or generally any space allocation information.
 * A range tree keeps track of individual segments and automatically
 * provides facilities such as adjacent extent merging and extent
 * splitting in response to range add/remove requests.
 *
 * A range tree starts out completely empty, with no segments in it.
 * Adding an allocation via zfs_range_tree_add to the range tree can:
 * 1) create a new extent
 * 2) extend an adjacent extent
 * 3) merge two adjacent extents
 * Conversely, removing an allocation via zfs_range_tree_remove can:
 * 1) completely remove an extent
 * 2) shorten an extent (if the allocation was near one of its ends)
 * 3) split an extent into two extents, in effect punching a hole
 *
 * A range tree is also capable of 'bridging' gaps when adding
 * allocations. This is useful for cases when close proximity of
 * allocations is an important detail that needs to be represented
 * in the range tree. See zfs_range_tree_create_gap(). The default
 * behavior is not to bridge gaps (i.e. the maximum allowed gap size is 0).
 *
 * In order to traverse a range tree, use either the zfs_range_tree_walk()
 * or zfs_range_tree_vacate() functions.
 *
 * To obtain more accurate information on individual segment
 * operations that the range tree performs "under the hood", you can
 * specify a set of callbacks by passing a zfs_range_tree_ops_t structure
 * to the zfs_range_tree_create function. Any callbacks that are non-NULL
 * are then called at the appropriate times.
 *
 * The range tree code also supports a special variant of range trees
 * that can bridge small gaps between segments. This kind of tree is used
 * by the dsl scanning code to group I/Os into mostly sequential chunks to
 * optimize disk performance. The code here attempts to do this with as
 * little memory and computational overhead as possible. One limitation of
 * this implementation is that segments of range trees with gaps can only
 * support removing complete segments.
 */
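
/*
 * Illustrative usage sketch (not compiled; the NULL ops/arg and the
 * offsets are hypothetical, chosen only to show the behaviors above):
 *
 *	zfs_range_tree_t *rt = zfs_range_tree_create(NULL, ZFS_RANGE_SEG64,
 *	    NULL, 0, 0);
 *	zfs_range_tree_add(rt, 0x1000, 0x1000);    creates [0x1000, 0x2000)
 *	zfs_range_tree_add(rt, 0x2000, 0x1000);    merges to [0x1000, 0x3000)
 *	zfs_range_tree_remove(rt, 0x1800, 0x800);  punches a hole, leaving
 *	                                           [0x1000, 0x1800) and
 *	                                           [0x2000, 0x3000)
 *	zfs_range_tree_vacate(rt, NULL, NULL);     empty the tree; destroy
 *	zfs_range_tree_destroy(rt);                requires zero space
 */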

static inline void
zfs_rs_copy(zfs_range_seg_t *src, zfs_range_seg_t *dest, zfs_range_tree_t *rt)
{
	ASSERT3U(rt->rt_type, <, ZFS_RANGE_SEG_NUM_TYPES);
	size_t size = 0;
	switch (rt->rt_type) {
	case ZFS_RANGE_SEG32:
		size = sizeof (zfs_range_seg32_t);
		break;
	case ZFS_RANGE_SEG64:
		size = sizeof (zfs_range_seg64_t);
		break;
	case ZFS_RANGE_SEG_GAP:
		size = sizeof (zfs_range_seg_gap_t);
		break;
	default:
		__builtin_unreachable();
	}
	memcpy(dest, src, size);
}

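/*
 * The size histogram is indexed by highbit64(size) - 1, so bucket i
 * counts segments whose size falls in [2^i, 2^(i+1)). For example
 * (illustrative), a segment of size 0x1800 has highbit64() == 13 and
 * is counted in bucket 12, which covers [0x1000, 0x2000).
 */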
void
zfs_range_tree_stat_verify(zfs_range_tree_t *rt)
{
	zfs_range_seg_t *rs;
	zfs_btree_index_t where;
	uint64_t hist[ZFS_RANGE_TREE_HISTOGRAM_SIZE] = { 0 };
	int i;

	for (rs = zfs_btree_first(&rt->rt_root, &where); rs != NULL;
	    rs = zfs_btree_next(&rt->rt_root, &where, &where)) {
		uint64_t size = zfs_rs_get_end(rs, rt) -
		    zfs_rs_get_start(rs, rt);
		int idx = highbit64(size) - 1;

		hist[idx]++;
		ASSERT3U(hist[idx], !=, 0);
	}

	for (i = 0; i < ZFS_RANGE_TREE_HISTOGRAM_SIZE; i++) {
		if (hist[i] != rt->rt_histogram[i]) {
			zfs_dbgmsg("i=%d, hist=%px, hist=%llu, rt_hist=%llu",
			    i, hist, (u_longlong_t)hist[i],
			    (u_longlong_t)rt->rt_histogram[i]);
		}
		VERIFY3U(hist[i], ==, rt->rt_histogram[i]);
	}
}

static void
zfs_range_tree_stat_incr(zfs_range_tree_t *rt, zfs_range_seg_t *rs)
{
	uint64_t size = zfs_rs_get_end(rs, rt) - zfs_rs_get_start(rs, rt);
	int idx = highbit64(size) - 1;

	ASSERT(size != 0);
	ASSERT3U(idx, <,
	    sizeof (rt->rt_histogram) / sizeof (*rt->rt_histogram));

	rt->rt_histogram[idx]++;
	ASSERT3U(rt->rt_histogram[idx], !=, 0);
}

static void
zfs_range_tree_stat_decr(zfs_range_tree_t *rt, zfs_range_seg_t *rs)
{
	uint64_t size = zfs_rs_get_end(rs, rt) - zfs_rs_get_start(rs, rt);
	int idx = highbit64(size) - 1;

	ASSERT(size != 0);
	ASSERT3U(idx, <,
	    sizeof (rt->rt_histogram) / sizeof (*rt->rt_histogram));

	ASSERT3U(rt->rt_histogram[idx], !=, 0);
	rt->rt_histogram[idx]--;
}

__attribute__((always_inline)) inline
static int
zfs_range_tree_seg32_compare(const void *x1, const void *x2)
{
	const zfs_range_seg32_t *r1 = x1;
	const zfs_range_seg32_t *r2 = x2;

	ASSERT3U(r1->rs_start, <=, r1->rs_end);
	ASSERT3U(r2->rs_start, <=, r2->rs_end);

	return ((r1->rs_start >= r2->rs_end) - (r1->rs_end <= r2->rs_start));
}

__attribute__((always_inline)) inline
static int
zfs_range_tree_seg64_compare(const void *x1, const void *x2)
{
	const zfs_range_seg64_t *r1 = x1;
	const zfs_range_seg64_t *r2 = x2;

	ASSERT3U(r1->rs_start, <=, r1->rs_end);
	ASSERT3U(r2->rs_start, <=, r2->rs_end);

	return ((r1->rs_start >= r2->rs_end) - (r1->rs_end <= r2->rs_start));
}

__attribute__((always_inline)) inline
static int
zfs_range_tree_seg_gap_compare(const void *x1, const void *x2)
{
	const zfs_range_seg_gap_t *r1 = x1;
	const zfs_range_seg_gap_t *r2 = x2;

	ASSERT3U(r1->rs_start, <=, r1->rs_end);
	ASSERT3U(r2->rs_start, <=, r2->rs_end);

	return ((r1->rs_start >= r2->rs_end) - (r1->rs_end <= r2->rs_start));
}

ZFS_BTREE_FIND_IN_BUF_FUNC(zfs_range_tree_seg32_find_in_buf, zfs_range_seg32_t,
    zfs_range_tree_seg32_compare)

ZFS_BTREE_FIND_IN_BUF_FUNC(zfs_range_tree_seg64_find_in_buf, zfs_range_seg64_t,
    zfs_range_tree_seg64_compare)

ZFS_BTREE_FIND_IN_BUF_FUNC(zfs_range_tree_seg_gap_find_in_buf,
    zfs_range_seg_gap_t, zfs_range_tree_seg_gap_compare)

zfs_range_tree_t *
zfs_range_tree_create_gap(const zfs_range_tree_ops_t *ops,
    zfs_range_seg_type_t type, void *arg, uint64_t start, uint64_t shift,
    uint64_t gap)
{
	zfs_range_tree_t *rt = kmem_zalloc(sizeof (zfs_range_tree_t), KM_SLEEP);

	ASSERT3U(shift, <, 64);
	ASSERT3U(type, <, ZFS_RANGE_SEG_NUM_TYPES);
	size_t size;
	int (*compare) (const void *, const void *);
	bt_find_in_buf_f bt_find;
	switch (type) {
	case ZFS_RANGE_SEG32:
		size = sizeof (zfs_range_seg32_t);
		compare = zfs_range_tree_seg32_compare;
		bt_find = zfs_range_tree_seg32_find_in_buf;
		break;
	case ZFS_RANGE_SEG64:
		size = sizeof (zfs_range_seg64_t);
		compare = zfs_range_tree_seg64_compare;
		bt_find = zfs_range_tree_seg64_find_in_buf;
		break;
	case ZFS_RANGE_SEG_GAP:
		size = sizeof (zfs_range_seg_gap_t);
		compare = zfs_range_tree_seg_gap_compare;
		bt_find = zfs_range_tree_seg_gap_find_in_buf;
		break;
	default:
		panic("Invalid range seg type %d", type);
	}
	zfs_btree_create(&rt->rt_root, compare, bt_find, size);

	rt->rt_ops = ops;
	rt->rt_gap = gap;
	rt->rt_arg = arg;
	rt->rt_type = type;
	rt->rt_start = start;
	rt->rt_shift = shift;

	if (rt->rt_ops != NULL && rt->rt_ops->rtop_create != NULL)
		rt->rt_ops->rtop_create(rt, rt->rt_arg);

	return (rt);
}

zfs_range_tree_t *
zfs_range_tree_create(const zfs_range_tree_ops_t *ops,
    zfs_range_seg_type_t type, void *arg, uint64_t start, uint64_t shift)
{
	return (zfs_range_tree_create_gap(ops, type, arg, start, shift, 0));
}

void
zfs_range_tree_destroy(zfs_range_tree_t *rt)
{
	VERIFY0(rt->rt_space);

	if (rt->rt_ops != NULL && rt->rt_ops->rtop_destroy != NULL)
		rt->rt_ops->rtop_destroy(rt, rt->rt_arg);

	zfs_btree_destroy(&rt->rt_root);
	kmem_free(rt, sizeof (*rt));
}

void
zfs_range_tree_adjust_fill(zfs_range_tree_t *rt, zfs_range_seg_t *rs,
    int64_t delta)
{
	if (delta < 0 && delta * -1 >= zfs_rs_get_fill(rs, rt)) {
		zfs_panic_recover("zfs: attempting to decrease fill to or "
		    "below 0; probable double remove in segment [%llx:%llx]",
		    (longlong_t)zfs_rs_get_start(rs, rt),
		    (longlong_t)zfs_rs_get_end(rs, rt));
	}
	if (zfs_rs_get_fill(rs, rt) + delta > zfs_rs_get_end(rs, rt) -
	    zfs_rs_get_start(rs, rt)) {
		zfs_panic_recover("zfs: attempting to increase fill beyond "
		    "max; probable double add in segment [%llx:%llx]",
		    (longlong_t)zfs_rs_get_start(rs, rt),
		    (longlong_t)zfs_rs_get_end(rs, rt));
	}

	if (rt->rt_ops != NULL && rt->rt_ops->rtop_remove != NULL)
		rt->rt_ops->rtop_remove(rt, rs, rt->rt_arg);
	zfs_rs_set_fill(rs, rt, zfs_rs_get_fill(rs, rt) + delta);
	if (rt->rt_ops != NULL && rt->rt_ops->rtop_add != NULL)
		rt->rt_ops->rtop_add(rt, rs, rt->rt_arg);
}

static void
zfs_range_tree_add_impl(void *arg, uint64_t start, uint64_t size,
    uint64_t fill)
{
	zfs_range_tree_t *rt = arg;
	zfs_btree_index_t where;
	zfs_range_seg_t *rs_before, *rs_after, *rs;
	zfs_range_seg_max_t tmp, rsearch;
	uint64_t end = start + size, gap = rt->rt_gap;
	uint64_t bridge_size = 0;
	boolean_t merge_before, merge_after;

	ASSERT3U(size, !=, 0);
	ASSERT3U(fill, <=, size);
	ASSERT3U(start + size, >, start);

	zfs_rs_set_start(&rsearch, rt, start);
	zfs_rs_set_end(&rsearch, rt, end);
	rs = zfs_btree_find(&rt->rt_root, &rsearch, &where);

	/*
	 * If this is a gap-supporting range tree, it is possible that we
	 * are inserting into an existing segment. In this case simply
	 * bump the fill count and call the remove / add callbacks. If the
	 * new range will extend an existing segment, we remove the
	 * existing one, apply the new extent to it and re-insert it using
	 * the normal code paths.
	 */
	if (rs != NULL) {
		if (gap == 0) {
			zfs_panic_recover("zfs: adding existent segment to "
			    "range tree (offset=%llx size=%llx)",
			    (longlong_t)start, (longlong_t)size);
			return;
		}
		uint64_t rstart = zfs_rs_get_start(rs, rt);
		uint64_t rend = zfs_rs_get_end(rs, rt);
		if (rstart <= start && rend >= end) {
			zfs_range_tree_adjust_fill(rt, rs, fill);
			return;
		}

		if (rt->rt_ops != NULL && rt->rt_ops->rtop_remove != NULL)
			rt->rt_ops->rtop_remove(rt, rs, rt->rt_arg);

		zfs_range_tree_stat_decr(rt, rs);
		rt->rt_space -= rend - rstart;

		fill += zfs_rs_get_fill(rs, rt);
		start = MIN(start, rstart);
		end = MAX(end, rend);
		size = end - start;

		zfs_btree_remove(&rt->rt_root, rs);
		zfs_range_tree_add_impl(rt, start, size, fill);
		return;
	}

	ASSERT3P(rs, ==, NULL);

	/*
	 * Determine whether or not we will have to merge with our neighbors.
	 * If gap != 0, we might need to merge with our neighbors even if we
	 * aren't directly touching.
	 */
	zfs_btree_index_t where_before, where_after;
	rs_before = zfs_btree_prev(&rt->rt_root, &where, &where_before);
	rs_after = zfs_btree_next(&rt->rt_root, &where, &where_after);

	merge_before = (rs_before != NULL && zfs_rs_get_end(rs_before, rt) >=
	    start - gap);
	merge_after = (rs_after != NULL && zfs_rs_get_start(rs_after, rt) <=
	    end + gap);

	if (merge_before && gap != 0)
		bridge_size += start - zfs_rs_get_end(rs_before, rt);
	if (merge_after && gap != 0)
		bridge_size += zfs_rs_get_start(rs_after, rt) - end;

	if (merge_before && merge_after) {
		if (rt->rt_ops != NULL && rt->rt_ops->rtop_remove != NULL) {
			rt->rt_ops->rtop_remove(rt, rs_before, rt->rt_arg);
			rt->rt_ops->rtop_remove(rt, rs_after, rt->rt_arg);
		}

		zfs_range_tree_stat_decr(rt, rs_before);
		zfs_range_tree_stat_decr(rt, rs_after);

		zfs_rs_copy(rs_after, &tmp, rt);
		uint64_t before_start = zfs_rs_get_start_raw(rs_before, rt);
		uint64_t before_fill = zfs_rs_get_fill(rs_before, rt);
		uint64_t after_fill = zfs_rs_get_fill(rs_after, rt);
		zfs_btree_remove_idx(&rt->rt_root, &where_before);

		/*
		 * We have to re-find the node because our old reference is
		 * invalid as soon as we do any mutating btree operations.
		 */
		rs_after = zfs_btree_find(&rt->rt_root, &tmp, &where_after);
		ASSERT3P(rs_after, !=, NULL);
		zfs_rs_set_start_raw(rs_after, rt, before_start);
		zfs_rs_set_fill(rs_after, rt, after_fill + before_fill + fill);
		rs = rs_after;
	} else if (merge_before) {
		if (rt->rt_ops != NULL && rt->rt_ops->rtop_remove != NULL)
			rt->rt_ops->rtop_remove(rt, rs_before, rt->rt_arg);

		zfs_range_tree_stat_decr(rt, rs_before);

		uint64_t before_fill = zfs_rs_get_fill(rs_before, rt);
		zfs_rs_set_end(rs_before, rt, end);
		zfs_rs_set_fill(rs_before, rt, before_fill + fill);
		rs = rs_before;
	} else if (merge_after) {
		if (rt->rt_ops != NULL && rt->rt_ops->rtop_remove != NULL)
			rt->rt_ops->rtop_remove(rt, rs_after, rt->rt_arg);

		zfs_range_tree_stat_decr(rt, rs_after);

		uint64_t after_fill = zfs_rs_get_fill(rs_after, rt);
		zfs_rs_set_start(rs_after, rt, start);
		zfs_rs_set_fill(rs_after, rt, after_fill + fill);
		rs = rs_after;
	} else {
		rs = &tmp;

		zfs_rs_set_start(rs, rt, start);
		zfs_rs_set_end(rs, rt, end);
		zfs_rs_set_fill(rs, rt, fill);
		zfs_btree_add_idx(&rt->rt_root, rs, &where);
	}

	if (gap != 0) {
		ASSERT3U(zfs_rs_get_fill(rs, rt), <=, zfs_rs_get_end(rs, rt) -
		    zfs_rs_get_start(rs, rt));
	} else {
		ASSERT3U(zfs_rs_get_fill(rs, rt), ==, zfs_rs_get_end(rs, rt) -
		    zfs_rs_get_start(rs, rt));
	}

	if (rt->rt_ops != NULL && rt->rt_ops->rtop_add != NULL)
		rt->rt_ops->rtop_add(rt, rs, rt->rt_arg);

	zfs_range_tree_stat_incr(rt, rs);
	rt->rt_space += size + bridge_size;
}

void
zfs_range_tree_add(void *arg, uint64_t start, uint64_t size)
{
	zfs_range_tree_add_impl(arg, start, size, size);
}

static void
zfs_range_tree_remove_impl(zfs_range_tree_t *rt, uint64_t start, uint64_t size,
    boolean_t do_fill)
{
	zfs_btree_index_t where;
	zfs_range_seg_t *rs;
	zfs_range_seg_max_t rsearch, rs_tmp;
	uint64_t end = start + size;
	boolean_t left_over, right_over;

	VERIFY3U(size, !=, 0);
	VERIFY3U(size, <=, rt->rt_space);
	if (rt->rt_type == ZFS_RANGE_SEG64)
		ASSERT3U(start + size, >, start);

	zfs_rs_set_start(&rsearch, rt, start);
	zfs_rs_set_end(&rsearch, rt, end);
	rs = zfs_btree_find(&rt->rt_root, &rsearch, &where);

	/* Make sure we completely overlap with someone */
	if (rs == NULL) {
		zfs_panic_recover("zfs: removing nonexistent segment from "
		    "range tree (offset=%llx size=%llx)",
		    (longlong_t)start, (longlong_t)size);
		return;
	}

	/*
	 * Range trees with gap support must only remove complete segments
	 * from the tree. This allows us to maintain accurate fill accounting
	 * and to ensure that bridged sections are not leaked. If we need to
	 * remove less than the full segment, we can only adjust the fill count.
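	 *
	 * For example (an illustrative sketch): calling
	 * zfs_range_tree_remove_fill() with size 0x200 on a bridged segment
	 * whose fill is 0x800 does not shrink the segment; it only lowers
	 * the fill to 0x600 via zfs_range_tree_adjust_fill().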
	 */
	if (rt->rt_gap != 0) {
		if (do_fill) {
			if (zfs_rs_get_fill(rs, rt) == size) {
				start = zfs_rs_get_start(rs, rt);
				end = zfs_rs_get_end(rs, rt);
				size = end - start;
			} else {
				zfs_range_tree_adjust_fill(rt, rs, -size);
				return;
			}
		} else if (zfs_rs_get_start(rs, rt) != start ||
		    zfs_rs_get_end(rs, rt) != end) {
			zfs_panic_recover("zfs: freeing partial segment of "
			    "gap tree (offset=%llx size=%llx) of "
			    "(offset=%llx size=%llx)",
			    (longlong_t)start, (longlong_t)size,
			    (longlong_t)zfs_rs_get_start(rs, rt),
			    (longlong_t)zfs_rs_get_end(rs, rt) -
			    zfs_rs_get_start(rs, rt));
			return;
		}
	}

	VERIFY3U(zfs_rs_get_start(rs, rt), <=, start);
	VERIFY3U(zfs_rs_get_end(rs, rt), >=, end);

	left_over = (zfs_rs_get_start(rs, rt) != start);
	right_over = (zfs_rs_get_end(rs, rt) != end);

	zfs_range_tree_stat_decr(rt, rs);

	if (rt->rt_ops != NULL && rt->rt_ops->rtop_remove != NULL)
		rt->rt_ops->rtop_remove(rt, rs, rt->rt_arg);

	if (left_over && right_over) {
		zfs_range_seg_max_t newseg;
		zfs_rs_set_start(&newseg, rt, end);
		zfs_rs_set_end_raw(&newseg, rt, zfs_rs_get_end_raw(rs, rt));
		zfs_rs_set_fill(&newseg, rt, zfs_rs_get_end(rs, rt) - end);
		zfs_range_tree_stat_incr(rt, &newseg);

		/* This modifies the buffer already inside the range tree */
		zfs_rs_set_end(rs, rt, start);

		zfs_rs_copy(rs, &rs_tmp, rt);
		if (zfs_btree_next(&rt->rt_root, &where, &where) != NULL)
			zfs_btree_add_idx(&rt->rt_root, &newseg, &where);
		else
			zfs_btree_add(&rt->rt_root, &newseg);

		if (rt->rt_ops != NULL && rt->rt_ops->rtop_add != NULL)
			rt->rt_ops->rtop_add(rt, &newseg, rt->rt_arg);
	} else if (left_over) {
		/* This modifies the buffer already inside the range tree */
		zfs_rs_set_end(rs, rt, start);
		zfs_rs_copy(rs, &rs_tmp, rt);
	} else if (right_over) {
		/* This modifies the buffer already inside the range tree */
		zfs_rs_set_start(rs, rt, end);
		zfs_rs_copy(rs, &rs_tmp, rt);
	} else {
		zfs_btree_remove_idx(&rt->rt_root, &where);
		rs = NULL;
	}

	if (rs != NULL) {
		/*
		 * The fill of the leftover segment will always be equal to
		 * the size, since we do not support removing partial segments
		 * of range trees with gaps.
		 */
		zfs_zfs_rs_set_fill_raw(rs, rt, zfs_rs_get_end_raw(rs, rt) -
		    zfs_rs_get_start_raw(rs, rt));
		zfs_range_tree_stat_incr(rt, &rs_tmp);

		if (rt->rt_ops != NULL && rt->rt_ops->rtop_add != NULL)
			rt->rt_ops->rtop_add(rt, &rs_tmp, rt->rt_arg);
	}

	rt->rt_space -= size;
}

void
zfs_range_tree_remove(void *arg, uint64_t start, uint64_t size)
{
	zfs_range_tree_remove_impl(arg, start, size, B_FALSE);
}

void
zfs_range_tree_remove_fill(zfs_range_tree_t *rt, uint64_t start, uint64_t size)
{
	zfs_range_tree_remove_impl(rt, start, size, B_TRUE);
}

void
zfs_range_tree_resize_segment(zfs_range_tree_t *rt, zfs_range_seg_t *rs,
    uint64_t newstart, uint64_t newsize)
{
	int64_t delta = newsize - (zfs_rs_get_end(rs, rt) -
	    zfs_rs_get_start(rs, rt));

	zfs_range_tree_stat_decr(rt, rs);
	if (rt->rt_ops != NULL && rt->rt_ops->rtop_remove != NULL)
		rt->rt_ops->rtop_remove(rt, rs, rt->rt_arg);

	zfs_rs_set_start(rs, rt, newstart);
	zfs_rs_set_end(rs, rt, newstart + newsize);

	zfs_range_tree_stat_incr(rt, rs);
	if (rt->rt_ops != NULL && rt->rt_ops->rtop_add != NULL)
		rt->rt_ops->rtop_add(rt, rs, rt->rt_arg);

	rt->rt_space += delta;
}

static zfs_range_seg_t *
zfs_range_tree_find_impl(zfs_range_tree_t *rt, uint64_t start, uint64_t size)
{
	zfs_range_seg_max_t rsearch;
	uint64_t end = start + size;

	VERIFY(size != 0);

	zfs_rs_set_start(&rsearch, rt, start);
	zfs_rs_set_end(&rsearch, rt, end);
	return (zfs_btree_find(&rt->rt_root, &rsearch, NULL));
}

zfs_range_seg_t *
zfs_range_tree_find(zfs_range_tree_t *rt, uint64_t start, uint64_t size)
{
	if (rt->rt_type == ZFS_RANGE_SEG64)
		ASSERT3U(start + size, >, start);

	zfs_range_seg_t *rs = zfs_range_tree_find_impl(rt, start, size);
	if (rs != NULL && zfs_rs_get_start(rs, rt) <= start &&
	    zfs_rs_get_end(rs, rt) >= start + size) {
		return (rs);
	}
	return (NULL);
}

void
zfs_range_tree_verify_not_present(zfs_range_tree_t *rt, uint64_t off,
    uint64_t size)
{
	zfs_range_seg_t *rs = zfs_range_tree_find(rt, off, size);
	if (rs != NULL)
		panic("segment already in tree; rs=%p", (void *)rs);
}

boolean_t
zfs_range_tree_contains(zfs_range_tree_t *rt, uint64_t start, uint64_t size)
{
	return (zfs_range_tree_find(rt, start, size) != NULL);
}

/*
 * Returns the first subset of the given range which overlaps with the range
 * tree. Returns true if there is a segment in the range, and false if there
 * isn't.
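 *
 * For example, if the tree holds the single segment [0x100, 0x200), then
 * zfs_range_tree_find_in(rt, 0x180, 0x100, &ostart, &osize) returns true
 * and sets ostart = 0x180, osize = 0x80, i.e. the overlap [0x180, 0x200)
 * (illustrative values).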
 */
boolean_t
zfs_range_tree_find_in(zfs_range_tree_t *rt, uint64_t start, uint64_t size,
    uint64_t *ostart, uint64_t *osize)
{
	if (rt->rt_type == ZFS_RANGE_SEG64)
		ASSERT3U(start + size, >, start);

	zfs_range_seg_max_t rsearch;
	zfs_rs_set_start(&rsearch, rt, start);
	zfs_rs_set_end_raw(&rsearch, rt, zfs_rs_get_start_raw(&rsearch, rt) +
	    1);

	zfs_btree_index_t where;
	zfs_range_seg_t *rs = zfs_btree_find(&rt->rt_root, &rsearch, &where);
	if (rs != NULL) {
		*ostart = start;
		*osize = MIN(size, zfs_rs_get_end(rs, rt) - start);
		return (B_TRUE);
	}

	rs = zfs_btree_next(&rt->rt_root, &where, &where);
	if (rs == NULL || zfs_rs_get_start(rs, rt) > start + size)
		return (B_FALSE);

	*ostart = zfs_rs_get_start(rs, rt);
	*osize = MIN(start + size, zfs_rs_get_end(rs, rt)) -
	    zfs_rs_get_start(rs, rt);
	return (B_TRUE);
}

/*
 * Ensure that this range is not in the tree, regardless of whether
 * it is currently in the tree.
 */
void
zfs_range_tree_clear(zfs_range_tree_t *rt, uint64_t start, uint64_t size)
{
	zfs_range_seg_t *rs;

	if (size == 0)
		return;

	if (rt->rt_type == ZFS_RANGE_SEG64)
		ASSERT3U(start + size, >, start);

	while ((rs = zfs_range_tree_find_impl(rt, start, size)) != NULL) {
		uint64_t free_start = MAX(zfs_rs_get_start(rs, rt), start);
		uint64_t free_end = MIN(zfs_rs_get_end(rs, rt), start + size);
		zfs_range_tree_remove(rt, free_start, free_end - free_start);
	}
}

void
zfs_range_tree_swap(zfs_range_tree_t **rtsrc, zfs_range_tree_t **rtdst)
{
	zfs_range_tree_t *rt;

	ASSERT0(zfs_range_tree_space(*rtdst));
	ASSERT0(zfs_btree_numnodes(&(*rtdst)->rt_root));

	rt = *rtsrc;
	*rtsrc = *rtdst;
	*rtdst = rt;
}

void
zfs_range_tree_vacate(zfs_range_tree_t *rt, zfs_range_tree_func_t *func,
    void *arg)
{
	if (rt->rt_ops != NULL && rt->rt_ops->rtop_vacate != NULL)
		rt->rt_ops->rtop_vacate(rt, rt->rt_arg);

	if (func != NULL) {
		zfs_range_seg_t *rs;
		zfs_btree_index_t *cookie = NULL;

		while ((rs = zfs_btree_destroy_nodes(&rt->rt_root, &cookie)) !=
		    NULL) {
			func(arg, zfs_rs_get_start(rs, rt),
			    zfs_rs_get_end(rs, rt) - zfs_rs_get_start(rs, rt));
		}
	} else {
		zfs_btree_clear(&rt->rt_root);
	}

	memset(rt->rt_histogram, 0, sizeof (rt->rt_histogram));
	rt->rt_space = 0;
}

void
zfs_range_tree_walk(zfs_range_tree_t *rt, zfs_range_tree_func_t *func,
    void *arg)
{
	zfs_btree_index_t where;
	for (zfs_range_seg_t *rs = zfs_btree_first(&rt->rt_root, &where);
	    rs != NULL; rs = zfs_btree_next(&rt->rt_root, &where, &where)) {
		func(arg, zfs_rs_get_start(rs, rt), zfs_rs_get_end(rs, rt) -
		    zfs_rs_get_start(rs, rt));
	}
}

zfs_range_seg_t *
zfs_range_tree_first(zfs_range_tree_t *rt)
{
	return (zfs_btree_first(&rt->rt_root, NULL));
}

uint64_t
zfs_range_tree_space(zfs_range_tree_t *rt)
{
	return (rt->rt_space);
}

uint64_t
zfs_range_tree_numsegs(zfs_range_tree_t *rt)
{
	return ((rt == NULL) ? 0 : zfs_btree_numnodes(&rt->rt_root));
}

boolean_t
zfs_range_tree_is_empty(zfs_range_tree_t *rt)
{
	ASSERT(rt != NULL);
	return (zfs_range_tree_space(rt) == 0);
}

/*
 * Remove any overlapping ranges between the given segment [start, end)
 * from removefrom. Add non-overlapping leftovers to addto.
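 *
 * For example (illustrative): if removefrom holds the single segment
 * [10, 20) and this is called with start = 5 and end = 15, then [10, 15)
 * is removed from removefrom (leaving [15, 20)) and the non-overlapping
 * leftover [5, 10) is added to addto.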
 */
void
zfs_range_tree_remove_xor_add_segment(uint64_t start, uint64_t end,
    zfs_range_tree_t *removefrom, zfs_range_tree_t *addto)
{
	zfs_btree_index_t where;
	zfs_range_seg_max_t starting_rs;
	zfs_rs_set_start(&starting_rs, removefrom, start);
	zfs_rs_set_end_raw(&starting_rs, removefrom,
	    zfs_rs_get_start_raw(&starting_rs, removefrom) + 1);

	zfs_range_seg_t *curr = zfs_btree_find(&removefrom->rt_root,
	    &starting_rs, &where);

	if (curr == NULL)
		curr = zfs_btree_next(&removefrom->rt_root, &where, &where);

	zfs_range_seg_t *next;
	for (; curr != NULL; curr = next) {
		if (start == end)
			return;
		VERIFY3U(start, <, end);

		/* there is no overlap */
		if (end <= zfs_rs_get_start(curr, removefrom)) {
			zfs_range_tree_add(addto, start, end - start);
			return;
		}

		uint64_t overlap_start = MAX(zfs_rs_get_start(curr, removefrom),
		    start);
		uint64_t overlap_end = MIN(zfs_rs_get_end(curr, removefrom),
		    end);
		uint64_t overlap_size = overlap_end - overlap_start;
		ASSERT3S(overlap_size, >, 0);
		zfs_range_seg_max_t rs;
		zfs_rs_copy(curr, &rs, removefrom);

		zfs_range_tree_remove(removefrom, overlap_start, overlap_size);

		if (start < overlap_start)
			zfs_range_tree_add(addto, start, overlap_start - start);

		start = overlap_end;
		next = zfs_btree_find(&removefrom->rt_root, &rs, &where);
		/*
		 * If we find something here, we only removed part of the
		 * curr segment. Either there's some left at the end
		 * because we've reached the end of the range we're removing,
		 * or there's some left at the start because we started
		 * partway through the range.  Either way, we continue with
		 * the loop. If it's the former, we'll return at the start of
		 * the loop, and if it's the latter we'll see if there is more
		 * area to process.
		 */
		if (next != NULL) {
			ASSERT(start == end || start == zfs_rs_get_end(&rs,
			    removefrom));
		}

		next = zfs_btree_next(&removefrom->rt_root, &where, &where);
	}
	VERIFY3P(curr, ==, NULL);

	if (start != end) {
		VERIFY3U(start, <, end);
		zfs_range_tree_add(addto, start, end - start);
	} else {
		VERIFY3U(start, ==, end);
	}
}

/*
 * For each entry in rt, if it exists in removefrom, remove it
 * from removefrom. Otherwise, add it to addto.
 */
void
zfs_range_tree_remove_xor_add(zfs_range_tree_t *rt,
    zfs_range_tree_t *removefrom, zfs_range_tree_t *addto)
{
	zfs_btree_index_t where;
	for (zfs_range_seg_t *rs = zfs_btree_first(&rt->rt_root, &where); rs;
	    rs = zfs_btree_next(&rt->rt_root, &where, &where)) {
		zfs_range_tree_remove_xor_add_segment(zfs_rs_get_start(rs, rt),
		    zfs_rs_get_end(rs, rt), removefrom, addto);
	}
}

uint64_t
zfs_range_tree_min(zfs_range_tree_t *rt)
{
	zfs_range_seg_t *rs = zfs_btree_first(&rt->rt_root, NULL);
	return (rs != NULL ? zfs_rs_get_start(rs, rt) : 0);
}

uint64_t
zfs_range_tree_max(zfs_range_tree_t *rt)
{
	zfs_range_seg_t *rs = zfs_btree_last(&rt->rt_root, NULL);
	return (rs != NULL ? zfs_rs_get_end(rs, rt) : 0);
}

uint64_t
zfs_range_tree_span(zfs_range_tree_t *rt)
{
	return (zfs_range_tree_max(rt) - zfs_range_tree_min(rt));
}