// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
#include <linux/interval_tree_generic.h>
#include <linux/slab.h>
#include <linux/bpf_mem_alloc.h>
#include <linux/bpf.h>
#include "range_tree.h"

/*
 * struct range_tree is a data structure used to allocate contiguous memory
 * ranges in bpf arena. Conceptually it's a large bitmap. A contiguous
 * sequence of set bits is represented by struct range_node or 'rn' for short.
 * rn->rn_rbnode links it into an interval tree while
 * rn->rb_range_size links it into a second rbtree sorted by size of the range.
 * __find_range() performs a binary search for the best fit: the smallest
 * range whose size is greater than or equal to the requested size.
 * range_tree_clear/set() clears or sets a range of bits in this bitmap. The
 * adjacent ranges are merged or split at the same time.
 *
 * The split/merge logic is based/borrowed from XFS's xbitmap32 added
 * in commit 6772fcc8890a ("xfs: convert xbitmap to interval tree").
 *
 * The implementation relies on an external lock to protect the rbtree-s.
 * The alloc/free of range_node-s is done via bpf_mem_alloc.
 *
 * bpf arena uses range_tree to represent unallocated slots.
 * At init time:
 *   range_tree_set(rt, 0, max);
 * Then:
 *   start = range_tree_find(rt, len);
 *   if (start >= 0)
 *     range_tree_clear(rt, start, len);
 * to find a free range and mark slots as allocated, and later:
 *   range_tree_set(rt, start, len);
 * to mark them as unallocated after use.
 */
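/*
 * Minimal usage sketch with a hypothetical caller and made-up sizes (the
 * real caller is the bpf arena code), under the external lock that
 * protects the trees:
 *
 *	struct range_tree rt;
 *	s64 start;
 *
 *	range_tree_init(&rt);
 *	range_tree_set(&rt, 0, 1024);      (all 1024 slots start free)
 *	start = range_tree_find(&rt, 16);  (best-fit search for 16 slots)
 *	if (start >= 0)
 *		range_tree_clear(&rt, start, 16);  (mark them allocated)
 *	...
 *	range_tree_set(&rt, start, 16);    (free them again)
 *	range_tree_destroy(&rt);
 */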
struct range_node {
	struct rb_node rn_rbnode;	/* link into rt->it_root interval tree */
	struct rb_node rb_range_size;	/* link into rt->range_size_root rbtree */
	u32 rn_start;
	u32 rn_last; /* inclusive */
	u32 __rn_subtree_last;	/* maintained by the interval tree code */
};

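/* Convert a size-tree rb_node back to its containing range_node */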
static struct range_node *rb_to_range_node(struct rb_node *rb)
{
	return rb_entry(rb, struct range_node, rb_range_size);
}

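/* Number of slots covered by the range; rn_last is inclusive */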
static u32 rn_size(struct range_node *rn)
{
	return rn->rn_last - rn->rn_start + 1;
}

/* Find the smallest range that still fits the requested size (best fit) */
static inline struct range_node *__find_range(struct range_tree *rt, u32 len)
{
	struct rb_node *rb = rt->range_size_root.rb_root.rb_node;
	struct range_node *best = NULL;

	while (rb) {
		struct range_node *rn = rb_to_range_node(rb);

		if (len <= rn_size(rn)) {
			/* Big enough: remember it, then look right for a tighter fit */
			best = rn;
			rb = rb->rb_right;
		} else {
			/* Too small: larger ranges are to the left */
			rb = rb->rb_left;
		}
	}

	return best;
}

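/* Return the start of the best-fit set range of at least @len slots, or -ENOENT */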
s64 range_tree_find(struct range_tree *rt, u32 len)
{
	struct range_node *rn;

	rn = __find_range(rt, len);
	if (!rn)
		return -ENOENT;
	return rn->rn_start;
}

/*
 * Insert the range into the rbtree sorted by range size in descending
 * order; the cached leftmost node is therefore the largest range.
 */
static inline void __range_size_insert(struct range_node *rn,
				       struct rb_root_cached *root)
{
	struct rb_node **link = &root->rb_root.rb_node, *rb = NULL;
	u64 size = rn_size(rn);
	bool leftmost = true;

	while (*link) {
		rb = *link;
		if (size > rn_size(rb_to_range_node(rb))) {
			link = &rb->rb_left;
		} else {
			link = &rb->rb_right;
			leftmost = false;
		}
	}

	rb_link_node(&rn->rb_range_size, rb, link);
	rb_insert_color_cached(&rn->rb_range_size, root, leftmost);
}

/* Interval endpoints (inclusive) used by the interval tree template */
#define START(node) ((node)->rn_start)
#define LAST(node)  ((node)->rn_last)

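/*
 * Generate the interval tree helpers __range_it_insert/remove/iter_first
 * operating on the closed intervals [rn_start, rn_last].
 */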
INTERVAL_TREE_DEFINE(struct range_node, rn_rbnode, u32,
		     __rn_subtree_last, START, LAST,
		     static inline __maybe_unused,
		     __range_it)

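/*
 * A range_node lives in both trees at once, so the wrappers below keep
 * the interval tree and the size tree in sync on every insert/remove.
 */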
static inline __maybe_unused void
range_it_insert(struct range_node *rn, struct range_tree *rt)
{
	__range_size_insert(rn, &rt->range_size_root);
	__range_it_insert(rn, &rt->it_root);
}

static inline __maybe_unused void
range_it_remove(struct range_node *rn, struct range_tree *rt)
{
	rb_erase_cached(&rn->rb_range_size, &rt->range_size_root);
	RB_CLEAR_NODE(&rn->rb_range_size);
	__range_it_remove(rn, &rt->it_root);
}

static inline __maybe_unused struct range_node *
range_it_iter_first(struct range_tree *rt, u32 start, u32 last)
{
	return __range_it_iter_first(&rt->it_root, start, last);
}

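/*
 * Example with hypothetical numbers: clearing [8, 11] out of a single set
 * range [0, 31] splits it into [0, 7] and [12, 31]; setting [8, 11] again
 * merges the three back into [0, 31].
 */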
/* Clear the range [start, start + len - 1] in this range tree */
int range_tree_clear(struct range_tree *rt, u32 start, u32 len)
{
	u32 last = start + len - 1;
	struct range_node *new_rn;
	struct range_node *rn;

	while ((rn = range_it_iter_first(rt, start, last))) {
		if (rn->rn_start < start && rn->rn_last > last) {
			u32 old_last = rn->rn_last;

			/* rn spans the entire clearing range: split it in two */
			range_it_remove(rn, rt);
			rn->rn_last = start - 1;
			range_it_insert(rn, rt);

			/* Add a range for the part right of the cleared hole */
			migrate_disable();
			new_rn = bpf_mem_alloc(&bpf_global_ma, sizeof(struct range_node));
			migrate_enable();
			if (!new_rn)
				return -ENOMEM;
			new_rn->rn_start = last + 1;
			new_rn->rn_last = old_last;
			range_it_insert(new_rn, rt);
		} else if (rn->rn_start < start) {
			/* Overlaps the left side of the clearing range: trim its tail */
			range_it_remove(rn, rt);
			rn->rn_last = start - 1;
			range_it_insert(rn, rt);
		} else if (rn->rn_last > last) {
			/* Overlaps the right side of the clearing range: trim its head */
			range_it_remove(rn, rt);
			rn->rn_start = last + 1;
			range_it_insert(rn, rt);
			break;
		} else {
			/* Fully contained in the clearing range: delete it */
			range_it_remove(rn, rt);
			migrate_disable();
			bpf_mem_free(&bpf_global_ma, rn);
			migrate_enable();
		}
	}
	return 0;
}

/* Return 0 if the whole range [start, start + len - 1] is set, -ESRCH otherwise */
int is_range_tree_set(struct range_tree *rt, u32 start, u32 len)
{
	u32 last = start + len - 1;
	struct range_node *left;

	/* Ranges are kept maximal, so a single node must cover the whole range */
	left = range_it_iter_first(rt, start, last);
	if (left && left->rn_start <= start && left->rn_last >= last)
		return 0;
	return -ESRCH;
}

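/*
 * Setting a range may merge it with adjacent ranges on either side, which
 * preserves the invariant that the tree stores maximal, non-overlapping
 * set ranges.
 */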
/* Set the range [start, start + len - 1] in this range tree */
int range_tree_set(struct range_tree *rt, u32 start, u32 len)
{
	u32 last = start + len - 1;
	struct range_node *right;
	struct range_node *left;
	int err;

	/* Is this whole range already set ? */
	left = range_it_iter_first(rt, start, last);
	if (left && left->rn_start <= start && left->rn_last >= last)
		return 0;

	/* Clear out everything in the range we want to set. */
	err = range_tree_clear(rt, start, len);
	if (err)
		return err;

	/* Do we have a left-adjacent range ? */
	left = range_it_iter_first(rt, start - 1, start - 1);
	if (left && left->rn_last + 1 != start)
		return -EFAULT;

	/* Do we have a right-adjacent range ? */
	right = range_it_iter_first(rt, last + 1, last + 1);
	if (right && right->rn_start != last + 1)
		return -EFAULT;

	if (left && right) {
		/* Combine left and right adjacent ranges */
		range_it_remove(left, rt);
		range_it_remove(right, rt);
		left->rn_last = right->rn_last;
		range_it_insert(left, rt);
		migrate_disable();
		bpf_mem_free(&bpf_global_ma, right);
		migrate_enable();
	} else if (left) {
		/* Combine with the left range */
		range_it_remove(left, rt);
		left->rn_last = last;
		range_it_insert(left, rt);
	} else if (right) {
		/* Combine with the right range */
		range_it_remove(right, rt);
		right->rn_start = start;
		range_it_insert(right, rt);
	} else {
		/* No adjacent ranges: allocate a fresh node for the new range */
		migrate_disable();
		left = bpf_mem_alloc(&bpf_global_ma, sizeof(struct range_node));
		migrate_enable();
		if (!left)
			return -ENOMEM;
		left->rn_start = start;
		left->rn_last = last;
		range_it_insert(left, rt);
	}
	return 0;
}

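/* Free all remaining range_node-s; the caller must not use the tree afterwards */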
void range_tree_destroy(struct range_tree *rt)
{
	struct range_node *rn;

	/* -1U is U32_MAX, i.e. iterate over the entire key space */
	while ((rn = range_it_iter_first(rt, 0, -1U))) {
		range_it_remove(rn, rt);
		migrate_disable();
		bpf_mem_free(&bpf_global_ma, rn);
		migrate_enable();
	}
}

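/* Start with empty trees, i.e. all bits clear; use range_tree_set() to populate */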
void range_tree_init(struct range_tree *rt)
{
	rt->it_root = RB_ROOT_CACHED;
	rt->range_size_root = RB_ROOT_CACHED;
}
273