xref: /linux/tools/testing/selftests/bpf/progs/rbtree_fail.c (revision 90b83efa6701656e02c86e7df2cb1765ea602d07)
// SPDX-License-Identifier: GPL-2.0
#include <vmlinux.h>
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_core_read.h>
#include "bpf_experimental.h"
#include "bpf_misc.h"

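/* Node type stored in the rbtrees below; the embedded bpf_rb_node links a
 * node into a bpf_rb_root.
 */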
struct node_data {
	long key;
	long data;
	struct bpf_rb_node node;
};

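/* glock lives in the same section as both roots, so it is the spin_lock the
 * verifier expects to be held for rbtree operations on groot and groot2.
 */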
#define private(name) SEC(".data." #name) __hidden __attribute__((aligned(8)))
private(A) struct bpf_spin_lock glock;
private(A) struct bpf_rb_root groot __contains(node_data, node);
private(A) struct bpf_rb_root groot2 __contains(node_data, node);

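/* Comparator callback passed to bpf_rbtree_add(); orders nodes by key */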
static bool less(struct bpf_rb_node *a, const struct bpf_rb_node *b)
{
	struct node_data *node_a;
	struct node_data *node_b;

	node_a = container_of(a, struct node_data, node);
	node_b = container_of(b, struct node_data, node);

	return node_a->key < node_b->key;
}

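/* Adding a node without holding the associated spin_lock must be rejected */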
SEC("?tc")
__failure __msg("bpf_spin_lock at off=16 must be held for bpf_rb_root")
long rbtree_api_nolock_add(void *ctx)
{
	struct node_data *n;

	n = bpf_obj_new(typeof(*n));
	if (!n)
		return 1;

	bpf_rbtree_add(&groot, &n->node, less);
	return 0;
}

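/* Removing a node outside the lock's critical section must be rejected, even
 * though the earlier add was done under the lock
 */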
SEC("?tc")
__failure __msg("bpf_spin_lock at off=16 must be held for bpf_rb_root")
long rbtree_api_nolock_remove(void *ctx)
{
	struct node_data *n;

	n = bpf_obj_new(typeof(*n));
	if (!n)
		return 1;

	bpf_spin_lock(&glock);
	bpf_rbtree_add(&groot, &n->node, less);
	bpf_spin_unlock(&glock);

	bpf_rbtree_remove(&groot, &n->node);
	return 0;
}

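/* Peeking at the tree's first node without holding the lock must be rejected */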
SEC("?tc")
__failure __msg("bpf_spin_lock at off=16 must be held for bpf_rb_root")
long rbtree_api_nolock_first(void *ctx)
{
	bpf_rbtree_first(&groot);
	return 0;
}

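/* Runtime (not verifier) check: removing a node that was never added returns
 * NULL, so this program is expected to load and return 0
 */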
SEC("?tc")
__retval(0)
long rbtree_api_remove_unadded_node(void *ctx)
{
	struct node_data *n, *m;
	struct bpf_rb_node *res_n, *res_m;

	n = bpf_obj_new(typeof(*n));
	if (!n)
		return 1;

	m = bpf_obj_new(typeof(*m));
	if (!m) {
		bpf_obj_drop(n);
		return 1;
	}

	bpf_spin_lock(&glock);
	bpf_rbtree_add(&groot, &n->node, less);

	res_n = bpf_rbtree_remove(&groot, &n->node);

	res_m = bpf_rbtree_remove(&groot, &m->node);
	bpf_spin_unlock(&glock);

	bpf_obj_drop(m);
	if (res_n)
		bpf_obj_drop(container_of(res_n, struct node_data, node));
	if (res_m) {
		bpf_obj_drop(container_of(res_m, struct node_data, node));
		/* m was not added to the rbtree */
		return 2;
	}

	return 0;
}

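/* A node returned by bpf_rbtree_remove() is an owned reference; failing to
 * bpf_obj_drop() it must be flagged as an unreleased reference
 */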
SEC("?tc")
__failure __msg("Unreleased reference id=3 alloc_insn={{[0-9]+}}")
long rbtree_api_remove_no_drop(void *ctx)
{
	struct bpf_rb_node *res;
	struct node_data *n;

	bpf_spin_lock(&glock);
	res = bpf_rbtree_first(&groot);
	if (!res)
		goto unlock_err;

	res = bpf_rbtree_remove(&groot, res);

	if (res) {
		n = container_of(res, struct node_data, node);
		__sink(n);
	}
	bpf_spin_unlock(&glock);

	/* if (res) { bpf_obj_drop(n); } is missing here */
	return 0;

unlock_err:
	bpf_spin_unlock(&glock);
	return 1;
}

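/* A node already added to one tree is no longer an owned (allocated) object,
 * so adding it to a second tree must be rejected
 */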
SEC("?tc")
__failure __msg("arg#1 expected pointer to allocated object")
long rbtree_api_add_to_multiple_trees(void *ctx)
{
	struct node_data *n;

	n = bpf_obj_new(typeof(*n));
	if (!n)
		return 1;

	bpf_spin_lock(&glock);
	bpf_rbtree_add(&groot, &n->node, less);

	/* This add should fail since n is already in groot's tree */
	bpf_rbtree_add(&groot2, &n->node, less);
	bpf_spin_unlock(&glock);
	return 0;
}

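/* bpf_rbtree_remove() may return NULL; using its result without a NULL check
 * must be rejected
 */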
SEC("?tc")
__failure __msg("dereference of modified ptr_or_null_ ptr R2 off=16 disallowed")
long rbtree_api_use_unchecked_remove_retval(void *ctx)
{
	struct bpf_rb_node *res;

	bpf_spin_lock(&glock);

	res = bpf_rbtree_first(&groot);
	if (!res)
		goto err_out;
	res = bpf_rbtree_remove(&groot, res);

	bpf_spin_unlock(&glock);

	bpf_spin_lock(&glock);
	/* Must check res for NULL before using in rbtree_add below */
	bpf_rbtree_add(&groot, res, less);
	bpf_spin_unlock(&glock);
	return 0;

err_out:
	bpf_spin_unlock(&glock);
	return 1;
}

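/* A non-owning reference obtained via bpf_rbtree_add() is invalidated by
 * spin_unlock; using it in a later critical section must be rejected
 */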
SEC("?tc")
__failure __msg("bpf_rbtree_remove can only take non-owning or refcounted bpf_rb_node pointer")
long rbtree_api_add_release_unlock_escape(void *ctx)
{
	struct node_data *n;

	n = bpf_obj_new(typeof(*n));
	if (!n)
		return 1;

	bpf_spin_lock(&glock);
	bpf_rbtree_add(&groot, &n->node, less);
	bpf_spin_unlock(&glock);

	bpf_spin_lock(&glock);
	/* After add() in the previous critical section, n should be
	 * release_on_unlock and released after the previous spin_unlock,
	 * so it should not be possible to use it here
	 */
	bpf_rbtree_remove(&groot, &n->node);
	bpf_spin_unlock(&glock);
	return 0;
}

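/* Same as above, but the non-owning reference comes from bpf_rbtree_first() */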
SEC("?tc")
__failure __msg("bpf_rbtree_remove can only take non-owning or refcounted bpf_rb_node pointer")
long rbtree_api_first_release_unlock_escape(void *ctx)
{
	struct bpf_rb_node *res;
	struct node_data *n;

	bpf_spin_lock(&glock);
	res = bpf_rbtree_first(&groot);
	if (!res) {
		bpf_spin_unlock(&glock);
		return 1;
	}
	n = container_of(res, struct node_data, node);
	bpf_spin_unlock(&glock);

	bpf_spin_lock(&glock);
	/* After first() in the previous critical section, n should be
	 * release_on_unlock and released after the previous spin_unlock,
	 * so it should not be possible to use it here
	 */
	bpf_rbtree_remove(&groot, &n->node);
	bpf_spin_unlock(&glock);
	return 0;
}

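/* Inside the comparator callback node_a is a non-owning reference, so passing
 * it to bpf_rbtree_add() here must be rejected
 */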
static bool less__bad_fn_call_add(struct bpf_rb_node *a, const struct bpf_rb_node *b)
{
	struct node_data *node_a;
	struct node_data *node_b;

	node_a = container_of(a, struct node_data, node);
	node_b = container_of(b, struct node_data, node);
	bpf_rbtree_add(&groot, &node_a->node, less);

	return node_a->key < node_b->key;
}

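/* rbtree_remove is not allowed from within an rbtree callback */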
static bool less__bad_fn_call_remove(struct bpf_rb_node *a, const struct bpf_rb_node *b)
{
	struct node_data *node_a;
	struct node_data *node_b;

	node_a = container_of(a, struct node_data, node);
	node_b = container_of(b, struct node_data, node);
	bpf_rbtree_remove(&groot, &node_a->node);

	return node_a->key < node_b->key;
}

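/* spin_{lock,unlock} is not allowed from within an rbtree callback */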
static bool less__bad_fn_call_first_unlock_after(struct bpf_rb_node *a, const struct bpf_rb_node *b)
{
	struct node_data *node_a;
	struct node_data *node_b;

	node_a = container_of(a, struct node_data, node);
	node_b = container_of(b, struct node_data, node);
	bpf_rbtree_first(&groot);
	bpf_spin_unlock(&glock);

	return node_a->key < node_b->key;
}

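/* Helper that allocates a node and adds it under glock using the given
 * comparator callback
 */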
static __always_inline
long add_with_cb(bool (cb)(struct bpf_rb_node *a, const struct bpf_rb_node *b))
{
	struct node_data *n;

	n = bpf_obj_new(typeof(*n));
	if (!n)
		return 1;

	bpf_spin_lock(&glock);
	bpf_rbtree_add(&groot, &n->node, cb);
	bpf_spin_unlock(&glock);
	return 0;
}

SEC("?tc")
__failure __msg("arg#1 expected pointer to allocated object")
long rbtree_api_add_bad_cb_bad_fn_call_add(void *ctx)
{
	return add_with_cb(less__bad_fn_call_add);
}

SEC("?tc")
__failure __msg("rbtree_remove not allowed in rbtree cb")
long rbtree_api_add_bad_cb_bad_fn_call_remove(void *ctx)
{
	return add_with_cb(less__bad_fn_call_remove);
}

SEC("?tc")
__failure __msg("can't spin_{lock,unlock} in rbtree cb")
long rbtree_api_add_bad_cb_bad_fn_call_first_unlock_after(void *ctx)
{
	return add_with_cb(less__bad_fn_call_first_unlock_after);
}

char _license[] SEC("license") = "GPL";