// SPDX-License-Identifier: GPL-2.0
#include <vmlinux.h>
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_core_read.h>
#include "bpf_experimental.h"
#include "bpf_misc.h"

struct node_data {
	long key;
	long data;
	struct bpf_rb_node node;
};

#define private(name) SEC(".data." #name) __hidden __attribute__((aligned(8)))
private(A) struct bpf_spin_lock glock;
private(A) struct bpf_rb_root groot __contains(node_data, node);
private(A) struct bpf_rb_root groot2 __contains(node_data, node);

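/* Comparison callback passed to bpf_rbtree_add(); orders nodes by key */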
static bool less(struct bpf_rb_node *a, const struct bpf_rb_node *b)
{
	struct node_data *node_a;
	struct node_data *node_b;

	node_a = container_of(a, struct node_data, node);
	node_b = container_of(b, struct node_data, node);

	return node_a->key < node_b->key;
}

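/* rbtree kfuncs must be called with the tree's associated bpf_spin_lock
 * held; this program adds without taking glock, so the verifier should
 * reject it.
 */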
SEC("?tc")
__failure __msg("bpf_spin_lock at off=16 must be held for bpf_rb_root")
long rbtree_api_nolock_add(void *ctx)
{
	struct node_data *n;

	n = bpf_obj_new(typeof(*n));
	if (!n)
		return 1;

	bpf_rbtree_add(&groot, &n->node, less);
	return 0;
}

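/* The add is done under glock, but the remove happens after the lock is
 * released, so it should fail verification.
 */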
SEC("?tc")
__failure __msg("bpf_spin_lock at off=16 must be held for bpf_rb_root")
long rbtree_api_nolock_remove(void *ctx)
{
	struct node_data *n;

	n = bpf_obj_new(typeof(*n));
	if (!n)
		return 1;

	bpf_spin_lock(&glock);
	bpf_rbtree_add(&groot, &n->node, less);
	bpf_spin_unlock(&glock);

	bpf_rbtree_remove(&groot, &n->node);
	return 0;
}

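/* bpf_rbtree_first() without glock held should likewise be rejected */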
SEC("?tc")
__failure __msg("bpf_spin_lock at off=16 must be held for bpf_rb_root")
long rbtree_api_nolock_first(void *ctx)
{
	bpf_rbtree_first(&groot);
	return 0;
}

SEC("?tc")
__failure __msg("rbtree_remove node input must be non-owning ref")
long rbtree_api_remove_unadded_node(void *ctx)
{
	struct node_data *n, *m;
	struct bpf_rb_node *res;

	n = bpf_obj_new(typeof(*n));
	if (!n)
		return 1;

	m = bpf_obj_new(typeof(*m));
	if (!m) {
		bpf_obj_drop(n);
		return 1;
	}

	bpf_spin_lock(&glock);
	bpf_rbtree_add(&groot, &n->node, less);

	/* This remove should pass the verifier */
	res = bpf_rbtree_remove(&groot, &n->node);
	n = container_of(res, struct node_data, node);

	/* This remove shouldn't pass, since m isn't in an rbtree */
	res = bpf_rbtree_remove(&groot, &m->node);
	m = container_of(res, struct node_data, node);
	bpf_spin_unlock(&glock);

	if (n)
		bpf_obj_drop(n);
	if (m)
		bpf_obj_drop(m);
	return 0;
}

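/* bpf_rbtree_remove() returns an owning reference; failing to
 * bpf_obj_drop() it before program exit should be flagged as an
 * unreleased reference.
 */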
SEC("?tc")
__failure __msg("Unreleased reference id=3 alloc_insn={{[0-9]+}}")
long rbtree_api_remove_no_drop(void *ctx)
{
	struct bpf_rb_node *res;
	struct node_data *n;

	bpf_spin_lock(&glock);
	res = bpf_rbtree_first(&groot);
	if (!res)
		goto unlock_err;

	res = bpf_rbtree_remove(&groot, res);

	if (res) {
		n = container_of(res, struct node_data, node);
		__sink(n);
	}
	bpf_spin_unlock(&glock);

	/* if (res) { bpf_obj_drop(n); } is missing here */
	return 0;

unlock_err:
	bpf_spin_unlock(&glock);
	return 1;
}

SEC("?tc")
__failure __msg("arg#1 expected pointer to allocated object")
long rbtree_api_add_to_multiple_trees(void *ctx)
{
	struct node_data *n;

	n = bpf_obj_new(typeof(*n));
	if (!n)
		return 1;

	bpf_spin_lock(&glock);
	bpf_rbtree_add(&groot, &n->node, less);

	/* This add should fail since n is already in groot's tree */
	bpf_rbtree_add(&groot2, &n->node, less);
	bpf_spin_unlock(&glock);
	return 0;
}

SEC("?tc")
__failure __msg("dereference of modified ptr_or_null_ ptr R2 off=16 disallowed")
long rbtree_api_use_unchecked_remove_retval(void *ctx)
{
	struct bpf_rb_node *res;

	bpf_spin_lock(&glock);

	res = bpf_rbtree_first(&groot);
	if (!res)
		goto err_out;
	res = bpf_rbtree_remove(&groot, res);

	bpf_spin_unlock(&glock);

	bpf_spin_lock(&glock);
	/* Must check res for NULL before using in rbtree_add below */
	bpf_rbtree_add(&groot, res, less);
	bpf_spin_unlock(&glock);
	return 0;

err_out:
	bpf_spin_unlock(&glock);
	return 1;
}

SEC("?tc")
__failure __msg("rbtree_remove node input must be non-owning ref")
long rbtree_api_add_release_unlock_escape(void *ctx)
{
	struct node_data *n;

	n = bpf_obj_new(typeof(*n));
	if (!n)
		return 1;

	bpf_spin_lock(&glock);
	bpf_rbtree_add(&groot, &n->node, less);
	bpf_spin_unlock(&glock);

	bpf_spin_lock(&glock);
	/* After add() in the previous critical section, n should be
	 * release_on_unlock and released after the previous spin_unlock,
	 * so it should not be possible to use it here
	 */
	bpf_rbtree_remove(&groot, &n->node);
	bpf_spin_unlock(&glock);
	return 0;
}

SEC("?tc")
__failure __msg("rbtree_remove node input must be non-owning ref")
long rbtree_api_first_release_unlock_escape(void *ctx)
{
	struct bpf_rb_node *res;
	struct node_data *n;

	bpf_spin_lock(&glock);
	res = bpf_rbtree_first(&groot);
	if (!res) {
		bpf_spin_unlock(&glock);
		return 1;
	}
	n = container_of(res, struct node_data, node);
	bpf_spin_unlock(&glock);

	bpf_spin_lock(&glock);
	/* After first() in the previous critical section, n should be
	 * release_on_unlock and released after the previous spin_unlock,
	 * so it should not be possible to use it here
	 */
	bpf_rbtree_remove(&groot, &n->node);
	bpf_spin_unlock(&glock);
	return 0;
}

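/* The less() variants below perform rbtree or lock operations from inside
 * the rbtree_add() callback; each should be rejected by the verifier with
 * the message expected by the corresponding test program further down.
 */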
static bool less__bad_fn_call_add(struct bpf_rb_node *a, const struct bpf_rb_node *b)
{
	struct node_data *node_a;
	struct node_data *node_b;

	node_a = container_of(a, struct node_data, node);
	node_b = container_of(b, struct node_data, node);
	bpf_rbtree_add(&groot, &node_a->node, less);

	return node_a->key < node_b->key;
}

static bool less__bad_fn_call_remove(struct bpf_rb_node *a, const struct bpf_rb_node *b)
{
	struct node_data *node_a;
	struct node_data *node_b;

	node_a = container_of(a, struct node_data, node);
	node_b = container_of(b, struct node_data, node);
	bpf_rbtree_remove(&groot, &node_a->node);

	return node_a->key < node_b->key;
}

static bool less__bad_fn_call_first_unlock_after(struct bpf_rb_node *a, const struct bpf_rb_node *b)
{
	struct node_data *node_a;
	struct node_data *node_b;

	node_a = container_of(a, struct node_data, node);
	node_b = container_of(b, struct node_data, node);
	bpf_rbtree_first(&groot);
	bpf_spin_unlock(&glock);

	return node_a->key < node_b->key;
}

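/* Allocate a node and add it to groot under glock using the supplied
 * comparison callback.
 */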
static __always_inline
long add_with_cb(bool (cb)(struct bpf_rb_node *a, const struct bpf_rb_node *b))
{
	struct node_data *n;

	n = bpf_obj_new(typeof(*n));
	if (!n)
		return 1;

	bpf_spin_lock(&glock);
	bpf_rbtree_add(&groot, &n->node, cb);
	bpf_spin_unlock(&glock);
	return 0;
}

SEC("?tc")
__failure __msg("arg#1 expected pointer to allocated object")
long rbtree_api_add_bad_cb_bad_fn_call_add(void *ctx)
{
	return add_with_cb(less__bad_fn_call_add);
}

SEC("?tc")
__failure __msg("rbtree_remove not allowed in rbtree cb")
long rbtree_api_add_bad_cb_bad_fn_call_remove(void *ctx)
{
	return add_with_cb(less__bad_fn_call_remove);
}

SEC("?tc")
__failure __msg("can't spin_{lock,unlock} in rbtree cb")
long rbtree_api_add_bad_cb_bad_fn_call_first_unlock_after(void *ctx)
{
	return add_with_cb(less__bad_fn_call_first_unlock_after);
}

char _license[] SEC("license") = "GPL";