xref: /linux/tools/testing/selftests/bpf/progs/linked_list.c (revision d7f39aee79f04eeaa42085728423501b33ac5be5)
// SPDX-License-Identifier: GPL-2.0
#include <vmlinux.h>
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_core_read.h>
#include "bpf_experimental.h"

#ifndef ARRAY_SIZE
#define ARRAY_SIZE(x) (int)(sizeof(x) / sizeof((x)[0]))
#endif

#include "linked_list.h"

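/* struct foo, struct bar, the maps (array_map, map_of_maps) and the plain
 * globals glock/ghead used below are declared in linked_list.h. The list
 * heads defined here cover the remaining placements exercised by this test:
 * nested inside another struct (ghead_nested) and in global arrays
 * (ghead_array, ghead_array_one). private() is a selftest helper that places
 * a global into a named data section; the C section groups glock_c with the
 * array heads it protects.
 */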
struct head_nested_inner {
	struct bpf_spin_lock lock;
	struct bpf_list_head head __contains(foo, node2);
};

struct head_nested {
	int dummy;
	struct head_nested_inner inner;
};

private(C) struct bpf_spin_lock glock_c;
private(C) struct bpf_list_head ghead_array[2] __contains(foo, node2);
private(C) struct bpf_list_head ghead_array_one[1] __contains(foo, node2);

private(D) struct head_nested ghead_nested;

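/* Single-node exercise: verify the list starts out empty, push one node and
 * pop it back via both ends, check the stored data, then confirm the list is
 * empty again. Returns 0 on success, otherwise a distinct non-zero value
 * identifying the failing step. When leave_in_map is set, the node is left on
 * the list instead of being popped and dropped.
 */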
static __always_inline
int list_push_pop(struct bpf_spin_lock *lock, struct bpf_list_head *head, bool leave_in_map)
{
	struct bpf_list_node *n;
	struct foo *f;

	f = bpf_obj_new(typeof(*f));
	if (!f)
		return 2;

	bpf_spin_lock(lock);
	n = bpf_list_pop_front(head);
	bpf_spin_unlock(lock);
	if (n) {
		bpf_obj_drop(container_of(n, struct foo, node2));
		bpf_obj_drop(f);
		return 3;
	}

	bpf_spin_lock(lock);
	n = bpf_list_pop_back(head);
	bpf_spin_unlock(lock);
	if (n) {
		bpf_obj_drop(container_of(n, struct foo, node2));
		bpf_obj_drop(f);
		return 4;
	}

	bpf_spin_lock(lock);
	f->data = 42;
	bpf_list_push_front(head, &f->node2);
	bpf_spin_unlock(lock);
	if (leave_in_map)
		return 0;
	bpf_spin_lock(lock);
	n = bpf_list_pop_back(head);
	bpf_spin_unlock(lock);
	if (!n)
		return 5;
	f = container_of(n, struct foo, node2);
	if (f->data != 42) {
		bpf_obj_drop(f);
		return 6;
	}

	bpf_spin_lock(lock);
	f->data = 13;
	bpf_list_push_front(head, &f->node2);
	bpf_spin_unlock(lock);
	bpf_spin_lock(lock);
	n = bpf_list_pop_front(head);
	bpf_spin_unlock(lock);
	if (!n)
		return 7;
	f = container_of(n, struct foo, node2);
	if (f->data != 13) {
		bpf_obj_drop(f);
		return 8;
	}
	bpf_obj_drop(f);

	bpf_spin_lock(lock);
	n = bpf_list_pop_front(head);
	bpf_spin_unlock(lock);
	if (n) {
		bpf_obj_drop(container_of(n, struct foo, node2));
		return 9;
	}

	bpf_spin_lock(lock);
	n = bpf_list_pop_back(head);
	bpf_spin_unlock(lock);
	if (n) {
		bpf_obj_drop(container_of(n, struct foo, node2));
		return 10;
	}
	return 0;
}

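/* Multi-node exercise: push 200 nodes (two per lock section), pop each from
 * the front to verify LIFO order and rotate it to the back, then, unless
 * leave_in_map is set, drain the list from the back checking data values
 * 0..199 and confirm the list ends up empty.
 */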
static __always_inline
int list_push_pop_multiple(struct bpf_spin_lock *lock, struct bpf_list_head *head, bool leave_in_map)
{
	struct bpf_list_node *n;
	struct foo *f[200], *pf;
	int i;

	/* The loop following this check adds nodes two at a time in order to
	 * validate the release_on_unlock logic for multiple references.
	 */
	if (ARRAY_SIZE(f) % 2)
		return 10;
	for (i = 0; i < ARRAY_SIZE(f); i += 2) {
		f[i] = bpf_obj_new(typeof(**f));
		if (!f[i])
			return 2;
		f[i]->data = i;

		f[i + 1] = bpf_obj_new(typeof(**f));
		if (!f[i + 1]) {
			bpf_obj_drop(f[i]);
			return 9;
		}
		f[i + 1]->data = i + 1;

		bpf_spin_lock(lock);
		bpf_list_push_front(head, &f[i]->node2);
		bpf_list_push_front(head, &f[i + 1]->node2);
		bpf_spin_unlock(lock);
	}

	for (i = 0; i < ARRAY_SIZE(f); i++) {
		bpf_spin_lock(lock);
		n = bpf_list_pop_front(head);
		bpf_spin_unlock(lock);
		if (!n)
			return 3;
		pf = container_of(n, struct foo, node2);
		if (pf->data != (ARRAY_SIZE(f) - i - 1)) {
			bpf_obj_drop(pf);
			return 4;
		}
		bpf_spin_lock(lock);
		bpf_list_push_back(head, &pf->node2);
		bpf_spin_unlock(lock);
	}

	if (leave_in_map)
		return 0;

	for (i = 0; i < ARRAY_SIZE(f); i++) {
		bpf_spin_lock(lock);
		n = bpf_list_pop_back(head);
		bpf_spin_unlock(lock);
		if (!n)
			return 5;
		pf = container_of(n, struct foo, node2);
		if (pf->data != i) {
			bpf_obj_drop(pf);
			return 6;
		}
		bpf_obj_drop(pf);
	}
	bpf_spin_lock(lock);
	n = bpf_list_pop_back(head);
	bpf_spin_unlock(lock);
	if (n) {
		bpf_obj_drop(container_of(n, struct foo, node2));
		return 7;
	}

	bpf_spin_lock(lock);
	n = bpf_list_pop_front(head);
	bpf_spin_unlock(lock);
	if (n) {
		bpf_obj_drop(container_of(n, struct foo, node2));
		return 8;
	}
	return 0;
}

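/* Nested-list exercise: each struct foo carries its own lock and a
 * bpf_list_head of struct bar nodes. Fill the inner list, push the foo onto
 * the outer list, and (unless leave_in_map is set) pop everything back out,
 * verifying the stored data values along the way.
 */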
static __always_inline
int list_in_list(struct bpf_spin_lock *lock, struct bpf_list_head *head, bool leave_in_map)
{
	struct bpf_list_node *n;
	struct bar *ba[8], *b;
	struct foo *f;
	int i;

	f = bpf_obj_new(typeof(*f));
	if (!f)
		return 2;
	for (i = 0; i < ARRAY_SIZE(ba); i++) {
		b = bpf_obj_new(typeof(*b));
		if (!b) {
			bpf_obj_drop(f);
			return 3;
		}
		b->data = i;
		bpf_spin_lock(&f->lock);
		bpf_list_push_back(&f->head, &b->node);
		bpf_spin_unlock(&f->lock);
	}

	bpf_spin_lock(lock);
	f->data = 42;
	bpf_list_push_front(head, &f->node2);
	bpf_spin_unlock(lock);

	if (leave_in_map)
		return 0;

	bpf_spin_lock(lock);
	n = bpf_list_pop_front(head);
	bpf_spin_unlock(lock);
	if (!n)
		return 4;
	f = container_of(n, struct foo, node2);
	if (f->data != 42) {
		bpf_obj_drop(f);
		return 5;
	}

	for (i = 0; i < ARRAY_SIZE(ba); i++) {
		bpf_spin_lock(&f->lock);
		n = bpf_list_pop_front(&f->head);
		bpf_spin_unlock(&f->lock);
		if (!n) {
			bpf_obj_drop(f);
			return 6;
		}
		b = container_of(n, struct bar, node);
		if (b->data != i) {
			bpf_obj_drop(f);
			bpf_obj_drop(b);
			return 7;
		}
		bpf_obj_drop(b);
	}
	bpf_spin_lock(&f->lock);
	n = bpf_list_pop_front(&f->head);
	bpf_spin_unlock(&f->lock);
	if (n) {
		bpf_obj_drop(f);
		bpf_obj_drop(container_of(n, struct bar, node));
		return 8;
	}
	bpf_obj_drop(f);
	return 0;
}

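/* Each test_* wrapper runs its helper twice: once draining the list
 * completely and once leaving nodes in place (leave_in_map = true).
 */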
static __always_inline
int test_list_push_pop(struct bpf_spin_lock *lock, struct bpf_list_head *head)
{
	int ret;

	ret = list_push_pop(lock, head, false);
	if (ret)
		return ret;
	return list_push_pop(lock, head, true);
}

static __always_inline
int test_list_push_pop_multiple(struct bpf_spin_lock *lock, struct bpf_list_head *head)
{
	int ret;

	ret = list_push_pop_multiple(lock, head, false);
	if (ret)
		return ret;
	return list_push_pop_multiple(lock, head, true);
}

static __always_inline
int test_list_in_list(struct bpf_spin_lock *lock, struct bpf_list_head *head)
{
	int ret;

	ret = list_in_list(lock, head, false);
	if (ret)
		return ret;
	return list_in_list(lock, head, true);
}

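/* The tc programs below run the helpers against list heads stored in an
 * array map value, in an inner map of a map-in-map, and in global variables
 * (including the nested and array variants declared above).
 */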
SEC("tc")
int map_list_push_pop(void *ctx)
{
	struct map_value *v;

	v = bpf_map_lookup_elem(&array_map, &(int){0});
	if (!v)
		return 1;
	return test_list_push_pop(&v->lock, &v->head);
}

SEC("tc")
int inner_map_list_push_pop(void *ctx)
{
	struct map_value *v;
	void *map;

	map = bpf_map_lookup_elem(&map_of_maps, &(int){0});
	if (!map)
		return 1;
	v = bpf_map_lookup_elem(map, &(int){0});
	if (!v)
		return 1;
	return test_list_push_pop(&v->lock, &v->head);
}

SEC("tc")
int global_list_push_pop(void *ctx)
{
	return test_list_push_pop(&glock, &ghead);
}

SEC("tc")
int global_list_push_pop_nested(void *ctx)
{
	return test_list_push_pop(&ghead_nested.inner.lock, &ghead_nested.inner.head);
}

SEC("tc")
int global_list_array_push_pop(void *ctx)
{
	int r;

	r = test_list_push_pop(&glock_c, &ghead_array[0]);
	if (r)
		return r;

	r = test_list_push_pop(&glock_c, &ghead_array[1]);
	if (r)
		return r;

	/* An array with only one element is a special case: the verifier
	 * treats it just like a plain bpf_list_head variable, not as an
	 * array.
	 */
	return test_list_push_pop(&glock_c, &ghead_array_one[0]);
}

SEC("tc")
int map_list_push_pop_multiple(void *ctx)
{
	struct map_value *v;

	v = bpf_map_lookup_elem(&array_map, &(int){0});
	if (!v)
		return 1;
	return test_list_push_pop_multiple(&v->lock, &v->head);
}

SEC("tc")
int inner_map_list_push_pop_multiple(void *ctx)
{
	struct map_value *v;
	void *map;

	map = bpf_map_lookup_elem(&map_of_maps, &(int){0});
	if (!map)
		return 1;
	v = bpf_map_lookup_elem(map, &(int){0});
	if (!v)
		return 1;
	return test_list_push_pop_multiple(&v->lock, &v->head);
}

SEC("tc")
int global_list_push_pop_multiple(void *ctx)
{
	int ret;

	ret = list_push_pop_multiple(&glock, &ghead, false);
	if (ret)
		return ret;
	return list_push_pop_multiple(&glock, &ghead, true);
}

SEC("tc")
int map_list_in_list(void *ctx)
{
	struct map_value *v;

	v = bpf_map_lookup_elem(&array_map, &(int){0});
	if (!v)
		return 1;
	return test_list_in_list(&v->lock, &v->head);
}

SEC("tc")
int inner_map_list_in_list(void *ctx)
{
	struct map_value *v;
	void *map;

	map = bpf_map_lookup_elem(&map_of_maps, &(int){0});
	if (!map)
		return 1;
	v = bpf_map_lookup_elem(map, &(int){0});
	if (!v)
		return 1;
	return test_list_in_list(&v->lock, &v->head);
}

SEC("tc")
int global_list_in_list(void *ctx)
{
	return test_list_in_list(&glock, &ghead);
}

char _license[] SEC("license") = "GPL";