// xref: /linux/tools/testing/selftests/bpf/prog_tests/linked_list.c (revision a1ff5a7d78a036d6c2178ee5acd6ba4946243800)
// SPDX-License-Identifier: GPL-2.0
#include <bpf/btf.h>
#include <test_btf.h>
#include <linux/btf.h>
#include <test_progs.h>
#include <network_helpers.h>

#include "linked_list.skel.h"
#include "linked_list_fail.skel.h"

static char log_buf[1024 * 1024];

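/* Table of programs from linked_list_fail.c that the verifier must reject,
 * paired with the substring expected in the verifier log. The TEST() macros
 * expand one entry per list operation, so every combination of lock
 * location (kptr allocation, global, map, inner map) and operation
 * (push/pop, front/back) is covered; the off= value in each message is the
 * byte offset of the bpf_spin_lock that guards the corresponding list head.
 */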
static struct {
	const char *prog_name;
	const char *err_msg;
} linked_list_fail_tests[] = {
#define TEST(test, off) \
	{ #test "_missing_lock_push_front", \
	  "bpf_spin_lock at off=" #off " must be held for bpf_list_head" }, \
	{ #test "_missing_lock_push_back", \
	  "bpf_spin_lock at off=" #off " must be held for bpf_list_head" }, \
	{ #test "_missing_lock_pop_front", \
	  "bpf_spin_lock at off=" #off " must be held for bpf_list_head" }, \
	{ #test "_missing_lock_pop_back", \
	  "bpf_spin_lock at off=" #off " must be held for bpf_list_head" },
	TEST(kptr, 40)
	TEST(global, 16)
	TEST(map, 0)
	TEST(inner_map, 0)
#undef TEST
#define TEST(test, op) \
	{ #test "_kptr_incorrect_lock_" #op, \
	  "held lock and object are not in the same allocation\n" \
	  "bpf_spin_lock at off=40 must be held for bpf_list_head" }, \
	{ #test "_global_incorrect_lock_" #op, \
	  "held lock and object are not in the same allocation\n" \
	  "bpf_spin_lock at off=16 must be held for bpf_list_head" }, \
	{ #test "_map_incorrect_lock_" #op, \
	  "held lock and object are not in the same allocation\n" \
	  "bpf_spin_lock at off=0 must be held for bpf_list_head" }, \
	{ #test "_inner_map_incorrect_lock_" #op, \
	  "held lock and object are not in the same allocation\n" \
	  "bpf_spin_lock at off=0 must be held for bpf_list_head" },
	TEST(kptr, push_front)
	TEST(kptr, push_back)
	TEST(kptr, pop_front)
	TEST(kptr, pop_back)
	TEST(global, push_front)
	TEST(global, push_back)
	TEST(global, pop_front)
	TEST(global, pop_back)
	TEST(map, push_front)
	TEST(map, push_back)
	TEST(map, pop_front)
	TEST(map, pop_back)
	TEST(inner_map, push_front)
	TEST(inner_map, push_back)
	TEST(inner_map, pop_front)
	TEST(inner_map, pop_back)
#undef TEST
	{ "map_compat_kprobe", "tracing progs cannot use bpf_{list_head,rb_root} yet" },
	{ "map_compat_kretprobe", "tracing progs cannot use bpf_{list_head,rb_root} yet" },
	{ "map_compat_tp", "tracing progs cannot use bpf_{list_head,rb_root} yet" },
	{ "map_compat_perf", "tracing progs cannot use bpf_{list_head,rb_root} yet" },
	{ "map_compat_raw_tp", "tracing progs cannot use bpf_{list_head,rb_root} yet" },
	{ "map_compat_raw_tp_w", "tracing progs cannot use bpf_{list_head,rb_root} yet" },
	{ "obj_type_id_oor", "local type ID argument must be in range [0, U32_MAX]" },
	{ "obj_new_no_composite", "bpf_obj_new/bpf_percpu_obj_new type ID argument must be of a struct" },
	{ "obj_new_no_struct", "bpf_obj_new/bpf_percpu_obj_new type ID argument must be of a struct" },
	{ "obj_drop_non_zero_off", "R1 must have zero offset when passed to release func" },
	{ "new_null_ret", "R0 invalid mem access 'ptr_or_null_'" },
	{ "obj_new_acq", "Unreleased reference id=" },
	{ "use_after_drop", "invalid mem access 'scalar'" },
	{ "ptr_walk_scalar", "type=scalar expected=percpu_ptr_" },
	{ "direct_read_lock", "direct access to bpf_spin_lock is disallowed" },
	{ "direct_write_lock", "direct access to bpf_spin_lock is disallowed" },
	{ "direct_read_head", "direct access to bpf_list_head is disallowed" },
	{ "direct_write_head", "direct access to bpf_list_head is disallowed" },
	{ "direct_read_node", "direct access to bpf_list_node is disallowed" },
	{ "direct_write_node", "direct access to bpf_list_node is disallowed" },
	{ "use_after_unlock_push_front", "invalid mem access 'scalar'" },
	{ "use_after_unlock_push_back", "invalid mem access 'scalar'" },
	{ "double_push_front", "arg#1 expected pointer to allocated object" },
	{ "double_push_back", "arg#1 expected pointer to allocated object" },
	{ "no_node_value_type", "bpf_list_node not found at offset=0" },
	{ "incorrect_value_type",
	  "operation on bpf_list_head expects arg#1 bpf_list_node at offset=48 in struct foo, "
	  "but arg is at offset=0 in struct bar" },
	{ "incorrect_node_var_off", "variable ptr_ access var_off=(0x0; 0xffffffff) disallowed" },
	{ "incorrect_node_off1", "bpf_list_node not found at offset=49" },
	{ "incorrect_node_off2", "arg#1 offset=0, but expected bpf_list_node at offset=48 in struct foo" },
	{ "no_head_type", "bpf_list_head not found at offset=0" },
	{ "incorrect_head_var_off1", "R1 doesn't have constant offset" },
	{ "incorrect_head_var_off2", "variable ptr_ access var_off=(0x0; 0xffffffff) disallowed" },
	{ "incorrect_head_off1", "bpf_list_head not found at offset=25" },
	{ "incorrect_head_off2", "bpf_list_head not found at offset=1" },
	{ "pop_front_off", "off 48 doesn't point to 'struct bpf_spin_lock' that is at 40" },
	{ "pop_back_off", "off 48 doesn't point to 'struct bpf_spin_lock' that is at 40" },
};

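/* Open the fail skeleton with the verifier log captured in log_buf, enable
 * only the program under test (its programs do not auto-load by default,
 * hence the explicit bpf_program__set_autoload() call), expect the load to
 * fail, and check that the log contains the expected error substring.
 */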
static void test_linked_list_fail_prog(const char *prog_name, const char *err_msg)
{
	LIBBPF_OPTS(bpf_object_open_opts, opts, .kernel_log_buf = log_buf,
						.kernel_log_size = sizeof(log_buf),
						.kernel_log_level = 1);
	struct linked_list_fail *skel;
	struct bpf_program *prog;
	int ret;

	skel = linked_list_fail__open_opts(&opts);
	if (!ASSERT_OK_PTR(skel, "linked_list_fail__open_opts"))
		return;

	prog = bpf_object__find_program_by_name(skel->obj, prog_name);
	if (!ASSERT_OK_PTR(prog, "bpf_object__find_program_by_name"))
		goto end;

	bpf_program__set_autoload(prog, true);

	ret = linked_list_fail__load(skel);
	if (!ASSERT_ERR(ret, "linked_list_fail__load must fail"))
		goto end;

	if (!ASSERT_OK_PTR(strstr(log_buf, err_msg), "expected error message")) {
		fprintf(stderr, "Expected: %s\n", err_msg);
		fprintf(stderr, "Verifier: %s\n", log_buf);
	}

end:
	linked_list_fail__destroy(skel);
}

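/* Overwrite the map value with 0xff bytes; updating an element makes the
 * kernel's check_and_free_fields() release any list nodes still owned by
 * the old value, so programs that leave nodes in the map do not leak them.
 * The 24-byte buffer is sized to the test maps' value layout (spin lock,
 * data word, and a 16-byte bpf_list_head).
 */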
static void clear_fields(struct bpf_map *map)
{
	char buf[24];
	int key = 0;

	memset(buf, 0xff, sizeof(buf));
	ASSERT_OK(bpf_map__update_elem(map, &key, sizeof(key), buf, sizeof(buf), 0), "check_and_free_fields");
}

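/* Which group of success programs to run; TEST_ALL runs every group. */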
enum {
	TEST_ALL,
	PUSH_POP,
	PUSH_POP_MULT,
	LIST_IN_LIST,
};

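/* Run the success programs for the selected mode via BPF_PROG_TEST_RUN.
 * When leave_in_map is set, nodes pushed by a program stay in the map so
 * that teardown paths free them; otherwise clear_fields() releases them
 * after each program.
 */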
static void test_linked_list_success(int mode, bool leave_in_map)
{
	LIBBPF_OPTS(bpf_test_run_opts, opts,
		.data_in = &pkt_v4,
		.data_size_in = sizeof(pkt_v4),
		.repeat = 1,
	);
	struct linked_list *skel;
	int ret;

	skel = linked_list__open_and_load();
	if (!ASSERT_OK_PTR(skel, "linked_list__open_and_load"))
		return;

	if (mode == LIST_IN_LIST)
		goto lil;
	if (mode == PUSH_POP_MULT)
		goto ppm;

	ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.map_list_push_pop), &opts);
	ASSERT_OK(ret, "map_list_push_pop");
	ASSERT_OK(opts.retval, "map_list_push_pop retval");
	if (!leave_in_map)
		clear_fields(skel->maps.array_map);

	ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.inner_map_list_push_pop), &opts);
	ASSERT_OK(ret, "inner_map_list_push_pop");
	ASSERT_OK(opts.retval, "inner_map_list_push_pop retval");
	if (!leave_in_map)
		clear_fields(skel->maps.inner_map);

	ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.global_list_push_pop), &opts);
	ASSERT_OK(ret, "global_list_push_pop");
	ASSERT_OK(opts.retval, "global_list_push_pop retval");
	if (!leave_in_map)
		clear_fields(skel->maps.bss_A);

	ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.global_list_push_pop_nested), &opts);
	ASSERT_OK(ret, "global_list_push_pop_nested");
	ASSERT_OK(opts.retval, "global_list_push_pop_nested retval");
	if (!leave_in_map)
		clear_fields(skel->maps.bss_A);

	ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.global_list_array_push_pop), &opts);
	ASSERT_OK(ret, "global_list_array_push_pop");
	ASSERT_OK(opts.retval, "global_list_array_push_pop retval");
	if (!leave_in_map)
		clear_fields(skel->maps.bss_A);

	if (mode == PUSH_POP)
		goto end;

ppm:
	ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.map_list_push_pop_multiple), &opts);
	ASSERT_OK(ret, "map_list_push_pop_multiple");
	ASSERT_OK(opts.retval, "map_list_push_pop_multiple retval");
	if (!leave_in_map)
		clear_fields(skel->maps.array_map);

	ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.inner_map_list_push_pop_multiple), &opts);
	ASSERT_OK(ret, "inner_map_list_push_pop_multiple");
	ASSERT_OK(opts.retval, "inner_map_list_push_pop_multiple retval");
	if (!leave_in_map)
		clear_fields(skel->maps.inner_map);

	ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.global_list_push_pop_multiple), &opts);
	ASSERT_OK(ret, "global_list_push_pop_multiple");
	ASSERT_OK(opts.retval, "global_list_push_pop_multiple retval");
	if (!leave_in_map)
		clear_fields(skel->maps.bss_A);

	if (mode == PUSH_POP_MULT)
		goto end;

lil:
	ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.map_list_in_list), &opts);
	ASSERT_OK(ret, "map_list_in_list");
	ASSERT_OK(opts.retval, "map_list_in_list retval");
	if (!leave_in_map)
		clear_fields(skel->maps.array_map);

	ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.inner_map_list_in_list), &opts);
	ASSERT_OK(ret, "inner_map_list_in_list");
	ASSERT_OK(opts.retval, "inner_map_list_in_list retval");
	if (!leave_in_map)
		clear_fields(skel->maps.inner_map);

	ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.global_list_in_list), &opts);
	ASSERT_OK(ret, "global_list_in_list");
	ASSERT_OK(opts.retval, "global_list_in_list retval");
	if (!leave_in_map)
		clear_fields(skel->maps.bss_A);
end:
	linked_list__destroy(skel);
}

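/* Expected BTF type IDs for the types added by init_btf() below: the int
 * added first gets ID 1, so the three structs land at IDs 2, 3 and 4.
 */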
#define SPIN_LOCK 2
#define LIST_HEAD 3
#define LIST_NODE 4

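/* Build a minimal BTF containing the kernel's special list types. The sizes
 * match the kernel definitions (4-byte bpf_spin_lock, 16-byte bpf_list_head,
 * 24-byte bpf_list_node). Note that btf__add_field() takes the field offset
 * in bits, not bytes, which is why later callers pass values like 128 for a
 * 16-byte offset.
 */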
static struct btf *init_btf(void)
{
	int id, lid, hid, nid;
	struct btf *btf;

	btf = btf__new_empty();
	if (!ASSERT_OK_PTR(btf, "btf__new_empty"))
		return NULL;
	id = btf__add_int(btf, "int", 4, BTF_INT_SIGNED);
	if (!ASSERT_EQ(id, 1, "btf__add_int"))
		goto end;
	lid = btf__add_struct(btf, "bpf_spin_lock", 4);
	if (!ASSERT_EQ(lid, SPIN_LOCK, "btf__add_struct bpf_spin_lock"))
		goto end;
	hid = btf__add_struct(btf, "bpf_list_head", 16);
	if (!ASSERT_EQ(hid, LIST_HEAD, "btf__add_struct bpf_list_head"))
		goto end;
	nid = btf__add_struct(btf, "bpf_list_node", 24);
	if (!ASSERT_EQ(nid, LIST_NODE, "btf__add_struct bpf_list_node"))
		goto end;
	return btf;
end:
	btf__free(btf);
	return NULL;
}

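/* A struct containing both a bpf_list_node and a bpf_rb_node is only
 * accepted by the kernel if it also carries a bpf_refcount field; without
 * one, loading the BTF must fail with -EINVAL.
 */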
static void list_and_rb_node_same_struct(bool refcount_field)
{
	int bpf_rb_node_btf_id, bpf_refcount_btf_id = 0, foo_btf_id;
	struct btf *btf;
	int id, err;

	btf = init_btf();
	if (!ASSERT_OK_PTR(btf, "init_btf"))
		return;

	bpf_rb_node_btf_id = btf__add_struct(btf, "bpf_rb_node", 32);
	if (!ASSERT_GT(bpf_rb_node_btf_id, 0, "btf__add_struct bpf_rb_node"))
		return;

	if (refcount_field) {
		bpf_refcount_btf_id = btf__add_struct(btf, "bpf_refcount", 4);
		if (!ASSERT_GT(bpf_refcount_btf_id, 0, "btf__add_struct bpf_refcount"))
			return;
	}

	id = btf__add_struct(btf, "bar", refcount_field ? 60 : 56);
	if (!ASSERT_GT(id, 0, "btf__add_struct bar"))
		return;
	err = btf__add_field(btf, "a", LIST_NODE, 0, 0);
	if (!ASSERT_OK(err, "btf__add_field bar::a"))
		return;
	err = btf__add_field(btf, "c", bpf_rb_node_btf_id, 192, 0);
	if (!ASSERT_OK(err, "btf__add_field bar::c"))
		return;
	if (refcount_field) {
		err = btf__add_field(btf, "ref", bpf_refcount_btf_id, 448, 0);
		if (!ASSERT_OK(err, "btf__add_field bar::ref"))
			return;
	}

	foo_btf_id = btf__add_struct(btf, "foo", 20);
	if (!ASSERT_GT(foo_btf_id, 0, "btf__add_struct foo"))
		return;
	err = btf__add_field(btf, "a", LIST_HEAD, 0, 0);
	if (!ASSERT_OK(err, "btf__add_field foo::a"))
		return;
	err = btf__add_field(btf, "b", SPIN_LOCK, 128, 0);
	if (!ASSERT_OK(err, "btf__add_field foo::b"))
		return;
	id = btf__add_decl_tag(btf, "contains:bar:a", foo_btf_id, 0);
	if (!ASSERT_GT(id, 0, "btf__add_decl_tag contains:bar:a"))
		return;

	err = btf__load_into_kernel(btf);
	ASSERT_EQ(err, refcount_field ? 0 : -EINVAL, "check btf");
	btf__free(btf);
}

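/* BTF-level negative and positive tests. Each subtest uses the
 * "while (test__start_subtest(...)) { ...; break; }" idiom so a failed
 * assertion can break out early while execution still falls through to the
 * next subtest. As a rough sketch (the selftest progs define their own
 * types), the first subtest builds BTF equivalent to:
 *
 *	struct foo {
 *		struct bpf_spin_lock a;	// only one lock per struct is allowed,
 *		struct bpf_spin_lock b;	// so a second lock must fail with -E2BIG
 *		struct bpf_list_head c;
 *	};
 */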
static void test_btf(void)
{
	struct btf *btf = NULL;
	int id, err;

	while (test__start_subtest("btf: too many locks")) {
		btf = init_btf();
		if (!ASSERT_OK_PTR(btf, "init_btf"))
			break;
		id = btf__add_struct(btf, "foo", 24);
		if (!ASSERT_EQ(id, 5, "btf__add_struct foo"))
			break;
		err = btf__add_field(btf, "a", SPIN_LOCK, 0, 0);
		if (!ASSERT_OK(err, "btf__add_field foo::a"))
			break;
		err = btf__add_field(btf, "b", SPIN_LOCK, 32, 0);
		if (!ASSERT_OK(err, "btf__add_field foo::b"))
			break;
		err = btf__add_field(btf, "c", LIST_HEAD, 64, 0);
		if (!ASSERT_OK(err, "btf__add_field foo::c"))
			break;

		err = btf__load_into_kernel(btf);
		ASSERT_EQ(err, -E2BIG, "check btf");
		btf__free(btf);
		break;
	}

	while (test__start_subtest("btf: missing lock")) {
		btf = init_btf();
		if (!ASSERT_OK_PTR(btf, "init_btf"))
			break;
		id = btf__add_struct(btf, "foo", 16);
		if (!ASSERT_EQ(id, 5, "btf__add_struct foo"))
			break;
		err = btf__add_field(btf, "a", LIST_HEAD, 0, 0);
		if (!ASSERT_OK(err, "btf__add_field foo::a"))
			break;
		id = btf__add_decl_tag(btf, "contains:baz:a", 5, 0);
		if (!ASSERT_EQ(id, 6, "btf__add_decl_tag contains:baz:a"))
			break;
		id = btf__add_struct(btf, "baz", 16);
		if (!ASSERT_EQ(id, 7, "btf__add_struct baz"))
			break;
		err = btf__add_field(btf, "a", LIST_NODE, 0, 0);
		if (!ASSERT_OK(err, "btf__add_field baz::a"))
			break;

		err = btf__load_into_kernel(btf);
		ASSERT_EQ(err, -EINVAL, "check btf");
		btf__free(btf);
		break;
	}

	while (test__start_subtest("btf: bad offset")) {
		btf = init_btf();
		if (!ASSERT_OK_PTR(btf, "init_btf"))
			break;
		id = btf__add_struct(btf, "foo", 36);
		if (!ASSERT_EQ(id, 5, "btf__add_struct foo"))
			break;
		err = btf__add_field(btf, "a", LIST_HEAD, 0, 0);
		if (!ASSERT_OK(err, "btf__add_field foo::a"))
			break;
		err = btf__add_field(btf, "b", LIST_NODE, 0, 0);
		if (!ASSERT_OK(err, "btf__add_field foo::b"))
			break;
		err = btf__add_field(btf, "c", SPIN_LOCK, 0, 0);
		if (!ASSERT_OK(err, "btf__add_field foo::c"))
			break;
		id = btf__add_decl_tag(btf, "contains:foo:b", 5, 0);
		if (!ASSERT_EQ(id, 6, "btf__add_decl_tag contains:foo:b"))
			break;

		err = btf__load_into_kernel(btf);
		ASSERT_EQ(err, -EEXIST, "check btf");
		btf__free(btf);
		break;
	}

	while (test__start_subtest("btf: missing contains:")) {
		btf = init_btf();
		if (!ASSERT_OK_PTR(btf, "init_btf"))
			break;
		id = btf__add_struct(btf, "foo", 24);
		if (!ASSERT_EQ(id, 5, "btf__add_struct foo"))
			break;
		err = btf__add_field(btf, "a", SPIN_LOCK, 0, 0);
		if (!ASSERT_OK(err, "btf__add_field foo::a"))
			break;
		err = btf__add_field(btf, "b", LIST_HEAD, 64, 0);
		if (!ASSERT_OK(err, "btf__add_field foo::b"))
			break;

		err = btf__load_into_kernel(btf);
		ASSERT_EQ(err, -EINVAL, "check btf");
		btf__free(btf);
		break;
	}

	while (test__start_subtest("btf: missing struct")) {
		btf = init_btf();
		if (!ASSERT_OK_PTR(btf, "init_btf"))
			break;
		id = btf__add_struct(btf, "foo", 24);
		if (!ASSERT_EQ(id, 5, "btf__add_struct foo"))
			break;
		err = btf__add_field(btf, "a", SPIN_LOCK, 0, 0);
		if (!ASSERT_OK(err, "btf__add_field foo::a"))
			break;
		err = btf__add_field(btf, "b", LIST_HEAD, 64, 0);
		if (!ASSERT_OK(err, "btf__add_field foo::b"))
			break;
		id = btf__add_decl_tag(btf, "contains:bar:bar", 5, 1);
		if (!ASSERT_EQ(id, 6, "btf__add_decl_tag contains:bar:bar"))
			break;

		err = btf__load_into_kernel(btf);
		ASSERT_EQ(err, -ENOENT, "check btf");
		btf__free(btf);
		break;
	}

	while (test__start_subtest("btf: missing node")) {
		btf = init_btf();
		if (!ASSERT_OK_PTR(btf, "init_btf"))
			break;
		id = btf__add_struct(btf, "foo", 24);
		if (!ASSERT_EQ(id, 5, "btf__add_struct foo"))
			break;
		err = btf__add_field(btf, "a", SPIN_LOCK, 0, 0);
		if (!ASSERT_OK(err, "btf__add_field foo::a"))
			break;
		err = btf__add_field(btf, "b", LIST_HEAD, 64, 0);
		if (!ASSERT_OK(err, "btf__add_field foo::b"))
			break;
		id = btf__add_decl_tag(btf, "contains:foo:c", 5, 1);
		if (!ASSERT_EQ(id, 6, "btf__add_decl_tag contains:foo:c"))
			break;

		err = btf__load_into_kernel(btf);
		ASSERT_EQ(err, -ENOENT, "check btf");
		btf__free(btf);
		break;
	}

	while (test__start_subtest("btf: node incorrect type")) {
		btf = init_btf();
		if (!ASSERT_OK_PTR(btf, "init_btf"))
			break;
		id = btf__add_struct(btf, "foo", 20);
		if (!ASSERT_EQ(id, 5, "btf__add_struct foo"))
			break;
		err = btf__add_field(btf, "a", LIST_HEAD, 0, 0);
		if (!ASSERT_OK(err, "btf__add_field foo::a"))
			break;
		err = btf__add_field(btf, "b", SPIN_LOCK, 128, 0);
		if (!ASSERT_OK(err, "btf__add_field foo::b"))
			break;
		id = btf__add_decl_tag(btf, "contains:bar:a", 5, 0);
		if (!ASSERT_EQ(id, 6, "btf__add_decl_tag contains:bar:a"))
			break;
		id = btf__add_struct(btf, "bar", 4);
		if (!ASSERT_EQ(id, 7, "btf__add_struct bar"))
			break;
		err = btf__add_field(btf, "a", SPIN_LOCK, 0, 0);
		if (!ASSERT_OK(err, "btf__add_field bar::a"))
			break;

		err = btf__load_into_kernel(btf);
		ASSERT_EQ(err, -EINVAL, "check btf");
		btf__free(btf);
		break;
	}

	while (test__start_subtest("btf: multiple bpf_list_node with name b")) {
		btf = init_btf();
		if (!ASSERT_OK_PTR(btf, "init_btf"))
			break;
		id = btf__add_struct(btf, "foo", 52);
		if (!ASSERT_EQ(id, 5, "btf__add_struct foo"))
			break;
		err = btf__add_field(btf, "a", LIST_HEAD, 0, 0);
		if (!ASSERT_OK(err, "btf__add_field foo::a"))
			break;
		err = btf__add_field(btf, "b", LIST_NODE, 128, 0);
		if (!ASSERT_OK(err, "btf__add_field foo::b"))
			break;
		/* second list_node deliberately reuses the name "b" */
		err = btf__add_field(btf, "b", LIST_NODE, 256, 0);
		if (!ASSERT_OK(err, "btf__add_field foo::b (dup)"))
			break;
		err = btf__add_field(btf, "d", SPIN_LOCK, 384, 0);
		if (!ASSERT_OK(err, "btf__add_field foo::d"))
			break;
		id = btf__add_decl_tag(btf, "contains:foo:b", 5, 0);
		if (!ASSERT_EQ(id, 6, "btf__add_decl_tag contains:foo:b"))
			break;

		err = btf__load_into_kernel(btf);
		ASSERT_EQ(err, -EINVAL, "check btf");
		btf__free(btf);
		break;
	}

	while (test__start_subtest("btf: owning | owned AA cycle")) {
		btf = init_btf();
		if (!ASSERT_OK_PTR(btf, "init_btf"))
			break;
		id = btf__add_struct(btf, "foo", 44);
		if (!ASSERT_EQ(id, 5, "btf__add_struct foo"))
			break;
		err = btf__add_field(btf, "a", LIST_HEAD, 0, 0);
		if (!ASSERT_OK(err, "btf__add_field foo::a"))
			break;
		err = btf__add_field(btf, "b", LIST_NODE, 128, 0);
		if (!ASSERT_OK(err, "btf__add_field foo::b"))
			break;
		err = btf__add_field(btf, "c", SPIN_LOCK, 320, 0);
		if (!ASSERT_OK(err, "btf__add_field foo::c"))
			break;
		id = btf__add_decl_tag(btf, "contains:foo:b", 5, 0);
		if (!ASSERT_EQ(id, 6, "btf__add_decl_tag contains:foo:b"))
			break;

		err = btf__load_into_kernel(btf);
		ASSERT_EQ(err, -ELOOP, "check btf");
		btf__free(btf);
		break;
	}

	while (test__start_subtest("btf: owning | owned ABA cycle")) {
		btf = init_btf();
		if (!ASSERT_OK_PTR(btf, "init_btf"))
			break;
		id = btf__add_struct(btf, "foo", 44);
		if (!ASSERT_EQ(id, 5, "btf__add_struct foo"))
			break;
		err = btf__add_field(btf, "a", LIST_HEAD, 0, 0);
		if (!ASSERT_OK(err, "btf__add_field foo::a"))
			break;
		err = btf__add_field(btf, "b", LIST_NODE, 128, 0);
		if (!ASSERT_OK(err, "btf__add_field foo::b"))
			break;
		err = btf__add_field(btf, "c", SPIN_LOCK, 320, 0);
		if (!ASSERT_OK(err, "btf__add_field foo::c"))
			break;
		id = btf__add_decl_tag(btf, "contains:bar:b", 5, 0);
		if (!ASSERT_EQ(id, 6, "btf__add_decl_tag contains:bar:b"))
			break;
		id = btf__add_struct(btf, "bar", 44);
		if (!ASSERT_EQ(id, 7, "btf__add_struct bar"))
			break;
		err = btf__add_field(btf, "a", LIST_HEAD, 0, 0);
		if (!ASSERT_OK(err, "btf__add_field bar::a"))
			break;
		err = btf__add_field(btf, "b", LIST_NODE, 128, 0);
		if (!ASSERT_OK(err, "btf__add_field bar::b"))
			break;
		err = btf__add_field(btf, "c", SPIN_LOCK, 320, 0);
		if (!ASSERT_OK(err, "btf__add_field bar::c"))
			break;
		id = btf__add_decl_tag(btf, "contains:foo:b", 7, 0);
		if (!ASSERT_EQ(id, 8, "btf__add_decl_tag contains:foo:b"))
			break;

		err = btf__load_into_kernel(btf);
		ASSERT_EQ(err, -ELOOP, "check btf");
		btf__free(btf);
		break;
	}

	while (test__start_subtest("btf: owning -> owned")) {
		btf = init_btf();
		if (!ASSERT_OK_PTR(btf, "init_btf"))
			break;
		id = btf__add_struct(btf, "foo", 28);
		if (!ASSERT_EQ(id, 5, "btf__add_struct foo"))
			break;
		err = btf__add_field(btf, "a", LIST_HEAD, 0, 0);
		if (!ASSERT_OK(err, "btf__add_field foo::a"))
			break;
		err = btf__add_field(btf, "b", SPIN_LOCK, 192, 0);
		if (!ASSERT_OK(err, "btf__add_field foo::b"))
			break;
		id = btf__add_decl_tag(btf, "contains:bar:a", 5, 0);
		if (!ASSERT_EQ(id, 6, "btf__add_decl_tag contains:bar:a"))
			break;
		id = btf__add_struct(btf, "bar", 24);
		if (!ASSERT_EQ(id, 7, "btf__add_struct bar"))
			break;
		err = btf__add_field(btf, "a", LIST_NODE, 0, 0);
		if (!ASSERT_OK(err, "btf__add_field bar::a"))
			break;

		err = btf__load_into_kernel(btf);
		ASSERT_EQ(err, 0, "check btf");
		btf__free(btf);
		break;
	}

	while (test__start_subtest("btf: owning -> owning | owned -> owned")) {
		btf = init_btf();
		if (!ASSERT_OK_PTR(btf, "init_btf"))
			break;
		id = btf__add_struct(btf, "foo", 28);
		if (!ASSERT_EQ(id, 5, "btf__add_struct foo"))
			break;
		err = btf__add_field(btf, "a", LIST_HEAD, 0, 0);
		if (!ASSERT_OK(err, "btf__add_field foo::a"))
			break;
		err = btf__add_field(btf, "b", SPIN_LOCK, 192, 0);
		if (!ASSERT_OK(err, "btf__add_field foo::b"))
			break;
		id = btf__add_decl_tag(btf, "contains:bar:b", 5, 0);
		if (!ASSERT_EQ(id, 6, "btf__add_decl_tag contains:bar:b"))
			break;
		id = btf__add_struct(btf, "bar", 44);
		if (!ASSERT_EQ(id, 7, "btf__add_struct bar"))
			break;
		err = btf__add_field(btf, "a", LIST_HEAD, 0, 0);
		if (!ASSERT_OK(err, "btf__add_field bar::a"))
			break;
		err = btf__add_field(btf, "b", LIST_NODE, 128, 0);
		if (!ASSERT_OK(err, "btf__add_field bar::b"))
			break;
		err = btf__add_field(btf, "c", SPIN_LOCK, 320, 0);
		if (!ASSERT_OK(err, "btf__add_field bar::c"))
			break;
		id = btf__add_decl_tag(btf, "contains:baz:a", 7, 0);
		if (!ASSERT_EQ(id, 8, "btf__add_decl_tag contains:baz:a"))
			break;
		id = btf__add_struct(btf, "baz", 24);
		if (!ASSERT_EQ(id, 9, "btf__add_struct baz"))
			break;
		err = btf__add_field(btf, "a", LIST_NODE, 0, 0);
		if (!ASSERT_OK(err, "btf__add_field baz::a"))
			break;

		err = btf__load_into_kernel(btf);
		ASSERT_EQ(err, 0, "check btf");
		btf__free(btf);
		break;
	}

	while (test__start_subtest("btf: owning | owned -> owning | owned -> owned")) {
		btf = init_btf();
		if (!ASSERT_OK_PTR(btf, "init_btf"))
			break;
		id = btf__add_struct(btf, "foo", 44);
		if (!ASSERT_EQ(id, 5, "btf__add_struct foo"))
			break;
		err = btf__add_field(btf, "a", LIST_HEAD, 0, 0);
		if (!ASSERT_OK(err, "btf__add_field foo::a"))
			break;
		err = btf__add_field(btf, "b", LIST_NODE, 128, 0);
		if (!ASSERT_OK(err, "btf__add_field foo::b"))
			break;
		err = btf__add_field(btf, "c", SPIN_LOCK, 320, 0);
		if (!ASSERT_OK(err, "btf__add_field foo::c"))
			break;
		id = btf__add_decl_tag(btf, "contains:bar:b", 5, 0);
		if (!ASSERT_EQ(id, 6, "btf__add_decl_tag contains:bar:b"))
			break;
		id = btf__add_struct(btf, "bar", 44);
		if (!ASSERT_EQ(id, 7, "btf__add_struct bar"))
			break;
		err = btf__add_field(btf, "a", LIST_HEAD, 0, 0);
		if (!ASSERT_OK(err, "btf__add_field bar::a"))
			break;
		err = btf__add_field(btf, "b", LIST_NODE, 128, 0);
		if (!ASSERT_OK(err, "btf__add_field bar::b"))
			break;
		err = btf__add_field(btf, "c", SPIN_LOCK, 320, 0);
		if (!ASSERT_OK(err, "btf__add_field bar::c"))
			break;
		id = btf__add_decl_tag(btf, "contains:baz:a", 7, 0);
		if (!ASSERT_EQ(id, 8, "btf__add_decl_tag contains:baz:a"))
			break;
		id = btf__add_struct(btf, "baz", 24);
		if (!ASSERT_EQ(id, 9, "btf__add_struct baz"))
			break;
		err = btf__add_field(btf, "a", LIST_NODE, 0, 0);
		if (!ASSERT_OK(err, "btf__add_field baz::a"))
			break;

		err = btf__load_into_kernel(btf);
		ASSERT_EQ(err, -ELOOP, "check btf");
		btf__free(btf);
		break;
	}

	while (test__start_subtest("btf: owning -> owning | owned -> owning | owned -> owned")) {
		btf = init_btf();
		if (!ASSERT_OK_PTR(btf, "init_btf"))
			break;
		id = btf__add_struct(btf, "foo", 20);
		if (!ASSERT_EQ(id, 5, "btf__add_struct foo"))
			break;
		err = btf__add_field(btf, "a", LIST_HEAD, 0, 0);
		if (!ASSERT_OK(err, "btf__add_field foo::a"))
			break;
		err = btf__add_field(btf, "b", SPIN_LOCK, 128, 0);
		if (!ASSERT_OK(err, "btf__add_field foo::b"))
			break;
		id = btf__add_decl_tag(btf, "contains:bar:b", 5, 0);
		if (!ASSERT_EQ(id, 6, "btf__add_decl_tag contains:bar:b"))
			break;
		id = btf__add_struct(btf, "bar", 44);
		if (!ASSERT_EQ(id, 7, "btf__add_struct bar"))
			break;
		err = btf__add_field(btf, "a", LIST_HEAD, 0, 0);
		if (!ASSERT_OK(err, "btf__add_field bar::a"))
			break;
		err = btf__add_field(btf, "b", LIST_NODE, 128, 0);
		if (!ASSERT_OK(err, "btf__add_field bar::b"))
			break;
		err = btf__add_field(btf, "c", SPIN_LOCK, 320, 0);
		if (!ASSERT_OK(err, "btf__add_field bar::c"))
			break;
		id = btf__add_decl_tag(btf, "contains:baz:b", 7, 0);
		if (!ASSERT_EQ(id, 8, "btf__add_decl_tag contains:baz:b"))
			break;
		id = btf__add_struct(btf, "baz", 44);
		if (!ASSERT_EQ(id, 9, "btf__add_struct baz"))
			break;
		err = btf__add_field(btf, "a", LIST_HEAD, 0, 0);
		if (!ASSERT_OK(err, "btf__add_field baz::a"))
			break;
		err = btf__add_field(btf, "b", LIST_NODE, 128, 0);
		if (!ASSERT_OK(err, "btf__add_field baz::b"))
			break;
		err = btf__add_field(btf, "c", SPIN_LOCK, 320, 0);
		if (!ASSERT_OK(err, "btf__add_field baz::c"))
			break;
		id = btf__add_decl_tag(btf, "contains:bam:a", 9, 0);
		if (!ASSERT_EQ(id, 10, "btf__add_decl_tag contains:bam:a"))
			break;
		id = btf__add_struct(btf, "bam", 24);
		if (!ASSERT_EQ(id, 11, "btf__add_struct bam"))
			break;
		err = btf__add_field(btf, "a", LIST_NODE, 0, 0);
		if (!ASSERT_OK(err, "btf__add_field bam::a"))
			break;

		err = btf__load_into_kernel(btf);
		ASSERT_EQ(err, -ELOOP, "check btf");
		btf__free(btf);
		break;
	}

	while (test__start_subtest("btf: list_node and rb_node in same struct")) {
		list_and_rb_node_same_struct(true);
		break;
	}

	while (test__start_subtest("btf: list_node and rb_node in same struct, no bpf_refcount")) {
		list_and_rb_node_same_struct(false);
		break;
	}
}

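/* Entry point: run each negative program as its own subtest, then the BTF
 * subtests, then every group of success programs, both clearing the map
 * fields between runs and leaving pushed nodes in place so that map
 * teardown paths free them.
 */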
void test_linked_list(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(linked_list_fail_tests); i++) {
		if (!test__start_subtest(linked_list_fail_tests[i].prog_name))
			continue;
		test_linked_list_fail_prog(linked_list_fail_tests[i].prog_name,
					   linked_list_fail_tests[i].err_msg);
	}
	test_btf();
	test_linked_list_success(PUSH_POP, false);
	test_linked_list_success(PUSH_POP, true);
	test_linked_list_success(PUSH_POP_MULT, false);
	test_linked_list_success(PUSH_POP_MULT, true);
	test_linked_list_success(LIST_IN_LIST, false);
	test_linked_list_success(LIST_IN_LIST, true);
	test_linked_list_success(TEST_ALL, false);
}