// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>
#include <network_helpers.h>

#include "map_kptr.skel.h"
#include "map_kptr_fail.skel.h"
#include "rcu_tasks_trace_gp.skel.h"

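/* Load the map_kptr skeleton and exercise its kptr test programs.
 *
 * opts carries a dummy IPv4 packet for the packet-processing programs,
 * while lopts (no data) is used for the local storage (ls_map) variants.
 * With test_run == true only the programs are run; with test_run == false
 * the userspace update/delete path of each map type is exercised as well,
 * which should release the kptr references stashed in the maps.
 */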
static void test_map_kptr_success(bool test_run)
{
	LIBBPF_OPTS(bpf_test_run_opts, lopts);
	LIBBPF_OPTS(bpf_test_run_opts, opts,
		.data_in = &pkt_v4,
		.data_size_in = sizeof(pkt_v4),
		.repeat = 1,
	);
	int key = 0, ret, cpu;
	struct map_kptr *skel;
	char buf[16], *pbuf;

	skel = map_kptr__open_and_load();
	if (!ASSERT_OK_PTR(skel, "map_kptr__open_and_load"))
		return;

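	/* Run the in-kernel checks first: test_map_kptr_ref1/2 use the packet
	 * opts, the ls variants run without data. Both the test_run result and
	 * the program retval (expected to be 0) are checked.
	 */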
	ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.test_map_kptr_ref1), &opts);
	ASSERT_OK(ret, "test_map_kptr_ref1 refcount");
	ASSERT_OK(opts.retval, "test_map_kptr_ref1 retval");
	ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.test_map_kptr_ref2), &opts);
	ASSERT_OK(ret, "test_map_kptr_ref2 refcount");
	ASSERT_OK(opts.retval, "test_map_kptr_ref2 retval");

	ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.test_ls_map_kptr_ref1), &lopts);
	ASSERT_OK(ret, "test_ls_map_kptr_ref1 refcount");
	ASSERT_OK(lopts.retval, "test_ls_map_kptr_ref1 retval");

	ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.test_ls_map_kptr_ref2), &lopts);
	ASSERT_OK(ret, "test_ls_map_kptr_ref2 refcount");
	ASSERT_OK(lopts.retval, "test_ls_map_kptr_ref2 retval");

	if (test_run)
		goto exit;

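	/* The percpu map updates below need a value buffer with one 16-byte
	 * slot per possible CPU.
	 */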
	cpu = libbpf_num_possible_cpus();
	if (!ASSERT_GT(cpu, 0, "libbpf_num_possible_cpus"))
		goto exit;

	pbuf = calloc(cpu, sizeof(buf));
	if (!ASSERT_OK_PTR(pbuf, "calloc(pbuf)"))
		goto exit;

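	/* Each userspace update/delete below replaces or removes a map element
	 * holding a kptr, which drops one reference on the referenced object.
	 * The expected count in skel->data->ref is decremented to match, and
	 * test_map_kptr_ref3 is re-run to verify it after every step.
	 */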
	ret = bpf_map__update_elem(skel->maps.array_map,
				   &key, sizeof(key), buf, sizeof(buf), 0);
	ASSERT_OK(ret, "array_map update");
	skel->data->ref--;
	ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.test_map_kptr_ref3), &opts);
	ASSERT_OK(ret, "test_map_kptr_ref3 refcount");
	ASSERT_OK(opts.retval, "test_map_kptr_ref3 retval");

	ret = bpf_map__update_elem(skel->maps.pcpu_array_map,
				   &key, sizeof(key), pbuf, cpu * sizeof(buf), 0);
	ASSERT_OK(ret, "pcpu_array_map update");
	skel->data->ref--;
	ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.test_map_kptr_ref3), &opts);
	ASSERT_OK(ret, "test_map_kptr_ref3 refcount");
	ASSERT_OK(opts.retval, "test_map_kptr_ref3 retval");

	ret = bpf_map__delete_elem(skel->maps.hash_map, &key, sizeof(key), 0);
	ASSERT_OK(ret, "hash_map delete");
	skel->data->ref--;
	ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.test_map_kptr_ref3), &opts);
	ASSERT_OK(ret, "test_map_kptr_ref3 refcount");
	ASSERT_OK(opts.retval, "test_map_kptr_ref3 retval");

	ret = bpf_map__delete_elem(skel->maps.pcpu_hash_map, &key, sizeof(key), 0);
	ASSERT_OK(ret, "pcpu_hash_map delete");
	skel->data->ref--;
	ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.test_map_kptr_ref3), &opts);
	ASSERT_OK(ret, "test_map_kptr_ref3 refcount");
	ASSERT_OK(opts.retval, "test_map_kptr_ref3 retval");

	ret = bpf_map__delete_elem(skel->maps.hash_malloc_map, &key, sizeof(key), 0);
	ASSERT_OK(ret, "hash_malloc_map delete");
	skel->data->ref--;
	ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.test_map_kptr_ref3), &opts);
	ASSERT_OK(ret, "test_map_kptr_ref3 refcount");
	ASSERT_OK(opts.retval, "test_map_kptr_ref3 retval");

	ret = bpf_map__delete_elem(skel->maps.pcpu_hash_malloc_map, &key, sizeof(key), 0);
	ASSERT_OK(ret, "pcpu_hash_malloc_map delete");
	skel->data->ref--;
	ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.test_map_kptr_ref3), &opts);
	ASSERT_OK(ret, "test_map_kptr_ref3 refcount");
	ASSERT_OK(opts.retval, "test_map_kptr_ref3 retval");

	ret = bpf_map__delete_elem(skel->maps.lru_hash_map, &key, sizeof(key), 0);
	ASSERT_OK(ret, "lru_hash_map delete");
	skel->data->ref--;
	ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.test_map_kptr_ref3), &opts);
	ASSERT_OK(ret, "test_map_kptr_ref3 refcount");
	ASSERT_OK(opts.retval, "test_map_kptr_ref3 retval");

	ret = bpf_map__delete_elem(skel->maps.lru_pcpu_hash_map, &key, sizeof(key), 0);
	ASSERT_OK(ret, "lru_pcpu_hash_map delete");
	skel->data->ref--;
	ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.test_map_kptr_ref3), &opts);
	ASSERT_OK(ret, "test_map_kptr_ref3 refcount");
	ASSERT_OK(opts.retval, "test_map_kptr_ref3 retval");

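	/* The local storage element is deleted from the BPF side by
	 * test_ls_map_kptr_ref_del, which likewise releases one reference.
	 */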
	ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.test_ls_map_kptr_ref_del), &lopts);
	ASSERT_OK(ret, "test_ls_map_kptr_ref_del delete");
	skel->data->ref--;
	ASSERT_OK(lopts.retval, "test_ls_map_kptr_ref_del retval");

	free(pbuf);
exit:
	map_kptr__destroy(skel);
}

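/* Synchronize with RCU tasks trace: run do_call_rcu_tasks_trace to queue a
 * callback, then busy-wait until gp_seq advances in the skeleton's BSS,
 * indicating that a grace period has elapsed.
 */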
static int kern_sync_rcu_tasks_trace(struct rcu_tasks_trace_gp *rcu)
{
	long gp_seq = READ_ONCE(rcu->bss->gp_seq);
	LIBBPF_OPTS(bpf_test_run_opts, opts);

	if (!ASSERT_OK(bpf_prog_test_run_opts(bpf_program__fd(rcu->progs.do_call_rcu_tasks_trace),
					      &opts), "do_call_rcu_tasks_trace"))
		return -EFAULT;
	if (!ASSERT_OK(opts.retval, "opts.retval == 0"))
		return -EFAULT;
	while (gp_seq == READ_ONCE(rcu->bss->gp_seq))
		sched_yield();
	return 0;
}

void serial_test_map_kptr(void)
{
	struct rcu_tasks_trace_gp *skel;

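	/* map_kptr_fail contains programs annotated with expected verifier
	 * failures; RUN_TESTS() loads each one and checks the outcome against
	 * its annotation.
	 */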
	RUN_TESTS(map_kptr_fail);

	skel = rcu_tasks_trace_gp__open_and_load();
	if (!ASSERT_OK_PTR(skel, "rcu_tasks_trace_gp__open_and_load"))
		return;
	if (!ASSERT_OK(rcu_tasks_trace_gp__attach(skel), "rcu_tasks_trace_gp__attach"))
		goto end;

	if (test__start_subtest("success-map")) {
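		/* First pass (test_run == true): only run the programs; the
		 * kptrs left in the maps are released when the skeleton's maps
		 * are freed at the end of test_map_kptr_success().
		 */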
		test_map_kptr_success(true);

		ASSERT_OK(kern_sync_rcu_tasks_trace(skel), "sync rcu_tasks_trace");
		ASSERT_OK(kern_sync_rcu(), "sync rcu");
		/* Observe refcount dropping to 1 on bpf_map_free_deferred */
		test_map_kptr_success(false);

		ASSERT_OK(kern_sync_rcu_tasks_trace(skel), "sync rcu_tasks_trace");
		ASSERT_OK(kern_sync_rcu(), "sync rcu");
		/* Observe refcount dropping to 1 on synchronous delete elem */
		test_map_kptr_success(true);
	}

end:
	rcu_tasks_trace_gp__destroy(skel);
	return;
}