xref: /linux/tools/testing/selftests/bpf/prog_tests/test_struct_ops_assoc.c (revision c17ee635fd3a482b2ad2bf5e269755c2eae5f25e)
// SPDX-License-Identifier: GPL-2.0

#include <test_progs.h>
#include "struct_ops_assoc.skel.h"
#include "struct_ops_assoc_reuse.skel.h"
#include "struct_ops_assoc_in_timer.skel.h"

8 static void test_st_ops_assoc(void)
9 {
10 	struct struct_ops_assoc *skel = NULL;
11 	int err, pid;
12 
13 	skel = struct_ops_assoc__open_and_load();
14 	if (!ASSERT_OK_PTR(skel, "struct_ops_assoc__open"))
15 		goto out;
16 
17 	/* cannot explicitly associate struct_ops program */
18 	err = bpf_program__assoc_struct_ops(skel->progs.test_1_a,
19 					    skel->maps.st_ops_map_a, NULL);
20 	ASSERT_ERR(err, "bpf_program__assoc_struct_ops(test_1_a, st_ops_map_a)");
21 
22 	err = bpf_program__assoc_struct_ops(skel->progs.syscall_prog_a,
23 					    skel->maps.st_ops_map_a, NULL);
24 	ASSERT_OK(err, "bpf_program__assoc_struct_ops(syscall_prog_a, st_ops_map_a)");
25 
26 	err = bpf_program__assoc_struct_ops(skel->progs.sys_enter_prog_a,
27 					    skel->maps.st_ops_map_a, NULL);
28 	ASSERT_OK(err, "bpf_program__assoc_struct_ops(sys_enter_prog_a, st_ops_map_a)");
29 
30 	err = bpf_program__assoc_struct_ops(skel->progs.syscall_prog_b,
31 					    skel->maps.st_ops_map_b, NULL);
32 	ASSERT_OK(err, "bpf_program__assoc_struct_ops(syscall_prog_b, st_ops_map_b)");
33 
34 	err = bpf_program__assoc_struct_ops(skel->progs.sys_enter_prog_b,
35 					    skel->maps.st_ops_map_b, NULL);
36 	ASSERT_OK(err, "bpf_program__assoc_struct_ops(sys_enter_prog_b, st_ops_map_b)");
37 
38 	/* sys_enter_prog_a already associated with map_a */
39 	err = bpf_program__assoc_struct_ops(skel->progs.sys_enter_prog_a,
40 					    skel->maps.st_ops_map_b, NULL);
41 	ASSERT_ERR(err, "bpf_program__assoc_struct_ops(sys_enter_prog_a, st_ops_map_b)");
42 
43 	err = struct_ops_assoc__attach(skel);
44 	if (!ASSERT_OK(err, "struct_ops_assoc__attach"))
45 		goto out;
46 
47 	/* run tracing prog that calls .test_1 and checks return */
48 	pid = getpid();
49 	skel->bss->test_pid = pid;
50 	sys_gettid();
51 	skel->bss->test_pid = 0;
52 
53 	ASSERT_EQ(skel->bss->test_err_a, 0, "skel->bss->test_err_a");
54 	ASSERT_EQ(skel->bss->test_err_b, 0, "skel->bss->test_err_b");
55 
56 	/* run syscall_prog that calls .test_1 and checks return */
57 	err = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.syscall_prog_a), NULL);
58 	ASSERT_OK(err, "bpf_prog_test_run_opts");
59 
60 	err = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.syscall_prog_b), NULL);
61 	ASSERT_OK(err, "bpf_prog_test_run_opts");
62 
63 	ASSERT_EQ(skel->bss->test_err_a, 0, "skel->bss->test_err_a");
64 	ASSERT_EQ(skel->bss->test_err_b, 0, "skel->bss->test_err_b");
65 
66 out:
67 	struct_ops_assoc__destroy(skel);
68 }
69 
70 static void test_st_ops_assoc_reuse(void)
71 {
72 	struct struct_ops_assoc_reuse *skel = NULL;
73 	int err;
74 
75 	skel = struct_ops_assoc_reuse__open_and_load();
76 	if (!ASSERT_OK_PTR(skel, "struct_ops_assoc_reuse__open"))
77 		goto out;
78 
79 	err = bpf_program__assoc_struct_ops(skel->progs.syscall_prog_a,
80 					    skel->maps.st_ops_map_a, NULL);
81 	ASSERT_OK(err, "bpf_program__assoc_struct_ops(syscall_prog_a, st_ops_map_a)");
82 
83 	err = bpf_program__assoc_struct_ops(skel->progs.syscall_prog_b,
84 					    skel->maps.st_ops_map_b, NULL);
85 	ASSERT_OK(err, "bpf_program__assoc_struct_ops(syscall_prog_b, st_ops_map_b)");
86 
87 	err = struct_ops_assoc_reuse__attach(skel);
88 	if (!ASSERT_OK(err, "struct_ops_assoc__attach"))
89 		goto out;
90 
91 	/* run syscall_prog that calls .test_1 and checks return */
92 	err = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.syscall_prog_a), NULL);
93 	ASSERT_OK(err, "bpf_prog_test_run_opts");
94 
95 	err = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.syscall_prog_b), NULL);
96 	ASSERT_OK(err, "bpf_prog_test_run_opts");
97 
98 	ASSERT_EQ(skel->bss->test_err_a, 0, "skel->bss->test_err_a");
99 	ASSERT_EQ(skel->bss->test_err_b, 0, "skel->bss->test_err_b");
100 
101 out:
102 	struct_ops_assoc_reuse__destroy(skel);
103 }
104 
105 static void test_st_ops_assoc_in_timer(void)
106 {
107 	struct struct_ops_assoc_in_timer *skel = NULL;
108 	int err;
109 
110 	skel = struct_ops_assoc_in_timer__open_and_load();
111 	if (!ASSERT_OK_PTR(skel, "struct_ops_assoc_in_timer__open"))
112 		goto out;
113 
114 	err = bpf_program__assoc_struct_ops(skel->progs.syscall_prog,
115 					    skel->maps.st_ops_map, NULL);
116 	ASSERT_OK(err, "bpf_program__assoc_struct_ops");
117 
118 	err = struct_ops_assoc_in_timer__attach(skel);
119 	if (!ASSERT_OK(err, "struct_ops_assoc__attach"))
120 		goto out;
121 
122 	/*
123 	 * Run .test_1 by calling kfunc bpf_kfunc_multi_st_ops_test_1_prog_arg() and checks
124 	 * the return value. .test_1 will also schedule timer_cb that runs .test_1 again
125 	 * immediately.
126 	 */
127 	err = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.syscall_prog), NULL);
128 	ASSERT_OK(err, "bpf_prog_test_run_opts");
129 
130 	/* Check the return of the kfunc after timer_cb runs */
131 	while (!READ_ONCE(skel->bss->timer_cb_run))
132 		sched_yield();
133 	ASSERT_EQ(skel->bss->timer_test_1_ret, 1234, "skel->bss->timer_test_1_ret");
134 	ASSERT_EQ(skel->bss->test_err, 0, "skel->bss->test_err_a");
135 out:
136 	struct_ops_assoc_in_timer__destroy(skel);
137 }
138 
139 static void test_st_ops_assoc_in_timer_no_uref(void)
140 {
141 	struct struct_ops_assoc_in_timer *skel = NULL;
142 	struct bpf_link *link;
143 	int err;
144 
145 	skel = struct_ops_assoc_in_timer__open_and_load();
146 	if (!ASSERT_OK_PTR(skel, "struct_ops_assoc_in_timer__open"))
147 		goto out;
148 
149 	err = bpf_program__assoc_struct_ops(skel->progs.syscall_prog,
150 					    skel->maps.st_ops_map, NULL);
151 	ASSERT_OK(err, "bpf_program__assoc_struct_ops");
152 
153 	link = bpf_map__attach_struct_ops(skel->maps.st_ops_map);
154 	if (!ASSERT_OK_PTR(link, "bpf_map__attach_struct_ops"))
155 		goto out;
156 
157 	/*
158 	 * Run .test_1 by calling kfunc bpf_kfunc_multi_st_ops_test_1_prog_arg() and checks
159 	 * the return value. .test_1 will also schedule timer_cb that runs .test_1 again.
160 	 * timer_cb will run 500ms after syscall_prog runs, when the user space no longer
161 	 * holds a reference to st_ops_map.
162 	 */
163 	skel->bss->timer_ns = 500000000;
164 	err = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.syscall_prog), NULL);
165 	ASSERT_OK(err, "bpf_prog_test_run_opts");
166 
167 	/* Detach and close struct_ops map to cause it to be freed */
168 	bpf_link__destroy(link);
169 	close(bpf_program__fd(skel->progs.syscall_prog));
170 	close(bpf_map__fd(skel->maps.st_ops_map));
171 
172 	/* Check the return of the kfunc after timer_cb runs */
173 	while (!READ_ONCE(skel->bss->timer_cb_run))
174 		sched_yield();
175 	ASSERT_EQ(skel->bss->timer_test_1_ret, -1, "skel->bss->timer_test_1_ret");
176 	ASSERT_EQ(skel->bss->test_err, 0, "skel->bss->test_err_a");
177 out:
178 	struct_ops_assoc_in_timer__destroy(skel);
179 }
180 
181 void test_struct_ops_assoc(void)
182 {
183 	if (test__start_subtest("st_ops_assoc"))
184 		test_st_ops_assoc();
185 	if (test__start_subtest("st_ops_assoc_reuse"))
186 		test_st_ops_assoc_reuse();
187 	if (test__start_subtest("st_ops_assoc_in_timer"))
188 		test_st_ops_assoc_in_timer();
189 	if (test__start_subtest("st_ops_assoc_in_timer_no_uref"))
190 		test_st_ops_assoc_in_timer_no_uref();
191 }
192