// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */

#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#include "bpf_misc.h"
#include "xdp_metadata.h"
#include "bpf_kfuncs.h"

int arr[1];
int unkn_idx;
const volatile bool call_dead_subprog = false;

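/* Global subprogs are verified independently of their callers, based only
 * on their BTF prototypes. global_bad() below reads arr[] at an unknown
 * index and must be rejected whenever the verifier actually validates it.
 */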
__noinline long global_bad(void)
{
	return arr[unkn_idx]; /* BOOM */
}

__noinline long global_good(void)
{
	return arr[0];
}

__noinline long global_calls_bad(void)
{
	return global_good() + global_bad() /* does BOOM indirectly */;
}

__noinline long global_calls_good_only(void)
{
	return global_good();
}

__noinline long global_dead(void)
{
	return arr[0] * 2;
}

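/* call_dead_subprog is known to be false at load time, so the call to
 * global_dead() below is dead code and global_dead() is never validated
 */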
SEC("?raw_tp")
__success __log_level(2)
/* main prog is validated completely first */
__msg("('global_calls_good_only') is global and assumed valid.")
/* eventually global_good() is transitively validated as well */
__msg("Validating global_good() func")
__msg("('global_good') is safe for any args that match its prototype")
int chained_global_func_calls_success(void)
{
	int sum = 0;

	if (call_dead_subprog)
		sum += global_dead();
	return global_calls_good_only() + sum;
}

SEC("?raw_tp")
__failure __log_level(2)
/* main prog validated successfully first */
__msg("('global_calls_bad') is global and assumed valid.")
/* eventually we validate global_bad() and fail */
__msg("Validating global_bad() func")
__msg("math between map_value pointer and register") /* BOOM */
int chained_global_func_calls_bad(void)
{
	return global_calls_bad();
}

/* does an out-of-bounds access, forcing the verifier to fail verification
 * if this global func is ever actually called
 */
__noinline int global_unsupp(const int *mem)
{
	if (!mem)
		return 0;
	return mem[100]; /* BOOM */
}

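/* true at load time, so the guarded call to global_unsupp() below is dead
 * code and global_unsupp() is never validated for that program
 */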
const volatile bool skip_unsupp_global = true;

SEC("?raw_tp")
__success
int guarded_unsupp_global_called(void)
{
	if (!skip_unsupp_global)
		return global_unsupp(NULL);
	return 0;
}

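/* here the call is live, so global_unsupp() itself gets validated and its
 * out-of-bounds read is rejected
 */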
SEC("?raw_tp")
__failure __log_level(2)
__msg("Func#1 ('global_unsupp') is global and assumed valid.")
__msg("Validating global_unsupp() func#1...")
__msg("value is outside of the allowed memory range")
int unguarded_unsupp_global_called(void)
{
	int x = 0;

	return global_unsupp(&x);
}

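/* scratch buffer used by subprog_ctx_tag() below for bpf_get_stack() */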
long stack[128];

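/* a plain pointer argument of a global subprog may be NULL from the
 * verifier's point of view, so it has to be NULL-checked before use
 */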
__weak int subprog_nullable_ptr_bad(int *p)
{
	return (*p) * 2; /* bad, missing null check */
}

SEC("?raw_tp")
__failure __log_level(2)
__msg("invalid mem access 'mem_or_null'")
int arg_tag_nullable_ptr_fail(void *ctx)
{
	int x = 42;

	return subprog_nullable_ptr_bad(&x);
}

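/* user_struct_t is a local type unknown to kernel BTF, so a pointer to it
 * is treated as plain fixed-size memory: passing PTR_TO_CTX for it is
 * rejected below, while a pointer to a stack copy is accepted
 */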
typedef struct {
	int x;
} user_struct_t;

__noinline __weak int subprog_user_anon_mem(user_struct_t *t)
{
	return t ? t->x : 0;
}

SEC("?tracepoint")
__failure __log_level(2)
__msg("invalid bpf_context access")
__msg("Caller passes invalid args into func#1 ('subprog_user_anon_mem')")
int anon_user_mem_invalid(void *ctx)
{
	/* can't pass PTR_TO_CTX as user memory */
	return subprog_user_anon_mem(ctx);
}

SEC("?tracepoint")
__success __log_level(2)
__msg("Func#1 ('subprog_user_anon_mem') is safe for any args that match its prototype")
int anon_user_mem_valid(void *ctx)
{
	user_struct_t t = { .x = 42 };

	return subprog_user_anon_mem(&t);
}

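/* __arg_nonnull tells the verifier that callers always pass non-NULL
 * pointers, so the subprog may dereference them without a NULL check
 */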
__noinline __weak int subprog_nonnull_ptr_good(int *p1 __arg_nonnull, int *p2 __arg_nonnull)
{
	return (*p1) * (*p2); /* good, no need for NULL checks */
}

int x = 47;

SEC("?raw_tp")
__success __log_level(2)
int arg_tag_nonnull_ptr_good(void *ctx)
{
	int y = 74;

	return subprog_nonnull_ptr_good(&x, &y);
}

/* this global subprog can now be called from many types of entry progs, each
 * with a different context type
 */
__weak int subprog_ctx_tag(void *ctx __arg_ctx)
{
	return bpf_get_stack(ctx, stack, sizeof(stack), 0);
}

__weak int raw_tp_canonical(struct bpf_raw_tracepoint_args *ctx __arg_ctx)
{
	return 0;
}

__weak int raw_tp_u64_array(u64 *ctx __arg_ctx)
{
	return 0;
}

SEC("?raw_tp")
__success __log_level(2)
int arg_tag_ctx_raw_tp(void *ctx)
{
	return subprog_ctx_tag(ctx) + raw_tp_canonical(ctx) + raw_tp_u64_array(ctx);
}

SEC("?raw_tp.w")
__success __log_level(2)
int arg_tag_ctx_raw_tp_writable(void *ctx)
{
	return subprog_ctx_tag(ctx) + raw_tp_canonical(ctx) + raw_tp_u64_array(ctx);
}

SEC("?tp_btf/sys_enter")
__success __log_level(2)
int arg_tag_ctx_raw_tp_btf(void *ctx)
{
	return subprog_ctx_tag(ctx) + raw_tp_canonical(ctx) + raw_tp_u64_array(ctx);
}

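/* an arbitrary struct type can be used for a ctx arg tagged __arg_ctx; both
 * the tp and syscall programs below call tp_whatever() successfully
 */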
struct whatever { };

__weak int tp_whatever(struct whatever *ctx __arg_ctx)
{
	return 0;
}

SEC("?tp")
__success __log_level(2)
int arg_tag_ctx_tp(void *ctx)
{
	return subprog_ctx_tag(ctx) + tp_whatever(ctx);
}

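/* kprobe ctx can be spelled either as struct pt_regs * or via the
 * bpf_user_pt_regs_t typedef
 */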
__weak int kprobe_subprog_pt_regs(struct pt_regs *ctx __arg_ctx)
{
	return 0;
}

__weak int kprobe_subprog_typedef(bpf_user_pt_regs_t *ctx __arg_ctx)
{
	return 0;
}

SEC("?kprobe")
__success __log_level(2)
int arg_tag_ctx_kprobe(void *ctx)
{
	return subprog_ctx_tag(ctx) +
	       kprobe_subprog_pt_regs(ctx) +
	       kprobe_subprog_typedef(ctx);
}

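/* the user-visible register struct for perf_event programs differs per
 * architecture, hence the per-arch ctx type below
 */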
__weak int perf_subprog_regs(
#if defined(bpf_target_riscv)
	struct user_regs_struct *ctx __arg_ctx
#elif defined(bpf_target_s390)
	/* the user_pt_regs typedef is an anonymous struct, so only `void *` works */
	void *ctx __arg_ctx
#elif defined(bpf_target_loongarch) || defined(bpf_target_arm64) || defined(bpf_target_powerpc)
	struct user_pt_regs *ctx __arg_ctx
#else
	struct pt_regs *ctx __arg_ctx
#endif
)
{
	return 0;
}

__weak int perf_subprog_typedef(bpf_user_pt_regs_t *ctx __arg_ctx)
{
	return 0;
}

__weak int perf_subprog_canonical(struct bpf_perf_event_data *ctx __arg_ctx)
{
	return 0;
}

SEC("?perf_event")
__success __log_level(2)
int arg_tag_ctx_perf(void *ctx)
{
	return subprog_ctx_tag(ctx) +
	       perf_subprog_regs(ctx) +
	       perf_subprog_typedef(ctx) +
	       perf_subprog_canonical(ctx);
}

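/* iterator ctx can be passed either as void * or as the specific
 * bpf_iter__task type
 */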
__weak int iter_subprog_void(void *ctx __arg_ctx)
{
	return 0;
}

__weak int iter_subprog_typed(struct bpf_iter__task *ctx __arg_ctx)
{
	return 0;
}

SEC("?iter/task")
__success __log_level(2)
int arg_tag_ctx_iter_task(struct bpf_iter__task *ctx)
{
	return (iter_subprog_void(ctx) + iter_subprog_typed(ctx)) & 1;
}

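/* tracing program ctx is an array of u64 arguments, so both void * and
 * u64 * are accepted with __arg_ctx
 */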
__weak int tracing_subprog_void(void *ctx __arg_ctx)
{
	return 0;
}

__weak int tracing_subprog_u64(u64 *ctx __arg_ctx)
{
	return 0;
}

int acc;

SEC("?fentry/" SYS_PREFIX "sys_nanosleep")
__success __log_level(2)
int BPF_PROG(arg_tag_ctx_fentry)
{
	acc += tracing_subprog_void(ctx) + tracing_subprog_u64(ctx);
	return 0;
}

SEC("?fexit/" SYS_PREFIX "sys_nanosleep")
__success __log_level(2)
int BPF_PROG(arg_tag_ctx_fexit)
{
	acc += tracing_subprog_void(ctx) + tracing_subprog_u64(ctx);
	return 0;
}

SEC("?fmod_ret/" SYS_PREFIX "sys_nanosleep")
__success __log_level(2)
int BPF_PROG(arg_tag_ctx_fmod_ret)
{
	return tracing_subprog_void(ctx) + tracing_subprog_u64(ctx);
}

SEC("?lsm/bpf")
__success __log_level(2)
int BPF_PROG(arg_tag_ctx_lsm)
{
	return tracing_subprog_void(ctx) + tracing_subprog_u64(ctx);
}

SEC("?struct_ops/test_1")
__success __log_level(2)
int BPF_PROG(arg_tag_ctx_struct_ops)
{
	return tracing_subprog_void(ctx) + tracing_subprog_u64(ctx);
}

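/* register arg_tag_ctx_struct_ops above as the test_1 callback of a dummy
 * bpf_dummy_ops struct_ops map
 */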
SEC(".struct_ops")
struct bpf_dummy_ops dummy_1 = {
	.test_1 = (void *)arg_tag_ctx_struct_ops,
};

SEC("?syscall")
__success __log_level(2)
int arg_tag_ctx_syscall(void *ctx)
{
	return tracing_subprog_void(ctx) + tracing_subprog_u64(ctx) + tp_whatever(ctx);
}

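/* a struct bpf_dynptr * argument of a global subprog is recognized as a
 * dynptr, so dynptr kfuncs can operate on it directly
 */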
__weak int subprog_dynptr(struct bpf_dynptr *dptr)
{
	long *d, t, buf[1] = {};

	d = bpf_dynptr_data(dptr, 0, sizeof(long));
	if (!d)
		return 0;

	t = *d + 1;

	d = bpf_dynptr_slice(dptr, 0, &buf, sizeof(long));
	if (!d)
		return t;

	t = *d + 2;

	return t;
}

SEC("?xdp")
__success __log_level(2)
int arg_tag_dynptr(struct xdp_md *ctx)
{
	struct bpf_dynptr dptr;

	bpf_dynptr_from_xdp(ctx, 0, &dptr);

	return subprog_dynptr(&dptr);
}

char _license[] SEC("license") = "GPL";