xref: /linux/tools/testing/selftests/bpf/progs/verifier_global_subprogs.c (revision 0fc8f6200d2313278fbf4539bbab74677c685531)
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
3 
4 #include <vmlinux.h>
5 #include <bpf/bpf_helpers.h>
6 #include <bpf/bpf_tracing.h>
7 #include "bpf_misc.h"
8 #include "xdp_metadata.h"
9 #include "bpf_kfuncs.h"
10 #include "err.h"
11 
12 /* The compiler may be able to detect the access to uninitialized
13    memory in the routines performing out of bound memory accesses and
14    emit warnings about it.  This is the case of GCC. */
15 #if !defined(__clang__)
16 #pragma GCC diagnostic ignored "-Wmaybe-uninitialized"
17 #endif
18 
/* Fixtures for chained global-subprog validation: a one-element global
 * array plus a statically unknown index lets us place the bad access in
 * exactly one subprog of the call chain.
 */
int arr[1];
int unkn_idx;
const volatile bool call_dead_subprog = false;

/* Fails verification when validated: unkn_idx is unbounded, so the array
 * access can be out of bounds.
 */
__noinline long global_bad(void)
{
	return arr[unkn_idx]; /* BOOM */
}

/* Constant in-bounds access; always validates cleanly. */
__noinline long global_good(void)
{
	return arr[0];
}

/* Transitively reaches global_bad(), so any caller of this subprog must
 * ultimately fail verification.
 */
__noinline long global_calls_bad(void)
{
	return global_good() + global_bad() /* does BOOM indirectly */;
}

/* Only reaches safe code; callers of this subprog verify successfully. */
__noinline long global_calls_good_only(void)
{
	return global_good();
}

/* Safe subprog referenced only behind the always-false call_dead_subprog
 * flag, so it ends up dead code from the verifier's point of view.
 */
__noinline long global_dead(void)
{
	return arr[0] * 2;
}
47 
SEC("?raw_tp")
__success __log_level(2)
/* main prog is validated completely first */
__msg("('global_calls_good_only') is global and assumed valid.")
/* eventually global_good() is transitively validated as well */
__msg("Validating global_good() func")
__msg("('global_good') is safe for any args that match its prototype")
int chained_global_func_calls_success(void)
{
	int sum = 0;

	/* call_dead_subprog is const-volatile false, so this branch is
	 * eliminated and global_dead() never needs to be validated
	 */
	if (call_dead_subprog)
		sum += global_dead();
	return global_calls_good_only() + sum;
}
63 
SEC("?raw_tp")
__failure __log_level(2)
/* main prog validated successfully first */
__msg("('global_calls_bad') is global and assumed valid.")
/* eventually we validate global_bad() and fail */
__msg("Validating global_bad() func")
__msg("math between map_value pointer and register") /* BOOM */
int chained_global_func_calls_bad(void)
{
	/* failure surfaces only when global_bad() is reached transitively */
	return global_calls_bad();
}
75 
/* do out of bounds access forcing verifier to fail verification if this
 * global func is called
 */
__noinline int global_unsupp(const int *mem)
{
	if (!mem)
		return 0;
	return mem[100]; /* BOOM */
}

/* const-volatile true: lets the verifier prove the guarded call below dead */
const volatile bool skip_unsupp_global = true;
87 
SEC("?raw_tp")
__success
int guarded_unsupp_global_called(void)
{
	/* skip_unsupp_global is const-volatile true, so this call is provably
	 * dead and global_unsupp() never has to be validated
	 */
	if (!skip_unsupp_global)
		return global_unsupp(NULL);
	return 0;
}
96 
SEC("?raw_tp")
__failure __log_level(2)
__msg("Func#1 ('global_unsupp') is global and assumed valid.")
__msg("Validating global_unsupp() func#1...")
__msg("value is outside of the allowed memory range")
int unguarded_unsupp_global_called(void)
{
	int x = 0;

	/* live call forces validation of global_unsupp(), which fails on the
	 * mem[100] read against a single-int stack slot
	 */
	return global_unsupp(&x);
}
108 
/* scratch buffer for the bpf_get_stack() call in subprog_ctx_tag() */
long stack[128];

/* No __arg_nonnull tag, so the argument is treated as nullable and must be
 * NULL-checked before dereferencing; this subprog deliberately skips that.
 */
__weak int subprog_nullable_ptr_bad(int *p)
{
	return (*p) * 2; /* bad, missing null check */
}
115 
SEC("?raw_tp")
__failure __log_level(2)
__msg("invalid mem access 'mem_or_null'")
int arg_tag_nullable_ptr_fail(void *ctx)
{
	int x = 42;

	/* even with a valid pointer at the call site, the subprog itself
	 * fails validation because of its unchecked dereference
	 */
	return subprog_nullable_ptr_bad(&x);
}
125 
/* anonymous (BTF-wise user-defined) struct type used as a subprog argument;
 * the tests below exercise which caller-side pointers may be passed for it
 */
typedef struct {
	int x;
} user_struct_t;

__noinline __weak int subprog_user_anon_mem(user_struct_t *t)
{
	return t ? t->x : 0;
}
134 
SEC("?tracepoint")
__failure __log_level(2)
__msg("Caller passes invalid args into func#1 ('subprog_user_anon_mem')")
int anon_user_mem_invalid(void *ctx)
{
	/* can't pass PTR_TO_CTX as user memory */
	return subprog_user_anon_mem(ctx);
}

SEC("?tracepoint")
__success __log_level(2)
__msg("Func#1 ('subprog_user_anon_mem') is safe for any args that match its prototype")
int anon_user_mem_valid(void *ctx)
{
	/* a stack-allocated struct of the right type is accepted */
	user_struct_t t = { .x = 42 };

	return subprog_user_anon_mem(&t);
}
153 
/* __arg_nonnull shifts the NULL check from the subprog body to the
 * verifier's per-call-site argument validation
 */
__noinline __weak int subprog_nonnull_ptr_good(int *p1 __arg_nonnull, int *p2 __arg_nonnull)
{
	return (*p1) * (*p2); /* good, no need for NULL checks */
}

int x = 47;

SEC("?raw_tp")
__success __log_level(2)
int arg_tag_nonnull_ptr_good(void *ctx)
{
	int y = 74;

	/* addresses of a global and a stack variable are provably non-NULL */
	return subprog_nonnull_ptr_good(&x, &y);
}
169 
/* this global subprog can be now called from many types of entry progs, each
 * with different context type
 */
__weak int subprog_ctx_tag(void *ctx __arg_ctx)
{
	return bpf_get_stack(ctx, stack, sizeof(stack), 0);
}

/* canonical raw_tp context type spelled out explicitly */
__weak int raw_tp_canonical(struct bpf_raw_tracepoint_args *ctx __arg_ctx)
{
	return 0;
}

/* u64 array is an alternative way to spell raw_tp context */
__weak int raw_tp_u64_array(u64 *ctx __arg_ctx)
{
	return 0;
}
187 
SEC("?raw_tp")
__success __log_level(2)
int arg_tag_ctx_raw_tp(void *ctx)
{
	return subprog_ctx_tag(ctx) + raw_tp_canonical(ctx) + raw_tp_u64_array(ctx);
}

/* same subprogs are accepted from the writable raw_tp flavor... */
SEC("?raw_tp.w")
__success __log_level(2)
int arg_tag_ctx_raw_tp_writable(void *ctx)
{
	return subprog_ctx_tag(ctx) + raw_tp_canonical(ctx) + raw_tp_u64_array(ctx);
}

/* ...and from the BTF-enabled tp_btf flavor as well */
SEC("?tp_btf/sys_enter")
__success __log_level(2)
int arg_tag_ctx_raw_tp_btf(void *ctx)
{
	return subprog_ctx_tag(ctx) + raw_tp_canonical(ctx) + raw_tp_u64_array(ctx);
}
208 
/* empty placeholder struct; exercised below as a tracepoint __arg_ctx type */
struct whatever { };

__weak int tp_whatever(struct whatever *ctx __arg_ctx)
{
	return 0;
}

SEC("?tp")
__success __log_level(2)
int arg_tag_ctx_tp(void *ctx)
{
	return subprog_ctx_tag(ctx) + tp_whatever(ctx);
}
222 
/* kprobe context spelled as struct pt_regs... */
__weak int kprobe_subprog_pt_regs(struct pt_regs *ctx __arg_ctx)
{
	return 0;
}

/* ...or via the arch-dependent bpf_user_pt_regs_t typedef */
__weak int kprobe_subprog_typedef(bpf_user_pt_regs_t *ctx __arg_ctx)
{
	return 0;
}

SEC("?kprobe")
__success __log_level(2)
int arg_tag_ctx_kprobe(void *ctx)
{
	return subprog_ctx_tag(ctx) +
	       kprobe_subprog_pt_regs(ctx) +
	       kprobe_subprog_typedef(ctx);
}
241 
/* the register-struct name for perf_event context differs per target
 * architecture, hence the #ifdef ladder
 */
__weak int perf_subprog_regs(
#if defined(bpf_target_riscv)
	struct user_regs_struct *ctx __arg_ctx
#elif defined(bpf_target_s390)
	/* user_pt_regs typedef is anonymous struct, so only `void *` works */
	void *ctx __arg_ctx
#elif defined(bpf_target_loongarch) || defined(bpf_target_arm64) || defined(bpf_target_powerpc)
	struct user_pt_regs *ctx __arg_ctx
#else
	struct pt_regs *ctx __arg_ctx
#endif
)
{
	return 0;
}

/* portable typedef spelling of the same context */
__weak int perf_subprog_typedef(bpf_user_pt_regs_t *ctx __arg_ctx)
{
	return 0;
}

/* canonical perf_event program context type */
__weak int perf_subprog_canonical(struct bpf_perf_event_data *ctx __arg_ctx)
{
	return 0;
}

SEC("?perf_event")
__success __log_level(2)
int arg_tag_ctx_perf(void *ctx)
{
	return subprog_ctx_tag(ctx) +
	       perf_subprog_regs(ctx) +
	       perf_subprog_typedef(ctx) +
	       perf_subprog_canonical(ctx);
}
277 
__weak int iter_subprog_void(void *ctx __arg_ctx)
{
	return 0;
}

__weak int iter_subprog_typed(struct bpf_iter__task *ctx __arg_ctx)
{
	return 0;
}

SEC("?iter/task")
__success __log_level(2)
int arg_tag_ctx_iter_task(struct bpf_iter__task *ctx)
{
	/* mask to 0/1 to stay within the iterator's expected return range */
	return (iter_subprog_void(ctx) + iter_subprog_typed(ctx)) & 1;
}
294 
/* shared __arg_ctx subprogs for the tracing-flavored progs below */
__weak int tracing_subprog_void(void *ctx __arg_ctx)
{
	return 0;
}

__weak int tracing_subprog_u64(u64 *ctx __arg_ctx)
{
	return 0;
}

/* global accumulator so the subprog calls can't be optimized away */
int acc;
306 
/* the same pair of tracing subprogs is exercised from fentry, fexit and
 * fmod_ret entry points
 */
SEC("?fentry/" SYS_PREFIX "sys_nanosleep")
__success __log_level(2)
int BPF_PROG(arg_tag_ctx_fentry)
{
	acc += tracing_subprog_void(ctx) + tracing_subprog_u64(ctx);
	return 0;
}

SEC("?fexit/" SYS_PREFIX "sys_nanosleep")
__success __log_level(2)
int BPF_PROG(arg_tag_ctx_fexit)
{
	acc += tracing_subprog_void(ctx) + tracing_subprog_u64(ctx);
	return 0;
}

SEC("?fmod_ret/" SYS_PREFIX "sys_nanosleep")
__success __log_level(2)
int BPF_PROG(arg_tag_ctx_fmod_ret)
{
	return tracing_subprog_void(ctx) + tracing_subprog_u64(ctx);
}
329 
SEC("?lsm/bpf")
__success __log_level(2)
int BPF_PROG(arg_tag_ctx_lsm)
{
	int ret;

	ret = tracing_subprog_void(ctx) + tracing_subprog_u64(ctx);
	/* coerce ret into a value this LSM hook is allowed to return */
	set_if_not_errno_or_zero(ret, -1);
	return ret;
}
340 
SEC("?struct_ops/test_1")
__success __log_level(2)
int BPF_PROG(arg_tag_ctx_struct_ops)
{
	return tracing_subprog_void(ctx) + tracing_subprog_u64(ctx);
}

/* register the prog above as the test_1 callback of bpf_dummy_ops */
SEC(".struct_ops")
struct bpf_dummy_ops dummy_1 = {
	.test_1 = (void *)arg_tag_ctx_struct_ops,
};
352 
SEC("?syscall")
__success __log_level(2)
int arg_tag_ctx_syscall(void *ctx)
{
	/* syscall ctx is accepted by tracing- and tp-style __arg_ctx subprogs */
	return tracing_subprog_void(ctx) + tracing_subprog_u64(ctx) + tp_whatever(ctx);
}
359 
/* combine an __arg_ctx subprog with the bpf_for open-coded iterator; the
 * syscall prog's ctx is caller-supplied memory that is read and written
 */
__weak int syscall_array_bpf_for(void *ctx __arg_ctx)
{
	int *arr = ctx;
	int i;

	bpf_for(i, 0, 100)
		arr[i] *= i;

	return 0;
}

SEC("?syscall")
__success __log_level(2)
int arg_tag_ctx_syscall_bpf_for(void *ctx)
{
	return syscall_array_bpf_for(ctx);
}

/* __auxiliary: loaded unconditionally to serve as the tail-call target
 * wired into syscall_prog_array below
 */
SEC("syscall")
__auxiliary
int syscall_tailcall_target(void *ctx)
{
	return syscall_array_bpf_for(ctx);
}
384 
/* single-slot prog array holding syscall_tailcall_target */
struct {
	__uint(type, BPF_MAP_TYPE_PROG_ARRAY);
	__uint(max_entries, 1);
	__uint(key_size, sizeof(__u32));
	__array(values, int (void *));
} syscall_prog_array SEC(".maps") = {
	.values = {
		[0] = (void *)&syscall_tailcall_target,
	},
};

SEC("?syscall")
__success __log_level(2)
int arg_tag_ctx_syscall_tailcall(void *ctx)
{
	/* tail-calling with an unmodified ctx pointer is accepted */
	bpf_tail_call(ctx, &syscall_prog_array, 0);
	return 0;
}
403 
SEC("?syscall")
__failure __log_level(2)
__msg("dereference of modified ctx ptr R1 off=8 disallowed")
int arg_tag_ctx_syscall_tailcall_fixed_off_bad(void *ctx)
{
	char *p = ctx;

	/* a ctx pointer with a non-zero fixed offset must be rejected as the
	 * ctx argument of bpf_tail_call()
	 */
	p += 8;
	bpf_tail_call(p, &syscall_prog_array, 0);
	return 0;
}

SEC("?syscall")
__failure __log_level(2)
__msg("variable ctx access var_off=(0x0; 0x4) disallowed")
int arg_tag_ctx_syscall_tailcall_var_off_bad(void *ctx)
{
	__u64 off = bpf_get_prandom_u32();
	char *p = ctx;

	/* ...and so must a ctx pointer with a variable (unknown) offset */
	off &= 4;
	p += off;
	bpf_tail_call(p, &syscall_prog_array, 0);
	return 0;
}
429 
SEC("?syscall")
__failure __log_level(2)
__msg("dereference of modified ctx ptr R1 off=8 disallowed")
int arg_tag_ctx_syscall_fixed_off_bad(void *ctx)
{
	char *p = ctx;

	/* a fixed-offset ctx pointer is also invalid as an __arg_ctx argument */
	p += 8;
	return subprog_ctx_tag(p);
}

SEC("?syscall")
__failure __log_level(2)
__msg("variable ctx access var_off=(0x0; 0x4) disallowed")
int arg_tag_ctx_syscall_var_off_bad(void *ctx)
{
	__u64 off = bpf_get_prandom_u32();
	char *p = ctx;

	/* ...as is a variable-offset ctx pointer */
	off &= 4;
	p += off;
	return subprog_ctx_tag(p);
}
453 
/* global subprog taking a dynptr argument; exercises both bpf_dynptr_data()
 * and bpf_dynptr_slice() against the same dynptr
 */
__weak int subprog_dynptr(struct bpf_dynptr *dptr)
{
	long *d, t, buf[1] = {};

	d = bpf_dynptr_data(dptr, 0, sizeof(long));
	if (!d)
		return 0;

	t = *d + 1;

	/* buf is the fallback copy target if direct slice access isn't possible */
	d = bpf_dynptr_slice(dptr, 0, &buf, sizeof(long));
	if (!d)
		return t;

	t = *d + 2;

	return t;
}

SEC("?xdp")
__success __log_level(2)
int arg_tag_dynptr(struct xdp_md *ctx)
{
	struct bpf_dynptr dptr;

	bpf_dynptr_from_xdp(ctx, 0, &dptr);

	return subprog_dynptr(&dptr);
}
483 
/* void global subprog: nothing guarantees R0 holds a value after it returns */
__weak
void foo(void)
{
}

SEC("?tc")
__failure __msg("R0 !read_ok")
int return_from_void_global(struct __sk_buff *skb)
{
	foo();

	/* read R0 immediately after the call; since foo() returns void, R0
	 * must be unreadable here and verification must fail
	 */
	asm volatile(
		"r1 = r0;"
		:::
	);

	return 0;
}
502 
/* GPL license declaration, required to use GPL-only BPF helpers */
char _license[] SEC("license") = "GPL";
504