// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */

#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#include "bpf_misc.h"
#include "xdp_metadata.h"
#include "bpf_kfuncs.h"
#include "err.h"

/* The compiler may be able to detect the access to uninitialized
 * memory in the routines performing out-of-bounds memory accesses and
 * emit warnings about it. This is the case with GCC.
 */
#if !defined(__clang__)
#pragma GCC diagnostic ignored "-Wmaybe-uninitialized"
#endif

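/* arr/unkn_idx let global_bad() perform a variable-offset, out-of-bounds
 * array access; call_dead_subprog guards the otherwise-dead call to
 * global_dead()
 */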
int arr[1];
int unkn_idx;
const volatile bool call_dead_subprog = false;

__noinline long global_bad(void)
{
	return arr[unkn_idx]; /* BOOM */
}

__noinline long global_good(void)
{
	return arr[0];
}

__noinline long global_calls_bad(void)
{
	return global_good() + global_bad() /* does BOOM indirectly */;
}

__noinline long global_calls_good_only(void)
{
	return global_good();
}

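/* only reachable if call_dead_subprog is overridden to true at load time */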
__noinline long global_dead(void)
{
	return arr[0] * 2;
}

SEC("?raw_tp")
__success __log_level(2)
/* main prog is validated completely first */
__msg("('global_calls_good_only') is global and assumed valid.")
/* eventually global_good() is transitively validated as well */
__msg("Validating global_good() func")
__msg("('global_good') is safe for any args that match its prototype")
int chained_global_func_calls_success(void)
{
	int sum = 0;

	if (call_dead_subprog)
		sum += global_dead();
	return global_calls_good_only() + sum;
}

SEC("?raw_tp")
__failure __log_level(2)
/* main prog validated successfully first */
__msg("('global_calls_bad') is global and assumed valid.")
/* eventually we validate global_bad() and fail */
__msg("Validating global_bad() func")
__msg("math between map_value pointer and register") /* BOOM */
int chained_global_func_calls_bad(void)
{
	return global_calls_bad();
}

/* Do an out-of-bounds access, forcing the verifier to fail verification
 * if this global func is called.
 */
__noinline int global_unsupp(const int *mem)
{
	if (!mem)
		return 0;
	return mem[100]; /* BOOM */
}

const volatile bool skip_unsupp_global = true;

SEC("?raw_tp")
__success
int guarded_unsupp_global_called(void)
{
	if (!skip_unsupp_global)
		return global_unsupp(NULL);
	return 0;
}

SEC("?raw_tp")
__failure __log_level(2)
__msg("Func#1 ('global_unsupp') is global and assumed valid.")
__msg("Validating global_unsupp() func#1...")
__msg("value is outside of the allowed memory range")
int unguarded_unsupp_global_called(void)
{
	int x = 0;

	return global_unsupp(&x);
}

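/* scratch buffer filled by bpf_get_stack() in subprog_ctx_tag() below */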
long stack[128];

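/* a plain, untagged pointer arg may be NULL, so the unchecked dereference
 * below must be rejected
 */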
__weak int subprog_nullable_ptr_bad(int *p)
{
	return (*p) * 2; /* bad, missing null check */
}

SEC("?raw_tp")
__failure __log_level(2)
__msg("invalid mem access 'mem_or_null'")
int arg_tag_nullable_ptr_fail(void *ctx)
{
	int x = 42;

	return subprog_nullable_ptr_bad(&x);
}

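/* typedef'd anonymous struct used to test passing anonymous user memory
 * into a global subprog
 */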
typedef struct {
	int x;
} user_struct_t;

__noinline __weak int subprog_user_anon_mem(user_struct_t *t)
{
	return t ? t->x : 0;
}

SEC("?tracepoint")
__failure __log_level(2)
__msg("invalid bpf_context access")
__msg("Caller passes invalid args into func#1 ('subprog_user_anon_mem')")
int anon_user_mem_invalid(void *ctx)
{
	/* can't pass PTR_TO_CTX as user memory */
	return subprog_user_anon_mem(ctx);
}

SEC("?tracepoint")
__success __log_level(2)
__msg("Func#1 ('subprog_user_anon_mem') is safe for any args that match its prototype")
int anon_user_mem_valid(void *ctx)
{
	user_struct_t t = { .x = 42 };

	return subprog_user_anon_mem(&t);
}

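/* __arg_nonnull tells the verifier these pointer args are never NULL */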
__noinline __weak int subprog_nonnull_ptr_good(int *p1 __arg_nonnull, int *p2 __arg_nonnull)
{
	return (*p1) * (*p2); /* good, no need for NULL checks */
}

int x = 47;

SEC("?raw_tp")
__success __log_level(2)
int arg_tag_nonnull_ptr_good(void *ctx)
{
	int y = 74;

	return subprog_nonnull_ptr_good(&x, &y);
}

/* this global subprog can now be called from many types of entry progs, each
 * with a different context type
 */
__weak int subprog_ctx_tag(void *ctx __arg_ctx)
{
	return bpf_get_stack(ctx, stack, sizeof(stack), 0);
}

__weak int raw_tp_canonical(struct bpf_raw_tracepoint_args *ctx __arg_ctx)
{
	return 0;
}

__weak int raw_tp_u64_array(u64 *ctx __arg_ctx)
{
	return 0;
}

SEC("?raw_tp")
__success __log_level(2)
int arg_tag_ctx_raw_tp(void *ctx)
{
	return subprog_ctx_tag(ctx) + raw_tp_canonical(ctx) + raw_tp_u64_array(ctx);
}

SEC("?raw_tp.w")
__success __log_level(2)
int arg_tag_ctx_raw_tp_writable(void *ctx)
{
	return subprog_ctx_tag(ctx) + raw_tp_canonical(ctx) + raw_tp_u64_array(ctx);
}

SEC("?tp_btf/sys_enter")
__success __log_level(2)
int arg_tag_ctx_raw_tp_btf(void *ctx)
{
	return subprog_ctx_tag(ctx) + raw_tp_canonical(ctx) + raw_tp_u64_array(ctx);
}

struct whatever { };

__weak int tp_whatever(struct whatever *ctx __arg_ctx)
{
	return 0;
}

SEC("?tp")
__success __log_level(2)
int arg_tag_ctx_tp(void *ctx)
{
	return subprog_ctx_tag(ctx) + tp_whatever(ctx);
}

__weak int kprobe_subprog_pt_regs(struct pt_regs *ctx __arg_ctx)
{
	return 0;
}

__weak int kprobe_subprog_typedef(bpf_user_pt_regs_t *ctx __arg_ctx)
{
	return 0;
}

SEC("?kprobe")
__success __log_level(2)
int arg_tag_ctx_kprobe(void *ctx)
{
	return subprog_ctx_tag(ctx) +
	       kprobe_subprog_pt_regs(ctx) +
	       kprobe_subprog_typedef(ctx);
}

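/* the register struct type seen by perf_event progs differs per
 * architecture, hence the #if ladder below
 */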
__weak int perf_subprog_regs(
#if defined(bpf_target_riscv)
	struct user_regs_struct *ctx __arg_ctx
#elif defined(bpf_target_s390)
	/* user_pt_regs typedef is anonymous struct, so only `void *` works */
	void *ctx __arg_ctx
#elif defined(bpf_target_loongarch) || defined(bpf_target_arm64) || defined(bpf_target_powerpc)
	struct user_pt_regs *ctx __arg_ctx
#else
	struct pt_regs *ctx __arg_ctx
#endif
)
{
	return 0;
}

__weak int perf_subprog_typedef(bpf_user_pt_regs_t *ctx __arg_ctx)
{
	return 0;
}

__weak int perf_subprog_canonical(struct bpf_perf_event_data *ctx __arg_ctx)
{
	return 0;
}

SEC("?perf_event")
__success __log_level(2)
int arg_tag_ctx_perf(void *ctx)
{
	return subprog_ctx_tag(ctx) +
	       perf_subprog_regs(ctx) +
	       perf_subprog_typedef(ctx) +
	       perf_subprog_canonical(ctx);
}

__weak int iter_subprog_void(void *ctx __arg_ctx)
{
	return 0;
}

__weak int iter_subprog_typed(struct bpf_iter__task *ctx __arg_ctx)
{
	return 0;
}

SEC("?iter/task")
__success __log_level(2)
int arg_tag_ctx_iter_task(struct bpf_iter__task *ctx)
{
	return (iter_subprog_void(ctx) + iter_subprog_typed(ctx)) & 1;
}

__weak int tracing_subprog_void(void *ctx __arg_ctx)
{
	return 0;
}

__weak int tracing_subprog_u64(u64 *ctx __arg_ctx)
{
	return 0;
}

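/* global sink for subprog return values in the fentry/fexit progs below */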
int acc;

SEC("?fentry/" SYS_PREFIX "sys_nanosleep")
__success __log_level(2)
int BPF_PROG(arg_tag_ctx_fentry)
{
	acc += tracing_subprog_void(ctx) + tracing_subprog_u64(ctx);
	return 0;
}

SEC("?fexit/" SYS_PREFIX "sys_nanosleep")
__success __log_level(2)
int BPF_PROG(arg_tag_ctx_fexit)
{
	acc += tracing_subprog_void(ctx) + tracing_subprog_u64(ctx);
	return 0;
}

SEC("?fmod_ret/" SYS_PREFIX "sys_nanosleep")
__success __log_level(2)
int BPF_PROG(arg_tag_ctx_fmod_ret)
{
	return tracing_subprog_void(ctx) + tracing_subprog_u64(ctx);
}

SEC("?lsm/bpf")
__success __log_level(2)
int BPF_PROG(arg_tag_ctx_lsm)
{
	int ret;

	ret = tracing_subprog_void(ctx) + tracing_subprog_u64(ctx);
	set_if_not_errno_or_zero(ret, -1);
	return ret;
}

SEC("?struct_ops/test_1")
__success __log_level(2)
int BPF_PROG(arg_tag_ctx_struct_ops)
{
	return tracing_subprog_void(ctx) + tracing_subprog_u64(ctx);
}

SEC(".struct_ops")
struct bpf_dummy_ops dummy_1 = {
	.test_1 = (void *)arg_tag_ctx_struct_ops,
};

SEC("?syscall")
__success __log_level(2)
int arg_tag_ctx_syscall(void *ctx)
{
	return tracing_subprog_void(ctx) + tracing_subprog_u64(ctx) + tp_whatever(ctx);
}

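/* the verifier tracks struct bpf_dynptr * args to global subprogs, so the
 * dynptr can be used directly here
 */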
__weak int subprog_dynptr(struct bpf_dynptr *dptr)
{
	long *d, t, buf[1] = {};

	d = bpf_dynptr_data(dptr, 0, sizeof(long));
	if (!d)
		return 0;

	t = *d + 1;

	d = bpf_dynptr_slice(dptr, 0, &buf, sizeof(long));
	if (!d)
		return t;

	t = *d + 2;

	return t;
}

SEC("?xdp")
__success __log_level(2)
int arg_tag_dynptr(struct xdp_md *ctx)
{
	struct bpf_dynptr dptr;

	bpf_dynptr_from_xdp(ctx, 0, &dptr);

	return subprog_dynptr(&dptr);
}

char _license[] SEC("license") = "GPL";