/* xref: /linux/tools/testing/selftests/bpf/progs/dynptr_fail.c (revision 3a39d672e7f48b8d6b91a09afa4b55352773b4b5) */
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2022 Facebook */
3 
4 #include <errno.h>
5 #include <string.h>
6 #include <stdbool.h>
7 #include <linux/bpf.h>
8 #include <bpf/bpf_helpers.h>
9 #include <bpf/bpf_tracing.h>
10 #include <linux/if_ether.h>
11 #include "bpf_misc.h"
12 #include "bpf_kfuncs.h"
13 
char _license[] SEC("license") = "GPL";

/* Map value type embedding a dynptr; used to show that structs containing
 * dynptrs can't be written into a map (see add_dynptr_to_map2).
 */
struct test_info {
	int x;
	struct bpf_dynptr ptr;
};

/* Array map whose value is a raw dynptr (add_dynptr_to_map1). */
struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 1);
	__type(key, __u32);
	__type(value, struct bpf_dynptr);
} array_map1 SEC(".maps");

/* Array map whose value embeds a dynptr (add_dynptr_to_map2). */
struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 1);
	__type(key, __u32);
	__type(value, struct test_info);
} array_map2 SEC(".maps");

/* u32-valued map backing the unreferenced dynptrs created by
 * get_map_val_dynptr().
 */
struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 1);
	__type(key, __u32);
	__type(value, __u32);
} array_map3 SEC(".maps");

/* u64-valued map used by the inline-asm pruning tests below. */
struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 1);
	__type(key, __u32);
	__type(value, __u64);
} array_map4 SEC(".maps");

/* Record layout reserved from the ring buffer in several tests. */
struct sample {
	int pid;
	long value;
	char comm[16];
};

struct {
	__uint(type, BPF_MAP_TYPE_RINGBUF);
	__uint(max_entries, 4096);
} ringbuf SEC(".maps");

/* Globals used as read/write sinks so loads/stores aren't optimized out. */
int err, val;
61 
/* Helper: initialize *ptr as an unreferenced (non-ringbuf) dynptr over the
 * u32 value of array_map3. Returns 0 on success, -ENOENT if the lookup
 * fails. Used by tests that need a dynptr with no reference to release.
 */
static int get_map_val_dynptr(struct bpf_dynptr *ptr)
{
	__u32 key = 0, *map_val;

	bpf_map_update_elem(&array_map3, &key, &val, 0);

	map_val = bpf_map_lookup_elem(&array_map3, &key);
	if (!map_val)
		return -ENOENT;

	bpf_dynptr_from_mem(map_val, sizeof(*map_val), 0, ptr);

	return 0;
}
76 
/* Every bpf_ringbuf_reserve_dynptr call must have a corresponding
 * bpf_ringbuf_submit/discard_dynptr call
 */
SEC("?raw_tp")
__failure __msg("Unreleased reference id=2")
int ringbuf_missing_release1(void *ctx)
{
	struct bpf_dynptr ptr = {};

	bpf_ringbuf_reserve_dynptr(&ringbuf, val, 0, &ptr);

	/* missing a call to bpf_ringbuf_discard/submit_dynptr */

	return 0;
}
92 
/* Releasing only one of two reserved dynptrs must still flag the other
 * (ptr2) as an unreleased reference.
 */
SEC("?raw_tp")
__failure __msg("Unreleased reference id=4")
int ringbuf_missing_release2(void *ctx)
{
	struct bpf_dynptr ptr1, ptr2;
	struct sample *sample;

	bpf_ringbuf_reserve_dynptr(&ringbuf, sizeof(*sample), 0, &ptr1);
	bpf_ringbuf_reserve_dynptr(&ringbuf, sizeof(*sample), 0, &ptr2);

	sample = bpf_dynptr_data(&ptr1, 0, sizeof(*sample));
	if (!sample) {
		bpf_ringbuf_discard_dynptr(&ptr1, 0);
		bpf_ringbuf_discard_dynptr(&ptr2, 0);
		return 0;
	}

	bpf_ringbuf_submit_dynptr(&ptr1, 0);

	/* missing a call to bpf_ringbuf_discard/submit_dynptr on ptr2 */

	return 0;
}
116 
/* bpf_loop() callback that reserves a ringbuf dynptr and leaks it. */
static int missing_release_callback_fn(__u32 index, void *data)
{
	struct bpf_dynptr ptr;

	bpf_ringbuf_reserve_dynptr(&ringbuf, val, 0, &ptr);

	/* missing a call to bpf_ringbuf_discard/submit_dynptr */

	return 0;
}
127 
/* Any dynptr initialized within a callback must have bpf_dynptr_put called */
SEC("?raw_tp")
__failure __msg("Unreleased reference id")
int ringbuf_missing_release_callback(void *ctx)
{
	bpf_loop(10, missing_release_callback_fn, NULL, 0);
	return 0;
}
136 
/* Can't call bpf_ringbuf_submit/discard_dynptr on a non-initialized dynptr */
SEC("?raw_tp")
__failure __msg("arg 1 is an unacquired reference")
int ringbuf_release_uninit_dynptr(void *ctx)
{
	struct bpf_dynptr ptr;

	/* this should fail */
	bpf_ringbuf_submit_dynptr(&ptr, 0);

	return 0;
}
149 
/* A dynptr can't be used after it has been invalidated */
SEC("?raw_tp")
__failure __msg("Expected an initialized dynptr as arg #3")
int use_after_invalid(void *ctx)
{
	struct bpf_dynptr ptr;
	char read_data[64];

	bpf_ringbuf_reserve_dynptr(&ringbuf, sizeof(read_data), 0, &ptr);

	/* first read is fine: dynptr is still live here */
	bpf_dynptr_read(read_data, sizeof(read_data), &ptr, 0, 0);

	/* submitting releases the dynptr, invalidating it */
	bpf_ringbuf_submit_dynptr(&ptr, 0);

	/* this should fail */
	bpf_dynptr_read(read_data, sizeof(read_data), &ptr, 0, 0);

	return 0;
}
169 
/* Can't call non-dynptr ringbuf APIs on a dynptr ringbuf sample */
SEC("?raw_tp")
__failure __msg("type=mem expected=ringbuf_mem")
int ringbuf_invalid_api(void *ctx)
{
	struct bpf_dynptr ptr;
	struct sample *sample;

	bpf_ringbuf_reserve_dynptr(&ringbuf, sizeof(*sample), 0, &ptr);
	sample = bpf_dynptr_data(&ptr, 0, sizeof(*sample));
	if (!sample)
		goto done;

	sample->pid = 123;

	/* invalid API use. need to use dynptr API to submit/discard */
	bpf_ringbuf_submit(sample, 0);

done:
	bpf_ringbuf_discard_dynptr(&ptr, 0);
	return 0;
}
192 
/* Can't add a dynptr to a map */
SEC("?raw_tp")
__failure __msg("invalid indirect read from stack")
int add_dynptr_to_map1(void *ctx)
{
	struct bpf_dynptr ptr;
	int key = 0;

	bpf_ringbuf_reserve_dynptr(&ringbuf, val, 0, &ptr);

	/* this should fail */
	bpf_map_update_elem(&array_map1, &key, &ptr, 0);

	bpf_ringbuf_submit_dynptr(&ptr, 0);

	return 0;
}
210 
/* Can't add a struct with an embedded dynptr to a map */
SEC("?raw_tp")
__failure __msg("invalid indirect read from stack")
int add_dynptr_to_map2(void *ctx)
{
	struct test_info x;
	int key = 0;

	bpf_ringbuf_reserve_dynptr(&ringbuf, val, 0, &x.ptr);

	/* this should fail */
	bpf_map_update_elem(&array_map2, &key, &x, 0);

	bpf_ringbuf_submit_dynptr(&x.ptr, 0);

	return 0;
}
228 
/* A data slice can't be accessed out of bounds */
SEC("?raw_tp")
__failure __msg("value is outside of the allowed memory range")
int data_slice_out_of_bounds_ringbuf(void *ctx)
{
	struct bpf_dynptr ptr;
	void *data;

	bpf_ringbuf_reserve_dynptr(&ringbuf, 8, 0, &ptr);

	data  = bpf_dynptr_data(&ptr, 0, 8);
	if (!data)
		goto done;

	/* can't index out of bounds of the data slice */
	val = *((char *)data + 8);

done:
	bpf_ringbuf_submit_dynptr(&ptr, 0);
	return 0;
}
250 
/* A data slice can't be accessed out of bounds */
SEC("?tc")
__failure __msg("value is outside of the allowed memory range")
int data_slice_out_of_bounds_skb(struct __sk_buff *skb)
{
	struct bpf_dynptr ptr;
	struct ethhdr *hdr;
	char buffer[sizeof(*hdr)] = {};

	bpf_dynptr_from_skb(skb, 0, &ptr);

	hdr = bpf_dynptr_slice_rdwr(&ptr, 0, buffer, sizeof(buffer));
	if (!hdr)
		return SK_DROP;

	/* this should fail: write one byte past the end of the slice */
	*(__u8*)(hdr + 1) = 1;

	return SK_PASS;
}
271 
/* A data slice from a map-value dynptr can't be accessed out of bounds */
SEC("?raw_tp")
__failure __msg("value is outside of the allowed memory range")
int data_slice_out_of_bounds_map_value(void *ctx)
{
	__u32 map_val;
	struct bpf_dynptr ptr;
	void *data;

	get_map_val_dynptr(&ptr);

	data  = bpf_dynptr_data(&ptr, 0, sizeof(map_val));
	if (!data)
		return 0;

	/* can't index out of bounds of the data slice */
	val = *((char *)data + (sizeof(map_val) + 1));

	return 0;
}
291 
/* A data slice can't be used after it has been released */
SEC("?raw_tp")
__failure __msg("invalid mem access 'scalar'")
int data_slice_use_after_release1(void *ctx)
{
	struct bpf_dynptr ptr;
	struct sample *sample;

	bpf_ringbuf_reserve_dynptr(&ringbuf, sizeof(*sample), 0, &ptr);
	sample = bpf_dynptr_data(&ptr, 0, sizeof(*sample));
	if (!sample)
		goto done;

	sample->pid = 123;

	/* submit releases the sample; the slice must be invalidated too */
	bpf_ringbuf_submit_dynptr(&ptr, 0);

	/* this should fail */
	val = sample->pid;

	return 0;

done:
	bpf_ringbuf_discard_dynptr(&ptr, 0);
	return 0;
}
318 
/* A data slice can't be used after it has been released.
 *
 * This tests the case where the data slice tracks a dynptr (ptr2)
 * that is at a non-zero offset from the frame pointer (ptr1 is at fp,
 * ptr2 is at fp - 16).
 */
SEC("?raw_tp")
__failure __msg("invalid mem access 'scalar'")
int data_slice_use_after_release2(void *ctx)
{
	struct bpf_dynptr ptr1, ptr2;
	struct sample *sample;

	bpf_ringbuf_reserve_dynptr(&ringbuf, 64, 0, &ptr1);
	bpf_ringbuf_reserve_dynptr(&ringbuf, sizeof(*sample), 0, &ptr2);

	sample = bpf_dynptr_data(&ptr2, 0, sizeof(*sample));
	if (!sample)
		goto done;

	sample->pid = 23;

	bpf_ringbuf_submit_dynptr(&ptr2, 0);

	/* this should fail */
	sample->pid = 23;

	bpf_ringbuf_submit_dynptr(&ptr1, 0);

	return 0;

done:
	bpf_ringbuf_discard_dynptr(&ptr2, 0);
	bpf_ringbuf_discard_dynptr(&ptr1, 0);
	return 0;
}
355 
/* A data slice must be first checked for NULL */
SEC("?raw_tp")
__failure __msg("invalid mem access 'mem_or_null'")
int data_slice_missing_null_check1(void *ctx)
{
	struct bpf_dynptr ptr;
	void *data;

	bpf_ringbuf_reserve_dynptr(&ringbuf, 8, 0, &ptr);

	data  = bpf_dynptr_data(&ptr, 0, 8);

	/* missing if (!data) check */

	/* this should fail */
	*(__u8 *)data = 3;

	bpf_ringbuf_submit_dynptr(&ptr, 0);
	return 0;
}
376 
/* A data slice can't be dereferenced if it wasn't checked for null */
SEC("?raw_tp")
__failure __msg("invalid mem access 'mem_or_null'")
int data_slice_missing_null_check2(void *ctx)
{
	struct bpf_dynptr ptr;
	__u64 *data1, *data2;

	bpf_ringbuf_reserve_dynptr(&ringbuf, 16, 0, &ptr);

	data1 = bpf_dynptr_data(&ptr, 0, 8);
	data2 = bpf_dynptr_data(&ptr, 0, 8);
	/* only data1 is null-checked; data2 remains mem_or_null */
	if (data1)
		/* this should fail */
		*data2 = 3;

	bpf_ringbuf_discard_dynptr(&ptr, 0);
	return 0;
}
396 
/* Can't pass in a dynptr as an arg to a helper function that doesn't take in a
 * dynptr argument
 */
SEC("?raw_tp")
__failure __msg("invalid indirect read from stack")
int invalid_helper1(void *ctx)
{
	struct bpf_dynptr ptr;

	get_map_val_dynptr(&ptr);

	/* this should fail */
	bpf_strncmp((const char *)&ptr, sizeof(ptr), "hello!");

	return 0;
}
413 
/* A dynptr can't be passed into a helper function at a non-zero offset */
SEC("?raw_tp")
__failure __msg("cannot pass in dynptr at an offset=-8")
int invalid_helper2(void *ctx)
{
	struct bpf_dynptr ptr;
	char read_data[64];

	get_map_val_dynptr(&ptr);

	/* this should fail */
	bpf_dynptr_read(read_data, sizeof(read_data), (void *)&ptr + 8, 0, 0);
	return 0;
}
428 
/* A bpf_dynptr is invalidated if it's been written into */
SEC("?raw_tp")
__failure __msg("Expected an initialized dynptr as arg #1")
int invalid_write1(void *ctx)
{
	struct bpf_dynptr ptr;
	void *data;
	__u8 x = 0;

	get_map_val_dynptr(&ptr);

	/* direct write into the dynptr's stack slots invalidates it */
	memcpy(&ptr, &x, sizeof(x));

	/* this should fail */
	data = bpf_dynptr_data(&ptr, 0, 1);
	__sink(data);

	return 0;
}
448 
/*
 * A bpf_dynptr can't be used as a dynptr if it has been written into at a fixed
 * offset
 */
SEC("?raw_tp")
__failure __msg("cannot overwrite referenced dynptr")
int invalid_write2(void *ctx)
{
	struct bpf_dynptr ptr;
	char read_data[64];
	__u8 x = 0;

	bpf_ringbuf_reserve_dynptr(&ringbuf, 64, 0, &ptr);

	/* overwriting a byte inside a referenced dynptr is rejected */
	memcpy((void *)&ptr + 8, &x, sizeof(x));

	/* this should fail */
	bpf_dynptr_read(read_data, sizeof(read_data), &ptr, 0, 0);

	bpf_ringbuf_submit_dynptr(&ptr, 0);

	return 0;
}
472 
/*
 * A bpf_dynptr can't be used as a dynptr if it has been written into at a
 * non-const offset
 */
SEC("?raw_tp")
__failure __msg("cannot overwrite referenced dynptr")
int invalid_write3(void *ctx)
{
	struct bpf_dynptr ptr;
	char stack_buf[16];
	unsigned long len;
	__u8 x = 0;

	bpf_ringbuf_reserve_dynptr(&ringbuf, 8, 0, &ptr);

	/* derive a variable (verifier-unknown, but bounded) offset */
	memcpy(stack_buf, &val, sizeof(val));
	len = stack_buf[0] & 0xf;

	memcpy((void *)&ptr + len, &x, sizeof(x));

	/* this should fail */
	bpf_ringbuf_submit_dynptr(&ptr, 0);

	return 0;
}
498 
/* bpf_loop() callback that writes through its ctx pointer; used to clobber
 * the caller's dynptr stack slots from inside a callback.
 */
static int invalid_write4_callback(__u32 index, void *data)
{
	*(__u32 *)data = 123;

	return 0;
}
505 
/* If the dynptr is written into in a callback function, it should
 * be invalidated as a dynptr
 */
SEC("?raw_tp")
__failure __msg("cannot overwrite referenced dynptr")
int invalid_write4(void *ctx)
{
	struct bpf_dynptr ptr;

	bpf_ringbuf_reserve_dynptr(&ringbuf, 64, 0, &ptr);

	bpf_loop(10, invalid_write4_callback, &ptr, 0);

	/* this should fail */
	bpf_ringbuf_submit_dynptr(&ptr, 0);

	return 0;
}
524 
/* A globally-defined bpf_dynptr can't be used (it must reside as a stack frame) */
struct bpf_dynptr global_dynptr;

SEC("?raw_tp")
__failure __msg("type=map_value expected=fp")
int global(void *ctx)
{
	/* this should fail */
	bpf_ringbuf_reserve_dynptr(&ringbuf, 16, 0, &global_dynptr);

	bpf_ringbuf_discard_dynptr(&global_dynptr, 0);

	return 0;
}
539 
/* A direct read should fail */
SEC("?raw_tp")
__failure __msg("invalid read from stack")
int invalid_read1(void *ctx)
{
	struct bpf_dynptr ptr;

	bpf_ringbuf_reserve_dynptr(&ringbuf, 64, 0, &ptr);

	/* this should fail */
	val = *(int *)&ptr;

	bpf_ringbuf_discard_dynptr(&ptr, 0);

	return 0;
}
556 
/* A direct read at an offset should fail */
SEC("?raw_tp")
__failure __msg("cannot pass in dynptr at an offset")
int invalid_read2(void *ctx)
{
	struct bpf_dynptr ptr;
	char read_data[64];

	get_map_val_dynptr(&ptr);

	/* this should fail */
	bpf_dynptr_read(read_data, sizeof(read_data), (void *)&ptr + 1, 0, 0);

	return 0;
}
572 
/* A direct read at an offset into the lower stack slot should fail */
SEC("?raw_tp")
__failure __msg("invalid read from stack")
int invalid_read3(void *ctx)
{
	struct bpf_dynptr ptr1, ptr2;

	bpf_ringbuf_reserve_dynptr(&ringbuf, 16, 0, &ptr1);
	bpf_ringbuf_reserve_dynptr(&ringbuf, 16, 0, &ptr2);

	/* this should fail */
	memcpy(&val, (void *)&ptr1 + 8, sizeof(val));

	bpf_ringbuf_discard_dynptr(&ptr1, 0);
	bpf_ringbuf_discard_dynptr(&ptr2, 0);

	return 0;
}
591 
/* bpf_loop() callback that directly reads the caller's dynptr slots. */
static int invalid_read4_callback(__u32 index, void *data)
{
	/* this should fail */
	val = *(__u32 *)data;

	return 0;
}
599 
/* A direct read within a callback function should fail */
SEC("?raw_tp")
__failure __msg("invalid read from stack")
int invalid_read4(void *ctx)
{
	struct bpf_dynptr ptr;

	bpf_ringbuf_reserve_dynptr(&ringbuf, 64, 0, &ptr);

	bpf_loop(10, invalid_read4_callback, &ptr, 0);

	bpf_ringbuf_submit_dynptr(&ptr, 0);

	return 0;
}
615 
/* Initializing a dynptr on an offset should fail */
SEC("?raw_tp")
__failure __msg("cannot pass in dynptr at an offset=0")
int invalid_offset(void *ctx)
{
	struct bpf_dynptr ptr;

	/* this should fail: &ptr + 1 is not a proper dynptr slot */
	bpf_ringbuf_reserve_dynptr(&ringbuf, 64, 0, &ptr + 1);

	bpf_ringbuf_discard_dynptr(&ptr, 0);

	return 0;
}
630 
/* Can't release a dynptr twice */
SEC("?raw_tp")
__failure __msg("arg 1 is an unacquired reference")
int release_twice(void *ctx)
{
	struct bpf_dynptr ptr;

	bpf_ringbuf_reserve_dynptr(&ringbuf, 16, 0, &ptr);

	bpf_ringbuf_discard_dynptr(&ptr, 0);

	/* this second release should fail */
	bpf_ringbuf_discard_dynptr(&ptr, 0);

	return 0;
}
647 
/* bpf_loop() callback that discards the dynptr passed as ctx; the caller
 * has already released it, so this is a double release.
 */
static int release_twice_callback_fn(__u32 index, void *data)
{
	/* this should fail */
	bpf_ringbuf_discard_dynptr(data, 0);

	return 0;
}
655 
/* Test that releasing a dynptr twice, where one of the releases happens
 * within a callback function, fails
 */
SEC("?raw_tp")
__failure __msg("arg 1 is an unacquired reference")
int release_twice_callback(void *ctx)
{
	struct bpf_dynptr ptr;

	bpf_ringbuf_reserve_dynptr(&ringbuf, 32, 0, &ptr);

	bpf_ringbuf_discard_dynptr(&ptr, 0);

	bpf_loop(10, release_twice_callback_fn, &ptr, 0);

	return 0;
}
673 
/* Reject unsupported local mem types for dynptr_from_mem API */
SEC("?raw_tp")
__failure __msg("Unsupported reg type fp for bpf_dynptr_from_mem data")
int dynptr_from_mem_invalid_api(void *ctx)
{
	struct bpf_dynptr ptr;
	int x = 0;

	/* this should fail: stack memory is not a valid data source */
	bpf_dynptr_from_mem(&x, sizeof(x), 0, &ptr);

	return 0;
}
687 
/* State pruning must not treat a path that scribbled over the dynptr's
 * stack slot (pjmp1 writes r9 to fp-16, where the dynptr lives) as
 * equivalent to the path that left it intact; the overwrite has to be
 * reported. NOTE(review): written in BPF inline asm because the test
 * depends on the exact stack layout and branch structure.
 */
SEC("?tc")
__failure __msg("cannot overwrite referenced dynptr") __log_level(2)
int dynptr_pruning_overwrite(struct __sk_buff *ctx)
{
	asm volatile (
		"r9 = 0xeB9F;				\
		 r6 = %[ringbuf] ll;			\
		 r1 = r6;				\
		 r2 = 8;				\
		 r3 = 0;				\
		 r4 = r10;				\
		 r4 += -16;				\
		 call %[bpf_ringbuf_reserve_dynptr];	\
		 if r0 == 0 goto pjmp1;			\
		 goto pjmp2;				\
	pjmp1:						\
		 *(u64 *)(r10 - 16) = r9;		\
	pjmp2:						\
		 r1 = r10;				\
		 r1 += -16;				\
		 r2 = 0;				\
		 call %[bpf_ringbuf_discard_dynptr];	"
		:
		: __imm(bpf_ringbuf_reserve_dynptr),
		  __imm(bpf_ringbuf_discard_dynptr),
		  __imm_addr(ringbuf)
		: __clobber_all
	);
	return 0;
}
718 
/* Counterpart to dynptr_pruning_overwrite: the two branch paths differ
 * only by a no-op move (r9 = r9), so the verifier is expected to find the
 * states stack-safe and prune ("12: safe" in the log). NOTE(review): the
 * expected message pins the instruction index, so the asm must not change.
 */
SEC("?tc")
__success __msg("12: safe") __log_level(2)
int dynptr_pruning_stacksafe(struct __sk_buff *ctx)
{
	asm volatile (
		"r9 = 0xeB9F;				\
		 r6 = %[ringbuf] ll;			\
		 r1 = r6;				\
		 r2 = 8;				\
		 r3 = 0;				\
		 r4 = r10;				\
		 r4 += -16;				\
		 call %[bpf_ringbuf_reserve_dynptr];	\
		 if r0 == 0 goto stjmp1;		\
		 goto stjmp2;				\
	stjmp1:						\
		 r9 = r9;				\
	stjmp2:						\
		 r1 = r10;				\
		 r1 += -16;				\
		 r2 = 0;				\
		 call %[bpf_ringbuf_discard_dynptr];	"
		:
		: __imm(bpf_ringbuf_reserve_dynptr),
		  __imm(bpf_ringbuf_discard_dynptr),
		  __imm_addr(ringbuf)
		: __clobber_all
	);
	return 0;
}
749 
/* Tries to get state pruning to confuse a ringbuf-backed dynptr at fp-16
 * (tjmp1 path) with a mem-backed dynptr created over the same slot on the
 * other path (tjmp2 calls bpf_dynptr_from_mem after scribbling the slots);
 * the verifier must still flag the overwrite of the referenced dynptr.
 * NOTE(review): exact register/stack choreography is load-bearing —
 * presumably a regression test for a specific pruning bug; do not reflow.
 */
SEC("?tc")
__failure __msg("cannot overwrite referenced dynptr") __log_level(2)
int dynptr_pruning_type_confusion(struct __sk_buff *ctx)
{
	asm volatile (
		"r6 = %[array_map4] ll;			\
		 r7 = %[ringbuf] ll;			\
		 r1 = r6;				\
		 r2 = r10;				\
		 r2 += -8;				\
		 r9 = 0;				\
		 *(u64 *)(r2 + 0) = r9;			\
		 r3 = r10;				\
		 r3 += -24;				\
		 r9 = 0xeB9FeB9F;			\
		 *(u64 *)(r10 - 16) = r9;		\
		 *(u64 *)(r10 - 24) = r9;		\
		 r9 = 0;				\
		 r4 = 0;				\
		 r8 = r2;				\
		 call %[bpf_map_update_elem];		\
		 r1 = r6;				\
		 r2 = r8;				\
		 call %[bpf_map_lookup_elem];		\
		 if r0 != 0 goto tjmp1;			\
		 exit;					\
	tjmp1:						\
		 r8 = r0;				\
		 r1 = r7;				\
		 r2 = 8;				\
		 r3 = 0;				\
		 r4 = r10;				\
		 r4 += -16;				\
		 r0 = *(u64 *)(r0 + 0);			\
		 call %[bpf_ringbuf_reserve_dynptr];	\
		 if r0 == 0 goto tjmp2;			\
		 r8 = r8;				\
		 r8 = r8;				\
		 r8 = r8;				\
		 r8 = r8;				\
		 r8 = r8;				\
		 r8 = r8;				\
		 r8 = r8;				\
		 goto tjmp3;				\
	tjmp2:						\
		 *(u64 *)(r10 - 8) = r9;		\
		 *(u64 *)(r10 - 16) = r9;		\
		 r1 = r8;				\
		 r1 += 8;				\
		 r2 = 0;				\
		 r3 = 0;				\
		 r4 = r10;				\
		 r4 += -16;				\
		 call %[bpf_dynptr_from_mem];		\
	tjmp3:						\
		 r1 = r10;				\
		 r1 += -16;				\
		 r2 = 0;				\
		 call %[bpf_ringbuf_discard_dynptr];	"
		:
		: __imm(bpf_map_update_elem),
		  __imm(bpf_map_lookup_elem),
		  __imm(bpf_ringbuf_reserve_dynptr),
		  __imm(bpf_dynptr_from_mem),
		  __imm(bpf_ringbuf_discard_dynptr),
		  __imm_addr(array_map4),
		  __imm_addr(ringbuf)
		: __clobber_all
	);
	return 0;
}
821 
/* Passing a dynptr slot at a variable stack offset (fp-32 + r8, where r8
 * is only range-bounded) must be rejected: dynptrs have to live at a
 * constant offset so the verifier can track their slots.
 */
SEC("?tc")
__failure __msg("dynptr has to be at a constant offset") __log_level(2)
int dynptr_var_off_overwrite(struct __sk_buff *ctx)
{
	asm volatile (
		"r9 = 16;				\
		 *(u32 *)(r10 - 4) = r9;		\
		 r8 = *(u32 *)(r10 - 4);		\
		 if r8 >= 0 goto vjmp1;			\
		 r0 = 1;				\
		 exit;					\
	vjmp1:						\
		 if r8 <= 16 goto vjmp2;		\
		 r0 = 1;				\
		 exit;					\
	vjmp2:						\
		 r8 &= 16;				\
		 r1 = %[ringbuf] ll;			\
		 r2 = 8;				\
		 r3 = 0;				\
		 r4 = r10;				\
		 r4 += -32;				\
		 r4 += r8;				\
		 call %[bpf_ringbuf_reserve_dynptr];	\
		 r9 = 0xeB9F;				\
		 *(u64 *)(r10 - 16) = r9;		\
		 r1 = r10;				\
		 r1 += -32;				\
		 r1 += r8;				\
		 r2 = 0;				\
		 call %[bpf_ringbuf_discard_dynptr];	"
		:
		: __imm(bpf_ringbuf_reserve_dynptr),
		  __imm(bpf_ringbuf_discard_dynptr),
		  __imm_addr(ringbuf)
		: __clobber_all
	);
	return 0;
}
861 
/* Overwriting just one 8-byte slot of a referenced dynptr must invalidate
 * it: the ringbuf dynptr is reserved at fp-24 (occupying fp-24..fp-8) and
 * the store to fp-16 clobbers its second slot before it is used again.
 * NOTE(review): regression-style asm test — exact offsets are load-bearing.
 */
SEC("?tc")
__failure __msg("cannot overwrite referenced dynptr") __log_level(2)
int dynptr_partial_slot_invalidate(struct __sk_buff *ctx)
{
	asm volatile (
		"r6 = %[ringbuf] ll;			\
		 r7 = %[array_map4] ll;			\
		 r1 = r7;				\
		 r2 = r10;				\
		 r2 += -8;				\
		 r9 = 0;				\
		 *(u64 *)(r2 + 0) = r9;			\
		 r3 = r2;				\
		 r4 = 0;				\
		 r8 = r2;				\
		 call %[bpf_map_update_elem];		\
		 r1 = r7;				\
		 r2 = r8;				\
		 call %[bpf_map_lookup_elem];		\
		 if r0 != 0 goto sjmp1;			\
		 exit;					\
	sjmp1:						\
		 r7 = r0;				\
		 r1 = r6;				\
		 r2 = 8;				\
		 r3 = 0;				\
		 r4 = r10;				\
		 r4 += -24;				\
		 call %[bpf_ringbuf_reserve_dynptr];	\
		 *(u64 *)(r10 - 16) = r9;		\
		 r1 = r7;				\
		 r2 = 8;				\
		 r3 = 0;				\
		 r4 = r10;				\
		 r4 += -16;				\
		 call %[bpf_dynptr_from_mem];		\
		 r1 = r10;				\
		 r1 += -512;				\
		 r2 = 488;				\
		 r3 = r10;				\
		 r3 += -24;				\
		 r4 = 0;				\
		 r5 = 0;				\
		 call %[bpf_dynptr_read];		\
		 r8 = 1;				\
		 if r0 != 0 goto sjmp2;			\
		 r8 = 0;				\
	sjmp2:						\
		 r1 = r10;				\
		 r1 += -24;				\
		 r2 = 0;				\
		 call %[bpf_ringbuf_discard_dynptr];	"
		:
		: __imm(bpf_map_update_elem),
		  __imm(bpf_map_lookup_elem),
		  __imm(bpf_ringbuf_reserve_dynptr),
		  __imm(bpf_ringbuf_discard_dynptr),
		  __imm(bpf_dynptr_from_mem),
		  __imm(bpf_dynptr_read),
		  __imm_addr(ringbuf),
		  __imm_addr(array_map4)
		: __clobber_all
	);
	return 0;
}
927 
/* Test that it is allowed to overwrite unreferenced dynptr. */
SEC("?raw_tp")
__success
int dynptr_overwrite_unref(void *ctx)
{
	struct bpf_dynptr ptr;

	/* re-initializing an unreferenced dynptr repeatedly is fine */
	if (get_map_val_dynptr(&ptr))
		return 0;
	if (get_map_val_dynptr(&ptr))
		return 0;
	if (get_map_val_dynptr(&ptr))
		return 0;

	return 0;
}
944 
/* Test that slices are invalidated on reinitializing a dynptr. */
SEC("?raw_tp")
__failure __msg("invalid mem access 'scalar'")
int dynptr_invalidate_slice_reinit(void *ctx)
{
	struct bpf_dynptr ptr;
	__u8 *p;

	if (get_map_val_dynptr(&ptr))
		return 0;
	p = bpf_dynptr_data(&ptr, 0, 1);
	if (!p)
		return 0;
	/* reinit invalidates the slice obtained above */
	if (get_map_val_dynptr(&ptr))
		return 0;
	/* this should fail */
	return *p;
}
963 
/* Invalidation of dynptr slices on destruction of dynptr should not miss
 * mem_or_null pointers.
 */
SEC("?raw_tp")
__failure __msg("R{{[0-9]+}} type=scalar expected=percpu_ptr_")
int dynptr_invalidate_slice_or_null(void *ctx)
{
	struct bpf_dynptr ptr;
	__u8 *p;

	if (get_map_val_dynptr(&ptr))
		return 0;

	/* p is still mem_or_null (no NULL check) when ptr is destroyed */
	p = bpf_dynptr_data(&ptr, 0, 1);
	*(__u8 *)&ptr = 0;
	/* this should fail */
	bpf_this_cpu_ptr(p);
	return 0;
}
983 
/* Destruction of a dynptr should also invalidate any slices obtained
 * from it
 */
SEC("?raw_tp")
__failure __msg("R{{[0-9]+}} invalid mem access 'scalar'")
int dynptr_invalidate_slice_failure(void *ctx)
{
	struct bpf_dynptr ptr1;
	struct bpf_dynptr ptr2;
	__u8 *p1, *p2;

	if (get_map_val_dynptr(&ptr1))
		return 0;
	if (get_map_val_dynptr(&ptr2))
		return 0;

	p1 = bpf_dynptr_data(&ptr1, 0, 1);
	if (!p1)
		return 0;
	p2 = bpf_dynptr_data(&ptr2, 0, 1);
	if (!p2)
		return 0;

	/* destroying ptr1 invalidates p1 */
	*(__u8 *)&ptr1 = 0;
	/* this should fail */
	return *p1;
}
1009 
/* Invalidation of slices should be scoped and should not prevent dereferencing
 * slices of another dynptr after destroying unrelated dynptr
 */
SEC("?raw_tp")
__success
int dynptr_invalidate_slice_success(void *ctx)
{
	struct bpf_dynptr ptr1;
	struct bpf_dynptr ptr2;
	__u8 *p1, *p2;

	if (get_map_val_dynptr(&ptr1))
		return 1;
	if (get_map_val_dynptr(&ptr2))
		return 1;

	p1 = bpf_dynptr_data(&ptr1, 0, 1);
	if (!p1)
		return 1;
	p2 = bpf_dynptr_data(&ptr2, 0, 1);
	if (!p2)
		return 1;

	/* only ptr1's slice (p1) is invalidated; p2 stays valid */
	*(__u8 *)&ptr1 = 0;
	return *p2;
}
1036 
/* Overwriting referenced dynptr should be rejected */
SEC("?raw_tp")
__failure __msg("cannot overwrite referenced dynptr")
int dynptr_overwrite_ref(void *ctx)
{
	struct bpf_dynptr ptr;

	bpf_ringbuf_reserve_dynptr(&ringbuf, 64, 0, &ptr);
	/* this should fail: re-init over a referenced (ringbuf) dynptr */
	if (get_map_val_dynptr(&ptr))
		bpf_ringbuf_discard_dynptr(&ptr, 0);
	return 0;
}
1050 
/* Reject writes to dynptr slot from bpf_dynptr_read */
SEC("?raw_tp")
__failure __msg("potential write to dynptr at off=-16")
int dynptr_read_into_slot(void *ctx)
{
	/* buf overlays the dynptr itself, so reading into buf would
	 * overwrite the dynptr's own stack slots
	 */
	union {
		struct {
			char _pad[48];
			struct bpf_dynptr ptr;
		};
		char buf[64];
	} data;

	bpf_ringbuf_reserve_dynptr(&ringbuf, 64, 0, &data.ptr);
	/* this should fail */
	bpf_dynptr_read(data.buf, sizeof(data.buf), &data.ptr, 0, 0);

	return 0;
}
1070 
/* bpf_dynptr_slice()s are read-only and cannot be written to */
SEC("?tc")
__failure __msg("R{{[0-9]+}} cannot write into rdonly_mem")
int skb_invalid_slice_write(struct __sk_buff *skb)
{
	struct bpf_dynptr ptr;
	struct ethhdr *hdr;
	char buffer[sizeof(*hdr)] = {};

	bpf_dynptr_from_skb(skb, 0, &ptr);

	hdr = bpf_dynptr_slice(&ptr, 0, buffer, sizeof(buffer));
	if (!hdr)
		return SK_DROP;

	/* this should fail */
	hdr->h_proto = 1;

	return SK_PASS;
}
1091 
/* The read-only data slice is invalidated whenever a helper changes packet data */
SEC("?tc")
__failure __msg("invalid mem access 'scalar'")
int skb_invalid_data_slice1(struct __sk_buff *skb)
{
	struct bpf_dynptr ptr;
	struct ethhdr *hdr;
	char buffer[sizeof(*hdr)] = {};

	bpf_dynptr_from_skb(skb, 0, &ptr);

	hdr = bpf_dynptr_slice(&ptr, 0, buffer, sizeof(buffer));
	if (!hdr)
		return SK_DROP;

	val = hdr->h_proto;

	/* pulling data may relocate packet memory, invalidating hdr */
	if (bpf_skb_pull_data(skb, skb->len))
		return SK_DROP;

	/* this should fail */
	val = hdr->h_proto;

	return SK_PASS;
}
1117 
/* The read-write data slice is invalidated whenever a helper changes packet data */
SEC("?tc")
__failure __msg("invalid mem access 'scalar'")
int skb_invalid_data_slice2(struct __sk_buff *skb)
{
	struct bpf_dynptr ptr;
	struct ethhdr *hdr;
	char buffer[sizeof(*hdr)] = {};

	bpf_dynptr_from_skb(skb, 0, &ptr);

	hdr = bpf_dynptr_slice_rdwr(&ptr, 0, buffer, sizeof(buffer));
	if (!hdr)
		return SK_DROP;

	hdr->h_proto = 123;

	/* pulling data may relocate packet memory, invalidating hdr */
	if (bpf_skb_pull_data(skb, skb->len))
		return SK_DROP;

	/* this should fail */
	hdr->h_proto = 1;

	return SK_PASS;
}
1143 
/* The read-only data slice is invalidated whenever bpf_dynptr_write() is called */
SEC("?tc")
__failure __msg("invalid mem access 'scalar'")
int skb_invalid_data_slice3(struct __sk_buff *skb)
{
	char write_data[64] = "hello there, world!!";
	struct bpf_dynptr ptr;
	struct ethhdr *hdr;
	char buffer[sizeof(*hdr)] = {};

	bpf_dynptr_from_skb(skb, 0, &ptr);

	hdr = bpf_dynptr_slice(&ptr, 0, buffer, sizeof(buffer));
	if (!hdr)
		return SK_DROP;

	val = hdr->h_proto;

	bpf_dynptr_write(&ptr, 0, write_data, sizeof(write_data), 0);

	/* this should fail */
	val = hdr->h_proto;

	return SK_PASS;
}
1169 
/* The read-write data slice is invalidated whenever bpf_dynptr_write() is called */
SEC("?tc")
__failure __msg("invalid mem access 'scalar'")
int skb_invalid_data_slice4(struct __sk_buff *skb)
{
	char write_data[64] = "hello there, world!!";
	struct bpf_dynptr ptr;
	struct ethhdr *hdr;
	char buffer[sizeof(*hdr)] = {};

	bpf_dynptr_from_skb(skb, 0, &ptr);
	hdr = bpf_dynptr_slice_rdwr(&ptr, 0, buffer, sizeof(buffer));
	if (!hdr)
		return SK_DROP;

	hdr->h_proto = 123;

	bpf_dynptr_write(&ptr, 0, write_data, sizeof(write_data), 0);

	/* this should fail */
	hdr->h_proto = 1;

	return SK_PASS;
}
1194 
1195 /* The read-only data slice is invalidated whenever a helper changes packet data */
1196 SEC("?xdp")
1197 __failure __msg("invalid mem access 'scalar'")
xdp_invalid_data_slice1(struct xdp_md * xdp)1198 int xdp_invalid_data_slice1(struct xdp_md *xdp)
1199 {
1200 	struct bpf_dynptr ptr;
1201 	struct ethhdr *hdr;
1202 	char buffer[sizeof(*hdr)] = {};
1203 
1204 	bpf_dynptr_from_xdp(xdp, 0, &ptr);
1205 	hdr = bpf_dynptr_slice(&ptr, 0, buffer, sizeof(buffer));
1206 	if (!hdr)
1207 		return SK_DROP;
1208 
1209 	val = hdr->h_proto;
1210 
	/* adjusting the head changes packet data, invalidating the hdr slice */
1211 	if (bpf_xdp_adjust_head(xdp, 0 - (int)sizeof(*hdr)))
1212 		return XDP_DROP;
1213 
1214 	/* this should fail */
1215 	val = hdr->h_proto;
1216 
1217 	return XDP_PASS;
1218 }
1219 
1220 /* The read-write data slice is invalidated whenever a helper changes packet data */
1221 SEC("?xdp")
1222 __failure __msg("invalid mem access 'scalar'")
xdp_invalid_data_slice2(struct xdp_md * xdp)1223 int xdp_invalid_data_slice2(struct xdp_md *xdp)
1224 {
1225 	struct bpf_dynptr ptr;
1226 	struct ethhdr *hdr;
1227 	char buffer[sizeof(*hdr)] = {};
1228 
1229 	bpf_dynptr_from_xdp(xdp, 0, &ptr);
1230 	hdr = bpf_dynptr_slice_rdwr(&ptr, 0, buffer, sizeof(buffer));
1231 	if (!hdr)
1232 		return SK_DROP;
1233 
1234 	hdr->h_proto = 9;
1235 
	/* adjusting the head changes packet data, invalidating the hdr slice */
1236 	if (bpf_xdp_adjust_head(xdp, 0 - (int)sizeof(*hdr)))
1237 		return XDP_DROP;
1238 
1239 	/* this should fail */
1240 	hdr->h_proto = 1;
1241 
1242 	return XDP_PASS;
1243 }
1244 
1245 /* Only supported prog type can create skb-type dynptrs */
1246 SEC("?raw_tp")
1247 __failure __msg("calling kernel function bpf_dynptr_from_skb is not allowed")
skb_invalid_ctx(void * ctx)1248 int skb_invalid_ctx(void *ctx)
1249 {
1250 	struct bpf_dynptr ptr;
1251 
1252 	/* this should fail */
1253 	bpf_dynptr_from_skb(ctx, 0, &ptr);
1254 
1255 	return 0;
1256 }
1257 
	/* The fentry/fexit skb argument is not a referenced or trusted pointer
	 * (see expected __msg below), so skb dynptr creation must be rejected.
	 */
1258 SEC("fentry/skb_tx_error")
1259 __failure __msg("must be referenced or trusted")
BPF_PROG(skb_invalid_ctx_fentry,void * skb)1260 int BPF_PROG(skb_invalid_ctx_fentry, void *skb)
1261 {
1262 	struct bpf_dynptr ptr;
1263 
1264 	/* this should fail */
1265 	bpf_dynptr_from_skb(skb, 0, &ptr);
1266 
1267 	return 0;
1268 }
1269 
	/* Same check as above, but for the fexit program type */
1270 SEC("fexit/skb_tx_error")
1271 __failure __msg("must be referenced or trusted")
BPF_PROG(skb_invalid_ctx_fexit,void * skb)1272 int BPF_PROG(skb_invalid_ctx_fexit, void *skb)
1273 {
1274 	struct bpf_dynptr ptr;
1275 
1276 	/* this should fail */
1277 	bpf_dynptr_from_skb(skb, 0, &ptr);
1278 
1279 	return 0;
1280 }
1281 
1282 /* Reject writes to dynptr slot for uninit arg */
1283 SEC("?raw_tp")
1284 __failure __msg("potential write to dynptr at off=-16")
uninit_write_into_slot(void * ctx)1285 int uninit_write_into_slot(void *ctx)
1286 {
1287 	struct {
1288 		char buf[64];
1289 		struct bpf_dynptr ptr;
1290 	} data;
1291 
1292 	bpf_ringbuf_reserve_dynptr(&ringbuf, 80, 0, &data.ptr);
1293 	/* this should fail: an 80-byte write into the 64-byte buf would
	 * spill into the adjacent dynptr stack slot
	 */
1294 	bpf_get_current_comm(data.buf, 80);
1295 
1296 	return 0;
1297 }
1298 
1299 /* Only supported prog type can create xdp-type dynptrs */
1300 SEC("?raw_tp")
1301 __failure __msg("calling kernel function bpf_dynptr_from_xdp is not allowed")
xdp_invalid_ctx(void * ctx)1302 int xdp_invalid_ctx(void *ctx)
1303 {
1304 	struct bpf_dynptr ptr;
1305 
1306 	/* this should fail */
1307 	bpf_dynptr_from_xdp(ctx, 0, &ptr);
1308 
1309 	return 0;
1310 }
1311 
1312 __u32 hdr_size = sizeof(struct ethhdr);
1313 /* Can't pass in variable-sized len to bpf_dynptr_slice */
1314 SEC("?tc")
1315 __failure __msg("unbounded memory access")
dynptr_slice_var_len1(struct __sk_buff * skb)1316 int dynptr_slice_var_len1(struct __sk_buff *skb)
1317 {
1318 	struct bpf_dynptr ptr;
1319 	struct ethhdr *hdr;
1320 	char buffer[sizeof(*hdr)] = {};
1321 
1322 	bpf_dynptr_from_skb(skb, 0, &ptr);
1323 
1324 	/* this should fail: hdr_size is a global variable, so its value
	 * is not a known constant to the verifier
	 */
1325 	hdr = bpf_dynptr_slice(&ptr, 0, buffer, hdr_size);
1326 	if (!hdr)
1327 		return SK_DROP;
1328 
1329 	return SK_PASS;
1330 }
1331 
1332 /* Can't pass in variable-sized len to bpf_dynptr_slice */
1333 SEC("?tc")
1334 __failure __msg("must be a known constant")
dynptr_slice_var_len2(struct __sk_buff * skb)1335 int dynptr_slice_var_len2(struct __sk_buff *skb)
1336 {
1337 	char buffer[sizeof(struct ethhdr)] = {};
1338 	struct bpf_dynptr ptr;
1339 	struct ethhdr *hdr;
1340 
1341 	bpf_dynptr_from_skb(skb, 0, &ptr);
1342 
	/* bounding hdr_size still does not make it a known constant */
1343 	if (hdr_size <= sizeof(buffer)) {
1344 		/* this should fail */
1345 		hdr = bpf_dynptr_slice_rdwr(&ptr, 0, buffer, hdr_size);
1346 		if (!hdr)
1347 			return SK_DROP;
1348 		hdr->h_proto = 12;
1349 	}
1350 
1351 	return SK_PASS;
1352 }
1353 
callback(__u32 index,void * data)1354 static int callback(__u32 index, void *data)
1355 {
	/* bpf_loop() callback: writes through its data argument */
1356         *(__u32 *)data = 123;
1357 
1358         return 0;
1359 }
1360 
1361 /* If the dynptr is written into in a callback function, its data
1362  * slices should be invalidated as well.
1363  */
1364 SEC("?raw_tp")
1365 __failure __msg("invalid mem access 'scalar'")
invalid_data_slices(void * ctx)1366 int invalid_data_slices(void *ctx)
1367 {
1368 	struct bpf_dynptr ptr;
1369 	__u32 *slice;
1370 
1371 	if (get_map_val_dynptr(&ptr))
1372 		return 0;
1373 
1374 	slice = bpf_dynptr_data(&ptr, 0, sizeof(__u32));
1375 	if (!slice)
1376 		return 0;
1377 
	/* the callback may write into the dynptr, so the slice is invalidated */
1378 	bpf_loop(10, callback, &ptr, 0);
1379 
1380 	/* this should fail */
1381 	*slice = 1;
1382 
1383 	return 0;
1384 }
1385 
1386 /* Program types that don't allow writes to packet data should fail if
1387  * bpf_dynptr_slice_rdwr is called
1388  */
1389 SEC("cgroup_skb/ingress")
1390 __failure __msg("the prog does not allow writes to packet data")
invalid_slice_rdwr_rdonly(struct __sk_buff * skb)1391 int invalid_slice_rdwr_rdonly(struct __sk_buff *skb)
1392 {
1393 	char buffer[sizeof(struct ethhdr)] = {};
1394 	struct bpf_dynptr ptr;
1395 	struct ethhdr *hdr;
1396 
1397 	bpf_dynptr_from_skb(skb, 0, &ptr);
1398 
1399 	/* this should fail since cgroup_skb doesn't allow
1400 	 * changing packet data
1401 	 */
1402 	hdr = bpf_dynptr_slice_rdwr(&ptr, 0, buffer, sizeof(buffer));
	/* keep hdr observable so the call isn't optimized away */
1403 	__sink(hdr);
1404 
1405 	return 0;
1406 }
1407 
1408 /* bpf_dynptr_adjust can only be called on initialized dynptrs */
1409 SEC("?raw_tp")
1410 __failure __msg("Expected an initialized dynptr as arg #1")
dynptr_adjust_invalid(void * ctx)1411 int dynptr_adjust_invalid(void *ctx)
1412 {
	/* zero-filled, but never initialized by a dynptr constructor */
1413 	struct bpf_dynptr ptr = {};
1414 
1415 	/* this should fail */
1416 	bpf_dynptr_adjust(&ptr, 1, 2);
1417 
1418 	return 0;
1419 }
1420 
1421 /* bpf_dynptr_is_null can only be called on initialized dynptrs */
1422 SEC("?raw_tp")
1423 __failure __msg("Expected an initialized dynptr as arg #1")
dynptr_is_null_invalid(void * ctx)1424 int dynptr_is_null_invalid(void *ctx)
1425 {
1426 	struct bpf_dynptr ptr = {};
1427 
1428 	/* this should fail */
1429 	bpf_dynptr_is_null(&ptr);
1430 
1431 	return 0;
1432 }
1433 
1434 /* bpf_dynptr_is_rdonly can only be called on initialized dynptrs */
1435 SEC("?raw_tp")
1436 __failure __msg("Expected an initialized dynptr as arg #1")
dynptr_is_rdonly_invalid(void * ctx)1437 int dynptr_is_rdonly_invalid(void *ctx)
1438 {
1439 	struct bpf_dynptr ptr = {};
1440 
1441 	/* this should fail */
1442 	bpf_dynptr_is_rdonly(&ptr);
1443 
1444 	return 0;
1445 }
1446 
1447 /* bpf_dynptr_size can only be called on initialized dynptrs */
1448 SEC("?raw_tp")
1449 __failure __msg("Expected an initialized dynptr as arg #1")
dynptr_size_invalid(void * ctx)1450 int dynptr_size_invalid(void *ctx)
1451 {
1452 	struct bpf_dynptr ptr = {};
1453 
1454 	/* this should fail */
1455 	bpf_dynptr_size(&ptr);
1456 
1457 	return 0;
1458 }
1459 
1460 /* Only initialized dynptrs can be cloned */
1461 SEC("?raw_tp")
1462 __failure __msg("Expected an initialized dynptr as arg #1")
clone_invalid1(void * ctx)1463 int clone_invalid1(void *ctx)
1464 {
	/* zero-filled, but never initialized by a dynptr constructor */
1465 	struct bpf_dynptr ptr1 = {};
1466 	struct bpf_dynptr ptr2;
1467 
1468 	/* this should fail */
1469 	bpf_dynptr_clone(&ptr1, &ptr2);
1470 
1471 	return 0;
1472 }
1473 
1474 /* Can't overwrite an existing dynptr when cloning */
1475 SEC("?xdp")
1476 __failure __msg("cannot overwrite referenced dynptr")
clone_invalid2(struct xdp_md * xdp)1477 int clone_invalid2(struct xdp_md *xdp)
1478 {
1479 	struct bpf_dynptr ptr1;
1480 	struct bpf_dynptr clone;
1481 
1482 	bpf_dynptr_from_xdp(xdp, 0, &ptr1);
1483 
	/* clone now holds a referenced ringbuf reservation */
1484 	bpf_ringbuf_reserve_dynptr(&ringbuf, 64, 0, &clone);
1485 
1486 	/* this should fail */
1487 	bpf_dynptr_clone(&ptr1, &clone);
1488 
1489 	bpf_ringbuf_submit_dynptr(&clone, 0);
1490 
1491 	return 0;
1492 }
1493 
1494 /* Invalidating a dynptr should invalidate its clones */
1495 SEC("?raw_tp")
1496 __failure __msg("Expected an initialized dynptr as arg #3")
clone_invalidate1(void * ctx)1497 int clone_invalidate1(void *ctx)
1498 {
1499 	struct bpf_dynptr clone;
1500 	struct bpf_dynptr ptr;
1501 	char read_data[64];
1502 
1503 	bpf_ringbuf_reserve_dynptr(&ringbuf, val, 0, &ptr);
1504 
1505 	bpf_dynptr_clone(&ptr, &clone);
1506 
	/* submitting the parent invalidates the clone as well */
1507 	bpf_ringbuf_submit_dynptr(&ptr, 0);
1508 
1509 	/* this should fail */
1510 	bpf_dynptr_read(read_data, sizeof(read_data), &clone, 0, 0);
1511 
1512 	return 0;
1513 }
1514 
1515 /* Invalidating a dynptr should invalidate its parent */
1516 SEC("?raw_tp")
1517 __failure __msg("Expected an initialized dynptr as arg #3")
clone_invalidate2(void * ctx)1518 int clone_invalidate2(void *ctx)
1519 {
1520 	struct bpf_dynptr ptr;
1521 	struct bpf_dynptr clone;
1522 	char read_data[64];
1523 
1524 	bpf_ringbuf_reserve_dynptr(&ringbuf, val, 0, &ptr);
1525 
1526 	bpf_dynptr_clone(&ptr, &clone);
1527 
	/* submitting the clone invalidates the parent as well */
1528 	bpf_ringbuf_submit_dynptr(&clone, 0);
1529 
1530 	/* this should fail */
1531 	bpf_dynptr_read(read_data, sizeof(read_data), &ptr, 0, 0);
1532 
1533 	return 0;
1534 }
1535 
1536 /* Invalidating a dynptr should invalidate its siblings */
1537 SEC("?raw_tp")
1538 __failure __msg("Expected an initialized dynptr as arg #3")
clone_invalidate3(void * ctx)1539 int clone_invalidate3(void *ctx)
1540 {
1541 	struct bpf_dynptr ptr;
1542 	struct bpf_dynptr clone1;
1543 	struct bpf_dynptr clone2;
1544 	char read_data[64];
1545 
1546 	bpf_ringbuf_reserve_dynptr(&ringbuf, val, 0, &ptr);
1547 
1548 	bpf_dynptr_clone(&ptr, &clone1);
1549 
1550 	bpf_dynptr_clone(&ptr, &clone2);
1551 
	/* submitting one clone invalidates its sibling clone1 as well */
1552 	bpf_ringbuf_submit_dynptr(&clone2, 0);
1553 
1554 	/* this should fail */
1555 	bpf_dynptr_read(read_data, sizeof(read_data), &clone1, 0, 0);
1556 
1557 	return 0;
1558 }
1559 
1560 /* Invalidating a dynptr should invalidate any data slices
1561  * of its clones
1562  */
1563 SEC("?raw_tp")
1564 __failure __msg("invalid mem access 'scalar'")
clone_invalidate4(void * ctx)1565 int clone_invalidate4(void *ctx)
1566 {
1567 	struct bpf_dynptr ptr;
1568 	struct bpf_dynptr clone;
1569 	int *data;
1570 
1571 	bpf_ringbuf_reserve_dynptr(&ringbuf, val, 0, &ptr);
1572 
1573 	bpf_dynptr_clone(&ptr, &clone);
1574 	data = bpf_dynptr_data(&clone, 0, sizeof(val));
1575 	if (!data)
1576 		return 0;
1577 
	/* submitting the parent invalidates the clone's data slice */
1578 	bpf_ringbuf_submit_dynptr(&ptr, 0);
1579 
1580 	/* this should fail */
1581 	*data = 123;
1582 
1583 	return 0;
1584 }
1585 
1586 /* Invalidating a dynptr should invalidate any data slices
1587  * of its parent
1588  */
1589 SEC("?raw_tp")
1590 __failure __msg("invalid mem access 'scalar'")
clone_invalidate5(void * ctx)1591 int clone_invalidate5(void *ctx)
1592 {
1593 	struct bpf_dynptr ptr;
1594 	struct bpf_dynptr clone;
1595 	int *data;
1596 
1597 	bpf_ringbuf_reserve_dynptr(&ringbuf, val, 0, &ptr);
1598 	data = bpf_dynptr_data(&ptr, 0, sizeof(val));
1599 	if (!data)
1600 		return 0;
1601 
1602 	bpf_dynptr_clone(&ptr, &clone);
1603 
	/* submitting the clone invalidates the parent's data slice */
1604 	bpf_ringbuf_submit_dynptr(&clone, 0);
1605 
1606 	/* this should fail */
1607 	*data = 123;
1608 
1609 	return 0;
1610 }
1611 
1612 /* Invalidating a dynptr should invalidate any data slices
1613  * of its sibling
1614  */
1615 SEC("?raw_tp")
1616 __failure __msg("invalid mem access 'scalar'")
clone_invalidate6(void * ctx)1617 int clone_invalidate6(void *ctx)
1618 {
1619 	struct bpf_dynptr ptr;
1620 	struct bpf_dynptr clone1;
1621 	struct bpf_dynptr clone2;
1622 	int *data;
1623 
1624 	bpf_ringbuf_reserve_dynptr(&ringbuf, val, 0, &ptr);
1625 
1626 	bpf_dynptr_clone(&ptr, &clone1);
1627 
1628 	bpf_dynptr_clone(&ptr, &clone2);
1629 
1630 	data = bpf_dynptr_data(&clone1, 0, sizeof(val));
1631 	if (!data)
1632 		return 0;
1633 
	/* submitting clone2 invalidates sibling clone1's data slice */
1634 	bpf_ringbuf_submit_dynptr(&clone2, 0);
1635 
1636 	/* this should fail */
1637 	*data = 123;
1638 
1639 	return 0;
1640 }
1641 
1642 /* A skb clone's data slices should be invalid anytime packet data changes */
1643 SEC("?tc")
1644 __failure __msg("invalid mem access 'scalar'")
clone_skb_packet_data(struct __sk_buff * skb)1645 int clone_skb_packet_data(struct __sk_buff *skb)
1646 {
1647 	char buffer[sizeof(__u32)] = {};
1648 	struct bpf_dynptr clone;
1649 	struct bpf_dynptr ptr;
1650 	__u32 *data;
1651 
1652 	bpf_dynptr_from_skb(skb, 0, &ptr);
1653 
1654 	bpf_dynptr_clone(&ptr, &clone);
1655 	data = bpf_dynptr_slice_rdwr(&clone, 0, buffer, sizeof(buffer));
1656 	if (!data)
1657 		return XDP_DROP;
1658 
	/* changing packet data invalidates the clone's data slice */
1659 	if (bpf_skb_pull_data(skb, skb->len))
1660 		return SK_DROP;
1661 
1662 	/* this should fail */
1663 	*data = 123;
1664 
1665 	return 0;
1666 }
1667 
1668 /* A xdp clone's data slices should be invalid anytime packet data changes */
1669 SEC("?xdp")
1670 __failure __msg("invalid mem access 'scalar'")
clone_xdp_packet_data(struct xdp_md * xdp)1671 int clone_xdp_packet_data(struct xdp_md *xdp)
1672 {
1673 	char buffer[sizeof(__u32)] = {};
1674 	struct bpf_dynptr clone;
1675 	struct bpf_dynptr ptr;
1676 	struct ethhdr *hdr;
1677 	__u32 *data;
1678 
1679 	bpf_dynptr_from_xdp(xdp, 0, &ptr);
1680 
1681 	bpf_dynptr_clone(&ptr, &clone);
1682 	data = bpf_dynptr_slice_rdwr(&clone, 0, buffer, sizeof(buffer));
1683 	if (!data)
1684 		return XDP_DROP;
1685 
	/* changing packet data invalidates the clone's data slice */
1686 	if (bpf_xdp_adjust_head(xdp, 0 - (int)sizeof(*hdr)))
1687 		return XDP_DROP;
1688 
1689 	/* this should fail */
1690 	*data = 123;
1691 
1692 	return 0;
1693 }
1694 
1695 /* Buffers that are provided must be sufficiently long */
1696 SEC("?cgroup_skb/egress")
1697 __failure __msg("memory, len pair leads to invalid memory access")
test_dynptr_skb_small_buff(struct __sk_buff * skb)1698 int test_dynptr_skb_small_buff(struct __sk_buff *skb)
1699 {
1700 	struct bpf_dynptr ptr;
1701 	char buffer[8] = {};
1702 	__u64 *data;
1703 
1704 	if (bpf_dynptr_from_skb(skb, 0, &ptr)) {
1705 		err = 1;
1706 		return 1;
1707 	}
1708 
1709 	/* This may return NULL. SKB may require a buffer */
	/* requesting 9 bytes with only an 8-byte buffer must be rejected */
1710 	data = bpf_dynptr_slice(&ptr, 0, buffer, 9);
1711 
1712 	return !!data;
1713 }
1714 
	/* Global (non-static) helper taking a const dynptr arg; used below to
	 * exercise the verifier's argument-type check for global functions.
	 */
global_call_bpf_dynptr(const struct bpf_dynptr * dynptr)1715 __noinline long global_call_bpf_dynptr(const struct bpf_dynptr *dynptr)
1716 {
1717 	long ret = 0;
1718 	/* Avoid leaving this global function empty to avoid having the compiler
1719 	 * optimize away the call to this global function.
1720 	 */
1721 	__sink(ret);
1722 	return ret;
1723 }
1724 
1725 SEC("?raw_tp")
1726 __failure __msg("arg#1 expected pointer to stack or const struct bpf_dynptr")
test_dynptr_reg_type(void * ctx)1727 int test_dynptr_reg_type(void *ctx)
1728 {
1729 	struct task_struct *current = NULL;
1730 	/* R1 should be holding a PTR_TO_BTF_ID, so this shouldn't be a
1731 	 * reg->type that can be passed to a function accepting a
1732 	 * ARG_PTR_TO_DYNPTR | MEM_RDONLY. process_dynptr_func() should catch
1733 	 * this.
1734 	 */
1735 	global_call_bpf_dynptr((const struct bpf_dynptr *)current);
1736 	return 0;
1737 }
1738