xref: /linux/tools/testing/selftests/bpf/progs/dynptr_fail.c (revision 4dba4a936ffb9fdfbacd6f82259f0daf1c83779b)
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2022 Facebook */
3 
4 #include <errno.h>
5 #include <string.h>
6 #include <stdbool.h>
7 #include <linux/bpf.h>
8 #include <bpf/bpf_helpers.h>
9 #include <bpf/bpf_tracing.h>
10 #include <linux/if_ether.h>
11 #include "bpf_misc.h"
12 #include "bpf_kfuncs.h"
13 
14 char _license[] SEC("license") = "GPL";
15 
16 struct test_info {
17 	int x;
18 	struct bpf_dynptr ptr;
19 };
20 
21 struct {
22 	__uint(type, BPF_MAP_TYPE_ARRAY);
23 	__uint(max_entries, 1);
24 	__type(key, __u32);
25 	__type(value, struct bpf_dynptr);
26 } array_map1 SEC(".maps");
27 
28 struct {
29 	__uint(type, BPF_MAP_TYPE_ARRAY);
30 	__uint(max_entries, 1);
31 	__type(key, __u32);
32 	__type(value, struct test_info);
33 } array_map2 SEC(".maps");
34 
35 struct {
36 	__uint(type, BPF_MAP_TYPE_ARRAY);
37 	__uint(max_entries, 1);
38 	__type(key, __u32);
39 	__type(value, __u32);
40 } array_map3 SEC(".maps");
41 
42 struct {
43 	__uint(type, BPF_MAP_TYPE_ARRAY);
44 	__uint(max_entries, 1);
45 	__type(key, __u32);
46 	__type(value, __u64);
47 } array_map4 SEC(".maps");
48 
49 struct sample {
50 	int pid;
51 	long value;
52 	char comm[16];
53 };
54 
55 struct {
56 	__uint(type, BPF_MAP_TYPE_RINGBUF);
57 	__uint(max_entries, 4096);
58 } ringbuf SEC(".maps");
59 
60 int err, val;
61 
62 static int get_map_val_dynptr(struct bpf_dynptr *ptr)
63 {
64 	__u32 key = 0, *map_val;
65 
66 	bpf_map_update_elem(&array_map3, &key, &val, 0);
67 
68 	map_val = bpf_map_lookup_elem(&array_map3, &key);
69 	if (!map_val)
70 		return -ENOENT;
71 
72 	bpf_dynptr_from_mem(map_val, sizeof(*map_val), 0, ptr);
73 
74 	return 0;
75 }
76 
77 /* Every bpf_ringbuf_reserve_dynptr call must have a corresponding
78  * bpf_ringbuf_submit/discard_dynptr call
79  */
80 SEC("?raw_tp")
81 __failure __msg("Unreleased reference id=2")
82 int ringbuf_missing_release1(void *ctx)
83 {
84 	struct bpf_dynptr ptr = {};
85 
86 	bpf_ringbuf_reserve_dynptr(&ringbuf, val, 0, &ptr);
87 
88 	/* missing a call to bpf_ringbuf_discard/submit_dynptr */
89 
90 	return 0;
91 }
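
/* Illustrative sketch (not an upstream test): the accepted counterpart of
 * the test above simply pairs the reserve with a submit or discard. The
 * program name and layout here are the editor's own.
 */
SEC("?raw_tp")
__success
int ringbuf_release_balanced_example(void *ctx)
{
	struct bpf_dynptr ptr;

	bpf_ringbuf_reserve_dynptr(&ringbuf, val, 0, &ptr);

	bpf_ringbuf_discard_dynptr(&ptr, 0);

	return 0;
}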
92 
93 SEC("?raw_tp")
94 __failure __msg("Unreleased reference id=4")
95 int ringbuf_missing_release2(void *ctx)
96 {
97 	struct bpf_dynptr ptr1, ptr2;
98 	struct sample *sample;
99 
100 	bpf_ringbuf_reserve_dynptr(&ringbuf, sizeof(*sample), 0, &ptr1);
101 	bpf_ringbuf_reserve_dynptr(&ringbuf, sizeof(*sample), 0, &ptr2);
102 
103 	sample = bpf_dynptr_data(&ptr1, 0, sizeof(*sample));
104 	if (!sample) {
105 		bpf_ringbuf_discard_dynptr(&ptr1, 0);
106 		bpf_ringbuf_discard_dynptr(&ptr2, 0);
107 		return 0;
108 	}
109 
110 	bpf_ringbuf_submit_dynptr(&ptr1, 0);
111 
112 	/* missing a call to bpf_ringbuf_discard/submit_dynptr on ptr2 */
113 
114 	return 0;
115 }
116 
117 static int missing_release_callback_fn(__u32 index, void *data)
118 {
119 	struct bpf_dynptr ptr;
120 
121 	bpf_ringbuf_reserve_dynptr(&ringbuf, val, 0, &ptr);
122 
123 	/* missing a call to bpf_ringbuf_discard/submit_dynptr */
124 
125 	return 0;
126 }
127 
128 /* Any dynptr reserved within a callback must be submitted or discarded */
129 SEC("?raw_tp")
130 __failure __msg("Unreleased reference id")
131 int ringbuf_missing_release_callback(void *ctx)
132 {
133 	bpf_loop(10, missing_release_callback_fn, NULL, 0);
134 	return 0;
135 }
136 
137 /* Can't call bpf_ringbuf_submit/discard_dynptr on a non-initialized dynptr */
138 SEC("?raw_tp")
139 __failure __msg("arg 1 is an unacquired reference")
140 int ringbuf_release_uninit_dynptr(void *ctx)
141 {
142 	struct bpf_dynptr ptr;
143 
144 	/* this should fail */
145 	bpf_ringbuf_submit_dynptr(&ptr, 0);
146 
147 	return 0;
148 }
149 
150 /* A dynptr can't be used after it has been invalidated */
151 SEC("?raw_tp")
152 __failure __msg("Expected an initialized dynptr as arg #2")
153 int use_after_invalid(void *ctx)
154 {
155 	struct bpf_dynptr ptr;
156 	char read_data[64];
157 
158 	bpf_ringbuf_reserve_dynptr(&ringbuf, sizeof(read_data), 0, &ptr);
159 
160 	bpf_dynptr_read(read_data, sizeof(read_data), &ptr, 0, 0);
161 
162 	bpf_ringbuf_submit_dynptr(&ptr, 0);
163 
164 	/* this should fail */
165 	bpf_dynptr_read(read_data, sizeof(read_data), &ptr, 0, 0);
166 
167 	return 0;
168 }
169 
170 /* Can't call non-dynptr ringbuf APIs on a dynptr ringbuf sample */
171 SEC("?raw_tp")
172 __failure __msg("type=mem expected=ringbuf_mem")
173 int ringbuf_invalid_api(void *ctx)
174 {
175 	struct bpf_dynptr ptr;
176 	struct sample *sample;
177 
178 	bpf_ringbuf_reserve_dynptr(&ringbuf, sizeof(*sample), 0, &ptr);
179 	sample = bpf_dynptr_data(&ptr, 0, sizeof(*sample));
180 	if (!sample)
181 		goto done;
182 
183 	sample->pid = 123;
184 
185 	/* invalid API use. need to use dynptr API to submit/discard */
186 	bpf_ringbuf_submit(sample, 0);
187 
188 done:
189 	bpf_ringbuf_discard_dynptr(&ptr, 0);
190 	return 0;
191 }
192 
193 /* Can't add a dynptr to a map */
194 SEC("?raw_tp")
195 __failure __msg("invalid read from stack")
196 int add_dynptr_to_map1(void *ctx)
197 {
198 	struct bpf_dynptr ptr;
199 	int key = 0;
200 
201 	bpf_ringbuf_reserve_dynptr(&ringbuf, val, 0, &ptr);
202 
203 	/* this should fail */
204 	bpf_map_update_elem(&array_map1, &key, &ptr, 0);
205 
206 	bpf_ringbuf_submit_dynptr(&ptr, 0);
207 
208 	return 0;
209 }
210 
211 /* Can't add a struct with an embedded dynptr to a map */
212 SEC("?raw_tp")
213 __failure __msg("invalid read from stack")
214 int add_dynptr_to_map2(void *ctx)
215 {
216 	struct test_info x;
217 	int key = 0;
218 
219 	bpf_ringbuf_reserve_dynptr(&ringbuf, val, 0, &x.ptr);
220 
221 	/* this should fail */
222 	bpf_map_update_elem(&array_map2, &key, &x, 0);
223 
224 	bpf_ringbuf_submit_dynptr(&x.ptr, 0);
225 
226 	return 0;
227 }
228 
229 /* A data slice can't be accessed out of bounds */
230 SEC("?raw_tp")
231 __failure __msg("value is outside of the allowed memory range")
232 int data_slice_out_of_bounds_ringbuf(void *ctx)
233 {
234 	struct bpf_dynptr ptr;
235 	void *data;
236 
237 	bpf_ringbuf_reserve_dynptr(&ringbuf, 8, 0, &ptr);
238 
239 	data = bpf_dynptr_data(&ptr, 0, 8);
240 	if (!data)
241 		goto done;
242 
243 	/* can't index out of bounds of the data slice */
244 	val = *((char *)data + 8);
245 
246 done:
247 	bpf_ringbuf_submit_dynptr(&ptr, 0);
248 	return 0;
249 }
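
/* Illustrative sketch (not an upstream test): the same slice accessed at its
 * last valid byte (offset 7 of an 8-byte slice) is accepted.
 */
SEC("?raw_tp")
__success
int data_slice_in_bounds_ringbuf_example(void *ctx)
{
	struct bpf_dynptr ptr;
	void *data;

	bpf_ringbuf_reserve_dynptr(&ringbuf, 8, 0, &ptr);

	data = bpf_dynptr_data(&ptr, 0, 8);
	if (!data)
		goto done;

	/* offsets 0 through 7 stay within the slice */
	val = *((char *)data + 7);

done:
	bpf_ringbuf_submit_dynptr(&ptr, 0);
	return 0;
}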
250 
251 /* A data slice can't be accessed out of bounds */
252 SEC("?tc")
253 __failure __msg("value is outside of the allowed memory range")
254 int data_slice_out_of_bounds_skb(struct __sk_buff *skb)
255 {
256 	struct bpf_dynptr ptr;
257 	struct ethhdr *hdr;
258 	char buffer[sizeof(*hdr)] = {};
259 
260 	bpf_dynptr_from_skb(skb, 0, &ptr);
261 
262 	hdr = bpf_dynptr_slice_rdwr(&ptr, 0, buffer, sizeof(buffer));
263 	if (!hdr)
264 		return SK_DROP;
265 
266 	/* this should fail */
267 	*(__u8 *)(hdr + 1) = 1;
268 
269 	return SK_PASS;
270 }
271 
272 /* A metadata slice can't be accessed out of bounds */
273 SEC("?tc")
274 __failure __msg("value is outside of the allowed memory range")
275 int data_slice_out_of_bounds_skb_meta(struct __sk_buff *skb)
276 {
277 	struct bpf_dynptr meta;
278 	__u8 *md;
279 
280 	bpf_dynptr_from_skb_meta(skb, 0, &meta);
281 
282 	md = bpf_dynptr_slice_rdwr(&meta, 0, NULL, sizeof(*md));
283 	if (!md)
284 		return SK_DROP;
285 
286 	/* this should fail */
287 	*(md + 1) = 42;
288 
289 	return SK_PASS;
290 }
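
/* Illustrative sketch (not an upstream test): writing the one byte the slice
 * actually covers, instead of one past it, is accepted.
 */
SEC("?tc")
__success
int data_slice_in_bounds_skb_meta_example(struct __sk_buff *skb)
{
	struct bpf_dynptr meta;
	__u8 *md;

	bpf_dynptr_from_skb_meta(skb, 0, &meta);

	md = bpf_dynptr_slice_rdwr(&meta, 0, NULL, sizeof(*md));
	if (!md)
		return SK_DROP;

	*md = 42;

	return SK_PASS;
}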
291 
292 SEC("?raw_tp")
293 __failure __msg("value is outside of the allowed memory range")
294 int data_slice_out_of_bounds_map_value(void *ctx)
295 {
296 	__u32 map_val;
297 	struct bpf_dynptr ptr;
298 	void *data;
299 
300 	get_map_val_dynptr(&ptr);
301 
302 	data = bpf_dynptr_data(&ptr, 0, sizeof(map_val));
303 	if (!data)
304 		return 0;
305 
306 	/* can't index out of bounds of the data slice */
307 	val = *((char *)data + (sizeof(map_val) + 1));
308 
309 	return 0;
310 }
311 
312 /* A data slice can't be used after it has been released */
313 SEC("?raw_tp")
314 __failure __msg("invalid mem access 'scalar'")
315 int data_slice_use_after_release1(void *ctx)
316 {
317 	struct bpf_dynptr ptr;
318 	struct sample *sample;
319 
320 	bpf_ringbuf_reserve_dynptr(&ringbuf, sizeof(*sample), 0, &ptr);
321 	sample = bpf_dynptr_data(&ptr, 0, sizeof(*sample));
322 	if (!sample)
323 		goto done;
324 
325 	sample->pid = 123;
326 
327 	bpf_ringbuf_submit_dynptr(&ptr, 0);
328 
329 	/* this should fail */
330 	val = sample->pid;
331 
332 	return 0;
333 
334 done:
335 	bpf_ringbuf_discard_dynptr(&ptr, 0);
336 	return 0;
337 }
338 
339 /* A data slice can't be used after it has been released.
340  *
341  * This tests the case where the data slice tracks a dynptr (ptr2)
342  * that is at a non-zero offset from the frame pointer (ptr1 is at fp,
343  * ptr2 is at fp - 16).
344  */
345 SEC("?raw_tp")
346 __failure __msg("invalid mem access 'scalar'")
347 int data_slice_use_after_release2(void *ctx)
348 {
349 	struct bpf_dynptr ptr1, ptr2;
350 	struct sample *sample;
351 
352 	bpf_ringbuf_reserve_dynptr(&ringbuf, 64, 0, &ptr1);
353 	bpf_ringbuf_reserve_dynptr(&ringbuf, sizeof(*sample), 0, &ptr2);
354 
355 	sample = bpf_dynptr_data(&ptr2, 0, sizeof(*sample));
356 	if (!sample)
357 		goto done;
358 
359 	sample->pid = 23;
360 
361 	bpf_ringbuf_submit_dynptr(&ptr2, 0);
362 
363 	/* this should fail */
364 	sample->pid = 23;
365 
366 	bpf_ringbuf_submit_dynptr(&ptr1, 0);
367 
368 	return 0;
369 
370 done:
371 	bpf_ringbuf_discard_dynptr(&ptr2, 0);
372 	bpf_ringbuf_discard_dynptr(&ptr1, 0);
373 	return 0;
374 }
375 
376 /* A data slice must be first checked for NULL */
377 SEC("?raw_tp")
378 __failure __msg("invalid mem access 'mem_or_null'")
379 int data_slice_missing_null_check1(void *ctx)
380 {
381 	struct bpf_dynptr ptr;
382 	void *data;
383 
384 	bpf_ringbuf_reserve_dynptr(&ringbuf, 8, 0, &ptr);
385 
386 	data = bpf_dynptr_data(&ptr, 0, 8);
387 
388 	/* missing if (!data) check */
389 
390 	/* this should fail */
391 	*(__u8 *)data = 3;
392 
393 	bpf_ringbuf_submit_dynptr(&ptr, 0);
394 	return 0;
395 }
396 
397 /* A data slice can't be dereferenced if it wasn't checked for null */
398 SEC("?raw_tp")
399 __failure __msg("invalid mem access 'mem_or_null'")
400 int data_slice_missing_null_check2(void *ctx)
401 {
402 	struct bpf_dynptr ptr;
403 	__u64 *data1, *data2;
404 
405 	bpf_ringbuf_reserve_dynptr(&ringbuf, 16, 0, &ptr);
406 
407 	data1 = bpf_dynptr_data(&ptr, 0, 8);
408 	data2 = bpf_dynptr_data(&ptr, 0, 8);
409 	if (data1)
410 		/* this should fail */
411 		*data2 = 3;
412 
413 	bpf_ringbuf_discard_dynptr(&ptr, 0);
414 	return 0;
415 }
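
/* Illustrative sketch (not an upstream test): dereferencing the same pointer
 * that was NULL-checked satisfies the verifier.
 */
SEC("?raw_tp")
__success
int data_slice_null_check_example(void *ctx)
{
	struct bpf_dynptr ptr;
	__u8 *data;

	bpf_ringbuf_reserve_dynptr(&ringbuf, 8, 0, &ptr);

	data = bpf_dynptr_data(&ptr, 0, 8);
	if (data)
		*data = 3;

	bpf_ringbuf_submit_dynptr(&ptr, 0);
	return 0;
}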
416 
417 /* Can't pass in a dynptr as an arg to a helper function that doesn't take in a
418  * dynptr argument
419  */
420 SEC("?raw_tp")
421 __failure __msg("invalid read from stack")
422 int invalid_helper1(void *ctx)
423 {
424 	struct bpf_dynptr ptr;
425 
426 	get_map_val_dynptr(&ptr);
427 
428 	/* this should fail */
429 	bpf_strncmp((const char *)&ptr, sizeof(ptr), "hello!");
430 
431 	return 0;
432 }
433 
434 /* A dynptr can't be passed into a helper function at a non-zero offset */
435 SEC("?raw_tp")
436 __failure __msg("cannot pass in dynptr at an offset=-8")
437 int invalid_helper2(void *ctx)
438 {
439 	struct bpf_dynptr ptr;
440 	char read_data[64];
441 
442 	get_map_val_dynptr(&ptr);
443 
444 	/* this should fail */
445 	bpf_dynptr_read(read_data, sizeof(read_data), (void *)&ptr + 8, 0, 0);
446 	return 0;
447 }
448 
449 /* A bpf_dynptr is invalidated if it's been written into */
450 SEC("?raw_tp")
451 __failure __msg("Expected an initialized dynptr as arg #0")
452 int invalid_write1(void *ctx)
453 {
454 	struct bpf_dynptr ptr;
455 	void *data;
456 	__u8 x = 0;
457 
458 	get_map_val_dynptr(&ptr);
459 
460 	memcpy(&ptr, &x, sizeof(x));
461 
462 	/* this should fail */
463 	data = bpf_dynptr_data(&ptr, 0, 1);
464 	__sink(data);
465 
466 	return 0;
467 }
468 
469 /*
470  * A bpf_dynptr can't be used as a dynptr if it has been written into at a fixed
471  * offset
472  */
473 SEC("?raw_tp")
474 __failure __msg("cannot overwrite referenced dynptr")
475 int invalid_write2(void *ctx)
476 {
477 	struct bpf_dynptr ptr;
478 	char read_data[64];
479 	__u8 x = 0;
480 
481 	bpf_ringbuf_reserve_dynptr(&ringbuf, 64, 0, &ptr);
482 
483 	memcpy((void *)&ptr + 8, &x, sizeof(x));
484 
485 	/* this should fail */
486 	bpf_dynptr_read(read_data, sizeof(read_data), &ptr, 0, 0);
487 
488 	bpf_ringbuf_submit_dynptr(&ptr, 0);
489 
490 	return 0;
491 }
492 
493 /*
494  * A bpf_dynptr can't be used as a dynptr if it has been written into at a
495  * non-const offset
496  */
497 SEC("?raw_tp")
498 __failure __msg("cannot overwrite referenced dynptr")
499 int invalid_write3(void *ctx)
500 {
501 	struct bpf_dynptr ptr;
502 	char stack_buf[16];
503 	unsigned long len;
504 	__u8 x = 0;
505 
506 	bpf_ringbuf_reserve_dynptr(&ringbuf, 8, 0, &ptr);
507 
508 	memcpy(stack_buf, &val, sizeof(val));
509 	len = stack_buf[0] & 0xf;
510 
511 	memcpy((void *)&ptr + len, &x, sizeof(x));
512 
513 	/* this should fail */
514 	bpf_ringbuf_submit_dynptr(&ptr, 0);
515 
516 	return 0;
517 }
518 
519 static int invalid_write4_callback(__u32 index, void *data)
520 {
521 	*(__u32 *)data = 123;
522 
523 	return 0;
524 }
525 
526 /* If the dynptr is written into in a callback function, it should
527  * be invalidated as a dynptr
528  */
529 SEC("?raw_tp")
530 __failure __msg("cannot overwrite referenced dynptr")
531 int invalid_write4(void *ctx)
532 {
533 	struct bpf_dynptr ptr;
534 
535 	bpf_ringbuf_reserve_dynptr(&ringbuf, 64, 0, &ptr);
536 
537 	bpf_loop(10, invalid_write4_callback, &ptr, 0);
538 
539 	/* this should fail */
540 	bpf_ringbuf_submit_dynptr(&ptr, 0);
541 
542 	return 0;
543 }
544 
545 /* A globally-defined bpf_dynptr can't be used (it must reside on the stack) */
546 struct bpf_dynptr global_dynptr;
547 
548 SEC("?raw_tp")
549 __failure __msg("type=map_value expected=fp")
550 int global(void *ctx)
551 {
552 	/* this should fail */
553 	bpf_ringbuf_reserve_dynptr(&ringbuf, 16, 0, &global_dynptr);
554 
555 	bpf_ringbuf_discard_dynptr(&global_dynptr, 0);
556 
557 	return 0;
558 }
559 
560 /* A direct read should fail */
561 SEC("?raw_tp")
562 __failure __msg("invalid read from stack")
563 int invalid_read1(void *ctx)
564 {
565 	struct bpf_dynptr ptr;
566 
567 	bpf_ringbuf_reserve_dynptr(&ringbuf, 64, 0, &ptr);
568 
569 	/* this should fail */
570 	val = *(int *)&ptr;
571 
572 	bpf_ringbuf_discard_dynptr(&ptr, 0);
573 
574 	return 0;
575 }
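
/* Illustrative sketch (not an upstream test): dynptr contents are read
 * through bpf_dynptr_read() rather than by loading the stack slots directly.
 */
SEC("?raw_tp")
__success
int valid_dynptr_read_example(void *ctx)
{
	struct bpf_dynptr ptr;
	char read_data[64];

	bpf_ringbuf_reserve_dynptr(&ringbuf, 64, 0, &ptr);

	bpf_dynptr_read(read_data, sizeof(read_data), &ptr, 0, 0);

	bpf_ringbuf_discard_dynptr(&ptr, 0);

	return 0;
}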
576 
577 /* A direct read at an offset should fail */
578 SEC("?raw_tp")
579 __failure __msg("cannot pass in dynptr at an offset")
580 int invalid_read2(void *ctx)
581 {
582 	struct bpf_dynptr ptr;
583 	char read_data[64];
584 
585 	get_map_val_dynptr(&ptr);
586 
587 	/* this should fail */
588 	bpf_dynptr_read(read_data, sizeof(read_data), (void *)&ptr + 1, 0, 0);
589 
590 	return 0;
591 }
592 
593 /* A direct read at an offset into the lower stack slot should fail */
594 SEC("?raw_tp")
595 __failure __msg("invalid read from stack")
596 int invalid_read3(void *ctx)
597 {
598 	struct bpf_dynptr ptr1, ptr2;
599 
600 	bpf_ringbuf_reserve_dynptr(&ringbuf, 16, 0, &ptr1);
601 	bpf_ringbuf_reserve_dynptr(&ringbuf, 16, 0, &ptr2);
602 
603 	/* this should fail */
604 	memcpy(&val, (void *)&ptr1 + 8, sizeof(val));
605 
606 	bpf_ringbuf_discard_dynptr(&ptr1, 0);
607 	bpf_ringbuf_discard_dynptr(&ptr2, 0);
608 
609 	return 0;
610 }
611 
612 static int invalid_read4_callback(__u32 index, void *data)
613 {
614 	/* this should fail */
615 	val = *(__u32 *)data;
616 
617 	return 0;
618 }
619 
620 /* A direct read within a callback function should fail */
621 SEC("?raw_tp")
622 __failure __msg("invalid read from stack")
623 int invalid_read4(void *ctx)
624 {
625 	struct bpf_dynptr ptr;
626 
627 	bpf_ringbuf_reserve_dynptr(&ringbuf, 64, 0, &ptr);
628 
629 	bpf_loop(10, invalid_read4_callback, &ptr, 0);
630 
631 	bpf_ringbuf_submit_dynptr(&ptr, 0);
632 
633 	return 0;
634 }
635 
636 /* Initializing a dynptr at an offset should fail */
637 SEC("?raw_tp")
638 __failure __msg("cannot pass in dynptr at an offset=0")
639 int invalid_offset(void *ctx)
640 {
641 	struct bpf_dynptr ptr;
642 
643 	/* this should fail */
644 	bpf_ringbuf_reserve_dynptr(&ringbuf, 64, 0, &ptr + 1);
645 
646 	bpf_ringbuf_discard_dynptr(&ptr, 0);
647 
648 	return 0;
649 }
650 
651 /* Can't release a dynptr twice */
652 SEC("?raw_tp")
653 __failure __msg("arg 1 is an unacquired reference")
654 int release_twice(void *ctx)
655 {
656 	struct bpf_dynptr ptr;
657 
658 	bpf_ringbuf_reserve_dynptr(&ringbuf, 16, 0, &ptr);
659 
660 	bpf_ringbuf_discard_dynptr(&ptr, 0);
661 
662 	/* this second release should fail */
663 	bpf_ringbuf_discard_dynptr(&ptr, 0);
664 
665 	return 0;
666 }
667 
668 static int release_twice_callback_fn(__u32 index, void *data)
669 {
670 	/* this should fail */
671 	bpf_ringbuf_discard_dynptr(data, 0);
672 
673 	return 0;
674 }
675 
676 /* Test that releasing a dynptr twice, where one of the releases happens
677  * within a callback function, fails
678  */
679 SEC("?raw_tp")
680 __failure __msg("arg 1 is an unacquired reference")
681 int release_twice_callback(void *ctx)
682 {
683 	struct bpf_dynptr ptr;
684 
685 	bpf_ringbuf_reserve_dynptr(&ringbuf, 32, 0, &ptr);
686 
687 	bpf_ringbuf_discard_dynptr(&ptr, 0);
688 
689 	bpf_loop(10, release_twice_callback_fn, &ptr, 0);
690 
691 	return 0;
692 }
693 
694 /* Reject unsupported local mem types for dynptr_from_mem API */
695 SEC("?raw_tp")
696 __failure __msg("Unsupported reg type fp for bpf_dynptr_from_mem data")
697 int dynptr_from_mem_invalid_api(void *ctx)
698 {
699 	struct bpf_dynptr ptr;
700 	int x = 0;
701 
702 	/* this should fail */
703 	bpf_dynptr_from_mem(&x, sizeof(x), 0, &ptr);
704 
705 	return 0;
706 }
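
/* Illustrative sketch (not an upstream test): map values are a supported
 * source for bpf_dynptr_from_mem(), as done by get_map_val_dynptr() at the
 * top of this file.
 */
SEC("?raw_tp")
__success
int dynptr_from_mem_map_value_example(void *ctx)
{
	struct bpf_dynptr ptr;
	char read_data[4];

	if (get_map_val_dynptr(&ptr))
		return 0;

	bpf_dynptr_read(read_data, sizeof(read_data), &ptr, 0, 0);

	return 0;
}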
707 
708 SEC("?tc")
709 __failure __msg("cannot overwrite referenced dynptr") __log_level(2)
710 int dynptr_pruning_overwrite(struct __sk_buff *ctx)
711 {
712 	asm volatile (
713 		"r9 = 0xeB9F;				\
714 		 r6 = %[ringbuf] ll;			\
715 		 r1 = r6;				\
716 		 r2 = 8;				\
717 		 r3 = 0;				\
718 		 r4 = r10;				\
719 		 r4 += -16;				\
720 		 call %[bpf_ringbuf_reserve_dynptr];	\
721 		 if r0 == 0 goto pjmp1;			\
722 		 goto pjmp2;				\
723 	pjmp1:						\
724 		 *(u64 *)(r10 - 16) = r9;		\
725 	pjmp2:						\
726 		 r1 = r10;				\
727 		 r1 += -16;				\
728 		 r2 = 0;				\
729 		 call %[bpf_ringbuf_discard_dynptr];	"
730 		:
731 		: __imm(bpf_ringbuf_reserve_dynptr),
732 		  __imm(bpf_ringbuf_discard_dynptr),
733 		  __imm_addr(ringbuf)
734 		: __clobber_all
735 	);
736 	return 0;
737 }
738 
739 SEC("?tc")
740 __success __msg("12: safe") __log_level(2)
741 int dynptr_pruning_stacksafe(struct __sk_buff *ctx)
742 {
743 	asm volatile (
744 		"r9 = 0xeB9F;				\
745 		 r6 = %[ringbuf] ll;			\
746 		 r1 = r6;				\
747 		 r2 = 8;				\
748 		 r3 = 0;				\
749 		 r4 = r10;				\
750 		 r4 += -16;				\
751 		 call %[bpf_ringbuf_reserve_dynptr];	\
752 		 if r0 == 0 goto stjmp1;		\
753 		 goto stjmp2;				\
754 	stjmp1:						\
755 		 r9 = r9;				\
756 	stjmp2:						\
757 		 r1 = r10;				\
758 		 r1 += -16;				\
759 		 r2 = 0;				\
760 		 call %[bpf_ringbuf_discard_dynptr];	"
761 		:
762 		: __imm(bpf_ringbuf_reserve_dynptr),
763 		  __imm(bpf_ringbuf_discard_dynptr),
764 		  __imm_addr(ringbuf)
765 		: __clobber_all
766 	);
767 	return 0;
768 }
769 
770 SEC("?tc")
771 __failure __msg("cannot overwrite referenced dynptr") __log_level(2)
772 int dynptr_pruning_type_confusion(struct __sk_buff *ctx)
773 {
774 	asm volatile (
775 		"r6 = %[array_map4] ll;			\
776 		 r7 = %[ringbuf] ll;			\
777 		 r1 = r6;				\
778 		 r2 = r10;				\
779 		 r2 += -8;				\
780 		 r9 = 0;				\
781 		 *(u64 *)(r2 + 0) = r9;			\
782 		 r3 = r10;				\
783 		 r3 += -24;				\
784 		 r9 = 0xeB9FeB9F;			\
785 		 *(u64 *)(r10 - 16) = r9;		\
786 		 *(u64 *)(r10 - 24) = r9;		\
787 		 r9 = 0;				\
788 		 r4 = 0;				\
789 		 r8 = r2;				\
790 		 call %[bpf_map_update_elem];		\
791 		 r1 = r6;				\
792 		 r2 = r8;				\
793 		 call %[bpf_map_lookup_elem];		\
794 		 if r0 != 0 goto tjmp1;			\
795 		 exit;					\
796 	tjmp1:						\
797 		 r8 = r0;				\
798 		 r1 = r7;				\
799 		 r2 = 8;				\
800 		 r3 = 0;				\
801 		 r4 = r10;				\
802 		 r4 += -16;				\
803 		 r0 = *(u64 *)(r0 + 0);			\
804 		 call %[bpf_ringbuf_reserve_dynptr];	\
805 		 if r0 == 0 goto tjmp2;			\
806 		 r8 = r8;				\
807 		 r8 = r8;				\
808 		 r8 = r8;				\
809 		 r8 = r8;				\
810 		 r8 = r8;				\
811 		 r8 = r8;				\
812 		 r8 = r8;				\
813 		 goto tjmp3;				\
814 	tjmp2:						\
815 		 *(u64 *)(r10 - 8) = r9;		\
816 		 *(u64 *)(r10 - 16) = r9;		\
817 		 r1 = r8;				\
818 		 r1 += 8;				\
819 		 r2 = 0;				\
820 		 r3 = 0;				\
821 		 r4 = r10;				\
822 		 r4 += -16;				\
823 		 call %[bpf_dynptr_from_mem];		\
824 	tjmp3:						\
825 		 r1 = r10;				\
826 		 r1 += -16;				\
827 		 r2 = 0;				\
828 		 call %[bpf_ringbuf_discard_dynptr];	"
829 		:
830 		: __imm(bpf_map_update_elem),
831 		  __imm(bpf_map_lookup_elem),
832 		  __imm(bpf_ringbuf_reserve_dynptr),
833 		  __imm(bpf_dynptr_from_mem),
834 		  __imm(bpf_ringbuf_discard_dynptr),
835 		  __imm_addr(array_map4),
836 		  __imm_addr(ringbuf)
837 		: __clobber_all
838 	);
839 	return 0;
840 }
841 
842 SEC("?tc")
843 __failure __msg("dynptr has to be at a constant offset") __log_level(2)
844 int dynptr_var_off_overwrite(struct __sk_buff *ctx)
845 {
846 	asm volatile (
847 		"r9 = 16;				\
848 		 *(u32 *)(r10 - 4) = r9;		\
849 		 r8 = *(u32 *)(r10 - 4);		\
850 		 if r8 >= 0 goto vjmp1;			\
851 		 r0 = 1;				\
852 		 exit;					\
853 	vjmp1:						\
854 		 if r8 <= 16 goto vjmp2;		\
855 		 r0 = 1;				\
856 		 exit;					\
857 	vjmp2:						\
858 		 r8 &= 16;				\
859 		 r1 = %[ringbuf] ll;			\
860 		 r2 = 8;				\
861 		 r3 = 0;				\
862 		 r4 = r10;				\
863 		 r4 += -32;				\
864 		 r4 += r8;				\
865 		 call %[bpf_ringbuf_reserve_dynptr];	\
866 		 r9 = 0xeB9F;				\
867 		 *(u64 *)(r10 - 16) = r9;		\
868 		 r1 = r10;				\
869 		 r1 += -32;				\
870 		 r1 += r8;				\
871 		 r2 = 0;				\
872 		 call %[bpf_ringbuf_discard_dynptr];	"
873 		:
874 		: __imm(bpf_ringbuf_reserve_dynptr),
875 		  __imm(bpf_ringbuf_discard_dynptr),
876 		  __imm_addr(ringbuf)
877 		: __clobber_all
878 	);
879 	return 0;
880 }
881 
882 SEC("?tc")
883 __failure __msg("cannot overwrite referenced dynptr") __log_level(2)
884 int dynptr_partial_slot_invalidate(struct __sk_buff *ctx)
885 {
886 	asm volatile (
887 		"r6 = %[ringbuf] ll;			\
888 		 r7 = %[array_map4] ll;			\
889 		 r1 = r7;				\
890 		 r2 = r10;				\
891 		 r2 += -8;				\
892 		 r9 = 0;				\
893 		 *(u64 *)(r2 + 0) = r9;			\
894 		 r3 = r2;				\
895 		 r4 = 0;				\
896 		 r8 = r2;				\
897 		 call %[bpf_map_update_elem];		\
898 		 r1 = r7;				\
899 		 r2 = r8;				\
900 		 call %[bpf_map_lookup_elem];		\
901 		 if r0 != 0 goto sjmp1;			\
902 		 exit;					\
903 	sjmp1:						\
904 		 r7 = r0;				\
905 		 r1 = r6;				\
906 		 r2 = 8;				\
907 		 r3 = 0;				\
908 		 r4 = r10;				\
909 		 r4 += -24;				\
910 		 call %[bpf_ringbuf_reserve_dynptr];	\
911 		 *(u64 *)(r10 - 16) = r9;		\
912 		 r1 = r7;				\
913 		 r2 = 8;				\
914 		 r3 = 0;				\
915 		 r4 = r10;				\
916 		 r4 += -16;				\
917 		 call %[bpf_dynptr_from_mem];		\
918 		 r1 = r10;				\
919 		 r1 += -512;				\
920 		 r2 = 488;				\
921 		 r3 = r10;				\
922 		 r3 += -24;				\
923 		 r4 = 0;				\
924 		 r5 = 0;				\
925 		 call %[bpf_dynptr_read];		\
926 		 r8 = 1;				\
927 		 if r0 != 0 goto sjmp2;			\
928 		 r8 = 0;				\
929 	sjmp2:						\
930 		 r1 = r10;				\
931 		 r1 += -24;				\
932 		 r2 = 0;				\
933 		 call %[bpf_ringbuf_discard_dynptr];	"
934 		:
935 		: __imm(bpf_map_update_elem),
936 		  __imm(bpf_map_lookup_elem),
937 		  __imm(bpf_ringbuf_reserve_dynptr),
938 		  __imm(bpf_ringbuf_discard_dynptr),
939 		  __imm(bpf_dynptr_from_mem),
940 		  __imm(bpf_dynptr_read),
941 		  __imm_addr(ringbuf),
942 		  __imm_addr(array_map4)
943 		: __clobber_all
944 	);
945 	return 0;
946 }
947 
948 /* Test that it is allowed to overwrite unreferenced dynptr. */
949 SEC("?raw_tp")
950 __success
951 int dynptr_overwrite_unref(void *ctx)
952 {
953 	struct bpf_dynptr ptr;
954 
955 	if (get_map_val_dynptr(&ptr))
956 		return 0;
957 	if (get_map_val_dynptr(&ptr))
958 		return 0;
959 	if (get_map_val_dynptr(&ptr))
960 		return 0;
961 
962 	return 0;
963 }
964 
965 /* Test that slices are invalidated on reinitializing a dynptr. */
966 SEC("?raw_tp")
967 __failure __msg("invalid mem access 'scalar'")
968 int dynptr_invalidate_slice_reinit(void *ctx)
969 {
970 	struct bpf_dynptr ptr;
971 	__u8 *p;
972 
973 	if (get_map_val_dynptr(&ptr))
974 		return 0;
975 	p = bpf_dynptr_data(&ptr, 0, 1);
976 	if (!p)
977 		return 0;
978 	if (get_map_val_dynptr(&ptr))
979 		return 0;
980 	/* this should fail */
981 	return *p;
982 }
983 
984 /* Invalidation of dynptr slices on destruction of dynptr should not miss
985  * mem_or_null pointers.
986  */
987 SEC("?raw_tp")
988 __failure __msg("R{{[0-9]+}} type=scalar expected=percpu_ptr_")
989 int dynptr_invalidate_slice_or_null(void *ctx)
990 {
991 	struct bpf_dynptr ptr;
992 	__u8 *p;
993 
994 	if (get_map_val_dynptr(&ptr))
995 		return 0;
996 
997 	p = bpf_dynptr_data(&ptr, 0, 1);
998 	*(__u8 *)&ptr = 0;
999 	/* this should fail */
1000 	bpf_this_cpu_ptr(p);
1001 	return 0;
1002 }
1003 
1004 /* Destruction of a dynptr should also invalidate any slices obtained from it */
1005 SEC("?raw_tp")
1006 __failure __msg("R{{[0-9]+}} invalid mem access 'scalar'")
1007 int dynptr_invalidate_slice_failure(void *ctx)
1008 {
1009 	struct bpf_dynptr ptr1;
1010 	struct bpf_dynptr ptr2;
1011 	__u8 *p1, *p2;
1012 
1013 	if (get_map_val_dynptr(&ptr1))
1014 		return 0;
1015 	if (get_map_val_dynptr(&ptr2))
1016 		return 0;
1017 
1018 	p1 = bpf_dynptr_data(&ptr1, 0, 1);
1019 	if (!p1)
1020 		return 0;
1021 	p2 = bpf_dynptr_data(&ptr2, 0, 1);
1022 	if (!p2)
1023 		return 0;
1024 
1025 	*(__u8 *)&ptr1 = 0;
1026 	/* this should fail */
1027 	return *p1;
1028 }
1029 
1030 /* Invalidation of slices should be scoped and should not prevent dereferencing
1031  * slices of another dynptr after destroying unrelated dynptr
1032  */
1033 SEC("?raw_tp")
1034 __success
1035 int dynptr_invalidate_slice_success(void *ctx)
1036 {
1037 	struct bpf_dynptr ptr1;
1038 	struct bpf_dynptr ptr2;
1039 	__u8 *p1, *p2;
1040 
1041 	if (get_map_val_dynptr(&ptr1))
1042 		return 1;
1043 	if (get_map_val_dynptr(&ptr2))
1044 		return 1;
1045 
1046 	p1 = bpf_dynptr_data(&ptr1, 0, 1);
1047 	if (!p1)
1048 		return 1;
1049 	p2 = bpf_dynptr_data(&ptr2, 0, 1);
1050 	if (!p2)
1051 		return 1;
1052 
1053 	*(__u8 *)&ptr1 = 0;
1054 	return *p2;
1055 }
1056 
1057 /* Overwriting referenced dynptr should be rejected */
1058 SEC("?raw_tp")
1059 __failure __msg("cannot overwrite referenced dynptr")
1060 int dynptr_overwrite_ref(void *ctx)
1061 {
1062 	struct bpf_dynptr ptr;
1063 
1064 	bpf_ringbuf_reserve_dynptr(&ringbuf, 64, 0, &ptr);
1065 	/* this should fail */
1066 	if (get_map_val_dynptr(&ptr))
1067 		bpf_ringbuf_discard_dynptr(&ptr, 0);
1068 	return 0;
1069 }
1070 
1071 /* Reject writes to dynptr slot from bpf_dynptr_read */
1072 SEC("?raw_tp")
1073 __failure __msg("potential write to dynptr at off=-16")
1074 int dynptr_read_into_slot(void *ctx)
1075 {
1076 	union {
1077 		struct {
1078 			char _pad[48];
1079 			struct bpf_dynptr ptr;
1080 		};
1081 		char buf[64];
1082 	} data;
1083 
1084 	bpf_ringbuf_reserve_dynptr(&ringbuf, 64, 0, &data.ptr);
1085 	/* this should fail */
1086 	bpf_dynptr_read(data.buf, sizeof(data.buf), &data.ptr, 0, 0);
1087 
1088 	return 0;
1089 }
1090 
1091 /* bpf_dynptr_slice()s are read-only and cannot be written to */
1092 SEC("?tc")
1093 __failure __msg("R{{[0-9]+}} cannot write into rdonly_mem")
1094 int skb_invalid_slice_write(struct __sk_buff *skb)
1095 {
1096 	struct bpf_dynptr ptr;
1097 	struct ethhdr *hdr;
1098 	char buffer[sizeof(*hdr)] = {};
1099 
1100 	bpf_dynptr_from_skb(skb, 0, &ptr);
1101 
1102 	hdr = bpf_dynptr_slice(&ptr, 0, buffer, sizeof(buffer));
1103 	if (!hdr)
1104 		return SK_DROP;
1105 
1106 	/* this should fail */
1107 	hdr->h_proto = 1;
1108 
1109 	return SK_PASS;
1110 }
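
/* Illustrative sketch (not an upstream test): writes go through
 * bpf_dynptr_slice_rdwr() (or bpf_dynptr_write()) instead of a read-only
 * slice, and are accepted for tc programs.
 */
SEC("?tc")
__success
int skb_slice_rdwr_write_example(struct __sk_buff *skb)
{
	struct bpf_dynptr ptr;
	struct ethhdr *hdr;
	char buffer[sizeof(*hdr)] = {};

	bpf_dynptr_from_skb(skb, 0, &ptr);

	hdr = bpf_dynptr_slice_rdwr(&ptr, 0, buffer, sizeof(buffer));
	if (!hdr)
		return SK_DROP;

	hdr->h_proto = 1;

	return SK_PASS;
}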
1111 
1112 /* bpf_dynptr_slice()s are read-only and cannot be written to */
1113 SEC("?tc")
1114 __failure __msg("R{{[0-9]+}} cannot write into rdonly_mem")
1115 int skb_meta_invalid_slice_write(struct __sk_buff *skb)
1116 {
1117 	struct bpf_dynptr meta;
1118 	__u8 *md;
1119 
1120 	bpf_dynptr_from_skb_meta(skb, 0, &meta);
1121 
1122 	md = bpf_dynptr_slice(&meta, 0, NULL, sizeof(*md));
1123 	if (!md)
1124 		return SK_DROP;
1125 
1126 	/* this should fail */
1127 	*md = 42;
1128 
1129 	return SK_PASS;
1130 }
1131 
1132 /* The read-only data slice is invalidated whenever a helper changes packet data */
1133 SEC("?tc")
1134 __failure __msg("invalid mem access 'scalar'")
1135 int skb_invalid_data_slice1(struct __sk_buff *skb)
1136 {
1137 	struct bpf_dynptr ptr;
1138 	struct ethhdr *hdr;
1139 	char buffer[sizeof(*hdr)] = {};
1140 
1141 	bpf_dynptr_from_skb(skb, 0, &ptr);
1142 
1143 	hdr = bpf_dynptr_slice(&ptr, 0, buffer, sizeof(buffer));
1144 	if (!hdr)
1145 		return SK_DROP;
1146 
1147 	val = hdr->h_proto;
1148 
1149 	if (bpf_skb_pull_data(skb, skb->len))
1150 		return SK_DROP;
1151 
1152 	/* this should fail */
1153 	val = hdr->h_proto;
1154 
1155 	return SK_PASS;
1156 }
1157 
1158 /* The read-write data slice is invalidated whenever a helper changes packet data */
1159 SEC("?tc")
1160 __failure __msg("invalid mem access 'scalar'")
1161 int skb_invalid_data_slice2(struct __sk_buff *skb)
1162 {
1163 	struct bpf_dynptr ptr;
1164 	struct ethhdr *hdr;
1165 	char buffer[sizeof(*hdr)] = {};
1166 
1167 	bpf_dynptr_from_skb(skb, 0, &ptr);
1168 
1169 	hdr = bpf_dynptr_slice_rdwr(&ptr, 0, buffer, sizeof(buffer));
1170 	if (!hdr)
1171 		return SK_DROP;
1172 
1173 	hdr->h_proto = 123;
1174 
1175 	if (bpf_skb_pull_data(skb, skb->len))
1176 		return SK_DROP;
1177 
1178 	/* this should fail */
1179 	hdr->h_proto = 1;
1180 
1181 	return SK_PASS;
1182 }
1183 
1184 /* The read-only data slice is invalidated whenever bpf_dynptr_write() is called */
1185 SEC("?tc")
1186 __failure __msg("invalid mem access 'scalar'")
1187 int skb_invalid_data_slice3(struct __sk_buff *skb)
1188 {
1189 	char write_data[64] = "hello there, world!!";
1190 	struct bpf_dynptr ptr;
1191 	struct ethhdr *hdr;
1192 	char buffer[sizeof(*hdr)] = {};
1193 
1194 	bpf_dynptr_from_skb(skb, 0, &ptr);
1195 
1196 	hdr = bpf_dynptr_slice(&ptr, 0, buffer, sizeof(buffer));
1197 	if (!hdr)
1198 		return SK_DROP;
1199 
1200 	val = hdr->h_proto;
1201 
1202 	bpf_dynptr_write(&ptr, 0, write_data, sizeof(write_data), 0);
1203 
1204 	/* this should fail */
1205 	val = hdr->h_proto;
1206 
1207 	return SK_PASS;
1208 }
1209 
1210 /* The read-write data slice is invalidated whenever bpf_dynptr_write() is called */
1211 SEC("?tc")
1212 __failure __msg("invalid mem access 'scalar'")
1213 int skb_invalid_data_slice4(struct __sk_buff *skb)
1214 {
1215 	char write_data[64] = "hello there, world!!";
1216 	struct bpf_dynptr ptr;
1217 	struct ethhdr *hdr;
1218 	char buffer[sizeof(*hdr)] = {};
1219 
1220 	bpf_dynptr_from_skb(skb, 0, &ptr);
1221 	hdr = bpf_dynptr_slice_rdwr(&ptr, 0, buffer, sizeof(buffer));
1222 	if (!hdr)
1223 		return SK_DROP;
1224 
1225 	hdr->h_proto = 123;
1226 
1227 	bpf_dynptr_write(&ptr, 0, write_data, sizeof(write_data), 0);
1228 
1229 	/* this should fail */
1230 	hdr->h_proto = 1;
1231 
1232 	return SK_PASS;
1233 }
1234 
1235 /* Read-only skb data slice is invalidated on write to skb metadata */
1236 SEC("?tc")
1237 __failure __msg("invalid mem access 'scalar'")
1238 int ro_skb_slice_invalid_after_metadata_write(struct __sk_buff *skb)
1239 {
1240 	struct bpf_dynptr data, meta;
1241 	__u8 *d;
1242 
1243 	bpf_dynptr_from_skb(skb, 0, &data);
1244 	bpf_dynptr_from_skb_meta(skb, 0, &meta);
1245 
1246 	d = bpf_dynptr_slice(&data, 0, NULL, sizeof(*d));
1247 	if (!d)
1248 		return SK_DROP;
1249 
1250 	bpf_dynptr_write(&meta, 0, "x", 1, 0);
1251 
1252 	/* this should fail */
1253 	val = *d;
1254 
1255 	return SK_PASS;
1256 }
1257 
1258 /* Read-write skb data slice is invalidated on write to skb metadata */
1259 SEC("?tc")
1260 __failure __msg("invalid mem access 'scalar'")
1261 int rw_skb_slice_invalid_after_metadata_write(struct __sk_buff *skb)
1262 {
1263 	struct bpf_dynptr data, meta;
1264 	__u8 *d;
1265 
1266 	bpf_dynptr_from_skb(skb, 0, &data);
1267 	bpf_dynptr_from_skb_meta(skb, 0, &meta);
1268 
1269 	d = bpf_dynptr_slice_rdwr(&data, 0, NULL, sizeof(*d));
1270 	if (!d)
1271 		return SK_DROP;
1272 
1273 	bpf_dynptr_write(&meta, 0, "x", 1, 0);
1274 
1275 	/* this should fail */
1276 	*d = 42;
1277 
1278 	return SK_PASS;
1279 }
1280 
1281 /* Read-only skb metadata slice is invalidated on write to skb data */
1282 SEC("?tc")
1283 __failure __msg("invalid mem access 'scalar'")
1284 int ro_skb_meta_slice_invalid_after_payload_write(struct __sk_buff *skb)
1285 {
1286 	struct bpf_dynptr data, meta;
1287 	__u8 *md;
1288 
1289 	bpf_dynptr_from_skb(skb, 0, &data);
1290 	bpf_dynptr_from_skb_meta(skb, 0, &meta);
1291 
1292 	md = bpf_dynptr_slice(&meta, 0, NULL, sizeof(*md));
1293 	if (!md)
1294 		return SK_DROP;
1295 
1296 	bpf_dynptr_write(&data, 0, "x", 1, 0);
1297 
1298 	/* this should fail */
1299 	val = *md;
1300 
1301 	return SK_PASS;
1302 }
1303 
1304 /* Read-write skb metadata slice is invalidated on write to skb data slice */
1305 SEC("?tc")
1306 __failure __msg("invalid mem access 'scalar'")
1307 int rw_skb_meta_slice_invalid_after_payload_write(struct __sk_buff *skb)
1308 {
1309 	struct bpf_dynptr data, meta;
1310 	__u8 *md;
1311 
1312 	bpf_dynptr_from_skb(skb, 0, &data);
1313 	bpf_dynptr_from_skb_meta(skb, 0, &meta);
1314 
1315 	md = bpf_dynptr_slice_rdwr(&meta, 0, NULL, sizeof(*md));
1316 	if (!md)
1317 		return SK_DROP;
1318 
1319 	bpf_dynptr_write(&data, 0, "x", 1, 0);
1320 
1321 	/* this should fail */
1322 	*md = 42;
1323 
1324 	return SK_PASS;
1325 }
1326 
1327 /* Read-only skb metadata slice is invalidated whenever a helper changes packet data */
1328 SEC("?tc")
1329 __failure __msg("invalid mem access 'scalar'")
1330 int ro_skb_meta_slice_invalid_after_payload_helper(struct __sk_buff *skb)
1331 {
1332 	struct bpf_dynptr meta;
1333 	__u8 *md;
1334 
1335 	bpf_dynptr_from_skb_meta(skb, 0, &meta);
1336 
1337 	md = bpf_dynptr_slice(&meta, 0, NULL, sizeof(*md));
1338 	if (!md)
1339 		return SK_DROP;
1340 
1341 	if (bpf_skb_pull_data(skb, skb->len))
1342 		return SK_DROP;
1343 
1344 	/* this should fail */
1345 	val = *md;
1346 
1347 	return SK_PASS;
1348 }
1349 
1350 /* Read-write skb metadata slice is invalidated whenever a helper changes packet data */
1351 SEC("?tc")
1352 __failure __msg("invalid mem access 'scalar'")
1353 int rw_skb_meta_slice_invalid_after_payload_helper(struct __sk_buff *skb)
1354 {
1355 	struct bpf_dynptr meta;
1356 	__u8 *md;
1357 
1358 	bpf_dynptr_from_skb_meta(skb, 0, &meta);
1359 
1360 	md = bpf_dynptr_slice_rdwr(&meta, 0, NULL, sizeof(*md));
1361 	if (!md)
1362 		return SK_DROP;
1363 
1364 	if (bpf_skb_pull_data(skb, skb->len))
1365 		return SK_DROP;
1366 
1367 	/* this should fail */
1368 	*md = 42;
1369 
1370 	return SK_PASS;
1371 }
1372 
1373 /* Read-only skb metadata slice is invalidated on write to skb metadata */
1374 SEC("?tc")
1375 __failure __msg("invalid mem access 'scalar'")
1376 int ro_skb_meta_slice_invalid_after_metadata_write(struct __sk_buff *skb)
1377 {
1378 	struct bpf_dynptr meta;
1379 	__u8 *md;
1380 
1381 	bpf_dynptr_from_skb_meta(skb, 0, &meta);
1382 
1383 	md = bpf_dynptr_slice(&meta, 0, NULL, sizeof(*md));
1384 	if (!md)
1385 		return SK_DROP;
1386 
1387 	bpf_dynptr_write(&meta, 0, "x", 1, 0);
1388 
1389 	/* this should fail */
1390 	val = *md;
1391 
1392 	return SK_PASS;
1393 }
1394 
1395 /* Read-write skb metadata slice is invalidated on write to skb metadata */
1396 SEC("?tc")
1397 __failure __msg("invalid mem access 'scalar'")
1398 int rw_skb_meta_slice_invalid_after_metadata_write(struct __sk_buff *skb)
1399 {
1400 	struct bpf_dynptr meta;
1401 	__u8 *md;
1402 
1403 	bpf_dynptr_from_skb_meta(skb, 0, &meta);
1404 
1405 	md = bpf_dynptr_slice_rdwr(&meta, 0, NULL, sizeof(*md));
1406 	if (!md)
1407 		return SK_DROP;
1408 
1409 	bpf_dynptr_write(&meta, 0, "x", 1, 0);
1410 
1411 	/* this should fail */
1412 	*md = 42;
1413 
1414 	return SK_PASS;
1415 }
1416 
1417 /* The read-only data slice is invalidated whenever a helper changes packet data */
1418 SEC("?xdp")
1419 __failure __msg("invalid mem access 'scalar'")
1420 int xdp_invalid_data_slice1(struct xdp_md *xdp)
1421 {
1422 	struct bpf_dynptr ptr;
1423 	struct ethhdr *hdr;
1424 	char buffer[sizeof(*hdr)] = {};
1425 
1426 	bpf_dynptr_from_xdp(xdp, 0, &ptr);
1427 	hdr = bpf_dynptr_slice(&ptr, 0, buffer, sizeof(buffer));
1428 	if (!hdr)
1429 		return XDP_DROP;
1430 
1431 	val = hdr->h_proto;
1432 
1433 	if (bpf_xdp_adjust_head(xdp, 0 - (int)sizeof(*hdr)))
1434 		return XDP_DROP;
1435 
1436 	/* this should fail */
1437 	val = hdr->h_proto;
1438 
1439 	return XDP_PASS;
1440 }
1441 
1442 /* The read-write data slice is invalidated whenever a helper changes packet data */
1443 SEC("?xdp")
1444 __failure __msg("invalid mem access 'scalar'")
1445 int xdp_invalid_data_slice2(struct xdp_md *xdp)
1446 {
1447 	struct bpf_dynptr ptr;
1448 	struct ethhdr *hdr;
1449 	char buffer[sizeof(*hdr)] = {};
1450 
1451 	bpf_dynptr_from_xdp(xdp, 0, &ptr);
1452 	hdr = bpf_dynptr_slice_rdwr(&ptr, 0, buffer, sizeof(buffer));
1453 	if (!hdr)
1454 		return XDP_DROP;
1455 
1456 	hdr->h_proto = 9;
1457 
1458 	if (bpf_xdp_adjust_head(xdp, 0 - (int)sizeof(*hdr)))
1459 		return XDP_DROP;
1460 
1461 	/* this should fail */
1462 	hdr->h_proto = 1;
1463 
1464 	return XDP_PASS;
1465 }
1466 
1467 /* Only supported prog types can create skb-type dynptrs */
1468 SEC("?raw_tp")
1469 __failure __msg("calling kernel function bpf_dynptr_from_skb is not allowed")
1470 int skb_invalid_ctx(void *ctx)
1471 {
1472 	struct bpf_dynptr ptr;
1473 
1474 	/* this should fail */
1475 	bpf_dynptr_from_skb(ctx, 0, &ptr);
1476 
1477 	return 0;
1478 }
1479 
1480 /* Only supported prog types can create skb_meta-type dynptrs */
1481 SEC("?raw_tp")
1482 __failure __msg("calling kernel function bpf_dynptr_from_skb_meta is not allowed")
1483 int skb_meta_invalid_ctx(void *ctx)
1484 {
1485 	struct bpf_dynptr meta;
1486 
1487 	/* this should fail */
1488 	bpf_dynptr_from_skb_meta(ctx, 0, &meta);
1489 
1490 	return 0;
1491 }
1492 
1493 SEC("fentry/skb_tx_error")
1494 __failure __msg("must be referenced or trusted")
1495 int BPF_PROG(skb_invalid_ctx_fentry, void *skb)
1496 {
1497 	struct bpf_dynptr ptr;
1498 
1499 	/* this should fail */
1500 	bpf_dynptr_from_skb(skb, 0, &ptr);
1501 
1502 	return 0;
1503 }
1504 
1505 SEC("fexit/skb_tx_error")
1506 __failure __msg("must be referenced or trusted")
1507 int BPF_PROG(skb_invalid_ctx_fexit, void *skb)
1508 {
1509 	struct bpf_dynptr ptr;
1510 
1511 	/* this should fail */
1512 	bpf_dynptr_from_skb(skb, 0, &ptr);
1513 
1514 	return 0;
1515 }
1516 
1517 /* Reject writes to dynptr slot for uninit arg */
1518 SEC("?raw_tp")
1519 __failure __msg("potential write to dynptr at off=-16")
1520 int uninit_write_into_slot(void *ctx)
1521 {
1522 	struct {
1523 		char buf[64];
1524 		struct bpf_dynptr ptr;
1525 	} data;
1526 
1527 	bpf_ringbuf_reserve_dynptr(&ringbuf, 80, 0, &data.ptr);
1528 	/* this should fail */
1529 	bpf_get_current_comm(data.buf, 80);
1530 
1531 	return 0;
1532 }
1533 
1534 /* Only supported prog types can create xdp-type dynptrs */
1535 SEC("?raw_tp")
1536 __failure __msg("calling kernel function bpf_dynptr_from_xdp is not allowed")
1537 int xdp_invalid_ctx(void *ctx)
1538 {
1539 	struct bpf_dynptr ptr;
1540 
1541 	/* this should fail */
1542 	bpf_dynptr_from_xdp(ctx, 0, &ptr);
1543 
1544 	return 0;
1545 }
1546 
1547 __u32 hdr_size = sizeof(struct ethhdr);
1548 /* Can't pass in variable-sized len to bpf_dynptr_slice */
1549 SEC("?tc")
1550 __failure __msg("unbounded memory access")
1551 int dynptr_slice_var_len1(struct __sk_buff *skb)
1552 {
1553 	struct bpf_dynptr ptr;
1554 	struct ethhdr *hdr;
1555 	char buffer[sizeof(*hdr)] = {};
1556 
1557 	bpf_dynptr_from_skb(skb, 0, &ptr);
1558 
1559 	/* this should fail */
1560 	hdr = bpf_dynptr_slice(&ptr, 0, buffer, hdr_size);
1561 	if (!hdr)
1562 		return SK_DROP;
1563 
1564 	return SK_PASS;
1565 }
1566 
1567 /* Can't pass in variable-sized len to bpf_dynptr_slice */
1568 SEC("?tc")
1569 __failure __msg("must be a known constant")
1570 int dynptr_slice_var_len2(struct __sk_buff *skb)
1571 {
1572 	char buffer[sizeof(struct ethhdr)] = {};
1573 	struct bpf_dynptr ptr;
1574 	struct ethhdr *hdr;
1575 
1576 	bpf_dynptr_from_skb(skb, 0, &ptr);
1577 
1578 	if (hdr_size <= sizeof(buffer)) {
1579 		/* this should fail */
1580 		hdr = bpf_dynptr_slice_rdwr(&ptr, 0, buffer, hdr_size);
1581 		if (!hdr)
1582 			return SK_DROP;
1583 		hdr->h_proto = 12;
1584 	}
1585 
1586 	return SK_PASS;
1587 }
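
/* Illustrative sketch (not an upstream test): a compile-time constant length
 * is what bpf_dynptr_slice() expects.
 */
SEC("?tc")
__success
int dynptr_slice_const_len_example(struct __sk_buff *skb)
{
	char buffer[sizeof(struct ethhdr)] = {};
	struct bpf_dynptr ptr;
	struct ethhdr *hdr;

	bpf_dynptr_from_skb(skb, 0, &ptr);

	hdr = bpf_dynptr_slice(&ptr, 0, buffer, sizeof(buffer));
	if (!hdr)
		return SK_DROP;

	val = hdr->h_proto;

	return SK_PASS;
}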
1588 
1589 static int callback(__u32 index, void *data)
1590 {
1591 	*(__u32 *)data = 123;
1592 
1593 	return 0;
1594 }
1595 
1596 /* If the dynptr is written into in a callback function, its data
1597  * slices should be invalidated as well.
1598  */
1599 SEC("?raw_tp")
1600 __failure __msg("invalid mem access 'scalar'")
1601 int invalid_data_slices(void *ctx)
1602 {
1603 	struct bpf_dynptr ptr;
1604 	__u32 *slice;
1605 
1606 	if (get_map_val_dynptr(&ptr))
1607 		return 0;
1608 
1609 	slice = bpf_dynptr_data(&ptr, 0, sizeof(__u32));
1610 	if (!slice)
1611 		return 0;
1612 
1613 	bpf_loop(10, callback, &ptr, 0);
1614 
1615 	/* this should fail */
1616 	*slice = 1;
1617 
1618 	return 0;
1619 }
1620 
1621 /* Program types that don't allow writes to packet data should fail if
1622  * bpf_dynptr_slice_rdwr is called
1623  */
1624 SEC("cgroup_skb/ingress")
1625 __failure __msg("the prog does not allow writes to packet data")
1626 int invalid_slice_rdwr_rdonly(struct __sk_buff *skb)
1627 {
1628 	char buffer[sizeof(struct ethhdr)] = {};
1629 	struct bpf_dynptr ptr;
1630 	struct ethhdr *hdr;
1631 
1632 	bpf_dynptr_from_skb(skb, 0, &ptr);
1633 
1634 	/* this should fail since cgroup_skb doesn't allow
1635 	 * changing packet data
1636 	 */
1637 	hdr = bpf_dynptr_slice_rdwr(&ptr, 0, buffer, sizeof(buffer));
1638 	__sink(hdr);
1639 
1640 	return 0;
1641 }
1642 
1643 /* bpf_dynptr_adjust can only be called on initialized dynptrs */
1644 SEC("?raw_tp")
1645 __failure __msg("Expected an initialized dynptr as arg #0")
1646 int dynptr_adjust_invalid(void *ctx)
1647 {
1648 	struct bpf_dynptr ptr = {};
1649 
1650 	/* this should fail */
1651 	bpf_dynptr_adjust(&ptr, 1, 2);
1652 
1653 	return 0;
1654 }
1655 
1656 /* bpf_dynptr_is_null can only be called on initialized dynptrs */
1657 SEC("?raw_tp")
1658 __failure __msg("Expected an initialized dynptr as arg #0")
1659 int dynptr_is_null_invalid(void *ctx)
1660 {
1661 	struct bpf_dynptr ptr = {};
1662 
1663 	/* this should fail */
1664 	bpf_dynptr_is_null(&ptr);
1665 
1666 	return 0;
1667 }
1668 
1669 /* bpf_dynptr_is_rdonly can only be called on initialized dynptrs */
1670 SEC("?raw_tp")
1671 __failure __msg("Expected an initialized dynptr as arg #0")
1672 int dynptr_is_rdonly_invalid(void *ctx)
1673 {
1674 	struct bpf_dynptr ptr = {};
1675 
1676 	/* this should fail */
1677 	bpf_dynptr_is_rdonly(&ptr);
1678 
1679 	return 0;
1680 }
1681 
1682 /* bpf_dynptr_size can only be called on initialized dynptrs */
1683 SEC("?raw_tp")
1684 __failure __msg("Expected an initialized dynptr as arg #0")
1685 int dynptr_size_invalid(void *ctx)
1686 {
1687 	struct bpf_dynptr ptr = {};
1688 
1689 	/* this should fail */
1690 	bpf_dynptr_size(&ptr);
1691 
1692 	return 0;
1693 }
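
/* Illustrative sketch (not an upstream test): once a dynptr is initialized,
 * the query kfuncs rejected above are accepted.
 */
SEC("?raw_tp")
__success
int dynptr_query_initialized_example(void *ctx)
{
	struct bpf_dynptr ptr;

	bpf_ringbuf_reserve_dynptr(&ringbuf, 16, 0, &ptr);

	if (!bpf_dynptr_is_null(&ptr) && !bpf_dynptr_is_rdonly(&ptr))
		val = bpf_dynptr_size(&ptr);

	bpf_ringbuf_discard_dynptr(&ptr, 0);

	return 0;
}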
1694 
1695 /* Only initialized dynptrs can be cloned */
1696 SEC("?raw_tp")
1697 __failure __msg("Expected an initialized dynptr as arg #0")
1698 int clone_invalid1(void *ctx)
1699 {
1700 	struct bpf_dynptr ptr1 = {};
1701 	struct bpf_dynptr ptr2;
1702 
1703 	/* this should fail */
1704 	bpf_dynptr_clone(&ptr1, &ptr2);
1705 
1706 	return 0;
1707 }
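
/* Illustrative sketch (not an upstream test): cloning an initialized dynptr
 * into an uninitialized slot is the supported direction.
 */
SEC("?raw_tp")
__success
int clone_valid_example(void *ctx)
{
	struct bpf_dynptr ptr;
	struct bpf_dynptr clone;
	char read_data[64];

	bpf_ringbuf_reserve_dynptr(&ringbuf, 64, 0, &ptr);

	bpf_dynptr_clone(&ptr, &clone);

	bpf_dynptr_read(read_data, sizeof(read_data), &clone, 0, 0);

	bpf_ringbuf_submit_dynptr(&ptr, 0);

	return 0;
}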
1708 
1709 /* Can't overwrite an existing dynptr when cloning */
1710 SEC("?xdp")
1711 __failure __msg("cannot overwrite referenced dynptr")
1712 int clone_invalid2(struct xdp_md *xdp)
1713 {
1714 	struct bpf_dynptr ptr1;
1715 	struct bpf_dynptr clone;
1716 
1717 	bpf_dynptr_from_xdp(xdp, 0, &ptr1);
1718 
1719 	bpf_ringbuf_reserve_dynptr(&ringbuf, 64, 0, &clone);
1720 
1721 	/* this should fail */
1722 	bpf_dynptr_clone(&ptr1, &clone);
1723 
1724 	bpf_ringbuf_submit_dynptr(&clone, 0);
1725 
1726 	return 0;
1727 }
1728 
1729 /* Invalidating a dynptr should invalidate its clones */
1730 SEC("?raw_tp")
1731 __failure __msg("Expected an initialized dynptr as arg #2")
1732 int clone_invalidate1(void *ctx)
1733 {
1734 	struct bpf_dynptr clone;
1735 	struct bpf_dynptr ptr;
1736 	char read_data[64];
1737 
1738 	bpf_ringbuf_reserve_dynptr(&ringbuf, val, 0, &ptr);
1739 
1740 	bpf_dynptr_clone(&ptr, &clone);
1741 
1742 	bpf_ringbuf_submit_dynptr(&ptr, 0);
1743 
1744 	/* this should fail */
1745 	bpf_dynptr_read(read_data, sizeof(read_data), &clone, 0, 0);
1746 
1747 	return 0;
1748 }
1749 
1750 /* Invalidating a dynptr should invalidate its parent */
1751 SEC("?raw_tp")
1752 __failure __msg("Expected an initialized dynptr as arg #2")
1753 int clone_invalidate2(void *ctx)
1754 {
1755 	struct bpf_dynptr ptr;
1756 	struct bpf_dynptr clone;
1757 	char read_data[64];
1758 
1759 	bpf_ringbuf_reserve_dynptr(&ringbuf, val, 0, &ptr);
1760 
1761 	bpf_dynptr_clone(&ptr, &clone);
1762 
1763 	bpf_ringbuf_submit_dynptr(&clone, 0);
1764 
1765 	/* this should fail */
1766 	bpf_dynptr_read(read_data, sizeof(read_data), &ptr, 0, 0);
1767 
1768 	return 0;
1769 }
1770 
1771 /* Invalidating a dynptr should invalidate its siblings */
1772 SEC("?raw_tp")
1773 __failure __msg("Expected an initialized dynptr as arg #2")
1774 int clone_invalidate3(void *ctx)
1775 {
1776 	struct bpf_dynptr ptr;
1777 	struct bpf_dynptr clone1;
1778 	struct bpf_dynptr clone2;
1779 	char read_data[64];
1780 
1781 	bpf_ringbuf_reserve_dynptr(&ringbuf, val, 0, &ptr);
1782 
1783 	bpf_dynptr_clone(&ptr, &clone1);
1784 
1785 	bpf_dynptr_clone(&ptr, &clone2);
1786 
1787 	bpf_ringbuf_submit_dynptr(&clone2, 0);
1788 
1789 	/* this should fail */
1790 	bpf_dynptr_read(read_data, sizeof(read_data), &clone1, 0, 0);
1791 
1792 	return 0;
1793 }
1794 
1795 /* Invalidating a dynptr should invalidate any data slices
1796  * of its clones
1797  */
1798 SEC("?raw_tp")
1799 __failure __msg("invalid mem access 'scalar'")
1800 int clone_invalidate4(void *ctx)
1801 {
1802 	struct bpf_dynptr ptr;
1803 	struct bpf_dynptr clone;
1804 	int *data;
1805 
1806 	bpf_ringbuf_reserve_dynptr(&ringbuf, val, 0, &ptr);
1807 
1808 	bpf_dynptr_clone(&ptr, &clone);
1809 	data = bpf_dynptr_data(&clone, 0, sizeof(val));
1810 	if (!data)
1811 		return 0;
1812 
1813 	bpf_ringbuf_submit_dynptr(&ptr, 0);
1814 
1815 	/* this should fail */
1816 	*data = 123;
1817 
1818 	return 0;
1819 }
1820 
1821 /* Invalidating a dynptr should invalidate any data slices
1822  * of its parent
1823  */
1824 SEC("?raw_tp")
1825 __failure __msg("invalid mem access 'scalar'")
1826 int clone_invalidate5(void *ctx)
1827 {
1828 	struct bpf_dynptr ptr;
1829 	struct bpf_dynptr clone;
1830 	int *data;
1831 
1832 	bpf_ringbuf_reserve_dynptr(&ringbuf, val, 0, &ptr);
1833 	data = bpf_dynptr_data(&ptr, 0, sizeof(val));
1834 	if (!data)
1835 		return 0;
1836 
1837 	bpf_dynptr_clone(&ptr, &clone);
1838 
1839 	bpf_ringbuf_submit_dynptr(&clone, 0);
1840 
1841 	/* this should fail */
1842 	*data = 123;
1843 
1844 	return 0;
1845 }
1846 
1847 /* Invalidating a dynptr should invalidate any data slices
1848  * of its sibling
1849  */
1850 SEC("?raw_tp")
1851 __failure __msg("invalid mem access 'scalar'")
1852 int clone_invalidate6(void *ctx)
1853 {
1854 	struct bpf_dynptr ptr;
1855 	struct bpf_dynptr clone1;
1856 	struct bpf_dynptr clone2;
1857 	int *data;
1858 
1859 	bpf_ringbuf_reserve_dynptr(&ringbuf, val, 0, &ptr);
1860 
1861 	bpf_dynptr_clone(&ptr, &clone1);
1862 
1863 	bpf_dynptr_clone(&ptr, &clone2);
1864 
1865 	data = bpf_dynptr_data(&clone1, 0, sizeof(val));
1866 	if (!data)
1867 		return 0;
1868 
1869 	bpf_ringbuf_submit_dynptr(&clone2, 0);
1870 
1871 	/* this should fail */
1872 	*data = 123;
1873 
1874 	return 0;
1875 }
1876 
1877 /* A skb clone's data slices should be invalid anytime packet data changes */
1878 SEC("?tc")
1879 __failure __msg("invalid mem access 'scalar'")
1880 int clone_skb_packet_data(struct __sk_buff *skb)
1881 {
1882 	char buffer[sizeof(__u32)] = {};
1883 	struct bpf_dynptr clone;
1884 	struct bpf_dynptr ptr;
1885 	__u32 *data;
1886 
1887 	bpf_dynptr_from_skb(skb, 0, &ptr);
1888 
1889 	bpf_dynptr_clone(&ptr, &clone);
1890 	data = bpf_dynptr_slice_rdwr(&clone, 0, buffer, sizeof(buffer));
1891 	if (!data)
1892 		return SK_DROP;
1893 
1894 	if (bpf_skb_pull_data(skb, skb->len))
1895 		return SK_DROP;
1896 
1897 	/* this should fail */
1898 	*data = 123;
1899 
1900 	return 0;
1901 }
1902 
1903 /* A skb clone's metadata slice becomes invalid anytime packet data changes */
1904 SEC("?tc")
1905 __failure __msg("invalid mem access 'scalar'")
1906 int clone_skb_packet_meta(struct __sk_buff *skb)
1907 {
1908 	struct bpf_dynptr clone, meta;
1909 	__u8 *md;
1910 
1911 	bpf_dynptr_from_skb_meta(skb, 0, &meta);
1912 	bpf_dynptr_clone(&meta, &clone);
1913 	md = bpf_dynptr_slice_rdwr(&clone, 0, NULL, sizeof(*md));
1914 	if (!md)
1915 		return SK_DROP;
1916 
1917 	if (bpf_skb_pull_data(skb, skb->len))
1918 		return SK_DROP;
1919 
1920 	/* this should fail */
1921 	*md = 42;
1922 
1923 	return 0;
1924 }
1925 
1926 /* An xdp clone's data slices should be invalid anytime packet data changes */
1927 SEC("?xdp")
1928 __failure __msg("invalid mem access 'scalar'")
1929 int clone_xdp_packet_data(struct xdp_md *xdp)
1930 {
1931 	char buffer[sizeof(__u32)] = {};
1932 	struct bpf_dynptr clone;
1933 	struct bpf_dynptr ptr;
1934 	struct ethhdr *hdr;
1935 	__u32 *data;
1936 
1937 	bpf_dynptr_from_xdp(xdp, 0, &ptr);
1938 
1939 	bpf_dynptr_clone(&ptr, &clone);
1940 	data = bpf_dynptr_slice_rdwr(&clone, 0, buffer, sizeof(buffer));
1941 	if (!data)
1942 		return XDP_DROP;
1943 
1944 	if (bpf_xdp_adjust_head(xdp, 0 - (int)sizeof(*hdr)))
1945 		return XDP_DROP;
1946 
1947 	/* this should fail */
1948 	*data = 123;
1949 
1950 	return 0;
1951 }
1952 
1953 /* Buffers that are provided must be sufficiently long */
1954 SEC("?cgroup_skb/egress")
1955 __failure __msg("memory, len pair leads to invalid memory access")
1956 int test_dynptr_skb_small_buff(struct __sk_buff *skb)
1957 {
1958 	struct bpf_dynptr ptr;
1959 	char buffer[8] = {};
1960 	__u64 *data;
1961 
1962 	if (bpf_dynptr_from_skb(skb, 0, &ptr)) {
1963 		err = 1;
1964 		return 1;
1965 	}
1966 
1967 	/* This may return NULL. SKB may require a buffer */
1968 	data = bpf_dynptr_slice(&ptr, 0, buffer, 9);
1969 
1970 	return !!data;
1971 }
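
/* Illustrative sketch (not an upstream test): the same call with a length
 * that fits the provided buffer is accepted.
 */
SEC("?cgroup_skb/egress")
__success
int test_dynptr_skb_adequate_buff_example(struct __sk_buff *skb)
{
	struct bpf_dynptr ptr;
	char buffer[8] = {};
	__u64 *data;

	if (bpf_dynptr_from_skb(skb, 0, &ptr)) {
		err = 1;
		return 1;
	}

	data = bpf_dynptr_slice(&ptr, 0, buffer, sizeof(buffer));

	return !!data;
}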
1972 
1973 __noinline long global_call_bpf_dynptr(const struct bpf_dynptr *dynptr)
1974 {
1975 	long ret = 0;
1976 	/* Keep this global function non-empty so the compiler does not
1977 	 * optimize away the call to it.
1978 	 */
1979 	__sink(ret);
1980 	return ret;
1981 }
1982 
1983 SEC("?raw_tp")
1984 __failure __msg("arg#0 expected pointer to stack or const struct bpf_dynptr")
1985 int test_dynptr_reg_type(void *ctx)
1986 {
1987 	struct task_struct *current = NULL;
1988 	/* R1 should be holding a PTR_TO_BTF_ID, so this shouldn't be a
1989 	 * reg->type that can be passed to a function accepting a
1990 	 * ARG_PTR_TO_DYNPTR | MEM_RDONLY. process_dynptr_func() should catch
1991 	 * this.
1992 	 */
1993 	global_call_bpf_dynptr((const struct bpf_dynptr *)current);
1994 	return 0;
1995 }
1996