// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2022 Facebook */

#include <vmlinux.h>
#include <string.h>
#include <stdbool.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#include "bpf_misc.h"
#include "errno.h"

#define PAGE_SIZE_64K 65536

char _license[] SEC("license") = "GPL";

int pid, err, val;

struct ringbuf_sample {
	int pid;
	int seq;
	long value;
	char comm[16];
};

struct {
	__uint(type, BPF_MAP_TYPE_RINGBUF);
	__uint(max_entries, 4096);
} ringbuf SEC(".maps");

struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 1);
	__type(key, __u32);
	__type(value, __u32);
} array_map SEC(".maps");

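/* Write a payload into a ringbuf-backed dynptr, read it back, and verify
 * the bytes match.
 */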
SEC("?tp/syscalls/sys_enter_nanosleep")
int test_read_write(void *ctx)
{
	char write_data[64] = "hello there, world!!";
	char read_data[64] = {};
	struct bpf_dynptr ptr;
	int i;

	if (bpf_get_current_pid_tgid() >> 32 != pid)
		return 0;

	bpf_ringbuf_reserve_dynptr(&ringbuf, sizeof(write_data), 0, &ptr);

	/* Write data into the dynptr */
	err = bpf_dynptr_write(&ptr, 0, write_data, sizeof(write_data), 0);

	/* Read the data that was written into the dynptr */
	err = err ?: bpf_dynptr_read(read_data, sizeof(read_data), &ptr, 0, 0);

	/* Ensure the data we read matches the data we wrote */
	for (i = 0; i < sizeof(read_data); i++) {
		if (read_data[i] != write_data[i]) {
			err = 1;
			break;
		}
	}

	bpf_ringbuf_discard_dynptr(&ptr, 0);
	return 0;
}

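/* Exercise bpf_dynptr_data(): out-of-range and oversized slice requests
 * must return NULL, while a valid slice is directly writable.
 */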
SEC("?tp/syscalls/sys_enter_nanosleep")
int test_dynptr_data(void *ctx)
{
	__u32 key = 0, val = 235, *map_val;
	struct bpf_dynptr ptr;
	__u32 map_val_size;
	void *data;

	map_val_size = sizeof(*map_val);

	if (bpf_get_current_pid_tgid() >> 32 != pid)
		return 0;

	bpf_map_update_elem(&array_map, &key, &val, 0);

	map_val = bpf_map_lookup_elem(&array_map, &key);
	if (!map_val) {
		err = 1;
		return 0;
	}

	bpf_dynptr_from_mem(map_val, map_val_size, 0, &ptr);

	/* Try getting a data slice that is out of range */
	data = bpf_dynptr_data(&ptr, map_val_size + 1, 1);
	if (data) {
		err = 2;
		return 0;
	}

	/* Try getting more bytes than available */
	data = bpf_dynptr_data(&ptr, 0, map_val_size + 1);
	if (data) {
		err = 3;
		return 0;
	}

	data = bpf_dynptr_data(&ptr, 0, sizeof(__u32));
	if (!data) {
		err = 4;
		return 0;
	}

	*(__u32 *)data = 999;

	err = bpf_probe_read_kernel(&val, sizeof(val), data);
	if (err)
		return 0;

	if (val != *(int *)data)
		err = 5;

	return 0;
}

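/* bpf_loop() callback: bumps sample->pid by the iteration index so the
 * caller can verify that a dynptr stays usable across callback invocations.
 */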
static int ringbuf_callback(__u32 index, void *data)
{
	struct ringbuf_sample *sample;

	struct bpf_dynptr *ptr = (struct bpf_dynptr *)data;

	sample = bpf_dynptr_data(ptr, 0, sizeof(*sample));
	if (!sample)
		err = 2;
	else
		sample->pid += index;

	return 0;
}

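/* Reserve a dynamically sized ringbuf sample as a dynptr and check that
 * writes made through the dynptr in a bpf_loop() callback are visible.
 */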
SEC("?tp/syscalls/sys_enter_nanosleep")
int test_ringbuf(void *ctx)
{
	struct bpf_dynptr ptr;
	struct ringbuf_sample *sample;

	if (bpf_get_current_pid_tgid() >> 32 != pid)
		return 0;

	val = 100;

	/* Check that a dynamically sized reservation works */
	err = bpf_ringbuf_reserve_dynptr(&ringbuf, val, 0, &ptr);

	sample = err ? NULL : bpf_dynptr_data(&ptr, 0, sizeof(*sample));
	if (!sample) {
		err = 1;
		goto done;
	}

	sample->pid = 10;

	/* Can pass dynptr to callback functions */
	bpf_loop(10, ringbuf_callback, &ptr, 0);

	if (sample->pid != 55)
		err = 2;

done:
	bpf_ringbuf_discard_dynptr(&ptr, 0);
	return 0;
}

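/* Writes through a dynptr over a read-only cgroup skb must fail. */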
SEC("?cgroup_skb/egress")
int test_skb_readonly(struct __sk_buff *skb)
{
	__u8 write_data[2] = {1, 2};
	struct bpf_dynptr ptr;
	int ret;

	if (bpf_dynptr_from_skb(skb, 0, &ptr)) {
		err = 1;
		return 1;
	}

	/* since cgroup skbs are read only, writes should fail */
	ret = bpf_dynptr_write(&ptr, 0, write_data, sizeof(write_data), 0);
	if (ret != -EINVAL) {
		err = 2;
		return 1;
	}

	return 1;
}

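/* bpf_dynptr_data() is not supported for skb dynptrs and must return NULL;
 * the bpf_dynptr_slice() API has to be used instead.
 */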
SEC("?cgroup_skb/egress")
int test_dynptr_skb_data(struct __sk_buff *skb)
{
	struct bpf_dynptr ptr;
	__u64 *data;

	if (bpf_dynptr_from_skb(skb, 0, &ptr)) {
		err = 1;
		return 1;
	}

	/* This should return NULL. Must use bpf_dynptr_slice API */
	data = bpf_dynptr_data(&ptr, 0, 1);
	if (data) {
		err = 2;
		return 1;
	}

	return 1;
}

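/* Advance and trim a ringbuf dynptr with bpf_dynptr_adjust() and verify
 * the size reported by bpf_dynptr_size() after each adjustment.
 */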
SEC("tp/syscalls/sys_enter_nanosleep")
int test_adjust(void *ctx)
{
	struct bpf_dynptr ptr;
	__u32 bytes = 64;
	__u32 off = 10;
	__u32 trim = 15;

	if (bpf_get_current_pid_tgid() >> 32 != pid)
		return 0;

	err = bpf_ringbuf_reserve_dynptr(&ringbuf, bytes, 0, &ptr);
	if (err) {
		err = 1;
		goto done;
	}

	if (bpf_dynptr_size(&ptr) != bytes) {
		err = 2;
		goto done;
	}

	/* Advance the dynptr by off */
	err = bpf_dynptr_adjust(&ptr, off, bpf_dynptr_size(&ptr));
	if (err) {
		err = 3;
		goto done;
	}

	if (bpf_dynptr_size(&ptr) != bytes - off) {
		err = 4;
		goto done;
	}

	/* Trim the dynptr */
	err = bpf_dynptr_adjust(&ptr, off, trim);
	if (err) {
		err = 5;
		goto done;
	}

	/* Check that the size was adjusted correctly */
	if (bpf_dynptr_size(&ptr) != trim - off) {
		err = 6;
		goto done;
	}

done:
	bpf_ringbuf_discard_dynptr(&ptr, 0);
	return 0;
}

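/* Error paths of bpf_dynptr_adjust(): invalid ranges must fail with
 * -EINVAL or -ERANGE, and writes past an adjusted dynptr's end must fail
 * with -E2BIG.
 */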
SEC("tp/syscalls/sys_enter_nanosleep")
int test_adjust_err(void *ctx)
{
	char write_data[45] = "hello there, world!!";
	struct bpf_dynptr ptr;
	__u32 size = 64;
	__u32 off = 20;

	if (bpf_get_current_pid_tgid() >> 32 != pid)
		return 0;

	if (bpf_ringbuf_reserve_dynptr(&ringbuf, size, 0, &ptr)) {
		err = 1;
		goto done;
	}

	/* Check that start can't be greater than end */
	if (bpf_dynptr_adjust(&ptr, 5, 1) != -EINVAL) {
		err = 2;
		goto done;
	}

	/* Check that start can't be greater than size */
	if (bpf_dynptr_adjust(&ptr, size + 1, size + 1) != -ERANGE) {
		err = 3;
		goto done;
	}

	/* Check that end can't be greater than size */
	if (bpf_dynptr_adjust(&ptr, 0, size + 1) != -ERANGE) {
		err = 4;
		goto done;
	}

	if (bpf_dynptr_adjust(&ptr, off, size)) {
		err = 5;
		goto done;
	}

	/* Check that you can't write more bytes than available into the dynptr
	 * after you've adjusted it
	 */
	if (bpf_dynptr_write(&ptr, 0, &write_data, sizeof(write_data), 0) != -E2BIG) {
		err = 6;
		goto done;
	}

	/* Check that even after adjusting, submitting/discarding
	 * a ringbuf dynptr works
	 */
	bpf_ringbuf_submit_dynptr(&ptr, 0);
	return 0;

done:
	bpf_ringbuf_discard_dynptr(&ptr, 0);
	return 0;
}

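/* A dynptr adjusted down to size 0 must reject non-zero-length reads and
 * writes with -E2BIG but still allow zero-length ones.
 */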
SEC("tp/syscalls/sys_enter_nanosleep")
int test_zero_size_dynptr(void *ctx)
{
	char write_data = 'x', read_data;
	struct bpf_dynptr ptr;
	__u32 size = 64;

	if (bpf_get_current_pid_tgid() >> 32 != pid)
		return 0;

	if (bpf_ringbuf_reserve_dynptr(&ringbuf, size, 0, &ptr)) {
		err = 1;
		goto done;
	}

	/* After this, the dynptr has a size of 0 */
	if (bpf_dynptr_adjust(&ptr, size, size)) {
		err = 2;
		goto done;
	}

	/* Test that reading + writing non-zero bytes is not ok */
	if (bpf_dynptr_read(&read_data, sizeof(read_data), &ptr, 0, 0) != -E2BIG) {
		err = 3;
		goto done;
	}

	if (bpf_dynptr_write(&ptr, 0, &write_data, sizeof(write_data), 0) != -E2BIG) {
		err = 4;
		goto done;
	}

	/* Test that reading + writing 0 bytes from a 0-size dynptr is ok */
	if (bpf_dynptr_read(&read_data, 0, &ptr, 0, 0)) {
		err = 5;
		goto done;
	}

	if (bpf_dynptr_write(&ptr, 0, &write_data, 0, 0)) {
		err = 6;
		goto done;
	}

	err = 0;

done:
	bpf_ringbuf_discard_dynptr(&ptr, 0);
	return 0;
}

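/* bpf_dynptr_is_null() must return true for a dynptr whose initialization
 * failed and false for a successfully initialized one.
 */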
SEC("tp/syscalls/sys_enter_nanosleep")
int test_dynptr_is_null(void *ctx)
{
	struct bpf_dynptr ptr1;
	struct bpf_dynptr ptr2;
	__u64 size = 4;

	if (bpf_get_current_pid_tgid() >> 32 != pid)
		return 0;

	/* Pass in invalid flags, get back an invalid dynptr */
	if (bpf_ringbuf_reserve_dynptr(&ringbuf, size, 123, &ptr1) != -EINVAL) {
		err = 1;
		goto exit_early;
	}

	/* Test that the invalid dynptr is null */
	if (!bpf_dynptr_is_null(&ptr1)) {
		err = 2;
		goto exit_early;
	}

	/* Get a valid dynptr */
	if (bpf_ringbuf_reserve_dynptr(&ringbuf, size, 0, &ptr2)) {
		err = 3;
		goto exit;
	}

	/* Test that the valid dynptr is not null */
	if (bpf_dynptr_is_null(&ptr2)) {
		err = 4;
		goto exit;
	}

exit:
	bpf_ringbuf_discard_dynptr(&ptr2, 0);
exit_early:
	bpf_ringbuf_discard_dynptr(&ptr1, 0);
	return 0;
}

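/* bpf_dynptr_is_rdonly() must return false for an invalid dynptr, true
 * for a read-only skb dynptr, and false for a writable ringbuf dynptr.
 */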
SEC("cgroup_skb/egress")
int test_dynptr_is_rdonly(struct __sk_buff *skb)
{
	struct bpf_dynptr ptr1;
	struct bpf_dynptr ptr2;
	struct bpf_dynptr ptr3;

	/* Pass in invalid flags, get back an invalid dynptr */
	if (bpf_dynptr_from_skb(skb, 123, &ptr1) != -EINVAL) {
		err = 1;
		return 0;
	}

	/* Test that an invalid dynptr is_rdonly returns false */
	if (bpf_dynptr_is_rdonly(&ptr1)) {
		err = 2;
		return 0;
	}

	/* Get a read-only dynptr */
	if (bpf_dynptr_from_skb(skb, 0, &ptr2)) {
		err = 3;
		return 0;
	}

	/* Test that the dynptr is read-only */
	if (!bpf_dynptr_is_rdonly(&ptr2)) {
		err = 4;
		return 0;
	}

	/* Get a read-writable dynptr */
	if (bpf_ringbuf_reserve_dynptr(&ringbuf, 64, 0, &ptr3)) {
		err = 5;
		goto done;
	}

	/* Test that the dynptr is not read-only */
	if (bpf_dynptr_is_rdonly(&ptr3)) {
		err = 6;
		goto done;
	}

done:
	bpf_ringbuf_discard_dynptr(&ptr3, 0);
	return 0;
}

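/* A clone must inherit the parent's size and read-only status, and later
 * adjustments of the parent must not affect the clone's view.
 */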
SEC("cgroup_skb/egress")
int test_dynptr_clone(struct __sk_buff *skb)
{
	struct bpf_dynptr ptr1;
	struct bpf_dynptr ptr2;
	__u32 off = 2, size;

	/* Get a dynptr */
	if (bpf_dynptr_from_skb(skb, 0, &ptr1)) {
		err = 1;
		return 0;
	}

	if (bpf_dynptr_adjust(&ptr1, off, bpf_dynptr_size(&ptr1))) {
		err = 2;
		return 0;
	}

	/* Clone the dynptr */
	if (bpf_dynptr_clone(&ptr1, &ptr2)) {
		err = 3;
		return 0;
	}

	size = bpf_dynptr_size(&ptr1);

	/* Check that the clone has the same size and read-only status */
	if (bpf_dynptr_size(&ptr2) != size) {
		err = 4;
		return 0;
	}

	if (bpf_dynptr_is_rdonly(&ptr2) != bpf_dynptr_is_rdonly(&ptr1)) {
		err = 5;
		return 0;
	}

	/* Advance and trim the original dynptr */
	bpf_dynptr_adjust(&ptr1, 5, 5);

	/* Check that only the original dynptr was affected, not the clone */
	if (bpf_dynptr_size(&ptr2) != size) {
		err = 6;
		return 0;
	}

	return 0;
}

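/* Slice an skb dynptr without providing a buffer; the slice may
 * legitimately be NULL here, and the return value reports which case hit.
 */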
SEC("?cgroup_skb/egress")
int test_dynptr_skb_no_buff(struct __sk_buff *skb)
{
	struct bpf_dynptr ptr;
	__u64 *data;

	if (bpf_dynptr_from_skb(skb, 0, &ptr)) {
		err = 1;
		return 1;
	}

	/* This may return NULL. SKB may require a buffer */
	data = bpf_dynptr_slice(&ptr, 0, NULL, 1);

	return !!data;
}

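/* When a slice of skb data is available, it can be passed to helpers
 * such as bpf_strncmp().
 */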
SEC("?cgroup_skb/egress")
int test_dynptr_skb_strcmp(struct __sk_buff *skb)
{
	struct bpf_dynptr ptr;
	char *data;

	if (bpf_dynptr_from_skb(skb, 0, &ptr)) {
		err = 1;
		return 1;
	}

	/* This may return NULL. SKB may require a buffer */
	data = bpf_dynptr_slice(&ptr, 0, NULL, 10);
	if (data) {
		bpf_strncmp(data, 10, "foo");
		return 1;
	}

	return 1;
}

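/* skbs seen from a tp_btf program are read-only, so dynptr writes must
 * fail with -EINVAL.
 */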
SEC("tp_btf/kfree_skb")
int BPF_PROG(test_dynptr_skb_tp_btf, void *skb, void *location)
{
	__u8 write_data[2] = {1, 2};
	struct bpf_dynptr ptr;
	int ret;

	if (bpf_dynptr_from_skb(skb, 0, &ptr)) {
		err = 1;
		return 1;
	}

	/* since tp_btf skbs are read only, writes should fail */
	ret = bpf_dynptr_write(&ptr, 0, write_data, sizeof(write_data), 0);
	if (ret != -EINVAL) {
		err = 2;
		return 1;
	}

	return 1;
}

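/* Verifier-friendly memcmp over a bounded loop; returns -1, 0, or 1,
 * memcmp()-style.
 */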
static inline int bpf_memcmp(const char *a, const char *b, u32 size)
{
	int i;

	bpf_for(i, 0, size) {
		if (a[i] != b[i])
			return a[i] < b[i] ? -1 : 1;
	}
	return 0;
}

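/* Copy between two contiguous ringbuf-backed dynptrs, with and without
 * source/destination offsets, and verify the copied bytes.
 */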
SEC("?tp/syscalls/sys_enter_nanosleep")
int test_dynptr_copy(void *ctx)
{
	char data[] = "hello there, world!!";
	char buf[32] = {'\0'};
	__u32 sz = sizeof(data);
	struct bpf_dynptr src, dst;

	bpf_ringbuf_reserve_dynptr(&ringbuf, sz, 0, &src);
	bpf_ringbuf_reserve_dynptr(&ringbuf, sz, 0, &dst);

	/* Test basic case of copying contiguous memory backed dynptrs */
	err = bpf_dynptr_write(&src, 0, data, sz, 0);
	err = err ?: bpf_dynptr_copy(&dst, 0, &src, 0, sz);
	err = err ?: bpf_dynptr_read(buf, sz, &dst, 0, 0);
	err = err ?: bpf_memcmp(data, buf, sz);

	/* Test that offsets are handled correctly */
	err = err ?: bpf_dynptr_copy(&dst, 3, &src, 5, sz - 5);
	err = err ?: bpf_dynptr_read(buf, sz - 5, &dst, 3, 0);
	err = err ?: bpf_memcmp(data + 5, buf, sz - 5);

	bpf_ringbuf_discard_dynptr(&src, 0);
	bpf_ringbuf_discard_dynptr(&dst, 0);
	return 0;
}

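/* Exercise bpf_dynptr_copy() when the source, the destination, or both
 * are backed by non-contiguous (multi-frag) XDP memory, plus an
 * out-of-bounds copy that must fail with -E2BIG.
 */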
610 SEC("xdp")
611 int test_dynptr_copy_xdp(struct xdp_md *xdp)
612 {
613 	struct bpf_dynptr ptr_buf, ptr_xdp;
614 	char data[] = "qwertyuiopasdfghjkl";
615 	char buf[32] = {'\0'};
616 	__u32 len = sizeof(data), xdp_data_size;
617 	int i, chunks = 200;
618 
619 	/* ptr_xdp is backed by non-contiguous memory */
620 	bpf_dynptr_from_xdp(xdp, 0, &ptr_xdp);
621 	xdp_data_size = bpf_dynptr_size(&ptr_xdp);
622 	bpf_ringbuf_reserve_dynptr(&ringbuf, len * chunks, 0, &ptr_buf);
623 
624 	/* Destination dynptr is backed by non-contiguous memory */
625 	bpf_for(i, 0, chunks) {
626 		err = bpf_dynptr_write(&ptr_buf, i * len, data, len, 0);
627 		if (err)
628 			goto out;
629 	}
630 
631 	err = bpf_dynptr_copy(&ptr_xdp, 0, &ptr_buf, 0, len * chunks);
632 	if (err)
633 		goto out;
634 
635 	bpf_for(i, 0, chunks) {
636 		__builtin_memset(buf, 0, sizeof(buf));
637 		err = bpf_dynptr_read(&buf, len, &ptr_xdp, i * len, 0);
638 		if (err)
639 			goto out;
640 		if (bpf_memcmp(data, buf, len) != 0)
641 			goto out;
642 	}
643 
644 	/* Source dynptr is backed by non-contiguous memory */
645 	__builtin_memset(buf, 0, sizeof(buf));
646 	bpf_for(i, 0, chunks) {
647 		err = bpf_dynptr_write(&ptr_buf, i * len, buf, len, 0);
648 		if (err)
649 			goto out;
650 	}
651 
652 	err = bpf_dynptr_copy(&ptr_buf, 0, &ptr_xdp, 0, len * chunks);
653 	if (err)
654 		goto out;
655 
656 	bpf_for(i, 0, chunks) {
657 		__builtin_memset(buf, 0, sizeof(buf));
658 		err = bpf_dynptr_read(&buf, len, &ptr_buf, i * len, 0);
659 		if (err)
660 			goto out;
661 		if (bpf_memcmp(data, buf, len) != 0)
662 			goto out;
663 	}
664 
665 	/* Both source and destination dynptrs are backed by non-contiguous memory */
666 	err = bpf_dynptr_copy(&ptr_xdp, 2, &ptr_xdp, len, len * (chunks - 1));
667 	if (err)
668 		goto out;
669 
670 	bpf_for(i, 0, chunks - 1) {
671 		__builtin_memset(buf, 0, sizeof(buf));
672 		err = bpf_dynptr_read(&buf, len, &ptr_xdp, 2 + i * len, 0);
673 		if (err)
674 			goto out;
675 		if (bpf_memcmp(data, buf, len) != 0)
676 			goto out;
677 	}
678 
679 	if (bpf_dynptr_copy(&ptr_xdp, xdp_data_size - 3000, &ptr_xdp, 0, len * chunks) != -E2BIG)
680 		err = 1;
681 
682 out:
683 	bpf_ringbuf_discard_dynptr(&ptr_buf, 0);
684 	return XDP_DROP;
685 }
686 
687 char memset_zero_data[] = "data to be zeroed";
688 
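/* bpf_dynptr_memset() with value 0 must zero the whole backing buffer. */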
SEC("?tp/syscalls/sys_enter_nanosleep")
int test_dynptr_memset_zero(void *ctx)
{
	__u32 data_sz = sizeof(memset_zero_data);
	char zeroes[32] = {'\0'};
	struct bpf_dynptr ptr;

	err = bpf_dynptr_from_mem(memset_zero_data, data_sz, 0, &ptr);
	err = err ?: bpf_dynptr_memset(&ptr, 0, data_sz, 0);
	err = err ?: bpf_memcmp(zeroes, memset_zero_data, data_sz);

	return 0;
}

#define DYNPTR_MEMSET_VAL 42

char memset_notzero_data[] = "data to be overwritten";

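/* bpf_dynptr_memset() with a non-zero value must fill the buffer with it. */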
SEC("?tp/syscalls/sys_enter_nanosleep")
int test_dynptr_memset_notzero(void *ctx)
{
	u32 data_sz = sizeof(memset_notzero_data);
	struct bpf_dynptr ptr;
	char expected[32];

	__builtin_memset(expected, DYNPTR_MEMSET_VAL, data_sz);

	err = bpf_dynptr_from_mem(memset_notzero_data, data_sz, 0, &ptr);
	err = err ?: bpf_dynptr_memset(&ptr, 0, data_sz, DYNPTR_MEMSET_VAL);
	err = err ?: bpf_memcmp(expected, memset_notzero_data, data_sz);

	return 0;
}

char memset_zero_offset_data[] = "data to be zeroed partially";

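/* memset at a non-zero offset must touch only the requested byte range. */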
SEC("?tp/syscalls/sys_enter_nanosleep")
int test_dynptr_memset_zero_offset(void *ctx)
{
	char expected[] = "data to \0\0\0\0eroed partially";
	__u32 data_sz = sizeof(memset_zero_offset_data);
	struct bpf_dynptr ptr;

	err = bpf_dynptr_from_mem(memset_zero_offset_data, data_sz, 0, &ptr);
	err = err ?: bpf_dynptr_memset(&ptr, 8, 4, 0);
	err = err ?: bpf_memcmp(expected, memset_zero_offset_data, data_sz);

	return 0;
}

char memset_zero_adjusted_data[] = "data to be zeroed partially";

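/* memset over an adjusted dynptr must only touch the adjusted window. */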
SEC("?tp/syscalls/sys_enter_nanosleep")
int test_dynptr_memset_zero_adjusted(void *ctx)
{
	char expected[] = "data\0\0\0\0be zeroed partially";
	__u32 data_sz = sizeof(memset_zero_adjusted_data);
	struct bpf_dynptr ptr;

	err = bpf_dynptr_from_mem(memset_zero_adjusted_data, data_sz, 0, &ptr);
	err = err ?: bpf_dynptr_adjust(&ptr, 4, 8);
	err = err ?: bpf_dynptr_memset(&ptr, 0, bpf_dynptr_size(&ptr), 0);
	err = err ?: bpf_memcmp(expected, memset_zero_adjusted_data, data_sz);

	return 0;
}

char memset_overflow_data[] = "memset overflow data";

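/* memset requests that run past the end of the dynptr, whether by size
 * alone or by offset + size, must fail with -E2BIG.
 */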
SEC("?tp/syscalls/sys_enter_nanosleep")
int test_dynptr_memset_overflow(void *ctx)
{
	__u32 data_sz = sizeof(memset_overflow_data);
	struct bpf_dynptr ptr;
	int ret;

	err = bpf_dynptr_from_mem(memset_overflow_data, data_sz, 0, &ptr);
	ret = bpf_dynptr_memset(&ptr, 0, data_sz + 1, 0);
	if (ret != -E2BIG)
		err = 1;

	return 0;
}

SEC("?tp/syscalls/sys_enter_nanosleep")
int test_dynptr_memset_overflow_offset(void *ctx)
{
	__u32 data_sz = sizeof(memset_overflow_data);
	struct bpf_dynptr ptr;
	int ret;

	err = bpf_dynptr_from_mem(memset_overflow_data, data_sz, 0, &ptr);
	ret = bpf_dynptr_memset(&ptr, 1, data_sz, 0);
	if (ret != -E2BIG)
		err = 1;

	return 0;
}

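/* memset on a read-only (cgroup skb) dynptr must fail with -EINVAL. */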
SEC("?cgroup_skb/egress")
int test_dynptr_memset_readonly(struct __sk_buff *skb)
{
	struct bpf_dynptr ptr;
	int ret;

	err = bpf_dynptr_from_skb(skb, 0, &ptr);

	/* cgroup skbs are read only, memset should fail */
	ret = bpf_dynptr_memset(&ptr, 0, bpf_dynptr_size(&ptr), 0);
	if (ret != -EINVAL)
		err = 1;

	return 0;
}

#define min_t(type, x, y) ({		\
	type __x = (x);			\
	type __y = (y);			\
	__x < __y ? __x : __y; })

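/* memset an entire multi-frag XDP packet, then verify it chunk by chunk.
 * On 64K-page systems the packet exceeds the xdp pointer limit and the
 * memset is expected to fail; see the comment in the function body.
 */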
809 SEC("xdp")
810 int test_dynptr_memset_xdp_chunks(struct xdp_md *xdp)
811 {
812 	u32 data_sz, chunk_sz, offset = 0;
813 	const int max_chunks = 200;
814 	struct bpf_dynptr ptr_xdp;
815 	char expected_buf[32];
816 	char buf[32];
817 	int i;
818 
819 	__builtin_memset(expected_buf, DYNPTR_MEMSET_VAL, sizeof(expected_buf));
820 
821 	/* ptr_xdp is backed by non-contiguous memory */
822 	bpf_dynptr_from_xdp(xdp, 0, &ptr_xdp);
823 	data_sz = bpf_dynptr_size(&ptr_xdp);
824 
825 	err = bpf_dynptr_memset(&ptr_xdp, 0, data_sz, DYNPTR_MEMSET_VAL);
826 	if (err) {
827 		/* bpf_dynptr_memset() eventually called bpf_xdp_pointer()
828 		 * where if data_sz is greater than 0xffff, -EFAULT will be
829 		 * returned. For 64K page size, data_sz is greater than
830 		 * 64K, so error is expected and let us zero out error and
831 		 * return success.
832 		 */
833 		if (data_sz >= PAGE_SIZE_64K)
834 			err = 0;
835 		goto out;
836 	}
837 
838 	bpf_for(i, 0, max_chunks) {
839 		offset = i * sizeof(buf);
840 		if (offset >= data_sz)
841 			goto out;
842 		chunk_sz = min_t(u32, sizeof(buf), data_sz - offset);
843 		err = bpf_dynptr_read(&buf, chunk_sz, &ptr_xdp, offset, 0);
844 		if (err)
845 			goto out;
846 		err = bpf_memcmp(buf, expected_buf, sizeof(buf));
847 		if (err)
848 			goto out;
849 	}
850 out:
851 	return XDP_DROP;
852 }
853 
void *user_ptr;
/* Contains a copy of the data pointed to by user_ptr.
 * Size is 384 so that it does not fit into a single kernel chunk when
 * copying, yet stays below the maximum BPF stack size (512).
 */
char expected_str[384];
__u32 test_len[7] = {0/* placeholder */, 0, 1, 2, 255, 256, 257};

typedef int (*bpf_read_dynptr_fn_t)(struct bpf_dynptr *dptr, u32 off,
				    u32 size, const void *unsafe_ptr);

/* Returns the offset just before the end of the maximum-sized xdp fragment.
 * Any write larger than 32 bytes will be split between 2 fragments.
 */
__u32 xdp_near_frag_end_offset(void)
{
	const __u32 headroom = 256;
	const __u32 max_frag_size = __PAGE_SIZE - headroom - sizeof(struct skb_shared_info);

	/* 32 bytes before the approximate end of the fragment */
	return max_frag_size - 32;
}

/* Use __always_inline on test_dynptr_probe[_str][_xdp]() and callbacks
 * of type bpf_read_dynptr_fn_t to prevent the compiler from generating
 * indirect calls, which make the program fail to load with an "unknown
 * opcode" error.
 */
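/* For each length in test_len, read from ptr into a ringbuf dynptr with
 * the given helper and verify the bytes against expected_str.
 */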
static __always_inline void test_dynptr_probe(void *ptr, bpf_read_dynptr_fn_t bpf_read_dynptr_fn)
{
	char buf[sizeof(expected_str)];
	struct bpf_dynptr ptr_buf;
	int i;

	if (bpf_get_current_pid_tgid() >> 32 != pid)
		return;

	err = bpf_ringbuf_reserve_dynptr(&ringbuf, sizeof(buf), 0, &ptr_buf);

	bpf_for(i, 0, ARRAY_SIZE(test_len)) {
		__u32 len = test_len[i];

		err = err ?: bpf_read_dynptr_fn(&ptr_buf, 0, test_len[i], ptr);
		if (len > sizeof(buf))
			break;
		err = err ?: bpf_dynptr_read(&buf, len, &ptr_buf, 0, 0);

		if (err || bpf_memcmp(expected_str, buf, len))
			err = 1;

		/* Reset buffer and dynptr */
		__builtin_memset(buf, 0, sizeof(buf));
		err = err ?: bpf_dynptr_write(&ptr_buf, 0, buf, len, 0);
	}
	bpf_ringbuf_discard_dynptr(&ptr_buf, 0);
}

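/* Same as test_dynptr_probe(), but for the string helper variants, which
 * return the number of bytes copied and NUL-terminate the copy.
 */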
static __always_inline void test_dynptr_probe_str(void *ptr,
						  bpf_read_dynptr_fn_t bpf_read_dynptr_fn)
{
	char buf[sizeof(expected_str)];
	struct bpf_dynptr ptr_buf;
	__u32 cnt, i;

	if (bpf_get_current_pid_tgid() >> 32 != pid)
		return;

	bpf_ringbuf_reserve_dynptr(&ringbuf, sizeof(buf), 0, &ptr_buf);

	bpf_for(i, 0, ARRAY_SIZE(test_len)) {
		__u32 len = test_len[i];

		cnt = bpf_read_dynptr_fn(&ptr_buf, 0, len, ptr);
		if (cnt != len)
			err = 1;

		if (len > sizeof(buf))
			continue;
		err = err ?: bpf_dynptr_read(&buf, len, &ptr_buf, 0, 0);
		if (!len)
			continue;
		if (err || bpf_memcmp(expected_str, buf, len - 1) || buf[len - 1] != '\0')
			err = 1;
	}
	bpf_ringbuf_discard_dynptr(&ptr_buf, 0);
}

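/* XDP variant of test_dynptr_probe(): reads and writes land near a
 * fragment boundary, so larger copies are split across two fragments.
 */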
static __always_inline void test_dynptr_probe_xdp(struct xdp_md *xdp, void *ptr,
						  bpf_read_dynptr_fn_t bpf_read_dynptr_fn)
{
	struct bpf_dynptr ptr_xdp;
	char buf[sizeof(expected_str)];
	__u32 off, i;

	if (bpf_get_current_pid_tgid() >> 32 != pid)
		return;

	off = xdp_near_frag_end_offset();
	err = bpf_dynptr_from_xdp(xdp, 0, &ptr_xdp);

	bpf_for(i, 0, ARRAY_SIZE(test_len)) {
		__u32 len = test_len[i];

		err = err ?: bpf_read_dynptr_fn(&ptr_xdp, off, len, ptr);
		if (len > sizeof(buf))
			continue;
		err = err ?: bpf_dynptr_read(&buf, len, &ptr_xdp, off, 0);
		if (err || bpf_memcmp(expected_str, buf, len))
			err = 1;
		/* Reset buffer and dynptr */
		__builtin_memset(buf, 0, sizeof(buf));
		err = err ?: bpf_dynptr_write(&ptr_xdp, off, buf, len, 0);
	}
}

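/* String variant of the XDP probe test above. */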
static __always_inline void test_dynptr_probe_str_xdp(struct xdp_md *xdp, void *ptr,
						      bpf_read_dynptr_fn_t bpf_read_dynptr_fn)
{
	struct bpf_dynptr ptr_xdp;
	char buf[sizeof(expected_str)];
	__u32 cnt, off, i;

	if (bpf_get_current_pid_tgid() >> 32 != pid)
		return;

	off = xdp_near_frag_end_offset();
	err = bpf_dynptr_from_xdp(xdp, 0, &ptr_xdp);
	if (err)
		return;

	bpf_for(i, 0, ARRAY_SIZE(test_len)) {
		__u32 len = test_len[i];

		cnt = bpf_read_dynptr_fn(&ptr_xdp, off, len, ptr);
		if (cnt != len)
			err = 1;

		if (len > sizeof(buf))
			continue;
		err = err ?: bpf_dynptr_read(&buf, len, &ptr_xdp, off, 0);

		if (!len)
			continue;
		if (err || bpf_memcmp(expected_str, buf, len - 1) || buf[len - 1] != '\0')
			err = 1;

		__builtin_memset(buf, 0, sizeof(buf));
		err = err ?: bpf_dynptr_write(&ptr_xdp, off, buf, len, 0);
	}
}

1004 SEC("xdp")
1005 int test_probe_read_user_dynptr(struct xdp_md *xdp)
1006 {
1007 	test_dynptr_probe(user_ptr, bpf_probe_read_user_dynptr);
1008 	if (!err)
1009 		test_dynptr_probe_xdp(xdp, user_ptr, bpf_probe_read_user_dynptr);
1010 	return XDP_PASS;
1011 }
1012 
1013 SEC("xdp")
1014 int test_probe_read_kernel_dynptr(struct xdp_md *xdp)
1015 {
1016 	test_dynptr_probe(expected_str, bpf_probe_read_kernel_dynptr);
1017 	if (!err)
1018 		test_dynptr_probe_xdp(xdp, expected_str, bpf_probe_read_kernel_dynptr);
1019 	return XDP_PASS;
1020 }
1021 
1022 SEC("xdp")
1023 int test_probe_read_user_str_dynptr(struct xdp_md *xdp)
1024 {
1025 	test_dynptr_probe_str(user_ptr, bpf_probe_read_user_str_dynptr);
1026 	if (!err)
1027 		test_dynptr_probe_str_xdp(xdp, user_ptr, bpf_probe_read_user_str_dynptr);
1028 	return XDP_PASS;
1029 }
1030 
1031 SEC("xdp")
1032 int test_probe_read_kernel_str_dynptr(struct xdp_md *xdp)
1033 {
1034 	test_dynptr_probe_str(expected_str, bpf_probe_read_kernel_str_dynptr);
1035 	if (!err)
1036 		test_dynptr_probe_str_xdp(xdp, expected_str, bpf_probe_read_kernel_str_dynptr);
1037 	return XDP_PASS;
1038 }
1039 
1040 SEC("fentry.s/" SYS_PREFIX "sys_nanosleep")
1041 int test_copy_from_user_dynptr(void *ctx)
1042 {
1043 	test_dynptr_probe(user_ptr, bpf_copy_from_user_dynptr);
1044 	return 0;
1045 }
1046 
1047 SEC("fentry.s/" SYS_PREFIX "sys_nanosleep")
1048 int test_copy_from_user_str_dynptr(void *ctx)
1049 {
1050 	test_dynptr_probe_str(user_ptr, bpf_copy_from_user_str_dynptr);
1051 	return 0;
1052 }
1053 
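/* Wrappers adapting the *_task copy kfuncs to bpf_read_dynptr_fn_t by
 * supplying the current task.
 */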
static int bpf_copy_data_from_user_task(struct bpf_dynptr *dptr, u32 off,
					u32 size, const void *unsafe_ptr)
{
	struct task_struct *task = bpf_get_current_task_btf();

	return bpf_copy_from_user_task_dynptr(dptr, off, size, unsafe_ptr, task);
}

static int bpf_copy_data_from_user_task_str(struct bpf_dynptr *dptr, u32 off,
					    u32 size, const void *unsafe_ptr)
{
	struct task_struct *task = bpf_get_current_task_btf();

	return bpf_copy_from_user_task_str_dynptr(dptr, off, size, unsafe_ptr, task);
}

SEC("fentry.s/" SYS_PREFIX "sys_nanosleep")
int test_copy_from_user_task_dynptr(void *ctx)
{
	test_dynptr_probe(user_ptr, bpf_copy_data_from_user_task);
	return 0;
}

SEC("fentry.s/" SYS_PREFIX "sys_nanosleep")
int test_copy_from_user_task_str_dynptr(void *ctx)
{
	test_dynptr_probe_str(user_ptr, bpf_copy_data_from_user_task_str);
	return 0;
}