// SPDX-License-Identifier: GPL-2.0
#include "bpf_misc.h"
#include "bpf_experimental.h"

struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 8);
	__type(key, __u32);
	__type(value, __u64);
} map SEC(".maps");

struct {
	__uint(type, BPF_MAP_TYPE_USER_RINGBUF);
	__uint(max_entries, 8);
} ringbuf SEC(".maps");

struct vm_area_struct;
struct bpf_map;

struct buf_context {
	char *buf;
};

struct num_context {
	__u64 i;
	__u64 j;
};

__u8 choice_arr[2] = { 0, 1 };

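/* On iteration 0 the callback below replaces ctx->buf with the scalar 0xDEAD;
 * on the next iteration that scalar reaches bpf_probe_read_user() where a
 * pointer to the on-stack buffer is expected. The verifier must check later
 * iterations with the state left by earlier ones, hence the expected
 * "R1 type=scalar expected=fp" error.
 */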
static int unsafe_on_2nd_iter_cb(__u32 idx, struct buf_context *ctx)
{
	if (idx == 0) {
		ctx->buf = (char *)(0xDEAD);
		return 0;
	}

	if (bpf_probe_read_user(ctx->buf, 8, (void *)(0xBADC0FFEE)))
		return 1;

	return 0;
}

SEC("?raw_tp")
__failure __msg("R1 type=scalar expected=fp")
int unsafe_on_2nd_iter(void *unused)
{
	char buf[4];
	struct buf_context loop_ctx = { .buf = buf };

	bpf_loop(100, unsafe_on_2nd_iter_cb, &loop_ctx, 0);
	return 0;
}

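/* The callback below may run zero times, in which case loop_ctx.i is still 32
 * when it is used to index the two-element choice_arr, hence the expected
 * out-of-bounds map value access. The verifier must explore the
 * zero-iterations path as well.
 */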
static int unsafe_on_zero_iter_cb(__u32 idx, struct num_context *ctx)
{
	ctx->i = 0;
	return 0;
}

SEC("?raw_tp")
__failure __msg("invalid access to map value, value_size=2 off=32 size=1")
int unsafe_on_zero_iter(void *unused)
{
	struct num_context loop_ctx = { .i = 32 };

	bpf_loop(100, unsafe_on_zero_iter_cb, &loop_ctx, 0);
	return choice_arr[loop_ctx.i];
}

static int widening_cb(__u32 idx, struct num_context *ctx)
{
	++ctx->i;
	return 0;
}

SEC("?raw_tp")
__success
int widening(void *unused)
{
	struct num_context loop_ctx = { .i = 0, .j = 1 };

	bpf_loop(100, widening_cb, &loop_ctx, 0);
	/* loop_ctx.j is not changed during callback iteration,
	 * so the verifier should not apply widening to it.
	 */
	return choice_arr[loop_ctx.j];
}

static int loop_detection_cb(__u32 idx, struct num_context *ctx)
{
	for (;;) {}
	return 0;
}

SEC("?raw_tp")
__failure __msg("infinite loop detected")
int loop_detection(void *unused)
{
	struct num_context loop_ctx = { .i = 0 };

	bpf_loop(100, loop_detection_cb, &loop_ctx, 0);
	return 0;
}

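/* oob_state_machine() drives ctx->i from 0 to 1 to 32, so at least two
 * callback invocations are needed before the out-of-bounds index is formed.
 * The tests below use it to check that the verifier assumes the callbacks of
 * bpf_for_each_map_elem(), bpf_user_ringbuf_drain() and bpf_find_vma() may be
 * invoked more than once.
 */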
static __always_inline __u64 oob_state_machine(struct num_context *ctx)
{
	switch (ctx->i) {
	case 0:
		ctx->i = 1;
		break;
	case 1:
		ctx->i = 32;
		break;
	}
	return 0;
}

static __u64 for_each_map_elem_cb(struct bpf_map *map, __u32 *key, __u64 *val, void *data)
{
	return oob_state_machine(data);
}

SEC("?raw_tp")
__failure __msg("invalid access to map value, value_size=2 off=32 size=1")
int unsafe_for_each_map_elem(void *unused)
{
	struct num_context loop_ctx = { .i = 0 };

	bpf_for_each_map_elem(&map, for_each_map_elem_cb, &loop_ctx, 0);
	return choice_arr[loop_ctx.i];
}

static __u64 ringbuf_drain_cb(struct bpf_dynptr *dynptr, void *data)
{
	return oob_state_machine(data);
}

SEC("?raw_tp")
__failure __msg("invalid access to map value, value_size=2 off=32 size=1")
int unsafe_ringbuf_drain(void *unused)
{
	struct num_context loop_ctx = { .i = 0 };

	bpf_user_ringbuf_drain(&ringbuf, ringbuf_drain_cb, &loop_ctx, 0);
	return choice_arr[loop_ctx.i];
}

static __u64 find_vma_cb(struct task_struct *task, struct vm_area_struct *vma, void *data)
{
	return oob_state_machine(data);
}

SEC("?raw_tp")
__failure __msg("invalid access to map value, value_size=2 off=32 size=1")
int unsafe_find_vma(void *unused)
{
	struct task_struct *task = bpf_get_current_task_btf();
	struct num_context loop_ctx = { .i = 0 };

	bpf_find_vma(task, 0, find_vma_cb, &loop_ctx, 0);
	return choice_arr[loop_ctx.i];
}

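/* iter_limit_cb() increments ctx->i once per invocation. With nr_loops == 1
 * the final index (1) is a valid choice_arr index, with nr_loops == 2 it is
 * not, so the first program below is accepted and the second must be
 * rejected.
 */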
static int iter_limit_cb(__u32 idx, struct num_context *ctx)
{
	ctx->i++;
	return 0;
}

SEC("?raw_tp")
__success
int bpf_loop_iter_limit_ok(void *unused)
{
	struct num_context ctx = { .i = 0 };

	bpf_loop(1, iter_limit_cb, &ctx, 0);
	return choice_arr[ctx.i];
}

SEC("?raw_tp")
__failure __msg("invalid access to map value, value_size=2 off=2 size=1")
int bpf_loop_iter_limit_overflow(void *unused)
{
	struct num_context ctx = { .i = 0 };

	bpf_loop(2, iter_limit_cb, &ctx, 0);
	return choice_arr[ctx.i];
}

static int iter_limit_level2a_cb(__u32 idx, struct num_context *ctx)
{
	ctx->i += 100;
	return 0;
}

static int iter_limit_level2b_cb(__u32 idx, struct num_context *ctx)
{
	ctx->i += 10;
	return 0;
}

static int iter_limit_level1_cb(__u32 idx, struct num_context *ctx)
{
	ctx->i += 1;
	bpf_loop(1, iter_limit_level2a_cb, ctx, 0);
	bpf_loop(1, iter_limit_level2b_cb, ctx, 0);
	return 0;
}

/* Check that a path visiting every callback function once has been reached
 * by the verifier. The variables 'ctx{1,2}.i' below serve as flags, with each
 * decimal digit corresponding to a callback visit marker.
 */
SEC("socket")
__success __retval(111111)
int bpf_loop_iter_limit_nested(void *unused)
{
	struct num_context ctx1 = { .i = 0 };
	struct num_context ctx2 = { .i = 0 };
	__u64 a, b, c;

	bpf_loop(1, iter_limit_level1_cb, &ctx1, 0);
	bpf_loop(1, iter_limit_level1_cb, &ctx2, 0);
	a = ctx1.i;
	b = ctx2.i;
	/* Force 'ctx1.i' and 'ctx2.i' to be marked precise. */
	c = choice_arr[(a + b) % 2];
	/* This makes 'c' zero, but neither clang nor the verifier knows it. */
	c /= 10;
	/* Make sure that the verifier does not visit 'impossible' states:
	 * enumerate all possible callback visit masks.
	 */
	if (a != 0 && a != 1 && a != 11 && a != 101 && a != 111 &&
	    b != 0 && b != 1 && b != 11 && b != 101 && b != 111)
		asm volatile ("r0 /= 0;" ::: "r0");
	return 1000 * a + b + c;
}

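/* Each iter_limit_bug_cb() invocation sets exactly one of ctx->{a,b,c} to 42.
 * With two bpf_loop() iterations the combination a == 42, b == 42, c == 7 is
 * reachable (the first iteration takes the 'case 1' branch, the second
 * 'case 2'), so the verifier has to reach the guarded division by zero below
 * and reject the program.
 */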
struct iter_limit_bug_ctx {
	__u64 a;
	__u64 b;
	__u64 c;
};

static __naked void iter_limit_bug_cb(void)
{
	/* This is the same as the C code below, but written
	 * in assembly to control which branches are fall-through.
	 *
	 *   switch (bpf_get_prandom_u32()) {
	 *   case 1:  ctx->a = 42; break;
	 *   case 2:  ctx->b = 42; break;
	 *   default: ctx->c = 42; break;
	 *   }
	 */
	asm volatile (
	"r9 = r2;"
	"call %[bpf_get_prandom_u32];"
	"r1 = r0;"
	"r2 = 42;"
	"r0 = 0;"
	"if r1 == 0x1 goto 1f;"
	"if r1 == 0x2 goto 2f;"
	"*(u64 *)(r9 + 16) = r2;"
	"exit;"
	"1: *(u64 *)(r9 + 0) = r2;"
	"exit;"
	"2: *(u64 *)(r9 + 8) = r2;"
	"exit;"
	:
	: __imm(bpf_get_prandom_u32)
	: __clobber_all
	);
}

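/* The tests below hand-assemble the may_goto instruction
 * (BPF_JMP | BPF_JCOND, opcode 0xe5), emitted as raw bytes since the inline
 * assembler used here has no mnemonic for it: one opcode byte, a zero
 * register byte, a signed 16-bit jump offset and a 32-bit zero immediate.
 */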
int tmp_var;
SEC("socket")
__failure __msg("infinite loop detected at insn 2")
__naked void jgt_imm64_and_may_goto(void)
{
	asm volatile ("			\
	r0 = %[tmp_var] ll;		\
l0_%=:	.byte 0xe5; /* may_goto */	\
	.byte 0; /* regs */		\
	.short -3; /* off -3 */		\
	.long 0; /* imm */		\
	if r0 > 10 goto l0_%=;		\
	r0 = 0;				\
	exit;				\
"	:: __imm_addr(tmp_var)
	: __clobber_all);
}

SEC("socket")
__failure __msg("infinite loop detected at insn 1")
__naked void may_goto_self(void)
{
	asm volatile ("			\
	r0 = *(u32 *)(r10 - 4);		\
l0_%=:	.byte 0xe5; /* may_goto */	\
	.byte 0; /* regs */		\
	.short -1; /* off -1 */		\
	.long 0; /* imm */		\
	if r0 > 10 goto l0_%=;		\
	r0 = 0;				\
	exit;				\
"	::: __clobber_all);
}

SEC("socket")
__success __retval(0)
__naked void may_goto_neg_off(void)
{
	asm volatile ("			\
	r0 = *(u32 *)(r10 - 4);		\
	goto l0_%=;			\
	goto l1_%=;			\
l0_%=:	.byte 0xe5; /* may_goto */	\
	.byte 0; /* regs */		\
	.short -2; /* off -2 */		\
	.long 0; /* imm */		\
	if r0 > 10 goto l0_%=;		\
l1_%=:	r0 = 0;				\
	exit;				\
"	::: __clobber_all);
}

SEC("tc")
__failure
__flag(BPF_F_TEST_STATE_FREQ)
int iter_limit_bug(struct __sk_buff *skb)
{
	struct iter_limit_bug_ctx ctx = { 7, 7, 7 };

	bpf_loop(2, iter_limit_bug_cb, &ctx, 0);

	/* This is the same as the C code below,
	 * written in assembly to guarantee the order of the checks.
	 *
	 *   if (ctx.a == 42 && ctx.b == 42 && ctx.c == 7)
	 *     asm volatile("r1 /= 0;":::"r1");
	 */
	asm volatile (
	"r1 = *(u64 *)%[ctx_a];"
	"if r1 != 42 goto 1f;"
	"r1 = *(u64 *)%[ctx_b];"
	"if r1 != 42 goto 1f;"
	"r1 = *(u64 *)%[ctx_c];"
	"if r1 != 7 goto 1f;"
	"r1 /= 0;"
	"1:"
	:
	: [ctx_a]"m"(ctx.a),
	  [ctx_b]"m"(ctx.b),
	  [ctx_c]"m"(ctx.c)
	: "r1"
	);
	return 0;
}

SEC("socket")
__success __retval(0)
__naked void ja_and_may_goto(void)
{
	asm volatile ("			\
l0_%=:	.byte 0xe5; /* may_goto */	\
	.byte 0; /* regs */		\
	.short 1; /* off 1 */		\
	.long 0; /* imm */		\
	goto l0_%=;			\
	r0 = 0;				\
	exit;				\
"	::: __clobber_common);
}

SEC("socket")
__success __retval(0)
__naked void ja_and_may_goto2(void)
{
	asm volatile ("			\
l0_%=:	r0 = 0;				\
	.byte 0xe5; /* may_goto */	\
	.byte 0; /* regs */		\
	.short 1; /* off 1 */		\
	.long 0; /* imm */		\
	goto l0_%=;			\
	r0 = 0;				\
	exit;				\
"	::: __clobber_common);
}

SEC("socket")
__success __retval(0)
__naked void jlt_and_may_goto(void)
{
	asm volatile ("			\
l0_%=:	call %[bpf_jiffies64];		\
	.byte 0xe5; /* may_goto */	\
	.byte 0; /* regs */		\
	.short 1; /* off 1 */		\
	.long 0; /* imm */		\
	if r0 < 10 goto l0_%=;		\
	r0 = 0;				\
	exit;				\
"	:: __imm(bpf_jiffies64)
	: __clobber_all);
}

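/* gotol is the long-range (32-bit offset) unconditional jump; the guard below
 * limits the next test to toolchains and architectures where it is supported
 * (clang >= 18 and JITs that handle it).
 */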
#if (defined(__TARGET_ARCH_arm64) || defined(__TARGET_ARCH_x86) || \
	(defined(__TARGET_ARCH_riscv) && __riscv_xlen == 64) || \
	defined(__TARGET_ARCH_arm) || defined(__TARGET_ARCH_s390) || \
	defined(__TARGET_ARCH_loongarch)) && \
	__clang_major__ >= 18
SEC("socket")
__success __retval(0)
__naked void gotol_and_may_goto(void)
{
	asm volatile ("			\
l0_%=:	r0 = 0;				\
	.byte 0xe5; /* may_goto */	\
	.byte 0; /* regs */		\
	.short 1; /* off 1 */		\
	.long 0; /* imm */		\
	gotol l0_%=;			\
	r0 = 0;				\
	exit;				\
"	::: __clobber_common);
}
#endif

SEC("socket")
__success __retval(0)
__naked void ja_and_may_goto_subprog(void)
{
	asm volatile ("			\
	call subprog_with_may_goto;	\
	exit;				\
"	::: __clobber_all);
}

static __naked __noinline __used
void subprog_with_may_goto(void)
{
	asm volatile ("			\
l0_%=:	.byte 0xe5; /* may_goto */	\
	.byte 0; /* regs */		\
	.short 1; /* off 1 */		\
	.long 0; /* imm */		\
	goto l0_%=;			\
	r0 = 0;				\
	exit;				\
"	::: __clobber_all);
}

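/* can_loop and cond_break (see bpf_experimental.h) compile down to the
 * may_goto instruction, so the otherwise unbounded loops below terminate and
 * can be verified.
 */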
#define ARR_SZ 1000000
int zero;
char arr[ARR_SZ];

SEC("socket")
__success __retval(0xd495cdc0)
int cond_break1(const void *ctx)
{
	unsigned long i;
	unsigned int sum = 0;

	for (i = zero; i < ARR_SZ && can_loop; i++)
		sum += i;
	for (i = zero; i < ARR_SZ; i++) {
		barrier_var(i);
		sum += i + arr[i];
		cond_break;
	}

	return sum;
}

SEC("socket")
__success __retval(999000000)
int cond_break2(const void *ctx)
{
	int i, j;
	int sum = 0;

	for (i = zero; i < 1000 && can_loop; i++)
		for (j = zero; j < 1000; j++) {
			sum += i + j;
			cond_break;
	}
	return sum;
}

static __noinline int loop(void)
{
	int i, sum = 0;

	for (i = zero; i <= 1000000 && can_loop; i++)
		sum += i;

	return sum;
}

SEC("socket")
__success __retval(0x6a5a2920)
int cond_break3(const void *ctx)
{
	return loop();
}

SEC("socket")
__success __retval(1)
int cond_break4(const void *ctx)
{
	int cnt = zero;

	for (;;) {
		/* should eventually break out of the loop */
		cond_break;
		cnt++;
	}
	/* if we looped a bit, it's a success */
	return cnt > 1 ? 1 : 0;
}

static __noinline int static_subprog(void)
{
	int cnt = zero;

	for (;;) {
		cond_break;
		cnt++;
	}

	return cnt;
}

SEC("socket")
__success __retval(1)
int cond_break5(const void *ctx)
{
	int cnt1 = zero, cnt2;

	for (;;) {
		cond_break;
		cnt1++;
	}

	cnt2 = static_subprog();

	/* main and subprog have to loop a bit */
	return cnt1 > 1 && cnt2 > 1 ? 1 : 0;
}

#define ARR2_SZ 1000
SEC(".data.arr2")
char arr2[ARR2_SZ];

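/* The three tests below combine a bpf_iter_num iterator with a separately
 * tracked index into arr2; the bounds check on 'i' inside the loop body is
 * what makes the array access verifiable.
 */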
SEC("socket")
__success __flag(BPF_F_TEST_STATE_FREQ)
int loop_inside_iter(const void *ctx)
{
	struct bpf_iter_num it;
	int *v, sum = 0;
	__u64 i = 0;

	bpf_iter_num_new(&it, 0, ARR2_SZ);
	while ((v = bpf_iter_num_next(&it))) {
		if (i < ARR2_SZ)
			sum += arr2[i++];
	}
	bpf_iter_num_destroy(&it);
	return sum;
}

SEC("socket")
__success __flag(BPF_F_TEST_STATE_FREQ)
int loop_inside_iter_signed(const void *ctx)
{
	struct bpf_iter_num it;
	int *v, sum = 0;
	long i = 0;

	bpf_iter_num_new(&it, 0, ARR2_SZ);
	while ((v = bpf_iter_num_next(&it))) {
		if (i < ARR2_SZ && i >= 0)
			sum += arr2[i++];
	}
	bpf_iter_num_destroy(&it);
	return sum;
}

volatile const int limit = ARR2_SZ;

SEC("socket")
__success __flag(BPF_F_TEST_STATE_FREQ)
int loop_inside_iter_volatile_limit(const void *ctx)
{
	struct bpf_iter_num it;
	int *v, sum = 0;
	__u64 i = 0;

	bpf_iter_num_new(&it, 0, ARR2_SZ);
	while ((v = bpf_iter_num_next(&it))) {
		if (i < limit)
			sum += arr2[i++];
	}
	bpf_iter_num_destroy(&it);
	return sum;
}

#define ARR_LONG_SZ 1000

SEC(".data.arr_long")
long arr_long[ARR_LONG_SZ];

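/* test1-test4 exercise can_loop driven loops with different index types and
 * iteration directions; in each case the loop condition itself is what bounds
 * the array index.
 */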
SEC("socket")
__success
int test1(const void *ctx)
{
	long i;

	for (i = 0; i < ARR_LONG_SZ && can_loop; i++)
		arr_long[i] = i;
	return 0;
}

SEC("socket")
__success
int test2(const void *ctx)
{
	__u64 i;

	for (i = zero; i < ARR_LONG_SZ && can_loop; i++) {
		barrier_var(i);
		arr_long[i] = i;
	}
	return 0;
}

SEC(".data.arr_foo")
struct {
	int a;
	int b;
} arr_foo[ARR_LONG_SZ];

SEC("socket")
__success
int test3(const void *ctx)
{
	__u64 i;

	for (i = zero; i < ARR_LONG_SZ && can_loop; i++) {
		barrier_var(i);
		arr_foo[i].a = i;
		arr_foo[i].b = i;
	}
	return 0;
}

SEC("socket")
__success
int test4(const void *ctx)
{
	long i;

	for (i = zero + ARR_LONG_SZ - 1; i < ARR_LONG_SZ && i >= 0 && can_loop; i--) {
		barrier_var(i);
		arr_foo[i].a = i;
		arr_foo[i].b = i;
	}
	return 0;
}

char buf[10] SEC(".data.buf");

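/* The 'check add const' tests below cover the tracking of scalars linked by a
 * constant delta (e.g. r1 = r0 + 1 keeps r0's id): once r0's range is known,
 * the ranges of the linked registers follow. The last test also checks that
 * regsafe() distinguishes states whose constant deltas differ.
 */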
SEC("socket")
__description("check add const")
__success
__naked void check_add_const(void)
{
	/* typical LLVM generated loop with may_goto */
	asm volatile ("			\
	call %[bpf_ktime_get_ns];	\
	if r0 > 9 goto l1_%=;		\
l0_%=:	r1 = %[buf];			\
	r2 = r0;			\
	r1 += r2;			\
	r3 = *(u8 *)(r1 +0);		\
	.byte 0xe5; /* may_goto */	\
	.byte 0; /* regs */		\
	.short 4; /* off of l1_%=: */	\
	.long 0; /* imm */		\
	r0 = r2;			\
	r0 += 1;			\
	if r2 < 9 goto l0_%=;		\
	exit;				\
l1_%=:	r0 = 0;				\
	exit;				\
"	:
	: __imm(bpf_ktime_get_ns),
	  __imm_ptr(buf)
	: __clobber_common);
}

SEC("socket")
__failure
__msg("*(u8 *)(r7 +0) = r0")
__msg("invalid access to map value, value_size=10 off=10 size=1")
__naked void check_add_const_3regs(void)
{
	asm volatile (
	"r6 = %[buf];"
	"r7 = %[buf];"
	"call %[bpf_ktime_get_ns];"
	"r1 = r0;"              /* link r0.id == r1.id == r2.id */
	"r2 = r0;"
	"r1 += 1;"              /* r1 == r0+1 */
	"r2 += 2;"              /* r2 == r0+2 */
	"if r0 > 8 goto 1f;"    /* r0 range [0, 8]  */
	"r6 += r1;"             /* r1 range [1, 9]  */
	"r7 += r2;"             /* r2 range [2, 10] */
	"*(u8 *)(r6 +0) = r0;"  /* safe, within bounds   */
	"*(u8 *)(r7 +0) = r0;"  /* unsafe, out of bounds */
	"1: exit;"
	:
	: __imm(bpf_ktime_get_ns),
	  __imm_ptr(buf)
	: __clobber_common);
}

SEC("socket")
__failure
__msg("*(u8 *)(r8 -1) = r0")
__msg("invalid access to map value, value_size=10 off=10 size=1")
__naked void check_add_const_3regs_2if(void)
{
	asm volatile (
	"r6 = %[buf];"
	"r7 = %[buf];"
	"r8 = %[buf];"
	"call %[bpf_ktime_get_ns];"
	"if r0 < 2 goto 1f;"
	"r1 = r0;"              /* link r0.id == r1.id == r2.id */
	"r2 = r0;"
	"r1 += 1;"              /* r1 == r0+1 */
	"r2 += 2;"              /* r2 == r0+2 */
	"if r2 > 11 goto 1f;"   /* r2 range [0, 11] -> r0 range [-2, 9]; r1 range [-1, 10] */
	"if r0 s< 0 goto 1f;"   /* r0 range [0, 9] -> r1 range [1, 10]; r2 range [2, 11]; */
	"r6 += r0;"             /* r0 range [0, 9]  */
	"r7 += r1;"             /* r1 range [1, 10] */
	"r8 += r2;"             /* r2 range [2, 11] */
	"*(u8 *)(r6 +0) = r0;"  /* safe, within bounds   */
	"*(u8 *)(r7 -1) = r0;"  /* safe */
	"*(u8 *)(r8 -1) = r0;"  /* unsafe */
	"1: exit;"
	:
	: __imm(bpf_ktime_get_ns),
	  __imm_ptr(buf)
	: __clobber_common);
}

SEC("socket")
__failure
__flag(BPF_F_TEST_STATE_FREQ)
__naked void check_add_const_regsafe_off(void)
{
	asm volatile (
	"r8 = %[buf];"
	"call %[bpf_ktime_get_ns];"
	"r6 = r0;"
	"call %[bpf_ktime_get_ns];"
	"r7 = r0;"
	"call %[bpf_ktime_get_ns];"
	"r1 = r0;"              /* same ids for r1 and r0 */
	"if r6 > r7 goto 1f;"   /* this jump can't be predicted */
	"r1 += 1;"              /* r1.off == +1 */
	"goto 2f;"
	"1: r1 += 100;"         /* r1.off == +100 */
	"goto +0;"              /* verify r1.off in regsafe() after this insn */
	"2: if r0 > 8 goto 3f;" /* r0 range [0,8], r1 range either [1,9] or [100,108]*/
	"r8 += r1;"
	"*(u8 *)(r8 +0) = r0;"  /* potentially unsafe, buf size is 10 */
	"3: exit;"
	:
	: __imm(bpf_ktime_get_ns),
	  __imm_ptr(buf)
	: __clobber_common);
}

char _license[] SEC("license") = "GPL";