1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
3
4 #include <stdbool.h>
5 #include <linux/bpf.h>
6 #include <bpf/bpf_helpers.h>
7 #include "bpf_misc.h"
8 #include "bpf_compiler.h"
9
10 static volatile int zero = 0;
11
12 int my_pid;
13 int arr[256];
14 int small_arr[16] SEC(".data.small_arr");
15
16 struct {
17 __uint(type, BPF_MAP_TYPE_HASH);
18 __uint(max_entries, 10);
19 __type(key, int);
20 __type(value, int);
21 } amap SEC(".maps");
22
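/* With REAL_TEST defined, the guard below returns early unless the current
 * PID matches the my_pid global (presumably filled in by user space);
 * otherwise it compiles to a no-op so the verifier-only tests run their
 * bodies unconditionally.
 */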
23 #ifdef REAL_TEST
24 #define MY_PID_GUARD() if (my_pid != (bpf_get_current_pid_tgid() >> 32)) return 0
25 #else
26 #define MY_PID_GUARD() ({ })
27 #endif
28
29 SEC("?raw_tp")
30 __failure __msg("math between map_value pointer and register with unbounded min value is not allowed")
31 int iter_err_unsafe_c_loop(const void *ctx)
32 {
33 struct bpf_iter_num it;
34 int *v, i = zero; /* obscure initial value of i */
35
36 MY_PID_GUARD();
37
38 bpf_iter_num_new(&it, 0, 1000);
39 while ((v = bpf_iter_num_next(&it))) {
40 i++;
41 }
42 bpf_iter_num_destroy(&it);
43
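/* By this point the verifier has no bound on i: it started from a volatile
 * zero and was incremented an unknown number of times, so using it as an
 * index into small_arr below must be rejected.
 */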
44 small_arr[i] = 123; /* invalid */
45
46 return 0;
47 }
48
49 SEC("?raw_tp")
50 __failure __msg("unbounded memory access")
51 int iter_err_unsafe_asm_loop(const void *ctx)
52 {
53 struct bpf_iter_num it;
54
55 MY_PID_GUARD();
56
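/* Same idea as above, but open-coded in BPF assembly: r6 counts iterations
 * and is then used as an unbounded index into small_arr.
 */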
57 asm volatile (
58 "r6 = %[zero];" /* iteration counter */
59 "r1 = %[it];" /* iterator state */
60 "r2 = 0;"
61 "r3 = 1000;"
62 "r4 = 1;"
63 "call %[bpf_iter_num_new];"
64 "loop:"
65 "r1 = %[it];"
66 "call %[bpf_iter_num_next];"
67 "if r0 == 0 goto out;"
68 "r6 += 1;"
69 "goto loop;"
70 "out:"
71 "r1 = %[it];"
72 "call %[bpf_iter_num_destroy];"
73 "r1 = %[small_arr];"
74 "r2 = r6;"
75 "r2 <<= 2;"
76 "r1 += r2;"
77 "*(u32 *)(r1 + 0) = r6;" /* invalid */
78 :
79 : [it]"r"(&it),
80 [small_arr]"r"(small_arr),
81 [zero]"r"(zero),
82 __imm(bpf_iter_num_new),
83 __imm(bpf_iter_num_next),
84 __imm(bpf_iter_num_destroy)
85 : __clobber_common, "r6"
86 );
87
88 return 0;
89 }
90
91 SEC("raw_tp")
92 __success
93 int iter_while_loop(const void *ctx)
94 {
95 struct bpf_iter_num it;
96 int *v;
97
98 MY_PID_GUARD();
99
100 bpf_iter_num_new(&it, 0, 3);
101 while ((v = bpf_iter_num_next(&it))) {
102 bpf_printk("ITER_BASIC: E1 VAL: v=%d", *v);
103 }
104 bpf_iter_num_destroy(&it);
105
106 return 0;
107 }
108
109 SEC("raw_tp")
110 __success
111 int iter_while_loop_auto_cleanup(const void *ctx)
112 {
113 __attribute__((cleanup(bpf_iter_num_destroy))) struct bpf_iter_num it;
114 int *v;
115
116 MY_PID_GUARD();
117
118 bpf_iter_num_new(&it, 0, 3);
119 while ((v = bpf_iter_num_next(&it))) {
120 bpf_printk("ITER_BASIC: E1 VAL: v=%d", *v);
121 }
122 /* (!) no explicit bpf_iter_num_destroy() */
123
124 return 0;
125 }
126
127 SEC("raw_tp")
128 __success
129 int iter_for_loop(const void *ctx)
130 {
131 struct bpf_iter_num it;
132 int *v;
133
134 MY_PID_GUARD();
135
136 bpf_iter_num_new(&it, 5, 10);
137 for (v = bpf_iter_num_next(&it); v; v = bpf_iter_num_next(&it)) {
138 bpf_printk("ITER_BASIC: E2 VAL: v=%d", *v);
139 }
140 bpf_iter_num_destroy(&it);
141
142 return 0;
143 }
144
145 SEC("raw_tp")
146 __success
147 int iter_bpf_for_each_macro(const void *ctx)
148 {
149 int *v;
150
151 MY_PID_GUARD();
152
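/* bpf_for_each() hides the same bpf_iter_num_new()/_next()/_destroy()
 * sequence used by the open-coded loops above, including cleanup on exit
 * from the loop.
 */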
153 bpf_for_each(num, v, 5, 10) {
154 bpf_printk("ITER_BASIC: E2 VAL: v=%d", *v);
155 }
156
157 return 0;
158 }
159
160 SEC("raw_tp")
161 __success
162 int iter_bpf_for_macro(const void *ctx)
163 {
164 int i;
165
166 MY_PID_GUARD();
167
168 bpf_for(i, 5, 10) {
169 bpf_printk("ITER_BASIC: E2 VAL: v=%d", i);
170 }
171
172 return 0;
173 }
174
175 SEC("raw_tp")
176 __success
177 int iter_pragma_unroll_loop(const void *ctx)
178 {
179 struct bpf_iter_num it;
180 int *v, i;
181
182 MY_PID_GUARD();
183
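/* the iterator yields only two values (0 and 1), so the third
 * bpf_iter_num_next() call below returns NULL and is printed as -1
 */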
184 bpf_iter_num_new(&it, 0, 2);
185 __pragma_loop_no_unroll
186 for (i = 0; i < 3; i++) {
187 v = bpf_iter_num_next(&it);
188 bpf_printk("ITER_BASIC: E3 VAL: i=%d v=%d", i, v ? *v : -1);
189 }
190 bpf_iter_num_destroy(&it);
191
192 return 0;
193 }
194
195 SEC("raw_tp")
196 __success
197 int iter_manual_unroll_loop(const void *ctx)
198 {
199 struct bpf_iter_num it;
200 int *v;
201
202 MY_PID_GUARD();
203
204 bpf_iter_num_new(&it, 100, 200);
205 v = bpf_iter_num_next(&it);
206 bpf_printk("ITER_BASIC: E4 VAL: v=%d", v ? *v : -1);
207 v = bpf_iter_num_next(&it);
208 bpf_printk("ITER_BASIC: E4 VAL: v=%d", v ? *v : -1);
209 v = bpf_iter_num_next(&it);
210 bpf_printk("ITER_BASIC: E4 VAL: v=%d", v ? *v : -1);
211 v = bpf_iter_num_next(&it);
212 bpf_printk("ITER_BASIC: E4 VAL: v=%d\n", v ? *v : -1);
213 bpf_iter_num_destroy(&it);
214
215 return 0;
216 }
217
218 SEC("raw_tp")
219 __success
220 int iter_multiple_sequential_loops(const void *ctx)
221 {
222 struct bpf_iter_num it;
223 int *v, i;
224
225 MY_PID_GUARD();
226
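/* the same on-stack iterator is reused for four back-to-back loops; each
 * one is fully destroyed before the next bpf_iter_num_new() call
 */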
227 bpf_iter_num_new(&it, 0, 3);
228 while ((v = bpf_iter_num_next(&it))) {
229 bpf_printk("ITER_BASIC: E1 VAL: v=%d", *v);
230 }
231 bpf_iter_num_destroy(&it);
232
233 bpf_iter_num_new(&it, 5, 10);
234 for (v = bpf_iter_num_next(&it); v; v = bpf_iter_num_next(&it)) {
235 bpf_printk("ITER_BASIC: E2 VAL: v=%d", *v);
236 }
237 bpf_iter_num_destroy(&it);
238
239 bpf_iter_num_new(&it, 0, 2);
240 __pragma_loop_no_unroll
241 for (i = 0; i < 3; i++) {
242 v = bpf_iter_num_next(&it);
243 bpf_printk("ITER_BASIC: E3 VAL: i=%d v=%d", i, v ? *v : -1);
244 }
245 bpf_iter_num_destroy(&it);
246
247 bpf_iter_num_new(&it, 100, 200);
248 v = bpf_iter_num_next(&it);
249 bpf_printk("ITER_BASIC: E4 VAL: v=%d", v ? *v : -1);
250 v = bpf_iter_num_next(&it);
251 bpf_printk("ITER_BASIC: E4 VAL: v=%d", v ? *v : -1);
252 v = bpf_iter_num_next(&it);
253 bpf_printk("ITER_BASIC: E4 VAL: v=%d", v ? *v : -1);
254 v = bpf_iter_num_next(&it);
255 bpf_printk("ITER_BASIC: E4 VAL: v=%d\n", v ? *v : -1);
256 bpf_iter_num_destroy(&it);
257
258 return 0;
259 }
260
261 SEC("raw_tp")
262 __success
263 int iter_limit_cond_break_loop(const void *ctx)
264 {
265 struct bpf_iter_num it;
266 int *v, i = 0, sum = 0;
267
268 MY_PID_GUARD();
269
270 bpf_iter_num_new(&it, 0, 10);
271 while ((v = bpf_iter_num_next(&it))) {
272 bpf_printk("ITER_SIMPLE: i=%d v=%d", i, *v);
273 sum += *v;
274
275 i++;
276 if (i > 3)
277 break;
278 }
279 bpf_iter_num_destroy(&it);
280
281 bpf_printk("ITER_SIMPLE: sum=%d\n", sum);
282
283 return 0;
284 }
285
286 SEC("raw_tp")
287 __success
288 int iter_obfuscate_counter(const void *ctx)
289 {
290 struct bpf_iter_num it;
291 int *v, sum = 0;
292 /* Make i's initial value unknowable to the verifier to prevent it from
293 * pruning the if/else branch inside the loop body and marking i as precise.
294 */
295 int i = zero;
296
297 MY_PID_GUARD();
298
299 bpf_iter_num_new(&it, 0, 10);
300 while ((v = bpf_iter_num_next(&it))) {
301 int x;
302
303 i += 1;
304
305 /* If we initialized i as `int i = 0;` above, verifier would
306 * track that i becomes 1 on first iteration after increment
307 * above, and here verifier would eagerly prune else branch
308 * and mark i as precise, ruining open-coded iterator logic
309 * completely, as each next iteration would have a different
310 * *precise* value of i, and thus there would be no
311 * convergence of state. This would result in reaching maximum
312 * instruction limit, no matter what the limit is.
313 */
314 if (i == 1)
315 x = 123;
316 else
317 x = i * 3 + 1;
318
319 bpf_printk("ITER_OBFUSCATE_COUNTER: i=%d v=%d x=%d", i, *v, x);
320
321 sum += x;
322 }
323 bpf_iter_num_destroy(&it);
324
325 bpf_printk("ITER_OBFUSCATE_COUNTER: sum=%d\n", sum);
326
327 return 0;
328 }
329
330 SEC("raw_tp")
331 __success
332 int iter_search_loop(const void *ctx)
333 {
334 struct bpf_iter_num it;
335 int *v, *elem = NULL;
336 bool found = false;
337
338 MY_PID_GUARD();
339
340 bpf_iter_num_new(&it, 0, 10);
341
342 while ((v = bpf_iter_num_next(&it))) {
343 bpf_printk("ITER_SEARCH_LOOP: v=%d", *v);
344
345 if (*v == 2) {
346 found = true;
347 elem = v;
348 barrier_var(elem);
349 }
350 }
351
352 /* should fail to verify if bpf_iter_num_destroy() is here */
353
354 if (found)
355 /* the found element's value will be wrong by this point; we should have
356 * copied the value to a variable, but here we only want to make sure we
357 * can still access that memory after the loop
358 */
359 bpf_printk("ITER_SEARCH_LOOP: FOUND IT = %d!\n", *elem);
360 else
361 bpf_printk("ITER_SEARCH_LOOP: NOT FOUND IT!\n");
362
363 bpf_iter_num_destroy(&it);
364
365 return 0;
366 }
367
368 SEC("raw_tp")
369 __success
370 int iter_array_fill(const void *ctx)
371 {
372 int sum, i;
373
374 MY_PID_GUARD();
375
376 bpf_for(i, 0, ARRAY_SIZE(arr)) {
377 arr[i] = i * 2;
378 }
379
380 sum = 0;
381 bpf_for(i, 0, ARRAY_SIZE(arr)) {
382 sum += arr[i];
383 }
384
385 bpf_printk("ITER_ARRAY_FILL: sum=%d (should be %d)\n", sum, 255 * 256);
386
387 return 0;
388 }
389
390 static int arr2d[4][5];
391 static int arr2d_row_sums[4];
392 static int arr2d_col_sums[5];
393
394 SEC("raw_tp")
395 __success
396 int iter_nested_iters(const void *ctx)
397 {
398 int sum, row, col;
399
400 MY_PID_GUARD();
401
402 bpf_for(row, 0, ARRAY_SIZE(arr2d)) {
403 bpf_for(col, 0, ARRAY_SIZE(arr2d[0])) {
404 arr2d[row][col] = row * col;
405 }
406 }
407
408 /* zero-initialize sums */
409 sum = 0;
410 bpf_for(row, 0, ARRAY_SIZE(arr2d)) {
411 arr2d_row_sums[row] = 0;
412 }
413 bpf_for(col, 0, ARRAY_SIZE(arr2d[0])) {
414 arr2d_col_sums[col] = 0;
415 }
416
417 /* calculate sums */
418 bpf_for(row, 0, ARRAY_SIZE(arr2d)) {
419 bpf_for(col, 0, ARRAY_SIZE(arr2d[0])) {
420 sum += arr2d[row][col];
421 arr2d_row_sums[row] += arr2d[row][col];
422 arr2d_col_sums[col] += arr2d[row][col];
423 }
424 }
425
426 bpf_printk("ITER_NESTED_ITERS: total sum=%d", sum);
427 bpf_for(row, 0, ARRAY_SIZE(arr2d)) {
428 bpf_printk("ITER_NESTED_ITERS: row #%d sum=%d", row, arr2d_row_sums[row]);
429 }
430 bpf_for(col, 0, ARRAY_SIZE(arr2d[0])) {
431 bpf_printk("ITER_NESTED_ITERS: col #%d sum=%d%s",
432 col, arr2d_col_sums[col],
433 col == ARRAY_SIZE(arr2d[0]) - 1 ? "\n" : "");
434 }
435
436 return 0;
437 }
438
439 SEC("raw_tp")
440 __success
441 int iter_nested_deeply_iters(const void *ctx)
442 {
443 int sum = 0;
444
445 MY_PID_GUARD();
446
447 bpf_repeat(10) {
448 bpf_repeat(10) {
449 bpf_repeat(10) {
450 bpf_repeat(10) {
451 bpf_repeat(10) {
452 sum += 1;
453 }
454 }
455 }
456 }
457 /* validate that we can break from inside bpf_repeat() */
458 break;
459 }
460
461 return sum;
462 }
463
464 static __noinline void fill_inner_dimension(int row)
465 {
466 int col;
467
468 bpf_for(col, 0, ARRAY_SIZE(arr2d[0])) {
469 arr2d[row][col] = row * col;
470 }
471 }
472
473 static __noinline int sum_inner_dimension(int row)
474 {
475 int sum = 0, col;
476
477 bpf_for(col, 0, ARRAY_SIZE(arr2d[0])) {
478 sum += arr2d[row][col];
479 arr2d_row_sums[row] += arr2d[row][col];
480 arr2d_col_sums[col] += arr2d[row][col];
481 }
482
483 return sum;
484 }
485
486 SEC("raw_tp")
487 __success
488 int iter_subprog_iters(const void *ctx)
489 {
490 int sum, row, col;
491
492 MY_PID_GUARD();
493
494 bpf_for(row, 0, ARRAY_SIZE(arr2d)) {
495 fill_inner_dimension(row);
496 }
497
498 /* zero-initialize sums */
499 sum = 0;
500 bpf_for(row, 0, ARRAY_SIZE(arr2d)) {
501 arr2d_row_sums[row] = 0;
502 }
503 bpf_for(col, 0, ARRAY_SIZE(arr2d[0])) {
504 arr2d_col_sums[col] = 0;
505 }
506
507 /* calculate sums */
508 bpf_for(row, 0, ARRAY_SIZE(arr2d)) {
509 sum += sum_inner_dimension(row);
510 }
511
512 bpf_printk("ITER_SUBPROG_ITERS: total sum=%d", sum);
513 bpf_for(row, 0, ARRAY_SIZE(arr2d)) {
514 bpf_printk("ITER_SUBPROG_ITERS: row #%d sum=%d",
515 row, arr2d_row_sums[row]);
516 }
517 bpf_for(col, 0, ARRAY_SIZE(arr2d[0])) {
518 bpf_printk("ITER_SUBPROG_ITERS: col #%d sum=%d%s",
519 col, arr2d_col_sums[col],
520 col == ARRAY_SIZE(arr2d[0]) - 1 ? "\n" : "");
521 }
522
523 return 0;
524 }
525
526 struct {
527 __uint(type, BPF_MAP_TYPE_ARRAY);
528 __type(key, int);
529 __type(value, int);
530 __uint(max_entries, 1000);
531 } arr_map SEC(".maps");
532
533 SEC("?raw_tp")
534 __failure __msg("invalid mem access 'scalar'")
535 int iter_err_too_permissive1(const void *ctx)
536 {
537 int *map_val = NULL;
538 int key = 0;
539
540 MY_PID_GUARD();
541
542 map_val = bpf_map_lookup_elem(&arr_map, &key);
543 if (!map_val)
544 return 0;
545
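/* map_val is clobbered with NULL inside the loop, so the pre-loop NULL
 * check no longer protects the store below; the verifier must reject it
 */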
546 bpf_repeat(1000000) {
547 map_val = NULL;
548 }
549
550 *map_val = 123;
551
552 return 0;
553 }
554
555 SEC("?raw_tp")
556 __failure __msg("invalid mem access 'map_value_or_null'")
557 int iter_err_too_permissive2(const void *ctx)
558 {
559 int *map_val = NULL;
560 int key = 0;
561
562 MY_PID_GUARD();
563
564 map_val = bpf_map_lookup_elem(&arr_map, &key);
565 if (!map_val)
566 return 0;
567
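/* the lookup inside the loop leaves map_val as map_value_or_null, so the
 * unchecked store after the loop must be rejected
 */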
568 bpf_repeat(1000000) {
569 map_val = bpf_map_lookup_elem(&arr_map, &key);
570 }
571
572 *map_val = 123;
573
574 return 0;
575 }
576
577 SEC("?raw_tp")
578 __failure __msg("invalid mem access 'map_value_or_null'")
579 int iter_err_too_permissive3(const void *ctx)
580 {
581 int *map_val = NULL;
582 int key = 0;
583 bool found = false;
584
585 MY_PID_GUARD();
586
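/* found == true does not imply that the last lookup returned non-NULL, so
 * dereferencing map_val below without a check must be rejected
 */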
587 bpf_repeat(1000000) {
588 map_val = bpf_map_lookup_elem(&arr_map, &key);
589 found = true;
590 }
591
592 if (found)
593 *map_val = 123;
594
595 return 0;
596 }
597
598 SEC("raw_tp")
599 __success
600 int iter_tricky_but_fine(const void *ctx)
601 {
602 int *map_val = NULL;
603 int key = 0;
604 bool found = false;
605
606 MY_PID_GUARD();
607
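/* here the break guarantees that found is only set after a successful
 * NULL check, so the dereference below is safe and must verify
 */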
608 bpf_repeat(1000000) {
609 map_val = bpf_map_lookup_elem(&arr_map, &key);
610 if (map_val) {
611 found = true;
612 break;
613 }
614 }
615
616 if (found)
617 *map_val = 123;
618
619 return 0;
620 }
621
622 #define __bpf_memzero(p, sz) bpf_probe_read_kernel((p), (sz), 0)
623
624 SEC("raw_tp")
625 __success
626 int iter_stack_array_loop(const void *ctx)
627 {
628 long arr1[16], arr2[16], sum = 0;
629 int i;
630
631 MY_PID_GUARD();
632
633 /* zero-init arr1 and arr2 in such a way that the verifier doesn't know
634 * they are all zeros; if we don't do that, we'll make the BPF verifier
635 * track all combinations of zero/non-zero stack slots for arr1/arr2, which
636 * will lead to O(2^(ARRAY_SIZE(arr1)+ARRAY_SIZE(arr2))) different
637 * states
638 */
639 __bpf_memzero(arr1, sizeof(arr1));
640 __bpf_memzero(arr2, sizeof(arr2));
641
642 /* validate that we can break and continue when using bpf_for() */
643 bpf_for(i, 0, ARRAY_SIZE(arr1)) {
644 if (i & 1) {
645 arr1[i] = i;
646 continue;
647 } else {
648 arr2[i] = i;
649 break;
650 }
651 }
652
653 bpf_for(i, 0, ARRAY_SIZE(arr1)) {
654 sum += arr1[i] + arr2[i];
655 }
656
657 return sum;
658 }
659
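/* The subprogs below receive an already-constructed iterator from the
 * caller and advance it themselves; the explicit n bound keeps the array
 * accesses in range.
 */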
660 static __noinline void fill(struct bpf_iter_num *it, int *arr, __u32 n, int mul)
661 {
662 int *t, i;
663
664 while ((t = bpf_iter_num_next(it))) {
665 i = *t;
666 if (i >= n)
667 break;
668 arr[i] = i * mul;
669 }
670 }
671
672 static __noinline int sum(struct bpf_iter_num *it, int *arr, __u32 n)
673 {
674 int *t, i, sum = 0;
675
676 while ((t = bpf_iter_num_next(it))) {
677 i = *t;
678 if ((__u32)i >= n)
679 break;
680 sum += arr[i];
681 }
682
683 return sum;
684 }
685
686 SEC("raw_tp")
687 __success
688 int iter_pass_iter_ptr_to_subprog(const void *ctx)
689 {
690 int arr1[16], arr2[32];
691 struct bpf_iter_num it;
692 int n, sum1, sum2;
693
694 MY_PID_GUARD();
695
696 /* fill arr1 */
697 n = ARRAY_SIZE(arr1);
698 bpf_iter_num_new(&it, 0, n);
699 fill(&it, arr1, n, 2);
700 bpf_iter_num_destroy(&it);
701
702 /* fill arr2 */
703 n = ARRAY_SIZE(arr2);
704 bpf_iter_num_new(&it, 0, n);
705 fill(&it, arr2, n, 10);
706 bpf_iter_num_destroy(&it);
707
708 /* sum arr1 */
709 n = ARRAY_SIZE(arr1);
710 bpf_iter_num_new(&it, 0, n);
711 sum1 = sum(&it, arr1, n);
712 bpf_iter_num_destroy(&it);
713
714 /* sum arr2 */
715 n = ARRAY_SIZE(arr2);
716 bpf_iter_num_new(&it, 0, n);
717 sum2 = sum(&it, arr2, n);
718 bpf_iter_num_destroy(&it);
719
720 bpf_printk("sum1=%d, sum2=%d", sum1, sum2);
721
722 return 0;
723 }
724
725 SEC("?raw_tp")
726 __failure
727 __msg("R1 type=scalar expected=fp")
728 __naked int delayed_read_mark(void)
729 {
730 /* This is equivalent to C program below.
731 * The call to bpf_iter_num_next() is reachable with r7 values &fp[-16] and 0xdead.
732 * State with r7=&fp[-16] is visited first and follows r6 != 42 ... continue branch.
733 * At this point iterator next() call is reached with r7 that has no read mark.
734 * Loop body with r7=0xdead would only be visited if verifier would decide to continue
735 * with a second loop iteration. Absence of a read mark on r7 might affect the state
736 * equivalence logic used for iterator convergence tracking.
737 *
738 * r7 = &fp[-16]
739 * fp[-16] = 0
740 * r6 = bpf_get_prandom_u32()
741 * bpf_iter_num_new(&fp[-8], 0, 10)
742 * while (bpf_iter_num_next(&fp[-8])) {
743 * r6++
744 * if (r6 != 42) {
745 * r7 = 0xdead
746 * continue;
747 * }
748 * bpf_probe_read_user(r7, 8, 0xdeadbeef); // this is not safe
749 * }
750 * bpf_iter_num_destroy(&fp[-8])
751 * return 0
752 */
753 asm volatile (
754 "r7 = r10;"
755 "r7 += -16;"
756 "r0 = 0;"
757 "*(u64 *)(r7 + 0) = r0;"
758 "call %[bpf_get_prandom_u32];"
759 "r6 = r0;"
760 "r1 = r10;"
761 "r1 += -8;"
762 "r2 = 0;"
763 "r3 = 10;"
764 "call %[bpf_iter_num_new];"
765 "1:"
766 "r1 = r10;"
767 "r1 += -8;"
768 "call %[bpf_iter_num_next];"
769 "if r0 == 0 goto 2f;"
770 "r6 += 1;"
771 "if r6 != 42 goto 3f;"
772 "r7 = 0xdead;"
773 "goto 1b;"
774 "3:"
775 "r1 = r7;"
776 "r2 = 8;"
777 "r3 = 0xdeadbeef;"
778 "call %[bpf_probe_read_user];"
779 "goto 1b;"
780 "2:"
781 "r1 = r10;"
782 "r1 += -8;"
783 "call %[bpf_iter_num_destroy];"
784 "r0 = 0;"
785 "exit;"
786 :
787 : __imm(bpf_get_prandom_u32),
788 __imm(bpf_iter_num_new),
789 __imm(bpf_iter_num_next),
790 __imm(bpf_iter_num_destroy),
791 __imm(bpf_probe_read_user)
792 : __clobber_all
793 );
794 }
795
796 SEC("?raw_tp")
797 __failure
798 __msg("math between fp pointer and register with unbounded")
799 __naked int delayed_precision_mark(void)
800 {
801 /* This is equivalent to C program below.
802 * The test is similar to delayed_read_mark but verifies that incomplete
803 * precision marks don't fool the verifier.
804 * The call to bpf_iter_num_next() is reachable with r7 values -16 and -32.
805 * State with r7=-16 is visited first and follows r6 != 42 ... continue branch.
806 * At this point iterator next() call is reached with r7 that has no read
807 * and precision marks.
808 * Loop body with r7=-32 would only be visited if verifier would decide to continue
809 * with a second loop iteration. Absence of a precision mark on r7 might affect the state
810 * equivalence logic used for iterator convergence tracking.
811 *
812 * r8 = 0
813 * fp[-16] = 0
814 * r7 = -16
815 * r6 = bpf_get_prandom_u32()
816 * bpf_iter_num_new(&fp[-8], 0, 10)
817 * while (bpf_iter_num_next(&fp[-8])) {
818 * if (r6 != 42) {
819 * r7 = -32
820 * r6 = bpf_get_prandom_u32()
821 * continue;
822 * }
823 * r0 = r10
824 * r0 += r7
825 * r8 = *(u64 *)(r0 + 0) // this is not safe
826 * r6 = bpf_get_prandom_u32()
827 * }
828 * bpf_iter_num_destroy(&fp[-8])
829 * return r8
830 */
831 asm volatile (
832 "r8 = 0;"
833 "*(u64 *)(r10 - 16) = r8;"
834 "r7 = -16;"
835 "call %[bpf_get_prandom_u32];"
836 "r6 = r0;"
837 "r1 = r10;"
838 "r1 += -8;"
839 "r2 = 0;"
840 "r3 = 10;"
841 "call %[bpf_iter_num_new];"
842 "1:"
843 "r1 = r10;"
844 "r1 += -8;"
845 "call %[bpf_iter_num_next];"
846 "if r0 == 0 goto 2f;"
847 "if r6 != 42 goto 3f;"
848 "r7 = -33;"
849 "call %[bpf_get_prandom_u32];"
850 "r6 = r0;"
851 "goto 1b;"
852 "3:"
853 "r0 = r10;"
854 "r0 += r7;"
855 "r8 = *(u64 *)(r0 + 0);"
856 "call %[bpf_get_prandom_u32];"
857 "r6 = r0;"
858 "goto 1b;"
859 "2:"
860 "r1 = r10;"
861 "r1 += -8;"
862 "call %[bpf_iter_num_destroy];"
863 "r0 = r8;"
864 "exit;"
865 :
866 : __imm(bpf_get_prandom_u32),
867 __imm(bpf_iter_num_new),
868 __imm(bpf_iter_num_next),
869 __imm(bpf_iter_num_destroy),
870 __imm(bpf_probe_read_user)
871 : __clobber_all
872 );
873 }
874
875 SEC("?raw_tp")
876 __failure
877 __msg("math between fp pointer and register with unbounded")
878 __flag(BPF_F_TEST_STATE_FREQ)
879 __naked int loop_state_deps1(void)
880 {
881 /* This is equivalent to C program below.
882 *
883 * The case turns out to be tricky in the sense that:
884 * - states with c=-25 are explored only on a second iteration
885 * of the outer loop;
886 * - states with read+precise mark on c are explored only on
887 * second iteration of the inner loop and in a state which
888 * is pushed to states stack first.
889 *
890 * Depending on the details of iterator convergence logic
891 * verifier might stop states traversal too early and miss
892 * unsafe c=-25 memory access.
893 *
894 * j = iter_new(); // fp[-16]
895 * a = 0; // r6
896 * b = 0; // r7
897 * c = -24; // r8
898 * while (iter_next(j)) {
899 * i = iter_new(); // fp[-8]
900 * a = 0; // r6
901 * b = 0; // r7
902 * while (iter_next(i)) {
903 * if (a == 1) {
904 * a = 0;
905 * b = 1;
906 * } else if (a == 0) {
907 * a = 1;
908 * if (random() == 42)
909 * continue;
910 * if (b == 1) {
911 * *(r10 + c) = 7; // this is not safe
912 * iter_destroy(i);
913 * iter_destroy(j);
914 * return;
915 * }
916 * }
917 * }
918 * iter_destroy(i);
919 * a = 0;
920 * b = 0;
921 * c = -25;
922 * }
923 * iter_destroy(j);
924 * return;
925 */
926 asm volatile (
927 "r1 = r10;"
928 "r1 += -16;"
929 "r2 = 0;"
930 "r3 = 10;"
931 "call %[bpf_iter_num_new];"
932 "r6 = 0;"
933 "r7 = 0;"
934 "r8 = -24;"
935 "j_loop_%=:"
936 "r1 = r10;"
937 "r1 += -16;"
938 "call %[bpf_iter_num_next];"
939 "if r0 == 0 goto j_loop_end_%=;"
940 "r1 = r10;"
941 "r1 += -8;"
942 "r2 = 0;"
943 "r3 = 10;"
944 "call %[bpf_iter_num_new];"
945 "r6 = 0;"
946 "r7 = 0;"
947 "i_loop_%=:"
948 "r1 = r10;"
949 "r1 += -8;"
950 "call %[bpf_iter_num_next];"
951 "if r0 == 0 goto i_loop_end_%=;"
952 "check_one_r6_%=:"
953 "if r6 != 1 goto check_zero_r6_%=;"
954 "r6 = 0;"
955 "r7 = 1;"
956 "goto i_loop_%=;"
957 "check_zero_r6_%=:"
958 "if r6 != 0 goto i_loop_%=;"
959 "r6 = 1;"
960 "call %[bpf_get_prandom_u32];"
961 "if r0 != 42 goto check_one_r7_%=;"
962 "goto i_loop_%=;"
963 "check_one_r7_%=:"
964 "if r7 != 1 goto i_loop_%=;"
965 "r0 = r10;"
966 "r0 += r8;"
967 "r1 = 7;"
968 "*(u64 *)(r0 + 0) = r1;"
969 "r1 = r10;"
970 "r1 += -8;"
971 "call %[bpf_iter_num_destroy];"
972 "r1 = r10;"
973 "r1 += -16;"
974 "call %[bpf_iter_num_destroy];"
975 "r0 = 0;"
976 "exit;"
977 "i_loop_end_%=:"
978 "r1 = r10;"
979 "r1 += -8;"
980 "call %[bpf_iter_num_destroy];"
981 "r6 = 0;"
982 "r7 = 0;"
983 "r8 = -25;"
984 "goto j_loop_%=;"
985 "j_loop_end_%=:"
986 "r1 = r10;"
987 "r1 += -16;"
988 "call %[bpf_iter_num_destroy];"
989 "r0 = 0;"
990 "exit;"
991 :
992 : __imm(bpf_get_prandom_u32),
993 __imm(bpf_iter_num_new),
994 __imm(bpf_iter_num_next),
995 __imm(bpf_iter_num_destroy)
996 : __clobber_all
997 );
998 }
999
1000 SEC("?raw_tp")
1001 __failure
1002 __msg("math between fp pointer and register with unbounded")
1003 __flag(BPF_F_TEST_STATE_FREQ)
1004 __naked int loop_state_deps2(void)
1005 {
1006 /* This is equivalent to C program below.
1007 *
1008 * The case turns out to be tricky in the sense that:
1009 * - states with read+precise mark on c are explored only on a second
1010 * iteration of the first inner loop and in a state which is pushed to
1011 * states stack first.
1012 * - states with c=-25 are explored only on a second iteration of the
1013 * second inner loop and in a state which is pushed to states stack
1014 * first.
1015 *
1016 * Depending on the details of iterator convergence logic
1017 * verifier might stop states traversal too early and miss
1018 * unsafe c=-25 memory access.
1019 *
1020 * j = iter_new(); // fp[-16]
1021 * a = 0; // r6
1022 * b = 0; // r7
1023 * c = -24; // r8
1024 * while (iter_next(j)) {
1025 * i = iter_new(); // fp[-8]
1026 * a = 0; // r6
1027 * b = 0; // r7
1028 * while (iter_next(i)) {
1029 * if (a == 1) {
1030 * a = 0;
1031 * b = 1;
1032 * } else if (a == 0) {
1033 * a = 1;
1034 * if (random() == 42)
1035 * continue;
1036 * if (b == 1) {
1037 * *(r10 + c) = 7; // this is not safe
1038 * iter_destroy(i);
1039 * iter_destroy(j);
1040 * return;
1041 * }
1042 * }
1043 * }
1044 * iter_destroy(i);
1045 * i = iter_new(); // fp[-8]
1046 * a = 0; // r6
1047 * b = 0; // r7
1048 * while (iter_next(i)) {
1049 * if (a == 1) {
1050 * a = 0;
1051 * b = 1;
1052 * } else if (a == 0) {
1053 * a = 1;
1054 * if (random() == 42)
1055 * continue;
1056 * if (b == 1) {
1057 * a = 0;
1058 * c = -25;
1059 * }
1060 * }
1061 * }
1062 * iter_destroy(i);
1063 * }
1064 * iter_destroy(j);
1065 * return;
1066 */
1067 asm volatile (
1068 "r1 = r10;"
1069 "r1 += -16;"
1070 "r2 = 0;"
1071 "r3 = 10;"
1072 "call %[bpf_iter_num_new];"
1073 "r6 = 0;"
1074 "r7 = 0;"
1075 "r8 = -24;"
1076 "j_loop_%=:"
1077 "r1 = r10;"
1078 "r1 += -16;"
1079 "call %[bpf_iter_num_next];"
1080 "if r0 == 0 goto j_loop_end_%=;"
1081
1082 /* first inner loop */
1083 "r1 = r10;"
1084 "r1 += -8;"
1085 "r2 = 0;"
1086 "r3 = 10;"
1087 "call %[bpf_iter_num_new];"
1088 "r6 = 0;"
1089 "r7 = 0;"
1090 "i_loop_%=:"
1091 "r1 = r10;"
1092 "r1 += -8;"
1093 "call %[bpf_iter_num_next];"
1094 "if r0 == 0 goto i_loop_end_%=;"
1095 "check_one_r6_%=:"
1096 "if r6 != 1 goto check_zero_r6_%=;"
1097 "r6 = 0;"
1098 "r7 = 1;"
1099 "goto i_loop_%=;"
1100 "check_zero_r6_%=:"
1101 "if r6 != 0 goto i_loop_%=;"
1102 "r6 = 1;"
1103 "call %[bpf_get_prandom_u32];"
1104 "if r0 != 42 goto check_one_r7_%=;"
1105 "goto i_loop_%=;"
1106 "check_one_r7_%=:"
1107 "if r7 != 1 goto i_loop_%=;"
1108 "r0 = r10;"
1109 "r0 += r8;"
1110 "r1 = 7;"
1111 "*(u64 *)(r0 + 0) = r1;"
1112 "r1 = r10;"
1113 "r1 += -8;"
1114 "call %[bpf_iter_num_destroy];"
1115 "r1 = r10;"
1116 "r1 += -16;"
1117 "call %[bpf_iter_num_destroy];"
1118 "r0 = 0;"
1119 "exit;"
1120 "i_loop_end_%=:"
1121 "r1 = r10;"
1122 "r1 += -8;"
1123 "call %[bpf_iter_num_destroy];"
1124
1125 /* second inner loop */
1126 "r1 = r10;"
1127 "r1 += -8;"
1128 "r2 = 0;"
1129 "r3 = 10;"
1130 "call %[bpf_iter_num_new];"
1131 "r6 = 0;"
1132 "r7 = 0;"
1133 "i2_loop_%=:"
1134 "r1 = r10;"
1135 "r1 += -8;"
1136 "call %[bpf_iter_num_next];"
1137 "if r0 == 0 goto i2_loop_end_%=;"
1138 "check2_one_r6_%=:"
1139 "if r6 != 1 goto check2_zero_r6_%=;"
1140 "r6 = 0;"
1141 "r7 = 1;"
1142 "goto i2_loop_%=;"
1143 "check2_zero_r6_%=:"
1144 "if r6 != 0 goto i2_loop_%=;"
1145 "r6 = 1;"
1146 "call %[bpf_get_prandom_u32];"
1147 "if r0 != 42 goto check2_one_r7_%=;"
1148 "goto i2_loop_%=;"
1149 "check2_one_r7_%=:"
1150 "if r7 != 1 goto i2_loop_%=;"
1151 "r6 = 0;"
1152 "r8 = -25;"
1153 "goto i2_loop_%=;"
1154 "i2_loop_end_%=:"
1155 "r1 = r10;"
1156 "r1 += -8;"
1157 "call %[bpf_iter_num_destroy];"
1158
1159 "r6 = 0;"
1160 "r7 = 0;"
1161 "goto j_loop_%=;"
1162 "j_loop_end_%=:"
1163 "r1 = r10;"
1164 "r1 += -16;"
1165 "call %[bpf_iter_num_destroy];"
1166 "r0 = 0;"
1167 "exit;"
1168 :
1169 : __imm(bpf_get_prandom_u32),
1170 __imm(bpf_iter_num_new),
1171 __imm(bpf_iter_num_next),
1172 __imm(bpf_iter_num_destroy)
1173 : __clobber_all
1174 );
1175 }
1176
1177 SEC("?raw_tp")
1178 __success
1179 __naked int triple_continue(void)
1180 {
1181 /* This is equivalent to C program below.
1182 * High branching factor of the loop body turned out to be
1183 * problematic for one of the iterator convergence tracking
1184 * algorithms explored.
1185 *
1186 * r6 = bpf_get_prandom_u32()
1187 * bpf_iter_num_new(&fp[-8], 0, 10)
1188 * while (bpf_iter_num_next(&fp[-8])) {
1189 * if (bpf_get_prandom_u32() != 42)
1190 * continue;
1191 * if (bpf_get_prandom_u32() != 42)
1192 * continue;
1193 * if (bpf_get_prandom_u32() != 42)
1194 * continue;
1195 * r0 += 0;
1196 * }
1197 * bpf_iter_num_destroy(&fp[-8])
1198 * return 0
1199 */
1200 asm volatile (
1201 "r1 = r10;"
1202 "r1 += -8;"
1203 "r2 = 0;"
1204 "r3 = 10;"
1205 "call %[bpf_iter_num_new];"
1206 "loop_%=:"
1207 "r1 = r10;"
1208 "r1 += -8;"
1209 "call %[bpf_iter_num_next];"
1210 "if r0 == 0 goto loop_end_%=;"
1211 "call %[bpf_get_prandom_u32];"
1212 "if r0 != 42 goto loop_%=;"
1213 "call %[bpf_get_prandom_u32];"
1214 "if r0 != 42 goto loop_%=;"
1215 "call %[bpf_get_prandom_u32];"
1216 "if r0 != 42 goto loop_%=;"
1217 "r0 += 0;"
1218 "goto loop_%=;"
1219 "loop_end_%=:"
1220 "r1 = r10;"
1221 "r1 += -8;"
1222 "call %[bpf_iter_num_destroy];"
1223 "r0 = 0;"
1224 "exit;"
1225 :
1226 : __imm(bpf_get_prandom_u32),
1227 __imm(bpf_iter_num_new),
1228 __imm(bpf_iter_num_next),
1229 __imm(bpf_iter_num_destroy)
1230 : __clobber_all
1231 );
1232 }
1233
1234 SEC("?raw_tp")
1235 __success
1236 __naked int widen_spill(void)
1237 {
1238 /* This is equivalent to C program below.
1239 * The counter is stored in fp[-16]; if this counter is not widened,
1240 * verifier states representing loop iterations would never converge.
1241 *
1242 * fp[-16] = 0
1243 * bpf_iter_num_new(&fp[-8], 0, 10)
1244 * while (bpf_iter_num_next(&fp[-8])) {
1245 * r0 = fp[-16];
1246 * r0 += 1;
1247 * fp[-16] = r0;
1248 * }
1249 * bpf_iter_num_destroy(&fp[-8])
1250 * return 0
1251 */
1252 asm volatile (
1253 "r0 = 0;"
1254 "*(u64 *)(r10 - 16) = r0;"
1255 "r1 = r10;"
1256 "r1 += -8;"
1257 "r2 = 0;"
1258 "r3 = 10;"
1259 "call %[bpf_iter_num_new];"
1260 "loop_%=:"
1261 "r1 = r10;"
1262 "r1 += -8;"
1263 "call %[bpf_iter_num_next];"
1264 "if r0 == 0 goto loop_end_%=;"
1265 "r0 = *(u64 *)(r10 - 16);"
1266 "r0 += 1;"
1267 "*(u64 *)(r10 - 16) = r0;"
1268 "goto loop_%=;"
1269 "loop_end_%=:"
1270 "r1 = r10;"
1271 "r1 += -8;"
1272 "call %[bpf_iter_num_destroy];"
1273 "r0 = 0;"
1274 "exit;"
1275 :
1276 : __imm(bpf_iter_num_new),
1277 __imm(bpf_iter_num_next),
1278 __imm(bpf_iter_num_destroy)
1279 : __clobber_all
1280 );
1281 }
1282
1283 SEC("raw_tp")
1284 __success
1285 __naked int checkpoint_states_deletion(void)
1286 {
1287 /* This is equivalent to C program below.
1288 *
1289 * int *a, *b, *c, *d, *e, *f;
1290 * int i, sum = 0;
1291 * bpf_for(i, 0, 10) {
1292 * a = bpf_map_lookup_elem(&amap, &i);
1293 * b = bpf_map_lookup_elem(&amap, &i);
1294 * c = bpf_map_lookup_elem(&amap, &i);
1295 * d = bpf_map_lookup_elem(&amap, &i);
1296 * e = bpf_map_lookup_elem(&amap, &i);
1297 * f = bpf_map_lookup_elem(&amap, &i);
1298 * if (a) sum += 1;
1299 * if (b) sum += 1;
1300 * if (c) sum += 1;
1301 * if (d) sum += 1;
1302 * if (e) sum += 1;
1303 * if (f) sum += 1;
1304 * }
1305 * return 0;
1306 *
1307 * The body of the loop spawns multiple simulation paths
1308 * with different combinations of NULL/non-NULL information for a/b/c/d/e/f.
1309 * Each combination is unique from states_equal() point of view.
1310 * Explored states checkpoint is created after each iterator next call.
1311 * Iterator convergence logic expects that eventually current state
1312 * would become equal to one of the explored states and thus loop
1313 * exploration would be finished (at least for a specific path).
1314 * Verifier evicts explored states with a high miss-to-hit ratio
1315 * to avoid comparing current state with too many explored
1316 * states per instruction.
1317 * This test is designed to "stress test" eviction policy defined using formula:
1318 *
1319 * sl->miss_cnt > sl->hit_cnt * N + N // if true sl->state is evicted
1320 *
1321 * Currently N is set to 64, which allows for 6 variables in this test.
1322 */
1323 asm volatile (
1324 "r6 = 0;" /* a */
1325 "r7 = 0;" /* b */
1326 "r8 = 0;" /* c */
1327 "*(u64 *)(r10 - 24) = r6;" /* d */
1328 "*(u64 *)(r10 - 32) = r6;" /* e */
1329 "*(u64 *)(r10 - 40) = r6;" /* f */
1330 "r9 = 0;" /* sum */
1331 "r1 = r10;"
1332 "r1 += -8;"
1333 "r2 = 0;"
1334 "r3 = 10;"
1335 "call %[bpf_iter_num_new];"
1336 "loop_%=:"
1337 "r1 = r10;"
1338 "r1 += -8;"
1339 "call %[bpf_iter_num_next];"
1340 "if r0 == 0 goto loop_end_%=;"
1341
1342 "*(u64 *)(r10 - 16) = r0;"
1343
1344 "r1 = %[amap] ll;"
1345 "r2 = r10;"
1346 "r2 += -16;"
1347 "call %[bpf_map_lookup_elem];"
1348 "r6 = r0;"
1349
1350 "r1 = %[amap] ll;"
1351 "r2 = r10;"
1352 "r2 += -16;"
1353 "call %[bpf_map_lookup_elem];"
1354 "r7 = r0;"
1355
1356 "r1 = %[amap] ll;"
1357 "r2 = r10;"
1358 "r2 += -16;"
1359 "call %[bpf_map_lookup_elem];"
1360 "r8 = r0;"
1361
1362 "r1 = %[amap] ll;"
1363 "r2 = r10;"
1364 "r2 += -16;"
1365 "call %[bpf_map_lookup_elem];"
1366 "*(u64 *)(r10 - 24) = r0;"
1367
1368 "r1 = %[amap] ll;"
1369 "r2 = r10;"
1370 "r2 += -16;"
1371 "call %[bpf_map_lookup_elem];"
1372 "*(u64 *)(r10 - 32) = r0;"
1373
1374 "r1 = %[amap] ll;"
1375 "r2 = r10;"
1376 "r2 += -16;"
1377 "call %[bpf_map_lookup_elem];"
1378 "*(u64 *)(r10 - 40) = r0;"
1379
1380 "if r6 == 0 goto +1;"
1381 "r9 += 1;"
1382 "if r7 == 0 goto +1;"
1383 "r9 += 1;"
1384 "if r8 == 0 goto +1;"
1385 "r9 += 1;"
1386 "r0 = *(u64 *)(r10 - 24);"
1387 "if r0 == 0 goto +1;"
1388 "r9 += 1;"
1389 "r0 = *(u64 *)(r10 - 32);"
1390 "if r0 == 0 goto +1;"
1391 "r9 += 1;"
1392 "r0 = *(u64 *)(r10 - 40);"
1393 "if r0 == 0 goto +1;"
1394 "r9 += 1;"
1395
1396 "goto loop_%=;"
1397 "loop_end_%=:"
1398 "r1 = r10;"
1399 "r1 += -8;"
1400 "call %[bpf_iter_num_destroy];"
1401 "r0 = 0;"
1402 "exit;"
1403 :
1404 : __imm(bpf_map_lookup_elem),
1405 __imm(bpf_iter_num_new),
1406 __imm(bpf_iter_num_next),
1407 __imm(bpf_iter_num_destroy),
1408 __imm_addr(amap)
1409 : __clobber_all
1410 );
1411 }
1412
1413 struct {
1414 int data[32];
1415 int n;
1416 } loop_data;
1417
1418 SEC("raw_tp")
1419 __success
1420 int iter_arr_with_actual_elem_count(const void *ctx)
1421 {
1422 int i, n = loop_data.n, sum = 0;
1423
1424 if (n > ARRAY_SIZE(loop_data.data))
1425 return 0;
1426
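/* n is known to be at most ARRAY_SIZE(loop_data.data) thanks to the check
 * above, which is what makes the unchecked access below verifiable
 */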
1427 bpf_for(i, 0, n) {
1428 /* no rechecking of i against ARRAY_SIZE(loop_data.data) */
1429 sum += loop_data.data[i];
1430 }
1431
1432 return sum;
1433 }
1434
1435 __u32 upper, select_n, result;
1436 __u64 global;
1437
1438 static __noinline bool nest_2(char *str)
1439 {
1440 /* some insns (including branch insns) to ensure stacksafe() is triggered
1441 * in nest_2(). This way, stacksafe() can compare the frame associated with nest_1().
1442 */
1443 if (str[0] == 't')
1444 return true;
1445 if (str[1] == 'e')
1446 return true;
1447 if (str[2] == 's')
1448 return true;
1449 if (str[3] == 't')
1450 return true;
1451 return false;
1452 }
1453
1454 static __noinline bool nest_1(int n)
1455 {
1456 /* case 0: allocate stack, case 1: no stack allocation */
1457 switch (n) {
1458 case 0: {
1459 char comm[16];
1460
1461 if (bpf_get_current_comm(comm, 16))
1462 return false;
1463 return nest_2(comm);
1464 }
1465 case 1:
1466 return nest_2((char *)&global);
1467 default:
1468 return false;
1469 }
1470 }
1471
1472 SEC("raw_tp")
1473 __success
1474 int iter_subprog_check_stacksafe(const void *ctx)
1475 {
1476 long i;
1477
1478 bpf_for(i, 0, upper) {
1479 if (!nest_1(select_n)) {
1480 result = 1;
1481 return 0;
1482 }
1483 }
1484
1485 result = 2;
1486 return 0;
1487 }
1488
1489 char _license[] SEC("license") = "GPL";
1490