// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */

#include <stdbool.h>
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"
#include "bpf_compiler.h"

#define unlikely(x) __builtin_expect(!!(x), 0)

static volatile int zero = 0;

int my_pid;
int arr[256];
int small_arr[16] SEC(".data.small_arr");

struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(max_entries, 10);
	__type(key, int);
	__type(value, int);
} amap SEC(".maps");

#ifdef REAL_TEST
#define MY_PID_GUARD() if (my_pid != (bpf_get_current_pid_tgid() >> 32)) return 0
#else
#define MY_PID_GUARD() ({ })
#endif

SEC("?raw_tp")
__failure __msg("math between map_value pointer and register with unbounded min value is not allowed")
int iter_err_unsafe_c_loop(const void *ctx)
{
	struct bpf_iter_num it;
	int *v, i = zero; /* obscure initial value of i */

	MY_PID_GUARD();

	bpf_iter_num_new(&it, 0, 1000);
	while ((v = bpf_iter_num_next(&it))) {
		i++;
	}
	bpf_iter_num_destroy(&it);

	small_arr[i] = 123; /* invalid */

	return 0;
}

SEC("?raw_tp")
__failure __msg("unbounded memory access")
int iter_err_unsafe_asm_loop(const void *ctx)
{
	struct bpf_iter_num it;

	MY_PID_GUARD();

	asm volatile (
		"r6 = %[zero];" /* iteration counter */
		"r1 = %[it];" /* iterator state */
		"r2 = 0;"
		"r3 = 1000;"
		"r4 = 1;"
		"call %[bpf_iter_num_new];"
	"loop:"
		"r1 = %[it];"
		"call %[bpf_iter_num_next];"
		"if r0 == 0 goto out;"
		"r6 += 1;"
		"goto loop;"
	"out:"
		"r1 = %[it];"
		"call %[bpf_iter_num_destroy];"
		"r1 = %[small_arr];"
		"r2 = r6;"
		"r2 <<= 2;"
		"r1 += r2;"
		"*(u32 *)(r1 + 0) = r6;" /* invalid */
		:
		: [it]"r"(&it),
		  [small_arr]"r"(small_arr),
		  [zero]"r"(zero),
		  __imm(bpf_iter_num_new),
		  __imm(bpf_iter_num_next),
		  __imm(bpf_iter_num_destroy)
		: __clobber_common, "r6"
	);

	return 0;
}

SEC("raw_tp")
__success
int iter_while_loop(const void *ctx)
{
	struct bpf_iter_num it;
	int *v;

	MY_PID_GUARD();

	bpf_iter_num_new(&it, 0, 3);
	while ((v = bpf_iter_num_next(&it))) {
		bpf_printk("ITER_BASIC: E1 VAL: v=%d", *v);
	}
	bpf_iter_num_destroy(&it);

	return 0;
}

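/* Same loop as above, but the cleanup attribute makes the compiler emit the
 * bpf_iter_num_destroy() call automatically when 'it' goes out of scope, so
 * no explicit destroy call is needed at the end of the function.
 */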
SEC("raw_tp")
__success
int iter_while_loop_auto_cleanup(const void *ctx)
{
	__attribute__((cleanup(bpf_iter_num_destroy))) struct bpf_iter_num it;
	int *v;

	MY_PID_GUARD();

	bpf_iter_num_new(&it, 0, 3);
	while ((v = bpf_iter_num_next(&it))) {
		bpf_printk("ITER_BASIC: E1 VAL: v=%d", *v);
	}
	/* (!) no explicit bpf_iter_num_destroy() */

	return 0;
}

SEC("raw_tp")
__success
int iter_for_loop(const void *ctx)
{
	struct bpf_iter_num it;
	int *v;

	MY_PID_GUARD();

	bpf_iter_num_new(&it, 5, 10);
	for (v = bpf_iter_num_next(&it); v; v = bpf_iter_num_next(&it)) {
		bpf_printk("ITER_BASIC: E2 VAL: v=%d", *v);
	}
	bpf_iter_num_destroy(&it);

	return 0;
}

SEC("raw_tp")
__success
int iter_bpf_for_each_macro(const void *ctx)
{
	int *v;

	MY_PID_GUARD();

	bpf_for_each(num, v, 5, 10) {
		bpf_printk("ITER_BASIC: E2 VAL: v=%d", *v);
	}

	return 0;
}

SEC("raw_tp")
__success
int iter_bpf_for_macro(const void *ctx)
{
	int i;

	MY_PID_GUARD();

	bpf_for(i, 5, 10) {
		bpf_printk("ITER_BASIC: E2 VAL: v=%d", i);
	}

	return 0;
}

SEC("raw_tp")
__success
int iter_pragma_unroll_loop(const void *ctx)
{
	struct bpf_iter_num it;
	int *v, i;

	MY_PID_GUARD();

	bpf_iter_num_new(&it, 0, 2);
	__pragma_loop_no_unroll
	for (i = 0; i < 3; i++) {
		v = bpf_iter_num_next(&it);
		bpf_printk("ITER_BASIC: E3 VAL: i=%d v=%d", i, v ? *v : -1);
	}
	bpf_iter_num_destroy(&it);

	return 0;
}

SEC("raw_tp")
__success
int iter_manual_unroll_loop(const void *ctx)
{
	struct bpf_iter_num it;
	int *v;

	MY_PID_GUARD();

	bpf_iter_num_new(&it, 100, 200);
	v = bpf_iter_num_next(&it);
	bpf_printk("ITER_BASIC: E4 VAL: v=%d", v ? *v : -1);
	v = bpf_iter_num_next(&it);
	bpf_printk("ITER_BASIC: E4 VAL: v=%d", v ? *v : -1);
	v = bpf_iter_num_next(&it);
	bpf_printk("ITER_BASIC: E4 VAL: v=%d", v ? *v : -1);
	v = bpf_iter_num_next(&it);
	bpf_printk("ITER_BASIC: E4 VAL: v=%d\n", v ? *v : -1);
	bpf_iter_num_destroy(&it);

	return 0;
}

SEC("raw_tp")
__success
int iter_multiple_sequential_loops(const void *ctx)
{
	struct bpf_iter_num it;
	int *v, i;

	MY_PID_GUARD();

	bpf_iter_num_new(&it, 0, 3);
	while ((v = bpf_iter_num_next(&it))) {
		bpf_printk("ITER_BASIC: E1 VAL: v=%d", *v);
	}
	bpf_iter_num_destroy(&it);

	bpf_iter_num_new(&it, 5, 10);
	for (v = bpf_iter_num_next(&it); v; v = bpf_iter_num_next(&it)) {
		bpf_printk("ITER_BASIC: E2 VAL: v=%d", *v);
	}
	bpf_iter_num_destroy(&it);

	bpf_iter_num_new(&it, 0, 2);
	__pragma_loop_no_unroll
	for (i = 0; i < 3; i++) {
		v = bpf_iter_num_next(&it);
		bpf_printk("ITER_BASIC: E3 VAL: i=%d v=%d", i, v ? *v : -1);
	}
	bpf_iter_num_destroy(&it);

	bpf_iter_num_new(&it, 100, 200);
	v = bpf_iter_num_next(&it);
	bpf_printk("ITER_BASIC: E4 VAL: v=%d", v ? *v : -1);
	v = bpf_iter_num_next(&it);
	bpf_printk("ITER_BASIC: E4 VAL: v=%d", v ? *v : -1);
	v = bpf_iter_num_next(&it);
	bpf_printk("ITER_BASIC: E4 VAL: v=%d", v ? *v : -1);
	v = bpf_iter_num_next(&it);
	bpf_printk("ITER_BASIC: E4 VAL: v=%d\n", v ? *v : -1);
	bpf_iter_num_destroy(&it);

	return 0;
}

SEC("raw_tp")
__success
int iter_limit_cond_break_loop(const void *ctx)
{
	struct bpf_iter_num it;
	int *v, i = 0, sum = 0;

	MY_PID_GUARD();

	bpf_iter_num_new(&it, 0, 10);
	while ((v = bpf_iter_num_next(&it))) {
		bpf_printk("ITER_SIMPLE: i=%d v=%d", i, *v);
		sum += *v;

		i++;
		if (i > 3)
			break;
	}
	bpf_iter_num_destroy(&it);

	bpf_printk("ITER_SIMPLE: sum=%d\n", sum);

	return 0;
}

SEC("raw_tp")
__success
int iter_obfuscate_counter(const void *ctx)
{
	struct bpf_iter_num it;
	int *v, sum = 0;
	/* Make i's initial value unknowable to the verifier to prevent it from
	 * pruning the if/else branch inside the loop body and marking i as precise.
	 */
	int i = zero;

	MY_PID_GUARD();

	bpf_iter_num_new(&it, 0, 10);
	while ((v = bpf_iter_num_next(&it))) {
		int x;

		i += 1;

		/* If we initialized i as `int i = 0;` above, the verifier would
		 * track that i becomes 1 on the first iteration after the
		 * increment above, eagerly prune the else branch here and mark
		 * i as precise, ruining open-coded iterator logic completely,
		 * as each subsequent iteration would have a different *precise*
		 * value of i, and thus there would be no state convergence.
		 * This would result in reaching the maximum instruction limit,
		 * no matter what the limit is.
		 */
		if (i == 1)
			x = 123;
		else
			x = i * 3 + 1;

		bpf_printk("ITER_OBFUSCATE_COUNTER: i=%d v=%d x=%d", i, *v, x);

		sum += x;
	}
	bpf_iter_num_destroy(&it);

	bpf_printk("ITER_OBFUSCATE_COUNTER: sum=%d\n", sum);

	return 0;
}

SEC("raw_tp")
__success
int iter_search_loop(const void *ctx)
{
	struct bpf_iter_num it;
	int *v, *elem = NULL;
	bool found = false;

	MY_PID_GUARD();

	bpf_iter_num_new(&it, 0, 10);

	while ((v = bpf_iter_num_next(&it))) {
		bpf_printk("ITER_SEARCH_LOOP: v=%d", *v);

		if (*v == 2) {
			found = true;
			elem = v;
			barrier_var(elem);
		}
	}

	/* should fail to verify if bpf_iter_num_destroy() is here */

	if (found)
		/* the found element value will be wrong here, we should have
		 * copied the value to a separate variable, but here we want to
		 * make sure we can access memory after the loop anyway
		 */
		bpf_printk("ITER_SEARCH_LOOP: FOUND IT = %d!\n", *elem);
	else
		bpf_printk("ITER_SEARCH_LOOP: NOT FOUND IT!\n");

	bpf_iter_num_destroy(&it);

	return 0;
}

SEC("raw_tp")
__success
int iter_array_fill(const void *ctx)
{
	int sum, i;

	MY_PID_GUARD();

	bpf_for(i, 0, ARRAY_SIZE(arr)) {
		arr[i] = i * 2;
	}

	sum = 0;
	bpf_for(i, 0, ARRAY_SIZE(arr)) {
		sum += arr[i];
	}

	bpf_printk("ITER_ARRAY_FILL: sum=%d (should be %d)\n", sum, 255 * 256);

	return 0;
}

static int arr2d[4][5];
static int arr2d_row_sums[4];
static int arr2d_col_sums[5];

SEC("raw_tp")
__success
int iter_nested_iters(const void *ctx)
{
	int sum, row, col;

	MY_PID_GUARD();

	bpf_for(row, 0, ARRAY_SIZE(arr2d)) {
		bpf_for(col, 0, ARRAY_SIZE(arr2d[0])) {
			arr2d[row][col] = row * col;
		}
	}

	/* zero-initialize sums */
	sum = 0;
	bpf_for(row, 0, ARRAY_SIZE(arr2d)) {
		arr2d_row_sums[row] = 0;
	}
	bpf_for(col, 0, ARRAY_SIZE(arr2d[0])) {
		arr2d_col_sums[col] = 0;
	}

	/* calculate sums */
	bpf_for(row, 0, ARRAY_SIZE(arr2d)) {
		bpf_for(col, 0, ARRAY_SIZE(arr2d[0])) {
			sum += arr2d[row][col];
			arr2d_row_sums[row] += arr2d[row][col];
			arr2d_col_sums[col] += arr2d[row][col];
		}
	}

	bpf_printk("ITER_NESTED_ITERS: total sum=%d", sum);
	bpf_for(row, 0, ARRAY_SIZE(arr2d)) {
		bpf_printk("ITER_NESTED_ITERS: row #%d sum=%d", row, arr2d_row_sums[row]);
	}
	bpf_for(col, 0, ARRAY_SIZE(arr2d[0])) {
		bpf_printk("ITER_NESTED_ITERS: col #%d sum=%d%s",
			   col, arr2d_col_sums[col],
			   col == ARRAY_SIZE(arr2d[0]) - 1 ? "\n" : "");
	}

	return 0;
}

SEC("raw_tp")
__success
int iter_nested_deeply_iters(const void *ctx)
{
	int sum = 0;

	MY_PID_GUARD();

	bpf_repeat(10) {
		bpf_repeat(10) {
			bpf_repeat(10) {
				bpf_repeat(10) {
					bpf_repeat(10) {
						sum += 1;
					}
				}
			}
		}
		/* validate that we can break from inside bpf_repeat() */
		break;
	}

	return sum;
}

static __noinline void fill_inner_dimension(int row)
{
	int col;

	bpf_for(col, 0, ARRAY_SIZE(arr2d[0])) {
		arr2d[row][col] = row * col;
	}
}

static __noinline int sum_inner_dimension(int row)
{
	int sum = 0, col;

	bpf_for(col, 0, ARRAY_SIZE(arr2d[0])) {
		sum += arr2d[row][col];
		arr2d_row_sums[row] += arr2d[row][col];
		arr2d_col_sums[col] += arr2d[row][col];
	}

	return sum;
}

SEC("raw_tp")
__success
int iter_subprog_iters(const void *ctx)
{
	int sum, row, col;

	MY_PID_GUARD();

	bpf_for(row, 0, ARRAY_SIZE(arr2d)) {
		fill_inner_dimension(row);
	}

	/* zero-initialize sums */
	sum = 0;
	bpf_for(row, 0, ARRAY_SIZE(arr2d)) {
		arr2d_row_sums[row] = 0;
	}
	bpf_for(col, 0, ARRAY_SIZE(arr2d[0])) {
		arr2d_col_sums[col] = 0;
	}

	/* calculate sums */
	bpf_for(row, 0, ARRAY_SIZE(arr2d)) {
		sum += sum_inner_dimension(row);
	}

	bpf_printk("ITER_SUBPROG_ITERS: total sum=%d", sum);
	bpf_for(row, 0, ARRAY_SIZE(arr2d)) {
		bpf_printk("ITER_SUBPROG_ITERS: row #%d sum=%d",
			   row, arr2d_row_sums[row]);
	}
	bpf_for(col, 0, ARRAY_SIZE(arr2d[0])) {
		bpf_printk("ITER_SUBPROG_ITERS: col #%d sum=%d%s",
			   col, arr2d_col_sums[col],
			   col == ARRAY_SIZE(arr2d[0]) - 1 ? "\n" : "");
	}

	return 0;
}

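/* hash map used by the iter_err_too_permissive*() and iter_tricky_but_fine()
 * tests below
 */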
"\n" : ""); 523 } 524 525 return 0; 526 } 527 528 struct { 529 __uint(type, BPF_MAP_TYPE_HASH); 530 __type(key, int); 531 __type(value, int); 532 __uint(max_entries, 1000); 533 } hash_map SEC(".maps"); 534 535 SEC("?raw_tp") 536 __failure __msg("invalid mem access 'scalar'") 537 int iter_err_too_permissive1(const void *ctx) 538 { 539 int *map_val = NULL; 540 int key = 0; 541 542 MY_PID_GUARD(); 543 544 map_val = bpf_map_lookup_elem(&hash_map, &key); 545 if (!map_val) 546 return 0; 547 548 bpf_repeat(1000000) { 549 map_val = NULL; 550 } 551 552 *map_val = 123; 553 554 return 0; 555 } 556 557 SEC("?raw_tp") 558 __failure __msg("invalid mem access 'map_value_or_null'") 559 int iter_err_too_permissive2(const void *ctx) 560 { 561 int *map_val = NULL; 562 int key = 0; 563 564 MY_PID_GUARD(); 565 566 map_val = bpf_map_lookup_elem(&hash_map, &key); 567 if (!map_val) 568 return 0; 569 570 bpf_repeat(1000000) { 571 map_val = bpf_map_lookup_elem(&hash_map, &key); 572 } 573 574 *map_val = 123; 575 576 return 0; 577 } 578 579 SEC("?raw_tp") 580 __failure __msg("invalid mem access 'map_value_or_null'") 581 int iter_err_too_permissive3(const void *ctx) 582 { 583 int *map_val = NULL; 584 int key = 0; 585 bool found = false; 586 587 MY_PID_GUARD(); 588 589 bpf_repeat(1000000) { 590 map_val = bpf_map_lookup_elem(&hash_map, &key); 591 found = true; 592 } 593 594 if (found) 595 *map_val = 123; 596 597 return 0; 598 } 599 600 SEC("raw_tp") 601 __success 602 int iter_tricky_but_fine(const void *ctx) 603 { 604 int *map_val = NULL; 605 int key = 0; 606 bool found = false; 607 608 MY_PID_GUARD(); 609 610 bpf_repeat(1000000) { 611 map_val = bpf_map_lookup_elem(&hash_map, &key); 612 if (map_val) { 613 found = true; 614 break; 615 } 616 } 617 618 if (found) 619 *map_val = 123; 620 621 return 0; 622 } 623 624 #define __bpf_memzero(p, sz) bpf_probe_read_kernel((p), (sz), 0) 625 626 SEC("raw_tp") 627 __success 628 int iter_stack_array_loop(const void *ctx) 629 { 630 long arr1[16], arr2[16], sum = 0; 631 int i; 632 633 MY_PID_GUARD(); 634 635 /* zero-init arr1 and arr2 in such a way that verifier doesn't know 636 * it's all zeros; if we don't do that, we'll make BPF verifier track 637 * all combination of zero/non-zero stack slots for arr1/arr2, which 638 * will lead to O(2^(ARRAY_SIZE(arr1)+ARRAY_SIZE(arr2))) different 639 * states 640 */ 641 __bpf_memzero(arr1, sizeof(arr1)); 642 __bpf_memzero(arr2, sizeof(arr1)); 643 644 /* validate that we can break and continue when using bpf_for() */ 645 bpf_for(i, 0, ARRAY_SIZE(arr1)) { 646 if (i & 1) { 647 arr1[i] = i; 648 continue; 649 } else { 650 arr2[i] = i; 651 break; 652 } 653 } 654 655 bpf_for(i, 0, ARRAY_SIZE(arr1)) { 656 sum += arr1[i] + arr2[i]; 657 } 658 659 return sum; 660 } 661 662 static __noinline void fill(struct bpf_iter_num *it, int *arr, __u32 n, int mul) 663 { 664 int *t, i; 665 666 while ((t = bpf_iter_num_next(it))) { 667 i = *t; 668 if (i >= n) 669 break; 670 arr[i] = i * mul; 671 } 672 } 673 674 static __noinline int sum(struct bpf_iter_num *it, int *arr, __u32 n) 675 { 676 int *t, i, sum = 0; 677 678 while ((t = bpf_iter_num_next(it))) { 679 i = *t; 680 if ((__u32)i >= n) 681 break; 682 sum += arr[i]; 683 } 684 685 return sum; 686 } 687 688 SEC("raw_tp") 689 __success 690 int iter_pass_iter_ptr_to_subprog(const void *ctx) 691 { 692 int arr1[16], arr2[32]; 693 struct bpf_iter_num it; 694 int n, sum1, sum2; 695 696 MY_PID_GUARD(); 697 698 /* fill arr1 */ 699 n = ARRAY_SIZE(arr1); 700 bpf_iter_num_new(&it, 0, n); 701 fill(&it, arr1, n, 2); 702 
SEC("raw_tp")
__success
int iter_pass_iter_ptr_to_subprog(const void *ctx)
{
	int arr1[16], arr2[32];
	struct bpf_iter_num it;
	int n, sum1, sum2;

	MY_PID_GUARD();

	/* fill arr1 */
	n = ARRAY_SIZE(arr1);
	bpf_iter_num_new(&it, 0, n);
	fill(&it, arr1, n, 2);
	bpf_iter_num_destroy(&it);

	/* fill arr2 */
	n = ARRAY_SIZE(arr2);
	bpf_iter_num_new(&it, 0, n);
	fill(&it, arr2, n, 10);
	bpf_iter_num_destroy(&it);

	/* sum arr1 */
	n = ARRAY_SIZE(arr1);
	bpf_iter_num_new(&it, 0, n);
	sum1 = sum(&it, arr1, n);
	bpf_iter_num_destroy(&it);

	/* sum arr2 */
	n = ARRAY_SIZE(arr2);
	bpf_iter_num_new(&it, 0, n);
	sum2 = sum(&it, arr2, n);
	bpf_iter_num_destroy(&it);

	bpf_printk("sum1=%d, sum2=%d", sum1, sum2);

	return 0;
}

SEC("?raw_tp")
__failure
__msg("R1 type=scalar expected=fp")
__naked int delayed_read_mark(void)
{
	/* This is equivalent to the C program below.
	 * The call to bpf_iter_num_next() is reachable with r7 values &fp[-16] and 0xdead.
	 * The state with r7=&fp[-16] is visited first and follows the r6 != 42 ... continue branch.
	 * At this point the iterator next() call is reached with r7 that has no read mark.
	 * The loop body with r7=0xdead would only be visited if the verifier decided to continue
	 * with a second loop iteration. Absence of a read mark on r7 might affect the state
	 * equivalence logic used for iterator convergence tracking.
	 *
	 * r7 = &fp[-16]
	 * fp[-16] = 0
	 * r6 = bpf_get_prandom_u32()
	 * bpf_iter_num_new(&fp[-8], 0, 10)
	 * while (bpf_iter_num_next(&fp[-8])) {
	 *   r6++
	 *   if (r6 != 42) {
	 *     r7 = 0xdead
	 *     continue;
	 *   }
	 *   bpf_probe_read_user(r7, 8, 0xdeadbeef); // this is not safe
	 * }
	 * bpf_iter_num_destroy(&fp[-8])
	 * return 0
	 */
	asm volatile (
		"r7 = r10;"
		"r7 += -16;"
		"r0 = 0;"
		"*(u64 *)(r7 + 0) = r0;"
		"call %[bpf_get_prandom_u32];"
		"r6 = r0;"
		"r1 = r10;"
		"r1 += -8;"
		"r2 = 0;"
		"r3 = 10;"
		"call %[bpf_iter_num_new];"
	"1:"
		"r1 = r10;"
		"r1 += -8;"
		"call %[bpf_iter_num_next];"
		"if r0 == 0 goto 2f;"
		"r6 += 1;"
		"if r6 != 42 goto 3f;"
		"r7 = 0xdead;"
		"goto 1b;"
	"3:"
		"r1 = r7;"
		"r2 = 8;"
		"r3 = 0xdeadbeef;"
		"call %[bpf_probe_read_user];"
		"goto 1b;"
	"2:"
		"r1 = r10;"
		"r1 += -8;"
		"call %[bpf_iter_num_destroy];"
		"r0 = 0;"
		"exit;"
		:
		: __imm(bpf_get_prandom_u32),
		  __imm(bpf_iter_num_new),
		  __imm(bpf_iter_num_next),
		  __imm(bpf_iter_num_destroy),
		  __imm(bpf_probe_read_user)
		: __clobber_all
	);
}

SEC("?raw_tp")
__failure
__msg("math between fp pointer and register with unbounded")
__naked int delayed_precision_mark(void)
{
	/* This is equivalent to the C program below.
	 * The test is similar to delayed_read_mark but verifies that incomplete
	 * precision marks don't fool the verifier.
	 * The call to bpf_iter_num_next() is reachable with r7 values -16 and -32.
	 * The state with r7=-16 is visited first and follows the r6 != 42 ... continue branch.
	 * At this point the iterator next() call is reached with r7 that has no read
	 * and precision marks.
	 * The loop body with r7=-32 would only be visited if the verifier decided to continue
	 * with a second loop iteration. Absence of a precision mark on r7 might affect the state
	 * equivalence logic used for iterator convergence tracking.
	 *
	 * r8 = 0
	 * fp[-16] = 0
	 * r7 = -16
	 * r6 = bpf_get_prandom_u32()
	 * bpf_iter_num_new(&fp[-8], 0, 10)
	 * while (bpf_iter_num_next(&fp[-8])) {
	 *   if (r6 != 42) {
	 *     r7 = -32
	 *     r6 = bpf_get_prandom_u32()
	 *     continue;
	 *   }
	 *   r0 = r10
	 *   r0 += r7
	 *   r8 = *(u64 *)(r0 + 0) // this is not safe
	 *   r6 = bpf_get_prandom_u32()
	 * }
	 * bpf_iter_num_destroy(&fp[-8])
	 * return r8
	 */
	asm volatile (
		"r8 = 0;"
		"*(u64 *)(r10 - 16) = r8;"
		"r7 = -16;"
		"call %[bpf_get_prandom_u32];"
		"r6 = r0;"
		"r1 = r10;"
		"r1 += -8;"
		"r2 = 0;"
		"r3 = 10;"
		"call %[bpf_iter_num_new];"
	"1:"
		"r1 = r10;"
		"r1 += -8;"
		"call %[bpf_iter_num_next];"
		"if r0 == 0 goto 2f;"
		"if r6 != 42 goto 3f;"
		"r7 = -33;"
		"call %[bpf_get_prandom_u32];"
		"r6 = r0;"
		"goto 1b;"
	"3:"
		"r0 = r10;"
		"r0 += r7;"
		"r8 = *(u64 *)(r0 + 0);"
		"call %[bpf_get_prandom_u32];"
		"r6 = r0;"
		"goto 1b;"
	"2:"
		"r1 = r10;"
		"r1 += -8;"
		"call %[bpf_iter_num_destroy];"
		"r0 = r8;"
		"exit;"
		:
		: __imm(bpf_get_prandom_u32),
		  __imm(bpf_iter_num_new),
		  __imm(bpf_iter_num_next),
		  __imm(bpf_iter_num_destroy),
		  __imm(bpf_probe_read_user)
		: __clobber_all
	);
}

SEC("?raw_tp")
__failure
__msg("math between fp pointer and register with unbounded")
__flag(BPF_F_TEST_STATE_FREQ)
__naked int loop_state_deps1(void)
{
	/* This is equivalent to the C program below.
	 *
	 * The case turns out to be tricky in the sense that:
	 * - states with c=-25 are explored only on a second iteration
	 *   of the outer loop;
	 * - states with read+precise mark on c are explored only on
	 *   the second iteration of the inner loop and in a state which
	 *   is pushed to the states stack first.
	 *
	 * Depending on the details of the iterator convergence logic
	 * the verifier might stop state traversal too early and miss
	 * the unsafe c=-25 memory access.
	 *
	 * j = iter_new();             // fp[-16]
	 * a = 0;                      // r6
	 * b = 0;                      // r7
	 * c = -24;                    // r8
	 * while (iter_next(j)) {
	 *   i = iter_new();           // fp[-8]
	 *   a = 0;                    // r6
	 *   b = 0;                    // r7
	 *   while (iter_next(i)) {
	 *     if (a == 1) {
	 *       a = 0;
	 *       b = 1;
	 *     } else if (a == 0) {
	 *       a = 1;
	 *       if (random() == 42)
	 *         continue;
	 *       if (b == 1) {
	 *         *(r10 + c) = 7;     // this is not safe
	 *         iter_destroy(i);
	 *         iter_destroy(j);
	 *         return;
	 *       }
	 *     }
	 *   }
	 *   iter_destroy(i);
	 *   a = 0;
	 *   b = 0;
	 *   c = -25;
	 * }
	 * iter_destroy(j);
	 * return;
	 */
	asm volatile (
		"r1 = r10;"
		"r1 += -16;"
		"r2 = 0;"
		"r3 = 10;"
		"call %[bpf_iter_num_new];"
		"r6 = 0;"
		"r7 = 0;"
		"r8 = -24;"
	"j_loop_%=:"
		"r1 = r10;"
		"r1 += -16;"
		"call %[bpf_iter_num_next];"
		"if r0 == 0 goto j_loop_end_%=;"
		"r1 = r10;"
		"r1 += -8;"
		"r2 = 0;"
		"r3 = 10;"
		"call %[bpf_iter_num_new];"
		"r6 = 0;"
		"r7 = 0;"
	"i_loop_%=:"
		"r1 = r10;"
		"r1 += -8;"
		"call %[bpf_iter_num_next];"
		"if r0 == 0 goto i_loop_end_%=;"
	"check_one_r6_%=:"
		"if r6 != 1 goto check_zero_r6_%=;"
		"r6 = 0;"
		"r7 = 1;"
		"goto i_loop_%=;"
	"check_zero_r6_%=:"
		"if r6 != 0 goto i_loop_%=;"
		"r6 = 1;"
		"call %[bpf_get_prandom_u32];"
		"if r0 != 42 goto check_one_r7_%=;"
		"goto i_loop_%=;"
	"check_one_r7_%=:"
		"if r7 != 1 goto i_loop_%=;"
		"r0 = r10;"
		"r0 += r8;"
		"r1 = 7;"
		"*(u64 *)(r0 + 0) = r1;"
		"r1 = r10;"
		"r1 += -8;"
		"call %[bpf_iter_num_destroy];"
		"r1 = r10;"
		"r1 += -16;"
		"call %[bpf_iter_num_destroy];"
		"r0 = 0;"
		"exit;"
	"i_loop_end_%=:"
		"r1 = r10;"
		"r1 += -8;"
		"call %[bpf_iter_num_destroy];"
		"r6 = 0;"
		"r7 = 0;"
		"r8 = -25;"
		"goto j_loop_%=;"
	"j_loop_end_%=:"
		"r1 = r10;"
		"r1 += -16;"
		"call %[bpf_iter_num_destroy];"
		"r0 = 0;"
		"exit;"
		:
		: __imm(bpf_get_prandom_u32),
		  __imm(bpf_iter_num_new),
		  __imm(bpf_iter_num_next),
		  __imm(bpf_iter_num_destroy)
		: __clobber_all
	);
}

SEC("?raw_tp")
__failure
__msg("math between fp pointer and register with unbounded")
__flag(BPF_F_TEST_STATE_FREQ)
__naked int loop_state_deps2(void)
{
	/* This is equivalent to the C program below.
	 *
	 * The case turns out to be tricky in the sense that:
	 * - states with read+precise mark on c are explored only on a second
	 *   iteration of the first inner loop and in a state which is pushed to
	 *   the states stack first.
	 * - states with c=-25 are explored only on a second iteration of the
	 *   second inner loop and in a state which is pushed to the states stack
	 *   first.
	 *
	 * Depending on the details of the iterator convergence logic
	 * the verifier might stop state traversal too early and miss
	 * the unsafe c=-25 memory access.
	 *
	 * j = iter_new();             // fp[-16]
	 * a = 0;                      // r6
	 * b = 0;                      // r7
	 * c = -24;                    // r8
	 * while (iter_next(j)) {
	 *   i = iter_new();           // fp[-8]
	 *   a = 0;                    // r6
	 *   b = 0;                    // r7
	 *   while (iter_next(i)) {
	 *     if (a == 1) {
	 *       a = 0;
	 *       b = 1;
	 *     } else if (a == 0) {
	 *       a = 1;
	 *       if (random() == 42)
	 *         continue;
	 *       if (b == 1) {
	 *         *(r10 + c) = 7;     // this is not safe
	 *         iter_destroy(i);
	 *         iter_destroy(j);
	 *         return;
	 *       }
	 *     }
	 *   }
	 *   iter_destroy(i);
	 *   i = iter_new();           // fp[-8]
	 *   a = 0;                    // r6
	 *   b = 0;                    // r7
	 *   while (iter_next(i)) {
	 *     if (a == 1) {
	 *       a = 0;
	 *       b = 1;
	 *     } else if (a == 0) {
	 *       a = 1;
	 *       if (random() == 42)
	 *         continue;
	 *       if (b == 1) {
	 *         a = 0;
	 *         c = -25;
	 *       }
	 *     }
	 *   }
	 *   iter_destroy(i);
	 * }
	 * iter_destroy(j);
	 * return;
	 */
	asm volatile (
		"r1 = r10;"
		"r1 += -16;"
		"r2 = 0;"
		"r3 = 10;"
		"call %[bpf_iter_num_new];"
		"r6 = 0;"
		"r7 = 0;"
		"r8 = -24;"
	"j_loop_%=:"
		"r1 = r10;"
		"r1 += -16;"
		"call %[bpf_iter_num_next];"
		"if r0 == 0 goto j_loop_end_%=;"

		/* first inner loop */
		"r1 = r10;"
		"r1 += -8;"
		"r2 = 0;"
		"r3 = 10;"
		"call %[bpf_iter_num_new];"
		"r6 = 0;"
		"r7 = 0;"
	"i_loop_%=:"
		"r1 = r10;"
		"r1 += -8;"
		"call %[bpf_iter_num_next];"
		"if r0 == 0 goto i_loop_end_%=;"
	"check_one_r6_%=:"
		"if r6 != 1 goto check_zero_r6_%=;"
		"r6 = 0;"
		"r7 = 1;"
		"goto i_loop_%=;"
	"check_zero_r6_%=:"
		"if r6 != 0 goto i_loop_%=;"
		"r6 = 1;"
		"call %[bpf_get_prandom_u32];"
		"if r0 != 42 goto check_one_r7_%=;"
		"goto i_loop_%=;"
	"check_one_r7_%=:"
		"if r7 != 1 goto i_loop_%=;"
		"r0 = r10;"
		"r0 += r8;"
		"r1 = 7;"
		"*(u64 *)(r0 + 0) = r1;"
		"r1 = r10;"
		"r1 += -8;"
		"call %[bpf_iter_num_destroy];"
		"r1 = r10;"
		"r1 += -16;"
		"call %[bpf_iter_num_destroy];"
		"r0 = 0;"
		"exit;"
	"i_loop_end_%=:"
		"r1 = r10;"
		"r1 += -8;"
		"call %[bpf_iter_num_destroy];"

		/* second inner loop */
		"r1 = r10;"
		"r1 += -8;"
		"r2 = 0;"
		"r3 = 10;"
		"call %[bpf_iter_num_new];"
		"r6 = 0;"
		"r7 = 0;"
	"i2_loop_%=:"
		"r1 = r10;"
		"r1 += -8;"
		"call %[bpf_iter_num_next];"
		"if r0 == 0 goto i2_loop_end_%=;"
	"check2_one_r6_%=:"
		"if r6 != 1 goto check2_zero_r6_%=;"
		"r6 = 0;"
		"r7 = 1;"
		"goto i2_loop_%=;"
	"check2_zero_r6_%=:"
		"if r6 != 0 goto i2_loop_%=;"
		"r6 = 1;"
		"call %[bpf_get_prandom_u32];"
		"if r0 != 42 goto check2_one_r7_%=;"
		"goto i2_loop_%=;"
	"check2_one_r7_%=:"
		"if r7 != 1 goto i2_loop_%=;"
		"r6 = 0;"
		"r8 = -25;"
		"goto i2_loop_%=;"
	"i2_loop_end_%=:"
		"r1 = r10;"
		"r1 += -8;"
		"call %[bpf_iter_num_destroy];"

		"r6 = 0;"
		"r7 = 0;"
		"goto j_loop_%=;"
	"j_loop_end_%=:"
		"r1 = r10;"
		"r1 += -16;"
		"call %[bpf_iter_num_destroy];"
		"r0 = 0;"
		"exit;"
		:
		: __imm(bpf_get_prandom_u32),
		  __imm(bpf_iter_num_new),
		  __imm(bpf_iter_num_next),
		  __imm(bpf_iter_num_destroy)
		: __clobber_all
	);
}

SEC("?raw_tp")
__failure
__msg("math between fp pointer and register with unbounded")
__flag(BPF_F_TEST_STATE_FREQ)
__naked int loop_state_deps3(void)
{
	/* This is equivalent to the C program below.
	 *
	 * if (random() != 24) {       // assume false branch is placed first
	 *   i = iter_new();           // fp[-8]
	 *   while (iter_next(i));
	 *   iter_destroy(i);
	 *   return;
	 * }
	 *
	 * for (i = 10; i > 0; i--);   // increase dfs_depth for child states
	 *
	 * i = iter_new();             // fp[-8]
	 * b = -24;                    // r8
	 * for (;;) {                  // checkpoint (L)
	 *   if (!iter_next(i))        // checkpoint (N)
	 *     break;
	 *   if (random() == 77) {     // assume false branch is placed first
	 *     *(u64 *)(r10 + b) = 7;  // this is not safe when b == -25
	 *     iter_destroy(i);
	 *     return;
	 *   }
	 *   if (random() == 42) {     // assume false branch is placed first
	 *     b = -25;
	 *   }
	 * }
	 * iter_destroy(i);
	 *
	 * In case of a buggy verifier, the first loop might poison
	 * env->cur_state->loop_entry with a state having 0 branches
	 * and a small dfs_depth. This would trigger NOT_EXACT states
	 * comparison for some states within the second loop.
	 * Specifically, checkpoint (L) might be problematic if:
	 * - the branch with '*(u64 *)(r10 + b) = 7' is not explored yet;
	 * - checkpoint (L) is first reached in state {b=-24};
	 * - traversal is pruned at checkpoint (N) setting checkpoint's (L)
	 *   branch count to 0, thus making it eligible for use in pruning;
	 * - checkpoint (L) is next reached in state {b=-25},
	 *   this would cause NOT_EXACT comparison with a state {b=-24}
	 *   while 'b' is not marked precise yet.
	 */
	asm volatile (
		"call %[bpf_get_prandom_u32];"
		"if r0 == 24 goto 2f;"
		"r1 = r10;"
		"r1 += -8;"
		"r2 = 0;"
		"r3 = 5;"
		"call %[bpf_iter_num_new];"
	"1:"
		"r1 = r10;"
		"r1 += -8;"
		"call %[bpf_iter_num_next];"
		"if r0 != 0 goto 1b;"
		"r1 = r10;"
		"r1 += -8;"
		"call %[bpf_iter_num_destroy];"
		"r0 = 0;"
		"exit;"
	"2:"
		/* loop to increase dfs_depth */
		"r0 = 10;"
	"3:"
		"r0 -= 1;"
		"if r0 != 0 goto 3b;"
		/* end of loop */
		"r1 = r10;"
		"r1 += -8;"
		"r2 = 0;"
		"r3 = 10;"
		"call %[bpf_iter_num_new];"
		"r8 = -24;"
	"main_loop_%=:"
		"r1 = r10;"
		"r1 += -8;"
		"call %[bpf_iter_num_next];"
		"if r0 == 0 goto main_loop_end_%=;"
		/* first if */
		"call %[bpf_get_prandom_u32];"
		"if r0 == 77 goto unsafe_write_%=;"
		/* second if */
		"call %[bpf_get_prandom_u32];"
		"if r0 == 42 goto poison_r8_%=;"
		/* iterate */
		"goto main_loop_%=;"
	"main_loop_end_%=:"
		"r1 = r10;"
		"r1 += -8;"
		"call %[bpf_iter_num_destroy];"
		"r0 = 0;"
		"exit;"

	"unsafe_write_%=:"
		"r0 = r10;"
		"r0 += r8;"
		"r1 = 7;"
		"*(u64 *)(r0 + 0) = r1;"
		"goto main_loop_end_%=;"

	"poison_r8_%=:"
		"r8 = -25;"
		"goto main_loop_%=;"
		:
		: __imm(bpf_get_prandom_u32),
		  __imm(bpf_iter_num_new),
		  __imm(bpf_iter_num_next),
		  __imm(bpf_iter_num_destroy)
		: __clobber_all
	);
}

SEC("?raw_tp")
__success
__naked int triple_continue(void)
{
	/* This is equivalent to the C program below.
	 * The high branching factor of the loop body turned out to be
	 * problematic for one of the iterator convergence tracking
	 * algorithms explored.
	 *
	 * r6 = bpf_get_prandom_u32()
	 * bpf_iter_num_new(&fp[-8], 0, 10)
	 * while (bpf_iter_num_next(&fp[-8])) {
	 *   if (bpf_get_prandom_u32() != 42)
	 *     continue;
	 *   if (bpf_get_prandom_u32() != 42)
	 *     continue;
	 *   if (bpf_get_prandom_u32() != 42)
	 *     continue;
	 *   r0 += 0;
	 * }
	 * bpf_iter_num_destroy(&fp[-8])
	 * return 0
	 */
	asm volatile (
		"r1 = r10;"
		"r1 += -8;"
		"r2 = 0;"
		"r3 = 10;"
		"call %[bpf_iter_num_new];"
	"loop_%=:"
		"r1 = r10;"
		"r1 += -8;"
		"call %[bpf_iter_num_next];"
		"if r0 == 0 goto loop_end_%=;"
		"call %[bpf_get_prandom_u32];"
		"if r0 != 42 goto loop_%=;"
		"call %[bpf_get_prandom_u32];"
		"if r0 != 42 goto loop_%=;"
		"call %[bpf_get_prandom_u32];"
		"if r0 != 42 goto loop_%=;"
		"r0 += 0;"
		"goto loop_%=;"
	"loop_end_%=:"
		"r1 = r10;"
		"r1 += -8;"
		"call %[bpf_iter_num_destroy];"
		"r0 = 0;"
		"exit;"
		:
		: __imm(bpf_get_prandom_u32),
		  __imm(bpf_iter_num_new),
		  __imm(bpf_iter_num_next),
		  __imm(bpf_iter_num_destroy)
		: __clobber_all
	);
}

SEC("?raw_tp")
__success
__naked int widen_spill(void)
{
	/* This is equivalent to the C program below.
	 * The counter is stored in fp[-16]; if this counter is not widened,
	 * verifier states representing loop iterations would never converge.
	 *
	 * fp[-16] = 0
	 * bpf_iter_num_new(&fp[-8], 0, 10)
	 * while (bpf_iter_num_next(&fp[-8])) {
	 *   r0 = fp[-16];
	 *   r0 += 1;
	 *   fp[-16] = r0;
	 * }
	 * bpf_iter_num_destroy(&fp[-8])
	 * return 0
	 */
	asm volatile (
		"r0 = 0;"
		"*(u64 *)(r10 - 16) = r0;"
		"r1 = r10;"
		"r1 += -8;"
		"r2 = 0;"
		"r3 = 10;"
		"call %[bpf_iter_num_new];"
	"loop_%=:"
		"r1 = r10;"
		"r1 += -8;"
		"call %[bpf_iter_num_next];"
		"if r0 == 0 goto loop_end_%=;"
		"r0 = *(u64 *)(r10 - 16);"
		"r0 += 1;"
		"*(u64 *)(r10 - 16) = r0;"
		"goto loop_%=;"
	"loop_end_%=:"
		"r1 = r10;"
		"r1 += -8;"
		"call %[bpf_iter_num_destroy];"
		"r0 = 0;"
		"exit;"
		:
		: __imm(bpf_iter_num_new),
		  __imm(bpf_iter_num_next),
		  __imm(bpf_iter_num_destroy)
		: __clobber_all
	);
}

SEC("raw_tp")
__success
__naked int checkpoint_states_deletion(void)
{
	/* This is equivalent to the C program below.
	 *
	 * int *a, *b, *c, *d, *e, *f;
	 * int i, sum = 0;
	 * bpf_for(i, 0, 10) {
	 *   a = bpf_map_lookup_elem(&amap, &i);
	 *   b = bpf_map_lookup_elem(&amap, &i);
	 *   c = bpf_map_lookup_elem(&amap, &i);
	 *   d = bpf_map_lookup_elem(&amap, &i);
	 *   e = bpf_map_lookup_elem(&amap, &i);
	 *   f = bpf_map_lookup_elem(&amap, &i);
	 *   if (a) sum += 1;
	 *   if (b) sum += 1;
	 *   if (c) sum += 1;
	 *   if (d) sum += 1;
	 *   if (e) sum += 1;
	 *   if (f) sum += 1;
	 * }
	 * return 0;
	 *
	 * The body of the loop spawns multiple simulation paths
	 * with different combinations of NULL/non-NULL information for a/b/c/d/e/f.
	 * Each combination is unique from states_equal() point of view.
	 * An explored-states checkpoint is created after each iterator next call.
	 * Iterator convergence logic expects that eventually the current state
	 * would become equal to one of the explored states and thus loop
	 * exploration would be finished (at least for a specific path).
	 * The verifier evicts explored states with a high miss-to-hit ratio
	 * to avoid comparing the current state with too many explored
	 * states per instruction.
	 * This test is designed to "stress test" the eviction policy defined using the formula:
	 *
	 *    sl->miss_cnt > sl->hit_cnt * N + N // if true sl->state is evicted
	 *
	 * Currently N is set to 64, which allows for 6 variables in this test.
	 */
	asm volatile (
		"r6 = 0;"                  /* a */
		"r7 = 0;"                  /* b */
		"r8 = 0;"                  /* c */
		"*(u64 *)(r10 - 24) = r6;" /* d */
		"*(u64 *)(r10 - 32) = r6;" /* e */
		"*(u64 *)(r10 - 40) = r6;" /* f */
		"r9 = 0;"                  /* sum */
		"r1 = r10;"
		"r1 += -8;"
		"r2 = 0;"
		"r3 = 10;"
		"call %[bpf_iter_num_new];"
	"loop_%=:"
		"r1 = r10;"
		"r1 += -8;"
		"call %[bpf_iter_num_next];"
		"if r0 == 0 goto loop_end_%=;"

		"*(u64 *)(r10 - 16) = r0;"

		"r1 = %[amap] ll;"
		"r2 = r10;"
		"r2 += -16;"
		"call %[bpf_map_lookup_elem];"
		"r6 = r0;"

		"r1 = %[amap] ll;"
		"r2 = r10;"
		"r2 += -16;"
		"call %[bpf_map_lookup_elem];"
		"r7 = r0;"

		"r1 = %[amap] ll;"
		"r2 = r10;"
		"r2 += -16;"
		"call %[bpf_map_lookup_elem];"
		"r8 = r0;"

		"r1 = %[amap] ll;"
		"r2 = r10;"
		"r2 += -16;"
		"call %[bpf_map_lookup_elem];"
		"*(u64 *)(r10 - 24) = r0;"

		"r1 = %[amap] ll;"
		"r2 = r10;"
		"r2 += -16;"
		"call %[bpf_map_lookup_elem];"
		"*(u64 *)(r10 - 32) = r0;"

		"r1 = %[amap] ll;"
		"r2 = r10;"
		"r2 += -16;"
		"call %[bpf_map_lookup_elem];"
		"*(u64 *)(r10 - 40) = r0;"

		"if r6 == 0 goto +1;"
		"r9 += 1;"
		"if r7 == 0 goto +1;"
		"r9 += 1;"
		"if r8 == 0 goto +1;"
		"r9 += 1;"
		"r0 = *(u64 *)(r10 - 24);"
		"if r0 == 0 goto +1;"
		"r9 += 1;"
		"r0 = *(u64 *)(r10 - 32);"
		"if r0 == 0 goto +1;"
		"r9 += 1;"
		"r0 = *(u64 *)(r10 - 40);"
		"if r0 == 0 goto +1;"
		"r9 += 1;"

		"goto loop_%=;"
	"loop_end_%=:"
		"r1 = r10;"
		"r1 += -8;"
		"call %[bpf_iter_num_destroy];"
		"r0 = 0;"
		"exit;"
		:
		: __imm(bpf_map_lookup_elem),
		  __imm(bpf_iter_num_new),
		  __imm(bpf_iter_num_next),
		  __imm(bpf_iter_num_destroy),
		  __imm_addr(amap)
		: __clobber_all
	);
}

struct {
	int data[32];
	int n;
} loop_data;

SEC("raw_tp")
__success
int iter_arr_with_actual_elem_count(const void *ctx)
{
	int i, n = loop_data.n, sum = 0;

	if (n > ARRAY_SIZE(loop_data.data))
		return 0;

	bpf_for(i, 0, n) {
		/* no re-checking of i against ARRAY_SIZE(loop_data.data) */
		sum += loop_data.data[i];
	}

	return sum;
}

__u32 upper, select_n, result;
__u64 global;

static __noinline bool nest_2(char *str)
{
	/* some insns (including branch insns) to ensure stacksafe() is triggered
	 * in nest_2(). This way, stacksafe() can compare the frame associated with nest_1().
	 */
	if (str[0] == 't')
		return true;
	if (str[1] == 'e')
		return true;
	if (str[2] == 's')
		return true;
	if (str[3] == 't')
		return true;
	return false;
}

static __noinline bool nest_1(int n)
{
	/* case 0: allocate stack, case 1: no stack allocation */
	switch (n) {
	case 0: {
		char comm[16];

		if (bpf_get_current_comm(comm, 16))
			return false;
		return nest_2(comm);
	}
	case 1:
		return nest_2((char *)&global);
	default:
		return false;
	}
}

SEC("raw_tp")
__success
int iter_subprog_check_stacksafe(const void *ctx)
{
	long i;

	bpf_for(i, 0, upper) {
		if (!nest_1(select_n)) {
			result = 1;
			return 0;
		}
	}

	result = 2;
	return 0;
}

struct bpf_iter_num global_it;

SEC("?raw_tp")
__failure __msg("arg#0 expected pointer to an iterator on stack")
int iter_new_bad_arg(const void *ctx)
{
	bpf_iter_num_new(&global_it, 0, 1);
	return 0;
}

SEC("?raw_tp")
__failure __msg("arg#0 expected pointer to an iterator on stack")
int iter_next_bad_arg(const void *ctx)
{
	bpf_iter_num_next(&global_it);
	return 0;
}

SEC("?raw_tp")
__failure __msg("arg#0 expected pointer to an iterator on stack")
int iter_destroy_bad_arg(const void *ctx)
{
	bpf_iter_num_destroy(&global_it);
	return 0;
}

SEC("raw_tp")
__success
int clean_live_states(const void *ctx)
{
	char buf[1];
	int i, j, k, l, m, n, o;

	bpf_for(i, 0, 10)
	bpf_for(j, 0, 10)
	bpf_for(k, 0, 10)
	bpf_for(l, 0, 10)
	bpf_for(m, 0, 10)
	bpf_for(n, 0, 10)
	bpf_for(o, 0, 10) {
		if (unlikely(bpf_get_prandom_u32()))
			buf[0] = 42;
		bpf_printk("%s", buf);
	}
	return 0;
}

char _license[] SEC("license") = "GPL";