// SPDX-License-Identifier: GPL-2.0
#include "bpf_misc.h"
#include "bpf_experimental.h"

struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 8);
	__type(key, __u32);
	__type(value, __u64);
} map SEC(".maps");

struct {
	__uint(type, BPF_MAP_TYPE_USER_RINGBUF);
	__uint(max_entries, 8);
} ringbuf SEC(".maps");

struct vm_area_struct;
struct bpf_map;

struct buf_context {
	char *buf;
};

struct num_context {
	__u64 i;
	__u64 j;
};

__u8 choice_arr[2] = { 0, 1 };

static int unsafe_on_2nd_iter_cb(__u32 idx, struct buf_context *ctx)
{
	if (idx == 0) {
		ctx->buf = (char *)(0xDEAD);
		return 0;
	}

	if (bpf_probe_read_user(ctx->buf, 8, (void *)(0xBADC0FFEE)))
		return 1;

	return 0;
}

SEC("?raw_tp")
__failure __msg("R1 type=scalar expected=fp")
int unsafe_on_2nd_iter(void *unused)
{
	char buf[4];
	struct buf_context loop_ctx = { .buf = buf };

	bpf_loop(100, unsafe_on_2nd_iter_cb, &loop_ctx, 0);
	return 0;
}

static int unsafe_on_zero_iter_cb(__u32 idx, struct num_context *ctx)
{
	ctx->i = 0;
	return 0;
}

SEC("?raw_tp")
__failure __msg("invalid access to map value, value_size=2 off=32 size=1")
int unsafe_on_zero_iter(void *unused)
{
	struct num_context loop_ctx = { .i = 32 };

	bpf_loop(100, unsafe_on_zero_iter_cb, &loop_ctx, 0);
	return choice_arr[loop_ctx.i];
}

static int widening_cb(__u32 idx, struct num_context *ctx)
{
	++ctx->i;
	return 0;
}

SEC("?raw_tp")
__success
int widening(void *unused)
{
	struct num_context loop_ctx = { .i = 0, .j = 1 };

	bpf_loop(100, widening_cb, &loop_ctx, 0);
	/* loop_ctx.j is not changed during callback iteration,
	 * verifier should not apply widening to it.
	 */
	return choice_arr[loop_ctx.j];
}
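/* The callback below never terminates.  The verifier is expected to
 * detect this while simulating bpf_loop() callback iterations and
 * reject the program with "infinite loop detected".
 */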
static int loop_detection_cb(__u32 idx, struct num_context *ctx)
{
	for (;;) {}
	return 0;
}

SEC("?raw_tp")
__failure __msg("infinite loop detected")
int loop_detection(void *unused)
{
	struct num_context loop_ctx = { .i = 0 };

	bpf_loop(100, loop_detection_cb, &loop_ctx, 0);
	return 0;
}

static __always_inline __u64 oob_state_machine(struct num_context *ctx)
{
	switch (ctx->i) {
	case 0:
		ctx->i = 1;
		break;
	case 1:
		ctx->i = 32;
		break;
	}
	return 0;
}

static __u64 for_each_map_elem_cb(struct bpf_map *map, __u32 *key, __u64 *val, void *data)
{
	return oob_state_machine(data);
}

SEC("?raw_tp")
__failure __msg("invalid access to map value, value_size=2 off=32 size=1")
int unsafe_for_each_map_elem(void *unused)
{
	struct num_context loop_ctx = { .i = 0 };

	bpf_for_each_map_elem(&map, for_each_map_elem_cb, &loop_ctx, 0);
	return choice_arr[loop_ctx.i];
}

static __u64 ringbuf_drain_cb(struct bpf_dynptr *dynptr, void *data)
{
	return oob_state_machine(data);
}

SEC("?raw_tp")
__failure __msg("invalid access to map value, value_size=2 off=32 size=1")
int unsafe_ringbuf_drain(void *unused)
{
	struct num_context loop_ctx = { .i = 0 };

	bpf_user_ringbuf_drain(&ringbuf, ringbuf_drain_cb, &loop_ctx, 0);
	return choice_arr[loop_ctx.i];
}

static __u64 find_vma_cb(struct task_struct *task, struct vm_area_struct *vma, void *data)
{
	return oob_state_machine(data);
}

SEC("?raw_tp")
__failure __msg("invalid access to map value, value_size=2 off=32 size=1")
int unsafe_find_vma(void *unused)
{
	struct task_struct *task = bpf_get_current_task_btf();
	struct num_context loop_ctx = { .i = 0 };

	bpf_find_vma(task, 0, find_vma_cb, &loop_ctx, 0);
	return choice_arr[loop_ctx.i];
}

static int iter_limit_cb(__u32 idx, struct num_context *ctx)
{
	ctx->i++;
	return 0;
}

SEC("?raw_tp")
__success
int bpf_loop_iter_limit_ok(void *unused)
{
	struct num_context ctx = { .i = 0 };

	bpf_loop(1, iter_limit_cb, &ctx, 0);
	return choice_arr[ctx.i];
}

SEC("?raw_tp")
__failure __msg("invalid access to map value, value_size=2 off=2 size=1")
int bpf_loop_iter_limit_overflow(void *unused)
{
	struct num_context ctx = { .i = 0 };

	bpf_loop(2, iter_limit_cb, &ctx, 0);
	return choice_arr[ctx.i];
}
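/* Each callback below adds a distinct decimal digit to ctx->i (1, 10
 * or 100), so the final value records exactly which callbacks ran.
 */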
static int iter_limit_level2a_cb(__u32 idx, struct num_context *ctx)
{
	ctx->i += 100;
	return 0;
}

static int iter_limit_level2b_cb(__u32 idx, struct num_context *ctx)
{
	ctx->i += 10;
	return 0;
}

static int iter_limit_level1_cb(__u32 idx, struct num_context *ctx)
{
	ctx->i += 1;
	bpf_loop(1, iter_limit_level2a_cb, ctx, 0);
	bpf_loop(1, iter_limit_level2b_cb, ctx, 0);
	return 0;
}

/* Check that the path visiting every callback function once has been
 * reached by the verifier. The variables 'ctx{1,2}.i' below serve as
 * flags, with each decimal digit corresponding to a callback visit
 * marker.
 */
SEC("socket")
__success __retval(111111)
int bpf_loop_iter_limit_nested(void *unused)
{
	struct num_context ctx1 = { .i = 0 };
	struct num_context ctx2 = { .i = 0 };
	__u64 a, b, c;

	bpf_loop(1, iter_limit_level1_cb, &ctx1, 0);
	bpf_loop(1, iter_limit_level1_cb, &ctx2, 0);
	a = ctx1.i;
	b = ctx2.i;
	/* Force 'ctx1.i' and 'ctx2.i' precise. */
	c = choice_arr[(a + b) % 2];
	/* This makes 'c' zero, but neither clang nor verifier knows it. */
	c /= 10;
	/* Make sure that verifier does not visit 'impossible' states:
	 * enumerate all possible callback visit masks.
	 */
	if (a != 0 && a != 1 && a != 11 && a != 101 && a != 111 &&
	    b != 0 && b != 1 && b != 11 && b != 101 && b != 111)
		asm volatile ("r0 /= 0;" ::: "r0");
	return 1000 * a + b + c;
}

struct iter_limit_bug_ctx {
	__u64 a;
	__u64 b;
	__u64 c;
};

static __naked void iter_limit_bug_cb(void)
{
	/* This is the same as the C code below, but written
	 * in assembly to control which branches are fall-through.
	 *
	 *   switch (bpf_get_prandom_u32()) {
	 *   case 1:  ctx->a = 42; break;
	 *   case 2:  ctx->b = 42; break;
	 *   default: ctx->c = 42; break;
	 *   }
	 */
	asm volatile (
	"r9 = r2;"
	"call %[bpf_get_prandom_u32];"
	"r1 = r0;"
	"r2 = 42;"
	"r0 = 0;"
	"if r1 == 0x1 goto 1f;"
	"if r1 == 0x2 goto 2f;"
	"*(u64 *)(r9 + 16) = r2;"
	"exit;"
	"1: *(u64 *)(r9 + 0) = r2;"
	"exit;"
	"2: *(u64 *)(r9 + 8) = r2;"
	"exit;"
	:
	: __imm(bpf_get_prandom_u32)
	: __clobber_all
	);
}

int tmp_var;
SEC("socket")
__failure __msg("infinite loop detected at insn 2")
__naked void jgt_imm64_and_may_goto(void)
{
	asm volatile ("				\
	r0 = %[tmp_var] ll;			\
l0_%=:	.byte 0xe5; /* may_goto */		\
	.byte 0; /* regs */			\
	.short -3; /* off -3 */			\
	.long 0; /* imm */			\
	if r0 > 10 goto l0_%=;			\
	r0 = 0;					\
	exit;					\
"	:: __imm_addr(tmp_var)
	: __clobber_all);
}

SEC("socket")
__failure __msg("infinite loop detected at insn 1")
__naked void may_goto_self(void)
{
	asm volatile ("				\
	r0 = *(u32 *)(r10 - 4);			\
l0_%=:	.byte 0xe5; /* may_goto */		\
	.byte 0; /* regs */			\
	.short -1; /* off -1 */			\
	.long 0; /* imm */			\
	if r0 > 10 goto l0_%=;			\
	r0 = 0;					\
	exit;					\
"	::: __clobber_all);
}

SEC("socket")
__success __retval(0)
__naked void may_goto_neg_off(void)
{
	asm volatile ("				\
	r0 = *(u32 *)(r10 - 4);			\
	goto l0_%=;				\
	goto l1_%=;				\
l0_%=:	.byte 0xe5; /* may_goto */		\
	.byte 0; /* regs */			\
	.short -2; /* off -2 */			\
	.long 0; /* imm */			\
	if r0 > 10 goto l0_%=;			\
l1_%=:	r0 = 0;					\
	exit;					\
"	::: __clobber_all);
}

SEC("tc")
__failure
__flag(BPF_F_TEST_STATE_FREQ)
int iter_limit_bug(struct __sk_buff *skb)
{
	struct iter_limit_bug_ctx ctx = { 7, 7, 7 };

	bpf_loop(2, iter_limit_bug_cb, &ctx, 0);

	/* This is the same as the C code below,
	 * written in assembly to guarantee the order of the checks.
	 *
	 *   if (ctx.a == 42 && ctx.b == 42 && ctx.c == 7)
	 *     asm volatile("r1 /= 0;":::"r1");
	 */
	asm volatile (
	"r1 = *(u64 *)%[ctx_a];"
	"if r1 != 42 goto 1f;"
	"r1 = *(u64 *)%[ctx_b];"
	"if r1 != 42 goto 1f;"
	"r1 = *(u64 *)%[ctx_c];"
	"if r1 != 7 goto 1f;"
	"r1 /= 0;"
	"1:"
	:
	: [ctx_a]"m"(ctx.a),
	  [ctx_b]"m"(ctx.b),
	  [ctx_c]"m"(ctx.c)
	: "r1"
	);
	return 0;
}

SEC("socket")
__success __retval(0)
__naked void ja_and_may_goto(void)
{
	asm volatile ("				\
l0_%=:	.byte 0xe5; /* may_goto */		\
	.byte 0; /* regs */			\
	.short 1; /* off 1 */			\
	.long 0; /* imm */			\
	goto l0_%=;				\
	r0 = 0;					\
	exit;					\
"	::: __clobber_common);
}

SEC("socket")
__success __retval(0)
__naked void ja_and_may_goto2(void)
{
	asm volatile ("				\
l0_%=:	r0 = 0;					\
	.byte 0xe5; /* may_goto */		\
	.byte 0; /* regs */			\
	.short 1; /* off 1 */			\
	.long 0; /* imm */			\
	goto l0_%=;				\
	r0 = 0;					\
	exit;					\
"	::: __clobber_common);
}

SEC("socket")
__success __retval(0)
__naked void jlt_and_may_goto(void)
{
	asm volatile ("				\
l0_%=:	call %[bpf_jiffies64];			\
	.byte 0xe5; /* may_goto */		\
	.byte 0; /* regs */			\
	.short 1; /* off 1 */			\
	.long 0; /* imm */			\
	if r0 < 10 goto l0_%=;			\
	r0 = 0;					\
	exit;					\
"	:: __imm(bpf_jiffies64)
	: __clobber_all);
}

#ifdef CAN_USE_GOTOL
SEC("socket")
__success __retval(0)
__naked void gotol_and_may_goto(void)
{
	asm volatile ("				\
l0_%=:	r0 = 0;					\
	.byte 0xe5; /* may_goto */		\
	.byte 0; /* regs */			\
	.short 1; /* off 1 */			\
	.long 0; /* imm */			\
	gotol l0_%=;				\
	r0 = 0;					\
	exit;					\
"	::: __clobber_common);
}
#endif

SEC("socket")
__success __retval(0)
__naked void ja_and_may_goto_subprog(void)
{
	asm volatile ("				\
	call subprog_with_may_goto;		\
	exit;					\
"	::: __clobber_all);
}

static __naked __noinline __used
void subprog_with_may_goto(void)
{
	asm volatile ("				\
l0_%=:	.byte 0xe5; /* may_goto */		\
	.byte 0; /* regs */			\
	.short 1; /* off 1 */			\
	.long 0; /* imm */			\
	goto l0_%=;				\
	r0 = 0;					\
	exit;					\
"	::: __clobber_all);
}

#define ARR_SZ 1000000
int zero;
char arr[ARR_SZ];

SEC("socket")
__success __retval(0xd495cdc0)
int cond_break1(const void *ctx)
{
	unsigned long i;
	unsigned int sum = 0;

	for (i = zero; i < ARR_SZ && can_loop; i++)
		sum += i;
	for (i = zero; i < ARR_SZ; i++) {
		barrier_var(i);
		sum += i + arr[i];
		cond_break;
	}

	return sum;
}

SEC("socket")
__success __retval(999000000)
int cond_break2(const void *ctx)
{
	int i, j;
	int sum = 0;

	for (i = zero; i < 1000 && can_loop; i++)
		for (j = zero; j < 1000; j++) {
			sum += i + j;
			cond_break;
		}
	return sum;
}

static __noinline int loop(void)
{
	int i, sum = 0;

	for (i = zero; i <= 1000000 && can_loop; i++)
		sum += i;

	return sum;
}

SEC("socket")
__success __retval(0x6a5a2920)
int cond_break3(const void *ctx)
{
	return loop();
}

SEC("socket")
__success __retval(1)
int cond_break4(const void *ctx)
{
	int cnt = zero;

	for (;;) {
		/* should eventually break out of the loop */
		cond_break;
		cnt++;
	}
	/* if we looped a bit, it's a success */
	return cnt > 1 ? 1 : 0;
}
static __noinline int static_subprog(void)
{
	int cnt = zero;

	for (;;) {
		cond_break;
		cnt++;
	}

	return cnt;
}

SEC("socket")
__success __retval(1)
int cond_break5(const void *ctx)
{
	int cnt1 = zero, cnt2;

	for (;;) {
		cond_break;
		cnt1++;
	}

	cnt2 = static_subprog();

	/* main and subprog have to loop a bit */
	return cnt1 > 1 && cnt2 > 1 ? 1 : 0;
}

#define ARR2_SZ 1000
SEC(".data.arr2")
char arr2[ARR2_SZ];

SEC("socket")
__success __flag(BPF_F_TEST_STATE_FREQ)
int loop_inside_iter(const void *ctx)
{
	struct bpf_iter_num it;
	int *v, sum = 0;
	__u64 i = 0;

	bpf_iter_num_new(&it, 0, ARR2_SZ);
	while ((v = bpf_iter_num_next(&it))) {
		if (i < ARR2_SZ)
			sum += arr2[i++];
	}
	bpf_iter_num_destroy(&it);
	return sum;
}

SEC("socket")
__success __flag(BPF_F_TEST_STATE_FREQ)
int loop_inside_iter_signed(const void *ctx)
{
	struct bpf_iter_num it;
	int *v, sum = 0;
	long i = 0;

	bpf_iter_num_new(&it, 0, ARR2_SZ);
	while ((v = bpf_iter_num_next(&it))) {
		if (i < ARR2_SZ && i >= 0)
			sum += arr2[i++];
	}
	bpf_iter_num_destroy(&it);
	return sum;
}

volatile const int limit = ARR2_SZ;

SEC("socket")
__success __flag(BPF_F_TEST_STATE_FREQ)
int loop_inside_iter_volatile_limit(const void *ctx)
{
	struct bpf_iter_num it;
	int *v, sum = 0;
	__u64 i = 0;

	bpf_iter_num_new(&it, 0, ARR2_SZ);
	while ((v = bpf_iter_num_next(&it))) {
		if (i < limit)
			sum += arr2[i++];
	}
	bpf_iter_num_destroy(&it);
	return sum;
}

#define ARR_LONG_SZ 1000

SEC(".data.arr_long")
long arr_long[ARR_LONG_SZ];

SEC("socket")
__success
int test1(const void *ctx)
{
	long i;

	for (i = 0; i < ARR_LONG_SZ && can_loop; i++)
		arr_long[i] = i;
	return 0;
}

SEC("socket")
__success
int test2(const void *ctx)
{
	__u64 i;

	for (i = zero; i < ARR_LONG_SZ && can_loop; i++) {
		barrier_var(i);
		arr_long[i] = i;
	}
	return 0;
}

SEC(".data.arr_foo")
struct {
	int a;
	int b;
} arr_foo[ARR_LONG_SZ];

SEC("socket")
__success
int test3(const void *ctx)
{
	__u64 i;

	for (i = zero; i < ARR_LONG_SZ && can_loop; i++) {
		barrier_var(i);
		arr_foo[i].a = i;
		arr_foo[i].b = i;
	}
	return 0;
}

SEC("socket")
__success
int test4(const void *ctx)
{
	long i;

	for (i = zero + ARR_LONG_SZ - 1; i < ARR_LONG_SZ && i >= 0 && can_loop; i--) {
		barrier_var(i);
		arr_foo[i].a = i;
		arr_foo[i].b = i;
	}
	return 0;
}

char buf[10] SEC(".data.buf");

SEC("socket")
__description("check add const")
__success
__naked void check_add_const(void)
{
	/* typical LLVM generated loop with may_goto */
	asm volatile ("				\
	call %[bpf_ktime_get_ns];		\
	if r0 > 9 goto l1_%=;			\
l0_%=:	r1 = %[buf];				\
	r2 = r0;				\
	r1 += r2;				\
	r3 = *(u8 *)(r1 +0);			\
	.byte 0xe5; /* may_goto */		\
	.byte 0; /* regs */			\
	.short 4; /* off of l1_%=: */		\
	.long 0; /* imm */			\
	r0 = r2;				\
	r0 += 1;				\
	if r2 < 9 goto l0_%=;			\
	exit;					\
l1_%=:	r0 = 0;					\
	exit;					\
"	:
	: __imm(bpf_ktime_get_ns),
	  __imm_ptr(buf)
	: __clobber_common);
}
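/* In the test below r1 and r2 share r0's id with constant deltas +1
 * and +2; bounds learned for r0 must carry over to the linked
 * registers, so only the store through r7 (which can reach offset 10
 * in the 10-byte buffer) is reported as invalid.
 */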
*)(r7 +0) = r0") 704 __msg("invalid access to map value, value_size=10 off=10 size=1") 705 __naked void check_add_const_3regs(void) 706 { 707 asm volatile ( 708 "r6 = %[buf];" 709 "r7 = %[buf];" 710 "call %[bpf_ktime_get_ns];" 711 "r1 = r0;" /* link r0.id == r1.id == r2.id */ 712 "r2 = r0;" 713 "r1 += 1;" /* r1 == r0+1 */ 714 "r2 += 2;" /* r2 == r0+2 */ 715 "if r0 > 8 goto 1f;" /* r0 range [0, 8] */ 716 "r6 += r1;" /* r1 range [1, 9] */ 717 "r7 += r2;" /* r2 range [2, 10] */ 718 "*(u8 *)(r6 +0) = r0;" /* safe, within bounds */ 719 "*(u8 *)(r7 +0) = r0;" /* unsafe, out of bounds */ 720 "1: exit;" 721 : 722 : __imm(bpf_ktime_get_ns), 723 __imm_ptr(buf) 724 : __clobber_common); 725 } 726 727 SEC("socket") 728 __failure 729 __msg("*(u8 *)(r8 -1) = r0") 730 __msg("invalid access to map value, value_size=10 off=10 size=1") 731 __naked void check_add_const_3regs_2if(void) 732 { 733 asm volatile ( 734 "r6 = %[buf];" 735 "r7 = %[buf];" 736 "r8 = %[buf];" 737 "call %[bpf_ktime_get_ns];" 738 "if r0 < 2 goto 1f;" 739 "r1 = r0;" /* link r0.id == r1.id == r2.id */ 740 "r2 = r0;" 741 "r1 += 1;" /* r1 == r0+1 */ 742 "r2 += 2;" /* r2 == r0+2 */ 743 "if r2 > 11 goto 1f;" /* r2 range [0, 11] -> r0 range [-2, 9]; r1 range [-1, 10] */ 744 "if r0 s< 0 goto 1f;" /* r0 range [0, 9] -> r1 range [1, 10]; r2 range [2, 11]; */ 745 "r6 += r0;" /* r0 range [0, 9] */ 746 "r7 += r1;" /* r1 range [1, 10] */ 747 "r8 += r2;" /* r2 range [2, 11] */ 748 "*(u8 *)(r6 +0) = r0;" /* safe, within bounds */ 749 "*(u8 *)(r7 -1) = r0;" /* safe */ 750 "*(u8 *)(r8 -1) = r0;" /* unsafe */ 751 "1: exit;" 752 : 753 : __imm(bpf_ktime_get_ns), 754 __imm_ptr(buf) 755 : __clobber_common); 756 } 757 758 SEC("socket") 759 __failure 760 __flag(BPF_F_TEST_STATE_FREQ) 761 __naked void check_add_const_regsafe_off(void) 762 { 763 asm volatile ( 764 "r8 = %[buf];" 765 "call %[bpf_ktime_get_ns];" 766 "r6 = r0;" 767 "call %[bpf_ktime_get_ns];" 768 "r7 = r0;" 769 "call %[bpf_ktime_get_ns];" 770 "r1 = r0;" /* same ids for r1 and r0 */ 771 "if r6 > r7 goto 1f;" /* this jump can't be predicted */ 772 "r1 += 1;" /* r1.off == +1 */ 773 "goto 2f;" 774 "1: r1 += 100;" /* r1.off == +100 */ 775 "goto +0;" /* verify r1.off in regsafe() after this insn */ 776 "2: if r0 > 8 goto 3f;" /* r0 range [0,8], r1 range either [1,9] or [100,108]*/ 777 "r8 += r1;" 778 "*(u8 *)(r8 +0) = r0;" /* potentially unsafe, buf size is 10 */ 779 "3: exit;" 780 : 781 : __imm(bpf_ktime_get_ns), 782 __imm_ptr(buf) 783 : __clobber_common); 784 } 785 786 char _license[] SEC("license") = "GPL"; 787