// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/btf_ids.h>
#include <linux/delay.h>
#include <linux/error-injection.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/percpu-defs.h>
#include <linux/sysfs.h>
#include <linux/tracepoint.h>
#include <linux/net.h>
#include <linux/socket.h>
#include <linux/nsproxy.h>
#include <linux/inet.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/un.h>
#include <linux/filter.h>
#include <linux/rcupdate_trace.h>
#include <net/sock.h>
#include <linux/namei.h>
#include "bpf_testmod.h"
#include "bpf_testmod_kfunc.h"

#define CREATE_TRACE_POINTS
#include "bpf_testmod-events.h"

/* Send timeout (seconds) applied to the test socket so kernel_connect()
 * below cannot block the test forever.
 */
#define CONNECT_TIMEOUT_SEC 1

/* Nested function-pointer typedefs; emitted into the module's BTF via
 * BTF_TYPE_EMIT() in bpf_testmod_test_btf_type_tag_user_1().
 */
typedef int (*func_proto_typedef)(long);
typedef int (*func_proto_typedef_nested1)(func_proto_typedef);
typedef int (*func_proto_typedef_nested2)(func_proto_typedef_nested1);

/* Per-CPU ksym exposed to BPF programs; written by bpf_testmod_test_mod_kfunc(). */
DEFINE_PER_CPU(int, bpf_testmod_ksym_percpu) = 123;
/* Accumulator read back by selftests to verify struct-argument passing. */
long bpf_testmod_test_struct_arg_result;
/* Single kernel-side test socket, guarded by sock_lock. */
static DEFINE_MUTEX(sock_lock);
static struct socket *sock;

/* Argument types of various sizes/layouts used to exercise how struct
 * arguments are passed to traced functions on each architecture's ABI.
 */
struct bpf_testmod_struct_arg_1 {
	int a;
};
struct bpf_testmod_struct_arg_2 {
	long a;
	long b;
};

struct bpf_testmod_struct_arg_3 {
	int a;
	int b[];	/* flexible array member; sized at kmalloc time */
};

struct bpf_testmod_struct_arg_4 {
	u64 a;
	int b;
};

struct bpf_testmod_struct_arg_5 {
	char a;
	short b;
	int c;
	long d;
};

union bpf_testmod_union_arg_1 {
	char a;
	short b;
	struct bpf_testmod_struct_arg_1 arg;
};

union bpf_testmod_union_arg_2 {
	int a;
	long b;
	struct bpf_testmod_struct_arg_2 arg;
};

__bpf_hook_start();

/* Attach target: struct-by-value as the first argument. Stores the sum of
 * all fields/args in bpf_testmod_test_struct_arg_result for verification.
 */
noinline int
bpf_testmod_test_struct_arg_1(struct bpf_testmod_struct_arg_2 a, int b, int c) {
	bpf_testmod_test_struct_arg_result = a.a + a.b + b + c;
	return bpf_testmod_test_struct_arg_result;
}

/* Attach target: struct-by-value in the middle argument position. */
noinline int
bpf_testmod_test_struct_arg_2(int a, struct bpf_testmod_struct_arg_2 b, int c) {
	bpf_testmod_test_struct_arg_result = a + b.a + b.b + c;
	return bpf_testmod_test_struct_arg_result;
}

/* Attach target: struct-by-value in the last argument position. */
noinline int
bpf_testmod_test_struct_arg_3(int a, int b, struct bpf_testmod_struct_arg_2 c) {
	bpf_testmod_test_struct_arg_result = a + b + c.a + c.b;
	return bpf_testmod_test_struct_arg_result;
}

/* Attach target: two struct-by-value arguments (first and last). */
noinline int
bpf_testmod_test_struct_arg_4(struct bpf_testmod_struct_arg_1 a, int b,
			      int c, int d, struct bpf_testmod_struct_arg_2 e) {
	bpf_testmod_test_struct_arg_result = a.a + b + c + d + e.a + e.b;
	return bpf_testmod_test_struct_arg_result;
}

/* Attach target with no arguments at all. */
noinline int
bpf_testmod_test_struct_arg_5(void) {
	bpf_testmod_test_struct_arg_result = 1;
	return bpf_testmod_test_struct_arg_result;
}

/* Attach target: pointer to a struct with a flexible array member. */
noinline int
bpf_testmod_test_struct_arg_6(struct bpf_testmod_struct_arg_3 *a) {
	bpf_testmod_test_struct_arg_result = a->b[0];
	return bpf_testmod_test_struct_arg_result;
}

/* Attach target: struct-by-value after five scalar/pointer args. */
noinline int
bpf_testmod_test_struct_arg_7(u64 a, void *b, short c, int d, void *e,
			      struct bpf_testmod_struct_arg_4 f)
{
	bpf_testmod_test_struct_arg_result = a + (long)b + c + d +
		(long)e + f.a + f.b;
	return bpf_testmod_test_struct_arg_result;
}

/* Same as _7 but with a trailing scalar after the struct. */
noinline int
bpf_testmod_test_struct_arg_8(u64 a, void *b, short c, int d, void *e,
			      struct bpf_testmod_struct_arg_4 f, int g)
{
	bpf_testmod_test_struct_arg_result = a + (long)b + c + d +
		(long)e + f.a + f.b + g;
	return bpf_testmod_test_struct_arg_result;
}

/* Attach target: a four-field struct squeezed between many scalars. */
noinline int
bpf_testmod_test_struct_arg_9(u64 a, void *b, short c, int d, void *e, char f,
			      short g, struct bpf_testmod_struct_arg_5 h, long i)
{
	bpf_testmod_test_struct_arg_result = a + (long)b + c + d + (long)e +
		f + g + h.a + h.b + h.c + h.d + i;
	return bpf_testmod_test_struct_arg_result;
}

/* Attach target: union-by-value as the first argument. */
noinline int
bpf_testmod_test_union_arg_1(union bpf_testmod_union_arg_1 a, int b, int c)
{
	bpf_testmod_test_struct_arg_result = a.arg.a + b + c;
	return bpf_testmod_test_struct_arg_result;
}

/* Attach target: union-by-value as the last argument. */
noinline int
bpf_testmod_test_union_arg_2(int a, union bpf_testmod_union_arg_2 b)
{
	bpf_testmod_test_struct_arg_result = a + b.arg.a + b.arg.b;
	return bpf_testmod_test_struct_arg_result;
}

/* Attach target: plain pointer-to-struct argument. */
noinline int
bpf_testmod_test_arg_ptr_to_struct(struct bpf_testmod_struct_arg_1 *a) {
	bpf_testmod_test_struct_arg_result = a->a;
	return bpf_testmod_test_struct_arg_result;
}

/* Attach target exercising long symbol names in attach-by-name paths. */
__weak noinline void bpf_testmod_looooooooooooooooooooooooooooooong_name(void)
{
}

/* kfunc: store @i in the per-CPU ksym so tests can observe the call. */
__bpf_kfunc void
bpf_testmod_test_mod_kfunc(int i)
{
	*(int *)this_cpu_ptr(&bpf_testmod_ksym_percpu) = i;
}

/* Open-coded iterator kfunc: initialize an iterator that will yield
 * @value a total of @cnt times. Note it->cnt is set before the negative
 * check on purpose, so a failed _new still leaves the iterator in a
 * state that _destroy can handle.
 */
__bpf_kfunc int bpf_iter_testmod_seq_new(struct bpf_iter_testmod_seq *it, s64 value, int cnt)
{
	it->cnt = cnt;

	if (cnt < 0)
		return -EINVAL;

	it->value = value;

	return 0;
}

/* Iterator next: yield &it->value until cnt is exhausted, then NULL. */
__bpf_kfunc s64 *bpf_iter_testmod_seq_next(struct bpf_iter_testmod_seq* it)
{
	if (it->cnt <= 0)
		return NULL;

	it->cnt--;

	return &it->value;
}

/* Iterator-aware kfunc (it__iter suffix): combine @val with the iterator value. */
__bpf_kfunc s64 bpf_iter_testmod_seq_value(int val, struct bpf_iter_testmod_seq* it__iter)
{
	if (it__iter->cnt < 0)
		return 0;

	return val + it__iter->value;
}

/* Iterator destroy: mark the iterator exhausted. */
__bpf_kfunc void bpf_iter_testmod_seq_destroy(struct bpf_iter_testmod_seq *it)
{
	it->cnt = 0;
}

/* No-op kfunc used as a generic call target. */
__bpf_kfunc void bpf_kfunc_common_test(void)
{
}

/* No-op kfunc exercising dynptr and nullable-dynptr argument handling. */
__bpf_kfunc void bpf_kfunc_dynptr_test(struct bpf_dynptr *ptr,
				       struct bpf_dynptr *ptr__nullable)
{
}

/* Acquire-kfunc stubs used only for verifier-level checks of nested
 * pointer offsets; they never return a real object.
 */
__bpf_kfunc struct sk_buff *bpf_kfunc_nested_acquire_nonzero_offset_test(struct sk_buff_head *ptr)
{
	return NULL;
}

__bpf_kfunc struct sk_buff *bpf_kfunc_nested_acquire_zero_offset_test(struct sock_common *ptr)
{
	return NULL;
}

/* Release-kfunc counterpart for the nested-acquire stubs above; no-op. */
__bpf_kfunc void bpf_kfunc_nested_release_test(struct sk_buff *ptr)
{
}

/* No-op kfuncs whose only purpose is verifying the argument-trust rules
 * (trusted vma/task pointers, plain scalar pointer, RCU-protected task).
 */
__bpf_kfunc void bpf_kfunc_trusted_vma_test(struct vm_area_struct *ptr)
{
}

__bpf_kfunc void bpf_kfunc_trusted_task_test(struct task_struct *ptr)
{
}

__bpf_kfunc void bpf_kfunc_trusted_num_test(int *ptr)
{
}

__bpf_kfunc void bpf_kfunc_rcu_task_test(struct task_struct *ptr)
{
}

/* KF_RET_NULL | KF_RCU_PROTECTED stubs: always return NULL. */
__bpf_kfunc struct task_struct *bpf_kfunc_ret_rcu_test(void)
{
	return NULL;
}

__bpf_kfunc int *bpf_kfunc_ret_rcu_test_nostruct(int rdonly_buf_size)
{
	return NULL;
}

/* Statically allocated object handed out as a "default trusted" pointer. */
static struct prog_test_member trusted_ptr;

__bpf_kfunc struct prog_test_member *bpf_kfunc_get_default_trusted_ptr_test(void)
{
	return &trusted_ptr;
}

__bpf_kfunc void bpf_kfunc_put_default_trusted_ptr_test(struct prog_test_member *trusted_ptr)
{
	/*
	 * This BPF kfunc doesn't actually have any put/KF_ACQUIRE
	 * semantics. We're simply wanting to simulate a BPF kfunc that takes a
	 * struct prog_test_member pointer as an argument.
	 */
}

/* Acquire-kfunc: allocate a refcounted test context. On allocation
 * failure *err is set to -ENOMEM and NULL is returned (KF_RET_NULL).
 */
__bpf_kfunc struct bpf_testmod_ctx *
bpf_testmod_ctx_create(int *err)
{
	struct bpf_testmod_ctx *ctx;

	ctx = kzalloc(sizeof(*ctx), GFP_ATOMIC);
	if (!ctx) {
		*err = -ENOMEM;
		return NULL;
	}
	refcount_set(&ctx->usage, 1);

	return ctx;
}

/* RCU callback freeing a bpf_testmod_ctx after the grace period. */
static void testmod_free_cb(struct rcu_head *head)
{
	struct bpf_testmod_ctx *ctx;

	ctx = container_of(head, struct bpf_testmod_ctx, rcu);
	kfree(ctx);
}

/* Release-kfunc: drop a reference; free via RCU on the last put. */
__bpf_kfunc void bpf_testmod_ctx_release(struct bpf_testmod_ctx *ctx)
{
	if (!ctx)
		return;
	if (refcount_dec_and_test(&ctx->usage))
		call_rcu(&ctx->rcu, testmod_free_cb);
}

/* Destructor registered via bpf_testmod_dtor_ids; plain void* wrapper. */
__bpf_kfunc void bpf_testmod_ctx_release_dtor(void *ctx)
{
	bpf_testmod_ctx_release(ctx);
}
CFI_NOSEAL(bpf_testmod_ctx_release_dtor);

/* struct_ops instance installed by the ops3 map; NULL until registered. */
static struct bpf_testmod_ops3 *st_ops3;

static int bpf_testmod_test_3(void)
{
	return 0;
}

static int bpf_testmod_test_4(void)
{
	return 0;
}

static struct bpf_testmod_ops3 __bpf_testmod_ops3 = {
	.test_1 = bpf_testmod_test_3,
	.test_2 = bpf_testmod_test_4,
};

/* NULL-checked call into the registered ops3 implementation. */
static void bpf_testmod_test_struct_ops3(void)
{
	if (st_ops3)
		st_ops3->test_1();
}

/* Unchecked kfunc wrappers: deliberately dereference st_ops3 without a
 * NULL check so the test can exercise calls into struct_ops slots.
 */
__bpf_kfunc void bpf_testmod_ops3_call_test_1(void)
{
	st_ops3->test_1();
}

__bpf_kfunc void bpf_testmod_ops3_call_test_2(void)
{
	st_ops3->test_2();
}

/* Types exercising BTF type tags (__user / __percpu) on pointees. */
struct bpf_testmod_btf_type_tag_1 {
	int a;
};

struct bpf_testmod_btf_type_tag_2 {
	struct bpf_testmod_btf_type_tag_1 __user *p;
};

struct bpf_testmod_btf_type_tag_3 {
	struct bpf_testmod_btf_type_tag_1 __percpu *p;
};

/* Attach target with a __user-tagged argument; also forces the
 * func_proto typedefs above into the module BTF.
 */
noinline int
bpf_testmod_test_btf_type_tag_user_1(struct bpf_testmod_btf_type_tag_1 __user *arg) {
	BTF_TYPE_EMIT(func_proto_typedef);
	BTF_TYPE_EMIT(func_proto_typedef_nested1);
	BTF_TYPE_EMIT(func_proto_typedef_nested2);
	return arg->a;
}

/* Attach target: struct containing a __user-tagged pointer. */
noinline int
bpf_testmod_test_btf_type_tag_user_2(struct bpf_testmod_btf_type_tag_2 *arg) {
	return arg->p->a;
}

/* Attach targets for __percpu-tagged pointers (direct and nested). */
noinline int
bpf_testmod_test_btf_type_tag_percpu_1(struct bpf_testmod_btf_type_tag_1 __percpu *arg) {
	return arg->a;
}

noinline int
bpf_testmod_test_btf_type_tag_percpu_2(struct bpf_testmod_btf_type_tag_3 *arg) {
	return arg->p->a;
}

noinline int bpf_testmod_loop_test(int n)
{
	/* Make sum volatile, so smart compilers, such as clang, will not
	 * optimize the code by removing the loop.
	 */
	volatile int sum = 0;
	int i;

	/* the primary goal of this test is to test LBR. Create a lot of
	 * branches in the function, so we can catch it easily.
	 */
	for (i = 0; i < n; i++)
		sum += i;
	return sum;
}

/* Returns a range of deliberately bogus pointers so probe-read paths and
 * exception tables can be exercised; NULL terminates the sequence.
 */
__weak noinline struct file *bpf_testmod_return_ptr(int arg)
{
	static struct file f = {};

	switch (arg) {
	case 1: return (void *)EINVAL;		/* user addr */
	case 2: return (void *)0xcafe4a11;	/* user addr */
	case 3: return (void *)-EINVAL;		/* canonical, but invalid */
	case 4: return (void *)(1ull << 60);	/* non-canonical and invalid */
	case 5: return (void *)~(1ull << 30);	/* trigger extable */
	case 6: return &f;			/* valid addr */
	case 7: return (void *)((long)&f | 1);	/* kernel tricks */
#ifdef CONFIG_X86_64
	case 8: return (void *)VSYSCALL_ADDR;	/* vsyscall page address */
#endif
	default: return NULL;
	}
}

/* fentry/fexit attach targets with increasing argument counts. test1 and
 * test2 also fire dedicated tracepoints.
 */
noinline int bpf_testmod_fentry_test1(int a)
{
	trace_bpf_testmod_fentry_test1_tp(a);

	return a + 1;
}

noinline int bpf_testmod_fentry_test2(int a, u64 b)
{
	trace_bpf_testmod_fentry_test2_tp(a, b);

	return a + b;
}

noinline int bpf_testmod_fentry_test3(char a, int b, u64 c)
{
	return a + b + c;
}

noinline int bpf_testmod_fentry_test7(u64 a, void *b, short c, int d,
				      void *e, char f, int g)
{
	return a + (long)b + c + d + (long)e + f + g;
}

noinline int bpf_testmod_fentry_test11(u64 a, void *b, short c, int d,
				       void *e, char f, int g,
				       unsigned int h, long i, __u64 j,
				       unsigned long k)
{
	return a + (long)b + c + d + (long)e + f + g + h + i + j + k;
}

/* Chain of noinline functions giving the stacktrace test a known call
 * stack; the empty asm keeps each frame from being optimized away.
 */
noinline void bpf_testmod_stacktrace_test(void)
{
	/* used for stacktrace test as attach function */
	asm volatile ("");
}

noinline void bpf_testmod_stacktrace_test_3(void)
{
	bpf_testmod_stacktrace_test();
	asm volatile ("");
}

noinline void bpf_testmod_stacktrace_test_2(void)
{
	bpf_testmod_stacktrace_test_3();
	asm volatile ("");
}

noinline void bpf_testmod_stacktrace_test_1(void)
{
	bpf_testmod_stacktrace_test_2();
	asm volatile ("");
}

/* Set to 1 once all fentry fixtures below returned expected values. */
int bpf_testmod_fentry_ok;

noinline int bpf_testmod_trampoline_count_test(void)
{
	return 0;
}

/* sysfs read handler for the "bpf_testmod" bin file. Each read drives
 * every attach-target fixture above exactly once, then fires the
 * bpf_testmod_test_read tracepoint. Always returns -EIO (unless the
 * writable-tracepoint magic path triggers) so userspace relies on the
 * BPF side effects, not the read result.
 */
noinline ssize_t
bpf_testmod_test_read(struct file *file, struct kobject *kobj,
		      const struct bin_attribute *bin_attr,
		      char *buf, loff_t off, size_t len)
{
	struct bpf_testmod_test_read_ctx ctx = {
		.buf = buf,
		.off = off,
		.len = len,
	};
	struct bpf_testmod_struct_arg_1 struct_arg1 = {10}, struct_arg1_2 = {-1};
	struct bpf_testmod_struct_arg_2 struct_arg2 = {2, 3};
	struct bpf_testmod_struct_arg_3 *struct_arg3;
	struct bpf_testmod_struct_arg_4 struct_arg4 = {21, 22};
	struct bpf_testmod_struct_arg_5 struct_arg5 = {23, 24, 25, 26};
	union bpf_testmod_union_arg_1 union_arg1 = { .arg = {1} };
	union bpf_testmod_union_arg_2 union_arg2 = { .arg = {2, 3} };
	int i = 1;

	/* Walk bpf_testmod_return_ptr() through all of its bogus pointers. */
	while (bpf_testmod_return_ptr(i))
		i++;

	(void)bpf_testmod_test_struct_arg_1(struct_arg2, 1, 4);
	(void)bpf_testmod_test_struct_arg_2(1, struct_arg2, 4);
	(void)bpf_testmod_test_struct_arg_3(1, 4, struct_arg2);
	(void)bpf_testmod_test_struct_arg_4(struct_arg1, 1, 2, 3, struct_arg2);
	(void)bpf_testmod_test_struct_arg_5();
	(void)bpf_testmod_test_struct_arg_7(16, (void *)17, 18, 19,
					    (void *)20, struct_arg4);
	(void)bpf_testmod_test_struct_arg_8(16, (void *)17, 18, 19,
					    (void *)20, struct_arg4, 23);
	(void)bpf_testmod_test_struct_arg_9(16, (void *)17, 18, 19, (void *)20,
					    21, 22, struct_arg5, 27);

	(void)bpf_testmod_test_union_arg_1(union_arg1, 4, 5);
	(void)bpf_testmod_test_union_arg_2(6, union_arg2);

	(void)bpf_testmod_test_arg_ptr_to_struct(&struct_arg1_2);

	(void)trace_bpf_testmod_test_raw_tp_null_tp(NULL);

	bpf_testmod_test_struct_ops3();

	/* Flexible-array struct: allocate room for one trailing int. */
	struct_arg3 = kmalloc((sizeof(struct bpf_testmod_struct_arg_3) +
				sizeof(int)), GFP_KERNEL);
	if (struct_arg3 != NULL) {
		struct_arg3->b[0] = 1;
		(void)bpf_testmod_test_struct_arg_6(struct_arg3);
		kfree(struct_arg3);
	}

	/* This is always true. Use the check to make sure the compiler
	 * doesn't remove bpf_testmod_loop_test.
	 */
	if (bpf_testmod_loop_test(101) > 100)
		trace_bpf_testmod_test_read(current, &ctx);

	trace_bpf_testmod_test_nullable_bare_tp(NULL);

	/* Magic number to enable writable tp */
	if (len == 64) {
		struct bpf_testmod_test_writable_ctx writable = {
			.val = 1024,
		};
		trace_bpf_testmod_test_writable_bare_tp(&writable);
		if (writable.early_ret)
			return snprintf(buf, len, "%d\n", writable.val);
	}

	/* Verify every fentry fixture computes its expected value before
	 * declaring the fentry machinery OK.
	 */
	if (bpf_testmod_fentry_test1(1) != 2 ||
	    bpf_testmod_fentry_test2(2, 3) != 5 ||
	    bpf_testmod_fentry_test3(4, 5, 6) != 15 ||
	    bpf_testmod_fentry_test7(16, (void *)17, 18, 19, (void *)20,
				     21, 22) != 133 ||
	    bpf_testmod_fentry_test11(16, (void *)17, 18, 19, (void *)20,
				      21, 22, 23, 24, 25, 26) != 231)
		goto out;

	bpf_testmod_trampoline_count_test();

	bpf_testmod_stacktrace_test_1();

	bpf_testmod_fentry_ok = 1;
out:
	return -EIO; /* always fail */
}
EXPORT_SYMBOL(bpf_testmod_test_read);
ALLOW_ERROR_INJECTION(bpf_testmod_test_read, ERRNO); 566 567 noinline ssize_t 568 bpf_testmod_test_write(struct file *file, struct kobject *kobj, 569 const struct bin_attribute *bin_attr, 570 char *buf, loff_t off, size_t len) 571 { 572 struct bpf_testmod_test_write_ctx ctx = { 573 .buf = buf, 574 .off = off, 575 .len = len, 576 }; 577 578 trace_bpf_testmod_test_write_bare_tp(current, &ctx); 579 580 return -EIO; /* always fail */ 581 } 582 EXPORT_SYMBOL(bpf_testmod_test_write); 583 ALLOW_ERROR_INJECTION(bpf_testmod_test_write, ERRNO); 584 585 noinline int bpf_fentry_shadow_test(int a) 586 { 587 return a + 2; 588 } 589 EXPORT_SYMBOL_GPL(bpf_fentry_shadow_test); 590 591 __bpf_hook_end(); 592 593 static struct bin_attribute bin_attr_bpf_testmod_file __ro_after_init = { 594 .attr = { .name = "bpf_testmod", .mode = 0666, }, 595 .read = bpf_testmod_test_read, 596 .write = bpf_testmod_test_write, 597 }; 598 599 /* bpf_testmod_uprobe sysfs attribute is so far enabled for x86_64 only, 600 * please see test_uretprobe_regs_change test 601 */ 602 #ifdef __x86_64__ 603 604 static int 605 uprobe_handler(struct uprobe_consumer *self, struct pt_regs *regs, __u64 *data) 606 { 607 regs->cx = 0x87654321feebdaed; 608 return 0; 609 } 610 611 static int 612 uprobe_ret_handler(struct uprobe_consumer *self, unsigned long func, 613 struct pt_regs *regs, __u64 *data) 614 615 { 616 regs->ax = 0x12345678deadbeef; 617 regs->r11 = (u64) -1; 618 return 0; 619 } 620 621 struct testmod_uprobe { 622 struct path path; 623 struct uprobe *uprobe; 624 struct uprobe_consumer consumer; 625 }; 626 627 static DEFINE_MUTEX(testmod_uprobe_mutex); 628 629 static struct testmod_uprobe uprobe = { 630 .consumer.handler = uprobe_handler, 631 .consumer.ret_handler = uprobe_ret_handler, 632 }; 633 634 static int testmod_register_uprobe(loff_t offset) 635 { 636 int err = -EBUSY; 637 638 if (uprobe.uprobe) 639 return -EBUSY; 640 641 mutex_lock(&testmod_uprobe_mutex); 642 643 if (uprobe.uprobe) 644 goto out; 645 646 
err = kern_path("/proc/self/exe", LOOKUP_FOLLOW, &uprobe.path); 647 if (err) 648 goto out; 649 650 uprobe.uprobe = uprobe_register(d_real_inode(uprobe.path.dentry), 651 offset, 0, &uprobe.consumer); 652 if (IS_ERR(uprobe.uprobe)) { 653 err = PTR_ERR(uprobe.uprobe); 654 path_put(&uprobe.path); 655 uprobe.uprobe = NULL; 656 } 657 out: 658 mutex_unlock(&testmod_uprobe_mutex); 659 return err; 660 } 661 662 static void testmod_unregister_uprobe(void) 663 { 664 mutex_lock(&testmod_uprobe_mutex); 665 666 if (uprobe.uprobe) { 667 uprobe_unregister_nosync(uprobe.uprobe, &uprobe.consumer); 668 uprobe_unregister_sync(); 669 path_put(&uprobe.path); 670 uprobe.uprobe = NULL; 671 } 672 673 mutex_unlock(&testmod_uprobe_mutex); 674 } 675 676 static ssize_t 677 bpf_testmod_uprobe_write(struct file *file, struct kobject *kobj, 678 const struct bin_attribute *bin_attr, 679 char *buf, loff_t off, size_t len) 680 { 681 unsigned long offset = 0; 682 int err = 0; 683 684 if (kstrtoul(buf, 0, &offset)) 685 return -EINVAL; 686 687 if (offset) 688 err = testmod_register_uprobe(offset); 689 else 690 testmod_unregister_uprobe(); 691 692 return err ?: strlen(buf); 693 } 694 695 static struct bin_attribute bin_attr_bpf_testmod_uprobe_file __ro_after_init = { 696 .attr = { .name = "bpf_testmod_uprobe", .mode = 0666, }, 697 .write = bpf_testmod_uprobe_write, 698 }; 699 700 static int register_bpf_testmod_uprobe(void) 701 { 702 return sysfs_create_bin_file(kernel_kobj, &bin_attr_bpf_testmod_uprobe_file); 703 } 704 705 static void unregister_bpf_testmod_uprobe(void) 706 { 707 testmod_unregister_uprobe(); 708 sysfs_remove_bin_file(kernel_kobj, &bin_attr_bpf_testmod_uprobe_file); 709 } 710 711 #else 712 static int register_bpf_testmod_uprobe(void) 713 { 714 return 0; 715 } 716 717 static void unregister_bpf_testmod_uprobe(void) { } 718 #endif 719 720 BTF_KFUNCS_START(bpf_testmod_common_kfunc_ids) 721 BTF_ID_FLAGS(func, bpf_iter_testmod_seq_new, KF_ITER_NEW) 722 BTF_ID_FLAGS(func, 
bpf_iter_testmod_seq_next, KF_ITER_NEXT | KF_RET_NULL) 723 BTF_ID_FLAGS(func, bpf_iter_testmod_seq_destroy, KF_ITER_DESTROY) 724 BTF_ID_FLAGS(func, bpf_iter_testmod_seq_value) 725 BTF_ID_FLAGS(func, bpf_kfunc_common_test) 726 BTF_ID_FLAGS(func, bpf_kfunc_call_test_mem_len_pass1) 727 BTF_ID_FLAGS(func, bpf_kfunc_dynptr_test) 728 BTF_ID_FLAGS(func, bpf_kfunc_nested_acquire_nonzero_offset_test, KF_ACQUIRE) 729 BTF_ID_FLAGS(func, bpf_kfunc_nested_acquire_zero_offset_test, KF_ACQUIRE) 730 BTF_ID_FLAGS(func, bpf_kfunc_nested_release_test, KF_RELEASE) 731 BTF_ID_FLAGS(func, bpf_kfunc_trusted_vma_test) 732 BTF_ID_FLAGS(func, bpf_kfunc_trusted_task_test) 733 BTF_ID_FLAGS(func, bpf_kfunc_trusted_num_test) 734 BTF_ID_FLAGS(func, bpf_kfunc_rcu_task_test, KF_RCU) 735 BTF_ID_FLAGS(func, bpf_kfunc_ret_rcu_test, KF_RET_NULL | KF_RCU_PROTECTED) 736 BTF_ID_FLAGS(func, bpf_kfunc_ret_rcu_test_nostruct, KF_RET_NULL | KF_RCU_PROTECTED) 737 BTF_ID_FLAGS(func, bpf_testmod_ctx_create, KF_ACQUIRE | KF_RET_NULL) 738 BTF_ID_FLAGS(func, bpf_testmod_ctx_release, KF_RELEASE) 739 BTF_ID_FLAGS(func, bpf_testmod_ops3_call_test_1) 740 BTF_ID_FLAGS(func, bpf_testmod_ops3_call_test_2) 741 BTF_ID_FLAGS(func, bpf_kfunc_get_default_trusted_ptr_test); 742 BTF_ID_FLAGS(func, bpf_kfunc_put_default_trusted_ptr_test); 743 BTF_KFUNCS_END(bpf_testmod_common_kfunc_ids) 744 745 BTF_ID_LIST(bpf_testmod_dtor_ids) 746 BTF_ID(struct, bpf_testmod_ctx) 747 BTF_ID(func, bpf_testmod_ctx_release_dtor) 748 749 static const struct btf_kfunc_id_set bpf_testmod_common_kfunc_set = { 750 .owner = THIS_MODULE, 751 .set = &bpf_testmod_common_kfunc_ids, 752 }; 753 754 __bpf_kfunc u64 bpf_kfunc_call_test1(struct sock *sk, u32 a, u64 b, u32 c, u64 d) 755 { 756 return a + b + c + d; 757 } 758 759 __bpf_kfunc int bpf_kfunc_call_test2(struct sock *sk, u32 a, u32 b) 760 { 761 return a + b; 762 } 763 764 __bpf_kfunc struct sock *bpf_kfunc_call_test3(struct sock *sk) 765 { 766 return sk; 767 } 768 769 __bpf_kfunc long noinline 
bpf_kfunc_call_test4(signed char a, short b, int c, long d) 770 { 771 /* 772 * Make val as volatile to avoid compiler optimizations. 773 * Verify that negative signed values remain negative after 774 * sign-extension (JIT must sign-extend, not zero-extend). 775 */ 776 volatile long val; 777 778 /* val will be positive, if JIT does zero-extension instead of sign-extension */ 779 val = a; 780 if (val >= 0) 781 return 1; 782 783 val = b; 784 if (val >= 0) 785 return 2; 786 787 val = c; 788 if (val >= 0) 789 return 3; 790 791 /* 792 * Provoke the compiler to assume that the caller has sign-extended a, 793 * b and c on platforms where this is required (e.g. s390x). 794 */ 795 return (long)a + (long)b + (long)c + d; 796 } 797 798 __bpf_kfunc int bpf_kfunc_call_test5(u8 a, u16 b, u32 c) 799 { 800 /* 801 * Make val as volatile to avoid compiler optimizations on the below checks 802 * In C, assigning u8/u16/u32 to long performs zero-extension. 803 */ 804 volatile long val = a; 805 806 /* Check zero-extension */ 807 if (val != (unsigned long)a) 808 return 1; 809 /* Check no sign-extension */ 810 if (val < 0) 811 return 2; 812 813 val = b; 814 if (val != (unsigned long)b) 815 return 3; 816 if (val < 0) 817 return 4; 818 819 val = c; 820 if (val != (unsigned long)c) 821 return 5; 822 if (val < 0) 823 return 6; 824 825 return 0; 826 } 827 828 static struct prog_test_ref_kfunc prog_test_struct = { 829 .a = 42, 830 .b = 108, 831 .next = &prog_test_struct, 832 .cnt = REFCOUNT_INIT(1), 833 }; 834 835 __bpf_kfunc struct prog_test_ref_kfunc * 836 bpf_kfunc_call_test_acquire(unsigned long *scalar_ptr) 837 { 838 refcount_inc(&prog_test_struct.cnt); 839 return &prog_test_struct; 840 } 841 842 __bpf_kfunc void bpf_kfunc_call_test_offset(struct prog_test_ref_kfunc *p) 843 { 844 WARN_ON_ONCE(1); 845 } 846 847 __bpf_kfunc struct prog_test_member * 848 bpf_kfunc_call_memb_acquire(void) 849 { 850 WARN_ON_ONCE(1); 851 return NULL; 852 } 853 854 __bpf_kfunc void 
bpf_kfunc_call_memb1_release(struct prog_test_member1 *p) 855 { 856 WARN_ON_ONCE(1); 857 } 858 859 static int *__bpf_kfunc_call_test_get_mem(struct prog_test_ref_kfunc *p, const int size) 860 { 861 if (size > 2 * sizeof(int)) 862 return NULL; 863 864 return (int *)p; 865 } 866 867 __bpf_kfunc int *bpf_kfunc_call_test_get_rdwr_mem(struct prog_test_ref_kfunc *p, 868 const int rdwr_buf_size) 869 { 870 return __bpf_kfunc_call_test_get_mem(p, rdwr_buf_size); 871 } 872 873 __bpf_kfunc int *bpf_kfunc_call_test_get_rdonly_mem(struct prog_test_ref_kfunc *p, 874 const int rdonly_buf_size) 875 { 876 return __bpf_kfunc_call_test_get_mem(p, rdonly_buf_size); 877 } 878 879 /* the next 2 ones can't be really used for testing expect to ensure 880 * that the verifier rejects the call. 881 * Acquire functions must return struct pointers, so these ones are 882 * failing. 883 */ 884 __bpf_kfunc int *bpf_kfunc_call_test_acq_rdonly_mem(struct prog_test_ref_kfunc *p, 885 const int rdonly_buf_size) 886 { 887 return __bpf_kfunc_call_test_get_mem(p, rdonly_buf_size); 888 } 889 890 __bpf_kfunc void bpf_kfunc_call_int_mem_release(int *p) 891 { 892 } 893 894 __bpf_kfunc void bpf_kfunc_call_test_pass_ctx(struct __sk_buff *skb) 895 { 896 } 897 898 __bpf_kfunc void bpf_kfunc_call_test_pass1(struct prog_test_pass1 *p) 899 { 900 } 901 902 __bpf_kfunc void bpf_kfunc_call_test_pass2(struct prog_test_pass2 *p) 903 { 904 } 905 906 __bpf_kfunc void bpf_kfunc_call_test_fail1(struct prog_test_fail1 *p) 907 { 908 } 909 910 __bpf_kfunc void bpf_kfunc_call_test_fail2(struct prog_test_fail2 *p) 911 { 912 } 913 914 __bpf_kfunc void bpf_kfunc_call_test_fail3(struct prog_test_fail3 *p) 915 { 916 } 917 918 __bpf_kfunc void bpf_kfunc_call_test_mem_len_pass1(void *mem, int mem__sz) 919 { 920 } 921 922 __bpf_kfunc void bpf_kfunc_call_test_mem_len_fail1(void *mem, int len) 923 { 924 } 925 926 __bpf_kfunc void bpf_kfunc_call_test_mem_len_fail2(u64 *mem, int len) 927 { 928 } 929 930 __bpf_kfunc void 
bpf_kfunc_call_test_ref(struct prog_test_ref_kfunc *p) 931 { 932 /* p != NULL, but p->cnt could be 0 */ 933 } 934 935 __bpf_kfunc void bpf_kfunc_call_test_destructive(void) 936 { 937 } 938 939 __bpf_kfunc static u32 bpf_kfunc_call_test_static_unused_arg(u32 arg, u32 unused) 940 { 941 return arg; 942 } 943 944 __bpf_kfunc void bpf_kfunc_call_test_sleepable(void) 945 { 946 } 947 948 struct bpf_kfunc_rcu_tasks_trace_data { 949 struct rcu_head rcu; 950 int *done; 951 }; 952 953 static void bpf_kfunc_rcu_tasks_trace_cb(struct rcu_head *rhp) 954 { 955 struct bpf_kfunc_rcu_tasks_trace_data *data; 956 957 data = container_of(rhp, struct bpf_kfunc_rcu_tasks_trace_data, rcu); 958 WRITE_ONCE(*data->done, 1); 959 kfree(data); 960 } 961 962 __bpf_kfunc int bpf_kfunc_call_test_call_rcu_tasks_trace(int *done) 963 { 964 struct bpf_kfunc_rcu_tasks_trace_data *data; 965 966 data = kmalloc(sizeof(*data), GFP_ATOMIC); 967 if (!data) 968 return -ENOMEM; 969 data->done = done; 970 call_rcu_tasks_trace(&data->rcu, bpf_kfunc_rcu_tasks_trace_cb); 971 return 0; 972 } 973 974 __bpf_kfunc int bpf_kfunc_init_sock(struct init_sock_args *args) 975 { 976 int proto; 977 int err; 978 979 mutex_lock(&sock_lock); 980 981 if (sock) { 982 pr_err("%s called without releasing old sock", __func__); 983 err = -EPERM; 984 goto out; 985 } 986 987 switch (args->af) { 988 case AF_INET: 989 case AF_INET6: 990 proto = args->type == SOCK_STREAM ? IPPROTO_TCP : IPPROTO_UDP; 991 break; 992 case AF_UNIX: 993 proto = PF_UNIX; 994 break; 995 default: 996 pr_err("invalid address family %d\n", args->af); 997 err = -EINVAL; 998 goto out; 999 } 1000 1001 err = sock_create_kern(current->nsproxy->net_ns, args->af, args->type, 1002 proto, &sock); 1003 1004 if (!err) 1005 /* Set timeout for call to kernel_connect() to prevent it from hanging, 1006 * and consider the connection attempt failed if it returns 1007 * -EINPROGRESS. 
1008 */ 1009 sock->sk->sk_sndtimeo = CONNECT_TIMEOUT_SEC * HZ; 1010 out: 1011 mutex_unlock(&sock_lock); 1012 1013 return err; 1014 } 1015 1016 __bpf_kfunc void bpf_kfunc_close_sock(void) 1017 { 1018 mutex_lock(&sock_lock); 1019 1020 if (sock) { 1021 sock_release(sock); 1022 sock = NULL; 1023 } 1024 1025 mutex_unlock(&sock_lock); 1026 } 1027 1028 __bpf_kfunc int bpf_kfunc_call_kernel_connect(struct addr_args *args) 1029 { 1030 int err; 1031 1032 if (args->addrlen > sizeof(args->addr)) 1033 return -EINVAL; 1034 1035 mutex_lock(&sock_lock); 1036 1037 if (!sock) { 1038 pr_err("%s called without initializing sock", __func__); 1039 err = -EPERM; 1040 goto out; 1041 } 1042 1043 err = kernel_connect(sock, (struct sockaddr_unsized *)&args->addr, 1044 args->addrlen, 0); 1045 out: 1046 mutex_unlock(&sock_lock); 1047 1048 return err; 1049 } 1050 1051 __bpf_kfunc int bpf_kfunc_call_kernel_bind(struct addr_args *args) 1052 { 1053 int err; 1054 1055 if (args->addrlen > sizeof(args->addr)) 1056 return -EINVAL; 1057 1058 mutex_lock(&sock_lock); 1059 1060 if (!sock) { 1061 pr_err("%s called without initializing sock", __func__); 1062 err = -EPERM; 1063 goto out; 1064 } 1065 1066 err = kernel_bind(sock, (struct sockaddr_unsized *)&args->addr, args->addrlen); 1067 out: 1068 mutex_unlock(&sock_lock); 1069 1070 return err; 1071 } 1072 1073 __bpf_kfunc int bpf_kfunc_call_kernel_listen(void) 1074 { 1075 int err; 1076 1077 mutex_lock(&sock_lock); 1078 1079 if (!sock) { 1080 pr_err("%s called without initializing sock", __func__); 1081 err = -EPERM; 1082 goto out; 1083 } 1084 1085 err = kernel_listen(sock, 128); 1086 out: 1087 mutex_unlock(&sock_lock); 1088 1089 return err; 1090 } 1091 1092 __bpf_kfunc int bpf_kfunc_call_kernel_sendmsg(struct sendmsg_args *args) 1093 { 1094 struct msghdr msg = { 1095 .msg_name = &args->addr.addr, 1096 .msg_namelen = args->addr.addrlen, 1097 }; 1098 struct kvec iov; 1099 int err; 1100 1101 if (args->addr.addrlen > sizeof(args->addr.addr) || 1102 args->msglen 
> sizeof(args->msg)) 1103 return -EINVAL; 1104 1105 iov.iov_base = args->msg; 1106 iov.iov_len = args->msglen; 1107 1108 mutex_lock(&sock_lock); 1109 1110 if (!sock) { 1111 pr_err("%s called without initializing sock", __func__); 1112 err = -EPERM; 1113 goto out; 1114 } 1115 1116 err = kernel_sendmsg(sock, &msg, &iov, 1, args->msglen); 1117 args->addr.addrlen = msg.msg_namelen; 1118 out: 1119 mutex_unlock(&sock_lock); 1120 1121 return err; 1122 } 1123 1124 __bpf_kfunc int bpf_kfunc_call_sock_sendmsg(struct sendmsg_args *args) 1125 { 1126 struct msghdr msg = { 1127 .msg_name = &args->addr.addr, 1128 .msg_namelen = args->addr.addrlen, 1129 }; 1130 struct kvec iov; 1131 int err; 1132 1133 if (args->addr.addrlen > sizeof(args->addr.addr) || 1134 args->msglen > sizeof(args->msg)) 1135 return -EINVAL; 1136 1137 iov.iov_base = args->msg; 1138 iov.iov_len = args->msglen; 1139 1140 iov_iter_kvec(&msg.msg_iter, ITER_SOURCE, &iov, 1, args->msglen); 1141 mutex_lock(&sock_lock); 1142 1143 if (!sock) { 1144 pr_err("%s called without initializing sock", __func__); 1145 err = -EPERM; 1146 goto out; 1147 } 1148 1149 err = sock_sendmsg(sock, &msg); 1150 args->addr.addrlen = msg.msg_namelen; 1151 out: 1152 mutex_unlock(&sock_lock); 1153 1154 return err; 1155 } 1156 1157 __bpf_kfunc int bpf_kfunc_call_kernel_getsockname(struct addr_args *args) 1158 { 1159 int err; 1160 1161 mutex_lock(&sock_lock); 1162 1163 if (!sock) { 1164 pr_err("%s called without initializing sock", __func__); 1165 err = -EPERM; 1166 goto out; 1167 } 1168 1169 err = kernel_getsockname(sock, (struct sockaddr *)&args->addr); 1170 if (err < 0) 1171 goto out; 1172 1173 args->addrlen = err; 1174 err = 0; 1175 out: 1176 mutex_unlock(&sock_lock); 1177 1178 return err; 1179 } 1180 1181 __bpf_kfunc int bpf_kfunc_call_kernel_getpeername(struct addr_args *args) 1182 { 1183 int err; 1184 1185 mutex_lock(&sock_lock); 1186 1187 if (!sock) { 1188 pr_err("%s called without initializing sock", __func__); 1189 err = -EPERM; 1190 
goto out; 1191 } 1192 1193 err = kernel_getpeername(sock, (struct sockaddr *)&args->addr); 1194 if (err < 0) 1195 goto out; 1196 1197 args->addrlen = err; 1198 err = 0; 1199 out: 1200 mutex_unlock(&sock_lock); 1201 1202 return err; 1203 } 1204 1205 static DEFINE_MUTEX(st_ops_mutex); 1206 static struct bpf_testmod_st_ops *st_ops; 1207 1208 __bpf_kfunc int bpf_kfunc_st_ops_test_prologue(struct st_ops_args *args) 1209 { 1210 int ret = -1; 1211 1212 mutex_lock(&st_ops_mutex); 1213 if (st_ops && st_ops->test_prologue) 1214 ret = st_ops->test_prologue(args); 1215 mutex_unlock(&st_ops_mutex); 1216 1217 return ret; 1218 } 1219 1220 __bpf_kfunc int bpf_kfunc_st_ops_test_epilogue(struct st_ops_args *args) 1221 { 1222 int ret = -1; 1223 1224 mutex_lock(&st_ops_mutex); 1225 if (st_ops && st_ops->test_epilogue) 1226 ret = st_ops->test_epilogue(args); 1227 mutex_unlock(&st_ops_mutex); 1228 1229 return ret; 1230 } 1231 1232 __bpf_kfunc int bpf_kfunc_st_ops_test_pro_epilogue(struct st_ops_args *args) 1233 { 1234 int ret = -1; 1235 1236 mutex_lock(&st_ops_mutex); 1237 if (st_ops && st_ops->test_pro_epilogue) 1238 ret = st_ops->test_pro_epilogue(args); 1239 mutex_unlock(&st_ops_mutex); 1240 1241 return ret; 1242 } 1243 1244 __bpf_kfunc int bpf_kfunc_st_ops_inc10(struct st_ops_args *args) 1245 { 1246 args->a += 10; 1247 return args->a; 1248 } 1249 1250 __bpf_kfunc int bpf_kfunc_multi_st_ops_test_1(struct st_ops_args *args, u32 id); 1251 __bpf_kfunc int bpf_kfunc_multi_st_ops_test_1_assoc(struct st_ops_args *args, struct bpf_prog_aux *aux); 1252 1253 __bpf_kfunc int bpf_kfunc_implicit_arg(int a, struct bpf_prog_aux *aux); 1254 __bpf_kfunc int bpf_kfunc_implicit_arg_legacy(int a, int b, struct bpf_prog_aux *aux); 1255 __bpf_kfunc int bpf_kfunc_implicit_arg_legacy_impl(int a, int b, struct bpf_prog_aux *aux); 1256 1257 /* hook targets */ 1258 noinline void bpf_testmod_test_hardirq_fn(void) { barrier(); } 1259 noinline void bpf_testmod_test_softirq_fn(void) { barrier(); } 1260 1261 /* 
Tasklet for SoftIRQ context */
static void ctx_check_tasklet_fn(struct tasklet_struct *t)
{
	/* Runs in softirq (tasklet) context: attach target for the
	 * softirq context-check test.
	 */
	bpf_testmod_test_softirq_fn();
}

DECLARE_TASKLET(ctx_check_tasklet, ctx_check_tasklet_fn);

/* IRQ Work for HardIRQ context */
static void ctx_check_irq_fn(struct irq_work *work)
{
	/* Runs in hardirq context, then chains into softirq context by
	 * scheduling the tasklet above.
	 */
	bpf_testmod_test_hardirq_fn();
	tasklet_schedule(&ctx_check_tasklet);
}

static struct irq_work ctx_check_irq = IRQ_WORK_INIT_HARD(ctx_check_irq_fn);

/* The kfunc trigger */
__bpf_kfunc void bpf_kfunc_trigger_ctx_check(void)
{
	/* Queue the hardirq work; it in turn schedules the softirq tasklet. */
	irq_work_queue(&ctx_check_irq);
}

/* kfuncs this module exposes to BPF programs (continued on following lines). */
BTF_KFUNCS_START(bpf_testmod_check_kfunc_ids)
BTF_ID_FLAGS(func, bpf_testmod_test_mod_kfunc)
BTF_ID_FLAGS(func, bpf_kfunc_call_test1)
BTF_ID_FLAGS(func, bpf_kfunc_call_test2)
BTF_ID_FLAGS(func, bpf_kfunc_call_test3)
BTF_ID_FLAGS(func, bpf_kfunc_call_test4)
BTF_ID_FLAGS(func, bpf_kfunc_call_test5)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_mem_len_fail1)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_mem_len_fail2)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_acquire, KF_ACQUIRE | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_kfunc_call_memb_acquire, KF_ACQUIRE | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_kfunc_call_memb1_release, KF_RELEASE)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_get_rdwr_mem, KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_get_rdonly_mem, KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_acq_rdonly_mem, KF_ACQUIRE | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_kfunc_call_int_mem_release, KF_RELEASE)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_pass_ctx)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_pass1)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_pass2)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_fail1)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_fail2)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_fail3)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_ref, KF_RCU)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_destructive, KF_DESTRUCTIVE)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_static_unused_arg)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_offset)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_sleepable, KF_SLEEPABLE)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_call_rcu_tasks_trace)
BTF_ID_FLAGS(func, bpf_kfunc_init_sock, KF_SLEEPABLE)
BTF_ID_FLAGS(func, bpf_kfunc_close_sock, KF_SLEEPABLE)
BTF_ID_FLAGS(func, bpf_kfunc_call_kernel_connect, KF_SLEEPABLE)
BTF_ID_FLAGS(func, bpf_kfunc_call_kernel_bind, KF_SLEEPABLE)
BTF_ID_FLAGS(func, bpf_kfunc_call_kernel_listen, KF_SLEEPABLE)
BTF_ID_FLAGS(func, bpf_kfunc_call_kernel_sendmsg, KF_SLEEPABLE)
BTF_ID_FLAGS(func, bpf_kfunc_call_sock_sendmsg, KF_SLEEPABLE)
BTF_ID_FLAGS(func, bpf_kfunc_call_kernel_getsockname, KF_SLEEPABLE)
BTF_ID_FLAGS(func, bpf_kfunc_call_kernel_getpeername, KF_SLEEPABLE)
BTF_ID_FLAGS(func, bpf_kfunc_st_ops_test_prologue, KF_SLEEPABLE)
BTF_ID_FLAGS(func, bpf_kfunc_st_ops_test_epilogue, KF_SLEEPABLE)
BTF_ID_FLAGS(func, bpf_kfunc_st_ops_test_pro_epilogue, KF_SLEEPABLE)
BTF_ID_FLAGS(func, bpf_kfunc_st_ops_inc10)
BTF_ID_FLAGS(func, bpf_kfunc_multi_st_ops_test_1)
BTF_ID_FLAGS(func, bpf_kfunc_multi_st_ops_test_1_assoc, KF_IMPLICIT_ARGS)
BTF_ID_FLAGS(func, bpf_kfunc_implicit_arg, KF_IMPLICIT_ARGS)
BTF_ID_FLAGS(func, bpf_kfunc_implicit_arg_legacy, KF_IMPLICIT_ARGS)
BTF_ID_FLAGS(func, bpf_kfunc_implicit_arg_legacy_impl)
BTF_ID_FLAGS(func, bpf_kfunc_trigger_ctx_check)
BTF_KFUNCS_END(bpf_testmod_check_kfunc_ids)

/* No per-BTF initialization is needed for bpf_testmod_ops. */
static int bpf_testmod_ops_init(struct btf *btf)
{
	return 0;
}

/* Delegate context-access checking to the generic tracing BTF helper;
 * shared by several verifier_ops instances below.
 */
static bool bpf_testmod_ops_is_valid_access(int off, int size,
					    enum bpf_access_type type,
					    const struct bpf_prog *prog,
					    struct bpf_insn_access_aux *info)
{
	return bpf_tracing_btf_ctx_access(off, size, type, prog, info);
}

static int 
bpf_testmod_ops_init_member(const struct btf_type *t,
			    const struct btf_member *member,
			    void *kdata, const void *udata)
{
	/* member->offset is in bits, hence the "* 8" on the byte offset. */
	if (member->offset == offsetof(struct bpf_testmod_ops, data) * 8) {
		/* For data fields, this function has to copy it and return
		 * 1 to indicate that the data has been handled by the
		 * struct_ops type, or the verifier will reject the map if
		 * the value of the data field is not zero.
		 */
		((struct bpf_testmod_ops *)kdata)->data = ((struct bpf_testmod_ops *)udata)->data;
		return 1;
	}
	return 0;
}

static const struct btf_kfunc_id_set bpf_testmod_kfunc_set = {
	.owner = THIS_MODULE,
	.set = &bpf_testmod_check_kfunc_ids,
};

static const struct bpf_verifier_ops bpf_testmod_verifier_ops = {
	.get_func_proto = bpf_base_func_proto,
	.is_valid_access = bpf_testmod_ops_is_valid_access,
};

/* Variant without get_func_proto; used by bpf_testmod_ops3 below. */
static const struct bpf_verifier_ops bpf_testmod_verifier_ops3 = {
	.is_valid_access = bpf_testmod_ops_is_valid_access,
};

/* ->reg for bpf_testmod_ops: immediately exercise the installed callbacks. */
static int bpf_dummy_reg(void *kdata, struct bpf_link *link)
{
	struct bpf_testmod_ops *ops = kdata;

	if (ops->test_1)
		ops->test_1();
	/* Some test cases (ex. struct_ops_maybe_null) may not have test_2
	 * initialized, so we need to check for NULL.
	 */
	if (ops->test_2)
		ops->test_2(4, ops->data);

	return 0;
}

/* Nothing to tear down on unregister. */
static void bpf_dummy_unreg(void *kdata, struct bpf_link *link)
{
}

/* CFI stubs: no-op default implementations for each struct_ops slot. */
static int bpf_testmod_test_1(void)
{
	return 0;
}

static void bpf_testmod_test_2(int a, int b)
{
}

/* Filled into tramp_1..tramp_40 at module init. */
static int bpf_testmod_tramp(int value)
{
	return 0;
}

static int bpf_testmod_ops__test_maybe_null(int dummy,
					    struct task_struct *task__nullable)
{
	return 0;
}

static int bpf_testmod_ops__test_refcounted(int dummy,
					    struct task_struct *task__ref)
{
	return 0;
}

static int bpf_testmod_ops__test_refcounted_multi(int dummy, struct task_struct *task__nullable,
						  struct task_struct *task__ref)
{
	return 0;
}

static struct task_struct *
bpf_testmod_ops__test_return_ref_kptr(int dummy, struct task_struct *task__ref,
				      struct cgroup *cgrp)
{
	return NULL;
}

static struct bpf_testmod_ops __bpf_testmod_ops = {
	.test_1 = bpf_testmod_test_1,
	.test_2 = bpf_testmod_test_2,
	.test_maybe_null = bpf_testmod_ops__test_maybe_null,
	.test_refcounted = bpf_testmod_ops__test_refcounted,
	.test_refcounted_multi = bpf_testmod_ops__test_refcounted_multi,
	.test_return_ref_kptr = bpf_testmod_ops__test_return_ref_kptr,
};

struct bpf_struct_ops bpf_bpf_testmod_ops = {
	.verifier_ops = &bpf_testmod_verifier_ops,
	.init = bpf_testmod_ops_init,
	.init_member = bpf_testmod_ops_init_member,
	.reg = bpf_dummy_reg,
	.unreg = bpf_dummy_unreg,
	.cfi_stubs = &__bpf_testmod_ops,
	.name = "bpf_testmod_ops",
	.owner = THIS_MODULE,
};

/* ->reg for bpf_testmod_ops2: test_1 is mandatory here, so no NULL check. */
static int bpf_dummy_reg2(void *kdata, struct bpf_link *link)
{
	struct bpf_testmod_ops2 *ops = kdata;

	ops->test_1();
	return 0;
}

static struct bpf_testmod_ops2 __bpf_testmod_ops2 = {
1463 .test_1 = bpf_testmod_test_1, 1464 }; 1465 1466 struct bpf_struct_ops bpf_testmod_ops2 = { 1467 .verifier_ops = &bpf_testmod_verifier_ops, 1468 .init = bpf_testmod_ops_init, 1469 .init_member = bpf_testmod_ops_init_member, 1470 .reg = bpf_dummy_reg2, 1471 .unreg = bpf_dummy_unreg, 1472 .cfi_stubs = &__bpf_testmod_ops2, 1473 .name = "bpf_testmod_ops2", 1474 .owner = THIS_MODULE, 1475 }; 1476 1477 static int st_ops3_reg(void *kdata, struct bpf_link *link) 1478 { 1479 int err = 0; 1480 1481 mutex_lock(&st_ops_mutex); 1482 if (st_ops3) { 1483 pr_err("st_ops has already been registered\n"); 1484 err = -EEXIST; 1485 goto unlock; 1486 } 1487 st_ops3 = kdata; 1488 1489 unlock: 1490 mutex_unlock(&st_ops_mutex); 1491 return err; 1492 } 1493 1494 static void st_ops3_unreg(void *kdata, struct bpf_link *link) 1495 { 1496 mutex_lock(&st_ops_mutex); 1497 st_ops3 = NULL; 1498 mutex_unlock(&st_ops_mutex); 1499 } 1500 1501 static void test_1_recursion_detected(struct bpf_prog *prog) 1502 { 1503 struct bpf_prog_stats *stats; 1504 1505 stats = this_cpu_ptr(prog->stats); 1506 printk("bpf_testmod: oh no, recursing into test_1, recursion_misses %llu", 1507 u64_stats_read(&stats->misses)); 1508 } 1509 1510 static int st_ops3_check_member(const struct btf_type *t, 1511 const struct btf_member *member, 1512 const struct bpf_prog *prog) 1513 { 1514 u32 moff = __btf_member_bit_offset(t, member) / 8; 1515 1516 switch (moff) { 1517 case offsetof(struct bpf_testmod_ops3, test_1): 1518 prog->aux->priv_stack_requested = true; 1519 prog->aux->recursion_detected = test_1_recursion_detected; 1520 fallthrough; 1521 default: 1522 break; 1523 } 1524 return 0; 1525 } 1526 1527 struct bpf_struct_ops bpf_testmod_ops3 = { 1528 .verifier_ops = &bpf_testmod_verifier_ops3, 1529 .init = bpf_testmod_ops_init, 1530 .init_member = bpf_testmod_ops_init_member, 1531 .reg = st_ops3_reg, 1532 .unreg = st_ops3_unreg, 1533 .check_member = st_ops3_check_member, 1534 .cfi_stubs = &__bpf_testmod_ops3, 1535 .name = 
"bpf_testmod_ops3",
	.owner = THIS_MODULE,
};

/* CFI stubs for bpf_testmod_st_ops; real logic is provided by BPF progs. */
static int bpf_test_mod_st_ops__test_prologue(struct st_ops_args *args)
{
	return 0;
}

static int bpf_test_mod_st_ops__test_epilogue(struct st_ops_args *args)
{
	return 0;
}

static int bpf_test_mod_st_ops__test_pro_epilogue(struct st_ops_args *args)
{
	return 0;
}

/* BTF IDs of the cgroup kfuncs called from the generated pro/epilogues;
 * resolved in st_ops_init().
 */
static int bpf_cgroup_from_id_id;
static int bpf_cgroup_release_id;

/* Prologue variant that also exercises kfunc calls from generated insns.
 * The emitted sequence is documented in the comment block below; note the
 * final instruction re-emits the program's own first insn, as required of
 * gen_prologue implementations.
 */
static int st_ops_gen_prologue_with_kfunc(struct bpf_insn *insn_buf, bool direct_write,
					  const struct bpf_prog *prog)
{
	struct bpf_insn *insn = insn_buf;

	/* r8 = r1; // r8 will be "u64 *ctx".
	 * r1 = 0;
	 * r0 = bpf_cgroup_from_id(r1);
	 * if r0 != 0 goto pc+5;
	 * r6 = r8[0]; // r6 will be "struct st_ops *args".
	 * r7 = r6->a;
	 * r7 += 1000;
	 * r6->a = r7;
	 * goto pc+2;
	 * r1 = r0;
	 * bpf_cgroup_release(r1);
	 * r1 = r8;
	 */
	*insn++ = BPF_MOV64_REG(BPF_REG_8, BPF_REG_1);
	*insn++ = BPF_MOV64_IMM(BPF_REG_1, 0);
	*insn++ = BPF_CALL_KFUNC(0, bpf_cgroup_from_id_id);
	*insn++ = BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 5);
	*insn++ = BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_8, 0);
	*insn++ = BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_6, offsetof(struct st_ops_args, a));
	*insn++ = BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, 1000);
	*insn++ = BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_7, offsetof(struct st_ops_args, a));
	*insn++ = BPF_JMP_IMM(BPF_JA, 0, 0, 2);
	*insn++ = BPF_MOV64_REG(BPF_REG_1, BPF_REG_0);
	*insn++ = BPF_CALL_KFUNC(0, bpf_cgroup_release_id);
	*insn++ = BPF_MOV64_REG(BPF_REG_1, BPF_REG_8);
	*insn++ = prog->insnsi[0];

	return insn - insn_buf;
}

/* Epilogue variant with kfunc calls; ends with its own BPF_EXIT. */
static int st_ops_gen_epilogue_with_kfunc(struct bpf_insn *insn_buf, const struct bpf_prog *prog,
					  s16 ctx_stack_off)
{
	struct bpf_insn *insn = insn_buf;

	/* r1 = 0;
	 * r6 = 0;
	 * r0 = bpf_cgroup_from_id(r1);
	 * if r0 != 0 goto pc+6;
	 * r1 = stack[ctx_stack_off]; // r1 will be "u64 *ctx"
	 * r1 = r1[0]; // r1 will be "struct st_ops *args"
	 * r6 = r1->a;
	 * r6 += 10000;
	 * r1->a = r6;
	 * goto pc+2
	 * r1 = r0;
	 * bpf_cgroup_release(r1);
	 * r0 = r6;
	 * r0 *= 2;
	 * BPF_EXIT;
	 */
	*insn++ = BPF_MOV64_IMM(BPF_REG_1, 0);
	*insn++ = BPF_MOV64_IMM(BPF_REG_6, 0);
	*insn++ = BPF_CALL_KFUNC(0, bpf_cgroup_from_id_id);
	*insn++ = BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 6);
	*insn++ = BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_FP, ctx_stack_off);
	*insn++ = BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, 0);
	*insn++ = BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, offsetof(struct st_ops_args, a));
	*insn++ = BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 10000);
	*insn++ = BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, offsetof(struct st_ops_args, a));
	*insn++ = BPF_JMP_IMM(BPF_JA, 0, 0, 2);
	*insn++ = BPF_MOV64_REG(BPF_REG_1, BPF_REG_0);
	*insn++ = BPF_CALL_KFUNC(0, bpf_cgroup_release_id);
	*insn++ = BPF_MOV64_REG(BPF_REG_0, BPF_REG_6);
	*insn++ = BPF_ALU64_IMM(BPF_MUL, BPF_REG_0, 2);
	*insn++ = BPF_EXIT_INSN();

	return insn - insn_buf;
}

/* Progs named with this prefix get the kfunc-calling pro/epilogue variants. */
#define KFUNC_PRO_EPI_PREFIX "test_kfunc_"
static int st_ops_gen_prologue(struct bpf_insn *insn_buf, bool direct_write,
			       const struct bpf_prog *prog)
{
	struct bpf_insn *insn = insn_buf;

	/* Only instrument progs attached to these two members. */
	if (strcmp(prog->aux->attach_func_name, "test_prologue") &&
	    strcmp(prog->aux->attach_func_name, "test_pro_epilogue"))
		return 0;

	if (!strncmp(prog->aux->name, KFUNC_PRO_EPI_PREFIX, strlen(KFUNC_PRO_EPI_PREFIX)))
		return st_ops_gen_prologue_with_kfunc(insn_buf, direct_write, prog);

	/* r6 = r1[0]; // r6 will be "struct st_ops *args". r1 is "u64 *ctx".
	 * r7 = r6->a;
	 * r7 += 1000;
	 * r6->a = r7;
	 */
	*insn++ = BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0);
	*insn++ = BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_6, offsetof(struct st_ops_args, a));
	*insn++ = BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, 1000);
	*insn++ = BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_7, offsetof(struct st_ops_args, a));
	/* gen_prologue must re-emit the program's original first insn. */
	*insn++ = prog->insnsi[0];

	return insn - insn_buf;
}

/* Epilogue generator: adds 10000 to args->a, doubles it into r0, exits.
 * Falls back to the kfunc-calling variant for progs named "test_kfunc_*".
 */
static int st_ops_gen_epilogue(struct bpf_insn *insn_buf, const struct bpf_prog *prog,
			       s16 ctx_stack_off)
{
	struct bpf_insn *insn = insn_buf;

	/* Only instrument progs attached to these two members. */
	if (strcmp(prog->aux->attach_func_name, "test_epilogue") &&
	    strcmp(prog->aux->attach_func_name, "test_pro_epilogue"))
		return 0;

	if (!strncmp(prog->aux->name, KFUNC_PRO_EPI_PREFIX, strlen(KFUNC_PRO_EPI_PREFIX)))
		return st_ops_gen_epilogue_with_kfunc(insn_buf, prog, ctx_stack_off);

	/* r1 = stack[ctx_stack_off]; // r1 will be "u64 *ctx"
	 * r1 = r1[0]; // r1 will be "struct st_ops *args"
	 * r6 = r1->a;
	 * r6 += 10000;
	 * r1->a = r6;
	 * r0 = r6;
	 * r0 *= 2;
	 * BPF_EXIT;
	 */
	*insn++ = BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_FP, ctx_stack_off);
	*insn++ = BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, 0);
	*insn++ = BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, offsetof(struct st_ops_args, a));
	*insn++ = BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 10000);
	*insn++ = BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, offsetof(struct st_ops_args, a));
	*insn++ = BPF_MOV64_REG(BPF_REG_0, BPF_REG_6);
	*insn++ = BPF_ALU64_IMM(BPF_MUL, BPF_REG_0, 2);
	*insn++ = BPF_EXIT_INSN();

	return insn - insn_buf;
}

/* Restrict BTF-typed pointer writes to within struct st_ops_args. */
static int st_ops_btf_struct_access(struct bpf_verifier_log *log,
				    const struct bpf_reg_state *reg,
				    int off, int size)
{
	if (off < 0 || off + size > sizeof(struct st_ops_args))
		return -EACCES;
	return 0;
}

static const struct 
bpf_verifier_ops st_ops_verifier_ops = {
	.is_valid_access = bpf_testmod_ops_is_valid_access,
	.btf_struct_access = st_ops_btf_struct_access,
	.gen_prologue = st_ops_gen_prologue,
	.gen_epilogue = st_ops_gen_epilogue,
	.get_func_proto = bpf_base_func_proto,
};

static struct bpf_testmod_st_ops st_ops_cfi_stubs = {
	.test_prologue = bpf_test_mod_st_ops__test_prologue,
	.test_epilogue = bpf_test_mod_st_ops__test_epilogue,
	.test_pro_epilogue = bpf_test_mod_st_ops__test_pro_epilogue,
};

/* Register the singleton st_ops map; only one may be active at a time. */
static int st_ops_reg(void *kdata, struct bpf_link *link)
{
	int err = 0;

	mutex_lock(&st_ops_mutex);
	if (st_ops) {
		pr_err("st_ops has already been registered\n");
		err = -EEXIST;
		goto unlock;
	}
	st_ops = kdata;

unlock:
	mutex_unlock(&st_ops_mutex);
	return err;
}

static void st_ops_unreg(void *kdata, struct bpf_link *link)
{
	mutex_lock(&st_ops_mutex);
	st_ops = NULL;
	mutex_unlock(&st_ops_mutex);
}

/* Resolve the BTF IDs of the cgroup kfuncs used by the generated
 * pro/epilogues.
 * NOTE(review): bpf_find_btf_id() may take a reference on the returned
 * kfunc_btf which is never dropped here — confirm whether a btf_put()
 * is required on both the success and -EINVAL paths.
 */
static int st_ops_init(struct btf *btf)
{
	struct btf *kfunc_btf;

	bpf_cgroup_from_id_id = bpf_find_btf_id("bpf_cgroup_from_id", BTF_KIND_FUNC, &kfunc_btf);
	bpf_cgroup_release_id = bpf_find_btf_id("bpf_cgroup_release", BTF_KIND_FUNC, &kfunc_btf);
	if (bpf_cgroup_from_id_id < 0 || bpf_cgroup_release_id < 0)
		return -EINVAL;

	return 0;
}

static int st_ops_init_member(const struct btf_type *t,
			      const struct btf_member *member,
			      void *kdata, const void *udata)
{
	return 0;
}

static struct bpf_struct_ops testmod_st_ops = {
	.verifier_ops = &st_ops_verifier_ops,
	.init = st_ops_init,
	.init_member = st_ops_init_member,
	.reg = st_ops_reg,
	.unreg = st_ops_unreg,
	.cfi_stubs = &st_ops_cfi_stubs,
	.name = "bpf_testmod_st_ops",
	.owner = THIS_MODULE,
};

/* List of all currently registered multi_st_ops maps, keyed by map id. */
struct hlist_head multi_st_ops_list;
static DEFINE_SPINLOCK(multi_st_ops_lock);

/* NOTE(review): the lock and list are statically initialized above, so
 * these re-inits are redundant (though harmless at ->init time).
 */
static int multi_st_ops_init(struct btf *btf)
{
	spin_lock_init(&multi_st_ops_lock);
	INIT_HLIST_HEAD(&multi_st_ops_list);

	return 0;
}

static int multi_st_ops_init_member(const struct btf_type *t,
				    const struct btf_member *member,
				    void *kdata, const void *udata)
{
	return 0;
}

/* Look up a registered map by id. Caller must hold multi_st_ops_lock. */
static struct bpf_testmod_multi_st_ops *multi_st_ops_find_nolock(u32 id)
{
	struct bpf_testmod_multi_st_ops *st_ops;

	hlist_for_each_entry(st_ops, &multi_st_ops_list, node) {
		if (st_ops->id == id)
			return st_ops;
	}

	return NULL;
}

/* Call test_1() of the struct_ops map identified by the id */
int bpf_kfunc_multi_st_ops_test_1(struct st_ops_args *args, u32 id)
{
	struct bpf_testmod_multi_st_ops *st_ops;
	unsigned long flags;
	int ret = -1;

	spin_lock_irqsave(&multi_st_ops_lock, flags);
	st_ops = multi_st_ops_find_nolock(id);
	if (st_ops)
		ret = st_ops->test_1(args);
	spin_unlock_irqrestore(&multi_st_ops_lock, flags);

	return ret;
}

/* Call test_1() of the associated struct_ops map */
int bpf_kfunc_multi_st_ops_test_1_assoc(struct st_ops_args *args, struct bpf_prog_aux *aux)
{
	struct bpf_testmod_multi_st_ops *st_ops;
	int ret = -1;

	st_ops = (struct bpf_testmod_multi_st_ops *)bpf_prog_get_assoc_struct_ops(aux);
	if (st_ops)
		ret = st_ops->test_1(args);

	return ret;
}

/* KF_IMPLICIT_ARGS test kfunc: aux is supplied by the verifier, not the
 * BPF program. Returns a when positive, -EINVAL otherwise.
 */
int bpf_kfunc_implicit_arg(int a, struct bpf_prog_aux *aux)
{
	if (aux && a > 0)
		return a;
	return -EINVAL;
}

/* Legacy-style implicit-arg kfunc: returns a + b when aux is present. */
int bpf_kfunc_implicit_arg_legacy(int a, int b, struct bpf_prog_aux *aux)
{
	if (aux)
		return a + b;
	return -EINVAL;
}

/* Explicit-arg twin of the legacy kfunc above (registered without
 * KF_IMPLICIT_ARGS); simply forwards.
 */
int bpf_kfunc_implicit_arg_legacy_impl(int a, int b, struct bpf_prog_aux *aux)
{
	return bpf_kfunc_implicit_arg_legacy(a, b, 
aux);
}

/* Register a multi_st_ops map, keyed by its struct_ops map id.
 * Fails with -EINVAL if test_1 is unset, -EEXIST on duplicate id.
 */
static int multi_st_ops_reg(void *kdata, struct bpf_link *link)
{
	struct bpf_testmod_multi_st_ops *st_ops =
		(struct bpf_testmod_multi_st_ops *)kdata;
	unsigned long flags;
	int err = 0;
	u32 id;

	if (!st_ops->test_1)
		return -EINVAL;

	id = bpf_struct_ops_id(kdata);

	spin_lock_irqsave(&multi_st_ops_lock, flags);
	if (multi_st_ops_find_nolock(id)) {
		pr_err("multi_st_ops(id:%d) has already been registered\n", id);
		err = -EEXIST;
		goto unlock;
	}

	st_ops->id = id;
	hlist_add_head(&st_ops->node, &multi_st_ops_list);
unlock:
	spin_unlock_irqrestore(&multi_st_ops_lock, flags);

	return err;
}

/* Remove the map with the matching id from the registration list. */
static void multi_st_ops_unreg(void *kdata, struct bpf_link *link)
{
	struct bpf_testmod_multi_st_ops *st_ops;
	unsigned long flags;
	u32 id;

	id = bpf_struct_ops_id(kdata);

	spin_lock_irqsave(&multi_st_ops_lock, flags);
	st_ops = multi_st_ops_find_nolock(id);
	if (st_ops)
		hlist_del(&st_ops->node);
	spin_unlock_irqrestore(&multi_st_ops_lock, flags);
}

/* CFI stub for the test_1 slot. */
static int bpf_testmod_multi_st_ops__test_1(struct st_ops_args *args)
{
	return 0;
}

static struct bpf_testmod_multi_st_ops multi_st_ops_cfi_stubs = {
	.test_1 = bpf_testmod_multi_st_ops__test_1,
};

struct bpf_struct_ops testmod_multi_st_ops = {
	.verifier_ops = &bpf_testmod_verifier_ops,
	.init = multi_st_ops_init,
	.init_member = multi_st_ops_init_member,
	.reg = multi_st_ops_reg,
	.unreg = multi_st_ops_unreg,
	.cfi_stubs = &multi_st_ops_cfi_stubs,
	.name = "bpf_testmod_multi_st_ops",
	.owner = THIS_MODULE,
};

extern int bpf_fentry_test1(int a);

/* fmodret target used by the trampoline_count selftest. */
BTF_KFUNCS_START(bpf_testmod_trampoline_count_ids)
BTF_ID_FLAGS(func, bpf_testmod_trampoline_count_test)
BTF_KFUNCS_END(bpf_testmod_trampoline_count_ids)

static 
const struct 1917 btf_kfunc_id_set bpf_testmod_trampoline_count_fmodret_set = { 1918 .owner = THIS_MODULE, 1919 .set = &bpf_testmod_trampoline_count_ids, 1920 }; 1921 1922 static int bpf_testmod_init(void) 1923 { 1924 const struct btf_id_dtor_kfunc bpf_testmod_dtors[] = { 1925 { 1926 .btf_id = bpf_testmod_dtor_ids[0], 1927 .kfunc_btf_id = bpf_testmod_dtor_ids[1] 1928 }, 1929 }; 1930 void **tramp; 1931 int ret; 1932 1933 ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_UNSPEC, &bpf_testmod_common_kfunc_set); 1934 ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_SCHED_CLS, &bpf_testmod_kfunc_set); 1935 ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING, &bpf_testmod_kfunc_set); 1936 ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_SYSCALL, &bpf_testmod_kfunc_set); 1937 ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS, &bpf_testmod_kfunc_set); 1938 ret = ret ?: register_btf_fmodret_id_set(&bpf_testmod_trampoline_count_fmodret_set); 1939 ret = ret ?: register_bpf_struct_ops(&bpf_bpf_testmod_ops, bpf_testmod_ops); 1940 ret = ret ?: register_bpf_struct_ops(&bpf_testmod_ops2, bpf_testmod_ops2); 1941 ret = ret ?: register_bpf_struct_ops(&bpf_testmod_ops3, bpf_testmod_ops3); 1942 ret = ret ?: register_bpf_struct_ops(&testmod_st_ops, bpf_testmod_st_ops); 1943 ret = ret ?: register_bpf_struct_ops(&testmod_multi_st_ops, bpf_testmod_multi_st_ops); 1944 ret = ret ?: register_btf_id_dtor_kfuncs(bpf_testmod_dtors, 1945 ARRAY_SIZE(bpf_testmod_dtors), 1946 THIS_MODULE); 1947 if (ret < 0) 1948 return ret; 1949 if (bpf_fentry_test1(0) < 0) 1950 return -EINVAL; 1951 sock = NULL; 1952 mutex_init(&sock_lock); 1953 ret = sysfs_create_bin_file(kernel_kobj, &bin_attr_bpf_testmod_file); 1954 if (ret < 0) 1955 return ret; 1956 ret = register_bpf_testmod_uprobe(); 1957 if (ret < 0) 1958 return ret; 1959 1960 /* Ensure nothing is between tramp_1..tramp_40 */ 1961 BUILD_BUG_ON(offsetof(struct bpf_testmod_ops, tramp_1) + 40 * sizeof(long) != 1962 offsetofend(struct 
bpf_testmod_ops, tramp_40)); 1963 tramp = (void **)&__bpf_testmod_ops.tramp_1; 1964 while (tramp <= (void **)&__bpf_testmod_ops.tramp_40) 1965 *tramp++ = bpf_testmod_tramp; 1966 1967 return 0; 1968 } 1969 1970 static void bpf_testmod_exit(void) 1971 { 1972 /* Need to wait for all references to be dropped because 1973 * bpf_kfunc_call_test_release() which currently resides in kernel can 1974 * be called after bpf_testmod is unloaded. Once release function is 1975 * moved into the module this wait can be removed. 1976 */ 1977 while (refcount_read(&prog_test_struct.cnt) > 1) 1978 msleep(20); 1979 1980 /* Clean up irqwork and tasklet */ 1981 irq_work_sync(&ctx_check_irq); 1982 tasklet_kill(&ctx_check_tasklet); 1983 1984 bpf_kfunc_close_sock(); 1985 sysfs_remove_bin_file(kernel_kobj, &bin_attr_bpf_testmod_file); 1986 unregister_bpf_testmod_uprobe(); 1987 } 1988 1989 module_init(bpf_testmod_init); 1990 module_exit(bpf_testmod_exit); 1991 1992 MODULE_AUTHOR("Andrii Nakryiko"); 1993 MODULE_DESCRIPTION("BPF selftests module"); 1994 MODULE_LICENSE("Dual BSD/GPL"); 1995