// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/btf_ids.h>
#include <linux/delay.h>
#include <linux/error-injection.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/percpu-defs.h>
#include <linux/sysfs.h>
#include <linux/tracepoint.h>
#include <linux/net.h>
#include <linux/socket.h>
#include <linux/nsproxy.h>
#include <linux/inet.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/un.h>
#include <linux/filter.h>
#include <net/sock.h>
#include <linux/namei.h>
#include "bpf_testmod.h"
#include "bpf_testmod_kfunc.h"

#define CREATE_TRACE_POINTS
#include "bpf_testmod-events.h"

#define CONNECT_TIMEOUT_SEC 1

typedef int (*func_proto_typedef)(long);
typedef int (*func_proto_typedef_nested1)(func_proto_typedef);
typedef int (*func_proto_typedef_nested2)(func_proto_typedef_nested1);

DEFINE_PER_CPU(int, bpf_testmod_ksym_percpu) = 123;
long bpf_testmod_test_struct_arg_result;
static DEFINE_MUTEX(sock_lock);
static struct socket *sock;

struct bpf_testmod_struct_arg_1 {
	int a;
};
struct bpf_testmod_struct_arg_2 {
	long a;
	long b;
};

struct bpf_testmod_struct_arg_3 {
	int a;
	int b[];
};

struct bpf_testmod_struct_arg_4 {
	u64 a;
	int b;
};

struct bpf_testmod_struct_arg_5 {
	char a;
	short b;
	int c;
	long d;
};

union bpf_testmod_union_arg_1 {
	char a;
	short b;
	struct bpf_testmod_struct_arg_1 arg;
};

union bpf_testmod_union_arg_2 {
	int a;
	long b;
	struct bpf_testmod_struct_arg_2 arg;
};

__bpf_hook_start();

noinline int
bpf_testmod_test_struct_arg_1(struct bpf_testmod_struct_arg_2 a, int b, int c) {
	bpf_testmod_test_struct_arg_result = a.a + a.b + b + c;
	return bpf_testmod_test_struct_arg_result;
}

noinline int
bpf_testmod_test_struct_arg_2(int a, struct bpf_testmod_struct_arg_2 b, int c) {
	bpf_testmod_test_struct_arg_result = a + b.a + b.b + c;
	return bpf_testmod_test_struct_arg_result;
}

noinline int
bpf_testmod_test_struct_arg_3(int a, int b, struct bpf_testmod_struct_arg_2 c) {
	bpf_testmod_test_struct_arg_result = a + b + c.a + c.b;
	return bpf_testmod_test_struct_arg_result;
}

noinline int
bpf_testmod_test_struct_arg_4(struct bpf_testmod_struct_arg_1 a, int b,
			      int c, int d, struct bpf_testmod_struct_arg_2 e) {
	bpf_testmod_test_struct_arg_result = a.a + b + c + d + e.a + e.b;
	return bpf_testmod_test_struct_arg_result;
}

noinline int
bpf_testmod_test_struct_arg_5(void) {
	bpf_testmod_test_struct_arg_result = 1;
	return bpf_testmod_test_struct_arg_result;
}

noinline int
bpf_testmod_test_struct_arg_6(struct bpf_testmod_struct_arg_3 *a) {
	bpf_testmod_test_struct_arg_result = a->b[0];
	return bpf_testmod_test_struct_arg_result;
}

noinline int
bpf_testmod_test_struct_arg_7(u64 a, void *b, short c, int d, void *e,
			      struct bpf_testmod_struct_arg_4 f)
{
	bpf_testmod_test_struct_arg_result = a + (long)b + c + d +
		(long)e + f.a + f.b;
	return bpf_testmod_test_struct_arg_result;
}

noinline int
bpf_testmod_test_struct_arg_8(u64 a, void *b, short c, int d, void *e,
			      struct bpf_testmod_struct_arg_4 f, int g)
{
	bpf_testmod_test_struct_arg_result = a + (long)b + c + d +
		(long)e + f.a + f.b + g;
	return bpf_testmod_test_struct_arg_result;
}
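
/* These bpf_testmod_test_struct_arg_*() helpers (and the union variants
 * below) exist only as fentry/fexit attach targets: each one stores the
 * sum of its by-value arguments in bpf_testmod_test_struct_arg_result,
 * so a tracing selftest can verify that struct and union arguments of
 * various sizes are passed through to the program correctly.
 */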

noinline int
bpf_testmod_test_struct_arg_9(u64 a, void *b, short c, int d, void *e, char f,
			      short g, struct bpf_testmod_struct_arg_5 h, long i)
{
	bpf_testmod_test_struct_arg_result = a + (long)b + c + d + (long)e +
		f + g + h.a + h.b + h.c + h.d + i;
	return bpf_testmod_test_struct_arg_result;
}

noinline int
bpf_testmod_test_union_arg_1(union bpf_testmod_union_arg_1 a, int b, int c)
{
	bpf_testmod_test_struct_arg_result = a.arg.a + b + c;
	return bpf_testmod_test_struct_arg_result;
}

noinline int
bpf_testmod_test_union_arg_2(int a, union bpf_testmod_union_arg_2 b)
{
	bpf_testmod_test_struct_arg_result = a + b.arg.a + b.arg.b;
	return bpf_testmod_test_struct_arg_result;
}

noinline int
bpf_testmod_test_arg_ptr_to_struct(struct bpf_testmod_struct_arg_1 *a) {
	bpf_testmod_test_struct_arg_result = a->a;
	return bpf_testmod_test_struct_arg_result;
}

__weak noinline void bpf_testmod_looooooooooooooooooooooooooooooong_name(void)
{
}

__bpf_kfunc void
bpf_testmod_test_mod_kfunc(int i)
{
	*(int *)this_cpu_ptr(&bpf_testmod_ksym_percpu) = i;
}

__bpf_kfunc int bpf_iter_testmod_seq_new(struct bpf_iter_testmod_seq *it, s64 value, int cnt)
{
	/* Set cnt first so that a failed init still leaves the iterator in
	 * a state that is safe to pass to bpf_iter_testmod_seq_next() and
	 * bpf_iter_testmod_seq_destroy().
	 */
	it->cnt = cnt;

	if (cnt < 0)
		return -EINVAL;

	it->value = value;

	return 0;
}

__bpf_kfunc s64 *bpf_iter_testmod_seq_next(struct bpf_iter_testmod_seq *it)
{
	if (it->cnt <= 0)
		return NULL;

	it->cnt--;

	return &it->value;
}

__bpf_kfunc s64 bpf_iter_testmod_seq_value(int val, struct bpf_iter_testmod_seq *it__iter)
{
	if (it__iter->cnt < 0)
		return 0;

	return val + it__iter->value;
}

__bpf_kfunc void bpf_iter_testmod_seq_destroy(struct bpf_iter_testmod_seq *it)
{
	it->cnt = 0;
}
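
/* A BPF program consumes this as an open-coded iterator, roughly
 * (a sketch, not the literal selftest):
 *
 *	struct bpf_iter_testmod_seq it;
 *	s64 sum = 0, *v;
 *
 *	if (!bpf_iter_testmod_seq_new(&it, 100, 3))
 *		while ((v = bpf_iter_testmod_seq_next(&it)))
 *			sum += *v;
 *	bpf_iter_testmod_seq_destroy(&it);
 *
 * The KF_ITER_NEW/NEXT/DESTROY flags registered below let the verifier
 * enforce this new -> next* -> destroy lifecycle.
 */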

__bpf_kfunc void bpf_kfunc_common_test(void)
{
}

__bpf_kfunc void bpf_kfunc_dynptr_test(struct bpf_dynptr *ptr,
				       struct bpf_dynptr *ptr__nullable)
{
}

__bpf_kfunc struct sk_buff *bpf_kfunc_nested_acquire_nonzero_offset_test(struct sk_buff_head *ptr)
{
	return NULL;
}

__bpf_kfunc struct sk_buff *bpf_kfunc_nested_acquire_zero_offset_test(struct sock_common *ptr)
{
	return NULL;
}

__bpf_kfunc void bpf_kfunc_nested_release_test(struct sk_buff *ptr)
{
}

__bpf_kfunc void bpf_kfunc_trusted_vma_test(struct vm_area_struct *ptr)
{
}

__bpf_kfunc void bpf_kfunc_trusted_task_test(struct task_struct *ptr)
{
}

__bpf_kfunc void bpf_kfunc_trusted_num_test(int *ptr)
{
}

__bpf_kfunc void bpf_kfunc_rcu_task_test(struct task_struct *ptr)
{
}

__bpf_kfunc struct task_struct *bpf_kfunc_ret_rcu_test(void)
{
	return NULL;
}

__bpf_kfunc int *bpf_kfunc_ret_rcu_test_nostruct(int rdonly_buf_size)
{
	return NULL;
}

__bpf_kfunc struct bpf_testmod_ctx *
bpf_testmod_ctx_create(int *err)
{
	struct bpf_testmod_ctx *ctx;

	ctx = kzalloc(sizeof(*ctx), GFP_ATOMIC);
	if (!ctx) {
		*err = -ENOMEM;
		return NULL;
	}
	refcount_set(&ctx->usage, 1);

	return ctx;
}

static void testmod_free_cb(struct rcu_head *head)
{
	struct bpf_testmod_ctx *ctx;

	ctx = container_of(head, struct bpf_testmod_ctx, rcu);
	kfree(ctx);
}

__bpf_kfunc void
bpf_testmod_ctx_release(struct bpf_testmod_ctx *ctx)
{
	if (!ctx)
		return;
	if (refcount_dec_and_test(&ctx->usage))
		call_rcu(&ctx->rcu, testmod_free_cb);
}
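
/* Typical BPF-side usage of the acquire/release pair above (sketch):
 *
 *	struct bpf_testmod_ctx *ctx;
 *	int err;
 *
 *	ctx = bpf_testmod_ctx_create(&err);
 *	if (ctx)
 *		bpf_testmod_ctx_release(ctx);
 *
 * The verifier enforces the pairing via the KF_ACQUIRE/KF_RELEASE flags
 * registered for these kfuncs below, and the dtor registration in
 * bpf_testmod_init() lets referenced kptrs of this type be cleaned up.
 */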

static struct bpf_testmod_ops3 *st_ops3;

static int bpf_testmod_test_3(void)
{
	return 0;
}

static int bpf_testmod_test_4(void)
{
	return 0;
}

static struct bpf_testmod_ops3 __bpf_testmod_ops3 = {
	.test_1 = bpf_testmod_test_3,
	.test_2 = bpf_testmod_test_4,
};

static void bpf_testmod_test_struct_ops3(void)
{
	if (st_ops3)
		st_ops3->test_1();
}

__bpf_kfunc void bpf_testmod_ops3_call_test_1(void)
{
	st_ops3->test_1();
}

__bpf_kfunc void bpf_testmod_ops3_call_test_2(void)
{
	st_ops3->test_2();
}

struct bpf_testmod_btf_type_tag_1 {
	int a;
};

struct bpf_testmod_btf_type_tag_2 {
	struct bpf_testmod_btf_type_tag_1 __user *p;
};

struct bpf_testmod_btf_type_tag_3 {
	struct bpf_testmod_btf_type_tag_1 __percpu *p;
};

noinline int
bpf_testmod_test_btf_type_tag_user_1(struct bpf_testmod_btf_type_tag_1 __user *arg) {
	BTF_TYPE_EMIT(func_proto_typedef);
	BTF_TYPE_EMIT(func_proto_typedef_nested1);
	BTF_TYPE_EMIT(func_proto_typedef_nested2);
	return arg->a;
}

noinline int
bpf_testmod_test_btf_type_tag_user_2(struct bpf_testmod_btf_type_tag_2 *arg) {
	return arg->p->a;
}

noinline int
bpf_testmod_test_btf_type_tag_percpu_1(struct bpf_testmod_btf_type_tag_1 __percpu *arg) {
	return arg->a;
}

noinline int
bpf_testmod_test_btf_type_tag_percpu_2(struct bpf_testmod_btf_type_tag_3 *arg) {
	return arg->p->a;
}

noinline int bpf_testmod_loop_test(int n)
{
	/* Make sum volatile, so smart compilers, such as clang, will not
	 * optimize the code by removing the loop.
	 */
	volatile int sum = 0;
	int i;

	/* the primary goal of this test is to test LBR. Create a lot of
	 * branches in the function, so we can catch it easily.
	 */
	for (i = 0; i < n; i++)
		sum += i;
	return sum;
}

__weak noinline struct file *bpf_testmod_return_ptr(int arg)
{
	static struct file f = {};

	switch (arg) {
	case 1: return (void *)EINVAL;		/* user addr */
	case 2: return (void *)0xcafe4a11;	/* user addr */
	case 3: return (void *)-EINVAL;		/* canonical, but invalid */
	case 4: return (void *)(1ull << 60);	/* non-canonical and invalid */
	case 5: return (void *)~(1ull << 30);	/* trigger extable */
	case 6: return &f;			/* valid addr */
	case 7: return (void *)((long)&f | 1);	/* kernel tricks */
#ifdef CONFIG_X86_64
	case 8: return (void *)VSYSCALL_ADDR;	/* vsyscall page address */
#endif
	default: return NULL;
	}
}

noinline int bpf_testmod_fentry_test1(int a)
{
	return a + 1;
}

noinline int bpf_testmod_fentry_test2(int a, u64 b)
{
	return a + b;
}

noinline int bpf_testmod_fentry_test3(char a, int b, u64 c)
{
	return a + b + c;
}

noinline int bpf_testmod_fentry_test7(u64 a, void *b, short c, int d,
				      void *e, char f, int g)
{
	return a + (long)b + c + d + (long)e + f + g;
}

noinline int bpf_testmod_fentry_test11(u64 a, void *b, short c, int d,
				       void *e, char f, int g,
				       unsigned int h, long i, __u64 j,
				       unsigned long k)
{
	return a + (long)b + c + d + (long)e + f + g + h + i + j + k;
}

int bpf_testmod_fentry_ok;
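
/* The bpf_testmod_fentry_test*() functions above are attach targets for
 * module fentry/fexit programs, e.g. (sketch):
 *
 *	SEC("fentry/bpf_testmod_fentry_test1")
 *	int BPF_PROG(on_test1, int a) { ... }
 *
 * bpf_testmod_test_read() below calls them with known arguments and sets
 * bpf_testmod_fentry_ok only if every call returned the expected sum.
 */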

noinline ssize_t
bpf_testmod_test_read(struct file *file, struct kobject *kobj,
		      const struct bin_attribute *bin_attr,
		      char *buf, loff_t off, size_t len)
{
	struct bpf_testmod_test_read_ctx ctx = {
		.buf = buf,
		.off = off,
		.len = len,
	};
	struct bpf_testmod_struct_arg_1 struct_arg1 = {10}, struct_arg1_2 = {-1};
	struct bpf_testmod_struct_arg_2 struct_arg2 = {2, 3};
	struct bpf_testmod_struct_arg_3 *struct_arg3;
	struct bpf_testmod_struct_arg_4 struct_arg4 = {21, 22};
	struct bpf_testmod_struct_arg_5 struct_arg5 = {23, 24, 25, 26};
	union bpf_testmod_union_arg_1 union_arg1 = { .arg = {1} };
	union bpf_testmod_union_arg_2 union_arg2 = { .arg = {2, 3} };
	int i = 1;

	while (bpf_testmod_return_ptr(i))
		i++;

	(void)bpf_testmod_test_struct_arg_1(struct_arg2, 1, 4);
	(void)bpf_testmod_test_struct_arg_2(1, struct_arg2, 4);
	(void)bpf_testmod_test_struct_arg_3(1, 4, struct_arg2);
	(void)bpf_testmod_test_struct_arg_4(struct_arg1, 1, 2, 3, struct_arg2);
	(void)bpf_testmod_test_struct_arg_5();
	(void)bpf_testmod_test_struct_arg_7(16, (void *)17, 18, 19,
					    (void *)20, struct_arg4);
	(void)bpf_testmod_test_struct_arg_8(16, (void *)17, 18, 19,
					    (void *)20, struct_arg4, 23);
	(void)bpf_testmod_test_struct_arg_9(16, (void *)17, 18, 19, (void *)20,
					    21, 22, struct_arg5, 27);

	(void)bpf_testmod_test_union_arg_1(union_arg1, 4, 5);
	(void)bpf_testmod_test_union_arg_2(6, union_arg2);

	(void)bpf_testmod_test_arg_ptr_to_struct(&struct_arg1_2);

	(void)trace_bpf_testmod_test_raw_tp_null_tp(NULL);

	bpf_testmod_test_struct_ops3();

	struct_arg3 = kmalloc((sizeof(struct bpf_testmod_struct_arg_3) +
			       sizeof(int)), GFP_KERNEL);
	if (struct_arg3 != NULL) {
		struct_arg3->b[0] = 1;
		(void)bpf_testmod_test_struct_arg_6(struct_arg3);
		kfree(struct_arg3);
	}

	/* This is always true. Use the check to make sure the compiler
	 * doesn't remove bpf_testmod_loop_test.
	 */
	if (bpf_testmod_loop_test(101) > 100)
		trace_bpf_testmod_test_read(current, &ctx);

	trace_bpf_testmod_test_nullable_bare_tp(NULL);

	/* Magic number to enable writable tp */
	if (len == 64) {
		struct bpf_testmod_test_writable_ctx writable = {
			.val = 1024,
		};
		trace_bpf_testmod_test_writable_bare_tp(&writable);
		if (writable.early_ret)
			return snprintf(buf, len, "%d\n", writable.val);
	}

	if (bpf_testmod_fentry_test1(1) != 2 ||
	    bpf_testmod_fentry_test2(2, 3) != 5 ||
	    bpf_testmod_fentry_test3(4, 5, 6) != 15 ||
	    bpf_testmod_fentry_test7(16, (void *)17, 18, 19, (void *)20,
				     21, 22) != 133 ||
	    bpf_testmod_fentry_test11(16, (void *)17, 18, 19, (void *)20,
				      21, 22, 23, 24, 25, 26) != 231)
		goto out;

	bpf_testmod_fentry_ok = 1;
out:
	return -EIO; /* always fail */
}
EXPORT_SYMBOL(bpf_testmod_test_read);
ALLOW_ERROR_INJECTION(bpf_testmod_test_read, ERRNO);

noinline ssize_t
bpf_testmod_test_write(struct file *file, struct kobject *kobj,
		       const struct bin_attribute *bin_attr,
		       char *buf, loff_t off, size_t len)
{
	struct bpf_testmod_test_write_ctx ctx = {
		.buf = buf,
		.off = off,
		.len = len,
	};

	trace_bpf_testmod_test_write_bare_tp(current, &ctx);

	return -EIO; /* always fail */
}
EXPORT_SYMBOL(bpf_testmod_test_write);
ALLOW_ERROR_INJECTION(bpf_testmod_test_write, ERRNO);

noinline int bpf_fentry_shadow_test(int a)
{
	return a + 2;
}
EXPORT_SYMBOL_GPL(bpf_fentry_shadow_test);

__bpf_hook_end();

static struct bin_attribute bin_attr_bpf_testmod_file __ro_after_init = {
	.attr = { .name = "bpf_testmod", .mode = 0666, },
	.read = bpf_testmod_test_read,
	.write = bpf_testmod_test_write,
};
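
/* The attribute above is created under kernel_kobj, so userspace drives
 * the whole exercise with plain file I/O:
 *
 *	cat /sys/kernel/bpf_testmod		(runs bpf_testmod_test_read)
 *	echo x > /sys/kernel/bpf_testmod	(runs bpf_testmod_test_write)
 *
 * Both handlers always return -EIO; the observable effects are the
 * tracepoints and fentry targets they fire along the way.
 */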

/* The bpf_testmod_uprobe sysfs attribute is enabled for x86_64 only so
 * far; see the test_uretprobe_regs_change test.
 */
#ifdef __x86_64__

static int
uprobe_ret_handler(struct uprobe_consumer *self, unsigned long func,
		   struct pt_regs *regs, __u64 *data)
{
	regs->ax = 0x12345678deadbeef;
	regs->cx = 0x87654321feebdaed;
	regs->r11 = (u64) -1;
	return true;
}

struct testmod_uprobe {
	struct path path;
	struct uprobe *uprobe;
	struct uprobe_consumer consumer;
};

static DEFINE_MUTEX(testmod_uprobe_mutex);

static struct testmod_uprobe uprobe = {
	.consumer.ret_handler = uprobe_ret_handler,
};

static int testmod_register_uprobe(loff_t offset)
{
	int err = -EBUSY;

	if (uprobe.uprobe)
		return -EBUSY;

	mutex_lock(&testmod_uprobe_mutex);

	if (uprobe.uprobe)
		goto out;

	err = kern_path("/proc/self/exe", LOOKUP_FOLLOW, &uprobe.path);
	if (err)
		goto out;

	uprobe.uprobe = uprobe_register(d_real_inode(uprobe.path.dentry),
					offset, 0, &uprobe.consumer);
	if (IS_ERR(uprobe.uprobe)) {
		err = PTR_ERR(uprobe.uprobe);
		path_put(&uprobe.path);
		uprobe.uprobe = NULL;
	}
out:
	mutex_unlock(&testmod_uprobe_mutex);
	return err;
}

static void testmod_unregister_uprobe(void)
{
	mutex_lock(&testmod_uprobe_mutex);

	if (uprobe.uprobe) {
		uprobe_unregister_nosync(uprobe.uprobe, &uprobe.consumer);
		uprobe_unregister_sync();
		path_put(&uprobe.path);
		uprobe.uprobe = NULL;
	}

	mutex_unlock(&testmod_uprobe_mutex);
}

static ssize_t
bpf_testmod_uprobe_write(struct file *file, struct kobject *kobj,
			 const struct bin_attribute *bin_attr,
			 char *buf, loff_t off, size_t len)
{
	unsigned long offset = 0;
	int err = 0;

	if (kstrtoul(buf, 0, &offset))
		return -EINVAL;

	if (offset)
		err = testmod_register_uprobe(offset);
	else
		testmod_unregister_uprobe();

	return err ?: strlen(buf);
}

static struct bin_attribute bin_attr_bpf_testmod_uprobe_file __ro_after_init = {
	.attr = { .name = "bpf_testmod_uprobe", .mode = 0666, },
	.write = bpf_testmod_uprobe_write,
};
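
/* Userspace arms the probe by writing a file offset within its own
 * executable (the file is installed as /sys/kernel/bpf_testmod_uprobe):
 *
 *	echo $OFFSET > /sys/kernel/bpf_testmod_uprobe	(register)
 *	echo 0 > /sys/kernel/bpf_testmod_uprobe		(unregister)
 *
 * The return handler then clobbers ax/cx/r11 so the test can verify that
 * uretprobe register changes are visible to the probed task.
 */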

static int register_bpf_testmod_uprobe(void)
{
	return sysfs_create_bin_file(kernel_kobj, &bin_attr_bpf_testmod_uprobe_file);
}

static void unregister_bpf_testmod_uprobe(void)
{
	testmod_unregister_uprobe();
	sysfs_remove_bin_file(kernel_kobj, &bin_attr_bpf_testmod_uprobe_file);
}

#else
static int register_bpf_testmod_uprobe(void)
{
	return 0;
}

static void unregister_bpf_testmod_uprobe(void) { }
#endif

BTF_KFUNCS_START(bpf_testmod_common_kfunc_ids)
BTF_ID_FLAGS(func, bpf_iter_testmod_seq_new, KF_ITER_NEW)
BTF_ID_FLAGS(func, bpf_iter_testmod_seq_next, KF_ITER_NEXT | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_iter_testmod_seq_destroy, KF_ITER_DESTROY)
BTF_ID_FLAGS(func, bpf_iter_testmod_seq_value)
BTF_ID_FLAGS(func, bpf_kfunc_common_test)
BTF_ID_FLAGS(func, bpf_kfunc_dynptr_test)
BTF_ID_FLAGS(func, bpf_kfunc_nested_acquire_nonzero_offset_test, KF_ACQUIRE)
BTF_ID_FLAGS(func, bpf_kfunc_nested_acquire_zero_offset_test, KF_ACQUIRE)
BTF_ID_FLAGS(func, bpf_kfunc_nested_release_test, KF_RELEASE)
BTF_ID_FLAGS(func, bpf_kfunc_trusted_vma_test, KF_TRUSTED_ARGS)
BTF_ID_FLAGS(func, bpf_kfunc_trusted_task_test, KF_TRUSTED_ARGS)
BTF_ID_FLAGS(func, bpf_kfunc_trusted_num_test, KF_TRUSTED_ARGS)
BTF_ID_FLAGS(func, bpf_kfunc_rcu_task_test, KF_RCU)
BTF_ID_FLAGS(func, bpf_kfunc_ret_rcu_test, KF_RET_NULL | KF_RCU_PROTECTED)
BTF_ID_FLAGS(func, bpf_kfunc_ret_rcu_test_nostruct, KF_RET_NULL | KF_RCU_PROTECTED)
BTF_ID_FLAGS(func, bpf_testmod_ctx_create, KF_ACQUIRE | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_testmod_ctx_release, KF_RELEASE)
BTF_ID_FLAGS(func, bpf_testmod_ops3_call_test_1)
BTF_ID_FLAGS(func, bpf_testmod_ops3_call_test_2)
BTF_KFUNCS_END(bpf_testmod_common_kfunc_ids)

BTF_ID_LIST(bpf_testmod_dtor_ids)
BTF_ID(struct, bpf_testmod_ctx)
BTF_ID(func, bpf_testmod_ctx_release)

static const struct btf_kfunc_id_set bpf_testmod_common_kfunc_set = {
	.owner = THIS_MODULE,
	.set = &bpf_testmod_common_kfunc_ids,
};

__bpf_kfunc u64 bpf_kfunc_call_test1(struct sock *sk, u32 a, u64 b, u32 c, u64 d)
{
	return a + b + c + d;
}

__bpf_kfunc int bpf_kfunc_call_test2(struct sock *sk, u32 a, u32 b)
{
	return a + b;
}

__bpf_kfunc struct sock *bpf_kfunc_call_test3(struct sock *sk)
{
	return sk;
}

__bpf_kfunc long noinline bpf_kfunc_call_test4(signed char a, short b, int c, long d)
{
	/* Provoke the compiler to assume that the caller has sign-extended a,
	 * b and c on platforms where this is required (e.g. s390x).
	 */
	return (long)a + (long)b + (long)c + d;
}

static struct prog_test_ref_kfunc prog_test_struct = {
	.a = 42,
	.b = 108,
	.next = &prog_test_struct,
	.cnt = REFCOUNT_INIT(1),
};

__bpf_kfunc struct prog_test_ref_kfunc *
bpf_kfunc_call_test_acquire(unsigned long *scalar_ptr)
{
	refcount_inc(&prog_test_struct.cnt);
	return &prog_test_struct;
}

__bpf_kfunc void bpf_kfunc_call_test_offset(struct prog_test_ref_kfunc *p)
{
	WARN_ON_ONCE(1);
}

__bpf_kfunc struct prog_test_member *
bpf_kfunc_call_memb_acquire(void)
{
	WARN_ON_ONCE(1);
	return NULL;
}

__bpf_kfunc void bpf_kfunc_call_memb1_release(struct prog_test_member1 *p)
{
	WARN_ON_ONCE(1);
}

static int *__bpf_kfunc_call_test_get_mem(struct prog_test_ref_kfunc *p, const int size)
{
	if (size > 2 * sizeof(int))
		return NULL;

	return (int *)p;
}

__bpf_kfunc int *bpf_kfunc_call_test_get_rdwr_mem(struct prog_test_ref_kfunc *p,
						  const int rdwr_buf_size)
{
	return __bpf_kfunc_call_test_get_mem(p, rdwr_buf_size);
}

__bpf_kfunc int *bpf_kfunc_call_test_get_rdonly_mem(struct prog_test_ref_kfunc *p,
						    const int rdonly_buf_size)
{
	return __bpf_kfunc_call_test_get_mem(p, rdonly_buf_size);
}

/* The next two can't really be used for testing except to ensure that
 * the verifier rejects the call. Acquire functions must return struct
 * pointers, so these ones are failing.
 */
__bpf_kfunc int *bpf_kfunc_call_test_acq_rdonly_mem(struct prog_test_ref_kfunc *p,
						    const int rdonly_buf_size)
{
	return __bpf_kfunc_call_test_get_mem(p, rdonly_buf_size);
}

__bpf_kfunc void bpf_kfunc_call_int_mem_release(int *p)
{
}

__bpf_kfunc void bpf_kfunc_call_test_pass_ctx(struct __sk_buff *skb)
{
}

__bpf_kfunc void bpf_kfunc_call_test_pass1(struct prog_test_pass1 *p)
{
}

__bpf_kfunc void bpf_kfunc_call_test_pass2(struct prog_test_pass2 *p)
{
}

__bpf_kfunc void bpf_kfunc_call_test_fail1(struct prog_test_fail1 *p)
{
}

__bpf_kfunc void bpf_kfunc_call_test_fail2(struct prog_test_fail2 *p)
{
}

__bpf_kfunc void bpf_kfunc_call_test_fail3(struct prog_test_fail3 *p)
{
}

__bpf_kfunc void bpf_kfunc_call_test_mem_len_pass1(void *mem, int mem__sz)
{
}

__bpf_kfunc void bpf_kfunc_call_test_mem_len_fail1(void *mem, int len)
{
}

__bpf_kfunc void bpf_kfunc_call_test_mem_len_fail2(u64 *mem, int len)
{
}

__bpf_kfunc void bpf_kfunc_call_test_ref(struct prog_test_ref_kfunc *p)
{
	/* p != NULL, but p->cnt could be 0 */
}

__bpf_kfunc void bpf_kfunc_call_test_destructive(void)
{
}

__bpf_kfunc static u32 bpf_kfunc_call_test_static_unused_arg(u32 arg, u32 unused)
{
	return arg;
}

__bpf_kfunc void bpf_kfunc_call_test_sleepable(void)
{
}

__bpf_kfunc int bpf_kfunc_init_sock(struct init_sock_args *args)
{
	int proto;
	int err;

	mutex_lock(&sock_lock);

	if (sock) {
		pr_err("%s called without releasing old sock", __func__);
		err = -EPERM;
		goto out;
	}

	switch (args->af) {
	case AF_INET:
	case AF_INET6:
		proto = args->type == SOCK_STREAM ? IPPROTO_TCP : IPPROTO_UDP;
		break;
	case AF_UNIX:
		proto = PF_UNIX;
		break;
	default:
		pr_err("invalid address family %d\n", args->af);
		err = -EINVAL;
		goto out;
	}

	err = sock_create_kern(current->nsproxy->net_ns, args->af, args->type,
			       proto, &sock);

	if (!err)
		/* Set timeout for call to kernel_connect() to prevent it from hanging,
		 * and consider the connection attempt failed if it returns
		 * -EINPROGRESS.
		 */
		sock->sk->sk_sndtimeo = CONNECT_TIMEOUT_SEC * HZ;
out:
	mutex_unlock(&sock_lock);

	return err;
}

__bpf_kfunc void bpf_kfunc_close_sock(void)
{
	mutex_lock(&sock_lock);

	if (sock) {
		sock_release(sock);
		sock = NULL;
	}

	mutex_unlock(&sock_lock);
}

__bpf_kfunc int bpf_kfunc_call_kernel_connect(struct addr_args *args)
{
	int err;

	if (args->addrlen > sizeof(args->addr))
		return -EINVAL;

	mutex_lock(&sock_lock);

	if (!sock) {
		pr_err("%s called without initializing sock", __func__);
		err = -EPERM;
		goto out;
	}

	err = kernel_connect(sock, (struct sockaddr *)&args->addr,
			     args->addrlen, 0);
out:
	mutex_unlock(&sock_lock);

	return err;
}

__bpf_kfunc int bpf_kfunc_call_kernel_bind(struct addr_args *args)
{
	int err;

	if (args->addrlen > sizeof(args->addr))
		return -EINVAL;

	mutex_lock(&sock_lock);

	if (!sock) {
		pr_err("%s called without initializing sock", __func__);
		err = -EPERM;
		goto out;
	}

	err = kernel_bind(sock, (struct sockaddr *)&args->addr, args->addrlen);
out:
	mutex_unlock(&sock_lock);

	return err;
}

__bpf_kfunc int bpf_kfunc_call_kernel_listen(void)
{
	int err;

	mutex_lock(&sock_lock);

	if (!sock) {
		pr_err("%s called without initializing sock", __func__);
		err = -EPERM;
		goto out;
	}

	err = kernel_listen(sock, 128);
out:
	mutex_unlock(&sock_lock);

	return err;
}

__bpf_kfunc int bpf_kfunc_call_kernel_sendmsg(struct sendmsg_args *args)
{
	struct msghdr msg = {
		.msg_name = &args->addr.addr,
		.msg_namelen = args->addr.addrlen,
	};
	struct kvec iov;
	int err;

	if (args->addr.addrlen > sizeof(args->addr.addr) ||
	    args->msglen > sizeof(args->msg))
		return -EINVAL;

	iov.iov_base = args->msg;
	iov.iov_len = args->msglen;

	mutex_lock(&sock_lock);

	if (!sock) {
		pr_err("%s called without initializing sock", __func__);
		err = -EPERM;
		goto out;
	}

	err = kernel_sendmsg(sock, &msg, &iov, 1, args->msglen);
	args->addr.addrlen = msg.msg_namelen;
out:
	mutex_unlock(&sock_lock);

	return err;
}

__bpf_kfunc int bpf_kfunc_call_sock_sendmsg(struct sendmsg_args *args)
{
	struct msghdr msg = {
		.msg_name = &args->addr.addr,
		.msg_namelen = args->addr.addrlen,
	};
	struct kvec iov;
	int err;

	if (args->addr.addrlen > sizeof(args->addr.addr) ||
	    args->msglen > sizeof(args->msg))
		return -EINVAL;

	iov.iov_base = args->msg;
	iov.iov_len = args->msglen;

	iov_iter_kvec(&msg.msg_iter, ITER_SOURCE, &iov, 1, args->msglen);
	mutex_lock(&sock_lock);

	if (!sock) {
		pr_err("%s called without initializing sock", __func__);
		err = -EPERM;
		goto out;
	}

	err = sock_sendmsg(sock, &msg);
	args->addr.addrlen = msg.msg_namelen;
out:
	mutex_unlock(&sock_lock);

	return err;
}

__bpf_kfunc int bpf_kfunc_call_kernel_getsockname(struct addr_args *args)
{
	int err;

	mutex_lock(&sock_lock);

	if (!sock) {
		pr_err("%s called without initializing sock", __func__);
		err = -EPERM;
		goto out;
	}

	err = kernel_getsockname(sock, (struct sockaddr *)&args->addr);
	if (err < 0)
		goto out;

	args->addrlen = err;
	err = 0;
out:
	mutex_unlock(&sock_lock);

	return err;
}

__bpf_kfunc int bpf_kfunc_call_kernel_getpeername(struct addr_args *args)
{
	int err;

	mutex_lock(&sock_lock);

	if (!sock) {
		pr_err("%s called without initializing sock", __func__);
		err = -EPERM;
		goto out;
	}

	err = kernel_getpeername(sock, (struct sockaddr *)&args->addr);
	if (err < 0)
		goto out;

	args->addrlen = err;
	err = 0;
out:
	mutex_unlock(&sock_lock);

	return err;
}
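
/* The socket kfuncs above share one module-global socket guarded by
 * sock_lock. A sleepable (e.g. SYSCALL) BPF program is expected to
 * drive them in order, roughly:
 *
 *	bpf_kfunc_init_sock(&init_args);
 *	bpf_kfunc_call_kernel_bind(&addr_args);		(or connect/listen/...)
 *	bpf_kfunc_call_kernel_getsockname(&addr_args);
 *	bpf_kfunc_close_sock();
 *
 * All of them are registered with KF_SLEEPABLE below since they may
 * block on sock_lock or on the network operation itself.
 */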

static DEFINE_MUTEX(st_ops_mutex);
static struct bpf_testmod_st_ops *st_ops;

__bpf_kfunc int bpf_kfunc_st_ops_test_prologue(struct st_ops_args *args)
{
	int ret = -1;

	mutex_lock(&st_ops_mutex);
	if (st_ops && st_ops->test_prologue)
		ret = st_ops->test_prologue(args);
	mutex_unlock(&st_ops_mutex);

	return ret;
}

__bpf_kfunc int bpf_kfunc_st_ops_test_epilogue(struct st_ops_args *args)
{
	int ret = -1;

	mutex_lock(&st_ops_mutex);
	if (st_ops && st_ops->test_epilogue)
		ret = st_ops->test_epilogue(args);
	mutex_unlock(&st_ops_mutex);

	return ret;
}

__bpf_kfunc int bpf_kfunc_st_ops_test_pro_epilogue(struct st_ops_args *args)
{
	int ret = -1;

	mutex_lock(&st_ops_mutex);
	if (st_ops && st_ops->test_pro_epilogue)
		ret = st_ops->test_pro_epilogue(args);
	mutex_unlock(&st_ops_mutex);

	return ret;
}

__bpf_kfunc int bpf_kfunc_st_ops_inc10(struct st_ops_args *args)
{
	args->a += 10;
	return args->a;
}

__bpf_kfunc int bpf_kfunc_multi_st_ops_test_1(struct st_ops_args *args, u32 id);

BTF_KFUNCS_START(bpf_testmod_check_kfunc_ids)
BTF_ID_FLAGS(func, bpf_testmod_test_mod_kfunc)
BTF_ID_FLAGS(func, bpf_kfunc_call_test1)
BTF_ID_FLAGS(func, bpf_kfunc_call_test2)
BTF_ID_FLAGS(func, bpf_kfunc_call_test3)
BTF_ID_FLAGS(func, bpf_kfunc_call_test4)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_mem_len_pass1)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_mem_len_fail1)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_mem_len_fail2)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_acquire, KF_ACQUIRE | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_kfunc_call_memb_acquire, KF_ACQUIRE | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_kfunc_call_memb1_release, KF_RELEASE)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_get_rdwr_mem, KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_get_rdonly_mem, KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_acq_rdonly_mem, KF_ACQUIRE | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_kfunc_call_int_mem_release, KF_RELEASE)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_pass_ctx)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_pass1)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_pass2)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_fail1)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_fail2)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_fail3)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_ref, KF_TRUSTED_ARGS | KF_RCU)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_destructive, KF_DESTRUCTIVE)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_static_unused_arg)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_offset)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_sleepable, KF_SLEEPABLE)
BTF_ID_FLAGS(func, bpf_kfunc_init_sock, KF_SLEEPABLE)
BTF_ID_FLAGS(func, bpf_kfunc_close_sock, KF_SLEEPABLE)
BTF_ID_FLAGS(func, bpf_kfunc_call_kernel_connect, KF_SLEEPABLE)
BTF_ID_FLAGS(func, bpf_kfunc_call_kernel_bind, KF_SLEEPABLE)
BTF_ID_FLAGS(func, bpf_kfunc_call_kernel_listen, KF_SLEEPABLE)
BTF_ID_FLAGS(func, bpf_kfunc_call_kernel_sendmsg, KF_SLEEPABLE)
BTF_ID_FLAGS(func, bpf_kfunc_call_sock_sendmsg, KF_SLEEPABLE)
BTF_ID_FLAGS(func, bpf_kfunc_call_kernel_getsockname, KF_SLEEPABLE)
BTF_ID_FLAGS(func, bpf_kfunc_call_kernel_getpeername, KF_SLEEPABLE)
BTF_ID_FLAGS(func, bpf_kfunc_st_ops_test_prologue, KF_TRUSTED_ARGS | KF_SLEEPABLE)
BTF_ID_FLAGS(func, bpf_kfunc_st_ops_test_epilogue, KF_TRUSTED_ARGS | KF_SLEEPABLE)
BTF_ID_FLAGS(func, bpf_kfunc_st_ops_test_pro_epilogue, KF_TRUSTED_ARGS | KF_SLEEPABLE)
BTF_ID_FLAGS(func, bpf_kfunc_st_ops_inc10, KF_TRUSTED_ARGS)
BTF_ID_FLAGS(func, bpf_kfunc_multi_st_ops_test_1, KF_TRUSTED_ARGS)
BTF_KFUNCS_END(bpf_testmod_check_kfunc_ids)

static int bpf_testmod_ops_init(struct btf *btf)
{
	return 0;
}

static bool bpf_testmod_ops_is_valid_access(int off, int size,
					    enum bpf_access_type type,
					    const struct bpf_prog *prog,
					    struct bpf_insn_access_aux *info)
{
	return bpf_tracing_btf_ctx_access(off, size, type, prog, info);
}

static int bpf_testmod_ops_init_member(const struct btf_type *t,
				       const struct btf_member *member,
				       void *kdata, const void *udata)
{
	if (member->offset == offsetof(struct bpf_testmod_ops, data) * 8) {
		/* For the data field, this function has to copy the user
		 * value and return 1 to indicate that the field has been
		 * handled by the struct_ops type; otherwise the verifier
		 * will reject the map if the value of the field is not zero.
		 */
		((struct bpf_testmod_ops *)kdata)->data = ((struct bpf_testmod_ops *)udata)->data;
		return 1;
	}
	return 0;
}
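
/* A BPF-side map backing this struct_ops might look roughly like the
 * following (hypothetical skeleton; program names invented):
 *
 *	SEC(".struct_ops.link")
 *	struct bpf_testmod_ops testmod_1 = {
 *		.test_1 = (void *)test_1_prog,
 *		.test_2 = (void *)test_2_prog,
 *		.data = 0x1,	<- copied by init_member() above
 *	};
 */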

static const struct btf_kfunc_id_set bpf_testmod_kfunc_set = {
	.owner = THIS_MODULE,
	.set = &bpf_testmod_check_kfunc_ids,
};

static const struct bpf_verifier_ops bpf_testmod_verifier_ops = {
	.get_func_proto = bpf_base_func_proto,
	.is_valid_access = bpf_testmod_ops_is_valid_access,
};

static const struct bpf_verifier_ops bpf_testmod_verifier_ops3 = {
	.is_valid_access = bpf_testmod_ops_is_valid_access,
};

static int bpf_dummy_reg(void *kdata, struct bpf_link *link)
{
	struct bpf_testmod_ops *ops = kdata;

	if (ops->test_1)
		ops->test_1();
	/* Some test cases (ex. struct_ops_maybe_null) may not have test_2
	 * initialized, so we need to check for NULL.
	 */
	if (ops->test_2)
		ops->test_2(4, ops->data);

	return 0;
}

static void bpf_dummy_unreg(void *kdata, struct bpf_link *link)
{
}

static int bpf_testmod_test_1(void)
{
	return 0;
}

static void bpf_testmod_test_2(int a, int b)
{
}

static int bpf_testmod_tramp(int value)
{
	return 0;
}

static int bpf_testmod_ops__test_maybe_null(int dummy,
					    struct task_struct *task__nullable)
{
	return 0;
}

static int bpf_testmod_ops__test_refcounted(int dummy,
					    struct task_struct *task__ref)
{
	return 0;
}

static struct task_struct *
bpf_testmod_ops__test_return_ref_kptr(int dummy, struct task_struct *task__ref,
				      struct cgroup *cgrp)
{
	return NULL;
}

static struct bpf_testmod_ops __bpf_testmod_ops = {
	.test_1 = bpf_testmod_test_1,
	.test_2 = bpf_testmod_test_2,
	.test_maybe_null = bpf_testmod_ops__test_maybe_null,
	.test_refcounted = bpf_testmod_ops__test_refcounted,
	.test_return_ref_kptr = bpf_testmod_ops__test_return_ref_kptr,
};

struct bpf_struct_ops bpf_bpf_testmod_ops = {
	.verifier_ops = &bpf_testmod_verifier_ops,
	.init = bpf_testmod_ops_init,
	.init_member = bpf_testmod_ops_init_member,
	.reg = bpf_dummy_reg,
	.unreg = bpf_dummy_unreg,
	.cfi_stubs = &__bpf_testmod_ops,
	.name = "bpf_testmod_ops",
	.owner = THIS_MODULE,
};

static int bpf_dummy_reg2(void *kdata, struct bpf_link *link)
{
	struct bpf_testmod_ops2 *ops = kdata;

	ops->test_1();
	return 0;
}

static struct bpf_testmod_ops2 __bpf_testmod_ops2 = {
	.test_1 = bpf_testmod_test_1,
};

struct bpf_struct_ops bpf_testmod_ops2 = {
	.verifier_ops = &bpf_testmod_verifier_ops,
	.init = bpf_testmod_ops_init,
	.init_member = bpf_testmod_ops_init_member,
	.reg = bpf_dummy_reg2,
	.unreg = bpf_dummy_unreg,
	.cfi_stubs = &__bpf_testmod_ops2,
	.name = "bpf_testmod_ops2",
	.owner = THIS_MODULE,
};

static int st_ops3_reg(void *kdata, struct bpf_link *link)
{
	int err = 0;

	mutex_lock(&st_ops_mutex);
	if (st_ops3) {
		pr_err("st_ops has already been registered\n");
		err = -EEXIST;
		goto unlock;
	}
	st_ops3 = kdata;

unlock:
	mutex_unlock(&st_ops_mutex);
	return err;
}

static void st_ops3_unreg(void *kdata, struct bpf_link *link)
{
	mutex_lock(&st_ops_mutex);
	st_ops3 = NULL;
	mutex_unlock(&st_ops_mutex);
}

static void test_1_recursion_detected(struct bpf_prog *prog)
{
	struct bpf_prog_stats *stats;

	stats = this_cpu_ptr(prog->stats);
	printk("bpf_testmod: oh no, recursing into test_1, recursion_misses %llu\n",
	       u64_stats_read(&stats->misses));
}

static int st_ops3_check_member(const struct btf_type *t,
				const struct btf_member *member,
				const struct bpf_prog *prog)
{
	u32 moff = __btf_member_bit_offset(t, member) / 8;

	switch (moff) {
	case offsetof(struct bpf_testmod_ops3, test_1):
		prog->aux->priv_stack_requested = true;
		prog->aux->recursion_detected = test_1_recursion_detected;
		fallthrough;
	default:
		break;
	}
	return 0;
}

struct bpf_struct_ops bpf_testmod_ops3 = {
	.verifier_ops = &bpf_testmod_verifier_ops3,
	.init = bpf_testmod_ops_init,
	.init_member = bpf_testmod_ops_init_member,
	.reg = st_ops3_reg,
	.unreg = st_ops3_unreg,
	.check_member = st_ops3_check_member,
	.cfi_stubs = &__bpf_testmod_ops3,
	.name = "bpf_testmod_ops3",
	.owner = THIS_MODULE,
};

static int bpf_test_mod_st_ops__test_prologue(struct st_ops_args *args)
{
	return 0;
}

static int bpf_test_mod_st_ops__test_epilogue(struct st_ops_args *args)
{
	return 0;
}

static int bpf_test_mod_st_ops__test_pro_epilogue(struct st_ops_args *args)
{
	return 0;
}

static int bpf_cgroup_from_id_id;
static int bpf_cgroup_release_id;

static int st_ops_gen_prologue_with_kfunc(struct bpf_insn *insn_buf, bool direct_write,
					  const struct bpf_prog *prog)
{
	struct bpf_insn *insn = insn_buf;

	/* r8 = r1; // r8 will be "u64 *ctx".
	 * r1 = 0;
	 * r0 = bpf_cgroup_from_id(r1);
	 * if r0 != 0 goto pc+5;
	 * r6 = r8[0]; // r6 will be "struct st_ops_args *args".
	 * r7 = r6->a;
	 * r7 += 1000;
	 * r6->a = r7;
	 * goto pc+2;
	 * r1 = r0;
	 * bpf_cgroup_release(r1);
	 * r1 = r8;
	 */
	*insn++ = BPF_MOV64_REG(BPF_REG_8, BPF_REG_1);
	*insn++ = BPF_MOV64_IMM(BPF_REG_1, 0);
	*insn++ = BPF_CALL_KFUNC(0, bpf_cgroup_from_id_id);
	*insn++ = BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 5);
	*insn++ = BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_8, 0);
	*insn++ = BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_6, offsetof(struct st_ops_args, a));
	*insn++ = BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, 1000);
	*insn++ = BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_7, offsetof(struct st_ops_args, a));
	*insn++ = BPF_JMP_IMM(BPF_JA, 0, 0, 2);
	*insn++ = BPF_MOV64_REG(BPF_REG_1, BPF_REG_0);
	*insn++ = BPF_CALL_KFUNC(0, bpf_cgroup_release_id);
	*insn++ = BPF_MOV64_REG(BPF_REG_1, BPF_REG_8);
	*insn++ = prog->insnsi[0];

	return insn - insn_buf;
}

static int st_ops_gen_epilogue_with_kfunc(struct bpf_insn *insn_buf, const struct bpf_prog *prog,
					  s16 ctx_stack_off)
{
	struct bpf_insn *insn = insn_buf;

	/* r1 = 0;
	 * r6 = 0;
	 * r0 = bpf_cgroup_from_id(r1);
	 * if r0 != 0 goto pc+6;
	 * r1 = stack[ctx_stack_off]; // r1 will be "u64 *ctx"
	 * r1 = r1[0]; // r1 will be "struct st_ops_args *args"
	 * r6 = r1->a;
	 * r6 += 10000;
	 * r1->a = r6;
	 * goto pc+2;
	 * r1 = r0;
	 * bpf_cgroup_release(r1);
	 * r0 = r6;
	 * r0 *= 2;
	 * BPF_EXIT;
	 */
	*insn++ = BPF_MOV64_IMM(BPF_REG_1, 0);
	*insn++ = BPF_MOV64_IMM(BPF_REG_6, 0);
	*insn++ = BPF_CALL_KFUNC(0, bpf_cgroup_from_id_id);
	*insn++ = BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 6);
	*insn++ = BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_FP, ctx_stack_off);
	*insn++ = BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, 0);
	*insn++ = BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, offsetof(struct st_ops_args, a));
	*insn++ = BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 10000);
	*insn++ = BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, offsetof(struct st_ops_args, a));
	*insn++ = BPF_JMP_IMM(BPF_JA, 0, 0, 2);
	*insn++ = BPF_MOV64_REG(BPF_REG_1, BPF_REG_0);
	*insn++ = BPF_CALL_KFUNC(0, bpf_cgroup_release_id);
	*insn++ = BPF_MOV64_REG(BPF_REG_0, BPF_REG_6);
	*insn++ = BPF_ALU64_IMM(BPF_MUL, BPF_REG_0, 2);
	*insn++ = BPF_EXIT_INSN();

	return insn - insn_buf;
}

#define KFUNC_PRO_EPI_PREFIX "test_kfunc_"
static int st_ops_gen_prologue(struct bpf_insn *insn_buf, bool direct_write,
			       const struct bpf_prog *prog)
{
	struct bpf_insn *insn = insn_buf;

	if (strcmp(prog->aux->attach_func_name, "test_prologue") &&
	    strcmp(prog->aux->attach_func_name, "test_pro_epilogue"))
		return 0;

	if (!strncmp(prog->aux->name, KFUNC_PRO_EPI_PREFIX, strlen(KFUNC_PRO_EPI_PREFIX)))
		return st_ops_gen_prologue_with_kfunc(insn_buf, direct_write, prog);

	/* r6 = r1[0]; // r6 will be "struct st_ops_args *args". r1 is "u64 *ctx".
	 * r7 = r6->a;
	 * r7 += 1000;
	 * r6->a = r7;
	 */
	*insn++ = BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0);
	*insn++ = BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_6, offsetof(struct st_ops_args, a));
	*insn++ = BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, 1000);
	*insn++ = BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_7, offsetof(struct st_ops_args, a));
	*insn++ = prog->insnsi[0];

	return insn - insn_buf;
}

static int st_ops_gen_epilogue(struct bpf_insn *insn_buf, const struct bpf_prog *prog,
			       s16 ctx_stack_off)
{
	struct bpf_insn *insn = insn_buf;

	if (strcmp(prog->aux->attach_func_name, "test_epilogue") &&
	    strcmp(prog->aux->attach_func_name, "test_pro_epilogue"))
		return 0;

	if (!strncmp(prog->aux->name, KFUNC_PRO_EPI_PREFIX, strlen(KFUNC_PRO_EPI_PREFIX)))
		return st_ops_gen_epilogue_with_kfunc(insn_buf, prog, ctx_stack_off);

	/* r1 = stack[ctx_stack_off]; // r1 will be "u64 *ctx"
	 * r1 = r1[0]; // r1 will be "struct st_ops_args *args"
	 * r6 = r1->a;
	 * r6 += 10000;
	 * r1->a = r6;
	 * r0 = r6;
	 * r0 *= 2;
	 * BPF_EXIT;
	 */
	*insn++ = BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_FP, ctx_stack_off);
	*insn++ = BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, 0);
	*insn++ = BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, offsetof(struct st_ops_args, a));
	*insn++ = BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 10000);
	*insn++ = BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, offsetof(struct st_ops_args, a));
	*insn++ = BPF_MOV64_REG(BPF_REG_0, BPF_REG_6);
	*insn++ = BPF_ALU64_IMM(BPF_MUL, BPF_REG_0, 2);
	*insn++ = BPF_EXIT_INSN();

	return insn - insn_buf;
}

static int st_ops_btf_struct_access(struct bpf_verifier_log *log,
				    const struct bpf_reg_state *reg,
				    int off, int size)
{
	if (off < 0 || off + size > sizeof(struct st_ops_args))
		return -EACCES;
	return 0;
}

static const struct bpf_verifier_ops st_ops_verifier_ops = {
	.is_valid_access = bpf_testmod_ops_is_valid_access,
	.btf_struct_access = st_ops_btf_struct_access,
	.gen_prologue = st_ops_gen_prologue,
	.gen_epilogue = st_ops_gen_epilogue,
	.get_func_proto = bpf_base_func_proto,
};

static struct bpf_testmod_st_ops st_ops_cfi_stubs = {
	.test_prologue = bpf_test_mod_st_ops__test_prologue,
	.test_epilogue = bpf_test_mod_st_ops__test_epilogue,
	.test_pro_epilogue = bpf_test_mod_st_ops__test_pro_epilogue,
};

static int st_ops_reg(void *kdata, struct bpf_link *link)
{
	int err = 0;

	mutex_lock(&st_ops_mutex);
	if (st_ops) {
		pr_err("st_ops has already been registered\n");
		err = -EEXIST;
		goto unlock;
	}
	st_ops = kdata;

unlock:
	mutex_unlock(&st_ops_mutex);
	return err;
}

static void st_ops_unreg(void *kdata, struct bpf_link *link)
{
	mutex_lock(&st_ops_mutex);
	st_ops = NULL;
	mutex_unlock(&st_ops_mutex);
}

static int st_ops_init(struct btf *btf)
{
	struct btf *kfunc_btf;

	bpf_cgroup_from_id_id = bpf_find_btf_id("bpf_cgroup_from_id", BTF_KIND_FUNC, &kfunc_btf);
	bpf_cgroup_release_id = bpf_find_btf_id("bpf_cgroup_release", BTF_KIND_FUNC, &kfunc_btf);
	if (bpf_cgroup_from_id_id < 0 || bpf_cgroup_release_id < 0)
		return -EINVAL;

	return 0;
}

static int st_ops_init_member(const struct btf_type *t,
			      const struct btf_member *member,
			      void *kdata, const void *udata)
{
	return 0;
}

static struct bpf_struct_ops testmod_st_ops = {
	.verifier_ops = &st_ops_verifier_ops,
	.init = st_ops_init,
	.init_member = st_ops_init_member,
	.reg = st_ops_reg,
	.unreg = st_ops_unreg,
	.cfi_stubs = &st_ops_cfi_stubs,
	.name = "bpf_testmod_st_ops",
	.owner = THIS_MODULE,
};

struct hlist_head multi_st_ops_list;
static DEFINE_SPINLOCK(multi_st_ops_lock);

static int multi_st_ops_init(struct btf *btf)
{
	spin_lock_init(&multi_st_ops_lock);
	INIT_HLIST_HEAD(&multi_st_ops_list);

	return 0;
}

static int multi_st_ops_init_member(const struct btf_type *t,
				    const struct btf_member *member,
				    void *kdata, const void *udata)
{
	return 0;
}

static struct bpf_testmod_multi_st_ops *multi_st_ops_find_nolock(u32 id)
{
	struct bpf_testmod_multi_st_ops *st_ops;

	hlist_for_each_entry(st_ops, &multi_st_ops_list, node) {
		if (st_ops->id == id)
			return st_ops;
	}

	return NULL;
}

int bpf_kfunc_multi_st_ops_test_1(struct st_ops_args *args, u32 id)
{
	struct bpf_testmod_multi_st_ops *st_ops;
	unsigned long flags;
	int ret = -1;

	spin_lock_irqsave(&multi_st_ops_lock, flags);
	st_ops = multi_st_ops_find_nolock(id);
	if (st_ops)
		ret = st_ops->test_1(args);
	spin_unlock_irqrestore(&multi_st_ops_lock, flags);

	return ret;
}
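
/* Unlike the single-instance st_ops above, several of these maps can be
 * registered at once: each instance is keyed by its struct_ops map id
 * (bpf_struct_ops_id() in multi_st_ops_reg() below), and a BPF program
 * selects one by passing that id to bpf_kfunc_multi_st_ops_test_1().
 */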

static int multi_st_ops_reg(void *kdata, struct bpf_link *link)
{
	struct bpf_testmod_multi_st_ops *st_ops =
		(struct bpf_testmod_multi_st_ops *)kdata;
	unsigned long flags;
	int err = 0;
	u32 id;

	if (!st_ops->test_1)
		return -EINVAL;

	id = bpf_struct_ops_id(kdata);

	spin_lock_irqsave(&multi_st_ops_lock, flags);
	if (multi_st_ops_find_nolock(id)) {
		pr_err("multi_st_ops(id:%d) has already been registered\n", id);
		err = -EEXIST;
		goto unlock;
	}

	st_ops->id = id;
	hlist_add_head(&st_ops->node, &multi_st_ops_list);
unlock:
	spin_unlock_irqrestore(&multi_st_ops_lock, flags);

	return err;
}

static void multi_st_ops_unreg(void *kdata, struct bpf_link *link)
{
	struct bpf_testmod_multi_st_ops *st_ops;
	unsigned long flags;
	u32 id;

	id = bpf_struct_ops_id(kdata);

	spin_lock_irqsave(&multi_st_ops_lock, flags);
	st_ops = multi_st_ops_find_nolock(id);
	if (st_ops)
		hlist_del(&st_ops->node);
	spin_unlock_irqrestore(&multi_st_ops_lock, flags);
}

static int bpf_testmod_multi_st_ops__test_1(struct st_ops_args *args)
{
	return 0;
}

static struct bpf_testmod_multi_st_ops multi_st_ops_cfi_stubs = {
	.test_1 = bpf_testmod_multi_st_ops__test_1,
};

struct bpf_struct_ops testmod_multi_st_ops = {
	.verifier_ops = &bpf_testmod_verifier_ops,
	.init = multi_st_ops_init,
	.init_member = multi_st_ops_init_member,
	.reg = multi_st_ops_reg,
	.unreg = multi_st_ops_unreg,
	.cfi_stubs = &multi_st_ops_cfi_stubs,
	.name = "bpf_testmod_multi_st_ops",
	.owner = THIS_MODULE,
};

extern int bpf_fentry_test1(int a);

static int bpf_testmod_init(void)
{
	const struct btf_id_dtor_kfunc bpf_testmod_dtors[] = {
		{
			.btf_id = bpf_testmod_dtor_ids[0],
			.kfunc_btf_id = bpf_testmod_dtor_ids[1]
		},
	};
	void **tramp;
	int ret;

	ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_UNSPEC, &bpf_testmod_common_kfunc_set);
	ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_SCHED_CLS, &bpf_testmod_kfunc_set);
	ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING, &bpf_testmod_kfunc_set);
	ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_SYSCALL, &bpf_testmod_kfunc_set);
	ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS, &bpf_testmod_kfunc_set);
	ret = ret ?: register_bpf_struct_ops(&bpf_bpf_testmod_ops, bpf_testmod_ops);
	ret = ret ?: register_bpf_struct_ops(&bpf_testmod_ops2, bpf_testmod_ops2);
	ret = ret ?: register_bpf_struct_ops(&bpf_testmod_ops3, bpf_testmod_ops3);
	ret = ret ?: register_bpf_struct_ops(&testmod_st_ops, bpf_testmod_st_ops);
	ret = ret ?: register_bpf_struct_ops(&testmod_multi_st_ops, bpf_testmod_multi_st_ops);
	ret = ret ?: register_btf_id_dtor_kfuncs(bpf_testmod_dtors,
						 ARRAY_SIZE(bpf_testmod_dtors),
						 THIS_MODULE);
	if (ret < 0)
		return ret;
	if (bpf_fentry_test1(0) < 0)
		return -EINVAL;
	sock = NULL;
	mutex_init(&sock_lock);
	ret = sysfs_create_bin_file(kernel_kobj, &bin_attr_bpf_testmod_file);
	if (ret < 0)
		return ret;
	ret = register_bpf_testmod_uprobe();
	if (ret < 0)
		return ret;

	/* Ensure nothing is between tramp_1..tramp_40 */
	BUILD_BUG_ON(offsetof(struct bpf_testmod_ops, tramp_1) + 40 * sizeof(long) !=
		     offsetofend(struct bpf_testmod_ops, tramp_40));
	tramp = (void **)&__bpf_testmod_ops.tramp_1;
	while (tramp <= (void **)&__bpf_testmod_ops.tramp_40)
		*tramp++ = bpf_testmod_tramp;

	return 0;
}

static void bpf_testmod_exit(void)
{
	/* Need to wait for all references to be dropped because
	 * bpf_kfunc_call_test_release(), which currently resides in the
	 * kernel, can be called after bpf_testmod is unloaded. Once the
	 * release function is moved into the module, this wait can be
	 * removed.
	 */
	while (refcount_read(&prog_test_struct.cnt) > 1)
		msleep(20);

	bpf_kfunc_close_sock();
	sysfs_remove_bin_file(kernel_kobj, &bin_attr_bpf_testmod_file);
	unregister_bpf_testmod_uprobe();
}

module_init(bpf_testmod_init);
module_exit(bpf_testmod_exit);

MODULE_AUTHOR("Andrii Nakryiko");
MODULE_DESCRIPTION("BPF selftests module");
MODULE_LICENSE("Dual BSD/GPL");