// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/btf_ids.h>
#include <linux/delay.h>
#include <linux/error-injection.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/percpu-defs.h>
#include <linux/sysfs.h>
#include <linux/tracepoint.h>
#include <linux/net.h>
#include <linux/socket.h>
#include <linux/nsproxy.h>
#include <linux/inet.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/un.h>
#include <linux/filter.h>
#include <net/sock.h>
#include <linux/namei.h>
#include "bpf_testmod.h"
#include "bpf_testmod_kfunc.h"

#define CREATE_TRACE_POINTS
#include "bpf_testmod-events.h"

#define CONNECT_TIMEOUT_SEC 1

typedef int (*func_proto_typedef)(long);
typedef int (*func_proto_typedef_nested1)(func_proto_typedef);
typedef int (*func_proto_typedef_nested2)(func_proto_typedef_nested1);

DEFINE_PER_CPU(int, bpf_testmod_ksym_percpu) = 123;
long bpf_testmod_test_struct_arg_result;
static DEFINE_MUTEX(sock_lock);
static struct socket *sock;

struct bpf_testmod_struct_arg_1 {
	int a;
};
struct bpf_testmod_struct_arg_2 {
	long a;
	long b;
};

struct bpf_testmod_struct_arg_3 {
	int a;
	int b[];
};

struct bpf_testmod_struct_arg_4 {
	u64 a;
	int b;
};

struct bpf_testmod_struct_arg_5 {
	char a;
	short b;
	int c;
	long d;
};

__bpf_hook_start();

noinline int
bpf_testmod_test_struct_arg_1(struct bpf_testmod_struct_arg_2 a, int b, int c) {
	bpf_testmod_test_struct_arg_result = a.a + a.b + b + c;
	return bpf_testmod_test_struct_arg_result;
}

noinline int
bpf_testmod_test_struct_arg_2(int a, struct bpf_testmod_struct_arg_2 b, int c) {
	bpf_testmod_test_struct_arg_result = a + b.a + b.b + c;
	return bpf_testmod_test_struct_arg_result;
}

noinline int
bpf_testmod_test_struct_arg_3(int a, int b, struct bpf_testmod_struct_arg_2 c) {
	bpf_testmod_test_struct_arg_result = a + b + c.a + c.b;
	return bpf_testmod_test_struct_arg_result;
}

noinline int
bpf_testmod_test_struct_arg_4(struct bpf_testmod_struct_arg_1 a, int b,
			      int c, int d, struct bpf_testmod_struct_arg_2 e) {
	bpf_testmod_test_struct_arg_result = a.a + b + c + d + e.a + e.b;
	return bpf_testmod_test_struct_arg_result;
}

noinline int
bpf_testmod_test_struct_arg_5(void) {
	bpf_testmod_test_struct_arg_result = 1;
	return bpf_testmod_test_struct_arg_result;
}

noinline int
bpf_testmod_test_struct_arg_6(struct bpf_testmod_struct_arg_3 *a) {
	bpf_testmod_test_struct_arg_result = a->b[0];
	return bpf_testmod_test_struct_arg_result;
}

noinline int
bpf_testmod_test_struct_arg_7(u64 a, void *b, short c, int d, void *e,
			      struct bpf_testmod_struct_arg_4 f)
{
	bpf_testmod_test_struct_arg_result = a + (long)b + c + d +
		(long)e + f.a + f.b;
	return bpf_testmod_test_struct_arg_result;
}

noinline int
bpf_testmod_test_struct_arg_8(u64 a, void *b, short c, int d, void *e,
			      struct bpf_testmod_struct_arg_4 f, int g)
{
	bpf_testmod_test_struct_arg_result = a + (long)b + c + d +
		(long)e + f.a + f.b + g;
	return bpf_testmod_test_struct_arg_result;
}
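
/* Nine mixed arguments force several onto the stack on common 64-bit
 * ABIs, so attaching to the next helper exercises how BPF trampolines
 * fetch stack-passed and struct-by-value arguments (an inference from
 * the argument layout, not a statement from this file).
 */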

noinline int
bpf_testmod_test_struct_arg_9(u64 a, void *b, short c, int d, void *e, char f,
			      short g, struct bpf_testmod_struct_arg_5 h, long i)
{
	bpf_testmod_test_struct_arg_result = a + (long)b + c + d + (long)e +
		f + g + h.a + h.b + h.c + h.d + i;
	return bpf_testmod_test_struct_arg_result;
}

noinline int
bpf_testmod_test_arg_ptr_to_struct(struct bpf_testmod_struct_arg_1 *a) {
	bpf_testmod_test_struct_arg_result = a->a;
	return bpf_testmod_test_struct_arg_result;
}

__weak noinline void bpf_testmod_looooooooooooooooooooooooooooooong_name(void)
{
}

__bpf_kfunc void
bpf_testmod_test_mod_kfunc(int i)
{
	*(int *)this_cpu_ptr(&bpf_testmod_ksym_percpu) = i;
}

__bpf_kfunc int bpf_iter_testmod_seq_new(struct bpf_iter_testmod_seq *it, s64 value, int cnt)
{
	it->cnt = cnt;

	if (cnt < 0)
		return -EINVAL;

	it->value = value;

	return 0;
}

__bpf_kfunc s64 *bpf_iter_testmod_seq_next(struct bpf_iter_testmod_seq* it)
{
	if (it->cnt <= 0)
		return NULL;

	it->cnt--;

	return &it->value;
}

__bpf_kfunc s64 bpf_iter_testmod_seq_value(int val, struct bpf_iter_testmod_seq* it__iter)
{
	if (it__iter->cnt < 0)
		return 0;

	return val + it__iter->value;
}

__bpf_kfunc void bpf_iter_testmod_seq_destroy(struct bpf_iter_testmod_seq *it)
{
	it->cnt = 0;
}

__bpf_kfunc void bpf_kfunc_common_test(void)
{
}

__bpf_kfunc void bpf_kfunc_dynptr_test(struct bpf_dynptr *ptr,
				       struct bpf_dynptr *ptr__nullable)
{
}

__bpf_kfunc struct sk_buff *bpf_kfunc_nested_acquire_nonzero_offset_test(struct sk_buff_head *ptr)
{
	return NULL;
}

__bpf_kfunc struct sk_buff *bpf_kfunc_nested_acquire_zero_offset_test(struct sock_common *ptr)
{
	return NULL;
}

__bpf_kfunc void bpf_kfunc_nested_release_test(struct sk_buff *ptr)
{
}

__bpf_kfunc void bpf_kfunc_trusted_vma_test(struct vm_area_struct *ptr)
{
}

__bpf_kfunc void bpf_kfunc_trusted_task_test(struct task_struct *ptr)
{
}

__bpf_kfunc void bpf_kfunc_trusted_num_test(int *ptr)
{
}

__bpf_kfunc void bpf_kfunc_rcu_task_test(struct task_struct *ptr)
{
}

__bpf_kfunc struct bpf_testmod_ctx *
bpf_testmod_ctx_create(int *err)
{
	struct bpf_testmod_ctx *ctx;

	ctx = kzalloc(sizeof(*ctx), GFP_ATOMIC);
	if (!ctx) {
		*err = -ENOMEM;
		return NULL;
	}
	refcount_set(&ctx->usage, 1);

	return ctx;
}

static void testmod_free_cb(struct rcu_head *head)
{
	struct bpf_testmod_ctx *ctx;

	ctx = container_of(head, struct bpf_testmod_ctx, rcu);
	kfree(ctx);
}

__bpf_kfunc void bpf_testmod_ctx_release(struct bpf_testmod_ctx *ctx)
{
	if (!ctx)
		return;
	if (refcount_dec_and_test(&ctx->usage))
		call_rcu(&ctx->rcu, testmod_free_cb);
}

static struct bpf_testmod_ops3 *st_ops3;

static int bpf_testmod_test_3(void)
{
	return 0;
}

static int bpf_testmod_test_4(void)
{
	return 0;
}

static struct bpf_testmod_ops3 __bpf_testmod_ops3 = {
	.test_1 = bpf_testmod_test_3,
	.test_2 = bpf_testmod_test_4,
};

static void bpf_testmod_test_struct_ops3(void)
{
	if (st_ops3)
		st_ops3->test_1();
}

__bpf_kfunc void bpf_testmod_ops3_call_test_1(void)
{
	st_ops3->test_1();
}

__bpf_kfunc void bpf_testmod_ops3_call_test_2(void)
{
	st_ops3->test_2();
}

struct bpf_testmod_btf_type_tag_1 {
	int a;
};
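
/* The structs below exist so that BTF for the "user" and "percpu" type
 * tags is emitted; the accessor functions that follow give selftests
 * typed attach targets for each tag.
 */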

struct bpf_testmod_btf_type_tag_2 {
	struct bpf_testmod_btf_type_tag_1 __user *p;
};

struct bpf_testmod_btf_type_tag_3 {
	struct bpf_testmod_btf_type_tag_1 __percpu *p;
};

noinline int
bpf_testmod_test_btf_type_tag_user_1(struct bpf_testmod_btf_type_tag_1 __user *arg) {
	BTF_TYPE_EMIT(func_proto_typedef);
	BTF_TYPE_EMIT(func_proto_typedef_nested1);
	BTF_TYPE_EMIT(func_proto_typedef_nested2);
	return arg->a;
}

noinline int
bpf_testmod_test_btf_type_tag_user_2(struct bpf_testmod_btf_type_tag_2 *arg) {
	return arg->p->a;
}

noinline int
bpf_testmod_test_btf_type_tag_percpu_1(struct bpf_testmod_btf_type_tag_1 __percpu *arg) {
	return arg->a;
}

noinline int
bpf_testmod_test_btf_type_tag_percpu_2(struct bpf_testmod_btf_type_tag_3 *arg) {
	return arg->p->a;
}

noinline int bpf_testmod_loop_test(int n)
{
	/* Make sum volatile, so smart compilers, such as clang, will not
	 * optimize the code by removing the loop.
	 */
	volatile int sum = 0;
	int i;

	/* the primary goal of this test is to test LBR. Create a lot of
	 * branches in the function, so we can catch it easily.
	 */
	for (i = 0; i < n; i++)
		sum += i;
	return sum;
}

__weak noinline struct file *bpf_testmod_return_ptr(int arg)
{
	static struct file f = {};

	switch (arg) {
	case 1: return (void *)EINVAL;		/* user addr */
	case 2: return (void *)0xcafe4a11;	/* user addr */
	case 3: return (void *)-EINVAL;		/* canonical, but invalid */
	case 4: return (void *)(1ull << 60);	/* non-canonical and invalid */
	case 5: return (void *)~(1ull << 30);	/* trigger extable */
	case 6: return &f;			/* valid addr */
	case 7: return (void *)((long)&f | 1);	/* kernel tricks */
#ifdef CONFIG_X86_64
	case 8: return (void *)VSYSCALL_ADDR;	/* vsyscall page address */
#endif
	default: return NULL;
	}
}

noinline int bpf_testmod_fentry_test1(int a)
{
	return a + 1;
}

noinline int bpf_testmod_fentry_test2(int a, u64 b)
{
	return a + b;
}

noinline int bpf_testmod_fentry_test3(char a, int b, u64 c)
{
	return a + b + c;
}

noinline int bpf_testmod_fentry_test7(u64 a, void *b, short c, int d,
				      void *e, char f, int g)
{
	return a + (long)b + c + d + (long)e + f + g;
}

noinline int bpf_testmod_fentry_test11(u64 a, void *b, short c, int d,
				       void *e, char f, int g,
				       unsigned int h, long i, __u64 j,
				       unsigned long k)
{
	return a + (long)b + c + d + (long)e + f + g + h + i + j + k;
}

int bpf_testmod_fentry_ok;
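
/* Sysfs read handler for /sys/kernel/bpf_testmod. Reading the file
 * drives the struct-arg, tracepoint, and fentry targets above, then
 * fails with -EIO so the read itself carries no payload (the
 * writable-tracepoint early return below is the one exception).
 */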

noinline ssize_t
bpf_testmod_test_read(struct file *file, struct kobject *kobj,
		      const struct bin_attribute *bin_attr,
		      char *buf, loff_t off, size_t len)
{
	struct bpf_testmod_test_read_ctx ctx = {
		.buf = buf,
		.off = off,
		.len = len,
	};
	struct bpf_testmod_struct_arg_1 struct_arg1 = {10}, struct_arg1_2 = {-1};
	struct bpf_testmod_struct_arg_2 struct_arg2 = {2, 3};
	struct bpf_testmod_struct_arg_3 *struct_arg3;
	struct bpf_testmod_struct_arg_4 struct_arg4 = {21, 22};
	struct bpf_testmod_struct_arg_5 struct_arg5 = {23, 24, 25, 26};
	int i = 1;

	while (bpf_testmod_return_ptr(i))
		i++;

	(void)bpf_testmod_test_struct_arg_1(struct_arg2, 1, 4);
	(void)bpf_testmod_test_struct_arg_2(1, struct_arg2, 4);
	(void)bpf_testmod_test_struct_arg_3(1, 4, struct_arg2);
	(void)bpf_testmod_test_struct_arg_4(struct_arg1, 1, 2, 3, struct_arg2);
	(void)bpf_testmod_test_struct_arg_5();
	(void)bpf_testmod_test_struct_arg_7(16, (void *)17, 18, 19,
					    (void *)20, struct_arg4);
	(void)bpf_testmod_test_struct_arg_8(16, (void *)17, 18, 19,
					    (void *)20, struct_arg4, 23);
	(void)bpf_testmod_test_struct_arg_9(16, (void *)17, 18, 19, (void *)20,
					    21, 22, struct_arg5, 27);

	(void)bpf_testmod_test_arg_ptr_to_struct(&struct_arg1_2);

	(void)trace_bpf_testmod_test_raw_tp_null_tp(NULL);

	bpf_testmod_test_struct_ops3();

	struct_arg3 = kmalloc((sizeof(struct bpf_testmod_struct_arg_3) +
				sizeof(int)), GFP_KERNEL);
	if (struct_arg3 != NULL) {
		struct_arg3->b[0] = 1;
		(void)bpf_testmod_test_struct_arg_6(struct_arg3);
		kfree(struct_arg3);
	}

	/* This is always true. Use the check to make sure the compiler
	 * doesn't remove bpf_testmod_loop_test.
	 */
	if (bpf_testmod_loop_test(101) > 100)
		trace_bpf_testmod_test_read(current, &ctx);

	trace_bpf_testmod_test_nullable_bare_tp(NULL);

	/* Magic number to enable writable tp */
	if (len == 64) {
		struct bpf_testmod_test_writable_ctx writable = {
			.val = 1024,
		};
		trace_bpf_testmod_test_writable_bare_tp(&writable);
		if (writable.early_ret)
			return snprintf(buf, len, "%d\n", writable.val);
	}

	if (bpf_testmod_fentry_test1(1) != 2 ||
	    bpf_testmod_fentry_test2(2, 3) != 5 ||
	    bpf_testmod_fentry_test3(4, 5, 6) != 15 ||
	    bpf_testmod_fentry_test7(16, (void *)17, 18, 19, (void *)20,
				     21, 22) != 133 ||
	    bpf_testmod_fentry_test11(16, (void *)17, 18, 19, (void *)20,
				      21, 22, 23, 24, 25, 26) != 231)
		goto out;

	bpf_testmod_fentry_ok = 1;
out:
	return -EIO; /* always fail */
}
EXPORT_SYMBOL(bpf_testmod_test_read);
ALLOW_ERROR_INJECTION(bpf_testmod_test_read, ERRNO);

noinline ssize_t
bpf_testmod_test_write(struct file *file, struct kobject *kobj,
		       const struct bin_attribute *bin_attr,
		       char *buf, loff_t off, size_t len)
{
	struct bpf_testmod_test_write_ctx ctx = {
		.buf = buf,
		.off = off,
		.len = len,
	};

	trace_bpf_testmod_test_write_bare_tp(current, &ctx);

	return -EIO; /* always fail */
}
EXPORT_SYMBOL(bpf_testmod_test_write);
ALLOW_ERROR_INJECTION(bpf_testmod_test_write, ERRNO);

noinline int bpf_fentry_shadow_test(int a)
{
	return a + 2;
}
EXPORT_SYMBOL_GPL(bpf_fentry_shadow_test);

__bpf_hook_end();

static struct bin_attribute bin_attr_bpf_testmod_file __ro_after_init = {
	.attr = { .name = "bpf_testmod", .mode = 0666, },
	.read = bpf_testmod_test_read,
	.write = bpf_testmod_test_write,
};

/* The bpf_testmod_uprobe sysfs attribute is enabled for x86_64 only so
 * far; see the test_uretprobe_regs_change test.
 */
#ifdef __x86_64__

static int
uprobe_ret_handler(struct uprobe_consumer *self, unsigned long func,
		   struct pt_regs *regs, __u64 *data)
{
	regs->ax = 0x12345678deadbeef;
	regs->cx = 0x87654321feebdaed;
	regs->r11 = (u64) -1;
	return true;
}

struct testmod_uprobe {
	struct path path;
	struct uprobe *uprobe;
	struct uprobe_consumer consumer;
};

static DEFINE_MUTEX(testmod_uprobe_mutex);

static struct testmod_uprobe uprobe = {
	.consumer.ret_handler = uprobe_ret_handler,
};
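
/* Attach a uretprobe to /proc/self/exe at the offset written to the
 * sysfs file; the unlocked check is only a fast path and is repeated
 * under testmod_uprobe_mutex.
 */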

static int testmod_register_uprobe(loff_t offset)
{
	int err = -EBUSY;

	if (uprobe.uprobe)
		return -EBUSY;

	mutex_lock(&testmod_uprobe_mutex);

	if (uprobe.uprobe)
		goto out;

	err = kern_path("/proc/self/exe", LOOKUP_FOLLOW, &uprobe.path);
	if (err)
		goto out;

	uprobe.uprobe = uprobe_register(d_real_inode(uprobe.path.dentry),
					offset, 0, &uprobe.consumer);
	if (IS_ERR(uprobe.uprobe)) {
		err = PTR_ERR(uprobe.uprobe);
		path_put(&uprobe.path);
		uprobe.uprobe = NULL;
	}
out:
	mutex_unlock(&testmod_uprobe_mutex);
	return err;
}

static void testmod_unregister_uprobe(void)
{
	mutex_lock(&testmod_uprobe_mutex);

	if (uprobe.uprobe) {
		uprobe_unregister_nosync(uprobe.uprobe, &uprobe.consumer);
		uprobe_unregister_sync();
		path_put(&uprobe.path);
		uprobe.uprobe = NULL;
	}

	mutex_unlock(&testmod_uprobe_mutex);
}

static ssize_t
bpf_testmod_uprobe_write(struct file *file, struct kobject *kobj,
			 const struct bin_attribute *bin_attr,
			 char *buf, loff_t off, size_t len)
{
	unsigned long offset = 0;
	int err = 0;

	if (kstrtoul(buf, 0, &offset))
		return -EINVAL;

	if (offset)
		err = testmod_register_uprobe(offset);
	else
		testmod_unregister_uprobe();

	return err ?: strlen(buf);
}

static struct bin_attribute bin_attr_bpf_testmod_uprobe_file __ro_after_init = {
	.attr = { .name = "bpf_testmod_uprobe", .mode = 0666, },
	.write = bpf_testmod_uprobe_write,
};

static int register_bpf_testmod_uprobe(void)
{
	return sysfs_create_bin_file(kernel_kobj, &bin_attr_bpf_testmod_uprobe_file);
}

static void unregister_bpf_testmod_uprobe(void)
{
	testmod_unregister_uprobe();
	sysfs_remove_bin_file(kernel_kobj, &bin_attr_bpf_testmod_uprobe_file);
}

#else
static int register_bpf_testmod_uprobe(void)
{
	return 0;
}

static void unregister_bpf_testmod_uprobe(void) { }
#endif

BTF_KFUNCS_START(bpf_testmod_common_kfunc_ids)
BTF_ID_FLAGS(func, bpf_iter_testmod_seq_new, KF_ITER_NEW)
BTF_ID_FLAGS(func, bpf_iter_testmod_seq_next, KF_ITER_NEXT | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_iter_testmod_seq_destroy, KF_ITER_DESTROY)
BTF_ID_FLAGS(func, bpf_iter_testmod_seq_value)
BTF_ID_FLAGS(func, bpf_kfunc_common_test)
BTF_ID_FLAGS(func, bpf_kfunc_dynptr_test)
BTF_ID_FLAGS(func, bpf_kfunc_nested_acquire_nonzero_offset_test, KF_ACQUIRE)
BTF_ID_FLAGS(func, bpf_kfunc_nested_acquire_zero_offset_test, KF_ACQUIRE)
BTF_ID_FLAGS(func, bpf_kfunc_nested_release_test, KF_RELEASE)
BTF_ID_FLAGS(func, bpf_kfunc_trusted_vma_test, KF_TRUSTED_ARGS)
BTF_ID_FLAGS(func, bpf_kfunc_trusted_task_test, KF_TRUSTED_ARGS)
BTF_ID_FLAGS(func, bpf_kfunc_trusted_num_test, KF_TRUSTED_ARGS)
BTF_ID_FLAGS(func, bpf_kfunc_rcu_task_test, KF_RCU)
BTF_ID_FLAGS(func, bpf_testmod_ctx_create, KF_ACQUIRE | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_testmod_ctx_release, KF_RELEASE)
BTF_ID_FLAGS(func, bpf_testmod_ops3_call_test_1)
BTF_ID_FLAGS(func, bpf_testmod_ops3_call_test_2)
BTF_KFUNCS_END(bpf_testmod_common_kfunc_ids)

BTF_ID_LIST(bpf_testmod_dtor_ids)
BTF_ID(struct, bpf_testmod_ctx)
BTF_ID(func, bpf_testmod_ctx_release)

static const struct btf_kfunc_id_set bpf_testmod_common_kfunc_set = {
	.owner = THIS_MODULE,
	.set = &bpf_testmod_common_kfunc_ids,
};
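
/* The bpf_kfunc_call_test* family below backs the kfunc_call selftests.
 * A BPF program would call them roughly like this (a sketch of typical
 * selftest usage, not code from this module):
 *
 *	sum = bpf_kfunc_call_test1(sk, 1, 2, 3, 4);
 *	p = bpf_kfunc_call_test_acquire(&scalar);
 *	if (p)
 *		bpf_kfunc_call_test_release(p);
 */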

__bpf_kfunc u64 bpf_kfunc_call_test1(struct sock *sk, u32 a, u64 b, u32 c, u64 d)
{
	return a + b + c + d;
}

__bpf_kfunc int bpf_kfunc_call_test2(struct sock *sk, u32 a, u32 b)
{
	return a + b;
}

__bpf_kfunc struct sock *bpf_kfunc_call_test3(struct sock *sk)
{
	return sk;
}

__bpf_kfunc long noinline bpf_kfunc_call_test4(signed char a, short b, int c, long d)
{
	/* Provoke the compiler to assume that the caller has sign-extended a,
	 * b and c on platforms where this is required (e.g. s390x).
	 */
	return (long)a + (long)b + (long)c + d;
}

static struct prog_test_ref_kfunc prog_test_struct = {
	.a = 42,
	.b = 108,
	.next = &prog_test_struct,
	.cnt = REFCOUNT_INIT(1),
};

__bpf_kfunc struct prog_test_ref_kfunc *
bpf_kfunc_call_test_acquire(unsigned long *scalar_ptr)
{
	refcount_inc(&prog_test_struct.cnt);
	return &prog_test_struct;
}

__bpf_kfunc void bpf_kfunc_call_test_offset(struct prog_test_ref_kfunc *p)
{
	WARN_ON_ONCE(1);
}

__bpf_kfunc struct prog_test_member *
bpf_kfunc_call_memb_acquire(void)
{
	WARN_ON_ONCE(1);
	return NULL;
}

__bpf_kfunc void bpf_kfunc_call_memb1_release(struct prog_test_member1 *p)
{
	WARN_ON_ONCE(1);
}

static int *__bpf_kfunc_call_test_get_mem(struct prog_test_ref_kfunc *p, const int size)
{
	if (size > 2 * sizeof(int))
		return NULL;

	return (int *)p;
}

__bpf_kfunc int *bpf_kfunc_call_test_get_rdwr_mem(struct prog_test_ref_kfunc *p,
						  const int rdwr_buf_size)
{
	return __bpf_kfunc_call_test_get_mem(p, rdwr_buf_size);
}

__bpf_kfunc int *bpf_kfunc_call_test_get_rdonly_mem(struct prog_test_ref_kfunc *p,
						    const int rdonly_buf_size)
{
	return __bpf_kfunc_call_test_get_mem(p, rdonly_buf_size);
}

/* The next two can't really be used for testing except to ensure that
 * the verifier rejects the call. Acquire functions must return struct
 * pointers, so these fail.
 */
__bpf_kfunc int *bpf_kfunc_call_test_acq_rdonly_mem(struct prog_test_ref_kfunc *p,
						    const int rdonly_buf_size)
{
	return __bpf_kfunc_call_test_get_mem(p, rdonly_buf_size);
}

__bpf_kfunc void bpf_kfunc_call_int_mem_release(int *p)
{
}

__bpf_kfunc void bpf_kfunc_call_test_pass_ctx(struct __sk_buff *skb)
{
}

__bpf_kfunc void bpf_kfunc_call_test_pass1(struct prog_test_pass1 *p)
{
}

__bpf_kfunc void bpf_kfunc_call_test_pass2(struct prog_test_pass2 *p)
{
}

__bpf_kfunc void bpf_kfunc_call_test_fail1(struct prog_test_fail1 *p)
{
}

__bpf_kfunc void bpf_kfunc_call_test_fail2(struct prog_test_fail2 *p)
{
}

__bpf_kfunc void bpf_kfunc_call_test_fail3(struct prog_test_fail3 *p)
{
}

__bpf_kfunc void bpf_kfunc_call_test_mem_len_pass1(void *mem, int mem__sz)
{
}

__bpf_kfunc void bpf_kfunc_call_test_mem_len_fail1(void *mem, int len)
{
}

__bpf_kfunc void bpf_kfunc_call_test_mem_len_fail2(u64 *mem, int len)
{
}

__bpf_kfunc void bpf_kfunc_call_test_ref(struct prog_test_ref_kfunc *p)
{
	/* p != NULL, but p->cnt could be 0 */
}

__bpf_kfunc void bpf_kfunc_call_test_destructive(void)
{
}

__bpf_kfunc static u32 bpf_kfunc_call_test_static_unused_arg(u32 arg, u32 unused)
{
	return arg;
}

__bpf_kfunc void bpf_kfunc_call_test_sleepable(void)
{
}

__bpf_kfunc int bpf_kfunc_init_sock(struct init_sock_args *args)
{
	int proto;
	int err;

	mutex_lock(&sock_lock);

	if (sock) {
		pr_err("%s called without releasing old sock", __func__);
		err = -EPERM;
		goto out;
	}

	switch (args->af) {
	case AF_INET:
	case AF_INET6:
		proto = args->type == SOCK_STREAM ? IPPROTO_TCP : IPPROTO_UDP;
		break;
	case AF_UNIX:
		proto = PF_UNIX;
		break;
	default:
		pr_err("invalid address family %d\n", args->af);
		err = -EINVAL;
		goto out;
	}

	err = sock_create_kern(current->nsproxy->net_ns, args->af, args->type,
			       proto, &sock);

	if (!err)
		/* Set timeout for call to kernel_connect() to prevent it from hanging,
		 * and consider the connection attempt failed if it returns
		 * -EINPROGRESS.
		 */
		sock->sk->sk_sndtimeo = CONNECT_TIMEOUT_SEC * HZ;
out:
	mutex_unlock(&sock_lock);

	return err;
}

__bpf_kfunc void bpf_kfunc_close_sock(void)
{
	mutex_lock(&sock_lock);

	if (sock) {
		sock_release(sock);
		sock = NULL;
	}

	mutex_unlock(&sock_lock);
}

__bpf_kfunc int bpf_kfunc_call_kernel_connect(struct addr_args *args)
{
	int err;

	if (args->addrlen > sizeof(args->addr))
		return -EINVAL;

	mutex_lock(&sock_lock);

	if (!sock) {
		pr_err("%s called without initializing sock", __func__);
		err = -EPERM;
		goto out;
	}

	err = kernel_connect(sock, (struct sockaddr *)&args->addr,
			     args->addrlen, 0);
out:
	mutex_unlock(&sock_lock);

	return err;
}

__bpf_kfunc int bpf_kfunc_call_kernel_bind(struct addr_args *args)
{
	int err;

	if (args->addrlen > sizeof(args->addr))
		return -EINVAL;

	mutex_lock(&sock_lock);

	if (!sock) {
		pr_err("%s called without initializing sock", __func__);
		err = -EPERM;
		goto out;
	}

	err = kernel_bind(sock, (struct sockaddr *)&args->addr, args->addrlen);
out:
	mutex_unlock(&sock_lock);

	return err;
}

__bpf_kfunc int bpf_kfunc_call_kernel_listen(void)
{
	int err;

	mutex_lock(&sock_lock);

	if (!sock) {
		pr_err("%s called without initializing sock", __func__);
		err = -EPERM;
		goto out;
	}

	err = kernel_listen(sock, 128);
out:
	mutex_unlock(&sock_lock);

	return err;
}

__bpf_kfunc int bpf_kfunc_call_kernel_sendmsg(struct sendmsg_args *args)
{
	struct msghdr msg = {
		.msg_name = &args->addr.addr,
		.msg_namelen = args->addr.addrlen,
	};
	struct kvec iov;
	int err;

	if (args->addr.addrlen > sizeof(args->addr.addr) ||
	    args->msglen > sizeof(args->msg))
		return -EINVAL;

	iov.iov_base = args->msg;
	iov.iov_len = args->msglen;

	mutex_lock(&sock_lock);

	if (!sock) {
		pr_err("%s called without initializing sock", __func__);
		err = -EPERM;
		goto out;
	}

	err = kernel_sendmsg(sock, &msg, &iov, 1, args->msglen);
	args->addr.addrlen = msg.msg_namelen;
out:
	mutex_unlock(&sock_lock);

	return err;
}

__bpf_kfunc int bpf_kfunc_call_sock_sendmsg(struct sendmsg_args *args)
{
	struct msghdr msg = {
		.msg_name = &args->addr.addr,
		.msg_namelen = args->addr.addrlen,
	};
	struct kvec iov;
	int err;

	if (args->addr.addrlen > sizeof(args->addr.addr) ||
	    args->msglen > sizeof(args->msg))
		return -EINVAL;

	iov.iov_base = args->msg;
	iov.iov_len = args->msglen;

	iov_iter_kvec(&msg.msg_iter, ITER_SOURCE, &iov, 1, args->msglen);
	mutex_lock(&sock_lock);

	if (!sock) {
		pr_err("%s called without initializing sock", __func__);
		err = -EPERM;
		goto out;
	}

	err = sock_sendmsg(sock, &msg);
	args->addr.addrlen = msg.msg_namelen;
out:
	mutex_unlock(&sock_lock);

	return err;
}

__bpf_kfunc int bpf_kfunc_call_kernel_getsockname(struct addr_args *args)
{
	int err;

	mutex_lock(&sock_lock);

	if (!sock) {
		pr_err("%s called without initializing sock", __func__);
		err = -EPERM;
		goto out;
	}

	err = kernel_getsockname(sock, (struct sockaddr *)&args->addr);
	if (err < 0)
		goto out;

	args->addrlen = err;
	err = 0;
out:
	mutex_unlock(&sock_lock);

	return err;
}

__bpf_kfunc int bpf_kfunc_call_kernel_getpeername(struct addr_args *args)
{
	int err;

	mutex_lock(&sock_lock);

	if (!sock) {
		pr_err("%s called without initializing sock", __func__);
		err = -EPERM;
		goto out;
	}

	err = kernel_getpeername(sock, (struct sockaddr *)&args->addr);
	if (err < 0)
		goto out;

	args->addrlen = err;
	err = 0;
out:
	mutex_unlock(&sock_lock);

	return err;
}

static DEFINE_MUTEX(st_ops_mutex);
static struct bpf_testmod_st_ops *st_ops;

__bpf_kfunc int bpf_kfunc_st_ops_test_prologue(struct st_ops_args *args)
{
	int ret = -1;

	mutex_lock(&st_ops_mutex);
	if (st_ops && st_ops->test_prologue)
		ret = st_ops->test_prologue(args);
	mutex_unlock(&st_ops_mutex);

	return ret;
}

__bpf_kfunc int bpf_kfunc_st_ops_test_epilogue(struct st_ops_args *args)
{
	int ret = -1;

	mutex_lock(&st_ops_mutex);
	if (st_ops && st_ops->test_epilogue)
		ret = st_ops->test_epilogue(args);
	mutex_unlock(&st_ops_mutex);

	return ret;
}

__bpf_kfunc int bpf_kfunc_st_ops_test_pro_epilogue(struct st_ops_args *args)
{
	int ret = -1;

	mutex_lock(&st_ops_mutex);
	if (st_ops && st_ops->test_pro_epilogue)
		ret = st_ops->test_pro_epilogue(args);
	mutex_unlock(&st_ops_mutex);

	return ret;
}

__bpf_kfunc int bpf_kfunc_st_ops_inc10(struct st_ops_args *args)
{
	args->a += 10;
	return args->a;
}
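
/* ID set for the kfuncs above. bpf_testmod_init() registers it for the
 * tracing, syscall, tc (sched_cls), and struct_ops program types, so
 * selftests can call these kfuncs from all of them.
 */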

BTF_KFUNCS_START(bpf_testmod_check_kfunc_ids)
BTF_ID_FLAGS(func, bpf_testmod_test_mod_kfunc)
BTF_ID_FLAGS(func, bpf_kfunc_call_test1)
BTF_ID_FLAGS(func, bpf_kfunc_call_test2)
BTF_ID_FLAGS(func, bpf_kfunc_call_test3)
BTF_ID_FLAGS(func, bpf_kfunc_call_test4)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_mem_len_pass1)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_mem_len_fail1)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_mem_len_fail2)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_acquire, KF_ACQUIRE | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_kfunc_call_memb_acquire, KF_ACQUIRE | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_kfunc_call_memb1_release, KF_RELEASE)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_get_rdwr_mem, KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_get_rdonly_mem, KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_acq_rdonly_mem, KF_ACQUIRE | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_kfunc_call_int_mem_release, KF_RELEASE)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_pass_ctx)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_pass1)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_pass2)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_fail1)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_fail2)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_fail3)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_ref, KF_TRUSTED_ARGS | KF_RCU)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_destructive, KF_DESTRUCTIVE)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_static_unused_arg)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_offset)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_sleepable, KF_SLEEPABLE)
BTF_ID_FLAGS(func, bpf_kfunc_init_sock, KF_SLEEPABLE)
BTF_ID_FLAGS(func, bpf_kfunc_close_sock, KF_SLEEPABLE)
BTF_ID_FLAGS(func, bpf_kfunc_call_kernel_connect, KF_SLEEPABLE)
BTF_ID_FLAGS(func, bpf_kfunc_call_kernel_bind, KF_SLEEPABLE)
BTF_ID_FLAGS(func, bpf_kfunc_call_kernel_listen, KF_SLEEPABLE)
BTF_ID_FLAGS(func, bpf_kfunc_call_kernel_sendmsg, KF_SLEEPABLE)
BTF_ID_FLAGS(func, bpf_kfunc_call_sock_sendmsg, KF_SLEEPABLE)
BTF_ID_FLAGS(func, bpf_kfunc_call_kernel_getsockname, KF_SLEEPABLE)
BTF_ID_FLAGS(func, bpf_kfunc_call_kernel_getpeername, KF_SLEEPABLE)
BTF_ID_FLAGS(func, bpf_kfunc_st_ops_test_prologue, KF_TRUSTED_ARGS | KF_SLEEPABLE)
BTF_ID_FLAGS(func, bpf_kfunc_st_ops_test_epilogue, KF_TRUSTED_ARGS | KF_SLEEPABLE)
BTF_ID_FLAGS(func, bpf_kfunc_st_ops_test_pro_epilogue, KF_TRUSTED_ARGS | KF_SLEEPABLE)
BTF_ID_FLAGS(func, bpf_kfunc_st_ops_inc10, KF_TRUSTED_ARGS)
BTF_KFUNCS_END(bpf_testmod_check_kfunc_ids)

static int bpf_testmod_ops_init(struct btf *btf)
{
	return 0;
}

static bool bpf_testmod_ops_is_valid_access(int off, int size,
					    enum bpf_access_type type,
					    const struct bpf_prog *prog,
					    struct bpf_insn_access_aux *info)
{
	return bpf_tracing_btf_ctx_access(off, size, type, prog, info);
}

static int bpf_testmod_ops_init_member(const struct btf_type *t,
				       const struct btf_member *member,
				       void *kdata, const void *udata)
{
	if (member->offset == offsetof(struct bpf_testmod_ops, data) * 8) {
		/* For data fields, this function has to copy it and return
		 * 1 to indicate that the data has been handled by the
		 * struct_ops type, or the verifier will reject the map if
		 * the value of the data field is not zero.
		 */
		((struct bpf_testmod_ops *)kdata)->data = ((struct bpf_testmod_ops *)udata)->data;
		return 1;
	}
	return 0;
}

static const struct btf_kfunc_id_set bpf_testmod_kfunc_set = {
	.owner = THIS_MODULE,
	.set = &bpf_testmod_check_kfunc_ids,
};

static const struct bpf_verifier_ops bpf_testmod_verifier_ops = {
	.get_func_proto = bpf_base_func_proto,
	.is_valid_access = bpf_testmod_ops_is_valid_access,
};

static const struct bpf_verifier_ops bpf_testmod_verifier_ops3 = {
	.is_valid_access = bpf_testmod_ops_is_valid_access,
};

static int bpf_dummy_reg(void *kdata, struct bpf_link *link)
{
	struct bpf_testmod_ops *ops = kdata;

	if (ops->test_1)
		ops->test_1();
	/* Some test cases (ex. struct_ops_maybe_null) may not have test_2
	 * initialized, so we need to check for NULL.
	 */
	if (ops->test_2)
		ops->test_2(4, ops->data);

	return 0;
}

static void bpf_dummy_unreg(void *kdata, struct bpf_link *link)
{
}

static int bpf_testmod_test_1(void)
{
	return 0;
}

static void bpf_testmod_test_2(int a, int b)
{
}

static int bpf_testmod_tramp(int value)
{
	return 0;
}

static int bpf_testmod_ops__test_maybe_null(int dummy,
					    struct task_struct *task__nullable)
{
	return 0;
}

static int bpf_testmod_ops__test_refcounted(int dummy,
					    struct task_struct *task__ref)
{
	return 0;
}

static struct task_struct *
bpf_testmod_ops__test_return_ref_kptr(int dummy, struct task_struct *task__ref,
				      struct cgroup *cgrp)
{
	return NULL;
}

static struct bpf_testmod_ops __bpf_testmod_ops = {
	.test_1 = bpf_testmod_test_1,
	.test_2 = bpf_testmod_test_2,
	.test_maybe_null = bpf_testmod_ops__test_maybe_null,
	.test_refcounted = bpf_testmod_ops__test_refcounted,
	.test_return_ref_kptr = bpf_testmod_ops__test_return_ref_kptr,
};

struct bpf_struct_ops bpf_bpf_testmod_ops = {
	.verifier_ops = &bpf_testmod_verifier_ops,
	.init = bpf_testmod_ops_init,
	.init_member = bpf_testmod_ops_init_member,
	.reg = bpf_dummy_reg,
	.unreg = bpf_dummy_unreg,
	.cfi_stubs = &__bpf_testmod_ops,
	.name = "bpf_testmod_ops",
	.owner = THIS_MODULE,
};

static int bpf_dummy_reg2(void *kdata, struct bpf_link *link)
{
	struct bpf_testmod_ops2 *ops = kdata;

	ops->test_1();
	return 0;
}

static struct bpf_testmod_ops2 __bpf_testmod_ops2 = {
	.test_1 = bpf_testmod_test_1,
};

struct bpf_struct_ops bpf_testmod_ops2 = {
	.verifier_ops = &bpf_testmod_verifier_ops,
	.init = bpf_testmod_ops_init,
	.init_member = bpf_testmod_ops_init_member,
	.reg = bpf_dummy_reg2,
	.unreg = bpf_dummy_unreg,
	.cfi_stubs = &__bpf_testmod_ops2,
	.name = "bpf_testmod_ops2",
	.owner = THIS_MODULE,
};

static int st_ops3_reg(void *kdata, struct bpf_link *link)
{
	int err = 0;

	mutex_lock(&st_ops_mutex);
	if (st_ops3) {
		pr_err("st_ops has already been registered\n");
		err = -EEXIST;
		goto unlock;
	}
	st_ops3 = kdata;

unlock:
	mutex_unlock(&st_ops_mutex);
	return err;
}

static void st_ops3_unreg(void *kdata, struct bpf_link *link)
{
	mutex_lock(&st_ops_mutex);
	st_ops3 = NULL;
	mutex_unlock(&st_ops_mutex);
}

static void test_1_recursion_detected(struct bpf_prog *prog)
{
	struct bpf_prog_stats *stats;

	stats = this_cpu_ptr(prog->stats);
	printk("bpf_testmod: oh no, recursing into test_1, recursion_misses %llu",
	       u64_stats_read(&stats->misses));
}

static int st_ops3_check_member(const struct btf_type *t,
				const struct btf_member *member,
				const struct bpf_prog *prog)
{
	u32 moff = __btf_member_bit_offset(t, member) / 8;

	switch (moff) {
	case offsetof(struct bpf_testmod_ops3, test_1):
		prog->aux->priv_stack_requested = true;
		prog->aux->recursion_detected = test_1_recursion_detected;
		fallthrough;
	default:
		break;
	}
	return 0;
}
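
/* bpf_testmod_ops3 wires up st_ops3_check_member(), which opts the
 * test_1 program into a private stack and installs the recursion
 * callback above at load time; other members are accepted unchanged.
 */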

struct bpf_struct_ops bpf_testmod_ops3 = {
	.verifier_ops = &bpf_testmod_verifier_ops3,
	.init = bpf_testmod_ops_init,
	.init_member = bpf_testmod_ops_init_member,
	.reg = st_ops3_reg,
	.unreg = st_ops3_unreg,
	.check_member = st_ops3_check_member,
	.cfi_stubs = &__bpf_testmod_ops3,
	.name = "bpf_testmod_ops3",
	.owner = THIS_MODULE,
};

static int bpf_test_mod_st_ops__test_prologue(struct st_ops_args *args)
{
	return 0;
}

static int bpf_test_mod_st_ops__test_epilogue(struct st_ops_args *args)
{
	return 0;
}

static int bpf_test_mod_st_ops__test_pro_epilogue(struct st_ops_args *args)
{
	return 0;
}

static int bpf_cgroup_from_id_id;
static int bpf_cgroup_release_id;

static int st_ops_gen_prologue_with_kfunc(struct bpf_insn *insn_buf, bool direct_write,
					  const struct bpf_prog *prog)
{
	struct bpf_insn *insn = insn_buf;

	/* r8 = r1; // r8 will be "u64 *ctx".
	 * r1 = 0;
	 * r0 = bpf_cgroup_from_id(r1);
	 * if r0 != 0 goto pc+5;
	 * r6 = r8[0]; // r6 will be "struct st_ops *args".
	 * r7 = r6->a;
	 * r7 += 1000;
	 * r6->a = r7;
	 * goto pc+2;
	 * r1 = r0;
	 * bpf_cgroup_release(r1);
	 * r1 = r8;
	 */
	*insn++ = BPF_MOV64_REG(BPF_REG_8, BPF_REG_1);
	*insn++ = BPF_MOV64_IMM(BPF_REG_1, 0);
	*insn++ = BPF_CALL_KFUNC(0, bpf_cgroup_from_id_id);
	*insn++ = BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 5);
	*insn++ = BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_8, 0);
	*insn++ = BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_6, offsetof(struct st_ops_args, a));
	*insn++ = BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, 1000);
	*insn++ = BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_7, offsetof(struct st_ops_args, a));
	*insn++ = BPF_JMP_IMM(BPF_JA, 0, 0, 2);
	*insn++ = BPF_MOV64_REG(BPF_REG_1, BPF_REG_0);
	*insn++ = BPF_CALL_KFUNC(0, bpf_cgroup_release_id);
	*insn++ = BPF_MOV64_REG(BPF_REG_1, BPF_REG_8);
	*insn++ = prog->insnsi[0];

	return insn - insn_buf;
}

static int st_ops_gen_epilogue_with_kfunc(struct bpf_insn *insn_buf, const struct bpf_prog *prog,
					  s16 ctx_stack_off)
{
	struct bpf_insn *insn = insn_buf;

	/* r1 = 0;
	 * r6 = 0;
	 * r0 = bpf_cgroup_from_id(r1);
	 * if r0 != 0 goto pc+6;
	 * r1 = stack[ctx_stack_off]; // r1 will be "u64 *ctx"
	 * r1 = r1[0]; // r1 will be "struct st_ops *args"
	 * r6 = r1->a;
	 * r6 += 10000;
	 * r1->a = r6;
	 * goto pc+2
	 * r1 = r0;
	 * bpf_cgroup_release(r1);
	 * r0 = r6;
	 * r0 *= 2;
	 * BPF_EXIT;
	 */
	*insn++ = BPF_MOV64_IMM(BPF_REG_1, 0);
	*insn++ = BPF_MOV64_IMM(BPF_REG_6, 0);
	*insn++ = BPF_CALL_KFUNC(0, bpf_cgroup_from_id_id);
	*insn++ = BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 6);
	*insn++ = BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_FP, ctx_stack_off);
	*insn++ = BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, 0);
	*insn++ = BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, offsetof(struct st_ops_args, a));
	*insn++ = BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 10000);
	*insn++ = BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, offsetof(struct st_ops_args, a));
	*insn++ = BPF_JMP_IMM(BPF_JA, 0, 0, 2);
	*insn++ = BPF_MOV64_REG(BPF_REG_1, BPF_REG_0);
	*insn++ = BPF_CALL_KFUNC(0, bpf_cgroup_release_id);
	*insn++ = BPF_MOV64_REG(BPF_REG_0, BPF_REG_6);
	*insn++ = BPF_ALU64_IMM(BPF_MUL, BPF_REG_0, 2);
	*insn++ = BPF_EXIT_INSN();

	return insn - insn_buf;
}
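
/* Programs named with the "test_kfunc_" prefix get the kfunc-calling
 * prologue/epilogue generators above; all other test_prologue,
 * test_epilogue, and test_pro_epilogue programs get the plain
 * load/add/store sequences below. Both variants are emitted as raw BPF
 * instructions that the verifier patches in around the program body.
 */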

#define KFUNC_PRO_EPI_PREFIX "test_kfunc_"
static int st_ops_gen_prologue(struct bpf_insn *insn_buf, bool direct_write,
			       const struct bpf_prog *prog)
{
	struct bpf_insn *insn = insn_buf;

	if (strcmp(prog->aux->attach_func_name, "test_prologue") &&
	    strcmp(prog->aux->attach_func_name, "test_pro_epilogue"))
		return 0;

	if (!strncmp(prog->aux->name, KFUNC_PRO_EPI_PREFIX, strlen(KFUNC_PRO_EPI_PREFIX)))
		return st_ops_gen_prologue_with_kfunc(insn_buf, direct_write, prog);

	/* r6 = r1[0]; // r6 will be "struct st_ops *args". r1 is "u64 *ctx".
	 * r7 = r6->a;
	 * r7 += 1000;
	 * r6->a = r7;
	 */
	*insn++ = BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0);
	*insn++ = BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_6, offsetof(struct st_ops_args, a));
	*insn++ = BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, 1000);
	*insn++ = BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_7, offsetof(struct st_ops_args, a));
	*insn++ = prog->insnsi[0];

	return insn - insn_buf;
}

static int st_ops_gen_epilogue(struct bpf_insn *insn_buf, const struct bpf_prog *prog,
			       s16 ctx_stack_off)
{
	struct bpf_insn *insn = insn_buf;

	if (strcmp(prog->aux->attach_func_name, "test_epilogue") &&
	    strcmp(prog->aux->attach_func_name, "test_pro_epilogue"))
		return 0;

	if (!strncmp(prog->aux->name, KFUNC_PRO_EPI_PREFIX, strlen(KFUNC_PRO_EPI_PREFIX)))
		return st_ops_gen_epilogue_with_kfunc(insn_buf, prog, ctx_stack_off);

	/* r1 = stack[ctx_stack_off]; // r1 will be "u64 *ctx"
	 * r1 = r1[0]; // r1 will be "struct st_ops *args"
	 * r6 = r1->a;
	 * r6 += 10000;
	 * r1->a = r6;
	 * r0 = r6;
	 * r0 *= 2;
	 * BPF_EXIT;
	 */
	*insn++ = BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_FP, ctx_stack_off);
	*insn++ = BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, 0);
	*insn++ = BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, offsetof(struct st_ops_args, a));
	*insn++ = BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 10000);
	*insn++ = BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, offsetof(struct st_ops_args, a));
	*insn++ = BPF_MOV64_REG(BPF_REG_0, BPF_REG_6);
	*insn++ = BPF_ALU64_IMM(BPF_MUL, BPF_REG_0, 2);
	*insn++ = BPF_EXIT_INSN();

	return insn - insn_buf;
}

static int st_ops_btf_struct_access(struct bpf_verifier_log *log,
				    const struct bpf_reg_state *reg,
				    int off, int size)
{
	if (off < 0 || off + size > sizeof(struct st_ops_args))
		return -EACCES;
	return 0;
}

static const struct bpf_verifier_ops st_ops_verifier_ops = {
	.is_valid_access = bpf_testmod_ops_is_valid_access,
	.btf_struct_access = st_ops_btf_struct_access,
	.gen_prologue = st_ops_gen_prologue,
	.gen_epilogue = st_ops_gen_epilogue,
	.get_func_proto = bpf_base_func_proto,
};

static struct bpf_testmod_st_ops st_ops_cfi_stubs = {
	.test_prologue = bpf_test_mod_st_ops__test_prologue,
	.test_epilogue = bpf_test_mod_st_ops__test_epilogue,
	.test_pro_epilogue = bpf_test_mod_st_ops__test_pro_epilogue,
};

static int st_ops_reg(void *kdata, struct bpf_link *link)
{
	int err = 0;

	mutex_lock(&st_ops_mutex);
	if (st_ops) {
		pr_err("st_ops has already been registered\n");
		err = -EEXIST;
		goto unlock;
	}
	st_ops = kdata;

unlock:
	mutex_unlock(&st_ops_mutex);
	return err;
}

static void st_ops_unreg(void *kdata, struct bpf_link *link)
{
	mutex_lock(&st_ops_mutex);
	st_ops = NULL;
	mutex_unlock(&st_ops_mutex);
}
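
/* Resolve the BTF IDs of bpf_cgroup_from_id() and bpf_cgroup_release()
 * once at init time so the generated prologue/epilogue above can emit
 * calls to them by ID.
 */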

static int st_ops_init(struct btf *btf)
{
	struct btf *kfunc_btf;

	bpf_cgroup_from_id_id = bpf_find_btf_id("bpf_cgroup_from_id", BTF_KIND_FUNC, &kfunc_btf);
	bpf_cgroup_release_id = bpf_find_btf_id("bpf_cgroup_release", BTF_KIND_FUNC, &kfunc_btf);
	if (bpf_cgroup_from_id_id < 0 || bpf_cgroup_release_id < 0)
		return -EINVAL;

	return 0;
}

static int st_ops_init_member(const struct btf_type *t,
			      const struct btf_member *member,
			      void *kdata, const void *udata)
{
	return 0;
}

static struct bpf_struct_ops testmod_st_ops = {
	.verifier_ops = &st_ops_verifier_ops,
	.init = st_ops_init,
	.init_member = st_ops_init_member,
	.reg = st_ops_reg,
	.unreg = st_ops_unreg,
	.cfi_stubs = &st_ops_cfi_stubs,
	.name = "bpf_testmod_st_ops",
	.owner = THIS_MODULE,
};

extern int bpf_fentry_test1(int a);

static int bpf_testmod_init(void)
{
	const struct btf_id_dtor_kfunc bpf_testmod_dtors[] = {
		{
			.btf_id = bpf_testmod_dtor_ids[0],
			.kfunc_btf_id = bpf_testmod_dtor_ids[1]
		},
	};
	void **tramp;
	int ret;

	ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_UNSPEC, &bpf_testmod_common_kfunc_set);
	ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_SCHED_CLS, &bpf_testmod_kfunc_set);
	ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING, &bpf_testmod_kfunc_set);
	ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_SYSCALL, &bpf_testmod_kfunc_set);
	ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS, &bpf_testmod_kfunc_set);
	ret = ret ?: register_bpf_struct_ops(&bpf_bpf_testmod_ops, bpf_testmod_ops);
	ret = ret ?: register_bpf_struct_ops(&bpf_testmod_ops2, bpf_testmod_ops2);
	ret = ret ?: register_bpf_struct_ops(&bpf_testmod_ops3, bpf_testmod_ops3);
	ret = ret ?: register_bpf_struct_ops(&testmod_st_ops, bpf_testmod_st_ops);
	ret = ret ?: register_btf_id_dtor_kfuncs(bpf_testmod_dtors,
						 ARRAY_SIZE(bpf_testmod_dtors),
						 THIS_MODULE);
	if (ret < 0)
		return ret;
	if (bpf_fentry_test1(0) < 0)
		return -EINVAL;
	sock = NULL;
	mutex_init(&sock_lock);
	ret = sysfs_create_bin_file(kernel_kobj, &bin_attr_bpf_testmod_file);
	if (ret < 0)
		return ret;
	ret = register_bpf_testmod_uprobe();
	if (ret < 0)
		return ret;

	/* Ensure nothing is between tramp_1..tramp_40 */
	BUILD_BUG_ON(offsetof(struct bpf_testmod_ops, tramp_1) + 40 * sizeof(long) !=
		     offsetofend(struct bpf_testmod_ops, tramp_40));
	tramp = (void **)&__bpf_testmod_ops.tramp_1;
	while (tramp <= (void **)&__bpf_testmod_ops.tramp_40)
		*tramp++ = bpf_testmod_tramp;

	return 0;
}

static void bpf_testmod_exit(void)
{
	/* Wait for all references to be dropped because
	 * bpf_kfunc_call_test_release(), which currently resides in the
	 * kernel, can be called after bpf_testmod is unloaded. Once the
	 * release function is moved into the module, this wait can be
	 * removed.
	 */
	while (refcount_read(&prog_test_struct.cnt) > 1)
		msleep(20);

	bpf_kfunc_close_sock();
	sysfs_remove_bin_file(kernel_kobj, &bin_attr_bpf_testmod_file);
	unregister_bpf_testmod_uprobe();
}

module_init(bpf_testmod_init);
module_exit(bpf_testmod_exit);

MODULE_AUTHOR("Andrii Nakryiko");
MODULE_DESCRIPTION("BPF selftests module");
MODULE_LICENSE("Dual BSD/GPL");