// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2017 Facebook
 */
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/btf_ids.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/vmalloc.h>
#include <linux/etherdevice.h>
#include <linux/filter.h>
#include <linux/rcupdate_trace.h>
#include <linux/sched/signal.h>
#include <net/bpf_sk_storage.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <net/net_namespace.h>
#include <net/page_pool.h>
#include <linux/error-injection.h>
#include <linux/smp.h>
#include <linux/sock_diag.h>
#include <net/xdp.h>

#define CREATE_TRACE_POINTS
#include <trace/events/bpf_test_run.h>

struct bpf_test_timer {
	enum { NO_PREEMPT, NO_MIGRATE } mode;
	u32 i;
	u64 time_start, time_spent;
};

static void bpf_test_timer_enter(struct bpf_test_timer *t)
	__acquires(rcu)
{
	rcu_read_lock();
	if (t->mode == NO_PREEMPT)
		preempt_disable();
	else
		migrate_disable();

	t->time_start = ktime_get_ns();
}

static void bpf_test_timer_leave(struct bpf_test_timer *t)
	__releases(rcu)
{
	t->time_start = 0;

	if (t->mode == NO_PREEMPT)
		preempt_enable();
	else
		migrate_enable();
	rcu_read_unlock();
}

static bool bpf_test_timer_continue(struct bpf_test_timer *t, int iterations,
				    u32 repeat, int *err, u32 *duration)
	__must_hold(rcu)
{
	t->i += iterations;
	if (t->i >= repeat) {
		/* We're done. */
		t->time_spent += ktime_get_ns() - t->time_start;
		do_div(t->time_spent, t->i);
		*duration = t->time_spent > U32_MAX ? U32_MAX : (u32)t->time_spent;
		*err = 0;
		goto reset;
	}

	if (signal_pending(current)) {
		/* During iteration: we've been cancelled, abort. */
		*err = -EINTR;
		goto reset;
	}

	if (need_resched()) {
		/* During iteration: we need to reschedule between runs. */
		t->time_spent += ktime_get_ns() - t->time_start;
		bpf_test_timer_leave(t);
		cond_resched();
		bpf_test_timer_enter(t);
	}

	/* Do another round. */
	return true;

reset:
	t->i = 0;
	return false;
}

/* We put this struct at the head of each page with a context and frame
 * initialised when the page is allocated, so we don't have to do this on each
 * repetition of the test run.
 */
struct xdp_page_head {
	struct xdp_buff orig_ctx;
	struct xdp_buff ctx;
	union {
		/* ::data_hard_start starts here */
		DECLARE_FLEX_ARRAY(struct xdp_frame, frame);
		DECLARE_FLEX_ARRAY(u8, data);
	};
};

struct xdp_test_data {
	struct xdp_buff *orig_ctx;
	struct xdp_rxq_info rxq;
	struct net_device *dev;
	struct page_pool *pp;
	struct xdp_frame **frames;
	struct sk_buff **skbs;
	struct xdp_mem_info mem;
	u32 batch_size;
	u32 frame_cnt;
};

#define TEST_XDP_FRAME_SIZE (PAGE_SIZE - sizeof(struct xdp_page_head))
#define TEST_XDP_MAX_BATCH 256

#if BITS_PER_LONG == 64 && PAGE_SIZE == SZ_4K
/* tools/testing/selftests/bpf/prog_tests/xdp_do_redirect.c:%MAX_PKT_SIZE
 * must be updated accordingly when any of these changes, otherwise BPF
 * selftests will fail.
 */
#ifdef __s390x__
#define TEST_MAX_PKT_SIZE 3216
#else
#define TEST_MAX_PKT_SIZE 3408
#endif
static_assert(SKB_WITH_OVERHEAD(TEST_XDP_FRAME_SIZE - XDP_PACKET_HEADROOM) ==
	      TEST_MAX_PKT_SIZE);
#endif

static void xdp_test_run_init_page(struct page *page, void *arg)
{
	struct xdp_page_head *head = phys_to_virt(page_to_phys(page));
	struct xdp_buff *new_ctx, *orig_ctx;
	u32 headroom = XDP_PACKET_HEADROOM;
	struct xdp_test_data *xdp = arg;
	size_t frm_len, meta_len;
	struct xdp_frame *frm;
	void *data;

	orig_ctx = xdp->orig_ctx;
	frm_len = orig_ctx->data_end - orig_ctx->data_meta;
	meta_len = orig_ctx->data - orig_ctx->data_meta;
	headroom -= meta_len;

	new_ctx = &head->ctx;
	frm = head->frame;
	data = head->data;
	memcpy(data + headroom, orig_ctx->data_meta, frm_len);

	xdp_init_buff(new_ctx, TEST_XDP_FRAME_SIZE, &xdp->rxq);
	xdp_prepare_buff(new_ctx, data, headroom, frm_len, true);
	new_ctx->data = new_ctx->data_meta + meta_len;

	xdp_update_frame_from_buff(new_ctx, frm);
	frm->mem = new_ctx->rxq->mem;

	memcpy(&head->orig_ctx, new_ctx, sizeof(head->orig_ctx));
}

static int xdp_test_run_setup(struct xdp_test_data *xdp, struct xdp_buff *orig_ctx)
{
	struct page_pool *pp;
	int err = -ENOMEM;
	struct page_pool_params pp_params = {
		.order = 0,
		.flags = 0,
		.pool_size = xdp->batch_size,
		.nid = NUMA_NO_NODE,
		.init_callback = xdp_test_run_init_page,
		.init_arg = xdp,
	};

	xdp->frames = kvmalloc_array(xdp->batch_size, sizeof(void *), GFP_KERNEL);
	if (!xdp->frames)
		return -ENOMEM;

	xdp->skbs = kvmalloc_array(xdp->batch_size, sizeof(void *), GFP_KERNEL);
	if (!xdp->skbs)
		goto err_skbs;

	pp = page_pool_create(&pp_params);
	if (IS_ERR(pp)) {
		err = PTR_ERR(pp);
		goto err_pp;
	}

	/* will copy 'mem.id' into pp->xdp_mem_id */
	err = xdp_reg_mem_model(&xdp->mem, MEM_TYPE_PAGE_POOL, pp);
	if (err)
		goto err_mmodel;

	xdp->pp = pp;

	/* We create a 'fake' RXQ referencing the original dev, but with an
	 * xdp_mem_info pointing to our page_pool
	 */
	xdp_rxq_info_reg(&xdp->rxq, orig_ctx->rxq->dev, 0, 0);
	xdp->rxq.mem.type = MEM_TYPE_PAGE_POOL;
	xdp->rxq.mem.id = pp->xdp_mem_id;
	xdp->dev = orig_ctx->rxq->dev;
	xdp->orig_ctx = orig_ctx;

	return 0;

err_mmodel:
	page_pool_destroy(pp);
err_pp:
	kvfree(xdp->skbs);
err_skbs:
	kvfree(xdp->frames);
	return err;
}

static void xdp_test_run_teardown(struct xdp_test_data *xdp)
{
	xdp_unreg_mem_model(&xdp->mem);
	page_pool_destroy(xdp->pp);
	kvfree(xdp->frames);
	kvfree(xdp->skbs);
}

static bool ctx_was_changed(struct xdp_page_head *head)
{
	return head->orig_ctx.data != head->ctx.data ||
		head->orig_ctx.data_meta != head->ctx.data_meta ||
		head->orig_ctx.data_end != head->ctx.data_end;
}

static void reset_ctx(struct xdp_page_head *head)
{
	if (likely(!ctx_was_changed(head)))
		return;

	head->ctx.data = head->orig_ctx.data;
	head->ctx.data_meta = head->orig_ctx.data_meta;
	head->ctx.data_end = head->orig_ctx.data_end;
	xdp_update_frame_from_buff(&head->ctx, head->frame);
}

static int xdp_recv_frames(struct xdp_frame **frames, int nframes,
			   struct sk_buff **skbs,
			   struct net_device *dev)
{
	gfp_t gfp = __GFP_ZERO | GFP_ATOMIC;
	int i, n;
	LIST_HEAD(list);
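
	/* Bulk-allocate one skb per frame; if the whole bulk allocation
	 * fails, return the frames to their memory model and bail out.
	 */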
	n = kmem_cache_alloc_bulk(skbuff_cache, gfp, nframes, (void **)skbs);
	if (unlikely(n == 0)) {
		for (i = 0; i < nframes; i++)
			xdp_return_frame(frames[i]);
		return -ENOMEM;
	}

	for (i = 0; i < nframes; i++) {
		struct xdp_frame *xdpf = frames[i];
		struct sk_buff *skb = skbs[i];

		skb = __xdp_build_skb_from_frame(xdpf, skb, dev);
		if (!skb) {
			xdp_return_frame(xdpf);
			continue;
		}

		list_add_tail(&skb->list, &list);
	}
	netif_receive_skb_list(&list);

	return 0;
}

static int xdp_test_run_batch(struct xdp_test_data *xdp, struct bpf_prog *prog,
			      u32 repeat)
{
	struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
	int err = 0, act, ret, i, nframes = 0, batch_sz;
	struct xdp_frame **frames = xdp->frames;
	struct xdp_page_head *head;
	struct xdp_frame *frm;
	bool redirect = false;
	struct xdp_buff *ctx;
	struct page *page;

	batch_sz = min_t(u32, repeat, xdp->batch_size);

	local_bh_disable();
	xdp_set_return_frame_no_direct();

	for (i = 0; i < batch_sz; i++) {
		page = page_pool_dev_alloc_pages(xdp->pp);
		if (!page) {
			err = -ENOMEM;
			goto out;
		}

		head = phys_to_virt(page_to_phys(page));
		reset_ctx(head);
		ctx = &head->ctx;
		frm = head->frame;
		xdp->frame_cnt++;

		act = bpf_prog_run_xdp(prog, ctx);

		/* if program changed pkt bounds we need to update the xdp_frame */
		if (unlikely(ctx_was_changed(head))) {
			ret = xdp_update_frame_from_buff(ctx, frm);
			if (ret) {
				xdp_return_buff(ctx);
				continue;
			}
		}

		switch (act) {
		case XDP_TX:
			/* we can't do a real XDP_TX since we're not in the
			 * driver, so turn it into a REDIRECT back to the same
			 * index
			 */
			ri->tgt_index = xdp->dev->ifindex;
			ri->map_id = INT_MAX;
			ri->map_type = BPF_MAP_TYPE_UNSPEC;
			fallthrough;
		case XDP_REDIRECT:
			redirect = true;
			ret = xdp_do_redirect_frame(xdp->dev, ctx, frm, prog);
			if (ret)
				xdp_return_buff(ctx);
			break;
		case XDP_PASS:
			frames[nframes++] = frm;
			break;
		default:
			bpf_warn_invalid_xdp_action(NULL, prog, act);
			fallthrough;
		case XDP_DROP:
			xdp_return_buff(ctx);
			break;
		}
	}

out:
	if (redirect)
		xdp_do_flush();
	if (nframes) {
		ret = xdp_recv_frames(frames, nframes, xdp->skbs, xdp->dev);
		if (ret)
			err = ret;
	}

	xdp_clear_return_frame_no_direct();
	local_bh_enable();
	return err;
}

static int bpf_test_run_xdp_live(struct bpf_prog *prog, struct xdp_buff *ctx,
				 u32 repeat, u32 batch_size, u32 *time)
{
	struct xdp_test_data xdp = { .batch_size = batch_size };
	struct bpf_test_timer t = { .mode = NO_MIGRATE };
	int ret;

	if (!repeat)
		repeat = 1;

	ret = xdp_test_run_setup(&xdp, ctx);
	if (ret)
		return ret;

	bpf_test_timer_enter(&t);
	do {
		xdp.frame_cnt = 0;
		ret = xdp_test_run_batch(&xdp, prog, repeat - t.i);
		if (unlikely(ret < 0))
			break;
	} while (bpf_test_timer_continue(&t, xdp.frame_cnt, repeat, &ret, time));
	bpf_test_timer_leave(&t);

	xdp_test_run_teardown(&xdp);
	return ret;
}

static int bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat,
			u32 *retval, u32 *time, bool xdp)
{
	struct bpf_prog_array_item item = {.prog = prog};
	struct bpf_run_ctx *old_ctx;
	struct bpf_cg_run_ctx run_ctx;
	struct bpf_test_timer t = { NO_MIGRATE };
	enum bpf_cgroup_storage_type stype;
	int ret;

	for_each_cgroup_storage_type(stype) {
		item.cgroup_storage[stype] = bpf_cgroup_storage_alloc(prog, stype);
		if (IS_ERR(item.cgroup_storage[stype])) {
			item.cgroup_storage[stype] = NULL;
			for_each_cgroup_storage_type(stype)
				bpf_cgroup_storage_free(item.cgroup_storage[stype]);
			return -ENOMEM;
		}
	}

	if (!repeat)
		repeat = 1;

	bpf_test_timer_enter(&t);
	old_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
	do {
		run_ctx.prog_item = &item;
		if (xdp)
			*retval = bpf_prog_run_xdp(prog, ctx);
		else
			*retval = bpf_prog_run(prog, ctx);
	} while (bpf_test_timer_continue(&t, 1, repeat, &ret, time));
	bpf_reset_run_ctx(old_ctx);
	bpf_test_timer_leave(&t);

	for_each_cgroup_storage_type(stype)
		bpf_cgroup_storage_free(item.cgroup_storage[stype]);

	return ret;
}

static int bpf_test_finish(const union bpf_attr *kattr,
			   union bpf_attr __user *uattr, const void *data,
			   struct skb_shared_info *sinfo, u32 size,
			   u32 retval, u32 duration)
{
	void __user *data_out = u64_to_user_ptr(kattr->test.data_out);
	int err = -EFAULT;
	u32 copy_size = size;

	/* Clamp copy if the user has provided a size hint, but copy the full
	 * buffer if not to retain old behaviour.
	 */
	if (kattr->test.data_size_out &&
	    copy_size > kattr->test.data_size_out) {
		copy_size = kattr->test.data_size_out;
		err = -ENOSPC;
	}

	if (data_out) {
		int len = sinfo ? copy_size - sinfo->xdp_frags_size : copy_size;

		if (len < 0) {
			err = -ENOSPC;
			goto out;
		}

		if (copy_to_user(data_out, data, len))
			goto out;

		if (sinfo) {
			int i, offset = len;
			u32 data_len;

			for (i = 0; i < sinfo->nr_frags; i++) {
				skb_frag_t *frag = &sinfo->frags[i];

				if (offset >= copy_size) {
					err = -ENOSPC;
					break;
				}

				data_len = min_t(u32, copy_size - offset,
						 skb_frag_size(frag));

				if (copy_to_user(data_out + offset,
						 skb_frag_address(frag),
						 data_len))
					goto out;

				offset += data_len;
			}
		}
	}

	if (copy_to_user(&uattr->test.data_size_out, &size, sizeof(size)))
		goto out;
	if (copy_to_user(&uattr->test.retval, &retval, sizeof(retval)))
		goto out;
	if (copy_to_user(&uattr->test.duration, &duration, sizeof(duration)))
		goto out;
	if (err != -ENOSPC)
		err = 0;
out:
	trace_bpf_test_finish(&err);
	return err;
}

/* Integer types of various sizes and pointer combinations cover a variety of
 * architecture-dependent calling conventions. 7+ can be supported in the
 * future.
 */
__diag_push();
__diag_ignore_all("-Wmissing-prototypes",
		  "Global functions as their definitions will be in vmlinux BTF");
__bpf_kfunc int bpf_fentry_test1(int a)
{
	return a + 1;
}
EXPORT_SYMBOL_GPL(bpf_fentry_test1);

int noinline bpf_fentry_test2(int a, u64 b)
{
	return a + b;
}

int noinline bpf_fentry_test3(char a, int b, u64 c)
{
	return a + b + c;
}

int noinline bpf_fentry_test4(void *a, char b, int c, u64 d)
{
	return (long)a + b + c + d;
}

int noinline bpf_fentry_test5(u64 a, void *b, short c, int d, u64 e)
{
	return a + (long)b + c + d + e;
}

int noinline bpf_fentry_test6(u64 a, void *b, short c, int d, void *e, u64 f)
{
	return a + (long)b + c + d + (long)e + f;
}

struct bpf_fentry_test_t {
	struct bpf_fentry_test_t *a;
};

int noinline bpf_fentry_test7(struct bpf_fentry_test_t *arg)
{
	return (long)arg;
}

int noinline bpf_fentry_test8(struct bpf_fentry_test_t *arg)
{
	return (long)arg->a;
}

__bpf_kfunc int bpf_modify_return_test(int a, int *b)
{
	*b += 1;
	return a + *b;
}

__bpf_kfunc u64 bpf_kfunc_call_test1(struct sock *sk, u32 a, u64 b, u32 c, u64 d)
{
	return a + b + c + d;
}

__bpf_kfunc int bpf_kfunc_call_test2(struct sock *sk, u32 a, u32 b)
{
	return a + b;
}

__bpf_kfunc struct sock *bpf_kfunc_call_test3(struct sock *sk)
{
	return sk;
}

long noinline bpf_kfunc_call_test4(signed char a, short b, int c, long d)
{
	/* Provoke the compiler to assume that the caller has sign-extended a,
	 * b and c on platforms where this is required (e.g. s390x).
	 */
	return (long)a + (long)b + (long)c + d;
}

struct prog_test_member1 {
	int a;
};

struct prog_test_member {
	struct prog_test_member1 m;
	int c;
};

struct prog_test_ref_kfunc {
	int a;
	int b;
	struct prog_test_member memb;
	struct prog_test_ref_kfunc *next;
	refcount_t cnt;
};

static struct prog_test_ref_kfunc prog_test_struct = {
	.a = 42,
	.b = 108,
	.next = &prog_test_struct,
	.cnt = REFCOUNT_INIT(1),
};

__bpf_kfunc struct prog_test_ref_kfunc *
bpf_kfunc_call_test_acquire(unsigned long *scalar_ptr)
{
	refcount_inc(&prog_test_struct.cnt);
	return &prog_test_struct;
}

__bpf_kfunc struct prog_test_member *
bpf_kfunc_call_memb_acquire(void)
{
	WARN_ON_ONCE(1);
	return NULL;
}

__bpf_kfunc void bpf_kfunc_call_test_release(struct prog_test_ref_kfunc *p)
{
	if (!p)
		return;

	refcount_dec(&p->cnt);
}

__bpf_kfunc void bpf_kfunc_call_memb_release(struct prog_test_member *p)
{
}

__bpf_kfunc void bpf_kfunc_call_memb1_release(struct prog_test_member1 *p)
{
	WARN_ON_ONCE(1);
}

static int *__bpf_kfunc_call_test_get_mem(struct prog_test_ref_kfunc *p, const int size)
{
	if (size > 2 * sizeof(int))
		return NULL;

	return (int *)p;
}

__bpf_kfunc int *bpf_kfunc_call_test_get_rdwr_mem(struct prog_test_ref_kfunc *p,
						  const int rdwr_buf_size)
{
	return __bpf_kfunc_call_test_get_mem(p, rdwr_buf_size);
}

__bpf_kfunc int *bpf_kfunc_call_test_get_rdonly_mem(struct prog_test_ref_kfunc *p,
						    const int rdonly_buf_size)
{
	return __bpf_kfunc_call_test_get_mem(p, rdonly_buf_size);
}

/* The next two functions can't really be used for testing except to ensure
 * that the verifier rejects the call.
 * Acquire functions must return struct pointers, so these ones are
 * failing.
 */
__bpf_kfunc int *bpf_kfunc_call_test_acq_rdonly_mem(struct prog_test_ref_kfunc *p,
						    const int rdonly_buf_size)
{
	return __bpf_kfunc_call_test_get_mem(p, rdonly_buf_size);
}

__bpf_kfunc void bpf_kfunc_call_int_mem_release(int *p)
{
}

__bpf_kfunc struct prog_test_ref_kfunc *
bpf_kfunc_call_test_kptr_get(struct prog_test_ref_kfunc **pp, int a, int b)
{
	struct prog_test_ref_kfunc *p = READ_ONCE(*pp);

	if (!p)
		return NULL;
	refcount_inc(&p->cnt);
	return p;
}

struct prog_test_pass1 {
	int x0;
	struct {
		int x1;
		struct {
			int x2;
			struct {
				int x3;
			};
		};
	};
};

struct prog_test_pass2 {
	int len;
	short arr1[4];
	struct {
		char arr2[4];
		unsigned long arr3[8];
	} x;
};

struct prog_test_fail1 {
	void *p;
	int x;
};

struct prog_test_fail2 {
	int x8;
	struct prog_test_pass1 x;
};

struct prog_test_fail3 {
	int len;
	char arr1[2];
	char arr2[];
};

__bpf_kfunc void bpf_kfunc_call_test_pass_ctx(struct __sk_buff *skb)
{
}

__bpf_kfunc void bpf_kfunc_call_test_pass1(struct prog_test_pass1 *p)
{
}

__bpf_kfunc void bpf_kfunc_call_test_pass2(struct prog_test_pass2 *p)
{
}

__bpf_kfunc void bpf_kfunc_call_test_fail1(struct prog_test_fail1 *p)
{
}

__bpf_kfunc void bpf_kfunc_call_test_fail2(struct prog_test_fail2 *p)
{
}

__bpf_kfunc void bpf_kfunc_call_test_fail3(struct prog_test_fail3 *p)
{
}

__bpf_kfunc void bpf_kfunc_call_test_mem_len_pass1(void *mem, int mem__sz)
{
}

__bpf_kfunc void bpf_kfunc_call_test_mem_len_fail1(void *mem, int len)
{
}

__bpf_kfunc void bpf_kfunc_call_test_mem_len_fail2(u64 *mem, int len)
{
}

__bpf_kfunc void bpf_kfunc_call_test_ref(struct prog_test_ref_kfunc *p)
{
}

__bpf_kfunc void bpf_kfunc_call_test_destructive(void)
{
}

__bpf_kfunc static u32 bpf_kfunc_call_test_static_unused_arg(u32 arg, u32 unused)
{
	return arg;
}

__diag_pop();

BTF_SET8_START(bpf_test_modify_return_ids)
BTF_ID_FLAGS(func, bpf_modify_return_test)
BTF_ID_FLAGS(func, bpf_fentry_test1, KF_SLEEPABLE)
BTF_SET8_END(bpf_test_modify_return_ids)

static const struct btf_kfunc_id_set bpf_test_modify_return_set = {
	.owner = THIS_MODULE,
	.set = &bpf_test_modify_return_ids,
};

BTF_SET8_START(test_sk_check_kfunc_ids)
BTF_ID_FLAGS(func, bpf_kfunc_call_test1)
BTF_ID_FLAGS(func, bpf_kfunc_call_test2)
BTF_ID_FLAGS(func, bpf_kfunc_call_test3)
BTF_ID_FLAGS(func, bpf_kfunc_call_test4)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_acquire, KF_ACQUIRE | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_kfunc_call_memb_acquire, KF_ACQUIRE | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_release, KF_RELEASE)
BTF_ID_FLAGS(func, bpf_kfunc_call_memb_release, KF_RELEASE)
BTF_ID_FLAGS(func, bpf_kfunc_call_memb1_release, KF_RELEASE)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_get_rdwr_mem, KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_get_rdonly_mem, KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_acq_rdonly_mem, KF_ACQUIRE | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_kfunc_call_int_mem_release, KF_RELEASE)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_kptr_get, KF_ACQUIRE | KF_RET_NULL | KF_KPTR_GET)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_pass_ctx)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_pass1)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_pass2)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_fail1)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_fail2)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_fail3)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_mem_len_pass1)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_mem_len_fail1)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_mem_len_fail2)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_ref, KF_TRUSTED_ARGS)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_destructive, KF_DESTRUCTIVE)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_static_unused_arg)
BTF_SET8_END(test_sk_check_kfunc_ids)

static void *bpf_test_init(const union bpf_attr *kattr, u32 user_size,
			   u32 size, u32 headroom, u32 tailroom)
{
	void __user *data_in = u64_to_user_ptr(kattr->test.data_in);
	void *data;

	if (size < ETH_HLEN || size > PAGE_SIZE - headroom - tailroom)
		return ERR_PTR(-EINVAL);

	if (user_size > size)
		return ERR_PTR(-EMSGSIZE);

	size = SKB_DATA_ALIGN(size);
	data = kzalloc(size + headroom + tailroom, GFP_USER);
	if (!data)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(data + headroom, data_in, user_size)) {
		kfree(data);
		return ERR_PTR(-EFAULT);
	}

	return data;
}

int bpf_prog_test_run_tracing(struct bpf_prog *prog,
			      const union bpf_attr *kattr,
			      union bpf_attr __user *uattr)
{
	struct bpf_fentry_test_t arg = {};
	u16 side_effect = 0, ret = 0;
	int b = 2, err = -EFAULT;
	u32 retval = 0;

	if (kattr->test.flags || kattr->test.cpu || kattr->test.batch_size)
		return -EINVAL;

	switch (prog->expected_attach_type) {
	case BPF_TRACE_FENTRY:
	case BPF_TRACE_FEXIT:
		if (bpf_fentry_test1(1) != 2 ||
		    bpf_fentry_test2(2, 3) != 5 ||
		    bpf_fentry_test3(4, 5, 6) != 15 ||
		    bpf_fentry_test4((void *)7, 8, 9, 10) != 34 ||
		    bpf_fentry_test5(11, (void *)12, 13, 14, 15) != 65 ||
		    bpf_fentry_test6(16, (void *)17, 18, 19, (void *)20, 21) != 111 ||
		    bpf_fentry_test7((struct bpf_fentry_test_t *)0) != 0 ||
		    bpf_fentry_test8(&arg) != 0)
			goto out;
		break;
	case BPF_MODIFY_RETURN:
		ret = bpf_modify_return_test(1, &b);
		if (b != 2)
			side_effect = 1;
		break;
	default:
		goto out;
	}

	retval = ((u32)side_effect << 16) | ret;
	if (copy_to_user(&uattr->test.retval, &retval, sizeof(retval)))
		goto out;

	err = 0;
out:
	trace_bpf_test_finish(&err);
	return err;
}

struct bpf_raw_tp_test_run_info {
	struct bpf_prog *prog;
	void *ctx;
	u32 retval;
};

static void
__bpf_prog_test_run_raw_tp(void *data)
{
	struct bpf_raw_tp_test_run_info *info = data;

	rcu_read_lock();
	info->retval = bpf_prog_run(info->prog, info->ctx);
	rcu_read_unlock();
}

int bpf_prog_test_run_raw_tp(struct bpf_prog *prog,
			     const union bpf_attr *kattr,
			     union bpf_attr __user *uattr)
{
	void __user *ctx_in = u64_to_user_ptr(kattr->test.ctx_in);
	__u32 ctx_size_in = kattr->test.ctx_size_in;
	struct bpf_raw_tp_test_run_info info;
	int cpu = kattr->test.cpu, err = 0;
	int current_cpu;

	/* doesn't support data_in/out, ctx_out, duration, or repeat */
	if (kattr->test.data_in || kattr->test.data_out ||
	    kattr->test.ctx_out || kattr->test.duration ||
	    kattr->test.repeat || kattr->test.batch_size)
		return -EINVAL;

	if (ctx_size_in < prog->aux->max_ctx_offset ||
	    ctx_size_in > MAX_BPF_FUNC_ARGS * sizeof(u64))
		return -EINVAL;

	if ((kattr->test.flags & BPF_F_TEST_RUN_ON_CPU) == 0 && cpu != 0)
		return -EINVAL;

	if (ctx_size_in) {
		info.ctx = memdup_user(ctx_in, ctx_size_in);
		if (IS_ERR(info.ctx))
			return PTR_ERR(info.ctx);
	} else {
		info.ctx = NULL;
	}

	info.prog = prog;

	current_cpu = get_cpu();
	if ((kattr->test.flags & BPF_F_TEST_RUN_ON_CPU) == 0 ||
	    cpu == current_cpu) {
		__bpf_prog_test_run_raw_tp(&info);
	} else if (cpu >= nr_cpu_ids || !cpu_online(cpu)) {
		/* smp_call_function_single() also checks cpu_online()
		 * after csd_lock(). However, since cpu is from user
		 * space, let's do an extra quick check to filter out
		 * invalid value before smp_call_function_single().
		 */
		err = -ENXIO;
	} else {
		err = smp_call_function_single(cpu, __bpf_prog_test_run_raw_tp,
					       &info, 1);
	}
	put_cpu();

	if (!err &&
	    copy_to_user(&uattr->test.retval, &info.retval, sizeof(u32)))
		err = -EFAULT;

	kfree(info.ctx);
	return err;
}

static void *bpf_ctx_init(const union bpf_attr *kattr, u32 max_size)
{
	void __user *data_in = u64_to_user_ptr(kattr->test.ctx_in);
	void __user *data_out = u64_to_user_ptr(kattr->test.ctx_out);
	u32 size = kattr->test.ctx_size_in;
	void *data;
	int err;

	if (!data_in && !data_out)
		return NULL;

	data = kzalloc(max_size, GFP_USER);
	if (!data)
		return ERR_PTR(-ENOMEM);

	if (data_in) {
		err = bpf_check_uarg_tail_zero(USER_BPFPTR(data_in), max_size, size);
		if (err) {
			kfree(data);
			return ERR_PTR(err);
		}

		size = min_t(u32, max_size, size);
		if (copy_from_user(data, data_in, size)) {
			kfree(data);
			return ERR_PTR(-EFAULT);
		}
	}
	return data;
}

static int bpf_ctx_finish(const union bpf_attr *kattr,
			  union bpf_attr __user *uattr, const void *data,
			  u32 size)
{
	void __user *data_out = u64_to_user_ptr(kattr->test.ctx_out);
	int err = -EFAULT;
	u32 copy_size = size;

	if (!data || !data_out)
		return 0;

	if (copy_size > kattr->test.ctx_size_out) {
		copy_size = kattr->test.ctx_size_out;
		err = -ENOSPC;
	}

	if (copy_to_user(data_out, data, copy_size))
		goto out;
	if (copy_to_user(&uattr->test.ctx_size_out, &size, sizeof(size)))
		goto out;
	if (err != -ENOSPC)
		err = 0;
out:
	return err;
}

/**
 * range_is_zero - test whether buffer is initialized
 * @buf: buffer to check
 * @from: check from this position
 * @to: check up until (excluding) this position
 *
 * This function returns true if there is no non-zero byte
 * in buf in the range [from, to).
 */
static inline bool range_is_zero(void *buf, size_t from, size_t to)
{
	return !memchr_inv((u8 *)buf + from, 0, to - from);
}

static int convert___skb_to_skb(struct sk_buff *skb, struct __sk_buff *__skb)
{
	struct qdisc_skb_cb *cb = (struct qdisc_skb_cb *)skb->cb;

	if (!__skb)
		return 0;

	/* make sure the fields we don't use are zeroed */
	if (!range_is_zero(__skb, 0, offsetof(struct __sk_buff, mark)))
		return -EINVAL;

	/* mark is allowed */

	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, mark),
			   offsetof(struct __sk_buff, priority)))
		return -EINVAL;

	/* priority is allowed */
	/* ingress_ifindex is allowed */
	/* ifindex is allowed */

	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, ifindex),
			   offsetof(struct __sk_buff, cb)))
		return -EINVAL;

	/* cb is allowed */

	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, cb),
			   offsetof(struct __sk_buff, tstamp)))
		return -EINVAL;

	/* tstamp is allowed */
	/* wire_len is allowed */
	/* gso_segs is allowed */

	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, gso_segs),
			   offsetof(struct __sk_buff, gso_size)))
		return -EINVAL;

	/* gso_size is allowed */

	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, gso_size),
			   offsetof(struct __sk_buff, hwtstamp)))
		return -EINVAL;

	/* hwtstamp is allowed */

	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, hwtstamp),
			   sizeof(struct __sk_buff)))
		return -EINVAL;

	skb->mark = __skb->mark;
	skb->priority = __skb->priority;
	skb->skb_iif = __skb->ingress_ifindex;
	skb->tstamp = __skb->tstamp;
	memcpy(&cb->data, __skb->cb, QDISC_CB_PRIV_LEN);

	if (__skb->wire_len == 0) {
		cb->pkt_len = skb->len;
	} else {
		if (__skb->wire_len < skb->len ||
		    __skb->wire_len > GSO_LEGACY_MAX_SIZE)
			return -EINVAL;
		cb->pkt_len = __skb->wire_len;
	}

	if (__skb->gso_segs > GSO_MAX_SEGS)
		return -EINVAL;
	skb_shinfo(skb)->gso_segs = __skb->gso_segs;
	skb_shinfo(skb)->gso_size = __skb->gso_size;
	skb_shinfo(skb)->hwtstamps.hwtstamp = __skb->hwtstamp;

	return 0;
}

static void convert_skb_to___skb(struct sk_buff *skb, struct __sk_buff *__skb)
{
	struct qdisc_skb_cb *cb = (struct qdisc_skb_cb *)skb->cb;

	if (!__skb)
		return;

	__skb->mark = skb->mark;
	__skb->priority = skb->priority;
	__skb->ingress_ifindex = skb->skb_iif;
	__skb->ifindex = skb->dev->ifindex;
	__skb->tstamp = skb->tstamp;
	memcpy(__skb->cb, &cb->data, QDISC_CB_PRIV_LEN);
	__skb->wire_len = cb->pkt_len;
	__skb->gso_segs = skb_shinfo(skb)->gso_segs;
	__skb->hwtstamp = skb_shinfo(skb)->hwtstamps.hwtstamp;
}

static struct proto bpf_dummy_proto = {
	.name = "bpf_dummy",
	.owner = THIS_MODULE,
	.obj_size = sizeof(struct sock),
};

int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
			  union bpf_attr __user *uattr)
{
	bool is_l2 = false, is_direct_pkt_access = false;
	struct net *net = current->nsproxy->net_ns;
	struct net_device *dev = net->loopback_dev;
	u32 size = kattr->test.data_size_in;
	u32 repeat = kattr->test.repeat;
	struct __sk_buff *ctx = NULL;
	u32 retval, duration;
	int hh_len = ETH_HLEN;
	struct sk_buff *skb;
	struct sock *sk;
	void *data;
	int ret;

	if (kattr->test.flags || kattr->test.cpu || kattr->test.batch_size)
		return -EINVAL;

	data = bpf_test_init(kattr, kattr->test.data_size_in,
			     size, NET_SKB_PAD + NET_IP_ALIGN,
			     SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
	if (IS_ERR(data))
		return PTR_ERR(data);

	ctx = bpf_ctx_init(kattr, sizeof(struct __sk_buff));
	if (IS_ERR(ctx)) {
		kfree(data);
		return PTR_ERR(ctx);
	}

	switch (prog->type) {
	case BPF_PROG_TYPE_SCHED_CLS:
	case BPF_PROG_TYPE_SCHED_ACT:
		is_l2 = true;
		fallthrough;
	case BPF_PROG_TYPE_LWT_IN:
	case BPF_PROG_TYPE_LWT_OUT:
	case BPF_PROG_TYPE_LWT_XMIT:
		is_direct_pkt_access = true;
		break;
	default:
		break;
	}

	sk = sk_alloc(net, AF_UNSPEC, GFP_USER, &bpf_dummy_proto, 1);
	if (!sk) {
		kfree(data);
		kfree(ctx);
		return -ENOMEM;
	}
	sock_init_data(NULL, sk);

	skb = slab_build_skb(data);
	if (!skb) {
		kfree(data);
		kfree(ctx);
		sk_free(sk);
		return -ENOMEM;
	}
	skb->sk = sk;

	skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
	__skb_put(skb, size);
	if (ctx && ctx->ifindex > 1) {
		dev = dev_get_by_index(net, ctx->ifindex);
		if (!dev) {
			ret = -ENODEV;
			goto out;
		}
	}
	skb->protocol = eth_type_trans(skb, dev);
	skb_reset_network_header(skb);

	switch (skb->protocol) {
	case htons(ETH_P_IP):
		sk->sk_family = AF_INET;
		if (sizeof(struct iphdr) <= skb_headlen(skb)) {
			sk->sk_rcv_saddr = ip_hdr(skb)->saddr;
			sk->sk_daddr = ip_hdr(skb)->daddr;
		}
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		sk->sk_family = AF_INET6;
		if (sizeof(struct ipv6hdr) <= skb_headlen(skb)) {
			sk->sk_v6_rcv_saddr = ipv6_hdr(skb)->saddr;
			sk->sk_v6_daddr = ipv6_hdr(skb)->daddr;
		}
		break;
#endif
	default:
		break;
	}

	if (is_l2)
		__skb_push(skb, hh_len);
	if (is_direct_pkt_access)
		bpf_compute_data_pointers(skb);
	ret = convert___skb_to_skb(skb, ctx);
	if (ret)
		goto out;
	ret = bpf_test_run(prog, skb, repeat, &retval, &duration, false);
	if (ret)
		goto out;
	if (!is_l2) {
		if (skb_headroom(skb) < hh_len) {
			int nhead = HH_DATA_ALIGN(hh_len - skb_headroom(skb));

			if (pskb_expand_head(skb, nhead, 0, GFP_USER)) {
				ret = -ENOMEM;
				goto out;
			}
		}
		memset(__skb_push(skb, hh_len), 0, hh_len);
	}
	convert_skb_to___skb(skb, ctx);

	size = skb->len;
	/* bpf program can never convert linear skb to non-linear */
	if (WARN_ON_ONCE(skb_is_nonlinear(skb)))
		size = skb_headlen(skb);
	ret = bpf_test_finish(kattr, uattr, skb->data, NULL, size, retval,
			      duration);
	if (!ret)
		ret = bpf_ctx_finish(kattr, uattr, ctx,
				     sizeof(struct __sk_buff));
out:
	if (dev && dev != net->loopback_dev)
		dev_put(dev);
	kfree_skb(skb);
	sk_free(sk);
	kfree(ctx);
	return ret;
}

static int xdp_convert_md_to_buff(struct xdp_md *xdp_md, struct xdp_buff *xdp)
{
	unsigned int ingress_ifindex, rx_queue_index;
	struct netdev_rx_queue *rxqueue;
	struct net_device *device;

	if (!xdp_md)
		return 0;

	if (xdp_md->egress_ifindex != 0)
		return -EINVAL;

	ingress_ifindex = xdp_md->ingress_ifindex;
	rx_queue_index = xdp_md->rx_queue_index;
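
	/* a queue index is only meaningful together with an ingress ifindex */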
	if (!ingress_ifindex && rx_queue_index)
		return -EINVAL;

	if (ingress_ifindex) {
		device = dev_get_by_index(current->nsproxy->net_ns,
					  ingress_ifindex);
		if (!device)
			return -ENODEV;

		if (rx_queue_index >= device->real_num_rx_queues)
			goto free_dev;

		rxqueue = __netif_get_rx_queue(device, rx_queue_index);

		if (!xdp_rxq_info_is_reg(&rxqueue->xdp_rxq))
			goto free_dev;

		xdp->rxq = &rxqueue->xdp_rxq;
		/* The device is now tracked in the xdp->rxq for later
		 * dev_put()
		 */
	}

	xdp->data = xdp->data_meta + xdp_md->data;
	return 0;

free_dev:
	dev_put(device);
	return -EINVAL;
}

static void xdp_convert_buff_to_md(struct xdp_buff *xdp, struct xdp_md *xdp_md)
{
	if (!xdp_md)
		return;

	xdp_md->data = xdp->data - xdp->data_meta;
	xdp_md->data_end = xdp->data_end - xdp->data_meta;

	if (xdp_md->ingress_ifindex)
		dev_put(xdp->rxq->dev);
}

int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
			  union bpf_attr __user *uattr)
{
	bool do_live = (kattr->test.flags & BPF_F_TEST_XDP_LIVE_FRAMES);
	u32 tailroom = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	u32 batch_size = kattr->test.batch_size;
	u32 retval = 0, duration, max_data_sz;
	u32 size = kattr->test.data_size_in;
	u32 headroom = XDP_PACKET_HEADROOM;
	u32 repeat = kattr->test.repeat;
	struct netdev_rx_queue *rxqueue;
	struct skb_shared_info *sinfo;
	struct xdp_buff xdp = {};
	int i, ret = -EINVAL;
	struct xdp_md *ctx;
	void *data;

	if (prog->expected_attach_type == BPF_XDP_DEVMAP ||
	    prog->expected_attach_type == BPF_XDP_CPUMAP)
		return -EINVAL;

	if (kattr->test.flags & ~BPF_F_TEST_XDP_LIVE_FRAMES)
		return -EINVAL;

	if (bpf_prog_is_dev_bound(prog->aux))
		return -EINVAL;

	if (do_live) {
		if (!batch_size)
			batch_size = NAPI_POLL_WEIGHT;
		else if (batch_size > TEST_XDP_MAX_BATCH)
			return -E2BIG;

		headroom += sizeof(struct xdp_page_head);
	} else if (batch_size) {
		return -EINVAL;
	}

	ctx = bpf_ctx_init(kattr, sizeof(struct xdp_md));
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	if (ctx) {
		/* There can't be user provided data before the meta data */
		if (ctx->data_meta || ctx->data_end != size ||
		    ctx->data > ctx->data_end ||
		    unlikely(xdp_metalen_invalid(ctx->data)) ||
		    (do_live && (kattr->test.data_out || kattr->test.ctx_out)))
			goto free_ctx;
		/* Meta data is allocated from the headroom */
		headroom -= ctx->data;
	}

	max_data_sz = 4096 - headroom - tailroom;
	if (size > max_data_sz) {
		/* disallow live data mode for jumbo frames */
		if (do_live)
			goto free_ctx;
		size = max_data_sz;
	}

	data = bpf_test_init(kattr, size, max_data_sz, headroom, tailroom);
	if (IS_ERR(data)) {
		ret = PTR_ERR(data);
		goto free_ctx;
	}

	rxqueue = __netif_get_rx_queue(current->nsproxy->net_ns->loopback_dev, 0);
	rxqueue->xdp_rxq.frag_size = headroom + max_data_sz + tailroom;
	xdp_init_buff(&xdp, rxqueue->xdp_rxq.frag_size, &rxqueue->xdp_rxq);
	xdp_prepare_buff(&xdp, data, headroom, size, true);
	sinfo = xdp_get_shared_info_from_buff(&xdp);

	ret = xdp_convert_md_to_buff(ctx, &xdp);
	if (ret)
		goto free_data;

	if (unlikely(kattr->test.data_size_in > size)) {
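		/* Input data beyond the linear area is attached below as page
		 * fragments, so the program is run on a multi-buffer xdp_buff.
		 */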
		void __user *data_in = u64_to_user_ptr(kattr->test.data_in);

		while (size < kattr->test.data_size_in) {
			struct page *page;
			skb_frag_t *frag;
			u32 data_len;

			if (sinfo->nr_frags == MAX_SKB_FRAGS) {
				ret = -ENOMEM;
				goto out;
			}

			page = alloc_page(GFP_KERNEL);
			if (!page) {
				ret = -ENOMEM;
				goto out;
			}

			frag = &sinfo->frags[sinfo->nr_frags++];
			__skb_frag_set_page(frag, page);

			data_len = min_t(u32, kattr->test.data_size_in - size,
					 PAGE_SIZE);
			skb_frag_size_set(frag, data_len);

			if (copy_from_user(page_address(page), data_in + size,
					   data_len)) {
				ret = -EFAULT;
				goto out;
			}
			sinfo->xdp_frags_size += data_len;
			size += data_len;
		}
		xdp_buff_set_frags_flag(&xdp);
	}

	if (repeat > 1)
		bpf_prog_change_xdp(NULL, prog);

	if (do_live)
		ret = bpf_test_run_xdp_live(prog, &xdp, repeat, batch_size, &duration);
	else
		ret = bpf_test_run(prog, &xdp, repeat, &retval, &duration, true);
	/* We convert the xdp_buff back to an xdp_md before checking the return
	 * code so the reference count of any held netdevice will be decremented
	 * even if the test run failed.
	 */
	xdp_convert_buff_to_md(&xdp, ctx);
	if (ret)
		goto out;

	size = xdp.data_end - xdp.data_meta + sinfo->xdp_frags_size;
	ret = bpf_test_finish(kattr, uattr, xdp.data_meta, sinfo, size,
			      retval, duration);
	if (!ret)
		ret = bpf_ctx_finish(kattr, uattr, ctx,
				     sizeof(struct xdp_md));

out:
	if (repeat > 1)
		bpf_prog_change_xdp(prog, NULL);
free_data:
	for (i = 0; i < sinfo->nr_frags; i++)
		__free_page(skb_frag_page(&sinfo->frags[i]));
	kfree(data);
free_ctx:
	kfree(ctx);
	return ret;
}

static int verify_user_bpf_flow_keys(struct bpf_flow_keys *ctx)
{
	/* make sure the fields we don't use are zeroed */
	if (!range_is_zero(ctx, 0, offsetof(struct bpf_flow_keys, flags)))
		return -EINVAL;

	/* flags is allowed */

	if (!range_is_zero(ctx, offsetofend(struct bpf_flow_keys, flags),
			   sizeof(struct bpf_flow_keys)))
		return -EINVAL;

	return 0;
}

int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
				     const union bpf_attr *kattr,
				     union bpf_attr __user *uattr)
{
	struct bpf_test_timer t = { NO_PREEMPT };
	u32 size = kattr->test.data_size_in;
	struct bpf_flow_dissector ctx = {};
	u32 repeat = kattr->test.repeat;
	struct bpf_flow_keys *user_ctx;
	struct bpf_flow_keys flow_keys;
	const struct ethhdr *eth;
	unsigned int flags = 0;
	u32 retval, duration;
	void *data;
	int ret;

	if (kattr->test.flags || kattr->test.cpu || kattr->test.batch_size)
		return -EINVAL;

	if (size < ETH_HLEN)
		return -EINVAL;

	data = bpf_test_init(kattr, kattr->test.data_size_in, size, 0, 0);
	if (IS_ERR(data))
		return PTR_ERR(data);

	eth = (struct ethhdr *)data;

	if (!repeat)
		repeat = 1;

	user_ctx = bpf_ctx_init(kattr, sizeof(struct bpf_flow_keys));
	if (IS_ERR(user_ctx)) {
		kfree(data);
		return PTR_ERR(user_ctx);
	}
	if (user_ctx) {
		ret = verify_user_bpf_flow_keys(user_ctx);
		if (ret)
			goto out;
		flags = user_ctx->flags;
	}

	ctx.flow_keys = &flow_keys;
	ctx.data = data;
	ctx.data_end = (__u8 *)data + size;
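
	/* Run the dissector in a timed loop; bpf_test_timer_continue()
	 * accumulates the elapsed time and reports the average per-run
	 * time as the duration.
	 */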
	bpf_test_timer_enter(&t);
	do {
		retval = bpf_flow_dissect(prog, &ctx, eth->h_proto, ETH_HLEN,
					  size, flags);
	} while (bpf_test_timer_continue(&t, 1, repeat, &ret, &duration));
	bpf_test_timer_leave(&t);

	if (ret < 0)
		goto out;

	ret = bpf_test_finish(kattr, uattr, &flow_keys, NULL,
			      sizeof(flow_keys), retval, duration);
	if (!ret)
		ret = bpf_ctx_finish(kattr, uattr, user_ctx,
				     sizeof(struct bpf_flow_keys));

out:
	kfree(user_ctx);
	kfree(data);
	return ret;
}

int bpf_prog_test_run_sk_lookup(struct bpf_prog *prog, const union bpf_attr *kattr,
				union bpf_attr __user *uattr)
{
	struct bpf_test_timer t = { NO_PREEMPT };
	struct bpf_prog_array *progs = NULL;
	struct bpf_sk_lookup_kern ctx = {};
	u32 repeat = kattr->test.repeat;
	struct bpf_sk_lookup *user_ctx;
	u32 retval, duration;
	int ret = -EINVAL;

	if (kattr->test.flags || kattr->test.cpu || kattr->test.batch_size)
		return -EINVAL;

	if (kattr->test.data_in || kattr->test.data_size_in || kattr->test.data_out ||
	    kattr->test.data_size_out)
		return -EINVAL;

	if (!repeat)
		repeat = 1;

	user_ctx = bpf_ctx_init(kattr, sizeof(*user_ctx));
	if (IS_ERR(user_ctx))
		return PTR_ERR(user_ctx);

	if (!user_ctx)
		return -EINVAL;

	if (user_ctx->sk)
		goto out;

	if (!range_is_zero(user_ctx, offsetofend(typeof(*user_ctx), local_port), sizeof(*user_ctx)))
		goto out;

	if (user_ctx->local_port > U16_MAX) {
		ret = -ERANGE;
		goto out;
	}

	ctx.family = (u16)user_ctx->family;
	ctx.protocol = (u16)user_ctx->protocol;
	ctx.dport = (u16)user_ctx->local_port;
	ctx.sport = user_ctx->remote_port;

	switch (ctx.family) {
	case AF_INET:
		ctx.v4.daddr = (__force __be32)user_ctx->local_ip4;
		ctx.v4.saddr = (__force __be32)user_ctx->remote_ip4;
		break;

#if IS_ENABLED(CONFIG_IPV6)
	case AF_INET6:
		ctx.v6.daddr = (struct in6_addr *)user_ctx->local_ip6;
		ctx.v6.saddr = (struct in6_addr *)user_ctx->remote_ip6;
		break;
#endif

	default:
		ret = -EAFNOSUPPORT;
		goto out;
	}

	progs = bpf_prog_array_alloc(1, GFP_KERNEL);
	if (!progs) {
		ret = -ENOMEM;
		goto out;
	}

	progs->items[0].prog = prog;

	bpf_test_timer_enter(&t);
	do {
		ctx.selected_sk = NULL;
		retval = BPF_PROG_SK_LOOKUP_RUN_ARRAY(progs, ctx, bpf_prog_run);
	} while (bpf_test_timer_continue(&t, 1, repeat, &ret, &duration));
	bpf_test_timer_leave(&t);

	if (ret < 0)
		goto out;

	user_ctx->cookie = 0;
	if (ctx.selected_sk) {
		if (ctx.selected_sk->sk_reuseport && !ctx.no_reuseport) {
			ret = -EOPNOTSUPP;
			goto out;
		}

		user_ctx->cookie = sock_gen_cookie(ctx.selected_sk);
	}

	ret = bpf_test_finish(kattr, uattr, NULL, NULL, 0, retval, duration);
	if (!ret)
		ret = bpf_ctx_finish(kattr, uattr, user_ctx, sizeof(*user_ctx));

out:
	bpf_prog_array_free(progs);
	kfree(user_ctx);
	return ret;
}

int bpf_prog_test_run_syscall(struct bpf_prog *prog,
			      const union bpf_attr *kattr,
			      union bpf_attr __user *uattr)
{
	void __user *ctx_in = u64_to_user_ptr(kattr->test.ctx_in);
	__u32 ctx_size_in = kattr->test.ctx_size_in;
	void *ctx = NULL;
	u32 retval;
	int err = 0;

	/* doesn't support data_in/out, ctx_out, duration, repeat, or flags */
	if (kattr->test.data_in || kattr->test.data_out ||
	    kattr->test.ctx_out || kattr->test.duration ||
	    kattr->test.repeat || kattr->test.flags ||
	    kattr->test.batch_size)
		return -EINVAL;

	if (ctx_size_in < prog->aux->max_ctx_offset ||
	    ctx_size_in > U16_MAX)
		return -EINVAL;

	if (ctx_size_in) {
		ctx = memdup_user(ctx_in, ctx_size_in);
		if (IS_ERR(ctx))
			return PTR_ERR(ctx);
	}

	rcu_read_lock_trace();
	retval = bpf_prog_run_pin_on_cpu(prog, ctx);
	rcu_read_unlock_trace();

	if (copy_to_user(&uattr->test.retval, &retval, sizeof(u32))) {
		err = -EFAULT;
		goto out;
	}
	if (ctx_size_in)
		if (copy_to_user(ctx_in, ctx, ctx_size_in))
			err = -EFAULT;
out:
	kfree(ctx);
	return err;
}

static const struct btf_kfunc_id_set bpf_prog_test_kfunc_set = {
	.owner = THIS_MODULE,
	.set = &test_sk_check_kfunc_ids,
};

BTF_ID_LIST(bpf_prog_test_dtor_kfunc_ids)
BTF_ID(struct, prog_test_ref_kfunc)
BTF_ID(func, bpf_kfunc_call_test_release)
BTF_ID(struct, prog_test_member)
BTF_ID(func, bpf_kfunc_call_memb_release)

static int __init bpf_prog_test_run_init(void)
{
	const struct btf_id_dtor_kfunc bpf_prog_test_dtor_kfunc[] = {
		{
			.btf_id = bpf_prog_test_dtor_kfunc_ids[0],
			.kfunc_btf_id = bpf_prog_test_dtor_kfunc_ids[1]
		},
		{
			.btf_id = bpf_prog_test_dtor_kfunc_ids[2],
			.kfunc_btf_id = bpf_prog_test_dtor_kfunc_ids[3],
		},
	};
	int ret;

	ret = register_btf_fmodret_id_set(&bpf_test_modify_return_set);
	ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_SCHED_CLS, &bpf_prog_test_kfunc_set);
	ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING, &bpf_prog_test_kfunc_set);
	ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_SYSCALL, &bpf_prog_test_kfunc_set);
	return ret ?: register_btf_id_dtor_kfuncs(bpf_prog_test_dtor_kfunc,
						  ARRAY_SIZE(bpf_prog_test_dtor_kfunc),
						  THIS_MODULE);
}
late_initcall(bpf_prog_test_run_init);