// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2017 Facebook
 */
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/btf_ids.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/vmalloc.h>
#include <linux/etherdevice.h>
#include <linux/filter.h>
#include <linux/rcupdate_trace.h>
#include <linux/sched/signal.h>
#include <net/bpf_sk_storage.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <net/net_namespace.h>
#include <net/page_pool.h>
#include <linux/error-injection.h>
#include <linux/smp.h>
#include <linux/sock_diag.h>
#include <net/xdp.h>

#define CREATE_TRACE_POINTS
#include <trace/events/bpf_test_run.h>

struct bpf_test_timer {
	enum { NO_PREEMPT, NO_MIGRATE } mode;
	u32 i;
	u64 time_start, time_spent;
};

static void bpf_test_timer_enter(struct bpf_test_timer *t)
	__acquires(rcu)
{
	rcu_read_lock();
	if (t->mode == NO_PREEMPT)
		preempt_disable();
	else
		migrate_disable();

	t->time_start = ktime_get_ns();
}

static void bpf_test_timer_leave(struct bpf_test_timer *t)
	__releases(rcu)
{
	t->time_start = 0;

	if (t->mode == NO_PREEMPT)
		preempt_enable();
	else
		migrate_enable();
	rcu_read_unlock();
}

static bool bpf_test_timer_continue(struct bpf_test_timer *t, int iterations,
				    u32 repeat, int *err, u32 *duration)
	__must_hold(rcu)
{
	t->i += iterations;
	if (t->i >= repeat) {
		/* We're done. */
		t->time_spent += ktime_get_ns() - t->time_start;
		do_div(t->time_spent, t->i);
		*duration = t->time_spent > U32_MAX ? U32_MAX : (u32)t->time_spent;
		*err = 0;
		goto reset;
	}

	if (signal_pending(current)) {
		/* During iteration: we've been cancelled, abort. */
		*err = -EINTR;
		goto reset;
	}

	if (need_resched()) {
		/* During iteration: we need to reschedule between runs. */
		t->time_spent += ktime_get_ns() - t->time_start;
		bpf_test_timer_leave(t);
		cond_resched();
		bpf_test_timer_enter(t);
	}

	/* Do another round. */
	return true;

reset:
	t->i = 0;
	return false;
}
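/* The three helpers above implement a simple measurement loop; the callers in
 * this file all use them as:
 *
 *	bpf_test_timer_enter(&t);
 *	do {
 *		ret = <run the program once or in a batch>;
 *	} while (bpf_test_timer_continue(&t, iterations, repeat, &ret, &duration));
 *	bpf_test_timer_leave(&t);
 */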
/* We put this struct at the head of each page with a context and frame
 * initialised when the page is allocated, so we don't have to do this on each
 * repetition of the test run.
 */
struct xdp_page_head {
	struct xdp_buff orig_ctx;
	struct xdp_buff ctx;
	struct xdp_frame frm;
	u8 data[];
};

struct xdp_test_data {
	struct xdp_buff *orig_ctx;
	struct xdp_rxq_info rxq;
	struct net_device *dev;
	struct page_pool *pp;
	struct xdp_frame **frames;
	struct sk_buff **skbs;
	struct xdp_mem_info mem;
	u32 batch_size;
	u32 frame_cnt;
};

#define TEST_XDP_FRAME_SIZE (PAGE_SIZE - sizeof(struct xdp_page_head))
#define TEST_XDP_MAX_BATCH 256

static void xdp_test_run_init_page(struct page *page, void *arg)
{
	struct xdp_page_head *head = phys_to_virt(page_to_phys(page));
	struct xdp_buff *new_ctx, *orig_ctx;
	u32 headroom = XDP_PACKET_HEADROOM;
	struct xdp_test_data *xdp = arg;
	size_t frm_len, meta_len;
	struct xdp_frame *frm;
	void *data;

	orig_ctx = xdp->orig_ctx;
	frm_len = orig_ctx->data_end - orig_ctx->data_meta;
	meta_len = orig_ctx->data - orig_ctx->data_meta;
	headroom -= meta_len;

	new_ctx = &head->ctx;
	frm = &head->frm;
	data = &head->data;
	memcpy(data + headroom, orig_ctx->data_meta, frm_len);

	xdp_init_buff(new_ctx, TEST_XDP_FRAME_SIZE, &xdp->rxq);
	xdp_prepare_buff(new_ctx, data, headroom, frm_len, true);
	new_ctx->data = new_ctx->data_meta + meta_len;

	xdp_update_frame_from_buff(new_ctx, frm);
	frm->mem = new_ctx->rxq->mem;

	memcpy(&head->orig_ctx, new_ctx, sizeof(head->orig_ctx));
}

static int xdp_test_run_setup(struct xdp_test_data *xdp, struct xdp_buff *orig_ctx)
{
	struct page_pool *pp;
	int err = -ENOMEM;
	struct page_pool_params pp_params = {
		.order = 0,
		.flags = 0,
		.pool_size = xdp->batch_size,
		.nid = NUMA_NO_NODE,
		.init_callback = xdp_test_run_init_page,
		.init_arg = xdp,
	};

	xdp->frames = kvmalloc_array(xdp->batch_size, sizeof(void *), GFP_KERNEL);
	if (!xdp->frames)
		return -ENOMEM;

	xdp->skbs = kvmalloc_array(xdp->batch_size, sizeof(void *), GFP_KERNEL);
	if (!xdp->skbs)
		goto err_skbs;

	pp = page_pool_create(&pp_params);
	if (IS_ERR(pp)) {
		err = PTR_ERR(pp);
		goto err_pp;
	}

	/* will copy 'mem.id' into pp->xdp_mem_id */
	err = xdp_reg_mem_model(&xdp->mem, MEM_TYPE_PAGE_POOL, pp);
	if (err)
		goto err_mmodel;

	xdp->pp = pp;

	/* We create a 'fake' RXQ referencing the original dev, but with an
	 * xdp_mem_info pointing to our page_pool
	 */
	xdp_rxq_info_reg(&xdp->rxq, orig_ctx->rxq->dev, 0, 0);
	xdp->rxq.mem.type = MEM_TYPE_PAGE_POOL;
	xdp->rxq.mem.id = pp->xdp_mem_id;
	xdp->dev = orig_ctx->rxq->dev;
	xdp->orig_ctx = orig_ctx;

	return 0;

err_mmodel:
	page_pool_destroy(pp);
err_pp:
	kvfree(xdp->skbs);
err_skbs:
	kvfree(xdp->frames);
	return err;
}

static void xdp_test_run_teardown(struct xdp_test_data *xdp)
{
	xdp_unreg_mem_model(&xdp->mem);
	page_pool_destroy(xdp->pp);
	kvfree(xdp->frames);
	kvfree(xdp->skbs);
}

static bool ctx_was_changed(struct xdp_page_head *head)
{
	return head->orig_ctx.data != head->ctx.data ||
		head->orig_ctx.data_meta != head->ctx.data_meta ||
		head->orig_ctx.data_end != head->ctx.data_end;
}

static void reset_ctx(struct xdp_page_head *head)
{
	if (likely(!ctx_was_changed(head)))
		return;

	head->ctx.data = head->orig_ctx.data;
	head->ctx.data_meta = head->orig_ctx.data_meta;
	head->ctx.data_end = head->orig_ctx.data_end;
	xdp_update_frame_from_buff(&head->ctx, &head->frm);
}
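/* Frames that returned XDP_PASS from a batch run are turned into skbs in bulk
 * and injected into the local stack via netif_receive_skb_list().
 */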
static int xdp_recv_frames(struct xdp_frame **frames, int nframes,
			   struct sk_buff **skbs,
			   struct net_device *dev)
{
	gfp_t gfp = __GFP_ZERO | GFP_ATOMIC;
	int i, n;
	LIST_HEAD(list);

	n = kmem_cache_alloc_bulk(skbuff_cache, gfp, nframes, (void **)skbs);
	if (unlikely(n == 0)) {
		for (i = 0; i < nframes; i++)
			xdp_return_frame(frames[i]);
		return -ENOMEM;
	}

	for (i = 0; i < nframes; i++) {
		struct xdp_frame *xdpf = frames[i];
		struct sk_buff *skb = skbs[i];

		skb = __xdp_build_skb_from_frame(xdpf, skb, dev);
		if (!skb) {
			xdp_return_frame(xdpf);
			continue;
		}

		list_add_tail(&skb->list, &list);
	}
	netif_receive_skb_list(&list);

	return 0;
}

static int xdp_test_run_batch(struct xdp_test_data *xdp, struct bpf_prog *prog,
			      u32 repeat)
{
	struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
	int err = 0, act, ret, i, nframes = 0, batch_sz;
	struct xdp_frame **frames = xdp->frames;
	struct xdp_page_head *head;
	struct xdp_frame *frm;
	bool redirect = false;
	struct xdp_buff *ctx;
	struct page *page;

	batch_sz = min_t(u32, repeat, xdp->batch_size);

	local_bh_disable();
	xdp_set_return_frame_no_direct();

	for (i = 0; i < batch_sz; i++) {
		page = page_pool_dev_alloc_pages(xdp->pp);
		if (!page) {
			err = -ENOMEM;
			goto out;
		}

		head = phys_to_virt(page_to_phys(page));
		reset_ctx(head);
		ctx = &head->ctx;
		frm = &head->frm;
		xdp->frame_cnt++;

		act = bpf_prog_run_xdp(prog, ctx);

		/* if program changed pkt bounds we need to update the xdp_frame */
		if (unlikely(ctx_was_changed(head))) {
			ret = xdp_update_frame_from_buff(ctx, frm);
			if (ret) {
				xdp_return_buff(ctx);
				continue;
			}
		}

		switch (act) {
		case XDP_TX:
			/* we can't do a real XDP_TX since we're not in the
			 * driver, so turn it into a REDIRECT back to the same
			 * index
			 */
			ri->tgt_index = xdp->dev->ifindex;
			ri->map_id = INT_MAX;
			ri->map_type = BPF_MAP_TYPE_UNSPEC;
			fallthrough;
		case XDP_REDIRECT:
			redirect = true;
			ret = xdp_do_redirect_frame(xdp->dev, ctx, frm, prog);
			if (ret)
				xdp_return_buff(ctx);
			break;
		case XDP_PASS:
			frames[nframes++] = frm;
			break;
		default:
			bpf_warn_invalid_xdp_action(NULL, prog, act);
			fallthrough;
		case XDP_DROP:
			xdp_return_buff(ctx);
			break;
		}
	}

out:
	if (redirect)
		xdp_do_flush();
	if (nframes) {
		ret = xdp_recv_frames(frames, nframes, xdp->skbs, xdp->dev);
		if (ret)
			err = ret;
	}

	xdp_clear_return_frame_no_direct();
	local_bh_enable();
	return err;
}

static int bpf_test_run_xdp_live(struct bpf_prog *prog, struct xdp_buff *ctx,
				 u32 repeat, u32 batch_size, u32 *time)
{
	struct xdp_test_data xdp = { .batch_size = batch_size };
	struct bpf_test_timer t = { .mode = NO_MIGRATE };
	int ret;

	if (!repeat)
		repeat = 1;

	ret = xdp_test_run_setup(&xdp, ctx);
	if (ret)
		return ret;

	bpf_test_timer_enter(&t);
	do {
		xdp.frame_cnt = 0;
		ret = xdp_test_run_batch(&xdp, prog, repeat - t.i);
		if (unlikely(ret < 0))
			break;
	} while (bpf_test_timer_continue(&t, xdp.frame_cnt, repeat, &ret, time));
	bpf_test_timer_leave(&t);

	xdp_test_run_teardown(&xdp);
	return ret;
}
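/* Generic timed test run used for both skb and (non-live) XDP programs: run
 * the program 'repeat' times on the same context with cgroup storage
 * allocated, and report the average runtime through 'time'.
 */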
static int bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat,
			u32 *retval, u32 *time, bool xdp)
{
	struct bpf_prog_array_item item = {.prog = prog};
	struct bpf_run_ctx *old_ctx;
	struct bpf_cg_run_ctx run_ctx;
	struct bpf_test_timer t = { NO_MIGRATE };
	enum bpf_cgroup_storage_type stype;
	int ret;

	for_each_cgroup_storage_type(stype) {
		item.cgroup_storage[stype] = bpf_cgroup_storage_alloc(prog, stype);
		if (IS_ERR(item.cgroup_storage[stype])) {
			item.cgroup_storage[stype] = NULL;
			for_each_cgroup_storage_type(stype)
				bpf_cgroup_storage_free(item.cgroup_storage[stype]);
			return -ENOMEM;
		}
	}

	if (!repeat)
		repeat = 1;

	bpf_test_timer_enter(&t);
	old_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
	do {
		run_ctx.prog_item = &item;
		local_bh_disable();
		if (xdp)
			*retval = bpf_prog_run_xdp(prog, ctx);
		else
			*retval = bpf_prog_run(prog, ctx);
		local_bh_enable();
	} while (bpf_test_timer_continue(&t, 1, repeat, &ret, time));
	bpf_reset_run_ctx(old_ctx);
	bpf_test_timer_leave(&t);

	for_each_cgroup_storage_type(stype)
		bpf_cgroup_storage_free(item.cgroup_storage[stype]);

	return ret;
}

static int bpf_test_finish(const union bpf_attr *kattr,
			   union bpf_attr __user *uattr, const void *data,
			   struct skb_shared_info *sinfo, u32 size,
			   u32 retval, u32 duration)
{
	void __user *data_out = u64_to_user_ptr(kattr->test.data_out);
	int err = -EFAULT;
	u32 copy_size = size;

	/* Clamp copy if the user has provided a size hint, but copy the full
	 * buffer if not to retain old behaviour.
	 */
	if (kattr->test.data_size_out &&
	    copy_size > kattr->test.data_size_out) {
		copy_size = kattr->test.data_size_out;
		err = -ENOSPC;
	}

	if (data_out) {
		int len = sinfo ? copy_size - sinfo->xdp_frags_size : copy_size;

		if (len < 0) {
			err = -ENOSPC;
			goto out;
		}

		if (copy_to_user(data_out, data, len))
			goto out;

		if (sinfo) {
			int i, offset = len;
			u32 data_len;

			for (i = 0; i < sinfo->nr_frags; i++) {
				skb_frag_t *frag = &sinfo->frags[i];

				if (offset >= copy_size) {
					err = -ENOSPC;
					break;
				}

				data_len = min_t(u32, copy_size - offset,
						 skb_frag_size(frag));

				if (copy_to_user(data_out + offset,
						 skb_frag_address(frag),
						 data_len))
					goto out;

				offset += data_len;
			}
		}
	}

	if (copy_to_user(&uattr->test.data_size_out, &size, sizeof(size)))
		goto out;
	if (copy_to_user(&uattr->test.retval, &retval, sizeof(retval)))
		goto out;
	if (copy_to_user(&uattr->test.duration, &duration, sizeof(duration)))
		goto out;
	if (err != -ENOSPC)
		err = 0;
out:
	trace_bpf_test_finish(&err);
	return err;
}
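/* The dummy functions below exist so that fentry/fexit and fmod_ret programs
 * have well-known targets to attach to; bpf_prog_test_run_tracing() further
 * down calls them with fixed arguments and checks the expected results.
 */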
/* Integer types of various sizes and pointer combinations cover variety of
 * architecture dependent calling conventions. 7+ can be supported in the
 * future.
 */
__diag_push();
__diag_ignore_all("-Wmissing-prototypes",
		  "Global functions as their definitions will be in vmlinux BTF");
__bpf_kfunc int bpf_fentry_test1(int a)
{
	return a + 1;
}
EXPORT_SYMBOL_GPL(bpf_fentry_test1);

int noinline bpf_fentry_test2(int a, u64 b)
{
	return a + b;
}

int noinline bpf_fentry_test3(char a, int b, u64 c)
{
	return a + b + c;
}

int noinline bpf_fentry_test4(void *a, char b, int c, u64 d)
{
	return (long)a + b + c + d;
}

int noinline bpf_fentry_test5(u64 a, void *b, short c, int d, u64 e)
{
	return a + (long)b + c + d + e;
}

int noinline bpf_fentry_test6(u64 a, void *b, short c, int d, void *e, u64 f)
{
	return a + (long)b + c + d + (long)e + f;
}

struct bpf_fentry_test_t {
	struct bpf_fentry_test_t *a;
};

int noinline bpf_fentry_test7(struct bpf_fentry_test_t *arg)
{
	return (long)arg;
}

int noinline bpf_fentry_test8(struct bpf_fentry_test_t *arg)
{
	return (long)arg->a;
}

__bpf_kfunc int bpf_modify_return_test(int a, int *b)
{
	*b += 1;
	return a + *b;
}

__bpf_kfunc u64 bpf_kfunc_call_test1(struct sock *sk, u32 a, u64 b, u32 c, u64 d)
{
	return a + b + c + d;
}

__bpf_kfunc int bpf_kfunc_call_test2(struct sock *sk, u32 a, u32 b)
{
	return a + b;
}

__bpf_kfunc struct sock *bpf_kfunc_call_test3(struct sock *sk)
{
	return sk;
}

long noinline bpf_kfunc_call_test4(signed char a, short b, int c, long d)
{
	/* Provoke the compiler to assume that the caller has sign-extended a,
	 * b and c on platforms where this is required (e.g. s390x).
	 */
	return (long)a + (long)b + (long)c + d;
}
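/* prog_test_ref_kfunc and the acquire/release kfuncs below model a
 * reference-counted kernel object: _acquire() takes a reference on the static
 * prog_test_struct and returns it, _release() drops it again, which lets the
 * selftests exercise the verifier's reference tracking.
 */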
struct prog_test_member1 {
	int a;
};

struct prog_test_member {
	struct prog_test_member1 m;
	int c;
};

struct prog_test_ref_kfunc {
	int a;
	int b;
	struct prog_test_member memb;
	struct prog_test_ref_kfunc *next;
	refcount_t cnt;
};

static struct prog_test_ref_kfunc prog_test_struct = {
	.a = 42,
	.b = 108,
	.next = &prog_test_struct,
	.cnt = REFCOUNT_INIT(1),
};

__bpf_kfunc struct prog_test_ref_kfunc *
bpf_kfunc_call_test_acquire(unsigned long *scalar_ptr)
{
	refcount_inc(&prog_test_struct.cnt);
	return &prog_test_struct;
}

__bpf_kfunc struct prog_test_member *
bpf_kfunc_call_memb_acquire(void)
{
	WARN_ON_ONCE(1);
	return NULL;
}

__bpf_kfunc void bpf_kfunc_call_test_release(struct prog_test_ref_kfunc *p)
{
	if (!p)
		return;

	refcount_dec(&p->cnt);
}

__bpf_kfunc void bpf_kfunc_call_memb_release(struct prog_test_member *p)
{
}

__bpf_kfunc void bpf_kfunc_call_memb1_release(struct prog_test_member1 *p)
{
	WARN_ON_ONCE(1);
}

static int *__bpf_kfunc_call_test_get_mem(struct prog_test_ref_kfunc *p, const int size)
{
	if (size > 2 * sizeof(int))
		return NULL;

	return (int *)p;
}

__bpf_kfunc int *bpf_kfunc_call_test_get_rdwr_mem(struct prog_test_ref_kfunc *p,
						  const int rdwr_buf_size)
{
	return __bpf_kfunc_call_test_get_mem(p, rdwr_buf_size);
}

__bpf_kfunc int *bpf_kfunc_call_test_get_rdonly_mem(struct prog_test_ref_kfunc *p,
						    const int rdonly_buf_size)
{
	return __bpf_kfunc_call_test_get_mem(p, rdonly_buf_size);
}

/* The next two functions can't really be used for testing except to ensure
 * that the verifier rejects the call. Acquire functions must return struct
 * pointers, so these ones are failing.
 */
__bpf_kfunc int *bpf_kfunc_call_test_acq_rdonly_mem(struct prog_test_ref_kfunc *p,
						    const int rdonly_buf_size)
{
	return __bpf_kfunc_call_test_get_mem(p, rdonly_buf_size);
}

__bpf_kfunc void bpf_kfunc_call_int_mem_release(int *p)
{
}

__bpf_kfunc struct prog_test_ref_kfunc *
bpf_kfunc_call_test_kptr_get(struct prog_test_ref_kfunc **pp, int a, int b)
{
	struct prog_test_ref_kfunc *p = READ_ONCE(*pp);

	if (!p)
		return NULL;
	refcount_inc(&p->cnt);
	return p;
}

struct prog_test_pass1 {
	int x0;
	struct {
		int x1;
		struct {
			int x2;
			struct {
				int x3;
			};
		};
	};
};

struct prog_test_pass2 {
	int len;
	short arr1[4];
	struct {
		char arr2[4];
		unsigned long arr3[8];
	} x;
};

struct prog_test_fail1 {
	void *p;
	int x;
};

struct prog_test_fail2 {
	int x8;
	struct prog_test_pass1 x;
};

struct prog_test_fail3 {
	int len;
	char arr1[2];
	char arr2[];
};

__bpf_kfunc void bpf_kfunc_call_test_pass_ctx(struct __sk_buff *skb)
{
}

__bpf_kfunc void bpf_kfunc_call_test_pass1(struct prog_test_pass1 *p)
{
}

__bpf_kfunc void bpf_kfunc_call_test_pass2(struct prog_test_pass2 *p)
{
}

__bpf_kfunc void bpf_kfunc_call_test_fail1(struct prog_test_fail1 *p)
{
}

__bpf_kfunc void bpf_kfunc_call_test_fail2(struct prog_test_fail2 *p)
{
}

__bpf_kfunc void bpf_kfunc_call_test_fail3(struct prog_test_fail3 *p)
{
}

__bpf_kfunc void bpf_kfunc_call_test_mem_len_pass1(void *mem, int mem__sz)
{
}

__bpf_kfunc void bpf_kfunc_call_test_mem_len_fail1(void *mem, int len)
{
}

__bpf_kfunc void bpf_kfunc_call_test_mem_len_fail2(u64 *mem, int len)
{
}

__bpf_kfunc void bpf_kfunc_call_test_ref(struct prog_test_ref_kfunc *p)
{
}

__bpf_kfunc void bpf_kfunc_call_test_destructive(void)
{
}

__bpf_kfunc static u32 bpf_kfunc_call_test_static_unused_arg(u32 arg, u32 unused)
{
	return arg;
}

__diag_pop();

BTF_SET8_START(bpf_test_modify_return_ids)
BTF_ID_FLAGS(func, bpf_modify_return_test)
BTF_ID_FLAGS(func, bpf_fentry_test1, KF_SLEEPABLE)
BTF_SET8_END(bpf_test_modify_return_ids)

static const struct btf_kfunc_id_set bpf_test_modify_return_set = {
	.owner = THIS_MODULE,
	.set = &bpf_test_modify_return_ids,
};
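/* BTF ID set for the test kfuncs above, annotated with the verifier flags
 * (KF_ACQUIRE, KF_RELEASE, KF_RET_NULL, ...) each call is checked against.
 * The set is registered for SCHED_CLS, TRACING and SYSCALL programs in
 * bpf_prog_test_run_init() at the bottom of this file.
 */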
BTF_SET8_START(test_sk_check_kfunc_ids)
BTF_ID_FLAGS(func, bpf_kfunc_call_test1)
BTF_ID_FLAGS(func, bpf_kfunc_call_test2)
BTF_ID_FLAGS(func, bpf_kfunc_call_test3)
BTF_ID_FLAGS(func, bpf_kfunc_call_test4)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_acquire, KF_ACQUIRE | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_kfunc_call_memb_acquire, KF_ACQUIRE | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_release, KF_RELEASE)
BTF_ID_FLAGS(func, bpf_kfunc_call_memb_release, KF_RELEASE)
BTF_ID_FLAGS(func, bpf_kfunc_call_memb1_release, KF_RELEASE)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_get_rdwr_mem, KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_get_rdonly_mem, KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_acq_rdonly_mem, KF_ACQUIRE | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_kfunc_call_int_mem_release, KF_RELEASE)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_kptr_get, KF_ACQUIRE | KF_RET_NULL | KF_KPTR_GET)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_pass_ctx)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_pass1)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_pass2)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_fail1)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_fail2)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_fail3)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_mem_len_pass1)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_mem_len_fail1)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_mem_len_fail2)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_ref, KF_TRUSTED_ARGS)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_destructive, KF_DESTRUCTIVE)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_static_unused_arg)
BTF_SET8_END(test_sk_check_kfunc_ids)

static void *bpf_test_init(const union bpf_attr *kattr, u32 user_size,
			   u32 size, u32 headroom, u32 tailroom)
{
	void __user *data_in = u64_to_user_ptr(kattr->test.data_in);
	void *data;

	if (size < ETH_HLEN || size > PAGE_SIZE - headroom - tailroom)
		return ERR_PTR(-EINVAL);

	if (user_size > size)
		return ERR_PTR(-EMSGSIZE);

	size = SKB_DATA_ALIGN(size);
	data = kzalloc(size + headroom + tailroom, GFP_USER);
	if (!data)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(data + headroom, data_in, user_size)) {
		kfree(data);
		return ERR_PTR(-EFAULT);
	}

	return data;
}

int bpf_prog_test_run_tracing(struct bpf_prog *prog,
			      const union bpf_attr *kattr,
			      union bpf_attr __user *uattr)
{
	struct bpf_fentry_test_t arg = {};
	u16 side_effect = 0, ret = 0;
	int b = 2, err = -EFAULT;
	u32 retval = 0;

	if (kattr->test.flags || kattr->test.cpu || kattr->test.batch_size)
		return -EINVAL;

	switch (prog->expected_attach_type) {
	case BPF_TRACE_FENTRY:
	case BPF_TRACE_FEXIT:
		if (bpf_fentry_test1(1) != 2 ||
		    bpf_fentry_test2(2, 3) != 5 ||
		    bpf_fentry_test3(4, 5, 6) != 15 ||
		    bpf_fentry_test4((void *)7, 8, 9, 10) != 34 ||
		    bpf_fentry_test5(11, (void *)12, 13, 14, 15) != 65 ||
		    bpf_fentry_test6(16, (void *)17, 18, 19, (void *)20, 21) != 111 ||
		    bpf_fentry_test7((struct bpf_fentry_test_t *)0) != 0 ||
		    bpf_fentry_test8(&arg) != 0)
			goto out;
		break;
	case BPF_MODIFY_RETURN:
		ret = bpf_modify_return_test(1, &b);
		if (b != 2)
			side_effect = 1;
		break;
	default:
		goto out;
	}

	retval = ((u32)side_effect << 16) | ret;
	if (copy_to_user(&uattr->test.retval, &retval, sizeof(retval)))
		goto out;

	err = 0;
out:
	trace_bpf_test_finish(&err);
	return err;
}

struct bpf_raw_tp_test_run_info {
	struct bpf_prog *prog;
	void *ctx;
	u32 retval;
};

static void
__bpf_prog_test_run_raw_tp(void *data)
{
	struct bpf_raw_tp_test_run_info *info = data;

	rcu_read_lock();
	info->retval = bpf_prog_run(info->prog, info->ctx);
	rcu_read_unlock();
}
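/* Raw tracepoint test runs take no packet data; the program is run once,
 * either on the current CPU or, with BPF_F_TEST_RUN_ON_CPU set, via
 * smp_call_function_single() on the CPU requested in kattr->test.cpu.
 */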
int bpf_prog_test_run_raw_tp(struct bpf_prog *prog,
			     const union bpf_attr *kattr,
			     union bpf_attr __user *uattr)
{
	void __user *ctx_in = u64_to_user_ptr(kattr->test.ctx_in);
	__u32 ctx_size_in = kattr->test.ctx_size_in;
	struct bpf_raw_tp_test_run_info info;
	int cpu = kattr->test.cpu, err = 0;
	int current_cpu;

	/* doesn't support data_in/out, ctx_out, duration, or repeat */
	if (kattr->test.data_in || kattr->test.data_out ||
	    kattr->test.ctx_out || kattr->test.duration ||
	    kattr->test.repeat || kattr->test.batch_size)
		return -EINVAL;

	if (ctx_size_in < prog->aux->max_ctx_offset ||
	    ctx_size_in > MAX_BPF_FUNC_ARGS * sizeof(u64))
		return -EINVAL;

	if ((kattr->test.flags & BPF_F_TEST_RUN_ON_CPU) == 0 && cpu != 0)
		return -EINVAL;

	if (ctx_size_in) {
		info.ctx = memdup_user(ctx_in, ctx_size_in);
		if (IS_ERR(info.ctx))
			return PTR_ERR(info.ctx);
	} else {
		info.ctx = NULL;
	}

	info.prog = prog;

	current_cpu = get_cpu();
	if ((kattr->test.flags & BPF_F_TEST_RUN_ON_CPU) == 0 ||
	    cpu == current_cpu) {
		__bpf_prog_test_run_raw_tp(&info);
	} else if (cpu >= nr_cpu_ids || !cpu_online(cpu)) {
		/* smp_call_function_single() also checks cpu_online()
		 * after csd_lock(). However, since cpu is from user
		 * space, let's do an extra quick check to filter out
		 * invalid value before smp_call_function_single().
		 */
		err = -ENXIO;
	} else {
		err = smp_call_function_single(cpu, __bpf_prog_test_run_raw_tp,
					       &info, 1);
	}
	put_cpu();

	if (!err &&
	    copy_to_user(&uattr->test.retval, &info.retval, sizeof(u32)))
		err = -EFAULT;

	kfree(info.ctx);
	return err;
}

static void *bpf_ctx_init(const union bpf_attr *kattr, u32 max_size)
{
	void __user *data_in = u64_to_user_ptr(kattr->test.ctx_in);
	void __user *data_out = u64_to_user_ptr(kattr->test.ctx_out);
	u32 size = kattr->test.ctx_size_in;
	void *data;
	int err;

	if (!data_in && !data_out)
		return NULL;

	data = kzalloc(max_size, GFP_USER);
	if (!data)
		return ERR_PTR(-ENOMEM);

	if (data_in) {
		err = bpf_check_uarg_tail_zero(USER_BPFPTR(data_in), max_size, size);
		if (err) {
			kfree(data);
			return ERR_PTR(err);
		}

		size = min_t(u32, max_size, size);
		if (copy_from_user(data, data_in, size)) {
			kfree(data);
			return ERR_PTR(-EFAULT);
		}
	}
	return data;
}

static int bpf_ctx_finish(const union bpf_attr *kattr,
			  union bpf_attr __user *uattr, const void *data,
			  u32 size)
{
	void __user *data_out = u64_to_user_ptr(kattr->test.ctx_out);
	int err = -EFAULT;
	u32 copy_size = size;

	if (!data || !data_out)
		return 0;

	if (copy_size > kattr->test.ctx_size_out) {
		copy_size = kattr->test.ctx_size_out;
		err = -ENOSPC;
	}

	if (copy_to_user(data_out, data, copy_size))
		goto out;
	if (copy_to_user(&uattr->test.ctx_size_out, &size, sizeof(size)))
		goto out;
	if (err != -ENOSPC)
		err = 0;
out:
	return err;
}
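/* convert___skb_to_skb() below only honours a whitelist of __sk_buff fields;
 * range_is_zero() is used to reject contexts where any other field is set.
 */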
/**
 * range_is_zero - test whether buffer is initialized
 * @buf: buffer to check
 * @from: check from this position
 * @to: check up until (excluding) this position
 *
 * This function returns true if there is no non-zero byte
 * in the buf in the range [from, to).
 */
static inline bool range_is_zero(void *buf, size_t from, size_t to)
{
	return !memchr_inv((u8 *)buf + from, 0, to - from);
}

static int convert___skb_to_skb(struct sk_buff *skb, struct __sk_buff *__skb)
{
	struct qdisc_skb_cb *cb = (struct qdisc_skb_cb *)skb->cb;

	if (!__skb)
		return 0;

	/* make sure the fields we don't use are zeroed */
	if (!range_is_zero(__skb, 0, offsetof(struct __sk_buff, mark)))
		return -EINVAL;

	/* mark is allowed */

	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, mark),
			   offsetof(struct __sk_buff, priority)))
		return -EINVAL;

	/* priority is allowed */
	/* ingress_ifindex is allowed */
	/* ifindex is allowed */

	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, ifindex),
			   offsetof(struct __sk_buff, cb)))
		return -EINVAL;

	/* cb is allowed */

	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, cb),
			   offsetof(struct __sk_buff, tstamp)))
		return -EINVAL;

	/* tstamp is allowed */
	/* wire_len is allowed */
	/* gso_segs is allowed */

	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, gso_segs),
			   offsetof(struct __sk_buff, gso_size)))
		return -EINVAL;

	/* gso_size is allowed */

	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, gso_size),
			   offsetof(struct __sk_buff, hwtstamp)))
		return -EINVAL;

	/* hwtstamp is allowed */

	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, hwtstamp),
			   sizeof(struct __sk_buff)))
		return -EINVAL;

	skb->mark = __skb->mark;
	skb->priority = __skb->priority;
	skb->skb_iif = __skb->ingress_ifindex;
	skb->tstamp = __skb->tstamp;
	memcpy(&cb->data, __skb->cb, QDISC_CB_PRIV_LEN);

	if (__skb->wire_len == 0) {
		cb->pkt_len = skb->len;
	} else {
		if (__skb->wire_len < skb->len ||
		    __skb->wire_len > GSO_LEGACY_MAX_SIZE)
			return -EINVAL;
		cb->pkt_len = __skb->wire_len;
	}

	if (__skb->gso_segs > GSO_MAX_SEGS)
		return -EINVAL;
	skb_shinfo(skb)->gso_segs = __skb->gso_segs;
	skb_shinfo(skb)->gso_size = __skb->gso_size;
	skb_shinfo(skb)->hwtstamps.hwtstamp = __skb->hwtstamp;

	return 0;
}

static void convert_skb_to___skb(struct sk_buff *skb, struct __sk_buff *__skb)
{
	struct qdisc_skb_cb *cb = (struct qdisc_skb_cb *)skb->cb;

	if (!__skb)
		return;

	__skb->mark = skb->mark;
	__skb->priority = skb->priority;
	__skb->ingress_ifindex = skb->skb_iif;
	__skb->ifindex = skb->dev->ifindex;
	__skb->tstamp = skb->tstamp;
	memcpy(__skb->cb, &cb->data, QDISC_CB_PRIV_LEN);
	__skb->wire_len = cb->pkt_len;
	__skb->gso_segs = skb_shinfo(skb)->gso_segs;
	__skb->hwtstamp = skb_shinfo(skb)->hwtstamps.hwtstamp;
}

static struct proto bpf_dummy_proto = {
	.name = "bpf_dummy",
	.owner = THIS_MODULE,
	.obj_size = sizeof(struct sock),
};
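/* Run an skb-based program: wrap the user-supplied data in a real skb, attach
 * a dummy socket allocated from bpf_dummy_proto so programs dereferencing
 * skb->sk have something valid to look at, and run the program through
 * bpf_test_run().
 */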
int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
			  union bpf_attr __user *uattr)
{
	bool is_l2 = false, is_direct_pkt_access = false;
	struct net *net = current->nsproxy->net_ns;
	struct net_device *dev = net->loopback_dev;
	u32 size = kattr->test.data_size_in;
	u32 repeat = kattr->test.repeat;
	struct __sk_buff *ctx = NULL;
	u32 retval, duration;
	int hh_len = ETH_HLEN;
	struct sk_buff *skb;
	struct sock *sk;
	void *data;
	int ret;

	if (kattr->test.flags || kattr->test.cpu || kattr->test.batch_size)
		return -EINVAL;

	data = bpf_test_init(kattr, kattr->test.data_size_in,
			     size, NET_SKB_PAD + NET_IP_ALIGN,
			     SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
	if (IS_ERR(data))
		return PTR_ERR(data);

	ctx = bpf_ctx_init(kattr, sizeof(struct __sk_buff));
	if (IS_ERR(ctx)) {
		kfree(data);
		return PTR_ERR(ctx);
	}

	switch (prog->type) {
	case BPF_PROG_TYPE_SCHED_CLS:
	case BPF_PROG_TYPE_SCHED_ACT:
		is_l2 = true;
		fallthrough;
	case BPF_PROG_TYPE_LWT_IN:
	case BPF_PROG_TYPE_LWT_OUT:
	case BPF_PROG_TYPE_LWT_XMIT:
		is_direct_pkt_access = true;
		break;
	default:
		break;
	}

	sk = sk_alloc(net, AF_UNSPEC, GFP_USER, &bpf_dummy_proto, 1);
	if (!sk) {
		kfree(data);
		kfree(ctx);
		return -ENOMEM;
	}
	sock_init_data(NULL, sk);

	skb = slab_build_skb(data);
	if (!skb) {
		kfree(data);
		kfree(ctx);
		sk_free(sk);
		return -ENOMEM;
	}
	skb->sk = sk;

	skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
	__skb_put(skb, size);
	if (ctx && ctx->ifindex > 1) {
		dev = dev_get_by_index(net, ctx->ifindex);
		if (!dev) {
			ret = -ENODEV;
			goto out;
		}
	}
	skb->protocol = eth_type_trans(skb, dev);
	skb_reset_network_header(skb);

	switch (skb->protocol) {
	case htons(ETH_P_IP):
		sk->sk_family = AF_INET;
		if (sizeof(struct iphdr) <= skb_headlen(skb)) {
			sk->sk_rcv_saddr = ip_hdr(skb)->saddr;
			sk->sk_daddr = ip_hdr(skb)->daddr;
		}
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		sk->sk_family = AF_INET6;
		if (sizeof(struct ipv6hdr) <= skb_headlen(skb)) {
			sk->sk_v6_rcv_saddr = ipv6_hdr(skb)->saddr;
			sk->sk_v6_daddr = ipv6_hdr(skb)->daddr;
		}
		break;
#endif
	default:
		break;
	}

	if (is_l2)
		__skb_push(skb, hh_len);
	if (is_direct_pkt_access)
		bpf_compute_data_pointers(skb);
	ret = convert___skb_to_skb(skb, ctx);
	if (ret)
		goto out;
	ret = bpf_test_run(prog, skb, repeat, &retval, &duration, false);
	if (ret)
		goto out;
	if (!is_l2) {
		if (skb_headroom(skb) < hh_len) {
			int nhead = HH_DATA_ALIGN(hh_len - skb_headroom(skb));

			if (pskb_expand_head(skb, nhead, 0, GFP_USER)) {
				ret = -ENOMEM;
				goto out;
			}
		}
		memset(__skb_push(skb, hh_len), 0, hh_len);
	}
	convert_skb_to___skb(skb, ctx);

	size = skb->len;
	/* bpf program can never convert linear skb to non-linear */
	if (WARN_ON_ONCE(skb_is_nonlinear(skb)))
		size = skb_headlen(skb);
	ret = bpf_test_finish(kattr, uattr, skb->data, NULL, size, retval,
			      duration);
	if (!ret)
		ret = bpf_ctx_finish(kattr, uattr, ctx,
				     sizeof(struct __sk_buff));
out:
	if (dev && dev != net->loopback_dev)
		dev_put(dev);
	kfree_skb(skb);
	sk_free(sk);
	kfree(ctx);
	return ret;
}
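/* Translate the optional user-supplied xdp_md context into the kernel's
 * xdp_buff and back. When an ingress_ifindex is given, the corresponding
 * netdevice reference is held through xdp->rxq and dropped again in
 * xdp_convert_buff_to_md().
 */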
static int xdp_convert_md_to_buff(struct xdp_md *xdp_md, struct xdp_buff *xdp)
{
	unsigned int ingress_ifindex, rx_queue_index;
	struct netdev_rx_queue *rxqueue;
	struct net_device *device;

	if (!xdp_md)
		return 0;

	if (xdp_md->egress_ifindex != 0)
		return -EINVAL;

	ingress_ifindex = xdp_md->ingress_ifindex;
	rx_queue_index = xdp_md->rx_queue_index;

	if (!ingress_ifindex && rx_queue_index)
		return -EINVAL;

	if (ingress_ifindex) {
		device = dev_get_by_index(current->nsproxy->net_ns,
					  ingress_ifindex);
		if (!device)
			return -ENODEV;

		if (rx_queue_index >= device->real_num_rx_queues)
			goto free_dev;

		rxqueue = __netif_get_rx_queue(device, rx_queue_index);

		if (!xdp_rxq_info_is_reg(&rxqueue->xdp_rxq))
			goto free_dev;

		xdp->rxq = &rxqueue->xdp_rxq;
		/* The device is now tracked in the xdp->rxq for later
		 * dev_put()
		 */
	}

	xdp->data = xdp->data_meta + xdp_md->data;
	return 0;

free_dev:
	dev_put(device);
	return -EINVAL;
}

static void xdp_convert_buff_to_md(struct xdp_buff *xdp, struct xdp_md *xdp_md)
{
	if (!xdp_md)
		return;

	xdp_md->data = xdp->data - xdp->data_meta;
	xdp_md->data_end = xdp->data_end - xdp->data_meta;

	if (xdp_md->ingress_ifindex)
		dev_put(xdp->rxq->dev);
}

int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
			  union bpf_attr __user *uattr)
{
	bool do_live = (kattr->test.flags & BPF_F_TEST_XDP_LIVE_FRAMES);
	u32 tailroom = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	u32 batch_size = kattr->test.batch_size;
	u32 retval = 0, duration, max_data_sz;
	u32 size = kattr->test.data_size_in;
	u32 headroom = XDP_PACKET_HEADROOM;
	u32 repeat = kattr->test.repeat;
	struct netdev_rx_queue *rxqueue;
	struct skb_shared_info *sinfo;
	struct xdp_buff xdp = {};
	int i, ret = -EINVAL;
	struct xdp_md *ctx;
	void *data;

	if (prog->expected_attach_type == BPF_XDP_DEVMAP ||
	    prog->expected_attach_type == BPF_XDP_CPUMAP)
		return -EINVAL;

	if (kattr->test.flags & ~BPF_F_TEST_XDP_LIVE_FRAMES)
		return -EINVAL;

	if (bpf_prog_is_dev_bound(prog->aux))
		return -EINVAL;

	if (do_live) {
		if (!batch_size)
			batch_size = NAPI_POLL_WEIGHT;
		else if (batch_size > TEST_XDP_MAX_BATCH)
			return -E2BIG;

		headroom += sizeof(struct xdp_page_head);
	} else if (batch_size) {
		return -EINVAL;
	}

	ctx = bpf_ctx_init(kattr, sizeof(struct xdp_md));
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	if (ctx) {
		/* There can't be user provided data before the meta data */
		if (ctx->data_meta || ctx->data_end != size ||
		    ctx->data > ctx->data_end ||
		    unlikely(xdp_metalen_invalid(ctx->data)) ||
		    (do_live && (kattr->test.data_out || kattr->test.ctx_out)))
			goto free_ctx;
		/* Meta data is allocated from the headroom */
		headroom -= ctx->data;
	}

	max_data_sz = 4096 - headroom - tailroom;
	if (size > max_data_sz) {
		/* disallow live data mode for jumbo frames */
		if (do_live)
			goto free_ctx;
		size = max_data_sz;
	}

	data = bpf_test_init(kattr, size, max_data_sz, headroom, tailroom);
	if (IS_ERR(data)) {
		ret = PTR_ERR(data);
		goto free_ctx;
	}

	rxqueue = __netif_get_rx_queue(current->nsproxy->net_ns->loopback_dev, 0);
	rxqueue->xdp_rxq.frag_size = headroom + max_data_sz + tailroom;
	xdp_init_buff(&xdp, rxqueue->xdp_rxq.frag_size, &rxqueue->xdp_rxq);
	xdp_prepare_buff(&xdp, data, headroom, size, true);
	sinfo = xdp_get_shared_info_from_buff(&xdp);

	ret = xdp_convert_md_to_buff(ctx, &xdp);
	if (ret)
		goto free_data;
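	/* Data beyond what fits in the linear area is copied into page
	 * fragments below, turning the input into a multi-buffer xdp_buff.
	 */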
	if (unlikely(kattr->test.data_size_in > size)) {
		void __user *data_in = u64_to_user_ptr(kattr->test.data_in);

		while (size < kattr->test.data_size_in) {
			struct page *page;
			skb_frag_t *frag;
			u32 data_len;

			if (sinfo->nr_frags == MAX_SKB_FRAGS) {
				ret = -ENOMEM;
				goto out;
			}

			page = alloc_page(GFP_KERNEL);
			if (!page) {
				ret = -ENOMEM;
				goto out;
			}

			frag = &sinfo->frags[sinfo->nr_frags++];
			__skb_frag_set_page(frag, page);

			data_len = min_t(u32, kattr->test.data_size_in - size,
					 PAGE_SIZE);
			skb_frag_size_set(frag, data_len);

			if (copy_from_user(page_address(page), data_in + size,
					   data_len)) {
				ret = -EFAULT;
				goto out;
			}
			sinfo->xdp_frags_size += data_len;
			size += data_len;
		}
		xdp_buff_set_frags_flag(&xdp);
	}

	if (repeat > 1)
		bpf_prog_change_xdp(NULL, prog);

	if (do_live)
		ret = bpf_test_run_xdp_live(prog, &xdp, repeat, batch_size, &duration);
	else
		ret = bpf_test_run(prog, &xdp, repeat, &retval, &duration, true);
	/* We convert the xdp_buff back to an xdp_md before checking the return
	 * code so the reference count of any held netdevice will be decremented
	 * even if the test run failed.
	 */
	xdp_convert_buff_to_md(&xdp, ctx);
	if (ret)
		goto out;

	size = xdp.data_end - xdp.data_meta + sinfo->xdp_frags_size;
	ret = bpf_test_finish(kattr, uattr, xdp.data_meta, sinfo, size,
			      retval, duration);
	if (!ret)
		ret = bpf_ctx_finish(kattr, uattr, ctx,
				     sizeof(struct xdp_md));

out:
	if (repeat > 1)
		bpf_prog_change_xdp(prog, NULL);
free_data:
	for (i = 0; i < sinfo->nr_frags; i++)
		__free_page(skb_frag_page(&sinfo->frags[i]));
	kfree(data);
free_ctx:
	kfree(ctx);
	return ret;
}

static int verify_user_bpf_flow_keys(struct bpf_flow_keys *ctx)
{
	/* make sure the fields we don't use are zeroed */
	if (!range_is_zero(ctx, 0, offsetof(struct bpf_flow_keys, flags)))
		return -EINVAL;

	/* flags is allowed */

	if (!range_is_zero(ctx, offsetofend(struct bpf_flow_keys, flags),
			   sizeof(struct bpf_flow_keys)))
		return -EINVAL;

	return 0;
}
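/* Run a flow dissector program over the raw packet data and return the
 * resulting bpf_flow_keys to userspace; no skb is constructed for this.
 */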
int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
				     const union bpf_attr *kattr,
				     union bpf_attr __user *uattr)
{
	struct bpf_test_timer t = { NO_PREEMPT };
	u32 size = kattr->test.data_size_in;
	struct bpf_flow_dissector ctx = {};
	u32 repeat = kattr->test.repeat;
	struct bpf_flow_keys *user_ctx;
	struct bpf_flow_keys flow_keys;
	const struct ethhdr *eth;
	unsigned int flags = 0;
	u32 retval, duration;
	void *data;
	int ret;

	if (kattr->test.flags || kattr->test.cpu || kattr->test.batch_size)
		return -EINVAL;

	if (size < ETH_HLEN)
		return -EINVAL;

	data = bpf_test_init(kattr, kattr->test.data_size_in, size, 0, 0);
	if (IS_ERR(data))
		return PTR_ERR(data);

	eth = (struct ethhdr *)data;

	if (!repeat)
		repeat = 1;

	user_ctx = bpf_ctx_init(kattr, sizeof(struct bpf_flow_keys));
	if (IS_ERR(user_ctx)) {
		kfree(data);
		return PTR_ERR(user_ctx);
	}
	if (user_ctx) {
		ret = verify_user_bpf_flow_keys(user_ctx);
		if (ret)
			goto out;
		flags = user_ctx->flags;
	}

	ctx.flow_keys = &flow_keys;
	ctx.data = data;
	ctx.data_end = (__u8 *)data + size;

	bpf_test_timer_enter(&t);
	do {
		retval = bpf_flow_dissect(prog, &ctx, eth->h_proto, ETH_HLEN,
					  size, flags);
	} while (bpf_test_timer_continue(&t, 1, repeat, &ret, &duration));
	bpf_test_timer_leave(&t);

	if (ret < 0)
		goto out;

	ret = bpf_test_finish(kattr, uattr, &flow_keys, NULL,
			      sizeof(flow_keys), retval, duration);
	if (!ret)
		ret = bpf_ctx_finish(kattr, uattr, user_ctx,
				     sizeof(struct bpf_flow_keys));

out:
	kfree(user_ctx);
	kfree(data);
	return ret;
}

int bpf_prog_test_run_sk_lookup(struct bpf_prog *prog, const union bpf_attr *kattr,
				union bpf_attr __user *uattr)
{
	struct bpf_test_timer t = { NO_PREEMPT };
	struct bpf_prog_array *progs = NULL;
	struct bpf_sk_lookup_kern ctx = {};
	u32 repeat = kattr->test.repeat;
	struct bpf_sk_lookup *user_ctx;
	u32 retval, duration;
	int ret = -EINVAL;

	if (kattr->test.flags || kattr->test.cpu || kattr->test.batch_size)
		return -EINVAL;

	if (kattr->test.data_in || kattr->test.data_size_in || kattr->test.data_out ||
	    kattr->test.data_size_out)
		return -EINVAL;

	if (!repeat)
		repeat = 1;

	user_ctx = bpf_ctx_init(kattr, sizeof(*user_ctx));
	if (IS_ERR(user_ctx))
		return PTR_ERR(user_ctx);

	if (!user_ctx)
		return -EINVAL;

	if (user_ctx->sk)
		goto out;

	if (!range_is_zero(user_ctx, offsetofend(typeof(*user_ctx), local_port), sizeof(*user_ctx)))
		goto out;

	if (user_ctx->local_port > U16_MAX) {
		ret = -ERANGE;
		goto out;
	}

	ctx.family = (u16)user_ctx->family;
	ctx.protocol = (u16)user_ctx->protocol;
	ctx.dport = (u16)user_ctx->local_port;
	ctx.sport = user_ctx->remote_port;

	switch (ctx.family) {
	case AF_INET:
		ctx.v4.daddr = (__force __be32)user_ctx->local_ip4;
		ctx.v4.saddr = (__force __be32)user_ctx->remote_ip4;
		break;

#if IS_ENABLED(CONFIG_IPV6)
	case AF_INET6:
		ctx.v6.daddr = (struct in6_addr *)user_ctx->local_ip6;
		ctx.v6.saddr = (struct in6_addr *)user_ctx->remote_ip6;
		break;
#endif

	default:
		ret = -EAFNOSUPPORT;
		goto out;
	}

	progs = bpf_prog_array_alloc(1, GFP_KERNEL);
	if (!progs) {
		ret = -ENOMEM;
		goto out;
	}

	progs->items[0].prog = prog;

	bpf_test_timer_enter(&t);
	do {
		ctx.selected_sk = NULL;
		retval = BPF_PROG_SK_LOOKUP_RUN_ARRAY(progs, ctx, bpf_prog_run);
	} while (bpf_test_timer_continue(&t, 1, repeat, &ret, &duration));
	bpf_test_timer_leave(&t);

	if (ret < 0)
		goto out;

	user_ctx->cookie = 0;
	if (ctx.selected_sk) {
		if (ctx.selected_sk->sk_reuseport && !ctx.no_reuseport) {
			ret = -EOPNOTSUPP;
			goto out;
		}

		user_ctx->cookie = sock_gen_cookie(ctx.selected_sk);
	}

	ret = bpf_test_finish(kattr, uattr, NULL, NULL, 0, retval, duration);
	if (!ret)
		ret = bpf_ctx_finish(kattr, uattr, user_ctx, sizeof(*user_ctx));

out:
	bpf_prog_array_free(progs);
	kfree(user_ctx);
	return ret;
}
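/* Syscall programs are run once on the user-supplied context under
 * rcu_read_lock_trace(); the (possibly modified) context is copied back to
 * the ctx_in buffer so the caller can observe writes made by the program.
 */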
int bpf_prog_test_run_syscall(struct bpf_prog *prog,
			      const union bpf_attr *kattr,
			      union bpf_attr __user *uattr)
{
	void __user *ctx_in = u64_to_user_ptr(kattr->test.ctx_in);
	__u32 ctx_size_in = kattr->test.ctx_size_in;
	void *ctx = NULL;
	u32 retval;
	int err = 0;

	/* doesn't support data_in/out, ctx_out, duration, repeat or flags */
	if (kattr->test.data_in || kattr->test.data_out ||
	    kattr->test.ctx_out || kattr->test.duration ||
	    kattr->test.repeat || kattr->test.flags ||
	    kattr->test.batch_size)
		return -EINVAL;

	if (ctx_size_in < prog->aux->max_ctx_offset ||
	    ctx_size_in > U16_MAX)
		return -EINVAL;

	if (ctx_size_in) {
		ctx = memdup_user(ctx_in, ctx_size_in);
		if (IS_ERR(ctx))
			return PTR_ERR(ctx);
	}

	rcu_read_lock_trace();
	retval = bpf_prog_run_pin_on_cpu(prog, ctx);
	rcu_read_unlock_trace();

	if (copy_to_user(&uattr->test.retval, &retval, sizeof(u32))) {
		err = -EFAULT;
		goto out;
	}
	if (ctx_size_in)
		if (copy_to_user(ctx_in, ctx, ctx_size_in))
			err = -EFAULT;
out:
	kfree(ctx);
	return err;
}

static const struct btf_kfunc_id_set bpf_prog_test_kfunc_set = {
	.owner = THIS_MODULE,
	.set = &test_sk_check_kfunc_ids,
};

BTF_ID_LIST(bpf_prog_test_dtor_kfunc_ids)
BTF_ID(struct, prog_test_ref_kfunc)
BTF_ID(func, bpf_kfunc_call_test_release)
BTF_ID(struct, prog_test_member)
BTF_ID(func, bpf_kfunc_call_memb_release)

static int __init bpf_prog_test_run_init(void)
{
	const struct btf_id_dtor_kfunc bpf_prog_test_dtor_kfunc[] = {
		{
			.btf_id = bpf_prog_test_dtor_kfunc_ids[0],
			.kfunc_btf_id = bpf_prog_test_dtor_kfunc_ids[1]
		},
		{
			.btf_id = bpf_prog_test_dtor_kfunc_ids[2],
			.kfunc_btf_id = bpf_prog_test_dtor_kfunc_ids[3],
		},
	};
	int ret;

	ret = register_btf_fmodret_id_set(&bpf_test_modify_return_set);
	ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_SCHED_CLS, &bpf_prog_test_kfunc_set);
	ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING, &bpf_prog_test_kfunc_set);
	ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_SYSCALL, &bpf_prog_test_kfunc_set);
	return ret ?: register_btf_id_dtor_kfuncs(bpf_prog_test_dtor_kfunc,
						  ARRAY_SIZE(bpf_prog_test_dtor_kfunc),
						  THIS_MODULE);
}
late_initcall(bpf_prog_test_run_init);