// SPDX-License-Identifier: GPL-2.0-only
/* net/core/xdp.c
 *
 * Copyright (c) 2017 Jesper Dangaard Brouer, Red Hat Inc.
 */
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/btf_ids.h>
#include <linux/filter.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/netdevice.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/rhashtable.h>
#include <linux/bug.h>
#include <net/page_pool/helpers.h>

#include <net/hotdata.h>
#include <net/xdp.h>
#include <net/xdp_priv.h> /* struct xdp_mem_allocator */
#include <trace/events/xdp.h>
#include <net/xdp_sock_drv.h>

#define REG_STATE_NEW		0x0
#define REG_STATE_REGISTERED	0x1
#define REG_STATE_UNREGISTERED	0x2
#define REG_STATE_UNUSED	0x3

static DEFINE_IDA(mem_id_pool);
static DEFINE_MUTEX(mem_id_lock);
#define MEM_ID_MAX 0xFFFE
#define MEM_ID_MIN 1
static int mem_id_next = MEM_ID_MIN;

static bool mem_id_init; /* false */
static struct rhashtable *mem_id_ht;

static u32 xdp_mem_id_hashfn(const void *data, u32 len, u32 seed)
{
	const u32 *k = data;
	const u32 key = *k;

	BUILD_BUG_ON(sizeof_field(struct xdp_mem_allocator, mem.id)
		     != sizeof(u32));

	/* Use cyclic increasing ID as direct hash key */
	return key;
}

static int xdp_mem_id_cmp(struct rhashtable_compare_arg *arg,
			  const void *ptr)
{
	const struct xdp_mem_allocator *xa = ptr;
	u32 mem_id = *(u32 *)arg->key;

	return xa->mem.id != mem_id;
}

static const struct rhashtable_params mem_id_rht_params = {
	.nelem_hint = 64,
	.head_offset = offsetof(struct xdp_mem_allocator, node),
	.key_offset = offsetof(struct xdp_mem_allocator, mem.id),
	.key_len = sizeof_field(struct xdp_mem_allocator, mem.id),
	.max_size = MEM_ID_MAX,
	.min_size = 8,
	.automatic_shrinking = true,
	.hashfn = xdp_mem_id_hashfn,
	.obj_cmpfn = xdp_mem_id_cmp,
};

static void __xdp_mem_allocator_rcu_free(struct rcu_head *rcu)
{
	struct xdp_mem_allocator *xa;

	xa = container_of(rcu, struct xdp_mem_allocator, rcu);

	/* Allow this ID to be reused */
	ida_free(&mem_id_pool, xa->mem.id);

	kfree(xa);
}

static void mem_xa_remove(struct xdp_mem_allocator *xa)
{
	trace_mem_disconnect(xa);

	if (!rhashtable_remove_fast(mem_id_ht, &xa->node, mem_id_rht_params))
		call_rcu(&xa->rcu, __xdp_mem_allocator_rcu_free);
}

static void mem_allocator_disconnect(void *allocator)
{
	struct xdp_mem_allocator *xa;
	struct rhashtable_iter iter;

	mutex_lock(&mem_id_lock);

	rhashtable_walk_enter(mem_id_ht, &iter);
	do {
		rhashtable_walk_start(&iter);

		while ((xa = rhashtable_walk_next(&iter)) && !IS_ERR(xa)) {
			if (xa->allocator == allocator)
				mem_xa_remove(xa);
		}

		rhashtable_walk_stop(&iter);

	} while (xa == ERR_PTR(-EAGAIN));
	rhashtable_walk_exit(&iter);

	mutex_unlock(&mem_id_lock);
}

void xdp_unreg_mem_model(struct xdp_mem_info *mem)
{
	struct xdp_mem_allocator *xa;
	int type = mem->type;
	int id = mem->id;

	/* Reset mem info to defaults */
	mem->id = 0;
	mem->type = 0;

	if (id == 0)
		return;

	if (type == MEM_TYPE_PAGE_POOL) {
		xa = rhashtable_lookup_fast(mem_id_ht, &id, mem_id_rht_params);
		page_pool_destroy(xa->page_pool);
	}
}
EXPORT_SYMBOL_GPL(xdp_unreg_mem_model);
void xdp_rxq_info_unreg_mem_model(struct xdp_rxq_info *xdp_rxq)
{
	if (xdp_rxq->reg_state != REG_STATE_REGISTERED) {
		WARN(1, "Missing register, driver bug");
		return;
	}

	xdp_unreg_mem_model(&xdp_rxq->mem);
}
EXPORT_SYMBOL_GPL(xdp_rxq_info_unreg_mem_model);

void xdp_rxq_info_unreg(struct xdp_rxq_info *xdp_rxq)
{
	/* Simplify driver cleanup code paths, allow unreg "unused" */
	if (xdp_rxq->reg_state == REG_STATE_UNUSED)
		return;

	xdp_rxq_info_unreg_mem_model(xdp_rxq);

	xdp_rxq->reg_state = REG_STATE_UNREGISTERED;
	xdp_rxq->dev = NULL;
}
EXPORT_SYMBOL_GPL(xdp_rxq_info_unreg);

static void xdp_rxq_info_init(struct xdp_rxq_info *xdp_rxq)
{
	memset(xdp_rxq, 0, sizeof(*xdp_rxq));
}

/* Returns 0 on success, negative on failure */
int __xdp_rxq_info_reg(struct xdp_rxq_info *xdp_rxq,
		       struct net_device *dev, u32 queue_index,
		       unsigned int napi_id, u32 frag_size)
{
	if (!dev) {
		WARN(1, "Missing net_device from driver");
		return -ENODEV;
	}

	if (xdp_rxq->reg_state == REG_STATE_UNUSED) {
		WARN(1, "Driver promised not to register this");
		return -EINVAL;
	}

	if (xdp_rxq->reg_state == REG_STATE_REGISTERED) {
		WARN(1, "Missing unregister, handled but fix driver");
		xdp_rxq_info_unreg(xdp_rxq);
	}

	/* State either UNREGISTERED or NEW */
	xdp_rxq_info_init(xdp_rxq);
	xdp_rxq->dev = dev;
	xdp_rxq->queue_index = queue_index;
	xdp_rxq->frag_size = frag_size;

	xdp_rxq->reg_state = REG_STATE_REGISTERED;
	return 0;
}
EXPORT_SYMBOL_GPL(__xdp_rxq_info_reg);

void xdp_rxq_info_unused(struct xdp_rxq_info *xdp_rxq)
{
	xdp_rxq->reg_state = REG_STATE_UNUSED;
}
EXPORT_SYMBOL_GPL(xdp_rxq_info_unused);

bool xdp_rxq_info_is_reg(struct xdp_rxq_info *xdp_rxq)
{
	return (xdp_rxq->reg_state == REG_STATE_REGISTERED);
}
EXPORT_SYMBOL_GPL(xdp_rxq_info_is_reg);
static int __mem_id_init_hash_table(void)
{
	struct rhashtable *rht;
	int ret;

	if (unlikely(mem_id_init))
		return 0;

	rht = kzalloc(sizeof(*rht), GFP_KERNEL);
	if (!rht)
		return -ENOMEM;

	ret = rhashtable_init(rht, &mem_id_rht_params);
	if (ret < 0) {
		kfree(rht);
		return ret;
	}
	mem_id_ht = rht;
	smp_mb(); /* mutex lock should provide enough pairing */
	mem_id_init = true;

	return 0;
}

/* Allocate a cyclic ID that maps to allocator pointer.
 * See: https://www.kernel.org/doc/html/latest/core-api/idr.html
 *
 * Caller must lock mem_id_lock.
 */
static int __mem_id_cyclic_get(gfp_t gfp)
{
	int retries = 1;
	int id;

again:
	id = ida_alloc_range(&mem_id_pool, mem_id_next, MEM_ID_MAX - 1, gfp);
	if (id < 0) {
		if (id == -ENOSPC) {
			/* Cyclic allocator, reset next id */
			if (retries--) {
				mem_id_next = MEM_ID_MIN;
				goto again;
			}
		}
		return id; /* errno */
	}
	mem_id_next = id + 1;

	return id;
}

static bool __is_supported_mem_type(enum xdp_mem_type type)
{
	if (type == MEM_TYPE_PAGE_POOL)
		return is_page_pool_compiled_in();

	if (type >= MEM_TYPE_MAX)
		return false;

	return true;
}

static struct xdp_mem_allocator *__xdp_reg_mem_model(struct xdp_mem_info *mem,
						     enum xdp_mem_type type,
						     void *allocator)
{
	struct xdp_mem_allocator *xdp_alloc;
	gfp_t gfp = GFP_KERNEL;
	int id, errno, ret;
	void *ptr;

	if (!__is_supported_mem_type(type))
		return ERR_PTR(-EOPNOTSUPP);

	mem->type = type;

	if (!allocator) {
		if (type == MEM_TYPE_PAGE_POOL)
			return ERR_PTR(-EINVAL); /* Setup time check page_pool req */
		return NULL;
	}

	/* Delay init of rhashtable to save memory if feature isn't used */
	if (!mem_id_init) {
		mutex_lock(&mem_id_lock);
		ret = __mem_id_init_hash_table();
		mutex_unlock(&mem_id_lock);
		if (ret < 0)
			return ERR_PTR(ret);
	}

	xdp_alloc = kzalloc(sizeof(*xdp_alloc), gfp);
	if (!xdp_alloc)
		return ERR_PTR(-ENOMEM);

	mutex_lock(&mem_id_lock);
	id = __mem_id_cyclic_get(gfp);
	if (id < 0) {
		errno = id;
		goto err;
	}
	mem->id = id;
	xdp_alloc->mem = *mem;
	xdp_alloc->allocator = allocator;

	/* Insert allocator into ID lookup table */
	ptr = rhashtable_insert_slow(mem_id_ht, &id, &xdp_alloc->node);
	if (IS_ERR(ptr)) {
		ida_free(&mem_id_pool, mem->id);
		mem->id = 0;
		errno = PTR_ERR(ptr);
		goto err;
	}

	if (type == MEM_TYPE_PAGE_POOL)
		page_pool_use_xdp_mem(allocator, mem_allocator_disconnect, mem);

	mutex_unlock(&mem_id_lock);

	return xdp_alloc;
err:
	mutex_unlock(&mem_id_lock);
	kfree(xdp_alloc);
	return ERR_PTR(errno);
}

int xdp_reg_mem_model(struct xdp_mem_info *mem,
		      enum xdp_mem_type type, void *allocator)
{
	struct xdp_mem_allocator *xdp_alloc;

	xdp_alloc = __xdp_reg_mem_model(mem, type, allocator);
	if (IS_ERR(xdp_alloc))
		return PTR_ERR(xdp_alloc);
	return 0;
}
EXPORT_SYMBOL_GPL(xdp_reg_mem_model);

int xdp_rxq_info_reg_mem_model(struct xdp_rxq_info *xdp_rxq,
			       enum xdp_mem_type type, void *allocator)
{
	struct xdp_mem_allocator *xdp_alloc;

	if (xdp_rxq->reg_state != REG_STATE_REGISTERED) {
		WARN(1, "Missing register, driver bug");
		return -EFAULT;
	}

	xdp_alloc = __xdp_reg_mem_model(&xdp_rxq->mem, type, allocator);
	if (IS_ERR(xdp_alloc))
		return PTR_ERR(xdp_alloc);

	if (type == MEM_TYPE_XSK_BUFF_POOL && allocator)
		xsk_pool_set_rxq_info(allocator, xdp_rxq);

	if (trace_mem_connect_enabled() && xdp_alloc)
		trace_mem_connect(xdp_alloc, xdp_rxq);
	return 0;
}
EXPORT_SYMBOL_GPL(xdp_rxq_info_reg_mem_model);
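/* Usage sketch (not part of the kernel sources): the typical driver ordering
 * for the two registration steps above, assuming an illustrative driver ring
 * struct that embeds an xdp_rxq_info and owns a page_pool. Most drivers go
 * through the xdp_rxq_info_reg() wrapper in include/net/xdp.h rather than
 * calling __xdp_rxq_info_reg() directly:
 *
 *	err = __xdp_rxq_info_reg(&ring->xdp_rxq, netdev, ring->q_idx, 0, 0);
 *	if (err)
 *		return err;
 *
 *	err = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
 *					 MEM_TYPE_PAGE_POOL, ring->pp);
 *	if (err) {
 *		xdp_rxq_info_unreg(&ring->xdp_rxq);
 *		return err;
 *	}
 *
 * Teardown is a single xdp_rxq_info_unreg(), which also unregisters the
 * memory model.
 */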
/**
 * xdp_reg_page_pool - register &page_pool as a memory provider for XDP
 * @pool: &page_pool to register
 *
 * Can be used to register pools manually without connecting to any XDP RxQ
 * info, so that the XDP layer will be aware of them. Then, they can be
 * attached to an RxQ info manually via xdp_rxq_info_attach_page_pool().
 *
 * Return: %0 on success, -errno on error.
 */
int xdp_reg_page_pool(struct page_pool *pool)
{
	struct xdp_mem_info mem;

	return xdp_reg_mem_model(&mem, MEM_TYPE_PAGE_POOL, pool);
}
EXPORT_SYMBOL_GPL(xdp_reg_page_pool);

/**
 * xdp_unreg_page_pool - unregister &page_pool from the memory providers list
 * @pool: &page_pool to unregister
 *
 * A shorthand for manually unregistering page pools. If the pool was
 * previously attached to an RxQ info, it must be detached first.
 */
void xdp_unreg_page_pool(const struct page_pool *pool)
{
	struct xdp_mem_info mem = {
		.type = MEM_TYPE_PAGE_POOL,
		.id = pool->xdp_mem_id,
	};

	xdp_unreg_mem_model(&mem);
}
EXPORT_SYMBOL_GPL(xdp_unreg_page_pool);

/**
 * xdp_rxq_info_attach_page_pool - attach registered pool to RxQ info
 * @xdp_rxq: XDP RxQ info to attach the pool to
 * @pool: pool to attach
 *
 * If the pool was registered manually, this function must be called instead
 * of xdp_rxq_info_reg_mem_model() to connect it to the RxQ info.
 */
void xdp_rxq_info_attach_page_pool(struct xdp_rxq_info *xdp_rxq,
				   const struct page_pool *pool)
{
	struct xdp_mem_info mem = {
		.type = MEM_TYPE_PAGE_POOL,
		.id = pool->xdp_mem_id,
	};

	xdp_rxq_info_attach_mem_model(xdp_rxq, &mem);
}
EXPORT_SYMBOL_GPL(xdp_rxq_info_attach_page_pool);
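/* Usage sketch (not part of the kernel sources): manual page_pool
 * registration for pools created before (or independently of) their RxQ
 * infos:
 *
 *	err = xdp_reg_page_pool(pool);
 *	if (err)
 *		return err;
 *	...
 *	xdp_rxq_info_attach_page_pool(&ring->xdp_rxq, pool);
 *
 * On teardown, detach the pool from the RxQ info first (the detach
 * counterpart of xdp_rxq_info_attach_mem_model() in include/net/xdp.h),
 * then call xdp_unreg_page_pool().
 */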
/* XDP RX runs under NAPI protection, and in different delivery error
 * scenarios (e.g. queue full), it is possible to return the xdp_frame
 * while still leveraging this protection. The @napi_direct boolean
 * is used for those call sites, allowing for faster recycling
 * of xdp_frames/pages in those cases.
 */
void __xdp_return(netmem_ref netmem, enum xdp_mem_type mem_type,
		  bool napi_direct, struct xdp_buff *xdp)
{
	switch (mem_type) {
	case MEM_TYPE_PAGE_POOL:
		netmem = netmem_compound_head(netmem);
		if (napi_direct && xdp_return_frame_no_direct())
			napi_direct = false;
		/* No need to check ((page->pp_magic & ~0x3UL) == PP_SIGNATURE)
		 * as mem->type knows this is a page_pool page
		 */
		page_pool_put_full_netmem(netmem_get_pp(netmem), netmem,
					  napi_direct);
		break;
	case MEM_TYPE_PAGE_SHARED:
		page_frag_free(__netmem_address(netmem));
		break;
	case MEM_TYPE_PAGE_ORDER0:
		put_page(__netmem_to_page(netmem));
		break;
	case MEM_TYPE_XSK_BUFF_POOL:
		/* NB! Only valid from an xdp_buff! */
		xsk_buff_free(xdp);
		break;
	default:
		/* Not possible, checked in xdp_rxq_info_reg_mem_model() */
		WARN(1, "Incorrect XDP memory type (%d) usage", mem_type);
		break;
	}
}

void xdp_return_frame(struct xdp_frame *xdpf)
{
	struct skb_shared_info *sinfo;

	if (likely(!xdp_frame_has_frags(xdpf)))
		goto out;

	sinfo = xdp_get_shared_info_from_frame(xdpf);
	for (u32 i = 0; i < sinfo->nr_frags; i++)
		__xdp_return(skb_frag_netmem(&sinfo->frags[i]), xdpf->mem_type,
			     false, NULL);

out:
	__xdp_return(virt_to_netmem(xdpf->data), xdpf->mem_type, false, NULL);
}
EXPORT_SYMBOL_GPL(xdp_return_frame);

void xdp_return_frame_rx_napi(struct xdp_frame *xdpf)
{
	struct skb_shared_info *sinfo;

	if (likely(!xdp_frame_has_frags(xdpf)))
		goto out;

	sinfo = xdp_get_shared_info_from_frame(xdpf);
	for (u32 i = 0; i < sinfo->nr_frags; i++)
		__xdp_return(skb_frag_netmem(&sinfo->frags[i]), xdpf->mem_type,
			     true, NULL);

out:
	__xdp_return(virt_to_netmem(xdpf->data), xdpf->mem_type, true, NULL);
}
EXPORT_SYMBOL_GPL(xdp_return_frame_rx_napi);

/* XDP bulk APIs introduce a defer/flush mechanism to return
 * pages belonging to the same xdp_mem_allocator object
 * (identified via the mem.id field) in bulk to optimize
 * I-cache and D-cache.
 * The bulk queue size is set to 16 to be aligned to how
 * XDP_REDIRECT bulking works. The bulk is flushed when
 * it is full or when mem.id changes.
 * xdp_frame_bulk is usually stored/allocated on the function
 * call-stack to avoid locking penalties.
 */

/* Must be called with rcu_read_lock held */
void xdp_return_frame_bulk(struct xdp_frame *xdpf,
			   struct xdp_frame_bulk *bq)
{
	if (xdpf->mem_type != MEM_TYPE_PAGE_POOL) {
		xdp_return_frame(xdpf);
		return;
	}

	if (bq->count == XDP_BULK_QUEUE_SIZE)
		xdp_flush_frame_bulk(bq);

	if (unlikely(xdp_frame_has_frags(xdpf))) {
		struct skb_shared_info *sinfo;
		int i;

		sinfo = xdp_get_shared_info_from_frame(xdpf);
		for (i = 0; i < sinfo->nr_frags; i++) {
			skb_frag_t *frag = &sinfo->frags[i];

			bq->q[bq->count++] = skb_frag_netmem(frag);
			if (bq->count == XDP_BULK_QUEUE_SIZE)
				xdp_flush_frame_bulk(bq);
		}
	}
	bq->q[bq->count++] = virt_to_netmem(xdpf->data);
}
EXPORT_SYMBOL_GPL(xdp_return_frame_bulk);
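/* Usage sketch (not part of the kernel sources): a driver Tx-completion loop
 * returning frames in bulk, with the bulk queue on the stack as the comment
 * above recommends ("ring", "frames" and "budget" are illustrative):
 *
 *	struct xdp_frame_bulk bq;
 *	u32 i;
 *
 *	xdp_frame_bulk_init(&bq);
 *
 *	rcu_read_lock();
 *	for (i = 0; i < budget; i++)
 *		xdp_return_frame_bulk(ring->frames[i], &bq);
 *	xdp_flush_frame_bulk(&bq);
 *	rcu_read_unlock();
 *
 * The rcu_read_lock() section satisfies the "Must be called with
 * rcu_read_lock held" requirement stated above.
 */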
/**
 * xdp_return_frag - free one XDP frag or decrement its refcount
 * @netmem: network memory reference to release
 * @xdp: &xdp_buff to release the frag for
 */
void xdp_return_frag(netmem_ref netmem, const struct xdp_buff *xdp)
{
	__xdp_return(netmem, xdp->rxq->mem.type, true, NULL);
}
EXPORT_SYMBOL_GPL(xdp_return_frag);

void xdp_return_buff(struct xdp_buff *xdp)
{
	struct skb_shared_info *sinfo;

	if (likely(!xdp_buff_has_frags(xdp)))
		goto out;

	sinfo = xdp_get_shared_info_from_buff(xdp);
	for (u32 i = 0; i < sinfo->nr_frags; i++)
		__xdp_return(skb_frag_netmem(&sinfo->frags[i]),
			     xdp->rxq->mem.type, true, xdp);

out:
	__xdp_return(virt_to_netmem(xdp->data), xdp->rxq->mem.type, true, xdp);
}
EXPORT_SYMBOL_GPL(xdp_return_buff);

void xdp_attachment_setup(struct xdp_attachment_info *info,
			  struct netdev_bpf *bpf)
{
	if (info->prog)
		bpf_prog_put(info->prog);
	info->prog = bpf->prog;
	info->flags = bpf->flags;
}
EXPORT_SYMBOL_GPL(xdp_attachment_setup);

struct xdp_frame *xdp_convert_zc_to_xdp_frame(struct xdp_buff *xdp)
{
	unsigned int metasize, totsize;
	void *addr, *data_to_copy;
	struct xdp_frame *xdpf;
	struct page *page;

	/* Clone into a MEM_TYPE_PAGE_ORDER0 xdp_frame. */
	metasize = xdp_data_meta_unsupported(xdp) ? 0 :
		   xdp->data - xdp->data_meta;
	totsize = xdp->data_end - xdp->data + metasize;

	if (sizeof(*xdpf) + totsize > PAGE_SIZE)
		return NULL;

	page = dev_alloc_page();
	if (!page)
		return NULL;

	addr = page_to_virt(page);
	xdpf = addr;
	memset(xdpf, 0, sizeof(*xdpf));

	addr += sizeof(*xdpf);
	data_to_copy = metasize ? xdp->data_meta : xdp->data;
	memcpy(addr, data_to_copy, totsize);

	xdpf->data = addr + metasize;
	xdpf->len = totsize - metasize;
	xdpf->headroom = 0;
	xdpf->metasize = metasize;
	xdpf->frame_sz = PAGE_SIZE;
	xdpf->mem_type = MEM_TYPE_PAGE_ORDER0;

	xsk_buff_free(xdp);
	return xdpf;
}
EXPORT_SYMBOL_GPL(xdp_convert_zc_to_xdp_frame);

/* Used by XDP_WARN macro, to avoid inlining WARN() in fast-path */
void xdp_warn(const char *msg, const char *func, const int line)
{
	WARN(1, "XDP_WARN: %s(line:%d): %s\n", func, line, msg);
}
EXPORT_SYMBOL_GPL(xdp_warn);

/**
 * xdp_build_skb_from_buff - create an skb from &xdp_buff
 * @xdp: &xdp_buff to convert to an skb
 *
 * Perform common operations to create a new skb to pass up the stack from
 * &xdp_buff: allocate an skb head from the NAPI percpu cache, initialize
 * skb data pointers and offsets, set the recycle bit if the buff is
 * PP-backed, record the Rx queue index, set the protocol and update the
 * frags info.
 *
 * Return: new &sk_buff on success, %NULL on error.
 */
struct sk_buff *xdp_build_skb_from_buff(const struct xdp_buff *xdp)
{
	const struct xdp_rxq_info *rxq = xdp->rxq;
	const struct skb_shared_info *sinfo;
	struct sk_buff *skb;
	u32 nr_frags = 0;
	int metalen;

	if (unlikely(xdp_buff_has_frags(xdp))) {
		sinfo = xdp_get_shared_info_from_buff(xdp);
		nr_frags = sinfo->nr_frags;
	}

	skb = napi_build_skb(xdp->data_hard_start, xdp->frame_sz);
	if (unlikely(!skb))
		return NULL;

	skb_reserve(skb, xdp->data - xdp->data_hard_start);
	__skb_put(skb, xdp->data_end - xdp->data);

	metalen = xdp->data - xdp->data_meta;
	if (metalen > 0)
		skb_metadata_set(skb, metalen);

	if (rxq->mem.type == MEM_TYPE_PAGE_POOL)
		skb_mark_for_recycle(skb);

	skb_record_rx_queue(skb, rxq->queue_index);

	if (unlikely(nr_frags)) {
		u32 tsize;

		tsize = sinfo->xdp_frags_truesize ? : nr_frags * xdp->frame_sz;
		xdp_update_skb_shared_info(skb, nr_frags,
					   sinfo->xdp_frags_size, tsize,
					   xdp_buff_is_frag_pfmemalloc(xdp));
	}

	skb->protocol = eth_type_trans(skb, rxq->dev);

	return skb;
}
EXPORT_SYMBOL_GPL(xdp_build_skb_from_buff);
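/* Usage sketch (not part of the kernel sources): passing a buff up the stack
 * after the XDP program returned XDP_PASS ("napi" is illustrative):
 *
 *	skb = xdp_build_skb_from_buff(xdp);
 *	if (unlikely(!skb)) {
 *		xdp_return_buff(xdp);
 *		return;
 *	}
 *	napi_gro_receive(napi, skb);
 */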
/**
 * xdp_copy_frags_from_zc - copy frags from XSk buff to skb
 * @skb: skb to copy frags to
 * @xdp: XSk &xdp_buff from which the frags will be copied
 * @pp: &page_pool backing page allocation, if available
 *
 * Copy all frags from XSk &xdp_buff to the skb to pass it up the stack.
 * Allocate a new buffer for each frag, copy it and attach to the skb.
 *
 * Return: true on success, false on netmem allocation fail.
 */
static noinline bool xdp_copy_frags_from_zc(struct sk_buff *skb,
					    const struct xdp_buff *xdp,
					    struct page_pool *pp)
{
	struct skb_shared_info *sinfo = skb_shinfo(skb);
	const struct skb_shared_info *xinfo;
	u32 nr_frags, tsize = 0;
	bool pfmemalloc = false;

	xinfo = xdp_get_shared_info_from_buff(xdp);
	nr_frags = xinfo->nr_frags;

	for (u32 i = 0; i < nr_frags; i++) {
		u32 len = skb_frag_size(&xinfo->frags[i]);
		u32 offset, truesize = len;
		netmem_ref netmem;

		netmem = page_pool_dev_alloc_netmem(pp, &offset, &truesize);
		if (unlikely(!netmem)) {
			sinfo->nr_frags = i;
			return false;
		}

		memcpy(__netmem_address(netmem),
		       __netmem_address(xinfo->frags[i].netmem),
		       LARGEST_ALIGN(len));
		__skb_fill_netmem_desc_noacc(sinfo, i, netmem, offset, len);

		tsize += truesize;
		pfmemalloc |= netmem_is_pfmemalloc(netmem);
	}

	xdp_update_skb_shared_info(skb, nr_frags, xinfo->xdp_frags_size,
				   tsize, pfmemalloc);

	return true;
}

/**
 * xdp_build_skb_from_zc - create an skb from XSk &xdp_buff
 * @xdp: source XSk buff
 *
 * Similar to xdp_build_skb_from_buff(), but for XSk frames. Allocate an skb
 * head, new buffer for the head, copy the data and initialize the skb fields.
 * If there are frags, allocate new buffers for them and copy.
 * Buffers are allocated from the system percpu pools to try recycling them.
 * If the new skb was built successfully, @xdp is returned to the XSk pool's
 * freelist. On error, it remains untouched and the caller must take care of
 * this.
 *
 * Return: new &sk_buff on success, %NULL on error.
 */
struct sk_buff *xdp_build_skb_from_zc(struct xdp_buff *xdp)
{
	struct page_pool *pp = this_cpu_read(system_page_pool);
	const struct xdp_rxq_info *rxq = xdp->rxq;
	u32 len = xdp->data_end - xdp->data_meta;
	u32 truesize = xdp->frame_sz;
	struct sk_buff *skb;
	int metalen;
	void *data;

	if (!IS_ENABLED(CONFIG_PAGE_POOL))
		return NULL;

	data = page_pool_dev_alloc_va(pp, &truesize);
	if (unlikely(!data))
		return NULL;

	skb = napi_build_skb(data, truesize);
	if (unlikely(!skb)) {
		page_pool_free_va(pp, data, true);
		return NULL;
	}

	skb_mark_for_recycle(skb);
	skb_reserve(skb, xdp->data_meta - xdp->data_hard_start);

	memcpy(__skb_put(skb, len), xdp->data_meta, LARGEST_ALIGN(len));

	metalen = xdp->data - xdp->data_meta;
	if (metalen > 0) {
		skb_metadata_set(skb, metalen);
		__skb_pull(skb, metalen);
	}

	skb_record_rx_queue(skb, rxq->queue_index);

	if (unlikely(xdp_buff_has_frags(xdp)) &&
	    unlikely(!xdp_copy_frags_from_zc(skb, xdp, pp))) {
		napi_consume_skb(skb, true);
		return NULL;
	}

	xsk_buff_free(xdp);

	skb->protocol = eth_type_trans(skb, rxq->dev);

	return skb;
}
EXPORT_SYMBOL_GPL(xdp_build_skb_from_zc);
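/* Usage sketch (not part of the kernel sources): converting an XSk buff that
 * must go up the regular stack. Per the kernel-doc above, @xdp stays owned
 * by the caller on failure ("napi" is illustrative):
 *
 *	skb = xdp_build_skb_from_zc(xdp);
 *	if (unlikely(!skb)) {
 *		xsk_buff_free(xdp);
 *		return;
 *	}
 *	napi_gro_receive(napi, skb);
 */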
struct sk_buff *__xdp_build_skb_from_frame(struct xdp_frame *xdpf,
					   struct sk_buff *skb,
					   struct net_device *dev)
{
	struct skb_shared_info *sinfo = xdp_get_shared_info_from_frame(xdpf);
	unsigned int headroom, frame_size;
	void *hard_start;
	u8 nr_frags;

	/* xdp frags frame */
	if (unlikely(xdp_frame_has_frags(xdpf)))
		nr_frags = sinfo->nr_frags;

	/* Part of headroom was reserved to xdpf */
	headroom = sizeof(*xdpf) + xdpf->headroom;

	/* Memory size backing xdp_frame data already has reserved
	 * room for build_skb to place skb_shared_info in tailroom.
	 */
	frame_size = xdpf->frame_sz;

	hard_start = xdpf->data - headroom;
	skb = build_skb_around(skb, hard_start, frame_size);
	if (unlikely(!skb))
		return NULL;

	skb_reserve(skb, headroom);
	__skb_put(skb, xdpf->len);
	if (xdpf->metasize)
		skb_metadata_set(skb, xdpf->metasize);

	if (unlikely(xdp_frame_has_frags(xdpf)))
		xdp_update_skb_shared_info(skb, nr_frags,
					   sinfo->xdp_frags_size,
					   nr_frags * xdpf->frame_sz,
					   xdp_frame_is_frag_pfmemalloc(xdpf));

	/* Essential SKB info: protocol and skb->dev */
	skb->protocol = eth_type_trans(skb, dev);

	/* Optional SKB info, currently missing:
	 * - HW checksum info (skb->ip_summed)
	 * - HW RX hash (skb_set_hash)
	 * - RX ring dev queue index (skb_record_rx_queue)
	 */

	if (xdpf->mem_type == MEM_TYPE_PAGE_POOL)
		skb_mark_for_recycle(skb);

	/* Allow SKB to reuse area used by xdp_frame */
	xdp_scrub_frame(xdpf);

	return skb;
}
EXPORT_SYMBOL_GPL(__xdp_build_skb_from_frame);

struct sk_buff *xdp_build_skb_from_frame(struct xdp_frame *xdpf,
					 struct net_device *dev)
{
	struct sk_buff *skb;

	skb = kmem_cache_alloc(net_hotdata.skbuff_cache, GFP_ATOMIC);
	if (unlikely(!skb))
		return NULL;

	memset(skb, 0, offsetof(struct sk_buff, tail));

	return __xdp_build_skb_from_frame(xdpf, skb, dev);
}
EXPORT_SYMBOL_GPL(xdp_build_skb_from_frame);

struct xdp_frame *xdpf_clone(struct xdp_frame *xdpf)
{
	unsigned int headroom, totalsize;
	struct xdp_frame *nxdpf;
	struct page *page;
	void *addr;

	headroom = xdpf->headroom + sizeof(*xdpf);
	totalsize = headroom + xdpf->len;

	if (unlikely(totalsize > PAGE_SIZE))
		return NULL;
	page = dev_alloc_page();
	if (!page)
		return NULL;
	addr = page_to_virt(page);

	memcpy(addr, xdpf, totalsize);

	nxdpf = addr;
	nxdpf->data = addr + headroom;
	nxdpf->frame_sz = PAGE_SIZE;
	nxdpf->mem_type = MEM_TYPE_PAGE_ORDER0;

	return nxdpf;
}

__bpf_kfunc_start_defs();

/**
 * bpf_xdp_metadata_rx_timestamp - Read XDP frame RX timestamp.
 * @ctx: XDP context pointer.
 * @timestamp: Return value pointer.
 *
 * Return:
 * * Returns 0 on success or ``-errno`` on error.
 * * ``-EOPNOTSUPP`` : means device driver does not implement kfunc
 * * ``-ENODATA`` : means no RX-timestamp available for this frame
 */
__bpf_kfunc int bpf_xdp_metadata_rx_timestamp(const struct xdp_md *ctx, u64 *timestamp)
{
	return -EOPNOTSUPP;
}
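/* Usage sketch (not part of the kernel sources): BPF programs call these
 * kfuncs through __ksym externs; a driver implementation, when present,
 * overrides the -EOPNOTSUPP stubs defined here:
 *
 *	extern int bpf_xdp_metadata_rx_timestamp(const struct xdp_md *ctx,
 *						 __u64 *timestamp) __ksym;
 *
 *	SEC("xdp")
 *	int rx(struct xdp_md *ctx)
 *	{
 *		__u64 ts;
 *
 *		if (!bpf_xdp_metadata_rx_timestamp(ctx, &ts))
 *			bpf_printk("HW rx timestamp: %llu", ts);
 *		return XDP_PASS;
 *	}
 */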
/**
 * bpf_xdp_metadata_rx_hash - Read XDP frame RX hash.
 * @ctx: XDP context pointer.
 * @hash: Return value pointer.
 * @rss_type: Return value pointer for RSS type.
 *
 * The RSS hash type (@rss_type) specifies what portion of packet headers NIC
 * hardware used when calculating RSS hash value. The RSS type can be decoded
 * via &enum xdp_rss_hash_type either matching on individual L3/L4 bits
 * ``XDP_RSS_L*`` or by combined traditional *RSS Hashing Types*
 * ``XDP_RSS_TYPE_L*``.
 *
 * Return:
 * * Returns 0 on success or ``-errno`` on error.
 * * ``-EOPNOTSUPP`` : means device driver doesn't implement kfunc
 * * ``-ENODATA`` : means no RX-hash available for this frame
 */
__bpf_kfunc int bpf_xdp_metadata_rx_hash(const struct xdp_md *ctx, u32 *hash,
					 enum xdp_rss_hash_type *rss_type)
{
	return -EOPNOTSUPP;
}

/**
 * bpf_xdp_metadata_rx_vlan_tag - Get XDP packet outermost VLAN tag
 * @ctx: XDP context pointer.
 * @vlan_proto: Destination pointer for VLAN Tag protocol identifier (TPID).
 * @vlan_tci: Destination pointer for VLAN TCI (VID + DEI + PCP).
 *
 * In case of success, ``vlan_proto`` contains *Tag protocol identifier (TPID)*,
 * usually ``ETH_P_8021Q`` or ``ETH_P_8021AD``, but some networks can use
 * custom TPIDs. ``vlan_proto`` is stored in **network byte order (BE)**
 * and should be used as follows:
 * ``if (vlan_proto == bpf_htons(ETH_P_8021Q)) do_something();``
 *
 * ``vlan_tci`` contains the remaining 16 bits of a VLAN tag.
 * Driver is expected to provide those in **host byte order (usually LE)**,
 * so the bpf program should not perform byte conversion.
 * According to 802.1Q standard, *VLAN TCI (Tag control information)*
 * is a bit field that contains:
 * *VLAN identifier (VID)* that can be read with ``vlan_tci & 0xfff``,
 * *Drop eligible indicator (DEI)* - 1 bit,
 * *Priority code point (PCP)* - 3 bits.
 * For detailed meaning of DEI and PCP, please refer to other sources.
 *
 * Return:
 * * Returns 0 on success or ``-errno`` on error.
 * * ``-EOPNOTSUPP`` : device driver doesn't implement kfunc
 * * ``-ENODATA`` : VLAN tag was not stripped or is not available
 */
__bpf_kfunc int bpf_xdp_metadata_rx_vlan_tag(const struct xdp_md *ctx,
					     __be16 *vlan_proto, u16 *vlan_tci)
{
	return -EOPNOTSUPP;
}

__bpf_kfunc_end_defs();

BTF_KFUNCS_START(xdp_metadata_kfunc_ids)
#define XDP_METADATA_KFUNC(_, __, name, ___) BTF_ID_FLAGS(func, name, KF_TRUSTED_ARGS)
XDP_METADATA_KFUNC_xxx
#undef XDP_METADATA_KFUNC
BTF_KFUNCS_END(xdp_metadata_kfunc_ids)

static const struct btf_kfunc_id_set xdp_metadata_kfunc_set = {
	.owner = THIS_MODULE,
	.set = &xdp_metadata_kfunc_ids,
};

BTF_ID_LIST(xdp_metadata_kfunc_ids_unsorted)
#define XDP_METADATA_KFUNC(name, _, str, __) BTF_ID(func, str)
XDP_METADATA_KFUNC_xxx
#undef XDP_METADATA_KFUNC

u32 bpf_xdp_metadata_kfunc_id(int id)
{
	/* xdp_metadata_kfunc_ids is sorted and can't be used */
	return xdp_metadata_kfunc_ids_unsorted[id];
}

bool bpf_dev_bound_kfunc_id(u32 btf_id)
{
	return btf_id_set8_contains(&xdp_metadata_kfunc_ids, btf_id);
}

static int __init xdp_metadata_init(void)
{
	return register_btf_kfunc_id_set(BPF_PROG_TYPE_XDP, &xdp_metadata_kfunc_set);
}
late_initcall(xdp_metadata_init);

void xdp_set_features_flag(struct net_device *dev, xdp_features_t val)
{
	val &= NETDEV_XDP_ACT_MASK;
	if (dev->xdp_features == val)
		return;

	dev->xdp_features = val;

	if (dev->reg_state == NETREG_REGISTERED)
		call_netdevice_notifiers(NETDEV_XDP_FEAT_CHANGE, dev);
}
EXPORT_SYMBOL_GPL(xdp_set_features_flag);

void xdp_features_set_redirect_target(struct net_device *dev, bool support_sg)
{
	xdp_features_t val = (dev->xdp_features | NETDEV_XDP_ACT_NDO_XMIT);

	if (support_sg)
		val |= NETDEV_XDP_ACT_NDO_XMIT_SG;
	xdp_set_features_flag(dev, val);
}
EXPORT_SYMBOL_GPL(xdp_features_set_redirect_target);
void xdp_features_clear_redirect_target(struct net_device *dev)
{
	xdp_features_t val = dev->xdp_features;

	val &= ~(NETDEV_XDP_ACT_NDO_XMIT | NETDEV_XDP_ACT_NDO_XMIT_SG);
	xdp_set_features_flag(dev, val);
}
EXPORT_SYMBOL_GPL(xdp_features_clear_redirect_target);
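/* Usage sketch (not part of the kernel sources): a driver advertising its
 * XDP capabilities at probe time, then adding itself as a redirect target
 * once its XDP Tx rings are set up:
 *
 *	xdp_set_features_flag(dev, NETDEV_XDP_ACT_BASIC |
 *				   NETDEV_XDP_ACT_REDIRECT);
 *	...
 *	xdp_features_set_redirect_target(dev, false);
 *
 * xdp_features_clear_redirect_target() undoes the latter, e.g. when the XDP
 * Tx resources are torn down.
 */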