// SPDX-License-Identifier: GPL-2.0

#include <linux/netdevice.h>
#include <net/netdev_lock.h>
#include <net/xsk_buff_pool.h>
#include <net/xdp_sock.h>
#include <net/xdp_sock_drv.h>

#include "xsk_queue.h"
#include "xdp_umem.h"
#include "xsk.h"

void xp_add_xsk(struct xsk_buff_pool *pool, struct xdp_sock *xs)
{
	if (!xs->tx)
		return;

	spin_lock(&pool->xsk_tx_list_lock);
	list_add_rcu(&xs->tx_list, &pool->xsk_tx_list);
	spin_unlock(&pool->xsk_tx_list_lock);
}

void xp_del_xsk(struct xsk_buff_pool *pool, struct xdp_sock *xs)
{
	if (!xs->tx)
		return;

	spin_lock(&pool->xsk_tx_list_lock);
	list_del_rcu(&xs->tx_list);
	spin_unlock(&pool->xsk_tx_list_lock);
}

void xp_destroy(struct xsk_buff_pool *pool)
{
	if (!pool)
		return;

	kvfree(pool->tx_descs);
	kvfree(pool->heads);
	kvfree(pool);
}

int xp_alloc_tx_descs(struct xsk_buff_pool *pool, struct xdp_sock *xs)
{
	pool->tx_descs = kvzalloc_objs(*pool->tx_descs, xs->tx->nentries,
				       GFP_KERNEL);
	if (!pool->tx_descs)
		return -ENOMEM;

	return 0;
}

struct xsk_buff_pool *xp_create_and_assign_umem(struct xdp_sock *xs,
						struct xdp_umem *umem)
{
	bool unaligned = umem->flags & XDP_UMEM_UNALIGNED_CHUNK_FLAG;
	struct xsk_buff_pool *pool;
	struct xdp_buff_xsk *xskb;
	u32 i, entries;

	entries = unaligned ? umem->chunks : 0;
	pool = kvzalloc_flex(*pool, free_heads, entries, GFP_KERNEL);
	if (!pool)
		goto out;

	pool->heads = kvzalloc_objs(*pool->heads, umem->chunks, GFP_KERNEL);
	if (!pool->heads)
		goto out;

	if (xs->tx)
		if (xp_alloc_tx_descs(pool, xs))
			goto out;

	pool->chunk_mask = ~((u64)umem->chunk_size - 1);
	pool->addrs_cnt = umem->size;
	pool->heads_cnt = umem->chunks;
	pool->free_heads_cnt = umem->chunks;
	pool->headroom = umem->headroom;
	pool->chunk_size = umem->chunk_size;
	pool->chunk_shift = ffs(umem->chunk_size) - 1;
	pool->unaligned = unaligned;
	pool->frame_len = umem->chunk_size - umem->headroom -
			  XDP_PACKET_HEADROOM;
	pool->umem = umem;
	pool->addrs = umem->addrs;
	pool->tx_metadata_len = umem->tx_metadata_len;
	pool->tx_sw_csum = umem->flags & XDP_UMEM_TX_SW_CSUM;
	spin_lock_init(&pool->rx_lock);
	INIT_LIST_HEAD(&pool->free_list);
	INIT_LIST_HEAD(&pool->xskb_list);
	INIT_LIST_HEAD(&pool->xsk_tx_list);
	spin_lock_init(&pool->xsk_tx_list_lock);
	spin_lock_init(&pool->cq_prod_lock);
	spin_lock_init(&xs->cq_tmp->cq_cached_prod_lock);
	refcount_set(&pool->users, 1);

	pool->fq = xs->fq_tmp;
	pool->cq = xs->cq_tmp;

	for (i = 0; i < pool->free_heads_cnt; i++) {
		xskb = &pool->heads[i];
		xskb->pool = pool;
		xskb->xdp.frame_sz = umem->chunk_size - umem->headroom;
		INIT_LIST_HEAD(&xskb->list_node);
		if (pool->unaligned)
			pool->free_heads[i] = xskb;
		else
			xp_init_xskb_addr(xskb, pool, (u64)i * pool->chunk_size);
	}

	return pool;

out:
	xp_destroy(pool);
	return NULL;
}

void xp_set_rxq_info(struct xsk_buff_pool *pool, struct xdp_rxq_info *rxq)
{
	u32 i;

	for (i = 0; i < pool->heads_cnt; i++)
		pool->heads[i].xdp.rxq = rxq;
}
EXPORT_SYMBOL(xp_set_rxq_info);

void xp_fill_cb(struct xsk_buff_pool *pool, struct xsk_cb_desc *desc)
{
	u32 i;

	for (i = 0; i < pool->heads_cnt; i++) {
		struct xdp_buff_xsk *xskb = &pool->heads[i];

		memcpy(xskb->cb + desc->off, desc->src, desc->bytes);
	}
}
EXPORT_SYMBOL(xp_fill_cb);

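/* Tear down any zero-copy state the driver set up for this pool. Must run
 * under rtnl; if the driver fails to disable zero-copy there is nothing
 * left to do but warn.
 */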
static void xp_disable_drv_zc(struct xsk_buff_pool *pool)
{
	struct netdev_bpf bpf;
	int err;

	ASSERT_RTNL();

	if (pool->umem->zc) {
		bpf.command = XDP_SETUP_XSK_POOL;
		bpf.xsk.pool = NULL;
		bpf.xsk.queue_id = pool->queue_id;

		err = pool->netdev->netdev_ops->ndo_bpf(pool->netdev, &bpf);

		if (err)
			WARN(1, "Failed to disable zero-copy!\n");
	}
}

int xp_assign_dev(struct xsk_buff_pool *pool,
		  struct net_device *netdev, u16 queue_id, u16 flags)
{
	bool force_zc, force_copy;
	struct netdev_bpf bpf;
	int err = 0;

	ASSERT_RTNL();

	force_zc = flags & XDP_ZEROCOPY;
	force_copy = flags & XDP_COPY;

	if (force_zc && force_copy)
		return -EINVAL;

	if (xsk_get_pool_from_qid(netdev, queue_id))
		return -EBUSY;

	pool->netdev = netdev;
	pool->queue_id = queue_id;
	err = xsk_reg_pool_at_qid(netdev, pool, queue_id);
	if (err)
		return err;

	if (flags & XDP_USE_SG)
		pool->umem->flags |= XDP_UMEM_SG_FLAG;

	if (flags & XDP_USE_NEED_WAKEUP)
		pool->uses_need_wakeup = true;
	/* Tx needs to be explicitly woken up the first time. Also
	 * for supporting drivers that do not implement this
	 * feature. They will always have to call sendto() or poll().
	 */
	pool->cached_need_wakeup = XDP_WAKEUP_TX;

	dev_hold(netdev);

	if (force_copy)
		/* For copy-mode, we are done. */
		return 0;

	if ((netdev->xdp_features & NETDEV_XDP_ACT_XSK) != NETDEV_XDP_ACT_XSK) {
		err = -EOPNOTSUPP;
		goto err_unreg_pool;
	}

	if (netdev->xdp_zc_max_segs == 1 && (flags & XDP_USE_SG)) {
		err = -EOPNOTSUPP;
		goto err_unreg_pool;
	}

	if (dev_get_min_mp_channel_count(netdev)) {
		err = -EBUSY;
		goto err_unreg_pool;
	}

	bpf.command = XDP_SETUP_XSK_POOL;
	bpf.xsk.pool = pool;
	bpf.xsk.queue_id = queue_id;

	netdev_ops_assert_locked(netdev);
	err = netdev->netdev_ops->ndo_bpf(netdev, &bpf);
	if (err)
		goto err_unreg_pool;

	if (!pool->dma_pages) {
		WARN(1, "Driver did not DMA map zero-copy buffers");
		err = -EINVAL;
		goto err_unreg_xsk;
	}
	pool->umem->zc = true;
	pool->xdp_zc_max_segs = netdev->xdp_zc_max_segs;
	return 0;

err_unreg_xsk:
	xp_disable_drv_zc(pool);
err_unreg_pool:
	if (!force_zc)
		err = 0; /* fallback to copy mode */
	if (err) {
		xsk_clear_pool_at_qid(netdev, queue_id);
		dev_put(netdev);
	}
	return err;
}

int xp_assign_dev_shared(struct xsk_buff_pool *pool, struct xdp_sock *umem_xs,
			 struct net_device *dev, u16 queue_id)
{
	u16 flags;
	struct xdp_umem *umem = umem_xs->umem;

	flags = umem->zc ? XDP_ZEROCOPY : XDP_COPY;
	if (umem_xs->pool->uses_need_wakeup)
		flags |= XDP_USE_NEED_WAKEUP;

	return xp_assign_dev(pool, dev, queue_id, flags);
}

void xp_clear_dev(struct xsk_buff_pool *pool)
{
	struct net_device *netdev = pool->netdev;

	if (!pool->netdev)
		return;

	netdev_lock_ops(netdev);
	xp_disable_drv_zc(pool);
	xsk_clear_pool_at_qid(pool->netdev, pool->queue_id);
	pool->netdev = NULL;
	netdev_unlock_ops(netdev);
	dev_put(netdev);
}

static void xp_release_deferred(struct work_struct *work)
{
	struct xsk_buff_pool *pool = container_of(work, struct xsk_buff_pool,
						  work);

	rtnl_lock();
	xp_clear_dev(pool);
	rtnl_unlock();

	if (pool->fq) {
		xskq_destroy(pool->fq);
		pool->fq = NULL;
	}

	if (pool->cq) {
		xskq_destroy(pool->cq);
		pool->cq = NULL;
	}

	xdp_put_umem(pool->umem, false);
	xp_destroy(pool);
}

void xp_get_pool(struct xsk_buff_pool *pool)
{
	refcount_inc(&pool->users);
}

bool xp_put_pool(struct xsk_buff_pool *pool)
{
	if (!pool)
		return false;

	if (refcount_dec_and_test(&pool->users)) {
		INIT_WORK(&pool->work, xp_release_deferred);
		schedule_work(&pool->work);
		return true;
	}

	return false;
}

static struct xsk_dma_map *xp_find_dma_map(struct xsk_buff_pool *pool)
{
	struct xsk_dma_map *dma_map;

	list_for_each_entry(dma_map, &pool->umem->xsk_dma_list, list) {
		if (dma_map->netdev == pool->netdev)
			return dma_map;
	}

	return NULL;
}

static struct xsk_dma_map *xp_create_dma_map(struct device *dev, struct net_device *netdev,
					     u32 nr_pages, struct xdp_umem *umem)
{
	struct xsk_dma_map *dma_map;

	dma_map = kzalloc_obj(*dma_map, GFP_KERNEL);
	if (!dma_map)
		return NULL;

	dma_map->dma_pages = kvzalloc_objs(*dma_map->dma_pages, nr_pages,
					   GFP_KERNEL);
	if (!dma_map->dma_pages) {
		kfree(dma_map);
		return NULL;
	}

	dma_map->netdev = netdev;
	dma_map->dev = dev;
	dma_map->dma_pages_cnt = nr_pages;
	refcount_set(&dma_map->users, 1);
	list_add(&dma_map->list, &umem->xsk_dma_list);
	return dma_map;
}

static void xp_destroy_dma_map(struct xsk_dma_map *dma_map)
{
	list_del(&dma_map->list);
	kvfree(dma_map->dma_pages);
	kfree(dma_map);
}

static void __xp_dma_unmap(struct xsk_dma_map *dma_map, unsigned long attrs)
{
	dma_addr_t *dma;
	u32 i;

	for (i = 0; i < dma_map->dma_pages_cnt; i++) {
		dma = &dma_map->dma_pages[i];
		if (*dma) {
			*dma &= ~XSK_NEXT_PG_CONTIG_MASK;
			dma_unmap_page_attrs(dma_map->dev, *dma, PAGE_SIZE,
					     DMA_BIDIRECTIONAL, attrs);
			*dma = 0;
		}
	}

	xp_destroy_dma_map(dma_map);
}

void xp_dma_unmap(struct xsk_buff_pool *pool, unsigned long attrs)
{
	struct xsk_dma_map *dma_map;

	if (!pool->dma_pages)
		return;

	dma_map = xp_find_dma_map(pool);
	if (!dma_map) {
		WARN(1, "Could not find dma_map for device");
		return;
	}

	if (refcount_dec_and_test(&dma_map->users))
		__xp_dma_unmap(dma_map, attrs);

	kvfree(pool->dma_pages);
	pool->dma_pages = NULL;
	pool->dma_pages_cnt = 0;
	pool->dev = NULL;
}
EXPORT_SYMBOL(xp_dma_unmap);

static void xp_check_dma_contiguity(struct xsk_dma_map *dma_map)
{
	u32 i;

	for (i = 0; i < dma_map->dma_pages_cnt - 1; i++) {
		if (dma_map->dma_pages[i] + PAGE_SIZE ==
		    dma_map->dma_pages[i + 1])
			dma_map->dma_pages[i] |= XSK_NEXT_PG_CONTIG_MASK;
		else
			dma_map->dma_pages[i] &= ~XSK_NEXT_PG_CONTIG_MASK;
	}
}

static int xp_init_dma_info(struct xsk_buff_pool *pool, struct xsk_dma_map *dma_map)
{
	if (!pool->unaligned) {
		u32 i;

		for (i = 0; i < pool->heads_cnt; i++) {
			struct xdp_buff_xsk *xskb = &pool->heads[i];
			u64 orig_addr;

			orig_addr = xskb->xdp.data_hard_start - pool->addrs - pool->headroom;
			xp_init_xskb_dma(xskb, pool, dma_map->dma_pages, orig_addr);
		}
	}

	pool->dma_pages = kvzalloc_objs(*pool->dma_pages,
					dma_map->dma_pages_cnt, GFP_KERNEL);
	if (!pool->dma_pages)
		return -ENOMEM;

	pool->dev = dma_map->dev;
	pool->dma_pages_cnt = dma_map->dma_pages_cnt;
	memcpy(pool->dma_pages, dma_map->dma_pages,
	       pool->dma_pages_cnt * sizeof(*pool->dma_pages));

	return 0;
}

int xp_dma_map(struct xsk_buff_pool *pool, struct device *dev,
	       unsigned long attrs, struct page **pages, u32 nr_pages)
{
	struct xsk_dma_map *dma_map;
	dma_addr_t dma;
	int err;
	u32 i;

	dma_map = xp_find_dma_map(pool);
	if (dma_map) {
		err = xp_init_dma_info(pool, dma_map);
		if (err)
			return err;

		refcount_inc(&dma_map->users);
		return 0;
	}

	dma_map = xp_create_dma_map(dev, pool->netdev, nr_pages, pool->umem);
	if (!dma_map)
		return -ENOMEM;

	for (i = 0; i < dma_map->dma_pages_cnt; i++) {
		dma = dma_map_page_attrs(dev, pages[i], 0, PAGE_SIZE,
					 DMA_BIDIRECTIONAL, attrs);
		if (dma_mapping_error(dev, dma)) {
			__xp_dma_unmap(dma_map, attrs);
			return -ENOMEM;
		}
		dma_map->dma_pages[i] = dma;
	}

	if (pool->unaligned)
		xp_check_dma_contiguity(dma_map);

	err = xp_init_dma_info(pool, dma_map);
	if (err) {
		__xp_dma_unmap(dma_map, attrs);
		return err;
	}

	return 0;
}
EXPORT_SYMBOL(xp_dma_map);

static bool xp_addr_crosses_non_contig_pg(struct xsk_buff_pool *pool,
					  u64 addr)
{
	return xp_desc_crosses_non_contig_pg(pool, addr, pool->chunk_size);
}

static bool xp_check_unaligned(struct xsk_buff_pool *pool, u64 *addr)
{
	*addr = xp_unaligned_extract_addr(*addr);
	if (*addr >= pool->addrs_cnt ||
	    *addr + pool->chunk_size > pool->addrs_cnt ||
	    xp_addr_crosses_non_contig_pg(pool, *addr))
		return false;
	return true;
}

static bool xp_check_aligned(struct xsk_buff_pool *pool, u64 *addr)
{
	*addr = xp_aligned_extract_addr(pool, *addr);
	return *addr < pool->addrs_cnt;
}

static struct xdp_buff_xsk *xp_get_xskb(struct xsk_buff_pool *pool, u64 addr)
{
	struct xdp_buff_xsk *xskb;

	if (pool->unaligned) {
		xskb = pool->free_heads[--pool->free_heads_cnt];
		xp_init_xskb_addr(xskb, pool, addr);
		if (pool->dma_pages)
			xp_init_xskb_dma(xskb, pool, pool->dma_pages, addr);
	} else {
		xskb = &pool->heads[xp_aligned_extract_idx(pool, addr)];
	}

	return xskb;
}

static struct xdp_buff_xsk *__xp_alloc(struct xsk_buff_pool *pool)
{
	struct xdp_buff_xsk *xskb;
	u64 addr;
	bool ok;

	if (pool->free_heads_cnt == 0)
		return NULL;

	for (;;) {
		if (!xskq_cons_peek_addr_unchecked(pool->fq, &addr)) {
			pool->fq->queue_empty_descs++;
			return NULL;
		}

		ok = pool->unaligned ? xp_check_unaligned(pool, &addr) :
		     xp_check_aligned(pool, &addr);
		if (!ok) {
			pool->fq->invalid_descs++;
			xskq_cons_release(pool->fq);
			continue;
		}
		break;
	}

	xskb = xp_get_xskb(pool, addr);

	xskq_cons_release(pool->fq);
	return xskb;
}

struct xdp_buff *xp_alloc(struct xsk_buff_pool *pool)
{
	struct xdp_buff_xsk *xskb;

	if (!pool->free_list_cnt) {
		xskb = __xp_alloc(pool);
		if (!xskb)
			return NULL;
	} else {
		pool->free_list_cnt--;
		xskb = list_first_entry(&pool->free_list, struct xdp_buff_xsk,
					list_node);
		list_del_init(&xskb->list_node);
	}

	xskb->xdp.data = xskb->xdp.data_hard_start + XDP_PACKET_HEADROOM;
	xskb->xdp.data_meta = xskb->xdp.data;
	xskb->xdp.flags = 0;

	if (pool->dev)
		xp_dma_sync_for_device(pool, xskb->dma, pool->frame_len);

	return &xskb->xdp;
}
EXPORT_SYMBOL(xp_alloc);

static u32 xp_alloc_new_from_fq(struct xsk_buff_pool *pool, struct xdp_buff **xdp, u32 max)
{
	u32 i, cached_cons, nb_entries;

	if (max > pool->free_heads_cnt)
		max = pool->free_heads_cnt;
	max = xskq_cons_nb_entries(pool->fq, max);

	cached_cons = pool->fq->cached_cons;
	nb_entries = max;
	i = max;
	while (i--) {
		struct xdp_buff_xsk *xskb;
		u64 addr;
		bool ok;

		__xskq_cons_read_addr_unchecked(pool->fq, cached_cons++, &addr);

		ok = pool->unaligned ? xp_check_unaligned(pool, &addr) :
			xp_check_aligned(pool, &addr);
		if (unlikely(!ok)) {
			pool->fq->invalid_descs++;
			nb_entries--;
			continue;
		}

		xskb = xp_get_xskb(pool, addr);

		*xdp = &xskb->xdp;
		xdp++;
	}

	xskq_cons_release_n(pool->fq, max);
	return nb_entries;
}

static u32 xp_alloc_reused(struct xsk_buff_pool *pool, struct xdp_buff **xdp, u32 nb_entries)
{
	struct xdp_buff_xsk *xskb;
	u32 i;

	nb_entries = min_t(u32, nb_entries, pool->free_list_cnt);

	i = nb_entries;
	while (i--) {
		xskb = list_first_entry(&pool->free_list, struct xdp_buff_xsk, list_node);
		list_del_init(&xskb->list_node);

		*xdp = &xskb->xdp;
		xdp++;
	}
	pool->free_list_cnt -= nb_entries;

	return nb_entries;
}

static u32 xp_alloc_slow(struct xsk_buff_pool *pool, struct xdp_buff **xdp,
			 u32 max)
{
	int i;

	for (i = 0; i < max; i++) {
		struct xdp_buff *buff;

		buff = xp_alloc(pool);
		if (unlikely(!buff))
			return i;
		*xdp = buff;
		xdp++;
	}

	return max;
}

u32 xp_alloc_batch(struct xsk_buff_pool *pool, struct xdp_buff **xdp, u32 max)
{
	u32 nb_entries1 = 0, nb_entries2;

	if (unlikely(pool->dev && dma_dev_need_sync(pool->dev)))
		return xp_alloc_slow(pool, xdp, max);

	if (unlikely(pool->free_list_cnt)) {
		nb_entries1 = xp_alloc_reused(pool, xdp, max);
		if (nb_entries1 == max)
			return nb_entries1;

		max -= nb_entries1;
		xdp += nb_entries1;
	}

	nb_entries2 = xp_alloc_new_from_fq(pool, xdp, max);
	if (!nb_entries2)
		pool->fq->queue_empty_descs++;

	return nb_entries1 + nb_entries2;
}
EXPORT_SYMBOL(xp_alloc_batch);

bool xp_can_alloc(struct xsk_buff_pool *pool, u32 count)
{
	u32 req_count, avail_count;

	if (pool->free_list_cnt >= count)
		return true;

	req_count = count - pool->free_list_cnt;
	avail_count = xskq_cons_nb_entries(pool->fq, req_count);
	if (!avail_count)
		pool->fq->queue_empty_descs++;

	return avail_count >= req_count;
}
EXPORT_SYMBOL(xp_can_alloc);

void xp_free(struct xdp_buff_xsk *xskb)
{
	if (!list_empty(&xskb->list_node))
		return;

	xskb->pool->free_list_cnt++;
	list_add(&xskb->list_node, &xskb->pool->free_list);
}
EXPORT_SYMBOL(xp_free);

static u64 __xp_raw_get_addr(const struct xsk_buff_pool *pool, u64 addr)
{
	return pool->unaligned ? xp_unaligned_add_offset_to_addr(addr) : addr;
}

static void *__xp_raw_get_data(const struct xsk_buff_pool *pool, u64 addr)
{
	return pool->addrs + addr;
}

void *xp_raw_get_data(struct xsk_buff_pool *pool, u64 addr)
{
	return __xp_raw_get_data(pool, __xp_raw_get_addr(pool, addr));
}
EXPORT_SYMBOL(xp_raw_get_data);

static dma_addr_t __xp_raw_get_dma(const struct xsk_buff_pool *pool, u64 addr)
{
	return (pool->dma_pages[addr >> PAGE_SHIFT] &
		~XSK_NEXT_PG_CONTIG_MASK) +
		(addr & ~PAGE_MASK);
}

dma_addr_t xp_raw_get_dma(struct xsk_buff_pool *pool, u64 addr)
{
	return __xp_raw_get_dma(pool, __xp_raw_get_addr(pool, addr));
}
EXPORT_SYMBOL(xp_raw_get_dma);

/**
 * xp_raw_get_ctx - get &xdp_desc context
 * @pool: XSk buff pool desc address belongs to
 * @addr: desc address (from userspace)
 *
 * Helper for getting desc's DMA address and metadata pointer, if present.
 * Saves one call on hotpath, double calculation of the actual address,
 * and inline checks for metadata presence and sanity.
 *
 * Return: new &xdp_desc_ctx struct containing desc's DMA address and metadata
 * pointer, if it is present and valid (initialized to %NULL otherwise).
 */
struct xdp_desc_ctx xp_raw_get_ctx(const struct xsk_buff_pool *pool, u64 addr)
{
	struct xdp_desc_ctx ret;

	addr = __xp_raw_get_addr(pool, addr);

	ret.dma = __xp_raw_get_dma(pool, addr);
	ret.meta = __xsk_buff_get_metadata(pool, __xp_raw_get_data(pool, addr));

	return ret;
}
EXPORT_SYMBOL(xp_raw_get_ctx);