// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/dma-map-ops.h>
#include <linux/mm.h>
#include <linux/nospec.h>
#include <linux/io_uring.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/skbuff_ref.h>

#include <net/page_pool/helpers.h>
#include <net/page_pool/memory_provider.h>
#include <net/netlink.h>
#include <net/netdev_queues.h>
#include <net/netdev_rx_queue.h>
#include <net/tcp.h>
#include <net/rps.h>

#include <trace/events/page_pool.h>

#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "kbuf.h"
#include "memmap.h"
#include "zcrx.h"
#include "rsrc.h"

#define IO_ZCRX_AREA_SUPPORTED_FLAGS	(IORING_ZCRX_AREA_DMABUF)

#define IO_DMA_ATTR (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING)

static inline struct io_zcrx_ifq *io_pp_to_ifq(struct page_pool *pp)
{
	return pp->mp_priv;
}

static inline struct io_zcrx_area *io_zcrx_iov_to_area(const struct net_iov *niov)
{
	struct net_iov_area *owner = net_iov_owner(niov);

	return container_of(owner, struct io_zcrx_area, nia);
}

static inline struct page *io_zcrx_iov_page(const struct net_iov *niov)
{
	struct io_zcrx_area *area = io_zcrx_iov_to_area(niov);
	unsigned niov_pages_shift;

	lockdep_assert(!area->mem.is_dmabuf);

	niov_pages_shift = area->ifq->niov_shift - PAGE_SHIFT;
	return area->mem.pages[net_iov_idx(niov) << niov_pages_shift];
}

static int io_populate_area_dma(struct io_zcrx_ifq *ifq,
				struct io_zcrx_area *area)
{
	unsigned niov_size = 1U << ifq->niov_shift;
	struct sg_table *sgt = area->mem.sgt;
	struct scatterlist *sg;
	unsigned i, niov_idx = 0;

	for_each_sgtable_dma_sg(sgt, sg, i) {
		dma_addr_t dma = sg_dma_address(sg);
		unsigned long sg_len = sg_dma_len(sg);

		if (WARN_ON_ONCE(sg_len % niov_size))
			return -EINVAL;

		while (sg_len && niov_idx < area->nia.num_niovs) {
			struct net_iov *niov = &area->nia.niovs[niov_idx];

			if (net_mp_niov_set_dma_addr(niov, dma))
				return -EFAULT;
			sg_len -= niov_size;
			dma += niov_size;
			niov_idx++;
		}
	}

	if (WARN_ON_ONCE(niov_idx != area->nia.num_niovs))
		return -EFAULT;
	return 0;
}

static void io_release_dmabuf(struct io_zcrx_mem *mem)
{
	if (!IS_ENABLED(CONFIG_DMA_SHARED_BUFFER))
		return;

	if (mem->sgt)
		dma_buf_unmap_attachment_unlocked(mem->attach, mem->sgt,
						  DMA_FROM_DEVICE);
	if (mem->attach)
		dma_buf_detach(mem->dmabuf, mem->attach);
	if (mem->dmabuf)
		dma_buf_put(mem->dmabuf);

	mem->sgt = NULL;
	mem->attach = NULL;
	mem->dmabuf = NULL;
}

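/*
 * Import a dmabuf-backed area: take a reference on the dmabuf fd, attach it
 * to the rx queue's DMA device, map the attachment and check that the mapped
 * scatterlist covers exactly the registered length. All of it is undone by
 * io_release_dmabuf() on failure.
 */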
static int io_import_dmabuf(struct io_zcrx_ifq *ifq,
			    struct io_zcrx_mem *mem,
			    struct io_uring_zcrx_area_reg *area_reg)
{
	unsigned long off = (unsigned long)area_reg->addr;
	unsigned long len = (unsigned long)area_reg->len;
	unsigned long total_size = 0;
	struct scatterlist *sg;
	int dmabuf_fd = area_reg->dmabuf_fd;
	int i, ret;

	if (off)
		return -EINVAL;
	if (WARN_ON_ONCE(!ifq->dev))
		return -EFAULT;
	if (!IS_ENABLED(CONFIG_DMA_SHARED_BUFFER))
		return -EINVAL;

	mem->is_dmabuf = true;
	mem->dmabuf = dma_buf_get(dmabuf_fd);
	if (IS_ERR(mem->dmabuf)) {
		ret = PTR_ERR(mem->dmabuf);
		mem->dmabuf = NULL;
		goto err;
	}

	mem->attach = dma_buf_attach(mem->dmabuf, ifq->dev);
	if (IS_ERR(mem->attach)) {
		ret = PTR_ERR(mem->attach);
		mem->attach = NULL;
		goto err;
	}

	mem->sgt = dma_buf_map_attachment_unlocked(mem->attach, DMA_FROM_DEVICE);
	if (IS_ERR(mem->sgt)) {
		ret = PTR_ERR(mem->sgt);
		mem->sgt = NULL;
		goto err;
	}

	for_each_sgtable_dma_sg(mem->sgt, sg, i)
		total_size += sg_dma_len(sg);

	if (total_size != len) {
		ret = -EINVAL;
		goto err;
	}

	mem->size = len;
	return 0;
err:
	io_release_dmabuf(mem);
	return ret;
}

static unsigned long io_count_account_pages(struct page **pages, unsigned nr_pages)
{
	struct folio *last_folio = NULL;
	unsigned long res = 0;
	int i;

	for (i = 0; i < nr_pages; i++) {
		struct folio *folio = page_folio(pages[i]);

		if (folio == last_folio)
			continue;
		last_folio = folio;
		res += 1UL << folio_order(folio);
	}
	return res;
}

static int io_import_umem(struct io_zcrx_ifq *ifq,
			  struct io_zcrx_mem *mem,
			  struct io_uring_zcrx_area_reg *area_reg)
{
	struct page **pages;
	int nr_pages, ret;

	if (area_reg->dmabuf_fd)
		return -EINVAL;
	if (!area_reg->addr)
		return -EFAULT;
	pages = io_pin_pages((unsigned long)area_reg->addr, area_reg->len,
			     &nr_pages);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	ret = sg_alloc_table_from_pages(&mem->page_sg_table, pages, nr_pages,
					0, nr_pages << PAGE_SHIFT,
					GFP_KERNEL_ACCOUNT);
	if (ret) {
		unpin_user_pages(pages, nr_pages);
		return ret;
	}

	mem->account_pages = io_count_account_pages(pages, nr_pages);
	ret = io_account_mem(ifq->ctx, mem->account_pages);
	if (ret < 0)
		mem->account_pages = 0;

	mem->sgt = &mem->page_sg_table;
	mem->pages = pages;
	mem->nr_folios = nr_pages;
	mem->size = area_reg->len;
	return ret;
}

static void io_release_area_mem(struct io_zcrx_mem *mem)
{
	if (mem->is_dmabuf) {
		io_release_dmabuf(mem);
		return;
	}
	if (mem->pages) {
		unpin_user_pages(mem->pages, mem->nr_folios);
		sg_free_table(mem->sgt);
		mem->sgt = NULL;
		kvfree(mem->pages);
	}
}

static int io_import_area(struct io_zcrx_ifq *ifq,
			  struct io_zcrx_mem *mem,
			  struct io_uring_zcrx_area_reg *area_reg)
{
	int ret;

	if (area_reg->flags & ~IO_ZCRX_AREA_SUPPORTED_FLAGS)
		return -EINVAL;
	if (area_reg->rq_area_token)
		return -EINVAL;
	if (area_reg->__resv2[0] || area_reg->__resv2[1])
		return -EINVAL;

	ret = io_validate_user_buf_range(area_reg->addr, area_reg->len);
	if (ret)
		return ret;
	if (area_reg->addr & ~PAGE_MASK || area_reg->len & ~PAGE_MASK)
		return -EINVAL;

	if (area_reg->flags & IORING_ZCRX_AREA_DMABUF)
		return io_import_dmabuf(ifq, mem, area_reg);
	return io_import_umem(ifq, mem, area_reg);
}

static void io_zcrx_unmap_area(struct io_zcrx_ifq *ifq,
			       struct io_zcrx_area *area)
{
	int i;

	guard(mutex)(&ifq->pp_lock);
	if (!area->is_mapped)
		return;
	area->is_mapped = false;

	for (i = 0; i < area->nia.num_niovs; i++)
		net_mp_niov_set_dma_addr(&area->nia.niovs[i], 0);

	if (area->mem.is_dmabuf) {
		io_release_dmabuf(&area->mem);
	} else {
		dma_unmap_sgtable(ifq->dev, &area->mem.page_sg_table,
				  DMA_FROM_DEVICE, IO_DMA_ATTR);
	}
}

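/*
 * DMA-map the area's backing memory (dmabufs are already mapped at import
 * time) and record a DMA address in every niov. Serialised against
 * io_zcrx_unmap_area() by ifq->pp_lock; called lazily from the page pool
 * init path.
 */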
static int io_zcrx_map_area(struct io_zcrx_ifq *ifq, struct io_zcrx_area *area)
{
	int ret;

	guard(mutex)(&ifq->pp_lock);
	if (area->is_mapped)
		return 0;

	if (!area->mem.is_dmabuf) {
		ret = dma_map_sgtable(ifq->dev, &area->mem.page_sg_table,
				      DMA_FROM_DEVICE, IO_DMA_ATTR);
		if (ret < 0)
			return ret;
	}

	ret = io_populate_area_dma(ifq, area);
	if (ret == 0)
		area->is_mapped = true;
	return ret;
}

static void io_zcrx_sync_for_device(struct page_pool *pool,
				    struct net_iov *niov)
{
#if defined(CONFIG_HAS_DMA) && defined(CONFIG_DMA_NEED_SYNC)
	dma_addr_t dma_addr;
	unsigned niov_size;

	if (!dma_dev_need_sync(pool->p.dev))
		return;

	niov_size = 1U << io_pp_to_ifq(pool)->niov_shift;
	dma_addr = page_pool_get_dma_addr_netmem(net_iov_to_netmem(niov));
	__dma_sync_single_for_device(pool->p.dev, dma_addr + pool->p.offset,
				     niov_size, pool->p.dma_dir);
#endif
}

#define IO_RQ_MAX_ENTRIES		32768

#define IO_SKBS_PER_CALL_LIMIT		20

struct io_zcrx_args {
	struct io_kiocb		*req;
	struct io_zcrx_ifq	*ifq;
	struct socket		*sock;
	unsigned		nr_skbs;
};

static const struct memory_provider_ops io_uring_pp_zc_ops;

static inline atomic_t *io_get_user_counter(struct net_iov *niov)
{
	struct io_zcrx_area *area = io_zcrx_iov_to_area(niov);

	return &area->user_refs[net_iov_idx(niov)];
}

static bool io_zcrx_put_niov_uref(struct net_iov *niov)
{
	atomic_t *uref = io_get_user_counter(niov);

	if (unlikely(!atomic_read(uref)))
		return false;
	atomic_dec(uref);
	return true;
}

static void io_zcrx_get_niov_uref(struct net_iov *niov)
{
	atomic_inc(io_get_user_counter(niov));
}

static int io_allocate_rbuf_ring(struct io_zcrx_ifq *ifq,
				 struct io_uring_zcrx_ifq_reg *reg,
				 struct io_uring_region_desc *rd,
				 u32 id)
{
	u64 mmap_offset;
	size_t off, size;
	void *ptr;
	int ret;

	off = ALIGN(sizeof(struct io_uring), L1_CACHE_BYTES);
	size = off + sizeof(struct io_uring_zcrx_rqe) * reg->rq_entries;
	if (size > rd->size)
		return -EINVAL;

	mmap_offset = IORING_MAP_OFF_ZCRX_REGION;
	mmap_offset += id << IORING_OFF_PBUF_SHIFT;

	ret = io_create_region(ifq->ctx, &ifq->region, rd, mmap_offset);
	if (ret < 0)
		return ret;

	ptr = io_region_get_ptr(&ifq->region);
	ifq->rq_ring = (struct io_uring *)ptr;
	ifq->rqes = (struct io_uring_zcrx_rqe *)(ptr + off);

	reg->offsets.head = offsetof(struct io_uring, head);
	reg->offsets.tail = offsetof(struct io_uring, tail);
	reg->offsets.rqes = off;
	return 0;
}

static void io_free_rbuf_ring(struct io_zcrx_ifq *ifq)
{
	io_free_region(ifq->ctx, &ifq->region);
	ifq->rq_ring = NULL;
	ifq->rqes = NULL;
}

static void io_zcrx_free_area(struct io_zcrx_area *area)
{
	io_zcrx_unmap_area(area->ifq, area);
	io_release_area_mem(&area->mem);

	if (area->mem.account_pages)
		io_unaccount_mem(area->ifq->ctx, area->mem.account_pages);

	kvfree(area->freelist);
	kvfree(area->nia.niovs);
	kvfree(area->user_refs);
	kfree(area);
}

static int io_zcrx_append_area(struct io_zcrx_ifq *ifq,
			       struct io_zcrx_area *area)
{
	if (ifq->area)
		return -EINVAL;
	ifq->area = area;
	return 0;
}

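/*
 * Create the zcrx area described by the user registration: import the backing
 * memory (pinned user pages or a dmabuf) and allocate the net_iov array,
 * freelist and user reference counters, one entry per niov-sized chunk.
 */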
static int io_zcrx_create_area(struct io_zcrx_ifq *ifq,
			       struct io_uring_zcrx_area_reg *area_reg)
{
	struct io_zcrx_area *area;
	unsigned nr_iovs;
	int i, ret;

	ret = -ENOMEM;
	area = kzalloc(sizeof(*area), GFP_KERNEL);
	if (!area)
		goto err;
	area->ifq = ifq;

	ret = io_import_area(ifq, &area->mem, area_reg);
	if (ret)
		goto err;

	ifq->niov_shift = PAGE_SHIFT;
	nr_iovs = area->mem.size >> ifq->niov_shift;
	area->nia.num_niovs = nr_iovs;

	ret = -ENOMEM;
	area->nia.niovs = kvmalloc_array(nr_iovs, sizeof(area->nia.niovs[0]),
					 GFP_KERNEL_ACCOUNT | __GFP_ZERO);
	if (!area->nia.niovs)
		goto err;

	area->freelist = kvmalloc_array(nr_iovs, sizeof(area->freelist[0]),
					GFP_KERNEL_ACCOUNT | __GFP_ZERO);
	if (!area->freelist)
		goto err;

	area->user_refs = kvmalloc_array(nr_iovs, sizeof(area->user_refs[0]),
					 GFP_KERNEL_ACCOUNT | __GFP_ZERO);
	if (!area->user_refs)
		goto err;

	for (i = 0; i < nr_iovs; i++) {
		struct net_iov *niov = &area->nia.niovs[i];

		niov->owner = &area->nia;
		area->freelist[i] = i;
		atomic_set(&area->user_refs[i], 0);
		niov->type = NET_IOV_IOURING;
	}

	area->free_count = nr_iovs;
	/* we're only supporting one area per ifq for now */
	area->area_id = 0;
	area_reg->rq_area_token = (u64)area->area_id << IORING_ZCRX_AREA_SHIFT;
	spin_lock_init(&area->freelist_lock);

	ret = io_zcrx_append_area(ifq, area);
	if (!ret)
		return 0;
err:
	if (area)
		io_zcrx_free_area(area);
	return ret;
}

static struct io_zcrx_ifq *io_zcrx_ifq_alloc(struct io_ring_ctx *ctx)
{
	struct io_zcrx_ifq *ifq;

	ifq = kzalloc(sizeof(*ifq), GFP_KERNEL);
	if (!ifq)
		return NULL;

	ifq->if_rxq = -1;
	ifq->ctx = ctx;
	spin_lock_init(&ifq->rq_lock);
	mutex_init(&ifq->pp_lock);
	return ifq;
}

static void io_zcrx_drop_netdev(struct io_zcrx_ifq *ifq)
{
	guard(mutex)(&ifq->pp_lock);

	if (!ifq->netdev)
		return;
	netdev_put(ifq->netdev, &ifq->netdev_tracker);
	ifq->netdev = NULL;
}

static void io_close_queue(struct io_zcrx_ifq *ifq)
{
	struct net_device *netdev;
	netdevice_tracker netdev_tracker;
	struct pp_memory_provider_params p = {
		.mp_ops = &io_uring_pp_zc_ops,
		.mp_priv = ifq,
	};

	if (ifq->if_rxq == -1)
		return;

	scoped_guard(mutex, &ifq->pp_lock) {
		netdev = ifq->netdev;
		netdev_tracker = ifq->netdev_tracker;
		ifq->netdev = NULL;
	}

	if (netdev) {
		net_mp_close_rxq(netdev, ifq->if_rxq, &p);
		netdev_put(netdev, &netdev_tracker);
	}
	ifq->if_rxq = -1;
}

static void io_zcrx_ifq_free(struct io_zcrx_ifq *ifq)
{
	io_close_queue(ifq);

	if (ifq->area)
		io_zcrx_free_area(ifq->area);
	if (ifq->dev)
		put_device(ifq->dev);

	io_free_rbuf_ring(ifq);
	mutex_destroy(&ifq->pp_lock);
	kfree(ifq);
}

struct io_mapped_region *io_zcrx_get_region(struct io_ring_ctx *ctx,
					    unsigned int id)
{
	struct io_zcrx_ifq *ifq = xa_load(&ctx->zcrx_ctxs, id);

	lockdep_assert_held(&ctx->mmap_lock);

	return ifq ? &ifq->region : NULL;
}

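/*
 * Register a zero-copy rx interface queue: validate the registration,
 * allocate the ifq and its refill ring region, resolve the netdev and the
 * queue's DMA device, create the area and finally bind the rx queue to the
 * io_uring page pool memory provider.
 */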
int io_register_zcrx_ifq(struct io_ring_ctx *ctx,
			 struct io_uring_zcrx_ifq_reg __user *arg)
{
	struct pp_memory_provider_params mp_param = {};
	struct io_uring_zcrx_area_reg area;
	struct io_uring_zcrx_ifq_reg reg;
	struct io_uring_region_desc rd;
	struct io_zcrx_ifq *ifq;
	int ret;
	u32 id;

	/*
	 * 1. Interface queue allocation.
	 * 2. It can observe data destined for sockets of other tasks.
	 */
	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	/* mandatory io_uring features for zc rx */
	if (!(ctx->flags & IORING_SETUP_DEFER_TASKRUN))
		return -EINVAL;
	if (!(ctx->flags & (IORING_SETUP_CQE32 | IORING_SETUP_CQE_MIXED)))
		return -EINVAL;
	if (copy_from_user(&reg, arg, sizeof(reg)))
		return -EFAULT;
	if (copy_from_user(&rd, u64_to_user_ptr(reg.region_ptr), sizeof(rd)))
		return -EFAULT;
	if (!mem_is_zero(&reg.__resv, sizeof(reg.__resv)) ||
	    reg.__resv2 || reg.zcrx_id)
		return -EINVAL;
	if (reg.if_rxq == -1 || !reg.rq_entries || reg.flags)
		return -EINVAL;
	if (reg.rq_entries > IO_RQ_MAX_ENTRIES) {
		if (!(ctx->flags & IORING_SETUP_CLAMP))
			return -EINVAL;
		reg.rq_entries = IO_RQ_MAX_ENTRIES;
	}
	reg.rq_entries = roundup_pow_of_two(reg.rq_entries);

	if (copy_from_user(&area, u64_to_user_ptr(reg.area_ptr), sizeof(area)))
		return -EFAULT;

	ifq = io_zcrx_ifq_alloc(ctx);
	if (!ifq)
		return -ENOMEM;
	ifq->rq_entries = reg.rq_entries;

	scoped_guard(mutex, &ctx->mmap_lock) {
		/* preallocate id */
		ret = xa_alloc(&ctx->zcrx_ctxs, &id, NULL, xa_limit_31b, GFP_KERNEL);
		if (ret)
			goto ifq_free;
	}

	ret = io_allocate_rbuf_ring(ifq, &reg, &rd, id);
	if (ret)
		goto err;

	ifq->netdev = netdev_get_by_index(current->nsproxy->net_ns, reg.if_idx,
					  &ifq->netdev_tracker, GFP_KERNEL);
	if (!ifq->netdev) {
		ret = -ENODEV;
		goto err;
	}

	ifq->dev = netdev_queue_get_dma_dev(ifq->netdev, reg.if_rxq);
	if (!ifq->dev) {
		ret = -EOPNOTSUPP;
		goto err;
	}
	get_device(ifq->dev);

	ret = io_zcrx_create_area(ifq, &area);
	if (ret)
		goto err;

	mp_param.mp_ops = &io_uring_pp_zc_ops;
	mp_param.mp_priv = ifq;
	ret = net_mp_open_rxq(ifq->netdev, reg.if_rxq, &mp_param);
	if (ret)
		goto err;
	ifq->if_rxq = reg.if_rxq;

	reg.zcrx_id = id;

	scoped_guard(mutex, &ctx->mmap_lock) {
		/* publish ifq */
		ret = -ENOMEM;
		if (xa_store(&ctx->zcrx_ctxs, id, ifq, GFP_KERNEL))
			goto err;
	}

	if (copy_to_user(arg, &reg, sizeof(reg)) ||
	    copy_to_user(u64_to_user_ptr(reg.region_ptr), &rd, sizeof(rd)) ||
	    copy_to_user(u64_to_user_ptr(reg.area_ptr), &area, sizeof(area))) {
		ret = -EFAULT;
		goto err;
	}
	return 0;
err:
	scoped_guard(mutex, &ctx->mmap_lock)
		xa_erase(&ctx->zcrx_ctxs, id);
ifq_free:
	io_zcrx_ifq_free(ifq);
	return ret;
}

void io_unregister_zcrx_ifqs(struct io_ring_ctx *ctx)
{
	struct io_zcrx_ifq *ifq;

	lockdep_assert_held(&ctx->uring_lock);

	while (1) {
		scoped_guard(mutex, &ctx->mmap_lock) {
			unsigned long id = 0;

			ifq = xa_find(&ctx->zcrx_ctxs, &id, ULONG_MAX, XA_PRESENT);
			if (ifq)
				xa_erase(&ctx->zcrx_ctxs, id);
		}
		if (!ifq)
			break;
		io_zcrx_ifq_free(ifq);
	}

	xa_destroy(&ctx->zcrx_ctxs);
}

static struct net_iov *__io_zcrx_get_free_niov(struct io_zcrx_area *area)
{
	unsigned niov_idx;

	lockdep_assert_held(&area->freelist_lock);

	niov_idx = area->freelist[--area->free_count];
	return &area->nia.niovs[niov_idx];
}

static void io_zcrx_return_niov_freelist(struct net_iov *niov)
{
	struct io_zcrx_area *area = io_zcrx_iov_to_area(niov);

	spin_lock_bh(&area->freelist_lock);
	area->freelist[area->free_count++] = net_iov_idx(niov);
	spin_unlock_bh(&area->freelist_lock);
}

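/*
 * Return a no-longer-referenced niov. Copy-fallback buffers that never joined
 * a page pool go straight back to the area freelist; everything else is
 * released through its page pool.
 */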
static void io_zcrx_return_niov(struct net_iov *niov)
{
	netmem_ref netmem = net_iov_to_netmem(niov);

	if (!niov->pp) {
		/* copy fallback allocated niovs */
		io_zcrx_return_niov_freelist(niov);
		return;
	}
	page_pool_put_unrefed_netmem(niov->pp, netmem, -1, false);
}

static void io_zcrx_scrub(struct io_zcrx_ifq *ifq)
{
	struct io_zcrx_area *area = ifq->area;
	int i;

	if (!area)
		return;

	/* Reclaim back all buffers given to the user space. */
	for (i = 0; i < area->nia.num_niovs; i++) {
		struct net_iov *niov = &area->nia.niovs[i];
		int nr;

		if (!atomic_read(io_get_user_counter(niov)))
			continue;
		nr = atomic_xchg(io_get_user_counter(niov), 0);
		if (nr && !page_pool_unref_netmem(net_iov_to_netmem(niov), nr))
			io_zcrx_return_niov(niov);
	}
}

void io_shutdown_zcrx_ifqs(struct io_ring_ctx *ctx)
{
	struct io_zcrx_ifq *ifq;
	unsigned long index;

	lockdep_assert_held(&ctx->uring_lock);

	xa_for_each(&ctx->zcrx_ctxs, index, ifq) {
		io_zcrx_scrub(ifq);
		io_close_queue(ifq);
	}
}

static inline u32 io_zcrx_rqring_entries(struct io_zcrx_ifq *ifq)
{
	u32 entries;

	entries = smp_load_acquire(&ifq->rq_ring->tail) - ifq->cached_rq_head;
	return min(entries, ifq->rq_entries);
}

static struct io_uring_zcrx_rqe *io_zcrx_get_rqe(struct io_zcrx_ifq *ifq,
						 unsigned mask)
{
	unsigned int idx = ifq->cached_rq_head++ & mask;

	return &ifq->rqes[idx];
}

static inline bool io_parse_rqe(struct io_uring_zcrx_rqe *rqe,
				struct io_zcrx_ifq *ifq,
				struct net_iov **ret_niov)
{
	unsigned niov_idx, area_idx;
	struct io_zcrx_area *area;

	area_idx = rqe->off >> IORING_ZCRX_AREA_SHIFT;
	niov_idx = (rqe->off & ~IORING_ZCRX_AREA_MASK) >> ifq->niov_shift;

	if (unlikely(rqe->__pad || area_idx))
		return false;
	area = ifq->area;

	if (unlikely(niov_idx >= area->nia.num_niovs))
		return false;
	niov_idx = array_index_nospec(niov_idx, area->nia.num_niovs);

	*ret_niov = &area->nia.niovs[niov_idx];
	return true;
}

static void io_zcrx_ring_refill(struct page_pool *pp,
				struct io_zcrx_ifq *ifq)
{
	unsigned int mask = ifq->rq_entries - 1;
	unsigned int entries;

	guard(spinlock_bh)(&ifq->rq_lock);

	entries = io_zcrx_rqring_entries(ifq);
	entries = min_t(unsigned, entries, PP_ALLOC_CACHE_REFILL);
	if (unlikely(!entries))
		return;

	do {
		struct io_uring_zcrx_rqe *rqe = io_zcrx_get_rqe(ifq, mask);
		struct net_iov *niov;
		netmem_ref netmem;

		if (!io_parse_rqe(rqe, ifq, &niov))
			continue;
		if (!io_zcrx_put_niov_uref(niov))
			continue;

		netmem = net_iov_to_netmem(niov);
		if (!page_pool_unref_and_test(netmem))
			continue;

		if (unlikely(niov->pp != pp)) {
			io_zcrx_return_niov(niov);
			continue;
		}

		io_zcrx_sync_for_device(pp, niov);
		net_mp_netmem_place_in_cache(pp, netmem);
	} while (--entries);

	smp_store_release(&ifq->rq_ring->head, ifq->cached_rq_head);
}

static void io_zcrx_refill_slow(struct page_pool *pp, struct io_zcrx_ifq *ifq)
{
	struct io_zcrx_area *area = ifq->area;

	spin_lock_bh(&area->freelist_lock);
	while (area->free_count && pp->alloc.count < PP_ALLOC_CACHE_REFILL) {
		struct net_iov *niov = __io_zcrx_get_free_niov(area);
		netmem_ref netmem = net_iov_to_netmem(niov);

		net_mp_niov_set_page_pool(pp, niov);
		io_zcrx_sync_for_device(pp, niov);
		net_mp_netmem_place_in_cache(pp, netmem);
	}
	spin_unlock_bh(&area->freelist_lock);
}

static netmem_ref io_pp_zc_alloc_netmems(struct page_pool *pp, gfp_t gfp)
{
	struct io_zcrx_ifq *ifq = io_pp_to_ifq(pp);

	/* pp should already be ensuring that */
	if (unlikely(pp->alloc.count))
		goto out_return;

	io_zcrx_ring_refill(pp, ifq);
	if (likely(pp->alloc.count))
		goto out_return;

	io_zcrx_refill_slow(pp, ifq);
	if (!pp->alloc.count)
		return 0;
out_return:
	return pp->alloc.cache[--pp->alloc.count];
}

static bool io_pp_zc_release_netmem(struct page_pool *pp, netmem_ref netmem)
{
	struct net_iov *niov;

	if (WARN_ON_ONCE(!netmem_is_net_iov(netmem)))
		return false;

	niov = netmem_to_net_iov(netmem);
	net_mp_niov_clear_page_pool(niov);
	io_zcrx_return_niov_freelist(niov);
	return false;
}

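/*
 * Page pool init hook: check that the pool matches the device and niov size
 * the ifq was registered with, DMA-map the area and pin the io_uring context
 * for the lifetime of the pool.
 */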
static int io_pp_zc_init(struct page_pool *pp)
{
	struct io_zcrx_ifq *ifq = io_pp_to_ifq(pp);
	int ret;

	if (WARN_ON_ONCE(!ifq))
		return -EINVAL;
	if (WARN_ON_ONCE(ifq->dev != pp->p.dev))
		return -EINVAL;
	if (WARN_ON_ONCE(!pp->dma_map))
		return -EOPNOTSUPP;
	if (pp->p.order + PAGE_SHIFT != ifq->niov_shift)
		return -EINVAL;
	if (pp->p.dma_dir != DMA_FROM_DEVICE)
		return -EOPNOTSUPP;

	ret = io_zcrx_map_area(ifq, ifq->area);
	if (ret)
		return ret;

	percpu_ref_get(&ifq->ctx->refs);
	return 0;
}

static void io_pp_zc_destroy(struct page_pool *pp)
{
	struct io_zcrx_ifq *ifq = io_pp_to_ifq(pp);

	percpu_ref_put(&ifq->ctx->refs);
}

static int io_pp_nl_fill(void *mp_priv, struct sk_buff *rsp,
			 struct netdev_rx_queue *rxq)
{
	struct nlattr *nest;
	int type;

	type = rxq ? NETDEV_A_QUEUE_IO_URING : NETDEV_A_PAGE_POOL_IO_URING;
	nest = nla_nest_start(rsp, type);
	if (!nest)
		return -EMSGSIZE;
	nla_nest_end(rsp, nest);

	return 0;
}

static void io_pp_uninstall(void *mp_priv, struct netdev_rx_queue *rxq)
{
	struct pp_memory_provider_params *p = &rxq->mp_params;
	struct io_zcrx_ifq *ifq = mp_priv;

	io_zcrx_drop_netdev(ifq);
	if (ifq->area)
		io_zcrx_unmap_area(ifq, ifq->area);

	p->mp_ops = NULL;
	p->mp_priv = NULL;
}

static const struct memory_provider_ops io_uring_pp_zc_ops = {
	.alloc_netmems		= io_pp_zc_alloc_netmems,
	.release_netmem		= io_pp_zc_release_netmem,
	.init			= io_pp_zc_init,
	.destroy		= io_pp_zc_destroy,
	.nl_fill		= io_pp_nl_fill,
	.uninstall		= io_pp_uninstall,
};

static bool io_zcrx_queue_cqe(struct io_kiocb *req, struct net_iov *niov,
			      struct io_zcrx_ifq *ifq, int off, int len)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct io_uring_zcrx_cqe *rcqe;
	struct io_zcrx_area *area;
	struct io_uring_cqe *cqe;
	u64 offset;

	if (!io_defer_get_uncommited_cqe(ctx, &cqe))
		return false;

	cqe->user_data = req->cqe.user_data;
	cqe->res = len;
	cqe->flags = IORING_CQE_F_MORE;
	if (ctx->flags & IORING_SETUP_CQE_MIXED)
		cqe->flags |= IORING_CQE_F_32;

	area = io_zcrx_iov_to_area(niov);
	offset = off + (net_iov_idx(niov) << ifq->niov_shift);
	rcqe = (struct io_uring_zcrx_cqe *)(cqe + 1);
	rcqe->off = offset + ((u64)area->area_id << IORING_ZCRX_AREA_SHIFT);
	rcqe->__pad = 0;
	return true;
}

static struct net_iov *io_alloc_fallback_niov(struct io_zcrx_ifq *ifq)
{
	struct io_zcrx_area *area = ifq->area;
	struct net_iov *niov = NULL;

	if (area->mem.is_dmabuf)
		return NULL;

	spin_lock_bh(&area->freelist_lock);
	if (area->free_count)
		niov = __io_zcrx_get_free_niov(area);
	spin_unlock_bh(&area->freelist_lock);

	if (niov)
		page_pool_fragment_netmem(net_iov_to_netmem(niov), 1);
	return niov;
}

struct io_copy_cache {
	struct page		*page;
	unsigned long		offset;
	size_t			size;
};

static ssize_t io_copy_page(struct io_copy_cache *cc, struct page *src_page,
			    unsigned int src_offset, size_t len)
{
	size_t copied = 0;

	len = min(len, cc->size);

	while (len) {
		void *src_addr, *dst_addr;
		struct page *dst_page = cc->page;
		unsigned dst_offset = cc->offset;
		size_t n = len;

		if (folio_test_partial_kmap(page_folio(dst_page)) ||
		    folio_test_partial_kmap(page_folio(src_page))) {
			dst_page += dst_offset / PAGE_SIZE;
			dst_offset = offset_in_page(dst_offset);
			src_page += src_offset / PAGE_SIZE;
			src_offset = offset_in_page(src_offset);
			n = min(PAGE_SIZE - src_offset, PAGE_SIZE - dst_offset);
			n = min(n, len);
		}

		dst_addr = kmap_local_page(dst_page) + dst_offset;
		src_addr = kmap_local_page(src_page) + src_offset;

		memcpy(dst_addr, src_addr, n);

		kunmap_local(src_addr);
		kunmap_local(dst_addr);

		cc->size -= n;
		cc->offset += n;
		src_offset += n;
		len -= n;
		copied += n;
	}
	return copied;
}

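/*
 * Copy fallback for data that cannot be delivered zero-copy (linear skb data
 * and frags not backed by this ifq): pull free niovs from the area, copy into
 * their pages and post one CQE per filled niov.
 */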
static ssize_t io_zcrx_copy_chunk(struct io_kiocb *req, struct io_zcrx_ifq *ifq,
				  struct page *src_page, unsigned int src_offset,
				  size_t len)
{
	size_t copied = 0;
	int ret = 0;

	while (len) {
		struct io_copy_cache cc;
		struct net_iov *niov;
		size_t n;

		niov = io_alloc_fallback_niov(ifq);
		if (!niov) {
			ret = -ENOMEM;
			break;
		}

		cc.page = io_zcrx_iov_page(niov);
		cc.offset = 0;
		cc.size = PAGE_SIZE;

		n = io_copy_page(&cc, src_page, src_offset, len);

		if (!io_zcrx_queue_cqe(req, niov, ifq, 0, n)) {
			io_zcrx_return_niov(niov);
			ret = -ENOSPC;
			break;
		}

		io_zcrx_get_niov_uref(niov);
		src_offset += n;
		len -= n;
		copied += n;
	}

	return copied ? copied : ret;
}

static int io_zcrx_copy_frag(struct io_kiocb *req, struct io_zcrx_ifq *ifq,
			     const skb_frag_t *frag, int off, int len)
{
	struct page *page = skb_frag_page(frag);

	return io_zcrx_copy_chunk(req, ifq, page, off + skb_frag_off(frag), len);
}

static int io_zcrx_recv_frag(struct io_kiocb *req, struct io_zcrx_ifq *ifq,
			     const skb_frag_t *frag, int off, int len)
{
	struct net_iov *niov;

	if (unlikely(!skb_frag_is_net_iov(frag)))
		return io_zcrx_copy_frag(req, ifq, frag, off, len);

	niov = netmem_to_net_iov(frag->netmem);
	if (!niov->pp || niov->pp->mp_ops != &io_uring_pp_zc_ops ||
	    io_pp_to_ifq(niov->pp) != ifq)
		return -EFAULT;

	if (!io_zcrx_queue_cqe(req, niov, ifq, off + skb_frag_off(frag), len))
		return -ENOSPC;

	/*
	 * Prevent it from being recycled while user is accessing it.
	 * It has to be done before grabbing a user reference.
	 */
	page_pool_ref_netmem(net_iov_to_netmem(niov));
	io_zcrx_get_niov_uref(niov);
	return len;
}

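/*
 * tcp_read_sock() callback. Walks the skb: the linear part and frags not
 * owned by this ifq are copied, while frags backed by the ifq's niovs are
 * delivered zero-copy by posting a CQE and taking page pool and user
 * references.
 */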
static int
io_zcrx_recv_skb(read_descriptor_t *desc, struct sk_buff *skb,
		 unsigned int offset, size_t len)
{
	struct io_zcrx_args *args = desc->arg.data;
	struct io_zcrx_ifq *ifq = args->ifq;
	struct io_kiocb *req = args->req;
	struct sk_buff *frag_iter;
	unsigned start, start_off = offset;
	int i, copy, end, off;
	int ret = 0;

	len = min_t(size_t, len, desc->count);
	/*
	 * __tcp_read_sock() always calls io_zcrx_recv_skb one last time, even
	 * if desc->count is already 0. This is caused by the if (offset + 1 !=
	 * skb->len) check. Return early in this case to break out of
	 * __tcp_read_sock().
	 */
	if (!len)
		return 0;
	if (unlikely(args->nr_skbs++ > IO_SKBS_PER_CALL_LIMIT))
		return -EAGAIN;

	if (unlikely(offset < skb_headlen(skb))) {
		ssize_t copied;
		size_t to_copy;

		to_copy = min_t(size_t, skb_headlen(skb) - offset, len);
		copied = io_zcrx_copy_chunk(req, ifq, virt_to_page(skb->data),
					    offset_in_page(skb->data) + offset,
					    to_copy);
		if (copied < 0) {
			ret = copied;
			goto out;
		}
		offset += copied;
		len -= copied;
		if (!len)
			goto out;
		if (offset != skb_headlen(skb))
			goto out;
	}

	start = skb_headlen(skb);

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const skb_frag_t *frag;

		if (WARN_ON(start > offset + len))
			return -EFAULT;

		frag = &skb_shinfo(skb)->frags[i];
		end = start + skb_frag_size(frag);

		if (offset < end) {
			copy = end - offset;
			if (copy > len)
				copy = len;

			off = offset - start;
			ret = io_zcrx_recv_frag(req, ifq, frag, off, copy);
			if (ret < 0)
				goto out;

			offset += ret;
			len -= ret;
			if (len == 0 || ret != copy)
				goto out;
		}
		start = end;
	}

	skb_walk_frags(skb, frag_iter) {
		if (WARN_ON(start > offset + len))
			return -EFAULT;

		end = start + frag_iter->len;
		if (offset < end) {
			size_t count;

			copy = end - offset;
			if (copy > len)
				copy = len;

			off = offset - start;
			count = desc->count;
			ret = io_zcrx_recv_skb(desc, frag_iter, off, copy);
			desc->count = count;
			if (ret < 0)
				goto out;

			offset += ret;
			len -= ret;
			if (len == 0 || ret != copy)
				goto out;
		}
		start = end;
	}

out:
	if (offset == start_off)
		return ret;
	desc->count -= (offset - start_off);
	return offset - start_off;
}

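/*
 * Run the receive loop under the socket lock and translate the result of
 * tcp_read_sock() into an io_uring return code, requeueing multishot
 * requests that were capped by IO_SKBS_PER_CALL_LIMIT.
 */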
static int io_zcrx_tcp_recvmsg(struct io_kiocb *req, struct io_zcrx_ifq *ifq,
			       struct sock *sk, int flags,
			       unsigned issue_flags, unsigned int *outlen)
{
	unsigned int len = *outlen;
	struct io_zcrx_args args = {
		.req = req,
		.ifq = ifq,
		.sock = sk->sk_socket,
	};
	read_descriptor_t rd_desc = {
		.count = len ? len : UINT_MAX,
		.arg.data = &args,
	};
	int ret;

	lock_sock(sk);
	ret = tcp_read_sock(sk, &rd_desc, io_zcrx_recv_skb);
	if (len && ret > 0)
		*outlen = len - ret;
	if (ret <= 0) {
		if (ret < 0 || sock_flag(sk, SOCK_DONE))
			goto out;
		if (sk->sk_err)
			ret = sock_error(sk);
		else if (sk->sk_shutdown & RCV_SHUTDOWN)
			goto out;
		else if (sk->sk_state == TCP_CLOSE)
			ret = -ENOTCONN;
		else
			ret = -EAGAIN;
	} else if (unlikely(args.nr_skbs > IO_SKBS_PER_CALL_LIMIT) &&
		   (issue_flags & IO_URING_F_MULTISHOT)) {
		ret = IOU_REQUEUE;
	} else if (sock_flag(sk, SOCK_DONE)) {
		/* Make it to retry until it finally gets 0. */
		if (issue_flags & IO_URING_F_MULTISHOT)
			ret = IOU_REQUEUE;
		else
			ret = -EAGAIN;
	}
out:
	release_sock(sk);
	return ret;
}

int io_zcrx_recv(struct io_kiocb *req, struct io_zcrx_ifq *ifq,
		 struct socket *sock, unsigned int flags,
		 unsigned issue_flags, unsigned int *len)
{
	struct sock *sk = sock->sk;
	const struct proto *prot = READ_ONCE(sk->sk_prot);

	if (prot->recvmsg != tcp_recvmsg)
		return -EPROTONOSUPPORT;

	sock_rps_record_flow(sk);
	return io_zcrx_tcp_recvmsg(req, ifq, sk, flags, issue_flags, len);
}