// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/dma-map-ops.h>
#include <linux/mm.h>
#include <linux/nospec.h>
#include <linux/io_uring.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/skbuff_ref.h>
#include <linux/anon_inodes.h>

#include <net/page_pool/helpers.h>
#include <net/page_pool/memory_provider.h>
#include <net/netlink.h>
#include <net/netdev_queues.h>
#include <net/netdev_rx_queue.h>
#include <net/tcp.h>
#include <net/rps.h>

#include <trace/events/page_pool.h>

#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "kbuf.h"
#include "memmap.h"
#include "zcrx.h"
#include "rsrc.h"

#define IO_ZCRX_AREA_SUPPORTED_FLAGS	(IORING_ZCRX_AREA_DMABUF)

#define IO_DMA_ATTR (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING)

static inline struct io_zcrx_ifq *io_pp_to_ifq(struct page_pool *pp)
{
	return pp->mp_priv;
}

static inline struct io_zcrx_area *io_zcrx_iov_to_area(const struct net_iov *niov)
{
	struct net_iov_area *owner = net_iov_owner(niov);

	return container_of(owner, struct io_zcrx_area, nia);
}

static inline struct page *io_zcrx_iov_page(const struct net_iov *niov)
{
	struct io_zcrx_area *area = io_zcrx_iov_to_area(niov);
	unsigned niov_pages_shift;

	lockdep_assert(!area->mem.is_dmabuf);

	niov_pages_shift = area->ifq->niov_shift - PAGE_SHIFT;
	return area->mem.pages[net_iov_idx(niov) << niov_pages_shift];
}

static int io_area_max_shift(struct io_zcrx_mem *mem)
{
	struct sg_table *sgt = mem->sgt;
	struct scatterlist *sg;
	unsigned shift = -1U;
	unsigned i;

	for_each_sgtable_dma_sg(sgt, sg, i)
		shift = min(shift, __ffs(sg_dma_len(sg)));
	return shift;
}

static int io_populate_area_dma(struct io_zcrx_ifq *ifq,
				struct io_zcrx_area *area)
{
	unsigned niov_size = 1U << ifq->niov_shift;
	struct sg_table *sgt = area->mem.sgt;
	struct scatterlist *sg;
	unsigned i, niov_idx = 0;

	for_each_sgtable_dma_sg(sgt, sg, i) {
		dma_addr_t dma = sg_dma_address(sg);
		unsigned long sg_len = sg_dma_len(sg);

		if (WARN_ON_ONCE(sg_len % niov_size))
			return -EINVAL;

		while (sg_len && niov_idx < area->nia.num_niovs) {
			struct net_iov *niov = &area->nia.niovs[niov_idx];

			if (net_mp_niov_set_dma_addr(niov, dma))
				return -EFAULT;
			sg_len -= niov_size;
			dma += niov_size;
			niov_idx++;
		}
	}

	if (WARN_ON_ONCE(niov_idx != area->nia.num_niovs))
		return -EFAULT;
	return 0;
}

static void io_release_dmabuf(struct io_zcrx_mem *mem)
{
	if (!IS_ENABLED(CONFIG_DMA_SHARED_BUFFER))
		return;

	if (mem->sgt)
		dma_buf_unmap_attachment_unlocked(mem->attach, mem->sgt,
						  DMA_FROM_DEVICE);
	if (mem->attach)
		dma_buf_detach(mem->dmabuf, mem->attach);
	if (mem->dmabuf)
		dma_buf_put(mem->dmabuf);

	mem->sgt = NULL;
	mem->attach = NULL;
	mem->dmabuf = NULL;
}

static int io_import_dmabuf(struct io_zcrx_ifq *ifq,
			    struct io_zcrx_mem *mem,
			    struct io_uring_zcrx_area_reg *area_reg)
{
	unsigned long off = (unsigned long)area_reg->addr;
	unsigned long len = (unsigned long)area_reg->len;
	unsigned long total_size = 0;
	struct scatterlist *sg;
	int dmabuf_fd = area_reg->dmabuf_fd;
	int i, ret;

	if (!ifq->dev)
		return -EINVAL;
	if (off)
		return -EINVAL;
	if (!IS_ENABLED(CONFIG_DMA_SHARED_BUFFER))
		return -EINVAL;

	mem->is_dmabuf = true;
	mem->dmabuf = dma_buf_get(dmabuf_fd);
	if (IS_ERR(mem->dmabuf)) {
		ret = PTR_ERR(mem->dmabuf);
		mem->dmabuf = NULL;
		goto err;
	}

	mem->attach = dma_buf_attach(mem->dmabuf, ifq->dev);
	if (IS_ERR(mem->attach)) {
		ret = PTR_ERR(mem->attach);
		mem->attach = NULL;
		goto err;
	}

	mem->sgt = dma_buf_map_attachment_unlocked(mem->attach, DMA_FROM_DEVICE);
	if (IS_ERR(mem->sgt)) {
		ret = PTR_ERR(mem->sgt);
		mem->sgt = NULL;
		goto err;
	}

	for_each_sgtable_dma_sg(mem->sgt, sg, i)
		total_size += sg_dma_len(sg);

	if (total_size != len) {
		ret = -EINVAL;
		goto err;
	}

	mem->size = len;
	return 0;
err:
	io_release_dmabuf(mem);
	return ret;
}

static unsigned long io_count_account_pages(struct page **pages, unsigned nr_pages)
{
	struct folio *last_folio = NULL;
	unsigned long res = 0;
	int i;

	for (i = 0; i < nr_pages; i++) {
		struct folio *folio = page_folio(pages[i]);

		if (folio == last_folio)
			continue;
		last_folio = folio;
		res += folio_nr_pages(folio);
	}
	return res;
}

static int io_import_umem(struct io_zcrx_ifq *ifq,
			  struct io_zcrx_mem *mem,
			  struct io_uring_zcrx_area_reg *area_reg)
{
	struct page **pages;
	int nr_pages, ret;
	bool mapped = false;

	if (area_reg->dmabuf_fd)
		return -EINVAL;
	if (!area_reg->addr)
		return -EFAULT;
	pages = io_pin_pages((unsigned long)area_reg->addr, area_reg->len,
			     &nr_pages);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	ret = sg_alloc_table_from_pages(&mem->page_sg_table, pages, nr_pages,
					0, (unsigned long)nr_pages << PAGE_SHIFT,
					GFP_KERNEL_ACCOUNT);
	if (ret)
		goto out_err;

	if (ifq->dev) {
		ret = dma_map_sgtable(ifq->dev, &mem->page_sg_table,
				      DMA_FROM_DEVICE, IO_DMA_ATTR);
		if (ret < 0)
			goto out_err;
		mapped = true;
	}

	mem->account_pages = io_count_account_pages(pages, nr_pages);
	ret = io_account_mem(ifq->user, ifq->mm_account, mem->account_pages);
	if (ret < 0) {
		mem->account_pages = 0;
		goto out_err;
	}

	mem->sgt = &mem->page_sg_table;
	mem->pages = pages;
	mem->nr_folios = nr_pages;
	mem->size = area_reg->len;
	return ret;
out_err:
	if (mapped)
		dma_unmap_sgtable(ifq->dev, &mem->page_sg_table,
				  DMA_FROM_DEVICE, IO_DMA_ATTR);
	sg_free_table(&mem->page_sg_table);
	unpin_user_pages(pages, nr_pages);
	kvfree(pages);
	return ret;
}

static void io_release_area_mem(struct io_zcrx_mem *mem)
{
	if (mem->is_dmabuf) {
		io_release_dmabuf(mem);
		return;
	}
	if (mem->pages) {
		unpin_user_pages(mem->pages, mem->nr_folios);
		sg_free_table(mem->sgt);
		mem->sgt = NULL;
		kvfree(mem->pages);
	}
}

static int io_import_area(struct io_zcrx_ifq *ifq,
			  struct io_zcrx_mem *mem,
			  struct io_uring_zcrx_area_reg *area_reg)
{
	int ret;

	if (area_reg->flags & ~IO_ZCRX_AREA_SUPPORTED_FLAGS)
		return -EINVAL;
	if (area_reg->rq_area_token)
		return -EINVAL;
	if (area_reg->__resv2[0] || area_reg->__resv2[1])
		return -EINVAL;

	ret = io_validate_user_buf_range(area_reg->addr, area_reg->len);
	if (ret)
		return ret;
	if (area_reg->addr & ~PAGE_MASK || area_reg->len & ~PAGE_MASK)
		return -EINVAL;

	if (area_reg->flags & IORING_ZCRX_AREA_DMABUF)
		return io_import_dmabuf(ifq, mem, area_reg);
	return io_import_umem(ifq, mem, area_reg);
}

static void io_zcrx_unmap_area(struct io_zcrx_ifq *ifq,
			       struct io_zcrx_area *area)
{
	int i;

	guard(mutex)(&ifq->pp_lock);
	if (!area->is_mapped)
		return;
	area->is_mapped = false;

	if (area->nia.niovs) {
		for (i = 0; i < area->nia.num_niovs; i++)
			net_mp_niov_set_dma_addr(&area->nia.niovs[i], 0);
	}

	if (area->mem.is_dmabuf) {
		io_release_dmabuf(&area->mem);
	} else {
		dma_unmap_sgtable(ifq->dev, &area->mem.page_sg_table,
				  DMA_FROM_DEVICE, IO_DMA_ATTR);
	}
}

static void zcrx_sync_for_device(struct page_pool *pp, struct io_zcrx_ifq *zcrx,
				 netmem_ref *netmems, unsigned nr)
{
#if defined(CONFIG_HAS_DMA) && defined(CONFIG_DMA_NEED_SYNC)
	struct device *dev = pp->p.dev;
	unsigned i, niov_size;
	dma_addr_t dma_addr;

	if (!dma_dev_need_sync(dev))
		return;
	niov_size = 1U << zcrx->niov_shift;

	for (i = 0; i < nr; i++) {
		dma_addr = page_pool_get_dma_addr_netmem(netmems[i]);
		__dma_sync_single_for_device(dev, dma_addr + pp->p.offset,
					     niov_size, pp->p.dma_dir);
	}
#endif
}

#define IO_RQ_MAX_ENTRIES		32768

#define IO_SKBS_PER_CALL_LIMIT		20

struct io_zcrx_args {
	struct io_kiocb		*req;
	struct io_zcrx_ifq	*ifq;
	struct socket		*sock;
	unsigned		nr_skbs;
};

static const struct memory_provider_ops io_uring_pp_zc_ops;

static inline atomic_t *io_get_user_counter(struct net_iov *niov)
{
	struct io_zcrx_area *area = io_zcrx_iov_to_area(niov);

	return &area->user_refs[net_iov_idx(niov)];
}

static bool io_zcrx_put_niov_uref(struct net_iov *niov)
{
	atomic_t *uref = io_get_user_counter(niov);
	int old;

	old = atomic_read(uref);
	do {
		if (unlikely(old == 0))
			return false;
	} while (!atomic_try_cmpxchg(uref, &old, old - 1));

	return true;
}

static void io_zcrx_get_niov_uref(struct net_iov *niov)
{
	atomic_inc(io_get_user_counter(niov));
}

static void io_fill_zcrx_offsets(struct io_uring_zcrx_offsets *offsets)
{
	offsets->head = offsetof(struct io_uring, head);
	offsets->tail = offsetof(struct io_uring, tail);
	offsets->rqes = ALIGN(sizeof(struct io_uring), L1_CACHE_BYTES);
}

static int io_allocate_rbuf_ring(struct io_ring_ctx *ctx,
				 struct io_zcrx_ifq *ifq,
				 struct io_uring_zcrx_ifq_reg *reg,
				 struct io_uring_region_desc *rd,
				 u32 id)
{
	u64 mmap_offset;
	size_t off, size;
	void *ptr;
	int ret;

	io_fill_zcrx_offsets(&reg->offsets);
	off = reg->offsets.rqes;
	size = off + sizeof(struct io_uring_zcrx_rqe) * reg->rq_entries;
	if (size > rd->size)
		return -EINVAL;

	mmap_offset = IORING_MAP_OFF_ZCRX_REGION;
	mmap_offset += (u64)id << IORING_OFF_ZCRX_SHIFT;

	ret = io_create_region(ctx, &ifq->rq_region, rd, mmap_offset);
	if (ret < 0)
		return ret;

	ptr = io_region_get_ptr(&ifq->rq_region);
	ifq->rq.ring = (struct io_uring *)ptr;
	ifq->rq.rqes = (struct io_uring_zcrx_rqe *)(ptr + off);

	return 0;
}

static void io_free_rbuf_ring(struct io_zcrx_ifq *ifq)
{
	io_free_region(ifq->user, &ifq->rq_region);
	ifq->rq.ring = NULL;
	ifq->rq.rqes = NULL;
}

static void io_zcrx_free_area(struct io_zcrx_ifq *ifq,
			      struct io_zcrx_area *area)
{
	io_zcrx_unmap_area(ifq, area);
	io_release_area_mem(&area->mem);

	if (area->mem.account_pages)
		io_unaccount_mem(ifq->user, ifq->mm_account,
				 area->mem.account_pages);

	kvfree(area->freelist);
	kvfree(area->nia.niovs);
	kvfree(area->user_refs);
	kfree(area);
}

static int io_zcrx_append_area(struct io_zcrx_ifq *ifq,
			       struct io_zcrx_area *area)
{
	bool kern_readable = !area->mem.is_dmabuf;

	if (WARN_ON_ONCE(ifq->area))
		return -EINVAL;
	if (WARN_ON_ONCE(ifq->kern_readable != kern_readable))
		return -EINVAL;

	ifq->area = area;
	return 0;
}

static int io_zcrx_create_area(struct io_zcrx_ifq *ifq,
			       struct io_uring_zcrx_area_reg *area_reg,
			       struct io_uring_zcrx_ifq_reg *reg)
{
	int buf_size_shift = PAGE_SHIFT;
	struct io_zcrx_area *area;
	unsigned nr_iovs;
	int i, ret;

	if (reg->rx_buf_len) {
		if (!is_power_of_2(reg->rx_buf_len) ||
		    reg->rx_buf_len < PAGE_SIZE)
			return -EINVAL;
		buf_size_shift = ilog2(reg->rx_buf_len);
	}
	if (!ifq->dev && buf_size_shift != PAGE_SHIFT)
		return -EOPNOTSUPP;

	ret = -ENOMEM;
	area = kzalloc_obj(*area);
	if (!area)
		goto err;
	area->ifq = ifq;

	ret = io_import_area(ifq, &area->mem, area_reg);
	if (ret)
		goto err;
	if (ifq->dev)
		area->is_mapped = true;

	if (ifq->dev && buf_size_shift > io_area_max_shift(&area->mem)) {
		ret = -ERANGE;
		goto err;
	}

	ifq->niov_shift = buf_size_shift;
	nr_iovs = area->mem.size >> ifq->niov_shift;
	area->nia.num_niovs = nr_iovs;

	ret = -ENOMEM;
	area->nia.niovs = kvmalloc_objs(area->nia.niovs[0], nr_iovs,
					GFP_KERNEL_ACCOUNT | __GFP_ZERO);
	if (!area->nia.niovs)
		goto err;

	area->freelist = kvmalloc_array(nr_iovs, sizeof(area->freelist[0]),
					GFP_KERNEL_ACCOUNT | __GFP_ZERO);
	if (!area->freelist)
		goto err;

	area->user_refs = kvmalloc_objs(area->user_refs[0], nr_iovs,
					GFP_KERNEL_ACCOUNT | __GFP_ZERO);
	if (!area->user_refs)
		goto err;

	for (i = 0; i < nr_iovs; i++) {
		struct net_iov *niov = &area->nia.niovs[i];

		niov->owner = &area->nia;
		area->freelist[i] = i;
		atomic_set(&area->user_refs[i], 0);
		niov->type = NET_IOV_IOURING;
	}

	if (ifq->dev) {
		ret = io_populate_area_dma(ifq, area);
		if (ret)
			goto err;
	}

	area->free_count = nr_iovs;
	/* we're only supporting one area per ifq for now */
	area->area_id = 0;
	area_reg->rq_area_token = (u64)area->area_id << IORING_ZCRX_AREA_SHIFT;
	spin_lock_init(&area->freelist_lock);

	ret = io_zcrx_append_area(ifq, area);
	if (!ret)
		return 0;
err:
	if (area)
		io_zcrx_free_area(ifq, area);
	return ret;
}

static struct io_zcrx_ifq *io_zcrx_ifq_alloc(struct io_ring_ctx *ctx)
{
	struct io_zcrx_ifq *ifq;

	ifq = kzalloc_obj(*ifq);
	if (!ifq)
		return NULL;

	ifq->if_rxq = -1;
	spin_lock_init(&ifq->rq.lock);
	mutex_init(&ifq->pp_lock);
	refcount_set(&ifq->refs, 1);
	refcount_set(&ifq->user_refs, 1);
	return ifq;
}

static void io_zcrx_drop_netdev(struct io_zcrx_ifq *ifq)
{
	guard(mutex)(&ifq->pp_lock);

	if (!ifq->netdev)
		return;
	netdev_put(ifq->netdev, &ifq->netdev_tracker);
	ifq->netdev = NULL;
}

static void io_close_queue(struct io_zcrx_ifq *ifq)
{
	struct net_device *netdev;
	netdevice_tracker netdev_tracker;
	struct pp_memory_provider_params p = {
		.mp_ops = &io_uring_pp_zc_ops,
		.mp_priv = ifq,
	};

	scoped_guard(mutex, &ifq->pp_lock) {
		netdev = ifq->netdev;
		netdev_tracker = ifq->netdev_tracker;
		ifq->netdev = NULL;
	}

	if (netdev) {
		if (ifq->if_rxq != -1)
			net_mp_close_rxq(netdev, ifq->if_rxq, &p);
		netdev_put(netdev, &netdev_tracker);
	}
	ifq->if_rxq = -1;
}

static void io_zcrx_ifq_free(struct io_zcrx_ifq *ifq)
{
	io_close_queue(ifq);

	if (ifq->area)
		io_zcrx_free_area(ifq, ifq->area);
	free_uid(ifq->user);
	if (ifq->mm_account)
		mmdrop(ifq->mm_account);
	if (ifq->dev)
		put_device(ifq->dev);

	io_free_rbuf_ring(ifq);
	mutex_destroy(&ifq->pp_lock);
	kfree(ifq);
}

static void io_put_zcrx_ifq(struct io_zcrx_ifq *ifq)
{
	if (refcount_dec_and_test(&ifq->refs))
		io_zcrx_ifq_free(ifq);
}

static void io_zcrx_return_niov_freelist(struct net_iov *niov)
{
	struct io_zcrx_area *area = io_zcrx_iov_to_area(niov);

	guard(spinlock_bh)(&area->freelist_lock);
	area->freelist[area->free_count++] = net_iov_idx(niov);
}

static struct net_iov *zcrx_get_free_niov(struct io_zcrx_area *area)
{
	unsigned niov_idx;

	lockdep_assert_held(&area->freelist_lock);

	if (unlikely(!area->free_count))
		return NULL;

	niov_idx = area->freelist[--area->free_count];
	return &area->nia.niovs[niov_idx];
}

static void io_zcrx_return_niov(struct net_iov *niov)
{
	netmem_ref netmem = net_iov_to_netmem(niov);

	if (!niov->desc.pp) {
		/* copy fallback allocated niovs */
		io_zcrx_return_niov_freelist(niov);
		return;
	}
	page_pool_put_unrefed_netmem(niov->desc.pp, netmem, -1, false);
}

static void io_zcrx_scrub(struct io_zcrx_ifq *ifq)
{
	struct io_zcrx_area *area = ifq->area;
	int i;

	if (!area)
		return;

	/* Reclaim back all buffers given to the user space. */
	for (i = 0; i < area->nia.num_niovs; i++) {
		struct net_iov *niov = &area->nia.niovs[i];
		int nr;

		if (!atomic_read(io_get_user_counter(niov)))
			continue;
		nr = atomic_xchg(io_get_user_counter(niov), 0);
		if (nr && !page_pool_unref_netmem(net_iov_to_netmem(niov), nr))
			io_zcrx_return_niov(niov);
	}
}

static void zcrx_unregister_user(struct io_zcrx_ifq *ifq)
{
	if (refcount_dec_and_test(&ifq->user_refs)) {
		io_close_queue(ifq);
		io_zcrx_scrub(ifq);
	}
}

static void zcrx_unregister(struct io_zcrx_ifq *ifq)
{
	zcrx_unregister_user(ifq);
	io_put_zcrx_ifq(ifq);
}

struct io_mapped_region *io_zcrx_get_region(struct io_ring_ctx *ctx,
					    unsigned int id)
{
	struct io_zcrx_ifq *ifq = xa_load(&ctx->zcrx_ctxs, id);

	lockdep_assert_held(&ctx->mmap_lock);

	return ifq ? &ifq->rq_region : NULL;
}

static int zcrx_box_release(struct inode *inode, struct file *file)
{
	struct io_zcrx_ifq *ifq = file->private_data;

	if (WARN_ON_ONCE(!ifq))
		return -EFAULT;
	zcrx_unregister(ifq);
	return 0;
}

static const struct file_operations zcrx_box_fops = {
	.owner		= THIS_MODULE,
	.release	= zcrx_box_release,
};

static int zcrx_export(struct io_ring_ctx *ctx, struct io_zcrx_ifq *ifq,
		       struct zcrx_ctrl *ctrl, void __user *arg)
{
	struct zcrx_ctrl_export *ce = &ctrl->zc_export;
	struct file *file;
	int fd = -1;

	if (!mem_is_zero(ce, sizeof(*ce)))
		return -EINVAL;
	fd = get_unused_fd_flags(O_CLOEXEC);
	if (fd < 0)
		return fd;

	ce->zcrx_fd = fd;
	if (copy_to_user(arg, ctrl, sizeof(*ctrl))) {
		put_unused_fd(fd);
		return -EFAULT;
	}

	refcount_inc(&ifq->refs);
	refcount_inc(&ifq->user_refs);

	file = anon_inode_create_getfile("[zcrx]", &zcrx_box_fops,
					 ifq, O_CLOEXEC, NULL);
	if (IS_ERR(file)) {
		put_unused_fd(fd);
		zcrx_unregister(ifq);
		return PTR_ERR(file);
	}

	fd_install(fd, file);
	return 0;
}

static int import_zcrx(struct io_ring_ctx *ctx,
		       struct io_uring_zcrx_ifq_reg __user *arg,
		       struct io_uring_zcrx_ifq_reg *reg)
{
	struct io_zcrx_ifq *ifq;
	struct file *file;
	int fd, ret;
	u32 id;

	if (!(ctx->flags & IORING_SETUP_DEFER_TASKRUN))
		return -EINVAL;
	if (!(ctx->flags & (IORING_SETUP_CQE32|IORING_SETUP_CQE_MIXED)))
		return -EINVAL;
	if (reg->if_rxq || reg->rq_entries || reg->area_ptr || reg->region_ptr)
		return -EINVAL;
	if (reg->flags & ~ZCRX_REG_IMPORT)
		return -EINVAL;

	fd = reg->if_idx;
	CLASS(fd, f)(fd);
	if (fd_empty(f))
		return -EBADF;

	file = fd_file(f);
	if (file->f_op != &zcrx_box_fops || !file->private_data)
		return -EBADF;

	ifq = file->private_data;
	refcount_inc(&ifq->refs);
	refcount_inc(&ifq->user_refs);

	scoped_guard(mutex, &ctx->mmap_lock) {
		ret = xa_alloc(&ctx->zcrx_ctxs, &id, NULL, xa_limit_31b, GFP_KERNEL);
		if (ret)
			goto err;
	}

	reg->zcrx_id = id;
	io_fill_zcrx_offsets(&reg->offsets);
	if (copy_to_user(arg, reg, sizeof(*reg))) {
		ret = -EFAULT;
		goto err_xa_erase;
	}

	scoped_guard(mutex, &ctx->mmap_lock) {
		ret = -ENOMEM;
		if (xa_store(&ctx->zcrx_ctxs, id, ifq, GFP_KERNEL))
			goto err_xa_erase;
	}

	return 0;
err_xa_erase:
	scoped_guard(mutex, &ctx->mmap_lock)
		xa_erase(&ctx->zcrx_ctxs, id);
err:
	zcrx_unregister(ifq);
	return ret;
}

static int zcrx_register_netdev(struct io_zcrx_ifq *ifq,
				struct io_uring_zcrx_ifq_reg *reg,
				struct io_uring_zcrx_area_reg *area)
{
	struct pp_memory_provider_params mp_param = {};
	unsigned if_rxq = reg->if_rxq;
	int ret;

	ifq->netdev = netdev_get_by_index_lock(current->nsproxy->net_ns,
					       reg->if_idx);
	if (!ifq->netdev)
		return -ENODEV;

	netdev_hold(ifq->netdev, &ifq->netdev_tracker, GFP_KERNEL);

	ifq->dev = netdev_queue_get_dma_dev(ifq->netdev, if_rxq);
	if (!ifq->dev) {
		ret = -EOPNOTSUPP;
		goto netdev_put_unlock;
	}
	get_device(ifq->dev);

	ret = io_zcrx_create_area(ifq, area, reg);
	if (ret)
		goto netdev_put_unlock;

	if (reg->rx_buf_len)
		mp_param.rx_page_size = 1U << ifq->niov_shift;
	mp_param.mp_ops = &io_uring_pp_zc_ops;
	mp_param.mp_priv = ifq;
	ret = __net_mp_open_rxq(ifq->netdev, if_rxq, &mp_param, NULL);
	if (ret)
		goto netdev_put_unlock;

	ifq->if_rxq = if_rxq;
	ret = 0;
netdev_put_unlock:
	netdev_unlock(ifq->netdev);
	return ret;
}

int io_register_zcrx(struct io_ring_ctx *ctx,
		     struct io_uring_zcrx_ifq_reg __user *arg)
{
	struct io_uring_zcrx_area_reg area;
	struct io_uring_zcrx_ifq_reg reg;
	struct io_uring_region_desc rd;
	struct io_zcrx_ifq *ifq;
	int ret;
	u32 id;

	/*
	 * 1. Interface queue allocation.
	 * 2. It can observe data destined for sockets of other tasks.
	 */
	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	/* mandatory io_uring features for zc rx */
	if (!(ctx->flags & IORING_SETUP_DEFER_TASKRUN))
		return -EINVAL;
	if (!(ctx->flags & (IORING_SETUP_CQE32|IORING_SETUP_CQE_MIXED)))
		return -EINVAL;
	if (copy_from_user(&reg, arg, sizeof(reg)))
		return -EFAULT;
	if (!mem_is_zero(&reg.__resv, sizeof(reg.__resv)) || reg.zcrx_id)
		return -EINVAL;
	if (reg.flags & ~ZCRX_SUPPORTED_REG_FLAGS)
		return -EINVAL;
	if (reg.flags & ZCRX_REG_IMPORT)
		return import_zcrx(ctx, arg, &reg);
	if (copy_from_user(&rd, u64_to_user_ptr(reg.region_ptr), sizeof(rd)))
		return -EFAULT;
	if (reg.if_rxq == -1 || !reg.rq_entries)
		return -EINVAL;
	if ((reg.if_rxq || reg.if_idx) && (reg.flags & ZCRX_REG_NODEV))
		return -EINVAL;
	if (reg.rq_entries > IO_RQ_MAX_ENTRIES) {
		if (!(ctx->flags & IORING_SETUP_CLAMP))
			return -EINVAL;
		reg.rq_entries = IO_RQ_MAX_ENTRIES;
	}
	reg.rq_entries = roundup_pow_of_two(reg.rq_entries);

	if (copy_from_user(&area, u64_to_user_ptr(reg.area_ptr), sizeof(area)))
		return -EFAULT;

	ifq = io_zcrx_ifq_alloc(ctx);
	if (!ifq)
		return -ENOMEM;

	if (ctx->user) {
		get_uid(ctx->user);
		ifq->user = ctx->user;
	}
	if (ctx->mm_account) {
		mmgrab(ctx->mm_account);
		ifq->mm_account = ctx->mm_account;
	}
	ifq->rq.nr_entries = reg.rq_entries;

	scoped_guard(mutex, &ctx->mmap_lock) {
		/* preallocate id */
		ret = xa_alloc(&ctx->zcrx_ctxs, &id, NULL, xa_limit_31b, GFP_KERNEL);
		if (ret)
			goto ifq_free;
	}

	ret = io_allocate_rbuf_ring(ctx, ifq, &reg, &rd, id);
	if (ret)
		goto err;

	ifq->kern_readable = !(area.flags & IORING_ZCRX_AREA_DMABUF);

	if (!(reg.flags & ZCRX_REG_NODEV)) {
		ret = zcrx_register_netdev(ifq, &reg, &area);
		if (ret)
			goto err;
	} else {
		ret = io_zcrx_create_area(ifq, &area, &reg);
		if (ret)
			goto err;
	}

	reg.zcrx_id = id;

	scoped_guard(mutex, &ctx->mmap_lock) {
		/* publish ifq */
		ret = -ENOMEM;
		if (xa_store(&ctx->zcrx_ctxs, id, ifq, GFP_KERNEL))
			goto err;
	}

	reg.rx_buf_len = 1U << ifq->niov_shift;

	if (copy_to_user(arg, &reg, sizeof(reg)) ||
	    copy_to_user(u64_to_user_ptr(reg.region_ptr), &rd, sizeof(rd)) ||
	    copy_to_user(u64_to_user_ptr(reg.area_ptr), &area, sizeof(area))) {
		ret = -EFAULT;
		goto err;
	}
	return 0;
err:
	scoped_guard(mutex, &ctx->mmap_lock)
		xa_erase(&ctx->zcrx_ctxs, id);
ifq_free:
	zcrx_unregister(ifq);
	return ret;
}

static inline bool is_zcrx_entry_marked(struct io_ring_ctx *ctx, unsigned long id)
{
	return xa_get_mark(&ctx->zcrx_ctxs, id, XA_MARK_1);
}

static inline void set_zcrx_entry_mark(struct io_ring_ctx *ctx, unsigned long id)
{
	xa_set_mark(&ctx->zcrx_ctxs, id, XA_MARK_1);
}

void io_terminate_zcrx(struct io_ring_ctx *ctx)
{
	struct io_zcrx_ifq *ifq;
	unsigned long id = 0;

	lockdep_assert_held(&ctx->uring_lock);

	while (1) {
		scoped_guard(mutex, &ctx->mmap_lock)
			ifq = xa_find(&ctx->zcrx_ctxs, &id, ULONG_MAX, XA_PRESENT);
		if (!ifq)
			break;
		if (WARN_ON_ONCE(is_zcrx_entry_marked(ctx, id)))
			break;
		set_zcrx_entry_mark(ctx, id);
		id++;
		zcrx_unregister_user(ifq);
	}
}

void io_unregister_zcrx(struct io_ring_ctx *ctx)
{
	struct io_zcrx_ifq *ifq;

	lockdep_assert_held(&ctx->uring_lock);

	while (1) {
		scoped_guard(mutex, &ctx->mmap_lock) {
			unsigned long id = 0;

			ifq = xa_find(&ctx->zcrx_ctxs, &id, ULONG_MAX, XA_PRESENT);
			if (ifq) {
				if (WARN_ON_ONCE(!is_zcrx_entry_marked(ctx, id))) {
					ifq = NULL;
					break;
				}
				xa_erase(&ctx->zcrx_ctxs, id);
			}
		}
		if (!ifq)
			break;
		io_put_zcrx_ifq(ifq);
	}

	xa_destroy(&ctx->zcrx_ctxs);
}

static inline u32 zcrx_rq_entries(struct zcrx_rq *rq)
{
	u32 entries;

	entries = smp_load_acquire(&rq->ring->tail) - rq->cached_head;
	return min(entries, rq->nr_entries);
}

static struct io_uring_zcrx_rqe *zcrx_next_rqe(struct zcrx_rq *rq, unsigned mask)
{
	unsigned int idx = rq->cached_head++ & mask;

	return &rq->rqes[idx];
}

static inline bool io_parse_rqe(struct io_uring_zcrx_rqe *rqe,
				struct io_zcrx_ifq *ifq,
				struct net_iov **ret_niov)
{
	__u64 off = READ_ONCE(rqe->off);
	unsigned niov_idx, area_idx;
	struct io_zcrx_area *area;

	area_idx = off >> IORING_ZCRX_AREA_SHIFT;
	niov_idx = (off & ~IORING_ZCRX_AREA_MASK) >> ifq->niov_shift;

	if (unlikely(rqe->__pad || area_idx))
		return false;
	area = ifq->area;

	if (unlikely(niov_idx >= area->nia.num_niovs))
		return false;
	niov_idx = array_index_nospec(niov_idx, area->nia.num_niovs);

	*ret_niov = &area->nia.niovs[niov_idx];
	return true;
}

static unsigned io_zcrx_ring_refill(struct page_pool *pp,
				    struct io_zcrx_ifq *ifq,
				    netmem_ref *netmems, unsigned to_alloc)
{
	struct zcrx_rq *rq = &ifq->rq;
	unsigned int mask = rq->nr_entries - 1;
	unsigned int entries;
	unsigned allocated = 0;

	guard(spinlock_bh)(&rq->lock);

	entries = zcrx_rq_entries(rq);
	entries = min_t(unsigned, entries, to_alloc);
	if (unlikely(!entries))
		return 0;

	do {
		struct io_uring_zcrx_rqe *rqe = zcrx_next_rqe(rq, mask);
		struct net_iov *niov;
		netmem_ref netmem;

		if (!io_parse_rqe(rqe, ifq, &niov))
			continue;
		if (!io_zcrx_put_niov_uref(niov))
			continue;

		netmem = net_iov_to_netmem(niov);
		if (!page_pool_unref_and_test(netmem))
			continue;

		if (unlikely(niov->desc.pp != pp)) {
			io_zcrx_return_niov(niov);
			continue;
		}

		netmems[allocated] = netmem;
		allocated++;
	} while (--entries);

	smp_store_release(&rq->ring->head, rq->cached_head);
	return allocated;
}

static unsigned io_zcrx_refill_slow(struct page_pool *pp, struct io_zcrx_ifq *ifq,
				    netmem_ref *netmems, unsigned to_alloc)
{
	struct io_zcrx_area *area = ifq->area;
	unsigned allocated = 0;

	guard(spinlock_bh)(&area->freelist_lock);

	for (allocated = 0; allocated < to_alloc; allocated++) {
		struct net_iov *niov = zcrx_get_free_niov(area);

		if (!niov)
			break;
		net_mp_niov_set_page_pool(pp, niov);
		netmems[allocated] = net_iov_to_netmem(niov);
	}
	return allocated;
}

static netmem_ref io_pp_zc_alloc_netmems(struct page_pool *pp, gfp_t gfp)
{
	struct io_zcrx_ifq *ifq = io_pp_to_ifq(pp);
	netmem_ref *netmems = pp->alloc.cache;
	unsigned to_alloc = PP_ALLOC_CACHE_REFILL;
	unsigned allocated;

	/* pp should already be ensuring that */
	if (WARN_ON_ONCE(pp->alloc.count))
		return 0;

	allocated = io_zcrx_ring_refill(pp, ifq, netmems, to_alloc);
	if (likely(allocated))
		goto out_return;

	allocated = io_zcrx_refill_slow(pp, ifq, netmems, to_alloc);
	if (!allocated)
		return 0;
out_return:
	zcrx_sync_for_device(pp, ifq, netmems, allocated);
	allocated--;
	pp->alloc.count += allocated;
	return netmems[allocated];
}

static bool io_pp_zc_release_netmem(struct page_pool *pp, netmem_ref netmem)
{
	struct net_iov *niov;

	if (WARN_ON_ONCE(!netmem_is_net_iov(netmem)))
		return false;

	niov = netmem_to_net_iov(netmem);
	net_mp_niov_clear_page_pool(niov);
	io_zcrx_return_niov_freelist(niov);
	return false;
}

static int io_pp_zc_init(struct page_pool *pp)
{
	struct io_zcrx_ifq *ifq = io_pp_to_ifq(pp);

	if (WARN_ON_ONCE(!ifq))
		return -EINVAL;
	if (WARN_ON_ONCE(ifq->dev != pp->p.dev))
		return -EINVAL;
	if (WARN_ON_ONCE(!pp->dma_map))
		return -EOPNOTSUPP;
	if (pp->p.order + PAGE_SHIFT != ifq->niov_shift)
		return -EINVAL;
	if (pp->p.dma_dir != DMA_FROM_DEVICE)
		return -EOPNOTSUPP;

	refcount_inc(&ifq->refs);
	return 0;
}

static void io_pp_zc_destroy(struct page_pool *pp)
{
	io_put_zcrx_ifq(io_pp_to_ifq(pp));
}

static int io_pp_nl_fill(void *mp_priv, struct sk_buff *rsp,
			 struct netdev_rx_queue *rxq)
{
	struct nlattr *nest;
	int type;

	type = rxq ? NETDEV_A_QUEUE_IO_URING : NETDEV_A_PAGE_POOL_IO_URING;
	nest = nla_nest_start(rsp, type);
	if (!nest)
		return -EMSGSIZE;
	nla_nest_end(rsp, nest);

	return 0;
}

static void io_pp_uninstall(void *mp_priv, struct netdev_rx_queue *rxq)
{
	struct pp_memory_provider_params *p = &rxq->mp_params;
	struct io_zcrx_ifq *ifq = mp_priv;

	io_zcrx_drop_netdev(ifq);
	if (ifq->area)
		io_zcrx_unmap_area(ifq, ifq->area);

	p->mp_ops = NULL;
	p->mp_priv = NULL;
}

static const struct memory_provider_ops io_uring_pp_zc_ops = {
	.alloc_netmems		= io_pp_zc_alloc_netmems,
	.release_netmem		= io_pp_zc_release_netmem,
	.init			= io_pp_zc_init,
	.destroy		= io_pp_zc_destroy,
	.nl_fill		= io_pp_nl_fill,
	.uninstall		= io_pp_uninstall,
};

static unsigned zcrx_parse_rq(netmem_ref *netmem_array, unsigned nr,
			      struct io_zcrx_ifq *zcrx, struct zcrx_rq *rq)
{
	unsigned int mask = rq->nr_entries - 1;
	unsigned int i;

	nr = min(nr, zcrx_rq_entries(rq));
	for (i = 0; i < nr; i++) {
		struct io_uring_zcrx_rqe *rqe = zcrx_next_rqe(rq, mask);
		struct net_iov *niov;

		if (!io_parse_rqe(rqe, zcrx, &niov))
			break;
		netmem_array[i] = net_iov_to_netmem(niov);
	}

	smp_store_release(&rq->ring->head, rq->cached_head);
	return i;
}

#define ZCRX_FLUSH_BATCH	32

static void zcrx_return_buffers(netmem_ref *netmems, unsigned nr)
{
	unsigned i;

	for (i = 0; i < nr; i++) {
		netmem_ref netmem = netmems[i];
		struct net_iov *niov = netmem_to_net_iov(netmem);

		if (!io_zcrx_put_niov_uref(niov))
			continue;
		if (!page_pool_unref_and_test(netmem))
			continue;
		io_zcrx_return_niov(niov);
	}
}

static int zcrx_flush_rq(struct io_ring_ctx *ctx, struct io_zcrx_ifq *zcrx,
			 struct zcrx_ctrl *ctrl)
{
	struct zcrx_ctrl_flush_rq *frq = &ctrl->zc_flush;
	netmem_ref netmems[ZCRX_FLUSH_BATCH];
	unsigned total = 0;
	unsigned nr;

	if (!mem_is_zero(&frq->__resv, sizeof(frq->__resv)))
		return -EINVAL;

	do {
		struct zcrx_rq *rq = &zcrx->rq;

		scoped_guard(spinlock_bh, &rq->lock) {
			nr = zcrx_parse_rq(netmems, ZCRX_FLUSH_BATCH, zcrx, rq);
			zcrx_return_buffers(netmems, nr);
		}

		total += nr;

		if (fatal_signal_pending(current))
			break;
		cond_resched();
	} while (nr == ZCRX_FLUSH_BATCH && total < zcrx->rq.nr_entries);

	return 0;
}

int io_zcrx_ctrl(struct io_ring_ctx *ctx, void __user *arg, unsigned nr_args)
{
	struct zcrx_ctrl ctrl;
	struct io_zcrx_ifq *zcrx;

	BUILD_BUG_ON(sizeof(ctrl.zc_export) != sizeof(ctrl.zc_flush));

	if (nr_args)
		return -EINVAL;
	if (copy_from_user(&ctrl, arg, sizeof(ctrl)))
		return -EFAULT;
	if (!mem_is_zero(&ctrl.__resv, sizeof(ctrl.__resv)))
		return -EFAULT;

	zcrx = xa_load(&ctx->zcrx_ctxs, ctrl.zcrx_id);
	if (!zcrx)
		return -ENXIO;

	switch (ctrl.op) {
	case ZCRX_CTRL_FLUSH_RQ:
		return zcrx_flush_rq(ctx, zcrx, &ctrl);
	case ZCRX_CTRL_EXPORT:
		return zcrx_export(ctx, zcrx, &ctrl, arg);
	}

	return -EOPNOTSUPP;
}

static bool io_zcrx_queue_cqe(struct io_kiocb *req, struct net_iov *niov,
			      struct io_zcrx_ifq *ifq, int off, int len)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct io_uring_zcrx_cqe *rcqe;
	struct io_zcrx_area *area;
	struct io_uring_cqe *cqe;
	u64 offset;

	if (!io_defer_get_uncommited_cqe(ctx, &cqe))
		return false;

	cqe->user_data = req->cqe.user_data;
	cqe->res = len;
	cqe->flags = IORING_CQE_F_MORE;
	if (ctx->flags & IORING_SETUP_CQE_MIXED)
		cqe->flags |= IORING_CQE_F_32;

	area = io_zcrx_iov_to_area(niov);
	offset = off + (net_iov_idx(niov) << ifq->niov_shift);
	rcqe = (struct io_uring_zcrx_cqe *)(cqe + 1);
	rcqe->off = offset + ((u64)area->area_id << IORING_ZCRX_AREA_SHIFT);
	rcqe->__pad = 0;
	return true;
}

static struct net_iov *io_alloc_fallback_niov(struct io_zcrx_ifq *ifq)
{
	struct io_zcrx_area *area = ifq->area;
	struct net_iov *niov = NULL;

	if (!ifq->kern_readable)
		return NULL;

	scoped_guard(spinlock_bh, &area->freelist_lock)
		niov = zcrx_get_free_niov(area);

	if (niov)
		page_pool_fragment_netmem(net_iov_to_netmem(niov), 1);
	return niov;
}

struct io_copy_cache {
	struct page		*page;
	unsigned long		offset;
	size_t			size;
};

static ssize_t io_copy_page(struct io_copy_cache *cc, struct page *src_page,
			    unsigned int src_offset, size_t len)
{
	size_t copied = 0;

	len = min(len, cc->size);

	while (len) {
		void *src_addr, *dst_addr;
		struct page *dst_page = cc->page;
		unsigned dst_offset = cc->offset;
		size_t n = len;

		if (folio_test_partial_kmap(page_folio(dst_page)) ||
		    folio_test_partial_kmap(page_folio(src_page))) {
			dst_page += dst_offset / PAGE_SIZE;
			dst_offset = offset_in_page(dst_offset);
			src_page += src_offset / PAGE_SIZE;
			src_offset = offset_in_page(src_offset);
			n = min(PAGE_SIZE - src_offset, PAGE_SIZE - dst_offset);
			n = min(n, len);
		}

		dst_addr = kmap_local_page(dst_page) + dst_offset;
		src_addr = kmap_local_page(src_page) + src_offset;

		memcpy(dst_addr, src_addr, n);

		kunmap_local(src_addr);
		kunmap_local(dst_addr);

		cc->size -= n;
		cc->offset += n;
		src_offset += n;
		len -= n;
		copied += n;
	}
	return copied;
}

static ssize_t io_zcrx_copy_chunk(struct io_kiocb *req, struct io_zcrx_ifq *ifq,
				  struct page *src_page, unsigned int src_offset,
				  size_t len)
{
	size_t copied = 0;
	int ret = 0;

	while (len) {
		struct io_copy_cache cc;
		struct net_iov *niov;
		size_t n;

		niov = io_alloc_fallback_niov(ifq);
		if (!niov) {
			ret = -ENOMEM;
			break;
		}

		cc.page = io_zcrx_iov_page(niov);
		cc.offset = 0;
		cc.size = PAGE_SIZE;

		n = io_copy_page(&cc, src_page, src_offset, len);

		if (!io_zcrx_queue_cqe(req, niov, ifq, 0, n)) {
			io_zcrx_return_niov(niov);
			ret = -ENOSPC;
			break;
		}

		io_zcrx_get_niov_uref(niov);
		src_offset += n;
		len -= n;
		copied += n;
	}

	return copied ? copied : ret;
}

static int io_zcrx_copy_frag(struct io_kiocb *req, struct io_zcrx_ifq *ifq,
			     const skb_frag_t *frag, int off, int len)
{
	struct page *page = skb_frag_page(frag);

	return io_zcrx_copy_chunk(req, ifq, page, off + skb_frag_off(frag), len);
}

static int io_zcrx_recv_frag(struct io_kiocb *req, struct io_zcrx_ifq *ifq,
			     const skb_frag_t *frag, int off, int len)
{
	struct net_iov *niov;
	struct page_pool *pp;

	if (unlikely(!skb_frag_is_net_iov(frag)))
		return io_zcrx_copy_frag(req, ifq, frag, off, len);

	niov = netmem_to_net_iov(frag->netmem);
	pp = niov->desc.pp;

	if (!pp || pp->mp_ops != &io_uring_pp_zc_ops || io_pp_to_ifq(pp) != ifq)
		return -EFAULT;

	if (!io_zcrx_queue_cqe(req, niov, ifq, off + skb_frag_off(frag), len))
		return -ENOSPC;

	/*
	 * Prevent it from being recycled while user is accessing it.
	 * It has to be done before grabbing a user reference.
	 */
	page_pool_ref_netmem(net_iov_to_netmem(niov));
	io_zcrx_get_niov_uref(niov);
	return len;
}

static int
io_zcrx_recv_skb(read_descriptor_t *desc, struct sk_buff *skb,
		 unsigned int offset, size_t len)
{
	struct io_zcrx_args *args = desc->arg.data;
	struct io_zcrx_ifq *ifq = args->ifq;
	struct io_kiocb *req = args->req;
	struct sk_buff *frag_iter;
	unsigned start, start_off = offset;
	int i, copy, end, off;
	int ret = 0;

	len = min_t(size_t, len, desc->count);
	/*
	 * __tcp_read_sock() always calls io_zcrx_recv_skb one last time, even
	 * if desc->count is already 0. This is caused by the if (offset + 1 !=
	 * skb->len) check. Return early in this case to break out of
	 * __tcp_read_sock().
	 */
	if (!len)
		return 0;
	if (unlikely(args->nr_skbs++ > IO_SKBS_PER_CALL_LIMIT))
		return -EAGAIN;

	if (unlikely(offset < skb_headlen(skb))) {
		ssize_t copied;
		size_t to_copy;

		to_copy = min_t(size_t, skb_headlen(skb) - offset, len);
		copied = io_zcrx_copy_chunk(req, ifq, virt_to_page(skb->data),
					    offset_in_page(skb->data) + offset,
					    to_copy);
		if (copied < 0) {
			ret = copied;
			goto out;
		}
		offset += copied;
		len -= copied;
		if (!len)
			goto out;
		if (offset != skb_headlen(skb))
			goto out;
	}

	start = skb_headlen(skb);

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const skb_frag_t *frag;

		if (WARN_ON(start > offset + len))
			return -EFAULT;

		frag = &skb_shinfo(skb)->frags[i];
		end = start + skb_frag_size(frag);

		if (offset < end) {
			copy = end - offset;
			if (copy > len)
				copy = len;

			off = offset - start;
			ret = io_zcrx_recv_frag(req, ifq, frag, off, copy);
			if (ret < 0)
				goto out;

			offset += ret;
			len -= ret;
			if (len == 0 || ret != copy)
				goto out;
		}
		start = end;
	}

	skb_walk_frags(skb, frag_iter) {
		if (WARN_ON(start > offset + len))
			return -EFAULT;

		end = start + frag_iter->len;
		if (offset < end) {
			size_t count;

			copy = end - offset;
			if (copy > len)
				copy = len;

			off = offset - start;
			count = desc->count;
			ret = io_zcrx_recv_skb(desc, frag_iter, off, copy);
			desc->count = count;
			if (ret < 0)
				goto out;

			offset += ret;
			len -= ret;
			if (len == 0 || ret != copy)
				goto out;
		}
		start = end;
	}

out:
	if (offset == start_off)
		return ret;
	desc->count -= (offset - start_off);
	return offset - start_off;
}

static int io_zcrx_tcp_recvmsg(struct io_kiocb *req, struct io_zcrx_ifq *ifq,
			       struct sock *sk, int flags,
			       unsigned issue_flags, unsigned int *outlen)
{
	unsigned int len = *outlen;
	struct io_zcrx_args args = {
		.req = req,
		.ifq = ifq,
		.sock = sk->sk_socket,
	};
	read_descriptor_t rd_desc = {
		.count = len ? len : UINT_MAX,
		.arg.data = &args,
	};
	int ret;

	lock_sock(sk);
	ret = tcp_read_sock(sk, &rd_desc, io_zcrx_recv_skb);
	if (len && ret > 0)
		*outlen = len - ret;
	if (ret <= 0) {
		if (ret < 0 || sock_flag(sk, SOCK_DONE))
			goto out;
		if (sk->sk_err)
			ret = sock_error(sk);
		else if (sk->sk_shutdown & RCV_SHUTDOWN)
			goto out;
		else if (sk->sk_state == TCP_CLOSE)
			ret = -ENOTCONN;
		else
			ret = -EAGAIN;
	} else if (unlikely(args.nr_skbs > IO_SKBS_PER_CALL_LIMIT) &&
		   (issue_flags & IO_URING_F_MULTISHOT)) {
		ret = IOU_REQUEUE;
	} else if (sock_flag(sk, SOCK_DONE)) {
		/* Make it to retry until it finally gets 0. */
		if (issue_flags & IO_URING_F_MULTISHOT)
			ret = IOU_REQUEUE;
		else
			ret = -EAGAIN;
	}
out:
	release_sock(sk);
	return ret;
}

int io_zcrx_recv(struct io_kiocb *req, struct io_zcrx_ifq *ifq,
		 struct socket *sock, unsigned int flags,
		 unsigned issue_flags, unsigned int *len)
{
	struct sock *sk = sock->sk;
	const struct proto *prot = READ_ONCE(sk->sk_prot);

	if (prot->recvmsg != tcp_recvmsg)
		return -EPROTONOSUPPORT;

	sock_rps_record_flow(sk);
	return io_zcrx_tcp_recvmsg(req, ifq, sk, flags, issue_flags, len);
}