// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/dma-map-ops.h>
#include <linux/mm.h>
#include <linux/nospec.h>
#include <linux/io_uring.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/skbuff_ref.h>
#include <linux/anon_inodes.h>

#include <net/page_pool/helpers.h>
#include <net/page_pool/memory_provider.h>
#include <net/netlink.h>
#include <net/netdev_queues.h>
#include <net/netdev_rx_queue.h>
#include <net/tcp.h>
#include <net/rps.h>

#include <trace/events/page_pool.h>

#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "kbuf.h"
#include "memmap.h"
#include "zcrx.h"
#include "rsrc.h"

#define IO_ZCRX_AREA_SUPPORTED_FLAGS (IORING_ZCRX_AREA_DMABUF)

#define IO_DMA_ATTR (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING)

static inline struct io_zcrx_ifq *io_pp_to_ifq(struct page_pool *pp)
{
	return pp->mp_priv;
}

static inline struct io_zcrx_area *io_zcrx_iov_to_area(const struct net_iov *niov)
{
	struct net_iov_area *owner = net_iov_owner(niov);

	return container_of(owner, struct io_zcrx_area, nia);
}

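/* Look up the first user page backing a niov; only valid for non-dmabuf areas. */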
static inline struct page *io_zcrx_iov_page(const struct net_iov *niov)
{
	struct io_zcrx_area *area = io_zcrx_iov_to_area(niov);
	unsigned niov_pages_shift;

	lockdep_assert(!area->mem.is_dmabuf);

	niov_pages_shift = area->ifq->niov_shift - PAGE_SHIFT;
	return area->mem.pages[net_iov_idx(niov) << niov_pages_shift];
}

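/*
 * Walk the DMA-mapped scatterlist and assign one DMA address per niov,
 * splitting each sg entry into niov_size chunks.
 */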
static int io_populate_area_dma(struct io_zcrx_ifq *ifq,
				struct io_zcrx_area *area)
{
	unsigned niov_size = 1U << ifq->niov_shift;
	struct sg_table *sgt = area->mem.sgt;
	struct scatterlist *sg;
	unsigned i, niov_idx = 0;

	for_each_sgtable_dma_sg(sgt, sg, i) {
		dma_addr_t dma = sg_dma_address(sg);
		unsigned long sg_len = sg_dma_len(sg);

		if (WARN_ON_ONCE(sg_len % niov_size))
			return -EINVAL;

		while (sg_len && niov_idx < area->nia.num_niovs) {
			struct net_iov *niov = &area->nia.niovs[niov_idx];

			if (net_mp_niov_set_dma_addr(niov, dma))
				return -EFAULT;
			sg_len -= niov_size;
			dma += niov_size;
			niov_idx++;
		}
	}

	if (WARN_ON_ONCE(niov_idx != area->nia.num_niovs))
		return -EFAULT;
	return 0;
}

static void io_release_dmabuf(struct io_zcrx_mem *mem)
{
	if (!IS_ENABLED(CONFIG_DMA_SHARED_BUFFER))
		return;

	if (mem->sgt)
		dma_buf_unmap_attachment_unlocked(mem->attach, mem->sgt,
						  DMA_FROM_DEVICE);
	if (mem->attach)
		dma_buf_detach(mem->dmabuf, mem->attach);
	if (mem->dmabuf)
		dma_buf_put(mem->dmabuf);

	mem->sgt = NULL;
	mem->attach = NULL;
	mem->dmabuf = NULL;
}

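/*
 * Import a dmabuf as the area's backing memory: take a reference on the fd,
 * attach it to the queue's DMA device and map it for DMA_FROM_DEVICE.
 */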
static int io_import_dmabuf(struct io_zcrx_ifq *ifq,
			    struct io_zcrx_mem *mem,
			    struct io_uring_zcrx_area_reg *area_reg)
{
	unsigned long off = (unsigned long)area_reg->addr;
	unsigned long len = (unsigned long)area_reg->len;
	unsigned long total_size = 0;
	struct scatterlist *sg;
	int dmabuf_fd = area_reg->dmabuf_fd;
	int i, ret;

	if (off)
		return -EINVAL;
	if (WARN_ON_ONCE(!ifq->dev))
		return -EFAULT;
	if (!IS_ENABLED(CONFIG_DMA_SHARED_BUFFER))
		return -EINVAL;

	mem->is_dmabuf = true;
	mem->dmabuf = dma_buf_get(dmabuf_fd);
	if (IS_ERR(mem->dmabuf)) {
		ret = PTR_ERR(mem->dmabuf);
		mem->dmabuf = NULL;
		goto err;
	}

	mem->attach = dma_buf_attach(mem->dmabuf, ifq->dev);
	if (IS_ERR(mem->attach)) {
		ret = PTR_ERR(mem->attach);
		mem->attach = NULL;
		goto err;
	}

	mem->sgt = dma_buf_map_attachment_unlocked(mem->attach, DMA_FROM_DEVICE);
	if (IS_ERR(mem->sgt)) {
		ret = PTR_ERR(mem->sgt);
		mem->sgt = NULL;
		goto err;
	}

	for_each_sgtable_dma_sg(mem->sgt, sg, i)
		total_size += sg_dma_len(sg);

	if (total_size != len) {
		ret = -EINVAL;
		goto err;
	}

	mem->size = len;
	return 0;
err:
	io_release_dmabuf(mem);
	return ret;
}

static unsigned long io_count_account_pages(struct page **pages, unsigned nr_pages)
{
	struct folio *last_folio = NULL;
	unsigned long res = 0;
	int i;

	for (i = 0; i < nr_pages; i++) {
		struct folio *folio = page_folio(pages[i]);

		if (folio == last_folio)
			continue;
		last_folio = folio;
		res += folio_nr_pages(folio);
	}
	return res;
}

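/*
 * Pin a user memory range, build a scatterlist over the pinned pages and
 * charge them against the registering user / mm.
 */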
static int io_import_umem(struct io_zcrx_ifq *ifq,
			  struct io_zcrx_mem *mem,
			  struct io_uring_zcrx_area_reg *area_reg)
{
	struct page **pages;
	int nr_pages, ret;

	if (area_reg->dmabuf_fd)
		return -EINVAL;
	if (!area_reg->addr)
		return -EFAULT;
	pages = io_pin_pages((unsigned long)area_reg->addr, area_reg->len,
			     &nr_pages);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	ret = sg_alloc_table_from_pages(&mem->page_sg_table, pages, nr_pages,
					0, nr_pages << PAGE_SHIFT,
					GFP_KERNEL_ACCOUNT);
	if (ret) {
		unpin_user_pages(pages, nr_pages);
		kvfree(pages);
		return ret;
	}

	mem->account_pages = io_count_account_pages(pages, nr_pages);
	ret = io_account_mem(ifq->user, ifq->mm_account, mem->account_pages);
	if (ret < 0)
		mem->account_pages = 0;

	mem->sgt = &mem->page_sg_table;
	mem->pages = pages;
	mem->nr_folios = nr_pages;
	mem->size = area_reg->len;
	return ret;
}

static void io_release_area_mem(struct io_zcrx_mem *mem)
{
	if (mem->is_dmabuf) {
		io_release_dmabuf(mem);
		return;
	}
	if (mem->pages) {
		unpin_user_pages(mem->pages, mem->nr_folios);
		sg_free_table(mem->sgt);
		mem->sgt = NULL;
		kvfree(mem->pages);
	}
}

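/*
 * Validate an area registration and import its backing memory, either a
 * dmabuf or pinned user pages depending on the area flags.
 */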
static int io_import_area(struct io_zcrx_ifq *ifq,
			  struct io_zcrx_mem *mem,
			  struct io_uring_zcrx_area_reg *area_reg)
{
	int ret;

	if (area_reg->flags & ~IO_ZCRX_AREA_SUPPORTED_FLAGS)
		return -EINVAL;
	if (area_reg->rq_area_token)
		return -EINVAL;
	if (area_reg->__resv2[0] || area_reg->__resv2[1])
		return -EINVAL;

	ret = io_validate_user_buf_range(area_reg->addr, area_reg->len);
	if (ret)
		return ret;
	if (area_reg->addr & ~PAGE_MASK || area_reg->len & ~PAGE_MASK)
		return -EINVAL;

	if (area_reg->flags & IORING_ZCRX_AREA_DMABUF)
		return io_import_dmabuf(ifq, mem, area_reg);
	return io_import_umem(ifq, mem, area_reg);
}

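/* Tear down the area's DMA mappings, clearing each niov's DMA address first. */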
static void io_zcrx_unmap_area(struct io_zcrx_ifq *ifq,
			       struct io_zcrx_area *area)
{
	int i;

	guard(mutex)(&ifq->pp_lock);
	if (!area->is_mapped)
		return;
	area->is_mapped = false;

	for (i = 0; i < area->nia.num_niovs; i++)
		net_mp_niov_set_dma_addr(&area->nia.niovs[i], 0);

	if (area->mem.is_dmabuf) {
		io_release_dmabuf(&area->mem);
	} else {
		dma_unmap_sgtable(ifq->dev, &area->mem.page_sg_table,
				  DMA_FROM_DEVICE, IO_DMA_ATTR);
	}
}

static int io_zcrx_map_area(struct io_zcrx_ifq *ifq, struct io_zcrx_area *area)
{
	int ret;

	guard(mutex)(&ifq->pp_lock);
	if (area->is_mapped)
		return 0;

	if (!area->mem.is_dmabuf) {
		ret = dma_map_sgtable(ifq->dev, &area->mem.page_sg_table,
				      DMA_FROM_DEVICE, IO_DMA_ATTR);
		if (ret < 0)
			return ret;
	}

	ret = io_populate_area_dma(ifq, area);
	if (ret == 0)
		area->is_mapped = true;
	return ret;
}

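/* Sync a niov's buffer for device access before it goes back into the rx path. */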
static void io_zcrx_sync_for_device(struct page_pool *pool,
				    struct net_iov *niov)
{
#if defined(CONFIG_HAS_DMA) && defined(CONFIG_DMA_NEED_SYNC)
	dma_addr_t dma_addr;
	unsigned niov_size;

	if (!dma_dev_need_sync(pool->p.dev))
		return;

	niov_size = 1U << io_pp_to_ifq(pool)->niov_shift;
	dma_addr = page_pool_get_dma_addr_netmem(net_iov_to_netmem(niov));
	__dma_sync_single_for_device(pool->p.dev, dma_addr + pool->p.offset,
				     niov_size, pool->p.dma_dir);
#endif
}

#define IO_RQ_MAX_ENTRIES 32768

#define IO_SKBS_PER_CALL_LIMIT 20

struct io_zcrx_args {
	struct io_kiocb *req;
	struct io_zcrx_ifq *ifq;
	struct socket *sock;
	unsigned nr_skbs;
};

static const struct memory_provider_ops io_uring_pp_zc_ops;

static inline atomic_t *io_get_user_counter(struct net_iov *niov)
{
	struct io_zcrx_area *area = io_zcrx_iov_to_area(niov);

	return &area->user_refs[net_iov_idx(niov)];
}

static bool io_zcrx_put_niov_uref(struct net_iov *niov)
{
	atomic_t *uref = io_get_user_counter(niov);

	if (unlikely(!atomic_read(uref)))
		return false;
	atomic_dec(uref);
	return true;
}

static void io_zcrx_get_niov_uref(struct net_iov *niov)
{
	atomic_inc(io_get_user_counter(niov));
}

static void io_fill_zcrx_offsets(struct io_uring_zcrx_offsets *offsets)
{
	offsets->head = offsetof(struct io_uring, head);
	offsets->tail = offsetof(struct io_uring, tail);
	offsets->rqes = ALIGN(sizeof(struct io_uring), L1_CACHE_BYTES);
}

static int io_allocate_rbuf_ring(struct io_ring_ctx *ctx,
				 struct io_zcrx_ifq *ifq,
				 struct io_uring_zcrx_ifq_reg *reg,
				 struct io_uring_region_desc *rd,
				 u32 id)
{
	u64 mmap_offset;
	size_t off, size;
	void *ptr;
	int ret;

	io_fill_zcrx_offsets(&reg->offsets);
	off = reg->offsets.rqes;
	size = off + sizeof(struct io_uring_zcrx_rqe) * reg->rq_entries;
	if (size > rd->size)
		return -EINVAL;

	mmap_offset = IORING_MAP_OFF_ZCRX_REGION;
	mmap_offset += id << IORING_OFF_PBUF_SHIFT;

	ret = io_create_region(ctx, &ifq->region, rd, mmap_offset);
	if (ret < 0)
		return ret;

	ptr = io_region_get_ptr(&ifq->region);
	ifq->rq_ring = (struct io_uring *)ptr;
	ifq->rqes = (struct io_uring_zcrx_rqe *)(ptr + off);

	return 0;
}

static void io_free_rbuf_ring(struct io_zcrx_ifq *ifq)
{
	io_free_region(ifq->user, &ifq->region);
	ifq->rq_ring = NULL;
	ifq->rqes = NULL;
}

static void io_zcrx_free_area(struct io_zcrx_ifq *ifq,
			      struct io_zcrx_area *area)
{
	io_zcrx_unmap_area(ifq, area);
	io_release_area_mem(&area->mem);

	if (area->mem.account_pages)
		io_unaccount_mem(ifq->user, ifq->mm_account,
				 area->mem.account_pages);

	kvfree(area->freelist);
	kvfree(area->nia.niovs);
	kvfree(area->user_refs);
	kfree(area);
}

static int io_zcrx_append_area(struct io_zcrx_ifq *ifq,
			       struct io_zcrx_area *area)
{
	if (ifq->area)
		return -EINVAL;
	ifq->area = area;
	return 0;
}

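/*
 * Allocate an area, import its backing memory and carve it into page-sized
 * niovs, each with a freelist slot and a user reference counter.
 */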
static int io_zcrx_create_area(struct io_zcrx_ifq *ifq,
			       struct io_uring_zcrx_area_reg *area_reg)
{
	struct io_zcrx_area *area;
	unsigned nr_iovs;
	int i, ret;

	ret = -ENOMEM;
	area = kzalloc(sizeof(*area), GFP_KERNEL);
	if (!area)
		goto err;
	area->ifq = ifq;

	ret = io_import_area(ifq, &area->mem, area_reg);
	if (ret)
		goto err;

	ifq->niov_shift = PAGE_SHIFT;
	nr_iovs = area->mem.size >> ifq->niov_shift;
	area->nia.num_niovs = nr_iovs;

	ret = -ENOMEM;
	area->nia.niovs = kvmalloc_array(nr_iovs, sizeof(area->nia.niovs[0]),
					 GFP_KERNEL_ACCOUNT | __GFP_ZERO);
	if (!area->nia.niovs)
		goto err;

	area->freelist = kvmalloc_array(nr_iovs, sizeof(area->freelist[0]),
					GFP_KERNEL_ACCOUNT | __GFP_ZERO);
	if (!area->freelist)
		goto err;

	area->user_refs = kvmalloc_array(nr_iovs, sizeof(area->user_refs[0]),
					 GFP_KERNEL_ACCOUNT | __GFP_ZERO);
	if (!area->user_refs)
		goto err;

	for (i = 0; i < nr_iovs; i++) {
		struct net_iov *niov = &area->nia.niovs[i];

		niov->owner = &area->nia;
		area->freelist[i] = i;
		atomic_set(&area->user_refs[i], 0);
		niov->type = NET_IOV_IOURING;
	}

	area->free_count = nr_iovs;
	/* we're only supporting one area per ifq for now */
	area->area_id = 0;
	area_reg->rq_area_token = (u64)area->area_id << IORING_ZCRX_AREA_SHIFT;
	spin_lock_init(&area->freelist_lock);

	ret = io_zcrx_append_area(ifq, area);
	if (!ret)
		return 0;
err:
	if (area)
		io_zcrx_free_area(ifq, area);
	return ret;
}

static struct io_zcrx_ifq *io_zcrx_ifq_alloc(struct io_ring_ctx *ctx)
{
	struct io_zcrx_ifq *ifq;

	ifq = kzalloc(sizeof(*ifq), GFP_KERNEL);
	if (!ifq)
		return NULL;

	ifq->if_rxq = -1;
	spin_lock_init(&ifq->rq_lock);
	mutex_init(&ifq->pp_lock);
	refcount_set(&ifq->refs, 1);
	refcount_set(&ifq->user_refs, 1);
	return ifq;
}

static void io_zcrx_drop_netdev(struct io_zcrx_ifq *ifq)
{
	guard(mutex)(&ifq->pp_lock);

	if (!ifq->netdev)
		return;
	netdev_put(ifq->netdev, &ifq->netdev_tracker);
	ifq->netdev = NULL;
}

static void io_close_queue(struct io_zcrx_ifq *ifq)
{
	struct net_device *netdev;
	netdevice_tracker netdev_tracker;
	struct pp_memory_provider_params p = {
		.mp_ops = &io_uring_pp_zc_ops,
		.mp_priv = ifq,
	};

	if (ifq->if_rxq == -1)
		return;

	scoped_guard(mutex, &ifq->pp_lock) {
		netdev = ifq->netdev;
		netdev_tracker = ifq->netdev_tracker;
		ifq->netdev = NULL;
	}

	if (netdev) {
		net_mp_close_rxq(netdev, ifq->if_rxq, &p);
		netdev_put(netdev, &netdev_tracker);
	}
	ifq->if_rxq = -1;
}

static void io_zcrx_ifq_free(struct io_zcrx_ifq *ifq)
{
	io_close_queue(ifq);

	if (ifq->area)
		io_zcrx_free_area(ifq, ifq->area);
	free_uid(ifq->user);
	if (ifq->mm_account)
		mmdrop(ifq->mm_account);
	if (ifq->dev)
		put_device(ifq->dev);

	io_free_rbuf_ring(ifq);
	mutex_destroy(&ifq->pp_lock);
	kfree(ifq);
}

static void io_put_zcrx_ifq(struct io_zcrx_ifq *ifq)
{
	if (refcount_dec_and_test(&ifq->refs))
		io_zcrx_ifq_free(ifq);
}

static void io_zcrx_return_niov_freelist(struct net_iov *niov)
{
	struct io_zcrx_area *area = io_zcrx_iov_to_area(niov);

	spin_lock_bh(&area->freelist_lock);
	area->freelist[area->free_count++] = net_iov_idx(niov);
	spin_unlock_bh(&area->freelist_lock);
}

static void io_zcrx_return_niov(struct net_iov *niov)
{
	netmem_ref netmem = net_iov_to_netmem(niov);

	if (!niov->desc.pp) {
		/* copy fallback allocated niovs */
		io_zcrx_return_niov_freelist(niov);
		return;
	}
	page_pool_put_unrefed_netmem(niov->desc.pp, netmem, -1, false);
}

static void io_zcrx_scrub(struct io_zcrx_ifq *ifq)
{
	struct io_zcrx_area *area = ifq->area;
	int i;

	if (!area)
		return;

	/* Reclaim back all buffers given to the user space. */
	for (i = 0; i < area->nia.num_niovs; i++) {
		struct net_iov *niov = &area->nia.niovs[i];
		int nr;

		if (!atomic_read(io_get_user_counter(niov)))
			continue;
		nr = atomic_xchg(io_get_user_counter(niov), 0);
		if (nr && !page_pool_unref_netmem(net_iov_to_netmem(niov), nr))
			io_zcrx_return_niov(niov);
	}
}

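/* Drop a userspace handle; the last one closes the rx queue and reclaims buffers. */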
static void zcrx_unregister(struct io_zcrx_ifq *ifq)
{
	if (refcount_dec_and_test(&ifq->user_refs)) {
		io_close_queue(ifq);
		io_zcrx_scrub(ifq);
	}
	io_put_zcrx_ifq(ifq);
}

struct io_mapped_region *io_zcrx_get_region(struct io_ring_ctx *ctx,
					    unsigned int id)
{
	struct io_zcrx_ifq *ifq = xa_load(&ctx->zcrx_ctxs, id);

	lockdep_assert_held(&ctx->mmap_lock);

	return ifq ? &ifq->region : NULL;
}

static int zcrx_box_release(struct inode *inode, struct file *file)
{
	struct io_zcrx_ifq *ifq = file->private_data;

	if (WARN_ON_ONCE(!ifq))
		return -EFAULT;
	zcrx_unregister(ifq);
	return 0;
}

static const struct file_operations zcrx_box_fops = {
	.owner = THIS_MODULE,
	.release = zcrx_box_release,
};

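/*
 * Export the ifq through an anonymous [zcrx] file so that another ring can
 * import it; the new fd is returned to userspace via the control structure.
 */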
static int zcrx_export(struct io_ring_ctx *ctx, struct io_zcrx_ifq *ifq,
		       struct zcrx_ctrl *ctrl, void __user *arg)
{
	struct zcrx_ctrl_export *ce = &ctrl->zc_export;
	struct file *file;
	int fd = -1;

	if (!mem_is_zero(ce, sizeof(*ce)))
		return -EINVAL;
	fd = get_unused_fd_flags(O_CLOEXEC);
	if (fd < 0)
		return fd;

	ce->zcrx_fd = fd;
	if (copy_to_user(arg, ctrl, sizeof(*ctrl))) {
		put_unused_fd(fd);
		return -EFAULT;
	}

	refcount_inc(&ifq->refs);
	refcount_inc(&ifq->user_refs);

	file = anon_inode_create_getfile("[zcrx]", &zcrx_box_fops,
					 ifq, O_CLOEXEC, NULL);
	if (IS_ERR(file)) {
		put_unused_fd(fd);
		zcrx_unregister(ifq);
		return PTR_ERR(file);
	}

	fd_install(fd, file);
	return 0;
}

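/*
 * Import an ifq previously exported as a [zcrx] file: take references on it
 * and publish it in this ring's zcrx table under a freshly allocated id.
 */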
static int import_zcrx(struct io_ring_ctx *ctx,
		       struct io_uring_zcrx_ifq_reg __user *arg,
		       struct io_uring_zcrx_ifq_reg *reg)
{
	struct io_zcrx_ifq *ifq;
	struct file *file;
	int fd, ret;
	u32 id;

	if (!(ctx->flags & IORING_SETUP_DEFER_TASKRUN))
		return -EINVAL;
	if (!(ctx->flags & (IORING_SETUP_CQE32|IORING_SETUP_CQE_MIXED)))
		return -EINVAL;
	if (reg->if_rxq || reg->rq_entries || reg->area_ptr || reg->region_ptr)
		return -EINVAL;

	fd = reg->if_idx;
	CLASS(fd, f)(fd);
	if (fd_empty(f))
		return -EBADF;

	file = fd_file(f);
	if (file->f_op != &zcrx_box_fops || !file->private_data)
		return -EBADF;

	ifq = file->private_data;
	refcount_inc(&ifq->refs);
	refcount_inc(&ifq->user_refs);

	scoped_guard(mutex, &ctx->mmap_lock) {
		ret = xa_alloc(&ctx->zcrx_ctxs, &id, NULL, xa_limit_31b, GFP_KERNEL);
		if (ret)
			goto err;
	}

	reg->zcrx_id = id;
	io_fill_zcrx_offsets(&reg->offsets);
	if (copy_to_user(arg, reg, sizeof(*reg))) {
		ret = -EFAULT;
		goto err_xa_erase;
	}

	scoped_guard(mutex, &ctx->mmap_lock) {
		ret = -ENOMEM;
		if (xa_store(&ctx->zcrx_ctxs, id, ifq, GFP_KERNEL))
			goto err_xa_erase;
	}

	return 0;
err_xa_erase:
	scoped_guard(mutex, &ctx->mmap_lock)
		xa_erase(&ctx->zcrx_ctxs, id);
err:
	zcrx_unregister(ifq);
	return ret;
}

int io_register_zcrx_ifq(struct io_ring_ctx *ctx,
			 struct io_uring_zcrx_ifq_reg __user *arg)
{
	struct pp_memory_provider_params mp_param = {};
	struct io_uring_zcrx_area_reg area;
	struct io_uring_zcrx_ifq_reg reg;
	struct io_uring_region_desc rd;
	struct io_zcrx_ifq *ifq;
	int ret;
	u32 id;

	/*
	 * 1. Interface queue allocation.
	 * 2. It can observe data destined for sockets of other tasks.
	 */
	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	/* mandatory io_uring features for zc rx */
	if (!(ctx->flags & IORING_SETUP_DEFER_TASKRUN))
		return -EINVAL;
	if (!(ctx->flags & (IORING_SETUP_CQE32|IORING_SETUP_CQE_MIXED)))
		return -EINVAL;
	if (copy_from_user(&reg, arg, sizeof(reg)))
		return -EFAULT;
	if (!mem_is_zero(&reg.__resv, sizeof(reg.__resv)) ||
	    reg.__resv2 || reg.zcrx_id)
		return -EINVAL;
	if (reg.flags & ZCRX_REG_IMPORT)
		return import_zcrx(ctx, arg, &reg);
	if (copy_from_user(&rd, u64_to_user_ptr(reg.region_ptr), sizeof(rd)))
		return -EFAULT;
	if (reg.if_rxq == -1 || !reg.rq_entries || reg.flags)
		return -EINVAL;
	if (reg.rq_entries > IO_RQ_MAX_ENTRIES) {
		if (!(ctx->flags & IORING_SETUP_CLAMP))
			return -EINVAL;
		reg.rq_entries = IO_RQ_MAX_ENTRIES;
	}
	reg.rq_entries = roundup_pow_of_two(reg.rq_entries);

	if (copy_from_user(&area, u64_to_user_ptr(reg.area_ptr), sizeof(area)))
		return -EFAULT;

	ifq = io_zcrx_ifq_alloc(ctx);
	if (!ifq)
		return -ENOMEM;

	if (ctx->user) {
		get_uid(ctx->user);
		ifq->user = ctx->user;
	}
	if (ctx->mm_account) {
		mmgrab(ctx->mm_account);
		ifq->mm_account = ctx->mm_account;
	}
	ifq->rq_entries = reg.rq_entries;

	scoped_guard(mutex, &ctx->mmap_lock) {
		/* preallocate id */
		ret = xa_alloc(&ctx->zcrx_ctxs, &id, NULL, xa_limit_31b, GFP_KERNEL);
		if (ret)
			goto ifq_free;
	}

	ret = io_allocate_rbuf_ring(ctx, ifq, &reg, &rd, id);
	if (ret)
		goto err;

	ifq->netdev = netdev_get_by_index_lock(current->nsproxy->net_ns, reg.if_idx);
	if (!ifq->netdev) {
		ret = -ENODEV;
		goto err;
	}
	netdev_hold(ifq->netdev, &ifq->netdev_tracker, GFP_KERNEL);

	ifq->dev = netdev_queue_get_dma_dev(ifq->netdev, reg.if_rxq);
	if (!ifq->dev) {
		ret = -EOPNOTSUPP;
		goto netdev_put_unlock;
	}
	get_device(ifq->dev);

	ret = io_zcrx_create_area(ifq, &area);
	if (ret)
		goto netdev_put_unlock;

	mp_param.mp_ops = &io_uring_pp_zc_ops;
	mp_param.mp_priv = ifq;
	ret = __net_mp_open_rxq(ifq->netdev, reg.if_rxq, &mp_param, NULL);
	if (ret)
		goto netdev_put_unlock;
	netdev_unlock(ifq->netdev);
	ifq->if_rxq = reg.if_rxq;

	reg.zcrx_id = id;

	scoped_guard(mutex, &ctx->mmap_lock) {
		/* publish ifq */
		ret = -ENOMEM;
		if (xa_store(&ctx->zcrx_ctxs, id, ifq, GFP_KERNEL))
			goto err;
	}

	if (copy_to_user(arg, &reg, sizeof(reg)) ||
	    copy_to_user(u64_to_user_ptr(reg.region_ptr), &rd, sizeof(rd)) ||
	    copy_to_user(u64_to_user_ptr(reg.area_ptr), &area, sizeof(area))) {
		ret = -EFAULT;
		goto err;
	}
	return 0;
netdev_put_unlock:
	netdev_put(ifq->netdev, &ifq->netdev_tracker);
	netdev_unlock(ifq->netdev);
err:
	scoped_guard(mutex, &ctx->mmap_lock)
		xa_erase(&ctx->zcrx_ctxs, id);
ifq_free:
	io_zcrx_ifq_free(ifq);
	return ret;
}

static struct net_iov *__io_zcrx_get_free_niov(struct io_zcrx_area *area)
{
	unsigned niov_idx;

	lockdep_assert_held(&area->freelist_lock);

	niov_idx = area->freelist[--area->free_count];
	return &area->nia.niovs[niov_idx];
}

void io_unregister_zcrx_ifqs(struct io_ring_ctx *ctx)
{
	struct io_zcrx_ifq *ifq;

	lockdep_assert_held(&ctx->uring_lock);

	while (1) {
		scoped_guard(mutex, &ctx->mmap_lock) {
			unsigned long id = 0;

			ifq = xa_find(&ctx->zcrx_ctxs, &id, ULONG_MAX, XA_PRESENT);
			if (ifq)
				xa_erase(&ctx->zcrx_ctxs, id);
		}
		if (!ifq)
			break;
		zcrx_unregister(ifq);
	}

	xa_destroy(&ctx->zcrx_ctxs);
}

static inline u32 io_zcrx_rqring_entries(struct io_zcrx_ifq *ifq)
{
	u32 entries;

	entries = smp_load_acquire(&ifq->rq_ring->tail) - ifq->cached_rq_head;
	return min(entries, ifq->rq_entries);
}

static struct io_uring_zcrx_rqe *io_zcrx_get_rqe(struct io_zcrx_ifq *ifq,
						 unsigned mask)
{
	unsigned int idx = ifq->cached_rq_head++ & mask;

	return &ifq->rqes[idx];
}

static inline bool io_parse_rqe(struct io_uring_zcrx_rqe *rqe,
				struct io_zcrx_ifq *ifq,
				struct net_iov **ret_niov)
{
	unsigned niov_idx, area_idx;
	struct io_zcrx_area *area;

	area_idx = rqe->off >> IORING_ZCRX_AREA_SHIFT;
	niov_idx = (rqe->off & ~IORING_ZCRX_AREA_MASK) >> ifq->niov_shift;

	if (unlikely(rqe->__pad || area_idx))
		return false;
	area = ifq->area;

	if (unlikely(niov_idx >= area->nia.num_niovs))
		return false;
	niov_idx = array_index_nospec(niov_idx, area->nia.num_niovs);

	*ret_niov = &area->nia.niovs[niov_idx];
	return true;
}

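/*
 * Consume refill ring entries posted by userspace and, where possible,
 * recycle the returned buffers straight into the page pool's alloc cache.
 */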
static void io_zcrx_ring_refill(struct page_pool *pp,
				struct io_zcrx_ifq *ifq)
{
	unsigned int mask = ifq->rq_entries - 1;
	unsigned int entries;

	guard(spinlock_bh)(&ifq->rq_lock);

	entries = io_zcrx_rqring_entries(ifq);
	entries = min_t(unsigned, entries, PP_ALLOC_CACHE_REFILL);
	if (unlikely(!entries))
		return;

	do {
		struct io_uring_zcrx_rqe *rqe = io_zcrx_get_rqe(ifq, mask);
		struct net_iov *niov;
		netmem_ref netmem;

		if (!io_parse_rqe(rqe, ifq, &niov))
			continue;
		if (!io_zcrx_put_niov_uref(niov))
			continue;

		netmem = net_iov_to_netmem(niov);
		if (!page_pool_unref_and_test(netmem))
			continue;

		if (unlikely(niov->desc.pp != pp)) {
			io_zcrx_return_niov(niov);
			continue;
		}

		io_zcrx_sync_for_device(pp, niov);
		net_mp_netmem_place_in_cache(pp, netmem);
	} while (--entries);

	smp_store_release(&ifq->rq_ring->head, ifq->cached_rq_head);
}

static void io_zcrx_refill_slow(struct page_pool *pp, struct io_zcrx_ifq *ifq)
{
	struct io_zcrx_area *area = ifq->area;

	spin_lock_bh(&area->freelist_lock);
	while (area->free_count && pp->alloc.count < PP_ALLOC_CACHE_REFILL) {
		struct net_iov *niov = __io_zcrx_get_free_niov(area);
		netmem_ref netmem = net_iov_to_netmem(niov);

		net_mp_niov_set_page_pool(pp, niov);
		io_zcrx_sync_for_device(pp, niov);
		net_mp_netmem_place_in_cache(pp, netmem);
	}
	spin_unlock_bh(&area->freelist_lock);
}

static netmem_ref io_pp_zc_alloc_netmems(struct page_pool *pp, gfp_t gfp)
{
	struct io_zcrx_ifq *ifq = io_pp_to_ifq(pp);

	/* pp should already be ensuring that */
	if (unlikely(pp->alloc.count))
		goto out_return;

	io_zcrx_ring_refill(pp, ifq);
	if (likely(pp->alloc.count))
		goto out_return;

	io_zcrx_refill_slow(pp, ifq);
	if (!pp->alloc.count)
		return 0;
out_return:
	return pp->alloc.cache[--pp->alloc.count];
}

static bool io_pp_zc_release_netmem(struct page_pool *pp, netmem_ref netmem)
{
	struct net_iov *niov;

	if (WARN_ON_ONCE(!netmem_is_net_iov(netmem)))
		return false;

	niov = netmem_to_net_iov(netmem);
	net_mp_niov_clear_page_pool(niov);
	io_zcrx_return_niov_freelist(niov);
	return false;
}

static int io_pp_zc_init(struct page_pool *pp)
{
	struct io_zcrx_ifq *ifq = io_pp_to_ifq(pp);
	int ret;

	if (WARN_ON_ONCE(!ifq))
		return -EINVAL;
	if (WARN_ON_ONCE(ifq->dev != pp->p.dev))
		return -EINVAL;
	if (WARN_ON_ONCE(!pp->dma_map))
		return -EOPNOTSUPP;
	if (pp->p.order + PAGE_SHIFT != ifq->niov_shift)
		return -EINVAL;
	if (pp->p.dma_dir != DMA_FROM_DEVICE)
		return -EOPNOTSUPP;

	ret = io_zcrx_map_area(ifq, ifq->area);
	if (ret)
		return ret;

	refcount_inc(&ifq->refs);
	return 0;
}

static void io_pp_zc_destroy(struct page_pool *pp)
{
	io_put_zcrx_ifq(io_pp_to_ifq(pp));
}

static int io_pp_nl_fill(void *mp_priv, struct sk_buff *rsp,
			 struct netdev_rx_queue *rxq)
{
	struct nlattr *nest;
	int type;

	type = rxq ? NETDEV_A_QUEUE_IO_URING : NETDEV_A_PAGE_POOL_IO_URING;
	nest = nla_nest_start(rsp, type);
	if (!nest)
		return -EMSGSIZE;
	nla_nest_end(rsp, nest);

	return 0;
}

static void io_pp_uninstall(void *mp_priv, struct netdev_rx_queue *rxq)
{
	struct pp_memory_provider_params *p = &rxq->mp_params;
	struct io_zcrx_ifq *ifq = mp_priv;

	io_zcrx_drop_netdev(ifq);
	if (ifq->area)
		io_zcrx_unmap_area(ifq, ifq->area);

	p->mp_ops = NULL;
	p->mp_priv = NULL;
}

static const struct memory_provider_ops io_uring_pp_zc_ops = {
	.alloc_netmems = io_pp_zc_alloc_netmems,
	.release_netmem = io_pp_zc_release_netmem,
	.init = io_pp_zc_init,
	.destroy = io_pp_zc_destroy,
	.nl_fill = io_pp_nl_fill,
	.uninstall = io_pp_uninstall,
};

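/* Pull up to @nr refill ring entries and translate them into netmems. */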
static unsigned zcrx_parse_rq(netmem_ref *netmem_array, unsigned nr,
			      struct io_zcrx_ifq *zcrx)
{
	unsigned int mask = zcrx->rq_entries - 1;
	unsigned int i;

	nr = min(nr, io_zcrx_rqring_entries(zcrx));
	for (i = 0; i < nr; i++) {
		struct io_uring_zcrx_rqe *rqe = io_zcrx_get_rqe(zcrx, mask);
		struct net_iov *niov;

		if (!io_parse_rqe(rqe, zcrx, &niov))
			break;
		netmem_array[i] = net_iov_to_netmem(niov);
	}

	smp_store_release(&zcrx->rq_ring->head, zcrx->cached_rq_head);
	return i;
}

#define ZCRX_FLUSH_BATCH 32

static void zcrx_return_buffers(netmem_ref *netmems, unsigned nr)
{
	unsigned i;

	for (i = 0; i < nr; i++) {
		netmem_ref netmem = netmems[i];
		struct net_iov *niov = netmem_to_net_iov(netmem);

		if (!io_zcrx_put_niov_uref(niov))
			continue;
		if (!page_pool_unref_and_test(netmem))
			continue;
		io_zcrx_return_niov(niov);
	}
}

static int zcrx_flush_rq(struct io_ring_ctx *ctx, struct io_zcrx_ifq *zcrx,
			 struct zcrx_ctrl *ctrl)
{
	struct zcrx_ctrl_flush_rq *frq = &ctrl->zc_flush;
	netmem_ref netmems[ZCRX_FLUSH_BATCH];
	unsigned total = 0;
	unsigned nr;

	if (!mem_is_zero(&frq->__resv, sizeof(frq->__resv)))
		return -EINVAL;

	do {
		scoped_guard(spinlock_bh, &zcrx->rq_lock) {
			nr = zcrx_parse_rq(netmems, ZCRX_FLUSH_BATCH, zcrx);
			zcrx_return_buffers(netmems, nr);
		}

		total += nr;

		if (fatal_signal_pending(current))
			break;
		cond_resched();
	} while (nr == ZCRX_FLUSH_BATCH && total < zcrx->rq_entries);

	return 0;
}

int io_zcrx_ctrl(struct io_ring_ctx *ctx, void __user *arg, unsigned nr_args)
{
	struct zcrx_ctrl ctrl;
	struct io_zcrx_ifq *zcrx;

	if (nr_args)
		return -EINVAL;
	if (copy_from_user(&ctrl, arg, sizeof(ctrl)))
		return -EFAULT;
	if (!mem_is_zero(&ctrl.__resv, sizeof(ctrl.__resv)))
		return -EFAULT;

	zcrx = xa_load(&ctx->zcrx_ctxs, ctrl.zcrx_id);
	if (!zcrx)
		return -ENXIO;

	switch (ctrl.op) {
	case ZCRX_CTRL_FLUSH_RQ:
		return zcrx_flush_rq(ctx, zcrx, &ctrl);
	case ZCRX_CTRL_EXPORT:
		return zcrx_export(ctx, zcrx, &ctrl, arg);
	}

	return -EOPNOTSUPP;
}

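/*
 * Post a zero-copy completion: a normal CQE followed by an extended
 * io_uring_zcrx_cqe carrying the area token and data offset.
 */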
static bool io_zcrx_queue_cqe(struct io_kiocb *req, struct net_iov *niov,
			      struct io_zcrx_ifq *ifq, int off, int len)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct io_uring_zcrx_cqe *rcqe;
	struct io_zcrx_area *area;
	struct io_uring_cqe *cqe;
	u64 offset;

	if (!io_defer_get_uncommited_cqe(ctx, &cqe))
		return false;

	cqe->user_data = req->cqe.user_data;
	cqe->res = len;
	cqe->flags = IORING_CQE_F_MORE;
	if (ctx->flags & IORING_SETUP_CQE_MIXED)
		cqe->flags |= IORING_CQE_F_32;

	area = io_zcrx_iov_to_area(niov);
	offset = off + (net_iov_idx(niov) << ifq->niov_shift);
	rcqe = (struct io_uring_zcrx_cqe *)(cqe + 1);
	rcqe->off = offset + ((u64)area->area_id << IORING_ZCRX_AREA_SHIFT);
	rcqe->__pad = 0;
	return true;
}

static struct net_iov *io_alloc_fallback_niov(struct io_zcrx_ifq *ifq)
{
	struct io_zcrx_area *area = ifq->area;
	struct net_iov *niov = NULL;

	if (area->mem.is_dmabuf)
		return NULL;

	spin_lock_bh(&area->freelist_lock);
	if (area->free_count)
		niov = __io_zcrx_get_free_niov(area);
	spin_unlock_bh(&area->freelist_lock);

	if (niov)
		page_pool_fragment_netmem(net_iov_to_netmem(niov), 1);
	return niov;
}

struct io_copy_cache {
	struct page *page;
	unsigned long offset;
	size_t size;
};

static ssize_t io_copy_page(struct io_copy_cache *cc, struct page *src_page,
			    unsigned int src_offset, size_t len)
{
	size_t copied = 0;

	len = min(len, cc->size);

	while (len) {
		void *src_addr, *dst_addr;
		struct page *dst_page = cc->page;
		unsigned dst_offset = cc->offset;
		size_t n = len;

		if (folio_test_partial_kmap(page_folio(dst_page)) ||
		    folio_test_partial_kmap(page_folio(src_page))) {
			dst_page += dst_offset / PAGE_SIZE;
			dst_offset = offset_in_page(dst_offset);
			src_page += src_offset / PAGE_SIZE;
			src_offset = offset_in_page(src_offset);
			n = min(PAGE_SIZE - src_offset, PAGE_SIZE - dst_offset);
			n = min(n, len);
		}

		dst_addr = kmap_local_page(dst_page) + dst_offset;
		src_addr = kmap_local_page(src_page) + src_offset;

		memcpy(dst_addr, src_addr, n);

		kunmap_local(src_addr);
		kunmap_local(dst_addr);

		cc->size -= n;
		cc->offset += n;
		src_offset += n;
		len -= n;
		copied += n;
	}
	return copied;
}

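/*
 * Copy fallback for data not backed by zero-copy niovs: grab free niovs
 * from the area, copy into them and post a CQE for each filled niov.
 */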
static ssize_t io_zcrx_copy_chunk(struct io_kiocb *req, struct io_zcrx_ifq *ifq,
				  struct page *src_page, unsigned int src_offset,
				  size_t len)
{
	size_t copied = 0;
	int ret = 0;

	while (len) {
		struct io_copy_cache cc;
		struct net_iov *niov;
		size_t n;

		niov = io_alloc_fallback_niov(ifq);
		if (!niov) {
			ret = -ENOMEM;
			break;
		}

		cc.page = io_zcrx_iov_page(niov);
		cc.offset = 0;
		cc.size = PAGE_SIZE;

		n = io_copy_page(&cc, src_page, src_offset, len);

		if (!io_zcrx_queue_cqe(req, niov, ifq, 0, n)) {
			io_zcrx_return_niov(niov);
			ret = -ENOSPC;
			break;
		}

		io_zcrx_get_niov_uref(niov);
		src_offset += n;
		len -= n;
		copied += n;
	}

	return copied ? copied : ret;
}

static int io_zcrx_copy_frag(struct io_kiocb *req, struct io_zcrx_ifq *ifq,
			     const skb_frag_t *frag, int off, int len)
{
	struct page *page = skb_frag_page(frag);

	return io_zcrx_copy_chunk(req, ifq, page, off + skb_frag_off(frag), len);
}

static int io_zcrx_recv_frag(struct io_kiocb *req, struct io_zcrx_ifq *ifq,
			     const skb_frag_t *frag, int off, int len)
{
	struct net_iov *niov;
	struct page_pool *pp;

	if (unlikely(!skb_frag_is_net_iov(frag)))
		return io_zcrx_copy_frag(req, ifq, frag, off, len);

	niov = netmem_to_net_iov(frag->netmem);
	pp = niov->desc.pp;

	if (!pp || pp->mp_ops != &io_uring_pp_zc_ops || io_pp_to_ifq(pp) != ifq)
		return -EFAULT;

	if (!io_zcrx_queue_cqe(req, niov, ifq, off + skb_frag_off(frag), len))
		return -ENOSPC;

	/*
	 * Prevent it from being recycled while user is accessing it.
	 * It has to be done before grabbing a user reference.
	 */
	page_pool_ref_netmem(net_iov_to_netmem(niov));
	io_zcrx_get_niov_uref(niov);
	return len;
}

static int
io_zcrx_recv_skb(read_descriptor_t *desc, struct sk_buff *skb,
		 unsigned int offset, size_t len)
{
	struct io_zcrx_args *args = desc->arg.data;
	struct io_zcrx_ifq *ifq = args->ifq;
	struct io_kiocb *req = args->req;
	struct sk_buff *frag_iter;
	unsigned start, start_off = offset;
	int i, copy, end, off;
	int ret = 0;

	len = min_t(size_t, len, desc->count);
	/*
	 * __tcp_read_sock() always calls io_zcrx_recv_skb one last time, even
	 * if desc->count is already 0. This is caused by the if (offset + 1 !=
	 * skb->len) check. Return early in this case to break out of
	 * __tcp_read_sock().
	 */
	if (!len)
		return 0;
	if (unlikely(args->nr_skbs++ > IO_SKBS_PER_CALL_LIMIT))
		return -EAGAIN;

	if (unlikely(offset < skb_headlen(skb))) {
		ssize_t copied;
		size_t to_copy;

		to_copy = min_t(size_t, skb_headlen(skb) - offset, len);
		copied = io_zcrx_copy_chunk(req, ifq, virt_to_page(skb->data),
					    offset_in_page(skb->data) + offset,
					    to_copy);
		if (copied < 0) {
			ret = copied;
			goto out;
		}
		offset += copied;
		len -= copied;
		if (!len)
			goto out;
		if (offset != skb_headlen(skb))
			goto out;
	}

	start = skb_headlen(skb);

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const skb_frag_t *frag;

		if (WARN_ON(start > offset + len))
			return -EFAULT;

		frag = &skb_shinfo(skb)->frags[i];
		end = start + skb_frag_size(frag);

		if (offset < end) {
			copy = end - offset;
			if (copy > len)
				copy = len;

			off = offset - start;
			ret = io_zcrx_recv_frag(req, ifq, frag, off, copy);
			if (ret < 0)
				goto out;

			offset += ret;
			len -= ret;
			if (len == 0 || ret != copy)
				goto out;
		}
		start = end;
	}

	skb_walk_frags(skb, frag_iter) {
		if (WARN_ON(start > offset + len))
			return -EFAULT;

		end = start + frag_iter->len;
		if (offset < end) {
			size_t count;

			copy = end - offset;
			if (copy > len)
				copy = len;

			off = offset - start;
			count = desc->count;
			ret = io_zcrx_recv_skb(desc, frag_iter, off, copy);
			desc->count = count;
			if (ret < 0)
				goto out;

			offset += ret;
			len -= ret;
			if (len == 0 || ret != copy)
				goto out;
		}
		start = end;
	}

out:
	if (offset == start_off)
		return ret;
	desc->count -= (offset - start_off);
	return offset - start_off;
}

static int io_zcrx_tcp_recvmsg(struct io_kiocb *req, struct io_zcrx_ifq *ifq,
			       struct sock *sk, int flags,
			       unsigned issue_flags, unsigned int *outlen)
{
	unsigned int len = *outlen;
	struct io_zcrx_args args = {
		.req = req,
		.ifq = ifq,
		.sock = sk->sk_socket,
	};
	read_descriptor_t rd_desc = {
		.count = len ? len : UINT_MAX,
		.arg.data = &args,
	};
	int ret;

	lock_sock(sk);
	ret = tcp_read_sock(sk, &rd_desc, io_zcrx_recv_skb);
	if (len && ret > 0)
		*outlen = len - ret;
	if (ret <= 0) {
		if (ret < 0 || sock_flag(sk, SOCK_DONE))
			goto out;
		if (sk->sk_err)
			ret = sock_error(sk);
		else if (sk->sk_shutdown & RCV_SHUTDOWN)
			goto out;
		else if (sk->sk_state == TCP_CLOSE)
			ret = -ENOTCONN;
		else
			ret = -EAGAIN;
	} else if (unlikely(args.nr_skbs > IO_SKBS_PER_CALL_LIMIT) &&
		   (issue_flags & IO_URING_F_MULTISHOT)) {
		ret = IOU_REQUEUE;
	} else if (sock_flag(sk, SOCK_DONE)) {
		/* Make it to retry until it finally gets 0. */
		if (issue_flags & IO_URING_F_MULTISHOT)
			ret = IOU_REQUEUE;
		else
			ret = -EAGAIN;
	}
out:
	release_sock(sk);
	return ret;
}

int io_zcrx_recv(struct io_kiocb *req, struct io_zcrx_ifq *ifq,
		 struct socket *sock, unsigned int flags,
		 unsigned issue_flags, unsigned int *len)
{
	struct sock *sk = sock->sk;
	const struct proto *prot = READ_ONCE(sk->sk_prot);

	if (prot->recvmsg != tcp_recvmsg)
		return -EPROTONOSUPPORT;

	sock_rps_record_flow(sk);
	return io_zcrx_tcp_recvmsg(req, ifq, sk, flags, issue_flags, len);
}