// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/dma-map-ops.h>
#include <linux/mm.h>
#include <linux/nospec.h>
#include <linux/io_uring.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/skbuff_ref.h>
#include <linux/anon_inodes.h>

#include <net/page_pool/helpers.h>
#include <net/page_pool/memory_provider.h>
#include <net/netlink.h>
#include <net/netdev_queues.h>
#include <net/netdev_rx_queue.h>
#include <net/tcp.h>
#include <net/rps.h>

#include <trace/events/page_pool.h>

#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "kbuf.h"
#include "memmap.h"
#include "zcrx.h"
#include "rsrc.h"

#define IO_ZCRX_AREA_SUPPORTED_FLAGS	(IORING_ZCRX_AREA_DMABUF)

#define IO_DMA_ATTR (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING)

static inline struct io_zcrx_ifq *io_pp_to_ifq(struct page_pool *pp)
{
	return pp->mp_priv;
}

static inline struct io_zcrx_area *io_zcrx_iov_to_area(const struct net_iov *niov)
{
	struct net_iov_area *owner = net_iov_owner(niov);

	return container_of(owner, struct io_zcrx_area, nia);
}

static inline struct page *io_zcrx_iov_page(const struct net_iov *niov)
{
	struct io_zcrx_area *area = io_zcrx_iov_to_area(niov);
	unsigned niov_pages_shift;

	lockdep_assert(!area->mem.is_dmabuf);

	niov_pages_shift = area->ifq->niov_shift - PAGE_SHIFT;
	return area->mem.pages[net_iov_idx(niov) << niov_pages_shift];
}

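/*
 * Largest supported buffer size, expressed as a power-of-two shift: every
 * DMA sg entry must hold a whole number of buffers, so the limit is the
 * lowest set bit across all sg entry lengths.
 */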
static int io_area_max_shift(struct io_zcrx_mem *mem)
{
	struct sg_table *sgt = mem->sgt;
	struct scatterlist *sg;
	unsigned shift = -1U;
	unsigned i;

	for_each_sgtable_dma_sg(sgt, sg, i)
		shift = min(shift, __ffs(sg_dma_len(sg)));
	return shift;
}

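/*
 * Walk the area's DMA-mapped scatterlist and assign each net_iov its DMA
 * address by slicing every sg entry into niov-sized chunks. Fails if an
 * entry isn't a multiple of the buffer size or the table doesn't cover
 * all niovs.
 */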
static int io_populate_area_dma(struct io_zcrx_ifq *ifq,
				struct io_zcrx_area *area)
{
	unsigned niov_size = 1U << ifq->niov_shift;
	struct sg_table *sgt = area->mem.sgt;
	struct scatterlist *sg;
	unsigned i, niov_idx = 0;

	for_each_sgtable_dma_sg(sgt, sg, i) {
		dma_addr_t dma = sg_dma_address(sg);
		unsigned long sg_len = sg_dma_len(sg);

		if (WARN_ON_ONCE(sg_len % niov_size))
			return -EINVAL;

		while (sg_len && niov_idx < area->nia.num_niovs) {
			struct net_iov *niov = &area->nia.niovs[niov_idx];

			if (net_mp_niov_set_dma_addr(niov, dma))
				return -EFAULT;
			sg_len -= niov_size;
			dma += niov_size;
			niov_idx++;
		}
	}

	if (WARN_ON_ONCE(niov_idx != area->nia.num_niovs))
		return -EFAULT;
	return 0;
}

static void io_release_dmabuf(struct io_zcrx_mem *mem)
{
	if (!IS_ENABLED(CONFIG_DMA_SHARED_BUFFER))
		return;

	if (mem->sgt)
		dma_buf_unmap_attachment_unlocked(mem->attach, mem->sgt,
						  DMA_FROM_DEVICE);
	if (mem->attach)
		dma_buf_detach(mem->dmabuf, mem->attach);
	if (mem->dmabuf)
		dma_buf_put(mem->dmabuf);

	mem->sgt = NULL;
	mem->attach = NULL;
	mem->dmabuf = NULL;
}

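/*
 * Take a reference to the dma-buf passed by user space, attach it to the
 * queue's DMA device and map it for device access. The mapped length must
 * match the registered area length exactly; offsets into the buffer are
 * not supported.
 */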
static int io_import_dmabuf(struct io_zcrx_ifq *ifq,
			    struct io_zcrx_mem *mem,
			    struct io_uring_zcrx_area_reg *area_reg)
{
	unsigned long off = (unsigned long)area_reg->addr;
	unsigned long len = (unsigned long)area_reg->len;
	unsigned long total_size = 0;
	struct scatterlist *sg;
	int dmabuf_fd = area_reg->dmabuf_fd;
	int i, ret;

	if (!ifq->dev)
		return -EINVAL;
	if (off)
		return -EINVAL;
	if (!IS_ENABLED(CONFIG_DMA_SHARED_BUFFER))
		return -EINVAL;

	mem->is_dmabuf = true;
	mem->dmabuf = dma_buf_get(dmabuf_fd);
	if (IS_ERR(mem->dmabuf)) {
		ret = PTR_ERR(mem->dmabuf);
		mem->dmabuf = NULL;
		goto err;
	}

	mem->attach = dma_buf_attach(mem->dmabuf, ifq->dev);
	if (IS_ERR(mem->attach)) {
		ret = PTR_ERR(mem->attach);
		mem->attach = NULL;
		goto err;
	}

	mem->sgt = dma_buf_map_attachment_unlocked(mem->attach, DMA_FROM_DEVICE);
	if (IS_ERR(mem->sgt)) {
		ret = PTR_ERR(mem->sgt);
		mem->sgt = NULL;
		goto err;
	}

	for_each_sgtable_dma_sg(mem->sgt, sg, i)
		total_size += sg_dma_len(sg);

	if (total_size != len) {
		ret = -EINVAL;
		goto err;
	}

	mem->size = len;
	return 0;
err:
	io_release_dmabuf(mem);
	return ret;
}

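/*
 * Count how many pages to charge against the user's memory accounting.
 * Pages belonging to the same folio are charged once, via the folio size,
 * rather than per constituent page.
 */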
static unsigned long io_count_account_pages(struct page **pages, unsigned nr_pages)
{
	struct folio *last_folio = NULL;
	unsigned long res = 0;
	int i;

	for (i = 0; i < nr_pages; i++) {
		struct folio *folio = page_folio(pages[i]);

		if (folio == last_folio)
			continue;
		last_folio = folio;
		res += folio_nr_pages(folio);
	}
	return res;
}

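/*
 * Pin the user memory backing the area, build a scatterlist over it and,
 * if the queue is bound to a device, DMA map it. The pinned pages are
 * charged to the registering user's memory accounting.
 */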
static int io_import_umem(struct io_zcrx_ifq *ifq,
			  struct io_zcrx_mem *mem,
			  struct io_uring_zcrx_area_reg *area_reg)
{
	struct page **pages;
	int nr_pages, ret;
	bool mapped = false;

	if (area_reg->dmabuf_fd)
		return -EINVAL;
	if (!area_reg->addr)
		return -EFAULT;
	pages = io_pin_pages((unsigned long)area_reg->addr, area_reg->len,
			     &nr_pages);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	ret = sg_alloc_table_from_pages(&mem->page_sg_table, pages, nr_pages,
					0, (unsigned long)nr_pages << PAGE_SHIFT,
					GFP_KERNEL_ACCOUNT);
	if (ret)
		goto out_err;

	if (ifq->dev) {
		ret = dma_map_sgtable(ifq->dev, &mem->page_sg_table,
				      DMA_FROM_DEVICE, IO_DMA_ATTR);
		if (ret < 0)
			goto out_err;
		mapped = true;
	}

	mem->account_pages = io_count_account_pages(pages, nr_pages);
	ret = io_account_mem(ifq->user, ifq->mm_account, mem->account_pages);
	if (ret < 0) {
		mem->account_pages = 0;
		goto out_err;
	}

	mem->sgt = &mem->page_sg_table;
	mem->pages = pages;
	mem->nr_folios = nr_pages;
	mem->size = area_reg->len;
	return ret;
out_err:
	if (mapped)
		dma_unmap_sgtable(ifq->dev, &mem->page_sg_table,
				  DMA_FROM_DEVICE, IO_DMA_ATTR);
	sg_free_table(&mem->page_sg_table);
	unpin_user_pages(pages, nr_pages);
	kvfree(pages);
	return ret;
}

static void io_release_area_mem(struct io_zcrx_mem *mem)
{
	if (mem->is_dmabuf) {
		io_release_dmabuf(mem);
		return;
	}
	if (mem->pages) {
		unpin_user_pages(mem->pages, mem->nr_folios);
		sg_free_table(mem->sgt);
		mem->sgt = NULL;
		kvfree(mem->pages);
	}
}

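/*
 * Validate the user-supplied area registration and import its backing
 * memory, either a dma-buf or pinned user pages. The range must be page
 * aligned and pass the usual user buffer range checks.
 */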
static int io_import_area(struct io_zcrx_ifq *ifq,
			  struct io_zcrx_mem *mem,
			  struct io_uring_zcrx_area_reg *area_reg)
{
	int ret;

	if (area_reg->flags & ~IO_ZCRX_AREA_SUPPORTED_FLAGS)
		return -EINVAL;
	if (area_reg->rq_area_token)
		return -EINVAL;
	if (area_reg->__resv2[0] || area_reg->__resv2[1])
		return -EINVAL;

	ret = io_validate_user_buf_range(area_reg->addr, area_reg->len);
	if (ret)
		return ret;
	if (area_reg->addr & ~PAGE_MASK || area_reg->len & ~PAGE_MASK)
		return -EINVAL;

	if (area_reg->flags & IORING_ZCRX_AREA_DMABUF)
		return io_import_dmabuf(ifq, mem, area_reg);
	return io_import_umem(ifq, mem, area_reg);
}

static void io_zcrx_unmap_area(struct io_zcrx_ifq *ifq,
			       struct io_zcrx_area *area)
{
	int i;

	guard(mutex)(&ifq->pp_lock);
	if (!area->is_mapped)
		return;
	area->is_mapped = false;

	if (area->nia.niovs) {
		for (i = 0; i < area->nia.num_niovs; i++)
			net_mp_niov_set_dma_addr(&area->nia.niovs[i], 0);
	}

	if (area->mem.is_dmabuf) {
		io_release_dmabuf(&area->mem);
	} else {
		dma_unmap_sgtable(ifq->dev, &area->mem.page_sg_table,
				  DMA_FROM_DEVICE, IO_DMA_ATTR);
	}
}

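/*
 * Sync freshly refilled buffers for device access before they are handed
 * back to the page pool. Skipped entirely when the DMA device doesn't
 * require explicit syncing, per dma_dev_need_sync().
 */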
static void zcrx_sync_for_device(struct page_pool *pp, struct io_zcrx_ifq *zcrx,
				 netmem_ref *netmems, unsigned nr)
{
#if defined(CONFIG_HAS_DMA) && defined(CONFIG_DMA_NEED_SYNC)
	struct device *dev = pp->p.dev;
	unsigned i, niov_size;
	dma_addr_t dma_addr;

	if (!dma_dev_need_sync(dev))
		return;
	niov_size = 1U << zcrx->niov_shift;

	for (i = 0; i < nr; i++) {
		dma_addr = page_pool_get_dma_addr_netmem(netmems[i]);
		__dma_sync_single_for_device(dev, dma_addr + pp->p.offset,
					     niov_size, pp->p.dma_dir);
	}
#endif
}

#define IO_RQ_MAX_ENTRIES		32768

#define IO_SKBS_PER_CALL_LIMIT	20

struct io_zcrx_args {
	struct io_kiocb *req;
	struct io_zcrx_ifq *ifq;
	struct socket *sock;
	unsigned nr_skbs;
};

static const struct memory_provider_ops io_uring_pp_zc_ops;

static inline atomic_t *io_get_user_counter(struct net_iov *niov)
{
	struct io_zcrx_area *area = io_zcrx_iov_to_area(niov);

	return &area->user_refs[net_iov_idx(niov)];
}

static bool io_zcrx_put_niov_uref(struct net_iov *niov)
{
	atomic_t *uref = io_get_user_counter(niov);
	int old;

	old = atomic_read(uref);
	do {
		if (unlikely(old == 0))
			return false;
	} while (!atomic_try_cmpxchg(uref, &old, old - 1));

	return true;
}

static void io_zcrx_get_niov_uref(struct net_iov *niov)
{
	atomic_inc(io_get_user_counter(niov));
}

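/*
 * The refill ring region starts with a struct io_uring holding the head
 * and tail indices, with the rqe array following at the next cache line
 * boundary. These offsets are reported back to user space.
 */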
static void io_fill_zcrx_offsets(struct io_uring_zcrx_offsets *offsets)
{
	offsets->head = offsetof(struct io_uring, head);
	offsets->tail = offsetof(struct io_uring, tail);
	offsets->rqes = ALIGN(sizeof(struct io_uring), L1_CACHE_BYTES);
}

static int io_allocate_rbuf_ring(struct io_ring_ctx *ctx,
				 struct io_zcrx_ifq *ifq,
				 struct io_uring_zcrx_ifq_reg *reg,
				 struct io_uring_region_desc *rd,
				 u32 id)
{
	u64 mmap_offset;
	size_t off, size;
	void *ptr;
	int ret;

	io_fill_zcrx_offsets(&reg->offsets);
	off = reg->offsets.rqes;
	size = off + sizeof(struct io_uring_zcrx_rqe) * reg->rq_entries;
	if (size > rd->size)
		return -EINVAL;

	mmap_offset = IORING_MAP_OFF_ZCRX_REGION;
	mmap_offset += (u64)id << IORING_OFF_ZCRX_SHIFT;

	ret = io_create_region(ctx, &ifq->rq_region, rd, mmap_offset);
	if (ret < 0)
		return ret;

	ptr = io_region_get_ptr(&ifq->rq_region);
	ifq->rq.ring = (struct io_uring *)ptr;
	ifq->rq.rqes = (struct io_uring_zcrx_rqe *)(ptr + off);

	memset(ifq->rq.ring, 0, sizeof(*ifq->rq.ring));
	return 0;
}

static void io_free_rbuf_ring(struct io_zcrx_ifq *ifq)
{
	io_free_region(ifq->user, &ifq->rq_region);
	ifq->rq.ring = NULL;
	ifq->rq.rqes = NULL;
}

static void io_zcrx_free_area(struct io_zcrx_ifq *ifq,
			      struct io_zcrx_area *area)
{
	io_zcrx_unmap_area(ifq, area);
	io_release_area_mem(&area->mem);

	if (area->mem.account_pages)
		io_unaccount_mem(ifq->user, ifq->mm_account,
				 area->mem.account_pages);

	kvfree(area->freelist);
	kvfree(area->nia.niovs);
	kvfree(area->user_refs);
	kfree(area);
}

static int io_zcrx_append_area(struct io_zcrx_ifq *ifq,
			       struct io_zcrx_area *area)
{
	bool kern_readable = !area->mem.is_dmabuf;

	if (WARN_ON_ONCE(ifq->area))
		return -EINVAL;
	if (WARN_ON_ONCE(ifq->kern_readable != kern_readable))
		return -EINVAL;

	ifq->area = area;
	return 0;
}

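/*
 * Create the buffer area for an interface queue: import the backing
 * memory, size buffers according to the requested rx_buf_len (PAGE_SIZE
 * by default), allocate the net_iov array, freelist and user refcounts,
 * and populate per-buffer DMA addresses when a device is attached.
 */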
static int io_zcrx_create_area(struct io_zcrx_ifq *ifq,
			       struct io_uring_zcrx_area_reg *area_reg,
			       struct io_uring_zcrx_ifq_reg *reg)
{
	int buf_size_shift = PAGE_SHIFT;
	struct io_zcrx_area *area;
	unsigned nr_iovs;
	int i, ret;

	if (reg->rx_buf_len) {
		if (!is_power_of_2(reg->rx_buf_len) ||
		    reg->rx_buf_len < PAGE_SIZE)
			return -EINVAL;
		buf_size_shift = ilog2(reg->rx_buf_len);
	}
	if (!ifq->dev && buf_size_shift != PAGE_SHIFT)
		return -EOPNOTSUPP;

	ret = -ENOMEM;
	area = kzalloc_obj(*area);
	if (!area)
		goto err;
	area->ifq = ifq;

	ret = io_import_area(ifq, &area->mem, area_reg);
	if (ret)
		goto err;
	if (ifq->dev)
		area->is_mapped = true;

	if (ifq->dev && buf_size_shift > io_area_max_shift(&area->mem)) {
		ret = -ERANGE;
		goto err;
	}

	ifq->niov_shift = buf_size_shift;
	nr_iovs = area->mem.size >> ifq->niov_shift;
	area->nia.num_niovs = nr_iovs;

	ret = -ENOMEM;
	area->nia.niovs = kvmalloc_objs(area->nia.niovs[0], nr_iovs,
					GFP_KERNEL_ACCOUNT | __GFP_ZERO);
	if (!area->nia.niovs)
		goto err;

	area->freelist = kvmalloc_array(nr_iovs, sizeof(area->freelist[0]),
					GFP_KERNEL_ACCOUNT | __GFP_ZERO);
	if (!area->freelist)
		goto err;

	area->user_refs = kvmalloc_objs(area->user_refs[0], nr_iovs,
					GFP_KERNEL_ACCOUNT | __GFP_ZERO);
	if (!area->user_refs)
		goto err;

	for (i = 0; i < nr_iovs; i++) {
		struct net_iov *niov = &area->nia.niovs[i];

		net_iov_init(niov, &area->nia, NET_IOV_IOURING);
		area->freelist[i] = i;
		atomic_set(&area->user_refs[i], 0);
	}

	if (ifq->dev) {
		ret = io_populate_area_dma(ifq, area);
		if (ret)
			goto err;
	}

	area->free_count = nr_iovs;
	/* we're only supporting one area per ifq for now */
	area->area_id = 0;
	area_reg->rq_area_token = (u64)area->area_id << IORING_ZCRX_AREA_SHIFT;
	spin_lock_init(&area->freelist_lock);

	ret = io_zcrx_append_area(ifq, area);
	if (!ret)
		return 0;
err:
	if (area)
		io_zcrx_free_area(ifq, area);
	return ret;
}

static struct io_zcrx_ifq *io_zcrx_ifq_alloc(struct io_ring_ctx *ctx)
{
	struct io_zcrx_ifq *ifq;

	ifq = kzalloc_obj(*ifq);
	if (!ifq)
		return NULL;

	ifq->if_rxq = -1;
	spin_lock_init(&ifq->rq.lock);
	mutex_init(&ifq->pp_lock);
	refcount_set(&ifq->refs, 1);
	refcount_set(&ifq->user_refs, 1);
	return ifq;
}

static void io_zcrx_drop_netdev(struct io_zcrx_ifq *ifq)
{
	guard(mutex)(&ifq->pp_lock);

	if (!ifq->netdev)
		return;
	netdev_put(ifq->netdev, &ifq->netdev_tracker);
	ifq->netdev = NULL;
}

static void io_close_queue(struct io_zcrx_ifq *ifq)
{
	struct net_device *netdev;
	netdevice_tracker netdev_tracker;
	struct pp_memory_provider_params p = {
		.mp_ops = &io_uring_pp_zc_ops,
		.mp_priv = ifq,
	};

	scoped_guard(mutex, &ifq->pp_lock) {
		netdev = ifq->netdev;
		netdev_tracker = ifq->netdev_tracker;
		ifq->netdev = NULL;
	}

	if (netdev) {
		if (ifq->if_rxq != -1) {
			netdev_lock(netdev);
			netif_mp_close_rxq(netdev, ifq->if_rxq, &p);
			netdev_unlock(netdev);
		}
		netdev_put(netdev, &netdev_tracker);
	}
	ifq->if_rxq = -1;
}

static void io_zcrx_ifq_free(struct io_zcrx_ifq *ifq)
{
	io_close_queue(ifq);

	if (ifq->area)
		io_zcrx_free_area(ifq, ifq->area);
	if (ifq->mm_account)
		mmdrop(ifq->mm_account);
	if (ifq->dev)
		put_device(ifq->dev);

	io_free_rbuf_ring(ifq);
	free_uid(ifq->user);
	mutex_destroy(&ifq->pp_lock);
	kfree(ifq);
}

static void io_put_zcrx_ifq(struct io_zcrx_ifq *ifq)
{
	if (refcount_dec_and_test(&ifq->refs))
		io_zcrx_ifq_free(ifq);
}

static void io_zcrx_return_niov_freelist(struct net_iov *niov)
{
	struct io_zcrx_area *area = io_zcrx_iov_to_area(niov);

	guard(spinlock_bh)(&area->freelist_lock);
	if (WARN_ON_ONCE(area->free_count >= area->nia.num_niovs))
		return;
	area->freelist[area->free_count++] = net_iov_idx(niov);
}

static struct net_iov *zcrx_get_free_niov(struct io_zcrx_area *area)
{
	unsigned niov_idx;

	lockdep_assert_held(&area->freelist_lock);

	if (unlikely(!area->free_count))
		return NULL;

	niov_idx = area->freelist[--area->free_count];
	return &area->nia.niovs[niov_idx];
}

static void io_zcrx_return_niov(struct net_iov *niov)
{
	netmem_ref netmem = net_iov_to_netmem(niov);

	if (!niov->desc.pp) {
		/* copy fallback allocated niovs */
		io_zcrx_return_niov_freelist(niov);
		return;
	}
	page_pool_put_unrefed_netmem(niov->desc.pp, netmem, -1, false);
}

static void io_zcrx_scrub(struct io_zcrx_ifq *ifq)
{
	struct io_zcrx_area *area = ifq->area;
	int i;

	if (!area)
		return;

	/* Reclaim all buffers that were handed out to user space. */
	for (i = 0; i < area->nia.num_niovs; i++) {
		struct net_iov *niov = &area->nia.niovs[i];
		int nr;

		if (!atomic_read(io_get_user_counter(niov)))
			continue;
		nr = atomic_xchg(io_get_user_counter(niov), 0);
		if (nr && !page_pool_unref_netmem(net_iov_to_netmem(niov), nr))
			io_zcrx_return_niov(niov);
	}
}

static void zcrx_unregister_user(struct io_zcrx_ifq *ifq)
{
	if (refcount_dec_and_test(&ifq->user_refs)) {
		io_close_queue(ifq);
		io_zcrx_scrub(ifq);
	}
}

static void zcrx_unregister(struct io_zcrx_ifq *ifq)
{
	zcrx_unregister_user(ifq);
	io_put_zcrx_ifq(ifq);
}

struct io_mapped_region *io_zcrx_get_region(struct io_ring_ctx *ctx,
					    unsigned int id)
{
	struct io_zcrx_ifq *ifq = xa_load(&ctx->zcrx_ctxs, id);

	lockdep_assert_held(&ctx->mmap_lock);

	return ifq ? &ifq->rq_region : NULL;
}

static int zcrx_box_release(struct inode *inode, struct file *file)
{
	struct io_zcrx_ifq *ifq = file->private_data;

	if (WARN_ON_ONCE(!ifq))
		return -EFAULT;
	zcrx_unregister(ifq);
	return 0;
}

static const struct file_operations zcrx_box_fops = {
	.owner = THIS_MODULE,
	.release = zcrx_box_release,
};

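/*
 * Export the interface queue as an anonymous file descriptor so it can be
 * imported by another io_uring instance. Both the internal and the user
 * refcount are bumped; the file's release drops them again via
 * zcrx_unregister().
 */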
static int zcrx_export(struct io_ring_ctx *ctx, struct io_zcrx_ifq *ifq,
		       struct zcrx_ctrl *ctrl, void __user *arg)
{
	struct zcrx_ctrl_export *ce = &ctrl->zc_export;
	struct file *file;
	int fd = -1;

	if (!mem_is_zero(ce, sizeof(*ce)))
		return -EINVAL;
	fd = get_unused_fd_flags(O_CLOEXEC);
	if (fd < 0)
		return fd;

	ce->zcrx_fd = fd;
	if (copy_to_user(arg, ctrl, sizeof(*ctrl))) {
		put_unused_fd(fd);
		return -EFAULT;
	}

	refcount_inc(&ifq->refs);
	refcount_inc(&ifq->user_refs);

	file = anon_inode_create_getfile("[zcrx]", &zcrx_box_fops,
					 ifq, O_CLOEXEC, NULL);
	if (IS_ERR(file)) {
		put_unused_fd(fd);
		zcrx_unregister(ifq);
		return PTR_ERR(file);
	}

	fd_install(fd, file);
	return 0;
}

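/*
 * Import a previously exported interface queue into this ring. The fd is
 * carried in reg->if_idx; the queue keeps its existing refill ring and
 * area, so only a new zcrx id is allocated and published for this ctx.
 */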
static int import_zcrx(struct io_ring_ctx *ctx,
		       struct io_uring_zcrx_ifq_reg __user *arg,
		       struct io_uring_zcrx_ifq_reg *reg)
{
	struct io_zcrx_ifq *ifq;
	struct file *file;
	int fd, ret;
	u32 id;

	if (!(ctx->flags & IORING_SETUP_DEFER_TASKRUN))
		return -EINVAL;
	if (!(ctx->flags & (IORING_SETUP_CQE32|IORING_SETUP_CQE_MIXED)))
		return -EINVAL;
	if (reg->if_rxq || reg->rq_entries || reg->area_ptr || reg->region_ptr)
		return -EINVAL;
	if (reg->flags & ~ZCRX_REG_IMPORT)
		return -EINVAL;

	fd = reg->if_idx;
	CLASS(fd, f)(fd);
	if (fd_empty(f))
		return -EBADF;

	file = fd_file(f);
	if (file->f_op != &zcrx_box_fops || !file->private_data)
		return -EBADF;

	ifq = file->private_data;
	refcount_inc(&ifq->refs);
	refcount_inc(&ifq->user_refs);

	scoped_guard(mutex, &ctx->mmap_lock) {
		ret = xa_alloc(&ctx->zcrx_ctxs, &id, NULL, xa_limit_31b, GFP_KERNEL);
		if (ret)
			goto err;
	}

	reg->zcrx_id = id;
	io_fill_zcrx_offsets(&reg->offsets);
	if (copy_to_user(arg, reg, sizeof(*reg))) {
		ret = -EFAULT;
		goto err_xa_erase;
	}

	scoped_guard(mutex, &ctx->mmap_lock) {
		ret = -ENOMEM;
		if (xa_store(&ctx->zcrx_ctxs, id, ifq, GFP_KERNEL))
			goto err_xa_erase;
	}

	return 0;
err_xa_erase:
	scoped_guard(mutex, &ctx->mmap_lock)
		xa_erase(&ctx->zcrx_ctxs, id);
err:
	zcrx_unregister(ifq);
	return ret;
}

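/*
 * Bind the interface queue to a netdev rx queue: look up the device by
 * index, resolve the DMA device for that queue, create the buffer area
 * and install the io_uring memory provider on the queue.
 */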
static int zcrx_register_netdev(struct io_zcrx_ifq *ifq,
				struct io_uring_zcrx_ifq_reg *reg,
				struct io_uring_zcrx_area_reg *area)
{
	struct pp_memory_provider_params mp_param = {};
	unsigned if_rxq = reg->if_rxq;
	int ret;

	ifq->netdev = netdev_get_by_index_lock(current->nsproxy->net_ns,
					       reg->if_idx);
	if (!ifq->netdev)
		return -ENODEV;

	netdev_hold(ifq->netdev, &ifq->netdev_tracker, GFP_KERNEL);

	ifq->dev = netdev_queue_get_dma_dev(ifq->netdev, if_rxq, NETDEV_QUEUE_TYPE_RX);
	if (!ifq->dev) {
		ret = -EOPNOTSUPP;
		goto netdev_put_unlock;
	}
	get_device(ifq->dev);

	ret = io_zcrx_create_area(ifq, area, reg);
	if (ret)
		goto netdev_put_unlock;

	if (reg->rx_buf_len)
		mp_param.rx_page_size = 1U << ifq->niov_shift;
	mp_param.mp_ops = &io_uring_pp_zc_ops;
	mp_param.mp_priv = ifq;
	ret = netif_mp_open_rxq(ifq->netdev, if_rxq, &mp_param, NULL);
	if (ret)
		goto netdev_put_unlock;

	ifq->if_rxq = if_rxq;
	ret = 0;
netdev_put_unlock:
	netdev_unlock(ifq->netdev);
	return ret;
}

int io_register_zcrx(struct io_ring_ctx *ctx,
		     struct io_uring_zcrx_ifq_reg __user *arg)
{
	struct io_uring_zcrx_area_reg area;
	struct io_uring_zcrx_ifq_reg reg;
	struct io_uring_region_desc rd;
	struct io_zcrx_ifq *ifq;
	int ret;
	u32 id;

	/*
	 * CAP_NET_ADMIN is required because:
	 * 1. registration takes over an interface rx queue, and
	 * 2. zero copy rx can observe data destined for sockets of other tasks.
	 */
	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	/* mandatory io_uring features for zc rx */
	if (!(ctx->flags & IORING_SETUP_DEFER_TASKRUN))
		return -EINVAL;
	if (!(ctx->flags & (IORING_SETUP_CQE32|IORING_SETUP_CQE_MIXED)))
		return -EINVAL;
	if (copy_from_user(&reg, arg, sizeof(reg)))
		return -EFAULT;
	if (!mem_is_zero(&reg.__resv, sizeof(reg.__resv)) || reg.zcrx_id)
		return -EINVAL;
	if (reg.flags & ~ZCRX_SUPPORTED_REG_FLAGS)
		return -EINVAL;
	if (reg.flags & ZCRX_REG_IMPORT)
		return import_zcrx(ctx, arg, &reg);
	if (copy_from_user(&rd, u64_to_user_ptr(reg.region_ptr), sizeof(rd)))
		return -EFAULT;
	if (reg.if_rxq == -1 || !reg.rq_entries)
		return -EINVAL;
	if ((reg.if_rxq || reg.if_idx) && (reg.flags & ZCRX_REG_NODEV))
		return -EINVAL;
	if (reg.rq_entries > IO_RQ_MAX_ENTRIES) {
		if (!(ctx->flags & IORING_SETUP_CLAMP))
			return -EINVAL;
		reg.rq_entries = IO_RQ_MAX_ENTRIES;
	}
	reg.rq_entries = roundup_pow_of_two(reg.rq_entries);

	if (copy_from_user(&area, u64_to_user_ptr(reg.area_ptr), sizeof(area)))
		return -EFAULT;

	ifq = io_zcrx_ifq_alloc(ctx);
	if (!ifq)
		return -ENOMEM;

	if (ctx->user) {
		get_uid(ctx->user);
		ifq->user = ctx->user;
	}
	if (ctx->mm_account) {
		mmgrab(ctx->mm_account);
		ifq->mm_account = ctx->mm_account;
	}
	ifq->rq.nr_entries = reg.rq_entries;

	scoped_guard(mutex, &ctx->mmap_lock) {
		/* preallocate id */
		ret = xa_alloc(&ctx->zcrx_ctxs, &id, NULL, xa_limit_31b, GFP_KERNEL);
		if (ret)
			goto ifq_free;
	}

	ret = io_allocate_rbuf_ring(ctx, ifq, &reg, &rd, id);
	if (ret)
		goto err;

	ifq->kern_readable = !(area.flags & IORING_ZCRX_AREA_DMABUF);

	if (!(reg.flags & ZCRX_REG_NODEV)) {
		ret = zcrx_register_netdev(ifq, &reg, &area);
		if (ret)
			goto err;
	} else {
		ret = io_zcrx_create_area(ifq, &area, &reg);
		if (ret)
			goto err;
	}

	reg.zcrx_id = id;

	scoped_guard(mutex, &ctx->mmap_lock) {
		/* publish ifq */
		ret = -ENOMEM;
		if (xa_store(&ctx->zcrx_ctxs, id, ifq, GFP_KERNEL))
			goto err;
	}

	reg.rx_buf_len = 1U << ifq->niov_shift;

	if (copy_to_user(arg, &reg, sizeof(reg)) ||
	    copy_to_user(u64_to_user_ptr(reg.region_ptr), &rd, sizeof(rd)) ||
	    copy_to_user(u64_to_user_ptr(reg.area_ptr), &area, sizeof(area))) {
		ret = -EFAULT;
		goto err;
	}
	return 0;
err:
	scoped_guard(mutex, &ctx->mmap_lock)
		xa_erase(&ctx->zcrx_ctxs, id);
ifq_free:
	zcrx_unregister(ifq);
	return ret;
}

static inline bool is_zcrx_entry_marked(struct io_ring_ctx *ctx, unsigned long id)
{
	return xa_get_mark(&ctx->zcrx_ctxs, id, XA_MARK_1);
}

static inline void set_zcrx_entry_mark(struct io_ring_ctx *ctx, unsigned long id)
{
	xa_set_mark(&ctx->zcrx_ctxs, id, XA_MARK_1);
}

void io_terminate_zcrx(struct io_ring_ctx *ctx)
{
	struct io_zcrx_ifq *ifq;
	unsigned long id = 0;

	lockdep_assert_held(&ctx->uring_lock);

	while (1) {
		scoped_guard(mutex, &ctx->mmap_lock)
			ifq = xa_find(&ctx->zcrx_ctxs, &id, ULONG_MAX, XA_PRESENT);
		if (!ifq)
			break;
		if (WARN_ON_ONCE(is_zcrx_entry_marked(ctx, id)))
			break;
		set_zcrx_entry_mark(ctx, id);
		id++;
		zcrx_unregister_user(ifq);
	}
}

void io_unregister_zcrx(struct io_ring_ctx *ctx)
{
	struct io_zcrx_ifq *ifq;

	lockdep_assert_held(&ctx->uring_lock);

	while (1) {
		scoped_guard(mutex, &ctx->mmap_lock) {
			unsigned long id = 0;

			ifq = xa_find(&ctx->zcrx_ctxs, &id, ULONG_MAX, XA_PRESENT);
			if (ifq) {
				if (WARN_ON_ONCE(!is_zcrx_entry_marked(ctx, id))) {
					ifq = NULL;
					break;
				}
				xa_erase(&ctx->zcrx_ctxs, id);
			}
		}
		if (!ifq)
			break;
		io_put_zcrx_ifq(ifq);
	}

	xa_destroy(&ctx->zcrx_ctxs);
}

static inline u32 zcrx_rq_entries(struct zcrx_rq *rq)
{
	u32 entries;

	entries = smp_load_acquire(&rq->ring->tail) - rq->cached_head;
	return min(entries, rq->nr_entries);
}

static struct io_uring_zcrx_rqe *zcrx_next_rqe(struct zcrx_rq *rq, unsigned mask)
{
	unsigned int idx = rq->cached_head++ & mask;

	return &rq->rqes[idx];
}

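/*
 * Decode a refill ring entry. rqe->off packs the area id in the top bits
 * (above IORING_ZCRX_AREA_SHIFT) and the byte offset within the area
 * below it; only area 0 exists for now. The niov index is bounds checked
 * and hardened against speculation before use.
 */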
static inline bool io_parse_rqe(struct io_uring_zcrx_rqe *rqe,
				struct io_zcrx_ifq *ifq,
				struct net_iov **ret_niov)
{
	__u64 off = READ_ONCE(rqe->off);
	unsigned niov_idx, area_idx;
	struct io_zcrx_area *area;

	area_idx = off >> IORING_ZCRX_AREA_SHIFT;
	niov_idx = (off & ~IORING_ZCRX_AREA_MASK) >> ifq->niov_shift;

	if (unlikely(rqe->__pad || area_idx))
		return false;
	area = ifq->area;

	if (unlikely(niov_idx >= area->nia.num_niovs))
		return false;
	niov_idx = array_index_nospec(niov_idx, area->nia.num_niovs);

	*ret_niov = &area->nia.niovs[niov_idx];
	return true;
}

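/*
 * Fast path refill: consume entries from the user refill ring, drop the
 * user reference each entry returns, and collect buffers whose page pool
 * reference also drops to zero. Buffers that now belong to a different
 * page pool are returned through their own pool instead.
 */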
static unsigned io_zcrx_ring_refill(struct page_pool *pp,
				    struct io_zcrx_ifq *ifq,
				    netmem_ref *netmems, unsigned to_alloc)
{
	struct zcrx_rq *rq = &ifq->rq;
	unsigned int mask = rq->nr_entries - 1;
	unsigned int entries;
	unsigned allocated = 0;

	guard(spinlock_bh)(&rq->lock);

	entries = zcrx_rq_entries(rq);
	entries = min_t(unsigned, entries, to_alloc);
	if (unlikely(!entries))
		return 0;

	do {
		struct io_uring_zcrx_rqe *rqe = zcrx_next_rqe(rq, mask);
		struct net_iov *niov;
		netmem_ref netmem;

		if (!io_parse_rqe(rqe, ifq, &niov))
			continue;
		if (!io_zcrx_put_niov_uref(niov))
			continue;

		netmem = net_iov_to_netmem(niov);
		if (!page_pool_unref_and_test(netmem))
			continue;

		if (unlikely(niov->desc.pp != pp)) {
			io_zcrx_return_niov(niov);
			continue;
		}

		netmems[allocated] = netmem;
		allocated++;
	} while (--entries);

	smp_store_release(&rq->ring->head, rq->cached_head);
	return allocated;
}

static unsigned io_zcrx_refill_slow(struct page_pool *pp, struct io_zcrx_ifq *ifq,
				    netmem_ref *netmems, unsigned to_alloc)
{
	struct io_zcrx_area *area = ifq->area;
	unsigned allocated = 0;

	guard(spinlock_bh)(&area->freelist_lock);

	for (allocated = 0; allocated < to_alloc; allocated++) {
		struct net_iov *niov = zcrx_get_free_niov(area);

		if (!niov)
			break;
		net_mp_niov_set_page_pool(pp, niov);
		netmems[allocated] = net_iov_to_netmem(niov);
	}
	return allocated;
}

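/*
 * Page pool allocation callback: refill the pool's alloc cache from the
 * user refill ring first, fall back to the area freelist, sync the
 * buffers for device use, then hand one netmem back and leave the rest
 * cached in pp->alloc.
 */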
static netmem_ref io_pp_zc_alloc_netmems(struct page_pool *pp, gfp_t gfp)
{
	struct io_zcrx_ifq *ifq = io_pp_to_ifq(pp);
	netmem_ref *netmems = pp->alloc.cache;
	unsigned to_alloc = PP_ALLOC_CACHE_REFILL;
	unsigned allocated;

	/* pp should already be ensuring that */
	if (WARN_ON_ONCE(pp->alloc.count))
		return 0;

	allocated = io_zcrx_ring_refill(pp, ifq, netmems, to_alloc);
	if (likely(allocated))
		goto out_return;

	allocated = io_zcrx_refill_slow(pp, ifq, netmems, to_alloc);
	if (!allocated)
		return 0;
out_return:
	zcrx_sync_for_device(pp, ifq, netmems, allocated);
	allocated--;
	pp->alloc.count += allocated;
	return netmems[allocated];
}

static bool io_pp_zc_release_netmem(struct page_pool *pp, netmem_ref netmem)
{
	struct net_iov *niov;

	if (WARN_ON_ONCE(!netmem_is_net_iov(netmem)))
		return false;

	niov = netmem_to_net_iov(netmem);
	net_mp_niov_clear_page_pool(niov);
	io_zcrx_return_niov_freelist(niov);
	return false;
}

static int io_pp_zc_init(struct page_pool *pp)
{
	struct io_zcrx_ifq *ifq = io_pp_to_ifq(pp);

	if (WARN_ON_ONCE(!ifq))
		return -EINVAL;
	if (WARN_ON_ONCE(ifq->dev != pp->p.dev))
		return -EINVAL;
	if (WARN_ON_ONCE(!pp->dma_map))
		return -EOPNOTSUPP;
	if (pp->p.order + PAGE_SHIFT != ifq->niov_shift)
		return -EINVAL;
	if (pp->p.dma_dir != DMA_FROM_DEVICE)
		return -EOPNOTSUPP;

	refcount_inc(&ifq->refs);
	return 0;
}

static void io_pp_zc_destroy(struct page_pool *pp)
{
	io_put_zcrx_ifq(io_pp_to_ifq(pp));
}

static int io_pp_nl_fill(void *mp_priv, struct sk_buff *rsp,
			 struct netdev_rx_queue *rxq)
{
	struct nlattr *nest;
	int type;

	type = rxq ? NETDEV_A_QUEUE_IO_URING : NETDEV_A_PAGE_POOL_IO_URING;
	nest = nla_nest_start(rsp, type);
	if (!nest)
		return -EMSGSIZE;
	nla_nest_end(rsp, nest);

	return 0;
}

static void io_pp_uninstall(void *mp_priv, struct netdev_rx_queue *rxq)
{
	struct pp_memory_provider_params *p = &rxq->mp_params;
	struct io_zcrx_ifq *ifq = mp_priv;

	io_zcrx_drop_netdev(ifq);
	if (ifq->area)
		io_zcrx_unmap_area(ifq, ifq->area);

	p->mp_ops = NULL;
	p->mp_priv = NULL;
}

static const struct memory_provider_ops io_uring_pp_zc_ops = {
	.alloc_netmems = io_pp_zc_alloc_netmems,
	.release_netmem = io_pp_zc_release_netmem,
	.init = io_pp_zc_init,
	.destroy = io_pp_zc_destroy,
	.nl_fill = io_pp_nl_fill,
	.uninstall = io_pp_uninstall,
};

static unsigned zcrx_parse_rq(netmem_ref *netmem_array, unsigned nr,
			      struct io_zcrx_ifq *zcrx, struct zcrx_rq *rq)
{
	unsigned int mask = rq->nr_entries - 1;
	unsigned int i;

	nr = min(nr, zcrx_rq_entries(rq));
	for (i = 0; i < nr; i++) {
		struct io_uring_zcrx_rqe *rqe = zcrx_next_rqe(rq, mask);
		struct net_iov *niov;

		if (!io_parse_rqe(rqe, zcrx, &niov))
			break;
		netmem_array[i] = net_iov_to_netmem(niov);
	}

	smp_store_release(&rq->ring->head, rq->cached_head);
	return i;
}

#define ZCRX_FLUSH_BATCH	32

static void zcrx_return_buffers(netmem_ref *netmems, unsigned nr)
{
	unsigned i;

	for (i = 0; i < nr; i++) {
		netmem_ref netmem = netmems[i];
		struct net_iov *niov = netmem_to_net_iov(netmem);

		if (!io_zcrx_put_niov_uref(niov))
			continue;
		if (!page_pool_unref_and_test(netmem))
			continue;
		io_zcrx_return_niov(niov);
	}
}

static int zcrx_flush_rq(struct io_ring_ctx *ctx, struct io_zcrx_ifq *zcrx,
			 struct zcrx_ctrl *ctrl)
{
	struct zcrx_ctrl_flush_rq *frq = &ctrl->zc_flush;
	netmem_ref netmems[ZCRX_FLUSH_BATCH];
	unsigned total = 0;
	unsigned nr;

	if (!mem_is_zero(&frq->__resv, sizeof(frq->__resv)))
		return -EINVAL;

	do {
		struct zcrx_rq *rq = &zcrx->rq;

		scoped_guard(spinlock_bh, &rq->lock) {
			nr = zcrx_parse_rq(netmems, ZCRX_FLUSH_BATCH, zcrx, rq);
			zcrx_return_buffers(netmems, nr);
		}

		total += nr;

		if (fatal_signal_pending(current))
			break;
		cond_resched();
	} while (nr == ZCRX_FLUSH_BATCH && total < zcrx->rq.nr_entries);

	return 0;
}

int io_zcrx_ctrl(struct io_ring_ctx *ctx, void __user *arg, unsigned nr_args)
{
	struct zcrx_ctrl ctrl;
	struct io_zcrx_ifq *zcrx;

	BUILD_BUG_ON(sizeof(ctrl.zc_export) != sizeof(ctrl.zc_flush));

	if (nr_args)
		return -EINVAL;
	if (copy_from_user(&ctrl, arg, sizeof(ctrl)))
		return -EFAULT;
	if (!mem_is_zero(&ctrl.__resv, sizeof(ctrl.__resv)))
		return -EFAULT;

	zcrx = xa_load(&ctx->zcrx_ctxs, ctrl.zcrx_id);
	if (!zcrx)
		return -ENXIO;

	switch (ctrl.op) {
	case ZCRX_CTRL_FLUSH_RQ:
		return zcrx_flush_rq(ctx, zcrx, &ctrl);
	case ZCRX_CTRL_EXPORT:
		return zcrx_export(ctx, zcrx, &ctrl, arg);
	}

	return -EOPNOTSUPP;
}

static bool io_zcrx_queue_cqe(struct io_kiocb *req, struct net_iov *niov,
			      struct io_zcrx_ifq *ifq, int off, int len)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct io_uring_zcrx_cqe *rcqe;
	struct io_zcrx_area *area;
	struct io_uring_cqe *cqe;
	u64 offset;

	if (!io_defer_get_uncommited_cqe(ctx, &cqe))
		return false;

	cqe->user_data = req->cqe.user_data;
	cqe->res = len;
	cqe->flags = IORING_CQE_F_MORE;
	if (ctx->flags & IORING_SETUP_CQE_MIXED)
		cqe->flags |= IORING_CQE_F_32;

	area = io_zcrx_iov_to_area(niov);
	offset = off + (net_iov_idx(niov) << ifq->niov_shift);
	rcqe = (struct io_uring_zcrx_cqe *)(cqe + 1);
	rcqe->off = offset + ((u64)area->area_id << IORING_ZCRX_AREA_SHIFT);
	rcqe->__pad = 0;
	return true;
}

static struct net_iov *io_alloc_fallback_niov(struct io_zcrx_ifq *ifq)
{
	struct io_zcrx_area *area = ifq->area;
	struct net_iov *niov = NULL;

	if (!ifq->kern_readable)
		return NULL;

	scoped_guard(spinlock_bh, &area->freelist_lock)
		niov = zcrx_get_free_niov(area);

	if (niov)
		page_pool_fragment_netmem(net_iov_to_netmem(niov), 1);
	return niov;
}

struct io_copy_cache {
	struct page *page;
	unsigned long offset;
	size_t size;
};

static ssize_t io_copy_page(struct io_copy_cache *cc, struct page *src_page,
			    unsigned int src_offset, size_t len)
{
	size_t copied = 0;

	len = min(len, cc->size);

	while (len) {
		void *src_addr, *dst_addr;
		struct page *dst_page = cc->page;
		unsigned dst_offset = cc->offset;
		size_t n = len;

		if (folio_test_partial_kmap(page_folio(dst_page)) ||
		    folio_test_partial_kmap(page_folio(src_page))) {
			dst_page += dst_offset / PAGE_SIZE;
			dst_offset = offset_in_page(dst_offset);
			src_page += src_offset / PAGE_SIZE;
			src_offset = offset_in_page(src_offset);
			n = min(PAGE_SIZE - src_offset, PAGE_SIZE - dst_offset);
			n = min(n, len);
		}

		dst_addr = kmap_local_page(dst_page) + dst_offset;
		src_addr = kmap_local_page(src_page) + src_offset;

		memcpy(dst_addr, src_addr, n);

		kunmap_local(src_addr);
		kunmap_local(dst_addr);

		cc->size -= n;
		cc->offset += n;
		src_offset += n;
		len -= n;
		copied += n;
	}
	return copied;
}

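/*
 * Copy fallback for data that wasn't placed into area buffers by the
 * device (e.g. linear skb data or regular page frags): grab free niovs,
 * copy the payload into them and post a CQE per chunk. Only possible for
 * kernel-readable (non-dmabuf) areas.
 */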
static ssize_t io_zcrx_copy_chunk(struct io_kiocb *req, struct io_zcrx_ifq *ifq,
				  struct page *src_page, unsigned int src_offset,
				  size_t len)
{
	size_t copied = 0;
	int ret = 0;

	while (len) {
		struct io_copy_cache cc;
		struct net_iov *niov;
		size_t n;

		niov = io_alloc_fallback_niov(ifq);
		if (!niov) {
			ret = -ENOMEM;
			break;
		}

		cc.page = io_zcrx_iov_page(niov);
		cc.offset = 0;
		cc.size = PAGE_SIZE;

		n = io_copy_page(&cc, src_page, src_offset, len);

		if (!io_zcrx_queue_cqe(req, niov, ifq, 0, n)) {
			io_zcrx_return_niov(niov);
			ret = -ENOSPC;
			break;
		}

		io_zcrx_get_niov_uref(niov);
		src_offset += n;
		len -= n;
		copied += n;
	}

	return copied ? copied : ret;
}

static int io_zcrx_copy_frag(struct io_kiocb *req, struct io_zcrx_ifq *ifq,
			     const skb_frag_t *frag, int off, int len)
{
	struct page *page = skb_frag_page(frag);

	return io_zcrx_copy_chunk(req, ifq, page, off + skb_frag_off(frag), len);
}

static int io_zcrx_recv_frag(struct io_kiocb *req, struct io_zcrx_ifq *ifq,
			     const skb_frag_t *frag, int off, int len)
{
	struct net_iov *niov;
	struct page_pool *pp;

	if (unlikely(!skb_frag_is_net_iov(frag)))
		return io_zcrx_copy_frag(req, ifq, frag, off, len);

	niov = netmem_to_net_iov(frag->netmem);
	pp = niov->desc.pp;

	if (!pp || pp->mp_ops != &io_uring_pp_zc_ops || io_pp_to_ifq(pp) != ifq)
		return -EFAULT;

	if (!io_zcrx_queue_cqe(req, niov, ifq, off + skb_frag_off(frag), len))
		return -ENOSPC;

	/*
	 * Prevent it from being recycled while user is accessing it.
	 * It has to be done before grabbing a user reference.
	 */
	page_pool_ref_netmem(net_iov_to_netmem(niov));
	io_zcrx_get_niov_uref(niov);
	return len;
}

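/*
 * tcp_read_sock() callback: walk the skb's linear data, frags and frag
 * list, posting zero copy CQEs for frags backed by this ifq's niovs and
 * falling back to copying for everything else. The number of skbs handled
 * per call is capped to bound the time spent under the socket lock.
 */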
static int
io_zcrx_recv_skb(read_descriptor_t *desc, struct sk_buff *skb,
		 unsigned int offset, size_t len)
{
	struct io_zcrx_args *args = desc->arg.data;
	struct io_zcrx_ifq *ifq = args->ifq;
	struct io_kiocb *req = args->req;
	struct sk_buff *frag_iter;
	unsigned start, start_off = offset;
	int i, copy, end, off;
	int ret = 0;

	len = min_t(size_t, len, desc->count);
	/*
	 * __tcp_read_sock() always calls io_zcrx_recv_skb one last time, even
	 * if desc->count is already 0. This is caused by the if (offset + 1 !=
	 * skb->len) check. Return early in this case to break out of
	 * __tcp_read_sock().
	 */
	if (!len)
		return 0;
	if (unlikely(args->nr_skbs++ > IO_SKBS_PER_CALL_LIMIT))
		return -EAGAIN;

	if (unlikely(offset < skb_headlen(skb))) {
		ssize_t copied;
		size_t to_copy;

		to_copy = min_t(size_t, skb_headlen(skb) - offset, len);
		copied = io_zcrx_copy_chunk(req, ifq, virt_to_page(skb->data),
					    offset_in_page(skb->data) + offset,
					    to_copy);
		if (copied < 0) {
			ret = copied;
			goto out;
		}
		offset += copied;
		len -= copied;
		if (!len)
			goto out;
		if (offset != skb_headlen(skb))
			goto out;
	}

	start = skb_headlen(skb);

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const skb_frag_t *frag;

		if (WARN_ON(start > offset + len))
			return -EFAULT;

		frag = &skb_shinfo(skb)->frags[i];
		end = start + skb_frag_size(frag);

		if (offset < end) {
			copy = end - offset;
			if (copy > len)
				copy = len;

			off = offset - start;
			ret = io_zcrx_recv_frag(req, ifq, frag, off, copy);
			if (ret < 0)
				goto out;

			offset += ret;
			len -= ret;
			if (len == 0 || ret != copy)
				goto out;
		}
		start = end;
	}

	skb_walk_frags(skb, frag_iter) {
		if (WARN_ON(start > offset + len))
			return -EFAULT;

		end = start + frag_iter->len;
		if (offset < end) {
			size_t count;

			copy = end - offset;
			if (copy > len)
				copy = len;

			off = offset - start;
			count = desc->count;
			ret = io_zcrx_recv_skb(desc, frag_iter, off, copy);
			desc->count = count;
			if (ret < 0)
				goto out;

			offset += ret;
			len -= ret;
			if (len == 0 || ret != copy)
				goto out;
		}
		start = end;
	}

out:
	if (offset == start_off)
		return ret;
	desc->count -= (offset - start_off);
	return offset - start_off;
}

static int io_zcrx_tcp_recvmsg(struct io_kiocb *req, struct io_zcrx_ifq *ifq,
			       struct sock *sk, int flags,
			       unsigned issue_flags, unsigned int *outlen)
{
	unsigned int len = *outlen;
	struct io_zcrx_args args = {
		.req = req,
		.ifq = ifq,
		.sock = sk->sk_socket,
	};
	read_descriptor_t rd_desc = {
		.count = len ? len : UINT_MAX,
		.arg.data = &args,
	};
	int ret;

	lock_sock(sk);
	ret = tcp_read_sock(sk, &rd_desc, io_zcrx_recv_skb);
	if (len && ret > 0)
		*outlen = len - ret;
	if (ret <= 0) {
		if (ret < 0 || sock_flag(sk, SOCK_DONE))
			goto out;
		if (sk->sk_err)
			ret = sock_error(sk);
		else if (sk->sk_shutdown & RCV_SHUTDOWN)
			goto out;
		else if (sk->sk_state == TCP_CLOSE)
			ret = -ENOTCONN;
		else
			ret = -EAGAIN;
	} else if (unlikely(args.nr_skbs > IO_SKBS_PER_CALL_LIMIT) &&
		   (issue_flags & IO_URING_F_MULTISHOT)) {
		ret = IOU_REQUEUE;
	} else if (sock_flag(sk, SOCK_DONE)) {
		/* Keep retrying until the read finally returns 0. */
		if (issue_flags & IO_URING_F_MULTISHOT)
			ret = IOU_REQUEUE;
		else
			ret = -EAGAIN;
	}
out:
	release_sock(sk);
	return ret;
}

int io_zcrx_recv(struct io_kiocb *req, struct io_zcrx_ifq *ifq,
		 struct socket *sock, unsigned int flags,
		 unsigned issue_flags, unsigned int *len)
{
	struct sock *sk = sock->sk;
	const struct proto *prot = READ_ONCE(sk->sk_prot);

	if (prot->recvmsg != tcp_recvmsg)
		return -EPROTONOSUPPORT;

	sock_rps_record_flow(sk);
	return io_zcrx_tcp_recvmsg(req, ifq, sk, flags, issue_flags, len);
}