xref: /linux/io_uring/zcrx.c (revision 39f1c201b93f4ff71631bac72cff6eb155f976a4)
1 // SPDX-License-Identifier: GPL-2.0
2 #include <linux/kernel.h>
3 #include <linux/errno.h>
4 #include <linux/dma-map-ops.h>
5 #include <linux/mm.h>
6 #include <linux/nospec.h>
7 #include <linux/io_uring.h>
8 #include <linux/netdevice.h>
9 #include <linux/rtnetlink.h>
10 #include <linux/skbuff_ref.h>
11 #include <linux/anon_inodes.h>
12 
13 #include <net/page_pool/helpers.h>
14 #include <net/page_pool/memory_provider.h>
15 #include <net/netlink.h>
16 #include <net/netdev_queues.h>
17 #include <net/netdev_rx_queue.h>
18 #include <net/tcp.h>
19 #include <net/rps.h>
20 
21 #include <trace/events/page_pool.h>
22 
23 #include <uapi/linux/io_uring.h>
24 
25 #include "io_uring.h"
26 #include "kbuf.h"
27 #include "memmap.h"
28 #include "zcrx.h"
29 #include "rsrc.h"
30 
31 #define IO_ZCRX_AREA_SUPPORTED_FLAGS	(IORING_ZCRX_AREA_DMABUF)
32 
33 #define IO_DMA_ATTR (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING)
34 
35 static inline struct io_zcrx_ifq *io_pp_to_ifq(struct page_pool *pp)
36 {
37 	return pp->mp_priv;
38 }
39 
40 static inline struct io_zcrx_area *io_zcrx_iov_to_area(const struct net_iov *niov)
41 {
42 	struct net_iov_area *owner = net_iov_owner(niov);
43 
44 	return container_of(owner, struct io_zcrx_area, nia);
45 }
46 
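/*
 * Return the first struct page backing @niov. Only valid for user-memory
 * areas; dma-buf backed areas have no page array.
 */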
47 static inline struct page *io_zcrx_iov_page(const struct net_iov *niov)
48 {
49 	struct io_zcrx_area *area = io_zcrx_iov_to_area(niov);
50 	unsigned niov_pages_shift;
51 
52 	lockdep_assert(!area->mem.is_dmabuf);
53 
54 	niov_pages_shift = area->ifq->niov_shift - PAGE_SHIFT;
55 	return area->mem.pages[net_iov_idx(niov) << niov_pages_shift];
56 }
57 
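/*
 * Find the largest power-of-two buffer shift such that the length of every
 * DMA segment in the area is a multiple of that buffer size.
 */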
58 static int io_area_max_shift(struct io_zcrx_mem *mem)
59 {
60 	struct sg_table *sgt = mem->sgt;
61 	struct scatterlist *sg;
62 	unsigned shift = -1U;
63 	unsigned i;
64 
65 	for_each_sgtable_dma_sg(sgt, sg, i)
66 		shift = min(shift, __ffs(sg_dma_len(sg)));
67 	return shift;
68 }
69 
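/*
 * Walk the area's DMA scatterlist and assign a DMA address to each net_iov,
 * carving every segment into niov-sized chunks.
 */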
70 static int io_populate_area_dma(struct io_zcrx_ifq *ifq,
71 				struct io_zcrx_area *area)
72 {
73 	unsigned niov_size = 1U << ifq->niov_shift;
74 	struct sg_table *sgt = area->mem.sgt;
75 	struct scatterlist *sg;
76 	unsigned i, niov_idx = 0;
77 
78 	for_each_sgtable_dma_sg(sgt, sg, i) {
79 		dma_addr_t dma = sg_dma_address(sg);
80 		unsigned long sg_len = sg_dma_len(sg);
81 
82 		if (WARN_ON_ONCE(sg_len % niov_size))
83 			return -EINVAL;
84 
85 		while (sg_len && niov_idx < area->nia.num_niovs) {
86 			struct net_iov *niov = &area->nia.niovs[niov_idx];
87 
88 			if (net_mp_niov_set_dma_addr(niov, dma))
89 				return -EFAULT;
90 			sg_len -= niov_size;
91 			dma += niov_size;
92 			niov_idx++;
93 		}
94 	}
95 
96 	if (WARN_ON_ONCE(niov_idx != area->nia.num_niovs))
97 		return -EFAULT;
98 	return 0;
99 }
100 
101 static void io_release_dmabuf(struct io_zcrx_mem *mem)
102 {
103 	if (!IS_ENABLED(CONFIG_DMA_SHARED_BUFFER))
104 		return;
105 
106 	if (mem->sgt)
107 		dma_buf_unmap_attachment_unlocked(mem->attach, mem->sgt,
108 						  DMA_FROM_DEVICE);
109 	if (mem->attach)
110 		dma_buf_detach(mem->dmabuf, mem->attach);
111 	if (mem->dmabuf)
112 		dma_buf_put(mem->dmabuf);
113 
114 	mem->sgt = NULL;
115 	mem->attach = NULL;
116 	mem->dmabuf = NULL;
117 }
118 
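/*
 * Import a dma-buf backed area: take a reference to the dma-buf, attach and
 * map it for the queue's DMA device, and verify that the mapped size matches
 * the registered length.
 */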
119 static int io_import_dmabuf(struct io_zcrx_ifq *ifq,
120 			    struct io_zcrx_mem *mem,
121 			    struct io_uring_zcrx_area_reg *area_reg)
122 {
123 	unsigned long off = (unsigned long)area_reg->addr;
124 	unsigned long len = (unsigned long)area_reg->len;
125 	unsigned long total_size = 0;
126 	struct scatterlist *sg;
127 	int dmabuf_fd = area_reg->dmabuf_fd;
128 	int i, ret;
129 
130 	if (!ifq->dev)
131 		return -EINVAL;
132 	if (off)
133 		return -EINVAL;
134 	if (!IS_ENABLED(CONFIG_DMA_SHARED_BUFFER))
135 		return -EINVAL;
136 
137 	mem->is_dmabuf = true;
138 	mem->dmabuf = dma_buf_get(dmabuf_fd);
139 	if (IS_ERR(mem->dmabuf)) {
140 		ret = PTR_ERR(mem->dmabuf);
141 		mem->dmabuf = NULL;
142 		goto err;
143 	}
144 
145 	mem->attach = dma_buf_attach(mem->dmabuf, ifq->dev);
146 	if (IS_ERR(mem->attach)) {
147 		ret = PTR_ERR(mem->attach);
148 		mem->attach = NULL;
149 		goto err;
150 	}
151 
152 	mem->sgt = dma_buf_map_attachment_unlocked(mem->attach, DMA_FROM_DEVICE);
153 	if (IS_ERR(mem->sgt)) {
154 		ret = PTR_ERR(mem->sgt);
155 		mem->sgt = NULL;
156 		goto err;
157 	}
158 
159 	for_each_sgtable_dma_sg(mem->sgt, sg, i)
160 		total_size += sg_dma_len(sg);
161 
162 	if (total_size != len) {
163 		ret = -EINVAL;
164 		goto err;
165 	}
166 
167 	mem->size = len;
168 	return 0;
169 err:
170 	io_release_dmabuf(mem);
171 	return ret;
172 }
173 
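/*
 * Count the pages to charge for memory accounting, counting each folio only
 * once even when it spans multiple entries of the pinned page array.
 */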
174 static unsigned long io_count_account_pages(struct page **pages, unsigned nr_pages)
175 {
176 	struct folio *last_folio = NULL;
177 	unsigned long res = 0;
178 	int i;
179 
180 	for (i = 0; i < nr_pages; i++) {
181 		struct folio *folio = page_folio(pages[i]);
182 
183 		if (folio == last_folio)
184 			continue;
185 		last_folio = folio;
186 		res += folio_nr_pages(folio);
187 	}
188 	return res;
189 }
190 
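/*
 * Import a user-memory area: pin the user pages, build a scatterlist,
 * DMA-map it when a device is attached, and charge the pinned memory
 * against the user's accounting.
 */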
191 static int io_import_umem(struct io_zcrx_ifq *ifq,
192 			  struct io_zcrx_mem *mem,
193 			  struct io_uring_zcrx_area_reg *area_reg)
194 {
195 	struct page **pages;
196 	int nr_pages, ret;
197 	bool mapped = false;
198 
199 	if (area_reg->dmabuf_fd)
200 		return -EINVAL;
201 	if (!area_reg->addr)
202 		return -EFAULT;
203 	pages = io_pin_pages((unsigned long)area_reg->addr, area_reg->len,
204 				   &nr_pages);
205 	if (IS_ERR(pages))
206 		return PTR_ERR(pages);
207 
208 	ret = sg_alloc_table_from_pages(&mem->page_sg_table, pages, nr_pages,
209 					0, (unsigned long)nr_pages << PAGE_SHIFT,
210 					GFP_KERNEL_ACCOUNT);
211 	if (ret)
212 		goto out_err;
213 
214 	if (ifq->dev) {
215 		ret = dma_map_sgtable(ifq->dev, &mem->page_sg_table,
216 				      DMA_FROM_DEVICE, IO_DMA_ATTR);
217 		if (ret < 0)
218 			goto out_err;
219 		mapped = true;
220 	}
221 
222 	mem->account_pages = io_count_account_pages(pages, nr_pages);
223 	ret = io_account_mem(ifq->user, ifq->mm_account, mem->account_pages);
224 	if (ret < 0) {
225 		mem->account_pages = 0;
226 		goto out_err;
227 	}
228 
229 	mem->sgt = &mem->page_sg_table;
230 	mem->pages = pages;
231 	mem->nr_folios = nr_pages;
232 	mem->size = area_reg->len;
233 	return ret;
234 out_err:
235 	if (mapped)
236 		dma_unmap_sgtable(ifq->dev, &mem->page_sg_table,
237 				  DMA_FROM_DEVICE, IO_DMA_ATTR);
238 	sg_free_table(&mem->page_sg_table);
239 	unpin_user_pages(pages, nr_pages);
240 	kvfree(pages);
241 	return ret;
242 }
243 
244 static void io_release_area_mem(struct io_zcrx_mem *mem)
245 {
246 	if (mem->is_dmabuf) {
247 		io_release_dmabuf(mem);
248 		return;
249 	}
250 	if (mem->pages) {
251 		unpin_user_pages(mem->pages, mem->nr_folios);
252 		sg_free_table(mem->sgt);
253 		mem->sgt = NULL;
254 		kvfree(mem->pages);
255 	}
256 }
257 
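/*
 * Validate an area registration request and import its backing memory,
 * either from a dma-buf or from pinned user pages.
 */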
258 static int io_import_area(struct io_zcrx_ifq *ifq,
259 			  struct io_zcrx_mem *mem,
260 			  struct io_uring_zcrx_area_reg *area_reg)
261 {
262 	int ret;
263 
264 	if (area_reg->flags & ~IO_ZCRX_AREA_SUPPORTED_FLAGS)
265 		return -EINVAL;
266 	if (area_reg->rq_area_token)
267 		return -EINVAL;
268 	if (area_reg->__resv2[0] || area_reg->__resv2[1])
269 		return -EINVAL;
270 
271 	ret = io_validate_user_buf_range(area_reg->addr, area_reg->len);
272 	if (ret)
273 		return ret;
274 	if (area_reg->addr & ~PAGE_MASK || area_reg->len & ~PAGE_MASK)
275 		return -EINVAL;
276 
277 	if (area_reg->flags & IORING_ZCRX_AREA_DMABUF)
278 		return io_import_dmabuf(ifq, mem, area_reg);
279 	return io_import_umem(ifq, mem, area_reg);
280 }
281 
282 static void io_zcrx_unmap_area(struct io_zcrx_ifq *ifq,
283 				struct io_zcrx_area *area)
284 {
285 	int i;
286 
287 	guard(mutex)(&ifq->pp_lock);
288 	if (!area->is_mapped)
289 		return;
290 	area->is_mapped = false;
291 
292 	if (area->nia.niovs) {
293 		for (i = 0; i < area->nia.num_niovs; i++)
294 			net_mp_niov_set_dma_addr(&area->nia.niovs[i], 0);
295 	}
296 
297 	if (area->mem.is_dmabuf) {
298 		io_release_dmabuf(&area->mem);
299 	} else {
300 		dma_unmap_sgtable(ifq->dev, &area->mem.page_sg_table,
301 				  DMA_FROM_DEVICE, IO_DMA_ATTR);
302 	}
303 }
304 
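/*
 * DMA-sync freshly refilled buffers for device access; a no-op when the
 * device doesn't require explicit syncing.
 */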
305 static void zcrx_sync_for_device(struct page_pool *pp, struct io_zcrx_ifq *zcrx,
306 				 netmem_ref *netmems, unsigned nr)
307 {
308 #if defined(CONFIG_HAS_DMA) && defined(CONFIG_DMA_NEED_SYNC)
309 	struct device *dev = pp->p.dev;
310 	unsigned i, niov_size;
311 	dma_addr_t dma_addr;
312 
313 	if (!dma_dev_need_sync(dev))
314 		return;
315 	niov_size = 1U << zcrx->niov_shift;
316 
317 	for (i = 0; i < nr; i++) {
318 		dma_addr = page_pool_get_dma_addr_netmem(netmems[i]);
319 		__dma_sync_single_for_device(dev, dma_addr + pp->p.offset,
320 					     niov_size, pp->p.dma_dir);
321 	}
322 #endif
323 }
324 
325 #define IO_RQ_MAX_ENTRIES		32768
326 
327 #define IO_SKBS_PER_CALL_LIMIT	20
328 
329 struct io_zcrx_args {
330 	struct io_kiocb		*req;
331 	struct io_zcrx_ifq	*ifq;
332 	struct socket		*sock;
333 	unsigned		nr_skbs;
334 };
335 
336 static const struct memory_provider_ops io_uring_pp_zc_ops;
337 
338 static inline atomic_t *io_get_user_counter(struct net_iov *niov)
339 {
340 	struct io_zcrx_area *area = io_zcrx_iov_to_area(niov);
341 
342 	return &area->user_refs[net_iov_idx(niov)];
343 }
344 
345 static bool io_zcrx_put_niov_uref(struct net_iov *niov)
346 {
347 	atomic_t *uref = io_get_user_counter(niov);
348 	int old;
349 
350 	old = atomic_read(uref);
351 	do {
352 		if (unlikely(old == 0))
353 			return false;
354 	} while (!atomic_try_cmpxchg(uref, &old, old - 1));
355 
356 	return true;
357 }
358 
359 static void io_zcrx_get_niov_uref(struct net_iov *niov)
360 {
361 	atomic_inc(io_get_user_counter(niov));
362 }
363 
364 static void io_fill_zcrx_offsets(struct io_uring_zcrx_offsets *offsets)
365 {
366 	offsets->head = offsetof(struct io_uring, head);
367 	offsets->tail = offsetof(struct io_uring, tail);
368 	offsets->rqes = ALIGN(sizeof(struct io_uring), L1_CACHE_BYTES);
369 }
370 
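/*
 * Set up the refill queue: create the mmap'able memory region and point
 * the ring header and rqe array into it.
 */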
371 static int io_allocate_rbuf_ring(struct io_ring_ctx *ctx,
372 				 struct io_zcrx_ifq *ifq,
373 				 struct io_uring_zcrx_ifq_reg *reg,
374 				 struct io_uring_region_desc *rd,
375 				 u32 id)
376 {
377 	u64 mmap_offset;
378 	size_t off, size;
379 	void *ptr;
380 	int ret;
381 
382 	io_fill_zcrx_offsets(&reg->offsets);
383 	off = reg->offsets.rqes;
384 	size = off + sizeof(struct io_uring_zcrx_rqe) * reg->rq_entries;
385 	if (size > rd->size)
386 		return -EINVAL;
387 
388 	mmap_offset = IORING_MAP_OFF_ZCRX_REGION;
389 	mmap_offset += (u64)id << IORING_OFF_ZCRX_SHIFT;
390 
391 	ret = io_create_region(ctx, &ifq->rq_region, rd, mmap_offset);
392 	if (ret < 0)
393 		return ret;
394 
395 	ptr = io_region_get_ptr(&ifq->rq_region);
396 	ifq->rq.ring = (struct io_uring *)ptr;
397 	ifq->rq.rqes = (struct io_uring_zcrx_rqe *)(ptr + off);
398 
399 	memset(ifq->rq.ring, 0, sizeof(*ifq->rq.ring));
400 	return 0;
401 }
402 
403 static void io_free_rbuf_ring(struct io_zcrx_ifq *ifq)
404 {
405 	io_free_region(ifq->user, &ifq->rq_region);
406 	ifq->rq.ring = NULL;
407 	ifq->rq.rqes = NULL;
408 }
409 
410 static void io_zcrx_free_area(struct io_zcrx_ifq *ifq,
411 			      struct io_zcrx_area *area)
412 {
413 	io_zcrx_unmap_area(ifq, area);
414 	io_release_area_mem(&area->mem);
415 
416 	if (area->mem.account_pages)
417 		io_unaccount_mem(ifq->user, ifq->mm_account,
418 				 area->mem.account_pages);
419 
420 	kvfree(area->freelist);
421 	kvfree(area->nia.niovs);
422 	kvfree(area->user_refs);
423 	kfree(area);
424 }
425 
426 static int io_zcrx_append_area(struct io_zcrx_ifq *ifq,
427 				struct io_zcrx_area *area)
428 {
429 	bool kern_readable = !area->mem.is_dmabuf;
430 
431 	if (WARN_ON_ONCE(ifq->area))
432 		return -EINVAL;
433 	if (WARN_ON_ONCE(ifq->kern_readable != kern_readable))
434 		return -EINVAL;
435 
436 	ifq->area = area;
437 	return 0;
438 }
439 
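/*
 * Create the buffer area: import the backing memory, allocate the net_iov
 * array, freelist and user reference counters, and populate DMA addresses
 * when a device is attached.
 */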
440 static int io_zcrx_create_area(struct io_zcrx_ifq *ifq,
441 			       struct io_uring_zcrx_area_reg *area_reg,
442 			       struct io_uring_zcrx_ifq_reg *reg)
443 {
444 	int buf_size_shift = PAGE_SHIFT;
445 	struct io_zcrx_area *area;
446 	unsigned nr_iovs;
447 	int i, ret;
448 
449 	if (reg->rx_buf_len) {
450 		if (!is_power_of_2(reg->rx_buf_len) ||
451 		     reg->rx_buf_len < PAGE_SIZE)
452 			return -EINVAL;
453 		buf_size_shift = ilog2(reg->rx_buf_len);
454 	}
455 	if (!ifq->dev && buf_size_shift != PAGE_SHIFT)
456 		return -EOPNOTSUPP;
457 
458 	ret = -ENOMEM;
459 	area = kzalloc_obj(*area);
460 	if (!area)
461 		goto err;
462 	area->ifq = ifq;
463 
464 	ret = io_import_area(ifq, &area->mem, area_reg);
465 	if (ret)
466 		goto err;
467 	if (ifq->dev)
468 		area->is_mapped = true;
469 
470 	if (ifq->dev && buf_size_shift > io_area_max_shift(&area->mem)) {
471 		ret = -ERANGE;
472 		goto err;
473 	}
474 
475 	ifq->niov_shift = buf_size_shift;
476 	nr_iovs = area->mem.size >> ifq->niov_shift;
477 	area->nia.num_niovs = nr_iovs;
478 
479 	ret = -ENOMEM;
480 	area->nia.niovs = kvmalloc_objs(area->nia.niovs[0], nr_iovs,
481 					GFP_KERNEL_ACCOUNT | __GFP_ZERO);
482 	if (!area->nia.niovs)
483 		goto err;
484 
485 	area->freelist = kvmalloc_array(nr_iovs, sizeof(area->freelist[0]),
486 					GFP_KERNEL_ACCOUNT | __GFP_ZERO);
487 	if (!area->freelist)
488 		goto err;
489 
490 	area->user_refs = kvmalloc_objs(area->user_refs[0], nr_iovs,
491 					GFP_KERNEL_ACCOUNT | __GFP_ZERO);
492 	if (!area->user_refs)
493 		goto err;
494 
495 	for (i = 0; i < nr_iovs; i++) {
496 		struct net_iov *niov = &area->nia.niovs[i];
497 
498 		niov->owner = &area->nia;
499 		area->freelist[i] = i;
500 		atomic_set(&area->user_refs[i], 0);
501 		niov->type = NET_IOV_IOURING;
502 	}
503 
504 	if (ifq->dev) {
505 		ret = io_populate_area_dma(ifq, area);
506 		if (ret)
507 			goto err;
508 	}
509 
510 	area->free_count = nr_iovs;
511 	/* we're only supporting one area per ifq for now */
512 	area->area_id = 0;
513 	area_reg->rq_area_token = (u64)area->area_id << IORING_ZCRX_AREA_SHIFT;
514 	spin_lock_init(&area->freelist_lock);
515 
516 	ret = io_zcrx_append_area(ifq, area);
517 	if (!ret)
518 		return 0;
519 err:
520 	if (area)
521 		io_zcrx_free_area(ifq, area);
522 	return ret;
523 }
524 
525 static struct io_zcrx_ifq *io_zcrx_ifq_alloc(struct io_ring_ctx *ctx)
526 {
527 	struct io_zcrx_ifq *ifq;
528 
529 	ifq = kzalloc_obj(*ifq);
530 	if (!ifq)
531 		return NULL;
532 
533 	ifq->if_rxq = -1;
534 	spin_lock_init(&ifq->rq.lock);
535 	mutex_init(&ifq->pp_lock);
536 	refcount_set(&ifq->refs, 1);
537 	refcount_set(&ifq->user_refs, 1);
538 	return ifq;
539 }
540 
541 static void io_zcrx_drop_netdev(struct io_zcrx_ifq *ifq)
542 {
543 	guard(mutex)(&ifq->pp_lock);
544 
545 	if (!ifq->netdev)
546 		return;
547 	netdev_put(ifq->netdev, &ifq->netdev_tracker);
548 	ifq->netdev = NULL;
549 }
550 
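/*
 * Detach the memory provider from the netdev rx queue and drop the netdev
 * reference.
 */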
551 static void io_close_queue(struct io_zcrx_ifq *ifq)
552 {
553 	struct net_device *netdev;
554 	netdevice_tracker netdev_tracker;
555 	struct pp_memory_provider_params p = {
556 		.mp_ops = &io_uring_pp_zc_ops,
557 		.mp_priv = ifq,
558 	};
559 
560 	scoped_guard(mutex, &ifq->pp_lock) {
561 		netdev = ifq->netdev;
562 		netdev_tracker = ifq->netdev_tracker;
563 		ifq->netdev = NULL;
564 	}
565 
566 	if (netdev) {
567 		if (ifq->if_rxq != -1) {
568 			netdev_lock(netdev);
569 			netif_mp_close_rxq(netdev, ifq->if_rxq, &p);
570 			netdev_unlock(netdev);
571 		}
572 		netdev_put(netdev, &netdev_tracker);
573 	}
574 	ifq->if_rxq = -1;
575 }
576 
577 static void io_zcrx_ifq_free(struct io_zcrx_ifq *ifq)
578 {
579 	io_close_queue(ifq);
580 
581 	if (ifq->area)
582 		io_zcrx_free_area(ifq, ifq->area);
583 	if (ifq->mm_account)
584 		mmdrop(ifq->mm_account);
585 	if (ifq->dev)
586 		put_device(ifq->dev);
587 
588 	io_free_rbuf_ring(ifq);
589 	free_uid(ifq->user);
590 	mutex_destroy(&ifq->pp_lock);
591 	kfree(ifq);
592 }
593 
594 static void io_put_zcrx_ifq(struct io_zcrx_ifq *ifq)
595 {
596 	if (refcount_dec_and_test(&ifq->refs))
597 		io_zcrx_ifq_free(ifq);
598 }
599 
600 static void io_zcrx_return_niov_freelist(struct net_iov *niov)
601 {
602 	struct io_zcrx_area *area = io_zcrx_iov_to_area(niov);
603 
604 	guard(spinlock_bh)(&area->freelist_lock);
605 	if (WARN_ON_ONCE(area->free_count >= area->nia.num_niovs))
606 		return;
607 	area->freelist[area->free_count++] = net_iov_idx(niov);
608 }
609 
610 static struct net_iov *zcrx_get_free_niov(struct io_zcrx_area *area)
611 {
612 	unsigned niov_idx;
613 
614 	lockdep_assert_held(&area->freelist_lock);
615 
616 	if (unlikely(!area->free_count))
617 		return NULL;
618 
619 	niov_idx = area->freelist[--area->free_count];
620 	return &area->nia.niovs[niov_idx];
621 }
622 
623 static void io_zcrx_return_niov(struct net_iov *niov)
624 {
625 	netmem_ref netmem = net_iov_to_netmem(niov);
626 
627 	if (!niov->desc.pp) {
628 		/* copy-fallback niov that isn't owned by any page pool */
629 		io_zcrx_return_niov_freelist(niov);
630 		return;
631 	}
632 	page_pool_put_unrefed_netmem(niov->desc.pp, netmem, -1, false);
633 }
634 
635 static void io_zcrx_scrub(struct io_zcrx_ifq *ifq)
636 {
637 	struct io_zcrx_area *area = ifq->area;
638 	int i;
639 
640 	if (!area)
641 		return;
642 
643 	/* Reclaim all buffers handed out to user space. */
644 	for (i = 0; i < area->nia.num_niovs; i++) {
645 		struct net_iov *niov = &area->nia.niovs[i];
646 		int nr;
647 
648 		if (!atomic_read(io_get_user_counter(niov)))
649 			continue;
650 		nr = atomic_xchg(io_get_user_counter(niov), 0);
651 		if (nr && !page_pool_unref_netmem(net_iov_to_netmem(niov), nr))
652 			io_zcrx_return_niov(niov);
653 	}
654 }
655 
656 static void zcrx_unregister_user(struct io_zcrx_ifq *ifq)
657 {
658 	if (refcount_dec_and_test(&ifq->user_refs)) {
659 		io_close_queue(ifq);
660 		io_zcrx_scrub(ifq);
661 	}
662 }
663 
664 static void zcrx_unregister(struct io_zcrx_ifq *ifq)
665 {
666 	zcrx_unregister_user(ifq);
667 	io_put_zcrx_ifq(ifq);
668 }
669 
670 struct io_mapped_region *io_zcrx_get_region(struct io_ring_ctx *ctx,
671 					    unsigned int id)
672 {
673 	struct io_zcrx_ifq *ifq = xa_load(&ctx->zcrx_ctxs, id);
674 
675 	lockdep_assert_held(&ctx->mmap_lock);
676 
677 	return ifq ? &ifq->rq_region : NULL;
678 }
679 
680 static int zcrx_box_release(struct inode *inode, struct file *file)
681 {
682 	struct io_zcrx_ifq *ifq = file->private_data;
683 
684 	if (WARN_ON_ONCE(!ifq))
685 		return -EFAULT;
686 	zcrx_unregister(ifq);
687 	return 0;
688 }
689 
690 static const struct file_operations zcrx_box_fops = {
691 	.owner		= THIS_MODULE,
692 	.release	= zcrx_box_release,
693 };
694 
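/*
 * Export the interface queue as an anonymous-inode file descriptor so it
 * can later be imported into another io_uring instance.
 */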
695 static int zcrx_export(struct io_ring_ctx *ctx, struct io_zcrx_ifq *ifq,
696 		       struct zcrx_ctrl *ctrl, void __user *arg)
697 {
698 	struct zcrx_ctrl_export *ce = &ctrl->zc_export;
699 	struct file *file;
700 	int fd = -1;
701 
702 	if (!mem_is_zero(ce, sizeof(*ce)))
703 		return -EINVAL;
704 	fd = get_unused_fd_flags(O_CLOEXEC);
705 	if (fd < 0)
706 		return fd;
707 
708 	ce->zcrx_fd = fd;
709 	if (copy_to_user(arg, ctrl, sizeof(*ctrl))) {
710 		put_unused_fd(fd);
711 		return -EFAULT;
712 	}
713 
714 	refcount_inc(&ifq->refs);
715 	refcount_inc(&ifq->user_refs);
716 
717 	file = anon_inode_create_getfile("[zcrx]", &zcrx_box_fops,
718 					 ifq, O_CLOEXEC, NULL);
719 	if (IS_ERR(file)) {
720 		put_unused_fd(fd);
721 		zcrx_unregister(ifq);
722 		return PTR_ERR(file);
723 	}
724 
725 	fd_install(fd, file);
726 	return 0;
727 }
728 
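/*
 * Import a previously exported zcrx instance: look up the ifq behind the
 * passed fd, take references and publish it under a new id in this ring.
 */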
729 static int import_zcrx(struct io_ring_ctx *ctx,
730 		       struct io_uring_zcrx_ifq_reg __user *arg,
731 		       struct io_uring_zcrx_ifq_reg *reg)
732 {
733 	struct io_zcrx_ifq *ifq;
734 	struct file *file;
735 	int fd, ret;
736 	u32 id;
737 
738 	if (!(ctx->flags & IORING_SETUP_DEFER_TASKRUN))
739 		return -EINVAL;
740 	if (!(ctx->flags & (IORING_SETUP_CQE32|IORING_SETUP_CQE_MIXED)))
741 		return -EINVAL;
742 	if (reg->if_rxq || reg->rq_entries || reg->area_ptr || reg->region_ptr)
743 		return -EINVAL;
744 	if (reg->flags & ~ZCRX_REG_IMPORT)
745 		return -EINVAL;
746 
747 	fd = reg->if_idx;
748 	CLASS(fd, f)(fd);
749 	if (fd_empty(f))
750 		return -EBADF;
751 
752 	file = fd_file(f);
753 	if (file->f_op != &zcrx_box_fops || !file->private_data)
754 		return -EBADF;
755 
756 	ifq = file->private_data;
757 	refcount_inc(&ifq->refs);
758 	refcount_inc(&ifq->user_refs);
759 
760 	scoped_guard(mutex, &ctx->mmap_lock) {
761 		ret = xa_alloc(&ctx->zcrx_ctxs, &id, NULL, xa_limit_31b, GFP_KERNEL);
762 		if (ret)
763 			goto err;
764 	}
765 
766 	reg->zcrx_id = id;
767 	io_fill_zcrx_offsets(&reg->offsets);
768 	if (copy_to_user(arg, reg, sizeof(*reg))) {
769 		ret = -EFAULT;
770 		goto err_xa_erase;
771 	}
772 
773 	scoped_guard(mutex, &ctx->mmap_lock) {
774 		ret = -ENOMEM;
775 		if (xa_store(&ctx->zcrx_ctxs, id, ifq, GFP_KERNEL))
776 			goto err_xa_erase;
777 	}
778 
779 	return 0;
780 err_xa_erase:
781 	scoped_guard(mutex, &ctx->mmap_lock)
782 		xa_erase(&ctx->zcrx_ctxs, id);
783 err:
784 	zcrx_unregister(ifq);
785 	return ret;
786 }
787 
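/*
 * Bind the interface queue to a netdev rx queue: resolve the DMA device,
 * create the area and install the io_uring memory provider on the queue.
 */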
788 static int zcrx_register_netdev(struct io_zcrx_ifq *ifq,
789 				struct io_uring_zcrx_ifq_reg *reg,
790 				struct io_uring_zcrx_area_reg *area)
791 {
792 	struct pp_memory_provider_params mp_param = {};
793 	unsigned if_rxq = reg->if_rxq;
794 	int ret;
795 
796 	ifq->netdev = netdev_get_by_index_lock(current->nsproxy->net_ns,
797 						reg->if_idx);
798 	if (!ifq->netdev)
799 		return -ENODEV;
800 
801 	netdev_hold(ifq->netdev, &ifq->netdev_tracker, GFP_KERNEL);
802 
803 	ifq->dev = netdev_queue_get_dma_dev(ifq->netdev, if_rxq, NETDEV_QUEUE_TYPE_RX);
804 	if (!ifq->dev) {
805 		ret = -EOPNOTSUPP;
806 		goto netdev_put_unlock;
807 	}
808 	get_device(ifq->dev);
809 
810 	ret = io_zcrx_create_area(ifq, area, reg);
811 	if (ret)
812 		goto netdev_put_unlock;
813 
814 	if (reg->rx_buf_len)
815 		mp_param.rx_page_size = 1U << ifq->niov_shift;
816 	mp_param.mp_ops = &io_uring_pp_zc_ops;
817 	mp_param.mp_priv = ifq;
818 	ret = netif_mp_open_rxq(ifq->netdev, if_rxq, &mp_param, NULL);
819 	if (ret)
820 		goto netdev_put_unlock;
821 
822 	ifq->if_rxq = if_rxq;
823 	ret = 0;
824 netdev_put_unlock:
825 	netdev_unlock(ifq->netdev);
826 	return ret;
827 }
828 
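/*
 * Register a zero-copy rx interface queue: validate the request, allocate
 * the ifq and refill ring, create the buffer area and, unless ZCRX_REG_NODEV
 * is set, bind it to a netdev rx queue.
 */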
829 int io_register_zcrx(struct io_ring_ctx *ctx,
830 		     struct io_uring_zcrx_ifq_reg __user *arg)
831 {
832 	struct io_uring_zcrx_area_reg area;
833 	struct io_uring_zcrx_ifq_reg reg;
834 	struct io_uring_region_desc rd;
835 	struct io_zcrx_ifq *ifq;
836 	int ret;
837 	u32 id;
838 
839 	/*
840 	 * CAP_NET_ADMIN is required: 1. an interface queue is allocated;
841 	 * 2. it can observe data destined for sockets of other tasks.
842 	 */
843 	if (!capable(CAP_NET_ADMIN))
844 		return -EPERM;
845 
846 	/* mandatory io_uring features for zc rx */
847 	if (!(ctx->flags & IORING_SETUP_DEFER_TASKRUN))
848 		return -EINVAL;
849 	if (!(ctx->flags & (IORING_SETUP_CQE32|IORING_SETUP_CQE_MIXED)))
850 		return -EINVAL;
851 	if (copy_from_user(&reg, arg, sizeof(reg)))
852 		return -EFAULT;
853 	if (!mem_is_zero(&reg.__resv, sizeof(reg.__resv)) || reg.zcrx_id)
854 		return -EINVAL;
855 	if (reg.flags & ~ZCRX_SUPPORTED_REG_FLAGS)
856 		return -EINVAL;
857 	if (reg.flags & ZCRX_REG_IMPORT)
858 		return import_zcrx(ctx, arg, &reg);
859 	if (copy_from_user(&rd, u64_to_user_ptr(reg.region_ptr), sizeof(rd)))
860 		return -EFAULT;
861 	if (reg.if_rxq == -1 || !reg.rq_entries)
862 		return -EINVAL;
863 	if ((reg.if_rxq || reg.if_idx) && (reg.flags & ZCRX_REG_NODEV))
864 		return -EINVAL;
865 	if (reg.rq_entries > IO_RQ_MAX_ENTRIES) {
866 		if (!(ctx->flags & IORING_SETUP_CLAMP))
867 			return -EINVAL;
868 		reg.rq_entries = IO_RQ_MAX_ENTRIES;
869 	}
870 	reg.rq_entries = roundup_pow_of_two(reg.rq_entries);
871 
872 	if (copy_from_user(&area, u64_to_user_ptr(reg.area_ptr), sizeof(area)))
873 		return -EFAULT;
874 
875 	ifq = io_zcrx_ifq_alloc(ctx);
876 	if (!ifq)
877 		return -ENOMEM;
878 
879 	if (ctx->user) {
880 		get_uid(ctx->user);
881 		ifq->user = ctx->user;
882 	}
883 	if (ctx->mm_account) {
884 		mmgrab(ctx->mm_account);
885 		ifq->mm_account = ctx->mm_account;
886 	}
887 	ifq->rq.nr_entries = reg.rq_entries;
888 
889 	scoped_guard(mutex, &ctx->mmap_lock) {
890 		/* preallocate id */
891 		ret = xa_alloc(&ctx->zcrx_ctxs, &id, NULL, xa_limit_31b, GFP_KERNEL);
892 		if (ret)
893 			goto ifq_free;
894 	}
895 
896 	ret = io_allocate_rbuf_ring(ctx, ifq, &reg, &rd, id);
897 	if (ret)
898 		goto err;
899 
900 	ifq->kern_readable = !(area.flags & IORING_ZCRX_AREA_DMABUF);
901 
902 	if (!(reg.flags & ZCRX_REG_NODEV)) {
903 		ret = zcrx_register_netdev(ifq, &reg, &area);
904 		if (ret)
905 			goto err;
906 	} else {
907 		ret = io_zcrx_create_area(ifq, &area, &reg);
908 		if (ret)
909 			goto err;
910 	}
911 
912 	reg.zcrx_id = id;
913 
914 	scoped_guard(mutex, &ctx->mmap_lock) {
915 		/* publish ifq */
916 		ret = -ENOMEM;
917 		if (xa_store(&ctx->zcrx_ctxs, id, ifq, GFP_KERNEL))
918 			goto err;
919 	}
920 
921 	reg.rx_buf_len = 1U << ifq->niov_shift;
922 
923 	if (copy_to_user(arg, &reg, sizeof(reg)) ||
924 	    copy_to_user(u64_to_user_ptr(reg.region_ptr), &rd, sizeof(rd)) ||
925 	    copy_to_user(u64_to_user_ptr(reg.area_ptr), &area, sizeof(area))) {
926 		ret = -EFAULT;
927 		goto err;
928 	}
929 	return 0;
930 err:
931 	scoped_guard(mutex, &ctx->mmap_lock)
932 		xa_erase(&ctx->zcrx_ctxs, id);
933 ifq_free:
934 	zcrx_unregister(ifq);
935 	return ret;
936 }
937 
938 static inline bool is_zcrx_entry_marked(struct io_ring_ctx *ctx, unsigned long id)
939 {
940 	return xa_get_mark(&ctx->zcrx_ctxs, id, XA_MARK_1);
941 }
942 
943 static inline void set_zcrx_entry_mark(struct io_ring_ctx *ctx, unsigned long id)
944 {
945 	xa_set_mark(&ctx->zcrx_ctxs, id, XA_MARK_1);
946 }
947 
948 void io_terminate_zcrx(struct io_ring_ctx *ctx)
949 {
950 	struct io_zcrx_ifq *ifq;
951 	unsigned long id = 0;
952 
953 	lockdep_assert_held(&ctx->uring_lock);
954 
955 	while (1) {
956 		scoped_guard(mutex, &ctx->mmap_lock)
957 			ifq = xa_find(&ctx->zcrx_ctxs, &id, ULONG_MAX, XA_PRESENT);
958 		if (!ifq)
959 			break;
960 		if (WARN_ON_ONCE(is_zcrx_entry_marked(ctx, id)))
961 			break;
962 		set_zcrx_entry_mark(ctx, id);
963 		id++;
964 		zcrx_unregister_user(ifq);
965 	}
966 }
967 
968 void io_unregister_zcrx(struct io_ring_ctx *ctx)
969 {
970 	struct io_zcrx_ifq *ifq;
971 
972 	lockdep_assert_held(&ctx->uring_lock);
973 
974 	while (1) {
975 		scoped_guard(mutex, &ctx->mmap_lock) {
976 			unsigned long id = 0;
977 
978 			ifq = xa_find(&ctx->zcrx_ctxs, &id, ULONG_MAX, XA_PRESENT);
979 			if (ifq) {
980 				if (WARN_ON_ONCE(!is_zcrx_entry_marked(ctx, id))) {
981 					ifq = NULL;
982 					break;
983 				}
984 				xa_erase(&ctx->zcrx_ctxs, id);
985 			}
986 		}
987 		if (!ifq)
988 			break;
989 		io_put_zcrx_ifq(ifq);
990 	}
991 
992 	xa_destroy(&ctx->zcrx_ctxs);
993 }
994 
995 static inline u32 zcrx_rq_entries(struct zcrx_rq *rq)
996 {
997 	u32 entries;
998 
999 	entries = smp_load_acquire(&rq->ring->tail) - rq->cached_head;
1000 	return min(entries, rq->nr_entries);
1001 }
1002 
1003 static struct io_uring_zcrx_rqe *zcrx_next_rqe(struct zcrx_rq *rq, unsigned mask)
1004 {
1005 	unsigned int idx = rq->cached_head++ & mask;
1006 
1007 	return &rq->rqes[idx];
1008 }
1009 
1010 static inline bool io_parse_rqe(struct io_uring_zcrx_rqe *rqe,
1011 				struct io_zcrx_ifq *ifq,
1012 				struct net_iov **ret_niov)
1013 {
1014 	__u64 off = READ_ONCE(rqe->off);
1015 	unsigned niov_idx, area_idx;
1016 	struct io_zcrx_area *area;
1017 
1018 	area_idx = off >> IORING_ZCRX_AREA_SHIFT;
1019 	niov_idx = (off & ~IORING_ZCRX_AREA_MASK) >> ifq->niov_shift;
1020 
1021 	if (unlikely(rqe->__pad || area_idx))
1022 		return false;
1023 	area = ifq->area;
1024 
1025 	if (unlikely(niov_idx >= area->nia.num_niovs))
1026 		return false;
1027 	niov_idx = array_index_nospec(niov_idx, area->nia.num_niovs);
1028 
1029 	*ret_niov = &area->nia.niovs[niov_idx];
1030 	return true;
1031 }
1032 
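/*
 * Fast-path refill: consume entries posted to the refill ring by userspace,
 * drop their user references and collect netmems belonging to this page pool.
 */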
1033 static unsigned io_zcrx_ring_refill(struct page_pool *pp,
1034 				    struct io_zcrx_ifq *ifq,
1035 				    netmem_ref *netmems, unsigned to_alloc)
1036 {
1037 	struct zcrx_rq *rq = &ifq->rq;
1038 	unsigned int mask = rq->nr_entries - 1;
1039 	unsigned int entries;
1040 	unsigned allocated = 0;
1041 
1042 	guard(spinlock_bh)(&rq->lock);
1043 
1044 	entries = zcrx_rq_entries(rq);
1045 	entries = min_t(unsigned, entries, to_alloc);
1046 	if (unlikely(!entries))
1047 		return 0;
1048 
1049 	do {
1050 		struct io_uring_zcrx_rqe *rqe = zcrx_next_rqe(rq, mask);
1051 		struct net_iov *niov;
1052 		netmem_ref netmem;
1053 
1054 		if (!io_parse_rqe(rqe, ifq, &niov))
1055 			continue;
1056 		if (!io_zcrx_put_niov_uref(niov))
1057 			continue;
1058 
1059 		netmem = net_iov_to_netmem(niov);
1060 		if (!page_pool_unref_and_test(netmem))
1061 			continue;
1062 
1063 		if (unlikely(niov->desc.pp != pp)) {
1064 			io_zcrx_return_niov(niov);
1065 			continue;
1066 		}
1067 
1068 		netmems[allocated] = netmem;
1069 		allocated++;
1070 	} while (--entries);
1071 
1072 	smp_store_release(&rq->ring->head, rq->cached_head);
1073 	return allocated;
1074 }
1075 
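/* Slow-path refill straight from the area freelist. */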
1076 static unsigned io_zcrx_refill_slow(struct page_pool *pp, struct io_zcrx_ifq *ifq,
1077 				    netmem_ref *netmems, unsigned to_alloc)
1078 {
1079 	struct io_zcrx_area *area = ifq->area;
1080 	unsigned allocated = 0;
1081 
1082 	guard(spinlock_bh)(&area->freelist_lock);
1083 
1084 	for (allocated = 0; allocated < to_alloc; allocated++) {
1085 		struct net_iov *niov = zcrx_get_free_niov(area);
1086 
1087 		if (!niov)
1088 			break;
1089 		net_mp_niov_set_page_pool(pp, niov);
1090 		netmems[allocated] = net_iov_to_netmem(niov);
1091 	}
1092 	return allocated;
1093 }
1094 
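/*
 * Memory provider allocation hook: fill the page pool's alloc cache from the
 * refill ring, falling back to the freelist, and return one netmem.
 */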
1095 static netmem_ref io_pp_zc_alloc_netmems(struct page_pool *pp, gfp_t gfp)
1096 {
1097 	struct io_zcrx_ifq *ifq = io_pp_to_ifq(pp);
1098 	netmem_ref *netmems = pp->alloc.cache;
1099 	unsigned to_alloc = PP_ALLOC_CACHE_REFILL;
1100 	unsigned allocated;
1101 
1102 	/* the page pool should already ensure the alloc cache is empty */
1103 	if (WARN_ON_ONCE(pp->alloc.count))
1104 		return 0;
1105 
1106 	allocated = io_zcrx_ring_refill(pp, ifq, netmems, to_alloc);
1107 	if (likely(allocated))
1108 		goto out_return;
1109 
1110 	allocated = io_zcrx_refill_slow(pp, ifq, netmems, to_alloc);
1111 	if (!allocated)
1112 		return 0;
1113 out_return:
1114 	zcrx_sync_for_device(pp, ifq, netmems, allocated);
1115 	allocated--;
1116 	pp->alloc.count += allocated;
1117 	return netmems[allocated];
1118 }
1119 
1120 static bool io_pp_zc_release_netmem(struct page_pool *pp, netmem_ref netmem)
1121 {
1122 	struct net_iov *niov;
1123 
1124 	if (WARN_ON_ONCE(!netmem_is_net_iov(netmem)))
1125 		return false;
1126 
1127 	niov = netmem_to_net_iov(netmem);
1128 	net_mp_niov_clear_page_pool(niov);
1129 	io_zcrx_return_niov_freelist(niov);
1130 	return false;
1131 }
1132 
1133 static int io_pp_zc_init(struct page_pool *pp)
1134 {
1135 	struct io_zcrx_ifq *ifq = io_pp_to_ifq(pp);
1136 
1137 	if (WARN_ON_ONCE(!ifq))
1138 		return -EINVAL;
1139 	if (WARN_ON_ONCE(ifq->dev != pp->p.dev))
1140 		return -EINVAL;
1141 	if (WARN_ON_ONCE(!pp->dma_map))
1142 		return -EOPNOTSUPP;
1143 	if (pp->p.order + PAGE_SHIFT != ifq->niov_shift)
1144 		return -EINVAL;
1145 	if (pp->p.dma_dir != DMA_FROM_DEVICE)
1146 		return -EOPNOTSUPP;
1147 
1148 	refcount_inc(&ifq->refs);
1149 	return 0;
1150 }
1151 
1152 static void io_pp_zc_destroy(struct page_pool *pp)
1153 {
1154 	io_put_zcrx_ifq(io_pp_to_ifq(pp));
1155 }
1156 
1157 static int io_pp_nl_fill(void *mp_priv, struct sk_buff *rsp,
1158 			 struct netdev_rx_queue *rxq)
1159 {
1160 	struct nlattr *nest;
1161 	int type;
1162 
1163 	type = rxq ? NETDEV_A_QUEUE_IO_URING : NETDEV_A_PAGE_POOL_IO_URING;
1164 	nest = nla_nest_start(rsp, type);
1165 	if (!nest)
1166 		return -EMSGSIZE;
1167 	nla_nest_end(rsp, nest);
1168 
1169 	return 0;
1170 }
1171 
1172 static void io_pp_uninstall(void *mp_priv, struct netdev_rx_queue *rxq)
1173 {
1174 	struct pp_memory_provider_params *p = &rxq->mp_params;
1175 	struct io_zcrx_ifq *ifq = mp_priv;
1176 
1177 	io_zcrx_drop_netdev(ifq);
1178 	if (ifq->area)
1179 		io_zcrx_unmap_area(ifq, ifq->area);
1180 
1181 	p->mp_ops = NULL;
1182 	p->mp_priv = NULL;
1183 }
1184 
1185 static const struct memory_provider_ops io_uring_pp_zc_ops = {
1186 	.alloc_netmems		= io_pp_zc_alloc_netmems,
1187 	.release_netmem		= io_pp_zc_release_netmem,
1188 	.init			= io_pp_zc_init,
1189 	.destroy		= io_pp_zc_destroy,
1190 	.nl_fill		= io_pp_nl_fill,
1191 	.uninstall		= io_pp_uninstall,
1192 };
1193 
1194 static unsigned zcrx_parse_rq(netmem_ref *netmem_array, unsigned nr,
1195 			      struct io_zcrx_ifq *zcrx, struct zcrx_rq *rq)
1196 {
1197 	unsigned int mask = rq->nr_entries - 1;
1198 	unsigned int i;
1199 
1200 	nr = min(nr, zcrx_rq_entries(rq));
1201 	for (i = 0; i < nr; i++) {
1202 		struct io_uring_zcrx_rqe *rqe = zcrx_next_rqe(rq, mask);
1203 		struct net_iov *niov;
1204 
1205 		if (!io_parse_rqe(rqe, zcrx, &niov))
1206 			break;
1207 		netmem_array[i] = net_iov_to_netmem(niov);
1208 	}
1209 
1210 	smp_store_release(&rq->ring->head, rq->cached_head);
1211 	return i;
1212 }
1213 
1214 #define ZCRX_FLUSH_BATCH 32
1215 
1216 static void zcrx_return_buffers(netmem_ref *netmems, unsigned nr)
1217 {
1218 	unsigned i;
1219 
1220 	for (i = 0; i < nr; i++) {
1221 		netmem_ref netmem = netmems[i];
1222 		struct net_iov *niov = netmem_to_net_iov(netmem);
1223 
1224 		if (!io_zcrx_put_niov_uref(niov))
1225 			continue;
1226 		if (!page_pool_unref_and_test(netmem))
1227 			continue;
1228 		io_zcrx_return_niov(niov);
1229 	}
1230 }
1231 
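/*
 * Drain the refill ring in batches, dropping the user reference on each
 * parsed buffer and recycling it.
 */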
1232 static int zcrx_flush_rq(struct io_ring_ctx *ctx, struct io_zcrx_ifq *zcrx,
1233 			 struct zcrx_ctrl *ctrl)
1234 {
1235 	struct zcrx_ctrl_flush_rq *frq = &ctrl->zc_flush;
1236 	netmem_ref netmems[ZCRX_FLUSH_BATCH];
1237 	unsigned total = 0;
1238 	unsigned nr;
1239 
1240 	if (!mem_is_zero(&frq->__resv, sizeof(frq->__resv)))
1241 		return -EINVAL;
1242 
1243 	do {
1244 		struct zcrx_rq *rq = &zcrx->rq;
1245 
1246 		scoped_guard(spinlock_bh, &rq->lock) {
1247 			nr = zcrx_parse_rq(netmems, ZCRX_FLUSH_BATCH, zcrx, rq);
1248 			zcrx_return_buffers(netmems, nr);
1249 		}
1250 
1251 		total += nr;
1252 
1253 		if (fatal_signal_pending(current))
1254 			break;
1255 		cond_resched();
1256 	} while (nr == ZCRX_FLUSH_BATCH && total < zcrx->rq.nr_entries);
1257 
1258 	return 0;
1259 }
1260 
1261 int io_zcrx_ctrl(struct io_ring_ctx *ctx, void __user *arg, unsigned nr_args)
1262 {
1263 	struct zcrx_ctrl ctrl;
1264 	struct io_zcrx_ifq *zcrx;
1265 
1266 	BUILD_BUG_ON(sizeof(ctrl.zc_export) != sizeof(ctrl.zc_flush));
1267 
1268 	if (nr_args)
1269 		return -EINVAL;
1270 	if (copy_from_user(&ctrl, arg, sizeof(ctrl)))
1271 		return -EFAULT;
1272 	if (!mem_is_zero(&ctrl.__resv, sizeof(ctrl.__resv)))
1273 		return -EFAULT;
1274 
1275 	zcrx = xa_load(&ctx->zcrx_ctxs, ctrl.zcrx_id);
1276 	if (!zcrx)
1277 		return -ENXIO;
1278 
1279 	switch (ctrl.op) {
1280 	case ZCRX_CTRL_FLUSH_RQ:
1281 		return zcrx_flush_rq(ctx, zcrx, &ctrl);
1282 	case ZCRX_CTRL_EXPORT:
1283 		return zcrx_export(ctx, zcrx, &ctrl, arg);
1284 	}
1285 
1286 	return -EOPNOTSUPP;
1287 }
1288 
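/*
 * Post a zero-copy completion: a normal CQE followed by an io_uring_zcrx_cqe
 * encoding the area id and the data offset within the area.
 */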
1289 static bool io_zcrx_queue_cqe(struct io_kiocb *req, struct net_iov *niov,
1290 			      struct io_zcrx_ifq *ifq, int off, int len)
1291 {
1292 	struct io_ring_ctx *ctx = req->ctx;
1293 	struct io_uring_zcrx_cqe *rcqe;
1294 	struct io_zcrx_area *area;
1295 	struct io_uring_cqe *cqe;
1296 	u64 offset;
1297 
1298 	if (!io_defer_get_uncommited_cqe(ctx, &cqe))
1299 		return false;
1300 
1301 	cqe->user_data = req->cqe.user_data;
1302 	cqe->res = len;
1303 	cqe->flags = IORING_CQE_F_MORE;
1304 	if (ctx->flags & IORING_SETUP_CQE_MIXED)
1305 		cqe->flags |= IORING_CQE_F_32;
1306 
1307 	area = io_zcrx_iov_to_area(niov);
1308 	offset = off + (net_iov_idx(niov) << ifq->niov_shift);
1309 	rcqe = (struct io_uring_zcrx_cqe *)(cqe + 1);
1310 	rcqe->off = offset + ((u64)area->area_id << IORING_ZCRX_AREA_SHIFT);
1311 	rcqe->__pad = 0;
1312 	return true;
1313 }
1314 
1315 static struct net_iov *io_alloc_fallback_niov(struct io_zcrx_ifq *ifq)
1316 {
1317 	struct io_zcrx_area *area = ifq->area;
1318 	struct net_iov *niov = NULL;
1319 
1320 	if (!ifq->kern_readable)
1321 		return NULL;
1322 
1323 	scoped_guard(spinlock_bh, &area->freelist_lock)
1324 		niov = zcrx_get_free_niov(area);
1325 
1326 	if (niov)
1327 		page_pool_fragment_netmem(net_iov_to_netmem(niov), 1);
1328 	return niov;
1329 }
1330 
1331 struct io_copy_cache {
1332 	struct page		*page;
1333 	unsigned long		offset;
1334 	size_t			size;
1335 };
1336 
1337 static ssize_t io_copy_page(struct io_copy_cache *cc, struct page *src_page,
1338 			    unsigned int src_offset, size_t len)
1339 {
1340 	size_t copied = 0;
1341 
1342 	len = min(len, cc->size);
1343 
1344 	while (len) {
1345 		void *src_addr, *dst_addr;
1346 		struct page *dst_page = cc->page;
1347 		unsigned dst_offset = cc->offset;
1348 		size_t n = len;
1349 
1350 		if (folio_test_partial_kmap(page_folio(dst_page)) ||
1351 		    folio_test_partial_kmap(page_folio(src_page))) {
1352 			dst_page += dst_offset / PAGE_SIZE;
1353 			dst_offset = offset_in_page(dst_offset);
1354 			src_page += src_offset / PAGE_SIZE;
1355 			src_offset = offset_in_page(src_offset);
1356 			n = min(PAGE_SIZE - src_offset, PAGE_SIZE - dst_offset);
1357 			n = min(n, len);
1358 		}
1359 
1360 		dst_addr = kmap_local_page(dst_page) + dst_offset;
1361 		src_addr = kmap_local_page(src_page) + src_offset;
1362 
1363 		memcpy(dst_addr, src_addr, n);
1364 
1365 		kunmap_local(src_addr);
1366 		kunmap_local(dst_addr);
1367 
1368 		cc->size -= n;
1369 		cc->offset += n;
1370 		src_offset += n;
1371 		len -= n;
1372 		copied += n;
1373 	}
1374 	return copied;
1375 }
1376 
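/*
 * Copy fallback: take niovs from the area freelist, copy the data into them
 * and post completions. Used when the payload isn't in zcrx-mapped memory.
 */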
1377 static ssize_t io_zcrx_copy_chunk(struct io_kiocb *req, struct io_zcrx_ifq *ifq,
1378 				  struct page *src_page, unsigned int src_offset,
1379 				  size_t len)
1380 {
1381 	size_t copied = 0;
1382 	int ret = 0;
1383 
1384 	while (len) {
1385 		struct io_copy_cache cc;
1386 		struct net_iov *niov;
1387 		size_t n;
1388 
1389 		niov = io_alloc_fallback_niov(ifq);
1390 		if (!niov) {
1391 			ret = -ENOMEM;
1392 			break;
1393 		}
1394 
1395 		cc.page = io_zcrx_iov_page(niov);
1396 		cc.offset = 0;
1397 		cc.size = PAGE_SIZE;
1398 
1399 		n = io_copy_page(&cc, src_page, src_offset, len);
1400 
1401 		if (!io_zcrx_queue_cqe(req, niov, ifq, 0, n)) {
1402 			io_zcrx_return_niov(niov);
1403 			ret = -ENOSPC;
1404 			break;
1405 		}
1406 
1407 		io_zcrx_get_niov_uref(niov);
1408 		src_offset += n;
1409 		len -= n;
1410 		copied += n;
1411 	}
1412 
1413 	return copied ? copied : ret;
1414 }
1415 
1416 static int io_zcrx_copy_frag(struct io_kiocb *req, struct io_zcrx_ifq *ifq,
1417 			     const skb_frag_t *frag, int off, int len)
1418 {
1419 	struct page *page = skb_frag_page(frag);
1420 
1421 	return io_zcrx_copy_chunk(req, ifq, page, off + skb_frag_off(frag), len);
1422 }
1423 
1424 static int io_zcrx_recv_frag(struct io_kiocb *req, struct io_zcrx_ifq *ifq,
1425 			     const skb_frag_t *frag, int off, int len)
1426 {
1427 	struct net_iov *niov;
1428 	struct page_pool *pp;
1429 
1430 	if (unlikely(!skb_frag_is_net_iov(frag)))
1431 		return io_zcrx_copy_frag(req, ifq, frag, off, len);
1432 
1433 	niov = netmem_to_net_iov(frag->netmem);
1434 	pp = niov->desc.pp;
1435 
1436 	if (!pp || pp->mp_ops != &io_uring_pp_zc_ops || io_pp_to_ifq(pp) != ifq)
1437 		return -EFAULT;
1438 
1439 	if (!io_zcrx_queue_cqe(req, niov, ifq, off + skb_frag_off(frag), len))
1440 		return -ENOSPC;
1441 
1442 	/*
1443 	 * Prevent the niov from being recycled while userspace is accessing it.
1444 	 * This has to be done before grabbing a user reference.
1445 	 */
1446 	page_pool_ref_netmem(net_iov_to_netmem(niov));
1447 	io_zcrx_get_niov_uref(niov);
1448 	return len;
1449 }
1450 
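/*
 * tcp_read_sock() callback: walk the skb's linear data, frags and frag list,
 * posting zero-copy completions for net_iov frags and copying everything else.
 */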
1451 static int
1452 io_zcrx_recv_skb(read_descriptor_t *desc, struct sk_buff *skb,
1453 		 unsigned int offset, size_t len)
1454 {
1455 	struct io_zcrx_args *args = desc->arg.data;
1456 	struct io_zcrx_ifq *ifq = args->ifq;
1457 	struct io_kiocb *req = args->req;
1458 	struct sk_buff *frag_iter;
1459 	unsigned start, start_off = offset;
1460 	int i, copy, end, off;
1461 	int ret = 0;
1462 
1463 	len = min_t(size_t, len, desc->count);
1464 	/*
1465 	 * __tcp_read_sock() always calls io_zcrx_recv_skb one last time, even
1466 	 * if desc->count is already 0. This is caused by the if (offset + 1 !=
1467 	 * skb->len) check. Return early in this case to break out of
1468 	 * __tcp_read_sock().
1469 	 */
1470 	if (!len)
1471 		return 0;
1472 	if (unlikely(args->nr_skbs++ > IO_SKBS_PER_CALL_LIMIT))
1473 		return -EAGAIN;
1474 
1475 	if (unlikely(offset < skb_headlen(skb))) {
1476 		ssize_t copied;
1477 		size_t to_copy;
1478 
1479 		to_copy = min_t(size_t, skb_headlen(skb) - offset, len);
1480 		copied = io_zcrx_copy_chunk(req, ifq, virt_to_page(skb->data),
1481 					    offset_in_page(skb->data) + offset,
1482 					    to_copy);
1483 		if (copied < 0) {
1484 			ret = copied;
1485 			goto out;
1486 		}
1487 		offset += copied;
1488 		len -= copied;
1489 		if (!len)
1490 			goto out;
1491 		if (offset != skb_headlen(skb))
1492 			goto out;
1493 	}
1494 
1495 	start = skb_headlen(skb);
1496 
1497 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1498 		const skb_frag_t *frag;
1499 
1500 		if (WARN_ON(start > offset + len))
1501 			return -EFAULT;
1502 
1503 		frag = &skb_shinfo(skb)->frags[i];
1504 		end = start + skb_frag_size(frag);
1505 
1506 		if (offset < end) {
1507 			copy = end - offset;
1508 			if (copy > len)
1509 				copy = len;
1510 
1511 			off = offset - start;
1512 			ret = io_zcrx_recv_frag(req, ifq, frag, off, copy);
1513 			if (ret < 0)
1514 				goto out;
1515 
1516 			offset += ret;
1517 			len -= ret;
1518 			if (len == 0 || ret != copy)
1519 				goto out;
1520 		}
1521 		start = end;
1522 	}
1523 
1524 	skb_walk_frags(skb, frag_iter) {
1525 		if (WARN_ON(start > offset + len))
1526 			return -EFAULT;
1527 
1528 		end = start + frag_iter->len;
1529 		if (offset < end) {
1530 			size_t count;
1531 
1532 			copy = end - offset;
1533 			if (copy > len)
1534 				copy = len;
1535 
1536 			off = offset - start;
1537 			count = desc->count;
1538 			ret = io_zcrx_recv_skb(desc, frag_iter, off, copy);
1539 			desc->count = count;
1540 			if (ret < 0)
1541 				goto out;
1542 
1543 			offset += ret;
1544 			len -= ret;
1545 			if (len == 0 || ret != copy)
1546 				goto out;
1547 		}
1548 		start = end;
1549 	}
1550 
1551 out:
1552 	if (offset == start_off)
1553 		return ret;
1554 	desc->count -= (offset - start_off);
1555 	return offset - start_off;
1556 }
1557 
1558 static int io_zcrx_tcp_recvmsg(struct io_kiocb *req, struct io_zcrx_ifq *ifq,
1559 				struct sock *sk, int flags,
1560 				unsigned issue_flags, unsigned int *outlen)
1561 {
1562 	unsigned int len = *outlen;
1563 	struct io_zcrx_args args = {
1564 		.req = req,
1565 		.ifq = ifq,
1566 		.sock = sk->sk_socket,
1567 	};
1568 	read_descriptor_t rd_desc = {
1569 		.count = len ? len : UINT_MAX,
1570 		.arg.data = &args,
1571 	};
1572 	int ret;
1573 
1574 	lock_sock(sk);
1575 	ret = tcp_read_sock(sk, &rd_desc, io_zcrx_recv_skb);
1576 	if (len && ret > 0)
1577 		*outlen = len - ret;
1578 	if (ret <= 0) {
1579 		if (ret < 0 || sock_flag(sk, SOCK_DONE))
1580 			goto out;
1581 		if (sk->sk_err)
1582 			ret = sock_error(sk);
1583 		else if (sk->sk_shutdown & RCV_SHUTDOWN)
1584 			goto out;
1585 		else if (sk->sk_state == TCP_CLOSE)
1586 			ret = -ENOTCONN;
1587 		else
1588 			ret = -EAGAIN;
1589 	} else if (unlikely(args.nr_skbs > IO_SKBS_PER_CALL_LIMIT) &&
1590 		   (issue_flags & IO_URING_F_MULTISHOT)) {
1591 		ret = IOU_REQUEUE;
1592 	} else if (sock_flag(sk, SOCK_DONE)) {
1593 		/* Make it retry until it finally gets 0. */
1594 		if (issue_flags & IO_URING_F_MULTISHOT)
1595 			ret = IOU_REQUEUE;
1596 		else
1597 			ret = -EAGAIN;
1598 	}
1599 out:
1600 	release_sock(sk);
1601 	return ret;
1602 }
1603 
1604 int io_zcrx_recv(struct io_kiocb *req, struct io_zcrx_ifq *ifq,
1605 		 struct socket *sock, unsigned int flags,
1606 		 unsigned issue_flags, unsigned int *len)
1607 {
1608 	struct sock *sk = sock->sk;
1609 	const struct proto *prot = READ_ONCE(sk->sk_prot);
1610 
1611 	if (prot->recvmsg != tcp_recvmsg)
1612 		return -EPROTONOSUPPORT;
1613 
1614 	sock_rps_record_flow(sk);
1615 	return io_zcrx_tcp_recvmsg(req, ifq, sk, flags, issue_flags, len);
1616 }
1617