xref: /linux/io_uring/zcrx.c (revision 3d2c3d2eea9acdbee5b5742d15d021069b49d3f9)
1 // SPDX-License-Identifier: GPL-2.0
2 #include <linux/kernel.h>
3 #include <linux/errno.h>
4 #include <linux/dma-map-ops.h>
5 #include <linux/mm.h>
6 #include <linux/nospec.h>
7 #include <linux/io_uring.h>
8 #include <linux/netdevice.h>
9 #include <linux/rtnetlink.h>
10 #include <linux/skbuff_ref.h>
11 #include <linux/anon_inodes.h>
12 
13 #include <net/page_pool/helpers.h>
14 #include <net/page_pool/memory_provider.h>
15 #include <net/netlink.h>
16 #include <net/netdev_queues.h>
17 #include <net/netdev_rx_queue.h>
18 #include <net/tcp.h>
19 #include <net/rps.h>
20 
21 #include <trace/events/page_pool.h>
22 
23 #include <uapi/linux/io_uring.h>
24 
25 #include "io_uring.h"
26 #include "kbuf.h"
27 #include "memmap.h"
28 #include "zcrx.h"
29 #include "rsrc.h"
30 
31 #define IO_ZCRX_AREA_SUPPORTED_FLAGS	(IORING_ZCRX_AREA_DMABUF)
32 
33 #define IO_DMA_ATTR (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING)
34 
35 static inline struct io_zcrx_ifq *io_pp_to_ifq(struct page_pool *pp)
36 {
37 	return pp->mp_priv;
38 }
39 
40 static inline struct io_zcrx_area *io_zcrx_iov_to_area(const struct net_iov *niov)
41 {
42 	struct net_iov_area *owner = net_iov_owner(niov);
43 
44 	return container_of(owner, struct io_zcrx_area, nia);
45 }
46 
47 static inline struct page *io_zcrx_iov_page(const struct net_iov *niov)
48 {
49 	struct io_zcrx_area *area = io_zcrx_iov_to_area(niov);
50 	unsigned niov_pages_shift;
51 
52 	lockdep_assert(!area->mem.is_dmabuf);
53 
54 	niov_pages_shift = area->ifq->niov_shift - PAGE_SHIFT;
55 	return area->mem.pages[net_iov_idx(niov) << niov_pages_shift];
56 }
57 
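/*
 * Find the largest power-of-two buffer shift that still evenly divides
 * every entry of the area's backing scatterlist.
 */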
58 static int io_area_max_shift(struct io_zcrx_mem *mem)
59 {
60 	struct sg_table *sgt = mem->sgt;
61 	struct scatterlist *sg;
62 	unsigned shift = -1U;
63 	unsigned i;
64 
65 	for_each_sgtable_dma_sg(sgt, sg, i)
66 		shift = min(shift, __ffs(sg->length));
67 	return shift;
68 }
69 
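/*
 * Walk the DMA-mapped scatterlist and hand out a DMA address to every
 * net_iov in the area, carving each sg entry into niov_size chunks.
 */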
70 static int io_populate_area_dma(struct io_zcrx_ifq *ifq,
71 				struct io_zcrx_area *area)
72 {
73 	unsigned niov_size = 1U << ifq->niov_shift;
74 	struct sg_table *sgt = area->mem.sgt;
75 	struct scatterlist *sg;
76 	unsigned i, niov_idx = 0;
77 
78 	for_each_sgtable_dma_sg(sgt, sg, i) {
79 		dma_addr_t dma = sg_dma_address(sg);
80 		unsigned long sg_len = sg_dma_len(sg);
81 
82 		if (WARN_ON_ONCE(sg_len % niov_size))
83 			return -EINVAL;
84 
85 		while (sg_len && niov_idx < area->nia.num_niovs) {
86 			struct net_iov *niov = &area->nia.niovs[niov_idx];
87 
88 			if (net_mp_niov_set_dma_addr(niov, dma))
89 				return -EFAULT;
90 			sg_len -= niov_size;
91 			dma += niov_size;
92 			niov_idx++;
93 		}
94 	}
95 
96 	if (WARN_ON_ONCE(niov_idx != area->nia.num_niovs))
97 		return -EFAULT;
98 	return 0;
99 }
100 
101 static void io_release_dmabuf(struct io_zcrx_mem *mem)
102 {
103 	if (!IS_ENABLED(CONFIG_DMA_SHARED_BUFFER))
104 		return;
105 
106 	if (mem->sgt)
107 		dma_buf_unmap_attachment_unlocked(mem->attach, mem->sgt,
108 						  DMA_FROM_DEVICE);
109 	if (mem->attach)
110 		dma_buf_detach(mem->dmabuf, mem->attach);
111 	if (mem->dmabuf)
112 		dma_buf_put(mem->dmabuf);
113 
114 	mem->sgt = NULL;
115 	mem->attach = NULL;
116 	mem->dmabuf = NULL;
117 }
118 
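/*
 * Attach to and map the dmabuf backing the area. No offset into the
 * buffer is allowed, and the registered length must match the mapped
 * size exactly.
 */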
119 static int io_import_dmabuf(struct io_zcrx_ifq *ifq,
120 			    struct io_zcrx_mem *mem,
121 			    struct io_uring_zcrx_area_reg *area_reg)
122 {
123 	unsigned long off = (unsigned long)area_reg->addr;
124 	unsigned long len = (unsigned long)area_reg->len;
125 	unsigned long total_size = 0;
126 	struct scatterlist *sg;
127 	int dmabuf_fd = area_reg->dmabuf_fd;
128 	int i, ret;
129 
130 	if (off)
131 		return -EINVAL;
132 	if (WARN_ON_ONCE(!ifq->dev))
133 		return -EFAULT;
134 	if (!IS_ENABLED(CONFIG_DMA_SHARED_BUFFER))
135 		return -EINVAL;
136 
137 	mem->is_dmabuf = true;
138 	mem->dmabuf = dma_buf_get(dmabuf_fd);
139 	if (IS_ERR(mem->dmabuf)) {
140 		ret = PTR_ERR(mem->dmabuf);
141 		mem->dmabuf = NULL;
142 		goto err;
143 	}
144 
145 	mem->attach = dma_buf_attach(mem->dmabuf, ifq->dev);
146 	if (IS_ERR(mem->attach)) {
147 		ret = PTR_ERR(mem->attach);
148 		mem->attach = NULL;
149 		goto err;
150 	}
151 
152 	mem->sgt = dma_buf_map_attachment_unlocked(mem->attach, DMA_FROM_DEVICE);
153 	if (IS_ERR(mem->sgt)) {
154 		ret = PTR_ERR(mem->sgt);
155 		mem->sgt = NULL;
156 		goto err;
157 	}
158 
159 	for_each_sgtable_dma_sg(mem->sgt, sg, i)
160 		total_size += sg_dma_len(sg);
161 
162 	if (total_size != len) {
163 		ret = -EINVAL;
164 		goto err;
165 	}
166 
167 	mem->size = len;
168 	return 0;
169 err:
170 	io_release_dmabuf(mem);
171 	return ret;
172 }
173 
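/*
 * Count the pages to charge to memory accounting; consecutive entries
 * backed by the same folio are counted once as the whole folio.
 */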
174 static unsigned long io_count_account_pages(struct page **pages, unsigned nr_pages)
175 {
176 	struct folio *last_folio = NULL;
177 	unsigned long res = 0;
178 	int i;
179 
180 	for (i = 0; i < nr_pages; i++) {
181 		struct folio *folio = page_folio(pages[i]);
182 
183 		if (folio == last_folio)
184 			continue;
185 		last_folio = folio;
186 		res += folio_nr_pages(folio);
187 	}
188 	return res;
189 }
190 
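/*
 * Pin the user pages backing the area, build a scatterlist over them and
 * charge the pinned memory to the user's accounting.
 */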
191 static int io_import_umem(struct io_zcrx_ifq *ifq,
192 			  struct io_zcrx_mem *mem,
193 			  struct io_uring_zcrx_area_reg *area_reg)
194 {
195 	struct page **pages;
196 	int nr_pages, ret;
197 
198 	if (area_reg->dmabuf_fd)
199 		return -EINVAL;
200 	if (!area_reg->addr)
201 		return -EFAULT;
202 	pages = io_pin_pages((unsigned long)area_reg->addr, area_reg->len,
203 				   &nr_pages);
204 	if (IS_ERR(pages))
205 		return PTR_ERR(pages);
206 
207 	ret = sg_alloc_table_from_pages(&mem->page_sg_table, pages, nr_pages,
208 					0, (unsigned long)nr_pages << PAGE_SHIFT,
209 					GFP_KERNEL_ACCOUNT);
210 	if (ret) {
211 		unpin_user_pages(pages, nr_pages);
212 		kvfree(pages);
213 		return ret;
214 	}
215 
216 	mem->account_pages = io_count_account_pages(pages, nr_pages);
217 	ret = io_account_mem(ifq->user, ifq->mm_account, mem->account_pages);
218 	if (ret < 0)
219 		mem->account_pages = 0;
220 
221 	mem->sgt = &mem->page_sg_table;
222 	mem->pages = pages;
223 	mem->nr_folios = nr_pages;
224 	mem->size = area_reg->len;
225 	return ret;
226 }
227 
228 static void io_release_area_mem(struct io_zcrx_mem *mem)
229 {
230 	if (mem->is_dmabuf) {
231 		io_release_dmabuf(mem);
232 		return;
233 	}
234 	if (mem->pages) {
235 		unpin_user_pages(mem->pages, mem->nr_folios);
236 		sg_free_table(mem->sgt);
237 		mem->sgt = NULL;
238 		kvfree(mem->pages);
239 	}
240 }
241 
242 static int io_import_area(struct io_zcrx_ifq *ifq,
243 			  struct io_zcrx_mem *mem,
244 			  struct io_uring_zcrx_area_reg *area_reg)
245 {
246 	int ret;
247 
248 	if (area_reg->flags & ~IO_ZCRX_AREA_SUPPORTED_FLAGS)
249 		return -EINVAL;
250 	if (area_reg->rq_area_token)
251 		return -EINVAL;
252 	if (area_reg->__resv2[0] || area_reg->__resv2[1])
253 		return -EINVAL;
254 
255 	ret = io_validate_user_buf_range(area_reg->addr, area_reg->len);
256 	if (ret)
257 		return ret;
258 	if (area_reg->addr & ~PAGE_MASK || area_reg->len & ~PAGE_MASK)
259 		return -EINVAL;
260 
261 	if (area_reg->flags & IORING_ZCRX_AREA_DMABUF)
262 		return io_import_dmabuf(ifq, mem, area_reg);
263 	return io_import_umem(ifq, mem, area_reg);
264 }
265 
266 static void io_zcrx_unmap_area(struct io_zcrx_ifq *ifq,
267 				struct io_zcrx_area *area)
268 {
269 	int i;
270 
271 	guard(mutex)(&ifq->pp_lock);
272 	if (!area->is_mapped)
273 		return;
274 	area->is_mapped = false;
275 
276 	for (i = 0; i < area->nia.num_niovs; i++)
277 		net_mp_niov_set_dma_addr(&area->nia.niovs[i], 0);
278 
279 	if (area->mem.is_dmabuf) {
280 		io_release_dmabuf(&area->mem);
281 	} else {
282 		dma_unmap_sgtable(ifq->dev, &area->mem.page_sg_table,
283 				  DMA_FROM_DEVICE, IO_DMA_ATTR);
284 	}
285 }
286 
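/*
 * DMA map the area (dmabufs are already mapped at import time) and
 * propagate the addresses into its net_iovs, serialised by ->pp_lock.
 */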
287 static int io_zcrx_map_area(struct io_zcrx_ifq *ifq, struct io_zcrx_area *area)
288 {
289 	int ret;
290 
291 	guard(mutex)(&ifq->pp_lock);
292 	if (area->is_mapped)
293 		return 0;
294 
295 	if (!area->mem.is_dmabuf) {
296 		ret = dma_map_sgtable(ifq->dev, &area->mem.page_sg_table,
297 				      DMA_FROM_DEVICE, IO_DMA_ATTR);
298 		if (ret < 0)
299 			return ret;
300 	}
301 
302 	ret = io_populate_area_dma(ifq, area);
303 	if (ret && !area->mem.is_dmabuf)
304 		dma_unmap_sgtable(ifq->dev, &area->mem.page_sg_table,
305 				  DMA_FROM_DEVICE, IO_DMA_ATTR);
306 	if (ret == 0)
307 		area->is_mapped = true;
308 	return ret;
309 }
310 
311 static void io_zcrx_sync_for_device(struct page_pool *pool,
312 				    struct net_iov *niov)
313 {
314 #if defined(CONFIG_HAS_DMA) && defined(CONFIG_DMA_NEED_SYNC)
315 	dma_addr_t dma_addr;
317 	unsigned niov_size;
318 
319 	if (!dma_dev_need_sync(pool->p.dev))
320 		return;
321 
322 	niov_size = 1U << io_pp_to_ifq(pool)->niov_shift;
323 	dma_addr = page_pool_get_dma_addr_netmem(net_iov_to_netmem(niov));
324 	__dma_sync_single_for_device(pool->p.dev, dma_addr + pool->p.offset,
325 				     niov_size, pool->p.dma_dir);
326 #endif
327 }
328 
329 #define IO_RQ_MAX_ENTRIES		32768
330 
331 #define IO_SKBS_PER_CALL_LIMIT	20
332 
333 struct io_zcrx_args {
334 	struct io_kiocb		*req;
335 	struct io_zcrx_ifq	*ifq;
336 	struct socket		*sock;
337 	unsigned		nr_skbs;
338 };
339 
340 static const struct memory_provider_ops io_uring_pp_zc_ops;
341 
342 static inline atomic_t *io_get_user_counter(struct net_iov *niov)
343 {
344 	struct io_zcrx_area *area = io_zcrx_iov_to_area(niov);
345 
346 	return &area->user_refs[net_iov_idx(niov)];
347 }
348 
349 static bool io_zcrx_put_niov_uref(struct net_iov *niov)
350 {
351 	atomic_t *uref = io_get_user_counter(niov);
352 	int old;
353 
354 	old = atomic_read(uref);
355 	do {
356 		if (unlikely(old == 0))
357 			return false;
358 	} while (!atomic_try_cmpxchg(uref, &old, old - 1));
359 
360 	return true;
361 }
362 
363 static void io_zcrx_get_niov_uref(struct net_iov *niov)
364 {
365 	atomic_inc(io_get_user_counter(niov));
366 }
367 
368 static void io_fill_zcrx_offsets(struct io_uring_zcrx_offsets *offsets)
369 {
370 	offsets->head = offsetof(struct io_uring, head);
371 	offsets->tail = offsetof(struct io_uring, tail);
372 	offsets->rqes = ALIGN(sizeof(struct io_uring), L1_CACHE_BYTES);
373 }
374 
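/*
 * Create the memory region backing the refill ring and fill in the ring
 * head/tail/rqes offsets in the registration structure.
 */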
375 static int io_allocate_rbuf_ring(struct io_ring_ctx *ctx,
376 				 struct io_zcrx_ifq *ifq,
377 				 struct io_uring_zcrx_ifq_reg *reg,
378 				 struct io_uring_region_desc *rd,
379 				 u32 id)
380 {
381 	u64 mmap_offset;
382 	size_t off, size;
383 	void *ptr;
384 	int ret;
385 
386 	io_fill_zcrx_offsets(&reg->offsets);
387 	off = reg->offsets.rqes;
388 	size = off + sizeof(struct io_uring_zcrx_rqe) * reg->rq_entries;
389 	if (size > rd->size)
390 		return -EINVAL;
391 
392 	mmap_offset = IORING_MAP_OFF_ZCRX_REGION;
393 	mmap_offset += id << IORING_OFF_PBUF_SHIFT;
394 
395 	ret = io_create_region(ctx, &ifq->region, rd, mmap_offset);
396 	if (ret < 0)
397 		return ret;
398 
399 	ptr = io_region_get_ptr(&ifq->region);
400 	ifq->rq_ring = (struct io_uring *)ptr;
401 	ifq->rqes = (struct io_uring_zcrx_rqe *)(ptr + off);
402 
403 	return 0;
404 }
405 
406 static void io_free_rbuf_ring(struct io_zcrx_ifq *ifq)
407 {
408 	io_free_region(ifq->user, &ifq->region);
409 	ifq->rq_ring = NULL;
410 	ifq->rqes = NULL;
411 }
412 
413 static void io_zcrx_free_area(struct io_zcrx_ifq *ifq,
414 			      struct io_zcrx_area *area)
415 {
416 	io_zcrx_unmap_area(ifq, area);
417 	io_release_area_mem(&area->mem);
418 
419 	if (area->mem.account_pages)
420 		io_unaccount_mem(ifq->user, ifq->mm_account,
421 				 area->mem.account_pages);
422 
423 	kvfree(area->freelist);
424 	kvfree(area->nia.niovs);
425 	kvfree(area->user_refs);
426 	kfree(area);
427 }
428 
429 static int io_zcrx_append_area(struct io_zcrx_ifq *ifq,
430 				struct io_zcrx_area *area)
431 {
432 	if (ifq->area)
433 		return -EINVAL;
434 	ifq->area = area;
435 	return 0;
436 }
437 
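/*
 * Allocate an area, import its backing memory and carve it into net_iovs
 * of 1 << niov_shift bytes, all initially sitting on the freelist.
 */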
438 static int io_zcrx_create_area(struct io_zcrx_ifq *ifq,
439 			       struct io_uring_zcrx_area_reg *area_reg,
440 			       struct io_uring_zcrx_ifq_reg *reg)
441 {
442 	int buf_size_shift = PAGE_SHIFT;
443 	struct io_zcrx_area *area;
444 	unsigned nr_iovs;
445 	int i, ret;
446 
447 	if (reg->rx_buf_len) {
448 		if (!is_power_of_2(reg->rx_buf_len) ||
449 		     reg->rx_buf_len < PAGE_SIZE)
450 			return -EINVAL;
451 		buf_size_shift = ilog2(reg->rx_buf_len);
452 	}
453 
454 	ret = -ENOMEM;
455 	area = kzalloc_obj(*area);
456 	if (!area)
457 		goto err;
458 	area->ifq = ifq;
459 
460 	ret = io_import_area(ifq, &area->mem, area_reg);
461 	if (ret)
462 		goto err;
463 
464 	if (buf_size_shift > io_area_max_shift(&area->mem)) {
465 		ret = -ERANGE;
466 		goto err;
467 	}
468 
469 	ifq->niov_shift = buf_size_shift;
470 	nr_iovs = area->mem.size >> ifq->niov_shift;
471 	area->nia.num_niovs = nr_iovs;
472 
473 	ret = -ENOMEM;
474 	area->nia.niovs = kvmalloc_objs(area->nia.niovs[0], nr_iovs,
475 					GFP_KERNEL_ACCOUNT | __GFP_ZERO);
476 	if (!area->nia.niovs)
477 		goto err;
478 
479 	area->freelist = kvmalloc_array(nr_iovs, sizeof(area->freelist[0]),
480 					GFP_KERNEL_ACCOUNT | __GFP_ZERO);
481 	if (!area->freelist)
482 		goto err;
483 
484 	area->user_refs = kvmalloc_objs(area->user_refs[0], nr_iovs,
485 					GFP_KERNEL_ACCOUNT | __GFP_ZERO);
486 	if (!area->user_refs)
487 		goto err;
488 
489 	for (i = 0; i < nr_iovs; i++) {
490 		struct net_iov *niov = &area->nia.niovs[i];
491 
492 		niov->owner = &area->nia;
493 		area->freelist[i] = i;
494 		atomic_set(&area->user_refs[i], 0);
495 		niov->type = NET_IOV_IOURING;
496 	}
497 
498 	area->free_count = nr_iovs;
499 	/* we're only supporting one area per ifq for now */
500 	area->area_id = 0;
501 	area_reg->rq_area_token = (u64)area->area_id << IORING_ZCRX_AREA_SHIFT;
502 	spin_lock_init(&area->freelist_lock);
503 
504 	ret = io_zcrx_append_area(ifq, area);
505 	if (!ret)
506 		return 0;
507 err:
508 	if (area)
509 		io_zcrx_free_area(ifq, area);
510 	return ret;
511 }
512 
513 static struct io_zcrx_ifq *io_zcrx_ifq_alloc(struct io_ring_ctx *ctx)
514 {
515 	struct io_zcrx_ifq *ifq;
516 
517 	ifq = kzalloc_obj(*ifq);
518 	if (!ifq)
519 		return NULL;
520 
521 	ifq->if_rxq = -1;
522 	spin_lock_init(&ifq->rq_lock);
523 	mutex_init(&ifq->pp_lock);
524 	refcount_set(&ifq->refs, 1);
525 	refcount_set(&ifq->user_refs, 1);
526 	return ifq;
527 }
528 
529 static void io_zcrx_drop_netdev(struct io_zcrx_ifq *ifq)
530 {
531 	guard(mutex)(&ifq->pp_lock);
532 
533 	if (!ifq->netdev)
534 		return;
535 	netdev_put(ifq->netdev, &ifq->netdev_tracker);
536 	ifq->netdev = NULL;
537 }
538 
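/*
 * Detach the memory provider from the device rx queue and drop the
 * netdev reference taken at registration time.
 */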
539 static void io_close_queue(struct io_zcrx_ifq *ifq)
540 {
541 	struct net_device *netdev;
542 	netdevice_tracker netdev_tracker;
543 	struct pp_memory_provider_params p = {
544 		.mp_ops = &io_uring_pp_zc_ops,
545 		.mp_priv = ifq,
546 	};
547 
548 	scoped_guard(mutex, &ifq->pp_lock) {
549 		netdev = ifq->netdev;
550 		netdev_tracker = ifq->netdev_tracker;
551 		ifq->netdev = NULL;
552 	}
553 
554 	if (netdev) {
555 		if (ifq->if_rxq != -1) {
556 			netdev_lock(netdev);
557 			netif_mp_close_rxq(netdev, ifq->if_rxq, &p);
558 			netdev_unlock(netdev);
559 		}
560 		netdev_put(netdev, &netdev_tracker);
561 	}
562 	ifq->if_rxq = -1;
563 }
564 
565 static void io_zcrx_ifq_free(struct io_zcrx_ifq *ifq)
566 {
567 	io_close_queue(ifq);
568 
569 	if (ifq->area)
570 		io_zcrx_free_area(ifq, ifq->area);
571 	free_uid(ifq->user);
572 	if (ifq->mm_account)
573 		mmdrop(ifq->mm_account);
574 	if (ifq->dev)
575 		put_device(ifq->dev);
576 
577 	io_free_rbuf_ring(ifq);
578 	mutex_destroy(&ifq->pp_lock);
579 	kfree(ifq);
580 }
581 
582 static void io_put_zcrx_ifq(struct io_zcrx_ifq *ifq)
583 {
584 	if (refcount_dec_and_test(&ifq->refs))
585 		io_zcrx_ifq_free(ifq);
586 }
587 
588 static void io_zcrx_return_niov_freelist(struct net_iov *niov)
589 {
590 	struct io_zcrx_area *area = io_zcrx_iov_to_area(niov);
591 
592 	spin_lock_bh(&area->freelist_lock);
593 	area->freelist[area->free_count++] = net_iov_idx(niov);
594 	spin_unlock_bh(&area->freelist_lock);
595 }
596 
597 static void io_zcrx_return_niov(struct net_iov *niov)
598 {
599 	netmem_ref netmem = net_iov_to_netmem(niov);
600 
601 	if (!niov->desc.pp) {
602 		/* allocated by the copy fallback, not attached to a page pool */
603 		io_zcrx_return_niov_freelist(niov);
604 		return;
605 	}
606 	page_pool_put_unrefed_netmem(niov->desc.pp, netmem, -1, false);
607 }
608 
609 static void io_zcrx_scrub(struct io_zcrx_ifq *ifq)
610 {
611 	struct io_zcrx_area *area = ifq->area;
612 	int i;
613 
614 	if (!area)
615 		return;
616 
617 	/* Reclaim all buffers handed out to userspace. */
618 	for (i = 0; i < area->nia.num_niovs; i++) {
619 		struct net_iov *niov = &area->nia.niovs[i];
620 		int nr;
621 
622 		if (!atomic_read(io_get_user_counter(niov)))
623 			continue;
624 		nr = atomic_xchg(io_get_user_counter(niov), 0);
625 		if (nr && !page_pool_unref_netmem(net_iov_to_netmem(niov), nr))
626 			io_zcrx_return_niov(niov);
627 	}
628 }
629 
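/*
 * Drop a userspace reference to the interface queue; the last one closes
 * the rx queue and reclaims buffers still held by userspace.
 */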
630 static void zcrx_unregister(struct io_zcrx_ifq *ifq)
631 {
632 	if (refcount_dec_and_test(&ifq->user_refs)) {
633 		io_close_queue(ifq);
634 		io_zcrx_scrub(ifq);
635 	}
636 	io_put_zcrx_ifq(ifq);
637 }
638 
639 struct io_mapped_region *io_zcrx_get_region(struct io_ring_ctx *ctx,
640 					    unsigned int id)
641 {
642 	struct io_zcrx_ifq *ifq = xa_load(&ctx->zcrx_ctxs, id);
643 
644 	lockdep_assert_held(&ctx->mmap_lock);
645 
646 	return ifq ? &ifq->region : NULL;
647 }
648 
649 static int zcrx_box_release(struct inode *inode, struct file *file)
650 {
651 	struct io_zcrx_ifq *ifq = file->private_data;
652 
653 	if (WARN_ON_ONCE(!ifq))
654 		return -EFAULT;
655 	zcrx_unregister(ifq);
656 	return 0;
657 }
658 
659 static const struct file_operations zcrx_box_fops = {
660 	.owner		= THIS_MODULE,
661 	.release	= zcrx_box_release,
662 };
663 
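/*
 * Wrap the interface queue into an anonymous file and hand the fd to
 * userspace, so it can later be registered with a ring via
 * ZCRX_REG_IMPORT.
 */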
664 static int zcrx_export(struct io_ring_ctx *ctx, struct io_zcrx_ifq *ifq,
665 		       struct zcrx_ctrl *ctrl, void __user *arg)
666 {
667 	struct zcrx_ctrl_export *ce = &ctrl->zc_export;
668 	struct file *file;
669 	int fd = -1;
670 
671 	if (!mem_is_zero(ce, sizeof(*ce)))
672 		return -EINVAL;
673 	fd = get_unused_fd_flags(O_CLOEXEC);
674 	if (fd < 0)
675 		return fd;
676 
677 	ce->zcrx_fd = fd;
678 	if (copy_to_user(arg, ctrl, sizeof(*ctrl))) {
679 		put_unused_fd(fd);
680 		return -EFAULT;
681 	}
682 
683 	refcount_inc(&ifq->refs);
684 	refcount_inc(&ifq->user_refs);
685 
686 	file = anon_inode_create_getfile("[zcrx]", &zcrx_box_fops,
687 					 ifq, O_CLOEXEC, NULL);
688 	if (IS_ERR(file)) {
689 		put_unused_fd(fd);
690 		zcrx_unregister(ifq);
691 		return PTR_ERR(file);
692 	}
693 
694 	fd_install(fd, file);
695 	return 0;
696 }
697 
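/*
 * Take a zcrx fd previously created by zcrx_export (passed in ->if_idx)
 * and attach the already set up interface queue to this ring under a
 * freshly allocated id.
 */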
698 static int import_zcrx(struct io_ring_ctx *ctx,
699 		       struct io_uring_zcrx_ifq_reg __user *arg,
700 		       struct io_uring_zcrx_ifq_reg *reg)
701 {
702 	struct io_zcrx_ifq *ifq;
703 	struct file *file;
704 	int fd, ret;
705 	u32 id;
706 
707 	if (!(ctx->flags & IORING_SETUP_DEFER_TASKRUN))
708 		return -EINVAL;
709 	if (!(ctx->flags & (IORING_SETUP_CQE32|IORING_SETUP_CQE_MIXED)))
710 		return -EINVAL;
711 	if (reg->if_rxq || reg->rq_entries || reg->area_ptr || reg->region_ptr)
712 		return -EINVAL;
713 	if (reg->flags & ~ZCRX_REG_IMPORT)
714 		return -EINVAL;
715 
716 	fd = reg->if_idx;
717 	CLASS(fd, f)(fd);
718 	if (fd_empty(f))
719 		return -EBADF;
720 
721 	file = fd_file(f);
722 	if (file->f_op != &zcrx_box_fops || !file->private_data)
723 		return -EBADF;
724 
725 	ifq = file->private_data;
726 	refcount_inc(&ifq->refs);
727 	refcount_inc(&ifq->user_refs);
728 
729 	scoped_guard(mutex, &ctx->mmap_lock) {
730 		ret = xa_alloc(&ctx->zcrx_ctxs, &id, NULL, xa_limit_31b, GFP_KERNEL);
731 		if (ret)
732 			goto err;
733 	}
734 
735 	reg->zcrx_id = id;
736 	io_fill_zcrx_offsets(&reg->offsets);
737 	if (copy_to_user(arg, reg, sizeof(*reg))) {
738 		ret = -EFAULT;
739 		goto err_xa_erase;
740 	}
741 
742 	scoped_guard(mutex, &ctx->mmap_lock) {
743 		ret = -ENOMEM;
744 		if (xa_store(&ctx->zcrx_ctxs, id, ifq, GFP_KERNEL))
745 			goto err_xa_erase;
746 	}
747 
748 	return 0;
749 err_xa_erase:
750 	scoped_guard(mutex, &ctx->mmap_lock)
751 		xa_erase(&ctx->zcrx_ctxs, id);
752 err:
753 	zcrx_unregister(ifq);
754 	return ret;
755 }
756 
757 int io_register_zcrx_ifq(struct io_ring_ctx *ctx,
758 			  struct io_uring_zcrx_ifq_reg __user *arg)
759 {
760 	struct pp_memory_provider_params mp_param = {};
761 	struct io_uring_zcrx_area_reg area;
762 	struct io_uring_zcrx_ifq_reg reg;
763 	struct io_uring_region_desc rd;
764 	struct io_zcrx_ifq *ifq;
765 	int ret;
766 	u32 id;
767 
768 	/*
769 	 * CAP_NET_ADMIN is required: 1. registration takes over an interface
770 	 * rx queue; 2. it can observe data destined for sockets of other tasks.
771 	 */
772 	if (!capable(CAP_NET_ADMIN))
773 		return -EPERM;
774 
775 	/* mandatory io_uring features for zc rx */
776 	if (!(ctx->flags & IORING_SETUP_DEFER_TASKRUN))
777 		return -EINVAL;
778 	if (!(ctx->flags & (IORING_SETUP_CQE32|IORING_SETUP_CQE_MIXED)))
779 		return -EINVAL;
780 	if (copy_from_user(&reg, arg, sizeof(reg)))
781 		return -EFAULT;
782 	if (!mem_is_zero(&reg.__resv, sizeof(reg.__resv)) || reg.zcrx_id)
783 		return -EINVAL;
784 	if (reg.flags & ZCRX_REG_IMPORT)
785 		return import_zcrx(ctx, arg, &reg);
786 	if (copy_from_user(&rd, u64_to_user_ptr(reg.region_ptr), sizeof(rd)))
787 		return -EFAULT;
788 	if (reg.if_rxq == -1 || !reg.rq_entries || reg.flags)
789 		return -EINVAL;
790 	if (reg.rq_entries > IO_RQ_MAX_ENTRIES) {
791 		if (!(ctx->flags & IORING_SETUP_CLAMP))
792 			return -EINVAL;
793 		reg.rq_entries = IO_RQ_MAX_ENTRIES;
794 	}
795 	reg.rq_entries = roundup_pow_of_two(reg.rq_entries);
796 
797 	if (copy_from_user(&area, u64_to_user_ptr(reg.area_ptr), sizeof(area)))
798 		return -EFAULT;
799 
800 	ifq = io_zcrx_ifq_alloc(ctx);
801 	if (!ifq)
802 		return -ENOMEM;
803 
804 	if (ctx->user) {
805 		get_uid(ctx->user);
806 		ifq->user = ctx->user;
807 	}
808 	if (ctx->mm_account) {
809 		mmgrab(ctx->mm_account);
810 		ifq->mm_account = ctx->mm_account;
811 	}
812 	ifq->rq_entries = reg.rq_entries;
813 
814 	scoped_guard(mutex, &ctx->mmap_lock) {
815 		/* preallocate id */
816 		ret = xa_alloc(&ctx->zcrx_ctxs, &id, NULL, xa_limit_31b, GFP_KERNEL);
817 		if (ret)
818 			goto ifq_free;
819 	}
820 
821 	ret = io_allocate_rbuf_ring(ctx, ifq, &reg, &rd, id);
822 	if (ret)
823 		goto err;
824 
825 	ifq->netdev = netdev_get_by_index_lock(current->nsproxy->net_ns, reg.if_idx);
826 	if (!ifq->netdev) {
827 		ret = -ENODEV;
828 		goto err;
829 	}
830 	netdev_hold(ifq->netdev, &ifq->netdev_tracker, GFP_KERNEL);
831 
832 	ifq->dev = netdev_queue_get_dma_dev(ifq->netdev, reg.if_rxq,
833 					    NETDEV_QUEUE_TYPE_RX);
834 	if (!ifq->dev) {
835 		ret = -EOPNOTSUPP;
836 		goto netdev_put_unlock;
837 	}
838 	get_device(ifq->dev);
839 
840 	ret = io_zcrx_create_area(ifq, &area, &reg);
841 	if (ret)
842 		goto netdev_put_unlock;
843 
844 	if (reg.rx_buf_len)
845 		mp_param.rx_page_size = 1U << ifq->niov_shift;
846 	mp_param.mp_ops = &io_uring_pp_zc_ops;
847 	mp_param.mp_priv = ifq;
848 	ret = netif_mp_open_rxq(ifq->netdev, reg.if_rxq, &mp_param, NULL);
849 	if (ret)
850 		goto netdev_put_unlock;
851 	netdev_unlock(ifq->netdev);
852 	ifq->if_rxq = reg.if_rxq;
853 
854 	reg.zcrx_id = id;
855 
856 	scoped_guard(mutex, &ctx->mmap_lock) {
857 		/* publish ifq */
858 		ret = -ENOMEM;
859 		if (xa_store(&ctx->zcrx_ctxs, id, ifq, GFP_KERNEL))
860 			goto err;
861 	}
862 
863 	reg.rx_buf_len = 1U << ifq->niov_shift;
864 
865 	if (copy_to_user(arg, &reg, sizeof(reg)) ||
866 	    copy_to_user(u64_to_user_ptr(reg.region_ptr), &rd, sizeof(rd)) ||
867 	    copy_to_user(u64_to_user_ptr(reg.area_ptr), &area, sizeof(area))) {
868 		ret = -EFAULT;
869 		goto err;
870 	}
871 	return 0;
872 netdev_put_unlock:
873 	netdev_unlock(ifq->netdev);
874 err:
875 	scoped_guard(mutex, &ctx->mmap_lock)
876 		xa_erase(&ctx->zcrx_ctxs, id);
877 ifq_free:
878 	zcrx_unregister(ifq);
879 	return ret;
880 }
881 
882 static struct net_iov *__io_zcrx_get_free_niov(struct io_zcrx_area *area)
883 {
884 	unsigned niov_idx;
885 
886 	lockdep_assert_held(&area->freelist_lock);
887 
888 	niov_idx = area->freelist[--area->free_count];
889 	return &area->nia.niovs[niov_idx];
890 }
891 
892 void io_unregister_zcrx_ifqs(struct io_ring_ctx *ctx)
893 {
894 	struct io_zcrx_ifq *ifq;
895 
896 	lockdep_assert_held(&ctx->uring_lock);
897 
898 	while (1) {
899 		scoped_guard(mutex, &ctx->mmap_lock) {
900 			unsigned long id = 0;
901 
902 			ifq = xa_find(&ctx->zcrx_ctxs, &id, ULONG_MAX, XA_PRESENT);
903 			if (ifq)
904 				xa_erase(&ctx->zcrx_ctxs, id);
905 		}
906 		if (!ifq)
907 			break;
908 		zcrx_unregister(ifq);
909 	}
910 
911 	xa_destroy(&ctx->zcrx_ctxs);
912 }
913 
914 static inline u32 io_zcrx_rqring_entries(struct io_zcrx_ifq *ifq)
915 {
916 	u32 entries;
917 
918 	entries = smp_load_acquire(&ifq->rq_ring->tail) - ifq->cached_rq_head;
919 	return min(entries, ifq->rq_entries);
920 }
921 
922 static struct io_uring_zcrx_rqe *io_zcrx_get_rqe(struct io_zcrx_ifq *ifq,
923 						 unsigned mask)
924 {
925 	unsigned int idx = ifq->cached_rq_head++ & mask;
926 
927 	return &ifq->rqes[idx];
928 }
929 
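/*
 * Decode a refill ring entry into its net_iov, rejecting entries with
 * out-of-range area or buffer indices.
 */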
930 static inline bool io_parse_rqe(struct io_uring_zcrx_rqe *rqe,
931 				struct io_zcrx_ifq *ifq,
932 				struct net_iov **ret_niov)
933 {
934 	__u64 off = READ_ONCE(rqe->off);
935 	unsigned niov_idx, area_idx;
936 	struct io_zcrx_area *area;
937 
938 	area_idx = off >> IORING_ZCRX_AREA_SHIFT;
939 	niov_idx = (off & ~IORING_ZCRX_AREA_MASK) >> ifq->niov_shift;
940 
941 	if (unlikely(rqe->__pad || area_idx))
942 		return false;
943 	area = ifq->area;
944 
945 	if (unlikely(niov_idx >= area->nia.num_niovs))
946 		return false;
947 	niov_idx = array_index_nospec(niov_idx, area->nia.num_niovs);
948 
949 	*ret_niov = &area->nia.niovs[niov_idx];
950 	return true;
951 }
952 
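/*
 * Consume refill ring entries: drop the user reference on each returned
 * buffer and, once its page pool references are gone, put it straight
 * back into the pool's allocation cache.
 */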
953 static void io_zcrx_ring_refill(struct page_pool *pp,
954 				struct io_zcrx_ifq *ifq)
955 {
956 	unsigned int mask = ifq->rq_entries - 1;
957 	unsigned int entries;
958 
959 	guard(spinlock_bh)(&ifq->rq_lock);
960 
961 	entries = io_zcrx_rqring_entries(ifq);
962 	entries = min_t(unsigned, entries, PP_ALLOC_CACHE_REFILL);
963 	if (unlikely(!entries))
964 		return;
965 
966 	do {
967 		struct io_uring_zcrx_rqe *rqe = io_zcrx_get_rqe(ifq, mask);
968 		struct net_iov *niov;
969 		netmem_ref netmem;
970 
971 		if (!io_parse_rqe(rqe, ifq, &niov))
972 			continue;
973 		if (!io_zcrx_put_niov_uref(niov))
974 			continue;
975 
976 		netmem = net_iov_to_netmem(niov);
977 		if (!page_pool_unref_and_test(netmem))
978 			continue;
979 
980 		if (unlikely(niov->desc.pp != pp)) {
981 			io_zcrx_return_niov(niov);
982 			continue;
983 		}
984 
985 		io_zcrx_sync_for_device(pp, niov);
986 		net_mp_netmem_place_in_cache(pp, netmem);
987 	} while (--entries);
988 
989 	smp_store_release(&ifq->rq_ring->head, ifq->cached_rq_head);
990 }
991 
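/*
 * Slow path refill from the area freelist, used when the refill ring
 * didn't provide enough buffers to fill the allocation cache.
 */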
992 static void io_zcrx_refill_slow(struct page_pool *pp, struct io_zcrx_ifq *ifq)
993 {
994 	struct io_zcrx_area *area = ifq->area;
995 
996 	spin_lock_bh(&area->freelist_lock);
997 	while (area->free_count && pp->alloc.count < PP_ALLOC_CACHE_REFILL) {
998 		struct net_iov *niov = __io_zcrx_get_free_niov(area);
999 		netmem_ref netmem = net_iov_to_netmem(niov);
1000 
1001 		net_mp_niov_set_page_pool(pp, niov);
1002 		io_zcrx_sync_for_device(pp, niov);
1003 		net_mp_netmem_place_in_cache(pp, netmem);
1004 	}
1005 	spin_unlock_bh(&area->freelist_lock);
1006 }
1007 
1008 static netmem_ref io_pp_zc_alloc_netmems(struct page_pool *pp, gfp_t gfp)
1009 {
1010 	struct io_zcrx_ifq *ifq = io_pp_to_ifq(pp);
1011 
1012 	/* pp should already be ensuring that */
1013 	if (unlikely(pp->alloc.count))
1014 		goto out_return;
1015 
1016 	io_zcrx_ring_refill(pp, ifq);
1017 	if (likely(pp->alloc.count))
1018 		goto out_return;
1019 
1020 	io_zcrx_refill_slow(pp, ifq);
1021 	if (!pp->alloc.count)
1022 		return 0;
1023 out_return:
1024 	return pp->alloc.cache[--pp->alloc.count];
1025 }
1026 
1027 static bool io_pp_zc_release_netmem(struct page_pool *pp, netmem_ref netmem)
1028 {
1029 	struct net_iov *niov;
1030 
1031 	if (WARN_ON_ONCE(!netmem_is_net_iov(netmem)))
1032 		return false;
1033 
1034 	niov = netmem_to_net_iov(netmem);
1035 	net_mp_niov_clear_page_pool(niov);
1036 	io_zcrx_return_niov_freelist(niov);
1037 	return false;
1038 }
1039 
1040 static int io_pp_zc_init(struct page_pool *pp)
1041 {
1042 	struct io_zcrx_ifq *ifq = io_pp_to_ifq(pp);
1043 	int ret;
1044 
1045 	if (WARN_ON_ONCE(!ifq))
1046 		return -EINVAL;
1047 	if (WARN_ON_ONCE(ifq->dev != pp->p.dev))
1048 		return -EINVAL;
1049 	if (WARN_ON_ONCE(!pp->dma_map))
1050 		return -EOPNOTSUPP;
1051 	if (pp->p.order + PAGE_SHIFT != ifq->niov_shift)
1052 		return -EINVAL;
1053 	if (pp->p.dma_dir != DMA_FROM_DEVICE)
1054 		return -EOPNOTSUPP;
1055 
1056 	ret = io_zcrx_map_area(ifq, ifq->area);
1057 	if (ret)
1058 		return ret;
1059 
1060 	refcount_inc(&ifq->refs);
1061 	return 0;
1062 }
1063 
1064 static void io_pp_zc_destroy(struct page_pool *pp)
1065 {
1066 	io_put_zcrx_ifq(io_pp_to_ifq(pp));
1067 }
1068 
1069 static int io_pp_nl_fill(void *mp_priv, struct sk_buff *rsp,
1070 			 struct netdev_rx_queue *rxq)
1071 {
1072 	struct nlattr *nest;
1073 	int type;
1074 
1075 	type = rxq ? NETDEV_A_QUEUE_IO_URING : NETDEV_A_PAGE_POOL_IO_URING;
1076 	nest = nla_nest_start(rsp, type);
1077 	if (!nest)
1078 		return -EMSGSIZE;
1079 	nla_nest_end(rsp, nest);
1080 
1081 	return 0;
1082 }
1083 
1084 static void io_pp_uninstall(void *mp_priv, struct netdev_rx_queue *rxq)
1085 {
1086 	struct pp_memory_provider_params *p = &rxq->mp_params;
1087 	struct io_zcrx_ifq *ifq = mp_priv;
1088 
1089 	io_zcrx_drop_netdev(ifq);
1090 	if (ifq->area)
1091 		io_zcrx_unmap_area(ifq, ifq->area);
1092 
1093 	p->mp_ops = NULL;
1094 	p->mp_priv = NULL;
1095 }
1096 
1097 static const struct memory_provider_ops io_uring_pp_zc_ops = {
1098 	.alloc_netmems		= io_pp_zc_alloc_netmems,
1099 	.release_netmem		= io_pp_zc_release_netmem,
1100 	.init			= io_pp_zc_init,
1101 	.destroy		= io_pp_zc_destroy,
1102 	.nl_fill		= io_pp_nl_fill,
1103 	.uninstall		= io_pp_uninstall,
1104 };
1105 
1106 static unsigned zcrx_parse_rq(netmem_ref *netmem_array, unsigned nr,
1107 			      struct io_zcrx_ifq *zcrx)
1108 {
1109 	unsigned int mask = zcrx->rq_entries - 1;
1110 	unsigned int i;
1111 
1112 	nr = min(nr, io_zcrx_rqring_entries(zcrx));
1113 	for (i = 0; i < nr; i++) {
1114 		struct io_uring_zcrx_rqe *rqe = io_zcrx_get_rqe(zcrx, mask);
1115 		struct net_iov *niov;
1116 
1117 		if (!io_parse_rqe(rqe, zcrx, &niov))
1118 			break;
1119 		netmem_array[i] = net_iov_to_netmem(niov);
1120 	}
1121 
1122 	smp_store_release(&zcrx->rq_ring->head, zcrx->cached_rq_head);
1123 	return i;
1124 }
1125 
1126 #define ZCRX_FLUSH_BATCH 32
1127 
1128 static void zcrx_return_buffers(netmem_ref *netmems, unsigned nr)
1129 {
1130 	unsigned i;
1131 
1132 	for (i = 0; i < nr; i++) {
1133 		netmem_ref netmem = netmems[i];
1134 		struct net_iov *niov = netmem_to_net_iov(netmem);
1135 
1136 		if (!io_zcrx_put_niov_uref(niov))
1137 			continue;
1138 		if (!page_pool_unref_and_test(netmem))
1139 			continue;
1140 		io_zcrx_return_niov(niov);
1141 	}
1142 }
1143 
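/*
 * Drain the refill ring in batches, returning every parsed buffer to its
 * page pool or to the area freelist.
 */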
1144 static int zcrx_flush_rq(struct io_ring_ctx *ctx, struct io_zcrx_ifq *zcrx,
1145 			 struct zcrx_ctrl *ctrl)
1146 {
1147 	struct zcrx_ctrl_flush_rq *frq = &ctrl->zc_flush;
1148 	netmem_ref netmems[ZCRX_FLUSH_BATCH];
1149 	unsigned total = 0;
1150 	unsigned nr;
1151 
1152 	if (!mem_is_zero(&frq->__resv, sizeof(frq->__resv)))
1153 		return -EINVAL;
1154 
1155 	do {
1156 		scoped_guard(spinlock_bh, &zcrx->rq_lock) {
1157 			nr = zcrx_parse_rq(netmems, ZCRX_FLUSH_BATCH, zcrx);
1158 			zcrx_return_buffers(netmems, nr);
1159 		}
1160 
1161 		total += nr;
1162 
1163 		if (fatal_signal_pending(current))
1164 			break;
1165 		cond_resched();
1166 	} while (nr == ZCRX_FLUSH_BATCH && total < zcrx->rq_entries);
1167 
1168 	return 0;
1169 }
1170 
1171 int io_zcrx_ctrl(struct io_ring_ctx *ctx, void __user *arg, unsigned nr_args)
1172 {
1173 	struct zcrx_ctrl ctrl;
1174 	struct io_zcrx_ifq *zcrx;
1175 
1176 	if (nr_args)
1177 		return -EINVAL;
1178 	if (copy_from_user(&ctrl, arg, sizeof(ctrl)))
1179 		return -EFAULT;
1180 	if (!mem_is_zero(&ctrl.__resv, sizeof(ctrl.__resv)))
1181 		return -EINVAL;
1182 
1183 	zcrx = xa_load(&ctx->zcrx_ctxs, ctrl.zcrx_id);
1184 	if (!zcrx)
1185 		return -ENXIO;
1186 
1187 	switch (ctrl.op) {
1188 	case ZCRX_CTRL_FLUSH_RQ:
1189 		return zcrx_flush_rq(ctx, zcrx, &ctrl);
1190 	case ZCRX_CTRL_EXPORT:
1191 		return zcrx_export(ctx, zcrx, &ctrl, arg);
1192 	}
1193 
1194 	return -EOPNOTSUPP;
1195 }
1196 
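/*
 * Post a zero-copy completion; the extended part of the CQE carries the
 * area token and the offset of the received data within the area.
 */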
1197 static bool io_zcrx_queue_cqe(struct io_kiocb *req, struct net_iov *niov,
1198 			      struct io_zcrx_ifq *ifq, int off, int len)
1199 {
1200 	struct io_ring_ctx *ctx = req->ctx;
1201 	struct io_uring_zcrx_cqe *rcqe;
1202 	struct io_zcrx_area *area;
1203 	struct io_uring_cqe *cqe;
1204 	u64 offset;
1205 
1206 	if (!io_defer_get_uncommited_cqe(ctx, &cqe))
1207 		return false;
1208 
1209 	cqe->user_data = req->cqe.user_data;
1210 	cqe->res = len;
1211 	cqe->flags = IORING_CQE_F_MORE;
1212 	if (ctx->flags & IORING_SETUP_CQE_MIXED)
1213 		cqe->flags |= IORING_CQE_F_32;
1214 
1215 	area = io_zcrx_iov_to_area(niov);
1216 	offset = off + (net_iov_idx(niov) << ifq->niov_shift);
1217 	rcqe = (struct io_uring_zcrx_cqe *)(cqe + 1);
1218 	rcqe->off = offset + ((u64)area->area_id << IORING_ZCRX_AREA_SHIFT);
1219 	rcqe->__pad = 0;
1220 	return true;
1221 }
1222 
1223 static struct net_iov *io_alloc_fallback_niov(struct io_zcrx_ifq *ifq)
1224 {
1225 	struct io_zcrx_area *area = ifq->area;
1226 	struct net_iov *niov = NULL;
1227 
1228 	if (area->mem.is_dmabuf)
1229 		return NULL;
1230 
1231 	spin_lock_bh(&area->freelist_lock);
1232 	if (area->free_count)
1233 		niov = __io_zcrx_get_free_niov(area);
1234 	spin_unlock_bh(&area->freelist_lock);
1235 
1236 	if (niov)
1237 		page_pool_fragment_netmem(net_iov_to_netmem(niov), 1);
1238 	return niov;
1239 }
1240 
1241 struct io_copy_cache {
1242 	struct page		*page;
1243 	unsigned long		offset;
1244 	size_t			size;
1245 };
1246 
1247 static ssize_t io_copy_page(struct io_copy_cache *cc, struct page *src_page,
1248 			    unsigned int src_offset, size_t len)
1249 {
1250 	size_t copied = 0;
1251 
1252 	len = min(len, cc->size);
1253 
1254 	while (len) {
1255 		void *src_addr, *dst_addr;
1256 		struct page *dst_page = cc->page;
1257 		unsigned dst_offset = cc->offset;
1258 		size_t n = len;
1259 
1260 		if (folio_test_partial_kmap(page_folio(dst_page)) ||
1261 		    folio_test_partial_kmap(page_folio(src_page))) {
1262 			dst_page += dst_offset / PAGE_SIZE;
1263 			dst_offset = offset_in_page(dst_offset);
1264 			src_page += src_offset / PAGE_SIZE;
1265 			src_offset = offset_in_page(src_offset);
1266 			n = min(PAGE_SIZE - src_offset, PAGE_SIZE - dst_offset);
1267 			n = min(n, len);
1268 		}
1269 
1270 		dst_addr = kmap_local_page(dst_page) + dst_offset;
1271 		src_addr = kmap_local_page(src_page) + src_offset;
1272 
1273 		memcpy(dst_addr, src_addr, n);
1274 
1275 		kunmap_local(src_addr);
1276 		kunmap_local(dst_addr);
1277 
1278 		cc->size -= n;
1279 		cc->offset += n;
1280 		src_offset += n;
1281 		len -= n;
1282 		copied += n;
1283 	}
1284 	return copied;
1285 }
1286 
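/*
 * Copy fallback: grab niovs from the area freelist, copy the data into
 * them a page at a time and post a CQE for each filled buffer.
 */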
1287 static ssize_t io_zcrx_copy_chunk(struct io_kiocb *req, struct io_zcrx_ifq *ifq,
1288 				  struct page *src_page, unsigned int src_offset,
1289 				  size_t len)
1290 {
1291 	size_t copied = 0;
1292 	int ret = 0;
1293 
1294 	while (len) {
1295 		struct io_copy_cache cc;
1296 		struct net_iov *niov;
1297 		size_t n;
1298 
1299 		niov = io_alloc_fallback_niov(ifq);
1300 		if (!niov) {
1301 			ret = -ENOMEM;
1302 			break;
1303 		}
1304 
1305 		cc.page = io_zcrx_iov_page(niov);
1306 		cc.offset = 0;
1307 		cc.size = PAGE_SIZE;
1308 
1309 		n = io_copy_page(&cc, src_page, src_offset, len);
1310 
1311 		if (!io_zcrx_queue_cqe(req, niov, ifq, 0, n)) {
1312 			io_zcrx_return_niov(niov);
1313 			ret = -ENOSPC;
1314 			break;
1315 		}
1316 
1317 		io_zcrx_get_niov_uref(niov);
1318 		src_offset += n;
1319 		len -= n;
1320 		copied += n;
1321 	}
1322 
1323 	return copied ? copied : ret;
1324 }
1325 
1326 static int io_zcrx_copy_frag(struct io_kiocb *req, struct io_zcrx_ifq *ifq,
1327 			     const skb_frag_t *frag, int off, int len)
1328 {
1329 	struct page *page = skb_frag_page(frag);
1330 
1331 	return io_zcrx_copy_chunk(req, ifq, page, off + skb_frag_off(frag), len);
1332 }
1333 
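/*
 * Hand an skb frag to userspace. Regular page frags are bounced through
 * the copy fallback; net_iov frags must belong to this ifq, and page pool
 * and user references are taken to keep the buffer alive while userspace
 * owns it.
 */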
1334 static int io_zcrx_recv_frag(struct io_kiocb *req, struct io_zcrx_ifq *ifq,
1335 			     const skb_frag_t *frag, int off, int len)
1336 {
1337 	struct net_iov *niov;
1338 	struct page_pool *pp;
1339 
1340 	if (unlikely(!skb_frag_is_net_iov(frag)))
1341 		return io_zcrx_copy_frag(req, ifq, frag, off, len);
1342 
1343 	niov = netmem_to_net_iov(frag->netmem);
1344 	pp = niov->desc.pp;
1345 
1346 	if (!pp || pp->mp_ops != &io_uring_pp_zc_ops || io_pp_to_ifq(pp) != ifq)
1347 		return -EFAULT;
1348 
1349 	if (!io_zcrx_queue_cqe(req, niov, ifq, off + skb_frag_off(frag), len))
1350 		return -ENOSPC;
1351 
1352 	/*
1353 	 * Prevent it from being recycled while userspace is accessing it.
1354 	 * It has to be done before grabbing a user reference.
1355 	 */
1356 	page_pool_ref_netmem(net_iov_to_netmem(niov));
1357 	io_zcrx_get_niov_uref(niov);
1358 	return len;
1359 }
1360 
1361 static int
1362 io_zcrx_recv_skb(read_descriptor_t *desc, struct sk_buff *skb,
1363 		 unsigned int offset, size_t len)
1364 {
1365 	struct io_zcrx_args *args = desc->arg.data;
1366 	struct io_zcrx_ifq *ifq = args->ifq;
1367 	struct io_kiocb *req = args->req;
1368 	struct sk_buff *frag_iter;
1369 	unsigned start, start_off = offset;
1370 	int i, copy, end, off;
1371 	int ret = 0;
1372 
1373 	len = min_t(size_t, len, desc->count);
1374 	/*
1375 	 * __tcp_read_sock() always calls io_zcrx_recv_skb one last time, even
1376 	 * if desc->count is already 0. This is caused by the if (offset + 1 !=
1377 	 * skb->len) check. Return early in this case to break out of
1378 	 * __tcp_read_sock().
1379 	 */
1380 	if (!len)
1381 		return 0;
1382 	if (unlikely(args->nr_skbs++ > IO_SKBS_PER_CALL_LIMIT))
1383 		return -EAGAIN;
1384 
1385 	if (unlikely(offset < skb_headlen(skb))) {
1386 		ssize_t copied;
1387 		size_t to_copy;
1388 
1389 		to_copy = min_t(size_t, skb_headlen(skb) - offset, len);
1390 		copied = io_zcrx_copy_chunk(req, ifq, virt_to_page(skb->data),
1391 					    offset_in_page(skb->data) + offset,
1392 					    to_copy);
1393 		if (copied < 0) {
1394 			ret = copied;
1395 			goto out;
1396 		}
1397 		offset += copied;
1398 		len -= copied;
1399 		if (!len)
1400 			goto out;
1401 		if (offset != skb_headlen(skb))
1402 			goto out;
1403 	}
1404 
1405 	start = skb_headlen(skb);
1406 
1407 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1408 		const skb_frag_t *frag;
1409 
1410 		if (WARN_ON(start > offset + len))
1411 			return -EFAULT;
1412 
1413 		frag = &skb_shinfo(skb)->frags[i];
1414 		end = start + skb_frag_size(frag);
1415 
1416 		if (offset < end) {
1417 			copy = end - offset;
1418 			if (copy > len)
1419 				copy = len;
1420 
1421 			off = offset - start;
1422 			ret = io_zcrx_recv_frag(req, ifq, frag, off, copy);
1423 			if (ret < 0)
1424 				goto out;
1425 
1426 			offset += ret;
1427 			len -= ret;
1428 			if (len == 0 || ret != copy)
1429 				goto out;
1430 		}
1431 		start = end;
1432 	}
1433 
1434 	skb_walk_frags(skb, frag_iter) {
1435 		if (WARN_ON(start > offset + len))
1436 			return -EFAULT;
1437 
1438 		end = start + frag_iter->len;
1439 		if (offset < end) {
1440 			size_t count;
1441 
1442 			copy = end - offset;
1443 			if (copy > len)
1444 				copy = len;
1445 
1446 			off = offset - start;
1447 			count = desc->count;
1448 			ret = io_zcrx_recv_skb(desc, frag_iter, off, copy);
1449 			desc->count = count;
1450 			if (ret < 0)
1451 				goto out;
1452 
1453 			offset += ret;
1454 			len -= ret;
1455 			if (len == 0 || ret != copy)
1456 				goto out;
1457 		}
1458 		start = end;
1459 	}
1460 
1461 out:
1462 	if (offset == start_off)
1463 		return ret;
1464 	desc->count -= (offset - start_off);
1465 	return offset - start_off;
1466 }
1467 
1468 static int io_zcrx_tcp_recvmsg(struct io_kiocb *req, struct io_zcrx_ifq *ifq,
1469 				struct sock *sk, int flags,
1470 				unsigned issue_flags, unsigned int *outlen)
1471 {
1472 	unsigned int len = *outlen;
1473 	struct io_zcrx_args args = {
1474 		.req = req,
1475 		.ifq = ifq,
1476 		.sock = sk->sk_socket,
1477 	};
1478 	read_descriptor_t rd_desc = {
1479 		.count = len ? len : UINT_MAX,
1480 		.arg.data = &args,
1481 	};
1482 	int ret;
1483 
1484 	lock_sock(sk);
1485 	ret = tcp_read_sock(sk, &rd_desc, io_zcrx_recv_skb);
1486 	if (len && ret > 0)
1487 		*outlen = len - ret;
1488 	if (ret <= 0) {
1489 		if (ret < 0 || sock_flag(sk, SOCK_DONE))
1490 			goto out;
1491 		if (sk->sk_err)
1492 			ret = sock_error(sk);
1493 		else if (sk->sk_shutdown & RCV_SHUTDOWN)
1494 			goto out;
1495 		else if (sk->sk_state == TCP_CLOSE)
1496 			ret = -ENOTCONN;
1497 		else
1498 			ret = -EAGAIN;
1499 	} else if (unlikely(args.nr_skbs > IO_SKBS_PER_CALL_LIMIT) &&
1500 		   (issue_flags & IO_URING_F_MULTISHOT)) {
1501 		ret = IOU_REQUEUE;
1502 	} else if (sock_flag(sk, SOCK_DONE)) {
1503 		/* Make it retry until it finally gets 0. */
1504 		if (issue_flags & IO_URING_F_MULTISHOT)
1505 			ret = IOU_REQUEUE;
1506 		else
1507 			ret = -EAGAIN;
1508 	}
1509 out:
1510 	release_sock(sk);
1511 	return ret;
1512 }
1513 
1514 int io_zcrx_recv(struct io_kiocb *req, struct io_zcrx_ifq *ifq,
1515 		 struct socket *sock, unsigned int flags,
1516 		 unsigned issue_flags, unsigned int *len)
1517 {
1518 	struct sock *sk = sock->sk;
1519 	const struct proto *prot = READ_ONCE(sk->sk_prot);
1520 
1521 	if (prot->recvmsg != tcp_recvmsg)
1522 		return -EPROTONOSUPPORT;
1523 
1524 	sock_rps_record_flow(sk);
1525 	return io_zcrx_tcp_recvmsg(req, ifq, sk, flags, issue_flags, len);
1526 }
1527