xref: /linux/io_uring/zcrx.c (revision 041c16acbafbdd8c089cc077c78e060322dde18c)
1 // SPDX-License-Identifier: GPL-2.0
2 #include <linux/kernel.h>
3 #include <linux/errno.h>
4 #include <linux/dma-map-ops.h>
5 #include <linux/mm.h>
6 #include <linux/nospec.h>
7 #include <linux/io_uring.h>
8 #include <linux/netdevice.h>
9 #include <linux/rtnetlink.h>
10 #include <linux/skbuff_ref.h>
11 #include <linux/anon_inodes.h>
12 
13 #include <net/page_pool/helpers.h>
14 #include <net/page_pool/memory_provider.h>
15 #include <net/netlink.h>
16 #include <net/netdev_queues.h>
17 #include <net/netdev_rx_queue.h>
18 #include <net/tcp.h>
19 #include <net/rps.h>
20 
21 #include <trace/events/page_pool.h>
22 
23 #include <uapi/linux/io_uring.h>
24 
25 #include "io_uring.h"
26 #include "kbuf.h"
27 #include "memmap.h"
28 #include "zcrx.h"
29 #include "rsrc.h"
30 
31 #define IO_ZCRX_AREA_SUPPORTED_FLAGS	(IORING_ZCRX_AREA_DMABUF)
32 
33 #define IO_DMA_ATTR (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING)
34 
35 static inline struct io_zcrx_ifq *io_pp_to_ifq(struct page_pool *pp)
36 {
37 	return pp->mp_priv;
38 }
39 
40 static inline struct io_zcrx_area *io_zcrx_iov_to_area(const struct net_iov *niov)
41 {
42 	struct net_iov_area *owner = net_iov_owner(niov);
43 
44 	return container_of(owner, struct io_zcrx_area, nia);
45 }
46 
47 static inline struct page *io_zcrx_iov_page(const struct net_iov *niov)
48 {
49 	struct io_zcrx_area *area = io_zcrx_iov_to_area(niov);
50 	unsigned niov_pages_shift;
51 
52 	lockdep_assert(!area->mem.is_dmabuf);
53 
54 	niov_pages_shift = area->ifq->niov_shift - PAGE_SHIFT;
55 	return area->mem.pages[net_iov_idx(niov) << niov_pages_shift];
56 }
57 
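/*
 * Largest buffer size shift the area's scatterlist can support: every
 * entry length must be a multiple of the niov size, so take the lowest
 * set bit across all entry lengths.
 */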
58 static int io_area_max_shift(struct io_zcrx_mem *mem)
59 {
60 	struct sg_table *sgt = mem->sgt;
61 	struct scatterlist *sg;
62 	unsigned shift = -1U;
63 	unsigned i;
64 
65 	for_each_sgtable_dma_sg(sgt, sg, i)
66 		shift = min(shift, __ffs(sg->length));
67 	return shift;
68 }
69 
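/*
 * Hand out DMA addresses from the mapped scatterlist to the area's niovs
 * in niov-sized chunks; every sg entry must be niov aligned and the table
 * has to cover all niovs exactly.
 */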
70 static int io_populate_area_dma(struct io_zcrx_ifq *ifq,
71 				struct io_zcrx_area *area)
72 {
73 	unsigned niov_size = 1U << ifq->niov_shift;
74 	struct sg_table *sgt = area->mem.sgt;
75 	struct scatterlist *sg;
76 	unsigned i, niov_idx = 0;
77 
78 	for_each_sgtable_dma_sg(sgt, sg, i) {
79 		dma_addr_t dma = sg_dma_address(sg);
80 		unsigned long sg_len = sg_dma_len(sg);
81 
82 		if (WARN_ON_ONCE(sg_len % niov_size))
83 			return -EINVAL;
84 
85 		while (sg_len && niov_idx < area->nia.num_niovs) {
86 			struct net_iov *niov = &area->nia.niovs[niov_idx];
87 
88 			if (net_mp_niov_set_dma_addr(niov, dma))
89 				return -EFAULT;
90 			sg_len -= niov_size;
91 			dma += niov_size;
92 			niov_idx++;
93 		}
94 	}
95 
96 	if (WARN_ON_ONCE(niov_idx != area->nia.num_niovs))
97 		return -EFAULT;
98 	return 0;
99 }
100 
101 static void io_release_dmabuf(struct io_zcrx_mem *mem)
102 {
103 	if (!IS_ENABLED(CONFIG_DMA_SHARED_BUFFER))
104 		return;
105 
106 	if (mem->sgt)
107 		dma_buf_unmap_attachment_unlocked(mem->attach, mem->sgt,
108 						  DMA_FROM_DEVICE);
109 	if (mem->attach)
110 		dma_buf_detach(mem->dmabuf, mem->attach);
111 	if (mem->dmabuf)
112 		dma_buf_put(mem->dmabuf);
113 
114 	mem->sgt = NULL;
115 	mem->attach = NULL;
116 	mem->dmabuf = NULL;
117 }
118 
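/*
 * Import a dmabuf backed area: grab the dmabuf, attach it to the queue's
 * DMA device and map it for device-to-memory transfers. The mapped size
 * must match the registered length exactly.
 */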
119 static int io_import_dmabuf(struct io_zcrx_ifq *ifq,
120 			    struct io_zcrx_mem *mem,
121 			    struct io_uring_zcrx_area_reg *area_reg)
122 {
123 	unsigned long off = (unsigned long)area_reg->addr;
124 	unsigned long len = (unsigned long)area_reg->len;
125 	unsigned long total_size = 0;
126 	struct scatterlist *sg;
127 	int dmabuf_fd = area_reg->dmabuf_fd;
128 	int i, ret;
129 
130 	if (off)
131 		return -EINVAL;
132 	if (WARN_ON_ONCE(!ifq->dev))
133 		return -EFAULT;
134 	if (!IS_ENABLED(CONFIG_DMA_SHARED_BUFFER))
135 		return -EINVAL;
136 
137 	mem->is_dmabuf = true;
138 	mem->dmabuf = dma_buf_get(dmabuf_fd);
139 	if (IS_ERR(mem->dmabuf)) {
140 		ret = PTR_ERR(mem->dmabuf);
141 		mem->dmabuf = NULL;
142 		goto err;
143 	}
144 
145 	mem->attach = dma_buf_attach(mem->dmabuf, ifq->dev);
146 	if (IS_ERR(mem->attach)) {
147 		ret = PTR_ERR(mem->attach);
148 		mem->attach = NULL;
149 		goto err;
150 	}
151 
152 	mem->sgt = dma_buf_map_attachment_unlocked(mem->attach, DMA_FROM_DEVICE);
153 	if (IS_ERR(mem->sgt)) {
154 		ret = PTR_ERR(mem->sgt);
155 		mem->sgt = NULL;
156 		goto err;
157 	}
158 
159 	for_each_sgtable_dma_sg(mem->sgt, sg, i)
160 		total_size += sg_dma_len(sg);
161 
162 	if (total_size != len) {
163 		ret = -EINVAL;
164 		goto err;
165 	}
166 
167 	mem->size = len;
168 	return 0;
169 err:
170 	io_release_dmabuf(mem);
171 	return ret;
172 }
173 
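/*
 * Number of pages to charge to the user's memory accounting, counting
 * each folio only once even if it shows up multiple times in @pages.
 */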
174 static unsigned long io_count_account_pages(struct page **pages, unsigned nr_pages)
175 {
176 	struct folio *last_folio = NULL;
177 	unsigned long res = 0;
178 	int i;
179 
180 	for (i = 0; i < nr_pages; i++) {
181 		struct folio *folio = page_folio(pages[i]);
182 
183 		if (folio == last_folio)
184 			continue;
185 		last_folio = folio;
186 		res += folio_nr_pages(folio);
187 	}
188 	return res;
189 }
190 
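/*
 * Import a user memory backed area: pin the pages, build a scatterlist
 * over them and charge the pinned memory to the user's accounting.
 */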
191 static int io_import_umem(struct io_zcrx_ifq *ifq,
192 			  struct io_zcrx_mem *mem,
193 			  struct io_uring_zcrx_area_reg *area_reg)
194 {
195 	struct page **pages;
196 	int nr_pages, ret;
197 
198 	if (area_reg->dmabuf_fd)
199 		return -EINVAL;
200 	if (!area_reg->addr)
201 		return -EFAULT;
202 	pages = io_pin_pages((unsigned long)area_reg->addr, area_reg->len,
203 				   &nr_pages);
204 	if (IS_ERR(pages))
205 		return PTR_ERR(pages);
206 
207 	ret = sg_alloc_table_from_pages(&mem->page_sg_table, pages, nr_pages,
208 					0, nr_pages << PAGE_SHIFT,
209 					GFP_KERNEL_ACCOUNT);
210 	if (ret) {
211 		unpin_user_pages(pages, nr_pages);
212 		kvfree(pages);
213 		return ret;
214 	}
215 
216 	mem->account_pages = io_count_account_pages(pages, nr_pages);
217 	ret = io_account_mem(ifq->user, ifq->mm_account, mem->account_pages);
218 	if (ret < 0)
219 		mem->account_pages = 0;
220 
221 	mem->sgt = &mem->page_sg_table;
222 	mem->pages = pages;
223 	mem->nr_folios = nr_pages;
224 	mem->size = area_reg->len;
225 	return ret;
226 }
227 
228 static void io_release_area_mem(struct io_zcrx_mem *mem)
229 {
230 	if (mem->is_dmabuf) {
231 		io_release_dmabuf(mem);
232 		return;
233 	}
234 	if (mem->pages) {
235 		unpin_user_pages(mem->pages, mem->nr_folios);
236 		sg_free_table(mem->sgt);
237 		mem->sgt = NULL;
238 		kvfree(mem->pages);
239 	}
240 }
241 
242 static int io_import_area(struct io_zcrx_ifq *ifq,
243 			  struct io_zcrx_mem *mem,
244 			  struct io_uring_zcrx_area_reg *area_reg)
245 {
246 	int ret;
247 
248 	if (area_reg->flags & ~IO_ZCRX_AREA_SUPPORTED_FLAGS)
249 		return -EINVAL;
250 	if (area_reg->rq_area_token)
251 		return -EINVAL;
252 	if (area_reg->__resv2[0] || area_reg->__resv2[1])
253 		return -EINVAL;
254 
255 	ret = io_validate_user_buf_range(area_reg->addr, area_reg->len);
256 	if (ret)
257 		return ret;
258 	if (area_reg->addr & ~PAGE_MASK || area_reg->len & ~PAGE_MASK)
259 		return -EINVAL;
260 
261 	if (area_reg->flags & IORING_ZCRX_AREA_DMABUF)
262 		return io_import_dmabuf(ifq, mem, area_reg);
263 	return io_import_umem(ifq, mem, area_reg);
264 }
265 
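/*
 * Drop the area's DMA state: clear the niov DMA addresses and undo the
 * mapping. pp_lock serialises this against io_zcrx_map_area() called
 * from page pool init.
 */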
266 static void io_zcrx_unmap_area(struct io_zcrx_ifq *ifq,
267 				struct io_zcrx_area *area)
268 {
269 	int i;
270 
271 	guard(mutex)(&ifq->pp_lock);
272 	if (!area->is_mapped)
273 		return;
274 	area->is_mapped = false;
275 
276 	for (i = 0; i < area->nia.num_niovs; i++)
277 		net_mp_niov_set_dma_addr(&area->nia.niovs[i], 0);
278 
279 	if (area->mem.is_dmabuf) {
280 		io_release_dmabuf(&area->mem);
281 	} else {
282 		dma_unmap_sgtable(ifq->dev, &area->mem.page_sg_table,
283 				  DMA_FROM_DEVICE, IO_DMA_ATTR);
284 	}
285 }
286 
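/*
 * DMA map the area (dmabuf areas were already mapped at import time) and
 * distribute addresses to its niovs; a no-op if already mapped.
 */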
287 static int io_zcrx_map_area(struct io_zcrx_ifq *ifq, struct io_zcrx_area *area)
288 {
289 	int ret;
290 
291 	guard(mutex)(&ifq->pp_lock);
292 	if (area->is_mapped)
293 		return 0;
294 
295 	if (!area->mem.is_dmabuf) {
296 		ret = dma_map_sgtable(ifq->dev, &area->mem.page_sg_table,
297 				      DMA_FROM_DEVICE, IO_DMA_ATTR);
298 		if (ret < 0)
299 			return ret;
300 	}
301 
302 	ret = io_populate_area_dma(ifq, area);
303 	if (ret == 0)
304 		area->is_mapped = true;
305 	return ret;
306 }
307 
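/* Sync a buffer for the device before it's handed back for receiving. */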
308 static void io_zcrx_sync_for_device(struct page_pool *pool,
309 				    struct net_iov *niov)
310 {
311 #if defined(CONFIG_HAS_DMA) && defined(CONFIG_DMA_NEED_SYNC)
312 	dma_addr_t dma_addr;
313 	unsigned niov_size;
314 
315 
316 	if (!dma_dev_need_sync(pool->p.dev))
317 		return;
318 
319 	niov_size = 1U << io_pp_to_ifq(pool)->niov_shift;
320 	dma_addr = page_pool_get_dma_addr_netmem(net_iov_to_netmem(niov));
321 	__dma_sync_single_for_device(pool->p.dev, dma_addr + pool->p.offset,
322 				     niov_size, pool->p.dma_dir);
323 #endif
324 }
325 
326 #define IO_RQ_MAX_ENTRIES		32768
327 
328 #define IO_SKBS_PER_CALL_LIMIT	20
329 
330 struct io_zcrx_args {
331 	struct io_kiocb		*req;
332 	struct io_zcrx_ifq	*ifq;
333 	struct socket		*sock;
334 	unsigned		nr_skbs;
335 };
336 
337 static const struct memory_provider_ops io_uring_pp_zc_ops;
338 
339 static inline atomic_t *io_get_user_counter(struct net_iov *niov)
340 {
341 	struct io_zcrx_area *area = io_zcrx_iov_to_area(niov);
342 
343 	return &area->user_refs[net_iov_idx(niov)];
344 }
345 
346 static bool io_zcrx_put_niov_uref(struct net_iov *niov)
347 {
348 	atomic_t *uref = io_get_user_counter(niov);
349 
350 	if (unlikely(!atomic_read(uref)))
351 		return false;
352 	atomic_dec(uref);
353 	return true;
354 }
355 
356 static void io_zcrx_get_niov_uref(struct net_iov *niov)
357 {
358 	atomic_inc(io_get_user_counter(niov));
359 }
360 
361 static void io_fill_zcrx_offsets(struct io_uring_zcrx_offsets *offsets)
362 {
363 	offsets->head = offsetof(struct io_uring, head);
364 	offsets->tail = offsetof(struct io_uring, tail);
365 	offsets->rqes = ALIGN(sizeof(struct io_uring), L1_CACHE_BYTES);
366 }
367 
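/*
 * Set up the user-mappable refill ring region: an io_uring header with
 * head/tail followed by the rqe array at a cache line aligned offset.
 */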
368 static int io_allocate_rbuf_ring(struct io_ring_ctx *ctx,
369 				 struct io_zcrx_ifq *ifq,
370 				 struct io_uring_zcrx_ifq_reg *reg,
371 				 struct io_uring_region_desc *rd,
372 				 u32 id)
373 {
374 	u64 mmap_offset;
375 	size_t off, size;
376 	void *ptr;
377 	int ret;
378 
379 	io_fill_zcrx_offsets(&reg->offsets);
380 	off = reg->offsets.rqes;
381 	size = off + sizeof(struct io_uring_zcrx_rqe) * reg->rq_entries;
382 	if (size > rd->size)
383 		return -EINVAL;
384 
385 	mmap_offset = IORING_MAP_OFF_ZCRX_REGION;
386 	mmap_offset += id << IORING_OFF_PBUF_SHIFT;
387 
388 	ret = io_create_region(ctx, &ifq->region, rd, mmap_offset);
389 	if (ret < 0)
390 		return ret;
391 
392 	ptr = io_region_get_ptr(&ifq->region);
393 	ifq->rq_ring = (struct io_uring *)ptr;
394 	ifq->rqes = (struct io_uring_zcrx_rqe *)(ptr + off);
395 
396 	return 0;
397 }
398 
399 static void io_free_rbuf_ring(struct io_zcrx_ifq *ifq)
400 {
401 	io_free_region(ifq->user, &ifq->region);
402 	ifq->rq_ring = NULL;
403 	ifq->rqes = NULL;
404 }
405 
406 static void io_zcrx_free_area(struct io_zcrx_ifq *ifq,
407 			      struct io_zcrx_area *area)
408 {
409 	io_zcrx_unmap_area(ifq, area);
410 	io_release_area_mem(&area->mem);
411 
412 	if (area->mem.account_pages)
413 		io_unaccount_mem(ifq->user, ifq->mm_account,
414 				 area->mem.account_pages);
415 
416 	kvfree(area->freelist);
417 	kvfree(area->nia.niovs);
418 	kvfree(area->user_refs);
419 	kfree(area);
420 }
421 
422 static int io_zcrx_append_area(struct io_zcrx_ifq *ifq,
423 				struct io_zcrx_area *area)
424 {
425 	if (ifq->area)
426 		return -EINVAL;
427 	ifq->area = area;
428 	return 0;
429 }
430 
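/*
 * Create the area backing an ifq: import the memory (umem or dmabuf),
 * derive the buffer size from rx_buf_len (PAGE_SIZE by default, bounded
 * by the DMA granularity) and initialise niovs, freelist and user refs.
 */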
431 static int io_zcrx_create_area(struct io_zcrx_ifq *ifq,
432 			       struct io_uring_zcrx_area_reg *area_reg,
433 			       struct io_uring_zcrx_ifq_reg *reg)
434 {
435 	int buf_size_shift = PAGE_SHIFT;
436 	struct io_zcrx_area *area;
437 	unsigned nr_iovs;
438 	int i, ret;
439 
440 	if (reg->rx_buf_len) {
441 		if (!is_power_of_2(reg->rx_buf_len) ||
442 		     reg->rx_buf_len < PAGE_SIZE)
443 			return -EINVAL;
444 		buf_size_shift = ilog2(reg->rx_buf_len);
445 	}
446 
447 	ret = -ENOMEM;
448 	area = kzalloc(sizeof(*area), GFP_KERNEL);
449 	if (!area)
450 		goto err;
451 	area->ifq = ifq;
452 
453 	ret = io_import_area(ifq, &area->mem, area_reg);
454 	if (ret)
455 		goto err;
456 
457 	if (buf_size_shift > io_area_max_shift(&area->mem)) {
458 		ret = -ERANGE;
459 		goto err;
460 	}
461 
462 	ifq->niov_shift = buf_size_shift;
463 	nr_iovs = area->mem.size >> ifq->niov_shift;
464 	area->nia.num_niovs = nr_iovs;
465 
466 	ret = -ENOMEM;
467 	area->nia.niovs = kvmalloc_array(nr_iovs, sizeof(area->nia.niovs[0]),
468 					 GFP_KERNEL_ACCOUNT | __GFP_ZERO);
469 	if (!area->nia.niovs)
470 		goto err;
471 
472 	area->freelist = kvmalloc_array(nr_iovs, sizeof(area->freelist[0]),
473 					GFP_KERNEL_ACCOUNT | __GFP_ZERO);
474 	if (!area->freelist)
475 		goto err;
476 
477 	area->user_refs = kvmalloc_array(nr_iovs, sizeof(area->user_refs[0]),
478 					GFP_KERNEL_ACCOUNT | __GFP_ZERO);
479 	if (!area->user_refs)
480 		goto err;
481 
482 	for (i = 0; i < nr_iovs; i++) {
483 		struct net_iov *niov = &area->nia.niovs[i];
484 
485 		niov->owner = &area->nia;
486 		area->freelist[i] = i;
487 		atomic_set(&area->user_refs[i], 0);
488 		niov->type = NET_IOV_IOURING;
489 	}
490 
491 	area->free_count = nr_iovs;
492 	/* we're only supporting one area per ifq for now */
493 	area->area_id = 0;
494 	area_reg->rq_area_token = (u64)area->area_id << IORING_ZCRX_AREA_SHIFT;
495 	spin_lock_init(&area->freelist_lock);
496 
497 	ret = io_zcrx_append_area(ifq, area);
498 	if (!ret)
499 		return 0;
500 err:
501 	if (area)
502 		io_zcrx_free_area(ifq, area);
503 	return ret;
504 }
505 
506 static struct io_zcrx_ifq *io_zcrx_ifq_alloc(struct io_ring_ctx *ctx)
507 {
508 	struct io_zcrx_ifq *ifq;
509 
510 	ifq = kzalloc(sizeof(*ifq), GFP_KERNEL);
511 	if (!ifq)
512 		return NULL;
513 
514 	ifq->if_rxq = -1;
515 	spin_lock_init(&ifq->rq_lock);
516 	mutex_init(&ifq->pp_lock);
517 	refcount_set(&ifq->refs, 1);
518 	refcount_set(&ifq->user_refs, 1);
519 	return ifq;
520 }
521 
522 static void io_zcrx_drop_netdev(struct io_zcrx_ifq *ifq)
523 {
524 	guard(mutex)(&ifq->pp_lock);
525 
526 	if (!ifq->netdev)
527 		return;
528 	netdev_put(ifq->netdev, &ifq->netdev_tracker);
529 	ifq->netdev = NULL;
530 }
531 
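/*
 * Unbind the memory provider from the hardware RX queue and drop the
 * netdev reference; if_rxq is reset so repeated calls are no-ops.
 */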
532 static void io_close_queue(struct io_zcrx_ifq *ifq)
533 {
534 	struct net_device *netdev;
535 	netdevice_tracker netdev_tracker;
536 	struct pp_memory_provider_params p = {
537 		.mp_ops = &io_uring_pp_zc_ops,
538 		.mp_priv = ifq,
539 	};
540 
541 	if (ifq->if_rxq == -1)
542 		return;
543 
544 	scoped_guard(mutex, &ifq->pp_lock) {
545 		netdev = ifq->netdev;
546 		netdev_tracker = ifq->netdev_tracker;
547 		ifq->netdev = NULL;
548 	}
549 
550 	if (netdev) {
551 		net_mp_close_rxq(netdev, ifq->if_rxq, &p);
552 		netdev_put(netdev, &netdev_tracker);
553 	}
554 	ifq->if_rxq = -1;
555 }
556 
557 static void io_zcrx_ifq_free(struct io_zcrx_ifq *ifq)
558 {
559 	io_close_queue(ifq);
560 
561 	if (ifq->area)
562 		io_zcrx_free_area(ifq, ifq->area);
563 	free_uid(ifq->user);
564 	if (ifq->mm_account)
565 		mmdrop(ifq->mm_account);
566 	if (ifq->dev)
567 		put_device(ifq->dev);
568 
569 	io_free_rbuf_ring(ifq);
570 	mutex_destroy(&ifq->pp_lock);
571 	kfree(ifq);
572 }
573 
574 static void io_put_zcrx_ifq(struct io_zcrx_ifq *ifq)
575 {
576 	if (refcount_dec_and_test(&ifq->refs))
577 		io_zcrx_ifq_free(ifq);
578 }
579 
580 static void io_zcrx_return_niov_freelist(struct net_iov *niov)
581 {
582 	struct io_zcrx_area *area = io_zcrx_iov_to_area(niov);
583 
584 	spin_lock_bh(&area->freelist_lock);
585 	area->freelist[area->free_count++] = net_iov_idx(niov);
586 	spin_unlock_bh(&area->freelist_lock);
587 }
588 
589 static void io_zcrx_return_niov(struct net_iov *niov)
590 {
591 	netmem_ref netmem = net_iov_to_netmem(niov);
592 
593 	if (!niov->desc.pp) {
594 		/* niovs allocated by the copy fallback aren't owned by a page pool */
595 		io_zcrx_return_niov_freelist(niov);
596 		return;
597 	}
598 	page_pool_put_unrefed_netmem(niov->desc.pp, netmem, -1, false);
599 }
600 
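/*
 * Reclaim buffers still held by user space on teardown: fold the user
 * refcount back into the page pool reference and return niovs that end
 * up fully unreferenced.
 */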
601 static void io_zcrx_scrub(struct io_zcrx_ifq *ifq)
602 {
603 	struct io_zcrx_area *area = ifq->area;
604 	int i;
605 
606 	if (!area)
607 		return;
608 
609 	/* Reclaim back all buffers given to the user space. */
610 	/* Reclaim all buffers handed out to user space. */
611 		struct net_iov *niov = &area->nia.niovs[i];
612 		int nr;
613 
614 		if (!atomic_read(io_get_user_counter(niov)))
615 			continue;
616 		nr = atomic_xchg(io_get_user_counter(niov), 0);
617 		if (nr && !page_pool_unref_netmem(net_iov_to_netmem(niov), nr))
618 			io_zcrx_return_niov(niov);
619 	}
620 }
621 
622 static void zcrx_unregister(struct io_zcrx_ifq *ifq)
623 {
624 	if (refcount_dec_and_test(&ifq->user_refs)) {
625 		io_close_queue(ifq);
626 		io_zcrx_scrub(ifq);
627 	}
628 	io_put_zcrx_ifq(ifq);
629 }
630 
631 struct io_mapped_region *io_zcrx_get_region(struct io_ring_ctx *ctx,
632 					    unsigned int id)
633 {
634 	struct io_zcrx_ifq *ifq = xa_load(&ctx->zcrx_ctxs, id);
635 
636 	lockdep_assert_held(&ctx->mmap_lock);
637 
638 	return ifq ? &ifq->region : NULL;
639 }
640 
641 static int zcrx_box_release(struct inode *inode, struct file *file)
642 {
643 	struct io_zcrx_ifq *ifq = file->private_data;
644 
645 	if (WARN_ON_ONCE(!ifq))
646 		return -EFAULT;
647 	zcrx_unregister(ifq);
648 	return 0;
649 }
650 
651 static const struct file_operations zcrx_box_fops = {
652 	.owner		= THIS_MODULE,
653 	.release	= zcrx_box_release,
654 };
655 
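/*
 * Export the ifq through an anonymous file so that another ring can
 * import it; the fd is copied back to user space before being installed.
 */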
656 static int zcrx_export(struct io_ring_ctx *ctx, struct io_zcrx_ifq *ifq,
657 		       struct zcrx_ctrl *ctrl, void __user *arg)
658 {
659 	struct zcrx_ctrl_export *ce = &ctrl->zc_export;
660 	struct file *file;
661 	int fd = -1;
662 
663 	if (!mem_is_zero(ce, sizeof(*ce)))
664 		return -EINVAL;
665 	fd = get_unused_fd_flags(O_CLOEXEC);
666 	if (fd < 0)
667 		return fd;
668 
669 	ce->zcrx_fd = fd;
670 	if (copy_to_user(arg, ctrl, sizeof(*ctrl))) {
671 		put_unused_fd(fd);
672 		return -EFAULT;
673 	}
674 
675 	refcount_inc(&ifq->refs);
676 	refcount_inc(&ifq->user_refs);
677 
678 	file = anon_inode_create_getfile("[zcrx]", &zcrx_box_fops,
679 					 ifq, O_CLOEXEC, NULL);
680 	if (IS_ERR(file)) {
681 		put_unused_fd(fd);
682 		zcrx_unregister(ifq);
683 		return PTR_ERR(file);
684 	}
685 
686 	fd_install(fd, file);
687 	return 0;
688 }
689 
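/*
 * Adopt an ifq exported by another ring: reg->if_idx carries the zcrx fd
 * here. Take references, reserve an id in ctx->zcrx_ctxs and publish the
 * entry only once the registration info has reached user space.
 */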
690 static int import_zcrx(struct io_ring_ctx *ctx,
691 		       struct io_uring_zcrx_ifq_reg __user *arg,
692 		       struct io_uring_zcrx_ifq_reg *reg)
693 {
694 	struct io_zcrx_ifq *ifq;
695 	struct file *file;
696 	int fd, ret;
697 	u32 id;
698 
699 	if (!(ctx->flags & IORING_SETUP_DEFER_TASKRUN))
700 		return -EINVAL;
701 	if (!(ctx->flags & (IORING_SETUP_CQE32|IORING_SETUP_CQE_MIXED)))
702 		return -EINVAL;
703 	if (reg->if_rxq || reg->rq_entries || reg->area_ptr || reg->region_ptr)
704 		return -EINVAL;
705 
706 	fd = reg->if_idx;
707 	CLASS(fd, f)(fd);
708 	if (fd_empty(f))
709 		return -EBADF;
710 
711 	file = fd_file(f);
712 	if (file->f_op != &zcrx_box_fops || !file->private_data)
713 		return -EBADF;
714 
715 	ifq = file->private_data;
716 	refcount_inc(&ifq->refs);
717 	refcount_inc(&ifq->user_refs);
718 
719 	scoped_guard(mutex, &ctx->mmap_lock) {
720 		ret = xa_alloc(&ctx->zcrx_ctxs, &id, NULL, xa_limit_31b, GFP_KERNEL);
721 		if (ret)
722 			goto err;
723 	}
724 
725 	reg->zcrx_id = id;
726 	io_fill_zcrx_offsets(&reg->offsets);
727 	if (copy_to_user(arg, reg, sizeof(*reg))) {
728 		ret = -EFAULT;
729 		goto err_xa_erase;
730 	}
731 
732 	scoped_guard(mutex, &ctx->mmap_lock) {
733 		ret = -ENOMEM;
734 		if (xa_store(&ctx->zcrx_ctxs, id, ifq, GFP_KERNEL))
735 			goto err_xa_erase;
736 	}
737 
738 	return 0;
739 err_xa_erase:
740 	scoped_guard(mutex, &ctx->mmap_lock)
741 		xa_erase(&ctx->zcrx_ctxs, id);
742 err:
743 	zcrx_unregister(ifq);
744 	return ret;
745 }
746 
747 int io_register_zcrx_ifq(struct io_ring_ctx *ctx,
748 			  struct io_uring_zcrx_ifq_reg __user *arg)
749 {
750 	struct pp_memory_provider_params mp_param = {};
751 	struct io_uring_zcrx_area_reg area;
752 	struct io_uring_zcrx_ifq_reg reg;
753 	struct io_uring_region_desc rd;
754 	struct io_zcrx_ifq *ifq;
755 	int ret;
756 	u32 id;
757 
758 	/*
759 	 * CAP_NET_ADMIN is required: 1) this allocates an interface queue, and
760 	 * 2) it can observe data destined for sockets of other tasks.
761 	 */
762 	if (!capable(CAP_NET_ADMIN))
763 		return -EPERM;
764 
765 	/* mandatory io_uring features for zc rx */
766 	if (!(ctx->flags & IORING_SETUP_DEFER_TASKRUN))
767 		return -EINVAL;
768 	if (!(ctx->flags & (IORING_SETUP_CQE32|IORING_SETUP_CQE_MIXED)))
769 		return -EINVAL;
770 	if (copy_from_user(&reg, arg, sizeof(reg)))
771 		return -EFAULT;
772 	if (!mem_is_zero(&reg.__resv, sizeof(reg.__resv)) || reg.zcrx_id)
773 		return -EINVAL;
774 	if (reg.flags & ZCRX_REG_IMPORT)
775 		return import_zcrx(ctx, arg, &reg);
776 	if (copy_from_user(&rd, u64_to_user_ptr(reg.region_ptr), sizeof(rd)))
777 		return -EFAULT;
778 	if (reg.if_rxq == -1 || !reg.rq_entries || reg.flags)
779 		return -EINVAL;
780 	if (reg.rq_entries > IO_RQ_MAX_ENTRIES) {
781 		if (!(ctx->flags & IORING_SETUP_CLAMP))
782 			return -EINVAL;
783 		reg.rq_entries = IO_RQ_MAX_ENTRIES;
784 	}
785 	reg.rq_entries = roundup_pow_of_two(reg.rq_entries);
786 
787 	if (copy_from_user(&area, u64_to_user_ptr(reg.area_ptr), sizeof(area)))
788 		return -EFAULT;
789 
790 	ifq = io_zcrx_ifq_alloc(ctx);
791 	if (!ifq)
792 		return -ENOMEM;
793 
794 	if (ctx->user) {
795 		get_uid(ctx->user);
796 		ifq->user = ctx->user;
797 	}
798 	if (ctx->mm_account) {
799 		mmgrab(ctx->mm_account);
800 		ifq->mm_account = ctx->mm_account;
801 	}
802 	ifq->rq_entries = reg.rq_entries;
803 
804 	scoped_guard(mutex, &ctx->mmap_lock) {
805 		/* preallocate id */
806 		ret = xa_alloc(&ctx->zcrx_ctxs, &id, NULL, xa_limit_31b, GFP_KERNEL);
807 		if (ret)
808 			goto ifq_free;
809 	}
810 
811 	ret = io_allocate_rbuf_ring(ctx, ifq, &reg, &rd, id);
812 	if (ret)
813 		goto err;
814 
815 	ifq->netdev = netdev_get_by_index_lock(current->nsproxy->net_ns, reg.if_idx);
816 	if (!ifq->netdev) {
817 		ret = -ENODEV;
818 		goto err;
819 	}
820 	netdev_hold(ifq->netdev, &ifq->netdev_tracker, GFP_KERNEL);
821 
822 	ifq->dev = netdev_queue_get_dma_dev(ifq->netdev, reg.if_rxq);
823 	if (!ifq->dev) {
824 		ret = -EOPNOTSUPP;
825 		goto netdev_put_unlock;
826 	}
827 	get_device(ifq->dev);
828 
829 	ret = io_zcrx_create_area(ifq, &area, &reg);
830 	if (ret)
831 		goto netdev_put_unlock;
832 
833 	mp_param.rx_page_size = 1U << ifq->niov_shift;
834 	mp_param.mp_ops = &io_uring_pp_zc_ops;
835 	mp_param.mp_priv = ifq;
836 	ret = __net_mp_open_rxq(ifq->netdev, reg.if_rxq, &mp_param, NULL);
837 	if (ret)
838 		goto netdev_put_unlock;
839 	netdev_unlock(ifq->netdev);
840 	ifq->if_rxq = reg.if_rxq;
841 
842 	reg.zcrx_id = id;
843 
844 	scoped_guard(mutex, &ctx->mmap_lock) {
845 		/* publish ifq */
846 		ret = -ENOMEM;
847 		if (xa_store(&ctx->zcrx_ctxs, id, ifq, GFP_KERNEL))
848 			goto err;
849 	}
850 
851 	reg.rx_buf_len = 1U << ifq->niov_shift;
852 
853 	if (copy_to_user(arg, &reg, sizeof(reg)) ||
854 	    copy_to_user(u64_to_user_ptr(reg.region_ptr), &rd, sizeof(rd)) ||
855 	    copy_to_user(u64_to_user_ptr(reg.area_ptr), &area, sizeof(area))) {
856 		ret = -EFAULT;
857 		goto err;
858 	}
859 	return 0;
860 netdev_put_unlock:
861 	netdev_put(ifq->netdev, &ifq->netdev_tracker);
862 	netdev_unlock(ifq->netdev);
863 err:
864 	scoped_guard(mutex, &ctx->mmap_lock)
865 		xa_erase(&ctx->zcrx_ctxs, id);
866 ifq_free:
867 	io_zcrx_ifq_free(ifq);
868 	return ret;
869 }
870 
871 static struct net_iov *__io_zcrx_get_free_niov(struct io_zcrx_area *area)
872 {
873 	unsigned niov_idx;
874 
875 	lockdep_assert_held(&area->freelist_lock);
876 
877 	niov_idx = area->freelist[--area->free_count];
878 	return &area->nia.niovs[niov_idx];
879 }
880 
881 void io_unregister_zcrx_ifqs(struct io_ring_ctx *ctx)
882 {
883 	struct io_zcrx_ifq *ifq;
884 
885 	lockdep_assert_held(&ctx->uring_lock);
886 
887 	while (1) {
888 		scoped_guard(mutex, &ctx->mmap_lock) {
889 			unsigned long id = 0;
890 
891 			ifq = xa_find(&ctx->zcrx_ctxs, &id, ULONG_MAX, XA_PRESENT);
892 			if (ifq)
893 				xa_erase(&ctx->zcrx_ctxs, id);
894 		}
895 		if (!ifq)
896 			break;
897 		zcrx_unregister(ifq);
898 	}
899 
900 	xa_destroy(&ctx->zcrx_ctxs);
901 }
902 
903 static inline u32 io_zcrx_rqring_entries(struct io_zcrx_ifq *ifq)
904 {
905 	u32 entries;
906 
907 	entries = smp_load_acquire(&ifq->rq_ring->tail) - ifq->cached_rq_head;
908 	return min(entries, ifq->rq_entries);
909 }
910 
911 static struct io_uring_zcrx_rqe *io_zcrx_get_rqe(struct io_zcrx_ifq *ifq,
912 						 unsigned mask)
913 {
914 	unsigned int idx = ifq->cached_rq_head++ & mask;
915 
916 	return &ifq->rqes[idx];
917 }
918 
919 static inline bool io_parse_rqe(struct io_uring_zcrx_rqe *rqe,
920 				struct io_zcrx_ifq *ifq,
921 				struct net_iov **ret_niov)
922 {
923 	unsigned niov_idx, area_idx;
924 	struct io_zcrx_area *area;
925 
926 	area_idx = rqe->off >> IORING_ZCRX_AREA_SHIFT;
927 	niov_idx = (rqe->off & ~IORING_ZCRX_AREA_MASK) >> ifq->niov_shift;
928 
929 	if (unlikely(rqe->__pad || area_idx))
930 		return false;
931 	area = ifq->area;
932 
933 	if (unlikely(niov_idx >= area->nia.num_niovs))
934 		return false;
935 	niov_idx = array_index_nospec(niov_idx, area->nia.num_niovs);
936 
937 	*ret_niov = &area->nia.niovs[niov_idx];
938 	return true;
939 }
940 
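/*
 * Fast path refill from the refill ring: each valid rqe drops the user
 * reference, and once the page pool reference is gone as well the buffer
 * is synced and placed into the pool's allocation cache. Buffers owned
 * by a different pool are returned the long way.
 */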
941 static void io_zcrx_ring_refill(struct page_pool *pp,
942 				struct io_zcrx_ifq *ifq)
943 {
944 	unsigned int mask = ifq->rq_entries - 1;
945 	unsigned int entries;
946 
947 	guard(spinlock_bh)(&ifq->rq_lock);
948 
949 	entries = io_zcrx_rqring_entries(ifq);
950 	entries = min_t(unsigned, entries, PP_ALLOC_CACHE_REFILL);
951 	if (unlikely(!entries))
952 		return;
953 
954 	do {
955 		struct io_uring_zcrx_rqe *rqe = io_zcrx_get_rqe(ifq, mask);
956 		struct net_iov *niov;
957 		netmem_ref netmem;
958 
959 		if (!io_parse_rqe(rqe, ifq, &niov))
960 			continue;
961 		if (!io_zcrx_put_niov_uref(niov))
962 			continue;
963 
964 		netmem = net_iov_to_netmem(niov);
965 		if (!page_pool_unref_and_test(netmem))
966 			continue;
967 
968 		if (unlikely(niov->desc.pp != pp)) {
969 			io_zcrx_return_niov(niov);
970 			continue;
971 		}
972 
973 		io_zcrx_sync_for_device(pp, niov);
974 		net_mp_netmem_place_in_cache(pp, netmem);
975 	} while (--entries);
976 
977 	smp_store_release(&ifq->rq_ring->head, ifq->cached_rq_head);
978 }
979 
980 static void io_zcrx_refill_slow(struct page_pool *pp, struct io_zcrx_ifq *ifq)
981 {
982 	struct io_zcrx_area *area = ifq->area;
983 
984 	spin_lock_bh(&area->freelist_lock);
985 	while (area->free_count && pp->alloc.count < PP_ALLOC_CACHE_REFILL) {
986 		struct net_iov *niov = __io_zcrx_get_free_niov(area);
987 		netmem_ref netmem = net_iov_to_netmem(niov);
988 
989 		net_mp_niov_set_page_pool(pp, niov);
990 		io_zcrx_sync_for_device(pp, niov);
991 		net_mp_netmem_place_in_cache(pp, netmem);
992 	}
993 	spin_unlock_bh(&area->freelist_lock);
994 }
995 
996 static netmem_ref io_pp_zc_alloc_netmems(struct page_pool *pp, gfp_t gfp)
997 {
998 	struct io_zcrx_ifq *ifq = io_pp_to_ifq(pp);
999 
1000 	/* pp should already be ensuring that */
1001 	if (unlikely(pp->alloc.count))
1002 		goto out_return;
1003 
1004 	io_zcrx_ring_refill(pp, ifq);
1005 	if (likely(pp->alloc.count))
1006 		goto out_return;
1007 
1008 	io_zcrx_refill_slow(pp, ifq);
1009 	if (!pp->alloc.count)
1010 		return 0;
1011 out_return:
1012 	return pp->alloc.cache[--pp->alloc.count];
1013 }
1014 
1015 static bool io_pp_zc_release_netmem(struct page_pool *pp, netmem_ref netmem)
1016 {
1017 	struct net_iov *niov;
1018 
1019 	if (WARN_ON_ONCE(!netmem_is_net_iov(netmem)))
1020 		return false;
1021 
1022 	niov = netmem_to_net_iov(netmem);
1023 	net_mp_niov_clear_page_pool(niov);
1024 	io_zcrx_return_niov_freelist(niov);
1025 	return false;
1026 }
1027 
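/*
 * Memory provider init: check that the page pool matches the ifq (same
 * DMA device, dma mapping enabled, order matching the niov size,
 * DMA_FROM_DEVICE), map the area and pin the ifq with an extra reference.
 */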
1028 static int io_pp_zc_init(struct page_pool *pp)
1029 {
1030 	struct io_zcrx_ifq *ifq = io_pp_to_ifq(pp);
1031 	int ret;
1032 
1033 	if (WARN_ON_ONCE(!ifq))
1034 		return -EINVAL;
1035 	if (WARN_ON_ONCE(ifq->dev != pp->p.dev))
1036 		return -EINVAL;
1037 	if (WARN_ON_ONCE(!pp->dma_map))
1038 		return -EOPNOTSUPP;
1039 	if (pp->p.order + PAGE_SHIFT != ifq->niov_shift)
1040 		return -EINVAL;
1041 	if (pp->p.dma_dir != DMA_FROM_DEVICE)
1042 		return -EOPNOTSUPP;
1043 
1044 	ret = io_zcrx_map_area(ifq, ifq->area);
1045 	if (ret)
1046 		return ret;
1047 
1048 	refcount_inc(&ifq->refs);
1049 	return 0;
1050 }
1051 
1052 static void io_pp_zc_destroy(struct page_pool *pp)
1053 {
1054 	io_put_zcrx_ifq(io_pp_to_ifq(pp));
1055 }
1056 
1057 static int io_pp_nl_fill(void *mp_priv, struct sk_buff *rsp,
1058 			 struct netdev_rx_queue *rxq)
1059 {
1060 	struct nlattr *nest;
1061 	int type;
1062 
1063 	type = rxq ? NETDEV_A_QUEUE_IO_URING : NETDEV_A_PAGE_POOL_IO_URING;
1064 	nest = nla_nest_start(rsp, type);
1065 	if (!nest)
1066 		return -EMSGSIZE;
1067 	nla_nest_end(rsp, nest);
1068 
1069 	return 0;
1070 }
1071 
1072 static void io_pp_uninstall(void *mp_priv, struct netdev_rx_queue *rxq)
1073 {
1074 	struct pp_memory_provider_params *p = &rxq->mp_params;
1075 	struct io_zcrx_ifq *ifq = mp_priv;
1076 
1077 	io_zcrx_drop_netdev(ifq);
1078 	if (ifq->area)
1079 		io_zcrx_unmap_area(ifq, ifq->area);
1080 
1081 	p->mp_ops = NULL;
1082 	p->mp_priv = NULL;
1083 }
1084 
1085 static const struct memory_provider_ops io_uring_pp_zc_ops = {
1086 	.alloc_netmems		= io_pp_zc_alloc_netmems,
1087 	.release_netmem		= io_pp_zc_release_netmem,
1088 	.init			= io_pp_zc_init,
1089 	.destroy		= io_pp_zc_destroy,
1090 	.nl_fill		= io_pp_nl_fill,
1091 	.uninstall		= io_pp_uninstall,
1092 };
1093 
1094 static unsigned zcrx_parse_rq(netmem_ref *netmem_array, unsigned nr,
1095 			      struct io_zcrx_ifq *zcrx)
1096 {
1097 	unsigned int mask = zcrx->rq_entries - 1;
1098 	unsigned int i;
1099 
1100 	nr = min(nr, io_zcrx_rqring_entries(zcrx));
1101 	for (i = 0; i < nr; i++) {
1102 		struct io_uring_zcrx_rqe *rqe = io_zcrx_get_rqe(zcrx, mask);
1103 		struct net_iov *niov;
1104 
1105 		if (!io_parse_rqe(rqe, zcrx, &niov))
1106 			break;
1107 		netmem_array[i] = net_iov_to_netmem(niov);
1108 	}
1109 
1110 	smp_store_release(&zcrx->rq_ring->head, zcrx->cached_rq_head);
1111 	return i;
1112 }
1113 
1114 #define ZCRX_FLUSH_BATCH 32
1115 
1116 static void zcrx_return_buffers(netmem_ref *netmems, unsigned nr)
1117 {
1118 	unsigned i;
1119 
1120 	for (i = 0; i < nr; i++) {
1121 		netmem_ref netmem = netmems[i];
1122 		struct net_iov *niov = netmem_to_net_iov(netmem);
1123 
1124 		if (!io_zcrx_put_niov_uref(niov))
1125 			continue;
1126 		if (!page_pool_unref_and_test(netmem))
1127 			continue;
1128 		io_zcrx_return_niov(niov);
1129 	}
1130 }
1131 
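/*
 * Drain the refill ring outside of page pool context: parse rqes in
 * batches, drop user references and free buffers that become fully
 * unreferenced, rescheduling between batches.
 */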
1132 static int zcrx_flush_rq(struct io_ring_ctx *ctx, struct io_zcrx_ifq *zcrx,
1133 			 struct zcrx_ctrl *ctrl)
1134 {
1135 	struct zcrx_ctrl_flush_rq *frq = &ctrl->zc_flush;
1136 	netmem_ref netmems[ZCRX_FLUSH_BATCH];
1137 	unsigned total = 0;
1138 	unsigned nr;
1139 
1140 	if (!mem_is_zero(&frq->__resv, sizeof(frq->__resv)))
1141 		return -EINVAL;
1142 
1143 	do {
1144 		scoped_guard(spinlock_bh, &zcrx->rq_lock) {
1145 			nr = zcrx_parse_rq(netmems, ZCRX_FLUSH_BATCH, zcrx);
1146 			zcrx_return_buffers(netmems, nr);
1147 		}
1148 
1149 		total += nr;
1150 
1151 		if (fatal_signal_pending(current))
1152 			break;
1153 		cond_resched();
1154 	} while (nr == ZCRX_FLUSH_BATCH && total < zcrx->rq_entries);
1155 
1156 	return 0;
1157 }
1158 
1159 int io_zcrx_ctrl(struct io_ring_ctx *ctx, void __user *arg, unsigned nr_args)
1160 {
1161 	struct zcrx_ctrl ctrl;
1162 	struct io_zcrx_ifq *zcrx;
1163 
1164 	if (nr_args)
1165 		return -EINVAL;
1166 	if (copy_from_user(&ctrl, arg, sizeof(ctrl)))
1167 		return -EFAULT;
1168 	if (!mem_is_zero(&ctrl.__resv, sizeof(ctrl.__resv)))
1169 		return -EFAULT;
1170 
1171 	zcrx = xa_load(&ctx->zcrx_ctxs, ctrl.zcrx_id);
1172 	if (!zcrx)
1173 		return -ENXIO;
1174 
1175 	switch (ctrl.op) {
1176 	case ZCRX_CTRL_FLUSH_RQ:
1177 		return zcrx_flush_rq(ctx, zcrx, &ctrl);
1178 	case ZCRX_CTRL_EXPORT:
1179 		return zcrx_export(ctx, zcrx, &ctrl, arg);
1180 	}
1181 
1182 	return -EOPNOTSUPP;
1183 }
1184 
1185 static bool io_zcrx_queue_cqe(struct io_kiocb *req, struct net_iov *niov,
1186 			      struct io_zcrx_ifq *ifq, int off, int len)
1187 {
1188 	struct io_ring_ctx *ctx = req->ctx;
1189 	struct io_uring_zcrx_cqe *rcqe;
1190 	struct io_zcrx_area *area;
1191 	struct io_uring_cqe *cqe;
1192 	u64 offset;
1193 
1194 	if (!io_defer_get_uncommited_cqe(ctx, &cqe))
1195 		return false;
1196 
1197 	cqe->user_data = req->cqe.user_data;
1198 	cqe->res = len;
1199 	cqe->flags = IORING_CQE_F_MORE;
1200 	if (ctx->flags & IORING_SETUP_CQE_MIXED)
1201 		cqe->flags |= IORING_CQE_F_32;
1202 
1203 	area = io_zcrx_iov_to_area(niov);
1204 	offset = off + (net_iov_idx(niov) << ifq->niov_shift);
1205 	rcqe = (struct io_uring_zcrx_cqe *)(cqe + 1);
1206 	rcqe->off = offset + ((u64)area->area_id << IORING_ZCRX_AREA_SHIFT);
1207 	rcqe->__pad = 0;
1208 	return true;
1209 }
1210 
1211 static struct net_iov *io_alloc_fallback_niov(struct io_zcrx_ifq *ifq)
1212 {
1213 	struct io_zcrx_area *area = ifq->area;
1214 	struct net_iov *niov = NULL;
1215 
1216 	if (area->mem.is_dmabuf)
1217 		return NULL;
1218 
1219 	spin_lock_bh(&area->freelist_lock);
1220 	if (area->free_count)
1221 		niov = __io_zcrx_get_free_niov(area);
1222 	spin_unlock_bh(&area->freelist_lock);
1223 
1224 	if (niov)
1225 		page_pool_fragment_netmem(net_iov_to_netmem(niov), 1);
1226 	return niov;
1227 }
1228 
1229 struct io_copy_cache {
1230 	struct page		*page;
1231 	unsigned long		offset;
1232 	size_t			size;
1233 };
1234 
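/*
 * Copy into the cached destination page. If either side is a folio that
 * can only be kmapped page by page, clamp the copy to the current page
 * boundaries on both sides.
 */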
1235 static ssize_t io_copy_page(struct io_copy_cache *cc, struct page *src_page,
1236 			    unsigned int src_offset, size_t len)
1237 {
1238 	size_t copied = 0;
1239 
1240 	len = min(len, cc->size);
1241 
1242 	while (len) {
1243 		void *src_addr, *dst_addr;
1244 		struct page *dst_page = cc->page;
1245 		unsigned dst_offset = cc->offset;
1246 		size_t n = len;
1247 
1248 		if (folio_test_partial_kmap(page_folio(dst_page)) ||
1249 		    folio_test_partial_kmap(page_folio(src_page))) {
1250 			dst_page += dst_offset / PAGE_SIZE;
1251 			dst_offset = offset_in_page(dst_offset);
1252 			src_page += src_offset / PAGE_SIZE;
1253 			src_offset = offset_in_page(src_offset);
1254 			n = min(PAGE_SIZE - src_offset, PAGE_SIZE - dst_offset);
1255 			n = min(n, len);
1256 		}
1257 
1258 		dst_addr = kmap_local_page(dst_page) + dst_offset;
1259 		src_addr = kmap_local_page(src_page) + src_offset;
1260 
1261 		memcpy(dst_addr, src_addr, n);
1262 
1263 		kunmap_local(src_addr);
1264 		kunmap_local(dst_addr);
1265 
1266 		cc->size -= n;
1267 		cc->offset += n;
1268 		src_offset += n;
1269 		len -= n;
1270 		copied += n;
1271 	}
1272 	return copied;
1273 }
1274 
1275 static ssize_t io_zcrx_copy_chunk(struct io_kiocb *req, struct io_zcrx_ifq *ifq,
1276 				  struct page *src_page, unsigned int src_offset,
1277 				  size_t len)
1278 {
1279 	size_t copied = 0;
1280 	int ret = 0;
1281 
1282 	while (len) {
1283 		struct io_copy_cache cc;
1284 		struct net_iov *niov;
1285 		size_t n;
1286 
1287 		niov = io_alloc_fallback_niov(ifq);
1288 		if (!niov) {
1289 			ret = -ENOMEM;
1290 			break;
1291 		}
1292 
1293 		cc.page = io_zcrx_iov_page(niov);
1294 		cc.offset = 0;
1295 		cc.size = PAGE_SIZE;
1296 
1297 		n = io_copy_page(&cc, src_page, src_offset, len);
1298 
1299 		if (!io_zcrx_queue_cqe(req, niov, ifq, 0, n)) {
1300 			io_zcrx_return_niov(niov);
1301 			ret = -ENOSPC;
1302 			break;
1303 		}
1304 
1305 		io_zcrx_get_niov_uref(niov);
1306 		src_offset += n;
1307 		len -= n;
1308 		copied += n;
1309 	}
1310 
1311 	return copied ? copied : ret;
1312 }
1313 
1314 static int io_zcrx_copy_frag(struct io_kiocb *req, struct io_zcrx_ifq *ifq,
1315 			     const skb_frag_t *frag, int off, int len)
1316 {
1317 	struct page *page = skb_frag_page(frag);
1318 
1319 	return io_zcrx_copy_chunk(req, ifq, page, off + skb_frag_off(frag), len);
1320 }
1321 
1322 static int io_zcrx_recv_frag(struct io_kiocb *req, struct io_zcrx_ifq *ifq,
1323 			     const skb_frag_t *frag, int off, int len)
1324 {
1325 	struct net_iov *niov;
1326 	struct page_pool *pp;
1327 
1328 	if (unlikely(!skb_frag_is_net_iov(frag)))
1329 		return io_zcrx_copy_frag(req, ifq, frag, off, len);
1330 
1331 	niov = netmem_to_net_iov(frag->netmem);
1332 	pp = niov->desc.pp;
1333 
1334 	if (!pp || pp->mp_ops != &io_uring_pp_zc_ops || io_pp_to_ifq(pp) != ifq)
1335 		return -EFAULT;
1336 
1337 	if (!io_zcrx_queue_cqe(req, niov, ifq, off + skb_frag_off(frag), len))
1338 		return -ENOSPC;
1339 
1340 	/*
1341 	 * Prevent it from being recycled while userspace is accessing it.
1342 	 * It has to be done before grabbing a user reference.
1343 	 */
1344 	page_pool_ref_netmem(net_iov_to_netmem(niov));
1345 	io_zcrx_get_niov_uref(niov);
1346 	return len;
1347 }
1348 
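/*
 * tcp_read_sock() callback: the linear part of the skb goes through the
 * copy fallback, net_iov frags are handed to user space by reference via
 * zcrx CQEs, and frag_list skbs are walked recursively. Returns the
 * number of bytes consumed, or an error if nothing was consumed.
 */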
1349 static int
1350 io_zcrx_recv_skb(read_descriptor_t *desc, struct sk_buff *skb,
1351 		 unsigned int offset, size_t len)
1352 {
1353 	struct io_zcrx_args *args = desc->arg.data;
1354 	struct io_zcrx_ifq *ifq = args->ifq;
1355 	struct io_kiocb *req = args->req;
1356 	struct sk_buff *frag_iter;
1357 	unsigned start, start_off = offset;
1358 	int i, copy, end, off;
1359 	int ret = 0;
1360 
1361 	len = min_t(size_t, len, desc->count);
1362 	/*
1363 	 * __tcp_read_sock() always calls io_zcrx_recv_skb one last time, even
1364 	 * if desc->count is already 0. This is caused by the if (offset + 1 !=
1365 	 * skb->len) check. Return early in this case to break out of
1366 	 * __tcp_read_sock().
1367 	 */
1368 	if (!len)
1369 		return 0;
1370 	if (unlikely(args->nr_skbs++ > IO_SKBS_PER_CALL_LIMIT))
1371 		return -EAGAIN;
1372 
1373 	if (unlikely(offset < skb_headlen(skb))) {
1374 		ssize_t copied;
1375 		size_t to_copy;
1376 
1377 		to_copy = min_t(size_t, skb_headlen(skb) - offset, len);
1378 		copied = io_zcrx_copy_chunk(req, ifq, virt_to_page(skb->data),
1379 					    offset_in_page(skb->data) + offset,
1380 					    to_copy);
1381 		if (copied < 0) {
1382 			ret = copied;
1383 			goto out;
1384 		}
1385 		offset += copied;
1386 		len -= copied;
1387 		if (!len)
1388 			goto out;
1389 		if (offset != skb_headlen(skb))
1390 			goto out;
1391 	}
1392 
1393 	start = skb_headlen(skb);
1394 
1395 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1396 		const skb_frag_t *frag;
1397 
1398 		if (WARN_ON(start > offset + len))
1399 			return -EFAULT;
1400 
1401 		frag = &skb_shinfo(skb)->frags[i];
1402 		end = start + skb_frag_size(frag);
1403 
1404 		if (offset < end) {
1405 			copy = end - offset;
1406 			if (copy > len)
1407 				copy = len;
1408 
1409 			off = offset - start;
1410 			ret = io_zcrx_recv_frag(req, ifq, frag, off, copy);
1411 			if (ret < 0)
1412 				goto out;
1413 
1414 			offset += ret;
1415 			len -= ret;
1416 			if (len == 0 || ret != copy)
1417 				goto out;
1418 		}
1419 		start = end;
1420 	}
1421 
1422 	skb_walk_frags(skb, frag_iter) {
1423 		if (WARN_ON(start > offset + len))
1424 			return -EFAULT;
1425 
1426 		end = start + frag_iter->len;
1427 		if (offset < end) {
1428 			size_t count;
1429 
1430 			copy = end - offset;
1431 			if (copy > len)
1432 				copy = len;
1433 
1434 			off = offset - start;
1435 			count = desc->count;
1436 			ret = io_zcrx_recv_skb(desc, frag_iter, off, copy);
1437 			desc->count = count;
1438 			if (ret < 0)
1439 				goto out;
1440 
1441 			offset += ret;
1442 			len -= ret;
1443 			if (len == 0 || ret != copy)
1444 				goto out;
1445 		}
1446 		start = end;
1447 	}
1448 
1449 out:
1450 	if (offset == start_off)
1451 		return ret;
1452 	desc->count -= (offset - start_off);
1453 	return offset - start_off;
1454 }
1455 
1456 static int io_zcrx_tcp_recvmsg(struct io_kiocb *req, struct io_zcrx_ifq *ifq,
1457 				struct sock *sk, int flags,
1458 				unsigned issue_flags, unsigned int *outlen)
1459 {
1460 	unsigned int len = *outlen;
1461 	struct io_zcrx_args args = {
1462 		.req = req,
1463 		.ifq = ifq,
1464 		.sock = sk->sk_socket,
1465 	};
1466 	read_descriptor_t rd_desc = {
1467 		.count = len ? len : UINT_MAX,
1468 		.arg.data = &args,
1469 	};
1470 	int ret;
1471 
1472 	lock_sock(sk);
1473 	ret = tcp_read_sock(sk, &rd_desc, io_zcrx_recv_skb);
1474 	if (len && ret > 0)
1475 		*outlen = len - ret;
1476 	if (ret <= 0) {
1477 		if (ret < 0 || sock_flag(sk, SOCK_DONE))
1478 			goto out;
1479 		if (sk->sk_err)
1480 			ret = sock_error(sk);
1481 		else if (sk->sk_shutdown & RCV_SHUTDOWN)
1482 			goto out;
1483 		else if (sk->sk_state == TCP_CLOSE)
1484 			ret = -ENOTCONN;
1485 		else
1486 			ret = -EAGAIN;
1487 	} else if (unlikely(args.nr_skbs > IO_SKBS_PER_CALL_LIMIT) &&
1488 		   (issue_flags & IO_URING_F_MULTISHOT)) {
1489 		ret = IOU_REQUEUE;
1490 	} else if (sock_flag(sk, SOCK_DONE)) {
1491 		/* Make it retry until it finally gets 0. */
1492 		if (issue_flags & IO_URING_F_MULTISHOT)
1493 			ret = IOU_REQUEUE;
1494 		else
1495 			ret = -EAGAIN;
1496 	}
1497 out:
1498 	release_sock(sk);
1499 	return ret;
1500 }
1501 
1502 int io_zcrx_recv(struct io_kiocb *req, struct io_zcrx_ifq *ifq,
1503 		 struct socket *sock, unsigned int flags,
1504 		 unsigned issue_flags, unsigned int *len)
1505 {
1506 	struct sock *sk = sock->sk;
1507 	const struct proto *prot = READ_ONCE(sk->sk_prot);
1508 
1509 	if (prot->recvmsg != tcp_recvmsg)
1510 		return -EPROTONOSUPPORT;
1511 
1512 	sock_rps_record_flow(sk);
1513 	return io_zcrx_tcp_recvmsg(req, ifq, sk, flags, issue_flags, len);
1514 }
1515