xref: /linux/io_uring/zcrx.c (revision d7484babd2c4dcfa1ca02e7e303fab3fab529d75)
// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/dma-map-ops.h>
#include <linux/mm.h>
#include <linux/nospec.h>
#include <linux/io_uring.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/skbuff_ref.h>

#include <net/page_pool/helpers.h>
#include <net/page_pool/memory_provider.h>
#include <net/netlink.h>
#include <net/netdev_rx_queue.h>
#include <net/tcp.h>
#include <net/rps.h>

#include <trace/events/page_pool.h>

#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "kbuf.h"
#include "memmap.h"
#include "zcrx.h"
#include "rsrc.h"

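/*
 * DMA attributes for mapping area pages: CPU syncs are issued explicitly
 * via io_zcrx_sync_for_device() before buffers are handed to the device,
 * and the mappings are receive-only (DMA_FROM_DEVICE), so weak ordering
 * is acceptable.
 */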
#define IO_DMA_ATTR (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING)

static void __io_zcrx_unmap_area(struct io_zcrx_ifq *ifq,
				 struct io_zcrx_area *area, int nr_mapped)
{
	int i;

	for (i = 0; i < nr_mapped; i++) {
		struct net_iov *niov = &area->nia.niovs[i];
		dma_addr_t dma;

		dma = page_pool_get_dma_addr_netmem(net_iov_to_netmem(niov));
		dma_unmap_page_attrs(ifq->dev, dma, PAGE_SIZE,
				     DMA_FROM_DEVICE, IO_DMA_ATTR);
		net_mp_niov_set_dma_addr(niov, 0);
	}
}

static void io_zcrx_unmap_area(struct io_zcrx_ifq *ifq, struct io_zcrx_area *area)
{
	if (area->is_mapped)
		__io_zcrx_unmap_area(ifq, area, area->nia.num_niovs);
}

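/*
 * DMA-map every page backing the area. On a partial failure, unmap what
 * was mapped so far and fail the whole operation.
 */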
static int io_zcrx_map_area(struct io_zcrx_ifq *ifq, struct io_zcrx_area *area)
{
	int i;

	for (i = 0; i < area->nia.num_niovs; i++) {
		struct net_iov *niov = &area->nia.niovs[i];
		dma_addr_t dma;

		dma = dma_map_page_attrs(ifq->dev, area->pages[i], 0, PAGE_SIZE,
					 DMA_FROM_DEVICE, IO_DMA_ATTR);
		if (dma_mapping_error(ifq->dev, dma))
			break;
		if (net_mp_niov_set_dma_addr(niov, dma)) {
			dma_unmap_page_attrs(ifq->dev, dma, PAGE_SIZE,
					     DMA_FROM_DEVICE, IO_DMA_ATTR);
			break;
		}
	}

	if (i != area->nia.num_niovs) {
		__io_zcrx_unmap_area(ifq, area, i);
		return -EINVAL;
	}

	area->is_mapped = true;
	return 0;
}

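/*
 * Make a buffer's DMA mapping coherent for the device before it is placed
 * back into the page pool cache; compiled away when the platform never
 * needs explicit DMA syncs.
 */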
static void io_zcrx_sync_for_device(const struct page_pool *pool,
				    struct net_iov *niov)
{
#if defined(CONFIG_HAS_DMA) && defined(CONFIG_DMA_NEED_SYNC)
	dma_addr_t dma_addr;

	if (!dma_dev_need_sync(pool->p.dev))
		return;

	dma_addr = page_pool_get_dma_addr_netmem(net_iov_to_netmem(niov));
	__dma_sync_single_for_device(pool->p.dev, dma_addr + pool->p.offset,
				     PAGE_SIZE, pool->p.dma_dir);
#endif
}

#define IO_RQ_MAX_ENTRIES		32768

/* limit on the number of skbs processed per receive call before bailing */
#define IO_SKBS_PER_CALL_LIMIT	20

struct io_zcrx_args {
	struct io_kiocb		*req;
	struct io_zcrx_ifq	*ifq;
	struct socket		*sock;
	unsigned		nr_skbs;
};

static const struct memory_provider_ops io_uring_pp_zc_ops;

static inline struct io_zcrx_area *io_zcrx_iov_to_area(const struct net_iov *niov)
{
	struct net_iov_area *owner = net_iov_owner(niov);

	return container_of(owner, struct io_zcrx_area, nia);
}

static inline atomic_t *io_get_user_counter(struct net_iov *niov)
{
	struct io_zcrx_area *area = io_zcrx_iov_to_area(niov);

	return &area->user_refs[net_iov_idx(niov)];
}

static bool io_zcrx_put_niov_uref(struct net_iov *niov)
{
	atomic_t *uref = io_get_user_counter(niov);

	if (unlikely(!atomic_read(uref)))
		return false;
	atomic_dec(uref);
	return true;
}

static void io_zcrx_get_niov_uref(struct net_iov *niov)
{
	atomic_inc(io_get_user_counter(niov));
}

static inline struct page *io_zcrx_iov_page(const struct net_iov *niov)
{
	struct io_zcrx_area *area = io_zcrx_iov_to_area(niov);

	return area->pages[net_iov_idx(niov)];
}

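/*
 * Allocate the mmap'able region backing the refill ring: a struct io_uring
 * header (head/tail) followed by rq_entries struct io_uring_zcrx_rqe slots.
 */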
static int io_allocate_rbuf_ring(struct io_zcrx_ifq *ifq,
				 struct io_uring_zcrx_ifq_reg *reg,
				 struct io_uring_region_desc *rd)
{
	size_t off, size;
	void *ptr;
	int ret;

	off = sizeof(struct io_uring);
	size = off + sizeof(struct io_uring_zcrx_rqe) * reg->rq_entries;
	if (size > rd->size)
		return -EINVAL;

	ret = io_create_region_mmap_safe(ifq->ctx, &ifq->ctx->zcrx_region, rd,
					 IORING_MAP_OFF_ZCRX_REGION);
	if (ret < 0)
		return ret;

	ptr = io_region_get_ptr(&ifq->ctx->zcrx_region);
	ifq->rq_ring = (struct io_uring *)ptr;
	ifq->rqes = (struct io_uring_zcrx_rqe *)(ptr + off);
	return 0;
}

static void io_free_rbuf_ring(struct io_zcrx_ifq *ifq)
{
	io_free_region(ifq->ctx, &ifq->ctx->zcrx_region);
	ifq->rq_ring = NULL;
	ifq->rqes = NULL;
}

static void io_zcrx_free_area(struct io_zcrx_area *area)
{
	io_zcrx_unmap_area(area->ifq, area);

	kvfree(area->freelist);
	kvfree(area->nia.niovs);
	kvfree(area->user_refs);
	if (area->pages) {
		unpin_user_pages(area->pages, area->nia.num_niovs);
		kvfree(area->pages);
	}
	kfree(area);
}

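/*
 * Pin the user-provided buffer and build an area over it: one net_iov per
 * page, a freelist of buffer indices, and per-buffer user refcounts.
 */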
static int io_zcrx_create_area(struct io_zcrx_ifq *ifq,
			       struct io_zcrx_area **res,
			       struct io_uring_zcrx_area_reg *area_reg)
{
	struct io_zcrx_area *area;
	int i, ret, nr_pages;
	struct iovec iov;

	if (area_reg->flags || area_reg->rq_area_token)
		return -EINVAL;
	if (area_reg->__resv1 || area_reg->__resv2[0] || area_reg->__resv2[1])
		return -EINVAL;
	if (area_reg->addr & ~PAGE_MASK || area_reg->len & ~PAGE_MASK)
		return -EINVAL;

	iov.iov_base = u64_to_user_ptr(area_reg->addr);
	iov.iov_len = area_reg->len;
	ret = io_buffer_validate(&iov);
	if (ret)
		return ret;

	ret = -ENOMEM;
	area = kzalloc(sizeof(*area), GFP_KERNEL);
	if (!area)
		goto err;

	area->pages = io_pin_pages((unsigned long)area_reg->addr, area_reg->len,
				   &nr_pages);
	if (IS_ERR(area->pages)) {
		ret = PTR_ERR(area->pages);
		area->pages = NULL;
		goto err;
	}
	area->nia.num_niovs = nr_pages;

	area->nia.niovs = kvmalloc_array(nr_pages, sizeof(area->nia.niovs[0]),
					 GFP_KERNEL | __GFP_ZERO);
	if (!area->nia.niovs)
		goto err;

	area->freelist = kvmalloc_array(nr_pages, sizeof(area->freelist[0]),
					GFP_KERNEL | __GFP_ZERO);
	if (!area->freelist)
		goto err;

	area->user_refs = kvmalloc_array(nr_pages, sizeof(area->user_refs[0]),
					 GFP_KERNEL | __GFP_ZERO);
	if (!area->user_refs)
		goto err;

	for (i = 0; i < nr_pages; i++) {
		struct net_iov *niov = &area->nia.niovs[i];

		niov->owner = &area->nia;
		area->freelist[i] = i;
		atomic_set(&area->user_refs[i], 0);
	}

	area->free_count = nr_pages;
	area->ifq = ifq;
	/* we're only supporting one area per ifq for now */
	area->area_id = 0;
	area_reg->rq_area_token = (u64)area->area_id << IORING_ZCRX_AREA_SHIFT;
	spin_lock_init(&area->freelist_lock);
	*res = area;
	return 0;
err:
	if (area)
		io_zcrx_free_area(area);
	return ret;
}

static struct io_zcrx_ifq *io_zcrx_ifq_alloc(struct io_ring_ctx *ctx)
{
	struct io_zcrx_ifq *ifq;

	ifq = kzalloc(sizeof(*ifq), GFP_KERNEL);
	if (!ifq)
		return NULL;

	ifq->if_rxq = -1;
	ifq->ctx = ctx;
	spin_lock_init(&ifq->lock);
	spin_lock_init(&ifq->rq_lock);
	return ifq;
}

static void io_zcrx_drop_netdev(struct io_zcrx_ifq *ifq)
{
	spin_lock(&ifq->lock);
	if (ifq->netdev) {
		netdev_put(ifq->netdev, &ifq->netdev_tracker);
		ifq->netdev = NULL;
	}
	spin_unlock(&ifq->lock);
}

static void io_close_queue(struct io_zcrx_ifq *ifq)
{
	struct net_device *netdev;
	netdevice_tracker netdev_tracker;
	struct pp_memory_provider_params p = {
		.mp_ops = &io_uring_pp_zc_ops,
		.mp_priv = ifq,
	};

	if (ifq->if_rxq == -1)
		return;

	spin_lock(&ifq->lock);
	netdev = ifq->netdev;
	netdev_tracker = ifq->netdev_tracker;
	ifq->netdev = NULL;
	spin_unlock(&ifq->lock);

	if (netdev) {
		net_mp_close_rxq(netdev, ifq->if_rxq, &p);
		netdev_put(netdev, &netdev_tracker);
	}
	ifq->if_rxq = -1;
}

static void io_zcrx_ifq_free(struct io_zcrx_ifq *ifq)
{
	io_close_queue(ifq);
	io_zcrx_drop_netdev(ifq);

	if (ifq->area)
		io_zcrx_free_area(ifq->area);
	if (ifq->dev)
		put_device(ifq->dev);

	io_free_rbuf_ring(ifq);
	kfree(ifq);
}

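/*
 * Registration is driven from userspace via io_uring_register(2) with the
 * IORING_REGISTER_ZCRX_IFQ opcode. A minimal sketch of the expected setup,
 * using only the uapi fields validated below (buffer/ring allocation, error
 * handling, and the exact register-call conventions are elided and may
 * differ):
 *
 *	struct io_uring_region_desc rd = { ... };	// refill ring memory
 *	struct io_uring_zcrx_area_reg area = {
 *		.addr		= (__u64)(unsigned long)buf,	// page-aligned
 *		.len		= buf_len,			// page-aligned
 *	};
 *	struct io_uring_zcrx_ifq_reg reg = {
 *		.if_idx		= ifindex,	// netdev to attach to
 *		.if_rxq		= queue_id,	// its rx queue
 *		.rq_entries	= 4096,		// refill ring size
 *		.area_ptr	= (__u64)(unsigned long)&area,
 *		.region_ptr	= (__u64)(unsigned long)&rd,
 *	};
 *
 *	io_uring_register(ring_fd, IORING_REGISTER_ZCRX_IFQ, &reg, 1);
 *
 * On success the kernel fills in reg.offsets (ring head/tail/rqes) and
 * area.rq_area_token, which userspace needs for composing refill entries.
 */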
int io_register_zcrx_ifq(struct io_ring_ctx *ctx,
			 struct io_uring_zcrx_ifq_reg __user *arg)
{
	struct pp_memory_provider_params mp_param = {};
	struct io_uring_zcrx_area_reg area;
	struct io_uring_zcrx_ifq_reg reg;
	struct io_uring_region_desc rd;
	struct io_zcrx_ifq *ifq;
	int ret;

	/*
	 * Requires CAP_NET_ADMIN because it:
	 * 1. allocates an interface queue;
	 * 2. can observe data destined for sockets of other tasks.
	 */
	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	/* mandatory io_uring features for zc rx */
	if (!(ctx->flags & IORING_SETUP_DEFER_TASKRUN &&
	      ctx->flags & IORING_SETUP_CQE32))
		return -EINVAL;
	if (ctx->ifq)
		return -EBUSY;
	if (copy_from_user(&reg, arg, sizeof(reg)))
		return -EFAULT;
	if (copy_from_user(&rd, u64_to_user_ptr(reg.region_ptr), sizeof(rd)))
		return -EFAULT;
	if (memchr_inv(&reg.__resv, 0, sizeof(reg.__resv)))
		return -EINVAL;
	if (reg.if_rxq == -1 || !reg.rq_entries || reg.flags)
		return -EINVAL;
	if (reg.rq_entries > IO_RQ_MAX_ENTRIES) {
		if (!(ctx->flags & IORING_SETUP_CLAMP))
			return -EINVAL;
		reg.rq_entries = IO_RQ_MAX_ENTRIES;
	}
	reg.rq_entries = roundup_pow_of_two(reg.rq_entries);

	if (copy_from_user(&area, u64_to_user_ptr(reg.area_ptr), sizeof(area)))
		return -EFAULT;

	ifq = io_zcrx_ifq_alloc(ctx);
	if (!ifq)
		return -ENOMEM;

	ret = io_allocate_rbuf_ring(ifq, &reg, &rd);
	if (ret)
		goto err;

	ret = io_zcrx_create_area(ifq, &ifq->area, &area);
	if (ret)
		goto err;

	ifq->rq_entries = reg.rq_entries;

	ret = -ENODEV;
	ifq->netdev = netdev_get_by_index(current->nsproxy->net_ns, reg.if_idx,
					  &ifq->netdev_tracker, GFP_KERNEL);
	if (!ifq->netdev)
		goto err;

	ifq->dev = ifq->netdev->dev.parent;
	ret = -EOPNOTSUPP;
	if (!ifq->dev)
		goto err;
	get_device(ifq->dev);

	ret = io_zcrx_map_area(ifq, ifq->area);
	if (ret)
		goto err;

	mp_param.mp_ops = &io_uring_pp_zc_ops;
	mp_param.mp_priv = ifq;
	ret = net_mp_open_rxq(ifq->netdev, reg.if_rxq, &mp_param);
	if (ret)
		goto err;
	ifq->if_rxq = reg.if_rxq;

	reg.offsets.rqes = sizeof(struct io_uring);
	reg.offsets.head = offsetof(struct io_uring, head);
	reg.offsets.tail = offsetof(struct io_uring, tail);

	if (copy_to_user(arg, &reg, sizeof(reg)) ||
	    copy_to_user(u64_to_user_ptr(reg.region_ptr), &rd, sizeof(rd)) ||
	    copy_to_user(u64_to_user_ptr(reg.area_ptr), &area, sizeof(area))) {
		ret = -EFAULT;
		goto err;
	}
	ctx->ifq = ifq;
	return 0;
err:
	io_zcrx_ifq_free(ifq);
	return ret;
}

void io_unregister_zcrx_ifqs(struct io_ring_ctx *ctx)
{
	struct io_zcrx_ifq *ifq = ctx->ifq;

	lockdep_assert_held(&ctx->uring_lock);

	if (!ifq)
		return;

	ctx->ifq = NULL;
	io_zcrx_ifq_free(ifq);
}

static struct net_iov *__io_zcrx_get_free_niov(struct io_zcrx_area *area)
{
	unsigned niov_idx;

	lockdep_assert_held(&area->freelist_lock);

	niov_idx = area->freelist[--area->free_count];
	return &area->nia.niovs[niov_idx];
}

static void io_zcrx_return_niov_freelist(struct net_iov *niov)
{
	struct io_zcrx_area *area = io_zcrx_iov_to_area(niov);

	spin_lock_bh(&area->freelist_lock);
	area->freelist[area->free_count++] = net_iov_idx(niov);
	spin_unlock_bh(&area->freelist_lock);
}

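/*
 * Return a buffer to wherever it came from: its page pool if it has one,
 * otherwise straight back to the area freelist.
 */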
static void io_zcrx_return_niov(struct net_iov *niov)
{
	netmem_ref netmem = net_iov_to_netmem(niov);

	if (!niov->pp) {
		/* a copy-fallback-allocated niov, not owned by any page pool */
		io_zcrx_return_niov_freelist(niov);
		return;
	}
	page_pool_put_unrefed_netmem(niov->pp, netmem, -1, false);
}

static void io_zcrx_scrub(struct io_zcrx_ifq *ifq)
{
	struct io_zcrx_area *area = ifq->area;
	int i;

	if (!area)
		return;

	/* Reclaim all buffers that were handed out to user space. */
	for (i = 0; i < area->nia.num_niovs; i++) {
		struct net_iov *niov = &area->nia.niovs[i];
		int nr;

		if (!atomic_read(io_get_user_counter(niov)))
			continue;
		nr = atomic_xchg(io_get_user_counter(niov), 0);
		if (nr && !page_pool_unref_netmem(net_iov_to_netmem(niov), nr))
			io_zcrx_return_niov(niov);
	}
}

void io_shutdown_zcrx_ifqs(struct io_ring_ctx *ctx)
{
	lockdep_assert_held(&ctx->uring_lock);

	if (!ctx->ifq)
		return;
	io_zcrx_scrub(ctx->ifq);
	io_close_queue(ctx->ifq);
}

static inline u32 io_zcrx_rqring_entries(struct io_zcrx_ifq *ifq)
{
	u32 entries;

	entries = smp_load_acquire(&ifq->rq_ring->tail) - ifq->cached_rq_head;
	return min(entries, ifq->rq_entries);
}

static struct io_uring_zcrx_rqe *io_zcrx_get_rqe(struct io_zcrx_ifq *ifq,
						 unsigned mask)
{
	unsigned int idx = ifq->cached_rq_head++ & mask;

	return &ifq->rqes[idx];
}

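/*
 * Fast-path refill: consume entries user space posted to the refill ring,
 * validate them, and place the buffers into the page pool's alloc cache.
 * Malformed entries are skipped.
 */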
static void io_zcrx_ring_refill(struct page_pool *pp,
				struct io_zcrx_ifq *ifq)
{
	unsigned int mask = ifq->rq_entries - 1;
	unsigned int entries;
	netmem_ref netmem;

	spin_lock_bh(&ifq->rq_lock);

	entries = io_zcrx_rqring_entries(ifq);
	entries = min_t(unsigned, entries, PP_ALLOC_CACHE_REFILL - pp->alloc.count);
	if (unlikely(!entries)) {
		spin_unlock_bh(&ifq->rq_lock);
		return;
	}

	do {
		struct io_uring_zcrx_rqe *rqe = io_zcrx_get_rqe(ifq, mask);
		struct io_zcrx_area *area;
		struct net_iov *niov;
		unsigned niov_idx, area_idx;

		area_idx = rqe->off >> IORING_ZCRX_AREA_SHIFT;
		niov_idx = (rqe->off & ~IORING_ZCRX_AREA_MASK) >> PAGE_SHIFT;

		if (unlikely(rqe->__pad || area_idx))
			continue;
		area = ifq->area;

		if (unlikely(niov_idx >= area->nia.num_niovs))
			continue;
		niov_idx = array_index_nospec(niov_idx, area->nia.num_niovs);

		niov = &area->nia.niovs[niov_idx];
		if (!io_zcrx_put_niov_uref(niov))
			continue;

		netmem = net_iov_to_netmem(niov);
		if (page_pool_unref_netmem(netmem, 1) != 0)
			continue;

		if (unlikely(niov->pp != pp)) {
			io_zcrx_return_niov(niov);
			continue;
		}

		io_zcrx_sync_for_device(pp, niov);
		net_mp_netmem_place_in_cache(pp, netmem);
	} while (--entries);

	smp_store_release(&ifq->rq_ring->head, ifq->cached_rq_head);
	spin_unlock_bh(&ifq->rq_lock);
}

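/*
 * Slow-path refill: when the refill ring runs dry, hand out buffers
 * directly from the area freelist.
 */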
static void io_zcrx_refill_slow(struct page_pool *pp, struct io_zcrx_ifq *ifq)
{
	struct io_zcrx_area *area = ifq->area;

	spin_lock_bh(&area->freelist_lock);
	while (area->free_count && pp->alloc.count < PP_ALLOC_CACHE_REFILL) {
		struct net_iov *niov = __io_zcrx_get_free_niov(area);
		netmem_ref netmem = net_iov_to_netmem(niov);

		net_mp_niov_set_page_pool(pp, niov);
		io_zcrx_sync_for_device(pp, niov);
		net_mp_netmem_place_in_cache(pp, netmem);
	}
	spin_unlock_bh(&area->freelist_lock);
}

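/*
 * Page pool allocation callback: try the user refill ring first, then
 * fall back to the freelist; returns 0 when no buffers are available.
 */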
static netmem_ref io_pp_zc_alloc_netmems(struct page_pool *pp, gfp_t gfp)
{
	struct io_zcrx_ifq *ifq = pp->mp_priv;

	/* the page pool should already be ensuring this */
	if (unlikely(pp->alloc.count))
		goto out_return;

	io_zcrx_ring_refill(pp, ifq);
	if (likely(pp->alloc.count))
		goto out_return;

	io_zcrx_refill_slow(pp, ifq);
	if (!pp->alloc.count)
		return 0;
out_return:
	return pp->alloc.cache[--pp->alloc.count];
}

static bool io_pp_zc_release_netmem(struct page_pool *pp, netmem_ref netmem)
{
	struct net_iov *niov;

	if (WARN_ON_ONCE(!netmem_is_net_iov(netmem)))
		return false;

	niov = netmem_to_net_iov(netmem);
	net_mp_niov_clear_page_pool(niov);
	io_zcrx_return_niov_freelist(niov);
	return false;
}

static int io_pp_zc_init(struct page_pool *pp)
{
	struct io_zcrx_ifq *ifq = pp->mp_priv;

	if (WARN_ON_ONCE(!ifq))
		return -EINVAL;
	if (WARN_ON_ONCE(ifq->dev != pp->p.dev))
		return -EINVAL;
	if (WARN_ON_ONCE(!pp->dma_map))
		return -EOPNOTSUPP;
	if (pp->p.order != 0)
		return -EOPNOTSUPP;
	if (pp->p.dma_dir != DMA_FROM_DEVICE)
		return -EOPNOTSUPP;

	percpu_ref_get(&ifq->ctx->refs);
	return 0;
}

static void io_pp_zc_destroy(struct page_pool *pp)
{
	struct io_zcrx_ifq *ifq = pp->mp_priv;
	struct io_zcrx_area *area = ifq->area;

	if (WARN_ON_ONCE(area->free_count != area->nia.num_niovs))
		return;
	percpu_ref_put(&ifq->ctx->refs);
}

static int io_pp_nl_fill(void *mp_priv, struct sk_buff *rsp,
			 struct netdev_rx_queue *rxq)
{
	struct nlattr *nest;
	int type;

	type = rxq ? NETDEV_A_QUEUE_IO_URING : NETDEV_A_PAGE_POOL_IO_URING;
	nest = nla_nest_start(rsp, type);
	if (!nest)
		return -EMSGSIZE;
	nla_nest_end(rsp, nest);

	return 0;
}

static void io_pp_uninstall(void *mp_priv, struct netdev_rx_queue *rxq)
{
	struct pp_memory_provider_params *p = &rxq->mp_params;
	struct io_zcrx_ifq *ifq = mp_priv;

	io_zcrx_drop_netdev(ifq);
	p->mp_ops = NULL;
	p->mp_priv = NULL;
}

static const struct memory_provider_ops io_uring_pp_zc_ops = {
	.alloc_netmems		= io_pp_zc_alloc_netmems,
	.release_netmem		= io_pp_zc_release_netmem,
	.init			= io_pp_zc_init,
	.destroy		= io_pp_zc_destroy,
	.nl_fill		= io_pp_nl_fill,
	.uninstall		= io_pp_uninstall,
};

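/*
 * Post a zero-copy completion for [off, off + len) of a buffer: a regular
 * CQE carrying the length plus the big-CQE tail encoding the area token
 * and offset, which is why IORING_SETUP_CQE32 is mandatory.
 */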
static bool io_zcrx_queue_cqe(struct io_kiocb *req, struct net_iov *niov,
			      struct io_zcrx_ifq *ifq, int off, int len)
{
	struct io_uring_zcrx_cqe *rcqe;
	struct io_zcrx_area *area;
	struct io_uring_cqe *cqe;
	u64 offset;

	if (!io_defer_get_uncommited_cqe(req->ctx, &cqe))
		return false;

	cqe->user_data = req->cqe.user_data;
	cqe->res = len;
	cqe->flags = IORING_CQE_F_MORE;

	area = io_zcrx_iov_to_area(niov);
	offset = off + (net_iov_idx(niov) << PAGE_SHIFT);
	rcqe = (struct io_uring_zcrx_cqe *)(cqe + 1);
	rcqe->off = offset + ((u64)area->area_id << IORING_ZCRX_AREA_SHIFT);
	rcqe->__pad = 0;
	return true;
}

static struct net_iov *io_zcrx_alloc_fallback(struct io_zcrx_area *area)
{
	struct net_iov *niov = NULL;

	spin_lock_bh(&area->freelist_lock);
	if (area->free_count)
		niov = __io_zcrx_get_free_niov(area);
	spin_unlock_bh(&area->freelist_lock);

	if (niov)
		page_pool_fragment_netmem(net_iov_to_netmem(niov), 1);
	return niov;
}

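/*
 * Copy fallback for data that didn't land in zcrx buffers (e.g. the skb
 * head, or frags backed by regular pages): copy it into freelist buffers
 * a page at a time and post CQEs as if it had been received zero-copy.
 */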
static ssize_t io_zcrx_copy_chunk(struct io_kiocb *req, struct io_zcrx_ifq *ifq,
				  void *src_base, struct page *src_page,
				  unsigned int src_offset, size_t len)
{
	struct io_zcrx_area *area = ifq->area;
	size_t copied = 0;
	int ret = 0;

	while (len) {
		size_t copy_size = min_t(size_t, PAGE_SIZE, len);
		const int dst_off = 0;
		struct net_iov *niov;
		struct page *dst_page;
		void *dst_addr;

		niov = io_zcrx_alloc_fallback(area);
		if (!niov) {
			ret = -ENOMEM;
			break;
		}

		dst_page = io_zcrx_iov_page(niov);
		dst_addr = kmap_local_page(dst_page);
		if (src_page)
			src_base = kmap_local_page(src_page);

		memcpy(dst_addr, src_base + src_offset, copy_size);

		if (src_page)
			kunmap_local(src_base);
		kunmap_local(dst_addr);

		if (!io_zcrx_queue_cqe(req, niov, ifq, dst_off, copy_size)) {
			io_zcrx_return_niov(niov);
			ret = -ENOSPC;
			break;
		}

		io_zcrx_get_niov_uref(niov);
		src_offset += copy_size;
		len -= copy_size;
		copied += copy_size;
	}

	return copied ? copied : ret;
}

static int io_zcrx_copy_frag(struct io_kiocb *req, struct io_zcrx_ifq *ifq,
			     const skb_frag_t *frag, int off, int len)
{
	struct page *page = skb_frag_page(frag);
	u32 p_off, p_len, t, copied = 0;
	int ret = 0;

	off += skb_frag_off(frag);

	skb_frag_foreach_page(frag, off, len,
			      page, p_off, p_len, t) {
		ret = io_zcrx_copy_chunk(req, ifq, NULL, page, p_off, p_len);
		if (ret < 0)
			return copied ? copied : ret;
		copied += ret;
	}
	return copied;
}

static int io_zcrx_recv_frag(struct io_kiocb *req, struct io_zcrx_ifq *ifq,
			     const skb_frag_t *frag, int off, int len)
{
	struct net_iov *niov;

	if (unlikely(!skb_frag_is_net_iov(frag)))
		return io_zcrx_copy_frag(req, ifq, frag, off, len);

	niov = netmem_to_net_iov(frag->netmem);
	if (niov->pp->mp_ops != &io_uring_pp_zc_ops ||
	    niov->pp->mp_priv != ifq)
		return -EFAULT;

	if (!io_zcrx_queue_cqe(req, niov, ifq, off + skb_frag_off(frag), len))
		return -ENOSPC;

	/*
	 * Prevent the buffer from being recycled while user space is
	 * accessing it. This has to be done before grabbing a user reference.
	 */
	page_pool_ref_netmem(net_iov_to_netmem(niov));
	io_zcrx_get_niov_uref(niov);
	return len;
}

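/*
 * tcp_read_sock() callback: walk the skb head, page frags and frag list,
 * posting one CQE per zero-copy frag and copying everything else.
 */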
static int
io_zcrx_recv_skb(read_descriptor_t *desc, struct sk_buff *skb,
		 unsigned int offset, size_t len)
{
	struct io_zcrx_args *args = desc->arg.data;
	struct io_zcrx_ifq *ifq = args->ifq;
	struct io_kiocb *req = args->req;
	struct sk_buff *frag_iter;
	unsigned start, start_off = offset;
	int i, copy, end, off;
	int ret = 0;

	len = min_t(size_t, len, desc->count);
	/*
	 * __tcp_read_sock() always calls io_zcrx_recv_skb one last time, even
	 * if desc->count is already 0. This is caused by the if (offset + 1 !=
	 * skb->len) check. Return early in this case to break out of
	 * __tcp_read_sock().
	 */
	if (!len)
		return 0;
	if (unlikely(args->nr_skbs++ > IO_SKBS_PER_CALL_LIMIT))
		return -EAGAIN;

	if (unlikely(offset < skb_headlen(skb))) {
		ssize_t copied;
		size_t to_copy;

		to_copy = min_t(size_t, skb_headlen(skb) - offset, len);
		copied = io_zcrx_copy_chunk(req, ifq, skb->data, NULL,
					    offset, to_copy);
		if (copied < 0) {
			ret = copied;
			goto out;
		}
		offset += copied;
		len -= copied;
		if (!len)
			goto out;
		if (offset != skb_headlen(skb))
			goto out;
	}

	start = skb_headlen(skb);

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const skb_frag_t *frag;

		if (WARN_ON(start > offset + len))
			return -EFAULT;

		frag = &skb_shinfo(skb)->frags[i];
		end = start + skb_frag_size(frag);

		if (offset < end) {
			copy = end - offset;
			if (copy > len)
				copy = len;

			off = offset - start;
			ret = io_zcrx_recv_frag(req, ifq, frag, off, copy);
			if (ret < 0)
				goto out;

			offset += ret;
			len -= ret;
			if (len == 0 || ret != copy)
				goto out;
		}
		start = end;
	}

	skb_walk_frags(skb, frag_iter) {
		if (WARN_ON(start > offset + len))
			return -EFAULT;

		end = start + frag_iter->len;
		if (offset < end) {
			copy = end - offset;
			if (copy > len)
				copy = len;

			off = offset - start;
			ret = io_zcrx_recv_skb(desc, frag_iter, off, copy);
			if (ret < 0)
				goto out;

			offset += ret;
			len -= ret;
			if (len == 0 || ret != copy)
				goto out;
		}
		start = end;
	}

out:
	if (offset == start_off)
		return ret;
	desc->count -= (offset - start_off);
	return offset - start_off;
}

static int io_zcrx_tcp_recvmsg(struct io_kiocb *req, struct io_zcrx_ifq *ifq,
			       struct sock *sk, int flags,
			       unsigned issue_flags, unsigned int *outlen)
{
	unsigned int len = *outlen;
	struct io_zcrx_args args = {
		.req = req,
		.ifq = ifq,
		.sock = sk->sk_socket,
	};
	read_descriptor_t rd_desc = {
		.count = len ? len : UINT_MAX,
		.arg.data = &args,
	};
	int ret;

	lock_sock(sk);
	ret = tcp_read_sock(sk, &rd_desc, io_zcrx_recv_skb);
	if (len && ret > 0)
		*outlen = len - ret;
	if (ret <= 0) {
		if (ret < 0 || sock_flag(sk, SOCK_DONE))
			goto out;
		if (sk->sk_err)
			ret = sock_error(sk);
		else if (sk->sk_shutdown & RCV_SHUTDOWN)
			goto out;
		else if (sk->sk_state == TCP_CLOSE)
			ret = -ENOTCONN;
		else
			ret = -EAGAIN;
	} else if (unlikely(args.nr_skbs > IO_SKBS_PER_CALL_LIMIT) &&
		   (issue_flags & IO_URING_F_MULTISHOT)) {
		ret = IOU_REQUEUE;
	} else if (sock_flag(sk, SOCK_DONE)) {
		/* Make it retry until it finally reads a length of 0. */
		if (issue_flags & IO_URING_F_MULTISHOT)
			ret = IOU_REQUEUE;
		else
			ret = -EAGAIN;
	}
out:
	release_sock(sk);
	return ret;
}

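/*
 * Entry point from the io_uring recv path. Only TCP sockets are supported,
 * since the receive loop is built on tcp_read_sock().
 */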
int io_zcrx_recv(struct io_kiocb *req, struct io_zcrx_ifq *ifq,
		 struct socket *sock, unsigned int flags,
		 unsigned issue_flags, unsigned int *len)
{
	struct sock *sk = sock->sk;
	const struct proto *prot = READ_ONCE(sk->sk_prot);

	if (prot->recvmsg != tcp_recvmsg)
		return -EPROTONOSUPPORT;

	sock_rps_record_flow(sk);
	return io_zcrx_tcp_recvmsg(req, ifq, sk, flags, issue_flags, len);
}
969