xref: /linux/io_uring/zcrx.c (revision 8804d970fab45726b3c7cd7f240b31122aa94219)
1 // SPDX-License-Identifier: GPL-2.0
2 #include <linux/kernel.h>
3 #include <linux/errno.h>
4 #include <linux/dma-map-ops.h>
5 #include <linux/mm.h>
6 #include <linux/nospec.h>
7 #include <linux/io_uring.h>
8 #include <linux/netdevice.h>
9 #include <linux/rtnetlink.h>
10 #include <linux/skbuff_ref.h>
11 
12 #include <net/page_pool/helpers.h>
13 #include <net/page_pool/memory_provider.h>
14 #include <net/netlink.h>
15 #include <net/netdev_queues.h>
16 #include <net/netdev_rx_queue.h>
17 #include <net/tcp.h>
18 #include <net/rps.h>
19 
20 #include <trace/events/page_pool.h>
21 
22 #include <uapi/linux/io_uring.h>
23 
24 #include "io_uring.h"
25 #include "kbuf.h"
26 #include "memmap.h"
27 #include "zcrx.h"
28 #include "rsrc.h"
29 
30 #define IO_ZCRX_AREA_SUPPORTED_FLAGS	(IORING_ZCRX_AREA_DMABUF)
31 
32 #define IO_DMA_ATTR (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING)
33 
34 static inline struct io_zcrx_ifq *io_pp_to_ifq(struct page_pool *pp)
35 {
36 	return pp->mp_priv;
37 }
38 
39 static inline struct io_zcrx_area *io_zcrx_iov_to_area(const struct net_iov *niov)
40 {
41 	struct net_iov_area *owner = net_iov_owner(niov);
42 
43 	return container_of(owner, struct io_zcrx_area, nia);
44 }
45 
46 static inline struct page *io_zcrx_iov_page(const struct net_iov *niov)
47 {
48 	struct io_zcrx_area *area = io_zcrx_iov_to_area(niov);
49 	unsigned niov_pages_shift;
50 
51 	lockdep_assert(!area->mem.is_dmabuf);
52 
53 	niov_pages_shift = area->ifq->niov_shift - PAGE_SHIFT;
54 	return area->mem.pages[net_iov_idx(niov) << niov_pages_shift];
55 }
56 
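/*
 * Walk the DMA-mapped scatterlist of an area and hand the addresses out to
 * its net_iovs in niov_shift-sized chunks. Every sg entry must be a multiple
 * of the niov size and the table must cover all niovs of the area.
 */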
57 static int io_populate_area_dma(struct io_zcrx_ifq *ifq,
58 				struct io_zcrx_area *area)
59 {
60 	unsigned niov_size = 1U << ifq->niov_shift;
61 	struct sg_table *sgt = area->mem.sgt;
62 	struct scatterlist *sg;
63 	unsigned i, niov_idx = 0;
64 
65 	for_each_sgtable_dma_sg(sgt, sg, i) {
66 		dma_addr_t dma = sg_dma_address(sg);
67 		unsigned long sg_len = sg_dma_len(sg);
68 
69 		if (WARN_ON_ONCE(sg_len % niov_size))
70 			return -EINVAL;
71 
72 		while (sg_len && niov_idx < area->nia.num_niovs) {
73 			struct net_iov *niov = &area->nia.niovs[niov_idx];
74 
75 			if (net_mp_niov_set_dma_addr(niov, dma))
76 				return -EFAULT;
77 			sg_len -= niov_size;
78 			dma += niov_size;
79 			niov_idx++;
80 		}
81 	}
82 
83 	if (WARN_ON_ONCE(niov_idx != area->nia.num_niovs))
84 		return -EFAULT;
85 	return 0;
86 }
87 
88 static void io_release_dmabuf(struct io_zcrx_mem *mem)
89 {
90 	if (!IS_ENABLED(CONFIG_DMA_SHARED_BUFFER))
91 		return;
92 
93 	if (mem->sgt)
94 		dma_buf_unmap_attachment_unlocked(mem->attach, mem->sgt,
95 						  DMA_FROM_DEVICE);
96 	if (mem->attach)
97 		dma_buf_detach(mem->dmabuf, mem->attach);
98 	if (mem->dmabuf)
99 		dma_buf_put(mem->dmabuf);
100 
101 	mem->sgt = NULL;
102 	mem->attach = NULL;
103 	mem->dmabuf = NULL;
104 }
105 
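/*
 * Import a dmabuf-backed area: take a reference on the dmabuf, attach it to
 * the queue's DMA device and map it for DMA_FROM_DEVICE. The mapped sg table
 * has to cover exactly the registered length; offsets into the dmabuf are
 * not supported.
 */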
106 static int io_import_dmabuf(struct io_zcrx_ifq *ifq,
107 			    struct io_zcrx_mem *mem,
108 			    struct io_uring_zcrx_area_reg *area_reg)
109 {
110 	unsigned long off = (unsigned long)area_reg->addr;
111 	unsigned long len = (unsigned long)area_reg->len;
112 	unsigned long total_size = 0;
113 	struct scatterlist *sg;
114 	int dmabuf_fd = area_reg->dmabuf_fd;
115 	int i, ret;
116 
117 	if (off)
118 		return -EINVAL;
119 	if (WARN_ON_ONCE(!ifq->dev))
120 		return -EFAULT;
121 	if (!IS_ENABLED(CONFIG_DMA_SHARED_BUFFER))
122 		return -EINVAL;
123 
124 	mem->is_dmabuf = true;
125 	mem->dmabuf = dma_buf_get(dmabuf_fd);
126 	if (IS_ERR(mem->dmabuf)) {
127 		ret = PTR_ERR(mem->dmabuf);
128 		mem->dmabuf = NULL;
129 		goto err;
130 	}
131 
132 	mem->attach = dma_buf_attach(mem->dmabuf, ifq->dev);
133 	if (IS_ERR(mem->attach)) {
134 		ret = PTR_ERR(mem->attach);
135 		mem->attach = NULL;
136 		goto err;
137 	}
138 
139 	mem->sgt = dma_buf_map_attachment_unlocked(mem->attach, DMA_FROM_DEVICE);
140 	if (IS_ERR(mem->sgt)) {
141 		ret = PTR_ERR(mem->sgt);
142 		mem->sgt = NULL;
143 		goto err;
144 	}
145 
146 	for_each_sgtable_dma_sg(mem->sgt, sg, i)
147 		total_size += sg_dma_len(sg);
148 
149 	if (total_size != len) {
150 		ret = -EINVAL;
151 		goto err;
152 	}
153 
154 	mem->size = len;
155 	return 0;
156 err:
157 	io_release_dmabuf(mem);
158 	return ret;
159 }
160 
161 static unsigned long io_count_account_pages(struct page **pages, unsigned nr_pages)
162 {
163 	struct folio *last_folio = NULL;
164 	unsigned long res = 0;
165 	int i;
166 
167 	for (i = 0; i < nr_pages; i++) {
168 		struct folio *folio = page_folio(pages[i]);
169 
170 		if (folio == last_folio)
171 			continue;
172 		last_folio = folio;
173 		res += 1UL << folio_order(folio);
174 	}
175 	return res;
176 }
177 
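/*
 * Import a user memory area: pin the user pages, build a scatterlist over
 * them for later DMA mapping and charge the pinned memory against the ring's
 * memory accounting.
 */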
178 static int io_import_umem(struct io_zcrx_ifq *ifq,
179 			  struct io_zcrx_mem *mem,
180 			  struct io_uring_zcrx_area_reg *area_reg)
181 {
182 	struct page **pages;
183 	int nr_pages, ret;
184 
185 	if (area_reg->dmabuf_fd)
186 		return -EINVAL;
187 	if (!area_reg->addr)
188 		return -EFAULT;
189 	pages = io_pin_pages((unsigned long)area_reg->addr, area_reg->len,
190 				   &nr_pages);
191 	if (IS_ERR(pages))
192 		return PTR_ERR(pages);
193 
194 	ret = sg_alloc_table_from_pages(&mem->page_sg_table, pages, nr_pages,
195 					0, nr_pages << PAGE_SHIFT,
196 					GFP_KERNEL_ACCOUNT);
197 	if (ret) {
198 		unpin_user_pages(pages, nr_pages);
199 		return ret;
200 	}
201 
202 	mem->account_pages = io_count_account_pages(pages, nr_pages);
203 	ret = io_account_mem(ifq->ctx, mem->account_pages);
204 	if (ret < 0)
205 		mem->account_pages = 0;
206 
207 	mem->sgt = &mem->page_sg_table;
208 	mem->pages = pages;
209 	mem->nr_folios = nr_pages;
210 	mem->size = area_reg->len;
211 	return ret;
212 }
213 
214 static void io_release_area_mem(struct io_zcrx_mem *mem)
215 {
216 	if (mem->is_dmabuf) {
217 		io_release_dmabuf(mem);
218 		return;
219 	}
220 	if (mem->pages) {
221 		unpin_user_pages(mem->pages, mem->nr_folios);
222 		sg_free_table(mem->sgt);
223 		mem->sgt = NULL;
224 		kvfree(mem->pages);
225 	}
226 }
227 
228 static int io_import_area(struct io_zcrx_ifq *ifq,
229 			  struct io_zcrx_mem *mem,
230 			  struct io_uring_zcrx_area_reg *area_reg)
231 {
232 	int ret;
233 
234 	if (area_reg->flags & ~IO_ZCRX_AREA_SUPPORTED_FLAGS)
235 		return -EINVAL;
236 	if (area_reg->rq_area_token)
237 		return -EINVAL;
238 	if (area_reg->__resv2[0] || area_reg->__resv2[1])
239 		return -EINVAL;
240 
241 	ret = io_validate_user_buf_range(area_reg->addr, area_reg->len);
242 	if (ret)
243 		return ret;
244 	if (area_reg->addr & ~PAGE_MASK || area_reg->len & ~PAGE_MASK)
245 		return -EINVAL;
246 
247 	if (area_reg->flags & IORING_ZCRX_AREA_DMABUF)
248 		return io_import_dmabuf(ifq, mem, area_reg);
249 	return io_import_umem(ifq, mem, area_reg);
250 }
251 
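/*
 * Area DMA mapping state is protected by ->pp_lock: mapping is driven from
 * the page pool ->init callback, unmapping from area teardown and the memory
 * provider ->uninstall callback.
 */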
252 static void io_zcrx_unmap_area(struct io_zcrx_ifq *ifq,
253 				struct io_zcrx_area *area)
254 {
255 	int i;
256 
257 	guard(mutex)(&ifq->pp_lock);
258 	if (!area->is_mapped)
259 		return;
260 	area->is_mapped = false;
261 
262 	for (i = 0; i < area->nia.num_niovs; i++)
263 		net_mp_niov_set_dma_addr(&area->nia.niovs[i], 0);
264 
265 	if (area->mem.is_dmabuf) {
266 		io_release_dmabuf(&area->mem);
267 	} else {
268 		dma_unmap_sgtable(ifq->dev, &area->mem.page_sg_table,
269 				  DMA_FROM_DEVICE, IO_DMA_ATTR);
270 	}
271 }
272 
273 static int io_zcrx_map_area(struct io_zcrx_ifq *ifq, struct io_zcrx_area *area)
274 {
275 	int ret;
276 
277 	guard(mutex)(&ifq->pp_lock);
278 	if (area->is_mapped)
279 		return 0;
280 
281 	if (!area->mem.is_dmabuf) {
282 		ret = dma_map_sgtable(ifq->dev, &area->mem.page_sg_table,
283 				      DMA_FROM_DEVICE, IO_DMA_ATTR);
284 		if (ret < 0)
285 			return ret;
286 	}
287 
288 	ret = io_populate_area_dma(ifq, area);
289 	if (ret == 0)
290 		area->is_mapped = true;
291 	return ret;
292 }
293 
294 static void io_zcrx_sync_for_device(struct page_pool *pool,
295 				    struct net_iov *niov)
296 {
297 #if defined(CONFIG_HAS_DMA) && defined(CONFIG_DMA_NEED_SYNC)
298 	dma_addr_t dma_addr;
299 
300 	unsigned niov_size;
301 
302 	if (!dma_dev_need_sync(pool->p.dev))
303 		return;
304 
305 	niov_size = 1U << io_pp_to_ifq(pool)->niov_shift;
306 	dma_addr = page_pool_get_dma_addr_netmem(net_iov_to_netmem(niov));
307 	__dma_sync_single_for_device(pool->p.dev, dma_addr + pool->p.offset,
308 				     niov_size, pool->p.dma_dir);
309 #endif
310 }
311 
312 #define IO_RQ_MAX_ENTRIES		32768
313 
314 #define IO_SKBS_PER_CALL_LIMIT	20
315 
316 struct io_zcrx_args {
317 	struct io_kiocb		*req;
318 	struct io_zcrx_ifq	*ifq;
319 	struct socket		*sock;
320 	unsigned		nr_skbs;
321 };
322 
323 static const struct memory_provider_ops io_uring_pp_zc_ops;
324 
325 static inline atomic_t *io_get_user_counter(struct net_iov *niov)
326 {
327 	struct io_zcrx_area *area = io_zcrx_iov_to_area(niov);
328 
329 	return &area->user_refs[net_iov_idx(niov)];
330 }
331 
332 static bool io_zcrx_put_niov_uref(struct net_iov *niov)
333 {
334 	atomic_t *uref = io_get_user_counter(niov);
335 
336 	if (unlikely(!atomic_read(uref)))
337 		return false;
338 	atomic_dec(uref);
339 	return true;
340 }
341 
342 static void io_zcrx_get_niov_uref(struct net_iov *niov)
343 {
344 	atomic_inc(io_get_user_counter(niov));
345 }
346 
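/*
 * Allocate the refill ring region: a struct io_uring header (head/tail)
 * followed by a cacheline-aligned array of rq_entries rqes, mmap'able at a
 * per-ifq offset derived from the zcrx id.
 */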
347 static int io_allocate_rbuf_ring(struct io_zcrx_ifq *ifq,
348 				 struct io_uring_zcrx_ifq_reg *reg,
349 				 struct io_uring_region_desc *rd,
350 				 u32 id)
351 {
352 	u64 mmap_offset;
353 	size_t off, size;
354 	void *ptr;
355 	int ret;
356 
357 	off = ALIGN(sizeof(struct io_uring), L1_CACHE_BYTES);
358 	size = off + sizeof(struct io_uring_zcrx_rqe) * reg->rq_entries;
359 	if (size > rd->size)
360 		return -EINVAL;
361 
362 	mmap_offset = IORING_MAP_OFF_ZCRX_REGION;
363 	mmap_offset += id << IORING_OFF_PBUF_SHIFT;
364 
365 	ret = io_create_region(ifq->ctx, &ifq->region, rd, mmap_offset);
366 	if (ret < 0)
367 		return ret;
368 
369 	ptr = io_region_get_ptr(&ifq->region);
370 	ifq->rq_ring = (struct io_uring *)ptr;
371 	ifq->rqes = (struct io_uring_zcrx_rqe *)(ptr + off);
372 
373 	reg->offsets.head = offsetof(struct io_uring, head);
374 	reg->offsets.tail = offsetof(struct io_uring, tail);
375 	reg->offsets.rqes = off;
376 	return 0;
377 }
378 
379 static void io_free_rbuf_ring(struct io_zcrx_ifq *ifq)
380 {
381 	io_free_region(ifq->ctx, &ifq->region);
382 	ifq->rq_ring = NULL;
383 	ifq->rqes = NULL;
384 }
385 
386 static void io_zcrx_free_area(struct io_zcrx_area *area)
387 {
388 	io_zcrx_unmap_area(area->ifq, area);
389 	io_release_area_mem(&area->mem);
390 
391 	if (area->mem.account_pages)
392 		io_unaccount_mem(area->ifq->ctx, area->mem.account_pages);
393 
394 	kvfree(area->freelist);
395 	kvfree(area->nia.niovs);
396 	kvfree(area->user_refs);
397 	kfree(area);
398 }
399 
400 static int io_zcrx_append_area(struct io_zcrx_ifq *ifq,
401 				struct io_zcrx_area *area)
402 {
403 	if (ifq->area)
404 		return -EINVAL;
405 	ifq->area = area;
406 	return 0;
407 }
408 
409 static int io_zcrx_create_area(struct io_zcrx_ifq *ifq,
410 			       struct io_uring_zcrx_area_reg *area_reg)
411 {
412 	struct io_zcrx_area *area;
413 	unsigned nr_iovs;
414 	int i, ret;
415 
416 	ret = -ENOMEM;
417 	area = kzalloc(sizeof(*area), GFP_KERNEL);
418 	if (!area)
419 		goto err;
420 	area->ifq = ifq;
421 
422 	ret = io_import_area(ifq, &area->mem, area_reg);
423 	if (ret)
424 		goto err;
425 
426 	ifq->niov_shift = PAGE_SHIFT;
427 	nr_iovs = area->mem.size >> ifq->niov_shift;
428 	area->nia.num_niovs = nr_iovs;
429 
430 	ret = -ENOMEM;
431 	area->nia.niovs = kvmalloc_array(nr_iovs, sizeof(area->nia.niovs[0]),
432 					 GFP_KERNEL_ACCOUNT | __GFP_ZERO);
433 	if (!area->nia.niovs)
434 		goto err;
435 
436 	area->freelist = kvmalloc_array(nr_iovs, sizeof(area->freelist[0]),
437 					GFP_KERNEL_ACCOUNT | __GFP_ZERO);
438 	if (!area->freelist)
439 		goto err;
440 
441 	area->user_refs = kvmalloc_array(nr_iovs, sizeof(area->user_refs[0]),
442 					GFP_KERNEL_ACCOUNT | __GFP_ZERO);
443 	if (!area->user_refs)
444 		goto err;
445 
446 	for (i = 0; i < nr_iovs; i++) {
447 		struct net_iov *niov = &area->nia.niovs[i];
448 
449 		niov->owner = &area->nia;
450 		area->freelist[i] = i;
451 		atomic_set(&area->user_refs[i], 0);
452 		niov->type = NET_IOV_IOURING;
453 	}
454 
455 	area->free_count = nr_iovs;
456 	/* we're only supporting one area per ifq for now */
457 	area->area_id = 0;
458 	area_reg->rq_area_token = (u64)area->area_id << IORING_ZCRX_AREA_SHIFT;
459 	spin_lock_init(&area->freelist_lock);
460 
461 	ret = io_zcrx_append_area(ifq, area);
462 	if (!ret)
463 		return 0;
464 err:
465 	if (area)
466 		io_zcrx_free_area(area);
467 	return ret;
468 }
469 
470 static struct io_zcrx_ifq *io_zcrx_ifq_alloc(struct io_ring_ctx *ctx)
471 {
472 	struct io_zcrx_ifq *ifq;
473 
474 	ifq = kzalloc(sizeof(*ifq), GFP_KERNEL);
475 	if (!ifq)
476 		return NULL;
477 
478 	ifq->if_rxq = -1;
479 	ifq->ctx = ctx;
480 	spin_lock_init(&ifq->rq_lock);
481 	mutex_init(&ifq->pp_lock);
482 	return ifq;
483 }
484 
485 static void io_zcrx_drop_netdev(struct io_zcrx_ifq *ifq)
486 {
487 	guard(mutex)(&ifq->pp_lock);
488 
489 	if (!ifq->netdev)
490 		return;
491 	netdev_put(ifq->netdev, &ifq->netdev_tracker);
492 	ifq->netdev = NULL;
493 }
494 
495 static void io_close_queue(struct io_zcrx_ifq *ifq)
496 {
497 	struct net_device *netdev;
498 	netdevice_tracker netdev_tracker;
499 	struct pp_memory_provider_params p = {
500 		.mp_ops = &io_uring_pp_zc_ops,
501 		.mp_priv = ifq,
502 	};
503 
504 	if (ifq->if_rxq == -1)
505 		return;
506 
507 	scoped_guard(mutex, &ifq->pp_lock) {
508 		netdev = ifq->netdev;
509 		netdev_tracker = ifq->netdev_tracker;
510 		ifq->netdev = NULL;
511 	}
512 
513 	if (netdev) {
514 		net_mp_close_rxq(netdev, ifq->if_rxq, &p);
515 		netdev_put(netdev, &netdev_tracker);
516 	}
517 	ifq->if_rxq = -1;
518 }
519 
520 static void io_zcrx_ifq_free(struct io_zcrx_ifq *ifq)
521 {
522 	io_close_queue(ifq);
523 
524 	if (ifq->area)
525 		io_zcrx_free_area(ifq->area);
526 	if (ifq->dev)
527 		put_device(ifq->dev);
528 
529 	io_free_rbuf_ring(ifq);
530 	mutex_destroy(&ifq->pp_lock);
531 	kfree(ifq);
532 }
533 
534 struct io_mapped_region *io_zcrx_get_region(struct io_ring_ctx *ctx,
535 					    unsigned int id)
536 {
537 	struct io_zcrx_ifq *ifq = xa_load(&ctx->zcrx_ctxs, id);
538 
539 	lockdep_assert_held(&ctx->mmap_lock);
540 
541 	return ifq ? &ifq->region : NULL;
542 }
543 
544 int io_register_zcrx_ifq(struct io_ring_ctx *ctx,
545 			  struct io_uring_zcrx_ifq_reg __user *arg)
546 {
547 	struct pp_memory_provider_params mp_param = {};
548 	struct io_uring_zcrx_area_reg area;
549 	struct io_uring_zcrx_ifq_reg reg;
550 	struct io_uring_region_desc rd;
551 	struct io_zcrx_ifq *ifq;
552 	int ret;
553 	u32 id;
554 
555 	/*
556 	 * CAP_NET_ADMIN is required: 1. this allocates an interface queue,
557 	 * and 2. it can observe data destined for sockets of other tasks.
558 	 */
559 	if (!capable(CAP_NET_ADMIN))
560 		return -EPERM;
561 
562 	/* mandatory io_uring features for zc rx */
563 	if (!(ctx->flags & IORING_SETUP_DEFER_TASKRUN))
564 		return -EINVAL;
565 	if (!(ctx->flags & (IORING_SETUP_CQE32|IORING_SETUP_CQE_MIXED)))
566 		return -EINVAL;
567 	if (copy_from_user(&reg, arg, sizeof(reg)))
568 		return -EFAULT;
569 	if (copy_from_user(&rd, u64_to_user_ptr(reg.region_ptr), sizeof(rd)))
570 		return -EFAULT;
571 	if (!mem_is_zero(&reg.__resv, sizeof(reg.__resv)) ||
572 	    reg.__resv2 || reg.zcrx_id)
573 		return -EINVAL;
574 	if (reg.if_rxq == -1 || !reg.rq_entries || reg.flags)
575 		return -EINVAL;
576 	if (reg.rq_entries > IO_RQ_MAX_ENTRIES) {
577 		if (!(ctx->flags & IORING_SETUP_CLAMP))
578 			return -EINVAL;
579 		reg.rq_entries = IO_RQ_MAX_ENTRIES;
580 	}
581 	reg.rq_entries = roundup_pow_of_two(reg.rq_entries);
582 
583 	if (copy_from_user(&area, u64_to_user_ptr(reg.area_ptr), sizeof(area)))
584 		return -EFAULT;
585 
586 	ifq = io_zcrx_ifq_alloc(ctx);
587 	if (!ifq)
588 		return -ENOMEM;
589 	ifq->rq_entries = reg.rq_entries;
590 
591 	scoped_guard(mutex, &ctx->mmap_lock) {
592 		/* preallocate id */
593 		ret = xa_alloc(&ctx->zcrx_ctxs, &id, NULL, xa_limit_31b, GFP_KERNEL);
594 		if (ret)
595 			goto ifq_free;
596 	}
597 
598 	ret = io_allocate_rbuf_ring(ifq, &reg, &rd, id);
599 	if (ret)
600 		goto err;
601 
602 	ifq->netdev = netdev_get_by_index(current->nsproxy->net_ns, reg.if_idx,
603 					  &ifq->netdev_tracker, GFP_KERNEL);
604 	if (!ifq->netdev) {
605 		ret = -ENODEV;
606 		goto err;
607 	}
608 
609 	ifq->dev = netdev_queue_get_dma_dev(ifq->netdev, reg.if_rxq);
610 	if (!ifq->dev) {
611 		ret = -EOPNOTSUPP;
612 		goto err;
613 	}
614 	get_device(ifq->dev);
615 
616 	ret = io_zcrx_create_area(ifq, &area);
617 	if (ret)
618 		goto err;
619 
620 	mp_param.mp_ops = &io_uring_pp_zc_ops;
621 	mp_param.mp_priv = ifq;
622 	ret = net_mp_open_rxq(ifq->netdev, reg.if_rxq, &mp_param);
623 	if (ret)
624 		goto err;
625 	ifq->if_rxq = reg.if_rxq;
626 
627 	reg.zcrx_id = id;
628 
629 	scoped_guard(mutex, &ctx->mmap_lock) {
630 		/* publish ifq */
631 		ret = -ENOMEM;
632 		if (xa_store(&ctx->zcrx_ctxs, id, ifq, GFP_KERNEL))
633 			goto err;
634 	}
635 
636 	if (copy_to_user(arg, &reg, sizeof(reg)) ||
637 	    copy_to_user(u64_to_user_ptr(reg.region_ptr), &rd, sizeof(rd)) ||
638 	    copy_to_user(u64_to_user_ptr(reg.area_ptr), &area, sizeof(area))) {
639 		ret = -EFAULT;
640 		goto err;
641 	}
642 	return 0;
643 err:
644 	scoped_guard(mutex, &ctx->mmap_lock)
645 		xa_erase(&ctx->zcrx_ctxs, id);
646 ifq_free:
647 	io_zcrx_ifq_free(ifq);
648 	return ret;
649 }
650 
651 void io_unregister_zcrx_ifqs(struct io_ring_ctx *ctx)
652 {
653 	struct io_zcrx_ifq *ifq;
654 
655 	lockdep_assert_held(&ctx->uring_lock);
656 
657 	while (1) {
658 		scoped_guard(mutex, &ctx->mmap_lock) {
659 			unsigned long id = 0;
660 
661 			ifq = xa_find(&ctx->zcrx_ctxs, &id, ULONG_MAX, XA_PRESENT);
662 			if (ifq)
663 				xa_erase(&ctx->zcrx_ctxs, id);
664 		}
665 		if (!ifq)
666 			break;
667 		io_zcrx_ifq_free(ifq);
668 	}
669 
670 	xa_destroy(&ctx->zcrx_ctxs);
671 }
672 
673 static struct net_iov *__io_zcrx_get_free_niov(struct io_zcrx_area *area)
674 {
675 	unsigned niov_idx;
676 
677 	lockdep_assert_held(&area->freelist_lock);
678 
679 	niov_idx = area->freelist[--area->free_count];
680 	return &area->nia.niovs[niov_idx];
681 }
682 
683 static void io_zcrx_return_niov_freelist(struct net_iov *niov)
684 {
685 	struct io_zcrx_area *area = io_zcrx_iov_to_area(niov);
686 
687 	spin_lock_bh(&area->freelist_lock);
688 	area->freelist[area->free_count++] = net_iov_idx(niov);
689 	spin_unlock_bh(&area->freelist_lock);
690 }
691 
692 static void io_zcrx_return_niov(struct net_iov *niov)
693 {
694 	netmem_ref netmem = net_iov_to_netmem(niov);
695 
696 	if (!niov->pp) {
697 		/* niov allocated by the copy fallback, not owned by a page pool */
698 		io_zcrx_return_niov_freelist(niov);
699 		return;
700 	}
701 	page_pool_put_unrefed_netmem(niov->pp, netmem, -1, false);
702 }
703 
704 static void io_zcrx_scrub(struct io_zcrx_ifq *ifq)
705 {
706 	struct io_zcrx_area *area = ifq->area;
707 	int i;
708 
709 	if (!area)
710 		return;
711 
712 	/* Reclaim all buffers handed out to user space. */
713 	for (i = 0; i < area->nia.num_niovs; i++) {
714 		struct net_iov *niov = &area->nia.niovs[i];
715 		int nr;
716 
717 		if (!atomic_read(io_get_user_counter(niov)))
718 			continue;
719 		nr = atomic_xchg(io_get_user_counter(niov), 0);
720 		if (nr && !page_pool_unref_netmem(net_iov_to_netmem(niov), nr))
721 			io_zcrx_return_niov(niov);
722 	}
723 }
724 
725 void io_shutdown_zcrx_ifqs(struct io_ring_ctx *ctx)
726 {
727 	struct io_zcrx_ifq *ifq;
728 	unsigned long index;
729 
730 	lockdep_assert_held(&ctx->uring_lock);
731 
732 	xa_for_each(&ctx->zcrx_ctxs, index, ifq) {
733 		io_zcrx_scrub(ifq);
734 		io_close_queue(ifq);
735 	}
736 }
737 
738 static inline u32 io_zcrx_rqring_entries(struct io_zcrx_ifq *ifq)
739 {
740 	u32 entries;
741 
742 	entries = smp_load_acquire(&ifq->rq_ring->tail) - ifq->cached_rq_head;
743 	return min(entries, ifq->rq_entries);
744 }
745 
746 static struct io_uring_zcrx_rqe *io_zcrx_get_rqe(struct io_zcrx_ifq *ifq,
747 						 unsigned mask)
748 {
749 	unsigned int idx = ifq->cached_rq_head++ & mask;
750 
751 	return &ifq->rqes[idx];
752 }
753 
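/*
 * Validate a refill ring entry. The 64-bit offset encodes the area id in the
 * top bits (IORING_ZCRX_AREA_SHIFT) and the byte offset within the area in
 * the rest; only a single area (id 0) is supported for now.
 */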
754 static inline bool io_parse_rqe(struct io_uring_zcrx_rqe *rqe,
755 				struct io_zcrx_ifq *ifq,
756 				struct net_iov **ret_niov)
757 {
758 	unsigned niov_idx, area_idx;
759 	struct io_zcrx_area *area;
760 
761 	area_idx = rqe->off >> IORING_ZCRX_AREA_SHIFT;
762 	niov_idx = (rqe->off & ~IORING_ZCRX_AREA_MASK) >> ifq->niov_shift;
763 
764 	if (unlikely(rqe->__pad || area_idx))
765 		return false;
766 	area = ifq->area;
767 
768 	if (unlikely(niov_idx >= area->nia.num_niovs))
769 		return false;
770 	niov_idx = array_index_nospec(niov_idx, area->nia.num_niovs);
771 
772 	*ret_niov = &area->nia.niovs[niov_idx];
773 	return true;
774 }
775 
776 static void io_zcrx_ring_refill(struct page_pool *pp,
777 				struct io_zcrx_ifq *ifq)
778 {
779 	unsigned int mask = ifq->rq_entries - 1;
780 	unsigned int entries;
781 
782 	guard(spinlock_bh)(&ifq->rq_lock);
783 
784 	entries = io_zcrx_rqring_entries(ifq);
785 	entries = min_t(unsigned, entries, PP_ALLOC_CACHE_REFILL);
786 	if (unlikely(!entries))
787 		return;
788 
789 	do {
790 		struct io_uring_zcrx_rqe *rqe = io_zcrx_get_rqe(ifq, mask);
791 		struct net_iov *niov;
792 		netmem_ref netmem;
793 
794 		if (!io_parse_rqe(rqe, ifq, &niov))
795 			continue;
796 		if (!io_zcrx_put_niov_uref(niov))
797 			continue;
798 
799 		netmem = net_iov_to_netmem(niov);
800 		if (!page_pool_unref_and_test(netmem))
801 			continue;
802 
803 		if (unlikely(niov->pp != pp)) {
804 			io_zcrx_return_niov(niov);
805 			continue;
806 		}
807 
808 		io_zcrx_sync_for_device(pp, niov);
809 		net_mp_netmem_place_in_cache(pp, netmem);
810 	} while (--entries);
811 
812 	smp_store_release(&ifq->rq_ring->head, ifq->cached_rq_head);
813 }
814 
815 static void io_zcrx_refill_slow(struct page_pool *pp, struct io_zcrx_ifq *ifq)
816 {
817 	struct io_zcrx_area *area = ifq->area;
818 
819 	spin_lock_bh(&area->freelist_lock);
820 	while (area->free_count && pp->alloc.count < PP_ALLOC_CACHE_REFILL) {
821 		struct net_iov *niov = __io_zcrx_get_free_niov(area);
822 		netmem_ref netmem = net_iov_to_netmem(niov);
823 
824 		net_mp_niov_set_page_pool(pp, niov);
825 		io_zcrx_sync_for_device(pp, niov);
826 		net_mp_netmem_place_in_cache(pp, netmem);
827 	}
828 	spin_unlock_bh(&area->freelist_lock);
829 }
830 
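/*
 * Memory provider allocation hook: refill the page pool cache from the user
 * refill ring first and fall back to the area freelist if the ring is empty.
 */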
831 static netmem_ref io_pp_zc_alloc_netmems(struct page_pool *pp, gfp_t gfp)
832 {
833 	struct io_zcrx_ifq *ifq = io_pp_to_ifq(pp);
834 
835 	/* pp should already be ensuring that */
836 	if (unlikely(pp->alloc.count))
837 		goto out_return;
838 
839 	io_zcrx_ring_refill(pp, ifq);
840 	if (likely(pp->alloc.count))
841 		goto out_return;
842 
843 	io_zcrx_refill_slow(pp, ifq);
844 	if (!pp->alloc.count)
845 		return 0;
846 out_return:
847 	return pp->alloc.cache[--pp->alloc.count];
848 }
849 
850 static bool io_pp_zc_release_netmem(struct page_pool *pp, netmem_ref netmem)
851 {
852 	struct net_iov *niov;
853 
854 	if (WARN_ON_ONCE(!netmem_is_net_iov(netmem)))
855 		return false;
856 
857 	niov = netmem_to_net_iov(netmem);
858 	net_mp_niov_clear_page_pool(niov);
859 	io_zcrx_return_niov_freelist(niov);
860 	return false;
861 }
862 
863 static int io_pp_zc_init(struct page_pool *pp)
864 {
865 	struct io_zcrx_ifq *ifq = io_pp_to_ifq(pp);
866 	int ret;
867 
868 	if (WARN_ON_ONCE(!ifq))
869 		return -EINVAL;
870 	if (WARN_ON_ONCE(ifq->dev != pp->p.dev))
871 		return -EINVAL;
872 	if (WARN_ON_ONCE(!pp->dma_map))
873 		return -EOPNOTSUPP;
874 	if (pp->p.order + PAGE_SHIFT != ifq->niov_shift)
875 		return -EINVAL;
876 	if (pp->p.dma_dir != DMA_FROM_DEVICE)
877 		return -EOPNOTSUPP;
878 
879 	ret = io_zcrx_map_area(ifq, ifq->area);
880 	if (ret)
881 		return ret;
882 
883 	percpu_ref_get(&ifq->ctx->refs);
884 	return 0;
885 }
886 
887 static void io_pp_zc_destroy(struct page_pool *pp)
888 {
889 	struct io_zcrx_ifq *ifq = io_pp_to_ifq(pp);
890 
891 	percpu_ref_put(&ifq->ctx->refs);
892 }
893 
894 static int io_pp_nl_fill(void *mp_priv, struct sk_buff *rsp,
895 			 struct netdev_rx_queue *rxq)
896 {
897 	struct nlattr *nest;
898 	int type;
899 
900 	type = rxq ? NETDEV_A_QUEUE_IO_URING : NETDEV_A_PAGE_POOL_IO_URING;
901 	nest = nla_nest_start(rsp, type);
902 	if (!nest)
903 		return -EMSGSIZE;
904 	nla_nest_end(rsp, nest);
905 
906 	return 0;
907 }
908 
909 static void io_pp_uninstall(void *mp_priv, struct netdev_rx_queue *rxq)
910 {
911 	struct pp_memory_provider_params *p = &rxq->mp_params;
912 	struct io_zcrx_ifq *ifq = mp_priv;
913 
914 	io_zcrx_drop_netdev(ifq);
915 	if (ifq->area)
916 		io_zcrx_unmap_area(ifq, ifq->area);
917 
918 	p->mp_ops = NULL;
919 	p->mp_priv = NULL;
920 }
921 
922 static const struct memory_provider_ops io_uring_pp_zc_ops = {
923 	.alloc_netmems		= io_pp_zc_alloc_netmems,
924 	.release_netmem		= io_pp_zc_release_netmem,
925 	.init			= io_pp_zc_init,
926 	.destroy		= io_pp_zc_destroy,
927 	.nl_fill		= io_pp_nl_fill,
928 	.uninstall		= io_pp_uninstall,
929 };
930 
931 #define IO_ZCRX_MAX_SYS_REFILL_BUFS		(1 << 16)
932 #define IO_ZCRX_SYS_REFILL_BATCH		32
933 
934 static void io_return_buffers(struct io_zcrx_ifq *ifq,
935 			      struct io_uring_zcrx_rqe *rqes, unsigned nr)
936 {
937 	int i;
938 
939 	for (i = 0; i < nr; i++) {
940 		struct net_iov *niov;
941 		netmem_ref netmem;
942 
943 		if (!io_parse_rqe(&rqes[i], ifq, &niov))
944 			continue;
945 
946 		scoped_guard(spinlock_bh, &ifq->rq_lock) {
947 			if (!io_zcrx_put_niov_uref(niov))
948 				continue;
949 		}
950 
951 		netmem = net_iov_to_netmem(niov);
952 		if (!page_pool_unref_and_test(netmem))
953 			continue;
954 		io_zcrx_return_niov(niov);
955 	}
956 }
957 
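/*
 * Synchronous buffer return: the user passes an array of rqes instead of
 * using the refill ring. Entries are processed in batches, and the number of
 * entries consumed is reported back to the caller.
 */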
958 int io_zcrx_return_bufs(struct io_ring_ctx *ctx,
959 			void __user *arg, unsigned nr_arg)
960 {
961 	struct io_uring_zcrx_rqe rqes[IO_ZCRX_SYS_REFILL_BATCH];
962 	struct io_uring_zcrx_rqe __user *user_rqes;
963 	struct io_uring_zcrx_sync_refill zr;
964 	struct io_zcrx_ifq *ifq;
965 	unsigned nr, i;
966 
967 	if (nr_arg)
968 		return -EINVAL;
969 	if (copy_from_user(&zr, arg, sizeof(zr)))
970 		return -EFAULT;
971 	if (!zr.nr_entries || zr.nr_entries > IO_ZCRX_MAX_SYS_REFILL_BUFS)
972 		return -EINVAL;
973 	if (!mem_is_zero(&zr.__resv, sizeof(zr.__resv)))
974 		return -EINVAL;
975 
976 	ifq = xa_load(&ctx->zcrx_ctxs, zr.zcrx_id);
977 	if (!ifq)
978 		return -EINVAL;
979 	nr = zr.nr_entries;
980 	user_rqes = u64_to_user_ptr(zr.rqes);
981 
982 	for (i = 0; i < nr;) {
983 		unsigned batch = min(nr - i, IO_ZCRX_SYS_REFILL_BATCH);
984 		size_t size = batch * sizeof(rqes[0]);
985 
986 		if (copy_from_user(rqes, user_rqes + i, size))
987 			return i ? i : -EFAULT;
988 		io_return_buffers(ifq, rqes, batch);
989 
990 		i += batch;
991 
992 		if (fatal_signal_pending(current))
993 			return i;
994 		cond_resched();
995 	}
996 	return nr;
997 }
998 
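/*
 * Post a zero-copy completion: a CQE carrying the length and
 * IORING_CQE_F_MORE, followed by a struct io_uring_zcrx_cqe whose offset
 * token (area id | offset) tells user space where the data landed.
 */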
999 static bool io_zcrx_queue_cqe(struct io_kiocb *req, struct net_iov *niov,
1000 			      struct io_zcrx_ifq *ifq, int off, int len)
1001 {
1002 	struct io_ring_ctx *ctx = req->ctx;
1003 	struct io_uring_zcrx_cqe *rcqe;
1004 	struct io_zcrx_area *area;
1005 	struct io_uring_cqe *cqe;
1006 	u64 offset;
1007 
1008 	if (!io_defer_get_uncommited_cqe(ctx, &cqe))
1009 		return false;
1010 
1011 	cqe->user_data = req->cqe.user_data;
1012 	cqe->res = len;
1013 	cqe->flags = IORING_CQE_F_MORE;
1014 	if (ctx->flags & IORING_SETUP_CQE_MIXED)
1015 		cqe->flags |= IORING_CQE_F_32;
1016 
1017 	area = io_zcrx_iov_to_area(niov);
1018 	offset = off + (net_iov_idx(niov) << ifq->niov_shift);
1019 	rcqe = (struct io_uring_zcrx_cqe *)(cqe + 1);
1020 	rcqe->off = offset + ((u64)area->area_id << IORING_ZCRX_AREA_SHIFT);
1021 	rcqe->__pad = 0;
1022 	return true;
1023 }
1024 
1025 static struct net_iov *io_alloc_fallback_niov(struct io_zcrx_ifq *ifq)
1026 {
1027 	struct io_zcrx_area *area = ifq->area;
1028 	struct net_iov *niov = NULL;
1029 
1030 	if (area->mem.is_dmabuf)
1031 		return NULL;
1032 
1033 	spin_lock_bh(&area->freelist_lock);
1034 	if (area->free_count)
1035 		niov = __io_zcrx_get_free_niov(area);
1036 	spin_unlock_bh(&area->freelist_lock);
1037 
1038 	if (niov)
1039 		page_pool_fragment_netmem(net_iov_to_netmem(niov), 1);
1040 	return niov;
1041 }
1042 
1043 struct io_copy_cache {
1044 	struct page		*page;
1045 	unsigned long		offset;
1046 	size_t			size;
1047 };
1048 
1049 static ssize_t io_copy_page(struct io_copy_cache *cc, struct page *src_page,
1050 			    unsigned int src_offset, size_t len)
1051 {
1052 	size_t copied = 0;
1053 
1054 	len = min(len, cc->size);
1055 
1056 	while (len) {
1057 		void *src_addr, *dst_addr;
1058 		struct page *dst_page = cc->page;
1059 		unsigned dst_offset = cc->offset;
1060 		size_t n = len;
1061 
1062 		if (folio_test_partial_kmap(page_folio(dst_page)) ||
1063 		    folio_test_partial_kmap(page_folio(src_page))) {
1064 			dst_page += dst_offset / PAGE_SIZE;
1065 			dst_offset = offset_in_page(dst_offset);
1066 			src_page += src_offset / PAGE_SIZE;
1067 			src_offset = offset_in_page(src_offset);
1068 			n = min(PAGE_SIZE - src_offset, PAGE_SIZE - dst_offset);
1069 			n = min(n, len);
1070 		}
1071 
1072 		dst_addr = kmap_local_page(dst_page) + dst_offset;
1073 		src_addr = kmap_local_page(src_page) + src_offset;
1074 
1075 		memcpy(dst_addr, src_addr, n);
1076 
1077 		kunmap_local(src_addr);
1078 		kunmap_local(dst_addr);
1079 
1080 		cc->size -= n;
1081 		cc->offset += n;
1082 		len -= n;
1083 		copied += n;
1084 	}
1085 	return copied;
1086 }
1087 
1088 static ssize_t io_zcrx_copy_chunk(struct io_kiocb *req, struct io_zcrx_ifq *ifq,
1089 				  struct page *src_page, unsigned int src_offset,
1090 				  size_t len)
1091 {
1092 	size_t copied = 0;
1093 	int ret = 0;
1094 
1095 	while (len) {
1096 		struct io_copy_cache cc;
1097 		struct net_iov *niov;
1098 		size_t n;
1099 
1100 		niov = io_alloc_fallback_niov(ifq);
1101 		if (!niov) {
1102 			ret = -ENOMEM;
1103 			break;
1104 		}
1105 
1106 		cc.page = io_zcrx_iov_page(niov);
1107 		cc.offset = 0;
1108 		cc.size = PAGE_SIZE;
1109 
1110 		n = io_copy_page(&cc, src_page, src_offset, len);
1111 
1112 		if (!io_zcrx_queue_cqe(req, niov, ifq, 0, n)) {
1113 			io_zcrx_return_niov(niov);
1114 			ret = -ENOSPC;
1115 			break;
1116 		}
1117 
1118 		io_zcrx_get_niov_uref(niov);
1119 		src_offset += n;
1120 		len -= n;
1121 		copied += n;
1122 	}
1123 
1124 	return copied ? copied : ret;
1125 }
1126 
1127 static int io_zcrx_copy_frag(struct io_kiocb *req, struct io_zcrx_ifq *ifq,
1128 			     const skb_frag_t *frag, int off, int len)
1129 {
1130 	struct page *page = skb_frag_page(frag);
1131 
1132 	return io_zcrx_copy_chunk(req, ifq, page, off + skb_frag_off(frag), len);
1133 }
1134 
1135 static int io_zcrx_recv_frag(struct io_kiocb *req, struct io_zcrx_ifq *ifq,
1136 			     const skb_frag_t *frag, int off, int len)
1137 {
1138 	struct net_iov *niov;
1139 
1140 	if (unlikely(!skb_frag_is_net_iov(frag)))
1141 		return io_zcrx_copy_frag(req, ifq, frag, off, len);
1142 
1143 	niov = netmem_to_net_iov(frag->netmem);
1144 	if (!niov->pp || niov->pp->mp_ops != &io_uring_pp_zc_ops ||
1145 	    io_pp_to_ifq(niov->pp) != ifq)
1146 		return -EFAULT;
1147 
1148 	if (!io_zcrx_queue_cqe(req, niov, ifq, off + skb_frag_off(frag), len))
1149 		return -ENOSPC;
1150 
1151 	/*
1152 	 * Prevent the niov from being recycled while user space is accessing
1153 	 * it. This has to be done before grabbing a user reference.
1154 	 */
1155 	page_pool_ref_netmem(net_iov_to_netmem(niov));
1156 	io_zcrx_get_niov_uref(niov);
1157 	return len;
1158 }
1159 
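/*
 * tcp_read_sock() callback: walk the skb and deliver each chunk either
 * zero-copy (niov frags owned by this ifq) or via the copy fallback.
 */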
1160 static int
1161 io_zcrx_recv_skb(read_descriptor_t *desc, struct sk_buff *skb,
1162 		 unsigned int offset, size_t len)
1163 {
1164 	struct io_zcrx_args *args = desc->arg.data;
1165 	struct io_zcrx_ifq *ifq = args->ifq;
1166 	struct io_kiocb *req = args->req;
1167 	struct sk_buff *frag_iter;
1168 	unsigned start, start_off = offset;
1169 	int i, copy, end, off;
1170 	int ret = 0;
1171 
1172 	len = min_t(size_t, len, desc->count);
1173 	/*
1174 	 * __tcp_read_sock() always calls io_zcrx_recv_skb one last time, even
1175 	 * if desc->count is already 0. This is caused by the if (offset + 1 !=
1176 	 * skb->len) check. Return early in this case to break out of
1177 	 * __tcp_read_sock().
1178 	 */
1179 	if (!len)
1180 		return 0;
1181 	if (unlikely(args->nr_skbs++ > IO_SKBS_PER_CALL_LIMIT))
1182 		return -EAGAIN;
1183 
1184 	if (unlikely(offset < skb_headlen(skb))) {
1185 		ssize_t copied;
1186 		size_t to_copy;
1187 
1188 		to_copy = min_t(size_t, skb_headlen(skb) - offset, len);
1189 		copied = io_zcrx_copy_chunk(req, ifq, virt_to_page(skb->data),
1190 					    offset_in_page(skb->data) + offset,
1191 					    to_copy);
1192 		if (copied < 0) {
1193 			ret = copied;
1194 			goto out;
1195 		}
1196 		offset += copied;
1197 		len -= copied;
1198 		if (!len)
1199 			goto out;
1200 		if (offset != skb_headlen(skb))
1201 			goto out;
1202 	}
1203 
1204 	start = skb_headlen(skb);
1205 
1206 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1207 		const skb_frag_t *frag;
1208 
1209 		if (WARN_ON(start > offset + len))
1210 			return -EFAULT;
1211 
1212 		frag = &skb_shinfo(skb)->frags[i];
1213 		end = start + skb_frag_size(frag);
1214 
1215 		if (offset < end) {
1216 			copy = end - offset;
1217 			if (copy > len)
1218 				copy = len;
1219 
1220 			off = offset - start;
1221 			ret = io_zcrx_recv_frag(req, ifq, frag, off, copy);
1222 			if (ret < 0)
1223 				goto out;
1224 
1225 			offset += ret;
1226 			len -= ret;
1227 			if (len == 0 || ret != copy)
1228 				goto out;
1229 		}
1230 		start = end;
1231 	}
1232 
1233 	skb_walk_frags(skb, frag_iter) {
1234 		if (WARN_ON(start > offset + len))
1235 			return -EFAULT;
1236 
1237 		end = start + frag_iter->len;
1238 		if (offset < end) {
1239 			copy = end - offset;
1240 			if (copy > len)
1241 				copy = len;
1242 
1243 			off = offset - start;
1244 			ret = io_zcrx_recv_skb(desc, frag_iter, off, copy);
1245 			if (ret < 0)
1246 				goto out;
1247 
1248 			offset += ret;
1249 			len -= ret;
1250 			if (len == 0 || ret != copy)
1251 				goto out;
1252 		}
1253 		start = end;
1254 	}
1255 
1256 out:
1257 	if (offset == start_off)
1258 		return ret;
1259 	desc->count -= (offset - start_off);
1260 	return offset - start_off;
1261 }
1262 
1263 static int io_zcrx_tcp_recvmsg(struct io_kiocb *req, struct io_zcrx_ifq *ifq,
1264 				struct sock *sk, int flags,
1265 				unsigned issue_flags, unsigned int *outlen)
1266 {
1267 	unsigned int len = *outlen;
1268 	struct io_zcrx_args args = {
1269 		.req = req,
1270 		.ifq = ifq,
1271 		.sock = sk->sk_socket,
1272 	};
1273 	read_descriptor_t rd_desc = {
1274 		.count = len ? len : UINT_MAX,
1275 		.arg.data = &args,
1276 	};
1277 	int ret;
1278 
1279 	lock_sock(sk);
1280 	ret = tcp_read_sock(sk, &rd_desc, io_zcrx_recv_skb);
1281 	if (len && ret > 0)
1282 		*outlen = len - ret;
1283 	if (ret <= 0) {
1284 		if (ret < 0 || sock_flag(sk, SOCK_DONE))
1285 			goto out;
1286 		if (sk->sk_err)
1287 			ret = sock_error(sk);
1288 		else if (sk->sk_shutdown & RCV_SHUTDOWN)
1289 			goto out;
1290 		else if (sk->sk_state == TCP_CLOSE)
1291 			ret = -ENOTCONN;
1292 		else
1293 			ret = -EAGAIN;
1294 	} else if (unlikely(args.nr_skbs > IO_SKBS_PER_CALL_LIMIT) &&
1295 		   (issue_flags & IO_URING_F_MULTISHOT)) {
1296 		ret = IOU_REQUEUE;
1297 	} else if (sock_flag(sk, SOCK_DONE)) {
1298 		/* Keep retrying until the read finally returns 0. */
1299 		if (issue_flags & IO_URING_F_MULTISHOT)
1300 			ret = IOU_REQUEUE;
1301 		else
1302 			ret = -EAGAIN;
1303 	}
1304 out:
1305 	release_sock(sk);
1306 	return ret;
1307 }
1308 
1309 int io_zcrx_recv(struct io_kiocb *req, struct io_zcrx_ifq *ifq,
1310 		 struct socket *sock, unsigned int flags,
1311 		 unsigned issue_flags, unsigned int *len)
1312 {
1313 	struct sock *sk = sock->sk;
1314 	const struct proto *prot = READ_ONCE(sk->sk_prot);
1315 
1316 	if (prot->recvmsg != tcp_recvmsg)
1317 		return -EPROTONOSUPPORT;
1318 
1319 	sock_rps_record_flow(sk);
1320 	return io_zcrx_tcp_recvmsg(req, ifq, sk, flags, issue_flags, len);
1321 }
1322