xref: /linux/io_uring/net.c (revision 811f35ff59b6f99ae272d6f5b96bc9e974f88196)
1 // SPDX-License-Identifier: GPL-2.0
2 #include <linux/kernel.h>
3 #include <linux/errno.h>
4 #include <linux/file.h>
5 #include <linux/slab.h>
6 #include <linux/net.h>
7 #include <linux/compat.h>
8 #include <net/compat.h>
9 #include <linux/io_uring.h>
10 
11 #include <uapi/linux/io_uring.h>
12 
13 #include "io_uring.h"
14 #include "kbuf.h"
15 #include "alloc_cache.h"
16 #include "net.h"
17 #include "notif.h"
18 #include "rsrc.h"
19 
20 #if defined(CONFIG_NET)
21 struct io_shutdown {
22 	struct file			*file;
23 	int				how;
24 };
25 
26 struct io_accept {
27 	struct file			*file;
28 	struct sockaddr __user		*addr;
29 	int __user			*addr_len;
30 	int				flags;
31 	u32				file_slot;
32 	unsigned long			nofile;
33 };
34 
35 struct io_socket {
36 	struct file			*file;
37 	int				domain;
38 	int				type;
39 	int				protocol;
40 	int				flags;
41 	u32				file_slot;
42 	unsigned long			nofile;
43 };
44 
45 struct io_connect {
46 	struct file			*file;
47 	struct sockaddr __user		*addr;
48 	int				addr_len;
49 	bool				in_progress;
50 };
51 
52 struct io_sr_msg {
53 	struct file			*file;
54 	union {
55 		struct compat_msghdr __user	*umsg_compat;
56 		struct user_msghdr __user	*umsg;
57 		void __user			*buf;
58 	};
59 	unsigned			len;
60 	unsigned			done_io;
61 	unsigned			msg_flags;
62 	u16				flags;
63 	/* initialised and used only by !msg send variants */
64 	u16				addr_len;
65 	u16				buf_group;
66 	void __user			*addr;
67 	/* used only for send zerocopy */
68 	struct io_kiocb 		*notif;
69 };
70 
71 static inline bool io_check_multishot(struct io_kiocb *req,
72 				      unsigned int issue_flags)
73 {
74 	/*
75 	 * When ->task_complete is set we only allow posting CQEs from the original
76 	 * task context. Usual request completions are handled in other generic
77 	 * paths, but multishot poll may decide to post extra CQEs.
78 	 */
79 	return !(issue_flags & IO_URING_F_IOWQ) ||
80 		!(issue_flags & IO_URING_F_MULTISHOT) ||
81 		!req->ctx->task_complete;
82 }
83 
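/*
 * IORING_OP_SHUTDOWN: prep checks that the SQE fields shutdown doesn't use
 * are zero and reads the 'how' argument from sqe->len. The issue side never
 * runs nonblocking; it returns -EAGAIN so the request is retried from io-wq
 * context, where it calls __sys_shutdown_sock().
 */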
84 int io_shutdown_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
85 {
86 	struct io_shutdown *shutdown = io_kiocb_to_cmd(req, struct io_shutdown);
87 
88 	if (unlikely(sqe->off || sqe->addr || sqe->rw_flags ||
89 		     sqe->buf_index || sqe->splice_fd_in))
90 		return -EINVAL;
91 
92 	shutdown->how = READ_ONCE(sqe->len);
93 	return 0;
94 }
95 
96 int io_shutdown(struct io_kiocb *req, unsigned int issue_flags)
97 {
98 	struct io_shutdown *shutdown = io_kiocb_to_cmd(req, struct io_shutdown);
99 	struct socket *sock;
100 	int ret;
101 
102 	if (issue_flags & IO_URING_F_NONBLOCK)
103 		return -EAGAIN;
104 
105 	sock = sock_from_file(req->file);
106 	if (unlikely(!sock))
107 		return -ENOTSOCK;
108 
109 	ret = __sys_shutdown_sock(sock, shutdown->how);
110 	io_req_set_res(req, ret, 0);
111 	return IOU_OK;
112 }
113 
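/*
 * Decide whether a partial transfer is worth retrying: only for MSG_WAITALL,
 * and only on stream/seqpacket sockets where a short send/recv can simply be
 * resumed. Datagram sockets complete with the short result instead.
 */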
114 static bool io_net_retry(struct socket *sock, int flags)
115 {
116 	if (!(flags & MSG_WAITALL))
117 		return false;
118 	return sock->type == SOCK_STREAM || sock->type == SOCK_SEQPACKET;
119 }
120 
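/*
 * Instead of freeing the async msghdr, park it in the per-ring netmsg_cache
 * for reuse by a later request. Only done when the ring is locked (i.e. not
 * an IO_URING_F_UNLOCKED issue), since the cache is protected by that lock.
 */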
121 static void io_netmsg_recycle(struct io_kiocb *req, unsigned int issue_flags)
122 {
123 	struct io_async_msghdr *hdr = req->async_data;
124 
125 	if (!req_has_async_data(req) || issue_flags & IO_URING_F_UNLOCKED)
126 		return;
127 
128 	/* Let normal cleanup path reap it if we fail adding to the cache */
129 	if (io_alloc_cache_put(&req->ctx->netmsg_cache, &hdr->cache)) {
130 		req->async_data = NULL;
131 		req->flags &= ~REQ_F_ASYNC_DATA;
132 	}
133 }
134 
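/*
 * Allocate async msghdr storage for a request: prefer an entry from
 * ctx->netmsg_cache when the ring lock is held, otherwise fall back to a
 * regular io_alloc_async_data() allocation. Returns NULL on failure.
 */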
135 static struct io_async_msghdr *io_msg_alloc_async(struct io_kiocb *req,
136 						  unsigned int issue_flags)
137 {
138 	struct io_ring_ctx *ctx = req->ctx;
139 	struct io_cache_entry *entry;
140 	struct io_async_msghdr *hdr;
141 
142 	if (!(issue_flags & IO_URING_F_UNLOCKED)) {
143 		entry = io_alloc_cache_get(&ctx->netmsg_cache);
144 		if (entry) {
145 			hdr = container_of(entry, struct io_async_msghdr, cache);
146 			hdr->free_iov = NULL;
147 			req->flags |= REQ_F_ASYNC_DATA;
148 			req->async_data = hdr;
149 			return hdr;
150 		}
151 	}
152 
153 	if (!io_alloc_async_data(req)) {
154 		hdr = req->async_data;
155 		hdr->free_iov = NULL;
156 		return hdr;
157 	}
158 	return NULL;
159 }
160 
161 static inline struct io_async_msghdr *io_msg_alloc_async_prep(struct io_kiocb *req)
162 {
163 	/* ->prep_async is always called from the submission context */
164 	return io_msg_alloc_async(req, 0);
165 }
166 
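/*
 * Stash the already-copied msghdr (and its iovec) into the request's async
 * data so the send/recv can be retried later. Returns -EAGAIN so the caller
 * knows to go async, or -ENOMEM if no async data could be allocated.
 */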
167 static int io_setup_async_msg(struct io_kiocb *req,
168 			      struct io_async_msghdr *kmsg,
169 			      unsigned int issue_flags)
170 {
171 	struct io_async_msghdr *async_msg;
172 
173 	if (req_has_async_data(req))
174 		return -EAGAIN;
175 	async_msg = io_msg_alloc_async(req, issue_flags);
176 	if (!async_msg) {
177 		kfree(kmsg->free_iov);
178 		return -ENOMEM;
179 	}
180 	req->flags |= REQ_F_NEED_CLEANUP;
181 	memcpy(async_msg, kmsg, sizeof(*kmsg));
182 	if (async_msg->msg.msg_name)
183 		async_msg->msg.msg_name = &async_msg->addr;
184 	/* if we were using fast_iov, point the iter at the new copy */
185 	if (!kmsg->free_iov) {
186 		size_t fast_idx = kmsg->msg.msg_iter.iov - kmsg->fast_iov;
187 		async_msg->msg.msg_iter.iov = &async_msg->fast_iov[fast_idx];
188 	}
189 
190 	return -EAGAIN;
191 }
192 
193 static int io_sendmsg_copy_hdr(struct io_kiocb *req,
194 			       struct io_async_msghdr *iomsg)
195 {
196 	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
197 
198 	iomsg->msg.msg_name = &iomsg->addr;
199 	iomsg->free_iov = iomsg->fast_iov;
200 	return sendmsg_copy_msghdr(&iomsg->msg, sr->umsg, sr->msg_flags,
201 					&iomsg->free_iov);
202 }
203 
204 int io_send_prep_async(struct io_kiocb *req)
205 {
206 	struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
207 	struct io_async_msghdr *io;
208 	int ret;
209 
210 	if (!zc->addr || req_has_async_data(req))
211 		return 0;
212 	io = io_msg_alloc_async_prep(req);
213 	if (!io)
214 		return -ENOMEM;
215 	ret = move_addr_to_kernel(zc->addr, zc->addr_len, &io->addr);
216 	return ret;
217 }
218 
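/*
 * Address-only counterpart of io_setup_async_msg() for the non-msghdr send
 * paths: preserve the already-imported sockaddr in async data so a retried
 * issue doesn't have to copy it from userspace again.
 */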
219 static int io_setup_async_addr(struct io_kiocb *req,
220 			      struct sockaddr_storage *addr_storage,
221 			      unsigned int issue_flags)
222 {
223 	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
224 	struct io_async_msghdr *io;
225 
226 	if (!sr->addr || req_has_async_data(req))
227 		return -EAGAIN;
228 	io = io_msg_alloc_async(req, issue_flags);
229 	if (!io)
230 		return -ENOMEM;
231 	memcpy(&io->addr, addr_storage, sizeof(io->addr));
232 	return -EAGAIN;
233 }
234 
235 int io_sendmsg_prep_async(struct io_kiocb *req)
236 {
237 	int ret;
238 
239 	if (!io_msg_alloc_async_prep(req))
240 		return -ENOMEM;
241 	ret = io_sendmsg_copy_hdr(req, req->async_data);
242 	if (!ret)
243 		req->flags |= REQ_F_NEED_CLEANUP;
244 	return ret;
245 }
246 
247 void io_sendmsg_recvmsg_cleanup(struct io_kiocb *req)
248 {
249 	struct io_async_msghdr *io = req->async_data;
250 
251 	kfree(io->free_iov);
252 }
253 
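/*
 * Shared prep for IORING_OP_SEND and IORING_OP_SENDMSG. SEND may carry an
 * optional destination address in addr2/addr_len, while SENDMSG must leave
 * addr2 and file_index clear. Only IORING_RECVSEND_POLL_FIRST is accepted in
 * sqe->ioprio, and MSG_NOSIGNAL is always added to the msg flags.
 */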
254 int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
255 {
256 	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
257 
258 	if (req->opcode == IORING_OP_SEND) {
259 		if (READ_ONCE(sqe->__pad3[0]))
260 			return -EINVAL;
261 		sr->addr = u64_to_user_ptr(READ_ONCE(sqe->addr2));
262 		sr->addr_len = READ_ONCE(sqe->addr_len);
263 	} else if (sqe->addr2 || sqe->file_index) {
264 		return -EINVAL;
265 	}
266 
267 	sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
268 	sr->len = READ_ONCE(sqe->len);
269 	sr->flags = READ_ONCE(sqe->ioprio);
270 	if (sr->flags & ~IORING_RECVSEND_POLL_FIRST)
271 		return -EINVAL;
272 	sr->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL;
273 	if (sr->msg_flags & MSG_DONTWAIT)
274 		req->flags |= REQ_F_NOWAIT;
275 
276 #ifdef CONFIG_COMPAT
277 	if (req->ctx->compat)
278 		sr->msg_flags |= MSG_CMSG_COMPAT;
279 #endif
280 	sr->done_io = 0;
281 	return 0;
282 }
283 
284 int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags)
285 {
286 	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
287 	struct io_async_msghdr iomsg, *kmsg;
288 	struct socket *sock;
289 	unsigned flags;
290 	int min_ret = 0;
291 	int ret;
292 
293 	sock = sock_from_file(req->file);
294 	if (unlikely(!sock))
295 		return -ENOTSOCK;
296 
297 	if (req_has_async_data(req)) {
298 		kmsg = req->async_data;
299 	} else {
300 		ret = io_sendmsg_copy_hdr(req, &iomsg);
301 		if (ret)
302 			return ret;
303 		kmsg = &iomsg;
304 	}
305 
306 	if (!(req->flags & REQ_F_POLLED) &&
307 	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
308 		return io_setup_async_msg(req, kmsg, issue_flags);
309 
310 	flags = sr->msg_flags;
311 	if (issue_flags & IO_URING_F_NONBLOCK)
312 		flags |= MSG_DONTWAIT;
313 	if (flags & MSG_WAITALL)
314 		min_ret = iov_iter_count(&kmsg->msg.msg_iter);
315 
316 	ret = __sys_sendmsg_sock(sock, &kmsg->msg, flags);
317 
318 	if (ret < min_ret) {
319 		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
320 			return io_setup_async_msg(req, kmsg, issue_flags);
321 		if (ret > 0 && io_net_retry(sock, flags)) {
322 			sr->done_io += ret;
323 			req->flags |= REQ_F_PARTIAL_IO;
324 			return io_setup_async_msg(req, kmsg, issue_flags);
325 		}
326 		if (ret == -ERESTARTSYS)
327 			ret = -EINTR;
328 		req_set_fail(req);
329 	}
330 	/* fast path, check for non-NULL to avoid function call */
331 	if (kmsg->free_iov)
332 		kfree(kmsg->free_iov);
333 	req->flags &= ~REQ_F_NEED_CLEANUP;
334 	io_netmsg_recycle(req, issue_flags);
335 	if (ret >= 0)
336 		ret += sr->done_io;
337 	else if (sr->done_io)
338 		ret = sr->done_io;
339 	io_req_set_res(req, ret, 0);
340 	return IOU_OK;
341 }
342 
343 int io_send(struct io_kiocb *req, unsigned int issue_flags)
344 {
345 	struct sockaddr_storage __address;
346 	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
347 	struct msghdr msg;
348 	struct iovec iov;
349 	struct socket *sock;
350 	unsigned flags;
351 	int min_ret = 0;
352 	int ret;
353 
354 	msg.msg_name = NULL;
355 	msg.msg_control = NULL;
356 	msg.msg_controllen = 0;
357 	msg.msg_namelen = 0;
358 	msg.msg_ubuf = NULL;
359 
360 	if (sr->addr) {
361 		if (req_has_async_data(req)) {
362 			struct io_async_msghdr *io = req->async_data;
363 
364 			msg.msg_name = &io->addr;
365 		} else {
366 			ret = move_addr_to_kernel(sr->addr, sr->addr_len, &__address);
367 			if (unlikely(ret < 0))
368 				return ret;
369 			msg.msg_name = (struct sockaddr *)&__address;
370 		}
371 		msg.msg_namelen = sr->addr_len;
372 	}
373 
374 	if (!(req->flags & REQ_F_POLLED) &&
375 	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
376 		return io_setup_async_addr(req, &__address, issue_flags);
377 
378 	sock = sock_from_file(req->file);
379 	if (unlikely(!sock))
380 		return -ENOTSOCK;
381 
382 	ret = import_single_range(ITER_SOURCE, sr->buf, sr->len, &iov, &msg.msg_iter);
383 	if (unlikely(ret))
384 		return ret;
385 
386 	flags = sr->msg_flags;
387 	if (issue_flags & IO_URING_F_NONBLOCK)
388 		flags |= MSG_DONTWAIT;
389 	if (flags & MSG_WAITALL)
390 		min_ret = iov_iter_count(&msg.msg_iter);
391 
392 	msg.msg_flags = flags;
393 	ret = sock_sendmsg(sock, &msg);
394 	if (ret < min_ret) {
395 		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
396 			return io_setup_async_addr(req, &__address, issue_flags);
397 
398 		if (ret > 0 && io_net_retry(sock, flags)) {
399 			sr->len -= ret;
400 			sr->buf += ret;
401 			sr->done_io += ret;
402 			req->flags |= REQ_F_PARTIAL_IO;
403 			return io_setup_async_addr(req, &__address, issue_flags);
404 		}
405 		if (ret == -ERESTARTSYS)
406 			ret = -EINTR;
407 		req_set_fail(req);
408 	}
409 	if (ret >= 0)
410 		ret += sr->done_io;
411 	else if (sr->done_io)
412 		ret = sr->done_io;
413 	io_req_set_res(req, ret, 0);
414 	return IOU_OK;
415 }
416 
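/*
 * The multishot recvmsg header (io_uring_recvmsg_out + name + control) is
 * carved out of the selected buffer, so make sure those lengths don't
 * overflow an int when added together.
 */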
417 static bool io_recvmsg_multishot_overflow(struct io_async_msghdr *iomsg)
418 {
419 	int hdr;
420 
421 	if (iomsg->namelen < 0)
422 		return true;
423 	if (check_add_overflow((int)sizeof(struct io_uring_recvmsg_out),
424 			       iomsg->namelen, &hdr))
425 		return true;
426 	if (check_add_overflow(hdr, (int)iomsg->controllen, &hdr))
427 		return true;
428 
429 	return false;
430 }
431 
432 static int __io_recvmsg_copy_hdr(struct io_kiocb *req,
433 				 struct io_async_msghdr *iomsg)
434 {
435 	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
436 	struct user_msghdr msg;
437 	int ret;
438 
439 	if (copy_from_user(&msg, sr->umsg, sizeof(*sr->umsg)))
440 		return -EFAULT;
441 
442 	ret = __copy_msghdr(&iomsg->msg, &msg, &iomsg->uaddr);
443 	if (ret)
444 		return ret;
445 
446 	if (req->flags & REQ_F_BUFFER_SELECT) {
447 		if (msg.msg_iovlen == 0) {
448 			sr->len = iomsg->fast_iov[0].iov_len = 0;
449 			iomsg->fast_iov[0].iov_base = NULL;
450 			iomsg->free_iov = NULL;
451 		} else if (msg.msg_iovlen > 1) {
452 			return -EINVAL;
453 		} else {
454 			if (copy_from_user(iomsg->fast_iov, msg.msg_iov, sizeof(*msg.msg_iov)))
455 				return -EFAULT;
456 			sr->len = iomsg->fast_iov[0].iov_len;
457 			iomsg->free_iov = NULL;
458 		}
459 
460 		if (req->flags & REQ_F_APOLL_MULTISHOT) {
461 			iomsg->namelen = msg.msg_namelen;
462 			iomsg->controllen = msg.msg_controllen;
463 			if (io_recvmsg_multishot_overflow(iomsg))
464 				return -EOVERFLOW;
465 		}
466 	} else {
467 		iomsg->free_iov = iomsg->fast_iov;
468 		ret = __import_iovec(ITER_DEST, msg.msg_iov, msg.msg_iovlen, UIO_FASTIOV,
469 				     &iomsg->free_iov, &iomsg->msg.msg_iter,
470 				     false);
471 		if (ret > 0)
472 			ret = 0;
473 	}
474 
475 	return ret;
476 }
477 
478 #ifdef CONFIG_COMPAT
479 static int __io_compat_recvmsg_copy_hdr(struct io_kiocb *req,
480 					struct io_async_msghdr *iomsg)
481 {
482 	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
483 	struct compat_msghdr msg;
484 	struct compat_iovec __user *uiov;
485 	int ret;
486 
487 	if (copy_from_user(&msg, sr->umsg_compat, sizeof(msg)))
488 		return -EFAULT;
489 
490 	ret = __get_compat_msghdr(&iomsg->msg, &msg, &iomsg->uaddr);
491 	if (ret)
492 		return ret;
493 
494 	uiov = compat_ptr(msg.msg_iov);
495 	if (req->flags & REQ_F_BUFFER_SELECT) {
496 		compat_ssize_t clen;
497 
498 		iomsg->free_iov = NULL;
499 		if (msg.msg_iovlen == 0) {
500 			sr->len = 0;
501 		} else if (msg.msg_iovlen > 1) {
502 			return -EINVAL;
503 		} else {
504 			if (!access_ok(uiov, sizeof(*uiov)))
505 				return -EFAULT;
506 			if (__get_user(clen, &uiov->iov_len))
507 				return -EFAULT;
508 			if (clen < 0)
509 				return -EINVAL;
510 			sr->len = clen;
511 		}
512 
513 		if (req->flags & REQ_F_APOLL_MULTISHOT) {
514 			iomsg->namelen = msg.msg_namelen;
515 			iomsg->controllen = msg.msg_controllen;
516 			if (io_recvmsg_multishot_overflow(iomsg))
517 				return -EOVERFLOW;
518 		}
519 	} else {
520 		iomsg->free_iov = iomsg->fast_iov;
521 		ret = __import_iovec(ITER_DEST, (struct iovec __user *)uiov, msg.msg_iovlen,
522 				   UIO_FASTIOV, &iomsg->free_iov,
523 				   &iomsg->msg.msg_iter, true);
524 		if (ret < 0)
525 			return ret;
526 	}
527 
528 	return 0;
529 }
530 #endif
531 
532 static int io_recvmsg_copy_hdr(struct io_kiocb *req,
533 			       struct io_async_msghdr *iomsg)
534 {
535 	iomsg->msg.msg_name = &iomsg->addr;
536 
537 #ifdef CONFIG_COMPAT
538 	if (req->ctx->compat)
539 		return __io_compat_recvmsg_copy_hdr(req, iomsg);
540 #endif
541 
542 	return __io_recvmsg_copy_hdr(req, iomsg);
543 }
544 
545 int io_recvmsg_prep_async(struct io_kiocb *req)
546 {
547 	int ret;
548 
549 	if (!io_msg_alloc_async_prep(req))
550 		return -ENOMEM;
551 	ret = io_recvmsg_copy_hdr(req, req->async_data);
552 	if (!ret)
553 		req->flags |= REQ_F_NEED_CLEANUP;
554 	return ret;
555 }
556 
557 #define RECVMSG_FLAGS (IORING_RECVSEND_POLL_FIRST | IORING_RECV_MULTISHOT)
558 
559 int io_recvmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
560 {
561 	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
562 
563 	if (unlikely(sqe->file_index || sqe->addr2))
564 		return -EINVAL;
565 
566 	sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
567 	sr->len = READ_ONCE(sqe->len);
568 	sr->flags = READ_ONCE(sqe->ioprio);
569 	if (sr->flags & ~(RECVMSG_FLAGS))
570 		return -EINVAL;
571 	sr->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL;
572 	if (sr->msg_flags & MSG_DONTWAIT)
573 		req->flags |= REQ_F_NOWAIT;
574 	if (sr->msg_flags & MSG_ERRQUEUE)
575 		req->flags |= REQ_F_CLEAR_POLLIN;
576 	if (sr->flags & IORING_RECV_MULTISHOT) {
577 		if (!(req->flags & REQ_F_BUFFER_SELECT))
578 			return -EINVAL;
579 		if (sr->msg_flags & MSG_WAITALL)
580 			return -EINVAL;
581 		if (req->opcode == IORING_OP_RECV && sr->len)
582 			return -EINVAL;
583 		req->flags |= REQ_F_APOLL_MULTISHOT;
584 		/*
585 		 * Store the buffer group for this multishot receive separately,
586 		 * as if we end up doing an io-wq based issue that selects a
587 		 * buffer, it has to be committed immediately and that will
588 		 * clear ->buf_list. This means we lose the link to the buffer
589 		 * list, and the eventual buffer put on completion then cannot
590 		 * restore it.
591 		 */
592 		sr->buf_group = req->buf_index;
593 	}
594 
595 #ifdef CONFIG_COMPAT
596 	if (req->ctx->compat)
597 		sr->msg_flags |= MSG_CMSG_COMPAT;
598 #endif
599 	sr->done_io = 0;
600 	return 0;
601 }
602 
603 static inline void io_recv_prep_retry(struct io_kiocb *req)
604 {
605 	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
606 
607 	sr->done_io = 0;
608 	sr->len = 0; /* get from the provided buffer */
609 	req->buf_index = sr->buf_group;
610 }
611 
612 /*
613  * Finishes io_recv and io_recvmsg.
614  *
615  * Returns true if it is actually finished, or false if it should run
616  * again (for multishot).
617  */
618 static inline bool io_recv_finish(struct io_kiocb *req, int *ret,
619 				  unsigned int cflags, bool mshot_finished,
620 				  unsigned issue_flags)
621 {
622 	if (!(req->flags & REQ_F_APOLL_MULTISHOT)) {
623 		io_req_set_res(req, *ret, cflags);
624 		*ret = IOU_OK;
625 		return true;
626 	}
627 
628 	if (!mshot_finished) {
629 		if (io_aux_cqe(req->ctx, issue_flags & IO_URING_F_COMPLETE_DEFER,
630 			       req->cqe.user_data, *ret, cflags | IORING_CQE_F_MORE, true)) {
631 			io_recv_prep_retry(req);
632 			return false;
633 		}
634 		/* Otherwise stop multishot but use the current result. */
635 	}
636 
637 	io_req_set_res(req, *ret, cflags);
638 
639 	if (issue_flags & IO_URING_F_MULTISHOT)
640 		*ret = IOU_STOP_MULTISHOT;
641 	else
642 		*ret = IOU_OK;
643 	return true;
644 }
645 
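/*
 * Lay out the multishot recvmsg format inside the selected buffer: an
 * io_uring_recvmsg_out header, space for the source address, the control
 * data, and the remaining bytes for the payload. Fails with -EFAULT if the
 * buffer can't even hold the header.
 */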
646 static int io_recvmsg_prep_multishot(struct io_async_msghdr *kmsg,
647 				     struct io_sr_msg *sr, void __user **buf,
648 				     size_t *len)
649 {
650 	unsigned long ubuf = (unsigned long) *buf;
651 	unsigned long hdr;
652 
653 	hdr = sizeof(struct io_uring_recvmsg_out) + kmsg->namelen +
654 		kmsg->controllen;
655 	if (*len < hdr)
656 		return -EFAULT;
657 
658 	if (kmsg->controllen) {
659 		unsigned long control = ubuf + hdr - kmsg->controllen;
660 
661 		kmsg->msg.msg_control_user = (void __user *) control;
662 		kmsg->msg.msg_controllen = kmsg->controllen;
663 	}
664 
665 	sr->buf = *buf; /* stash for later copy */
666 	*buf = (void __user *) (ubuf + hdr);
667 	kmsg->payloadlen = *len = *len - hdr;
668 	return 0;
669 }
670 
671 struct io_recvmsg_multishot_hdr {
672 	struct io_uring_recvmsg_out msg;
673 	struct sockaddr_storage addr;
674 };
675 
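/*
 * Multishot recvmsg: receive into the prepared buffer layout, then copy the
 * io_uring_recvmsg_out header (and any source address) to the front of the
 * buffer. Returns the total number of buffer bytes consumed, header included.
 */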
676 static int io_recvmsg_multishot(struct socket *sock, struct io_sr_msg *io,
677 				struct io_async_msghdr *kmsg,
678 				unsigned int flags, bool *finished)
679 {
680 	int err;
681 	int copy_len;
682 	struct io_recvmsg_multishot_hdr hdr;
683 
684 	if (kmsg->namelen)
685 		kmsg->msg.msg_name = &hdr.addr;
686 	kmsg->msg.msg_flags = flags & (MSG_CMSG_CLOEXEC|MSG_CMSG_COMPAT);
687 	kmsg->msg.msg_namelen = 0;
688 
689 	if (sock->file->f_flags & O_NONBLOCK)
690 		flags |= MSG_DONTWAIT;
691 
692 	err = sock_recvmsg(sock, &kmsg->msg, flags);
693 	*finished = err <= 0;
694 	if (err < 0)
695 		return err;
696 
697 	hdr.msg = (struct io_uring_recvmsg_out) {
698 		.controllen = kmsg->controllen - kmsg->msg.msg_controllen,
699 		.flags = kmsg->msg.msg_flags & ~MSG_CMSG_COMPAT
700 	};
701 
702 	hdr.msg.payloadlen = err;
703 	if (err > kmsg->payloadlen)
704 		err = kmsg->payloadlen;
705 
706 	copy_len = sizeof(struct io_uring_recvmsg_out);
707 	if (kmsg->msg.msg_namelen > kmsg->namelen)
708 		copy_len += kmsg->namelen;
709 	else
710 		copy_len += kmsg->msg.msg_namelen;
711 
712 	/*
713 	 *      "fromlen shall refer to the value before truncation.."
714 	 *                      1003.1g
715 	 */
716 	hdr.msg.namelen = kmsg->msg.msg_namelen;
717 
718 	/* ensure that there is no gap between hdr and sockaddr_storage */
719 	BUILD_BUG_ON(offsetof(struct io_recvmsg_multishot_hdr, addr) !=
720 		     sizeof(struct io_uring_recvmsg_out));
721 	if (copy_to_user(io->buf, &hdr, copy_len)) {
722 		*finished = true;
723 		return -EFAULT;
724 	}
725 
726 	return sizeof(struct io_uring_recvmsg_out) + kmsg->namelen +
727 			kmsg->controllen + err;
728 }
729 
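/*
 * Issue side of IORING_OP_RECVMSG. In multishot mode a provided buffer is
 * selected on each iteration, the recvmsg_out layout is prepared in it, and
 * io_recv_finish() decides whether to post an IORING_CQE_F_MORE CQE and loop
 * via retry_multishot or to terminate the request.
 */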
730 int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags)
731 {
732 	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
733 	struct io_async_msghdr iomsg, *kmsg;
734 	struct socket *sock;
735 	unsigned int cflags;
736 	unsigned flags;
737 	int ret, min_ret = 0;
738 	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
739 	bool mshot_finished = true;
740 
741 	sock = sock_from_file(req->file);
742 	if (unlikely(!sock))
743 		return -ENOTSOCK;
744 
745 	if (req_has_async_data(req)) {
746 		kmsg = req->async_data;
747 	} else {
748 		ret = io_recvmsg_copy_hdr(req, &iomsg);
749 		if (ret)
750 			return ret;
751 		kmsg = &iomsg;
752 	}
753 
754 	if (!(req->flags & REQ_F_POLLED) &&
755 	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
756 		return io_setup_async_msg(req, kmsg, issue_flags);
757 
758 	if (!io_check_multishot(req, issue_flags))
759 		return io_setup_async_msg(req, kmsg, issue_flags);
760 
761 retry_multishot:
762 	if (io_do_buffer_select(req)) {
763 		void __user *buf;
764 		size_t len = sr->len;
765 
766 		buf = io_buffer_select(req, &len, issue_flags);
767 		if (!buf)
768 			return -ENOBUFS;
769 
770 		if (req->flags & REQ_F_APOLL_MULTISHOT) {
771 			ret = io_recvmsg_prep_multishot(kmsg, sr, &buf, &len);
772 			if (ret) {
773 				io_kbuf_recycle(req, issue_flags);
774 				return ret;
775 			}
776 		}
777 
778 		kmsg->fast_iov[0].iov_base = buf;
779 		kmsg->fast_iov[0].iov_len = len;
780 		iov_iter_init(&kmsg->msg.msg_iter, ITER_DEST, kmsg->fast_iov, 1,
781 				len);
782 	}
783 
784 	flags = sr->msg_flags;
785 	if (force_nonblock)
786 		flags |= MSG_DONTWAIT;
787 	if (flags & MSG_WAITALL)
788 		min_ret = iov_iter_count(&kmsg->msg.msg_iter);
789 
790 	kmsg->msg.msg_get_inq = 1;
791 	if (req->flags & REQ_F_APOLL_MULTISHOT)
792 		ret = io_recvmsg_multishot(sock, sr, kmsg, flags,
793 					   &mshot_finished);
794 	else
795 		ret = __sys_recvmsg_sock(sock, &kmsg->msg, sr->umsg,
796 					 kmsg->uaddr, flags);
797 
798 	if (ret < min_ret) {
799 		if (ret == -EAGAIN && force_nonblock) {
800 			ret = io_setup_async_msg(req, kmsg, issue_flags);
801 			if (ret == -EAGAIN && (issue_flags & IO_URING_F_MULTISHOT)) {
802 				io_kbuf_recycle(req, issue_flags);
803 				return IOU_ISSUE_SKIP_COMPLETE;
804 			}
805 			return ret;
806 		}
807 		if (ret > 0 && io_net_retry(sock, flags)) {
808 			sr->done_io += ret;
809 			req->flags |= REQ_F_PARTIAL_IO;
810 			return io_setup_async_msg(req, kmsg, issue_flags);
811 		}
812 		if (ret == -ERESTARTSYS)
813 			ret = -EINTR;
814 		req_set_fail(req);
815 	} else if ((flags & MSG_WAITALL) && (kmsg->msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))) {
816 		req_set_fail(req);
817 	}
818 
819 	if (ret > 0)
820 		ret += sr->done_io;
821 	else if (sr->done_io)
822 		ret = sr->done_io;
823 	else
824 		io_kbuf_recycle(req, issue_flags);
825 
826 	cflags = io_put_kbuf(req, issue_flags);
827 	if (kmsg->msg.msg_inq)
828 		cflags |= IORING_CQE_F_SOCK_NONEMPTY;
829 
830 	if (!io_recv_finish(req, &ret, cflags, mshot_finished, issue_flags))
831 		goto retry_multishot;
832 
833 	if (mshot_finished) {
834 		/* fast path, check for non-NULL to avoid function call */
835 		if (kmsg->free_iov)
836 			kfree(kmsg->free_iov);
837 		io_netmsg_recycle(req, issue_flags);
838 		req->flags &= ~REQ_F_NEED_CLEANUP;
839 	}
840 
841 	return ret;
842 }
843 
844 int io_recv(struct io_kiocb *req, unsigned int issue_flags)
845 {
846 	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
847 	struct msghdr msg;
848 	struct socket *sock;
849 	struct iovec iov;
850 	unsigned int cflags;
851 	unsigned flags;
852 	int ret, min_ret = 0;
853 	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
854 	size_t len = sr->len;
855 
856 	if (!(req->flags & REQ_F_POLLED) &&
857 	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
858 		return -EAGAIN;
859 
860 	if (!io_check_multishot(req, issue_flags))
861 		return -EAGAIN;
862 
863 	sock = sock_from_file(req->file);
864 	if (unlikely(!sock))
865 		return -ENOTSOCK;
866 
867 retry_multishot:
868 	if (io_do_buffer_select(req)) {
869 		void __user *buf;
870 
871 		buf = io_buffer_select(req, &len, issue_flags);
872 		if (!buf)
873 			return -ENOBUFS;
874 		sr->buf = buf;
875 	}
876 
877 	ret = import_single_range(ITER_DEST, sr->buf, len, &iov, &msg.msg_iter);
878 	if (unlikely(ret))
879 		goto out_free;
880 
881 	msg.msg_name = NULL;
882 	msg.msg_namelen = 0;
883 	msg.msg_control = NULL;
884 	msg.msg_get_inq = 1;
885 	msg.msg_flags = 0;
886 	msg.msg_controllen = 0;
887 	msg.msg_iocb = NULL;
888 	msg.msg_ubuf = NULL;
889 
890 	flags = sr->msg_flags;
891 	if (force_nonblock)
892 		flags |= MSG_DONTWAIT;
893 	if (flags & MSG_WAITALL)
894 		min_ret = iov_iter_count(&msg.msg_iter);
895 
896 	ret = sock_recvmsg(sock, &msg, flags);
897 	if (ret < min_ret) {
898 		if (ret == -EAGAIN && force_nonblock) {
899 			if (issue_flags & IO_URING_F_MULTISHOT) {
900 				io_kbuf_recycle(req, issue_flags);
901 				return IOU_ISSUE_SKIP_COMPLETE;
902 			}
903 
904 			return -EAGAIN;
905 		}
906 		if (ret > 0 && io_net_retry(sock, flags)) {
907 			sr->len -= ret;
908 			sr->buf += ret;
909 			sr->done_io += ret;
910 			req->flags |= REQ_F_PARTIAL_IO;
911 			return -EAGAIN;
912 		}
913 		if (ret == -ERESTARTSYS)
914 			ret = -EINTR;
915 		req_set_fail(req);
916 	} else if ((flags & MSG_WAITALL) && (msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))) {
917 out_free:
918 		req_set_fail(req);
919 	}
920 
921 	if (ret > 0)
922 		ret += sr->done_io;
923 	else if (sr->done_io)
924 		ret = sr->done_io;
925 	else
926 		io_kbuf_recycle(req, issue_flags);
927 
928 	cflags = io_put_kbuf(req, issue_flags);
929 	if (msg.msg_inq)
930 		cflags |= IORING_CQE_F_SOCK_NONEMPTY;
931 
932 	if (!io_recv_finish(req, &ret, cflags, ret <= 0, issue_flags))
933 		goto retry_multishot;
934 
935 	return ret;
936 }
937 
938 void io_send_zc_cleanup(struct io_kiocb *req)
939 {
940 	struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
941 	struct io_async_msghdr *io;
942 
943 	if (req_has_async_data(req)) {
944 		io = req->async_data;
945 		/* might be ->fast_iov if *msg_copy_hdr failed */
946 		if (io->free_iov != io->fast_iov)
947 			kfree(io->free_iov);
948 	}
949 	if (zc->notif) {
950 		io_notif_flush(zc->notif);
951 		zc->notif = NULL;
952 	}
953 }
954 
955 #define IO_ZC_FLAGS_COMMON (IORING_RECVSEND_POLL_FIRST | IORING_RECVSEND_FIXED_BUF)
956 #define IO_ZC_FLAGS_VALID  (IO_ZC_FLAGS_COMMON | IORING_SEND_ZC_REPORT_USAGE)
957 
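/*
 * Prep for zero-copy send: allocate the notification request that will later
 * post the IORING_CQE_F_NOTIF completion, validate the zero-copy flag bits,
 * and resolve the registered buffer when IORING_RECVSEND_FIXED_BUF is set.
 */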
958 int io_send_zc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
959 {
960 	struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
961 	struct io_ring_ctx *ctx = req->ctx;
962 	struct io_kiocb *notif;
963 
964 	if (unlikely(READ_ONCE(sqe->__pad2[0]) || READ_ONCE(sqe->addr3)))
965 		return -EINVAL;
966 	/* we don't support IOSQE_CQE_SKIP_SUCCESS just yet */
967 	if (req->flags & REQ_F_CQE_SKIP)
968 		return -EINVAL;
969 
970 	notif = zc->notif = io_alloc_notif(ctx);
971 	if (!notif)
972 		return -ENOMEM;
973 	notif->cqe.user_data = req->cqe.user_data;
974 	notif->cqe.res = 0;
975 	notif->cqe.flags = IORING_CQE_F_NOTIF;
976 	req->flags |= REQ_F_NEED_CLEANUP;
977 
978 	zc->flags = READ_ONCE(sqe->ioprio);
979 	if (unlikely(zc->flags & ~IO_ZC_FLAGS_COMMON)) {
980 		if (zc->flags & ~IO_ZC_FLAGS_VALID)
981 			return -EINVAL;
982 		if (zc->flags & IORING_SEND_ZC_REPORT_USAGE) {
983 			io_notif_set_extended(notif);
984 			io_notif_to_data(notif)->zc_report = true;
985 		}
986 	}
987 
988 	if (zc->flags & IORING_RECVSEND_FIXED_BUF) {
989 		unsigned idx = READ_ONCE(sqe->buf_index);
990 
991 		if (unlikely(idx >= ctx->nr_user_bufs))
992 			return -EFAULT;
993 		idx = array_index_nospec(idx, ctx->nr_user_bufs);
994 		req->imu = READ_ONCE(ctx->user_bufs[idx]);
995 		io_req_set_rsrc_node(notif, ctx, 0);
996 	}
997 
998 	if (req->opcode == IORING_OP_SEND_ZC) {
999 		if (READ_ONCE(sqe->__pad3[0]))
1000 			return -EINVAL;
1001 		zc->addr = u64_to_user_ptr(READ_ONCE(sqe->addr2));
1002 		zc->addr_len = READ_ONCE(sqe->addr_len);
1003 	} else {
1004 		if (unlikely(sqe->addr2 || sqe->file_index))
1005 			return -EINVAL;
1006 		if (unlikely(zc->flags & IORING_RECVSEND_FIXED_BUF))
1007 			return -EINVAL;
1008 	}
1009 
1010 	zc->buf = u64_to_user_ptr(READ_ONCE(sqe->addr));
1011 	zc->len = READ_ONCE(sqe->len);
1012 	zc->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL;
1013 	if (zc->msg_flags & MSG_DONTWAIT)
1014 		req->flags |= REQ_F_NOWAIT;
1015 
1016 	zc->done_io = 0;
1017 
1018 #ifdef CONFIG_COMPAT
1019 	if (req->ctx->compat)
1020 		zc->msg_flags |= MSG_CMSG_COMPAT;
1021 #endif
1022 	return 0;
1023 }
1024 
1025 static int io_sg_from_iter_iovec(struct sock *sk, struct sk_buff *skb,
1026 				 struct iov_iter *from, size_t length)
1027 {
1028 	skb_zcopy_downgrade_managed(skb);
1029 	return __zerocopy_sg_from_iter(NULL, sk, skb, from, length);
1030 }
1031 
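/*
 * Fill skb frags directly from the caller's bvec iterator without copying or
 * grabbing page references (managed frags). If the skb already carries
 * unmanaged frags, fall back to __zerocopy_sg_from_iter() instead.
 */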
1032 static int io_sg_from_iter(struct sock *sk, struct sk_buff *skb,
1033 			   struct iov_iter *from, size_t length)
1034 {
1035 	struct skb_shared_info *shinfo = skb_shinfo(skb);
1036 	int frag = shinfo->nr_frags;
1037 	int ret = 0;
1038 	struct bvec_iter bi;
1039 	ssize_t copied = 0;
1040 	unsigned long truesize = 0;
1041 
1042 	if (!frag)
1043 		shinfo->flags |= SKBFL_MANAGED_FRAG_REFS;
1044 	else if (unlikely(!skb_zcopy_managed(skb)))
1045 		return __zerocopy_sg_from_iter(NULL, sk, skb, from, length);
1046 
1047 	bi.bi_size = min(from->count, length);
1048 	bi.bi_bvec_done = from->iov_offset;
1049 	bi.bi_idx = 0;
1050 
1051 	while (bi.bi_size && frag < MAX_SKB_FRAGS) {
1052 		struct bio_vec v = mp_bvec_iter_bvec(from->bvec, bi);
1053 
1054 		copied += v.bv_len;
1055 		truesize += PAGE_ALIGN(v.bv_len + v.bv_offset);
1056 		__skb_fill_page_desc_noacc(shinfo, frag++, v.bv_page,
1057 					   v.bv_offset, v.bv_len);
1058 		bvec_iter_advance_single(from->bvec, &bi, v.bv_len);
1059 	}
1060 	if (bi.bi_size)
1061 		ret = -EMSGSIZE;
1062 
1063 	shinfo->nr_frags = frag;
1064 	from->bvec += bi.bi_idx;
1065 	from->nr_segs -= bi.bi_idx;
1066 	from->count -= copied;
1067 	from->iov_offset = bi.bi_bvec_done;
1068 
1069 	skb->data_len += copied;
1070 	skb->len += copied;
1071 	skb->truesize += truesize;
1072 
1073 	if (sk && sk->sk_type == SOCK_STREAM) {
1074 		sk_wmem_queued_add(sk, truesize);
1075 		if (!skb_zcopy_pure(skb))
1076 			sk_mem_charge(sk, truesize);
1077 	} else {
1078 		refcount_add(truesize, &skb->sk->sk_wmem_alloc);
1079 	}
1080 	return ret;
1081 }
1082 
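/*
 * Issue side of IORING_OP_SEND_ZC: import either a registered buffer or a
 * plain user range, attach the notification's ubuf_info to the msghdr and
 * send with MSG_ZEROCOPY. The result CQE carries IORING_CQE_F_MORE, with the
 * notification CQE following once the kernel is done with the buffer.
 */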
1083 int io_send_zc(struct io_kiocb *req, unsigned int issue_flags)
1084 {
1085 	struct sockaddr_storage __address;
1086 	struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
1087 	struct msghdr msg;
1088 	struct iovec iov;
1089 	struct socket *sock;
1090 	unsigned msg_flags;
1091 	int ret, min_ret = 0;
1092 
1093 	sock = sock_from_file(req->file);
1094 	if (unlikely(!sock))
1095 		return -ENOTSOCK;
1096 	if (!test_bit(SOCK_SUPPORT_ZC, &sock->flags))
1097 		return -EOPNOTSUPP;
1098 
1099 	msg.msg_name = NULL;
1100 	msg.msg_control = NULL;
1101 	msg.msg_controllen = 0;
1102 	msg.msg_namelen = 0;
1103 
1104 	if (zc->addr) {
1105 		if (req_has_async_data(req)) {
1106 			struct io_async_msghdr *io = req->async_data;
1107 
1108 			msg.msg_name = &io->addr;
1109 		} else {
1110 			ret = move_addr_to_kernel(zc->addr, zc->addr_len, &__address);
1111 			if (unlikely(ret < 0))
1112 				return ret;
1113 			msg.msg_name = (struct sockaddr *)&__address;
1114 		}
1115 		msg.msg_namelen = zc->addr_len;
1116 	}
1117 
1118 	if (!(req->flags & REQ_F_POLLED) &&
1119 	    (zc->flags & IORING_RECVSEND_POLL_FIRST))
1120 		return io_setup_async_addr(req, &__address, issue_flags);
1121 
1122 	if (zc->flags & IORING_RECVSEND_FIXED_BUF) {
1123 		ret = io_import_fixed(ITER_SOURCE, &msg.msg_iter, req->imu,
1124 					(u64)(uintptr_t)zc->buf, zc->len);
1125 		if (unlikely(ret))
1126 			return ret;
1127 		msg.sg_from_iter = io_sg_from_iter;
1128 	} else {
1129 		io_notif_set_extended(zc->notif);
1130 		ret = import_single_range(ITER_SOURCE, zc->buf, zc->len, &iov,
1131 					  &msg.msg_iter);
1132 		if (unlikely(ret))
1133 			return ret;
1134 		ret = io_notif_account_mem(zc->notif, zc->len);
1135 		if (unlikely(ret))
1136 			return ret;
1137 		msg.sg_from_iter = io_sg_from_iter_iovec;
1138 	}
1139 
1140 	msg_flags = zc->msg_flags | MSG_ZEROCOPY;
1141 	if (issue_flags & IO_URING_F_NONBLOCK)
1142 		msg_flags |= MSG_DONTWAIT;
1143 	if (msg_flags & MSG_WAITALL)
1144 		min_ret = iov_iter_count(&msg.msg_iter);
1145 
1146 	msg.msg_flags = msg_flags;
1147 	msg.msg_ubuf = &io_notif_to_data(zc->notif)->uarg;
1148 	ret = sock_sendmsg(sock, &msg);
1149 
1150 	if (unlikely(ret < min_ret)) {
1151 		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
1152 			return io_setup_async_addr(req, &__address, issue_flags);
1153 
1154 		if (ret > 0 && io_net_retry(sock, msg.msg_flags)) {
1155 			zc->len -= ret;
1156 			zc->buf += ret;
1157 			zc->done_io += ret;
1158 			req->flags |= REQ_F_PARTIAL_IO;
1159 			return io_setup_async_addr(req, &__address, issue_flags);
1160 		}
1161 		if (ret == -ERESTARTSYS)
1162 			ret = -EINTR;
1163 		req_set_fail(req);
1164 	}
1165 
1166 	if (ret >= 0)
1167 		ret += zc->done_io;
1168 	else if (zc->done_io)
1169 		ret = zc->done_io;
1170 
1171 	/*
1172 	 * If we're in io-wq we can't rely on tw ordering guarantees, so defer
1173 	 * flushing the notif to io_send_zc_cleanup()
1174 	 */
1175 	if (!(issue_flags & IO_URING_F_UNLOCKED)) {
1176 		io_notif_flush(zc->notif);
1177 		req->flags &= ~REQ_F_NEED_CLEANUP;
1178 	}
1179 	io_req_set_res(req, ret, IORING_CQE_F_MORE);
1180 	return IOU_OK;
1181 }
1182 
1183 int io_sendmsg_zc(struct io_kiocb *req, unsigned int issue_flags)
1184 {
1185 	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
1186 	struct io_async_msghdr iomsg, *kmsg;
1187 	struct socket *sock;
1188 	unsigned flags;
1189 	int ret, min_ret = 0;
1190 
1191 	io_notif_set_extended(sr->notif);
1192 
1193 	sock = sock_from_file(req->file);
1194 	if (unlikely(!sock))
1195 		return -ENOTSOCK;
1196 	if (!test_bit(SOCK_SUPPORT_ZC, &sock->flags))
1197 		return -EOPNOTSUPP;
1198 
1199 	if (req_has_async_data(req)) {
1200 		kmsg = req->async_data;
1201 	} else {
1202 		ret = io_sendmsg_copy_hdr(req, &iomsg);
1203 		if (ret)
1204 			return ret;
1205 		kmsg = &iomsg;
1206 	}
1207 
1208 	if (!(req->flags & REQ_F_POLLED) &&
1209 	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
1210 		return io_setup_async_msg(req, kmsg, issue_flags);
1211 
1212 	flags = sr->msg_flags | MSG_ZEROCOPY;
1213 	if (issue_flags & IO_URING_F_NONBLOCK)
1214 		flags |= MSG_DONTWAIT;
1215 	if (flags & MSG_WAITALL)
1216 		min_ret = iov_iter_count(&kmsg->msg.msg_iter);
1217 
1218 	kmsg->msg.msg_ubuf = &io_notif_to_data(sr->notif)->uarg;
1219 	kmsg->msg.sg_from_iter = io_sg_from_iter_iovec;
1220 	ret = __sys_sendmsg_sock(sock, &kmsg->msg, flags);
1221 
1222 	if (unlikely(ret < min_ret)) {
1223 		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
1224 			return io_setup_async_msg(req, kmsg, issue_flags);
1225 
1226 		if (ret > 0 && io_net_retry(sock, flags)) {
1227 			sr->done_io += ret;
1228 			req->flags |= REQ_F_PARTIAL_IO;
1229 			return io_setup_async_msg(req, kmsg, issue_flags);
1230 		}
1231 		if (ret == -ERESTARTSYS)
1232 			ret = -EINTR;
1233 		req_set_fail(req);
1234 	}
1235 	/* fast path, check for non-NULL to avoid function call */
1236 	if (kmsg->free_iov) {
1237 		kfree(kmsg->free_iov);
1238 		kmsg->free_iov = NULL;
1239 	}
1240 
1241 	io_netmsg_recycle(req, issue_flags);
1242 	if (ret >= 0)
1243 		ret += sr->done_io;
1244 	else if (sr->done_io)
1245 		ret = sr->done_io;
1246 
1247 	/*
1248 	 * If we're in io-wq we can't rely on tw ordering guarantees, so defer
1249 	 * flushing the notif to io_send_zc_cleanup()
1250 	 */
1251 	if (!(issue_flags & IO_URING_F_UNLOCKED)) {
1252 		io_notif_flush(sr->notif);
1253 		req->flags &= ~REQ_F_NEED_CLEANUP;
1254 	}
1255 	io_req_set_res(req, ret, IORING_CQE_F_MORE);
1256 	return IOU_OK;
1257 }
1258 
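/*
 * Failure hook for the send/recv opcodes: report partial progress as the
 * result if any was made, and for zero-copy sends that still owe a
 * notification keep IORING_CQE_F_MORE set so userspace still waits for the
 * notif CQE.
 */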
1259 void io_sendrecv_fail(struct io_kiocb *req)
1260 {
1261 	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
1262 
1263 	if (req->flags & REQ_F_PARTIAL_IO)
1264 		req->cqe.res = sr->done_io;
1265 
1266 	if ((req->flags & REQ_F_NEED_CLEANUP) &&
1267 	    (req->opcode == IORING_OP_SEND_ZC || req->opcode == IORING_OP_SENDMSG_ZC))
1268 		req->cqe.flags |= IORING_CQE_F_MORE;
1269 }
1270 
1271 int io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
1272 {
1273 	struct io_accept *accept = io_kiocb_to_cmd(req, struct io_accept);
1274 	unsigned flags;
1275 
1276 	if (sqe->len || sqe->buf_index)
1277 		return -EINVAL;
1278 
1279 	accept->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
1280 	accept->addr_len = u64_to_user_ptr(READ_ONCE(sqe->addr2));
1281 	accept->flags = READ_ONCE(sqe->accept_flags);
1282 	accept->nofile = rlimit(RLIMIT_NOFILE);
1283 	flags = READ_ONCE(sqe->ioprio);
1284 	if (flags & ~IORING_ACCEPT_MULTISHOT)
1285 		return -EINVAL;
1286 
1287 	accept->file_slot = READ_ONCE(sqe->file_index);
1288 	if (accept->file_slot) {
1289 		if (accept->flags & SOCK_CLOEXEC)
1290 			return -EINVAL;
1291 		if (flags & IORING_ACCEPT_MULTISHOT &&
1292 		    accept->file_slot != IORING_FILE_INDEX_ALLOC)
1293 			return -EINVAL;
1294 	}
1295 	if (accept->flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK))
1296 		return -EINVAL;
1297 	if (SOCK_NONBLOCK != O_NONBLOCK && (accept->flags & SOCK_NONBLOCK))
1298 		accept->flags = (accept->flags & ~SOCK_NONBLOCK) | O_NONBLOCK;
1299 	if (flags & IORING_ACCEPT_MULTISHOT)
1300 		req->flags |= REQ_F_APOLL_MULTISHOT;
1301 	return 0;
1302 }
1303 
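/*
 * Accept a connection and install the resulting file into either a normal fd
 * or a fixed file slot. In multishot mode each accepted connection posts a
 * CQE with IORING_CQE_F_MORE and the request loops back to accept again,
 * until an error occurs or the CQE can't be posted.
 */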
1304 int io_accept(struct io_kiocb *req, unsigned int issue_flags)
1305 {
1306 	struct io_ring_ctx *ctx = req->ctx;
1307 	struct io_accept *accept = io_kiocb_to_cmd(req, struct io_accept);
1308 	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
1309 	unsigned int file_flags = force_nonblock ? O_NONBLOCK : 0;
1310 	bool fixed = !!accept->file_slot;
1311 	struct file *file;
1312 	int ret, fd;
1313 
1314 	if (!io_check_multishot(req, issue_flags))
1315 		return -EAGAIN;
1316 retry:
1317 	if (!fixed) {
1318 		fd = __get_unused_fd_flags(accept->flags, accept->nofile);
1319 		if (unlikely(fd < 0))
1320 			return fd;
1321 	}
1322 	file = do_accept(req->file, file_flags, accept->addr, accept->addr_len,
1323 			 accept->flags);
1324 	if (IS_ERR(file)) {
1325 		if (!fixed)
1326 			put_unused_fd(fd);
1327 		ret = PTR_ERR(file);
1328 		if (ret == -EAGAIN && force_nonblock) {
1329 			/*
1330 			 * If it's multishot and polled, we don't need to
1331 			 * return -EAGAIN to arm the poll infra since that
1332 			 * has already been done.
1333 			 */
1334 			if (issue_flags & IO_URING_F_MULTISHOT)
1335 				ret = IOU_ISSUE_SKIP_COMPLETE;
1336 			return ret;
1337 		}
1338 		if (ret == -ERESTARTSYS)
1339 			ret = -EINTR;
1340 		req_set_fail(req);
1341 	} else if (!fixed) {
1342 		fd_install(fd, file);
1343 		ret = fd;
1344 	} else {
1345 		ret = io_fixed_fd_install(req, issue_flags, file,
1346 						accept->file_slot);
1347 	}
1348 
1349 	if (!(req->flags & REQ_F_APOLL_MULTISHOT)) {
1350 		io_req_set_res(req, ret, 0);
1351 		return IOU_OK;
1352 	}
1353 
1354 	if (ret < 0)
1355 		return ret;
1356 	if (io_aux_cqe(ctx, issue_flags & IO_URING_F_COMPLETE_DEFER,
1357 		       req->cqe.user_data, ret, IORING_CQE_F_MORE, true))
1358 		goto retry;
1359 
1360 	return -ECANCELED;
1361 }
1362 
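/*
 * Prep for IORING_OP_SOCKET: domain, type and protocol are passed in the fd,
 * off and len SQE fields respectively, with SOCK_CLOEXEC/SOCK_NONBLOCK riding
 * along in the type field as for socket(2).
 */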
1363 int io_socket_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
1364 {
1365 	struct io_socket *sock = io_kiocb_to_cmd(req, struct io_socket);
1366 
1367 	if (sqe->addr || sqe->rw_flags || sqe->buf_index)
1368 		return -EINVAL;
1369 
1370 	sock->domain = READ_ONCE(sqe->fd);
1371 	sock->type = READ_ONCE(sqe->off);
1372 	sock->protocol = READ_ONCE(sqe->len);
1373 	sock->file_slot = READ_ONCE(sqe->file_index);
1374 	sock->nofile = rlimit(RLIMIT_NOFILE);
1375 
1376 	sock->flags = sock->type & ~SOCK_TYPE_MASK;
1377 	if (sock->file_slot && (sock->flags & SOCK_CLOEXEC))
1378 		return -EINVAL;
1379 	if (sock->flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK))
1380 		return -EINVAL;
1381 	return 0;
1382 }
1383 
1384 int io_socket(struct io_kiocb *req, unsigned int issue_flags)
1385 {
1386 	struct io_socket *sock = io_kiocb_to_cmd(req, struct io_socket);
1387 	bool fixed = !!sock->file_slot;
1388 	struct file *file;
1389 	int ret, fd;
1390 
1391 	if (!fixed) {
1392 		fd = __get_unused_fd_flags(sock->flags, sock->nofile);
1393 		if (unlikely(fd < 0))
1394 			return fd;
1395 	}
1396 	file = __sys_socket_file(sock->domain, sock->type, sock->protocol);
1397 	if (IS_ERR(file)) {
1398 		if (!fixed)
1399 			put_unused_fd(fd);
1400 		ret = PTR_ERR(file);
1401 		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
1402 			return -EAGAIN;
1403 		if (ret == -ERESTARTSYS)
1404 			ret = -EINTR;
1405 		req_set_fail(req);
1406 	} else if (!fixed) {
1407 		fd_install(fd, file);
1408 		ret = fd;
1409 	} else {
1410 		ret = io_fixed_fd_install(req, issue_flags, file,
1411 					    sock->file_slot);
1412 	}
1413 	io_req_set_res(req, ret, 0);
1414 	return IOU_OK;
1415 }
1416 
1417 int io_connect_prep_async(struct io_kiocb *req)
1418 {
1419 	struct io_async_connect *io = req->async_data;
1420 	struct io_connect *conn = io_kiocb_to_cmd(req, struct io_connect);
1421 
1422 	return move_addr_to_kernel(conn->addr, conn->addr_len, &io->address);
1423 }
1424 
1425 int io_connect_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
1426 {
1427 	struct io_connect *conn = io_kiocb_to_cmd(req, struct io_connect);
1428 
1429 	if (sqe->len || sqe->buf_index || sqe->rw_flags || sqe->splice_fd_in)
1430 		return -EINVAL;
1431 
1432 	conn->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
1433 	conn->addr_len =  READ_ONCE(sqe->addr2);
1434 	conn->in_progress = false;
1435 	return 0;
1436 }
1437 
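/*
 * Issue side of IORING_OP_CONNECT. A nonblocking connect that returns
 * -EINPROGRESS is marked in_progress, and subsequent issues just check
 * sock_error() for the final result instead of calling connect again. A plain
 * -EAGAIN stashes the copied address in async data for the blocking retry.
 */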
1438 int io_connect(struct io_kiocb *req, unsigned int issue_flags)
1439 {
1440 	struct io_connect *connect = io_kiocb_to_cmd(req, struct io_connect);
1441 	struct io_async_connect __io, *io;
1442 	unsigned file_flags;
1443 	int ret;
1444 	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
1445 
1446 	if (connect->in_progress) {
1447 		struct socket *socket;
1448 
1449 		ret = -ENOTSOCK;
1450 		socket = sock_from_file(req->file);
1451 		if (socket)
1452 			ret = sock_error(socket->sk);
1453 		goto out;
1454 	}
1455 
1456 	if (req_has_async_data(req)) {
1457 		io = req->async_data;
1458 	} else {
1459 		ret = move_addr_to_kernel(connect->addr,
1460 						connect->addr_len,
1461 						&__io.address);
1462 		if (ret)
1463 			goto out;
1464 		io = &__io;
1465 	}
1466 
1467 	file_flags = force_nonblock ? O_NONBLOCK : 0;
1468 
1469 	ret = __sys_connect_file(req->file, &io->address,
1470 					connect->addr_len, file_flags);
1471 	if ((ret == -EAGAIN || ret == -EINPROGRESS) && force_nonblock) {
1472 		if (ret == -EINPROGRESS) {
1473 			connect->in_progress = true;
1474 		} else {
1475 			if (req_has_async_data(req))
1476 				return -EAGAIN;
1477 			if (io_alloc_async_data(req)) {
1478 				ret = -ENOMEM;
1479 				goto out;
1480 			}
1481 			memcpy(req->async_data, &__io, sizeof(__io));
1482 		}
1483 		return -EAGAIN;
1484 	}
1485 	if (ret == -ERESTARTSYS)
1486 		ret = -EINTR;
1487 out:
1488 	if (ret < 0)
1489 		req_set_fail(req);
1490 	io_req_set_res(req, ret, 0);
1491 	return IOU_OK;
1492 }
1493 
1494 void io_netmsg_cache_free(struct io_cache_entry *entry)
1495 {
1496 	kfree(container_of(entry, struct io_async_msghdr, cache));
1497 }
1498 #endif
1499