xref: /linux/io_uring/net.c (revision 91928e0d3cc29789f4483bffee5f36218f23942b)
1 // SPDX-License-Identifier: GPL-2.0
2 #include <linux/kernel.h>
3 #include <linux/errno.h>
4 #include <linux/file.h>
5 #include <linux/slab.h>
6 #include <linux/net.h>
7 #include <linux/compat.h>
8 #include <net/compat.h>
9 #include <linux/io_uring.h>
10 
11 #include <uapi/linux/io_uring.h>
12 
13 #include "io_uring.h"
14 #include "kbuf.h"
15 #include "alloc_cache.h"
16 #include "net.h"
17 #include "notif.h"
18 #include "rsrc.h"
19 
20 #if defined(CONFIG_NET)
21 struct io_shutdown {
22 	struct file			*file;
23 	int				how;
24 };
25 
26 struct io_accept {
27 	struct file			*file;
28 	struct sockaddr __user		*addr;
29 	int __user			*addr_len;
30 	int				flags;
31 	int				iou_flags;
32 	u32				file_slot;
33 	unsigned long			nofile;
34 };
35 
36 struct io_socket {
37 	struct file			*file;
38 	int				domain;
39 	int				type;
40 	int				protocol;
41 	int				flags;
42 	u32				file_slot;
43 	unsigned long			nofile;
44 };
45 
46 struct io_connect {
47 	struct file			*file;
48 	struct sockaddr __user		*addr;
49 	int				addr_len;
50 	bool				in_progress;
51 	bool				seen_econnaborted;
52 };
53 
54 struct io_bind {
55 	struct file			*file;
56 	int				addr_len;
57 };
58 
59 struct io_listen {
60 	struct file			*file;
61 	int				backlog;
62 };
63 
64 struct io_sr_msg {
65 	struct file			*file;
66 	union {
67 		struct compat_msghdr __user	*umsg_compat;
68 		struct user_msghdr __user	*umsg;
69 		void __user			*buf;
70 	};
71 	int				len;
72 	unsigned			done_io;
73 	unsigned			msg_flags;
74 	unsigned			nr_multishot_loops;
75 	u16				flags;
76 	/* initialised and used only by !msg send variants */
77 	u16				buf_group;
78 	bool				retry;
79 	void __user			*msg_control;
80 	/* used only for send zerocopy */
81 	struct io_kiocb 		*notif;
82 };
83 
84 /*
85  * Number of times we'll try to do receives if there's more data. If we
86  * exceed this limit, then add us to the back of the queue and retry from
87  * there. This helps fairness between flooding clients.
88  */
89 #define MULTISHOT_MAX_RETRY	32
90 
91 int io_shutdown_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
92 {
93 	struct io_shutdown *shutdown = io_kiocb_to_cmd(req, struct io_shutdown);
94 
95 	if (unlikely(sqe->off || sqe->addr || sqe->rw_flags ||
96 		     sqe->buf_index || sqe->splice_fd_in))
97 		return -EINVAL;
98 
99 	shutdown->how = READ_ONCE(sqe->len);
100 	req->flags |= REQ_F_FORCE_ASYNC;
101 	return 0;
102 }
103 
104 int io_shutdown(struct io_kiocb *req, unsigned int issue_flags)
105 {
106 	struct io_shutdown *shutdown = io_kiocb_to_cmd(req, struct io_shutdown);
107 	struct socket *sock;
108 	int ret;
109 
110 	WARN_ON_ONCE(issue_flags & IO_URING_F_NONBLOCK);
111 
112 	sock = sock_from_file(req->file);
113 	if (unlikely(!sock))
114 		return -ENOTSOCK;
115 
116 	ret = __sys_shutdown_sock(sock, shutdown->how);
117 	io_req_set_res(req, ret, 0);
118 	return IOU_OK;
119 }
120 
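/*
 * A short transfer is only worth retrying for MSG_WAITALL requests on
 * stream or seqpacket sockets, where another attempt can make progress.
 */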
121 static bool io_net_retry(struct socket *sock, int flags)
122 {
123 	if (!(flags & MSG_WAITALL))
124 		return false;
125 	return sock->type == SOCK_STREAM || sock->type == SOCK_SEQPACKET;
126 }
127 
128 static void io_netmsg_iovec_free(struct io_async_msghdr *kmsg)
129 {
130 	if (kmsg->free_iov) {
131 		kfree(kmsg->free_iov);
132 		kmsg->free_iov_nr = 0;
133 		kmsg->free_iov = NULL;
134 	}
135 }
136 
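/*
 * Recycle the async msghdr into the per-ring cache when the ring is locked.
 * If we're running unlocked (io-wq), just free any attached iovec and let
 * the normal cleanup path handle the rest.
 */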
137 static void io_netmsg_recycle(struct io_kiocb *req, unsigned int issue_flags)
138 {
139 	struct io_async_msghdr *hdr = req->async_data;
140 
141 	/* can't recycle, ensure we free the iovec if we have one */
142 	if (unlikely(issue_flags & IO_URING_F_UNLOCKED)) {
143 		io_netmsg_iovec_free(hdr);
144 		return;
145 	}
146 
147 	/* Let normal cleanup path reap it if we fail adding to the cache */
148 	io_alloc_cache_kasan(&hdr->free_iov, &hdr->free_iov_nr);
149 	if (io_alloc_cache_put(&req->ctx->netmsg_cache, hdr)) {
150 		req->async_data = NULL;
151 		req->flags &= ~(REQ_F_ASYNC_DATA|REQ_F_NEED_CLEANUP);
152 	}
153 }
154 
155 static struct io_async_msghdr *io_msg_alloc_async(struct io_kiocb *req)
156 {
157 	struct io_ring_ctx *ctx = req->ctx;
158 	struct io_async_msghdr *hdr;
159 
160 	hdr = io_uring_alloc_async_data(&ctx->netmsg_cache, req);
161 	if (!hdr)
162 		return NULL;
163 
164 	/* If the async data was cached, we might have an iov cached inside. */
165 	if (hdr->free_iov)
166 		req->flags |= REQ_F_NEED_CLEANUP;
167 	return hdr;
168 }
169 
170 /* assign new iovec to kmsg, if we need to */
171 static void io_net_vec_assign(struct io_kiocb *req, struct io_async_msghdr *kmsg,
172 			     struct iovec *iov)
173 {
174 	if (iov) {
175 		req->flags |= REQ_F_NEED_CLEANUP;
176 		kmsg->free_iov_nr = kmsg->msg.msg_iter.nr_segs;
177 		if (kmsg->free_iov)
178 			kfree(kmsg->free_iov);
179 		kmsg->free_iov = iov;
180 	}
181 }
182 
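/*
 * Reset per-request state so a multishot/bundle retry starts fresh and
 * selects a new buffer from the originally requested buffer group.
 */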
183 static inline void io_mshot_prep_retry(struct io_kiocb *req,
184 				       struct io_async_msghdr *kmsg)
185 {
186 	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
187 
188 	req->flags &= ~REQ_F_BL_EMPTY;
189 	sr->done_io = 0;
190 	sr->retry = false;
191 	sr->len = 0; /* get from the provided buffer */
192 	req->buf_index = sr->buf_group;
193 }
194 
195 static int io_net_import_vec(struct io_kiocb *req, struct io_async_msghdr *iomsg,
196 			     const struct iovec __user *uiov, unsigned uvec_seg,
197 			     int ddir)
198 {
199 	struct iovec *iov;
200 	int ret, nr_segs;
201 
202 	if (iomsg->free_iov) {
203 		nr_segs = iomsg->free_iov_nr;
204 		iov = iomsg->free_iov;
205 	} else {
206 		nr_segs = 1;
207 		iov = &iomsg->fast_iov;
208 	}
209 
210 	ret = __import_iovec(ddir, uiov, uvec_seg, nr_segs, &iov,
211 			     &iomsg->msg.msg_iter, io_is_compat(req->ctx));
212 	if (unlikely(ret < 0))
213 		return ret;
214 	io_net_vec_assign(req, iomsg, iov);
215 	return 0;
216 }
217 
218 static int io_compat_msg_copy_hdr(struct io_kiocb *req,
219 				  struct io_async_msghdr *iomsg,
220 				  struct compat_msghdr *msg, int ddir,
221 				  struct sockaddr __user **save_addr)
222 {
223 	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
224 	struct compat_iovec __user *uiov;
225 	int ret;
226 
227 	if (copy_from_user(msg, sr->umsg_compat, sizeof(*msg)))
228 		return -EFAULT;
229 
230 	ret = __get_compat_msghdr(&iomsg->msg, msg, save_addr);
231 	if (ret)
232 		return ret;
233 
234 	uiov = compat_ptr(msg->msg_iov);
235 	if (req->flags & REQ_F_BUFFER_SELECT) {
236 		if (msg->msg_iovlen == 0) {
237 			sr->len = 0;
238 		} else if (msg->msg_iovlen > 1) {
239 			return -EINVAL;
240 		} else {
241 			struct compat_iovec tmp_iov;
242 
243 			if (copy_from_user(&tmp_iov, uiov, sizeof(tmp_iov)))
244 				return -EFAULT;
245 			sr->len = tmp_iov.iov_len;
246 		}
247 
248 		return 0;
249 	}
250 
251 	return io_net_import_vec(req, iomsg, (struct iovec __user *)uiov,
252 				 msg->msg_iovlen, ddir);
253 }
254 
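/*
 * Copy the user_msghdr fields we care about from userspace using a single
 * user_access_begin() section rather than several copy_from_user() calls.
 */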
255 static int io_copy_msghdr_from_user(struct user_msghdr *msg,
256 				    struct user_msghdr __user *umsg)
257 {
258 	if (!user_access_begin(umsg, sizeof(*umsg)))
259 		return -EFAULT;
260 	unsafe_get_user(msg->msg_name, &umsg->msg_name, ua_end);
261 	unsafe_get_user(msg->msg_namelen, &umsg->msg_namelen, ua_end);
262 	unsafe_get_user(msg->msg_iov, &umsg->msg_iov, ua_end);
263 	unsafe_get_user(msg->msg_iovlen, &umsg->msg_iovlen, ua_end);
264 	unsafe_get_user(msg->msg_control, &umsg->msg_control, ua_end);
265 	unsafe_get_user(msg->msg_controllen, &umsg->msg_controllen, ua_end);
266 	user_access_end();
267 	return 0;
268 ua_end:
269 	user_access_end();
270 	return -EFAULT;
271 }
272 
273 static int io_msg_copy_hdr(struct io_kiocb *req, struct io_async_msghdr *iomsg,
274 			   struct user_msghdr *msg, int ddir,
275 			   struct sockaddr __user **save_addr)
276 {
277 	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
278 	struct user_msghdr __user *umsg = sr->umsg;
279 	int ret;
280 
281 	ret = io_copy_msghdr_from_user(msg, umsg);
282 	if (unlikely(ret))
283 		return ret;
284 
285 	msg->msg_flags = 0;
286 
287 	ret = __copy_msghdr(&iomsg->msg, msg, save_addr);
288 	if (ret)
289 		return ret;
290 
291 	if (req->flags & REQ_F_BUFFER_SELECT) {
292 		if (msg->msg_iovlen == 0) {
293 			sr->len = 0;
294 		} else if (msg->msg_iovlen > 1) {
295 			return -EINVAL;
296 		} else {
297 			struct iovec __user *uiov = msg->msg_iov;
298 			struct iovec tmp_iov;
299 
300 			if (copy_from_user(&tmp_iov, uiov, sizeof(tmp_iov)))
301 				return -EFAULT;
302 			sr->len = tmp_iov.iov_len;
303 		}
304 		return 0;
305 	}
306 
307 	return io_net_import_vec(req, iomsg, msg->msg_iov, msg->msg_iovlen, ddir);
308 }
309 
310 static int io_sendmsg_copy_hdr(struct io_kiocb *req,
311 			       struct io_async_msghdr *iomsg)
312 {
313 	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
314 	struct user_msghdr msg;
315 	int ret;
316 
317 	iomsg->msg.msg_name = &iomsg->addr;
318 	iomsg->msg.msg_iter.nr_segs = 0;
319 
320 	if (io_is_compat(req->ctx)) {
321 		struct compat_msghdr cmsg;
322 
323 		ret = io_compat_msg_copy_hdr(req, iomsg, &cmsg, ITER_SOURCE,
324 					     NULL);
325 		sr->msg_control = iomsg->msg.msg_control_user;
326 		return ret;
327 	}
328 
329 	ret = io_msg_copy_hdr(req, iomsg, &msg, ITER_SOURCE, NULL);
330 	/* save msg_control as sys_sendmsg() overwrites it */
331 	sr->msg_control = iomsg->msg.msg_control_user;
332 	return ret;
333 }
334 
335 void io_sendmsg_recvmsg_cleanup(struct io_kiocb *req)
336 {
337 	struct io_async_msghdr *io = req->async_data;
338 
339 	io_netmsg_iovec_free(io);
340 }
341 
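/*
 * Prepare the non-msghdr send variants: stash the user buffer, copy in an
 * optional destination address, and map the buffer up front unless a
 * provided buffer will be selected at issue time.
 */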
342 static int io_send_setup(struct io_kiocb *req, const struct io_uring_sqe *sqe)
343 {
344 	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
345 	struct io_async_msghdr *kmsg = req->async_data;
346 	void __user *addr;
347 	u16 addr_len;
348 	int ret;
349 
350 	sr->buf = u64_to_user_ptr(READ_ONCE(sqe->addr));
351 
352 	if (READ_ONCE(sqe->__pad3[0]))
353 		return -EINVAL;
354 
355 	kmsg->msg.msg_name = NULL;
356 	kmsg->msg.msg_namelen = 0;
357 	kmsg->msg.msg_control = NULL;
358 	kmsg->msg.msg_controllen = 0;
359 	kmsg->msg.msg_ubuf = NULL;
360 
361 	addr = u64_to_user_ptr(READ_ONCE(sqe->addr2));
362 	addr_len = READ_ONCE(sqe->addr_len);
363 	if (addr) {
364 		ret = move_addr_to_kernel(addr, addr_len, &kmsg->addr);
365 		if (unlikely(ret < 0))
366 			return ret;
367 		kmsg->msg.msg_name = &kmsg->addr;
368 		kmsg->msg.msg_namelen = addr_len;
369 	}
370 	if (!io_do_buffer_select(req)) {
371 		ret = import_ubuf(ITER_SOURCE, sr->buf, sr->len,
372 				  &kmsg->msg.msg_iter);
373 		if (unlikely(ret < 0))
374 			return ret;
375 	}
376 	return 0;
377 }
378 
379 static int io_sendmsg_setup(struct io_kiocb *req, const struct io_uring_sqe *sqe)
380 {
381 	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
382 	struct io_async_msghdr *kmsg = req->async_data;
383 
384 	sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
385 
386 	return io_sendmsg_copy_hdr(req, kmsg);
387 }
388 
389 #define SENDMSG_FLAGS (IORING_RECVSEND_POLL_FIRST | IORING_RECVSEND_BUNDLE)
390 
391 int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
392 {
393 	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
394 
395 	sr->done_io = 0;
396 	sr->retry = false;
397 
398 	if (req->opcode != IORING_OP_SEND) {
399 		if (sqe->addr2 || sqe->file_index)
400 			return -EINVAL;
401 	}
402 
403 	sr->len = READ_ONCE(sqe->len);
404 	sr->flags = READ_ONCE(sqe->ioprio);
405 	if (sr->flags & ~SENDMSG_FLAGS)
406 		return -EINVAL;
407 	sr->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL;
408 	if (sr->msg_flags & MSG_DONTWAIT)
409 		req->flags |= REQ_F_NOWAIT;
410 	if (sr->flags & IORING_RECVSEND_BUNDLE) {
411 		if (req->opcode == IORING_OP_SENDMSG)
412 			return -EINVAL;
413 		if (!(req->flags & REQ_F_BUFFER_SELECT))
414 			return -EINVAL;
415 		sr->msg_flags |= MSG_WAITALL;
416 		sr->buf_group = req->buf_index;
417 		req->buf_list = NULL;
418 	}
419 
420 	if (io_is_compat(req->ctx))
421 		sr->msg_flags |= MSG_CMSG_COMPAT;
422 
423 	if (unlikely(!io_msg_alloc_async(req)))
424 		return -ENOMEM;
425 	if (req->opcode != IORING_OP_SENDMSG)
426 		return io_send_setup(req, sqe);
427 	return io_sendmsg_setup(req, sqe);
428 }
429 
430 static void io_req_msg_cleanup(struct io_kiocb *req,
431 			       unsigned int issue_flags)
432 {
433 	io_netmsg_recycle(req, issue_flags);
434 }
435 
436 /*
437  * For bundle completions, we need to figure out how many segments we consumed.
438  * A bundle could be using a single ITER_UBUF if that's all we mapped, or it
439  * could be using an ITER_IOVEC. If the latter, then if we consumed all of
440  * the segments, then it's a trivial questiont o answer. If we have residual
441  * data in the iter, then loop the segments to figure out how much we
442  * transferred.
443  */
444 static int io_bundle_nbufs(struct io_async_msghdr *kmsg, int ret)
445 {
446 	struct iovec *iov;
447 	int nbufs;
448 
449 	/* no data is always zero segments, and a ubuf is always 1 segment */
450 	if (ret <= 0)
451 		return 0;
452 	if (iter_is_ubuf(&kmsg->msg.msg_iter))
453 		return 1;
454 
455 	iov = kmsg->free_iov;
456 	if (!iov)
457 		iov = &kmsg->fast_iov;
458 
459 	/* if all data was transferred, it's basic pointer math */
460 	if (!iov_iter_count(&kmsg->msg.msg_iter))
461 		return iter_iov(&kmsg->msg.msg_iter) - iov;
462 
463 	/* short transfer, count segments */
464 	nbufs = 0;
465 	do {
466 		int this_len = min_t(int, iov[nbufs].iov_len, ret);
467 
468 		nbufs++;
469 		ret -= this_len;
470 	} while (ret);
471 
472 	return nbufs;
473 }
474 
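/*
 * Complete a send. For bundles, account the consumed buffers and, if the
 * transfer made progress and the buffer list isn't exhausted, post a CQE
 * with IORING_CQE_F_MORE and tell the caller to retry and append more;
 * otherwise set the final result and finish the request.
 */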
475 static inline bool io_send_finish(struct io_kiocb *req, int *ret,
476 				  struct io_async_msghdr *kmsg,
477 				  unsigned issue_flags)
478 {
479 	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
480 	bool bundle_finished = *ret <= 0;
481 	unsigned int cflags;
482 
483 	if (!(sr->flags & IORING_RECVSEND_BUNDLE)) {
484 		cflags = io_put_kbuf(req, *ret, issue_flags);
485 		goto finish;
486 	}
487 
488 	cflags = io_put_kbufs(req, *ret, io_bundle_nbufs(kmsg, *ret), issue_flags);
489 
490 	if (bundle_finished || req->flags & REQ_F_BL_EMPTY)
491 		goto finish;
492 
493 	/*
494 	 * Fill CQE for this receive and see if we should keep trying to
495 	 * receive from this socket.
496 	 */
497 	if (io_req_post_cqe(req, *ret, cflags | IORING_CQE_F_MORE)) {
498 		io_mshot_prep_retry(req, kmsg);
499 		return false;
500 	}
501 
502 	/* Otherwise stop bundle and use the current result. */
503 finish:
504 	io_req_set_res(req, *ret, cflags);
505 	*ret = IOU_OK;
506 	return true;
507 }
508 
509 int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags)
510 {
511 	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
512 	struct io_async_msghdr *kmsg = req->async_data;
513 	struct socket *sock;
514 	unsigned flags;
515 	int min_ret = 0;
516 	int ret;
517 
518 	sock = sock_from_file(req->file);
519 	if (unlikely(!sock))
520 		return -ENOTSOCK;
521 
522 	if (!(req->flags & REQ_F_POLLED) &&
523 	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
524 		return -EAGAIN;
525 
526 	flags = sr->msg_flags;
527 	if (issue_flags & IO_URING_F_NONBLOCK)
528 		flags |= MSG_DONTWAIT;
529 	if (flags & MSG_WAITALL)
530 		min_ret = iov_iter_count(&kmsg->msg.msg_iter);
531 
532 	kmsg->msg.msg_control_user = sr->msg_control;
533 
534 	ret = __sys_sendmsg_sock(sock, &kmsg->msg, flags);
535 
536 	if (ret < min_ret) {
537 		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
538 			return -EAGAIN;
539 		if (ret > 0 && io_net_retry(sock, flags)) {
540 			kmsg->msg.msg_controllen = 0;
541 			kmsg->msg.msg_control = NULL;
542 			sr->done_io += ret;
543 			req->flags |= REQ_F_BL_NO_RECYCLE;
544 			return -EAGAIN;
545 		}
546 		if (ret == -ERESTARTSYS)
547 			ret = -EINTR;
548 		req_set_fail(req);
549 	}
550 	io_req_msg_cleanup(req, issue_flags);
551 	if (ret >= 0)
552 		ret += sr->done_io;
553 	else if (sr->done_io)
554 		ret = sr->done_io;
555 	io_req_set_res(req, ret, 0);
556 	return IOU_OK;
557 }
558 
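/*
 * Select provided buffer(s) for a send. A single buffer is mapped as an
 * ITER_UBUF; a bundle selection builds an ITER_IOVEC over the chosen iovec
 * array, and any newly allocated array is stashed in kmsg for later freeing.
 */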
559 static int io_send_select_buffer(struct io_kiocb *req, unsigned int issue_flags,
560 				 struct io_async_msghdr *kmsg)
561 {
562 	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
563 
564 	int ret;
565 	struct buf_sel_arg arg = {
566 		.iovs = &kmsg->fast_iov,
567 		.max_len = min_not_zero(sr->len, INT_MAX),
568 		.nr_iovs = 1,
569 	};
570 
571 	if (kmsg->free_iov) {
572 		arg.nr_iovs = kmsg->free_iov_nr;
573 		arg.iovs = kmsg->free_iov;
574 		arg.mode = KBUF_MODE_FREE;
575 	}
576 
577 	if (!(sr->flags & IORING_RECVSEND_BUNDLE))
578 		arg.nr_iovs = 1;
579 	else
580 		arg.mode |= KBUF_MODE_EXPAND;
581 
582 	ret = io_buffers_select(req, &arg, issue_flags);
583 	if (unlikely(ret < 0))
584 		return ret;
585 
586 	if (arg.iovs != &kmsg->fast_iov && arg.iovs != kmsg->free_iov) {
587 		kmsg->free_iov_nr = ret;
588 		kmsg->free_iov = arg.iovs;
589 		req->flags |= REQ_F_NEED_CLEANUP;
590 	}
591 	sr->len = arg.out_len;
592 
593 	if (ret == 1) {
594 		sr->buf = arg.iovs[0].iov_base;
595 		ret = import_ubuf(ITER_SOURCE, sr->buf, sr->len,
596 					&kmsg->msg.msg_iter);
597 		if (unlikely(ret))
598 			return ret;
599 	} else {
600 		iov_iter_init(&kmsg->msg.msg_iter, ITER_SOURCE,
601 				arg.iovs, ret, arg.out_len);
602 	}
603 
604 	return 0;
605 }
606 
607 int io_send(struct io_kiocb *req, unsigned int issue_flags)
608 {
609 	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
610 	struct io_async_msghdr *kmsg = req->async_data;
611 	struct socket *sock;
612 	unsigned flags;
613 	int min_ret = 0;
614 	int ret;
615 
616 	sock = sock_from_file(req->file);
617 	if (unlikely(!sock))
618 		return -ENOTSOCK;
619 
620 	if (!(req->flags & REQ_F_POLLED) &&
621 	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
622 		return -EAGAIN;
623 
624 	flags = sr->msg_flags;
625 	if (issue_flags & IO_URING_F_NONBLOCK)
626 		flags |= MSG_DONTWAIT;
627 
628 retry_bundle:
629 	if (io_do_buffer_select(req)) {
630 		ret = io_send_select_buffer(req, issue_flags, kmsg);
631 		if (ret)
632 			return ret;
633 	}
634 
635 	/*
636 	 * If MSG_WAITALL is set, or this is a bundle send, then we need
637 	 * the full amount. If just bundle is set, if we do a short send
638 	 * then we complete the bundle sequence rather than continue on.
639 	 */
640 	if (flags & MSG_WAITALL || sr->flags & IORING_RECVSEND_BUNDLE)
641 		min_ret = iov_iter_count(&kmsg->msg.msg_iter);
642 
643 	flags &= ~MSG_INTERNAL_SENDMSG_FLAGS;
644 	kmsg->msg.msg_flags = flags;
645 	ret = sock_sendmsg(sock, &kmsg->msg);
646 	if (ret < min_ret) {
647 		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
648 			return -EAGAIN;
649 
650 		if (ret > 0 && io_net_retry(sock, flags)) {
651 			sr->len -= ret;
652 			sr->buf += ret;
653 			sr->done_io += ret;
654 			req->flags |= REQ_F_BL_NO_RECYCLE;
655 			return -EAGAIN;
656 		}
657 		if (ret == -ERESTARTSYS)
658 			ret = -EINTR;
659 		req_set_fail(req);
660 	}
661 	if (ret >= 0)
662 		ret += sr->done_io;
663 	else if (sr->done_io)
664 		ret = sr->done_io;
665 
666 	if (!io_send_finish(req, &ret, kmsg, issue_flags))
667 		goto retry_bundle;
668 
669 	io_req_msg_cleanup(req, issue_flags);
670 	return ret;
671 }
672 
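/*
 * For multishot recvmsg with provided buffers, check that the
 * io_uring_recvmsg_out header plus name and control data fits in an int,
 * and record the name/control lengths for carving up the buffer later.
 */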
673 static int io_recvmsg_mshot_prep(struct io_kiocb *req,
674 				 struct io_async_msghdr *iomsg,
675 				 int namelen, size_t controllen)
676 {
677 	if ((req->flags & (REQ_F_APOLL_MULTISHOT|REQ_F_BUFFER_SELECT)) ==
678 			  (REQ_F_APOLL_MULTISHOT|REQ_F_BUFFER_SELECT)) {
679 		int hdr;
680 
681 		if (unlikely(namelen < 0))
682 			return -EOVERFLOW;
683 		if (check_add_overflow(sizeof(struct io_uring_recvmsg_out),
684 					namelen, &hdr))
685 			return -EOVERFLOW;
686 		if (check_add_overflow(hdr, controllen, &hdr))
687 			return -EOVERFLOW;
688 
689 		iomsg->namelen = namelen;
690 		iomsg->controllen = controllen;
691 		return 0;
692 	}
693 
694 	return 0;
695 }
696 
697 static int io_recvmsg_copy_hdr(struct io_kiocb *req,
698 			       struct io_async_msghdr *iomsg)
699 {
700 	struct user_msghdr msg;
701 	int ret;
702 
703 	iomsg->msg.msg_name = &iomsg->addr;
704 	iomsg->msg.msg_iter.nr_segs = 0;
705 
706 	if (io_is_compat(req->ctx)) {
707 		struct compat_msghdr cmsg;
708 
709 		ret = io_compat_msg_copy_hdr(req, iomsg, &cmsg, ITER_DEST,
710 					     &iomsg->uaddr);
711 		memset(&msg, 0, sizeof(msg));
712 		msg.msg_namelen = cmsg.msg_namelen;
713 		msg.msg_controllen = cmsg.msg_controllen;
714 	} else {
715 		ret = io_msg_copy_hdr(req, iomsg, &msg, ITER_DEST, &iomsg->uaddr);
716 	}
717 
718 	if (unlikely(ret))
719 		return ret;
720 	return io_recvmsg_mshot_prep(req, iomsg, msg.msg_namelen,
721 					msg.msg_controllen);
722 }
723 
724 static int io_recvmsg_prep_setup(struct io_kiocb *req)
725 {
726 	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
727 	struct io_async_msghdr *kmsg;
728 	int ret;
729 
730 	kmsg = io_msg_alloc_async(req);
731 	if (unlikely(!kmsg))
732 		return -ENOMEM;
733 
734 	if (req->opcode == IORING_OP_RECV) {
735 		kmsg->msg.msg_name = NULL;
736 		kmsg->msg.msg_namelen = 0;
737 		kmsg->msg.msg_inq = 0;
738 		kmsg->msg.msg_control = NULL;
739 		kmsg->msg.msg_get_inq = 1;
740 		kmsg->msg.msg_controllen = 0;
741 		kmsg->msg.msg_iocb = NULL;
742 		kmsg->msg.msg_ubuf = NULL;
743 
744 		if (!io_do_buffer_select(req)) {
745 			ret = import_ubuf(ITER_DEST, sr->buf, sr->len,
746 					  &kmsg->msg.msg_iter);
747 			if (unlikely(ret))
748 				return ret;
749 		}
750 		return 0;
751 	}
752 
753 	return io_recvmsg_copy_hdr(req, kmsg);
754 }
755 
756 #define RECVMSG_FLAGS (IORING_RECVSEND_POLL_FIRST | IORING_RECV_MULTISHOT | \
757 			IORING_RECVSEND_BUNDLE)
758 
759 int io_recvmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
760 {
761 	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
762 
763 	sr->done_io = 0;
764 	sr->retry = false;
765 
766 	if (unlikely(sqe->file_index || sqe->addr2))
767 		return -EINVAL;
768 
769 	sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
770 	sr->len = READ_ONCE(sqe->len);
771 	sr->flags = READ_ONCE(sqe->ioprio);
772 	if (sr->flags & ~RECVMSG_FLAGS)
773 		return -EINVAL;
774 	sr->msg_flags = READ_ONCE(sqe->msg_flags);
775 	if (sr->msg_flags & MSG_DONTWAIT)
776 		req->flags |= REQ_F_NOWAIT;
777 	if (sr->msg_flags & MSG_ERRQUEUE)
778 		req->flags |= REQ_F_CLEAR_POLLIN;
779 	if (req->flags & REQ_F_BUFFER_SELECT) {
780 		/*
781 		 * Store the buffer group for this multishot receive separately,
782 		 * as if we end up doing an io-wq based issue that selects a
783 		 * buffer, it has to be committed immediately and that will
784 		 * clear ->buf_list. This means we lose the link to the buffer
785 		 * list, and the eventual buffer put on completion then cannot
786 		 * restore it.
787 		 */
788 		sr->buf_group = req->buf_index;
789 		req->buf_list = NULL;
790 	}
791 	if (sr->flags & IORING_RECV_MULTISHOT) {
792 		if (!(req->flags & REQ_F_BUFFER_SELECT))
793 			return -EINVAL;
794 		if (sr->msg_flags & MSG_WAITALL)
795 			return -EINVAL;
796 		if (req->opcode == IORING_OP_RECV && sr->len)
797 			return -EINVAL;
798 		req->flags |= REQ_F_APOLL_MULTISHOT;
799 	}
800 	if (sr->flags & IORING_RECVSEND_BUNDLE) {
801 		if (req->opcode == IORING_OP_RECVMSG)
802 			return -EINVAL;
803 	}
804 
805 	if (io_is_compat(req->ctx))
806 		sr->msg_flags |= MSG_CMSG_COMPAT;
807 
808 	sr->nr_multishot_loops = 0;
809 	return io_recvmsg_prep_setup(req);
810 }
811 
812 /* bits to clear in old and inherit in new cflags on bundle retry */
813 #define CQE_F_MASK	(IORING_CQE_F_SOCK_NONEMPTY|IORING_CQE_F_MORE)
814 
815 /*
816  * Finishes io_recv and io_recvmsg.
817  *
818  * Returns true if it is actually finished, or false if it should run
819  * again (for multishot).
820  */
821 static inline bool io_recv_finish(struct io_kiocb *req, int *ret,
822 				  struct io_async_msghdr *kmsg,
823 				  bool mshot_finished, unsigned issue_flags)
824 {
825 	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
826 	unsigned int cflags = 0;
827 
828 	if (kmsg->msg.msg_inq > 0)
829 		cflags |= IORING_CQE_F_SOCK_NONEMPTY;
830 
831 	if (sr->flags & IORING_RECVSEND_BUNDLE) {
832 		cflags |= io_put_kbufs(req, *ret, io_bundle_nbufs(kmsg, *ret),
833 				      issue_flags);
834 		if (sr->retry)
835 			cflags = req->cqe.flags | (cflags & CQE_F_MASK);
836 		/* bundle with no more immediate buffers, we're done */
837 		if (req->flags & REQ_F_BL_EMPTY)
838 			goto finish;
839 		/* if more is available, retry and append to this one */
840 		if (!sr->retry && kmsg->msg.msg_inq > 0 && *ret > 0) {
841 			req->cqe.flags = cflags & ~CQE_F_MASK;
842 			sr->len = kmsg->msg.msg_inq;
843 			sr->done_io += *ret;
844 			sr->retry = true;
845 			return false;
846 		}
847 	} else {
848 		cflags |= io_put_kbuf(req, *ret, issue_flags);
849 	}
850 
851 	/*
852 	 * Fill CQE for this receive and see if we should keep trying to
853 	 * receive from this socket.
854 	 */
855 	if ((req->flags & REQ_F_APOLL_MULTISHOT) && !mshot_finished &&
856 	    io_req_post_cqe(req, *ret, cflags | IORING_CQE_F_MORE)) {
857 		int mshot_retry_ret = IOU_ISSUE_SKIP_COMPLETE;
858 
859 		io_mshot_prep_retry(req, kmsg);
860 		/* Known not-empty or unknown state, retry */
861 		if (cflags & IORING_CQE_F_SOCK_NONEMPTY || kmsg->msg.msg_inq < 0) {
862 			if (sr->nr_multishot_loops++ < MULTISHOT_MAX_RETRY)
863 				return false;
864 			/* mshot retries exceeded, force a requeue */
865 			sr->nr_multishot_loops = 0;
866 			mshot_retry_ret = IOU_REQUEUE;
867 		}
868 		if (issue_flags & IO_URING_F_MULTISHOT)
869 			*ret = mshot_retry_ret;
870 		else
871 			*ret = -EAGAIN;
872 		return true;
873 	}
874 
875 	/* Finish the request / stop multishot. */
876 finish:
877 	io_req_set_res(req, *ret, cflags);
878 
879 	if (issue_flags & IO_URING_F_MULTISHOT)
880 		*ret = IOU_STOP_MULTISHOT;
881 	else
882 		*ret = IOU_OK;
883 	io_req_msg_cleanup(req, issue_flags);
884 	return true;
885 }
886 
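/*
 * Carve up the selected buffer for multishot recvmsg: reserve space at the
 * front for the io_uring_recvmsg_out header, name and control data, and
 * point the payload area past that region.
 */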
887 static int io_recvmsg_prep_multishot(struct io_async_msghdr *kmsg,
888 				     struct io_sr_msg *sr, void __user **buf,
889 				     size_t *len)
890 {
891 	unsigned long ubuf = (unsigned long) *buf;
892 	unsigned long hdr;
893 
894 	hdr = sizeof(struct io_uring_recvmsg_out) + kmsg->namelen +
895 		kmsg->controllen;
896 	if (*len < hdr)
897 		return -EFAULT;
898 
899 	if (kmsg->controllen) {
900 		unsigned long control = ubuf + hdr - kmsg->controllen;
901 
902 		kmsg->msg.msg_control_user = (void __user *) control;
903 		kmsg->msg.msg_controllen = kmsg->controllen;
904 	}
905 
906 	sr->buf = *buf; /* stash for later copy */
907 	*buf = (void __user *) (ubuf + hdr);
908 	kmsg->payloadlen = *len = *len - hdr;
909 	return 0;
910 }
911 
912 struct io_recvmsg_multishot_hdr {
913 	struct io_uring_recvmsg_out msg;
914 	struct sockaddr_storage addr;
915 };
916 
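/*
 * Perform one multishot recvmsg: receive the payload into the carved-up
 * buffer, then copy the io_uring_recvmsg_out header (and any source
 * address) to the front of it and return the total result length
 * (reserved header space plus received payload).
 */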
917 static int io_recvmsg_multishot(struct socket *sock, struct io_sr_msg *io,
918 				struct io_async_msghdr *kmsg,
919 				unsigned int flags, bool *finished)
920 {
921 	int err;
922 	int copy_len;
923 	struct io_recvmsg_multishot_hdr hdr;
924 
925 	if (kmsg->namelen)
926 		kmsg->msg.msg_name = &hdr.addr;
927 	kmsg->msg.msg_flags = flags & (MSG_CMSG_CLOEXEC|MSG_CMSG_COMPAT);
928 	kmsg->msg.msg_namelen = 0;
929 
930 	if (sock->file->f_flags & O_NONBLOCK)
931 		flags |= MSG_DONTWAIT;
932 
933 	err = sock_recvmsg(sock, &kmsg->msg, flags);
934 	*finished = err <= 0;
935 	if (err < 0)
936 		return err;
937 
938 	hdr.msg = (struct io_uring_recvmsg_out) {
939 		.controllen = kmsg->controllen - kmsg->msg.msg_controllen,
940 		.flags = kmsg->msg.msg_flags & ~MSG_CMSG_COMPAT
941 	};
942 
943 	hdr.msg.payloadlen = err;
944 	if (err > kmsg->payloadlen)
945 		err = kmsg->payloadlen;
946 
947 	copy_len = sizeof(struct io_uring_recvmsg_out);
948 	if (kmsg->msg.msg_namelen > kmsg->namelen)
949 		copy_len += kmsg->namelen;
950 	else
951 		copy_len += kmsg->msg.msg_namelen;
952 
953 	/*
954 	 *      "fromlen shall refer to the value before truncation.."
955 	 *                      1003.1g
956 	 */
957 	hdr.msg.namelen = kmsg->msg.msg_namelen;
958 
959 	/* ensure that there is no gap between hdr and sockaddr_storage */
960 	BUILD_BUG_ON(offsetof(struct io_recvmsg_multishot_hdr, addr) !=
961 		     sizeof(struct io_uring_recvmsg_out));
962 	if (copy_to_user(io->buf, &hdr, copy_len)) {
963 		*finished = true;
964 		return -EFAULT;
965 	}
966 
967 	return sizeof(struct io_uring_recvmsg_out) + kmsg->namelen +
968 			kmsg->controllen + err;
969 }
970 
971 int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags)
972 {
973 	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
974 	struct io_async_msghdr *kmsg = req->async_data;
975 	struct socket *sock;
976 	unsigned flags;
977 	int ret, min_ret = 0;
978 	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
979 	bool mshot_finished = true;
980 
981 	sock = sock_from_file(req->file);
982 	if (unlikely(!sock))
983 		return -ENOTSOCK;
984 
985 	if (!(req->flags & REQ_F_POLLED) &&
986 	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
987 		return -EAGAIN;
988 
989 	flags = sr->msg_flags;
990 	if (force_nonblock)
991 		flags |= MSG_DONTWAIT;
992 
993 retry_multishot:
994 	if (io_do_buffer_select(req)) {
995 		void __user *buf;
996 		size_t len = sr->len;
997 
998 		buf = io_buffer_select(req, &len, issue_flags);
999 		if (!buf)
1000 			return -ENOBUFS;
1001 
1002 		if (req->flags & REQ_F_APOLL_MULTISHOT) {
1003 			ret = io_recvmsg_prep_multishot(kmsg, sr, &buf, &len);
1004 			if (ret) {
1005 				io_kbuf_recycle(req, issue_flags);
1006 				return ret;
1007 			}
1008 		}
1009 
1010 		iov_iter_ubuf(&kmsg->msg.msg_iter, ITER_DEST, buf, len);
1011 	}
1012 
1013 	kmsg->msg.msg_get_inq = 1;
1014 	kmsg->msg.msg_inq = -1;
1015 	if (req->flags & REQ_F_APOLL_MULTISHOT) {
1016 		ret = io_recvmsg_multishot(sock, sr, kmsg, flags,
1017 					   &mshot_finished);
1018 	} else {
1019 		/* disable partial retry for recvmsg with cmsg attached */
1020 		if (flags & MSG_WAITALL && !kmsg->msg.msg_controllen)
1021 			min_ret = iov_iter_count(&kmsg->msg.msg_iter);
1022 
1023 		ret = __sys_recvmsg_sock(sock, &kmsg->msg, sr->umsg,
1024 					 kmsg->uaddr, flags);
1025 	}
1026 
1027 	if (ret < min_ret) {
1028 		if (ret == -EAGAIN && force_nonblock) {
1029 			if (issue_flags & IO_URING_F_MULTISHOT) {
1030 				io_kbuf_recycle(req, issue_flags);
1031 				return IOU_ISSUE_SKIP_COMPLETE;
1032 			}
1033 			return -EAGAIN;
1034 		}
1035 		if (ret > 0 && io_net_retry(sock, flags)) {
1036 			sr->done_io += ret;
1037 			req->flags |= REQ_F_BL_NO_RECYCLE;
1038 			return -EAGAIN;
1039 		}
1040 		if (ret == -ERESTARTSYS)
1041 			ret = -EINTR;
1042 		req_set_fail(req);
1043 	} else if ((flags & MSG_WAITALL) && (kmsg->msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))) {
1044 		req_set_fail(req);
1045 	}
1046 
1047 	if (ret > 0)
1048 		ret += sr->done_io;
1049 	else if (sr->done_io)
1050 		ret = sr->done_io;
1051 	else
1052 		io_kbuf_recycle(req, issue_flags);
1053 
1054 	if (!io_recv_finish(req, &ret, kmsg, mshot_finished, issue_flags))
1055 		goto retry_multishot;
1056 
1057 	return ret;
1058 }
1059 
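/*
 * Select receive buffer(s). With the ring lock held and a bundle request,
 * peek multiple buffers in one go, capped by the known socket backlog if
 * available; otherwise pick a single buffer and map it as an ITER_UBUF.
 */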
1060 static int io_recv_buf_select(struct io_kiocb *req, struct io_async_msghdr *kmsg,
1061 			      size_t *len, unsigned int issue_flags)
1062 {
1063 	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
1064 	int ret;
1065 
1066 	/*
1067 	 * If the ring isn't locked, then don't use the peek interface
1068 	 * to grab multiple buffers as we will lock/unlock between
1069 	 * this selection and posting the buffers.
1070 	 */
1071 	if (!(issue_flags & IO_URING_F_UNLOCKED) &&
1072 	    sr->flags & IORING_RECVSEND_BUNDLE) {
1073 		struct buf_sel_arg arg = {
1074 			.iovs = &kmsg->fast_iov,
1075 			.nr_iovs = 1,
1076 			.mode = KBUF_MODE_EXPAND,
1077 		};
1078 
1079 		if (kmsg->free_iov) {
1080 			arg.nr_iovs = kmsg->free_iov_nr;
1081 			arg.iovs = kmsg->free_iov;
1082 			arg.mode |= KBUF_MODE_FREE;
1083 		}
1084 
1085 		if (kmsg->msg.msg_inq > 0)
1086 			arg.max_len = min_not_zero(sr->len, kmsg->msg.msg_inq);
1087 
1088 		ret = io_buffers_peek(req, &arg);
1089 		if (unlikely(ret < 0))
1090 			return ret;
1091 
1092 		/* special case 1 vec, can be a fast path */
1093 		if (ret == 1) {
1094 			sr->buf = arg.iovs[0].iov_base;
1095 			sr->len = arg.iovs[0].iov_len;
1096 			goto map_ubuf;
1097 		}
1098 		iov_iter_init(&kmsg->msg.msg_iter, ITER_DEST, arg.iovs, ret,
1099 				arg.out_len);
1100 		if (arg.iovs != &kmsg->fast_iov && arg.iovs != kmsg->free_iov) {
1101 			kmsg->free_iov_nr = ret;
1102 			kmsg->free_iov = arg.iovs;
1103 			req->flags |= REQ_F_NEED_CLEANUP;
1104 		}
1105 	} else {
1106 		void __user *buf;
1107 
1108 		*len = sr->len;
1109 		buf = io_buffer_select(req, len, issue_flags);
1110 		if (!buf)
1111 			return -ENOBUFS;
1112 		sr->buf = buf;
1113 		sr->len = *len;
1114 map_ubuf:
1115 		ret = import_ubuf(ITER_DEST, sr->buf, sr->len,
1116 				  &kmsg->msg.msg_iter);
1117 		if (unlikely(ret))
1118 			return ret;
1119 	}
1120 
1121 	return 0;
1122 }
1123 
1124 int io_recv(struct io_kiocb *req, unsigned int issue_flags)
1125 {
1126 	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
1127 	struct io_async_msghdr *kmsg = req->async_data;
1128 	struct socket *sock;
1129 	unsigned flags;
1130 	int ret, min_ret = 0;
1131 	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
1132 	size_t len = sr->len;
1133 	bool mshot_finished;
1134 
1135 	if (!(req->flags & REQ_F_POLLED) &&
1136 	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
1137 		return -EAGAIN;
1138 
1139 	sock = sock_from_file(req->file);
1140 	if (unlikely(!sock))
1141 		return -ENOTSOCK;
1142 
1143 	flags = sr->msg_flags;
1144 	if (force_nonblock)
1145 		flags |= MSG_DONTWAIT;
1146 
1147 retry_multishot:
1148 	if (io_do_buffer_select(req)) {
1149 		ret = io_recv_buf_select(req, kmsg, &len, issue_flags);
1150 		if (unlikely(ret)) {
1151 			kmsg->msg.msg_inq = -1;
1152 			goto out_free;
1153 		}
1154 		sr->buf = NULL;
1155 	}
1156 
1157 	kmsg->msg.msg_flags = 0;
1158 	kmsg->msg.msg_inq = -1;
1159 
1160 	if (flags & MSG_WAITALL)
1161 		min_ret = iov_iter_count(&kmsg->msg.msg_iter);
1162 
1163 	ret = sock_recvmsg(sock, &kmsg->msg, flags);
1164 	if (ret < min_ret) {
1165 		if (ret == -EAGAIN && force_nonblock) {
1166 			if (issue_flags & IO_URING_F_MULTISHOT) {
1167 				io_kbuf_recycle(req, issue_flags);
1168 				return IOU_ISSUE_SKIP_COMPLETE;
1169 			}
1170 
1171 			return -EAGAIN;
1172 		}
1173 		if (ret > 0 && io_net_retry(sock, flags)) {
1174 			sr->len -= ret;
1175 			sr->buf += ret;
1176 			sr->done_io += ret;
1177 			req->flags |= REQ_F_BL_NO_RECYCLE;
1178 			return -EAGAIN;
1179 		}
1180 		if (ret == -ERESTARTSYS)
1181 			ret = -EINTR;
1182 		req_set_fail(req);
1183 	} else if ((flags & MSG_WAITALL) && (kmsg->msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))) {
1184 out_free:
1185 		req_set_fail(req);
1186 	}
1187 
1188 	mshot_finished = ret <= 0;
1189 	if (ret > 0)
1190 		ret += sr->done_io;
1191 	else if (sr->done_io)
1192 		ret = sr->done_io;
1193 	else
1194 		io_kbuf_recycle(req, issue_flags);
1195 
1196 	if (!io_recv_finish(req, &ret, kmsg, mshot_finished, issue_flags))
1197 		goto retry_multishot;
1198 
1199 	return ret;
1200 }
1201 
1202 void io_send_zc_cleanup(struct io_kiocb *req)
1203 {
1204 	struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
1205 	struct io_async_msghdr *io = req->async_data;
1206 
1207 	if (req_has_async_data(req))
1208 		io_netmsg_iovec_free(io);
1209 	if (zc->notif) {
1210 		io_notif_flush(zc->notif);
1211 		zc->notif = NULL;
1212 	}
1213 }
1214 
1215 #define IO_ZC_FLAGS_COMMON (IORING_RECVSEND_POLL_FIRST | IORING_RECVSEND_FIXED_BUF)
1216 #define IO_ZC_FLAGS_VALID  (IO_ZC_FLAGS_COMMON | IORING_SEND_ZC_REPORT_USAGE)
1217 
1218 int io_send_zc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
1219 {
1220 	struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
1221 	struct io_ring_ctx *ctx = req->ctx;
1222 	struct io_kiocb *notif;
1223 
1224 	zc->done_io = 0;
1225 	zc->retry = false;
1226 	req->flags |= REQ_F_POLL_NO_LAZY;
1227 
1228 	if (unlikely(READ_ONCE(sqe->__pad2[0]) || READ_ONCE(sqe->addr3)))
1229 		return -EINVAL;
1230 	/* we don't support IOSQE_CQE_SKIP_SUCCESS just yet */
1231 	if (req->flags & REQ_F_CQE_SKIP)
1232 		return -EINVAL;
1233 
1234 	notif = zc->notif = io_alloc_notif(ctx);
1235 	if (!notif)
1236 		return -ENOMEM;
1237 	notif->cqe.user_data = req->cqe.user_data;
1238 	notif->cqe.res = 0;
1239 	notif->cqe.flags = IORING_CQE_F_NOTIF;
1240 	req->flags |= REQ_F_NEED_CLEANUP;
1241 
1242 	zc->flags = READ_ONCE(sqe->ioprio);
1243 	if (unlikely(zc->flags & ~IO_ZC_FLAGS_COMMON)) {
1244 		if (zc->flags & ~IO_ZC_FLAGS_VALID)
1245 			return -EINVAL;
1246 		if (zc->flags & IORING_SEND_ZC_REPORT_USAGE) {
1247 			struct io_notif_data *nd = io_notif_to_data(notif);
1248 
1249 			nd->zc_report = true;
1250 			nd->zc_used = false;
1251 			nd->zc_copied = false;
1252 		}
1253 	}
1254 
1255 	if (req->opcode != IORING_OP_SEND_ZC) {
1256 		if (unlikely(sqe->addr2 || sqe->file_index))
1257 			return -EINVAL;
1258 		if (unlikely(zc->flags & IORING_RECVSEND_FIXED_BUF))
1259 			return -EINVAL;
1260 	}
1261 
1262 	zc->len = READ_ONCE(sqe->len);
1263 	zc->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL | MSG_ZEROCOPY;
1264 	req->buf_index = READ_ONCE(sqe->buf_index);
1265 	if (zc->msg_flags & MSG_DONTWAIT)
1266 		req->flags |= REQ_F_NOWAIT;
1267 
1268 	if (io_is_compat(req->ctx))
1269 		zc->msg_flags |= MSG_CMSG_COMPAT;
1270 
1271 	if (unlikely(!io_msg_alloc_async(req)))
1272 		return -ENOMEM;
1273 	if (req->opcode != IORING_OP_SENDMSG_ZC)
1274 		return io_send_setup(req, sqe);
1275 	return io_sendmsg_setup(req, sqe);
1276 }
1277 
1278 static int io_sg_from_iter_iovec(struct sk_buff *skb,
1279 				 struct iov_iter *from, size_t length)
1280 {
1281 	skb_zcopy_downgrade_managed(skb);
1282 	return zerocopy_fill_skb_from_iter(skb, from, length);
1283 }
1284 
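/*
 * Fill skb frags straight from the source bvec for zero-copy sends, marking
 * the frags as managed so no per-page references are taken. Falls back to
 * zerocopy_fill_skb_from_iter() if the skb can't use managed frags.
 */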
1285 static int io_sg_from_iter(struct sk_buff *skb,
1286 			   struct iov_iter *from, size_t length)
1287 {
1288 	struct skb_shared_info *shinfo = skb_shinfo(skb);
1289 	int frag = shinfo->nr_frags;
1290 	int ret = 0;
1291 	struct bvec_iter bi;
1292 	ssize_t copied = 0;
1293 	unsigned long truesize = 0;
1294 
1295 	if (!frag)
1296 		shinfo->flags |= SKBFL_MANAGED_FRAG_REFS;
1297 	else if (unlikely(!skb_zcopy_managed(skb)))
1298 		return zerocopy_fill_skb_from_iter(skb, from, length);
1299 
1300 	bi.bi_size = min(from->count, length);
1301 	bi.bi_bvec_done = from->iov_offset;
1302 	bi.bi_idx = 0;
1303 
1304 	while (bi.bi_size && frag < MAX_SKB_FRAGS) {
1305 		struct bio_vec v = mp_bvec_iter_bvec(from->bvec, bi);
1306 
1307 		copied += v.bv_len;
1308 		truesize += PAGE_ALIGN(v.bv_len + v.bv_offset);
1309 		__skb_fill_page_desc_noacc(shinfo, frag++, v.bv_page,
1310 					   v.bv_offset, v.bv_len);
1311 		bvec_iter_advance_single(from->bvec, &bi, v.bv_len);
1312 	}
1313 	if (bi.bi_size)
1314 		ret = -EMSGSIZE;
1315 
1316 	shinfo->nr_frags = frag;
1317 	from->bvec += bi.bi_idx;
1318 	from->nr_segs -= bi.bi_idx;
1319 	from->count -= copied;
1320 	from->iov_offset = bi.bi_bvec_done;
1321 
1322 	skb->data_len += copied;
1323 	skb->len += copied;
1324 	skb->truesize += truesize;
1325 	return ret;
1326 }
1327 
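/*
 * Map the source data for a zero-copy send: either from a registered
 * (fixed) buffer, or from plain user memory, in which case the length is
 * accounted via io_notif_account_mem().
 */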
1328 static int io_send_zc_import(struct io_kiocb *req, unsigned int issue_flags)
1329 {
1330 	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
1331 	struct io_async_msghdr *kmsg = req->async_data;
1332 	int ret;
1333 
1334 	if (sr->flags & IORING_RECVSEND_FIXED_BUF) {
1335 		sr->notif->buf_index = req->buf_index;
1336 		ret = io_import_reg_buf(sr->notif, &kmsg->msg.msg_iter,
1337 					(u64)(uintptr_t)sr->buf, sr->len,
1338 					ITER_SOURCE, issue_flags);
1339 		if (unlikely(ret))
1340 			return ret;
1341 		kmsg->msg.sg_from_iter = io_sg_from_iter;
1342 	} else {
1343 		ret = import_ubuf(ITER_SOURCE, sr->buf, sr->len, &kmsg->msg.msg_iter);
1344 		if (unlikely(ret))
1345 			return ret;
1346 		ret = io_notif_account_mem(sr->notif, sr->len);
1347 		if (unlikely(ret))
1348 			return ret;
1349 		kmsg->msg.sg_from_iter = io_sg_from_iter_iovec;
1350 	}
1351 
1352 	return ret;
1353 }
1354 
1355 int io_send_zc(struct io_kiocb *req, unsigned int issue_flags)
1356 {
1357 	struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
1358 	struct io_async_msghdr *kmsg = req->async_data;
1359 	struct socket *sock;
1360 	unsigned msg_flags;
1361 	int ret, min_ret = 0;
1362 
1363 	sock = sock_from_file(req->file);
1364 	if (unlikely(!sock))
1365 		return -ENOTSOCK;
1366 	if (!test_bit(SOCK_SUPPORT_ZC, &sock->flags))
1367 		return -EOPNOTSUPP;
1368 
1369 	if (!(req->flags & REQ_F_POLLED) &&
1370 	    (zc->flags & IORING_RECVSEND_POLL_FIRST))
1371 		return -EAGAIN;
1372 
1373 	if (!zc->done_io) {
1374 		ret = io_send_zc_import(req, issue_flags);
1375 		if (unlikely(ret))
1376 			return ret;
1377 	}
1378 
1379 	msg_flags = zc->msg_flags;
1380 	if (issue_flags & IO_URING_F_NONBLOCK)
1381 		msg_flags |= MSG_DONTWAIT;
1382 	if (msg_flags & MSG_WAITALL)
1383 		min_ret = iov_iter_count(&kmsg->msg.msg_iter);
1384 	msg_flags &= ~MSG_INTERNAL_SENDMSG_FLAGS;
1385 
1386 	kmsg->msg.msg_flags = msg_flags;
1387 	kmsg->msg.msg_ubuf = &io_notif_to_data(zc->notif)->uarg;
1388 	ret = sock_sendmsg(sock, &kmsg->msg);
1389 
1390 	if (unlikely(ret < min_ret)) {
1391 		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
1392 			return -EAGAIN;
1393 
1394 		if (ret > 0 && io_net_retry(sock, kmsg->msg.msg_flags)) {
1395 			zc->len -= ret;
1396 			zc->buf += ret;
1397 			zc->done_io += ret;
1398 			req->flags |= REQ_F_BL_NO_RECYCLE;
1399 			return -EAGAIN;
1400 		}
1401 		if (ret == -ERESTARTSYS)
1402 			ret = -EINTR;
1403 		req_set_fail(req);
1404 	}
1405 
1406 	if (ret >= 0)
1407 		ret += zc->done_io;
1408 	else if (zc->done_io)
1409 		ret = zc->done_io;
1410 
1411 	/*
1412 	 * If we're in io-wq we can't rely on tw ordering guarantees, defer
1413 	 * flushing notif to io_send_zc_cleanup()
1414 	 */
1415 	if (!(issue_flags & IO_URING_F_UNLOCKED)) {
1416 		io_notif_flush(zc->notif);
1417 		zc->notif = NULL;
1418 		io_req_msg_cleanup(req, 0);
1419 	}
1420 	io_req_set_res(req, ret, IORING_CQE_F_MORE);
1421 	return IOU_OK;
1422 }
1423 
1424 int io_sendmsg_zc(struct io_kiocb *req, unsigned int issue_flags)
1425 {
1426 	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
1427 	struct io_async_msghdr *kmsg = req->async_data;
1428 	struct socket *sock;
1429 	unsigned flags;
1430 	int ret, min_ret = 0;
1431 
1432 	sock = sock_from_file(req->file);
1433 	if (unlikely(!sock))
1434 		return -ENOTSOCK;
1435 	if (!test_bit(SOCK_SUPPORT_ZC, &sock->flags))
1436 		return -EOPNOTSUPP;
1437 
1438 	if (!(req->flags & REQ_F_POLLED) &&
1439 	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
1440 		return -EAGAIN;
1441 
1442 	flags = sr->msg_flags;
1443 	if (issue_flags & IO_URING_F_NONBLOCK)
1444 		flags |= MSG_DONTWAIT;
1445 	if (flags & MSG_WAITALL)
1446 		min_ret = iov_iter_count(&kmsg->msg.msg_iter);
1447 
1448 	kmsg->msg.msg_control_user = sr->msg_control;
1449 	kmsg->msg.msg_ubuf = &io_notif_to_data(sr->notif)->uarg;
1450 	kmsg->msg.sg_from_iter = io_sg_from_iter_iovec;
1451 	ret = __sys_sendmsg_sock(sock, &kmsg->msg, flags);
1452 
1453 	if (unlikely(ret < min_ret)) {
1454 		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
1455 			return -EAGAIN;
1456 
1457 		if (ret > 0 && io_net_retry(sock, flags)) {
1458 			sr->done_io += ret;
1459 			req->flags |= REQ_F_BL_NO_RECYCLE;
1460 			return -EAGAIN;
1461 		}
1462 		if (ret == -ERESTARTSYS)
1463 			ret = -EINTR;
1464 		req_set_fail(req);
1465 	}
1466 
1467 	if (ret >= 0)
1468 		ret += sr->done_io;
1469 	else if (sr->done_io)
1470 		ret = sr->done_io;
1471 
1472 	/*
1473 	 * If we're in io-wq we can't rely on tw ordering guarantees, defer
1474 	 * flushing notif to io_send_zc_cleanup()
1475 	 */
1476 	if (!(issue_flags & IO_URING_F_UNLOCKED)) {
1477 		io_notif_flush(sr->notif);
1478 		sr->notif = NULL;
1479 		io_req_msg_cleanup(req, 0);
1480 	}
1481 	io_req_set_res(req, ret, IORING_CQE_F_MORE);
1482 	return IOU_OK;
1483 }
1484 
1485 void io_sendrecv_fail(struct io_kiocb *req)
1486 {
1487 	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
1488 
1489 	if (sr->done_io)
1490 		req->cqe.res = sr->done_io;
1491 
1492 	if ((req->flags & REQ_F_NEED_CLEANUP) &&
1493 	    (req->opcode == IORING_OP_SEND_ZC || req->opcode == IORING_OP_SENDMSG_ZC))
1494 		req->cqe.flags |= IORING_CQE_F_MORE;
1495 }
1496 
1497 #define ACCEPT_FLAGS	(IORING_ACCEPT_MULTISHOT | IORING_ACCEPT_DONTWAIT | \
1498 			 IORING_ACCEPT_POLL_FIRST)
1499 
1500 int io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
1501 {
1502 	struct io_accept *accept = io_kiocb_to_cmd(req, struct io_accept);
1503 
1504 	if (sqe->len || sqe->buf_index)
1505 		return -EINVAL;
1506 
1507 	accept->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
1508 	accept->addr_len = u64_to_user_ptr(READ_ONCE(sqe->addr2));
1509 	accept->flags = READ_ONCE(sqe->accept_flags);
1510 	accept->nofile = rlimit(RLIMIT_NOFILE);
1511 	accept->iou_flags = READ_ONCE(sqe->ioprio);
1512 	if (accept->iou_flags & ~ACCEPT_FLAGS)
1513 		return -EINVAL;
1514 
1515 	accept->file_slot = READ_ONCE(sqe->file_index);
1516 	if (accept->file_slot) {
1517 		if (accept->flags & SOCK_CLOEXEC)
1518 			return -EINVAL;
1519 		if (accept->iou_flags & IORING_ACCEPT_MULTISHOT &&
1520 		    accept->file_slot != IORING_FILE_INDEX_ALLOC)
1521 			return -EINVAL;
1522 	}
1523 	if (accept->flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK))
1524 		return -EINVAL;
1525 	if (SOCK_NONBLOCK != O_NONBLOCK && (accept->flags & SOCK_NONBLOCK))
1526 		accept->flags = (accept->flags & ~SOCK_NONBLOCK) | O_NONBLOCK;
1527 	if (accept->iou_flags & IORING_ACCEPT_MULTISHOT)
1528 		req->flags |= REQ_F_APOLL_MULTISHOT;
1529 	if (accept->iou_flags & IORING_ACCEPT_DONTWAIT)
1530 		req->flags |= REQ_F_NOWAIT;
1531 	return 0;
1532 }
1533 
1534 int io_accept(struct io_kiocb *req, unsigned int issue_flags)
1535 {
1536 	struct io_accept *accept = io_kiocb_to_cmd(req, struct io_accept);
1537 	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
1538 	bool fixed = !!accept->file_slot;
1539 	struct proto_accept_arg arg = {
1540 		.flags = force_nonblock ? O_NONBLOCK : 0,
1541 	};
1542 	struct file *file;
1543 	unsigned cflags;
1544 	int ret, fd;
1545 
1546 	if (!(req->flags & REQ_F_POLLED) &&
1547 	    accept->iou_flags & IORING_ACCEPT_POLL_FIRST)
1548 		return -EAGAIN;
1549 
1550 retry:
1551 	if (!fixed) {
1552 		fd = __get_unused_fd_flags(accept->flags, accept->nofile);
1553 		if (unlikely(fd < 0))
1554 			return fd;
1555 	}
1556 	arg.err = 0;
1557 	arg.is_empty = -1;
1558 	file = do_accept(req->file, &arg, accept->addr, accept->addr_len,
1559 			 accept->flags);
1560 	if (IS_ERR(file)) {
1561 		if (!fixed)
1562 			put_unused_fd(fd);
1563 		ret = PTR_ERR(file);
1564 		if (ret == -EAGAIN && force_nonblock &&
1565 		    !(accept->iou_flags & IORING_ACCEPT_DONTWAIT)) {
1566 			/*
1567 			 * if it's multishot and polled, we don't need to
1568 			 * return EAGAIN to arm the poll infra since it
1569 			 * has already been done
1570 			 */
1571 			if (issue_flags & IO_URING_F_MULTISHOT)
1572 				return IOU_ISSUE_SKIP_COMPLETE;
1573 			return ret;
1574 		}
1575 		if (ret == -ERESTARTSYS)
1576 			ret = -EINTR;
1577 	} else if (!fixed) {
1578 		fd_install(fd, file);
1579 		ret = fd;
1580 	} else {
1581 		ret = io_fixed_fd_install(req, issue_flags, file,
1582 						accept->file_slot);
1583 	}
1584 
1585 	cflags = 0;
1586 	if (!arg.is_empty)
1587 		cflags |= IORING_CQE_F_SOCK_NONEMPTY;
1588 
1589 	if (ret >= 0 && (req->flags & REQ_F_APOLL_MULTISHOT) &&
1590 	    io_req_post_cqe(req, ret, cflags | IORING_CQE_F_MORE)) {
1591 		if (cflags & IORING_CQE_F_SOCK_NONEMPTY || arg.is_empty == -1)
1592 			goto retry;
1593 		if (issue_flags & IO_URING_F_MULTISHOT)
1594 			return IOU_ISSUE_SKIP_COMPLETE;
1595 		return -EAGAIN;
1596 	}
1597 
1598 	io_req_set_res(req, ret, cflags);
1599 	if (ret < 0)
1600 		req_set_fail(req);
1601 	if (!(issue_flags & IO_URING_F_MULTISHOT))
1602 		return IOU_OK;
1603 	return IOU_STOP_MULTISHOT;
1604 }
1605 
1606 int io_socket_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
1607 {
1608 	struct io_socket *sock = io_kiocb_to_cmd(req, struct io_socket);
1609 
1610 	if (sqe->addr || sqe->rw_flags || sqe->buf_index)
1611 		return -EINVAL;
1612 
1613 	sock->domain = READ_ONCE(sqe->fd);
1614 	sock->type = READ_ONCE(sqe->off);
1615 	sock->protocol = READ_ONCE(sqe->len);
1616 	sock->file_slot = READ_ONCE(sqe->file_index);
1617 	sock->nofile = rlimit(RLIMIT_NOFILE);
1618 
1619 	sock->flags = sock->type & ~SOCK_TYPE_MASK;
1620 	if (sock->file_slot && (sock->flags & SOCK_CLOEXEC))
1621 		return -EINVAL;
1622 	if (sock->flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK))
1623 		return -EINVAL;
1624 	return 0;
1625 }
1626 
1627 int io_socket(struct io_kiocb *req, unsigned int issue_flags)
1628 {
1629 	struct io_socket *sock = io_kiocb_to_cmd(req, struct io_socket);
1630 	bool fixed = !!sock->file_slot;
1631 	struct file *file;
1632 	int ret, fd;
1633 
1634 	if (!fixed) {
1635 		fd = __get_unused_fd_flags(sock->flags, sock->nofile);
1636 		if (unlikely(fd < 0))
1637 			return fd;
1638 	}
1639 	file = __sys_socket_file(sock->domain, sock->type, sock->protocol);
1640 	if (IS_ERR(file)) {
1641 		if (!fixed)
1642 			put_unused_fd(fd);
1643 		ret = PTR_ERR(file);
1644 		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
1645 			return -EAGAIN;
1646 		if (ret == -ERESTARTSYS)
1647 			ret = -EINTR;
1648 		req_set_fail(req);
1649 	} else if (!fixed) {
1650 		fd_install(fd, file);
1651 		ret = fd;
1652 	} else {
1653 		ret = io_fixed_fd_install(req, issue_flags, file,
1654 					    sock->file_slot);
1655 	}
1656 	io_req_set_res(req, ret, 0);
1657 	return IOU_OK;
1658 }
1659 
1660 int io_connect_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
1661 {
1662 	struct io_connect *conn = io_kiocb_to_cmd(req, struct io_connect);
1663 	struct io_async_msghdr *io;
1664 
1665 	if (sqe->len || sqe->buf_index || sqe->rw_flags || sqe->splice_fd_in)
1666 		return -EINVAL;
1667 
1668 	conn->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
1669 	conn->addr_len =  READ_ONCE(sqe->addr2);
1670 	conn->in_progress = conn->seen_econnaborted = false;
1671 
1672 	io = io_msg_alloc_async(req);
1673 	if (unlikely(!io))
1674 		return -ENOMEM;
1675 
1676 	return move_addr_to_kernel(conn->addr, conn->addr_len, &io->addr);
1677 }
1678 
1679 int io_connect(struct io_kiocb *req, unsigned int issue_flags)
1680 {
1681 	struct io_connect *connect = io_kiocb_to_cmd(req, struct io_connect);
1682 	struct io_async_msghdr *io = req->async_data;
1683 	unsigned file_flags;
1684 	int ret;
1685 	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
1686 
1687 	if (unlikely(req->flags & REQ_F_FAIL)) {
1688 		ret = -ECONNRESET;
1689 		goto out;
1690 	}
1691 
1692 	file_flags = force_nonblock ? O_NONBLOCK : 0;
1693 
1694 	ret = __sys_connect_file(req->file, &io->addr, connect->addr_len,
1695 				 file_flags);
1696 	if ((ret == -EAGAIN || ret == -EINPROGRESS || ret == -ECONNABORTED)
1697 	    && force_nonblock) {
1698 		if (ret == -EINPROGRESS) {
1699 			connect->in_progress = true;
1700 		} else if (ret == -ECONNABORTED) {
1701 			if (connect->seen_econnaborted)
1702 				goto out;
1703 			connect->seen_econnaborted = true;
1704 		}
1705 		return -EAGAIN;
1706 	}
1707 	if (connect->in_progress) {
1708 		/*
1709 		 * At least bluetooth will return -EBADFD on a re-connect
1710 		 * attempt, and it's (supposedly) also valid to get -EISCONN
1711 		 * which means the previous result is good. For both of these,
1712 		 * grab the sock_error() and use that for the completion.
1713 		 */
1714 		if (ret == -EBADFD || ret == -EISCONN)
1715 			ret = sock_error(sock_from_file(req->file)->sk);
1716 	}
1717 	if (ret == -ERESTARTSYS)
1718 		ret = -EINTR;
1719 out:
1720 	if (ret < 0)
1721 		req_set_fail(req);
1722 	io_req_msg_cleanup(req, issue_flags);
1723 	io_req_set_res(req, ret, 0);
1724 	return IOU_OK;
1725 }
1726 
1727 int io_bind_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
1728 {
1729 	struct io_bind *bind = io_kiocb_to_cmd(req, struct io_bind);
1730 	struct sockaddr __user *uaddr;
1731 	struct io_async_msghdr *io;
1732 
1733 	if (sqe->len || sqe->buf_index || sqe->rw_flags || sqe->splice_fd_in)
1734 		return -EINVAL;
1735 
1736 	uaddr = u64_to_user_ptr(READ_ONCE(sqe->addr));
1737 	bind->addr_len =  READ_ONCE(sqe->addr2);
1738 
1739 	io = io_msg_alloc_async(req);
1740 	if (unlikely(!io))
1741 		return -ENOMEM;
1742 	return move_addr_to_kernel(uaddr, bind->addr_len, &io->addr);
1743 }
1744 
1745 int io_bind(struct io_kiocb *req, unsigned int issue_flags)
1746 {
1747 	struct io_bind *bind = io_kiocb_to_cmd(req, struct io_bind);
1748 	struct io_async_msghdr *io = req->async_data;
1749 	struct socket *sock;
1750 	int ret;
1751 
1752 	sock = sock_from_file(req->file);
1753 	if (unlikely(!sock))
1754 		return -ENOTSOCK;
1755 
1756 	ret = __sys_bind_socket(sock, &io->addr, bind->addr_len);
1757 	if (ret < 0)
1758 		req_set_fail(req);
1759 	io_req_set_res(req, ret, 0);
1760 	return 0;
1761 }
1762 
1763 int io_listen_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
1764 {
1765 	struct io_listen *listen = io_kiocb_to_cmd(req, struct io_listen);
1766 
1767 	if (sqe->addr || sqe->buf_index || sqe->rw_flags || sqe->splice_fd_in || sqe->addr2)
1768 		return -EINVAL;
1769 
1770 	listen->backlog = READ_ONCE(sqe->len);
1771 	return 0;
1772 }
1773 
1774 int io_listen(struct io_kiocb *req, unsigned int issue_flags)
1775 {
1776 	struct io_listen *listen = io_kiocb_to_cmd(req, struct io_listen);
1777 	struct socket *sock;
1778 	int ret;
1779 
1780 	sock = sock_from_file(req->file);
1781 	if (unlikely(!sock))
1782 		return -ENOTSOCK;
1783 
1784 	ret = __sys_listen_socket(sock, listen->backlog);
1785 	if (ret < 0)
1786 		req_set_fail(req);
1787 	io_req_set_res(req, ret, 0);
1788 	return 0;
1789 }
1790 
1791 void io_netmsg_cache_free(const void *entry)
1792 {
1793 	struct io_async_msghdr *kmsg = (struct io_async_msghdr *) entry;
1794 
1795 	if (kmsg->free_iov)
1796 		io_netmsg_iovec_free(kmsg);
1797 	kfree(kmsg);
1798 }
1799 #endif
1800