xref: /linux/io_uring/net.c (revision 3e5d15dd83e110da062d825be985529aa1d44029)
1 // SPDX-License-Identifier: GPL-2.0
2 #include <linux/kernel.h>
3 #include <linux/errno.h>
4 #include <linux/file.h>
5 #include <linux/slab.h>
6 #include <linux/net.h>
7 #include <linux/compat.h>
8 #include <net/compat.h>
9 #include <linux/io_uring.h>
10 
11 #include <uapi/linux/io_uring.h>
12 
13 #include "io_uring.h"
14 #include "kbuf.h"
15 #include "alloc_cache.h"
16 #include "net.h"
17 #include "notif.h"
18 #include "rsrc.h"
19 
20 #if defined(CONFIG_NET)
21 struct io_shutdown {
22 	struct file			*file;
23 	int				how;
24 };
25 
26 struct io_accept {
27 	struct file			*file;
28 	struct sockaddr __user		*addr;
29 	int __user			*addr_len;
30 	int				flags;
31 	int				iou_flags;
32 	u32				file_slot;
33 	unsigned long			nofile;
34 };
35 
36 struct io_socket {
37 	struct file			*file;
38 	int				domain;
39 	int				type;
40 	int				protocol;
41 	int				flags;
42 	u32				file_slot;
43 	unsigned long			nofile;
44 };
45 
46 struct io_connect {
47 	struct file			*file;
48 	struct sockaddr __user		*addr;
49 	int				addr_len;
50 	bool				in_progress;
51 	bool				seen_econnaborted;
52 };
53 
54 struct io_bind {
55 	struct file			*file;
56 	int				addr_len;
57 };
58 
59 struct io_listen {
60 	struct file			*file;
61 	int				backlog;
62 };
63 
64 struct io_sr_msg {
65 	struct file			*file;
66 	union {
67 		struct compat_msghdr __user	*umsg_compat;
68 		struct user_msghdr __user	*umsg;
69 		void __user			*buf;
70 	};
71 	int				len;
72 	unsigned			done_io;
73 	unsigned			msg_flags;
74 	unsigned			nr_multishot_loops;
75 	u16				flags;
76 	/* initialised and used only by !msg send variants */
77 	u16				buf_group;
78 	u16				buf_index;
79 	void __user			*msg_control;
80 	/* used only for send zerocopy */
81 	struct io_kiocb 		*notif;
82 };
83 
84 /*
85  * Number of times we'll try to do receives if there's more data. If we
86  * exceed this limit, then add us to the back of the queue and retry from
87  * there. This helps fairness between flooding clients.
88  */
89 #define MULTISHOT_MAX_RETRY	32
90 
91 int io_shutdown_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
92 {
93 	struct io_shutdown *shutdown = io_kiocb_to_cmd(req, struct io_shutdown);
94 
95 	if (unlikely(sqe->off || sqe->addr || sqe->rw_flags ||
96 		     sqe->buf_index || sqe->splice_fd_in))
97 		return -EINVAL;
98 
99 	shutdown->how = READ_ONCE(sqe->len);
100 	req->flags |= REQ_F_FORCE_ASYNC;
101 	return 0;
102 }
103 
104 int io_shutdown(struct io_kiocb *req, unsigned int issue_flags)
105 {
106 	struct io_shutdown *shutdown = io_kiocb_to_cmd(req, struct io_shutdown);
107 	struct socket *sock;
108 	int ret;
109 
110 	WARN_ON_ONCE(issue_flags & IO_URING_F_NONBLOCK);
111 
112 	sock = sock_from_file(req->file);
113 	if (unlikely(!sock))
114 		return -ENOTSOCK;
115 
116 	ret = __sys_shutdown_sock(sock, shutdown->how);
117 	io_req_set_res(req, ret, 0);
118 	return IOU_OK;
119 }
120 
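/*
 * A short transfer is only retried when MSG_WAITALL was requested and the
 * socket preserves ordering across calls (SOCK_STREAM or SOCK_SEQPACKET);
 * datagram sockets return the short result as-is.
 */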
121 static bool io_net_retry(struct socket *sock, int flags)
122 {
123 	if (!(flags & MSG_WAITALL))
124 		return false;
125 	return sock->type == SOCK_STREAM || sock->type == SOCK_SEQPACKET;
126 }
127 
128 static void io_netmsg_iovec_free(struct io_async_msghdr *kmsg)
129 {
130 	if (kmsg->free_iov) {
131 		kfree(kmsg->free_iov);
132 		kmsg->free_iov_nr = 0;
133 		kmsg->free_iov = NULL;
134 	}
135 }
136 
137 static void io_netmsg_recycle(struct io_kiocb *req, unsigned int issue_flags)
138 {
139 	struct io_async_msghdr *hdr = req->async_data;
140 
141 	/* can't recycle, ensure we free the iovec if we have one */
142 	if (unlikely(issue_flags & IO_URING_F_UNLOCKED)) {
143 		io_netmsg_iovec_free(hdr);
144 		return;
145 	}
146 
147 	/* Let normal cleanup path reap it if we fail adding to the cache */
148 	io_alloc_cache_kasan(&hdr->free_iov, &hdr->free_iov_nr);
149 	if (io_alloc_cache_put(&req->ctx->netmsg_cache, hdr)) {
150 		req->async_data = NULL;
151 		req->flags &= ~REQ_F_ASYNC_DATA;
152 	}
153 }
154 
155 static struct io_async_msghdr *io_msg_alloc_async(struct io_kiocb *req)
156 {
157 	struct io_ring_ctx *ctx = req->ctx;
158 	struct io_async_msghdr *hdr;
159 
160 	hdr = io_uring_alloc_async_data(&ctx->netmsg_cache, req);
161 	if (!hdr)
162 		return NULL;
163 
164 	/* If the async data was cached, we might have an iov cached inside. */
165 	if (hdr->free_iov)
166 		req->flags |= REQ_F_NEED_CLEANUP;
167 	return hdr;
168 }
169 
170 /* assign new iovec to kmsg, if we need to */
171 static void io_net_vec_assign(struct io_kiocb *req, struct io_async_msghdr *kmsg,
172 			     struct iovec *iov)
173 {
174 	if (iov) {
175 		req->flags |= REQ_F_NEED_CLEANUP;
176 		kmsg->free_iov_nr = kmsg->msg.msg_iter.nr_segs;
177 		if (kmsg->free_iov)
178 			kfree(kmsg->free_iov);
179 		kmsg->free_iov = iov;
180 	}
181 }
182 
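/*
 * Reset per-iteration state before the next multishot/bundle round: clear
 * the buffer-list-empty flag, reset the progress counter, and have the next
 * issue pick a fresh provided buffer from the stored buffer group.
 */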
183 static inline void io_mshot_prep_retry(struct io_kiocb *req,
184 				       struct io_async_msghdr *kmsg)
185 {
186 	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
187 
188 	req->flags &= ~REQ_F_BL_EMPTY;
189 	sr->done_io = 0;
190 	sr->len = 0; /* get from the provided buffer */
191 	req->buf_index = sr->buf_group;
192 }
193 
194 #ifdef CONFIG_COMPAT
195 static int io_compat_msg_copy_hdr(struct io_kiocb *req,
196 				  struct io_async_msghdr *iomsg,
197 				  struct compat_msghdr *msg, int ddir)
198 {
199 	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
200 	struct compat_iovec __user *uiov;
201 	struct iovec *iov;
202 	int ret, nr_segs;
203 
204 	if (iomsg->free_iov) {
205 		nr_segs = iomsg->free_iov_nr;
206 		iov = iomsg->free_iov;
207 	} else {
208 		iov = &iomsg->fast_iov;
209 		nr_segs = 1;
210 	}
211 
212 	if (copy_from_user(msg, sr->umsg_compat, sizeof(*msg)))
213 		return -EFAULT;
214 
215 	uiov = compat_ptr(msg->msg_iov);
216 	if (req->flags & REQ_F_BUFFER_SELECT) {
217 		compat_ssize_t clen;
218 
219 		if (msg->msg_iovlen == 0) {
220 			sr->len = iov->iov_len = 0;
221 			iov->iov_base = NULL;
222 		} else if (msg->msg_iovlen > 1) {
223 			return -EINVAL;
224 		} else {
225 			if (!access_ok(uiov, sizeof(*uiov)))
226 				return -EFAULT;
227 			if (__get_user(clen, &uiov->iov_len))
228 				return -EFAULT;
229 			if (clen < 0)
230 				return -EINVAL;
231 			sr->len = clen;
232 		}
233 
234 		return 0;
235 	}
236 
237 	ret = __import_iovec(ddir, (struct iovec __user *)uiov, msg->msg_iovlen,
238 				nr_segs, &iov, &iomsg->msg.msg_iter, true);
239 	if (unlikely(ret < 0))
240 		return ret;
241 
242 	io_net_vec_assign(req, iomsg, iov);
243 	return 0;
244 }
245 #endif
246 
247 static int io_msg_copy_hdr(struct io_kiocb *req, struct io_async_msghdr *iomsg,
248 			   struct user_msghdr *msg, int ddir)
249 {
250 	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
251 	struct user_msghdr __user *umsg = sr->umsg;
252 	struct iovec *iov;
253 	int ret, nr_segs;
254 
255 	if (iomsg->free_iov) {
256 		nr_segs = iomsg->free_iov_nr;
257 		iov = iomsg->free_iov;
258 	} else {
259 		iov = &iomsg->fast_iov;
260 		nr_segs = 1;
261 	}
262 
263 	if (!user_access_begin(umsg, sizeof(*umsg)))
264 		return -EFAULT;
265 
266 	ret = -EFAULT;
267 	unsafe_get_user(msg->msg_name, &umsg->msg_name, ua_end);
268 	unsafe_get_user(msg->msg_namelen, &umsg->msg_namelen, ua_end);
269 	unsafe_get_user(msg->msg_iov, &umsg->msg_iov, ua_end);
270 	unsafe_get_user(msg->msg_iovlen, &umsg->msg_iovlen, ua_end);
271 	unsafe_get_user(msg->msg_control, &umsg->msg_control, ua_end);
272 	unsafe_get_user(msg->msg_controllen, &umsg->msg_controllen, ua_end);
273 	msg->msg_flags = 0;
274 
275 	if (req->flags & REQ_F_BUFFER_SELECT) {
276 		if (msg->msg_iovlen == 0) {
277 			sr->len = iov->iov_len = 0;
278 			iov->iov_base = NULL;
279 		} else if (msg->msg_iovlen > 1) {
280 			ret = -EINVAL;
281 			goto ua_end;
282 		} else {
283 			struct iovec __user *uiov = msg->msg_iov;
284 
285 			/* we only need the length for provided buffers */
286 			if (!access_ok(&uiov->iov_len, sizeof(uiov->iov_len)))
287 				goto ua_end;
288 			unsafe_get_user(iov->iov_len, &uiov->iov_len, ua_end);
289 			sr->len = iov->iov_len;
290 		}
291 		ret = 0;
292 ua_end:
293 		user_access_end();
294 		return ret;
295 	}
296 
297 	user_access_end();
298 	ret = __import_iovec(ddir, msg->msg_iov, msg->msg_iovlen, nr_segs,
299 				&iov, &iomsg->msg.msg_iter, false);
300 	if (unlikely(ret < 0))
301 		return ret;
302 
303 	io_net_vec_assign(req, iomsg, iov);
304 	return 0;
305 }
306 
307 static int io_sendmsg_copy_hdr(struct io_kiocb *req,
308 			       struct io_async_msghdr *iomsg)
309 {
310 	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
311 	struct user_msghdr msg;
312 	int ret;
313 
314 	iomsg->msg.msg_name = &iomsg->addr;
315 	iomsg->msg.msg_iter.nr_segs = 0;
316 
317 #ifdef CONFIG_COMPAT
318 	if (unlikely(req->ctx->compat)) {
319 		struct compat_msghdr cmsg;
320 
321 		ret = io_compat_msg_copy_hdr(req, iomsg, &cmsg, ITER_SOURCE);
322 		if (unlikely(ret))
323 			return ret;
324 
325 		ret = __get_compat_msghdr(&iomsg->msg, &cmsg, NULL);
326 		sr->msg_control = iomsg->msg.msg_control_user;
327 		return ret;
328 	}
329 #endif
330 
331 	ret = io_msg_copy_hdr(req, iomsg, &msg, ITER_SOURCE);
332 	if (unlikely(ret))
333 		return ret;
334 
335 	ret = __copy_msghdr(&iomsg->msg, &msg, NULL);
336 
337 	/* save msg_control as sys_sendmsg() overwrites it */
338 	sr->msg_control = iomsg->msg.msg_control_user;
339 	return ret;
340 }
341 
342 void io_sendmsg_recvmsg_cleanup(struct io_kiocb *req)
343 {
344 	struct io_async_msghdr *io = req->async_data;
345 
346 	io_netmsg_iovec_free(io);
347 }
348 
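/*
 * Prepare a non-msghdr send: an optional destination address comes from
 * addr2/addr_len, and the user buffer is mapped now unless a provided
 * buffer will be selected at issue time.
 */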
349 static int io_send_setup(struct io_kiocb *req, const struct io_uring_sqe *sqe)
350 {
351 	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
352 	struct io_async_msghdr *kmsg = req->async_data;
353 	void __user *addr;
354 	u16 addr_len;
355 	int ret;
356 
357 	sr->buf = u64_to_user_ptr(READ_ONCE(sqe->addr));
358 
359 	if (READ_ONCE(sqe->__pad3[0]))
360 		return -EINVAL;
361 
362 	kmsg->msg.msg_name = NULL;
363 	kmsg->msg.msg_namelen = 0;
364 	kmsg->msg.msg_control = NULL;
365 	kmsg->msg.msg_controllen = 0;
366 	kmsg->msg.msg_ubuf = NULL;
367 
368 	addr = u64_to_user_ptr(READ_ONCE(sqe->addr2));
369 	addr_len = READ_ONCE(sqe->addr_len);
370 	if (addr) {
371 		ret = move_addr_to_kernel(addr, addr_len, &kmsg->addr);
372 		if (unlikely(ret < 0))
373 			return ret;
374 		kmsg->msg.msg_name = &kmsg->addr;
375 		kmsg->msg.msg_namelen = addr_len;
376 	}
377 	if (!io_do_buffer_select(req)) {
378 		ret = import_ubuf(ITER_SOURCE, sr->buf, sr->len,
379 				  &kmsg->msg.msg_iter);
380 		if (unlikely(ret < 0))
381 			return ret;
382 	}
383 	return 0;
384 }
385 
386 static int io_sendmsg_setup(struct io_kiocb *req, const struct io_uring_sqe *sqe)
387 {
388 	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
389 	struct io_async_msghdr *kmsg = req->async_data;
390 	int ret;
391 
392 	sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
393 
394 	ret = io_sendmsg_copy_hdr(req, kmsg);
395 	if (!ret)
396 		req->flags |= REQ_F_NEED_CLEANUP;
397 	return ret;
398 }
399 
400 #define SENDMSG_FLAGS (IORING_RECVSEND_POLL_FIRST | IORING_RECVSEND_BUNDLE)
401 
402 int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
403 {
404 	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
405 
406 	sr->done_io = 0;
407 
408 	if (req->opcode != IORING_OP_SEND) {
409 		if (sqe->addr2 || sqe->file_index)
410 			return -EINVAL;
411 	}
412 
413 	sr->len = READ_ONCE(sqe->len);
414 	sr->flags = READ_ONCE(sqe->ioprio);
415 	if (sr->flags & ~SENDMSG_FLAGS)
416 		return -EINVAL;
417 	sr->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL;
418 	if (sr->msg_flags & MSG_DONTWAIT)
419 		req->flags |= REQ_F_NOWAIT;
420 	if (sr->flags & IORING_RECVSEND_BUNDLE) {
421 		if (req->opcode == IORING_OP_SENDMSG)
422 			return -EINVAL;
423 		if (!(req->flags & REQ_F_BUFFER_SELECT))
424 			return -EINVAL;
425 		sr->msg_flags |= MSG_WAITALL;
426 		sr->buf_group = req->buf_index;
427 		req->buf_list = NULL;
428 	}
429 
430 #ifdef CONFIG_COMPAT
431 	if (req->ctx->compat)
432 		sr->msg_flags |= MSG_CMSG_COMPAT;
433 #endif
434 	if (unlikely(!io_msg_alloc_async(req)))
435 		return -ENOMEM;
436 	if (req->opcode != IORING_OP_SENDMSG)
437 		return io_send_setup(req, sqe);
438 	return io_sendmsg_setup(req, sqe);
439 }
440 
441 static void io_req_msg_cleanup(struct io_kiocb *req,
442 			       unsigned int issue_flags)
443 {
444 	req->flags &= ~REQ_F_NEED_CLEANUP;
445 	io_netmsg_recycle(req, issue_flags);
446 }
447 
448 /*
449  * For bundle completions, we need to figure out how many segments we consumed.
450  * A bundle could be using a single ITER_UBUF if that's all we mapped, or it
451  * could be using an ITER_IOVEC. If the latter, and we consumed all of
452  * the segments, then it's a trivial question to answer. If we have residual
453  * data in the iter, then loop the segments to figure out how much we
454  * transferred.
455  */
456 static int io_bundle_nbufs(struct io_async_msghdr *kmsg, int ret)
457 {
458 	struct iovec *iov;
459 	int nbufs;
460 
461 	/* no data is always zero segments, and a ubuf is always 1 segment */
462 	if (ret <= 0)
463 		return 0;
464 	if (iter_is_ubuf(&kmsg->msg.msg_iter))
465 		return 1;
466 
467 	iov = kmsg->free_iov;
468 	if (!iov)
469 		iov = &kmsg->fast_iov;
470 
471 	/* if all data was transferred, it's basic pointer math */
472 	if (!iov_iter_count(&kmsg->msg.msg_iter))
473 		return iter_iov(&kmsg->msg.msg_iter) - iov;
474 
475 	/* short transfer, count segments */
476 	nbufs = 0;
477 	do {
478 		int this_len = min_t(int, iov[nbufs].iov_len, ret);
479 
480 		nbufs++;
481 		ret -= this_len;
482 	} while (ret);
483 
484 	return nbufs;
485 }
486 
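/*
 * Post the completion for a send. A bundle send that transferred data and
 * still has buffers left posts a CQE with IORING_CQE_F_MORE and returns false
 * so the caller retries with the next selected buffers; otherwise the request
 * is finished with the current result.
 */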
487 static inline bool io_send_finish(struct io_kiocb *req, int *ret,
488 				  struct io_async_msghdr *kmsg,
489 				  unsigned issue_flags)
490 {
491 	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
492 	bool bundle_finished = *ret <= 0;
493 	unsigned int cflags;
494 
495 	if (!(sr->flags & IORING_RECVSEND_BUNDLE)) {
496 		cflags = io_put_kbuf(req, *ret, issue_flags);
497 		goto finish;
498 	}
499 
500 	cflags = io_put_kbufs(req, *ret, io_bundle_nbufs(kmsg, *ret), issue_flags);
501 
502 	if (bundle_finished || req->flags & REQ_F_BL_EMPTY)
503 		goto finish;
504 
505 	/*
506 	 * Fill CQE for this send and see if we should keep trying to
507 	 * send from this socket.
508 	 */
509 	if (io_req_post_cqe(req, *ret, cflags | IORING_CQE_F_MORE)) {
510 		io_mshot_prep_retry(req, kmsg);
511 		return false;
512 	}
513 
514 	/* Otherwise stop bundle and use the current result. */
515 finish:
516 	io_req_set_res(req, *ret, cflags);
517 	*ret = IOU_OK;
518 	return true;
519 }
520 
521 int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags)
522 {
523 	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
524 	struct io_async_msghdr *kmsg = req->async_data;
525 	struct socket *sock;
526 	unsigned flags;
527 	int min_ret = 0;
528 	int ret;
529 
530 	sock = sock_from_file(req->file);
531 	if (unlikely(!sock))
532 		return -ENOTSOCK;
533 
534 	if (!(req->flags & REQ_F_POLLED) &&
535 	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
536 		return -EAGAIN;
537 
538 	flags = sr->msg_flags;
539 	if (issue_flags & IO_URING_F_NONBLOCK)
540 		flags |= MSG_DONTWAIT;
541 	if (flags & MSG_WAITALL)
542 		min_ret = iov_iter_count(&kmsg->msg.msg_iter);
543 
544 	kmsg->msg.msg_control_user = sr->msg_control;
545 
546 	ret = __sys_sendmsg_sock(sock, &kmsg->msg, flags);
547 
548 	if (ret < min_ret) {
549 		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
550 			return -EAGAIN;
551 		if (ret > 0 && io_net_retry(sock, flags)) {
552 			kmsg->msg.msg_controllen = 0;
553 			kmsg->msg.msg_control = NULL;
554 			sr->done_io += ret;
555 			req->flags |= REQ_F_BL_NO_RECYCLE;
556 			return -EAGAIN;
557 		}
558 		if (ret == -ERESTARTSYS)
559 			ret = -EINTR;
560 		req_set_fail(req);
561 	}
562 	io_req_msg_cleanup(req, issue_flags);
563 	if (ret >= 0)
564 		ret += sr->done_io;
565 	else if (sr->done_io)
566 		ret = sr->done_io;
567 	io_req_set_res(req, ret, 0);
568 	return IOU_OK;
569 }
570 
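/*
 * Select one or more provided buffers for a send. Bundle sends may expand
 * into multiple iovecs; non-bundle sends are limited to a single buffer.
 */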
571 static int io_send_select_buffer(struct io_kiocb *req, unsigned int issue_flags,
572 				 struct io_async_msghdr *kmsg)
573 {
574 	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
575 
576 	int ret;
577 	struct buf_sel_arg arg = {
578 		.iovs = &kmsg->fast_iov,
579 		.max_len = min_not_zero(sr->len, INT_MAX),
580 		.nr_iovs = 1,
581 	};
582 
583 	if (kmsg->free_iov) {
584 		arg.nr_iovs = kmsg->free_iov_nr;
585 		arg.iovs = kmsg->free_iov;
586 		arg.mode = KBUF_MODE_FREE;
587 	}
588 
589 	if (!(sr->flags & IORING_RECVSEND_BUNDLE))
590 		arg.nr_iovs = 1;
591 	else
592 		arg.mode |= KBUF_MODE_EXPAND;
593 
594 	ret = io_buffers_select(req, &arg, issue_flags);
595 	if (unlikely(ret < 0))
596 		return ret;
597 
598 	if (arg.iovs != &kmsg->fast_iov && arg.iovs != kmsg->free_iov) {
599 		kmsg->free_iov_nr = ret;
600 		kmsg->free_iov = arg.iovs;
601 		req->flags |= REQ_F_NEED_CLEANUP;
602 	}
603 	sr->len = arg.out_len;
604 
605 	if (ret == 1) {
606 		sr->buf = arg.iovs[0].iov_base;
607 		ret = import_ubuf(ITER_SOURCE, sr->buf, sr->len,
608 					&kmsg->msg.msg_iter);
609 		if (unlikely(ret))
610 			return ret;
611 	} else {
612 		iov_iter_init(&kmsg->msg.msg_iter, ITER_SOURCE,
613 				arg.iovs, ret, arg.out_len);
614 	}
615 
616 	return 0;
617 }
618 
619 int io_send(struct io_kiocb *req, unsigned int issue_flags)
620 {
621 	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
622 	struct io_async_msghdr *kmsg = req->async_data;
623 	struct socket *sock;
624 	unsigned flags;
625 	int min_ret = 0;
626 	int ret;
627 
628 	sock = sock_from_file(req->file);
629 	if (unlikely(!sock))
630 		return -ENOTSOCK;
631 
632 	if (!(req->flags & REQ_F_POLLED) &&
633 	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
634 		return -EAGAIN;
635 
636 	flags = sr->msg_flags;
637 	if (issue_flags & IO_URING_F_NONBLOCK)
638 		flags |= MSG_DONTWAIT;
639 
640 retry_bundle:
641 	if (io_do_buffer_select(req)) {
642 		ret = io_send_select_buffer(req, issue_flags, kmsg);
643 		if (ret)
644 			return ret;
645 	}
646 
647 	/*
648 	 * If MSG_WAITALL is set, or this is a bundle send, then we need
649 	 * the full amount. If just the bundle flag is set and we do a short send,
650 	 * then we complete the bundle sequence rather than continue on.
651 	 */
652 	if (flags & MSG_WAITALL || sr->flags & IORING_RECVSEND_BUNDLE)
653 		min_ret = iov_iter_count(&kmsg->msg.msg_iter);
654 
655 	flags &= ~MSG_INTERNAL_SENDMSG_FLAGS;
656 	kmsg->msg.msg_flags = flags;
657 	ret = sock_sendmsg(sock, &kmsg->msg);
658 	if (ret < min_ret) {
659 		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
660 			return -EAGAIN;
661 
662 		if (ret > 0 && io_net_retry(sock, flags)) {
663 			sr->len -= ret;
664 			sr->buf += ret;
665 			sr->done_io += ret;
666 			req->flags |= REQ_F_BL_NO_RECYCLE;
667 			return -EAGAIN;
668 		}
669 		if (ret == -ERESTARTSYS)
670 			ret = -EINTR;
671 		req_set_fail(req);
672 	}
673 	if (ret >= 0)
674 		ret += sr->done_io;
675 	else if (sr->done_io)
676 		ret = sr->done_io;
677 
678 	if (!io_send_finish(req, &ret, kmsg, issue_flags))
679 		goto retry_bundle;
680 
681 	io_req_msg_cleanup(req, issue_flags);
682 	return ret;
683 }
684 
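/*
 * For multishot recvmsg with provided buffers, make sure the
 * io_uring_recvmsg_out header plus the requested name and control space
 * fits in an int without overflowing, and stash the sizes for carving up
 * the selected buffer later.
 */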
685 static int io_recvmsg_mshot_prep(struct io_kiocb *req,
686 				 struct io_async_msghdr *iomsg,
687 				 int namelen, size_t controllen)
688 {
689 	if ((req->flags & (REQ_F_APOLL_MULTISHOT|REQ_F_BUFFER_SELECT)) ==
690 			  (REQ_F_APOLL_MULTISHOT|REQ_F_BUFFER_SELECT)) {
691 		int hdr;
692 
693 		if (unlikely(namelen < 0))
694 			return -EOVERFLOW;
695 		if (check_add_overflow(sizeof(struct io_uring_recvmsg_out),
696 					namelen, &hdr))
697 			return -EOVERFLOW;
698 		if (check_add_overflow(hdr, controllen, &hdr))
699 			return -EOVERFLOW;
700 
701 		iomsg->namelen = namelen;
702 		iomsg->controllen = controllen;
703 		return 0;
704 	}
705 
706 	return 0;
707 }
708 
709 static int io_recvmsg_copy_hdr(struct io_kiocb *req,
710 			       struct io_async_msghdr *iomsg)
711 {
712 	struct user_msghdr msg;
713 	int ret;
714 
715 	iomsg->msg.msg_name = &iomsg->addr;
716 	iomsg->msg.msg_iter.nr_segs = 0;
717 
718 #ifdef CONFIG_COMPAT
719 	if (unlikely(req->ctx->compat)) {
720 		struct compat_msghdr cmsg;
721 
722 		ret = io_compat_msg_copy_hdr(req, iomsg, &cmsg, ITER_DEST);
723 		if (unlikely(ret))
724 			return ret;
725 
726 		ret = __get_compat_msghdr(&iomsg->msg, &cmsg, &iomsg->uaddr);
727 		if (unlikely(ret))
728 			return ret;
729 
730 		return io_recvmsg_mshot_prep(req, iomsg, cmsg.msg_namelen,
731 						cmsg.msg_controllen);
732 	}
733 #endif
734 
735 	ret = io_msg_copy_hdr(req, iomsg, &msg, ITER_DEST);
736 	if (unlikely(ret))
737 		return ret;
738 
739 	ret = __copy_msghdr(&iomsg->msg, &msg, &iomsg->uaddr);
740 	if (unlikely(ret))
741 		return ret;
742 
743 	return io_recvmsg_mshot_prep(req, iomsg, msg.msg_namelen,
744 					msg.msg_controllen);
745 }
746 
747 static int io_recvmsg_prep_setup(struct io_kiocb *req)
748 {
749 	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
750 	struct io_async_msghdr *kmsg;
751 	int ret;
752 
753 	kmsg = io_msg_alloc_async(req);
754 	if (unlikely(!kmsg))
755 		return -ENOMEM;
756 
757 	if (req->opcode == IORING_OP_RECV) {
758 		kmsg->msg.msg_name = NULL;
759 		kmsg->msg.msg_namelen = 0;
760 		kmsg->msg.msg_inq = 0;
761 		kmsg->msg.msg_control = NULL;
762 		kmsg->msg.msg_get_inq = 1;
763 		kmsg->msg.msg_controllen = 0;
764 		kmsg->msg.msg_iocb = NULL;
765 		kmsg->msg.msg_ubuf = NULL;
766 
767 		if (!io_do_buffer_select(req)) {
768 			ret = import_ubuf(ITER_DEST, sr->buf, sr->len,
769 					  &kmsg->msg.msg_iter);
770 			if (unlikely(ret))
771 				return ret;
772 		}
773 		return 0;
774 	}
775 
776 	ret = io_recvmsg_copy_hdr(req, kmsg);
777 	if (!ret)
778 		req->flags |= REQ_F_NEED_CLEANUP;
779 	return ret;
780 }
781 
782 #define RECVMSG_FLAGS (IORING_RECVSEND_POLL_FIRST | IORING_RECV_MULTISHOT | \
783 			IORING_RECVSEND_BUNDLE)
784 
785 int io_recvmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
786 {
787 	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
788 
789 	sr->done_io = 0;
790 
791 	if (unlikely(sqe->file_index || sqe->addr2))
792 		return -EINVAL;
793 
794 	sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
795 	sr->len = READ_ONCE(sqe->len);
796 	sr->flags = READ_ONCE(sqe->ioprio);
797 	if (sr->flags & ~RECVMSG_FLAGS)
798 		return -EINVAL;
799 	sr->msg_flags = READ_ONCE(sqe->msg_flags);
800 	if (sr->msg_flags & MSG_DONTWAIT)
801 		req->flags |= REQ_F_NOWAIT;
802 	if (sr->msg_flags & MSG_ERRQUEUE)
803 		req->flags |= REQ_F_CLEAR_POLLIN;
804 	if (req->flags & REQ_F_BUFFER_SELECT) {
805 		/*
806 		 * Store the buffer group for this multishot receive separately,
807 		 * as if we end up doing an io-wq based issue that selects a
808 		 * buffer, it has to be committed immediately and that will
809 		 * clear ->buf_list. This means we lose the link to the buffer
810 		 * list, and the eventual buffer put on completion then cannot
811 		 * restore it.
812 		 */
813 		sr->buf_group = req->buf_index;
814 		req->buf_list = NULL;
815 	}
816 	if (sr->flags & IORING_RECV_MULTISHOT) {
817 		if (!(req->flags & REQ_F_BUFFER_SELECT))
818 			return -EINVAL;
819 		if (sr->msg_flags & MSG_WAITALL)
820 			return -EINVAL;
821 		if (req->opcode == IORING_OP_RECV && sr->len)
822 			return -EINVAL;
823 		req->flags |= REQ_F_APOLL_MULTISHOT;
824 	}
825 	if (sr->flags & IORING_RECVSEND_BUNDLE) {
826 		if (req->opcode == IORING_OP_RECVMSG)
827 			return -EINVAL;
828 	}
829 
830 #ifdef CONFIG_COMPAT
831 	if (req->ctx->compat)
832 		sr->msg_flags |= MSG_CMSG_COMPAT;
833 #endif
834 	sr->nr_multishot_loops = 0;
835 	return io_recvmsg_prep_setup(req);
836 }
837 
838 /*
839  * Finishes io_recv and io_recvmsg.
840  *
841  * Returns true if it is actually finished, or false if it should run
842  * again (for multishot).
843  */
844 static inline bool io_recv_finish(struct io_kiocb *req, int *ret,
845 				  struct io_async_msghdr *kmsg,
846 				  bool mshot_finished, unsigned issue_flags)
847 {
848 	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
849 	unsigned int cflags = 0;
850 
851 	if (kmsg->msg.msg_inq > 0)
852 		cflags |= IORING_CQE_F_SOCK_NONEMPTY;
853 
854 	if (sr->flags & IORING_RECVSEND_BUNDLE) {
855 		cflags |= io_put_kbufs(req, *ret, io_bundle_nbufs(kmsg, *ret),
856 				      issue_flags);
857 		/* bundle with no more immediate buffers, we're done */
858 		if (req->flags & REQ_F_BL_EMPTY)
859 			goto finish;
860 	} else {
861 		cflags |= io_put_kbuf(req, *ret, issue_flags);
862 	}
863 
864 	/*
865 	 * Fill CQE for this receive and see if we should keep trying to
866 	 * receive from this socket.
867 	 */
868 	if ((req->flags & REQ_F_APOLL_MULTISHOT) && !mshot_finished &&
869 	    io_req_post_cqe(req, *ret, cflags | IORING_CQE_F_MORE)) {
870 		int mshot_retry_ret = IOU_ISSUE_SKIP_COMPLETE;
871 
872 		io_mshot_prep_retry(req, kmsg);
873 		/* Known not-empty or unknown state, retry */
874 		if (cflags & IORING_CQE_F_SOCK_NONEMPTY || kmsg->msg.msg_inq < 0) {
875 			if (sr->nr_multishot_loops++ < MULTISHOT_MAX_RETRY)
876 				return false;
877 			/* mshot retries exceeded, force a requeue */
878 			sr->nr_multishot_loops = 0;
879 			mshot_retry_ret = IOU_REQUEUE;
880 		}
881 		if (issue_flags & IO_URING_F_MULTISHOT)
882 			*ret = mshot_retry_ret;
883 		else
884 			*ret = -EAGAIN;
885 		return true;
886 	}
887 
888 	/* Finish the request / stop multishot. */
889 finish:
890 	io_req_set_res(req, *ret, cflags);
891 
892 	if (issue_flags & IO_URING_F_MULTISHOT)
893 		*ret = IOU_STOP_MULTISHOT;
894 	else
895 		*ret = IOU_OK;
896 	io_req_msg_cleanup(req, issue_flags);
897 	return true;
898 }
899 
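/*
 * Carve up the selected buffer for multishot recvmsg: the front holds
 * struct io_uring_recvmsg_out plus the name and control areas, and the
 * remainder receives the payload. The original buffer pointer is stashed
 * in sr->buf so the header can be copied out after the receive completes.
 */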
900 static int io_recvmsg_prep_multishot(struct io_async_msghdr *kmsg,
901 				     struct io_sr_msg *sr, void __user **buf,
902 				     size_t *len)
903 {
904 	unsigned long ubuf = (unsigned long) *buf;
905 	unsigned long hdr;
906 
907 	hdr = sizeof(struct io_uring_recvmsg_out) + kmsg->namelen +
908 		kmsg->controllen;
909 	if (*len < hdr)
910 		return -EFAULT;
911 
912 	if (kmsg->controllen) {
913 		unsigned long control = ubuf + hdr - kmsg->controllen;
914 
915 		kmsg->msg.msg_control_user = (void __user *) control;
916 		kmsg->msg.msg_controllen = kmsg->controllen;
917 	}
918 
919 	sr->buf = *buf; /* stash for later copy */
920 	*buf = (void __user *) (ubuf + hdr);
921 	kmsg->payloadlen = *len = *len - hdr;
922 	return 0;
923 }
924 
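/*
 * Layout copied to the head of the provided buffer for multishot recvmsg;
 * addr must immediately follow msg, which the BUILD_BUG_ON in
 * io_recvmsg_multishot() asserts.
 */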
925 struct io_recvmsg_multishot_hdr {
926 	struct io_uring_recvmsg_out msg;
927 	struct sockaddr_storage addr;
928 };
929 
930 static int io_recvmsg_multishot(struct socket *sock, struct io_sr_msg *io,
931 				struct io_async_msghdr *kmsg,
932 				unsigned int flags, bool *finished)
933 {
934 	int err;
935 	int copy_len;
936 	struct io_recvmsg_multishot_hdr hdr;
937 
938 	if (kmsg->namelen)
939 		kmsg->msg.msg_name = &hdr.addr;
940 	kmsg->msg.msg_flags = flags & (MSG_CMSG_CLOEXEC|MSG_CMSG_COMPAT);
941 	kmsg->msg.msg_namelen = 0;
942 
943 	if (sock->file->f_flags & O_NONBLOCK)
944 		flags |= MSG_DONTWAIT;
945 
946 	err = sock_recvmsg(sock, &kmsg->msg, flags);
947 	*finished = err <= 0;
948 	if (err < 0)
949 		return err;
950 
951 	hdr.msg = (struct io_uring_recvmsg_out) {
952 		.controllen = kmsg->controllen - kmsg->msg.msg_controllen,
953 		.flags = kmsg->msg.msg_flags & ~MSG_CMSG_COMPAT
954 	};
955 
956 	hdr.msg.payloadlen = err;
957 	if (err > kmsg->payloadlen)
958 		err = kmsg->payloadlen;
959 
960 	copy_len = sizeof(struct io_uring_recvmsg_out);
961 	if (kmsg->msg.msg_namelen > kmsg->namelen)
962 		copy_len += kmsg->namelen;
963 	else
964 		copy_len += kmsg->msg.msg_namelen;
965 
966 	/*
967 	 *      "fromlen shall refer to the value before truncation.."
968 	 *                      1003.1g
969 	 */
970 	hdr.msg.namelen = kmsg->msg.msg_namelen;
971 
972 	/* ensure that there is no gap between hdr and sockaddr_storage */
973 	BUILD_BUG_ON(offsetof(struct io_recvmsg_multishot_hdr, addr) !=
974 		     sizeof(struct io_uring_recvmsg_out));
975 	if (copy_to_user(io->buf, &hdr, copy_len)) {
976 		*finished = true;
977 		return -EFAULT;
978 	}
979 
980 	return sizeof(struct io_uring_recvmsg_out) + kmsg->namelen +
981 			kmsg->controllen + err;
982 }
983 
984 int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags)
985 {
986 	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
987 	struct io_async_msghdr *kmsg = req->async_data;
988 	struct socket *sock;
989 	unsigned flags;
990 	int ret, min_ret = 0;
991 	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
992 	bool mshot_finished = true;
993 
994 	sock = sock_from_file(req->file);
995 	if (unlikely(!sock))
996 		return -ENOTSOCK;
997 
998 	if (!(req->flags & REQ_F_POLLED) &&
999 	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
1000 		return -EAGAIN;
1001 
1002 	flags = sr->msg_flags;
1003 	if (force_nonblock)
1004 		flags |= MSG_DONTWAIT;
1005 
1006 retry_multishot:
1007 	if (io_do_buffer_select(req)) {
1008 		void __user *buf;
1009 		size_t len = sr->len;
1010 
1011 		buf = io_buffer_select(req, &len, issue_flags);
1012 		if (!buf)
1013 			return -ENOBUFS;
1014 
1015 		if (req->flags & REQ_F_APOLL_MULTISHOT) {
1016 			ret = io_recvmsg_prep_multishot(kmsg, sr, &buf, &len);
1017 			if (ret) {
1018 				io_kbuf_recycle(req, issue_flags);
1019 				return ret;
1020 			}
1021 		}
1022 
1023 		iov_iter_ubuf(&kmsg->msg.msg_iter, ITER_DEST, buf, len);
1024 	}
1025 
1026 	kmsg->msg.msg_get_inq = 1;
1027 	kmsg->msg.msg_inq = -1;
1028 	if (req->flags & REQ_F_APOLL_MULTISHOT) {
1029 		ret = io_recvmsg_multishot(sock, sr, kmsg, flags,
1030 					   &mshot_finished);
1031 	} else {
1032 		/* disable partial retry for recvmsg with cmsg attached */
1033 		if (flags & MSG_WAITALL && !kmsg->msg.msg_controllen)
1034 			min_ret = iov_iter_count(&kmsg->msg.msg_iter);
1035 
1036 		ret = __sys_recvmsg_sock(sock, &kmsg->msg, sr->umsg,
1037 					 kmsg->uaddr, flags);
1038 	}
1039 
1040 	if (ret < min_ret) {
1041 		if (ret == -EAGAIN && force_nonblock) {
1042 			if (issue_flags & IO_URING_F_MULTISHOT) {
1043 				io_kbuf_recycle(req, issue_flags);
1044 				return IOU_ISSUE_SKIP_COMPLETE;
1045 			}
1046 			return -EAGAIN;
1047 		}
1048 		if (ret > 0 && io_net_retry(sock, flags)) {
1049 			sr->done_io += ret;
1050 			req->flags |= REQ_F_BL_NO_RECYCLE;
1051 			return -EAGAIN;
1052 		}
1053 		if (ret == -ERESTARTSYS)
1054 			ret = -EINTR;
1055 		req_set_fail(req);
1056 	} else if ((flags & MSG_WAITALL) && (kmsg->msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))) {
1057 		req_set_fail(req);
1058 	}
1059 
1060 	if (ret > 0)
1061 		ret += sr->done_io;
1062 	else if (sr->done_io)
1063 		ret = sr->done_io;
1064 	else
1065 		io_kbuf_recycle(req, issue_flags);
1066 
1067 	if (!io_recv_finish(req, &ret, kmsg, mshot_finished, issue_flags))
1068 		goto retry_multishot;
1069 
1070 	return ret;
1071 }
1072 
1073 static int io_recv_buf_select(struct io_kiocb *req, struct io_async_msghdr *kmsg,
1074 			      size_t *len, unsigned int issue_flags)
1075 {
1076 	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
1077 	int ret;
1078 
1079 	/*
1080 	 * If the ring isn't locked, then don't use the peek interface
1081 	 * to grab multiple buffers as we will lock/unlock between
1082 	 * this selection and posting the buffers.
1083 	 */
1084 	if (!(issue_flags & IO_URING_F_UNLOCKED) &&
1085 	    sr->flags & IORING_RECVSEND_BUNDLE) {
1086 		struct buf_sel_arg arg = {
1087 			.iovs = &kmsg->fast_iov,
1088 			.nr_iovs = 1,
1089 			.mode = KBUF_MODE_EXPAND,
1090 		};
1091 
1092 		if (kmsg->free_iov) {
1093 			arg.nr_iovs = kmsg->free_iov_nr;
1094 			arg.iovs = kmsg->free_iov;
1095 			arg.mode |= KBUF_MODE_FREE;
1096 		}
1097 
1098 		if (kmsg->msg.msg_inq > 0)
1099 			arg.max_len = min_not_zero(sr->len, kmsg->msg.msg_inq);
1100 
1101 		ret = io_buffers_peek(req, &arg);
1102 		if (unlikely(ret < 0))
1103 			return ret;
1104 
1105 		/* special case 1 vec, can be a fast path */
1106 		if (ret == 1) {
1107 			sr->buf = arg.iovs[0].iov_base;
1108 			sr->len = arg.iovs[0].iov_len;
1109 			goto map_ubuf;
1110 		}
1111 		iov_iter_init(&kmsg->msg.msg_iter, ITER_DEST, arg.iovs, ret,
1112 				arg.out_len);
1113 		if (arg.iovs != &kmsg->fast_iov && arg.iovs != kmsg->free_iov) {
1114 			kmsg->free_iov_nr = ret;
1115 			kmsg->free_iov = arg.iovs;
1116 			req->flags |= REQ_F_NEED_CLEANUP;
1117 		}
1118 	} else {
1119 		void __user *buf;
1120 
1121 		*len = sr->len;
1122 		buf = io_buffer_select(req, len, issue_flags);
1123 		if (!buf)
1124 			return -ENOBUFS;
1125 		sr->buf = buf;
1126 		sr->len = *len;
1127 map_ubuf:
1128 		ret = import_ubuf(ITER_DEST, sr->buf, sr->len,
1129 				  &kmsg->msg.msg_iter);
1130 		if (unlikely(ret))
1131 			return ret;
1132 	}
1133 
1134 	return 0;
1135 }
1136 
1137 int io_recv(struct io_kiocb *req, unsigned int issue_flags)
1138 {
1139 	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
1140 	struct io_async_msghdr *kmsg = req->async_data;
1141 	struct socket *sock;
1142 	unsigned flags;
1143 	int ret, min_ret = 0;
1144 	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
1145 	size_t len = sr->len;
1146 	bool mshot_finished;
1147 
1148 	if (!(req->flags & REQ_F_POLLED) &&
1149 	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
1150 		return -EAGAIN;
1151 
1152 	sock = sock_from_file(req->file);
1153 	if (unlikely(!sock))
1154 		return -ENOTSOCK;
1155 
1156 	flags = sr->msg_flags;
1157 	if (force_nonblock)
1158 		flags |= MSG_DONTWAIT;
1159 
1160 retry_multishot:
1161 	if (io_do_buffer_select(req)) {
1162 		ret = io_recv_buf_select(req, kmsg, &len, issue_flags);
1163 		if (unlikely(ret)) {
1164 			kmsg->msg.msg_inq = -1;
1165 			goto out_free;
1166 		}
1167 		sr->buf = NULL;
1168 	}
1169 
1170 	kmsg->msg.msg_flags = 0;
1171 	kmsg->msg.msg_inq = -1;
1172 
1173 	if (flags & MSG_WAITALL)
1174 		min_ret = iov_iter_count(&kmsg->msg.msg_iter);
1175 
1176 	ret = sock_recvmsg(sock, &kmsg->msg, flags);
1177 	if (ret < min_ret) {
1178 		if (ret == -EAGAIN && force_nonblock) {
1179 			if (issue_flags & IO_URING_F_MULTISHOT) {
1180 				io_kbuf_recycle(req, issue_flags);
1181 				return IOU_ISSUE_SKIP_COMPLETE;
1182 			}
1183 
1184 			return -EAGAIN;
1185 		}
1186 		if (ret > 0 && io_net_retry(sock, flags)) {
1187 			sr->len -= ret;
1188 			sr->buf += ret;
1189 			sr->done_io += ret;
1190 			req->flags |= REQ_F_BL_NO_RECYCLE;
1191 			return -EAGAIN;
1192 		}
1193 		if (ret == -ERESTARTSYS)
1194 			ret = -EINTR;
1195 		req_set_fail(req);
1196 	} else if ((flags & MSG_WAITALL) && (kmsg->msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))) {
1197 out_free:
1198 		req_set_fail(req);
1199 	}
1200 
1201 	mshot_finished = ret <= 0;
1202 	if (ret > 0)
1203 		ret += sr->done_io;
1204 	else if (sr->done_io)
1205 		ret = sr->done_io;
1206 	else
1207 		io_kbuf_recycle(req, issue_flags);
1208 
1209 	if (!io_recv_finish(req, &ret, kmsg, mshot_finished, issue_flags))
1210 		goto retry_multishot;
1211 
1212 	return ret;
1213 }
1214 
1215 void io_send_zc_cleanup(struct io_kiocb *req)
1216 {
1217 	struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
1218 	struct io_async_msghdr *io = req->async_data;
1219 
1220 	if (req_has_async_data(req))
1221 		io_netmsg_iovec_free(io);
1222 	if (zc->notif) {
1223 		io_notif_flush(zc->notif);
1224 		zc->notif = NULL;
1225 	}
1226 }
1227 
1228 #define IO_ZC_FLAGS_COMMON (IORING_RECVSEND_POLL_FIRST | IORING_RECVSEND_FIXED_BUF)
1229 #define IO_ZC_FLAGS_VALID  (IO_ZC_FLAGS_COMMON | IORING_SEND_ZC_REPORT_USAGE)
1230 
1231 int io_send_zc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
1232 {
1233 	struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
1234 	struct io_ring_ctx *ctx = req->ctx;
1235 	struct io_kiocb *notif;
1236 
1237 	zc->done_io = 0;
1238 	req->flags |= REQ_F_POLL_NO_LAZY;
1239 
1240 	if (unlikely(READ_ONCE(sqe->__pad2[0]) || READ_ONCE(sqe->addr3)))
1241 		return -EINVAL;
1242 	/* we don't support IOSQE_CQE_SKIP_SUCCESS just yet */
1243 	if (req->flags & REQ_F_CQE_SKIP)
1244 		return -EINVAL;
1245 
1246 	notif = zc->notif = io_alloc_notif(ctx);
1247 	if (!notif)
1248 		return -ENOMEM;
1249 	notif->cqe.user_data = req->cqe.user_data;
1250 	notif->cqe.res = 0;
1251 	notif->cqe.flags = IORING_CQE_F_NOTIF;
1252 	req->flags |= REQ_F_NEED_CLEANUP;
1253 
1254 	zc->flags = READ_ONCE(sqe->ioprio);
1255 	if (unlikely(zc->flags & ~IO_ZC_FLAGS_COMMON)) {
1256 		if (zc->flags & ~IO_ZC_FLAGS_VALID)
1257 			return -EINVAL;
1258 		if (zc->flags & IORING_SEND_ZC_REPORT_USAGE) {
1259 			struct io_notif_data *nd = io_notif_to_data(notif);
1260 
1261 			nd->zc_report = true;
1262 			nd->zc_used = false;
1263 			nd->zc_copied = false;
1264 		}
1265 	}
1266 
1267 	if (req->opcode != IORING_OP_SEND_ZC) {
1268 		if (unlikely(sqe->addr2 || sqe->file_index))
1269 			return -EINVAL;
1270 		if (unlikely(zc->flags & IORING_RECVSEND_FIXED_BUF))
1271 			return -EINVAL;
1272 	}
1273 
1274 	zc->len = READ_ONCE(sqe->len);
1275 	zc->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL | MSG_ZEROCOPY;
1276 	zc->buf_index = READ_ONCE(sqe->buf_index);
1277 	if (zc->msg_flags & MSG_DONTWAIT)
1278 		req->flags |= REQ_F_NOWAIT;
1279 
1280 #ifdef CONFIG_COMPAT
1281 	if (req->ctx->compat)
1282 		zc->msg_flags |= MSG_CMSG_COMPAT;
1283 #endif
1284 	if (unlikely(!io_msg_alloc_async(req)))
1285 		return -ENOMEM;
1286 	if (req->opcode != IORING_OP_SENDMSG_ZC)
1287 		return io_send_setup(req, sqe);
1288 	return io_sendmsg_setup(req, sqe);
1289 }
1290 
1291 static int io_sg_from_iter_iovec(struct sk_buff *skb,
1292 				 struct iov_iter *from, size_t length)
1293 {
1294 	skb_zcopy_downgrade_managed(skb);
1295 	return zerocopy_fill_skb_from_iter(skb, from, length);
1296 }
1297 
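/*
 * Fill skb frags directly from the bvec-backed iterator without copying,
 * falling back to the copying path if the skb already has frags that are
 * not zerocopy-managed. Returns -EMSGSIZE if the data doesn't fit within
 * MAX_SKB_FRAGS.
 */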
1298 static int io_sg_from_iter(struct sk_buff *skb,
1299 			   struct iov_iter *from, size_t length)
1300 {
1301 	struct skb_shared_info *shinfo = skb_shinfo(skb);
1302 	int frag = shinfo->nr_frags;
1303 	int ret = 0;
1304 	struct bvec_iter bi;
1305 	ssize_t copied = 0;
1306 	unsigned long truesize = 0;
1307 
1308 	if (!frag)
1309 		shinfo->flags |= SKBFL_MANAGED_FRAG_REFS;
1310 	else if (unlikely(!skb_zcopy_managed(skb)))
1311 		return zerocopy_fill_skb_from_iter(skb, from, length);
1312 
1313 	bi.bi_size = min(from->count, length);
1314 	bi.bi_bvec_done = from->iov_offset;
1315 	bi.bi_idx = 0;
1316 
1317 	while (bi.bi_size && frag < MAX_SKB_FRAGS) {
1318 		struct bio_vec v = mp_bvec_iter_bvec(from->bvec, bi);
1319 
1320 		copied += v.bv_len;
1321 		truesize += PAGE_ALIGN(v.bv_len + v.bv_offset);
1322 		__skb_fill_page_desc_noacc(shinfo, frag++, v.bv_page,
1323 					   v.bv_offset, v.bv_len);
1324 		bvec_iter_advance_single(from->bvec, &bi, v.bv_len);
1325 	}
1326 	if (bi.bi_size)
1327 		ret = -EMSGSIZE;
1328 
1329 	shinfo->nr_frags = frag;
1330 	from->bvec += bi.bi_idx;
1331 	from->nr_segs -= bi.bi_idx;
1332 	from->count -= copied;
1333 	from->iov_offset = bi.bi_bvec_done;
1334 
1335 	skb->data_len += copied;
1336 	skb->len += copied;
1337 	skb->truesize += truesize;
1338 	return ret;
1339 }
1340 
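/*
 * Map the source buffer for a zerocopy send: either resolve a registered
 * (fixed) buffer node and import it, or import the plain user buffer and
 * account its memory against the notification.
 */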
1341 static int io_send_zc_import(struct io_kiocb *req, unsigned int issue_flags)
1342 {
1343 	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
1344 	struct io_async_msghdr *kmsg = req->async_data;
1345 	int ret;
1346 
1347 	if (sr->flags & IORING_RECVSEND_FIXED_BUF) {
1348 		struct io_ring_ctx *ctx = req->ctx;
1349 		struct io_rsrc_node *node;
1350 
1351 		ret = -EFAULT;
1352 		io_ring_submit_lock(ctx, issue_flags);
1353 		node = io_rsrc_node_lookup(&ctx->buf_table, sr->buf_index);
1354 		if (node) {
1355 			io_req_assign_buf_node(sr->notif, node);
1356 			ret = 0;
1357 		}
1358 		io_ring_submit_unlock(ctx, issue_flags);
1359 
1360 		if (unlikely(ret))
1361 			return ret;
1362 
1363 		ret = io_import_fixed(ITER_SOURCE, &kmsg->msg.msg_iter,
1364 					node->buf, (u64)(uintptr_t)sr->buf,
1365 					sr->len);
1366 		if (unlikely(ret))
1367 			return ret;
1368 		kmsg->msg.sg_from_iter = io_sg_from_iter;
1369 	} else {
1370 		ret = import_ubuf(ITER_SOURCE, sr->buf, sr->len, &kmsg->msg.msg_iter);
1371 		if (unlikely(ret))
1372 			return ret;
1373 		ret = io_notif_account_mem(sr->notif, sr->len);
1374 		if (unlikely(ret))
1375 			return ret;
1376 		kmsg->msg.sg_from_iter = io_sg_from_iter_iovec;
1377 	}
1378 
1379 	return ret;
1380 }
1381 
1382 int io_send_zc(struct io_kiocb *req, unsigned int issue_flags)
1383 {
1384 	struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
1385 	struct io_async_msghdr *kmsg = req->async_data;
1386 	struct socket *sock;
1387 	unsigned msg_flags;
1388 	int ret, min_ret = 0;
1389 
1390 	sock = sock_from_file(req->file);
1391 	if (unlikely(!sock))
1392 		return -ENOTSOCK;
1393 	if (!test_bit(SOCK_SUPPORT_ZC, &sock->flags))
1394 		return -EOPNOTSUPP;
1395 
1396 	if (!(req->flags & REQ_F_POLLED) &&
1397 	    (zc->flags & IORING_RECVSEND_POLL_FIRST))
1398 		return -EAGAIN;
1399 
1400 	if (!zc->done_io) {
1401 		ret = io_send_zc_import(req, issue_flags);
1402 		if (unlikely(ret))
1403 			return ret;
1404 	}
1405 
1406 	msg_flags = zc->msg_flags;
1407 	if (issue_flags & IO_URING_F_NONBLOCK)
1408 		msg_flags |= MSG_DONTWAIT;
1409 	if (msg_flags & MSG_WAITALL)
1410 		min_ret = iov_iter_count(&kmsg->msg.msg_iter);
1411 	msg_flags &= ~MSG_INTERNAL_SENDMSG_FLAGS;
1412 
1413 	kmsg->msg.msg_flags = msg_flags;
1414 	kmsg->msg.msg_ubuf = &io_notif_to_data(zc->notif)->uarg;
1415 	ret = sock_sendmsg(sock, &kmsg->msg);
1416 
1417 	if (unlikely(ret < min_ret)) {
1418 		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
1419 			return -EAGAIN;
1420 
1421 		if (ret > 0 && io_net_retry(sock, kmsg->msg.msg_flags)) {
1422 			zc->len -= ret;
1423 			zc->buf += ret;
1424 			zc->done_io += ret;
1425 			req->flags |= REQ_F_BL_NO_RECYCLE;
1426 			return -EAGAIN;
1427 		}
1428 		if (ret == -ERESTARTSYS)
1429 			ret = -EINTR;
1430 		req_set_fail(req);
1431 	}
1432 
1433 	if (ret >= 0)
1434 		ret += zc->done_io;
1435 	else if (zc->done_io)
1436 		ret = zc->done_io;
1437 
1438 	/*
1439 	 * If we're in io-wq we can't rely on tw ordering guarantees, defer
1440 	 * flushing notif to io_send_zc_cleanup()
1441 	 */
1442 	if (!(issue_flags & IO_URING_F_UNLOCKED)) {
1443 		io_notif_flush(zc->notif);
1444 		io_req_msg_cleanup(req, 0);
1445 	}
1446 	io_req_set_res(req, ret, IORING_CQE_F_MORE);
1447 	return IOU_OK;
1448 }
1449 
1450 int io_sendmsg_zc(struct io_kiocb *req, unsigned int issue_flags)
1451 {
1452 	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
1453 	struct io_async_msghdr *kmsg = req->async_data;
1454 	struct socket *sock;
1455 	unsigned flags;
1456 	int ret, min_ret = 0;
1457 
1458 	sock = sock_from_file(req->file);
1459 	if (unlikely(!sock))
1460 		return -ENOTSOCK;
1461 	if (!test_bit(SOCK_SUPPORT_ZC, &sock->flags))
1462 		return -EOPNOTSUPP;
1463 
1464 	if (!(req->flags & REQ_F_POLLED) &&
1465 	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
1466 		return -EAGAIN;
1467 
1468 	flags = sr->msg_flags;
1469 	if (issue_flags & IO_URING_F_NONBLOCK)
1470 		flags |= MSG_DONTWAIT;
1471 	if (flags & MSG_WAITALL)
1472 		min_ret = iov_iter_count(&kmsg->msg.msg_iter);
1473 
1474 	kmsg->msg.msg_control_user = sr->msg_control;
1475 	kmsg->msg.msg_ubuf = &io_notif_to_data(sr->notif)->uarg;
1476 	kmsg->msg.sg_from_iter = io_sg_from_iter_iovec;
1477 	ret = __sys_sendmsg_sock(sock, &kmsg->msg, flags);
1478 
1479 	if (unlikely(ret < min_ret)) {
1480 		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
1481 			return -EAGAIN;
1482 
1483 		if (ret > 0 && io_net_retry(sock, flags)) {
1484 			sr->done_io += ret;
1485 			req->flags |= REQ_F_BL_NO_RECYCLE;
1486 			return -EAGAIN;
1487 		}
1488 		if (ret == -ERESTARTSYS)
1489 			ret = -EINTR;
1490 		req_set_fail(req);
1491 	}
1492 
1493 	if (ret >= 0)
1494 		ret += sr->done_io;
1495 	else if (sr->done_io)
1496 		ret = sr->done_io;
1497 
1498 	/*
1499 	 * If we're in io-wq we can't rely on tw ordering guarantees, defer
1500 	 * flushing notif to io_send_zc_cleanup()
1501 	 */
1502 	if (!(issue_flags & IO_URING_F_UNLOCKED)) {
1503 		io_notif_flush(sr->notif);
1504 		io_req_msg_cleanup(req, 0);
1505 	}
1506 	io_req_set_res(req, ret, IORING_CQE_F_MORE);
1507 	return IOU_OK;
1508 }
1509 
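/*
 * On failure, report any partial progress as the result, and for zerocopy
 * sends with a notification still pending keep IORING_CQE_F_MORE set so
 * userspace knows a notification CQE will follow.
 */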
1510 void io_sendrecv_fail(struct io_kiocb *req)
1511 {
1512 	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
1513 
1514 	if (sr->done_io)
1515 		req->cqe.res = sr->done_io;
1516 
1517 	if ((req->flags & REQ_F_NEED_CLEANUP) &&
1518 	    (req->opcode == IORING_OP_SEND_ZC || req->opcode == IORING_OP_SENDMSG_ZC))
1519 		req->cqe.flags |= IORING_CQE_F_MORE;
1520 }
1521 
1522 #define ACCEPT_FLAGS	(IORING_ACCEPT_MULTISHOT | IORING_ACCEPT_DONTWAIT | \
1523 			 IORING_ACCEPT_POLL_FIRST)
1524 
1525 int io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
1526 {
1527 	struct io_accept *accept = io_kiocb_to_cmd(req, struct io_accept);
1528 
1529 	if (sqe->len || sqe->buf_index)
1530 		return -EINVAL;
1531 
1532 	accept->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
1533 	accept->addr_len = u64_to_user_ptr(READ_ONCE(sqe->addr2));
1534 	accept->flags = READ_ONCE(sqe->accept_flags);
1535 	accept->nofile = rlimit(RLIMIT_NOFILE);
1536 	accept->iou_flags = READ_ONCE(sqe->ioprio);
1537 	if (accept->iou_flags & ~ACCEPT_FLAGS)
1538 		return -EINVAL;
1539 
1540 	accept->file_slot = READ_ONCE(sqe->file_index);
1541 	if (accept->file_slot) {
1542 		if (accept->flags & SOCK_CLOEXEC)
1543 			return -EINVAL;
1544 		if (accept->iou_flags & IORING_ACCEPT_MULTISHOT &&
1545 		    accept->file_slot != IORING_FILE_INDEX_ALLOC)
1546 			return -EINVAL;
1547 	}
1548 	if (accept->flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK))
1549 		return -EINVAL;
1550 	if (SOCK_NONBLOCK != O_NONBLOCK && (accept->flags & SOCK_NONBLOCK))
1551 		accept->flags = (accept->flags & ~SOCK_NONBLOCK) | O_NONBLOCK;
1552 	if (accept->iou_flags & IORING_ACCEPT_MULTISHOT)
1553 		req->flags |= REQ_F_APOLL_MULTISHOT;
1554 	if (accept->iou_flags & IORING_ACCEPT_DONTWAIT)
1555 		req->flags |= REQ_F_NOWAIT;
1556 	return 0;
1557 }
1558 
1559 int io_accept(struct io_kiocb *req, unsigned int issue_flags)
1560 {
1561 	struct io_accept *accept = io_kiocb_to_cmd(req, struct io_accept);
1562 	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
1563 	bool fixed = !!accept->file_slot;
1564 	struct proto_accept_arg arg = {
1565 		.flags = force_nonblock ? O_NONBLOCK : 0,
1566 	};
1567 	struct file *file;
1568 	unsigned cflags;
1569 	int ret, fd;
1570 
1571 	if (!(req->flags & REQ_F_POLLED) &&
1572 	    accept->iou_flags & IORING_ACCEPT_POLL_FIRST)
1573 		return -EAGAIN;
1574 
1575 retry:
1576 	if (!fixed) {
1577 		fd = __get_unused_fd_flags(accept->flags, accept->nofile);
1578 		if (unlikely(fd < 0))
1579 			return fd;
1580 	}
1581 	arg.err = 0;
1582 	arg.is_empty = -1;
1583 	file = do_accept(req->file, &arg, accept->addr, accept->addr_len,
1584 			 accept->flags);
1585 	if (IS_ERR(file)) {
1586 		if (!fixed)
1587 			put_unused_fd(fd);
1588 		ret = PTR_ERR(file);
1589 		if (ret == -EAGAIN && force_nonblock &&
1590 		    !(accept->iou_flags & IORING_ACCEPT_DONTWAIT)) {
1591 			/*
1592 			 * if it's multishot and polled, we don't need to
1593 			 * return EAGAIN to arm the poll infra since it
1594 			 * has already been done
1595 			 */
1596 			if (issue_flags & IO_URING_F_MULTISHOT)
1597 				return IOU_ISSUE_SKIP_COMPLETE;
1598 			return ret;
1599 		}
1600 		if (ret == -ERESTARTSYS)
1601 			ret = -EINTR;
1602 		req_set_fail(req);
1603 	} else if (!fixed) {
1604 		fd_install(fd, file);
1605 		ret = fd;
1606 	} else {
1607 		ret = io_fixed_fd_install(req, issue_flags, file,
1608 						accept->file_slot);
1609 	}
1610 
1611 	cflags = 0;
1612 	if (!arg.is_empty)
1613 		cflags |= IORING_CQE_F_SOCK_NONEMPTY;
1614 
1615 	if (!(req->flags & REQ_F_APOLL_MULTISHOT)) {
1616 		io_req_set_res(req, ret, cflags);
1617 		return IOU_OK;
1618 	}
1619 
1620 	if (ret < 0)
1621 		return ret;
1622 	if (io_req_post_cqe(req, ret, cflags | IORING_CQE_F_MORE)) {
1623 		if (cflags & IORING_CQE_F_SOCK_NONEMPTY || arg.is_empty == -1)
1624 			goto retry;
1625 		if (issue_flags & IO_URING_F_MULTISHOT)
1626 			return IOU_ISSUE_SKIP_COMPLETE;
1627 		return -EAGAIN;
1628 	}
1629 
1630 	io_req_set_res(req, ret, cflags);
1631 	return IOU_STOP_MULTISHOT;
1632 }
1633 
1634 int io_socket_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
1635 {
1636 	struct io_socket *sock = io_kiocb_to_cmd(req, struct io_socket);
1637 
1638 	if (sqe->addr || sqe->rw_flags || sqe->buf_index)
1639 		return -EINVAL;
1640 
1641 	sock->domain = READ_ONCE(sqe->fd);
1642 	sock->type = READ_ONCE(sqe->off);
1643 	sock->protocol = READ_ONCE(sqe->len);
1644 	sock->file_slot = READ_ONCE(sqe->file_index);
1645 	sock->nofile = rlimit(RLIMIT_NOFILE);
1646 
1647 	sock->flags = sock->type & ~SOCK_TYPE_MASK;
1648 	if (sock->file_slot && (sock->flags & SOCK_CLOEXEC))
1649 		return -EINVAL;
1650 	if (sock->flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK))
1651 		return -EINVAL;
1652 	return 0;
1653 }
1654 
1655 int io_socket(struct io_kiocb *req, unsigned int issue_flags)
1656 {
1657 	struct io_socket *sock = io_kiocb_to_cmd(req, struct io_socket);
1658 	bool fixed = !!sock->file_slot;
1659 	struct file *file;
1660 	int ret, fd;
1661 
1662 	if (!fixed) {
1663 		fd = __get_unused_fd_flags(sock->flags, sock->nofile);
1664 		if (unlikely(fd < 0))
1665 			return fd;
1666 	}
1667 	file = __sys_socket_file(sock->domain, sock->type, sock->protocol);
1668 	if (IS_ERR(file)) {
1669 		if (!fixed)
1670 			put_unused_fd(fd);
1671 		ret = PTR_ERR(file);
1672 		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
1673 			return -EAGAIN;
1674 		if (ret == -ERESTARTSYS)
1675 			ret = -EINTR;
1676 		req_set_fail(req);
1677 	} else if (!fixed) {
1678 		fd_install(fd, file);
1679 		ret = fd;
1680 	} else {
1681 		ret = io_fixed_fd_install(req, issue_flags, file,
1682 					    sock->file_slot);
1683 	}
1684 	io_req_set_res(req, ret, 0);
1685 	return IOU_OK;
1686 }
1687 
1688 int io_connect_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
1689 {
1690 	struct io_connect *conn = io_kiocb_to_cmd(req, struct io_connect);
1691 	struct io_async_msghdr *io;
1692 
1693 	if (sqe->len || sqe->buf_index || sqe->rw_flags || sqe->splice_fd_in)
1694 		return -EINVAL;
1695 
1696 	conn->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
1697 	conn->addr_len =  READ_ONCE(sqe->addr2);
1698 	conn->in_progress = conn->seen_econnaborted = false;
1699 
1700 	io = io_msg_alloc_async(req);
1701 	if (unlikely(!io))
1702 		return -ENOMEM;
1703 
1704 	return move_addr_to_kernel(conn->addr, conn->addr_len, &io->addr);
1705 }
1706 
1707 int io_connect(struct io_kiocb *req, unsigned int issue_flags)
1708 {
1709 	struct io_connect *connect = io_kiocb_to_cmd(req, struct io_connect);
1710 	struct io_async_msghdr *io = req->async_data;
1711 	unsigned file_flags;
1712 	int ret;
1713 	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
1714 
1715 	if (unlikely(req->flags & REQ_F_FAIL)) {
1716 		ret = -ECONNRESET;
1717 		goto out;
1718 	}
1719 
1720 	file_flags = force_nonblock ? O_NONBLOCK : 0;
1721 
1722 	ret = __sys_connect_file(req->file, &io->addr, connect->addr_len,
1723 				 file_flags);
1724 	if ((ret == -EAGAIN || ret == -EINPROGRESS || ret == -ECONNABORTED)
1725 	    && force_nonblock) {
1726 		if (ret == -EINPROGRESS) {
1727 			connect->in_progress = true;
1728 		} else if (ret == -ECONNABORTED) {
1729 			if (connect->seen_econnaborted)
1730 				goto out;
1731 			connect->seen_econnaborted = true;
1732 		}
1733 		return -EAGAIN;
1734 	}
1735 	if (connect->in_progress) {
1736 		/*
1737 		 * At least bluetooth will return -EBADFD on a re-connect
1738 		 * attempt, and it's (supposedly) also valid to get -EISCONN
1739 		 * which means the previous result is good. For both of these,
1740 		 * grab the sock_error() and use that for the completion.
1741 		 */
1742 		if (ret == -EBADFD || ret == -EISCONN)
1743 			ret = sock_error(sock_from_file(req->file)->sk);
1744 	}
1745 	if (ret == -ERESTARTSYS)
1746 		ret = -EINTR;
1747 out:
1748 	if (ret < 0)
1749 		req_set_fail(req);
1750 	io_req_msg_cleanup(req, issue_flags);
1751 	io_req_set_res(req, ret, 0);
1752 	return IOU_OK;
1753 }
1754 
1755 int io_bind_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
1756 {
1757 	struct io_bind *bind = io_kiocb_to_cmd(req, struct io_bind);
1758 	struct sockaddr __user *uaddr;
1759 	struct io_async_msghdr *io;
1760 
1761 	if (sqe->len || sqe->buf_index || sqe->rw_flags || sqe->splice_fd_in)
1762 		return -EINVAL;
1763 
1764 	uaddr = u64_to_user_ptr(READ_ONCE(sqe->addr));
1765 	bind->addr_len =  READ_ONCE(sqe->addr2);
1766 
1767 	io = io_msg_alloc_async(req);
1768 	if (unlikely(!io))
1769 		return -ENOMEM;
1770 	return move_addr_to_kernel(uaddr, bind->addr_len, &io->addr);
1771 }
1772 
1773 int io_bind(struct io_kiocb *req, unsigned int issue_flags)
1774 {
1775 	struct io_bind *bind = io_kiocb_to_cmd(req, struct io_bind);
1776 	struct io_async_msghdr *io = req->async_data;
1777 	struct socket *sock;
1778 	int ret;
1779 
1780 	sock = sock_from_file(req->file);
1781 	if (unlikely(!sock))
1782 		return -ENOTSOCK;
1783 
1784 	ret = __sys_bind_socket(sock, &io->addr, bind->addr_len);
1785 	if (ret < 0)
1786 		req_set_fail(req);
1787 	io_req_set_res(req, ret, 0);
1788 	return 0;
1789 }
1790 
1791 int io_listen_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
1792 {
1793 	struct io_listen *listen = io_kiocb_to_cmd(req, struct io_listen);
1794 
1795 	if (sqe->addr || sqe->buf_index || sqe->rw_flags || sqe->splice_fd_in || sqe->addr2)
1796 		return -EINVAL;
1797 
1798 	listen->backlog = READ_ONCE(sqe->len);
1799 	return 0;
1800 }
1801 
1802 int io_listen(struct io_kiocb *req, unsigned int issue_flags)
1803 {
1804 	struct io_listen *listen = io_kiocb_to_cmd(req, struct io_listen);
1805 	struct socket *sock;
1806 	int ret;
1807 
1808 	sock = sock_from_file(req->file);
1809 	if (unlikely(!sock))
1810 		return -ENOTSOCK;
1811 
1812 	ret = __sys_listen_socket(sock, listen->backlog);
1813 	if (ret < 0)
1814 		req_set_fail(req);
1815 	io_req_set_res(req, ret, 0);
1816 	return 0;
1817 }
1818 
1819 void io_netmsg_cache_free(const void *entry)
1820 {
1821 	struct io_async_msghdr *kmsg = (struct io_async_msghdr *) entry;
1822 
1823 	if (kmsg->free_iov)
1824 		io_netmsg_iovec_free(kmsg);
1825 	kfree(kmsg);
1826 }
1827 #endif
1828