// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/net.h>
#include <linux/compat.h>
#include <net/compat.h>
#include <linux/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "kbuf.h"
#include "alloc_cache.h"
#include "net.h"
#include "notif.h"
#include "rsrc.h"
#include "zcrx.h"

struct io_shutdown {
	struct file *file;
	int how;
};

struct io_accept {
	struct file *file;
	struct sockaddr __user *addr;
	int __user *addr_len;
	int flags;
	int iou_flags;
	u32 file_slot;
	unsigned long nofile;
};

struct io_socket {
	struct file *file;
	int domain;
	int type;
	int protocol;
	int flags;
	u32 file_slot;
	unsigned long nofile;
};

struct io_connect {
	struct file *file;
	struct sockaddr __user *addr;
	int addr_len;
	bool in_progress;
	bool seen_econnaborted;
};

struct io_bind {
	struct file *file;
	int addr_len;
};

struct io_listen {
	struct file *file;
	int backlog;
};

struct io_sr_msg {
	struct file *file;
	union {
		struct compat_msghdr __user *umsg_compat;
		struct user_msghdr __user *umsg;
		void __user *buf;
	};
	int len;
	unsigned done_io;
	unsigned msg_flags;
	unsigned nr_multishot_loops;
	u16 flags;
	/* initialised and used only by !msg send variants */
	u16 buf_group;
	/* per-invocation mshot limit */
	unsigned mshot_len;
	/* overall mshot byte limit */
	unsigned mshot_total_len;
	void __user *msg_control;
	/* used only for send zerocopy */
	struct io_kiocb *notif;
};

/*
 * The UAPI flags are the lower 8 bits, as that's all sqe->ioprio will hold
 * anyway. Use the upper 8 bits for internal uses.
 */
enum sr_retry_flags {
	IORING_RECV_RETRY	= (1U << 15),
	IORING_RECV_PARTIAL_MAP	= (1U << 14),
	IORING_RECV_MSHOT_CAP	= (1U << 13),
	IORING_RECV_MSHOT_LIM	= (1U << 12),
	IORING_RECV_MSHOT_DONE	= (1U << 11),

	IORING_RECV_RETRY_CLEAR	= IORING_RECV_RETRY | IORING_RECV_PARTIAL_MAP,
	IORING_RECV_NO_RETRY	= IORING_RECV_RETRY | IORING_RECV_PARTIAL_MAP |
				  IORING_RECV_MSHOT_CAP | IORING_RECV_MSHOT_DONE,
};

/*
 * Number of times we'll try and do receives if there's more data. If we
 * exceed this limit, then add us to the back of the queue and retry from
 * there. This helps fairness between flooding clients.
 */
#define MULTISHOT_MAX_RETRY	32
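/*
 * Illustrative only: a userspace sketch (assuming liburing) of arming the
 * kind of multishot receive that this retry cap applies to; not part of
 * the kernel sources proper.
 *
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_recv_multishot(sqe, sockfd, NULL, 0, 0);
 *	sqe->flags |= IOSQE_BUFFER_SELECT;	// use a provided buffer group
 *	sqe->buf_group = bgid;
 *	io_uring_submit(&ring);
 *
 * Each completion carries IORING_CQE_F_MORE while the request stays armed;
 * after MULTISHOT_MAX_RETRY back-to-back receives the request is requeued
 * rather than looping inline, so one flooding socket can't starve others.
 */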
struct io_recvzc {
	struct file *file;
	unsigned msg_flags;
	u16 flags;
	u32 len;
	struct io_zcrx_ifq *ifq;
};

static int io_sg_from_iter_iovec(struct sk_buff *skb,
				 struct iov_iter *from, size_t length);
static int io_sg_from_iter(struct sk_buff *skb,
			   struct iov_iter *from, size_t length);

int io_shutdown_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_shutdown *shutdown = io_kiocb_to_cmd(req, struct io_shutdown);

	if (unlikely(sqe->off || sqe->addr || sqe->rw_flags ||
		     sqe->buf_index || sqe->splice_fd_in))
		return -EINVAL;

	shutdown->how = READ_ONCE(sqe->len);
	req->flags |= REQ_F_FORCE_ASYNC;
	return 0;
}

int io_shutdown(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_shutdown *shutdown = io_kiocb_to_cmd(req, struct io_shutdown);
	struct socket *sock;
	int ret;

	WARN_ON_ONCE(issue_flags & IO_URING_F_NONBLOCK);

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;

	ret = __sys_shutdown_sock(sock, shutdown->how);
	io_req_set_res(req, ret, 0);
	return IOU_COMPLETE;
}

static bool io_net_retry(struct socket *sock, int flags)
{
	if (!(flags & MSG_WAITALL))
		return false;
	return sock->type == SOCK_STREAM || sock->type == SOCK_SEQPACKET;
}

static void io_netmsg_iovec_free(struct io_async_msghdr *kmsg)
{
	if (kmsg->vec.iovec)
		io_vec_free(&kmsg->vec);
}

static void io_netmsg_recycle(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_async_msghdr *hdr = req->async_data;

	/* can't recycle, ensure we free the iovec if we have one */
	if (unlikely(issue_flags & IO_URING_F_UNLOCKED)) {
		io_netmsg_iovec_free(hdr);
		return;
	}

	/* Let normal cleanup path reap it if we fail adding to the cache */
	io_alloc_cache_vec_kasan(&hdr->vec);
	if (hdr->vec.nr > IO_VEC_CACHE_SOFT_CAP)
		io_vec_free(&hdr->vec);

	if (io_alloc_cache_put(&req->ctx->netmsg_cache, hdr)) {
		req->async_data = NULL;
		req->flags &= ~(REQ_F_ASYNC_DATA|REQ_F_NEED_CLEANUP);
	}
}

static struct io_async_msghdr *io_msg_alloc_async(struct io_kiocb *req)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct io_async_msghdr *hdr;

	hdr = io_uring_alloc_async_data(&ctx->netmsg_cache, req);
	if (!hdr)
		return NULL;

	/* If the async data was cached, we might have an iov cached inside. */
	if (hdr->vec.iovec)
		req->flags |= REQ_F_NEED_CLEANUP;
	return hdr;
}
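/*
 * Async msghdr lifecycle (summary of the helpers above, no new mechanism):
 * prep paths grab a struct io_async_msghdr via io_msg_alloc_async(), either
 * freshly allocated or recycled from ctx->netmsg_cache. On completion,
 * io_netmsg_recycle() returns it to the cache when the ring lock is held;
 * on the unlocked (io-wq) path it only frees any attached iovec and leaves
 * the rest to the normal request cleanup path.
 */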
static inline void io_mshot_prep_retry(struct io_kiocb *req,
				       struct io_async_msghdr *kmsg)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);

	req->flags &= ~REQ_F_BL_EMPTY;
	sr->done_io = 0;
	sr->flags &= ~IORING_RECV_RETRY_CLEAR;
	sr->len = sr->mshot_len;
}

static int io_net_import_vec(struct io_kiocb *req, struct io_async_msghdr *iomsg,
			     const struct iovec __user *uiov, unsigned uvec_seg,
			     int ddir)
{
	struct iovec *iov;
	int ret, nr_segs;

	if (iomsg->vec.iovec) {
		nr_segs = iomsg->vec.nr;
		iov = iomsg->vec.iovec;
	} else {
		nr_segs = 1;
		iov = &iomsg->fast_iov;
	}

	ret = __import_iovec(ddir, uiov, uvec_seg, nr_segs, &iov,
			     &iomsg->msg.msg_iter, io_is_compat(req->ctx));
	if (unlikely(ret < 0))
		return ret;

	if (iov) {
		req->flags |= REQ_F_NEED_CLEANUP;
		io_vec_reset_iovec(&iomsg->vec, iov, iomsg->msg.msg_iter.nr_segs);
	}
	return 0;
}

static int io_compat_msg_copy_hdr(struct io_kiocb *req,
				  struct io_async_msghdr *iomsg,
				  struct compat_msghdr *msg, int ddir,
				  struct sockaddr __user **save_addr)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct compat_iovec __user *uiov;
	int ret;

	if (copy_from_user(msg, sr->umsg_compat, sizeof(*msg)))
		return -EFAULT;

	ret = __get_compat_msghdr(&iomsg->msg, msg, save_addr);
	if (ret)
		return ret;

	uiov = compat_ptr(msg->msg_iov);
	if (req->flags & REQ_F_BUFFER_SELECT) {
		if (msg->msg_iovlen == 0) {
			sr->len = 0;
		} else if (msg->msg_iovlen > 1) {
			return -EINVAL;
		} else {
			struct compat_iovec tmp_iov;

			if (copy_from_user(&tmp_iov, uiov, sizeof(tmp_iov)))
				return -EFAULT;
			sr->len = tmp_iov.iov_len;
		}
	}
	return 0;
}

static int io_copy_msghdr_from_user(struct user_msghdr *msg,
				    struct user_msghdr __user *umsg)
{
	if (!user_access_begin(umsg, sizeof(*umsg)))
		return -EFAULT;
	unsafe_get_user(msg->msg_name, &umsg->msg_name, ua_end);
	unsafe_get_user(msg->msg_namelen, &umsg->msg_namelen, ua_end);
	unsafe_get_user(msg->msg_iov, &umsg->msg_iov, ua_end);
	unsafe_get_user(msg->msg_iovlen, &umsg->msg_iovlen, ua_end);
	unsafe_get_user(msg->msg_control, &umsg->msg_control, ua_end);
	unsafe_get_user(msg->msg_controllen, &umsg->msg_controllen, ua_end);
	user_access_end();
	return 0;
ua_end:
	user_access_end();
	return -EFAULT;
}

static int io_msg_copy_hdr(struct io_kiocb *req, struct io_async_msghdr *iomsg,
			   struct user_msghdr *msg, int ddir,
			   struct sockaddr __user **save_addr)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct user_msghdr __user *umsg = sr->umsg;
	int ret;

	iomsg->msg.msg_name = &iomsg->addr;
	iomsg->msg.msg_iter.nr_segs = 0;

	if (io_is_compat(req->ctx)) {
		struct compat_msghdr cmsg;

		ret = io_compat_msg_copy_hdr(req, iomsg, &cmsg, ddir, save_addr);
		if (ret)
			return ret;

		memset(msg, 0, sizeof(*msg));
		msg->msg_namelen = cmsg.msg_namelen;
		msg->msg_controllen = cmsg.msg_controllen;
		msg->msg_iov = compat_ptr(cmsg.msg_iov);
		msg->msg_iovlen = cmsg.msg_iovlen;
		return 0;
	}

	ret = io_copy_msghdr_from_user(msg, umsg);
	if (unlikely(ret))
		return ret;

	msg->msg_flags = 0;

	ret = __copy_msghdr(&iomsg->msg, msg, save_addr);
	if (ret)
		return ret;

	if (req->flags & REQ_F_BUFFER_SELECT) {
		if (msg->msg_iovlen == 0) {
			sr->len = 0;
		} else if (msg->msg_iovlen > 1) {
			return -EINVAL;
		} else {
			struct iovec __user *uiov = msg->msg_iov;
			struct iovec tmp_iov;

			if (copy_from_user(&tmp_iov, uiov, sizeof(tmp_iov)))
				return -EFAULT;
			sr->len = tmp_iov.iov_len;
		}
	}
	return 0;
}
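/*
 * Both msghdr copy paths above reject msg_iovlen > 1 when
 * IOSQE_BUFFER_SELECT is set: with provided buffers, the actual data
 * pointer and length come from the buffer selected at issue time, so only
 * a single iovec's length is borrowed (stashed in sr->len) and a
 * multi-segment iovec has no sane meaning there.
 */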
void io_sendmsg_recvmsg_cleanup(struct io_kiocb *req)
{
	struct io_async_msghdr *io = req->async_data;

	io_netmsg_iovec_free(io);
}

static int io_send_setup(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_async_msghdr *kmsg = req->async_data;
	void __user *addr;
	u16 addr_len;
	int ret;

	sr->buf = u64_to_user_ptr(READ_ONCE(sqe->addr));

	if (READ_ONCE(sqe->__pad3[0]))
		return -EINVAL;

	kmsg->msg.msg_name = NULL;
	kmsg->msg.msg_namelen = 0;
	kmsg->msg.msg_control = NULL;
	kmsg->msg.msg_controllen = 0;
	kmsg->msg.msg_ubuf = NULL;

	addr = u64_to_user_ptr(READ_ONCE(sqe->addr2));
	addr_len = READ_ONCE(sqe->addr_len);
	if (addr) {
		ret = move_addr_to_kernel(addr, addr_len, &kmsg->addr);
		if (unlikely(ret < 0))
			return ret;
		kmsg->msg.msg_name = &kmsg->addr;
		kmsg->msg.msg_namelen = addr_len;
	}
	if (sr->flags & IORING_RECVSEND_FIXED_BUF) {
		req->flags |= REQ_F_IMPORT_BUFFER;
		return 0;
	}
	if (req->flags & REQ_F_BUFFER_SELECT)
		return 0;

	if (sr->flags & IORING_SEND_VECTORIZED)
		return io_net_import_vec(req, kmsg, sr->buf, sr->len, ITER_SOURCE);

	return import_ubuf(ITER_SOURCE, sr->buf, sr->len, &kmsg->msg.msg_iter);
}

static int io_sendmsg_setup(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_async_msghdr *kmsg = req->async_data;
	struct user_msghdr msg;
	int ret;

	sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
	ret = io_msg_copy_hdr(req, kmsg, &msg, ITER_SOURCE, NULL);
	if (unlikely(ret))
		return ret;
	/* save msg_control as sys_sendmsg() overwrites it */
	sr->msg_control = kmsg->msg.msg_control_user;

	if (sr->flags & IORING_RECVSEND_FIXED_BUF) {
		kmsg->msg.msg_iter.nr_segs = msg.msg_iovlen;
		return io_prep_reg_iovec(req, &kmsg->vec, msg.msg_iov,
					 msg.msg_iovlen);
	}
	if (req->flags & REQ_F_BUFFER_SELECT)
		return 0;
	return io_net_import_vec(req, kmsg, msg.msg_iov, msg.msg_iovlen, ITER_SOURCE);
}

#define SENDMSG_FLAGS (IORING_RECVSEND_POLL_FIRST | IORING_RECVSEND_BUNDLE | \
		       IORING_SEND_VECTORIZED)

int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);

	sr->done_io = 0;
	sr->len = READ_ONCE(sqe->len);
	sr->flags = READ_ONCE(sqe->ioprio);
	if (sr->flags & ~SENDMSG_FLAGS)
		return -EINVAL;
	sr->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL;
	if (sr->msg_flags & MSG_DONTWAIT)
		req->flags |= REQ_F_NOWAIT;
	if (req->flags & REQ_F_BUFFER_SELECT)
		sr->buf_group = req->buf_index;
	if (sr->flags & IORING_RECVSEND_BUNDLE) {
		if (req->opcode == IORING_OP_SENDMSG)
			return -EINVAL;
		sr->msg_flags |= MSG_WAITALL;
		req->buf_list = NULL;
		req->flags |= REQ_F_MULTISHOT;
	}

	if (io_is_compat(req->ctx))
		sr->msg_flags |= MSG_CMSG_COMPAT;

	if (unlikely(!io_msg_alloc_async(req)))
		return -ENOMEM;
	if (req->opcode != IORING_OP_SENDMSG)
		return io_send_setup(req, sqe);
	if (unlikely(sqe->addr2 || sqe->file_index))
		return -EINVAL;
	return io_sendmsg_setup(req, sqe);
}
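/*
 * Illustrative only (assuming liburing): the SQE layout io_send_setup()
 * consumes. sqe->addr/len describe the buffer; a sendto()-style destination
 * goes in sqe->addr2/addr_len and is copied into the kernel once at prep
 * time via move_addr_to_kernel():
 *
 *	io_uring_prep_send(sqe, sockfd, buf, buflen, 0);
 *	io_uring_prep_send_set_addr(sqe, (struct sockaddr *)&dst, dstlen);
 */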
static void io_req_msg_cleanup(struct io_kiocb *req,
			       unsigned int issue_flags)
{
	io_netmsg_recycle(req, issue_flags);
}

/*
 * For bundle completions, we need to figure out how many segments we consumed.
 * A bundle could be using a single ITER_UBUF if that's all we mapped, or it
 * could be using an ITER_IOVEC. If the latter, then if we consumed all of
 * the segments, then it's a trivial question to answer. If we have residual
 * data in the iter, then loop the segments to figure out how much we
 * transferred.
 */
static int io_bundle_nbufs(struct io_async_msghdr *kmsg, int ret)
{
	struct iovec *iov;
	int nbufs;

	/* no data is always zero segments, and a ubuf is always 1 segment */
	if (ret <= 0)
		return 0;
	if (iter_is_ubuf(&kmsg->msg.msg_iter))
		return 1;

	iov = kmsg->vec.iovec;
	if (!iov)
		iov = &kmsg->fast_iov;

	/* if all data was transferred, it's basic pointer math */
	if (!iov_iter_count(&kmsg->msg.msg_iter))
		return iter_iov(&kmsg->msg.msg_iter) - iov;

	/* short transfer, count segments */
	nbufs = 0;
	do {
		int this_len = min_t(int, iov[nbufs].iov_len, ret);

		nbufs++;
		ret -= this_len;
	} while (ret);

	return nbufs;
}

static inline bool io_send_finish(struct io_kiocb *req, int *ret,
				  struct io_async_msghdr *kmsg,
				  unsigned issue_flags)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	bool bundle_finished = *ret <= 0;
	unsigned int cflags;

	if (!(sr->flags & IORING_RECVSEND_BUNDLE)) {
		cflags = io_put_kbuf(req, *ret, issue_flags);
		goto finish;
	}

	cflags = io_put_kbufs(req, *ret, io_bundle_nbufs(kmsg, *ret), issue_flags);

	if (bundle_finished || req->flags & REQ_F_BL_EMPTY)
		goto finish;

	/*
	 * Fill CQE for this send and see if we should keep trying to
	 * send on this socket.
	 */
	if (io_req_post_cqe(req, *ret, cflags | IORING_CQE_F_MORE)) {
		io_mshot_prep_retry(req, kmsg);
		return false;
	}

	/* Otherwise stop bundle and use the current result. */
finish:
	io_req_set_res(req, *ret, cflags);
	*ret = IOU_COMPLETE;
	return true;
}
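/*
 * Worked example for io_bundle_nbufs() (illustrative): given an ITER_IOVEC
 * of three buffers of 100, 200 and 300 bytes and a short transfer of
 * ret == 250, the loop charges 100 bytes to the first segment and the
 * remaining 150 to the second, so nbufs == 2 and io_put_kbufs() releases
 * exactly those two buffers.
 */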
int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_async_msghdr *kmsg = req->async_data;
	struct socket *sock;
	unsigned flags;
	int min_ret = 0;
	int ret;

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;

	if (!(req->flags & REQ_F_POLLED) &&
	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
		return -EAGAIN;

	flags = sr->msg_flags;
	if (issue_flags & IO_URING_F_NONBLOCK)
		flags |= MSG_DONTWAIT;
	if (flags & MSG_WAITALL)
		min_ret = iov_iter_count(&kmsg->msg.msg_iter);

	kmsg->msg.msg_control_user = sr->msg_control;

	ret = __sys_sendmsg_sock(sock, &kmsg->msg, flags);

	if (ret < min_ret) {
		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
			return -EAGAIN;
		if (ret > 0 && io_net_retry(sock, flags)) {
			kmsg->msg.msg_controllen = 0;
			kmsg->msg.msg_control = NULL;
			sr->done_io += ret;
			req->flags |= REQ_F_BL_NO_RECYCLE;
			return -EAGAIN;
		}
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	}
	io_req_msg_cleanup(req, issue_flags);
	if (ret >= 0)
		ret += sr->done_io;
	else if (sr->done_io)
		ret = sr->done_io;
	io_req_set_res(req, ret, 0);
	return IOU_COMPLETE;
}

static int io_send_select_buffer(struct io_kiocb *req, unsigned int issue_flags,
				 struct io_async_msghdr *kmsg)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	int ret;
	struct buf_sel_arg arg = {
		.iovs = &kmsg->fast_iov,
		.max_len = min_not_zero(sr->len, INT_MAX),
		.nr_iovs = 1,
		.buf_group = sr->buf_group,
	};

	if (kmsg->vec.iovec) {
		arg.nr_iovs = kmsg->vec.nr;
		arg.iovs = kmsg->vec.iovec;
		arg.mode = KBUF_MODE_FREE;
	}

	if (!(sr->flags & IORING_RECVSEND_BUNDLE))
		arg.nr_iovs = 1;
	else
		arg.mode |= KBUF_MODE_EXPAND;

	ret = io_buffers_select(req, &arg, issue_flags);
	if (unlikely(ret < 0))
		return ret;

	if (arg.iovs != &kmsg->fast_iov && arg.iovs != kmsg->vec.iovec) {
		kmsg->vec.nr = ret;
		kmsg->vec.iovec = arg.iovs;
		req->flags |= REQ_F_NEED_CLEANUP;
	}
	sr->len = arg.out_len;

	if (ret == 1) {
		sr->buf = arg.iovs[0].iov_base;
		ret = import_ubuf(ITER_SOURCE, sr->buf, sr->len,
				  &kmsg->msg.msg_iter);
		if (unlikely(ret))
			return ret;
	} else {
		iov_iter_init(&kmsg->msg.msg_iter, ITER_SOURCE,
			      arg.iovs, ret, arg.out_len);
	}

	return 0;
}

int io_send(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_async_msghdr *kmsg = req->async_data;
	struct socket *sock;
	unsigned flags;
	int min_ret = 0;
	int ret;

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;

	if (!(req->flags & REQ_F_POLLED) &&
	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
		return -EAGAIN;

	flags = sr->msg_flags;
	if (issue_flags & IO_URING_F_NONBLOCK)
		flags |= MSG_DONTWAIT;

retry_bundle:
	if (io_do_buffer_select(req)) {
		ret = io_send_select_buffer(req, issue_flags, kmsg);
		if (ret)
			return ret;
	}

	/*
	 * If MSG_WAITALL is set, or this is a bundle send, then we need
	 * the full amount. If only the bundle flag is set and we get a
	 * short send, we complete the bundle sequence rather than
	 * continue on.
	 */
	if (flags & MSG_WAITALL || sr->flags & IORING_RECVSEND_BUNDLE)
		min_ret = iov_iter_count(&kmsg->msg.msg_iter);

	flags &= ~MSG_INTERNAL_SENDMSG_FLAGS;
	kmsg->msg.msg_flags = flags;
	ret = sock_sendmsg(sock, &kmsg->msg);
	if (ret < min_ret) {
		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
			return -EAGAIN;

		if (ret > 0 && io_net_retry(sock, flags)) {
			sr->len -= ret;
			sr->buf += ret;
			sr->done_io += ret;
			req->flags |= REQ_F_BL_NO_RECYCLE;
			return -EAGAIN;
		}
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	}
	if (ret >= 0)
		ret += sr->done_io;
	else if (sr->done_io)
		ret = sr->done_io;

	if (!io_send_finish(req, &ret, kmsg, issue_flags))
		goto retry_bundle;

	io_req_msg_cleanup(req, issue_flags);
	return ret;
}
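/*
 * Illustrative only (assuming liburing): a bundle send picks as many
 * buffers from a provided buffer group as the kernel can push in one go,
 * completing with a single CQE covering all of them:
 *
 *	io_uring_prep_send_bundle(sqe, sockfd, 0, 0);
 *	sqe->flags |= IOSQE_BUFFER_SELECT;
 *	sqe->buf_group = bgid;
 *
 * io_bundle_nbufs() above is what maps the byte count returned by
 * sock_sendmsg() back onto the number of selected buffers to release.
 */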
static int io_recvmsg_mshot_prep(struct io_kiocb *req,
				 struct io_async_msghdr *iomsg,
				 int namelen, size_t controllen)
{
	if ((req->flags & (REQ_F_APOLL_MULTISHOT|REQ_F_BUFFER_SELECT)) ==
	    (REQ_F_APOLL_MULTISHOT|REQ_F_BUFFER_SELECT)) {
		int hdr;

		if (unlikely(namelen < 0))
			return -EOVERFLOW;
		if (check_add_overflow(sizeof(struct io_uring_recvmsg_out),
				       namelen, &hdr))
			return -EOVERFLOW;
		if (check_add_overflow(hdr, controllen, &hdr))
			return -EOVERFLOW;

		iomsg->namelen = namelen;
		iomsg->controllen = controllen;
		return 0;
	}

	return 0;
}

static int io_recvmsg_copy_hdr(struct io_kiocb *req,
			       struct io_async_msghdr *iomsg)
{
	struct user_msghdr msg;
	int ret;

	ret = io_msg_copy_hdr(req, iomsg, &msg, ITER_DEST, &iomsg->uaddr);
	if (unlikely(ret))
		return ret;

	if (!(req->flags & REQ_F_BUFFER_SELECT)) {
		ret = io_net_import_vec(req, iomsg, msg.msg_iov, msg.msg_iovlen,
					ITER_DEST);
		if (unlikely(ret))
			return ret;
	}
	return io_recvmsg_mshot_prep(req, iomsg, msg.msg_namelen,
				     msg.msg_controllen);
}

static int io_recvmsg_prep_setup(struct io_kiocb *req)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_async_msghdr *kmsg;

	kmsg = io_msg_alloc_async(req);
	if (unlikely(!kmsg))
		return -ENOMEM;

	if (req->opcode == IORING_OP_RECV) {
		kmsg->msg.msg_name = NULL;
		kmsg->msg.msg_namelen = 0;
		kmsg->msg.msg_inq = 0;
		kmsg->msg.msg_control = NULL;
		kmsg->msg.msg_get_inq = 1;
		kmsg->msg.msg_controllen = 0;
		kmsg->msg.msg_iocb = NULL;
		kmsg->msg.msg_ubuf = NULL;

		if (req->flags & REQ_F_BUFFER_SELECT)
			return 0;
		return import_ubuf(ITER_DEST, sr->buf, sr->len,
				   &kmsg->msg.msg_iter);
	}

	return io_recvmsg_copy_hdr(req, kmsg);
}

#define RECVMSG_FLAGS (IORING_RECVSEND_POLL_FIRST | IORING_RECV_MULTISHOT | \
		       IORING_RECVSEND_BUNDLE)

int io_recvmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);

	sr->done_io = 0;

	if (unlikely(sqe->addr2))
		return -EINVAL;

	sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
	sr->len = READ_ONCE(sqe->len);
	sr->flags = READ_ONCE(sqe->ioprio);
	if (sr->flags & ~RECVMSG_FLAGS)
		return -EINVAL;
	sr->msg_flags = READ_ONCE(sqe->msg_flags);
	if (sr->msg_flags & MSG_DONTWAIT)
		req->flags |= REQ_F_NOWAIT;
	if (sr->msg_flags & MSG_ERRQUEUE)
		req->flags |= REQ_F_CLEAR_POLLIN;
	if (req->flags & REQ_F_BUFFER_SELECT) {
		/*
		 * Store the buffer group for this multishot receive separately,
		 * as if we end up doing an io-wq based issue that selects a
		 * buffer, it has to be committed immediately and that will
		 * clear ->buf_list. This means we lose the link to the buffer
		 * list, and the eventual buffer put on completion then cannot
		 * restore it.
		 */
		sr->buf_group = req->buf_index;
		req->buf_list = NULL;
	}
	sr->mshot_total_len = sr->mshot_len = 0;
	if (sr->flags & IORING_RECV_MULTISHOT) {
		if (!(req->flags & REQ_F_BUFFER_SELECT))
			return -EINVAL;
		if (sr->msg_flags & MSG_WAITALL)
			return -EINVAL;
		if (req->opcode == IORING_OP_RECV) {
			sr->mshot_len = sr->len;
			sr->mshot_total_len = READ_ONCE(sqe->optlen);
			if (sr->mshot_total_len)
				sr->flags |= IORING_RECV_MSHOT_LIM;
		} else if (sqe->optlen) {
			return -EINVAL;
		}
		req->flags |= REQ_F_APOLL_MULTISHOT;
	} else if (sqe->optlen) {
		return -EINVAL;
	}

	if (sr->flags & IORING_RECVSEND_BUNDLE) {
		if (req->opcode == IORING_OP_RECVMSG)
			return -EINVAL;
	}

	if (io_is_compat(req->ctx))
		sr->msg_flags |= MSG_CMSG_COMPAT;

	sr->nr_multishot_loops = 0;
	return io_recvmsg_prep_setup(req);
}
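/*
 * Note (summary): for multishot IORING_OP_RECV, sqe->optlen optionally
 * supplies an overall byte budget. io_recv_finish() below debits each
 * completion from sr->mshot_total_len and terminates the multishot once
 * the budget is spent, via IORING_RECV_MSHOT_LIM / IORING_RECV_MSHOT_DONE.
 */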
/* bits to clear in old and inherit in new cflags on bundle retry */
#define CQE_F_MASK	(IORING_CQE_F_SOCK_NONEMPTY|IORING_CQE_F_MORE)

/*
 * Finishes io_recv and io_recvmsg.
 *
 * Returns true if it is actually finished, or false if it should run
 * again (for multishot).
 */
static inline bool io_recv_finish(struct io_kiocb *req, int *ret,
				  struct io_async_msghdr *kmsg,
				  bool mshot_finished, unsigned issue_flags)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	unsigned int cflags = 0;

	if (kmsg->msg.msg_inq > 0)
		cflags |= IORING_CQE_F_SOCK_NONEMPTY;

	if (*ret > 0 && sr->flags & IORING_RECV_MSHOT_LIM) {
		/*
		 * If sr->mshot_total_len hits zero, the byte limit has been
		 * reached. Mark mshot as finished, and flag MSHOT_DONE as
		 * well to prevent a potential bundle from being retried.
		 */
		sr->mshot_total_len -= min_t(int, *ret, sr->mshot_total_len);
		if (!sr->mshot_total_len) {
			sr->flags |= IORING_RECV_MSHOT_DONE;
			mshot_finished = true;
		}
	}

	if (sr->flags & IORING_RECVSEND_BUNDLE) {
		size_t this_ret = *ret - sr->done_io;

		cflags |= io_put_kbufs(req, this_ret, io_bundle_nbufs(kmsg, this_ret),
				       issue_flags);
		if (sr->flags & IORING_RECV_RETRY)
			cflags = req->cqe.flags | (cflags & CQE_F_MASK);
		if (sr->mshot_len && *ret >= sr->mshot_len)
			sr->flags |= IORING_RECV_MSHOT_CAP;
		/* bundle with no more immediate buffers, we're done */
		if (req->flags & REQ_F_BL_EMPTY)
			goto finish;
		/*
		 * If more is available AND it was a full transfer, retry and
		 * append to this one
		 */
		if (!(sr->flags & IORING_RECV_NO_RETRY) &&
		    kmsg->msg.msg_inq > 1 && this_ret > 0 &&
		    !iov_iter_count(&kmsg->msg.msg_iter)) {
			req->cqe.flags = cflags & ~CQE_F_MASK;
			sr->len = kmsg->msg.msg_inq;
			sr->done_io += this_ret;
			sr->flags |= IORING_RECV_RETRY;
			return false;
		}
	} else {
		cflags |= io_put_kbuf(req, *ret, issue_flags);
	}

	/*
	 * Fill CQE for this receive and see if we should keep trying to
	 * receive from this socket.
	 */
	if ((req->flags & REQ_F_APOLL_MULTISHOT) && !mshot_finished &&
	    io_req_post_cqe(req, *ret, cflags | IORING_CQE_F_MORE)) {
		*ret = IOU_RETRY;
		io_mshot_prep_retry(req, kmsg);
		/* Known not-empty or unknown state, retry */
		if (cflags & IORING_CQE_F_SOCK_NONEMPTY || kmsg->msg.msg_inq < 0) {
			if (sr->nr_multishot_loops++ < MULTISHOT_MAX_RETRY &&
			    !(sr->flags & IORING_RECV_MSHOT_CAP)) {
				return false;
			}
			/* mshot retries exceeded, force a requeue */
			sr->nr_multishot_loops = 0;
			sr->flags &= ~IORING_RECV_MSHOT_CAP;
			if (issue_flags & IO_URING_F_MULTISHOT)
				*ret = IOU_REQUEUE;
		}
		return true;
	}

	/* Finish the request / stop multishot. */
finish:
	io_req_set_res(req, *ret, cflags);
	*ret = IOU_COMPLETE;
	io_req_msg_cleanup(req, issue_flags);
	return true;
}
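/*
 * Summary of the bundle retry above (no new mechanism): when a full
 * transfer completes and msg_inq says more data is queued, io_recv_finish()
 * stashes the CQE flags in req->cqe.flags, accounts the bytes in
 * sr->done_io, and retries without posting a CQE. The eventual completion
 * then reports the accumulated byte count for the whole bundle in a single
 * CQE.
 */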
static int io_recvmsg_prep_multishot(struct io_async_msghdr *kmsg,
				     struct io_sr_msg *sr, void __user **buf,
				     size_t *len)
{
	unsigned long ubuf = (unsigned long) *buf;
	unsigned long hdr;

	hdr = sizeof(struct io_uring_recvmsg_out) + kmsg->namelen +
		kmsg->controllen;
	if (*len < hdr)
		return -EFAULT;

	if (kmsg->controllen) {
		unsigned long control = ubuf + hdr - kmsg->controllen;

		kmsg->msg.msg_control_user = (void __user *) control;
		kmsg->msg.msg_controllen = kmsg->controllen;
	}

	sr->buf = *buf; /* stash for later copy */
	*buf = (void __user *) (ubuf + hdr);
	kmsg->payloadlen = *len = *len - hdr;
	return 0;
}

struct io_recvmsg_multishot_hdr {
	struct io_uring_recvmsg_out msg;
	struct sockaddr_storage addr;
};

static int io_recvmsg_multishot(struct socket *sock, struct io_sr_msg *io,
				struct io_async_msghdr *kmsg,
				unsigned int flags, bool *finished)
{
	int err;
	int copy_len;
	struct io_recvmsg_multishot_hdr hdr;

	if (kmsg->namelen)
		kmsg->msg.msg_name = &hdr.addr;
	kmsg->msg.msg_flags = flags & (MSG_CMSG_CLOEXEC|MSG_CMSG_COMPAT);
	kmsg->msg.msg_namelen = 0;

	if (sock->file->f_flags & O_NONBLOCK)
		flags |= MSG_DONTWAIT;

	err = sock_recvmsg(sock, &kmsg->msg, flags);
	*finished = err <= 0;
	if (err < 0)
		return err;

	hdr.msg = (struct io_uring_recvmsg_out) {
		.controllen = kmsg->controllen - kmsg->msg.msg_controllen,
		.flags = kmsg->msg.msg_flags & ~MSG_CMSG_COMPAT
	};

	hdr.msg.payloadlen = err;
	if (err > kmsg->payloadlen)
		err = kmsg->payloadlen;

	copy_len = sizeof(struct io_uring_recvmsg_out);
	if (kmsg->msg.msg_namelen > kmsg->namelen)
		copy_len += kmsg->namelen;
	else
		copy_len += kmsg->msg.msg_namelen;

	/*
	 * "fromlen shall refer to the value before truncation.."
	 *			1003.1g
	 */
	hdr.msg.namelen = kmsg->msg.msg_namelen;

	/* ensure that there is no gap between hdr and sockaddr_storage */
	BUILD_BUG_ON(offsetof(struct io_recvmsg_multishot_hdr, addr) !=
		     sizeof(struct io_uring_recvmsg_out));
	if (copy_to_user(io->buf, &hdr, copy_len)) {
		*finished = true;
		return -EFAULT;
	}

	return sizeof(struct io_uring_recvmsg_out) + kmsg->namelen +
			kmsg->controllen + err;
}
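/*
 * Layout of the returned buffer for multishot recvmsg, as assembled by
 * io_recvmsg_prep_multishot() and io_recvmsg_multishot() above:
 *
 *	+---------------------------------+ <- io->buf
 *	| struct io_uring_recvmsg_out     |
 *	+---------------------------------+
 *	| msg_name (kmsg->namelen bytes)  |
 *	+---------------------------------+
 *	| cmsg data (kmsg->controllen)    |
 *	+---------------------------------+
 *	| payload                         |
 *	+---------------------------------+
 *
 * Userspace (e.g. liburing's io_uring_recvmsg_validate() helpers) walks
 * this using the namelen/controllen/payloadlen fields in the header.
 */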
int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_async_msghdr *kmsg = req->async_data;
	struct socket *sock;
	unsigned flags;
	int ret, min_ret = 0;
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
	bool mshot_finished = true;

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;

	if (!(req->flags & REQ_F_POLLED) &&
	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
		return -EAGAIN;

	flags = sr->msg_flags;
	if (force_nonblock)
		flags |= MSG_DONTWAIT;

retry_multishot:
	if (io_do_buffer_select(req)) {
		void __user *buf;
		size_t len = sr->len;

		buf = io_buffer_select(req, &len, sr->buf_group, issue_flags);
		if (!buf)
			return -ENOBUFS;

		if (req->flags & REQ_F_APOLL_MULTISHOT) {
			ret = io_recvmsg_prep_multishot(kmsg, sr, &buf, &len);
			if (ret) {
				io_kbuf_recycle(req, issue_flags);
				return ret;
			}
		}

		iov_iter_ubuf(&kmsg->msg.msg_iter, ITER_DEST, buf, len);
	}

	kmsg->msg.msg_get_inq = 1;
	kmsg->msg.msg_inq = -1;
	if (req->flags & REQ_F_APOLL_MULTISHOT) {
		ret = io_recvmsg_multishot(sock, sr, kmsg, flags,
					   &mshot_finished);
	} else {
		/* disable partial retry for recvmsg with cmsg attached */
		if (flags & MSG_WAITALL && !kmsg->msg.msg_controllen)
			min_ret = iov_iter_count(&kmsg->msg.msg_iter);

		ret = __sys_recvmsg_sock(sock, &kmsg->msg, sr->umsg,
					 kmsg->uaddr, flags);
	}

	if (ret < min_ret) {
		if (ret == -EAGAIN && force_nonblock) {
			if (issue_flags & IO_URING_F_MULTISHOT)
				io_kbuf_recycle(req, issue_flags);

			return IOU_RETRY;
		}
		if (ret > 0 && io_net_retry(sock, flags)) {
			sr->done_io += ret;
			req->flags |= REQ_F_BL_NO_RECYCLE;
			return IOU_RETRY;
		}
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	} else if ((flags & MSG_WAITALL) && (kmsg->msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))) {
		req_set_fail(req);
	}

	if (ret > 0)
		ret += sr->done_io;
	else if (sr->done_io)
		ret = sr->done_io;
	else
		io_kbuf_recycle(req, issue_flags);

	if (!io_recv_finish(req, &ret, kmsg, mshot_finished, issue_flags))
		goto retry_multishot;

	return ret;
}

static int io_recv_buf_select(struct io_kiocb *req, struct io_async_msghdr *kmsg,
			      size_t *len, unsigned int issue_flags)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	int ret;

	/*
	 * If the ring isn't locked, then don't use the peek interface
	 * to grab multiple buffers as we will lock/unlock between
	 * this selection and posting the buffers.
	 */
	if (!(issue_flags & IO_URING_F_UNLOCKED) &&
	    sr->flags & IORING_RECVSEND_BUNDLE) {
		struct buf_sel_arg arg = {
			.iovs = &kmsg->fast_iov,
			.nr_iovs = 1,
			.mode = KBUF_MODE_EXPAND,
			.buf_group = sr->buf_group,
		};

		if (kmsg->vec.iovec) {
			arg.nr_iovs = kmsg->vec.nr;
			arg.iovs = kmsg->vec.iovec;
			arg.mode |= KBUF_MODE_FREE;
		}

		if (*len)
			arg.max_len = *len;
		else if (kmsg->msg.msg_inq > 1)
			arg.max_len = min_not_zero(*len, (size_t) kmsg->msg.msg_inq);

		/* if mshot limited, ensure we don't go over */
		if (sr->flags & IORING_RECV_MSHOT_LIM)
			arg.max_len = min_not_zero(arg.max_len, sr->mshot_total_len);
		ret = io_buffers_peek(req, &arg);
		if (unlikely(ret < 0))
			return ret;

		if (arg.iovs != &kmsg->fast_iov && arg.iovs != kmsg->vec.iovec) {
			kmsg->vec.nr = ret;
			kmsg->vec.iovec = arg.iovs;
			req->flags |= REQ_F_NEED_CLEANUP;
		}
		if (arg.partial_map)
			sr->flags |= IORING_RECV_PARTIAL_MAP;

		/* special case 1 vec, can be a fast path */
		if (ret == 1) {
			sr->buf = arg.iovs[0].iov_base;
			sr->len = arg.iovs[0].iov_len;
			goto map_ubuf;
		}
		iov_iter_init(&kmsg->msg.msg_iter, ITER_DEST, arg.iovs, ret,
			      arg.out_len);
	} else {
		void __user *buf;

		*len = sr->len;
		buf = io_buffer_select(req, len, sr->buf_group, issue_flags);
		if (!buf)
			return -ENOBUFS;
		sr->buf = buf;
		sr->len = *len;
map_ubuf:
		ret = import_ubuf(ITER_DEST, sr->buf, sr->len,
				  &kmsg->msg.msg_iter);
		if (unlikely(ret))
			return ret;
	}

	return 0;
}
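/*
 * Note on the bundle peek above (summary, not new mechanism): with the
 * ring lock held, io_buffers_peek() can map several provided buffers up
 * front without committing them. max_len is clamped by the request length,
 * by the socket's queued byte count (msg_inq) when known, and by any
 * remaining multishot byte budget, so a bundle never maps more than it can
 * plausibly consume in this receive.
 */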
int io_recv(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_async_msghdr *kmsg = req->async_data;
	struct socket *sock;
	unsigned flags;
	int ret, min_ret = 0;
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
	size_t len = sr->len;
	bool mshot_finished;

	if (!(req->flags & REQ_F_POLLED) &&
	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
		return -EAGAIN;

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;

	flags = sr->msg_flags;
	if (force_nonblock)
		flags |= MSG_DONTWAIT;

retry_multishot:
	if (io_do_buffer_select(req)) {
		ret = io_recv_buf_select(req, kmsg, &len, issue_flags);
		if (unlikely(ret)) {
			kmsg->msg.msg_inq = -1;
			goto out_free;
		}
		sr->buf = NULL;
	}

	kmsg->msg.msg_flags = 0;
	kmsg->msg.msg_inq = -1;

	if (flags & MSG_WAITALL)
		min_ret = iov_iter_count(&kmsg->msg.msg_iter);

	ret = sock_recvmsg(sock, &kmsg->msg, flags);
	if (ret < min_ret) {
		if (ret == -EAGAIN && force_nonblock) {
			if (issue_flags & IO_URING_F_MULTISHOT)
				io_kbuf_recycle(req, issue_flags);

			return IOU_RETRY;
		}
		if (ret > 0 && io_net_retry(sock, flags)) {
			sr->len -= ret;
			sr->buf += ret;
			sr->done_io += ret;
			req->flags |= REQ_F_BL_NO_RECYCLE;
			return -EAGAIN;
		}
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	} else if ((flags & MSG_WAITALL) && (kmsg->msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))) {
out_free:
		req_set_fail(req);
	}

	mshot_finished = ret <= 0;
	if (ret > 0)
		ret += sr->done_io;
	else if (sr->done_io)
		ret = sr->done_io;
	else
		io_kbuf_recycle(req, issue_flags);

	if (!io_recv_finish(req, &ret, kmsg, mshot_finished, issue_flags))
		goto retry_multishot;

	return ret;
}
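/*
 * Zero-copy receive (IORING_OP_RECV_ZC) below hands payload pages to a
 * registered zcrx area instead of copying into provided buffers; see
 * zcrx.c for the ifq/area plumbing. The request must be multishot, and
 * data completions are posted as aux CQEs describing offsets into the
 * shared region rather than a user buffer.
 */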
int io_recvzc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_recvzc *zc = io_kiocb_to_cmd(req, struct io_recvzc);
	unsigned ifq_idx;

	if (unlikely(sqe->addr2 || sqe->addr || sqe->addr3))
		return -EINVAL;

	ifq_idx = READ_ONCE(sqe->zcrx_ifq_idx);
	zc->ifq = xa_load(&req->ctx->zcrx_ctxs, ifq_idx);
	if (!zc->ifq)
		return -EINVAL;

	zc->len = READ_ONCE(sqe->len);
	zc->flags = READ_ONCE(sqe->ioprio);
	zc->msg_flags = READ_ONCE(sqe->msg_flags);
	if (zc->msg_flags)
		return -EINVAL;
	if (zc->flags & ~(IORING_RECVSEND_POLL_FIRST | IORING_RECV_MULTISHOT))
		return -EINVAL;
	/* multishot required */
	if (!(zc->flags & IORING_RECV_MULTISHOT))
		return -EINVAL;
	/* All data completions are posted as aux CQEs. */
	req->flags |= REQ_F_APOLL_MULTISHOT;

	return 0;
}

int io_recvzc(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_recvzc *zc = io_kiocb_to_cmd(req, struct io_recvzc);
	struct socket *sock;
	unsigned int len;
	int ret;

	if (!(req->flags & REQ_F_POLLED) &&
	    (zc->flags & IORING_RECVSEND_POLL_FIRST))
		return -EAGAIN;

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;

	len = zc->len;
	ret = io_zcrx_recv(req, zc->ifq, sock, zc->msg_flags | MSG_DONTWAIT,
			   issue_flags, &zc->len);
	if (len && zc->len == 0) {
		io_req_set_res(req, 0, 0);

		return IOU_COMPLETE;
	}
	if (unlikely(ret <= 0) && ret != -EAGAIN) {
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		if (ret == IOU_REQUEUE)
			return IOU_REQUEUE;

		req_set_fail(req);
		io_req_set_res(req, ret, 0);
		return IOU_COMPLETE;
	}
	return IOU_RETRY;
}

void io_send_zc_cleanup(struct io_kiocb *req)
{
	struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_async_msghdr *io = req->async_data;

	if (req_has_async_data(req))
		io_netmsg_iovec_free(io);
	if (zc->notif) {
		io_notif_flush(zc->notif);
		zc->notif = NULL;
	}
}

#define IO_ZC_FLAGS_COMMON (IORING_RECVSEND_POLL_FIRST | IORING_RECVSEND_FIXED_BUF)
#define IO_ZC_FLAGS_VALID  (IO_ZC_FLAGS_COMMON | IORING_SEND_ZC_REPORT_USAGE | \
			    IORING_SEND_VECTORIZED)

int io_send_zc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_ring_ctx *ctx = req->ctx;
	struct io_async_msghdr *iomsg;
	struct io_kiocb *notif;
	int ret;

	zc->done_io = 0;

	if (unlikely(READ_ONCE(sqe->__pad2[0]) || READ_ONCE(sqe->addr3)))
		return -EINVAL;
	/* we don't support IOSQE_CQE_SKIP_SUCCESS just yet */
	if (req->flags & REQ_F_CQE_SKIP)
		return -EINVAL;

	notif = zc->notif = io_alloc_notif(ctx);
	if (!notif)
		return -ENOMEM;
	notif->cqe.user_data = req->cqe.user_data;
	notif->cqe.res = 0;
	notif->cqe.flags = IORING_CQE_F_NOTIF;
	req->flags |= REQ_F_NEED_CLEANUP | REQ_F_POLL_NO_LAZY;

	zc->flags = READ_ONCE(sqe->ioprio);
	if (unlikely(zc->flags & ~IO_ZC_FLAGS_COMMON)) {
		if (zc->flags & ~IO_ZC_FLAGS_VALID)
			return -EINVAL;
		if (zc->flags & IORING_SEND_ZC_REPORT_USAGE) {
			struct io_notif_data *nd = io_notif_to_data(notif);

			nd->zc_report = true;
			nd->zc_used = false;
			nd->zc_copied = false;
		}
	}

	zc->len = READ_ONCE(sqe->len);
	zc->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL | MSG_ZEROCOPY;
	req->buf_index = READ_ONCE(sqe->buf_index);
	if (zc->msg_flags & MSG_DONTWAIT)
		req->flags |= REQ_F_NOWAIT;

	if (io_is_compat(req->ctx))
		zc->msg_flags |= MSG_CMSG_COMPAT;

	iomsg = io_msg_alloc_async(req);
	if (unlikely(!iomsg))
		return -ENOMEM;

	if (req->opcode == IORING_OP_SEND_ZC) {
		ret = io_send_setup(req, sqe);
	} else {
		if (unlikely(sqe->addr2 || sqe->file_index))
			return -EINVAL;
		ret = io_sendmsg_setup(req, sqe);
	}
	if (unlikely(ret))
		return ret;

	if (!(zc->flags & IORING_RECVSEND_FIXED_BUF)) {
		iomsg->msg.sg_from_iter = io_sg_from_iter_iovec;
		return io_notif_account_mem(zc->notif, iomsg->msg.msg_iter.count);
	}
	iomsg->msg.sg_from_iter = io_sg_from_iter;
	return 0;
}

static int io_sg_from_iter_iovec(struct sk_buff *skb,
				 struct iov_iter *from, size_t length)
{
	skb_zcopy_downgrade_managed(skb);
	return zerocopy_fill_skb_from_iter(skb, from, length);
}

static int io_sg_from_iter(struct sk_buff *skb,
			   struct iov_iter *from, size_t length)
{
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	int frag = shinfo->nr_frags;
	int ret = 0;
	struct bvec_iter bi;
	ssize_t copied = 0;
	unsigned long truesize = 0;

	if (!frag)
		shinfo->flags |= SKBFL_MANAGED_FRAG_REFS;
	else if (unlikely(!skb_zcopy_managed(skb)))
		return zerocopy_fill_skb_from_iter(skb, from, length);

	bi.bi_size = min(from->count, length);
	bi.bi_bvec_done = from->iov_offset;
	bi.bi_idx = 0;

	while (bi.bi_size && frag < MAX_SKB_FRAGS) {
		struct bio_vec v = mp_bvec_iter_bvec(from->bvec, bi);

		copied += v.bv_len;
		truesize += PAGE_ALIGN(v.bv_len + v.bv_offset);
		__skb_fill_page_desc_noacc(shinfo, frag++, v.bv_page,
					   v.bv_offset, v.bv_len);
		bvec_iter_advance_single(from->bvec, &bi, v.bv_len);
	}
	if (bi.bi_size)
		ret = -EMSGSIZE;

	shinfo->nr_frags = frag;
	from->bvec += bi.bi_idx;
	from->nr_segs -= bi.bi_idx;
	from->count -= copied;
	from->iov_offset = bi.bi_bvec_done;

	skb->data_len += copied;
	skb->len += copied;
	skb->truesize += truesize;
	return ret;
}

static int io_send_zc_import(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_async_msghdr *kmsg = req->async_data;

	WARN_ON_ONCE(!(sr->flags & IORING_RECVSEND_FIXED_BUF));

	sr->notif->buf_index = req->buf_index;
	return io_import_reg_buf(sr->notif, &kmsg->msg.msg_iter,
				 (u64)(uintptr_t)sr->buf, sr->len,
				 ITER_SOURCE, issue_flags);
}

int io_send_zc(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_async_msghdr *kmsg = req->async_data;
	struct socket *sock;
	unsigned msg_flags;
	int ret, min_ret = 0;

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;
	if (!test_bit(SOCK_SUPPORT_ZC, &sock->flags))
		return -EOPNOTSUPP;

	if (!(req->flags & REQ_F_POLLED) &&
	    (zc->flags & IORING_RECVSEND_POLL_FIRST))
		return -EAGAIN;

	if (req->flags & REQ_F_IMPORT_BUFFER) {
		req->flags &= ~REQ_F_IMPORT_BUFFER;
		ret = io_send_zc_import(req, issue_flags);
		if (unlikely(ret))
			return ret;
	}

	msg_flags = zc->msg_flags;
	if (issue_flags & IO_URING_F_NONBLOCK)
		msg_flags |= MSG_DONTWAIT;
	if (msg_flags & MSG_WAITALL)
		min_ret = iov_iter_count(&kmsg->msg.msg_iter);
	msg_flags &= ~MSG_INTERNAL_SENDMSG_FLAGS;

	kmsg->msg.msg_flags = msg_flags;
	kmsg->msg.msg_ubuf = &io_notif_to_data(zc->notif)->uarg;
	ret = sock_sendmsg(sock, &kmsg->msg);

	if (unlikely(ret < min_ret)) {
		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
			return -EAGAIN;

		if (ret > 0 && io_net_retry(sock, kmsg->msg.msg_flags)) {
			zc->len -= ret;
			zc->buf += ret;
			zc->done_io += ret;
			req->flags |= REQ_F_BL_NO_RECYCLE;
			return -EAGAIN;
		}
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	}

	if (ret >= 0)
		ret += zc->done_io;
	else if (zc->done_io)
		ret = zc->done_io;

	/*
	 * If we're in io-wq we can't rely on tw ordering guarantees, defer
	 * flushing notif to io_send_zc_cleanup()
	 */
	if (!(issue_flags & IO_URING_F_UNLOCKED)) {
		io_notif_flush(zc->notif);
		zc->notif = NULL;
		io_req_msg_cleanup(req, 0);
	}
	io_req_set_res(req, ret, IORING_CQE_F_MORE);
	return IOU_COMPLETE;
}
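/*
 * Completion model for zero-copy sends (summary): each SEND_ZC posts two
 * CQEs with the same user_data. The first carries the byte count plus
 * IORING_CQE_F_MORE; the second carries IORING_CQE_F_NOTIF and fires once
 * the kernel has dropped all references to the user pages, i.e. when the
 * buffer may safely be reused. With IORING_SEND_ZC_REPORT_USAGE, the
 * notification can additionally indicate that the send fell back to
 * copying instead of true zero-copy.
 */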
int io_sendmsg_zc(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_async_msghdr *kmsg = req->async_data;
	struct socket *sock;
	unsigned flags;
	int ret, min_ret = 0;

	if (req->flags & REQ_F_IMPORT_BUFFER) {
		unsigned uvec_segs = kmsg->msg.msg_iter.nr_segs;
		int ret;

		ret = io_import_reg_vec(ITER_SOURCE, &kmsg->msg.msg_iter, req,
					&kmsg->vec, uvec_segs, issue_flags);
		if (unlikely(ret))
			return ret;
		req->flags &= ~REQ_F_IMPORT_BUFFER;
	}

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;
	if (!test_bit(SOCK_SUPPORT_ZC, &sock->flags))
		return -EOPNOTSUPP;

	if (!(req->flags & REQ_F_POLLED) &&
	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
		return -EAGAIN;

	flags = sr->msg_flags;
	if (issue_flags & IO_URING_F_NONBLOCK)
		flags |= MSG_DONTWAIT;
	if (flags & MSG_WAITALL)
		min_ret = iov_iter_count(&kmsg->msg.msg_iter);

	kmsg->msg.msg_control_user = sr->msg_control;
	kmsg->msg.msg_ubuf = &io_notif_to_data(sr->notif)->uarg;
	ret = __sys_sendmsg_sock(sock, &kmsg->msg, flags);

	if (unlikely(ret < min_ret)) {
		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
			return -EAGAIN;

		if (ret > 0 && io_net_retry(sock, flags)) {
			sr->done_io += ret;
			req->flags |= REQ_F_BL_NO_RECYCLE;
			return -EAGAIN;
		}
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	}

	if (ret >= 0)
		ret += sr->done_io;
	else if (sr->done_io)
		ret = sr->done_io;

	/*
	 * If we're in io-wq we can't rely on tw ordering guarantees, defer
	 * flushing notif to io_send_zc_cleanup()
	 */
	if (!(issue_flags & IO_URING_F_UNLOCKED)) {
		io_notif_flush(sr->notif);
		sr->notif = NULL;
		io_req_msg_cleanup(req, 0);
	}
	io_req_set_res(req, ret, IORING_CQE_F_MORE);
	return IOU_COMPLETE;
}
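/*
 * Note (summary): with IORING_RECVSEND_FIXED_BUF, io_sendmsg_setup() only
 * records the user iovec at prep time and sets REQ_F_IMPORT_BUFFER; the
 * registered-buffer vector is resolved above on first issue, when the
 * ring's resource state can be accessed safely.
 */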
void io_sendrecv_fail(struct io_kiocb *req)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);

	if (sr->done_io)
		req->cqe.res = sr->done_io;

	if ((req->flags & REQ_F_NEED_CLEANUP) &&
	    (req->opcode == IORING_OP_SEND_ZC || req->opcode == IORING_OP_SENDMSG_ZC))
		req->cqe.flags |= IORING_CQE_F_MORE;
}

#define ACCEPT_FLAGS	(IORING_ACCEPT_MULTISHOT | IORING_ACCEPT_DONTWAIT | \
			 IORING_ACCEPT_POLL_FIRST)

int io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_accept *accept = io_kiocb_to_cmd(req, struct io_accept);

	if (sqe->len || sqe->buf_index)
		return -EINVAL;

	accept->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
	accept->addr_len = u64_to_user_ptr(READ_ONCE(sqe->addr2));
	accept->flags = READ_ONCE(sqe->accept_flags);
	accept->nofile = rlimit(RLIMIT_NOFILE);
	accept->iou_flags = READ_ONCE(sqe->ioprio);
	if (accept->iou_flags & ~ACCEPT_FLAGS)
		return -EINVAL;

	accept->file_slot = READ_ONCE(sqe->file_index);
	if (accept->file_slot) {
		if (accept->flags & SOCK_CLOEXEC)
			return -EINVAL;
		if (accept->iou_flags & IORING_ACCEPT_MULTISHOT &&
		    accept->file_slot != IORING_FILE_INDEX_ALLOC)
			return -EINVAL;
	}
	if (accept->flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK))
		return -EINVAL;
	if (SOCK_NONBLOCK != O_NONBLOCK && (accept->flags & SOCK_NONBLOCK))
		accept->flags = (accept->flags & ~SOCK_NONBLOCK) | O_NONBLOCK;
	if (accept->iou_flags & IORING_ACCEPT_MULTISHOT)
		req->flags |= REQ_F_APOLL_MULTISHOT;
	if (accept->iou_flags & IORING_ACCEPT_DONTWAIT)
		req->flags |= REQ_F_NOWAIT;
	return 0;
}

int io_accept(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_accept *accept = io_kiocb_to_cmd(req, struct io_accept);
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
	bool fixed = !!accept->file_slot;
	struct proto_accept_arg arg = {
		.flags = force_nonblock ? O_NONBLOCK : 0,
	};
	struct file *file;
	unsigned cflags;
	int ret, fd;

	if (!(req->flags & REQ_F_POLLED) &&
	    accept->iou_flags & IORING_ACCEPT_POLL_FIRST)
		return -EAGAIN;

retry:
	if (!fixed) {
		fd = __get_unused_fd_flags(accept->flags, accept->nofile);
		if (unlikely(fd < 0))
			return fd;
	}
	arg.err = 0;
	arg.is_empty = -1;
	file = do_accept(req->file, &arg, accept->addr, accept->addr_len,
			 accept->flags);
	if (IS_ERR(file)) {
		if (!fixed)
			put_unused_fd(fd);
		ret = PTR_ERR(file);
		if (ret == -EAGAIN && force_nonblock &&
		    !(accept->iou_flags & IORING_ACCEPT_DONTWAIT))
			return IOU_RETRY;

		if (ret == -ERESTARTSYS)
			ret = -EINTR;
	} else if (!fixed) {
		fd_install(fd, file);
		ret = fd;
	} else {
		ret = io_fixed_fd_install(req, issue_flags, file,
					  accept->file_slot);
	}

	cflags = 0;
	if (!arg.is_empty)
		cflags |= IORING_CQE_F_SOCK_NONEMPTY;

	if (ret >= 0 && (req->flags & REQ_F_APOLL_MULTISHOT) &&
	    io_req_post_cqe(req, ret, cflags | IORING_CQE_F_MORE)) {
		if (cflags & IORING_CQE_F_SOCK_NONEMPTY || arg.is_empty == -1)
			goto retry;
		return IOU_RETRY;
	}

	io_req_set_res(req, ret, cflags);
	if (ret < 0)
		req_set_fail(req);
	return IOU_COMPLETE;
}
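/*
 * Illustrative only (assuming liburing): a multishot accept stays armed and
 * posts one CQE per accepted connection, each flagged IORING_CQE_F_MORE
 * while the request remains active:
 *
 *	io_uring_prep_multishot_accept(sqe, listenfd, NULL, NULL, 0);
 *
 * A CQE without IORING_CQE_F_MORE means the request terminated (e.g. on
 * error) and must be re-armed by userspace.
 */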
int io_socket_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_socket *sock = io_kiocb_to_cmd(req, struct io_socket);

	if (sqe->addr || sqe->rw_flags || sqe->buf_index)
		return -EINVAL;

	sock->domain = READ_ONCE(sqe->fd);
	sock->type = READ_ONCE(sqe->off);
	sock->protocol = READ_ONCE(sqe->len);
	sock->file_slot = READ_ONCE(sqe->file_index);
	sock->nofile = rlimit(RLIMIT_NOFILE);

	sock->flags = sock->type & ~SOCK_TYPE_MASK;
	if (sock->file_slot && (sock->flags & SOCK_CLOEXEC))
		return -EINVAL;
	if (sock->flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK))
		return -EINVAL;
	return 0;
}

int io_socket(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_socket *sock = io_kiocb_to_cmd(req, struct io_socket);
	bool fixed = !!sock->file_slot;
	struct file *file;
	int ret, fd;

	if (!fixed) {
		fd = __get_unused_fd_flags(sock->flags, sock->nofile);
		if (unlikely(fd < 0))
			return fd;
	}
	file = __sys_socket_file(sock->domain, sock->type, sock->protocol);
	if (IS_ERR(file)) {
		if (!fixed)
			put_unused_fd(fd);
		ret = PTR_ERR(file);
		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
			return -EAGAIN;
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	} else if (!fixed) {
		fd_install(fd, file);
		ret = fd;
	} else {
		ret = io_fixed_fd_install(req, issue_flags, file,
					  sock->file_slot);
	}
	io_req_set_res(req, ret, 0);
	return IOU_COMPLETE;
}

int io_connect_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_connect *conn = io_kiocb_to_cmd(req, struct io_connect);
	struct io_async_msghdr *io;

	if (sqe->len || sqe->buf_index || sqe->rw_flags || sqe->splice_fd_in)
		return -EINVAL;

	conn->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
	conn->addr_len = READ_ONCE(sqe->addr2);
	conn->in_progress = conn->seen_econnaborted = false;

	io = io_msg_alloc_async(req);
	if (unlikely(!io))
		return -ENOMEM;

	return move_addr_to_kernel(conn->addr, conn->addr_len, &io->addr);
}
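/*
 * Connect state machine (summary of io_connect() below): a first
 * nonblocking attempt returning -EINPROGRESS marks the request in_progress
 * and bounces it back via -EAGAIN so poll can wait on the socket. On
 * re-issue, vfs_poll() is probed for EPOLLERR, and -EBADFD/-EISCONN from
 * the retried connect are translated through sock_error() into the real
 * completion result.
 */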
int io_connect(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_connect *connect = io_kiocb_to_cmd(req, struct io_connect);
	struct io_async_msghdr *io = req->async_data;
	unsigned file_flags;
	int ret;
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;

	if (connect->in_progress) {
		struct poll_table_struct pt = { ._key = EPOLLERR };

		if (vfs_poll(req->file, &pt) & EPOLLERR)
			goto get_sock_err;
	}

	file_flags = force_nonblock ? O_NONBLOCK : 0;

	ret = __sys_connect_file(req->file, &io->addr, connect->addr_len,
				 file_flags);
	if ((ret == -EAGAIN || ret == -EINPROGRESS || ret == -ECONNABORTED)
	    && force_nonblock) {
		if (ret == -EINPROGRESS) {
			connect->in_progress = true;
		} else if (ret == -ECONNABORTED) {
			if (connect->seen_econnaborted)
				goto out;
			connect->seen_econnaborted = true;
		}
		return -EAGAIN;
	}
	if (connect->in_progress) {
		/*
		 * At least bluetooth will return -EBADFD on a re-connect
		 * attempt, and it's (supposedly) also valid to get -EISCONN
		 * which means the previous result is good. For both of these,
		 * grab the sock_error() and use that for the completion.
		 */
		if (ret == -EBADFD || ret == -EISCONN) {
get_sock_err:
			ret = sock_error(sock_from_file(req->file)->sk);
		}
	}
	if (ret == -ERESTARTSYS)
		ret = -EINTR;
out:
	if (ret < 0)
		req_set_fail(req);
	io_req_msg_cleanup(req, issue_flags);
	io_req_set_res(req, ret, 0);
	return IOU_COMPLETE;
}

int io_bind_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_bind *bind = io_kiocb_to_cmd(req, struct io_bind);
	struct sockaddr __user *uaddr;
	struct io_async_msghdr *io;

	if (sqe->len || sqe->buf_index || sqe->rw_flags || sqe->splice_fd_in)
		return -EINVAL;

	uaddr = u64_to_user_ptr(READ_ONCE(sqe->addr));
	bind->addr_len = READ_ONCE(sqe->addr2);

	io = io_msg_alloc_async(req);
	if (unlikely(!io))
		return -ENOMEM;
	return move_addr_to_kernel(uaddr, bind->addr_len, &io->addr);
}

int io_bind(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_bind *bind = io_kiocb_to_cmd(req, struct io_bind);
	struct io_async_msghdr *io = req->async_data;
	struct socket *sock;
	int ret;

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;

	ret = __sys_bind_socket(sock, &io->addr, bind->addr_len);
	if (ret < 0)
		req_set_fail(req);
	io_req_set_res(req, ret, 0);
	return 0;
}

int io_listen_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_listen *listen = io_kiocb_to_cmd(req, struct io_listen);

	if (sqe->addr || sqe->buf_index || sqe->rw_flags || sqe->splice_fd_in || sqe->addr2)
		return -EINVAL;

	listen->backlog = READ_ONCE(sqe->len);
	return 0;
}

int io_listen(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_listen *listen = io_kiocb_to_cmd(req, struct io_listen);
	struct socket *sock;
	int ret;

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;

	ret = __sys_listen_socket(sock, listen->backlog);
	if (ret < 0)
		req_set_fail(req);
	io_req_set_res(req, ret, 0);
	return 0;
}

void io_netmsg_cache_free(const void *entry)
{
	struct io_async_msghdr *kmsg = (struct io_async_msghdr *) entry;

	io_vec_free(&kmsg->vec);
	kfree(kmsg);
}