// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/net.h>
#include <linux/compat.h>
#include <net/compat.h>
#include <linux/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "filetable.h"
#include "io_uring.h"
#include "kbuf.h"
#include "alloc_cache.h"
#include "net.h"
#include "notif.h"
#include "rsrc.h"
#include "zcrx.h"

struct io_shutdown {
	struct file *file;
	int how;
};

struct io_accept {
	struct file *file;
	struct sockaddr __user *addr;
	int __user *addr_len;
	int flags;
	int iou_flags;
	u32 file_slot;
	unsigned long nofile;
};

struct io_socket {
	struct file *file;
	int domain;
	int type;
	int protocol;
	int flags;
	u32 file_slot;
	unsigned long nofile;
};

struct io_connect {
	struct file *file;
	struct sockaddr __user *addr;
	int addr_len;
	bool in_progress;
	bool seen_econnaborted;
};

struct io_bind {
	struct file *file;
	int addr_len;
};

struct io_listen {
	struct file *file;
	int backlog;
};

struct io_sr_msg {
	struct file *file;
	union {
		struct compat_msghdr __user *umsg_compat;
		struct user_msghdr __user *umsg;
		void __user *buf;
	};
	int len;
	unsigned done_io;
	unsigned msg_flags;
	unsigned nr_multishot_loops;
	u16 flags;
	/* initialised and used only by !msg send variants */
	u16 buf_group;
	/* per-invocation mshot limit */
	unsigned mshot_len;
	/* overall mshot byte limit */
	unsigned mshot_total_len;
	void __user *msg_control;
	/* used only for send zerocopy */
	struct io_kiocb *notif;
};

/*
 * The UAPI flags are the lower 8 bits, as that's all sqe->ioprio will hold
 * anyway. Use the upper 8 bits for internal uses.
 */
enum sr_retry_flags {
	IORING_RECV_RETRY	= (1U << 15),
	IORING_RECV_PARTIAL_MAP	= (1U << 14),
	IORING_RECV_MSHOT_CAP	= (1U << 13),
	IORING_RECV_MSHOT_LIM	= (1U << 12),
	IORING_RECV_MSHOT_DONE	= (1U << 11),

	IORING_RECV_RETRY_CLEAR	= IORING_RECV_RETRY | IORING_RECV_PARTIAL_MAP,
	IORING_RECV_NO_RETRY	= IORING_RECV_RETRY | IORING_RECV_PARTIAL_MAP |
				  IORING_RECV_MSHOT_CAP | IORING_RECV_MSHOT_DONE,
};

/*
 * Number of times we'll try and do receives if there's more data. If we
 * exceed this limit, then add us to the back of the queue and retry from
 * there. This helps fairness between flooding clients.
 */
#define MULTISHOT_MAX_RETRY	32

struct io_recvzc {
	struct file *file;
	u16 flags;
	u32 len;
	struct io_zcrx_ifq *ifq;
};

static int io_sg_from_iter_iovec(struct sk_buff *skb,
				 struct iov_iter *from, size_t length);
static int io_sg_from_iter(struct sk_buff *skb,
			   struct iov_iter *from, size_t length);

int io_shutdown_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_shutdown *shutdown = io_kiocb_to_cmd(req, struct io_shutdown);

	if (unlikely(sqe->off || sqe->addr || sqe->rw_flags ||
		     sqe->buf_index || sqe->splice_fd_in))
		return -EINVAL;

	shutdown->how = READ_ONCE(sqe->len);
	req->flags |= REQ_F_FORCE_ASYNC;
	return 0;
}

int io_shutdown(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_shutdown *shutdown = io_kiocb_to_cmd(req, struct io_shutdown);
	struct socket *sock;
	int ret;

	WARN_ON_ONCE(issue_flags & IO_URING_F_NONBLOCK);

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;

	ret = __sys_shutdown_sock(sock, shutdown->how);
	io_req_set_res(req, ret, 0);
	return IOU_COMPLETE;
}

static bool io_net_retry(struct socket *sock, int flags)
{
	if (!(flags & MSG_WAITALL))
		return false;
	return sock->type == SOCK_STREAM || sock->type == SOCK_SEQPACKET;
}

static void io_netmsg_iovec_free(struct io_async_msghdr *kmsg)
{
	if (kmsg->vec.iovec)
		io_vec_free(&kmsg->vec);
}

static void io_netmsg_recycle(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_async_msghdr *hdr = req->async_data;

	/* can't recycle, ensure we free the iovec if we have one */
	if (unlikely(issue_flags & IO_URING_F_UNLOCKED)) {
		io_netmsg_iovec_free(hdr);
		return;
	}

	/* Let normal cleanup path reap it if we fail adding to the cache */
	io_alloc_cache_vec_kasan(&hdr->vec);
	if (hdr->vec.nr > IO_VEC_CACHE_SOFT_CAP)
		io_vec_free(&hdr->vec);

	if (io_alloc_cache_put(&req->ctx->netmsg_cache, hdr))
		io_req_async_data_clear(req, REQ_F_NEED_CLEANUP);
}

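/*
 * Allocate an io_async_msghdr, preferably from the ring's netmsg cache. A
 * recycled entry may still hold an iovec from earlier use, in which case
 * the request is flagged for cleanup so the vec gets freed with it.
 */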
static struct io_async_msghdr *io_msg_alloc_async(struct io_kiocb *req)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct io_async_msghdr *hdr;

	hdr = io_uring_alloc_async_data(&ctx->netmsg_cache, req);
	if (!hdr)
		return NULL;

	/* If the async data was cached, we might have an iov cached inside. */
	if (hdr->vec.iovec)
		req->flags |= REQ_F_NEED_CLEANUP;
	return hdr;
}

static inline void io_mshot_prep_retry(struct io_kiocb *req,
				       struct io_async_msghdr *kmsg)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);

	req->flags &= ~REQ_F_BL_EMPTY;
	sr->done_io = 0;
	sr->flags &= ~IORING_RECV_RETRY_CLEAR;
	sr->len = sr->mshot_len;
}

static int io_net_import_vec(struct io_kiocb *req, struct io_async_msghdr *iomsg,
			     const struct iovec __user *uiov, unsigned uvec_seg,
			     int ddir)
{
	struct iovec *iov;
	int ret, nr_segs;

	if (iomsg->vec.iovec) {
		nr_segs = iomsg->vec.nr;
		iov = iomsg->vec.iovec;
	} else {
		nr_segs = 1;
		iov = &iomsg->fast_iov;
	}

	ret = __import_iovec(ddir, uiov, uvec_seg, nr_segs, &iov,
			     &iomsg->msg.msg_iter, io_is_compat(req->ctx));
	if (unlikely(ret < 0))
		return ret;

	if (iov) {
		req->flags |= REQ_F_NEED_CLEANUP;
		io_vec_reset_iovec(&iomsg->vec, iov, iomsg->msg.msg_iter.nr_segs);
	}
	return 0;
}

static int io_compat_msg_copy_hdr(struct io_kiocb *req,
				  struct io_async_msghdr *iomsg,
				  struct compat_msghdr *msg, int ddir,
				  struct sockaddr __user **save_addr)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct compat_iovec __user *uiov;
	int ret;

	if (copy_from_user(msg, sr->umsg_compat, sizeof(*msg)))
		return -EFAULT;

	ret = __get_compat_msghdr(&iomsg->msg, msg, save_addr);
	if (ret)
		return ret;

	uiov = compat_ptr(msg->msg_iov);
	if (req->flags & REQ_F_BUFFER_SELECT) {
		if (msg->msg_iovlen == 0) {
			sr->len = 0;
		} else if (msg->msg_iovlen > 1) {
			return -EINVAL;
		} else {
			struct compat_iovec tmp_iov;

			if (copy_from_user(&tmp_iov, uiov, sizeof(tmp_iov)))
				return -EFAULT;
			sr->len = tmp_iov.iov_len;
		}
	}
	return 0;
}

static int io_copy_msghdr_from_user(struct user_msghdr *msg,
				    struct user_msghdr __user *umsg)
{
	if (!user_access_begin(umsg, sizeof(*umsg)))
		return -EFAULT;
	unsafe_get_user(msg->msg_name, &umsg->msg_name, ua_end);
	unsafe_get_user(msg->msg_namelen, &umsg->msg_namelen, ua_end);
	unsafe_get_user(msg->msg_iov, &umsg->msg_iov, ua_end);
	unsafe_get_user(msg->msg_iovlen, &umsg->msg_iovlen, ua_end);
	unsafe_get_user(msg->msg_control, &umsg->msg_control, ua_end);
	unsafe_get_user(msg->msg_controllen, &umsg->msg_controllen, ua_end);
	user_access_end();
	return 0;
ua_end:
	user_access_end();
	return -EFAULT;
}

static int io_msg_copy_hdr(struct io_kiocb *req, struct io_async_msghdr *iomsg,
			   struct user_msghdr *msg, int ddir,
			   struct sockaddr __user **save_addr)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct user_msghdr __user *umsg = sr->umsg;
	int ret;

	iomsg->msg.msg_name = &iomsg->addr;
	iomsg->msg.msg_iter.nr_segs = 0;

	if (io_is_compat(req->ctx)) {
		struct compat_msghdr cmsg;

		ret = io_compat_msg_copy_hdr(req, iomsg, &cmsg, ddir, save_addr);
		if (ret)
			return ret;

		memset(msg, 0, sizeof(*msg));
		msg->msg_namelen = cmsg.msg_namelen;
		msg->msg_controllen = cmsg.msg_controllen;
		msg->msg_iov = compat_ptr(cmsg.msg_iov);
		msg->msg_iovlen = cmsg.msg_iovlen;
		return 0;
	}

	ret = io_copy_msghdr_from_user(msg, umsg);
	if (unlikely(ret))
		return ret;

	msg->msg_flags = 0;

	ret = __copy_msghdr(&iomsg->msg, msg, save_addr);
	if (ret)
		return ret;

	if (req->flags & REQ_F_BUFFER_SELECT) {
		if (msg->msg_iovlen == 0) {
			sr->len = 0;
		} else if (msg->msg_iovlen > 1) {
			return -EINVAL;
		} else {
			struct iovec __user *uiov = msg->msg_iov;
			struct iovec tmp_iov;

			if (copy_from_user(&tmp_iov, uiov, sizeof(tmp_iov)))
				return -EFAULT;
			sr->len = tmp_iov.iov_len;
		}
	}
	return 0;
}

void io_sendmsg_recvmsg_cleanup(struct io_kiocb *req)
{
	struct io_async_msghdr *io = req->async_data;

	io_netmsg_iovec_free(io);
}

static int io_send_setup(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_async_msghdr *kmsg = req->async_data;
	void __user *addr;
	u16 addr_len;
	int ret;

	sr->buf = u64_to_user_ptr(READ_ONCE(sqe->addr));

	if (READ_ONCE(sqe->__pad3[0]))
		return -EINVAL;

	kmsg->msg.msg_name = NULL;
	kmsg->msg.msg_namelen = 0;
	kmsg->msg.msg_control = NULL;
	kmsg->msg.msg_controllen = 0;
	kmsg->msg.msg_ubuf = NULL;

	addr = u64_to_user_ptr(READ_ONCE(sqe->addr2));
	addr_len = READ_ONCE(sqe->addr_len);
	if (addr) {
		ret = move_addr_to_kernel(addr, addr_len, &kmsg->addr);
		if (unlikely(ret < 0))
			return ret;
		kmsg->msg.msg_name = &kmsg->addr;
		kmsg->msg.msg_namelen = addr_len;
	}
	if (sr->flags & IORING_RECVSEND_FIXED_BUF) {
		req->flags |= REQ_F_IMPORT_BUFFER;
		return 0;
	}
	if (req->flags & REQ_F_BUFFER_SELECT)
		return 0;

	if (sr->flags & IORING_SEND_VECTORIZED)
		return io_net_import_vec(req, kmsg, sr->buf, sr->len, ITER_SOURCE);

	return import_ubuf(ITER_SOURCE, sr->buf, sr->len, &kmsg->msg.msg_iter);
}

static int io_sendmsg_setup(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_async_msghdr *kmsg = req->async_data;
	struct user_msghdr msg;
	int ret;

	sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
	ret = io_msg_copy_hdr(req, kmsg, &msg, ITER_SOURCE, NULL);
	if (unlikely(ret))
		return ret;
	/* save msg_control as sys_sendmsg() overwrites it */
	sr->msg_control = kmsg->msg.msg_control_user;

	if (sr->flags & IORING_RECVSEND_FIXED_BUF) {
		kmsg->msg.msg_iter.nr_segs = msg.msg_iovlen;
		return io_prep_reg_iovec(req, &kmsg->vec, msg.msg_iov,
					 msg.msg_iovlen);
	}
	if (req->flags & REQ_F_BUFFER_SELECT)
		return 0;
	return io_net_import_vec(req, kmsg, msg.msg_iov, msg.msg_iovlen, ITER_SOURCE);
}

#define SENDMSG_FLAGS (IORING_RECVSEND_POLL_FIRST | IORING_RECVSEND_BUNDLE | IORING_SEND_VECTORIZED)

int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);

	sr->done_io = 0;
	sr->len = READ_ONCE(sqe->len);
	sr->flags = READ_ONCE(sqe->ioprio);
	if (sr->flags & ~SENDMSG_FLAGS)
		return -EINVAL;
	sr->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL;
	if (sr->msg_flags & MSG_DONTWAIT)
		req->flags |= REQ_F_NOWAIT;
	if (req->flags & REQ_F_BUFFER_SELECT)
		sr->buf_group = req->buf_index;
	if (sr->flags & IORING_RECVSEND_BUNDLE) {
		if (req->opcode == IORING_OP_SENDMSG)
			return -EINVAL;
		sr->msg_flags |= MSG_WAITALL;
		req->flags |= REQ_F_MULTISHOT;
	}

	if (io_is_compat(req->ctx))
		sr->msg_flags |= MSG_CMSG_COMPAT;

	if (unlikely(!io_msg_alloc_async(req)))
		return -ENOMEM;
	if (req->opcode != IORING_OP_SENDMSG)
		return io_send_setup(req, sqe);
	if (unlikely(sqe->addr2 || sqe->file_index))
		return -EINVAL;
	return io_sendmsg_setup(req, sqe);
}

static void io_req_msg_cleanup(struct io_kiocb *req,
			       unsigned int issue_flags)
{
	io_netmsg_recycle(req, issue_flags);
}

/*
 * For bundle completions, we need to figure out how many segments we consumed.
 * A bundle could be using a single ITER_UBUF if that's all we mapped, or it
 * could be using an ITER_IOVEC. If the latter, then if we consumed all of
 * the segments, then it's a trivial question to answer. If we have residual
 * data in the iter, then loop the segments to figure out how much we
 * transferred.
 */
static int io_bundle_nbufs(struct io_async_msghdr *kmsg, int ret)
{
	struct iovec *iov;
	int nbufs;

	/* no data is always zero segments, and a ubuf is always 1 segment */
	if (ret <= 0)
		return 0;
	if (iter_is_ubuf(&kmsg->msg.msg_iter))
		return 1;

	iov = kmsg->vec.iovec;
	if (!iov)
		iov = &kmsg->fast_iov;

	/* if all data was transferred, it's basic pointer math */
	if (!iov_iter_count(&kmsg->msg.msg_iter))
		return iter_iov(&kmsg->msg.msg_iter) - iov;

	/* short transfer, count segments */
	nbufs = 0;
	do {
		int this_len = min_t(int, iov[nbufs].iov_len, ret);

		nbufs++;
		ret -= this_len;
	} while (ret);

	return nbufs;
}

static int io_net_kbuf_recyle(struct io_kiocb *req, struct io_buffer_list *bl,
			      struct io_async_msghdr *kmsg, int len)
{
	req->flags |= REQ_F_BL_NO_RECYCLE;
	if (req->flags & REQ_F_BUFFERS_COMMIT)
		io_kbuf_commit(req, bl, len, io_bundle_nbufs(kmsg, len));
	return IOU_RETRY;
}

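/*
 * Complete a send attempt. For a bundle send that made progress and still
 * has buffers available, post a CQE for this chunk and prep the request to
 * continue; returning false tells the caller to retry the bundle.
 */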
static inline bool io_send_finish(struct io_kiocb *req,
				  struct io_async_msghdr *kmsg,
				  struct io_br_sel *sel)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	bool bundle_finished = sel->val <= 0;
	unsigned int cflags;

	if (!(sr->flags & IORING_RECVSEND_BUNDLE)) {
		cflags = io_put_kbuf(req, sel->val, sel->buf_list);
		goto finish;
	}

	cflags = io_put_kbufs(req, sel->val, sel->buf_list, io_bundle_nbufs(kmsg, sel->val));

	/*
	 * Don't start new bundles if the buffer list is empty, or if the
	 * current operation needed to go through polling to complete.
	 */
	if (bundle_finished || req->flags & (REQ_F_BL_EMPTY | REQ_F_POLLED))
		goto finish;

	/*
	 * Fill CQE for this receive and see if we should keep trying to
	 * receive from this socket.
	 */
	if (io_req_post_cqe(req, sel->val, cflags | IORING_CQE_F_MORE)) {
		io_mshot_prep_retry(req, kmsg);
		return false;
	}

	/* Otherwise stop bundle and use the current result. */
finish:
	io_req_set_res(req, sel->val, cflags);
	sel->val = IOU_COMPLETE;
	return true;
}

int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_async_msghdr *kmsg = req->async_data;
	struct socket *sock;
	unsigned flags;
	int min_ret = 0;
	int ret;

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;

	if (!(req->flags & REQ_F_POLLED) &&
	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
		return -EAGAIN;

	flags = sr->msg_flags;
	if (issue_flags & IO_URING_F_NONBLOCK)
		flags |= MSG_DONTWAIT;
	if (flags & MSG_WAITALL)
		min_ret = iov_iter_count(&kmsg->msg.msg_iter);

	kmsg->msg.msg_control_user = sr->msg_control;

	ret = __sys_sendmsg_sock(sock, &kmsg->msg, flags);

	if (ret < min_ret) {
		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
			return -EAGAIN;
		if (ret > 0 && io_net_retry(sock, flags)) {
			kmsg->msg.msg_controllen = 0;
			kmsg->msg.msg_control = NULL;
			sr->done_io += ret;
			return -EAGAIN;
		}
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	}
	io_req_msg_cleanup(req, issue_flags);
	if (ret >= 0)
		ret += sr->done_io;
	else if (sr->done_io)
		ret = sr->done_io;
	io_req_set_res(req, ret, 0);
	return IOU_COMPLETE;
}

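/*
 * Pick provided buffer(s) for a send. A bundle send may map several buffers
 * in one go; a single selected buffer is imported as a plain ubuf, otherwise
 * the selected iovecs back the msg iterator directly.
 */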
static int io_send_select_buffer(struct io_kiocb *req, unsigned int issue_flags,
				 struct io_br_sel *sel, struct io_async_msghdr *kmsg)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct buf_sel_arg arg = {
		.iovs = &kmsg->fast_iov,
		.max_len = min_not_zero(sr->len, INT_MAX),
		.nr_iovs = 1,
		.buf_group = sr->buf_group,
	};
	int ret;

	if (kmsg->vec.iovec) {
		arg.nr_iovs = kmsg->vec.nr;
		arg.iovs = kmsg->vec.iovec;
		arg.mode = KBUF_MODE_FREE;
	}

	if (!(sr->flags & IORING_RECVSEND_BUNDLE))
		arg.nr_iovs = 1;
	else
		arg.mode |= KBUF_MODE_EXPAND;

	ret = io_buffers_select(req, &arg, sel, issue_flags);
	if (unlikely(ret < 0))
		return ret;

	if (arg.iovs != &kmsg->fast_iov && arg.iovs != kmsg->vec.iovec) {
		kmsg->vec.nr = ret;
		kmsg->vec.iovec = arg.iovs;
		req->flags |= REQ_F_NEED_CLEANUP;
	}
	sr->len = arg.out_len;

	if (ret == 1) {
		sr->buf = arg.iovs[0].iov_base;
		ret = import_ubuf(ITER_SOURCE, sr->buf, sr->len,
				  &kmsg->msg.msg_iter);
		if (unlikely(ret))
			return ret;
	} else {
		iov_iter_init(&kmsg->msg.msg_iter, ITER_SOURCE,
			      arg.iovs, ret, arg.out_len);
	}

	return 0;
}

int io_send(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_async_msghdr *kmsg = req->async_data;
	struct io_br_sel sel = { };
	struct socket *sock;
	unsigned flags;
	int min_ret = 0;
	int ret;

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;

	if (!(req->flags & REQ_F_POLLED) &&
	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
		return -EAGAIN;

	flags = sr->msg_flags;
	if (issue_flags & IO_URING_F_NONBLOCK)
		flags |= MSG_DONTWAIT;

retry_bundle:
	sel.buf_list = NULL;
	if (io_do_buffer_select(req)) {
		ret = io_send_select_buffer(req, issue_flags, &sel, kmsg);
		if (ret)
			return ret;
	}

	/*
	 * If MSG_WAITALL is set, or this is a bundle send, then we need
	 * the full amount. If just bundle is set, if we do a short send
	 * then we complete the bundle sequence rather than continue on.
	 */
	if (flags & MSG_WAITALL || sr->flags & IORING_RECVSEND_BUNDLE)
		min_ret = iov_iter_count(&kmsg->msg.msg_iter);

	flags &= ~MSG_INTERNAL_SENDMSG_FLAGS;
	kmsg->msg.msg_flags = flags;
	ret = sock_sendmsg(sock, &kmsg->msg);
	if (ret < min_ret) {
		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
			return -EAGAIN;

		if (ret > 0 && io_net_retry(sock, flags)) {
			sr->len -= ret;
			sr->buf += ret;
			sr->done_io += ret;
			return io_net_kbuf_recyle(req, sel.buf_list, kmsg, ret);
		}
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	}
	if (ret >= 0)
		ret += sr->done_io;
	else if (sr->done_io)
		ret = sr->done_io;

	sel.val = ret;
	if (!io_send_finish(req, kmsg, &sel))
		goto retry_bundle;

	io_req_msg_cleanup(req, issue_flags);
	return sel.val;
}

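/*
 * For multishot recvmsg with provided buffers, each selected buffer starts
 * with an io_uring_recvmsg_out header followed by the address and control
 * data. Verify that the header size doesn't overflow before stashing the
 * name and control lengths for later use.
 */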
static int io_recvmsg_mshot_prep(struct io_kiocb *req,
				 struct io_async_msghdr *iomsg,
				 int namelen, size_t controllen)
{
	if ((req->flags & (REQ_F_APOLL_MULTISHOT|REQ_F_BUFFER_SELECT)) ==
	    (REQ_F_APOLL_MULTISHOT|REQ_F_BUFFER_SELECT)) {
		int hdr;

		if (unlikely(namelen < 0))
			return -EOVERFLOW;
		if (check_add_overflow(sizeof(struct io_uring_recvmsg_out),
				       namelen, &hdr))
			return -EOVERFLOW;
		if (check_add_overflow(hdr, controllen, &hdr))
			return -EOVERFLOW;

		iomsg->namelen = namelen;
		iomsg->controllen = controllen;
		return 0;
	}

	return 0;
}

static int io_recvmsg_copy_hdr(struct io_kiocb *req,
			       struct io_async_msghdr *iomsg)
{
	struct user_msghdr msg;
	int ret;

	ret = io_msg_copy_hdr(req, iomsg, &msg, ITER_DEST, &iomsg->uaddr);
	if (unlikely(ret))
		return ret;

	if (!(req->flags & REQ_F_BUFFER_SELECT)) {
		ret = io_net_import_vec(req, iomsg, msg.msg_iov, msg.msg_iovlen,
					ITER_DEST);
		if (unlikely(ret))
			return ret;
	}
	return io_recvmsg_mshot_prep(req, iomsg, msg.msg_namelen,
				     msg.msg_controllen);
}

static int io_recvmsg_prep_setup(struct io_kiocb *req)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_async_msghdr *kmsg;

	kmsg = io_msg_alloc_async(req);
	if (unlikely(!kmsg))
		return -ENOMEM;

	if (req->opcode == IORING_OP_RECV) {
		kmsg->msg.msg_name = NULL;
		kmsg->msg.msg_namelen = 0;
		kmsg->msg.msg_inq = 0;
		kmsg->msg.msg_control = NULL;
		kmsg->msg.msg_get_inq = 1;
		kmsg->msg.msg_controllen = 0;
		kmsg->msg.msg_iocb = NULL;
		kmsg->msg.msg_ubuf = NULL;

		if (req->flags & REQ_F_BUFFER_SELECT)
			return 0;
		return import_ubuf(ITER_DEST, sr->buf, sr->len,
				   &kmsg->msg.msg_iter);
	}

	return io_recvmsg_copy_hdr(req, kmsg);
}

#define RECVMSG_FLAGS (IORING_RECVSEND_POLL_FIRST | IORING_RECV_MULTISHOT | \
		       IORING_RECVSEND_BUNDLE)

int io_recvmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);

	sr->done_io = 0;

	if (unlikely(sqe->addr2))
		return -EINVAL;

	sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
	sr->len = READ_ONCE(sqe->len);
	sr->flags = READ_ONCE(sqe->ioprio);
	if (sr->flags & ~RECVMSG_FLAGS)
		return -EINVAL;
	sr->msg_flags = READ_ONCE(sqe->msg_flags);
	if (sr->msg_flags & MSG_DONTWAIT)
		req->flags |= REQ_F_NOWAIT;
	if (sr->msg_flags & MSG_ERRQUEUE)
		req->flags |= REQ_F_CLEAR_POLLIN;
	if (req->flags & REQ_F_BUFFER_SELECT)
		sr->buf_group = req->buf_index;
	sr->mshot_total_len = sr->mshot_len = 0;
	if (sr->flags & IORING_RECV_MULTISHOT) {
		if (!(req->flags & REQ_F_BUFFER_SELECT))
			return -EINVAL;
		if (sr->msg_flags & MSG_WAITALL)
			return -EINVAL;
		if (req->opcode == IORING_OP_RECV) {
			sr->mshot_len = sr->len;
			sr->mshot_total_len = READ_ONCE(sqe->optlen);
			if (sr->mshot_total_len)
				sr->flags |= IORING_RECV_MSHOT_LIM;
		} else if (sqe->optlen) {
			return -EINVAL;
		}
		req->flags |= REQ_F_APOLL_MULTISHOT;
	} else if (sqe->optlen) {
		return -EINVAL;
	}

	if (sr->flags & IORING_RECVSEND_BUNDLE) {
		if (req->opcode == IORING_OP_RECVMSG)
			return -EINVAL;
	}

	if (io_is_compat(req->ctx))
		sr->msg_flags |= MSG_CMSG_COMPAT;

	sr->nr_multishot_loops = 0;
	return io_recvmsg_prep_setup(req);
}

/* bits to clear in old and inherit in new cflags on bundle retry */
#define CQE_F_MASK	(IORING_CQE_F_SOCK_NONEMPTY|IORING_CQE_F_MORE)

/*
 * Finishes io_recv and io_recvmsg.
 *
 * Returns true if it is actually finished, or false if it should run
 * again (for multishot).
 */
static inline bool io_recv_finish(struct io_kiocb *req,
				  struct io_async_msghdr *kmsg,
				  struct io_br_sel *sel, bool mshot_finished,
				  unsigned issue_flags)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	unsigned int cflags = 0;

	if (kmsg->msg.msg_inq > 0)
		cflags |= IORING_CQE_F_SOCK_NONEMPTY;

	if (sel->val > 0 && sr->flags & IORING_RECV_MSHOT_LIM) {
		/*
		 * If sr->mshot_total_len hits zero, the limit has been
		 * reached. Mark mshot as finished, and flag MSHOT_DONE as
		 * well to prevent a potential bundle from being retried.
		 */
		sr->mshot_total_len -= min_t(int, sel->val, sr->mshot_total_len);
		if (!sr->mshot_total_len) {
			sr->flags |= IORING_RECV_MSHOT_DONE;
			mshot_finished = true;
		}
	}

	if (sr->flags & IORING_RECVSEND_BUNDLE) {
		size_t this_ret = sel->val - sr->done_io;

		cflags |= io_put_kbufs(req, this_ret, sel->buf_list, io_bundle_nbufs(kmsg, this_ret));
		if (sr->flags & IORING_RECV_RETRY)
			cflags = req->cqe.flags | (cflags & CQE_F_MASK);
		if (sr->mshot_len && sel->val >= sr->mshot_len)
			sr->flags |= IORING_RECV_MSHOT_CAP;
		/* bundle with no more immediate buffers, we're done */
		if (req->flags & REQ_F_BL_EMPTY)
			goto finish;
		/*
		 * If more is available AND it was a full transfer, retry and
		 * append to this one
		 */
		if (!(sr->flags & IORING_RECV_NO_RETRY) &&
		    kmsg->msg.msg_inq > 1 && this_ret > 0 &&
		    !iov_iter_count(&kmsg->msg.msg_iter)) {
			req->cqe.flags = cflags & ~CQE_F_MASK;
			sr->len = kmsg->msg.msg_inq;
			sr->done_io += this_ret;
			sr->flags |= IORING_RECV_RETRY;
			return false;
		}
	} else {
		cflags |= io_put_kbuf(req, sel->val, sel->buf_list);
	}

	/*
	 * Fill CQE for this receive and see if we should keep trying to
	 * receive from this socket.
	 */
	if ((req->flags & REQ_F_APOLL_MULTISHOT) && !mshot_finished &&
	    io_req_post_cqe(req, sel->val, cflags | IORING_CQE_F_MORE)) {
		sel->val = IOU_RETRY;
		io_mshot_prep_retry(req, kmsg);
		/* Known not-empty or unknown state, retry */
		if (cflags & IORING_CQE_F_SOCK_NONEMPTY || kmsg->msg.msg_inq < 0) {
			if (sr->nr_multishot_loops++ < MULTISHOT_MAX_RETRY &&
			    !(sr->flags & IORING_RECV_MSHOT_CAP)) {
				return false;
			}
			/* mshot retries exceeded, force a requeue */
			sr->nr_multishot_loops = 0;
			sr->flags &= ~IORING_RECV_MSHOT_CAP;
			if (issue_flags & IO_URING_F_MULTISHOT)
				sel->val = IOU_REQUEUE;
		}
		return true;
	}

	/* Finish the request / stop multishot. */
finish:
	io_req_set_res(req, sel->val, cflags);
	sel->val = IOU_COMPLETE;
	io_req_msg_cleanup(req, issue_flags);
	return true;
}

static int io_recvmsg_prep_multishot(struct io_async_msghdr *kmsg,
				     struct io_sr_msg *sr, void __user **buf,
				     size_t *len)
{
	unsigned long ubuf = (unsigned long) *buf;
	unsigned long hdr;

	hdr = sizeof(struct io_uring_recvmsg_out) + kmsg->namelen +
		kmsg->controllen;
	if (*len < hdr)
		return -EFAULT;

	if (kmsg->controllen) {
		unsigned long control = ubuf + hdr - kmsg->controllen;

		kmsg->msg.msg_control_user = (void __user *) control;
		kmsg->msg.msg_controllen = kmsg->controllen;
	}

	sr->buf = *buf; /* stash for later copy */
	*buf = (void __user *) (ubuf + hdr);
	kmsg->payloadlen = *len = *len - hdr;
	return 0;
}

struct io_recvmsg_multishot_hdr {
	struct io_uring_recvmsg_out msg;
	struct sockaddr_storage addr;
};

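/*
 * Multishot recvmsg: the payload is received past the header area of the
 * selected buffer, then an io_uring_recvmsg_out header (plus any address
 * data) is copied to the front so userspace can locate name, control and
 * payload.
 */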
static int io_recvmsg_multishot(struct socket *sock, struct io_sr_msg *io,
				struct io_async_msghdr *kmsg,
				unsigned int flags, bool *finished)
{
	int err;
	int copy_len;
	struct io_recvmsg_multishot_hdr hdr;

	if (kmsg->namelen)
		kmsg->msg.msg_name = &hdr.addr;
	kmsg->msg.msg_flags = flags & (MSG_CMSG_CLOEXEC|MSG_CMSG_COMPAT);
	kmsg->msg.msg_namelen = 0;

	if (sock->file->f_flags & O_NONBLOCK)
		flags |= MSG_DONTWAIT;

	err = sock_recvmsg(sock, &kmsg->msg, flags);
	*finished = err <= 0;
	if (err < 0)
		return err;

	hdr.msg = (struct io_uring_recvmsg_out) {
		.controllen = kmsg->controllen - kmsg->msg.msg_controllen,
		.flags = kmsg->msg.msg_flags & ~MSG_CMSG_COMPAT
	};

	hdr.msg.payloadlen = err;
	if (err > kmsg->payloadlen)
		err = kmsg->payloadlen;

	copy_len = sizeof(struct io_uring_recvmsg_out);
	if (kmsg->msg.msg_namelen > kmsg->namelen)
		copy_len += kmsg->namelen;
	else
		copy_len += kmsg->msg.msg_namelen;

	/*
	 * "fromlen shall refer to the value before truncation.."
	 *			1003.1g
	 */
	hdr.msg.namelen = kmsg->msg.msg_namelen;

	/* ensure that there is no gap between hdr and sockaddr_storage */
	BUILD_BUG_ON(offsetof(struct io_recvmsg_multishot_hdr, addr) !=
		     sizeof(struct io_uring_recvmsg_out));
	if (copy_to_user(io->buf, &hdr, copy_len)) {
		*finished = true;
		return -EFAULT;
	}

	return sizeof(struct io_uring_recvmsg_out) + kmsg->namelen +
			kmsg->controllen + err;
}

int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_async_msghdr *kmsg = req->async_data;
	struct io_br_sel sel = { };
	struct socket *sock;
	unsigned flags;
	int ret, min_ret = 0;
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
	bool mshot_finished = true;

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;

	if (!(req->flags & REQ_F_POLLED) &&
	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
		return -EAGAIN;

	flags = sr->msg_flags;
	if (force_nonblock)
		flags |= MSG_DONTWAIT;

retry_multishot:
	sel.buf_list = NULL;
	if (io_do_buffer_select(req)) {
		size_t len = sr->len;

		sel = io_buffer_select(req, &len, sr->buf_group, issue_flags);
		if (!sel.addr)
			return -ENOBUFS;

		if (req->flags & REQ_F_APOLL_MULTISHOT) {
			ret = io_recvmsg_prep_multishot(kmsg, sr, &sel.addr, &len);
			if (ret) {
				io_kbuf_recycle(req, sel.buf_list, issue_flags);
				return ret;
			}
		}

		iov_iter_ubuf(&kmsg->msg.msg_iter, ITER_DEST, sel.addr, len);
	}

	kmsg->msg.msg_get_inq = 1;
	kmsg->msg.msg_inq = -1;
	if (req->flags & REQ_F_APOLL_MULTISHOT) {
		ret = io_recvmsg_multishot(sock, sr, kmsg, flags,
					   &mshot_finished);
	} else {
		/* disable partial retry for recvmsg with cmsg attached */
		if (flags & MSG_WAITALL && !kmsg->msg.msg_controllen)
			min_ret = iov_iter_count(&kmsg->msg.msg_iter);

		ret = __sys_recvmsg_sock(sock, &kmsg->msg, sr->umsg,
					 kmsg->uaddr, flags);
	}

	if (ret < min_ret) {
		if (ret == -EAGAIN && force_nonblock) {
			io_kbuf_recycle(req, sel.buf_list, issue_flags);
			return IOU_RETRY;
		}
		if (ret > 0 && io_net_retry(sock, flags)) {
			sr->done_io += ret;
			return io_net_kbuf_recyle(req, sel.buf_list, kmsg, ret);
		}
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	} else if ((flags & MSG_WAITALL) && (kmsg->msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))) {
		req_set_fail(req);
	}

	if (ret > 0)
		ret += sr->done_io;
	else if (sr->done_io)
		ret = sr->done_io;
	else
		io_kbuf_recycle(req, sel.buf_list, issue_flags);

	sel.val = ret;
	if (!io_recv_finish(req, kmsg, &sel, mshot_finished, issue_flags))
		goto retry_multishot;

	return sel.val;
}

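/*
 * Select buffer(s) for a recv. Bundle receives peek a batch of provided
 * buffers, sized by the requested length or what's known to be queued on
 * the socket; the regular path selects a single buffer.
 */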
static int io_recv_buf_select(struct io_kiocb *req, struct io_async_msghdr *kmsg,
			      struct io_br_sel *sel, unsigned int issue_flags)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	int ret;

	/*
	 * If the ring isn't locked, then don't use the peek interface
	 * to grab multiple buffers as we will lock/unlock between
	 * this selection and posting the buffers.
	 */
	if (!(issue_flags & IO_URING_F_UNLOCKED) &&
	    sr->flags & IORING_RECVSEND_BUNDLE) {
		struct buf_sel_arg arg = {
			.iovs = &kmsg->fast_iov,
			.nr_iovs = 1,
			.mode = KBUF_MODE_EXPAND,
			.buf_group = sr->buf_group,
		};

		if (kmsg->vec.iovec) {
			arg.nr_iovs = kmsg->vec.nr;
			arg.iovs = kmsg->vec.iovec;
			arg.mode |= KBUF_MODE_FREE;
		}

		if (sel->val)
			arg.max_len = sel->val;
		else if (kmsg->msg.msg_inq > 1)
			arg.max_len = min_not_zero(sel->val, (ssize_t) kmsg->msg.msg_inq);

		/* if mshot limited, ensure we don't go over */
		if (sr->flags & IORING_RECV_MSHOT_LIM)
			arg.max_len = min_not_zero(arg.max_len, sr->mshot_total_len);
		ret = io_buffers_peek(req, &arg, sel);
		if (unlikely(ret < 0))
			return ret;

		if (arg.iovs != &kmsg->fast_iov && arg.iovs != kmsg->vec.iovec) {
			kmsg->vec.nr = ret;
			kmsg->vec.iovec = arg.iovs;
			req->flags |= REQ_F_NEED_CLEANUP;
		}
		if (arg.partial_map)
			sr->flags |= IORING_RECV_PARTIAL_MAP;

		/* special case 1 vec, can be a fast path */
		if (ret == 1) {
			sr->buf = arg.iovs[0].iov_base;
			sr->len = arg.iovs[0].iov_len;
			goto map_ubuf;
		}
		iov_iter_init(&kmsg->msg.msg_iter, ITER_DEST, arg.iovs, ret,
			      arg.out_len);
	} else {
		size_t len = sel->val;

		*sel = io_buffer_select(req, &len, sr->buf_group, issue_flags);
		if (!sel->addr)
			return -ENOBUFS;
		sr->buf = sel->addr;
		sr->len = len;
map_ubuf:
		ret = import_ubuf(ITER_DEST, sr->buf, sr->len,
				  &kmsg->msg.msg_iter);
		if (unlikely(ret))
			return ret;
	}

	return 0;
}

int io_recv(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_async_msghdr *kmsg = req->async_data;
	struct io_br_sel sel;
	struct socket *sock;
	unsigned flags;
	int ret, min_ret = 0;
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
	bool mshot_finished;

	if (!(req->flags & REQ_F_POLLED) &&
	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
		return -EAGAIN;

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;

	flags = sr->msg_flags;
	if (force_nonblock)
		flags |= MSG_DONTWAIT;

retry_multishot:
	sel.buf_list = NULL;
	if (io_do_buffer_select(req)) {
		sel.val = sr->len;
		ret = io_recv_buf_select(req, kmsg, &sel, issue_flags);
		if (unlikely(ret < 0)) {
			kmsg->msg.msg_inq = -1;
			goto out_free;
		}
		sr->buf = NULL;
	}

	kmsg->msg.msg_flags = 0;
	kmsg->msg.msg_inq = -1;

	if (flags & MSG_WAITALL)
		min_ret = iov_iter_count(&kmsg->msg.msg_iter);

	ret = sock_recvmsg(sock, &kmsg->msg, flags);
	if (ret < min_ret) {
		if (ret == -EAGAIN && force_nonblock) {
			io_kbuf_recycle(req, sel.buf_list, issue_flags);
			return IOU_RETRY;
		}
		if (ret > 0 && io_net_retry(sock, flags)) {
			sr->len -= ret;
			sr->buf += ret;
			sr->done_io += ret;
			return io_net_kbuf_recyle(req, sel.buf_list, kmsg, ret);
		}
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	} else if ((flags & MSG_WAITALL) && (kmsg->msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))) {
out_free:
		req_set_fail(req);
	}

	mshot_finished = ret <= 0;
	if (ret > 0)
		ret += sr->done_io;
	else if (sr->done_io)
		ret = sr->done_io;
	else
		io_kbuf_recycle(req, sel.buf_list, issue_flags);

	sel.val = ret;
	if (!io_recv_finish(req, kmsg, &sel, mshot_finished, issue_flags))
		goto retry_multishot;

	return sel.val;
}

int io_recvzc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_recvzc *zc = io_kiocb_to_cmd(req, struct io_recvzc);
	unsigned ifq_idx;

	if (unlikely(sqe->addr2 || sqe->addr || sqe->addr3))
		return -EINVAL;

	ifq_idx = READ_ONCE(sqe->zcrx_ifq_idx);
	zc->ifq = xa_load(&req->ctx->zcrx_ctxs, ifq_idx);
	if (!zc->ifq)
		return -EINVAL;

	zc->len = READ_ONCE(sqe->len);
	zc->flags = READ_ONCE(sqe->ioprio);
	if (READ_ONCE(sqe->msg_flags))
		return -EINVAL;
	if (zc->flags & ~(IORING_RECVSEND_POLL_FIRST | IORING_RECV_MULTISHOT))
		return -EINVAL;
	/* multishot required */
	if (!(zc->flags & IORING_RECV_MULTISHOT))
		return -EINVAL;
	/* All data completions are posted as aux CQEs. */
	req->flags |= REQ_F_APOLL_MULTISHOT;

	return 0;
}

int io_recvzc(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_recvzc *zc = io_kiocb_to_cmd(req, struct io_recvzc);
	struct socket *sock;
	unsigned int len;
	int ret;

	if (!(req->flags & REQ_F_POLLED) &&
	    (zc->flags & IORING_RECVSEND_POLL_FIRST))
		return -EAGAIN;

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;

	len = zc->len;
	ret = io_zcrx_recv(req, zc->ifq, sock, 0, issue_flags, &zc->len);
	if (len && zc->len == 0) {
		io_req_set_res(req, 0, 0);

		return IOU_COMPLETE;
	}
	if (unlikely(ret <= 0) && ret != -EAGAIN) {
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		if (ret == IOU_REQUEUE)
			return IOU_REQUEUE;

		req_set_fail(req);
		io_req_set_res(req, ret, 0);
		return IOU_COMPLETE;
	}
	return IOU_RETRY;
}

void io_send_zc_cleanup(struct io_kiocb *req)
{
	struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_async_msghdr *io = req->async_data;

	if (req_has_async_data(req))
		io_netmsg_iovec_free(io);
	if (zc->notif) {
		io_notif_flush(zc->notif);
		zc->notif = NULL;
	}
}

#define IO_ZC_FLAGS_COMMON (IORING_RECVSEND_POLL_FIRST | IORING_RECVSEND_FIXED_BUF)
#define IO_ZC_FLAGS_VALID  (IO_ZC_FLAGS_COMMON | IORING_SEND_ZC_REPORT_USAGE | \
			    IORING_SEND_VECTORIZED)

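/*
 * Zerocopy send prep: allocate a notification request up front. The data
 * CQE is posted when the send itself completes, and a separate CQE flagged
 * IORING_CQE_F_NOTIF is posted once the kernel is done with the buffer.
 */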
int io_send_zc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_ring_ctx *ctx = req->ctx;
	struct io_async_msghdr *iomsg;
	struct io_kiocb *notif;
	int ret;

	zc->done_io = 0;

	if (unlikely(READ_ONCE(sqe->__pad2[0]) || READ_ONCE(sqe->addr3)))
		return -EINVAL;
	/* we don't support IOSQE_CQE_SKIP_SUCCESS just yet */
	if (req->flags & REQ_F_CQE_SKIP)
		return -EINVAL;

	notif = zc->notif = io_alloc_notif(ctx);
	if (!notif)
		return -ENOMEM;
	notif->cqe.user_data = req->cqe.user_data;
	notif->cqe.res = 0;
	notif->cqe.flags = IORING_CQE_F_NOTIF;
	req->flags |= REQ_F_NEED_CLEANUP | REQ_F_POLL_NO_LAZY;

	zc->flags = READ_ONCE(sqe->ioprio);
	if (unlikely(zc->flags & ~IO_ZC_FLAGS_COMMON)) {
		if (zc->flags & ~IO_ZC_FLAGS_VALID)
			return -EINVAL;
		if (zc->flags & IORING_SEND_ZC_REPORT_USAGE) {
			struct io_notif_data *nd = io_notif_to_data(notif);

			nd->zc_report = true;
			nd->zc_used = false;
			nd->zc_copied = false;
		}
	}

	zc->len = READ_ONCE(sqe->len);
	zc->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL | MSG_ZEROCOPY;
	req->buf_index = READ_ONCE(sqe->buf_index);
	if (zc->msg_flags & MSG_DONTWAIT)
		req->flags |= REQ_F_NOWAIT;

	if (io_is_compat(req->ctx))
		zc->msg_flags |= MSG_CMSG_COMPAT;

	iomsg = io_msg_alloc_async(req);
	if (unlikely(!iomsg))
		return -ENOMEM;

	if (req->opcode == IORING_OP_SEND_ZC) {
		ret = io_send_setup(req, sqe);
	} else {
		if (unlikely(sqe->addr2 || sqe->file_index))
			return -EINVAL;
		ret = io_sendmsg_setup(req, sqe);
	}
	if (unlikely(ret))
		return ret;

	if (!(zc->flags & IORING_RECVSEND_FIXED_BUF)) {
		iomsg->msg.sg_from_iter = io_sg_from_iter_iovec;
		return io_notif_account_mem(zc->notif, iomsg->msg.msg_iter.count);
	}
	iomsg->msg.sg_from_iter = io_sg_from_iter;
	return 0;
}

static int io_sg_from_iter_iovec(struct sk_buff *skb,
				 struct iov_iter *from, size_t length)
{
	skb_zcopy_downgrade_managed(skb);
	return zerocopy_fill_skb_from_iter(skb, from, length);
}

static int io_sg_from_iter(struct sk_buff *skb,
			   struct iov_iter *from, size_t length)
{
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	int frag = shinfo->nr_frags;
	int ret = 0;
	struct bvec_iter bi;
	ssize_t copied = 0;
	unsigned long truesize = 0;

	if (!frag)
		shinfo->flags |= SKBFL_MANAGED_FRAG_REFS;
	else if (unlikely(!skb_zcopy_managed(skb)))
		return zerocopy_fill_skb_from_iter(skb, from, length);

	bi.bi_size = min(from->count, length);
	bi.bi_bvec_done = from->iov_offset;
	bi.bi_idx = 0;

	while (bi.bi_size && frag < MAX_SKB_FRAGS) {
		struct bio_vec v = mp_bvec_iter_bvec(from->bvec, bi);

		copied += v.bv_len;
		truesize += PAGE_ALIGN(v.bv_len + v.bv_offset);
		__skb_fill_page_desc_noacc(shinfo, frag++, v.bv_page,
					   v.bv_offset, v.bv_len);
		bvec_iter_advance_single(from->bvec, &bi, v.bv_len);
	}
	if (bi.bi_size)
		ret = -EMSGSIZE;

	shinfo->nr_frags = frag;
	from->bvec += bi.bi_idx;
	from->nr_segs -= bi.bi_idx;
	from->count -= copied;
	from->iov_offset = bi.bi_bvec_done;

	skb->data_len += copied;
	skb->len += copied;
	skb->truesize += truesize;
	return ret;
}

static int io_send_zc_import(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_async_msghdr *kmsg = req->async_data;

	WARN_ON_ONCE(!(sr->flags & IORING_RECVSEND_FIXED_BUF));

	sr->notif->buf_index = req->buf_index;
	return io_import_reg_buf(sr->notif, &kmsg->msg.msg_iter,
				 (u64)(uintptr_t)sr->buf, sr->len,
				 ITER_SOURCE, issue_flags);
}

int io_send_zc(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_async_msghdr *kmsg = req->async_data;
	struct socket *sock;
	unsigned msg_flags;
	int ret, min_ret = 0;

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;
	if (!test_bit(SOCK_SUPPORT_ZC, &sock->flags))
		return -EOPNOTSUPP;

	if (!(req->flags & REQ_F_POLLED) &&
	    (zc->flags & IORING_RECVSEND_POLL_FIRST))
		return -EAGAIN;

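	/*
	 * For registered (fixed) buffers, the import was deferred from prep
	 * time; map the registered buffer into the msg iterator now, on
	 * behalf of the notif request.
	 */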
	if (req->flags & REQ_F_IMPORT_BUFFER) {
		req->flags &= ~REQ_F_IMPORT_BUFFER;
		ret = io_send_zc_import(req, issue_flags);
		if (unlikely(ret))
			return ret;
	}

	msg_flags = zc->msg_flags;
	if (issue_flags & IO_URING_F_NONBLOCK)
		msg_flags |= MSG_DONTWAIT;
	if (msg_flags & MSG_WAITALL)
		min_ret = iov_iter_count(&kmsg->msg.msg_iter);
	msg_flags &= ~MSG_INTERNAL_SENDMSG_FLAGS;

	kmsg->msg.msg_flags = msg_flags;
	kmsg->msg.msg_ubuf = &io_notif_to_data(zc->notif)->uarg;
	ret = sock_sendmsg(sock, &kmsg->msg);

	if (unlikely(ret < min_ret)) {
		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
			return -EAGAIN;

		if (ret > 0 && io_net_retry(sock, kmsg->msg.msg_flags)) {
			zc->len -= ret;
			zc->buf += ret;
			zc->done_io += ret;
			return -EAGAIN;
		}
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	}

	if (ret >= 0)
		ret += zc->done_io;
	else if (zc->done_io)
		ret = zc->done_io;

	/*
	 * If we're in io-wq we can't rely on tw ordering guarantees, defer
	 * flushing notif to io_send_zc_cleanup()
	 */
	if (!(issue_flags & IO_URING_F_UNLOCKED)) {
		io_notif_flush(zc->notif);
		zc->notif = NULL;
		io_req_msg_cleanup(req, 0);
	}
	io_req_set_res(req, ret, IORING_CQE_F_MORE);
	return IOU_COMPLETE;
}

int io_sendmsg_zc(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_async_msghdr *kmsg = req->async_data;
	struct socket *sock;
	unsigned flags;
	int ret, min_ret = 0;

	if (req->flags & REQ_F_IMPORT_BUFFER) {
		unsigned uvec_segs = kmsg->msg.msg_iter.nr_segs;
		int ret;

		sr->notif->buf_index = req->buf_index;
		ret = io_import_reg_vec(ITER_SOURCE, &kmsg->msg.msg_iter,
					sr->notif, &kmsg->vec, uvec_segs,
					issue_flags);
		if (unlikely(ret))
			return ret;
		req->flags &= ~REQ_F_IMPORT_BUFFER;
	}

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;
	if (!test_bit(SOCK_SUPPORT_ZC, &sock->flags))
		return -EOPNOTSUPP;

	if (!(req->flags & REQ_F_POLLED) &&
	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
		return -EAGAIN;

	flags = sr->msg_flags;
	if (issue_flags & IO_URING_F_NONBLOCK)
		flags |= MSG_DONTWAIT;
	if (flags & MSG_WAITALL)
		min_ret = iov_iter_count(&kmsg->msg.msg_iter);

	kmsg->msg.msg_control_user = sr->msg_control;
	kmsg->msg.msg_ubuf = &io_notif_to_data(sr->notif)->uarg;
	ret = __sys_sendmsg_sock(sock, &kmsg->msg, flags);

	if (unlikely(ret < min_ret)) {
		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
			return -EAGAIN;

		if (ret > 0 && io_net_retry(sock, flags)) {
			sr->done_io += ret;
			return -EAGAIN;
		}
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	}

	if (ret >= 0)
		ret += sr->done_io;
	else if (sr->done_io)
		ret = sr->done_io;

	/*
	 * If we're in io-wq we can't rely on tw ordering guarantees, defer
	 * flushing notif to io_send_zc_cleanup()
	 */
	if (!(issue_flags & IO_URING_F_UNLOCKED)) {
		io_notif_flush(sr->notif);
		sr->notif = NULL;
		io_req_msg_cleanup(req, 0);
	}
	io_req_set_res(req, ret, IORING_CQE_F_MORE);
	return IOU_COMPLETE;
}

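/*
 * Common failure hook for send/recv opcodes: report any partial progress as
 * the result, and for zerocopy sends keep IORING_CQE_F_MORE set since a
 * notification CQE will still be posted.
 */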
void io_sendrecv_fail(struct io_kiocb *req)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);

	if (sr->done_io)
		req->cqe.res = sr->done_io;

	if ((req->flags & REQ_F_NEED_CLEANUP) &&
	    (req->opcode == IORING_OP_SEND_ZC || req->opcode == IORING_OP_SENDMSG_ZC))
		req->cqe.flags |= IORING_CQE_F_MORE;
}

#define ACCEPT_FLAGS	(IORING_ACCEPT_MULTISHOT | IORING_ACCEPT_DONTWAIT | \
			 IORING_ACCEPT_POLL_FIRST)

int io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_accept *accept = io_kiocb_to_cmd(req, struct io_accept);

	if (sqe->len || sqe->buf_index)
		return -EINVAL;

	accept->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
	accept->addr_len = u64_to_user_ptr(READ_ONCE(sqe->addr2));
	accept->flags = READ_ONCE(sqe->accept_flags);
	accept->nofile = rlimit(RLIMIT_NOFILE);
	accept->iou_flags = READ_ONCE(sqe->ioprio);
	if (accept->iou_flags & ~ACCEPT_FLAGS)
		return -EINVAL;

	accept->file_slot = READ_ONCE(sqe->file_index);
	if (accept->file_slot) {
		if (accept->flags & SOCK_CLOEXEC)
			return -EINVAL;
		if (accept->iou_flags & IORING_ACCEPT_MULTISHOT &&
		    accept->file_slot != IORING_FILE_INDEX_ALLOC)
			return -EINVAL;
	}
	if (accept->flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK))
		return -EINVAL;
	if (SOCK_NONBLOCK != O_NONBLOCK && (accept->flags & SOCK_NONBLOCK))
		accept->flags = (accept->flags & ~SOCK_NONBLOCK) | O_NONBLOCK;
	if (accept->iou_flags & IORING_ACCEPT_MULTISHOT)
		req->flags |= REQ_F_APOLL_MULTISHOT;
	if (accept->iou_flags & IORING_ACCEPT_DONTWAIT)
		req->flags |= REQ_F_NOWAIT;
	return 0;
}

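/*
 * Accept a connection. In multishot mode each accepted socket gets its own
 * CQE flagged with IORING_CQE_F_MORE, and the request keeps retrying as
 * long as further connections appear to be pending.
 */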
int io_accept(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_accept *accept = io_kiocb_to_cmd(req, struct io_accept);
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
	bool fixed = !!accept->file_slot;
	struct proto_accept_arg arg = {
		.flags = force_nonblock ? O_NONBLOCK : 0,
	};
	struct file *file;
	unsigned cflags;
	int ret, fd;

	if (!(req->flags & REQ_F_POLLED) &&
	    accept->iou_flags & IORING_ACCEPT_POLL_FIRST)
		return -EAGAIN;

retry:
	if (!fixed) {
		fd = __get_unused_fd_flags(accept->flags, accept->nofile);
		if (unlikely(fd < 0))
			return fd;
	}
	arg.err = 0;
	arg.is_empty = -1;
	file = do_accept(req->file, &arg, accept->addr, accept->addr_len,
			 accept->flags);
	if (IS_ERR(file)) {
		if (!fixed)
			put_unused_fd(fd);
		ret = PTR_ERR(file);
		if (ret == -EAGAIN && force_nonblock &&
		    !(accept->iou_flags & IORING_ACCEPT_DONTWAIT))
			return IOU_RETRY;

		if (ret == -ERESTARTSYS)
			ret = -EINTR;
	} else if (!fixed) {
		fd_install(fd, file);
		ret = fd;
	} else {
		ret = io_fixed_fd_install(req, issue_flags, file,
					  accept->file_slot);
	}

	cflags = 0;
	if (!arg.is_empty)
		cflags |= IORING_CQE_F_SOCK_NONEMPTY;

	if (ret >= 0 && (req->flags & REQ_F_APOLL_MULTISHOT) &&
	    io_req_post_cqe(req, ret, cflags | IORING_CQE_F_MORE)) {
		if (cflags & IORING_CQE_F_SOCK_NONEMPTY || arg.is_empty == -1)
			goto retry;
		return IOU_RETRY;
	}

	io_req_set_res(req, ret, cflags);
	if (ret < 0)
		req_set_fail(req);
	return IOU_COMPLETE;
}

void io_socket_bpf_populate(struct io_uring_bpf_ctx *bctx, struct io_kiocb *req)
{
	struct io_socket *sock = io_kiocb_to_cmd(req, struct io_socket);

	bctx->socket.family = sock->domain;
	bctx->socket.type = sock->type;
	bctx->socket.protocol = sock->protocol;
}

int io_socket_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_socket *sock = io_kiocb_to_cmd(req, struct io_socket);

	if (sqe->addr || sqe->rw_flags || sqe->buf_index)
		return -EINVAL;

	sock->domain = READ_ONCE(sqe->fd);
	sock->type = READ_ONCE(sqe->off);
	sock->protocol = READ_ONCE(sqe->len);
	sock->file_slot = READ_ONCE(sqe->file_index);
	sock->nofile = rlimit(RLIMIT_NOFILE);

	sock->flags = sock->type & ~SOCK_TYPE_MASK;
	if (sock->file_slot && (sock->flags & SOCK_CLOEXEC))
		return -EINVAL;
	if (sock->flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK))
		return -EINVAL;
	return 0;
}

int io_socket(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_socket *sock = io_kiocb_to_cmd(req, struct io_socket);
	bool fixed = !!sock->file_slot;
	struct file *file;
	int ret, fd;

	if (!fixed) {
		fd = __get_unused_fd_flags(sock->flags, sock->nofile);
		if (unlikely(fd < 0))
			return fd;
	}
	file = __sys_socket_file(sock->domain, sock->type, sock->protocol);
	if (IS_ERR(file)) {
		if (!fixed)
			put_unused_fd(fd);
		ret = PTR_ERR(file);
		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
			return -EAGAIN;
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	} else if (!fixed) {
		fd_install(fd, file);
		ret = fd;
	} else {
		ret = io_fixed_fd_install(req, issue_flags, file,
					  sock->file_slot);
	}
	io_req_set_res(req, ret, 0);
	return IOU_COMPLETE;
}

int io_connect_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_connect *conn = io_kiocb_to_cmd(req, struct io_connect);
	struct io_async_msghdr *io;

	if (sqe->len || sqe->buf_index || sqe->rw_flags || sqe->splice_fd_in)
		return -EINVAL;

	conn->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
	conn->addr_len = READ_ONCE(sqe->addr2);
	conn->in_progress = conn->seen_econnaborted = false;

	io = io_msg_alloc_async(req);
	if (unlikely(!io))
		return -ENOMEM;

	return move_addr_to_kernel(conn->addr, conn->addr_len, &io->addr);
}

int io_connect(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_connect *connect = io_kiocb_to_cmd(req, struct io_connect);
	struct io_async_msghdr *io = req->async_data;
	unsigned file_flags;
	int ret;
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;

	if (connect->in_progress) {
		struct poll_table_struct pt = { ._key = EPOLLERR };

		if (vfs_poll(req->file, &pt) & EPOLLERR)
			goto get_sock_err;
	}

	file_flags = force_nonblock ? O_NONBLOCK : 0;

	ret = __sys_connect_file(req->file, &io->addr, connect->addr_len,
				 file_flags);
	if ((ret == -EAGAIN || ret == -EINPROGRESS || ret == -ECONNABORTED)
	    && force_nonblock) {
		if (ret == -EINPROGRESS) {
			connect->in_progress = true;
		} else if (ret == -ECONNABORTED) {
			if (connect->seen_econnaborted)
				goto out;
			connect->seen_econnaborted = true;
		}
		return -EAGAIN;
	}
	if (connect->in_progress) {
		/*
		 * At least bluetooth will return -EBADFD on a re-connect
		 * attempt, and it's (supposedly) also valid to get -EISCONN
		 * which means the previous result is good. For both of these,
		 * grab the sock_error() and use that for the completion.
		 */
		if (ret == -EBADFD || ret == -EISCONN) {
get_sock_err:
			ret = sock_error(sock_from_file(req->file)->sk);
		}
	}
	if (ret == -ERESTARTSYS)
		ret = -EINTR;
out:
	if (ret < 0)
		req_set_fail(req);
	io_req_msg_cleanup(req, issue_flags);
	io_req_set_res(req, ret, 0);
	return IOU_COMPLETE;
}

int io_bind_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_bind *bind = io_kiocb_to_cmd(req, struct io_bind);
	struct sockaddr __user *uaddr;
	struct io_async_msghdr *io;

	if (sqe->len || sqe->buf_index || sqe->rw_flags || sqe->splice_fd_in)
		return -EINVAL;

	uaddr = u64_to_user_ptr(READ_ONCE(sqe->addr));
	bind->addr_len = READ_ONCE(sqe->addr2);

	io = io_msg_alloc_async(req);
	if (unlikely(!io))
		return -ENOMEM;
	return move_addr_to_kernel(uaddr, bind->addr_len, &io->addr);
}

int io_bind(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_bind *bind = io_kiocb_to_cmd(req, struct io_bind);
	struct io_async_msghdr *io = req->async_data;
	struct socket *sock;
	int ret;

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;

	ret = __sys_bind_socket(sock, &io->addr, bind->addr_len);
	if (ret < 0)
		req_set_fail(req);
	io_req_set_res(req, ret, 0);
	return 0;
}

int io_listen_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_listen *listen = io_kiocb_to_cmd(req, struct io_listen);

	if (sqe->addr || sqe->buf_index || sqe->rw_flags || sqe->splice_fd_in || sqe->addr2)
		return -EINVAL;

	listen->backlog = READ_ONCE(sqe->len);
	return 0;
}

int io_listen(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_listen *listen = io_kiocb_to_cmd(req, struct io_listen);
	struct socket *sock;
	int ret;

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;

	ret = __sys_listen_socket(sock, listen->backlog);
	if (ret < 0)
		req_set_fail(req);
	io_req_set_res(req, ret, 0);
	return 0;
}

void io_netmsg_cache_free(const void *entry)
{
	struct io_async_msghdr *kmsg = (struct io_async_msghdr *) entry;

	io_vec_free(&kmsg->vec);
	kfree(kmsg);
}