// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/net.h>
#include <linux/compat.h>
#include <net/compat.h>
#include <linux/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "kbuf.h"
#include "alloc_cache.h"
#include "net.h"
#include "notif.h"
#include "rsrc.h"

#if defined(CONFIG_NET)
struct io_shutdown {
	struct file *file;
	int how;
};

struct io_accept {
	struct file *file;
	struct sockaddr __user *addr;
	int __user *addr_len;
	int flags;
	int iou_flags;
	u32 file_slot;
	unsigned long nofile;
};

struct io_socket {
	struct file *file;
	int domain;
	int type;
	int protocol;
	int flags;
	u32 file_slot;
	unsigned long nofile;
};

struct io_connect {
	struct file *file;
	struct sockaddr __user *addr;
	int addr_len;
	bool in_progress;
	bool seen_econnaborted;
};

struct io_bind {
	struct file *file;
	int addr_len;
};

struct io_listen {
	struct file *file;
	int backlog;
};

struct io_sr_msg {
	struct file *file;
	union {
		struct compat_msghdr __user *umsg_compat;
		struct user_msghdr __user *umsg;
		void __user *buf;
	};
	int len;
	unsigned done_io;
	unsigned msg_flags;
	unsigned nr_multishot_loops;
	u16 flags;
	/* initialised and used only by !msg send variants */
	u16 buf_group;
	u16 buf_index;
	void __user *msg_control;
	/* used only for send zerocopy */
	struct io_kiocb *notif;
};

/*
 * Number of times we'll try and do receives if there's more data. If we
 * exceed this limit, then add us to the back of the queue and retry from
 * there. This helps fairness between flooding clients.
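 * When the limit is hit, io_recv_finish() resets the counter and returns
 * IOU_REQUEUE (or -EAGAIN) so the request is rescheduled instead of looping
 * inline.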
 */
#define MULTISHOT_MAX_RETRY	32

int io_shutdown_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_shutdown *shutdown = io_kiocb_to_cmd(req, struct io_shutdown);

	if (unlikely(sqe->off || sqe->addr || sqe->rw_flags ||
		     sqe->buf_index || sqe->splice_fd_in))
		return -EINVAL;

	shutdown->how = READ_ONCE(sqe->len);
	req->flags |= REQ_F_FORCE_ASYNC;
	return 0;
}

int io_shutdown(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_shutdown *shutdown = io_kiocb_to_cmd(req, struct io_shutdown);
	struct socket *sock;
	int ret;

	WARN_ON_ONCE(issue_flags & IO_URING_F_NONBLOCK);

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;

	ret = __sys_shutdown_sock(sock, shutdown->how);
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}

static bool io_net_retry(struct socket *sock, int flags)
{
	if (!(flags & MSG_WAITALL))
		return false;
	return sock->type == SOCK_STREAM || sock->type == SOCK_SEQPACKET;
}

static void io_netmsg_iovec_free(struct io_async_msghdr *kmsg)
{
	if (kmsg->free_iov) {
		kfree(kmsg->free_iov);
		kmsg->free_iov_nr = 0;
		kmsg->free_iov = NULL;
	}
}

static void io_netmsg_recycle(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_async_msghdr *hdr = req->async_data;

	/* can't recycle, ensure we free the iovec if we have one */
	if (unlikely(issue_flags & IO_URING_F_UNLOCKED)) {
		io_netmsg_iovec_free(hdr);
		return;
	}

	/* Let normal cleanup path reap it if we fail adding to the cache */
	io_alloc_cache_kasan(&hdr->free_iov, &hdr->free_iov_nr);
	if (io_alloc_cache_put(&req->ctx->netmsg_cache, hdr)) {
		req->async_data = NULL;
		req->flags &= ~REQ_F_ASYNC_DATA;
	}
}

static struct io_async_msghdr *io_msg_alloc_async(struct io_kiocb *req)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct io_async_msghdr *hdr;

	hdr = io_uring_alloc_async_data(&ctx->netmsg_cache, req);
	if (!hdr)
		return NULL;

	/* If the async data was cached, we might have an iov cached inside. */
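	/*
	 * Setting REQ_F_NEED_CLEANUP below ensures the cached iovec gets freed
	 * or recycled once the request completes.
	 */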
	if (hdr->free_iov)
		req->flags |= REQ_F_NEED_CLEANUP;
	return hdr;
}

/* assign new iovec to kmsg, if we need to */
static void io_net_vec_assign(struct io_kiocb *req, struct io_async_msghdr *kmsg,
			      struct iovec *iov)
{
	if (iov) {
		req->flags |= REQ_F_NEED_CLEANUP;
		kmsg->free_iov_nr = kmsg->msg.msg_iter.nr_segs;
		if (kmsg->free_iov)
			kfree(kmsg->free_iov);
		kmsg->free_iov = iov;
	}
}

static inline void io_mshot_prep_retry(struct io_kiocb *req,
				       struct io_async_msghdr *kmsg)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);

	req->flags &= ~REQ_F_BL_EMPTY;
	sr->done_io = 0;
	sr->len = 0;	/* get from the provided buffer */
	req->buf_index = sr->buf_group;
}

#ifdef CONFIG_COMPAT
static int io_compat_msg_copy_hdr(struct io_kiocb *req,
				  struct io_async_msghdr *iomsg,
				  struct compat_msghdr *msg, int ddir)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct compat_iovec __user *uiov;
	struct iovec *iov;
	int ret, nr_segs;

	if (iomsg->free_iov) {
		nr_segs = iomsg->free_iov_nr;
		iov = iomsg->free_iov;
	} else {
		iov = &iomsg->fast_iov;
		nr_segs = 1;
	}

	if (copy_from_user(msg, sr->umsg_compat, sizeof(*msg)))
		return -EFAULT;

	uiov = compat_ptr(msg->msg_iov);
	if (req->flags & REQ_F_BUFFER_SELECT) {
		compat_ssize_t clen;

		if (msg->msg_iovlen == 0) {
			sr->len = iov->iov_len = 0;
			iov->iov_base = NULL;
		} else if (msg->msg_iovlen > 1) {
			return -EINVAL;
		} else {
			if (!access_ok(uiov, sizeof(*uiov)))
				return -EFAULT;
			if (__get_user(clen, &uiov->iov_len))
				return -EFAULT;
			if (clen < 0)
				return -EINVAL;
			sr->len = clen;
		}

		return 0;
	}

	ret = __import_iovec(ddir, (struct iovec __user *)uiov, msg->msg_iovlen,
			     nr_segs, &iov, &iomsg->msg.msg_iter, true);
	if (unlikely(ret < 0))
		return ret;

	io_net_vec_assign(req, iomsg, iov);
	return 0;
}
#endif

static int io_msg_copy_hdr(struct io_kiocb *req, struct io_async_msghdr *iomsg,
			   struct user_msghdr *msg, int ddir)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct user_msghdr __user *umsg = sr->umsg;
	struct iovec *iov;
	int ret, nr_segs;

	if (iomsg->free_iov) {
		nr_segs = iomsg->free_iov_nr;
		iov = iomsg->free_iov;
	} else {
		iov = &iomsg->fast_iov;
		nr_segs = 1;
	}

	if (!user_access_begin(umsg, sizeof(*umsg)))
		return -EFAULT;

	ret = -EFAULT;
	unsafe_get_user(msg->msg_name, &umsg->msg_name, ua_end);
	unsafe_get_user(msg->msg_namelen, &umsg->msg_namelen, ua_end);
	unsafe_get_user(msg->msg_iov, &umsg->msg_iov, ua_end);
	unsafe_get_user(msg->msg_iovlen, &umsg->msg_iovlen, ua_end);
	unsafe_get_user(msg->msg_control, &umsg->msg_control, ua_end);
	unsafe_get_user(msg->msg_controllen, &umsg->msg_controllen, ua_end);
	msg->msg_flags = 0;

	if (req->flags & REQ_F_BUFFER_SELECT) {
		if (msg->msg_iovlen == 0) {
			sr->len = iov->iov_len = 0;
			iov->iov_base = NULL;
		} else if (msg->msg_iovlen > 1) {
			ret = -EINVAL;
			goto ua_end;
		} else {
			struct iovec __user *uiov = msg->msg_iov;

			/* we only need the length for provided buffers */
			if (!access_ok(&uiov->iov_len, sizeof(uiov->iov_len)))
				goto ua_end;
			unsafe_get_user(iov->iov_len, &uiov->iov_len, ua_end);
			sr->len = iov->iov_len;
		}
		ret = 0;
ua_end:
		user_access_end();
		return ret;
	}

	user_access_end();
	ret = __import_iovec(ddir, msg->msg_iov, msg->msg_iovlen, nr_segs,
			     &iov, &iomsg->msg.msg_iter, false);
	if (unlikely(ret < 0))
		return ret;

	io_net_vec_assign(req, iomsg, iov);
	return 0;
}

static int io_sendmsg_copy_hdr(struct io_kiocb *req,
			       struct io_async_msghdr *iomsg)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct user_msghdr msg;
	int ret;

	iomsg->msg.msg_name = &iomsg->addr;
	iomsg->msg.msg_iter.nr_segs = 0;

#ifdef CONFIG_COMPAT
	if (unlikely(req->ctx->compat)) {
		struct compat_msghdr cmsg;

		ret = io_compat_msg_copy_hdr(req, iomsg, &cmsg, ITER_SOURCE);
		if (unlikely(ret))
			return ret;

		return __get_compat_msghdr(&iomsg->msg, &cmsg, NULL);
	}
#endif

	ret = io_msg_copy_hdr(req, iomsg, &msg, ITER_SOURCE);
	if (unlikely(ret))
		return ret;

	ret = __copy_msghdr(&iomsg->msg, &msg, NULL);

	/* save msg_control as sys_sendmsg() overwrites it */
	sr->msg_control = iomsg->msg.msg_control_user;
	return ret;
}

void io_sendmsg_recvmsg_cleanup(struct io_kiocb *req)
{
	struct io_async_msghdr *io = req->async_data;

	io_netmsg_iovec_free(io);
}

static int io_send_setup(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_async_msghdr *kmsg = req->async_data;
	void __user *addr;
	u16 addr_len;
	int ret;

	sr->buf = u64_to_user_ptr(READ_ONCE(sqe->addr));

	if (READ_ONCE(sqe->__pad3[0]))
		return -EINVAL;

	kmsg->msg.msg_name = NULL;
	kmsg->msg.msg_namelen = 0;
	kmsg->msg.msg_control = NULL;
	kmsg->msg.msg_controllen = 0;
	kmsg->msg.msg_ubuf = NULL;

	addr = u64_to_user_ptr(READ_ONCE(sqe->addr2));
	addr_len = READ_ONCE(sqe->addr_len);
	if (addr) {
		ret = move_addr_to_kernel(addr, addr_len, &kmsg->addr);
		if (unlikely(ret < 0))
			return ret;
		kmsg->msg.msg_name = &kmsg->addr;
		kmsg->msg.msg_namelen = addr_len;
	}
	if (!io_do_buffer_select(req)) {
		ret = import_ubuf(ITER_SOURCE, sr->buf, sr->len,
				  &kmsg->msg.msg_iter);
		if (unlikely(ret < 0))
			return ret;
	}
	return 0;
}

static int io_sendmsg_setup(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_async_msghdr *kmsg = req->async_data;
	int ret;

	sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));

	ret = io_sendmsg_copy_hdr(req, kmsg);
	if (!ret)
		req->flags |= REQ_F_NEED_CLEANUP;
	return ret;
}

#define SENDMSG_FLAGS (IORING_RECVSEND_POLL_FIRST | IORING_RECVSEND_BUNDLE)

int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);

	sr->done_io = 0;

	if (req->opcode != IORING_OP_SEND) {
		if (sqe->addr2 || sqe->file_index)
			return -EINVAL;
	}

	sr->len = READ_ONCE(sqe->len);
	sr->flags = READ_ONCE(sqe->ioprio);
	if (sr->flags & ~SENDMSG_FLAGS)
		return -EINVAL;
	sr->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL;
	if (sr->msg_flags & MSG_DONTWAIT)
		req->flags |= REQ_F_NOWAIT;
	if (sr->flags & IORING_RECVSEND_BUNDLE) {
		if (req->opcode == IORING_OP_SENDMSG)
			return -EINVAL;
		if (!(req->flags & REQ_F_BUFFER_SELECT))
			return -EINVAL;
		sr->msg_flags |= MSG_WAITALL;
		sr->buf_group = req->buf_index;
		req->buf_list = NULL;
	}

#ifdef CONFIG_COMPAT
	if (req->ctx->compat)
		sr->msg_flags |= MSG_CMSG_COMPAT;
#endif
	if (unlikely(!io_msg_alloc_async(req)))
		return -ENOMEM;
	if (req->opcode != IORING_OP_SENDMSG)
		return io_send_setup(req, sqe);
	return io_sendmsg_setup(req, sqe);
}

static void io_req_msg_cleanup(struct io_kiocb *req,
			       unsigned int issue_flags)
{
	req->flags &= ~REQ_F_NEED_CLEANUP;
	io_netmsg_recycle(req, issue_flags);
}

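/*
 * Illustrative example (hypothetical numbers): if a bundle maps three
 * provided buffers of 100, 200 and 300 bytes and the send transfers only
 * 250 bytes, io_bundle_nbufs() below counts two segments: 100 bytes from
 * the first iovec and the remaining 150 from the second.
 */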
/*
 * For bundle completions, we need to figure out how many segments we consumed.
 * A bundle could be using a single ITER_UBUF if that's all we mapped, or it
 * could be using an ITER_IOVEC. If the latter, then if we consumed all of
 * the segments, then it's a trivial question to answer. If we have residual
 * data in the iter, then loop the segments to figure out how much we
 * transferred.
 */
static int io_bundle_nbufs(struct io_async_msghdr *kmsg, int ret)
{
	struct iovec *iov;
	int nbufs;

	/* no data is always zero segments, and a ubuf is always 1 segment */
	if (ret <= 0)
		return 0;
	if (iter_is_ubuf(&kmsg->msg.msg_iter))
		return 1;

	iov = kmsg->free_iov;
	if (!iov)
		iov = &kmsg->fast_iov;

	/* if all data was transferred, it's basic pointer math */
	if (!iov_iter_count(&kmsg->msg.msg_iter))
		return iter_iov(&kmsg->msg.msg_iter) - iov;

	/* short transfer, count segments */
	nbufs = 0;
	do {
		int this_len = min_t(int, iov[nbufs].iov_len, ret);

		nbufs++;
		ret -= this_len;
	} while (ret);

	return nbufs;
}

static inline bool io_send_finish(struct io_kiocb *req, int *ret,
				  struct io_async_msghdr *kmsg,
				  unsigned issue_flags)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	bool bundle_finished = *ret <= 0;
	unsigned int cflags;

	if (!(sr->flags & IORING_RECVSEND_BUNDLE)) {
		cflags = io_put_kbuf(req, *ret, issue_flags);
		goto finish;
	}

	cflags = io_put_kbufs(req, *ret, io_bundle_nbufs(kmsg, *ret), issue_flags);

	if (bundle_finished || req->flags & REQ_F_BL_EMPTY)
		goto finish;

	/*
	 * Fill CQE for this receive and see if we should keep trying to
	 * receive from this socket.
	 */
	if (io_req_post_cqe(req, *ret, cflags | IORING_CQE_F_MORE)) {
		io_mshot_prep_retry(req, kmsg);
		return false;
	}

	/* Otherwise stop bundle and use the current result. */
finish:
	io_req_set_res(req, *ret, cflags);
	*ret = IOU_OK;
	return true;
}

int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_async_msghdr *kmsg = req->async_data;
	struct socket *sock;
	unsigned flags;
	int min_ret = 0;
	int ret;

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;

	if (!(req->flags & REQ_F_POLLED) &&
	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
		return -EAGAIN;

	flags = sr->msg_flags;
	if (issue_flags & IO_URING_F_NONBLOCK)
		flags |= MSG_DONTWAIT;
	if (flags & MSG_WAITALL)
		min_ret = iov_iter_count(&kmsg->msg.msg_iter);

	kmsg->msg.msg_control_user = sr->msg_control;

	ret = __sys_sendmsg_sock(sock, &kmsg->msg, flags);

	if (ret < min_ret) {
		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
			return -EAGAIN;
		if (ret > 0 && io_net_retry(sock, flags)) {
			kmsg->msg.msg_controllen = 0;
			kmsg->msg.msg_control = NULL;
			sr->done_io += ret;
			req->flags |= REQ_F_BL_NO_RECYCLE;
			return -EAGAIN;
		}
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	}
	io_req_msg_cleanup(req, issue_flags);
	if (ret >= 0)
		ret += sr->done_io;
	else if (sr->done_io)
		ret = sr->done_io;
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}

static int io_send_select_buffer(struct io_kiocb *req, unsigned int issue_flags,
				 struct io_async_msghdr *kmsg)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);

	int ret;
	struct buf_sel_arg arg = {
		.iovs = &kmsg->fast_iov,
		.max_len = min_not_zero(sr->len, INT_MAX),
		.nr_iovs = 1,
	};

	if (kmsg->free_iov) {
		arg.nr_iovs = kmsg->free_iov_nr;
		arg.iovs = kmsg->free_iov;
		arg.mode = KBUF_MODE_FREE;
	}

	if (!(sr->flags & IORING_RECVSEND_BUNDLE))
		arg.nr_iovs = 1;
	else
		arg.mode |= KBUF_MODE_EXPAND;

	ret = io_buffers_select(req, &arg, issue_flags);
	if (unlikely(ret < 0))
		return ret;

	if (arg.iovs != &kmsg->fast_iov && arg.iovs != kmsg->free_iov) {
		kmsg->free_iov_nr = ret;
		kmsg->free_iov = arg.iovs;
		req->flags |= REQ_F_NEED_CLEANUP;
	}
	sr->len = arg.out_len;

	if (ret == 1) {
		sr->buf = arg.iovs[0].iov_base;
		ret = import_ubuf(ITER_SOURCE, sr->buf, sr->len,
				  &kmsg->msg.msg_iter);
		if (unlikely(ret))
			return ret;
	} else {
		iov_iter_init(&kmsg->msg.msg_iter, ITER_SOURCE,
			      arg.iovs, ret, arg.out_len);
	}

	return 0;
}

int io_send(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_async_msghdr *kmsg = req->async_data;
	struct socket *sock;
	unsigned flags;
	int min_ret = 0;
	int ret;

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;

	if (!(req->flags & REQ_F_POLLED) &&
	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
		return -EAGAIN;

	flags = sr->msg_flags;
	if (issue_flags & IO_URING_F_NONBLOCK)
		flags |= MSG_DONTWAIT;

retry_bundle:
	if (io_do_buffer_select(req)) {
		ret = io_send_select_buffer(req, issue_flags, kmsg);
		if (ret)
			return ret;
	}

	/*
	 * If MSG_WAITALL is set, or this is a bundle send, then we need
	 * the full amount. If just bundle is set, if we do a short send
	 * then we complete the bundle sequence rather than continue on.
	 */
	if (flags & MSG_WAITALL || sr->flags & IORING_RECVSEND_BUNDLE)
		min_ret = iov_iter_count(&kmsg->msg.msg_iter);

	flags &= ~MSG_INTERNAL_SENDMSG_FLAGS;
	kmsg->msg.msg_flags = flags;
	ret = sock_sendmsg(sock, &kmsg->msg);
	if (ret < min_ret) {
		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
			return -EAGAIN;

		if (ret > 0 && io_net_retry(sock, flags)) {
			sr->len -= ret;
			sr->buf += ret;
			sr->done_io += ret;
			req->flags |= REQ_F_BL_NO_RECYCLE;
			return -EAGAIN;
		}
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	}
	if (ret >= 0)
		ret += sr->done_io;
	else if (sr->done_io)
		ret = sr->done_io;

	if (!io_send_finish(req, &ret, kmsg, issue_flags))
		goto retry_bundle;

	io_req_msg_cleanup(req, issue_flags);
	return ret;
}

static int io_recvmsg_mshot_prep(struct io_kiocb *req,
				 struct io_async_msghdr *iomsg,
				 int namelen, size_t controllen)
{
	if ((req->flags & (REQ_F_APOLL_MULTISHOT|REQ_F_BUFFER_SELECT)) ==
	    (REQ_F_APOLL_MULTISHOT|REQ_F_BUFFER_SELECT)) {
		int hdr;

		if (unlikely(namelen < 0))
			return -EOVERFLOW;
		if (check_add_overflow(sizeof(struct io_uring_recvmsg_out),
				       namelen, &hdr))
			return -EOVERFLOW;
		if (check_add_overflow(hdr, controllen, &hdr))
			return -EOVERFLOW;

		iomsg->namelen = namelen;
		iomsg->controllen = controllen;
		return 0;
	}

	return 0;
}

static int io_recvmsg_copy_hdr(struct io_kiocb *req,
			       struct io_async_msghdr *iomsg)
{
	struct user_msghdr msg;
	int ret;

	iomsg->msg.msg_name = &iomsg->addr;
	iomsg->msg.msg_iter.nr_segs = 0;

#ifdef CONFIG_COMPAT
	if (unlikely(req->ctx->compat)) {
		struct compat_msghdr cmsg;

		ret = io_compat_msg_copy_hdr(req, iomsg, &cmsg, ITER_DEST);
		if (unlikely(ret))
			return ret;

		ret = __get_compat_msghdr(&iomsg->msg, &cmsg, &iomsg->uaddr);
		if (unlikely(ret))
			return ret;

		return io_recvmsg_mshot_prep(req, iomsg, cmsg.msg_namelen,
					     cmsg.msg_controllen);
	}
#endif

	ret = io_msg_copy_hdr(req, iomsg, &msg, ITER_DEST);
	if (unlikely(ret))
		return ret;

	ret = __copy_msghdr(&iomsg->msg, &msg, &iomsg->uaddr);
	if (unlikely(ret))
		return ret;

	return io_recvmsg_mshot_prep(req, iomsg, msg.msg_namelen,
				     msg.msg_controllen);
}

static int io_recvmsg_prep_setup(struct io_kiocb *req)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_async_msghdr *kmsg;
	int ret;

	kmsg = io_msg_alloc_async(req);
	if (unlikely(!kmsg))
		return -ENOMEM;

	if (req->opcode == IORING_OP_RECV) {
		kmsg->msg.msg_name = NULL;
		kmsg->msg.msg_namelen = 0;
		kmsg->msg.msg_inq = 0;
		kmsg->msg.msg_control = NULL;
		kmsg->msg.msg_get_inq = 1;
		kmsg->msg.msg_controllen = 0;
		kmsg->msg.msg_iocb = NULL;
		kmsg->msg.msg_ubuf = NULL;

		if (!io_do_buffer_select(req)) {
			ret = import_ubuf(ITER_DEST, sr->buf, sr->len,
					  &kmsg->msg.msg_iter);
			if (unlikely(ret))
				return ret;
		}
		return 0;
	}

	ret = io_recvmsg_copy_hdr(req, kmsg);
	if (!ret)
		req->flags |= REQ_F_NEED_CLEANUP;
	return ret;
}

#define RECVMSG_FLAGS (IORING_RECVSEND_POLL_FIRST | IORING_RECV_MULTISHOT | \
		       IORING_RECVSEND_BUNDLE)

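/*
 * Note: a multishot receive keeps posting CQEs flagged IORING_CQE_F_MORE for
 * as long as io_recv_finish() lets it continue; the completion that ends the
 * series is posted without IORING_CQE_F_MORE.
 */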
int io_recvmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);

	sr->done_io = 0;

	if (unlikely(sqe->file_index || sqe->addr2))
		return -EINVAL;

	sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
	sr->len = READ_ONCE(sqe->len);
	sr->flags = READ_ONCE(sqe->ioprio);
	if (sr->flags & ~RECVMSG_FLAGS)
		return -EINVAL;
	sr->msg_flags = READ_ONCE(sqe->msg_flags);
	if (sr->msg_flags & MSG_DONTWAIT)
		req->flags |= REQ_F_NOWAIT;
	if (sr->msg_flags & MSG_ERRQUEUE)
		req->flags |= REQ_F_CLEAR_POLLIN;
	if (req->flags & REQ_F_BUFFER_SELECT) {
		/*
		 * Store the buffer group for this multishot receive separately,
		 * as if we end up doing an io-wq based issue that selects a
		 * buffer, it has to be committed immediately and that will
		 * clear ->buf_list. This means we lose the link to the buffer
		 * list, and the eventual buffer put on completion then cannot
		 * restore it.
		 */
		sr->buf_group = req->buf_index;
		req->buf_list = NULL;
	}
	if (sr->flags & IORING_RECV_MULTISHOT) {
		if (!(req->flags & REQ_F_BUFFER_SELECT))
			return -EINVAL;
		if (sr->msg_flags & MSG_WAITALL)
			return -EINVAL;
		if (req->opcode == IORING_OP_RECV && sr->len)
			return -EINVAL;
		req->flags |= REQ_F_APOLL_MULTISHOT;
	}
	if (sr->flags & IORING_RECVSEND_BUNDLE) {
		if (req->opcode == IORING_OP_RECVMSG)
			return -EINVAL;
	}

#ifdef CONFIG_COMPAT
	if (req->ctx->compat)
		sr->msg_flags |= MSG_CMSG_COMPAT;
#endif
	sr->nr_multishot_loops = 0;
	return io_recvmsg_prep_setup(req);
}

/*
 * Finishes io_recv and io_recvmsg.
 *
 * Returns true if it is actually finished, or false if it should run
 * again (for multishot).
 */
static inline bool io_recv_finish(struct io_kiocb *req, int *ret,
				  struct io_async_msghdr *kmsg,
				  bool mshot_finished, unsigned issue_flags)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	unsigned int cflags = 0;

	if (kmsg->msg.msg_inq > 0)
		cflags |= IORING_CQE_F_SOCK_NONEMPTY;

	if (sr->flags & IORING_RECVSEND_BUNDLE) {
		cflags |= io_put_kbufs(req, *ret, io_bundle_nbufs(kmsg, *ret),
				       issue_flags);
		/* bundle with no more immediate buffers, we're done */
		if (req->flags & REQ_F_BL_EMPTY)
			goto finish;
	} else {
		cflags |= io_put_kbuf(req, *ret, issue_flags);
	}

	/*
	 * Fill CQE for this receive and see if we should keep trying to
	 * receive from this socket.
	 */
	if ((req->flags & REQ_F_APOLL_MULTISHOT) && !mshot_finished &&
	    io_req_post_cqe(req, *ret, cflags | IORING_CQE_F_MORE)) {
		int mshot_retry_ret = IOU_ISSUE_SKIP_COMPLETE;

		io_mshot_prep_retry(req, kmsg);
		/* Known not-empty or unknown state, retry */
		if (cflags & IORING_CQE_F_SOCK_NONEMPTY || kmsg->msg.msg_inq < 0) {
			if (sr->nr_multishot_loops++ < MULTISHOT_MAX_RETRY)
				return false;
			/* mshot retries exceeded, force a requeue */
			sr->nr_multishot_loops = 0;
			mshot_retry_ret = IOU_REQUEUE;
		}
		if (issue_flags & IO_URING_F_MULTISHOT)
			*ret = mshot_retry_ret;
		else
			*ret = -EAGAIN;
		return true;
	}

	/* Finish the request / stop multishot. */
finish:
	io_req_set_res(req, *ret, cflags);

	if (issue_flags & IO_URING_F_MULTISHOT)
		*ret = IOU_STOP_MULTISHOT;
	else
		*ret = IOU_OK;
	io_req_msg_cleanup(req, issue_flags);
	return true;
}

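/*
 * For a multishot recvmsg the selected provided buffer is laid out as:
 * struct io_uring_recvmsg_out, then namelen bytes reserved for the source
 * address, then controllen bytes of control data, with the payload following
 * that header area (see io_recvmsg_multishot() below for the copy-out).
 */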
static int io_recvmsg_prep_multishot(struct io_async_msghdr *kmsg,
				     struct io_sr_msg *sr, void __user **buf,
				     size_t *len)
{
	unsigned long ubuf = (unsigned long) *buf;
	unsigned long hdr;

	hdr = sizeof(struct io_uring_recvmsg_out) + kmsg->namelen +
		kmsg->controllen;
	if (*len < hdr)
		return -EFAULT;

	if (kmsg->controllen) {
		unsigned long control = ubuf + hdr - kmsg->controllen;

		kmsg->msg.msg_control_user = (void __user *) control;
		kmsg->msg.msg_controllen = kmsg->controllen;
	}

	sr->buf = *buf; /* stash for later copy */
	*buf = (void __user *) (ubuf + hdr);
	kmsg->payloadlen = *len = *len - hdr;
	return 0;
}

struct io_recvmsg_multishot_hdr {
	struct io_uring_recvmsg_out msg;
	struct sockaddr_storage addr;
};

static int io_recvmsg_multishot(struct socket *sock, struct io_sr_msg *io,
				struct io_async_msghdr *kmsg,
				unsigned int flags, bool *finished)
{
	int err;
	int copy_len;
	struct io_recvmsg_multishot_hdr hdr;

	if (kmsg->namelen)
		kmsg->msg.msg_name = &hdr.addr;
	kmsg->msg.msg_flags = flags & (MSG_CMSG_CLOEXEC|MSG_CMSG_COMPAT);
	kmsg->msg.msg_namelen = 0;

	if (sock->file->f_flags & O_NONBLOCK)
		flags |= MSG_DONTWAIT;

	err = sock_recvmsg(sock, &kmsg->msg, flags);
	*finished = err <= 0;
	if (err < 0)
		return err;

	hdr.msg = (struct io_uring_recvmsg_out) {
		.controllen = kmsg->controllen - kmsg->msg.msg_controllen,
		.flags = kmsg->msg.msg_flags & ~MSG_CMSG_COMPAT
	};

	hdr.msg.payloadlen = err;
	if (err > kmsg->payloadlen)
		err = kmsg->payloadlen;

	copy_len = sizeof(struct io_uring_recvmsg_out);
	if (kmsg->msg.msg_namelen > kmsg->namelen)
		copy_len += kmsg->namelen;
	else
		copy_len += kmsg->msg.msg_namelen;

	/*
	 * "fromlen shall refer to the value before truncation.."
	 *			1003.1g
	 */
	hdr.msg.namelen = kmsg->msg.msg_namelen;

	/* ensure that there is no gap between hdr and sockaddr_storage */
	BUILD_BUG_ON(offsetof(struct io_recvmsg_multishot_hdr, addr) !=
		     sizeof(struct io_uring_recvmsg_out));
	if (copy_to_user(io->buf, &hdr, copy_len)) {
		*finished = true;
		return -EFAULT;
	}

	return sizeof(struct io_uring_recvmsg_out) + kmsg->namelen +
			kmsg->controllen + err;
}

int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_async_msghdr *kmsg = req->async_data;
	struct socket *sock;
	unsigned flags;
	int ret, min_ret = 0;
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
	bool mshot_finished = true;

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;

	if (!(req->flags & REQ_F_POLLED) &&
	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
		return -EAGAIN;

	flags = sr->msg_flags;
	if (force_nonblock)
		flags |= MSG_DONTWAIT;

retry_multishot:
	if (io_do_buffer_select(req)) {
		void __user *buf;
		size_t len = sr->len;

		buf = io_buffer_select(req, &len, issue_flags);
		if (!buf)
			return -ENOBUFS;

		if (req->flags & REQ_F_APOLL_MULTISHOT) {
			ret = io_recvmsg_prep_multishot(kmsg, sr, &buf, &len);
			if (ret) {
				io_kbuf_recycle(req, issue_flags);
				return ret;
			}
		}

		iov_iter_ubuf(&kmsg->msg.msg_iter, ITER_DEST, buf, len);
	}

	kmsg->msg.msg_get_inq = 1;
	kmsg->msg.msg_inq = -1;
	if (req->flags & REQ_F_APOLL_MULTISHOT) {
		ret = io_recvmsg_multishot(sock, sr, kmsg, flags,
					   &mshot_finished);
	} else {
		/* disable partial retry for recvmsg with cmsg attached */
		if (flags & MSG_WAITALL && !kmsg->msg.msg_controllen)
			min_ret = iov_iter_count(&kmsg->msg.msg_iter);

		ret = __sys_recvmsg_sock(sock, &kmsg->msg, sr->umsg,
					 kmsg->uaddr, flags);
	}

	if (ret < min_ret) {
		if (ret == -EAGAIN && force_nonblock) {
			if (issue_flags & IO_URING_F_MULTISHOT) {
				io_kbuf_recycle(req, issue_flags);
				return IOU_ISSUE_SKIP_COMPLETE;
			}
			return -EAGAIN;
		}
		if (ret > 0 && io_net_retry(sock, flags)) {
			sr->done_io += ret;
			req->flags |= REQ_F_BL_NO_RECYCLE;
			return -EAGAIN;
		}
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	} else if ((flags & MSG_WAITALL) && (kmsg->msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))) {
		req_set_fail(req);
	}

	if (ret > 0)
		ret += sr->done_io;
	else if (sr->done_io)
		ret = sr->done_io;
	else
		io_kbuf_recycle(req, issue_flags);

	if (!io_recv_finish(req, &ret, kmsg, mshot_finished, issue_flags))
		goto retry_multishot;

	return ret;
}

static int io_recv_buf_select(struct io_kiocb *req, struct io_async_msghdr *kmsg,
			      size_t *len, unsigned int issue_flags)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	int ret;

	/*
	 * If the ring isn't locked, then don't use the peek interface
	 * to grab multiple buffers as we will lock/unlock between
	 * this selection and posting the buffers.
	 */
	if (!(issue_flags & IO_URING_F_UNLOCKED) &&
	    sr->flags & IORING_RECVSEND_BUNDLE) {
		struct buf_sel_arg arg = {
			.iovs = &kmsg->fast_iov,
			.nr_iovs = 1,
			.mode = KBUF_MODE_EXPAND,
		};

		if (kmsg->free_iov) {
			arg.nr_iovs = kmsg->free_iov_nr;
			arg.iovs = kmsg->free_iov;
			arg.mode |= KBUF_MODE_FREE;
		}

		if (kmsg->msg.msg_inq > 0)
			arg.max_len = min_not_zero(sr->len, kmsg->msg.msg_inq);

		ret = io_buffers_peek(req, &arg);
		if (unlikely(ret < 0))
			return ret;

		/* special case 1 vec, can be a fast path */
		if (ret == 1) {
			sr->buf = arg.iovs[0].iov_base;
			sr->len = arg.iovs[0].iov_len;
			goto map_ubuf;
		}
		iov_iter_init(&kmsg->msg.msg_iter, ITER_DEST, arg.iovs, ret,
			      arg.out_len);
		if (arg.iovs != &kmsg->fast_iov && arg.iovs != kmsg->free_iov) {
			kmsg->free_iov_nr = ret;
			kmsg->free_iov = arg.iovs;
			req->flags |= REQ_F_NEED_CLEANUP;
		}
	} else {
		void __user *buf;

		*len = sr->len;
		buf = io_buffer_select(req, len, issue_flags);
		if (!buf)
			return -ENOBUFS;
		sr->buf = buf;
		sr->len = *len;
map_ubuf:
		ret = import_ubuf(ITER_DEST, sr->buf, sr->len,
				  &kmsg->msg.msg_iter);
		if (unlikely(ret))
			return ret;
	}

	return 0;
}

int io_recv(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_async_msghdr *kmsg = req->async_data;
	struct socket *sock;
	unsigned flags;
	int ret, min_ret = 0;
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
	size_t len = sr->len;
	bool mshot_finished;

	if (!(req->flags & REQ_F_POLLED) &&
	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
		return -EAGAIN;

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;

	flags = sr->msg_flags;
	if (force_nonblock)
		flags |= MSG_DONTWAIT;

retry_multishot:
	if (io_do_buffer_select(req)) {
		ret = io_recv_buf_select(req, kmsg, &len, issue_flags);
		if (unlikely(ret)) {
			kmsg->msg.msg_inq = -1;
			goto out_free;
		}
		sr->buf = NULL;
	}

	kmsg->msg.msg_flags = 0;
	kmsg->msg.msg_inq = -1;

	if (flags & MSG_WAITALL)
		min_ret = iov_iter_count(&kmsg->msg.msg_iter);

	ret = sock_recvmsg(sock, &kmsg->msg, flags);
	if (ret < min_ret) {
		if (ret == -EAGAIN && force_nonblock) {
			if (issue_flags & IO_URING_F_MULTISHOT) {
				io_kbuf_recycle(req, issue_flags);
				return IOU_ISSUE_SKIP_COMPLETE;
			}

			return -EAGAIN;
		}
		if (ret > 0 && io_net_retry(sock, flags)) {
			sr->len -= ret;
			sr->buf += ret;
			sr->done_io += ret;
			req->flags |= REQ_F_BL_NO_RECYCLE;
			return -EAGAIN;
		}
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	} else if ((flags & MSG_WAITALL) && (kmsg->msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))) {
out_free:
		req_set_fail(req);
	}

	mshot_finished = ret <= 0;
	if (ret > 0)
		ret += sr->done_io;
	else if (sr->done_io)
		ret = sr->done_io;
	else
		io_kbuf_recycle(req, issue_flags);

	if (!io_recv_finish(req, &ret, kmsg, mshot_finished, issue_flags))
		goto retry_multishot;

	return ret;
}

void io_send_zc_cleanup(struct io_kiocb *req)
{
	struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_async_msghdr *io = req->async_data;

	if (req_has_async_data(req))
		io_netmsg_iovec_free(io);
	if (zc->notif) {
		io_notif_flush(zc->notif);
		zc->notif = NULL;
	}
}

#define IO_ZC_FLAGS_COMMON (IORING_RECVSEND_POLL_FIRST | IORING_RECVSEND_FIXED_BUF)
#define IO_ZC_FLAGS_VALID  (IO_ZC_FLAGS_COMMON | IORING_SEND_ZC_REPORT_USAGE)

int io_send_zc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_ring_ctx *ctx = req->ctx;
	struct io_kiocb *notif;

	zc->done_io = 0;
	req->flags |= REQ_F_POLL_NO_LAZY;

	if (unlikely(READ_ONCE(sqe->__pad2[0]) || READ_ONCE(sqe->addr3)))
		return -EINVAL;
	/* we don't support IOSQE_CQE_SKIP_SUCCESS just yet */
	if (req->flags & REQ_F_CQE_SKIP)
		return -EINVAL;

	notif = zc->notif = io_alloc_notif(ctx);
	if (!notif)
		return -ENOMEM;
	notif->cqe.user_data = req->cqe.user_data;
	notif->cqe.res = 0;
	notif->cqe.flags = IORING_CQE_F_NOTIF;
	req->flags |= REQ_F_NEED_CLEANUP;

	zc->flags = READ_ONCE(sqe->ioprio);
	if (unlikely(zc->flags & ~IO_ZC_FLAGS_COMMON)) {
		if (zc->flags & ~IO_ZC_FLAGS_VALID)
			return -EINVAL;
		if (zc->flags & IORING_SEND_ZC_REPORT_USAGE) {
			struct io_notif_data *nd = io_notif_to_data(notif);

			nd->zc_report = true;
			nd->zc_used = false;
			nd->zc_copied = false;
		}
	}

	if (req->opcode != IORING_OP_SEND_ZC) {
		if (unlikely(sqe->addr2 || sqe->file_index))
			return -EINVAL;
		if (unlikely(zc->flags & IORING_RECVSEND_FIXED_BUF))
			return -EINVAL;
	}

	zc->len = READ_ONCE(sqe->len);
	zc->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL | MSG_ZEROCOPY;
	zc->buf_index = READ_ONCE(sqe->buf_index);
	if (zc->msg_flags & MSG_DONTWAIT)
		req->flags |= REQ_F_NOWAIT;

#ifdef CONFIG_COMPAT
	if (req->ctx->compat)
		zc->msg_flags |= MSG_CMSG_COMPAT;
#endif
	if (unlikely(!io_msg_alloc_async(req)))
		return -ENOMEM;
	if (req->opcode != IORING_OP_SENDMSG_ZC)
		return io_send_setup(req, sqe);
	return io_sendmsg_setup(req, sqe);
}

static int io_sg_from_iter_iovec(struct sk_buff *skb,
				 struct iov_iter *from, size_t length)
{
	skb_zcopy_downgrade_managed(skb);
	return zerocopy_fill_skb_from_iter(skb, from, length);
}

static int io_sg_from_iter(struct sk_buff *skb,
			   struct iov_iter *from, size_t length)
{
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	int frag = shinfo->nr_frags;
	int ret = 0;
	struct bvec_iter bi;
	ssize_t copied = 0;
	unsigned long truesize = 0;

	if (!frag)
		shinfo->flags |= SKBFL_MANAGED_FRAG_REFS;
	else if (unlikely(!skb_zcopy_managed(skb)))
		return zerocopy_fill_skb_from_iter(skb, from, length);

	bi.bi_size = min(from->count, length);
	bi.bi_bvec_done = from->iov_offset;
	bi.bi_idx = 0;

	while (bi.bi_size && frag < MAX_SKB_FRAGS) {
		struct bio_vec v = mp_bvec_iter_bvec(from->bvec, bi);

		copied += v.bv_len;
		truesize += PAGE_ALIGN(v.bv_len + v.bv_offset);
		__skb_fill_page_desc_noacc(shinfo, frag++, v.bv_page,
					   v.bv_offset, v.bv_len);
		bvec_iter_advance_single(from->bvec, &bi, v.bv_len);
	}
	if (bi.bi_size)
		ret = -EMSGSIZE;

	shinfo->nr_frags = frag;
	from->bvec += bi.bi_idx;
	from->nr_segs -= bi.bi_idx;
	from->count -= copied;
	from->iov_offset = bi.bi_bvec_done;

	skb->data_len += copied;
	skb->len += copied;
	skb->truesize += truesize;
	return ret;
}

static int io_send_zc_import(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_async_msghdr *kmsg = req->async_data;
	int ret;

	if (sr->flags & IORING_RECVSEND_FIXED_BUF) {
		struct io_ring_ctx *ctx = req->ctx;
		struct io_rsrc_node *node;

		ret = -EFAULT;
		io_ring_submit_lock(ctx, issue_flags);
		node = io_rsrc_node_lookup(&ctx->buf_table, sr->buf_index);
		if (node) {
			io_req_assign_buf_node(sr->notif, node);
			ret = 0;
		}
		io_ring_submit_unlock(ctx, issue_flags);

		if (unlikely(ret))
			return ret;

		ret = io_import_fixed(ITER_SOURCE, &kmsg->msg.msg_iter,
				      node->buf, (u64)(uintptr_t)sr->buf,
				      sr->len);
		if (unlikely(ret))
			return ret;
		kmsg->msg.sg_from_iter = io_sg_from_iter;
	} else {
		ret = import_ubuf(ITER_SOURCE, sr->buf, sr->len, &kmsg->msg.msg_iter);
		if (unlikely(ret))
			return ret;
		ret = io_notif_account_mem(sr->notif, sr->len);
		if (unlikely(ret))
			return ret;
		kmsg->msg.sg_from_iter = io_sg_from_iter_iovec;
	}

	return ret;
}

int io_send_zc(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_async_msghdr *kmsg = req->async_data;
	struct socket *sock;
	unsigned msg_flags;
	int ret, min_ret = 0;

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;
	if (!test_bit(SOCK_SUPPORT_ZC, &sock->flags))
		return -EOPNOTSUPP;

	if (!(req->flags & REQ_F_POLLED) &&
	    (zc->flags & IORING_RECVSEND_POLL_FIRST))
		return -EAGAIN;

	if (!zc->done_io) {
		ret = io_send_zc_import(req, issue_flags);
		if (unlikely(ret))
			return ret;
	}

	msg_flags = zc->msg_flags;
	if (issue_flags & IO_URING_F_NONBLOCK)
		msg_flags |= MSG_DONTWAIT;
	if (msg_flags & MSG_WAITALL)
		min_ret = iov_iter_count(&kmsg->msg.msg_iter);
	msg_flags &= ~MSG_INTERNAL_SENDMSG_FLAGS;

	kmsg->msg.msg_flags = msg_flags;
	kmsg->msg.msg_ubuf = &io_notif_to_data(zc->notif)->uarg;
	ret = sock_sendmsg(sock, &kmsg->msg);

	if (unlikely(ret < min_ret)) {
		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
			return -EAGAIN;

		if (ret > 0 && io_net_retry(sock, kmsg->msg.msg_flags)) {
			zc->len -= ret;
			zc->buf += ret;
			zc->done_io += ret;
			req->flags |= REQ_F_BL_NO_RECYCLE;
			return -EAGAIN;
		}
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	}

	if (ret >= 0)
		ret += zc->done_io;
	else if (zc->done_io)
		ret = zc->done_io;

	/*
	 * If we're in io-wq we can't rely on tw ordering guarantees, defer
	 * flushing notif to io_send_zc_cleanup()
	 */
	if (!(issue_flags & IO_URING_F_UNLOCKED)) {
		io_notif_flush(zc->notif);
		io_req_msg_cleanup(req, 0);
	}
	io_req_set_res(req, ret, IORING_CQE_F_MORE);
	return IOU_OK;
}

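/*
 * Note: like io_send_zc() above, io_sendmsg_zc() completes with
 * IORING_CQE_F_MORE set; a second CQE carrying IORING_CQE_F_NOTIF is posted
 * later, once the kernel is done with the user buffer. The notification set
 * up in io_send_zc_prep() is flushed either here or in io_send_zc_cleanup().
 */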
int io_sendmsg_zc(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_async_msghdr *kmsg = req->async_data;
	struct socket *sock;
	unsigned flags;
	int ret, min_ret = 0;

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;
	if (!test_bit(SOCK_SUPPORT_ZC, &sock->flags))
		return -EOPNOTSUPP;

	if (!(req->flags & REQ_F_POLLED) &&
	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
		return -EAGAIN;

	flags = sr->msg_flags;
	if (issue_flags & IO_URING_F_NONBLOCK)
		flags |= MSG_DONTWAIT;
	if (flags & MSG_WAITALL)
		min_ret = iov_iter_count(&kmsg->msg.msg_iter);

	kmsg->msg.msg_control_user = sr->msg_control;
	kmsg->msg.msg_ubuf = &io_notif_to_data(sr->notif)->uarg;
	kmsg->msg.sg_from_iter = io_sg_from_iter_iovec;
	ret = __sys_sendmsg_sock(sock, &kmsg->msg, flags);

	if (unlikely(ret < min_ret)) {
		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
			return -EAGAIN;

		if (ret > 0 && io_net_retry(sock, flags)) {
			sr->done_io += ret;
			req->flags |= REQ_F_BL_NO_RECYCLE;
			return -EAGAIN;
		}
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	}

	if (ret >= 0)
		ret += sr->done_io;
	else if (sr->done_io)
		ret = sr->done_io;

	/*
	 * If we're in io-wq we can't rely on tw ordering guarantees, defer
	 * flushing notif to io_send_zc_cleanup()
	 */
	if (!(issue_flags & IO_URING_F_UNLOCKED)) {
		io_notif_flush(sr->notif);
		io_req_msg_cleanup(req, 0);
	}
	io_req_set_res(req, ret, IORING_CQE_F_MORE);
	return IOU_OK;
}

void io_sendrecv_fail(struct io_kiocb *req)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);

	if (sr->done_io)
		req->cqe.res = sr->done_io;

	if ((req->flags & REQ_F_NEED_CLEANUP) &&
	    (req->opcode == IORING_OP_SEND_ZC || req->opcode == IORING_OP_SENDMSG_ZC))
		req->cqe.flags |= IORING_CQE_F_MORE;
}

#define ACCEPT_FLAGS (IORING_ACCEPT_MULTISHOT | IORING_ACCEPT_DONTWAIT | \
		      IORING_ACCEPT_POLL_FIRST)

int io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_accept *accept = io_kiocb_to_cmd(req, struct io_accept);

	if (sqe->len || sqe->buf_index)
		return -EINVAL;

	accept->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
	accept->addr_len = u64_to_user_ptr(READ_ONCE(sqe->addr2));
	accept->flags = READ_ONCE(sqe->accept_flags);
	accept->nofile = rlimit(RLIMIT_NOFILE);
	accept->iou_flags = READ_ONCE(sqe->ioprio);
	if (accept->iou_flags & ~ACCEPT_FLAGS)
		return -EINVAL;

	accept->file_slot = READ_ONCE(sqe->file_index);
	if (accept->file_slot) {
		if (accept->flags & SOCK_CLOEXEC)
			return -EINVAL;
		if (accept->iou_flags & IORING_ACCEPT_MULTISHOT &&
		    accept->file_slot != IORING_FILE_INDEX_ALLOC)
			return -EINVAL;
	}
	if (accept->flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK))
		return -EINVAL;
	if (SOCK_NONBLOCK != O_NONBLOCK && (accept->flags & SOCK_NONBLOCK))
		accept->flags = (accept->flags & ~SOCK_NONBLOCK) | O_NONBLOCK;
	if (accept->iou_flags & IORING_ACCEPT_MULTISHOT)
		req->flags |= REQ_F_APOLL_MULTISHOT;
	if (accept->iou_flags & IORING_ACCEPT_DONTWAIT)
		req->flags |= REQ_F_NOWAIT;
	return 0;
}

int io_accept(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_accept *accept = io_kiocb_to_cmd(req, struct io_accept);
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
	bool fixed = !!accept->file_slot;
	struct proto_accept_arg arg = {
		.flags = force_nonblock ? O_NONBLOCK : 0,
	};
	struct file *file;
	unsigned cflags;
	int ret, fd;

	if (!(req->flags & REQ_F_POLLED) &&
	    accept->iou_flags & IORING_ACCEPT_POLL_FIRST)
		return -EAGAIN;

retry:
	if (!fixed) {
		fd = __get_unused_fd_flags(accept->flags, accept->nofile);
		if (unlikely(fd < 0))
			return fd;
	}
	arg.err = 0;
	arg.is_empty = -1;
	file = do_accept(req->file, &arg, accept->addr, accept->addr_len,
			 accept->flags);
	if (IS_ERR(file)) {
		if (!fixed)
			put_unused_fd(fd);
		ret = PTR_ERR(file);
		if (ret == -EAGAIN && force_nonblock &&
		    !(accept->iou_flags & IORING_ACCEPT_DONTWAIT)) {
			/*
			 * if it's multishot and polled, we don't need to
			 * return EAGAIN to arm the poll infra since it
			 * has already been done
			 */
			if (issue_flags & IO_URING_F_MULTISHOT)
				return IOU_ISSUE_SKIP_COMPLETE;
			return ret;
		}
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	} else if (!fixed) {
		fd_install(fd, file);
		ret = fd;
	} else {
		ret = io_fixed_fd_install(req, issue_flags, file,
					  accept->file_slot);
	}

	cflags = 0;
	if (!arg.is_empty)
		cflags |= IORING_CQE_F_SOCK_NONEMPTY;

	if (!(req->flags & REQ_F_APOLL_MULTISHOT)) {
		io_req_set_res(req, ret, cflags);
		return IOU_OK;
	}

	if (ret < 0)
		return ret;
	if (io_req_post_cqe(req, ret, cflags | IORING_CQE_F_MORE)) {
		if (cflags & IORING_CQE_F_SOCK_NONEMPTY || arg.is_empty == -1)
			goto retry;
		if (issue_flags & IO_URING_F_MULTISHOT)
			return IOU_ISSUE_SKIP_COMPLETE;
		return -EAGAIN;
	}

	io_req_set_res(req, ret, cflags);
	return IOU_STOP_MULTISHOT;
}

int io_socket_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_socket *sock = io_kiocb_to_cmd(req, struct io_socket);

	if (sqe->addr || sqe->rw_flags || sqe->buf_index)
		return -EINVAL;

	sock->domain = READ_ONCE(sqe->fd);
	sock->type = READ_ONCE(sqe->off);
	sock->protocol = READ_ONCE(sqe->len);
	sock->file_slot = READ_ONCE(sqe->file_index);
	sock->nofile = rlimit(RLIMIT_NOFILE);

	sock->flags = sock->type & ~SOCK_TYPE_MASK;
	if (sock->file_slot && (sock->flags & SOCK_CLOEXEC))
		return -EINVAL;
	if (sock->flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK))
		return -EINVAL;
	return 0;
}

int io_socket(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_socket *sock = io_kiocb_to_cmd(req, struct io_socket);
	bool fixed = !!sock->file_slot;
	struct file *file;
	int ret, fd;

	if (!fixed) {
		fd = __get_unused_fd_flags(sock->flags, sock->nofile);
		if (unlikely(fd < 0))
			return fd;
	}
	file = __sys_socket_file(sock->domain, sock->type, sock->protocol);
	if (IS_ERR(file)) {
		if (!fixed)
			put_unused_fd(fd);
		ret = PTR_ERR(file);
		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
			return -EAGAIN;
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	} else if (!fixed) {
		fd_install(fd, file);
		ret = fd;
	} else {
		ret = io_fixed_fd_install(req, issue_flags, file,
					  sock->file_slot);
	}
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}

int io_connect_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_connect *conn = io_kiocb_to_cmd(req, struct io_connect);
	struct io_async_msghdr *io;

	if (sqe->len || sqe->buf_index || sqe->rw_flags || sqe->splice_fd_in)
		return -EINVAL;

	conn->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
	conn->addr_len = READ_ONCE(sqe->addr2);
	conn->in_progress = conn->seen_econnaborted = false;

	io = io_msg_alloc_async(req);
	if (unlikely(!io))
		return -ENOMEM;

	return move_addr_to_kernel(conn->addr, conn->addr_len, &io->addr);
}

int io_connect(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_connect *connect = io_kiocb_to_cmd(req, struct io_connect);
	struct io_async_msghdr *io = req->async_data;
	unsigned file_flags;
	int ret;
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;

	if (unlikely(req->flags & REQ_F_FAIL)) {
		ret = -ECONNRESET;
		goto out;
	}

	file_flags = force_nonblock ? O_NONBLOCK : 0;

	ret = __sys_connect_file(req->file, &io->addr, connect->addr_len,
				 file_flags);
	if ((ret == -EAGAIN || ret == -EINPROGRESS || ret == -ECONNABORTED)
	    && force_nonblock) {
		if (ret == -EINPROGRESS) {
			connect->in_progress = true;
		} else if (ret == -ECONNABORTED) {
			if (connect->seen_econnaborted)
				goto out;
			connect->seen_econnaborted = true;
		}
		return -EAGAIN;
	}
	if (connect->in_progress) {
		/*
		 * At least bluetooth will return -EBADFD on a re-connect
		 * attempt, and it's (supposedly) also valid to get -EISCONN
		 * which means the previous result is good. For both of these,
		 * grab the sock_error() and use that for the completion.
		 */
		if (ret == -EBADFD || ret == -EISCONN)
			ret = sock_error(sock_from_file(req->file)->sk);
	}
	if (ret == -ERESTARTSYS)
		ret = -EINTR;
out:
	if (ret < 0)
		req_set_fail(req);
	io_req_msg_cleanup(req, issue_flags);
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}

int io_bind_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_bind *bind = io_kiocb_to_cmd(req, struct io_bind);
	struct sockaddr __user *uaddr;
	struct io_async_msghdr *io;

	if (sqe->len || sqe->buf_index || sqe->rw_flags || sqe->splice_fd_in)
		return -EINVAL;

	uaddr = u64_to_user_ptr(READ_ONCE(sqe->addr));
	bind->addr_len = READ_ONCE(sqe->addr2);

	io = io_msg_alloc_async(req);
	if (unlikely(!io))
		return -ENOMEM;
	return move_addr_to_kernel(uaddr, bind->addr_len, &io->addr);
}

int io_bind(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_bind *bind = io_kiocb_to_cmd(req, struct io_bind);
	struct io_async_msghdr *io = req->async_data;
	struct socket *sock;
	int ret;

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;

	ret = __sys_bind_socket(sock, &io->addr, bind->addr_len);
	if (ret < 0)
		req_set_fail(req);
	io_req_set_res(req, ret, 0);
	return 0;
}

int io_listen_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_listen *listen = io_kiocb_to_cmd(req, struct io_listen);

	if (sqe->addr || sqe->buf_index || sqe->rw_flags || sqe->splice_fd_in || sqe->addr2)
		return -EINVAL;

	listen->backlog = READ_ONCE(sqe->len);
	return 0;
}

int io_listen(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_listen *listen = io_kiocb_to_cmd(req, struct io_listen);
	struct socket *sock;
	int ret;

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;

	ret = __sys_listen_socket(sock, listen->backlog);
	if (ret < 0)
		req_set_fail(req);
	io_req_set_res(req, ret, 0);
	return 0;
}

void io_netmsg_cache_free(const void *entry)
{
	struct io_async_msghdr *kmsg = (struct io_async_msghdr *) entry;

	if (kmsg->free_iov)
		io_netmsg_iovec_free(kmsg);
	kfree(kmsg);
}
#endif